GNU Linux-libre 4.9-gnu1
drivers/infiniband/hw/hfi1/chip.c
1 /*
2  * Copyright(c) 2015, 2016 Intel Corporation.
3  *
4  * This file is provided under a dual BSD/GPLv2 license.  When using or
5  * redistributing this file, you may do so under either license.
6  *
7  * GPL LICENSE SUMMARY
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of version 2 of the GNU General Public License as
11  * published by the Free Software Foundation.
12  *
13  * This program is distributed in the hope that it will be useful, but
14  * WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
16  * General Public License for more details.
17  *
18  * BSD LICENSE
19  *
20  * Redistribution and use in source and binary forms, with or without
21  * modification, are permitted provided that the following conditions
22  * are met:
23  *
24  *  - Redistributions of source code must retain the above copyright
25  *    notice, this list of conditions and the following disclaimer.
26  *  - Redistributions in binary form must reproduce the above copyright
27  *    notice, this list of conditions and the following disclaimer in
28  *    the documentation and/or other materials provided with the
29  *    distribution.
30  *  - Neither the name of Intel Corporation nor the names of its
31  *    contributors may be used to endorse or promote products derived
32  *    from this software without specific prior written permission.
33  *
34  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
35  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
36  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
37  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
38  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
39  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
40  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
41  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
42  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
43  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
44  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
45  *
46  */
47
48 /*
49  * This file contains all of the code that is specific to the HFI chip
50  */
51
52 #include <linux/pci.h>
53 #include <linux/delay.h>
54 #include <linux/interrupt.h>
55 #include <linux/module.h>
56
57 #include "hfi.h"
58 #include "trace.h"
59 #include "mad.h"
60 #include "pio.h"
61 #include "sdma.h"
62 #include "eprom.h"
63 #include "efivar.h"
64 #include "platform.h"
65 #include "aspm.h"
66 #include "affinity.h"
67
68 #define NUM_IB_PORTS 1
69
70 uint kdeth_qp;
71 module_param_named(kdeth_qp, kdeth_qp, uint, S_IRUGO);
72 MODULE_PARM_DESC(kdeth_qp, "Set the KDETH queue pair prefix");
73
74 uint num_vls = HFI1_MAX_VLS_SUPPORTED;
75 module_param(num_vls, uint, S_IRUGO);
76 MODULE_PARM_DESC(num_vls, "Set number of Virtual Lanes to use (1-8)");
77
78 /*
79  * Default time to aggregate two 10K packets from the idle state
80  * (timer not running). The timer starts at the end of the first packet,
81  * so only the time for one 10K packet and header plus a bit extra is needed.
82  * 10 * 1024 + 64 header bytes = 10304 bytes
83  * 10304 bytes / 12.5 GB/s = 824.32 ns
84  */
85 uint rcv_intr_timeout = (824 + 16); /* 16 is for coalescing interrupt */
86 module_param(rcv_intr_timeout, uint, S_IRUGO);
87 MODULE_PARM_DESC(rcv_intr_timeout, "Receive interrupt mitigation timeout in ns");
88
89 uint rcv_intr_count = 16; /* same as qib */
90 module_param(rcv_intr_count, uint, S_IRUGO);
91 MODULE_PARM_DESC(rcv_intr_count, "Receive interrupt mitigation count");
92
93 ushort link_crc_mask = SUPPORTED_CRCS;
94 module_param(link_crc_mask, ushort, S_IRUGO);
95 MODULE_PARM_DESC(link_crc_mask, "CRCs to use on the link");
96
97 uint loopback;
98 module_param_named(loopback, loopback, uint, S_IRUGO);
99 MODULE_PARM_DESC(loopback, "Put into loopback mode (1 = serdes, 3 = external cable)");
100
101 /* Other driver tunables */
102 uint rcv_intr_dynamic = 1; /* enable dynamic mode for rcv int mitigation */
103 static ushort crc_14b_sideband = 1;
104 static uint use_flr = 1;
105 uint quick_linkup; /* skip LNI */
106
107 struct flag_table {
108         u64 flag;       /* the flag */
109         char *str;      /* description string */
110         u16 extra;      /* extra information */
111         u16 unused0;
112         u32 unused1;
113 };
114
115 /* str must be a string constant */
116 #define FLAG_ENTRY(str, extra, flag) {flag, str, extra}
117 #define FLAG_ENTRY0(str, flag) {flag, str, 0}
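/*
 * Illustrative expansion: FLAG_ENTRY0("CceCsrParityErr", mask) becomes
 * the initializer {mask, "CceCsrParityErr", 0}, filling the flag, str,
 * and extra fields of struct flag_table in that order.
 */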
118
119 /* Send Error Consequences */
120 #define SEC_WRITE_DROPPED       0x1
121 #define SEC_PACKET_DROPPED      0x2
122 #define SEC_SC_HALTED           0x4     /* per-context only */
123 #define SEC_SPC_FREEZE          0x8     /* per-HFI only */
124
125 #define DEFAULT_KRCVQS            2
126 #define MIN_KERNEL_KCTXTS         2
127 #define FIRST_KERNEL_KCTXT        1
128 /* sizes for both the QP and RSM map tables */
129 #define NUM_MAP_ENTRIES         256
130 #define NUM_MAP_REGS             32
131
132 /* Bit offset into the GUID which carries HFI id information */
133 #define GUID_HFI_INDEX_SHIFT     39
134
135 /* extract the emulation revision */
136 #define emulator_rev(dd) ((dd)->irev >> 8)
137 /* parallel and serial emulation versions are 3 and 4 respectively */
138 #define is_emulator_p(dd) ((((dd)->irev) & 0xf) == 3)
139 #define is_emulator_s(dd) ((((dd)->irev) & 0xf) == 4)
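/*
 * Example with a hypothetical irev of 0x0104: emulator_rev(dd) yields
 * 0x01, and the low nibble 0x4 marks a serial emulator, so
 * is_emulator_s(dd) is true and is_emulator_p(dd) is false.
 */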
140
141 /* RSM fields */
142
143 /* packet type */
144 #define IB_PACKET_TYPE         2ull
145 #define QW_SHIFT               6ull
146 /* QPN[7..1] */
147 #define QPN_WIDTH              7ull
148
149 /* LRH.BTH: QW 0, OFFSET 48 - for match */
150 #define LRH_BTH_QW             0ull
151 #define LRH_BTH_BIT_OFFSET     48ull
152 #define LRH_BTH_OFFSET(off)    ((LRH_BTH_QW << QW_SHIFT) | (off))
153 #define LRH_BTH_MATCH_OFFSET   LRH_BTH_OFFSET(LRH_BTH_BIT_OFFSET)
154 #define LRH_BTH_SELECT_OFFSET  LRH_BTH_OFFSET(LRH_BTH_BIT_OFFSET)
155 #define LRH_BTH_MASK           3ull
156 #define LRH_BTH_VALUE          2ull
157
158 /* LRH.SC[3..0] QW 0, OFFSET 56 - for match */
159 #define LRH_SC_QW              0ull
160 #define LRH_SC_BIT_OFFSET      56ull
161 #define LRH_SC_OFFSET(off)     ((LRH_SC_QW << QW_SHIFT) | (off))
162 #define LRH_SC_MATCH_OFFSET    LRH_SC_OFFSET(LRH_SC_BIT_OFFSET)
163 #define LRH_SC_MASK            128ull
164 #define LRH_SC_VALUE           0ull
165
166 /* SC[n..0] QW 0, OFFSET 60 - for select */
167 #define LRH_SC_SELECT_OFFSET  ((LRH_SC_QW << QW_SHIFT) | (60ull))
168
169 /* QPN[m+n:1] QW 1, OFFSET 1 */
170 #define QPN_SELECT_OFFSET      ((1ull << QW_SHIFT) | (1ull))
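/*
 * Worked example of the offset encoding above: each RSM offset packs the
 * quad-word index above bit QW_SHIFT and the bit offset within that QW
 * in the low six bits, so LRH_BTH_MATCH_OFFSET = (0 << 6) | 48 = 48 and
 * QPN_SELECT_OFFSET = (1 << 6) | 1 = 65.
 */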
171
172 /* defines to build power on SC2VL table */
173 #define SC2VL_VAL( \
174         num, \
175         sc0, sc0val, \
176         sc1, sc1val, \
177         sc2, sc2val, \
178         sc3, sc3val, \
179         sc4, sc4val, \
180         sc5, sc5val, \
181         sc6, sc6val, \
182         sc7, sc7val) \
183 ( \
184         ((u64)(sc0val) << SEND_SC2VLT##num##_SC##sc0##_SHIFT) | \
185         ((u64)(sc1val) << SEND_SC2VLT##num##_SC##sc1##_SHIFT) | \
186         ((u64)(sc2val) << SEND_SC2VLT##num##_SC##sc2##_SHIFT) | \
187         ((u64)(sc3val) << SEND_SC2VLT##num##_SC##sc3##_SHIFT) | \
188         ((u64)(sc4val) << SEND_SC2VLT##num##_SC##sc4##_SHIFT) | \
189         ((u64)(sc5val) << SEND_SC2VLT##num##_SC##sc5##_SHIFT) | \
190         ((u64)(sc6val) << SEND_SC2VLT##num##_SC##sc6##_SHIFT) | \
191         ((u64)(sc7val) << SEND_SC2VLT##num##_SC##sc7##_SHIFT)   \
192 )
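/*
 * Illustrative expansion: in SC2VL_VAL(0, 0, 0, 1, 1, ...), each sc<n>
 * argument is pasted into a SEND_SC2VLT0_SC<n>_SHIFT symbol and the
 * matching sc<n>val is shifted into place, so the macro ORs together a
 * single 64-bit CSR image assigning a VL to each of eight service
 * classes.
 */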
193
194 #define DC_SC_VL_VAL( \
195         range, \
196         e0, e0val, \
197         e1, e1val, \
198         e2, e2val, \
199         e3, e3val, \
200         e4, e4val, \
201         e5, e5val, \
202         e6, e6val, \
203         e7, e7val, \
204         e8, e8val, \
205         e9, e9val, \
206         e10, e10val, \
207         e11, e11val, \
208         e12, e12val, \
209         e13, e13val, \
210         e14, e14val, \
211         e15, e15val) \
212 ( \
213         ((u64)(e0val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e0##_SHIFT) | \
214         ((u64)(e1val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e1##_SHIFT) | \
215         ((u64)(e2val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e2##_SHIFT) | \
216         ((u64)(e3val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e3##_SHIFT) | \
217         ((u64)(e4val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e4##_SHIFT) | \
218         ((u64)(e5val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e5##_SHIFT) | \
219         ((u64)(e6val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e6##_SHIFT) | \
220         ((u64)(e7val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e7##_SHIFT) | \
221         ((u64)(e8val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e8##_SHIFT) | \
222         ((u64)(e9val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e9##_SHIFT) | \
223         ((u64)(e10val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e10##_SHIFT) | \
224         ((u64)(e11val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e11##_SHIFT) | \
225         ((u64)(e12val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e12##_SHIFT) | \
226         ((u64)(e13val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e13##_SHIFT) | \
227         ((u64)(e14val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e14##_SHIFT) | \
228         ((u64)(e15val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e15##_SHIFT) \
229 )
230
231 /* all CceStatus sub-block freeze bits */
232 #define ALL_FROZE (CCE_STATUS_SDMA_FROZE_SMASK \
233                         | CCE_STATUS_RXE_FROZE_SMASK \
234                         | CCE_STATUS_TXE_FROZE_SMASK \
235                         | CCE_STATUS_TXE_PIO_FROZE_SMASK)
236 /* all CceStatus sub-block TXE pause bits */
237 #define ALL_TXE_PAUSE (CCE_STATUS_TXE_PIO_PAUSED_SMASK \
238                         | CCE_STATUS_TXE_PAUSED_SMASK \
239                         | CCE_STATUS_SDMA_PAUSED_SMASK)
240 /* all CceStatus sub-block RXE pause bits */
241 #define ALL_RXE_PAUSE CCE_STATUS_RXE_PAUSED_SMASK
242
243 #define CNTR_MAX 0xFFFFFFFFFFFFFFFFULL
244 #define CNTR_32BIT_MAX 0x00000000FFFFFFFF
245
246 /*
247  * CCE Error flags.
248  */
249 static struct flag_table cce_err_status_flags[] = {
250 /* 0*/  FLAG_ENTRY0("CceCsrParityErr",
251                 CCE_ERR_STATUS_CCE_CSR_PARITY_ERR_SMASK),
252 /* 1*/  FLAG_ENTRY0("CceCsrReadBadAddrErr",
253                 CCE_ERR_STATUS_CCE_CSR_READ_BAD_ADDR_ERR_SMASK),
254 /* 2*/  FLAG_ENTRY0("CceCsrWriteBadAddrErr",
255                 CCE_ERR_STATUS_CCE_CSR_WRITE_BAD_ADDR_ERR_SMASK),
256 /* 3*/  FLAG_ENTRY0("CceTrgtAsyncFifoParityErr",
257                 CCE_ERR_STATUS_CCE_TRGT_ASYNC_FIFO_PARITY_ERR_SMASK),
258 /* 4*/  FLAG_ENTRY0("CceTrgtAccessErr",
259                 CCE_ERR_STATUS_CCE_TRGT_ACCESS_ERR_SMASK),
260 /* 5*/  FLAG_ENTRY0("CceRspdDataParityErr",
261                 CCE_ERR_STATUS_CCE_RSPD_DATA_PARITY_ERR_SMASK),
262 /* 6*/  FLAG_ENTRY0("CceCli0AsyncFifoParityErr",
263                 CCE_ERR_STATUS_CCE_CLI0_ASYNC_FIFO_PARITY_ERR_SMASK),
264 /* 7*/  FLAG_ENTRY0("CceCsrCfgBusParityErr",
265                 CCE_ERR_STATUS_CCE_CSR_CFG_BUS_PARITY_ERR_SMASK),
266 /* 8*/  FLAG_ENTRY0("CceCli2AsyncFifoParityErr",
267                 CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK),
268 /* 9*/  FLAG_ENTRY0("CceCli1AsyncFifoPioCrdtParityErr",
269             CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR_SMASK),
270 /*10*/  FLAG_ENTRY0("CceCli1AsyncFifoSdmaHdParityErr",
271             CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR_SMASK),
272 /*11*/  FLAG_ENTRY0("CceCli1AsyncFifoRxdmaParityError",
273             CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERROR_SMASK),
274 /*12*/  FLAG_ENTRY0("CceCli1AsyncFifoDbgParityError",
275                 CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERROR_SMASK),
276 /*13*/  FLAG_ENTRY0("PcicRetryMemCorErr",
277                 CCE_ERR_STATUS_PCIC_RETRY_MEM_COR_ERR_SMASK),
278 /*14*/  FLAG_ENTRY0("PcicRetrySotMemCorErr",
279                 CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_COR_ERR_SMASK),
280 /*15*/  FLAG_ENTRY0("PcicPostHdQCorErr",
281                 CCE_ERR_STATUS_PCIC_POST_HD_QCOR_ERR_SMASK),
282 /*16*/  FLAG_ENTRY0("PcicPostDatQCorErr",
283                 CCE_ERR_STATUS_PCIC_POST_DAT_QCOR_ERR_SMASK),
284 /*17*/  FLAG_ENTRY0("PcicCplHdQCorErr",
285                 CCE_ERR_STATUS_PCIC_CPL_HD_QCOR_ERR_SMASK),
286 /*18*/  FLAG_ENTRY0("PcicCplDatQCorErr",
287                 CCE_ERR_STATUS_PCIC_CPL_DAT_QCOR_ERR_SMASK),
288 /*19*/  FLAG_ENTRY0("PcicNPostHQParityErr",
289                 CCE_ERR_STATUS_PCIC_NPOST_HQ_PARITY_ERR_SMASK),
290 /*20*/  FLAG_ENTRY0("PcicNPostDatQParityErr",
291                 CCE_ERR_STATUS_PCIC_NPOST_DAT_QPARITY_ERR_SMASK),
292 /*21*/  FLAG_ENTRY0("PcicRetryMemUncErr",
293                 CCE_ERR_STATUS_PCIC_RETRY_MEM_UNC_ERR_SMASK),
294 /*22*/  FLAG_ENTRY0("PcicRetrySotMemUncErr",
295                 CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_UNC_ERR_SMASK),
296 /*23*/  FLAG_ENTRY0("PcicPostHdQUncErr",
297                 CCE_ERR_STATUS_PCIC_POST_HD_QUNC_ERR_SMASK),
298 /*24*/  FLAG_ENTRY0("PcicPostDatQUncErr",
299                 CCE_ERR_STATUS_PCIC_POST_DAT_QUNC_ERR_SMASK),
300 /*25*/  FLAG_ENTRY0("PcicCplHdQUncErr",
301                 CCE_ERR_STATUS_PCIC_CPL_HD_QUNC_ERR_SMASK),
302 /*26*/  FLAG_ENTRY0("PcicCplDatQUncErr",
303                 CCE_ERR_STATUS_PCIC_CPL_DAT_QUNC_ERR_SMASK),
304 /*27*/  FLAG_ENTRY0("PcicTransmitFrontParityErr",
305                 CCE_ERR_STATUS_PCIC_TRANSMIT_FRONT_PARITY_ERR_SMASK),
306 /*28*/  FLAG_ENTRY0("PcicTransmitBackParityErr",
307                 CCE_ERR_STATUS_PCIC_TRANSMIT_BACK_PARITY_ERR_SMASK),
308 /*29*/  FLAG_ENTRY0("PcicReceiveParityErr",
309                 CCE_ERR_STATUS_PCIC_RECEIVE_PARITY_ERR_SMASK),
310 /*30*/  FLAG_ENTRY0("CceTrgtCplTimeoutErr",
311                 CCE_ERR_STATUS_CCE_TRGT_CPL_TIMEOUT_ERR_SMASK),
312 /*31*/  FLAG_ENTRY0("LATriggered",
313                 CCE_ERR_STATUS_LA_TRIGGERED_SMASK),
314 /*32*/  FLAG_ENTRY0("CceSegReadBadAddrErr",
315                 CCE_ERR_STATUS_CCE_SEG_READ_BAD_ADDR_ERR_SMASK),
316 /*33*/  FLAG_ENTRY0("CceSegWriteBadAddrErr",
317                 CCE_ERR_STATUS_CCE_SEG_WRITE_BAD_ADDR_ERR_SMASK),
318 /*34*/  FLAG_ENTRY0("CceRcplAsyncFifoParityErr",
319                 CCE_ERR_STATUS_CCE_RCPL_ASYNC_FIFO_PARITY_ERR_SMASK),
320 /*35*/  FLAG_ENTRY0("CceRxdmaConvFifoParityErr",
321                 CCE_ERR_STATUS_CCE_RXDMA_CONV_FIFO_PARITY_ERR_SMASK),
322 /*36*/  FLAG_ENTRY0("CceMsixTableCorErr",
323                 CCE_ERR_STATUS_CCE_MSIX_TABLE_COR_ERR_SMASK),
324 /*37*/  FLAG_ENTRY0("CceMsixTableUncErr",
325                 CCE_ERR_STATUS_CCE_MSIX_TABLE_UNC_ERR_SMASK),
326 /*38*/  FLAG_ENTRY0("CceIntMapCorErr",
327                 CCE_ERR_STATUS_CCE_INT_MAP_COR_ERR_SMASK),
328 /*39*/  FLAG_ENTRY0("CceIntMapUncErr",
329                 CCE_ERR_STATUS_CCE_INT_MAP_UNC_ERR_SMASK),
330 /*40*/  FLAG_ENTRY0("CceMsixCsrParityErr",
331                 CCE_ERR_STATUS_CCE_MSIX_CSR_PARITY_ERR_SMASK),
332 /*41-63 reserved*/
333 };
334
335 /*
336  * Misc Error flags
337  */
338 #define MES(text) MISC_ERR_STATUS_MISC_##text##_ERR_SMASK
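/* e.g. MES(CSR_PARITY) pastes to MISC_ERR_STATUS_MISC_CSR_PARITY_ERR_SMASK */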
339 static struct flag_table misc_err_status_flags[] = {
340 /* 0*/  FLAG_ENTRY0("CSR_PARITY", MES(CSR_PARITY)),
341 /* 1*/  FLAG_ENTRY0("CSR_READ_BAD_ADDR", MES(CSR_READ_BAD_ADDR)),
342 /* 2*/  FLAG_ENTRY0("CSR_WRITE_BAD_ADDR", MES(CSR_WRITE_BAD_ADDR)),
343 /* 3*/  FLAG_ENTRY0("SBUS_WRITE_FAILED", MES(SBUS_WRITE_FAILED)),
344 /* 4*/  FLAG_ENTRY0("KEY_MISMATCH", MES(KEY_MISMATCH)),
345 /* 5*/  FLAG_ENTRY0("FW_AUTH_FAILED", MES(FW_AUTH_FAILED)),
346 /* 6*/  FLAG_ENTRY0("EFUSE_CSR_PARITY", MES(EFUSE_CSR_PARITY)),
347 /* 7*/  FLAG_ENTRY0("EFUSE_READ_BAD_ADDR", MES(EFUSE_READ_BAD_ADDR)),
348 /* 8*/  FLAG_ENTRY0("EFUSE_WRITE", MES(EFUSE_WRITE)),
349 /* 9*/  FLAG_ENTRY0("EFUSE_DONE_PARITY", MES(EFUSE_DONE_PARITY)),
350 /*10*/  FLAG_ENTRY0("INVALID_EEP_CMD", MES(INVALID_EEP_CMD)),
351 /*11*/  FLAG_ENTRY0("MBIST_FAIL", MES(MBIST_FAIL)),
352 /*12*/  FLAG_ENTRY0("PLL_LOCK_FAIL", MES(PLL_LOCK_FAIL))
353 };
354
355 /*
356  * TXE PIO Error flags and consequences
357  */
358 static struct flag_table pio_err_status_flags[] = {
359 /* 0*/  FLAG_ENTRY("PioWriteBadCtxt",
360         SEC_WRITE_DROPPED,
361         SEND_PIO_ERR_STATUS_PIO_WRITE_BAD_CTXT_ERR_SMASK),
362 /* 1*/  FLAG_ENTRY("PioWriteAddrParity",
363         SEC_SPC_FREEZE,
364         SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK),
365 /* 2*/  FLAG_ENTRY("PioCsrParity",
366         SEC_SPC_FREEZE,
367         SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK),
368 /* 3*/  FLAG_ENTRY("PioSbMemFifo0",
369         SEC_SPC_FREEZE,
370         SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK),
371 /* 4*/  FLAG_ENTRY("PioSbMemFifo1",
372         SEC_SPC_FREEZE,
373         SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK),
374 /* 5*/  FLAG_ENTRY("PioPccFifoParity",
375         SEC_SPC_FREEZE,
376         SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK),
377 /* 6*/  FLAG_ENTRY("PioPecFifoParity",
378         SEC_SPC_FREEZE,
379         SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK),
380 /* 7*/  FLAG_ENTRY("PioSbrdctlCrrelParity",
381         SEC_SPC_FREEZE,
382         SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK),
383 /* 8*/  FLAG_ENTRY("PioSbrdctrlCrrelFifoParity",
384         SEC_SPC_FREEZE,
385         SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK),
386 /* 9*/  FLAG_ENTRY("PioPktEvictFifoParityErr",
387         SEC_SPC_FREEZE,
388         SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK),
389 /*10*/  FLAG_ENTRY("PioSmPktResetParity",
390         SEC_SPC_FREEZE,
391         SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK),
392 /*11*/  FLAG_ENTRY("PioVlLenMemBank0Unc",
393         SEC_SPC_FREEZE,
394         SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK),
395 /*12*/  FLAG_ENTRY("PioVlLenMemBank1Unc",
396         SEC_SPC_FREEZE,
397         SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK),
398 /*13*/  FLAG_ENTRY("PioVlLenMemBank0Cor",
399         0,
400         SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_COR_ERR_SMASK),
401 /*14*/  FLAG_ENTRY("PioVlLenMemBank1Cor",
402         0,
403         SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_COR_ERR_SMASK),
404 /*15*/  FLAG_ENTRY("PioCreditRetFifoParity",
405         SEC_SPC_FREEZE,
406         SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK),
407 /*16*/  FLAG_ENTRY("PioPpmcPblFifo",
408         SEC_SPC_FREEZE,
409         SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK),
410 /*17*/  FLAG_ENTRY("PioInitSmIn",
411         0,
412         SEND_PIO_ERR_STATUS_PIO_INIT_SM_IN_ERR_SMASK),
413 /*18*/  FLAG_ENTRY("PioPktEvictSmOrArbSm",
414         SEC_SPC_FREEZE,
415         SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK),
416 /*19*/  FLAG_ENTRY("PioHostAddrMemUnc",
417         SEC_SPC_FREEZE,
418         SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK),
419 /*20*/  FLAG_ENTRY("PioHostAddrMemCor",
420         0,
421         SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_COR_ERR_SMASK),
422 /*21*/  FLAG_ENTRY("PioWriteDataParity",
423         SEC_SPC_FREEZE,
424         SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK),
425 /*22*/  FLAG_ENTRY("PioStateMachine",
426         SEC_SPC_FREEZE,
427         SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK),
428 /*23*/  FLAG_ENTRY("PioWriteQwValidParity",
429         SEC_WRITE_DROPPED | SEC_SPC_FREEZE,
430         SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK),
431 /*24*/  FLAG_ENTRY("PioBlockQwCountParity",
432         SEC_WRITE_DROPPED | SEC_SPC_FREEZE,
433         SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK),
434 /*25*/  FLAG_ENTRY("PioVlfVlLenParity",
435         SEC_SPC_FREEZE,
436         SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK),
437 /*26*/  FLAG_ENTRY("PioVlfSopParity",
438         SEC_SPC_FREEZE,
439         SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK),
440 /*27*/  FLAG_ENTRY("PioVlFifoParity",
441         SEC_SPC_FREEZE,
442         SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK),
443 /*28*/  FLAG_ENTRY("PioPpmcBqcMemParity",
444         SEC_SPC_FREEZE,
445         SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK),
446 /*29*/  FLAG_ENTRY("PioPpmcSopLen",
447         SEC_SPC_FREEZE,
448         SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK),
449 /*30-31 reserved*/
450 /*32*/  FLAG_ENTRY("PioCurrentFreeCntParity",
451         SEC_SPC_FREEZE,
452         SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK),
453 /*33*/  FLAG_ENTRY("PioLastReturnedCntParity",
454         SEC_SPC_FREEZE,
455         SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK),
456 /*34*/  FLAG_ENTRY("PioPccSopHeadParity",
457         SEC_SPC_FREEZE,
458         SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK),
459 /*35*/  FLAG_ENTRY("PioPecSopHeadParityErr",
460         SEC_SPC_FREEZE,
461         SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK),
462 /*36-63 reserved*/
463 };
464
465 /* TXE PIO errors that cause an SPC freeze */
466 #define ALL_PIO_FREEZE_ERR \
467         (SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK \
468         | SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK \
469         | SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK \
470         | SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK \
471         | SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK \
472         | SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK \
473         | SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK \
474         | SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK \
475         | SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK \
476         | SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK \
477         | SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK \
478         | SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK \
479         | SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK \
480         | SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK \
481         | SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK \
482         | SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK \
483         | SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK \
484         | SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK \
485         | SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK \
486         | SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK \
487         | SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK \
488         | SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK \
489         | SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK \
490         | SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK \
491         | SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK \
492         | SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK \
493         | SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK \
494         | SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK \
495         | SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK)
496
497 /*
498  * TXE SDMA Error flags
499  */
500 static struct flag_table sdma_err_status_flags[] = {
501 /* 0*/  FLAG_ENTRY0("SDmaRpyTagErr",
502                 SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK),
503 /* 1*/  FLAG_ENTRY0("SDmaCsrParityErr",
504                 SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK),
505 /* 2*/  FLAG_ENTRY0("SDmaPcieReqTrackingUncErr",
506                 SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK),
507 /* 3*/  FLAG_ENTRY0("SDmaPcieReqTrackingCorErr",
508                 SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_COR_ERR_SMASK),
509 /*04-63 reserved*/
510 };
511
512 /* TXE SDMA errors that cause an SPC freeze */
513 #define ALL_SDMA_FREEZE_ERR  \
514                 (SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK \
515                 | SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK \
516                 | SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK)
517
518 /* SendEgressErrInfo bits that correspond to a PortXmitDiscard counter */
519 #define PORT_DISCARD_EGRESS_ERRS \
520         (SEND_EGRESS_ERR_INFO_TOO_LONG_IB_PACKET_ERR_SMASK \
521         | SEND_EGRESS_ERR_INFO_VL_MAPPING_ERR_SMASK \
522         | SEND_EGRESS_ERR_INFO_VL_ERR_SMASK)
523
524 /*
525  * TXE Egress Error flags
526  */
527 #define SEES(text) SEND_EGRESS_ERR_STATUS_##text##_ERR_SMASK
528 static struct flag_table egress_err_status_flags[] = {
529 /* 0*/  FLAG_ENTRY0("TxPktIntegrityMemCorErr", SEES(TX_PKT_INTEGRITY_MEM_COR)),
530 /* 1*/  FLAG_ENTRY0("TxPktIntegrityMemUncErr", SEES(TX_PKT_INTEGRITY_MEM_UNC)),
531 /* 2 reserved */
532 /* 3*/  FLAG_ENTRY0("TxEgressFifoUnderrunOrParityErr",
533                 SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY)),
534 /* 4*/  FLAG_ENTRY0("TxLinkdownErr", SEES(TX_LINKDOWN)),
535 /* 5*/  FLAG_ENTRY0("TxIncorrectLinkStateErr", SEES(TX_INCORRECT_LINK_STATE)),
536 /* 6 reserved */
537 /* 7*/  FLAG_ENTRY0("TxPioLaunchIntfParityErr",
538                 SEES(TX_PIO_LAUNCH_INTF_PARITY)),
539 /* 8*/  FLAG_ENTRY0("TxSdmaLaunchIntfParityErr",
540                 SEES(TX_SDMA_LAUNCH_INTF_PARITY)),
541 /* 9-10 reserved */
542 /*11*/  FLAG_ENTRY0("TxSbrdCtlStateMachineParityErr",
543                 SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY)),
544 /*12*/  FLAG_ENTRY0("TxIllegalVLErr", SEES(TX_ILLEGAL_VL)),
545 /*13*/  FLAG_ENTRY0("TxLaunchCsrParityErr", SEES(TX_LAUNCH_CSR_PARITY)),
546 /*14*/  FLAG_ENTRY0("TxSbrdCtlCsrParityErr", SEES(TX_SBRD_CTL_CSR_PARITY)),
547 /*15*/  FLAG_ENTRY0("TxConfigParityErr", SEES(TX_CONFIG_PARITY)),
548 /*16*/  FLAG_ENTRY0("TxSdma0DisallowedPacketErr",
549                 SEES(TX_SDMA0_DISALLOWED_PACKET)),
550 /*17*/  FLAG_ENTRY0("TxSdma1DisallowedPacketErr",
551                 SEES(TX_SDMA1_DISALLOWED_PACKET)),
552 /*18*/  FLAG_ENTRY0("TxSdma2DisallowedPacketErr",
553                 SEES(TX_SDMA2_DISALLOWED_PACKET)),
554 /*19*/  FLAG_ENTRY0("TxSdma3DisallowedPacketErr",
555                 SEES(TX_SDMA3_DISALLOWED_PACKET)),
556 /*20*/  FLAG_ENTRY0("TxSdma4DisallowedPacketErr",
557                 SEES(TX_SDMA4_DISALLOWED_PACKET)),
558 /*21*/  FLAG_ENTRY0("TxSdma5DisallowedPacketErr",
559                 SEES(TX_SDMA5_DISALLOWED_PACKET)),
560 /*22*/  FLAG_ENTRY0("TxSdma6DisallowedPacketErr",
561                 SEES(TX_SDMA6_DISALLOWED_PACKET)),
562 /*23*/  FLAG_ENTRY0("TxSdma7DisallowedPacketErr",
563                 SEES(TX_SDMA7_DISALLOWED_PACKET)),
564 /*24*/  FLAG_ENTRY0("TxSdma8DisallowedPacketErr",
565                 SEES(TX_SDMA8_DISALLOWED_PACKET)),
566 /*25*/  FLAG_ENTRY0("TxSdma9DisallowedPacketErr",
567                 SEES(TX_SDMA9_DISALLOWED_PACKET)),
568 /*26*/  FLAG_ENTRY0("TxSdma10DisallowedPacketErr",
569                 SEES(TX_SDMA10_DISALLOWED_PACKET)),
570 /*27*/  FLAG_ENTRY0("TxSdma11DisallowedPacketErr",
571                 SEES(TX_SDMA11_DISALLOWED_PACKET)),
572 /*28*/  FLAG_ENTRY0("TxSdma12DisallowedPacketErr",
573                 SEES(TX_SDMA12_DISALLOWED_PACKET)),
574 /*29*/  FLAG_ENTRY0("TxSdma13DisallowedPacketErr",
575                 SEES(TX_SDMA13_DISALLOWED_PACKET)),
576 /*30*/  FLAG_ENTRY0("TxSdma14DisallowedPacketErr",
577                 SEES(TX_SDMA14_DISALLOWED_PACKET)),
578 /*31*/  FLAG_ENTRY0("TxSdma15DisallowedPacketErr",
579                 SEES(TX_SDMA15_DISALLOWED_PACKET)),
580 /*32*/  FLAG_ENTRY0("TxLaunchFifo0UncOrParityErr",
581                 SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY)),
582 /*33*/  FLAG_ENTRY0("TxLaunchFifo1UncOrParityErr",
583                 SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY)),
584 /*34*/  FLAG_ENTRY0("TxLaunchFifo2UncOrParityErr",
585                 SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY)),
586 /*35*/  FLAG_ENTRY0("TxLaunchFifo3UncOrParityErr",
587                 SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY)),
588 /*36*/  FLAG_ENTRY0("TxLaunchFifo4UncOrParityErr",
589                 SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY)),
590 /*37*/  FLAG_ENTRY0("TxLaunchFifo5UncOrParityErr",
591                 SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY)),
592 /*38*/  FLAG_ENTRY0("TxLaunchFifo6UncOrParityErr",
593                 SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY)),
594 /*39*/  FLAG_ENTRY0("TxLaunchFifo7UncOrParityErr",
595                 SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY)),
596 /*40*/  FLAG_ENTRY0("TxLaunchFifo8UncOrParityErr",
597                 SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY)),
598 /*41*/  FLAG_ENTRY0("TxCreditReturnParityErr", SEES(TX_CREDIT_RETURN_PARITY)),
599 /*42*/  FLAG_ENTRY0("TxSbHdrUncErr", SEES(TX_SB_HDR_UNC)),
600 /*43*/  FLAG_ENTRY0("TxReadSdmaMemoryUncErr", SEES(TX_READ_SDMA_MEMORY_UNC)),
601 /*44*/  FLAG_ENTRY0("TxReadPioMemoryUncErr", SEES(TX_READ_PIO_MEMORY_UNC)),
602 /*45*/  FLAG_ENTRY0("TxEgressFifoUncErr", SEES(TX_EGRESS_FIFO_UNC)),
603 /*46*/  FLAG_ENTRY0("TxHcrcInsertionErr", SEES(TX_HCRC_INSERTION)),
604 /*47*/  FLAG_ENTRY0("TxCreditReturnVLErr", SEES(TX_CREDIT_RETURN_VL)),
605 /*48*/  FLAG_ENTRY0("TxLaunchFifo0CorErr", SEES(TX_LAUNCH_FIFO0_COR)),
606 /*49*/  FLAG_ENTRY0("TxLaunchFifo1CorErr", SEES(TX_LAUNCH_FIFO1_COR)),
607 /*50*/  FLAG_ENTRY0("TxLaunchFifo2CorErr", SEES(TX_LAUNCH_FIFO2_COR)),
608 /*51*/  FLAG_ENTRY0("TxLaunchFifo3CorErr", SEES(TX_LAUNCH_FIFO3_COR)),
609 /*52*/  FLAG_ENTRY0("TxLaunchFifo4CorErr", SEES(TX_LAUNCH_FIFO4_COR)),
610 /*53*/  FLAG_ENTRY0("TxLaunchFifo5CorErr", SEES(TX_LAUNCH_FIFO5_COR)),
611 /*54*/  FLAG_ENTRY0("TxLaunchFifo6CorErr", SEES(TX_LAUNCH_FIFO6_COR)),
612 /*55*/  FLAG_ENTRY0("TxLaunchFifo7CorErr", SEES(TX_LAUNCH_FIFO7_COR)),
613 /*56*/  FLAG_ENTRY0("TxLaunchFifo8CorErr", SEES(TX_LAUNCH_FIFO8_COR)),
614 /*57*/  FLAG_ENTRY0("TxCreditOverrunErr", SEES(TX_CREDIT_OVERRUN)),
615 /*58*/  FLAG_ENTRY0("TxSbHdrCorErr", SEES(TX_SB_HDR_COR)),
616 /*59*/  FLAG_ENTRY0("TxReadSdmaMemoryCorErr", SEES(TX_READ_SDMA_MEMORY_COR)),
617 /*60*/  FLAG_ENTRY0("TxReadPioMemoryCorErr", SEES(TX_READ_PIO_MEMORY_COR)),
618 /*61*/  FLAG_ENTRY0("TxEgressFifoCorErr", SEES(TX_EGRESS_FIFO_COR)),
619 /*62*/  FLAG_ENTRY0("TxReadSdmaMemoryCsrUncErr",
620                 SEES(TX_READ_SDMA_MEMORY_CSR_UNC)),
621 /*63*/  FLAG_ENTRY0("TxReadPioMemoryCsrUncErr",
622                 SEES(TX_READ_PIO_MEMORY_CSR_UNC)),
623 };
624
625 /*
626  * TXE Egress Error Info flags
627  */
628 #define SEEI(text) SEND_EGRESS_ERR_INFO_##text##_ERR_SMASK
629 static struct flag_table egress_err_info_flags[] = {
630 /* 0*/  FLAG_ENTRY0("Reserved", 0ull),
631 /* 1*/  FLAG_ENTRY0("VLErr", SEEI(VL)),
632 /* 2*/  FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)),
633 /* 3*/  FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)),
634 /* 4*/  FLAG_ENTRY0("PartitionKeyErr", SEEI(PARTITION_KEY)),
635 /* 5*/  FLAG_ENTRY0("SLIDErr", SEEI(SLID)),
636 /* 6*/  FLAG_ENTRY0("OpcodeErr", SEEI(OPCODE)),
637 /* 7*/  FLAG_ENTRY0("VLMappingErr", SEEI(VL_MAPPING)),
638 /* 8*/  FLAG_ENTRY0("RawErr", SEEI(RAW)),
639 /* 9*/  FLAG_ENTRY0("RawIPv6Err", SEEI(RAW_IPV6)),
640 /*10*/  FLAG_ENTRY0("GRHErr", SEEI(GRH)),
641 /*11*/  FLAG_ENTRY0("BypassErr", SEEI(BYPASS)),
642 /*12*/  FLAG_ENTRY0("KDETHPacketsErr", SEEI(KDETH_PACKETS)),
643 /*13*/  FLAG_ENTRY0("NonKDETHPacketsErr", SEEI(NON_KDETH_PACKETS)),
644 /*14*/  FLAG_ENTRY0("TooSmallIBPacketsErr", SEEI(TOO_SMALL_IB_PACKETS)),
645 /*15*/  FLAG_ENTRY0("TooSmallBypassPacketsErr", SEEI(TOO_SMALL_BYPASS_PACKETS)),
646 /*16*/  FLAG_ENTRY0("PbcTestErr", SEEI(PBC_TEST)),
647 /*17*/  FLAG_ENTRY0("BadPktLenErr", SEEI(BAD_PKT_LEN)),
648 /*18*/  FLAG_ENTRY0("TooLongIBPacketErr", SEEI(TOO_LONG_IB_PACKET)),
649 /*19*/  FLAG_ENTRY0("TooLongBypassPacketsErr", SEEI(TOO_LONG_BYPASS_PACKETS)),
650 /*20*/  FLAG_ENTRY0("PbcStaticRateControlErr", SEEI(PBC_STATIC_RATE_CONTROL)),
651 /*21*/  FLAG_ENTRY0("BypassBadPktLenErr", SEEI(BAD_PKT_LEN)),
652 };
653
654 /* TXE Egress errors that cause an SPC freeze */
655 #define ALL_TXE_EGRESS_FREEZE_ERR \
656         (SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY) \
657         | SEES(TX_PIO_LAUNCH_INTF_PARITY) \
658         | SEES(TX_SDMA_LAUNCH_INTF_PARITY) \
659         | SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY) \
660         | SEES(TX_LAUNCH_CSR_PARITY) \
661         | SEES(TX_SBRD_CTL_CSR_PARITY) \
662         | SEES(TX_CONFIG_PARITY) \
663         | SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY) \
664         | SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY) \
665         | SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY) \
666         | SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY) \
667         | SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY) \
668         | SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY) \
669         | SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY) \
670         | SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY) \
671         | SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY) \
672         | SEES(TX_CREDIT_RETURN_PARITY))
673
674 /*
675  * TXE Send error flags
676  */
677 #define SES(name) SEND_ERR_STATUS_SEND_##name##_ERR_SMASK
678 static struct flag_table send_err_status_flags[] = {
679 /* 0*/  FLAG_ENTRY0("SendCsrParityErr", SES(CSR_PARITY)),
680 /* 1*/  FLAG_ENTRY0("SendCsrReadBadAddrErr", SES(CSR_READ_BAD_ADDR)),
681 /* 2*/  FLAG_ENTRY0("SendCsrWriteBadAddrErr", SES(CSR_WRITE_BAD_ADDR))
682 };
683
684 /*
685  * TXE Send Context Error flags and consequences
686  */
687 static struct flag_table sc_err_status_flags[] = {
688 /* 0*/  FLAG_ENTRY("InconsistentSop",
689                 SEC_PACKET_DROPPED | SEC_SC_HALTED,
690                 SEND_CTXT_ERR_STATUS_PIO_INCONSISTENT_SOP_ERR_SMASK),
691 /* 1*/  FLAG_ENTRY("DisallowedPacket",
692                 SEC_PACKET_DROPPED | SEC_SC_HALTED,
693                 SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK),
694 /* 2*/  FLAG_ENTRY("WriteCrossesBoundary",
695                 SEC_WRITE_DROPPED | SEC_SC_HALTED,
696                 SEND_CTXT_ERR_STATUS_PIO_WRITE_CROSSES_BOUNDARY_ERR_SMASK),
697 /* 3*/  FLAG_ENTRY("WriteOverflow",
698                 SEC_WRITE_DROPPED | SEC_SC_HALTED,
699                 SEND_CTXT_ERR_STATUS_PIO_WRITE_OVERFLOW_ERR_SMASK),
700 /* 4*/  FLAG_ENTRY("WriteOutOfBounds",
701                 SEC_WRITE_DROPPED | SEC_SC_HALTED,
702                 SEND_CTXT_ERR_STATUS_PIO_WRITE_OUT_OF_BOUNDS_ERR_SMASK),
703 /* 5-63 reserved*/
704 };
705
706 /*
707  * RXE Receive Error flags
708  */
709 #define RXES(name) RCV_ERR_STATUS_RX_##name##_ERR_SMASK
710 static struct flag_table rxe_err_status_flags[] = {
711 /* 0*/  FLAG_ENTRY0("RxDmaCsrCorErr", RXES(DMA_CSR_COR)),
712 /* 1*/  FLAG_ENTRY0("RxDcIntfParityErr", RXES(DC_INTF_PARITY)),
713 /* 2*/  FLAG_ENTRY0("RxRcvHdrUncErr", RXES(RCV_HDR_UNC)),
714 /* 3*/  FLAG_ENTRY0("RxRcvHdrCorErr", RXES(RCV_HDR_COR)),
715 /* 4*/  FLAG_ENTRY0("RxRcvDataUncErr", RXES(RCV_DATA_UNC)),
716 /* 5*/  FLAG_ENTRY0("RxRcvDataCorErr", RXES(RCV_DATA_COR)),
717 /* 6*/  FLAG_ENTRY0("RxRcvQpMapTableUncErr", RXES(RCV_QP_MAP_TABLE_UNC)),
718 /* 7*/  FLAG_ENTRY0("RxRcvQpMapTableCorErr", RXES(RCV_QP_MAP_TABLE_COR)),
719 /* 8*/  FLAG_ENTRY0("RxRcvCsrParityErr", RXES(RCV_CSR_PARITY)),
720 /* 9*/  FLAG_ENTRY0("RxDcSopEopParityErr", RXES(DC_SOP_EOP_PARITY)),
721 /*10*/  FLAG_ENTRY0("RxDmaFlagUncErr", RXES(DMA_FLAG_UNC)),
722 /*11*/  FLAG_ENTRY0("RxDmaFlagCorErr", RXES(DMA_FLAG_COR)),
723 /*12*/  FLAG_ENTRY0("RxRcvFsmEncodingErr", RXES(RCV_FSM_ENCODING)),
724 /*13*/  FLAG_ENTRY0("RxRbufFreeListUncErr", RXES(RBUF_FREE_LIST_UNC)),
725 /*14*/  FLAG_ENTRY0("RxRbufFreeListCorErr", RXES(RBUF_FREE_LIST_COR)),
726 /*15*/  FLAG_ENTRY0("RxRbufLookupDesRegUncErr", RXES(RBUF_LOOKUP_DES_REG_UNC)),
727 /*16*/  FLAG_ENTRY0("RxRbufLookupDesRegUncCorErr",
728                 RXES(RBUF_LOOKUP_DES_REG_UNC_COR)),
729 /*17*/  FLAG_ENTRY0("RxRbufLookupDesUncErr", RXES(RBUF_LOOKUP_DES_UNC)),
730 /*18*/  FLAG_ENTRY0("RxRbufLookupDesCorErr", RXES(RBUF_LOOKUP_DES_COR)),
731 /*19*/  FLAG_ENTRY0("RxRbufBlockListReadUncErr",
732                 RXES(RBUF_BLOCK_LIST_READ_UNC)),
733 /*20*/  FLAG_ENTRY0("RxRbufBlockListReadCorErr",
734                 RXES(RBUF_BLOCK_LIST_READ_COR)),
735 /*21*/  FLAG_ENTRY0("RxRbufCsrQHeadBufNumParityErr",
736                 RXES(RBUF_CSR_QHEAD_BUF_NUM_PARITY)),
737 /*22*/  FLAG_ENTRY0("RxRbufCsrQEntCntParityErr",
738                 RXES(RBUF_CSR_QENT_CNT_PARITY)),
739 /*23*/  FLAG_ENTRY0("RxRbufCsrQNextBufParityErr",
740                 RXES(RBUF_CSR_QNEXT_BUF_PARITY)),
741 /*24*/  FLAG_ENTRY0("RxRbufCsrQVldBitParityErr",
742                 RXES(RBUF_CSR_QVLD_BIT_PARITY)),
743 /*25*/  FLAG_ENTRY0("RxRbufCsrQHdPtrParityErr", RXES(RBUF_CSR_QHD_PTR_PARITY)),
744 /*26*/  FLAG_ENTRY0("RxRbufCsrQTlPtrParityErr", RXES(RBUF_CSR_QTL_PTR_PARITY)),
745 /*27*/  FLAG_ENTRY0("RxRbufCsrQNumOfPktParityErr",
746                 RXES(RBUF_CSR_QNUM_OF_PKT_PARITY)),
747 /*28*/  FLAG_ENTRY0("RxRbufCsrQEOPDWParityErr", RXES(RBUF_CSR_QEOPDW_PARITY)),
748 /*29*/  FLAG_ENTRY0("RxRbufCtxIdParityErr", RXES(RBUF_CTX_ID_PARITY)),
749 /*30*/  FLAG_ENTRY0("RxRBufBadLookupErr", RXES(RBUF_BAD_LOOKUP)),
750 /*31*/  FLAG_ENTRY0("RxRbufFullErr", RXES(RBUF_FULL)),
751 /*32*/  FLAG_ENTRY0("RxRbufEmptyErr", RXES(RBUF_EMPTY)),
752 /*33*/  FLAG_ENTRY0("RxRbufFlRdAddrParityErr", RXES(RBUF_FL_RD_ADDR_PARITY)),
753 /*34*/  FLAG_ENTRY0("RxRbufFlWrAddrParityErr", RXES(RBUF_FL_WR_ADDR_PARITY)),
754 /*35*/  FLAG_ENTRY0("RxRbufFlInitdoneParityErr",
755                 RXES(RBUF_FL_INITDONE_PARITY)),
756 /*36*/  FLAG_ENTRY0("RxRbufFlInitWrAddrParityErr",
757                 RXES(RBUF_FL_INIT_WR_ADDR_PARITY)),
758 /*37*/  FLAG_ENTRY0("RxRbufNextFreeBufUncErr", RXES(RBUF_NEXT_FREE_BUF_UNC)),
759 /*38*/  FLAG_ENTRY0("RxRbufNextFreeBufCorErr", RXES(RBUF_NEXT_FREE_BUF_COR)),
760 /*39*/  FLAG_ENTRY0("RxLookupDesPart1UncErr", RXES(LOOKUP_DES_PART1_UNC)),
761 /*40*/  FLAG_ENTRY0("RxLookupDesPart1UncCorErr",
762                 RXES(LOOKUP_DES_PART1_UNC_COR)),
763 /*41*/  FLAG_ENTRY0("RxLookupDesPart2ParityErr",
764                 RXES(LOOKUP_DES_PART2_PARITY)),
765 /*42*/  FLAG_ENTRY0("RxLookupRcvArrayUncErr", RXES(LOOKUP_RCV_ARRAY_UNC)),
766 /*43*/  FLAG_ENTRY0("RxLookupRcvArrayCorErr", RXES(LOOKUP_RCV_ARRAY_COR)),
767 /*44*/  FLAG_ENTRY0("RxLookupCsrParityErr", RXES(LOOKUP_CSR_PARITY)),
768 /*45*/  FLAG_ENTRY0("RxHqIntrCsrParityErr", RXES(HQ_INTR_CSR_PARITY)),
769 /*46*/  FLAG_ENTRY0("RxHqIntrFsmErr", RXES(HQ_INTR_FSM)),
770 /*47*/  FLAG_ENTRY0("RxRbufDescPart1UncErr", RXES(RBUF_DESC_PART1_UNC)),
771 /*48*/  FLAG_ENTRY0("RxRbufDescPart1CorErr", RXES(RBUF_DESC_PART1_COR)),
772 /*49*/  FLAG_ENTRY0("RxRbufDescPart2UncErr", RXES(RBUF_DESC_PART2_UNC)),
773 /*50*/  FLAG_ENTRY0("RxRbufDescPart2CorErr", RXES(RBUF_DESC_PART2_COR)),
774 /*51*/  FLAG_ENTRY0("RxDmaHdrFifoRdUncErr", RXES(DMA_HDR_FIFO_RD_UNC)),
775 /*52*/  FLAG_ENTRY0("RxDmaHdrFifoRdCorErr", RXES(DMA_HDR_FIFO_RD_COR)),
776 /*53*/  FLAG_ENTRY0("RxDmaDataFifoRdUncErr", RXES(DMA_DATA_FIFO_RD_UNC)),
777 /*54*/  FLAG_ENTRY0("RxDmaDataFifoRdCorErr", RXES(DMA_DATA_FIFO_RD_COR)),
778 /*55*/  FLAG_ENTRY0("RxRbufDataUncErr", RXES(RBUF_DATA_UNC)),
779 /*56*/  FLAG_ENTRY0("RxRbufDataCorErr", RXES(RBUF_DATA_COR)),
780 /*57*/  FLAG_ENTRY0("RxDmaCsrParityErr", RXES(DMA_CSR_PARITY)),
781 /*58*/  FLAG_ENTRY0("RxDmaEqFsmEncodingErr", RXES(DMA_EQ_FSM_ENCODING)),
782 /*59*/  FLAG_ENTRY0("RxDmaDqFsmEncodingErr", RXES(DMA_DQ_FSM_ENCODING)),
783 /*60*/  FLAG_ENTRY0("RxDmaCsrUncErr", RXES(DMA_CSR_UNC)),
784 /*61*/  FLAG_ENTRY0("RxCsrReadBadAddrErr", RXES(CSR_READ_BAD_ADDR)),
785 /*62*/  FLAG_ENTRY0("RxCsrWriteBadAddrErr", RXES(CSR_WRITE_BAD_ADDR)),
786 /*63*/  FLAG_ENTRY0("RxCsrParityErr", RXES(CSR_PARITY))
787 };
788
789 /* RXE errors that will trigger an SPC freeze */
790 #define ALL_RXE_FREEZE_ERR  \
791         (RCV_ERR_STATUS_RX_RCV_QP_MAP_TABLE_UNC_ERR_SMASK \
792         | RCV_ERR_STATUS_RX_RCV_CSR_PARITY_ERR_SMASK \
793         | RCV_ERR_STATUS_RX_DMA_FLAG_UNC_ERR_SMASK \
794         | RCV_ERR_STATUS_RX_RCV_FSM_ENCODING_ERR_SMASK \
795         | RCV_ERR_STATUS_RX_RBUF_FREE_LIST_UNC_ERR_SMASK \
796         | RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_ERR_SMASK \
797         | RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR_SMASK \
798         | RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_UNC_ERR_SMASK \
799         | RCV_ERR_STATUS_RX_RBUF_BLOCK_LIST_READ_UNC_ERR_SMASK \
800         | RCV_ERR_STATUS_RX_RBUF_CSR_QHEAD_BUF_NUM_PARITY_ERR_SMASK \
801         | RCV_ERR_STATUS_RX_RBUF_CSR_QENT_CNT_PARITY_ERR_SMASK \
802         | RCV_ERR_STATUS_RX_RBUF_CSR_QNEXT_BUF_PARITY_ERR_SMASK \
803         | RCV_ERR_STATUS_RX_RBUF_CSR_QVLD_BIT_PARITY_ERR_SMASK \
804         | RCV_ERR_STATUS_RX_RBUF_CSR_QHD_PTR_PARITY_ERR_SMASK \
805         | RCV_ERR_STATUS_RX_RBUF_CSR_QTL_PTR_PARITY_ERR_SMASK \
806         | RCV_ERR_STATUS_RX_RBUF_CSR_QNUM_OF_PKT_PARITY_ERR_SMASK \
807         | RCV_ERR_STATUS_RX_RBUF_CSR_QEOPDW_PARITY_ERR_SMASK \
808         | RCV_ERR_STATUS_RX_RBUF_CTX_ID_PARITY_ERR_SMASK \
809         | RCV_ERR_STATUS_RX_RBUF_BAD_LOOKUP_ERR_SMASK \
810         | RCV_ERR_STATUS_RX_RBUF_FULL_ERR_SMASK \
811         | RCV_ERR_STATUS_RX_RBUF_EMPTY_ERR_SMASK \
812         | RCV_ERR_STATUS_RX_RBUF_FL_RD_ADDR_PARITY_ERR_SMASK \
813         | RCV_ERR_STATUS_RX_RBUF_FL_WR_ADDR_PARITY_ERR_SMASK \
814         | RCV_ERR_STATUS_RX_RBUF_FL_INITDONE_PARITY_ERR_SMASK \
815         | RCV_ERR_STATUS_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR_SMASK \
816         | RCV_ERR_STATUS_RX_RBUF_NEXT_FREE_BUF_UNC_ERR_SMASK \
817         | RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_ERR_SMASK \
818         | RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_COR_ERR_SMASK \
819         | RCV_ERR_STATUS_RX_LOOKUP_DES_PART2_PARITY_ERR_SMASK \
820         | RCV_ERR_STATUS_RX_LOOKUP_RCV_ARRAY_UNC_ERR_SMASK \
821         | RCV_ERR_STATUS_RX_LOOKUP_CSR_PARITY_ERR_SMASK \
822         | RCV_ERR_STATUS_RX_HQ_INTR_CSR_PARITY_ERR_SMASK \
823         | RCV_ERR_STATUS_RX_HQ_INTR_FSM_ERR_SMASK \
824         | RCV_ERR_STATUS_RX_RBUF_DESC_PART1_UNC_ERR_SMASK \
825         | RCV_ERR_STATUS_RX_RBUF_DESC_PART1_COR_ERR_SMASK \
826         | RCV_ERR_STATUS_RX_RBUF_DESC_PART2_UNC_ERR_SMASK \
827         | RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK \
828         | RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK \
829         | RCV_ERR_STATUS_RX_RBUF_DATA_UNC_ERR_SMASK \
830         | RCV_ERR_STATUS_RX_DMA_CSR_PARITY_ERR_SMASK \
831         | RCV_ERR_STATUS_RX_DMA_EQ_FSM_ENCODING_ERR_SMASK \
832         | RCV_ERR_STATUS_RX_DMA_DQ_FSM_ENCODING_ERR_SMASK \
833         | RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK \
834         | RCV_ERR_STATUS_RX_CSR_PARITY_ERR_SMASK)
835
836 #define RXE_FREEZE_ABORT_MASK \
837         (RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK | \
838         RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK | \
839         RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK)
840
841 /*
842  * DCC Error Flags
843  */
844 #define DCCE(name) DCC_ERR_FLG_##name##_SMASK
845 static struct flag_table dcc_err_flags[] = {
846         FLAG_ENTRY0("bad_l2_err", DCCE(BAD_L2_ERR)),
847         FLAG_ENTRY0("bad_sc_err", DCCE(BAD_SC_ERR)),
848         FLAG_ENTRY0("bad_mid_tail_err", DCCE(BAD_MID_TAIL_ERR)),
849         FLAG_ENTRY0("bad_preemption_err", DCCE(BAD_PREEMPTION_ERR)),
850         FLAG_ENTRY0("preemption_err", DCCE(PREEMPTION_ERR)),
851         FLAG_ENTRY0("preemptionvl15_err", DCCE(PREEMPTIONVL15_ERR)),
852         FLAG_ENTRY0("bad_vl_marker_err", DCCE(BAD_VL_MARKER_ERR)),
853         FLAG_ENTRY0("bad_dlid_target_err", DCCE(BAD_DLID_TARGET_ERR)),
854         FLAG_ENTRY0("bad_lver_err", DCCE(BAD_LVER_ERR)),
855         FLAG_ENTRY0("uncorrectable_err", DCCE(UNCORRECTABLE_ERR)),
856         FLAG_ENTRY0("bad_crdt_ack_err", DCCE(BAD_CRDT_ACK_ERR)),
857         FLAG_ENTRY0("unsup_pkt_type", DCCE(UNSUP_PKT_TYPE)),
858         FLAG_ENTRY0("bad_ctrl_flit_err", DCCE(BAD_CTRL_FLIT_ERR)),
859         FLAG_ENTRY0("event_cntr_parity_err", DCCE(EVENT_CNTR_PARITY_ERR)),
860         FLAG_ENTRY0("event_cntr_rollover_err", DCCE(EVENT_CNTR_ROLLOVER_ERR)),
861         FLAG_ENTRY0("link_err", DCCE(LINK_ERR)),
862         FLAG_ENTRY0("misc_cntr_rollover_err", DCCE(MISC_CNTR_ROLLOVER_ERR)),
863         FLAG_ENTRY0("bad_ctrl_dist_err", DCCE(BAD_CTRL_DIST_ERR)),
864         FLAG_ENTRY0("bad_tail_dist_err", DCCE(BAD_TAIL_DIST_ERR)),
865         FLAG_ENTRY0("bad_head_dist_err", DCCE(BAD_HEAD_DIST_ERR)),
866         FLAG_ENTRY0("nonvl15_state_err", DCCE(NONVL15_STATE_ERR)),
867         FLAG_ENTRY0("vl15_multi_err", DCCE(VL15_MULTI_ERR)),
868         FLAG_ENTRY0("bad_pkt_length_err", DCCE(BAD_PKT_LENGTH_ERR)),
869         FLAG_ENTRY0("unsup_vl_err", DCCE(UNSUP_VL_ERR)),
870         FLAG_ENTRY0("perm_nvl15_err", DCCE(PERM_NVL15_ERR)),
871         FLAG_ENTRY0("slid_zero_err", DCCE(SLID_ZERO_ERR)),
872         FLAG_ENTRY0("dlid_zero_err", DCCE(DLID_ZERO_ERR)),
873         FLAG_ENTRY0("length_mtu_err", DCCE(LENGTH_MTU_ERR)),
874         FLAG_ENTRY0("rx_early_drop_err", DCCE(RX_EARLY_DROP_ERR)),
875         FLAG_ENTRY0("late_short_err", DCCE(LATE_SHORT_ERR)),
876         FLAG_ENTRY0("late_long_err", DCCE(LATE_LONG_ERR)),
877         FLAG_ENTRY0("late_ebp_err", DCCE(LATE_EBP_ERR)),
878         FLAG_ENTRY0("fpe_tx_fifo_ovflw_err", DCCE(FPE_TX_FIFO_OVFLW_ERR)),
879         FLAG_ENTRY0("fpe_tx_fifo_unflw_err", DCCE(FPE_TX_FIFO_UNFLW_ERR)),
880         FLAG_ENTRY0("csr_access_blocked_host", DCCE(CSR_ACCESS_BLOCKED_HOST)),
881         FLAG_ENTRY0("csr_access_blocked_uc", DCCE(CSR_ACCESS_BLOCKED_UC)),
882         FLAG_ENTRY0("tx_ctrl_parity_err", DCCE(TX_CTRL_PARITY_ERR)),
883         FLAG_ENTRY0("tx_ctrl_parity_mbe_err", DCCE(TX_CTRL_PARITY_MBE_ERR)),
884         FLAG_ENTRY0("tx_sc_parity_err", DCCE(TX_SC_PARITY_ERR)),
885         FLAG_ENTRY0("rx_ctrl_parity_mbe_err", DCCE(RX_CTRL_PARITY_MBE_ERR)),
886         FLAG_ENTRY0("csr_parity_err", DCCE(CSR_PARITY_ERR)),
887         FLAG_ENTRY0("csr_inval_addr", DCCE(CSR_INVAL_ADDR)),
888         FLAG_ENTRY0("tx_byte_shft_parity_err", DCCE(TX_BYTE_SHFT_PARITY_ERR)),
889         FLAG_ENTRY0("rx_byte_shft_parity_err", DCCE(RX_BYTE_SHFT_PARITY_ERR)),
890         FLAG_ENTRY0("fmconfig_err", DCCE(FMCONFIG_ERR)),
891         FLAG_ENTRY0("rcvport_err", DCCE(RCVPORT_ERR)),
892 };
893
894 /*
895  * LCB error flags
896  */
897 #define LCBE(name) DC_LCB_ERR_FLG_##name##_SMASK
898 static struct flag_table lcb_err_flags[] = {
899 /* 0*/  FLAG_ENTRY0("CSR_PARITY_ERR", LCBE(CSR_PARITY_ERR)),
900 /* 1*/  FLAG_ENTRY0("INVALID_CSR_ADDR", LCBE(INVALID_CSR_ADDR)),
901 /* 2*/  FLAG_ENTRY0("RST_FOR_FAILED_DESKEW", LCBE(RST_FOR_FAILED_DESKEW)),
902 /* 3*/  FLAG_ENTRY0("ALL_LNS_FAILED_REINIT_TEST",
903                 LCBE(ALL_LNS_FAILED_REINIT_TEST)),
904 /* 4*/  FLAG_ENTRY0("LOST_REINIT_STALL_OR_TOS", LCBE(LOST_REINIT_STALL_OR_TOS)),
905 /* 5*/  FLAG_ENTRY0("TX_LESS_THAN_FOUR_LNS", LCBE(TX_LESS_THAN_FOUR_LNS)),
906 /* 6*/  FLAG_ENTRY0("RX_LESS_THAN_FOUR_LNS", LCBE(RX_LESS_THAN_FOUR_LNS)),
907 /* 7*/  FLAG_ENTRY0("SEQ_CRC_ERR", LCBE(SEQ_CRC_ERR)),
908 /* 8*/  FLAG_ENTRY0("REINIT_FROM_PEER", LCBE(REINIT_FROM_PEER)),
909 /* 9*/  FLAG_ENTRY0("REINIT_FOR_LN_DEGRADE", LCBE(REINIT_FOR_LN_DEGRADE)),
910 /*10*/  FLAG_ENTRY0("CRC_ERR_CNT_HIT_LIMIT", LCBE(CRC_ERR_CNT_HIT_LIMIT)),
911 /*11*/  FLAG_ENTRY0("RCLK_STOPPED", LCBE(RCLK_STOPPED)),
912 /*12*/  FLAG_ENTRY0("UNEXPECTED_REPLAY_MARKER", LCBE(UNEXPECTED_REPLAY_MARKER)),
913 /*13*/  FLAG_ENTRY0("UNEXPECTED_ROUND_TRIP_MARKER",
914                 LCBE(UNEXPECTED_ROUND_TRIP_MARKER)),
915 /*14*/  FLAG_ENTRY0("ILLEGAL_NULL_LTP", LCBE(ILLEGAL_NULL_LTP)),
916 /*15*/  FLAG_ENTRY0("ILLEGAL_FLIT_ENCODING", LCBE(ILLEGAL_FLIT_ENCODING)),
917 /*16*/  FLAG_ENTRY0("FLIT_INPUT_BUF_OFLW", LCBE(FLIT_INPUT_BUF_OFLW)),
918 /*17*/  FLAG_ENTRY0("VL_ACK_INPUT_BUF_OFLW", LCBE(VL_ACK_INPUT_BUF_OFLW)),
919 /*18*/  FLAG_ENTRY0("VL_ACK_INPUT_PARITY_ERR", LCBE(VL_ACK_INPUT_PARITY_ERR)),
920 /*19*/  FLAG_ENTRY0("VL_ACK_INPUT_WRONG_CRC_MODE",
921                 LCBE(VL_ACK_INPUT_WRONG_CRC_MODE)),
922 /*20*/  FLAG_ENTRY0("FLIT_INPUT_BUF_MBE", LCBE(FLIT_INPUT_BUF_MBE)),
923 /*21*/  FLAG_ENTRY0("FLIT_INPUT_BUF_SBE", LCBE(FLIT_INPUT_BUF_SBE)),
924 /*22*/  FLAG_ENTRY0("REPLAY_BUF_MBE", LCBE(REPLAY_BUF_MBE)),
925 /*23*/  FLAG_ENTRY0("REPLAY_BUF_SBE", LCBE(REPLAY_BUF_SBE)),
926 /*24*/  FLAG_ENTRY0("CREDIT_RETURN_FLIT_MBE", LCBE(CREDIT_RETURN_FLIT_MBE)),
927 /*25*/  FLAG_ENTRY0("RST_FOR_LINK_TIMEOUT", LCBE(RST_FOR_LINK_TIMEOUT)),
928 /*26*/  FLAG_ENTRY0("RST_FOR_INCOMPLT_RND_TRIP",
929                 LCBE(RST_FOR_INCOMPLT_RND_TRIP)),
930 /*27*/  FLAG_ENTRY0("HOLD_REINIT", LCBE(HOLD_REINIT)),
931 /*28*/  FLAG_ENTRY0("NEG_EDGE_LINK_TRANSFER_ACTIVE",
932                 LCBE(NEG_EDGE_LINK_TRANSFER_ACTIVE)),
933 /*29*/  FLAG_ENTRY0("REDUNDANT_FLIT_PARITY_ERR",
934                 LCBE(REDUNDANT_FLIT_PARITY_ERR))
935 };
936
937 /*
938  * DC8051 Error Flags
939  */
940 #define D8E(name) DC_DC8051_ERR_FLG_##name##_SMASK
941 static struct flag_table dc8051_err_flags[] = {
942         FLAG_ENTRY0("SET_BY_8051", D8E(SET_BY_8051)),
943         FLAG_ENTRY0("LOST_8051_HEART_BEAT", D8E(LOST_8051_HEART_BEAT)),
944         FLAG_ENTRY0("CRAM_MBE", D8E(CRAM_MBE)),
945         FLAG_ENTRY0("CRAM_SBE", D8E(CRAM_SBE)),
946         FLAG_ENTRY0("DRAM_MBE", D8E(DRAM_MBE)),
947         FLAG_ENTRY0("DRAM_SBE", D8E(DRAM_SBE)),
948         FLAG_ENTRY0("IRAM_MBE", D8E(IRAM_MBE)),
949         FLAG_ENTRY0("IRAM_SBE", D8E(IRAM_SBE)),
950         FLAG_ENTRY0("UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES",
951                     D8E(UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES)),
952         FLAG_ENTRY0("INVALID_CSR_ADDR", D8E(INVALID_CSR_ADDR)),
953 };
954
955 /*
956  * DC8051 Information Error flags
957  *
958  * Flags in DC8051_DBG_ERR_INFO_SET_BY_8051.ERROR field.
959  */
960 static struct flag_table dc8051_info_err_flags[] = {
961         FLAG_ENTRY0("Spico ROM check failed",  SPICO_ROM_FAILED),
962         FLAG_ENTRY0("Unknown frame received",  UNKNOWN_FRAME),
963         FLAG_ENTRY0("Target BER not met",      TARGET_BER_NOT_MET),
964         FLAG_ENTRY0("Serdes internal loopback failure",
965                     FAILED_SERDES_INTERNAL_LOOPBACK),
966         FLAG_ENTRY0("Failed SerDes init",      FAILED_SERDES_INIT),
967         FLAG_ENTRY0("Failed LNI(Polling)",     FAILED_LNI_POLLING),
968         FLAG_ENTRY0("Failed LNI(Debounce)",    FAILED_LNI_DEBOUNCE),
969         FLAG_ENTRY0("Failed LNI(EstbComm)",    FAILED_LNI_ESTBCOMM),
970         FLAG_ENTRY0("Failed LNI(OptEq)",       FAILED_LNI_OPTEQ),
971         FLAG_ENTRY0("Failed LNI(VerifyCap_1)", FAILED_LNI_VERIFY_CAP1),
972         FLAG_ENTRY0("Failed LNI(VerifyCap_2)", FAILED_LNI_VERIFY_CAP2),
973         FLAG_ENTRY0("Failed LNI(ConfigLT)",    FAILED_LNI_CONFIGLT),
974         FLAG_ENTRY0("Host Handshake Timeout",  HOST_HANDSHAKE_TIMEOUT),
975         FLAG_ENTRY0("External Device Request Timeout",
976                     EXTERNAL_DEVICE_REQ_TIMEOUT),
977 };
978
979 /*
980  * DC8051 Information Host Information flags
981  *
982  * Flags in DC8051_DBG_ERR_INFO_SET_BY_8051.HOST_MSG field.
983  */
984 static struct flag_table dc8051_info_host_msg_flags[] = {
985         FLAG_ENTRY0("Host request done", 0x0001),
986         FLAG_ENTRY0("BC SMA message", 0x0002),
987         FLAG_ENTRY0("BC PWR_MGM message", 0x0004),
988         FLAG_ENTRY0("BC Unknown message (BCC)", 0x0008),
989         FLAG_ENTRY0("BC Unknown message (LCB)", 0x0010),
990         FLAG_ENTRY0("External device config request", 0x0020),
991         FLAG_ENTRY0("VerifyCap all frames received", 0x0040),
992         FLAG_ENTRY0("LinkUp achieved", 0x0080),
993         FLAG_ENTRY0("Link going down", 0x0100),
994 };
995
996 static u32 encoded_size(u32 size);
997 static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate);
998 static int set_physical_link_state(struct hfi1_devdata *dd, u64 state);
999 static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
1000                                u8 *continuous);
1001 static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
1002                                   u8 *vcu, u16 *vl15buf, u8 *crc_sizes);
1003 static void read_vc_remote_link_width(struct hfi1_devdata *dd,
1004                                       u8 *remote_tx_rate, u16 *link_widths);
1005 static void read_vc_local_link_width(struct hfi1_devdata *dd, u8 *misc_bits,
1006                                      u8 *flag_bits, u16 *link_widths);
1007 static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
1008                                   u8 *device_rev);
1009 static void read_mgmt_allowed(struct hfi1_devdata *dd, u8 *mgmt_allowed);
1010 static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx);
1011 static int read_tx_settings(struct hfi1_devdata *dd, u8 *enable_lane_tx,
1012                             u8 *tx_polarity_inversion,
1013                             u8 *rx_polarity_inversion, u8 *max_rate);
1014 static void handle_sdma_eng_err(struct hfi1_devdata *dd,
1015                                 unsigned int context, u64 err_status);
1016 static void handle_qsfp_int(struct hfi1_devdata *dd, u32 source, u64 reg);
1017 static void handle_dcc_err(struct hfi1_devdata *dd,
1018                            unsigned int context, u64 err_status);
1019 static void handle_lcb_err(struct hfi1_devdata *dd,
1020                            unsigned int context, u64 err_status);
1021 static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg);
1022 static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1023 static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1024 static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1025 static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1026 static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1027 static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1028 static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1029 static void set_partition_keys(struct hfi1_pportdata *);
1030 static const char *link_state_name(u32 state);
1031 static const char *link_state_reason_name(struct hfi1_pportdata *ppd,
1032                                           u32 state);
1033 static int do_8051_command(struct hfi1_devdata *dd, u32 type, u64 in_data,
1034                            u64 *out_data);
1035 static int read_idle_sma(struct hfi1_devdata *dd, u64 *data);
1036 static int thermal_init(struct hfi1_devdata *dd);
1037
1038 static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
1039                                   int msecs);
1040 static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc);
1041 static void read_link_down_reason(struct hfi1_devdata *dd, u8 *ldr);
1042 static void handle_temp_err(struct hfi1_devdata *);
1043 static void dc_shutdown(struct hfi1_devdata *);
1044 static void dc_start(struct hfi1_devdata *);
1045 static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp,
1046                            unsigned int *np);
1047 static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd);
1048
1049 /*
1050  * Error interrupt table entry.  This is used as input to the interrupt
1051  * "clear down" routine used for all second tier error interrupt register.
1052  * Second tier interrupt registers have a single bit representing them
1053  * in the top-level CceIntStatus.
1054  */
1055 struct err_reg_info {
1056         u32 status;             /* status CSR offset */
1057         u32 clear;              /* clear CSR offset */
1058         u32 mask;               /* mask CSR offset */
1059         void (*handler)(struct hfi1_devdata *dd, u32 source, u64 reg);
1060         const char *desc;
1061 };
1062
1063 #define NUM_MISC_ERRS (IS_GENERAL_ERR_END - IS_GENERAL_ERR_START)
1064 #define NUM_DC_ERRS (IS_DC_END - IS_DC_START)
1065 #define NUM_VARIOUS (IS_VARIOUS_END - IS_VARIOUS_START)
1066
1067 /*
1068  * Helpers for building HFI and DC error interrupt table entries.  Different
1069  * helpers are needed because of inconsistent register names.
1070  */
1071 #define EE(reg, handler, desc) \
1072         { reg##_STATUS, reg##_CLEAR, reg##_MASK, \
1073                 handler, desc }
1074 #define DC_EE1(reg, handler, desc) \
1075         { reg##_FLG, reg##_FLG_CLR, reg##_FLG_EN, handler, desc }
1076 #define DC_EE2(reg, handler, desc) \
1077         { reg##_FLG, reg##_CLR, reg##_EN, handler, desc }
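/*
 * Illustrative expansions (handler name hypothetical):
 * EE(CCE_ERR, f, "CceErr") yields
 *      { CCE_ERR_STATUS, CCE_ERR_CLEAR, CCE_ERR_MASK, f, "CceErr" },
 * while DC_EE2(DC_LCB_ERR, f, "LCB Err") yields
 *      { DC_LCB_ERR_FLG, DC_LCB_ERR_CLR, DC_LCB_ERR_EN, f, "LCB Err" }.
 */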
1078
1079 /*
1080  * Table of the "misc" grouping of error interrupts.  Each entry refers to
1081  * another register containing more information.
1082  */
1083 static const struct err_reg_info misc_errs[NUM_MISC_ERRS] = {
1084 /* 0*/  EE(CCE_ERR,         handle_cce_err,    "CceErr"),
1085 /* 1*/  EE(RCV_ERR,         handle_rxe_err,    "RxeErr"),
1086 /* 2*/  EE(MISC_ERR,        handle_misc_err,   "MiscErr"),
1087 /* 3*/  { 0, 0, 0, NULL }, /* reserved */
1088 /* 4*/  EE(SEND_PIO_ERR,    handle_pio_err,    "PioErr"),
1089 /* 5*/  EE(SEND_DMA_ERR,    handle_sdma_err,   "SDmaErr"),
1090 /* 6*/  EE(SEND_EGRESS_ERR, handle_egress_err, "EgressErr"),
1091 /* 7*/  EE(SEND_ERR,        handle_txe_err,    "TxeErr")
1092         /* the rest are reserved */
1093 };
1094
1095 /*
1096  * Index into the Various section of the interrupt sources
1097  * corresponding to the Critical Temperature interrupt.
1098  */
1099 #define TCRIT_INT_SOURCE 4
1100
1101 /*
1102  * SDMA error interrupt entry - refers to another register containing more
1103  * information.
1104  */
1105 static const struct err_reg_info sdma_eng_err =
1106         EE(SEND_DMA_ENG_ERR, handle_sdma_eng_err, "SDmaEngErr");
1107
1108 static const struct err_reg_info various_err[NUM_VARIOUS] = {
1109 /* 0*/  { 0, 0, 0, NULL }, /* PbcInt */
1110 /* 1*/  { 0, 0, 0, NULL }, /* GpioAssertInt */
1111 /* 2*/  EE(ASIC_QSFP1,  handle_qsfp_int,        "QSFP1"),
1112 /* 3*/  EE(ASIC_QSFP2,  handle_qsfp_int,        "QSFP2"),
1113 /* 4*/  { 0, 0, 0, NULL }, /* TCritInt */
1114         /* rest are reserved */
1115 };
1116
1117 /*
1118  * The DC encoding of mtu_cap for 10K MTU in the DCC_CFG_PORT_CONFIG
1119  * register can not be derived from the MTU value because 10K is not
1120  * a power of 2. Therefore, we need a constant. Everything else can
1121  * be calculated.
1122  */
1123 #define DCC_CFG_PORT_MTU_CAP_10240 7
1124
1125 /*
1126  * Table of the DC grouping of error interrupts.  Each entry refers to
1127  * another register containing more information.
1128  */
1129 static const struct err_reg_info dc_errs[NUM_DC_ERRS] = {
1130 /* 0*/  DC_EE1(DCC_ERR,         handle_dcc_err,        "DCC Err"),
1131 /* 1*/  DC_EE2(DC_LCB_ERR,      handle_lcb_err,        "LCB Err"),
1132 /* 2*/  DC_EE2(DC_DC8051_ERR,   handle_8051_interrupt, "DC8051 Interrupt"),
1133 /* 3*/  /* dc_lbm_int - special, see is_dc_int() */
1134         /* the rest are reserved */
1135 };
1136
1137 struct cntr_entry {
1138         /*
1139          * counter name
1140          */
1141         char *name;
1142
1143         /*
1144          * csr to read for name (if applicable)
1145          */
1146         u64 csr;
1147
1148         /*
1149          * offset into dd or ppd to store the counter's value
1150          */
1151         int offset;
1152
1153         /*
1154          * flags
1155          */
1156         u8 flags;
1157
1158         /*
1159          * accessor for stat element, context either dd or ppd
1160          */
1161         u64 (*rw_cntr)(const struct cntr_entry *, void *context, int vl,
1162                        int mode, u64 data);
1163 };
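
/*
 * For illustration: a device-scoped entry is typically read as
 *      entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0)
 * while a per-VL entry is passed the VL number instead of
 * CNTR_INVALID_VL.
 */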
1164
1165 #define C_RCV_HDR_OVF_FIRST C_RCV_HDR_OVF_0
1166 #define C_RCV_HDR_OVF_LAST C_RCV_HDR_OVF_159
1167
1168 #define CNTR_ELEM(name, csr, offset, flags, accessor) \
1169 { \
1170         name, \
1171         csr, \
1172         offset, \
1173         flags, \
1174         accessor \
1175 }
1176
1177 /* 32bit RXE */
1178 #define RXE32_PORT_CNTR_ELEM(name, counter, flags) \
1179 CNTR_ELEM(#name, \
1180           (counter * 8 + RCV_COUNTER_ARRAY32), \
1181           0, flags | CNTR_32BIT, \
1182           port_access_u32_csr)
1183
1184 #define RXE32_DEV_CNTR_ELEM(name, counter, flags) \
1185 CNTR_ELEM(#name, \
1186           (counter * 8 + RCV_COUNTER_ARRAY32), \
1187           0, flags | CNTR_32BIT, \
1188           dev_access_u32_csr)
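
/*
 * For example (the counter name and index here are placeholders),
 *      RXE32_DEV_CNTR_ELEM(RcvExample, 3, CNTR_NORMAL)
 * reads the 32-bit CSR at RCV_COUNTER_ARRAY32 + 3 * 8: each counter
 * in the array occupies one 8-byte CSR slot.
 */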
1189
1190 /* 64bit RXE */
1191 #define RXE64_PORT_CNTR_ELEM(name, counter, flags) \
1192 CNTR_ELEM(#name, \
1193           (counter * 8 + RCV_COUNTER_ARRAY64), \
1194           0, flags, \
1195           port_access_u64_csr)
1196
1197 #define RXE64_DEV_CNTR_ELEM(name, counter, flags) \
1198 CNTR_ELEM(#name, \
1199           (counter * 8 + RCV_COUNTER_ARRAY64), \
1200           0, flags, \
1201           dev_access_u64_csr)
1202
1203 #define OVR_LBL(ctx) C_RCV_HDR_OVF_ ## ctx
1204 #define OVR_ELM(ctx) \
1205 CNTR_ELEM("RcvHdrOvr" #ctx, \
1206           (RCV_HDR_OVFL_CNT + ctx * 0x100), \
1207           0, CNTR_NORMAL, port_access_u64_csr)
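
/*
 * For example, OVR_ELM(3) expands to
 *      CNTR_ELEM("RcvHdrOvr3", RCV_HDR_OVFL_CNT + 3 * 0x100,
 *                0, CNTR_NORMAL, port_access_u64_csr)
 * i.e. each receive context's overflow counter sits 0x100 bytes past
 * the previous one.
 */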
1208
1209 /* 32bit TXE */
1210 #define TXE32_PORT_CNTR_ELEM(name, counter, flags) \
1211 CNTR_ELEM(#name, \
1212           (counter * 8 + SEND_COUNTER_ARRAY32), \
1213           0, flags | CNTR_32BIT, \
1214           port_access_u32_csr)
1215
1216 /* 64bit TXE */
1217 #define TXE64_PORT_CNTR_ELEM(name, counter, flags) \
1218 CNTR_ELEM(#name, \
1219           (counter * 8 + SEND_COUNTER_ARRAY64), \
1220           0, flags, \
1221           port_access_u64_csr)
1222
1223 #define TX64_DEV_CNTR_ELEM(name, counter, flags) \
1224 CNTR_ELEM(#name, \
1225           counter * 8 + SEND_COUNTER_ARRAY64, \
1226           0, \
1227           flags, \
1228           dev_access_u64_csr)
1229
1230 /* CCE */
1231 #define CCE_PERF_DEV_CNTR_ELEM(name, counter, flags) \
1232 CNTR_ELEM(#name, \
1233           (counter * 8 + CCE_COUNTER_ARRAY32), \
1234           0, flags | CNTR_32BIT, \
1235           dev_access_u32_csr)
1236
1237 #define CCE_INT_DEV_CNTR_ELEM(name, counter, flags) \
1238 CNTR_ELEM(#name, \
1239           (counter * 8 + CCE_INT_COUNTER_ARRAY32), \
1240           0, flags | CNTR_32BIT, \
1241           dev_access_u32_csr)
1242
1243 /* DC */
1244 #define DC_PERF_CNTR(name, counter, flags) \
1245 CNTR_ELEM(#name, \
1246           counter, \
1247           0, \
1248           flags, \
1249           dev_access_u64_csr)
1250
1251 #define DC_PERF_CNTR_LCB(name, counter, flags) \
1252 CNTR_ELEM(#name, \
1253           counter, \
1254           0, \
1255           flags, \
1256           dc_access_lcb_cntr)
1257
1258 /* ibp counters */
1259 #define SW_IBP_CNTR(name, cntr) \
1260 CNTR_ELEM(#name, \
1261           0, \
1262           0, \
1263           CNTR_SYNTH, \
1264           access_ibp_##cntr)
1265
1266 u64 read_csr(const struct hfi1_devdata *dd, u32 offset)
1267 {
1268         if (dd->flags & HFI1_PRESENT)
1269                 return readq((void __iomem *)dd->kregbase + offset);
1270         /* not present: return all ones, as a read to missing hardware does */
1271         return -1;
1272 }
1273
1274 void write_csr(const struct hfi1_devdata *dd, u32 offset, u64 value)
1275 {
1276         if (dd->flags & HFI1_PRESENT)
1277                 writeq(value, (void __iomem *)dd->kregbase + offset);
1278 }
1279
1280 void __iomem *get_csr_addr(
1281         struct hfi1_devdata *dd,
1282         u32 offset)
1283 {
1284         return (void __iomem *)dd->kregbase + offset;
1285 }
1286
1287 static inline u64 read_write_csr(const struct hfi1_devdata *dd, u32 csr,
1288                                  int mode, u64 value)
1289 {
1290         u64 ret;
1291
1292         if (mode == CNTR_MODE_R) {
1293                 ret = read_csr(dd, csr);
1294         } else if (mode == CNTR_MODE_W) {
1295                 write_csr(dd, csr, value);
1296                 ret = value;
1297         } else {
1298                 dd_dev_err(dd, "Invalid cntr register access mode");
1299                 return 0;
1300         }
1301
1302         hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, ret, mode);
1303         return ret;
1304 }
1305
1306 /* Dev Access */
1307 static u64 dev_access_u32_csr(const struct cntr_entry *entry,
1308                               void *context, int vl, int mode, u64 data)
1309 {
1310         struct hfi1_devdata *dd = context;
1311         u64 csr = entry->csr;
1312
1313         if (entry->flags & CNTR_SDMA) {
1314                 if (vl == CNTR_INVALID_VL)
1315                         return 0;
1316                 csr += 0x100 * vl; /* vl is the SDMA engine index */
1317         } else {
1318                 if (vl != CNTR_INVALID_VL)
1319                         return 0;
1320         }
1321         return read_write_csr(dd, csr, mode, data);
1322 }
1323
1324 static u64 access_sde_err_cnt(const struct cntr_entry *entry,
1325                               void *context, int idx, int mode, u64 data)
1326 {
1327         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1328
1329         if (dd->per_sdma && idx < dd->num_sdma)
1330                 return dd->per_sdma[idx].err_cnt;
1331         return 0;
1332 }
1333
1334 static u64 access_sde_int_cnt(const struct cntr_entry *entry,
1335                               void *context, int idx, int mode, u64 data)
1336 {
1337         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1338
1339         if (dd->per_sdma && idx < dd->num_sdma)
1340                 return dd->per_sdma[idx].sdma_int_cnt;
1341         return 0;
1342 }
1343
1344 static u64 access_sde_idle_int_cnt(const struct cntr_entry *entry,
1345                                    void *context, int idx, int mode, u64 data)
1346 {
1347         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1348
1349         if (dd->per_sdma && idx < dd->num_sdma)
1350                 return dd->per_sdma[idx].idle_int_cnt;
1351         return 0;
1352 }
1353
1354 static u64 access_sde_progress_int_cnt(const struct cntr_entry *entry,
1355                                        void *context, int idx, int mode,
1356                                        u64 data)
1357 {
1358         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1359
1360         if (dd->per_sdma && idx < dd->num_sdma)
1361                 return dd->per_sdma[idx].progress_int_cnt;
1362         return 0;
1363 }
1364
1365 static u64 dev_access_u64_csr(const struct cntr_entry *entry, void *context,
1366                               int vl, int mode, u64 data)
1367 {
1368         struct hfi1_devdata *dd = context;
1369
1370         u64 val = 0;
1371         u64 csr = entry->csr;
1372
1373         if (entry->flags & CNTR_VL) {
1374                 if (vl == CNTR_INVALID_VL)
1375                         return 0;
1376                 csr += 8 * vl; /* one 8-byte CSR per VL */
1377         } else {
1378                 if (vl != CNTR_INVALID_VL)
1379                         return 0;
1380         }
1381
1382         val = read_write_csr(dd, csr, mode, data);
1383         return val;
1384 }
1385
1386 static u64 dc_access_lcb_cntr(const struct cntr_entry *entry, void *context,
1387                               int vl, int mode, u64 data)
1388 {
1389         struct hfi1_devdata *dd = context;
1390         u32 csr = entry->csr;
1391         int ret = 0;
1392
1393         if (vl != CNTR_INVALID_VL)
1394                 return 0;
1395         if (mode == CNTR_MODE_R)
1396                 ret = read_lcb_csr(dd, csr, &data);
1397         else if (mode == CNTR_MODE_W)
1398                 ret = write_lcb_csr(dd, csr, data);
1399
1400         if (ret) {
1401                 dd_dev_err(dd, "Could not acquire LCB for counter 0x%x", csr);
1402                 return 0;
1403         }
1404
1405         hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, data, mode);
1406         return data;
1407 }
1408
1409 /* Port Access */
1410 static u64 port_access_u32_csr(const struct cntr_entry *entry, void *context,
1411                                int vl, int mode, u64 data)
1412 {
1413         struct hfi1_pportdata *ppd = context;
1414
1415         if (vl != CNTR_INVALID_VL)
1416                 return 0;
1417         return read_write_csr(ppd->dd, entry->csr, mode, data);
1418 }
1419
1420 static u64 port_access_u64_csr(const struct cntr_entry *entry,
1421                                void *context, int vl, int mode, u64 data)
1422 {
1423         struct hfi1_pportdata *ppd = context;
1424         u64 val;
1425         u64 csr = entry->csr;
1426
1427         if (entry->flags & CNTR_VL) {
1428                 if (vl == CNTR_INVALID_VL)
1429                         return 0;
1430                 csr += 8 * vl;
1431         } else {
1432                 if (vl != CNTR_INVALID_VL)
1433                         return 0;
1434         }
1435         val = read_write_csr(ppd->dd, csr, mode, data);
1436         return val;
1437 }
1438
1439 /* Software defined */
1440 static inline u64 read_write_sw(struct hfi1_devdata *dd, u64 *cntr, int mode,
1441                                 u64 data)
1442 {
1443         u64 ret;
1444
1445         if (mode == CNTR_MODE_R) {
1446                 ret = *cntr;
1447         } else if (mode == CNTR_MODE_W) {
1448                 *cntr = data;
1449                 ret = data;
1450         } else {
1451                 dd_dev_err(dd, "Invalid cntr sw access mode");
1452                 return 0;
1453         }
1454
1455         hfi1_cdbg(CNTR, "val 0x%llx mode %d", ret, mode);
1456
1457         return ret;
1458 }
1459
1460 static u64 access_sw_link_dn_cnt(const struct cntr_entry *entry, void *context,
1461                                  int vl, int mode, u64 data)
1462 {
1463         struct hfi1_pportdata *ppd = context;
1464
1465         if (vl != CNTR_INVALID_VL)
1466                 return 0;
1467         return read_write_sw(ppd->dd, &ppd->link_downed, mode, data);
1468 }
1469
1470 static u64 access_sw_link_up_cnt(const struct cntr_entry *entry, void *context,
1471                                  int vl, int mode, u64 data)
1472 {
1473         struct hfi1_pportdata *ppd = context;
1474
1475         if (vl != CNTR_INVALID_VL)
1476                 return 0;
1477         return read_write_sw(ppd->dd, &ppd->link_up, mode, data);
1478 }
1479
1480 static u64 access_sw_unknown_frame_cnt(const struct cntr_entry *entry,
1481                                        void *context, int vl, int mode,
1482                                        u64 data)
1483 {
1484         struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
1485
1486         if (vl != CNTR_INVALID_VL)
1487                 return 0;
1488         return read_write_sw(ppd->dd, &ppd->unknown_frame_count, mode, data);
1489 }
1490
1491 static u64 access_sw_xmit_discards(const struct cntr_entry *entry,
1492                                    void *context, int vl, int mode, u64 data)
1493 {
1494         struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
1495         u64 zero = 0;
1496         u64 *counter;
1497
1498         if (vl == CNTR_INVALID_VL)
1499                 counter = &ppd->port_xmit_discards;
1500         else if (vl >= 0 && vl < C_VL_COUNT)
1501                 counter = &ppd->port_xmit_discards_vl[vl];
1502         else
1503                 counter = &zero;
1504
1505         return read_write_sw(ppd->dd, counter, mode, data);
1506 }
1507
1508 static u64 access_xmit_constraint_errs(const struct cntr_entry *entry,
1509                                        void *context, int vl, int mode,
1510                                        u64 data)
1511 {
1512         struct hfi1_pportdata *ppd = context;
1513
1514         if (vl != CNTR_INVALID_VL)
1515                 return 0;
1516
1517         return read_write_sw(ppd->dd, &ppd->port_xmit_constraint_errors,
1518                              mode, data);
1519 }
1520
1521 static u64 access_rcv_constraint_errs(const struct cntr_entry *entry,
1522                                       void *context, int vl, int mode, u64 data)
1523 {
1524         struct hfi1_pportdata *ppd = context;
1525
1526         if (vl != CNTR_INVALID_VL)
1527                 return 0;
1528
1529         return read_write_sw(ppd->dd, &ppd->port_rcv_constraint_errors,
1530                              mode, data);
1531 }
1532
1533 u64 get_all_cpu_total(u64 __percpu *cntr)
1534 {
1535         int cpu;
1536         u64 counter = 0;
1537
1538         for_each_possible_cpu(cpu)
1539                 counter += *per_cpu_ptr(cntr, cpu);
1540         return counter;
1541 }
1542
1543 static u64 read_write_cpu(struct hfi1_devdata *dd, u64 *z_val,
1544                           u64 __percpu *cntr,
1545                           int vl, int mode, u64 data)
1546 {
1547         u64 ret = 0;
1548
1549         if (vl != CNTR_INVALID_VL)
1550                 return 0;
1551
1552         if (mode == CNTR_MODE_R) {
1553                 ret = get_all_cpu_total(cntr) - *z_val;
1554         } else if (mode == CNTR_MODE_W) {
1555                 /* A write can only zero the counter */
1556                 if (data == 0)
1557                         *z_val = get_all_cpu_total(cntr);
1558                 else
1559                         dd_dev_err(dd, "Per CPU cntrs can only be zeroed");
1560         } else {
1561                 dd_dev_err(dd, "Invalid cntr sw cpu access mode");
1562                 return 0;
1563         }
1564
1565         return ret;
1566 }
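
/*
 * Illustrative sketch only, not part of the driver: per-cpu counters
 * are summed over all possible CPUs (so contributions from CPUs that
 * have since gone offline are kept) and are never written directly.
 * "Zeroing" one just snapshots the current total into the z_* baseline;
 * every later read reports the delta since that snapshot.  The helper
 * name below is hypothetical.
 */
static u64 __maybe_unused example_zero_then_read_intr(struct hfi1_devdata *dd)
{
        /* write 0: record the running total as the new baseline */
        read_write_cpu(dd, &dd->z_int_counter, dd->int_counter,
                       CNTR_INVALID_VL, CNTR_MODE_W, 0);
        /* read: running total minus the baseline -- 0 right after zeroing */
        return read_write_cpu(dd, &dd->z_int_counter, dd->int_counter,
                              CNTR_INVALID_VL, CNTR_MODE_R, 0);
}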
1567
1568 static u64 access_sw_cpu_intr(const struct cntr_entry *entry,
1569                               void *context, int vl, int mode, u64 data)
1570 {
1571         struct hfi1_devdata *dd = context;
1572
1573         return read_write_cpu(dd, &dd->z_int_counter, dd->int_counter, vl,
1574                               mode, data);
1575 }
1576
1577 static u64 access_sw_cpu_rcv_limit(const struct cntr_entry *entry,
1578                                    void *context, int vl, int mode, u64 data)
1579 {
1580         struct hfi1_devdata *dd = context;
1581
1582         return read_write_cpu(dd, &dd->z_rcv_limit, dd->rcv_limit, vl,
1583                               mode, data);
1584 }
1585
1586 static u64 access_sw_pio_wait(const struct cntr_entry *entry,
1587                               void *context, int vl, int mode, u64 data)
1588 {
1589         struct hfi1_devdata *dd = context;
1590
1591         return dd->verbs_dev.n_piowait;
1592 }
1593
1594 static u64 access_sw_pio_drain(const struct cntr_entry *entry,
1595                                void *context, int vl, int mode, u64 data)
1596 {
1597         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1598
1599         return dd->verbs_dev.n_piodrain;
1600 }
1601
1602 static u64 access_sw_vtx_wait(const struct cntr_entry *entry,
1603                               void *context, int vl, int mode, u64 data)
1604 {
1605         struct hfi1_devdata *dd = context;
1606
1607         return dd->verbs_dev.n_txwait;
1608 }
1609
1610 static u64 access_sw_kmem_wait(const struct cntr_entry *entry,
1611                                void *context, int vl, int mode, u64 data)
1612 {
1613         struct hfi1_devdata *dd = context;
1614
1615         return dd->verbs_dev.n_kmem_wait;
1616 }
1617
1618 static u64 access_sw_send_schedule(const struct cntr_entry *entry,
1619                                    void *context, int vl, int mode, u64 data)
1620 {
1621         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1622
1623         return read_write_cpu(dd, &dd->z_send_schedule, dd->send_schedule, vl,
1624                               mode, data);
1625 }
1626
1627 /* Software counters for the MISC_ERR_STATUS bits, indexed by bit position */
1628 static u64 access_misc_pll_lock_fail_err_cnt(const struct cntr_entry *entry,
1629                                              void *context, int vl, int mode,
1630                                              u64 data)
1631 {
1632         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1633
1634         return dd->misc_err_status_cnt[12];
1635 }
1636
1637 static u64 access_misc_mbist_fail_err_cnt(const struct cntr_entry *entry,
1638                                           void *context, int vl, int mode,
1639                                           u64 data)
1640 {
1641         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1642
1643         return dd->misc_err_status_cnt[11];
1644 }
1645
1646 static u64 access_misc_invalid_eep_cmd_err_cnt(const struct cntr_entry *entry,
1647                                                void *context, int vl, int mode,
1648                                                u64 data)
1649 {
1650         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1651
1652         return dd->misc_err_status_cnt[10];
1653 }
1654
1655 static u64 access_misc_efuse_done_parity_err_cnt(const struct cntr_entry *entry,
1656                                                  void *context, int vl,
1657                                                  int mode, u64 data)
1658 {
1659         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1660
1661         return dd->misc_err_status_cnt[9];
1662 }
1663
1664 static u64 access_misc_efuse_write_err_cnt(const struct cntr_entry *entry,
1665                                            void *context, int vl, int mode,
1666                                            u64 data)
1667 {
1668         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1669
1670         return dd->misc_err_status_cnt[8];
1671 }
1672
1673 static u64 access_misc_efuse_read_bad_addr_err_cnt(
1674                                 const struct cntr_entry *entry,
1675                                 void *context, int vl, int mode, u64 data)
1676 {
1677         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1678
1679         return dd->misc_err_status_cnt[7];
1680 }
1681
1682 static u64 access_misc_efuse_csr_parity_err_cnt(const struct cntr_entry *entry,
1683                                                 void *context, int vl,
1684                                                 int mode, u64 data)
1685 {
1686         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1687
1688         return dd->misc_err_status_cnt[6];
1689 }
1690
1691 static u64 access_misc_fw_auth_failed_err_cnt(const struct cntr_entry *entry,
1692                                               void *context, int vl, int mode,
1693                                               u64 data)
1694 {
1695         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1696
1697         return dd->misc_err_status_cnt[5];
1698 }
1699
1700 static u64 access_misc_key_mismatch_err_cnt(const struct cntr_entry *entry,
1701                                             void *context, int vl, int mode,
1702                                             u64 data)
1703 {
1704         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1705
1706         return dd->misc_err_status_cnt[4];
1707 }
1708
1709 static u64 access_misc_sbus_write_failed_err_cnt(const struct cntr_entry *entry,
1710                                                  void *context, int vl,
1711                                                  int mode, u64 data)
1712 {
1713         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1714
1715         return dd->misc_err_status_cnt[3];
1716 }
1717
1718 static u64 access_misc_csr_write_bad_addr_err_cnt(
1719                                 const struct cntr_entry *entry,
1720                                 void *context, int vl, int mode, u64 data)
1721 {
1722         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1723
1724         return dd->misc_err_status_cnt[2];
1725 }
1726
1727 static u64 access_misc_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
1728                                                  void *context, int vl,
1729                                                  int mode, u64 data)
1730 {
1731         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1732
1733         return dd->misc_err_status_cnt[1];
1734 }
1735
1736 static u64 access_misc_csr_parity_err_cnt(const struct cntr_entry *entry,
1737                                           void *context, int vl, int mode,
1738                                           u64 data)
1739 {
1740         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1741
1742         return dd->misc_err_status_cnt[0];
1743 }
1744
1745 /*
1746  * Software counter for the aggregate of
1747  * individual CceErrStatus counters
1748  */
1749 static u64 access_sw_cce_err_status_aggregated_cnt(
1750                                 const struct cntr_entry *entry,
1751                                 void *context, int vl, int mode, u64 data)
1752 {
1753         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1754
1755         return dd->sw_cce_err_status_aggregate;
1756 }
1757
1758 /*
1759  * Software counters corresponding to each of the error status
1760  * bits within CceErrStatus, indexed by bit position
1761  */
1762 static u64 access_cce_msix_csr_parity_err_cnt(const struct cntr_entry *entry,
1763                                               void *context, int vl, int mode,
1764                                               u64 data)
1765 {
1766         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1767
1768         return dd->cce_err_status_cnt[40];
1769 }
1770
1771 static u64 access_cce_int_map_unc_err_cnt(const struct cntr_entry *entry,
1772                                           void *context, int vl, int mode,
1773                                           u64 data)
1774 {
1775         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1776
1777         return dd->cce_err_status_cnt[39];
1778 }
1779
1780 static u64 access_cce_int_map_cor_err_cnt(const struct cntr_entry *entry,
1781                                           void *context, int vl, int mode,
1782                                           u64 data)
1783 {
1784         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1785
1786         return dd->cce_err_status_cnt[38];
1787 }
1788
1789 static u64 access_cce_msix_table_unc_err_cnt(const struct cntr_entry *entry,
1790                                              void *context, int vl, int mode,
1791                                              u64 data)
1792 {
1793         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1794
1795         return dd->cce_err_status_cnt[37];
1796 }
1797
1798 static u64 access_cce_msix_table_cor_err_cnt(const struct cntr_entry *entry,
1799                                              void *context, int vl, int mode,
1800                                              u64 data)
1801 {
1802         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1803
1804         return dd->cce_err_status_cnt[36];
1805 }
1806
1807 static u64 access_cce_rxdma_conv_fifo_parity_err_cnt(
1808                                 const struct cntr_entry *entry,
1809                                 void *context, int vl, int mode, u64 data)
1810 {
1811         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1812
1813         return dd->cce_err_status_cnt[35];
1814 }
1815
1816 static u64 access_cce_rcpl_async_fifo_parity_err_cnt(
1817                                 const struct cntr_entry *entry,
1818                                 void *context, int vl, int mode, u64 data)
1819 {
1820         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1821
1822         return dd->cce_err_status_cnt[34];
1823 }
1824
1825 static u64 access_cce_seg_write_bad_addr_err_cnt(const struct cntr_entry *entry,
1826                                                  void *context, int vl,
1827                                                  int mode, u64 data)
1828 {
1829         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1830
1831         return dd->cce_err_status_cnt[33];
1832 }
1833
1834 static u64 access_cce_seg_read_bad_addr_err_cnt(const struct cntr_entry *entry,
1835                                                 void *context, int vl, int mode,
1836                                                 u64 data)
1837 {
1838         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1839
1840         return dd->cce_err_status_cnt[32];
1841 }
1842
1843 static u64 access_la_triggered_cnt(const struct cntr_entry *entry,
1844                                    void *context, int vl, int mode, u64 data)
1845 {
1846         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1847
1848         return dd->cce_err_status_cnt[31];
1849 }
1850
1851 static u64 access_cce_trgt_cpl_timeout_err_cnt(const struct cntr_entry *entry,
1852                                                void *context, int vl, int mode,
1853                                                u64 data)
1854 {
1855         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1856
1857         return dd->cce_err_status_cnt[30];
1858 }
1859
1860 static u64 access_pcic_receive_parity_err_cnt(const struct cntr_entry *entry,
1861                                               void *context, int vl, int mode,
1862                                               u64 data)
1863 {
1864         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1865
1866         return dd->cce_err_status_cnt[29];
1867 }
1868
1869 static u64 access_pcic_transmit_back_parity_err_cnt(
1870                                 const struct cntr_entry *entry,
1871                                 void *context, int vl, int mode, u64 data)
1872 {
1873         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1874
1875         return dd->cce_err_status_cnt[28];
1876 }
1877
1878 static u64 access_pcic_transmit_front_parity_err_cnt(
1879                                 const struct cntr_entry *entry,
1880                                 void *context, int vl, int mode, u64 data)
1881 {
1882         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1883
1884         return dd->cce_err_status_cnt[27];
1885 }
1886
1887 static u64 access_pcic_cpl_dat_q_unc_err_cnt(const struct cntr_entry *entry,
1888                                              void *context, int vl, int mode,
1889                                              u64 data)
1890 {
1891         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1892
1893         return dd->cce_err_status_cnt[26];
1894 }
1895
1896 static u64 access_pcic_cpl_hd_q_unc_err_cnt(const struct cntr_entry *entry,
1897                                             void *context, int vl, int mode,
1898                                             u64 data)
1899 {
1900         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1901
1902         return dd->cce_err_status_cnt[25];
1903 }
1904
1905 static u64 access_pcic_post_dat_q_unc_err_cnt(const struct cntr_entry *entry,
1906                                               void *context, int vl, int mode,
1907                                               u64 data)
1908 {
1909         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1910
1911         return dd->cce_err_status_cnt[24];
1912 }
1913
1914 static u64 access_pcic_post_hd_q_unc_err_cnt(const struct cntr_entry *entry,
1915                                              void *context, int vl, int mode,
1916                                              u64 data)
1917 {
1918         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1919
1920         return dd->cce_err_status_cnt[23];
1921 }
1922
1923 static u64 access_pcic_retry_sot_mem_unc_err_cnt(const struct cntr_entry *entry,
1924                                                  void *context, int vl,
1925                                                  int mode, u64 data)
1926 {
1927         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1928
1929         return dd->cce_err_status_cnt[22];
1930 }
1931
1932 static u64 access_pcic_retry_mem_unc_err(const struct cntr_entry *entry,
1933                                          void *context, int vl, int mode,
1934                                          u64 data)
1935 {
1936         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1937
1938         return dd->cce_err_status_cnt[21];
1939 }
1940
1941 static u64 access_pcic_n_post_dat_q_parity_err_cnt(
1942                                 const struct cntr_entry *entry,
1943                                 void *context, int vl, int mode, u64 data)
1944 {
1945         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1946
1947         return dd->cce_err_status_cnt[20];
1948 }
1949
1950 static u64 access_pcic_n_post_h_q_parity_err_cnt(const struct cntr_entry *entry,
1951                                                  void *context, int vl,
1952                                                  int mode, u64 data)
1953 {
1954         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1955
1956         return dd->cce_err_status_cnt[19];
1957 }
1958
1959 static u64 access_pcic_cpl_dat_q_cor_err_cnt(const struct cntr_entry *entry,
1960                                              void *context, int vl, int mode,
1961                                              u64 data)
1962 {
1963         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1964
1965         return dd->cce_err_status_cnt[18];
1966 }
1967
1968 static u64 access_pcic_cpl_hd_q_cor_err_cnt(const struct cntr_entry *entry,
1969                                             void *context, int vl, int mode,
1970                                             u64 data)
1971 {
1972         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1973
1974         return dd->cce_err_status_cnt[17];
1975 }
1976
1977 static u64 access_pcic_post_dat_q_cor_err_cnt(const struct cntr_entry *entry,
1978                                               void *context, int vl, int mode,
1979                                               u64 data)
1980 {
1981         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1982
1983         return dd->cce_err_status_cnt[16];
1984 }
1985
1986 static u64 access_pcic_post_hd_q_cor_err_cnt(const struct cntr_entry *entry,
1987                                              void *context, int vl, int mode,
1988                                              u64 data)
1989 {
1990         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1991
1992         return dd->cce_err_status_cnt[15];
1993 }
1994
1995 static u64 access_pcic_retry_sot_mem_cor_err_cnt(const struct cntr_entry *entry,
1996                                                  void *context, int vl,
1997                                                  int mode, u64 data)
1998 {
1999         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2000
2001         return dd->cce_err_status_cnt[14];
2002 }
2003
2004 static u64 access_pcic_retry_mem_cor_err_cnt(const struct cntr_entry *entry,
2005                                              void *context, int vl, int mode,
2006                                              u64 data)
2007 {
2008         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2009
2010         return dd->cce_err_status_cnt[13];
2011 }
2012
2013 static u64 access_cce_cli1_async_fifo_dbg_parity_err_cnt(
2014                                 const struct cntr_entry *entry,
2015                                 void *context, int vl, int mode, u64 data)
2016 {
2017         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2018
2019         return dd->cce_err_status_cnt[12];
2020 }
2021
2022 static u64 access_cce_cli1_async_fifo_rxdma_parity_err_cnt(
2023                                 const struct cntr_entry *entry,
2024                                 void *context, int vl, int mode, u64 data)
2025 {
2026         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2027
2028         return dd->cce_err_status_cnt[11];
2029 }
2030
2031 static u64 access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt(
2032                                 const struct cntr_entry *entry,
2033                                 void *context, int vl, int mode, u64 data)
2034 {
2035         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2036
2037         return dd->cce_err_status_cnt[10];
2038 }
2039
2040 static u64 access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt(
2041                                 const struct cntr_entry *entry,
2042                                 void *context, int vl, int mode, u64 data)
2043 {
2044         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2045
2046         return dd->cce_err_status_cnt[9];
2047 }
2048
2049 static u64 access_cce_cli2_async_fifo_parity_err_cnt(
2050                                 const struct cntr_entry *entry,
2051                                 void *context, int vl, int mode, u64 data)
2052 {
2053         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2054
2055         return dd->cce_err_status_cnt[8];
2056 }
2057
2058 static u64 access_cce_csr_cfg_bus_parity_err_cnt(const struct cntr_entry *entry,
2059                                                  void *context, int vl,
2060                                                  int mode, u64 data)
2061 {
2062         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2063
2064         return dd->cce_err_status_cnt[7];
2065 }
2066
2067 static u64 access_cce_cli0_async_fifo_parity_err_cnt(
2068                                 const struct cntr_entry *entry,
2069                                 void *context, int vl, int mode, u64 data)
2070 {
2071         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2072
2073         return dd->cce_err_status_cnt[6];
2074 }
2075
2076 static u64 access_cce_rspd_data_parity_err_cnt(const struct cntr_entry *entry,
2077                                                void *context, int vl, int mode,
2078                                                u64 data)
2079 {
2080         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2081
2082         return dd->cce_err_status_cnt[5];
2083 }
2084
2085 static u64 access_cce_trgt_access_err_cnt(const struct cntr_entry *entry,
2086                                           void *context, int vl, int mode,
2087                                           u64 data)
2088 {
2089         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2090
2091         return dd->cce_err_status_cnt[4];
2092 }
2093
2094 static u64 access_cce_trgt_async_fifo_parity_err_cnt(
2095                                 const struct cntr_entry *entry,
2096                                 void *context, int vl, int mode, u64 data)
2097 {
2098         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2099
2100         return dd->cce_err_status_cnt[3];
2101 }
2102
2103 static u64 access_cce_csr_write_bad_addr_err_cnt(const struct cntr_entry *entry,
2104                                                  void *context, int vl,
2105                                                  int mode, u64 data)
2106 {
2107         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2108
2109         return dd->cce_err_status_cnt[2];
2110 }
2111
2112 static u64 access_cce_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
2113                                                 void *context, int vl,
2114                                                 int mode, u64 data)
2115 {
2116         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2117
2118         return dd->cce_err_status_cnt[1];
2119 }
2120
2121 static u64 access_ccs_csr_parity_err_cnt(const struct cntr_entry *entry,
2122                                          void *context, int vl, int mode,
2123                                          u64 data)
2124 {
2125         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2126
2127         return dd->cce_err_status_cnt[0];
2128 }
2129
2130 /*
2131  * Software counters corresponding to each of the error status
2132  * bits within RcvErrStatus, indexed by bit position
2133  */
2134 static u64 access_rx_csr_parity_err_cnt(const struct cntr_entry *entry,
2135                                         void *context, int vl, int mode,
2136                                         u64 data)
2137 {
2138         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2139
2140         return dd->rcv_err_status_cnt[63];
2141 }
2142
2143 static u64 access_rx_csr_write_bad_addr_err_cnt(const struct cntr_entry *entry,
2144                                                 void *context, int vl,
2145                                                 int mode, u64 data)
2146 {
2147         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2148
2149         return dd->rcv_err_status_cnt[62];
2150 }
2151
2152 static u64 access_rx_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
2153                                                void *context, int vl, int mode,
2154                                                u64 data)
2155 {
2156         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2157
2158         return dd->rcv_err_status_cnt[61];
2159 }
2160
2161 static u64 access_rx_dma_csr_unc_err_cnt(const struct cntr_entry *entry,
2162                                          void *context, int vl, int mode,
2163                                          u64 data)
2164 {
2165         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2166
2167         return dd->rcv_err_status_cnt[60];
2168 }
2169
2170 static u64 access_rx_dma_dq_fsm_encoding_err_cnt(const struct cntr_entry *entry,
2171                                                  void *context, int vl,
2172                                                  int mode, u64 data)
2173 {
2174         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2175
2176         return dd->rcv_err_status_cnt[59];
2177 }
2178
2179 static u64 access_rx_dma_eq_fsm_encoding_err_cnt(const struct cntr_entry *entry,
2180                                                  void *context, int vl,
2181                                                  int mode, u64 data)
2182 {
2183         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2184
2185         return dd->rcv_err_status_cnt[58];
2186 }
2187
2188 static u64 access_rx_dma_csr_parity_err_cnt(const struct cntr_entry *entry,
2189                                             void *context, int vl, int mode,
2190                                             u64 data)
2191 {
2192         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2193
2194         return dd->rcv_err_status_cnt[57];
2195 }
2196
2197 static u64 access_rx_rbuf_data_cor_err_cnt(const struct cntr_entry *entry,
2198                                            void *context, int vl, int mode,
2199                                            u64 data)
2200 {
2201         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2202
2203         return dd->rcv_err_status_cnt[56];
2204 }
2205
2206 static u64 access_rx_rbuf_data_unc_err_cnt(const struct cntr_entry *entry,
2207                                            void *context, int vl, int mode,
2208                                            u64 data)
2209 {
2210         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2211
2212         return dd->rcv_err_status_cnt[55];
2213 }
2214
2215 static u64 access_rx_dma_data_fifo_rd_cor_err_cnt(
2216                                 const struct cntr_entry *entry,
2217                                 void *context, int vl, int mode, u64 data)
2218 {
2219         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2220
2221         return dd->rcv_err_status_cnt[54];
2222 }
2223
2224 static u64 access_rx_dma_data_fifo_rd_unc_err_cnt(
2225                                 const struct cntr_entry *entry,
2226                                 void *context, int vl, int mode, u64 data)
2227 {
2228         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2229
2230         return dd->rcv_err_status_cnt[53];
2231 }
2232
2233 static u64 access_rx_dma_hdr_fifo_rd_cor_err_cnt(const struct cntr_entry *entry,
2234                                                  void *context, int vl,
2235                                                  int mode, u64 data)
2236 {
2237         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2238
2239         return dd->rcv_err_status_cnt[52];
2240 }
2241
2242 static u64 access_rx_dma_hdr_fifo_rd_unc_err_cnt(const struct cntr_entry *entry,
2243                                                  void *context, int vl,
2244                                                  int mode, u64 data)
2245 {
2246         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2247
2248         return dd->rcv_err_status_cnt[51];
2249 }
2250
2251 static u64 access_rx_rbuf_desc_part2_cor_err_cnt(const struct cntr_entry *entry,
2252                                                  void *context, int vl,
2253                                                  int mode, u64 data)
2254 {
2255         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2256
2257         return dd->rcv_err_status_cnt[50];
2258 }
2259
2260 static u64 access_rx_rbuf_desc_part2_unc_err_cnt(const struct cntr_entry *entry,
2261                                                  void *context, int vl,
2262                                                  int mode, u64 data)
2263 {
2264         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2265
2266         return dd->rcv_err_status_cnt[49];
2267 }
2268
2269 static u64 access_rx_rbuf_desc_part1_cor_err_cnt(const struct cntr_entry *entry,
2270                                                  void *context, int vl,
2271                                                  int mode, u64 data)
2272 {
2273         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2274
2275         return dd->rcv_err_status_cnt[48];
2276 }
2277
2278 static u64 access_rx_rbuf_desc_part1_unc_err_cnt(const struct cntr_entry *entry,
2279                                                  void *context, int vl,
2280                                                  int mode, u64 data)
2281 {
2282         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2283
2284         return dd->rcv_err_status_cnt[47];
2285 }
2286
2287 static u64 access_rx_hq_intr_fsm_err_cnt(const struct cntr_entry *entry,
2288                                          void *context, int vl, int mode,
2289                                          u64 data)
2290 {
2291         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2292
2293         return dd->rcv_err_status_cnt[46];
2294 }
2295
2296 static u64 access_rx_hq_intr_csr_parity_err_cnt(
2297                                 const struct cntr_entry *entry,
2298                                 void *context, int vl, int mode, u64 data)
2299 {
2300         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2301
2302         return dd->rcv_err_status_cnt[45];
2303 }
2304
2305 static u64 access_rx_lookup_csr_parity_err_cnt(
2306                                 const struct cntr_entry *entry,
2307                                 void *context, int vl, int mode, u64 data)
2308 {
2309         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2310
2311         return dd->rcv_err_status_cnt[44];
2312 }
2313
2314 static u64 access_rx_lookup_rcv_array_cor_err_cnt(
2315                                 const struct cntr_entry *entry,
2316                                 void *context, int vl, int mode, u64 data)
2317 {
2318         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2319
2320         return dd->rcv_err_status_cnt[43];
2321 }
2322
2323 static u64 access_rx_lookup_rcv_array_unc_err_cnt(
2324                                 const struct cntr_entry *entry,
2325                                 void *context, int vl, int mode, u64 data)
2326 {
2327         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2328
2329         return dd->rcv_err_status_cnt[42];
2330 }
2331
2332 static u64 access_rx_lookup_des_part2_parity_err_cnt(
2333                                 const struct cntr_entry *entry,
2334                                 void *context, int vl, int mode, u64 data)
2335 {
2336         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2337
2338         return dd->rcv_err_status_cnt[41];
2339 }
2340
2341 static u64 access_rx_lookup_des_part1_unc_cor_err_cnt(
2342                                 const struct cntr_entry *entry,
2343                                 void *context, int vl, int mode, u64 data)
2344 {
2345         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2346
2347         return dd->rcv_err_status_cnt[40];
2348 }
2349
2350 static u64 access_rx_lookup_des_part1_unc_err_cnt(
2351                                 const struct cntr_entry *entry,
2352                                 void *context, int vl, int mode, u64 data)
2353 {
2354         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2355
2356         return dd->rcv_err_status_cnt[39];
2357 }
2358
2359 static u64 access_rx_rbuf_next_free_buf_cor_err_cnt(
2360                                 const struct cntr_entry *entry,
2361                                 void *context, int vl, int mode, u64 data)
2362 {
2363         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2364
2365         return dd->rcv_err_status_cnt[38];
2366 }
2367
2368 static u64 access_rx_rbuf_next_free_buf_unc_err_cnt(
2369                                 const struct cntr_entry *entry,
2370                                 void *context, int vl, int mode, u64 data)
2371 {
2372         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2373
2374         return dd->rcv_err_status_cnt[37];
2375 }
2376
2377 static u64 access_rbuf_fl_init_wr_addr_parity_err_cnt(
2378                                 const struct cntr_entry *entry,
2379                                 void *context, int vl, int mode, u64 data)
2380 {
2381         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2382
2383         return dd->rcv_err_status_cnt[36];
2384 }
2385
2386 static u64 access_rx_rbuf_fl_initdone_parity_err_cnt(
2387                                 const struct cntr_entry *entry,
2388                                 void *context, int vl, int mode, u64 data)
2389 {
2390         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2391
2392         return dd->rcv_err_status_cnt[35];
2393 }
2394
2395 static u64 access_rx_rbuf_fl_write_addr_parity_err_cnt(
2396                                 const struct cntr_entry *entry,
2397                                 void *context, int vl, int mode, u64 data)
2398 {
2399         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2400
2401         return dd->rcv_err_status_cnt[34];
2402 }
2403
2404 static u64 access_rx_rbuf_fl_rd_addr_parity_err_cnt(
2405                                 const struct cntr_entry *entry,
2406                                 void *context, int vl, int mode, u64 data)
2407 {
2408         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2409
2410         return dd->rcv_err_status_cnt[33];
2411 }
2412
2413 static u64 access_rx_rbuf_empty_err_cnt(const struct cntr_entry *entry,
2414                                         void *context, int vl, int mode,
2415                                         u64 data)
2416 {
2417         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2418
2419         return dd->rcv_err_status_cnt[32];
2420 }
2421
2422 static u64 access_rx_rbuf_full_err_cnt(const struct cntr_entry *entry,
2423                                        void *context, int vl, int mode,
2424                                        u64 data)
2425 {
2426         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2427
2428         return dd->rcv_err_status_cnt[31];
2429 }
2430
2431 static u64 access_rbuf_bad_lookup_err_cnt(const struct cntr_entry *entry,
2432                                           void *context, int vl, int mode,
2433                                           u64 data)
2434 {
2435         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2436
2437         return dd->rcv_err_status_cnt[30];
2438 }
2439
2440 static u64 access_rbuf_ctx_id_parity_err_cnt(const struct cntr_entry *entry,
2441                                              void *context, int vl, int mode,
2442                                              u64 data)
2443 {
2444         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2445
2446         return dd->rcv_err_status_cnt[29];
2447 }
2448
2449 static u64 access_rbuf_csr_qeopdw_parity_err_cnt(const struct cntr_entry *entry,
2450                                                  void *context, int vl,
2451                                                  int mode, u64 data)
2452 {
2453         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2454
2455         return dd->rcv_err_status_cnt[28];
2456 }
2457
2458 static u64 access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt(
2459                                 const struct cntr_entry *entry,
2460                                 void *context, int vl, int mode, u64 data)
2461 {
2462         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2463
2464         return dd->rcv_err_status_cnt[27];
2465 }
2466
2467 static u64 access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt(
2468                                 const struct cntr_entry *entry,
2469                                 void *context, int vl, int mode, u64 data)
2470 {
2471         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2472
2473         return dd->rcv_err_status_cnt[26];
2474 }
2475
2476 static u64 access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt(
2477                                 const struct cntr_entry *entry,
2478                                 void *context, int vl, int mode, u64 data)
2479 {
2480         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2481
2482         return dd->rcv_err_status_cnt[25];
2483 }
2484
2485 static u64 access_rx_rbuf_csr_q_vld_bit_parity_err_cnt(
2486                                 const struct cntr_entry *entry,
2487                                 void *context, int vl, int mode, u64 data)
2488 {
2489         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2490
2491         return dd->rcv_err_status_cnt[24];
2492 }
2493
2494 static u64 access_rx_rbuf_csr_q_next_buf_parity_err_cnt(
2495                                 const struct cntr_entry *entry,
2496                                 void *context, int vl, int mode, u64 data)
2497 {
2498         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2499
2500         return dd->rcv_err_status_cnt[23];
2501 }
2502
2503 static u64 access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt(
2504                                 const struct cntr_entry *entry,
2505                                 void *context, int vl, int mode, u64 data)
2506 {
2507         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2508
2509         return dd->rcv_err_status_cnt[22];
2510 }

static u64 access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->rcv_err_status_cnt[21];
}

static u64 access_rx_rbuf_block_list_read_cor_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->rcv_err_status_cnt[20];
}

static u64 access_rx_rbuf_block_list_read_unc_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->rcv_err_status_cnt[19];
}

static u64 access_rx_rbuf_lookup_des_cor_err_cnt(const struct cntr_entry *entry,
                                                 void *context, int vl,
                                                 int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->rcv_err_status_cnt[18];
}

static u64 access_rx_rbuf_lookup_des_unc_err_cnt(const struct cntr_entry *entry,
                                                 void *context, int vl,
                                                 int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->rcv_err_status_cnt[17];
}

static u64 access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->rcv_err_status_cnt[16];
}

static u64 access_rx_rbuf_lookup_des_reg_unc_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->rcv_err_status_cnt[15];
}

static u64 access_rx_rbuf_free_list_cor_err_cnt(const struct cntr_entry *entry,
                                                void *context, int vl,
                                                int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->rcv_err_status_cnt[14];
}

static u64 access_rx_rbuf_free_list_unc_err_cnt(const struct cntr_entry *entry,
                                                void *context, int vl,
                                                int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->rcv_err_status_cnt[13];
}

static u64 access_rx_rcv_fsm_encoding_err_cnt(const struct cntr_entry *entry,
                                              void *context, int vl, int mode,
                                              u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->rcv_err_status_cnt[12];
}

static u64 access_rx_dma_flag_cor_err_cnt(const struct cntr_entry *entry,
                                          void *context, int vl, int mode,
                                          u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->rcv_err_status_cnt[11];
}

static u64 access_rx_dma_flag_unc_err_cnt(const struct cntr_entry *entry,
                                          void *context, int vl, int mode,
                                          u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->rcv_err_status_cnt[10];
}

static u64 access_rx_dc_sop_eop_parity_err_cnt(const struct cntr_entry *entry,
                                               void *context, int vl, int mode,
                                               u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->rcv_err_status_cnt[9];
}

static u64 access_rx_rcv_csr_parity_err_cnt(const struct cntr_entry *entry,
                                            void *context, int vl, int mode,
                                            u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->rcv_err_status_cnt[8];
}

static u64 access_rx_rcv_qp_map_table_cor_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->rcv_err_status_cnt[7];
}

static u64 access_rx_rcv_qp_map_table_unc_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->rcv_err_status_cnt[6];
}

static u64 access_rx_rcv_data_cor_err_cnt(const struct cntr_entry *entry,
                                          void *context, int vl, int mode,
                                          u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->rcv_err_status_cnt[5];
}

static u64 access_rx_rcv_data_unc_err_cnt(const struct cntr_entry *entry,
                                          void *context, int vl, int mode,
                                          u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->rcv_err_status_cnt[4];
}

static u64 access_rx_rcv_hdr_cor_err_cnt(const struct cntr_entry *entry,
                                         void *context, int vl, int mode,
                                         u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->rcv_err_status_cnt[3];
}

static u64 access_rx_rcv_hdr_unc_err_cnt(const struct cntr_entry *entry,
                                         void *context, int vl, int mode,
                                         u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->rcv_err_status_cnt[2];
}

static u64 access_rx_dc_intf_parity_err_cnt(const struct cntr_entry *entry,
                                            void *context, int vl, int mode,
                                            u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->rcv_err_status_cnt[1];
}

static u64 access_rx_dma_csr_cor_err_cnt(const struct cntr_entry *entry,
                                         void *context, int vl, int mode,
                                         u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->rcv_err_status_cnt[0];
}

/*
 * Software counters corresponding to each of the
 * error status bits within SendPioErrStatus
 */
static u64 access_pio_pec_sop_head_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_pio_err_status_cnt[35];
}

static u64 access_pio_pcc_sop_head_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_pio_err_status_cnt[34];
}

static u64 access_pio_last_returned_cnt_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_pio_err_status_cnt[33];
}

static u64 access_pio_current_free_cnt_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_pio_err_status_cnt[32];
}

static u64 access_pio_reserved_31_err_cnt(const struct cntr_entry *entry,
                                          void *context, int vl, int mode,
                                          u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_pio_err_status_cnt[31];
}

static u64 access_pio_reserved_30_err_cnt(const struct cntr_entry *entry,
                                          void *context, int vl, int mode,
                                          u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_pio_err_status_cnt[30];
}
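
/*
 * The two handlers above report counts for reserved bits of
 * SendPioErrStatus.  Reserved bits are never expected to assert, but
 * counting them anyway (presumably) lets unexpected hardware behavior
 * show up in the counter dump instead of being silently dropped.
 */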

static u64 access_pio_ppmc_sop_len_err_cnt(const struct cntr_entry *entry,
                                           void *context, int vl, int mode,
                                           u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_pio_err_status_cnt[29];
}

static u64 access_pio_ppmc_bqc_mem_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_pio_err_status_cnt[28];
}

static u64 access_pio_vl_fifo_parity_err_cnt(const struct cntr_entry *entry,
                                             void *context, int vl, int mode,
                                             u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_pio_err_status_cnt[27];
}

static u64 access_pio_vlf_sop_parity_err_cnt(const struct cntr_entry *entry,
                                             void *context, int vl, int mode,
                                             u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_pio_err_status_cnt[26];
}

static u64 access_pio_vlf_v1_len_parity_err_cnt(const struct cntr_entry *entry,
                                                void *context, int vl,
                                                int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_pio_err_status_cnt[25];
}

static u64 access_pio_block_qw_count_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_pio_err_status_cnt[24];
}

static u64 access_pio_write_qw_valid_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_pio_err_status_cnt[23];
}

static u64 access_pio_state_machine_err_cnt(const struct cntr_entry *entry,
                                            void *context, int vl, int mode,
                                            u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_pio_err_status_cnt[22];
}

static u64 access_pio_write_data_parity_err_cnt(const struct cntr_entry *entry,
                                                void *context, int vl,
                                                int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_pio_err_status_cnt[21];
}

static u64 access_pio_host_addr_mem_cor_err_cnt(const struct cntr_entry *entry,
                                                void *context, int vl,
                                                int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_pio_err_status_cnt[20];
}

static u64 access_pio_host_addr_mem_unc_err_cnt(const struct cntr_entry *entry,
                                                void *context, int vl,
                                                int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_pio_err_status_cnt[19];
}

static u64 access_pio_pkt_evict_sm_or_arb_sm_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_pio_err_status_cnt[18];
}

static u64 access_pio_init_sm_in_err_cnt(const struct cntr_entry *entry,
                                         void *context, int vl, int mode,
                                         u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_pio_err_status_cnt[17];
}

static u64 access_pio_ppmc_pbl_fifo_err_cnt(const struct cntr_entry *entry,
                                            void *context, int vl, int mode,
                                            u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_pio_err_status_cnt[16];
}

static u64 access_pio_credit_ret_fifo_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_pio_err_status_cnt[15];
}

static u64 access_pio_v1_len_mem_bank1_cor_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_pio_err_status_cnt[14];
}

static u64 access_pio_v1_len_mem_bank0_cor_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_pio_err_status_cnt[13];
}

static u64 access_pio_v1_len_mem_bank1_unc_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_pio_err_status_cnt[12];
}

static u64 access_pio_v1_len_mem_bank0_unc_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_pio_err_status_cnt[11];
}

static u64 access_pio_sm_pkt_reset_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_pio_err_status_cnt[10];
}

static u64 access_pio_pkt_evict_fifo_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_pio_err_status_cnt[9];
}

static u64 access_pio_sbrdctrl_crrel_fifo_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_pio_err_status_cnt[8];
}

static u64 access_pio_sbrdctl_crrel_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_pio_err_status_cnt[7];
}

static u64 access_pio_pec_fifo_parity_err_cnt(const struct cntr_entry *entry,
                                              void *context, int vl, int mode,
                                              u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_pio_err_status_cnt[6];
}

static u64 access_pio_pcc_fifo_parity_err_cnt(const struct cntr_entry *entry,
                                              void *context, int vl, int mode,
                                              u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_pio_err_status_cnt[5];
}

static u64 access_pio_sb_mem_fifo1_err_cnt(const struct cntr_entry *entry,
                                           void *context, int vl, int mode,
                                           u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_pio_err_status_cnt[4];
}

static u64 access_pio_sb_mem_fifo0_err_cnt(const struct cntr_entry *entry,
                                           void *context, int vl, int mode,
                                           u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_pio_err_status_cnt[3];
}

static u64 access_pio_csr_parity_err_cnt(const struct cntr_entry *entry,
                                         void *context, int vl, int mode,
                                         u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_pio_err_status_cnt[2];
}

static u64 access_pio_write_addr_parity_err_cnt(const struct cntr_entry *entry,
                                                void *context, int vl,
                                                int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_pio_err_status_cnt[1];
}

static u64 access_pio_write_bad_ctxt_err_cnt(const struct cntr_entry *entry,
                                             void *context, int vl, int mode,
                                             u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_pio_err_status_cnt[0];
}

/*
 * Software counters corresponding to each of the
 * error status bits within SendDmaErrStatus
 */
static u64 access_sdma_pcie_req_tracking_cor_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_dma_err_status_cnt[3];
}

static u64 access_sdma_pcie_req_tracking_unc_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_dma_err_status_cnt[2];
}

static u64 access_sdma_csr_parity_err_cnt(const struct cntr_entry *entry,
                                          void *context, int vl, int mode,
                                          u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_dma_err_status_cnt[1];
}

static u64 access_sdma_rpy_tag_err_cnt(const struct cntr_entry *entry,
                                       void *context, int vl, int mode,
                                       u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_dma_err_status_cnt[0];
}

/*
 * Software counters corresponding to each of the
 * error status bits within SendEgressErrStatus
 */
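/*
 * SendEgressErrStatus is a full 64-bit register, so the handlers
 * below run from index 63 down to index 0 of
 * send_egress_err_status_cnt[], one per bit, reserved bit positions
 * included.
 */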
static u64 access_tx_read_pio_memory_csr_unc_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[63];
}

static u64 access_tx_read_sdma_memory_csr_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[62];
}

static u64 access_tx_egress_fifo_cor_err_cnt(const struct cntr_entry *entry,
                                             void *context, int vl, int mode,
                                             u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[61];
}

static u64 access_tx_read_pio_memory_cor_err_cnt(const struct cntr_entry *entry,
                                                 void *context, int vl,
                                                 int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[60];
}

static u64 access_tx_read_sdma_memory_cor_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[59];
}

static u64 access_tx_sb_hdr_cor_err_cnt(const struct cntr_entry *entry,
                                        void *context, int vl, int mode,
                                        u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[58];
}

static u64 access_tx_credit_overrun_err_cnt(const struct cntr_entry *entry,
                                            void *context, int vl, int mode,
                                            u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[57];
}

static u64 access_tx_launch_fifo8_cor_err_cnt(const struct cntr_entry *entry,
                                              void *context, int vl, int mode,
                                              u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[56];
}

static u64 access_tx_launch_fifo7_cor_err_cnt(const struct cntr_entry *entry,
                                              void *context, int vl, int mode,
                                              u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[55];
}

static u64 access_tx_launch_fifo6_cor_err_cnt(const struct cntr_entry *entry,
                                              void *context, int vl, int mode,
                                              u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[54];
}

static u64 access_tx_launch_fifo5_cor_err_cnt(const struct cntr_entry *entry,
                                              void *context, int vl, int mode,
                                              u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[53];
}

static u64 access_tx_launch_fifo4_cor_err_cnt(const struct cntr_entry *entry,
                                              void *context, int vl, int mode,
                                              u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[52];
}

static u64 access_tx_launch_fifo3_cor_err_cnt(const struct cntr_entry *entry,
                                              void *context, int vl, int mode,
                                              u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[51];
}

static u64 access_tx_launch_fifo2_cor_err_cnt(const struct cntr_entry *entry,
                                              void *context, int vl, int mode,
                                              u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[50];
}

static u64 access_tx_launch_fifo1_cor_err_cnt(const struct cntr_entry *entry,
                                              void *context, int vl, int mode,
                                              u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[49];
}

static u64 access_tx_launch_fifo0_cor_err_cnt(const struct cntr_entry *entry,
                                              void *context, int vl, int mode,
                                              u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[48];
}

static u64 access_tx_credit_return_vl_err_cnt(const struct cntr_entry *entry,
                                              void *context, int vl, int mode,
                                              u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[47];
}

static u64 access_tx_hcrc_insertion_err_cnt(const struct cntr_entry *entry,
                                            void *context, int vl, int mode,
                                            u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[46];
}

static u64 access_tx_egress_fifo_unc_err_cnt(const struct cntr_entry *entry,
                                             void *context, int vl, int mode,
                                             u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[45];
}

static u64 access_tx_read_pio_memory_unc_err_cnt(const struct cntr_entry *entry,
                                                 void *context, int vl,
                                                 int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[44];
}

static u64 access_tx_read_sdma_memory_unc_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[43];
}

static u64 access_tx_sb_hdr_unc_err_cnt(const struct cntr_entry *entry,
                                        void *context, int vl, int mode,
                                        u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[42];
}

static u64 access_tx_credit_return_partiy_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[41];
}

static u64 access_tx_launch_fifo8_unc_or_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[40];
}

static u64 access_tx_launch_fifo7_unc_or_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[39];
}

static u64 access_tx_launch_fifo6_unc_or_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[38];
}

static u64 access_tx_launch_fifo5_unc_or_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[37];
}

static u64 access_tx_launch_fifo4_unc_or_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[36];
}

static u64 access_tx_launch_fifo3_unc_or_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[35];
}

static u64 access_tx_launch_fifo2_unc_or_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[34];
}

static u64 access_tx_launch_fifo1_unc_or_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[33];
}

static u64 access_tx_launch_fifo0_unc_or_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[32];
}

static u64 access_tx_sdma15_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[31];
}

static u64 access_tx_sdma14_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[30];
}

static u64 access_tx_sdma13_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[29];
}

static u64 access_tx_sdma12_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[28];
}

static u64 access_tx_sdma11_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[27];
}

static u64 access_tx_sdma10_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[26];
}

static u64 access_tx_sdma9_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[25];
}

static u64 access_tx_sdma8_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[24];
}

static u64 access_tx_sdma7_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[23];
}

static u64 access_tx_sdma6_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[22];
}

static u64 access_tx_sdma5_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[21];
}

static u64 access_tx_sdma4_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[20];
}

static u64 access_tx_sdma3_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[19];
}

static u64 access_tx_sdma2_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[18];
}

static u64 access_tx_sdma1_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[17];
}

static u64 access_tx_sdma0_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[16];
}

static u64 access_tx_config_parity_err_cnt(const struct cntr_entry *entry,
                                           void *context, int vl, int mode,
                                           u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[15];
}

static u64 access_tx_sbrd_ctl_csr_parity_err_cnt(const struct cntr_entry *entry,
                                                 void *context, int vl,
                                                 int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[14];
}

static u64 access_tx_launch_csr_parity_err_cnt(const struct cntr_entry *entry,
                                               void *context, int vl, int mode,
                                               u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[13];
}

static u64 access_tx_illegal_vl_err_cnt(const struct cntr_entry *entry,
                                        void *context, int vl, int mode,
                                        u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[12];
}

static u64 access_tx_sbrd_ctl_state_machine_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[11];
}

static u64 access_egress_reserved_10_err_cnt(const struct cntr_entry *entry,
                                             void *context, int vl, int mode,
                                             u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[10];
}

static u64 access_egress_reserved_9_err_cnt(const struct cntr_entry *entry,
                                            void *context, int vl, int mode,
                                            u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[9];
}

static u64 access_tx_sdma_launch_intf_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[8];
}

static u64 access_tx_pio_launch_intf_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[7];
}

static u64 access_egress_reserved_6_err_cnt(const struct cntr_entry *entry,
                                            void *context, int vl, int mode,
                                            u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[6];
}

static u64 access_tx_incorrect_link_state_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[5];
}

static u64 access_tx_linkdown_err_cnt(const struct cntr_entry *entry,
                                      void *context, int vl, int mode,
                                      u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[4];
}

static u64 access_tx_egress_fifi_underrun_or_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[3];
}

static u64 access_egress_reserved_2_err_cnt(const struct cntr_entry *entry,
                                            void *context, int vl, int mode,
                                            u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[2];
}

static u64 access_tx_pkt_integrity_mem_unc_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[1];
}

static u64 access_tx_pkt_integrity_mem_cor_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[0];
}

/*
 * Software counters corresponding to each of the
 * error status bits within SendErrStatus
 */
static u64 access_send_csr_write_bad_addr_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_err_status_cnt[2];
}

static u64 access_send_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
                                                 void *context, int vl,
                                                 int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_err_status_cnt[1];
}

static u64 access_send_csr_parity_cnt(const struct cntr_entry *entry,
                                      void *context, int vl, int mode,
                                      u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_err_status_cnt[0];
}

/*
 * Software counters corresponding to each of the
 * error status bits within SendCtxtErrStatus
 */
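/*
 * The hardware SendCtxtErrStatus register is per send context; the
 * sw_ctxt_err_status_cnt[] array read by the handlers below is
 * assumed to be the device-wide software aggregation of those
 * per-context errors.
 */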
static u64 access_pio_write_out_of_bounds_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->sw_ctxt_err_status_cnt[4];
}

static u64 access_pio_write_overflow_err_cnt(const struct cntr_entry *entry,
                                             void *context, int vl, int mode,
                                             u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->sw_ctxt_err_status_cnt[3];
}

static u64 access_pio_write_crosses_boundary_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->sw_ctxt_err_status_cnt[2];
}

static u64 access_pio_disallowed_packet_err_cnt(const struct cntr_entry *entry,
                                                void *context, int vl,
                                                int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->sw_ctxt_err_status_cnt[1];
}

static u64 access_pio_inconsistent_sop_err_cnt(const struct cntr_entry *entry,
                                               void *context, int vl, int mode,
                                               u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->sw_ctxt_err_status_cnt[0];
}

/*
 * Software counters corresponding to each of the
 * error status bits within SendDmaEngErrStatus
 */
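/*
 * Likewise, SendDmaEngErrStatus is per SDMA engine; the
 * sw_send_dma_eng_err_status_cnt[] array is assumed to aggregate
 * each error bit across all of the device's SDMA engines.
 */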
static u64 access_sdma_header_request_fifo_cor_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->sw_send_dma_eng_err_status_cnt[23];
}

static u64 access_sdma_header_storage_cor_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->sw_send_dma_eng_err_status_cnt[22];
}

static u64 access_sdma_packet_tracking_cor_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->sw_send_dma_eng_err_status_cnt[21];
}

static u64 access_sdma_assembly_cor_err_cnt(const struct cntr_entry *entry,
                                            void *context, int vl, int mode,
                                            u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->sw_send_dma_eng_err_status_cnt[20];
}

static u64 access_sdma_desc_table_cor_err_cnt(const struct cntr_entry *entry,
                                              void *context, int vl, int mode,
                                              u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->sw_send_dma_eng_err_status_cnt[19];
}

static u64 access_sdma_header_request_fifo_unc_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->sw_send_dma_eng_err_status_cnt[18];
}

static u64 access_sdma_header_storage_unc_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->sw_send_dma_eng_err_status_cnt[17];
}

static u64 access_sdma_packet_tracking_unc_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->sw_send_dma_eng_err_status_cnt[16];
}

static u64 access_sdma_assembly_unc_err_cnt(const struct cntr_entry *entry,
                                            void *context, int vl, int mode,
                                            u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->sw_send_dma_eng_err_status_cnt[15];
}

static u64 access_sdma_desc_table_unc_err_cnt(const struct cntr_entry *entry,
                                              void *context, int vl, int mode,
                                              u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->sw_send_dma_eng_err_status_cnt[14];
}

static u64 access_sdma_timeout_err_cnt(const struct cntr_entry *entry,
                                       void *context, int vl, int mode,
                                       u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->sw_send_dma_eng_err_status_cnt[13];
}

static u64 access_sdma_header_length_err_cnt(const struct cntr_entry *entry,
                                             void *context, int vl, int mode,
                                             u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->sw_send_dma_eng_err_status_cnt[12];
}

static u64 access_sdma_header_address_err_cnt(const struct cntr_entry *entry,
                                              void *context, int vl, int mode,
                                              u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->sw_send_dma_eng_err_status_cnt[11];
}

static u64 access_sdma_header_select_err_cnt(const struct cntr_entry *entry,
                                             void *context, int vl, int mode,
                                             u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->sw_send_dma_eng_err_status_cnt[10];
}

static u64 access_sdma_reserved_9_err_cnt(const struct cntr_entry *entry,
                                          void *context, int vl, int mode,
                                          u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->sw_send_dma_eng_err_status_cnt[9];
}

static u64 access_sdma_packet_desc_overflow_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->sw_send_dma_eng_err_status_cnt[8];
}

static u64 access_sdma_length_mismatch_err_cnt(const struct cntr_entry *entry,
                                               void *context, int vl,
                                               int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->sw_send_dma_eng_err_status_cnt[7];
}

static u64 access_sdma_halt_err_cnt(const struct cntr_entry *entry,
                                    void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->sw_send_dma_eng_err_status_cnt[6];
}

static u64 access_sdma_mem_read_err_cnt(const struct cntr_entry *entry,
                                        void *context, int vl, int mode,
                                        u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->sw_send_dma_eng_err_status_cnt[5];
}

static u64 access_sdma_first_desc_err_cnt(const struct cntr_entry *entry,
                                          void *context, int vl, int mode,
                                          u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->sw_send_dma_eng_err_status_cnt[4];
}

static u64 access_sdma_tail_out_of_bounds_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->sw_send_dma_eng_err_status_cnt[3];
}

static u64 access_sdma_too_long_err_cnt(const struct cntr_entry *entry,
                                        void *context, int vl, int mode,
                                        u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->sw_send_dma_eng_err_status_cnt[2];
}

static u64 access_sdma_gen_mismatch_err_cnt(const struct cntr_entry *entry,
                                            void *context, int vl, int mode,
                                            u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->sw_send_dma_eng_err_status_cnt[1];
}

static u64 access_sdma_wrong_dw_err_cnt(const struct cntr_entry *entry,
                                        void *context, int vl, int mode,
                                        u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->sw_send_dma_eng_err_status_cnt[0];
}

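/*
 * Unlike the handlers above, this counter is backed by a hardware
 * CSR.  On a read, the software-counted bypass packet errors are
 * folded into the CSR value, saturating at CNTR_MAX rather than
 * wrapping; on a write (a counter reset), the software count is
 * cleared as well.
 */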
static u64 access_dc_rcv_err_cnt(const struct cntr_entry *entry,
                                 void *context, int vl, int mode,
                                 u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
        u64 csr = entry->csr;
        u64 val;

        val = read_write_csr(dd, csr, mode, data);
        if (mode == CNTR_MODE_R) {
                val = val > CNTR_MAX - dd->sw_rcv_bypass_packet_errors ?
                        CNTR_MAX : val + dd->sw_rcv_bypass_packet_errors;
        } else if (mode == CNTR_MODE_W) {
                dd->sw_rcv_bypass_packet_errors = 0;
        } else {
                dd_dev_err(dd, "Invalid cntr register access mode\n");
                return 0;
        }
        return val;
}

#define def_access_sw_cpu(cntr) \
static u64 access_sw_cpu_##cntr(const struct cntr_entry *entry,               \
                              void *context, int vl, int mode, u64 data)      \
{                                                                             \
        struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;        \
        return read_write_cpu(ppd->dd, &ppd->ibport_data.rvp.z_ ##cntr,       \
                              ppd->ibport_data.rvp.cntr, vl,                  \
                              mode, data);                                    \
}
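
/*
 * For illustration, def_access_sw_cpu(rc_acks) expands to roughly:
 *
 *	static u64 access_sw_cpu_rc_acks(const struct cntr_entry *entry,
 *					 void *context, int vl, int mode,
 *					 u64 data)
 *	{
 *		struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
 *		return read_write_cpu(ppd->dd,
 *				      &ppd->ibport_data.rvp.z_rc_acks,
 *				      ppd->ibport_data.rvp.rc_acks, vl,
 *				      mode, data);
 *	}
 */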

def_access_sw_cpu(rc_acks);
def_access_sw_cpu(rc_qacks);
def_access_sw_cpu(rc_delayed_comp);

#define def_access_ibp_counter(cntr) \
static u64 access_ibp_##cntr(const struct cntr_entry *entry,                  \
                                void *context, int vl, int mode, u64 data)    \
{                                                                             \
        struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;        \
                                                                              \
        if (vl != CNTR_INVALID_VL)                                            \
                return 0;                                                     \
                                                                              \
        return read_write_sw(ppd->dd, &ppd->ibport_data.rvp.n_ ##cntr,        \
                             mode, data);                                     \
}
4005
4006 def_access_ibp_counter(loop_pkts);
4007 def_access_ibp_counter(rc_resends);
4008 def_access_ibp_counter(rnr_naks);
4009 def_access_ibp_counter(other_naks);
4010 def_access_ibp_counter(rc_timeouts);
4011 def_access_ibp_counter(pkt_drops);
4012 def_access_ibp_counter(dmawait);
4013 def_access_ibp_counter(rc_seqnak);
4014 def_access_ibp_counter(rc_dupreq);
4015 def_access_ibp_counter(rdma_seq);
4016 def_access_ibp_counter(unaligned);
4017 def_access_ibp_counter(seq_naks);
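
/*
 * Likewise, def_access_ibp_counter(loop_pkts) expands to the equivalent
 * of the function below; the vl check makes per-VL queries return 0,
 * since these port counters are not tracked per virtual lane:
 *
 *	static u64 access_ibp_loop_pkts(const struct cntr_entry *entry,
 *					void *context, int vl, int mode,
 *					u64 data)
 *	{
 *		struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
 *
 *		if (vl != CNTR_INVALID_VL)
 *			return 0;
 *
 *		return read_write_sw(ppd->dd, &ppd->ibport_data.rvp.n_loop_pkts,
 *				     mode, data);
 *	}
 */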
4018
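/*
 * dev_cntrs[] is the device-wide counter table, indexed by the C_* enum
 * via designated initializers.  Each element supplies a display name,
 * an optional CSR address, an offset, flags (CNTR_SYNTH for counters
 * that software samples periodically and widens to a full 64 bits,
 * CNTR_VL for per-virtual-lane counters, CNTR_32BIT for 32-bit hardware
 * registers, CNTR_SDMA for per-engine counters), and an accessor
 * callback.  A minimal sketch of a read through the table, assuming the
 * rw_cntr callback field of struct cntr_entry from chip.h:
 *
 *	entry = &dev_cntrs[C_DC_RCV_ERR];
 *	val = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);
 */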
4019 static struct cntr_entry dev_cntrs[DEV_CNTR_LAST] = {
4020 [C_RCV_OVF] = RXE32_DEV_CNTR_ELEM(RcvOverflow, RCV_BUF_OVFL_CNT, CNTR_SYNTH),
4021 [C_RX_TID_FULL] = RXE32_DEV_CNTR_ELEM(RxTIDFullEr, RCV_TID_FULL_ERR_CNT,
4022                         CNTR_NORMAL),
4023 [C_RX_TID_INVALID] = RXE32_DEV_CNTR_ELEM(RxTIDInvalid, RCV_TID_VALID_ERR_CNT,
4024                         CNTR_NORMAL),
4025 [C_RX_TID_FLGMS] = RXE32_DEV_CNTR_ELEM(RxTidFLGMs,
4026                         RCV_TID_FLOW_GEN_MISMATCH_CNT,
4027                         CNTR_NORMAL),
4028 [C_RX_CTX_EGRS] = RXE32_DEV_CNTR_ELEM(RxCtxEgrS, RCV_CONTEXT_EGR_STALL,
4029                         CNTR_NORMAL),
4030 [C_RCV_TID_FLSMS] = RXE32_DEV_CNTR_ELEM(RxTidFLSMs,
4031                         RCV_TID_FLOW_SEQ_MISMATCH_CNT, CNTR_NORMAL),
4032 [C_CCE_PCI_CR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciCrSt,
4033                         CCE_PCIE_POSTED_CRDT_STALL_CNT, CNTR_NORMAL),
4034 [C_CCE_PCI_TR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciTrSt, CCE_PCIE_TRGT_STALL_CNT,
4035                         CNTR_NORMAL),
4036 [C_CCE_PIO_WR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePioWrSt, CCE_PIO_WR_STALL_CNT,
4037                         CNTR_NORMAL),
4038 [C_CCE_ERR_INT] = CCE_INT_DEV_CNTR_ELEM(CceErrInt, CCE_ERR_INT_CNT,
4039                         CNTR_NORMAL),
4040 [C_CCE_SDMA_INT] = CCE_INT_DEV_CNTR_ELEM(CceSdmaInt, CCE_SDMA_INT_CNT,
4041                         CNTR_NORMAL),
4042 [C_CCE_MISC_INT] = CCE_INT_DEV_CNTR_ELEM(CceMiscInt, CCE_MISC_INT_CNT,
4043                         CNTR_NORMAL),
4044 [C_CCE_RCV_AV_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvAvInt, CCE_RCV_AVAIL_INT_CNT,
4045                         CNTR_NORMAL),
4046 [C_CCE_RCV_URG_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvUrgInt,
4047                         CCE_RCV_URGENT_INT_CNT, CNTR_NORMAL),
4048 [C_CCE_SEND_CR_INT] = CCE_INT_DEV_CNTR_ELEM(CceSndCrInt,
4049                         CCE_SEND_CREDIT_INT_CNT, CNTR_NORMAL),
4050 [C_DC_UNC_ERR] = DC_PERF_CNTR(DcUnctblErr, DCC_ERR_UNCORRECTABLE_CNT,
4051                               CNTR_SYNTH),
4052 [C_DC_RCV_ERR] = CNTR_ELEM("DcRecvErr", DCC_ERR_PORTRCV_ERR_CNT, 0, CNTR_SYNTH,
4053                             access_dc_rcv_err_cnt),
4054 [C_DC_FM_CFG_ERR] = DC_PERF_CNTR(DcFmCfgErr, DCC_ERR_FMCONFIG_ERR_CNT,
4055                                  CNTR_SYNTH),
4056 [C_DC_RMT_PHY_ERR] = DC_PERF_CNTR(DcRmtPhyErr, DCC_ERR_RCVREMOTE_PHY_ERR_CNT,
4057                                   CNTR_SYNTH),
4058 [C_DC_DROPPED_PKT] = DC_PERF_CNTR(DcDroppedPkt, DCC_ERR_DROPPED_PKT_CNT,
4059                                   CNTR_SYNTH),
4060 [C_DC_MC_XMIT_PKTS] = DC_PERF_CNTR(DcMcXmitPkts,
4061                                    DCC_PRF_PORT_XMIT_MULTICAST_CNT, CNTR_SYNTH),
4062 [C_DC_MC_RCV_PKTS] = DC_PERF_CNTR(DcMcRcvPkts,
4063                                   DCC_PRF_PORT_RCV_MULTICAST_PKT_CNT,
4064                                   CNTR_SYNTH),
4065 [C_DC_XMIT_CERR] = DC_PERF_CNTR(DcXmitCorr,
4066                                 DCC_PRF_PORT_XMIT_CORRECTABLE_CNT, CNTR_SYNTH),
4067 [C_DC_RCV_CERR] = DC_PERF_CNTR(DcRcvCorrCnt, DCC_PRF_PORT_RCV_CORRECTABLE_CNT,
4068                                CNTR_SYNTH),
4069 [C_DC_RCV_FCC] = DC_PERF_CNTR(DcRxFCntl, DCC_PRF_RX_FLOW_CRTL_CNT,
4070                               CNTR_SYNTH),
4071 [C_DC_XMIT_FCC] = DC_PERF_CNTR(DcXmitFCntl, DCC_PRF_TX_FLOW_CRTL_CNT,
4072                                CNTR_SYNTH),
4073 [C_DC_XMIT_FLITS] = DC_PERF_CNTR(DcXmitFlits, DCC_PRF_PORT_XMIT_DATA_CNT,
4074                                  CNTR_SYNTH),
4075 [C_DC_RCV_FLITS] = DC_PERF_CNTR(DcRcvFlits, DCC_PRF_PORT_RCV_DATA_CNT,
4076                                 CNTR_SYNTH),
4077 [C_DC_XMIT_PKTS] = DC_PERF_CNTR(DcXmitPkts, DCC_PRF_PORT_XMIT_PKTS_CNT,
4078                                 CNTR_SYNTH),
4079 [C_DC_RCV_PKTS] = DC_PERF_CNTR(DcRcvPkts, DCC_PRF_PORT_RCV_PKTS_CNT,
4080                                CNTR_SYNTH),
4081 [C_DC_RX_FLIT_VL] = DC_PERF_CNTR(DcRxFlitVl, DCC_PRF_PORT_VL_RCV_DATA_CNT,
4082                                  CNTR_SYNTH | CNTR_VL),
4083 [C_DC_RX_PKT_VL] = DC_PERF_CNTR(DcRxPktVl, DCC_PRF_PORT_VL_RCV_PKTS_CNT,
4084                                 CNTR_SYNTH | CNTR_VL),
4085 [C_DC_RCV_FCN] = DC_PERF_CNTR(DcRcvFcn, DCC_PRF_PORT_RCV_FECN_CNT, CNTR_SYNTH),
4086 [C_DC_RCV_FCN_VL] = DC_PERF_CNTR(DcRcvFcnVl, DCC_PRF_PORT_VL_RCV_FECN_CNT,
4087                                  CNTR_SYNTH | CNTR_VL),
4088 [C_DC_RCV_BCN] = DC_PERF_CNTR(DcRcvBcn, DCC_PRF_PORT_RCV_BECN_CNT, CNTR_SYNTH),
4089 [C_DC_RCV_BCN_VL] = DC_PERF_CNTR(DcRcvBcnVl, DCC_PRF_PORT_VL_RCV_BECN_CNT,
4090                                  CNTR_SYNTH | CNTR_VL),
4091 [C_DC_RCV_BBL] = DC_PERF_CNTR(DcRcvBbl, DCC_PRF_PORT_RCV_BUBBLE_CNT,
4092                               CNTR_SYNTH),
4093 [C_DC_RCV_BBL_VL] = DC_PERF_CNTR(DcRcvBblVl, DCC_PRF_PORT_VL_RCV_BUBBLE_CNT,
4094                                  CNTR_SYNTH | CNTR_VL),
4095 [C_DC_MARK_FECN] = DC_PERF_CNTR(DcMarkFcn, DCC_PRF_PORT_MARK_FECN_CNT,
4096                                 CNTR_SYNTH),
4097 [C_DC_MARK_FECN_VL] = DC_PERF_CNTR(DcMarkFcnVl, DCC_PRF_PORT_VL_MARK_FECN_CNT,
4098                                    CNTR_SYNTH | CNTR_VL),
4099 [C_DC_TOTAL_CRC] =
4100         DC_PERF_CNTR_LCB(DcTotCrc, DC_LCB_ERR_INFO_TOTAL_CRC_ERR,
4101                          CNTR_SYNTH),
4102 [C_DC_CRC_LN0] = DC_PERF_CNTR_LCB(DcCrcLn0, DC_LCB_ERR_INFO_CRC_ERR_LN0,
4103                                   CNTR_SYNTH),
4104 [C_DC_CRC_LN1] = DC_PERF_CNTR_LCB(DcCrcLn1, DC_LCB_ERR_INFO_CRC_ERR_LN1,
4105                                   CNTR_SYNTH),
4106 [C_DC_CRC_LN2] = DC_PERF_CNTR_LCB(DcCrcLn2, DC_LCB_ERR_INFO_CRC_ERR_LN2,
4107                                   CNTR_SYNTH),
4108 [C_DC_CRC_LN3] = DC_PERF_CNTR_LCB(DcCrcLn3, DC_LCB_ERR_INFO_CRC_ERR_LN3,
4109                                   CNTR_SYNTH),
4110 [C_DC_CRC_MULT_LN] =
4111         DC_PERF_CNTR_LCB(DcMultLn, DC_LCB_ERR_INFO_CRC_ERR_MULTI_LN,
4112                          CNTR_SYNTH),
4113 [C_DC_TX_REPLAY] = DC_PERF_CNTR_LCB(DcTxReplay, DC_LCB_ERR_INFO_TX_REPLAY_CNT,
4114                                     CNTR_SYNTH),
4115 [C_DC_RX_REPLAY] = DC_PERF_CNTR_LCB(DcRxReplay, DC_LCB_ERR_INFO_RX_REPLAY_CNT,
4116                                     CNTR_SYNTH),
4117 [C_DC_SEQ_CRC_CNT] =
4118         DC_PERF_CNTR_LCB(DcLinkSeqCrc, DC_LCB_ERR_INFO_SEQ_CRC_CNT,
4119                          CNTR_SYNTH),
4120 [C_DC_ESC0_ONLY_CNT] =
4121         DC_PERF_CNTR_LCB(DcEsc0, DC_LCB_ERR_INFO_ESCAPE_0_ONLY_CNT,
4122                          CNTR_SYNTH),
4123 [C_DC_ESC0_PLUS1_CNT] =
4124         DC_PERF_CNTR_LCB(DcEsc1, DC_LCB_ERR_INFO_ESCAPE_0_PLUS1_CNT,
4125                          CNTR_SYNTH),
4126 [C_DC_ESC0_PLUS2_CNT] =
4127         DC_PERF_CNTR_LCB(DcEsc0Plus2, DC_LCB_ERR_INFO_ESCAPE_0_PLUS2_CNT,
4128                          CNTR_SYNTH),
4129 [C_DC_REINIT_FROM_PEER_CNT] =
4130         DC_PERF_CNTR_LCB(DcReinitPeer, DC_LCB_ERR_INFO_REINIT_FROM_PEER_CNT,
4131                          CNTR_SYNTH),
4132 [C_DC_SBE_CNT] = DC_PERF_CNTR_LCB(DcSbe, DC_LCB_ERR_INFO_SBE_CNT,
4133                                   CNTR_SYNTH),
4134 [C_DC_MISC_FLG_CNT] =
4135         DC_PERF_CNTR_LCB(DcMiscFlg, DC_LCB_ERR_INFO_MISC_FLG_CNT,
4136                          CNTR_SYNTH),
4137 [C_DC_PRF_GOOD_LTP_CNT] =
4138         DC_PERF_CNTR_LCB(DcGoodLTP, DC_LCB_PRF_GOOD_LTP_CNT, CNTR_SYNTH),
4139 [C_DC_PRF_ACCEPTED_LTP_CNT] =
4140         DC_PERF_CNTR_LCB(DcAccLTP, DC_LCB_PRF_ACCEPTED_LTP_CNT,
4141                          CNTR_SYNTH),
4142 [C_DC_PRF_RX_FLIT_CNT] =
4143         DC_PERF_CNTR_LCB(DcPrfRxFlit, DC_LCB_PRF_RX_FLIT_CNT, CNTR_SYNTH),
4144 [C_DC_PRF_TX_FLIT_CNT] =
4145         DC_PERF_CNTR_LCB(DcPrfTxFlit, DC_LCB_PRF_TX_FLIT_CNT, CNTR_SYNTH),
4146 [C_DC_PRF_CLK_CNTR] =
4147         DC_PERF_CNTR_LCB(DcPrfClk, DC_LCB_PRF_CLK_CNTR, CNTR_SYNTH),
4148 [C_DC_PG_DBG_FLIT_CRDTS_CNT] =
4149         DC_PERF_CNTR_LCB(DcFltCrdts, DC_LCB_PG_DBG_FLIT_CRDTS_CNT, CNTR_SYNTH),
4150 [C_DC_PG_STS_PAUSE_COMPLETE_CNT] =
4151         DC_PERF_CNTR_LCB(DcPauseComp, DC_LCB_PG_STS_PAUSE_COMPLETE_CNT,
4152                          CNTR_SYNTH),
4153 [C_DC_PG_STS_TX_SBE_CNT] =
4154         DC_PERF_CNTR_LCB(DcStsTxSbe, DC_LCB_PG_STS_TX_SBE_CNT, CNTR_SYNTH),
4155 [C_DC_PG_STS_TX_MBE_CNT] =
4156         DC_PERF_CNTR_LCB(DcStsTxMbe, DC_LCB_PG_STS_TX_MBE_CNT,
4157                          CNTR_SYNTH),
4158 [C_SW_CPU_INTR] = CNTR_ELEM("Intr", 0, 0, CNTR_NORMAL,
4159                             access_sw_cpu_intr),
4160 [C_SW_CPU_RCV_LIM] = CNTR_ELEM("RcvLimit", 0, 0, CNTR_NORMAL,
4161                             access_sw_cpu_rcv_limit),
4162 [C_SW_VTX_WAIT] = CNTR_ELEM("vTxWait", 0, 0, CNTR_NORMAL,
4163                             access_sw_vtx_wait),
4164 [C_SW_PIO_WAIT] = CNTR_ELEM("PioWait", 0, 0, CNTR_NORMAL,
4165                             access_sw_pio_wait),
4166 [C_SW_PIO_DRAIN] = CNTR_ELEM("PioDrain", 0, 0, CNTR_NORMAL,
4167                             access_sw_pio_drain),
4168 [C_SW_KMEM_WAIT] = CNTR_ELEM("KmemWait", 0, 0, CNTR_NORMAL,
4169                             access_sw_kmem_wait),
4170 [C_SW_SEND_SCHED] = CNTR_ELEM("SendSched", 0, 0, CNTR_NORMAL,
4171                             access_sw_send_schedule),
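/*
 * The C_SW_* entries above pass a CSR address of 0: they are pure
 * software counters whose accessors ignore entry->csr and read per-CPU
 * or per-device software state instead.
 */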
4172 [C_SDMA_DESC_FETCHED_CNT] = CNTR_ELEM("SDEDscFdCn",
4173                                       SEND_DMA_DESC_FETCHED_CNT, 0,
4174                                       CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4175                                       dev_access_u32_csr),
4176 [C_SDMA_INT_CNT] = CNTR_ELEM("SDMAInt", 0, 0,
4177                              CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4178                              access_sde_int_cnt),
4179 [C_SDMA_ERR_CNT] = CNTR_ELEM("SDMAErrCt", 0, 0,
4180                              CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4181                              access_sde_err_cnt),
4182 [C_SDMA_IDLE_INT_CNT] = CNTR_ELEM("SDMAIdInt", 0, 0,
4183                                   CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4184                                   access_sde_idle_int_cnt),
4185 [C_SDMA_PROGRESS_INT_CNT] = CNTR_ELEM("SDMAPrIntCn", 0, 0,
4186                                       CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4187                                       access_sde_progress_int_cnt),
4188 /* MISC_ERR_STATUS */
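/*
 * This section and the *ErrStatus sections that follow expose software
 * shadow counts, one entry per error-status bit: each access_* callback
 * simply returns the matching element of a per-device count array that
 * the error interrupt handlers increment.
 */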
4189 [C_MISC_PLL_LOCK_FAIL_ERR] = CNTR_ELEM("MISC_PLL_LOCK_FAIL_ERR", 0, 0,
4190                                 CNTR_NORMAL,
4191                                 access_misc_pll_lock_fail_err_cnt),
4192 [C_MISC_MBIST_FAIL_ERR] = CNTR_ELEM("MISC_MBIST_FAIL_ERR", 0, 0,
4193                                 CNTR_NORMAL,
4194                                 access_misc_mbist_fail_err_cnt),
4195 [C_MISC_INVALID_EEP_CMD_ERR] = CNTR_ELEM("MISC_INVALID_EEP_CMD_ERR", 0, 0,
4196                                 CNTR_NORMAL,
4197                                 access_misc_invalid_eep_cmd_err_cnt),
4198 [C_MISC_EFUSE_DONE_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_DONE_PARITY_ERR", 0, 0,
4199                                 CNTR_NORMAL,
4200                                 access_misc_efuse_done_parity_err_cnt),
4201 [C_MISC_EFUSE_WRITE_ERR] = CNTR_ELEM("MISC_EFUSE_WRITE_ERR", 0, 0,
4202                                 CNTR_NORMAL,
4203                                 access_misc_efuse_write_err_cnt),
4204 [C_MISC_EFUSE_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_EFUSE_READ_BAD_ADDR_ERR", 0,
4205                                 0, CNTR_NORMAL,
4206                                 access_misc_efuse_read_bad_addr_err_cnt),
4207 [C_MISC_EFUSE_CSR_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_CSR_PARITY_ERR", 0, 0,
4208                                 CNTR_NORMAL,
4209                                 access_misc_efuse_csr_parity_err_cnt),
4210 [C_MISC_FW_AUTH_FAILED_ERR] = CNTR_ELEM("MISC_FW_AUTH_FAILED_ERR", 0, 0,
4211                                 CNTR_NORMAL,
4212                                 access_misc_fw_auth_failed_err_cnt),
4213 [C_MISC_KEY_MISMATCH_ERR] = CNTR_ELEM("MISC_KEY_MISMATCH_ERR", 0, 0,
4214                                 CNTR_NORMAL,
4215                                 access_misc_key_mismatch_err_cnt),
4216 [C_MISC_SBUS_WRITE_FAILED_ERR] = CNTR_ELEM("MISC_SBUS_WRITE_FAILED_ERR", 0, 0,
4217                                 CNTR_NORMAL,
4218                                 access_misc_sbus_write_failed_err_cnt),
4219 [C_MISC_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_WRITE_BAD_ADDR_ERR", 0, 0,
4220                                 CNTR_NORMAL,
4221                                 access_misc_csr_write_bad_addr_err_cnt),
4222 [C_MISC_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_READ_BAD_ADDR_ERR", 0, 0,
4223                                 CNTR_NORMAL,
4224                                 access_misc_csr_read_bad_addr_err_cnt),
4225 [C_MISC_CSR_PARITY_ERR] = CNTR_ELEM("MISC_CSR_PARITY_ERR", 0, 0,
4226                                 CNTR_NORMAL,
4227                                 access_misc_csr_parity_err_cnt),
4228 /* CceErrStatus */
4229 [C_CCE_ERR_STATUS_AGGREGATED_CNT] = CNTR_ELEM("CceErrStatusAggregatedCnt", 0, 0,
4230                                 CNTR_NORMAL,
4231                                 access_sw_cce_err_status_aggregated_cnt),
4232 [C_CCE_MSIX_CSR_PARITY_ERR] = CNTR_ELEM("CceMsixCsrParityErr", 0, 0,
4233                                 CNTR_NORMAL,
4234                                 access_cce_msix_csr_parity_err_cnt),
4235 [C_CCE_INT_MAP_UNC_ERR] = CNTR_ELEM("CceIntMapUncErr", 0, 0,
4236                                 CNTR_NORMAL,
4237                                 access_cce_int_map_unc_err_cnt),
4238 [C_CCE_INT_MAP_COR_ERR] = CNTR_ELEM("CceIntMapCorErr", 0, 0,
4239                                 CNTR_NORMAL,
4240                                 access_cce_int_map_cor_err_cnt),
4241 [C_CCE_MSIX_TABLE_UNC_ERR] = CNTR_ELEM("CceMsixTableUncErr", 0, 0,
4242                                 CNTR_NORMAL,
4243                                 access_cce_msix_table_unc_err_cnt),
4244 [C_CCE_MSIX_TABLE_COR_ERR] = CNTR_ELEM("CceMsixTableCorErr", 0, 0,
4245                                 CNTR_NORMAL,
4246                                 access_cce_msix_table_cor_err_cnt),
4247 [C_CCE_RXDMA_CONV_FIFO_PARITY_ERR] = CNTR_ELEM("CceRxdmaConvFifoParityErr", 0,
4248                                 0, CNTR_NORMAL,
4249                                 access_cce_rxdma_conv_fifo_parity_err_cnt),
4250 [C_CCE_RCPL_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceRcplAsyncFifoParityErr", 0,
4251                                 0, CNTR_NORMAL,
4252                                 access_cce_rcpl_async_fifo_parity_err_cnt),
4253 [C_CCE_SEG_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceSegWriteBadAddrErr", 0, 0,
4254                                 CNTR_NORMAL,
4255                                 access_cce_seg_write_bad_addr_err_cnt),
4256 [C_CCE_SEG_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceSegReadBadAddrErr", 0, 0,
4257                                 CNTR_NORMAL,
4258                                 access_cce_seg_read_bad_addr_err_cnt),
4259 [C_LA_TRIGGERED] = CNTR_ELEM("Cce LATriggered", 0, 0,
4260                                 CNTR_NORMAL,
4261                                 access_la_triggered_cnt),
4262 [C_CCE_TRGT_CPL_TIMEOUT_ERR] = CNTR_ELEM("CceTrgtCplTimeoutErr", 0, 0,
4263                                 CNTR_NORMAL,
4264                                 access_cce_trgt_cpl_timeout_err_cnt),
4265 [C_PCIC_RECEIVE_PARITY_ERR] = CNTR_ELEM("PcicReceiveParityErr", 0, 0,
4266                                 CNTR_NORMAL,
4267                                 access_pcic_receive_parity_err_cnt),
4268 [C_PCIC_TRANSMIT_BACK_PARITY_ERR] = CNTR_ELEM("PcicTransmitBackParityErr", 0, 0,
4269                                 CNTR_NORMAL,
4270                                 access_pcic_transmit_back_parity_err_cnt),
4271 [C_PCIC_TRANSMIT_FRONT_PARITY_ERR] = CNTR_ELEM("PcicTransmitFrontParityErr", 0,
4272                                 0, CNTR_NORMAL,
4273                                 access_pcic_transmit_front_parity_err_cnt),
4274 [C_PCIC_CPL_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicCplDatQUncErr", 0, 0,
4275                                 CNTR_NORMAL,
4276                                 access_pcic_cpl_dat_q_unc_err_cnt),
4277 [C_PCIC_CPL_HD_Q_UNC_ERR] = CNTR_ELEM("PcicCplHdQUncErr", 0, 0,
4278                                 CNTR_NORMAL,
4279                                 access_pcic_cpl_hd_q_unc_err_cnt),
4280 [C_PCIC_POST_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicPostDatQUncErr", 0, 0,
4281                                 CNTR_NORMAL,
4282                                 access_pcic_post_dat_q_unc_err_cnt),
4283 [C_PCIC_POST_HD_Q_UNC_ERR] = CNTR_ELEM("PcicPostHdQUncErr", 0, 0,
4284                                 CNTR_NORMAL,
4285                                 access_pcic_post_hd_q_unc_err_cnt),
4286 [C_PCIC_RETRY_SOT_MEM_UNC_ERR] = CNTR_ELEM("PcicRetrySotMemUncErr", 0, 0,
4287                                 CNTR_NORMAL,
4288                                 access_pcic_retry_sot_mem_unc_err_cnt),
4289 [C_PCIC_RETRY_MEM_UNC_ERR] = CNTR_ELEM("PcicRetryMemUncErr", 0, 0,
4290                                 CNTR_NORMAL,
4291                                 access_pcic_retry_mem_unc_err),
4292 [C_PCIC_N_POST_DAT_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostDatQParityErr", 0, 0,
4293                                 CNTR_NORMAL,
4294                                 access_pcic_n_post_dat_q_parity_err_cnt),
4295 [C_PCIC_N_POST_H_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostHQParityErr", 0, 0,
4296                                 CNTR_NORMAL,
4297                                 access_pcic_n_post_h_q_parity_err_cnt),
4298 [C_PCIC_CPL_DAT_Q_COR_ERR] = CNTR_ELEM("PcicCplDatQCorErr", 0, 0,
4299                                 CNTR_NORMAL,
4300                                 access_pcic_cpl_dat_q_cor_err_cnt),
4301 [C_PCIC_CPL_HD_Q_COR_ERR] = CNTR_ELEM("PcicCplHdQCorErr", 0, 0,
4302                                 CNTR_NORMAL,
4303                                 access_pcic_cpl_hd_q_cor_err_cnt),
4304 [C_PCIC_POST_DAT_Q_COR_ERR] = CNTR_ELEM("PcicPostDatQCorErr", 0, 0,
4305                                 CNTR_NORMAL,
4306                                 access_pcic_post_dat_q_cor_err_cnt),
4307 [C_PCIC_POST_HD_Q_COR_ERR] = CNTR_ELEM("PcicPostHdQCorErr", 0, 0,
4308                                 CNTR_NORMAL,
4309                                 access_pcic_post_hd_q_cor_err_cnt),
4310 [C_PCIC_RETRY_SOT_MEM_COR_ERR] = CNTR_ELEM("PcicRetrySotMemCorErr", 0, 0,
4311                                 CNTR_NORMAL,
4312                                 access_pcic_retry_sot_mem_cor_err_cnt),
4313 [C_PCIC_RETRY_MEM_COR_ERR] = CNTR_ELEM("PcicRetryMemCorErr", 0, 0,
4314                                 CNTR_NORMAL,
4315                                 access_pcic_retry_mem_cor_err_cnt),
4316 [C_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERR] = CNTR_ELEM(
4317                                 "CceCli1AsyncFifoDbgParityError", 0, 0,
4318                                 CNTR_NORMAL,
4319                                 access_cce_cli1_async_fifo_dbg_parity_err_cnt),
4320 [C_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERR] = CNTR_ELEM(
4321                                 "CceCli1AsyncFifoRxdmaParityError", 0, 0,
4322                                 CNTR_NORMAL,
4323                                 access_cce_cli1_async_fifo_rxdma_parity_err_cnt
4324                                 ),
4325 [C_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR] = CNTR_ELEM(
4326                         "CceCli1AsyncFifoSdmaHdParityErr", 0, 0,
4327                         CNTR_NORMAL,
4328                         access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt),
4329 [C_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR] = CNTR_ELEM(
4330                         "CceCli1AsyncFifoPioCrdtParityErr", 0, 0,
4331                         CNTR_NORMAL,
4332                         access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt),
4333 [C_CCE_CLI2_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceCli2AsyncFifoParityErr", 0,
4334                         0, CNTR_NORMAL,
4335                         access_cce_cli2_async_fifo_parity_err_cnt),
4336 [C_CCE_CSR_CFG_BUS_PARITY_ERR] = CNTR_ELEM("CceCsrCfgBusParityErr", 0, 0,
4337                         CNTR_NORMAL,
4338                         access_cce_csr_cfg_bus_parity_err_cnt),
4339 [C_CCE_CLI0_ASYNC_FIFO_PARTIY_ERR] = CNTR_ELEM("CceCli0AsyncFifoParityErr", 0,
4340                         0, CNTR_NORMAL,
4341                         access_cce_cli0_async_fifo_parity_err_cnt),
4342 [C_CCE_RSPD_DATA_PARITY_ERR] = CNTR_ELEM("CceRspdDataParityErr", 0, 0,
4343                         CNTR_NORMAL,
4344                         access_cce_rspd_data_parity_err_cnt),
4345 [C_CCE_TRGT_ACCESS_ERR] = CNTR_ELEM("CceTrgtAccessErr", 0, 0,
4346                         CNTR_NORMAL,
4347                         access_cce_trgt_access_err_cnt),
4348 [C_CCE_TRGT_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceTrgtAsyncFifoParityErr", 0,
4349                         0, CNTR_NORMAL,
4350                         access_cce_trgt_async_fifo_parity_err_cnt),
4351 [C_CCE_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrWriteBadAddrErr", 0, 0,
4352                         CNTR_NORMAL,
4353                         access_cce_csr_write_bad_addr_err_cnt),
4354 [C_CCE_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrReadBadAddrErr", 0, 0,
4355                         CNTR_NORMAL,
4356                         access_cce_csr_read_bad_addr_err_cnt),
4357 [C_CCE_CSR_PARITY_ERR] = CNTR_ELEM("CceCsrParityErr", 0, 0,
4358                         CNTR_NORMAL,
4359                         access_ccs_csr_parity_err_cnt),
4360
4361 /* RcvErrStatus */
4362 [C_RX_CSR_PARITY_ERR] = CNTR_ELEM("RxCsrParityErr", 0, 0,
4363                         CNTR_NORMAL,
4364                         access_rx_csr_parity_err_cnt),
4365 [C_RX_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrWriteBadAddrErr", 0, 0,
4366                         CNTR_NORMAL,
4367                         access_rx_csr_write_bad_addr_err_cnt),
4368 [C_RX_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrReadBadAddrErr", 0, 0,
4369                         CNTR_NORMAL,
4370                         access_rx_csr_read_bad_addr_err_cnt),
4371 [C_RX_DMA_CSR_UNC_ERR] = CNTR_ELEM("RxDmaCsrUncErr", 0, 0,
4372                         CNTR_NORMAL,
4373                         access_rx_dma_csr_unc_err_cnt),
4374 [C_RX_DMA_DQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaDqFsmEncodingErr", 0, 0,
4375                         CNTR_NORMAL,
4376                         access_rx_dma_dq_fsm_encoding_err_cnt),
4377 [C_RX_DMA_EQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaEqFsmEncodingErr", 0, 0,
4378                         CNTR_NORMAL,
4379                         access_rx_dma_eq_fsm_encoding_err_cnt),
4380 [C_RX_DMA_CSR_PARITY_ERR] = CNTR_ELEM("RxDmaCsrParityErr", 0, 0,
4381                         CNTR_NORMAL,
4382                         access_rx_dma_csr_parity_err_cnt),
4383 [C_RX_RBUF_DATA_COR_ERR] = CNTR_ELEM("RxRbufDataCorErr", 0, 0,
4384                         CNTR_NORMAL,
4385                         access_rx_rbuf_data_cor_err_cnt),
4386 [C_RX_RBUF_DATA_UNC_ERR] = CNTR_ELEM("RxRbufDataUncErr", 0, 0,
4387                         CNTR_NORMAL,
4388                         access_rx_rbuf_data_unc_err_cnt),
4389 [C_RX_DMA_DATA_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaDataFifoRdCorErr", 0, 0,
4390                         CNTR_NORMAL,
4391                         access_rx_dma_data_fifo_rd_cor_err_cnt),
4392 [C_RX_DMA_DATA_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaDataFifoRdUncErr", 0, 0,
4393                         CNTR_NORMAL,
4394                         access_rx_dma_data_fifo_rd_unc_err_cnt),
4395 [C_RX_DMA_HDR_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaHdrFifoRdCorErr", 0, 0,
4396                         CNTR_NORMAL,
4397                         access_rx_dma_hdr_fifo_rd_cor_err_cnt),
4398 [C_RX_DMA_HDR_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaHdrFifoRdUncErr", 0, 0,
4399                         CNTR_NORMAL,
4400                         access_rx_dma_hdr_fifo_rd_unc_err_cnt),
4401 [C_RX_RBUF_DESC_PART2_COR_ERR] = CNTR_ELEM("RxRbufDescPart2CorErr", 0, 0,
4402                         CNTR_NORMAL,
4403                         access_rx_rbuf_desc_part2_cor_err_cnt),
4404 [C_RX_RBUF_DESC_PART2_UNC_ERR] = CNTR_ELEM("RxRbufDescPart2UncErr", 0, 0,
4405                         CNTR_NORMAL,
4406                         access_rx_rbuf_desc_part2_unc_err_cnt),
4407 [C_RX_RBUF_DESC_PART1_COR_ERR] = CNTR_ELEM("RxRbufDescPart1CorErr", 0, 0,
4408                         CNTR_NORMAL,
4409                         access_rx_rbuf_desc_part1_cor_err_cnt),
4410 [C_RX_RBUF_DESC_PART1_UNC_ERR] = CNTR_ELEM("RxRbufDescPart1UncErr", 0, 0,
4411                         CNTR_NORMAL,
4412                         access_rx_rbuf_desc_part1_unc_err_cnt),
4413 [C_RX_HQ_INTR_FSM_ERR] = CNTR_ELEM("RxHqIntrFsmErr", 0, 0,
4414                         CNTR_NORMAL,
4415                         access_rx_hq_intr_fsm_err_cnt),
4416 [C_RX_HQ_INTR_CSR_PARITY_ERR] = CNTR_ELEM("RxHqIntrCsrParityErr", 0, 0,
4417                         CNTR_NORMAL,
4418                         access_rx_hq_intr_csr_parity_err_cnt),
4419 [C_RX_LOOKUP_CSR_PARITY_ERR] = CNTR_ELEM("RxLookupCsrParityErr", 0, 0,
4420                         CNTR_NORMAL,
4421                         access_rx_lookup_csr_parity_err_cnt),
4422 [C_RX_LOOKUP_RCV_ARRAY_COR_ERR] = CNTR_ELEM("RxLookupRcvArrayCorErr", 0, 0,
4423                         CNTR_NORMAL,
4424                         access_rx_lookup_rcv_array_cor_err_cnt),
4425 [C_RX_LOOKUP_RCV_ARRAY_UNC_ERR] = CNTR_ELEM("RxLookupRcvArrayUncErr", 0, 0,
4426                         CNTR_NORMAL,
4427                         access_rx_lookup_rcv_array_unc_err_cnt),
4428 [C_RX_LOOKUP_DES_PART2_PARITY_ERR] = CNTR_ELEM("RxLookupDesPart2ParityErr", 0,
4429                         0, CNTR_NORMAL,
4430                         access_rx_lookup_des_part2_parity_err_cnt),
4431 [C_RX_LOOKUP_DES_PART1_UNC_COR_ERR] = CNTR_ELEM("RxLookupDesPart1UncCorErr", 0,
4432                         0, CNTR_NORMAL,
4433                         access_rx_lookup_des_part1_unc_cor_err_cnt),
4434 [C_RX_LOOKUP_DES_PART1_UNC_ERR] = CNTR_ELEM("RxLookupDesPart1UncErr", 0, 0,
4435                         CNTR_NORMAL,
4436                         access_rx_lookup_des_part1_unc_err_cnt),
4437 [C_RX_RBUF_NEXT_FREE_BUF_COR_ERR] = CNTR_ELEM("RxRbufNextFreeBufCorErr", 0, 0,
4438                         CNTR_NORMAL,
4439                         access_rx_rbuf_next_free_buf_cor_err_cnt),
4440 [C_RX_RBUF_NEXT_FREE_BUF_UNC_ERR] = CNTR_ELEM("RxRbufNextFreeBufUncErr", 0, 0,
4441                         CNTR_NORMAL,
4442                         access_rx_rbuf_next_free_buf_unc_err_cnt),
4443 [C_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR] = CNTR_ELEM(
4444                         "RxRbufFlInitWrAddrParityErr", 0, 0,
4445                         CNTR_NORMAL,
4446                         access_rbuf_fl_init_wr_addr_parity_err_cnt),
4447 [C_RX_RBUF_FL_INITDONE_PARITY_ERR] = CNTR_ELEM("RxRbufFlInitdoneParityErr", 0,
4448                         0, CNTR_NORMAL,
4449                         access_rx_rbuf_fl_initdone_parity_err_cnt),
4450 [C_RX_RBUF_FL_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlWrAddrParityErr", 0,
4451                         0, CNTR_NORMAL,
4452                         access_rx_rbuf_fl_write_addr_parity_err_cnt),
4453 [C_RX_RBUF_FL_RD_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlRdAddrParityErr", 0, 0,
4454                         CNTR_NORMAL,
4455                         access_rx_rbuf_fl_rd_addr_parity_err_cnt),
4456 [C_RX_RBUF_EMPTY_ERR] = CNTR_ELEM("RxRbufEmptyErr", 0, 0,
4457                         CNTR_NORMAL,
4458                         access_rx_rbuf_empty_err_cnt),
4459 [C_RX_RBUF_FULL_ERR] = CNTR_ELEM("RxRbufFullErr", 0, 0,
4460                         CNTR_NORMAL,
4461                         access_rx_rbuf_full_err_cnt),
4462 [C_RX_RBUF_BAD_LOOKUP_ERR] = CNTR_ELEM("RxRBufBadLookupErr", 0, 0,
4463                         CNTR_NORMAL,
4464                         access_rbuf_bad_lookup_err_cnt),
4465 [C_RX_RBUF_CTX_ID_PARITY_ERR] = CNTR_ELEM("RxRbufCtxIdParityErr", 0, 0,
4466                         CNTR_NORMAL,
4467                         access_rbuf_ctx_id_parity_err_cnt),
4468 [C_RX_RBUF_CSR_QEOPDW_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEOPDWParityErr", 0, 0,
4469                         CNTR_NORMAL,
4470                         access_rbuf_csr_qeopdw_parity_err_cnt),
4471 [C_RX_RBUF_CSR_Q_NUM_OF_PKT_PARITY_ERR] = CNTR_ELEM(
4472                         "RxRbufCsrQNumOfPktParityErr", 0, 0,
4473                         CNTR_NORMAL,
4474                         access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt),
4475 [C_RX_RBUF_CSR_Q_T1_PTR_PARITY_ERR] = CNTR_ELEM(
4476                         "RxRbufCsrQTlPtrParityErr", 0, 0,
4477                         CNTR_NORMAL,
4478                         access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt),
4479 [C_RX_RBUF_CSR_Q_HD_PTR_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQHdPtrParityErr", 0,
4480                         0, CNTR_NORMAL,
4481                         access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt),
4482 [C_RX_RBUF_CSR_Q_VLD_BIT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQVldBitParityErr", 0,
4483                         0, CNTR_NORMAL,
4484                         access_rx_rbuf_csr_q_vld_bit_parity_err_cnt),
4485 [C_RX_RBUF_CSR_Q_NEXT_BUF_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQNextBufParityErr",
4486                         0, 0, CNTR_NORMAL,
4487                         access_rx_rbuf_csr_q_next_buf_parity_err_cnt),
4488 [C_RX_RBUF_CSR_Q_ENT_CNT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEntCntParityErr", 0,
4489                         0, CNTR_NORMAL,
4490                         access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt),
4491 [C_RX_RBUF_CSR_Q_HEAD_BUF_NUM_PARITY_ERR] = CNTR_ELEM(
4492                         "RxRbufCsrQHeadBufNumParityErr", 0, 0,
4493                         CNTR_NORMAL,
4494                         access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt),
4495 [C_RX_RBUF_BLOCK_LIST_READ_COR_ERR] = CNTR_ELEM("RxRbufBlockListReadCorErr", 0,
4496                         0, CNTR_NORMAL,
4497                         access_rx_rbuf_block_list_read_cor_err_cnt),
4498 [C_RX_RBUF_BLOCK_LIST_READ_UNC_ERR] = CNTR_ELEM("RxRbufBlockListReadUncErr", 0,
4499                         0, CNTR_NORMAL,
4500                         access_rx_rbuf_block_list_read_unc_err_cnt),
4501 [C_RX_RBUF_LOOKUP_DES_COR_ERR] = CNTR_ELEM("RxRbufLookupDesCorErr", 0, 0,
4502                         CNTR_NORMAL,
4503                         access_rx_rbuf_lookup_des_cor_err_cnt),
4504 [C_RX_RBUF_LOOKUP_DES_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesUncErr", 0, 0,
4505                         CNTR_NORMAL,
4506                         access_rx_rbuf_lookup_des_unc_err_cnt),
4507 [C_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR] = CNTR_ELEM(
4508                         "RxRbufLookupDesRegUncCorErr", 0, 0,
4509                         CNTR_NORMAL,
4510                         access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt),
4511 [C_RX_RBUF_LOOKUP_DES_REG_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesRegUncErr", 0, 0,
4512                         CNTR_NORMAL,
4513                         access_rx_rbuf_lookup_des_reg_unc_err_cnt),
4514 [C_RX_RBUF_FREE_LIST_COR_ERR] = CNTR_ELEM("RxRbufFreeListCorErr", 0, 0,
4515                         CNTR_NORMAL,
4516                         access_rx_rbuf_free_list_cor_err_cnt),
4517 [C_RX_RBUF_FREE_LIST_UNC_ERR] = CNTR_ELEM("RxRbufFreeListUncErr", 0, 0,
4518                         CNTR_NORMAL,
4519                         access_rx_rbuf_free_list_unc_err_cnt),
4520 [C_RX_RCV_FSM_ENCODING_ERR] = CNTR_ELEM("RxRcvFsmEncodingErr", 0, 0,
4521                         CNTR_NORMAL,
4522                         access_rx_rcv_fsm_encoding_err_cnt),
4523 [C_RX_DMA_FLAG_COR_ERR] = CNTR_ELEM("RxDmaFlagCorErr", 0, 0,
4524                         CNTR_NORMAL,
4525                         access_rx_dma_flag_cor_err_cnt),
4526 [C_RX_DMA_FLAG_UNC_ERR] = CNTR_ELEM("RxDmaFlagUncErr", 0, 0,
4527                         CNTR_NORMAL,
4528                         access_rx_dma_flag_unc_err_cnt),
4529 [C_RX_DC_SOP_EOP_PARITY_ERR] = CNTR_ELEM("RxDcSopEopParityErr", 0, 0,
4530                         CNTR_NORMAL,
4531                         access_rx_dc_sop_eop_parity_err_cnt),
4532 [C_RX_RCV_CSR_PARITY_ERR] = CNTR_ELEM("RxRcvCsrParityErr", 0, 0,
4533                         CNTR_NORMAL,
4534                         access_rx_rcv_csr_parity_err_cnt),
4535 [C_RX_RCV_QP_MAP_TABLE_COR_ERR] = CNTR_ELEM("RxRcvQpMapTableCorErr", 0, 0,
4536                         CNTR_NORMAL,
4537                         access_rx_rcv_qp_map_table_cor_err_cnt),
4538 [C_RX_RCV_QP_MAP_TABLE_UNC_ERR] = CNTR_ELEM("RxRcvQpMapTableUncErr", 0, 0,
4539                         CNTR_NORMAL,
4540                         access_rx_rcv_qp_map_table_unc_err_cnt),
4541 [C_RX_RCV_DATA_COR_ERR] = CNTR_ELEM("RxRcvDataCorErr", 0, 0,
4542                         CNTR_NORMAL,
4543                         access_rx_rcv_data_cor_err_cnt),
4544 [C_RX_RCV_DATA_UNC_ERR] = CNTR_ELEM("RxRcvDataUncErr", 0, 0,
4545                         CNTR_NORMAL,
4546                         access_rx_rcv_data_unc_err_cnt),
4547 [C_RX_RCV_HDR_COR_ERR] = CNTR_ELEM("RxRcvHdrCorErr", 0, 0,
4548                         CNTR_NORMAL,
4549                         access_rx_rcv_hdr_cor_err_cnt),
4550 [C_RX_RCV_HDR_UNC_ERR] = CNTR_ELEM("RxRcvHdrUncErr", 0, 0,
4551                         CNTR_NORMAL,
4552                         access_rx_rcv_hdr_unc_err_cnt),
4553 [C_RX_DC_INTF_PARITY_ERR] = CNTR_ELEM("RxDcIntfParityErr", 0, 0,
4554                         CNTR_NORMAL,
4555                         access_rx_dc_intf_parity_err_cnt),
4556 [C_RX_DMA_CSR_COR_ERR] = CNTR_ELEM("RxDmaCsrCorErr", 0, 0,
4557                         CNTR_NORMAL,
4558                         access_rx_dma_csr_cor_err_cnt),
4559 /* SendPioErrStatus */
4560 [C_PIO_PEC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPecSopHeadParityErr", 0, 0,
4561                         CNTR_NORMAL,
4562                         access_pio_pec_sop_head_parity_err_cnt),
4563 [C_PIO_PCC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPccSopHeadParityErr", 0, 0,
4564                         CNTR_NORMAL,
4565                         access_pio_pcc_sop_head_parity_err_cnt),
4566 [C_PIO_LAST_RETURNED_CNT_PARITY_ERR] = CNTR_ELEM("PioLastReturnedCntParityErr",
4567                         0, 0, CNTR_NORMAL,
4568                         access_pio_last_returned_cnt_parity_err_cnt),
4569 [C_PIO_CURRENT_FREE_CNT_PARITY_ERR] = CNTR_ELEM("PioCurrentFreeCntParityErr", 0,
4570                         0, CNTR_NORMAL,
4571                         access_pio_current_free_cnt_parity_err_cnt),
4572 [C_PIO_RSVD_31_ERR] = CNTR_ELEM("Pio Reserved 31", 0, 0,
4573                         CNTR_NORMAL,
4574                         access_pio_reserved_31_err_cnt),
4575 [C_PIO_RSVD_30_ERR] = CNTR_ELEM("Pio Reserved 30", 0, 0,
4576                         CNTR_NORMAL,
4577                         access_pio_reserved_30_err_cnt),
4578 [C_PIO_PPMC_SOP_LEN_ERR] = CNTR_ELEM("PioPpmcSopLenErr", 0, 0,
4579                         CNTR_NORMAL,
4580                         access_pio_ppmc_sop_len_err_cnt),
4581 [C_PIO_PPMC_BQC_MEM_PARITY_ERR] = CNTR_ELEM("PioPpmcBqcMemParityErr", 0, 0,
4582                         CNTR_NORMAL,
4583                         access_pio_ppmc_bqc_mem_parity_err_cnt),
4584 [C_PIO_VL_FIFO_PARITY_ERR] = CNTR_ELEM("PioVlFifoParityErr", 0, 0,
4585                         CNTR_NORMAL,
4586                         access_pio_vl_fifo_parity_err_cnt),
4587 [C_PIO_VLF_SOP_PARITY_ERR] = CNTR_ELEM("PioVlfSopParityErr", 0, 0,
4588                         CNTR_NORMAL,
4589                         access_pio_vlf_sop_parity_err_cnt),
4590 [C_PIO_VLF_V1_LEN_PARITY_ERR] = CNTR_ELEM("PioVlfVlLenParityErr", 0, 0,
4591                         CNTR_NORMAL,
4592                         access_pio_vlf_v1_len_parity_err_cnt),
4593 [C_PIO_BLOCK_QW_COUNT_PARITY_ERR] = CNTR_ELEM("PioBlockQwCountParityErr", 0, 0,
4594                         CNTR_NORMAL,
4595                         access_pio_block_qw_count_parity_err_cnt),
4596 [C_PIO_WRITE_QW_VALID_PARITY_ERR] = CNTR_ELEM("PioWriteQwValidParityErr", 0, 0,
4597                         CNTR_NORMAL,
4598                         access_pio_write_qw_valid_parity_err_cnt),
4599 [C_PIO_STATE_MACHINE_ERR] = CNTR_ELEM("PioStateMachineErr", 0, 0,
4600                         CNTR_NORMAL,
4601                         access_pio_state_machine_err_cnt),
4602 [C_PIO_WRITE_DATA_PARITY_ERR] = CNTR_ELEM("PioWriteDataParityErr", 0, 0,
4603                         CNTR_NORMAL,
4604                         access_pio_write_data_parity_err_cnt),
4605 [C_PIO_HOST_ADDR_MEM_COR_ERR] = CNTR_ELEM("PioHostAddrMemCorErr", 0, 0,
4606                         CNTR_NORMAL,
4607                         access_pio_host_addr_mem_cor_err_cnt),
4608 [C_PIO_HOST_ADDR_MEM_UNC_ERR] = CNTR_ELEM("PioHostAddrMemUncErr", 0, 0,
4609                         CNTR_NORMAL,
4610                         access_pio_host_addr_mem_unc_err_cnt),
4611 [C_PIO_PKT_EVICT_SM_OR_ARM_SM_ERR] = CNTR_ELEM("PioPktEvictSmOrArbSmErr", 0, 0,
4612                         CNTR_NORMAL,
4613                         access_pio_pkt_evict_sm_or_arb_sm_err_cnt),
4614 [C_PIO_INIT_SM_IN_ERR] = CNTR_ELEM("PioInitSmInErr", 0, 0,
4615                         CNTR_NORMAL,
4616                         access_pio_init_sm_in_err_cnt),
4617 [C_PIO_PPMC_PBL_FIFO_ERR] = CNTR_ELEM("PioPpmcPblFifoErr", 0, 0,
4618                         CNTR_NORMAL,
4619                         access_pio_ppmc_pbl_fifo_err_cnt),
4620 [C_PIO_CREDIT_RET_FIFO_PARITY_ERR] = CNTR_ELEM("PioCreditRetFifoParityErr", 0,
4621                         0, CNTR_NORMAL,
4622                         access_pio_credit_ret_fifo_parity_err_cnt),
4623 [C_PIO_V1_LEN_MEM_BANK1_COR_ERR] = CNTR_ELEM("PioVlLenMemBank1CorErr", 0, 0,
4624                         CNTR_NORMAL,
4625                         access_pio_v1_len_mem_bank1_cor_err_cnt),
4626 [C_PIO_V1_LEN_MEM_BANK0_COR_ERR] = CNTR_ELEM("PioVlLenMemBank0CorErr", 0, 0,
4627                         CNTR_NORMAL,
4628                         access_pio_v1_len_mem_bank0_cor_err_cnt),
4629 [C_PIO_V1_LEN_MEM_BANK1_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank1UncErr", 0, 0,
4630                         CNTR_NORMAL,
4631                         access_pio_v1_len_mem_bank1_unc_err_cnt),
4632 [C_PIO_V1_LEN_MEM_BANK0_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank0UncErr", 0, 0,
4633                         CNTR_NORMAL,
4634                         access_pio_v1_len_mem_bank0_unc_err_cnt),
4635 [C_PIO_SM_PKT_RESET_PARITY_ERR] = CNTR_ELEM("PioSmPktResetParityErr", 0, 0,
4636                         CNTR_NORMAL,
4637                         access_pio_sm_pkt_reset_parity_err_cnt),
4638 [C_PIO_PKT_EVICT_FIFO_PARITY_ERR] = CNTR_ELEM("PioPktEvictFifoParityErr", 0, 0,
4639                         CNTR_NORMAL,
4640                         access_pio_pkt_evict_fifo_parity_err_cnt),
4641 [C_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR] = CNTR_ELEM(
4642                         "PioSbrdctrlCrrelFifoParityErr", 0, 0,
4643                         CNTR_NORMAL,
4644                         access_pio_sbrdctrl_crrel_fifo_parity_err_cnt),
4645 [C_PIO_SBRDCTL_CRREL_PARITY_ERR] = CNTR_ELEM("PioSbrdctlCrrelParityErr", 0, 0,
4646                         CNTR_NORMAL,
4647                         access_pio_sbrdctl_crrel_parity_err_cnt),
4648 [C_PIO_PEC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPecFifoParityErr", 0, 0,
4649                         CNTR_NORMAL,
4650                         access_pio_pec_fifo_parity_err_cnt),
4651 [C_PIO_PCC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPccFifoParityErr", 0, 0,
4652                         CNTR_NORMAL,
4653                         access_pio_pcc_fifo_parity_err_cnt),
4654 [C_PIO_SB_MEM_FIFO1_ERR] = CNTR_ELEM("PioSbMemFifo1Err", 0, 0,
4655                         CNTR_NORMAL,
4656                         access_pio_sb_mem_fifo1_err_cnt),
4657 [C_PIO_SB_MEM_FIFO0_ERR] = CNTR_ELEM("PioSbMemFifo0Err", 0, 0,
4658                         CNTR_NORMAL,
4659                         access_pio_sb_mem_fifo0_err_cnt),
4660 [C_PIO_CSR_PARITY_ERR] = CNTR_ELEM("PioCsrParityErr", 0, 0,
4661                         CNTR_NORMAL,
4662                         access_pio_csr_parity_err_cnt),
4663 [C_PIO_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("PioWriteAddrParityErr", 0, 0,
4664                         CNTR_NORMAL,
4665                         access_pio_write_addr_parity_err_cnt),
4666 [C_PIO_WRITE_BAD_CTXT_ERR] = CNTR_ELEM("PioWriteBadCtxtErr", 0, 0,
4667                         CNTR_NORMAL,
4668                         access_pio_write_bad_ctxt_err_cnt),
4669 /* SendDmaErrStatus */
4670 [C_SDMA_PCIE_REQ_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPcieReqTrackingCorErr", 0,
4671                         0, CNTR_NORMAL,
4672                         access_sdma_pcie_req_tracking_cor_err_cnt),
4673 [C_SDMA_PCIE_REQ_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPcieReqTrackingUncErr", 0,
4674                         0, CNTR_NORMAL,
4675                         access_sdma_pcie_req_tracking_unc_err_cnt),
4676 [C_SDMA_CSR_PARITY_ERR] = CNTR_ELEM("SDmaCsrParityErr", 0, 0,
4677                         CNTR_NORMAL,
4678                         access_sdma_csr_parity_err_cnt),
4679 [C_SDMA_RPY_TAG_ERR] = CNTR_ELEM("SDmaRpyTagErr", 0, 0,
4680                         CNTR_NORMAL,
4681                         access_sdma_rpy_tag_err_cnt),
4682 /* SendEgressErrStatus */
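/*
 * Egress errors are counted per resource where the hardware reports
 * them that way: one entry per launch FIFO (8 down to 0) and, for
 * disallowed packets, one entry per SDMA engine (15 down to 0).
 */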
4683 [C_TX_READ_PIO_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryCsrUncErr", 0, 0,
4684                         CNTR_NORMAL,
4685                         access_tx_read_pio_memory_csr_unc_err_cnt),
4686 [C_TX_READ_SDMA_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryCsrUncErr", 0,
4687                         0, CNTR_NORMAL,
4688                         access_tx_read_sdma_memory_csr_err_cnt),
4689 [C_TX_EGRESS_FIFO_COR_ERR] = CNTR_ELEM("TxEgressFifoCorErr", 0, 0,
4690                         CNTR_NORMAL,
4691                         access_tx_egress_fifo_cor_err_cnt),
4692 [C_TX_READ_PIO_MEMORY_COR_ERR] = CNTR_ELEM("TxReadPioMemoryCorErr", 0, 0,
4693                         CNTR_NORMAL,
4694                         access_tx_read_pio_memory_cor_err_cnt),
4695 [C_TX_READ_SDMA_MEMORY_COR_ERR] = CNTR_ELEM("TxReadSdmaMemoryCorErr", 0, 0,
4696                         CNTR_NORMAL,
4697                         access_tx_read_sdma_memory_cor_err_cnt),
4698 [C_TX_SB_HDR_COR_ERR] = CNTR_ELEM("TxSbHdrCorErr", 0, 0,
4699                         CNTR_NORMAL,
4700                         access_tx_sb_hdr_cor_err_cnt),
4701 [C_TX_CREDIT_OVERRUN_ERR] = CNTR_ELEM("TxCreditOverrunErr", 0, 0,
4702                         CNTR_NORMAL,
4703                         access_tx_credit_overrun_err_cnt),
4704 [C_TX_LAUNCH_FIFO8_COR_ERR] = CNTR_ELEM("TxLaunchFifo8CorErr", 0, 0,
4705                         CNTR_NORMAL,
4706                         access_tx_launch_fifo8_cor_err_cnt),
4707 [C_TX_LAUNCH_FIFO7_COR_ERR] = CNTR_ELEM("TxLaunchFifo7CorErr", 0, 0,
4708                         CNTR_NORMAL,
4709                         access_tx_launch_fifo7_cor_err_cnt),
4710 [C_TX_LAUNCH_FIFO6_COR_ERR] = CNTR_ELEM("TxLaunchFifo6CorErr", 0, 0,
4711                         CNTR_NORMAL,
4712                         access_tx_launch_fifo6_cor_err_cnt),
4713 [C_TX_LAUNCH_FIFO5_COR_ERR] = CNTR_ELEM("TxLaunchFifo5CorErr", 0, 0,
4714                         CNTR_NORMAL,
4715                         access_tx_launch_fifo5_cor_err_cnt),
4716 [C_TX_LAUNCH_FIFO4_COR_ERR] = CNTR_ELEM("TxLaunchFifo4CorErr", 0, 0,
4717                         CNTR_NORMAL,
4718                         access_tx_launch_fifo4_cor_err_cnt),
4719 [C_TX_LAUNCH_FIFO3_COR_ERR] = CNTR_ELEM("TxLaunchFifo3CorErr", 0, 0,
4720                         CNTR_NORMAL,
4721                         access_tx_launch_fifo3_cor_err_cnt),
4722 [C_TX_LAUNCH_FIFO2_COR_ERR] = CNTR_ELEM("TxLaunchFifo2CorErr", 0, 0,
4723                         CNTR_NORMAL,
4724                         access_tx_launch_fifo2_cor_err_cnt),
4725 [C_TX_LAUNCH_FIFO1_COR_ERR] = CNTR_ELEM("TxLaunchFifo1CorErr", 0, 0,
4726                         CNTR_NORMAL,
4727                         access_tx_launch_fifo1_cor_err_cnt),
4728 [C_TX_LAUNCH_FIFO0_COR_ERR] = CNTR_ELEM("TxLaunchFifo0CorErr", 0, 0,
4729                         CNTR_NORMAL,
4730                         access_tx_launch_fifo0_cor_err_cnt),
4731 [C_TX_CREDIT_RETURN_VL_ERR] = CNTR_ELEM("TxCreditReturnVLErr", 0, 0,
4732                         CNTR_NORMAL,
4733                         access_tx_credit_return_vl_err_cnt),
4734 [C_TX_HCRC_INSERTION_ERR] = CNTR_ELEM("TxHcrcInsertionErr", 0, 0,
4735                         CNTR_NORMAL,
4736                         access_tx_hcrc_insertion_err_cnt),
4737 [C_TX_EGRESS_FIFI_UNC_ERR] = CNTR_ELEM("TxEgressFifoUncErr", 0, 0,
4738                         CNTR_NORMAL,
4739                         access_tx_egress_fifo_unc_err_cnt),
4740 [C_TX_READ_PIO_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryUncErr", 0, 0,
4741                         CNTR_NORMAL,
4742                         access_tx_read_pio_memory_unc_err_cnt),
4743 [C_TX_READ_SDMA_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryUncErr", 0, 0,
4744                         CNTR_NORMAL,
4745                         access_tx_read_sdma_memory_unc_err_cnt),
4746 [C_TX_SB_HDR_UNC_ERR] = CNTR_ELEM("TxSbHdrUncErr", 0, 0,
4747                         CNTR_NORMAL,
4748                         access_tx_sb_hdr_unc_err_cnt),
4749 [C_TX_CREDIT_RETURN_PARITY_ERR] = CNTR_ELEM("TxCreditReturnParityErr", 0, 0,
4750                         CNTR_NORMAL,
4751                         access_tx_credit_return_partiy_err_cnt),
4752 [C_TX_LAUNCH_FIFO8_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo8UncOrParityErr",
4753                         0, 0, CNTR_NORMAL,
4754                         access_tx_launch_fifo8_unc_or_parity_err_cnt),
4755 [C_TX_LAUNCH_FIFO7_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo7UncOrParityErr",
4756                         0, 0, CNTR_NORMAL,
4757                         access_tx_launch_fifo7_unc_or_parity_err_cnt),
4758 [C_TX_LAUNCH_FIFO6_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo6UncOrParityErr",
4759                         0, 0, CNTR_NORMAL,
4760                         access_tx_launch_fifo6_unc_or_parity_err_cnt),
4761 [C_TX_LAUNCH_FIFO5_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo5UncOrParityErr",
4762                         0, 0, CNTR_NORMAL,
4763                         access_tx_launch_fifo5_unc_or_parity_err_cnt),
4764 [C_TX_LAUNCH_FIFO4_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo4UncOrParityErr",
4765                         0, 0, CNTR_NORMAL,
4766                         access_tx_launch_fifo4_unc_or_parity_err_cnt),
4767 [C_TX_LAUNCH_FIFO3_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo3UncOrParityErr",
4768                         0, 0, CNTR_NORMAL,
4769                         access_tx_launch_fifo3_unc_or_parity_err_cnt),
4770 [C_TX_LAUNCH_FIFO2_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo2UncOrParityErr",
4771                         0, 0, CNTR_NORMAL,
4772                         access_tx_launch_fifo2_unc_or_parity_err_cnt),
4773 [C_TX_LAUNCH_FIFO1_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo1UncOrParityErr",
4774                         0, 0, CNTR_NORMAL,
4775                         access_tx_launch_fifo1_unc_or_parity_err_cnt),
4776 [C_TX_LAUNCH_FIFO0_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo0UncOrParityErr",
4777                         0, 0, CNTR_NORMAL,
4778                         access_tx_launch_fifo0_unc_or_parity_err_cnt),
4779 [C_TX_SDMA15_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma15DisallowedPacketErr",
4780                         0, 0, CNTR_NORMAL,
4781                         access_tx_sdma15_disallowed_packet_err_cnt),
4782 [C_TX_SDMA14_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma14DisallowedPacketErr",
4783                         0, 0, CNTR_NORMAL,
4784                         access_tx_sdma14_disallowed_packet_err_cnt),
4785 [C_TX_SDMA13_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma13DisallowedPacketErr",
4786                         0, 0, CNTR_NORMAL,
4787                         access_tx_sdma13_disallowed_packet_err_cnt),
4788 [C_TX_SDMA12_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma12DisallowedPacketErr",
4789                         0, 0, CNTR_NORMAL,
4790                         access_tx_sdma12_disallowed_packet_err_cnt),
4791 [C_TX_SDMA11_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma11DisallowedPacketErr",
4792                         0, 0, CNTR_NORMAL,
4793                         access_tx_sdma11_disallowed_packet_err_cnt),
4794 [C_TX_SDMA10_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma10DisallowedPacketErr",
4795                         0, 0, CNTR_NORMAL,
4796                         access_tx_sdma10_disallowed_packet_err_cnt),
4797 [C_TX_SDMA9_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma9DisallowedPacketErr",
4798                         0, 0, CNTR_NORMAL,
4799                         access_tx_sdma9_disallowed_packet_err_cnt),
4800 [C_TX_SDMA8_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma8DisallowedPacketErr",
4801                         0, 0, CNTR_NORMAL,
4802                         access_tx_sdma8_disallowed_packet_err_cnt),
4803 [C_TX_SDMA7_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma7DisallowedPacketErr",
4804                         0, 0, CNTR_NORMAL,
4805                         access_tx_sdma7_disallowed_packet_err_cnt),
4806 [C_TX_SDMA6_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma6DisallowedPacketErr",
4807                         0, 0, CNTR_NORMAL,
4808                         access_tx_sdma6_disallowed_packet_err_cnt),
4809 [C_TX_SDMA5_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma5DisallowedPacketErr",
4810                         0, 0, CNTR_NORMAL,
4811                         access_tx_sdma5_disallowed_packet_err_cnt),
4812 [C_TX_SDMA4_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma4DisallowedPacketErr",
4813                         0, 0, CNTR_NORMAL,
4814                         access_tx_sdma4_disallowed_packet_err_cnt),
4815 [C_TX_SDMA3_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma3DisallowedPacketErr",
4816                         0, 0, CNTR_NORMAL,
4817                         access_tx_sdma3_disallowed_packet_err_cnt),
4818 [C_TX_SDMA2_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma2DisallowedPacketErr",
4819                         0, 0, CNTR_NORMAL,
4820                         access_tx_sdma2_disallowed_packet_err_cnt),
4821 [C_TX_SDMA1_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma1DisallowedPacketErr",
4822                         0, 0, CNTR_NORMAL,
4823                         access_tx_sdma1_disallowed_packet_err_cnt),
4824 [C_TX_SDMA0_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma0DisallowedPacketErr",
4825                         0, 0, CNTR_NORMAL,
4826                         access_tx_sdma0_disallowed_packet_err_cnt),
4827 [C_TX_CONFIG_PARITY_ERR] = CNTR_ELEM("TxConfigParityErr", 0, 0,
4828                         CNTR_NORMAL,
4829                         access_tx_config_parity_err_cnt),
4830 [C_TX_SBRD_CTL_CSR_PARITY_ERR] = CNTR_ELEM("TxSbrdCtlCsrParityErr", 0, 0,
4831                         CNTR_NORMAL,
4832                         access_tx_sbrd_ctl_csr_parity_err_cnt),
4833 [C_TX_LAUNCH_CSR_PARITY_ERR] = CNTR_ELEM("TxLaunchCsrParityErr", 0, 0,
4834                         CNTR_NORMAL,
4835                         access_tx_launch_csr_parity_err_cnt),
4836 [C_TX_ILLEGAL_CL_ERR] = CNTR_ELEM("TxIllegalVLErr", 0, 0,
4837                         CNTR_NORMAL,
4838                         access_tx_illegal_vl_err_cnt),
4839 [C_TX_SBRD_CTL_STATE_MACHINE_PARITY_ERR] = CNTR_ELEM(
4840                         "TxSbrdCtlStateMachineParityErr", 0, 0,
4841                         CNTR_NORMAL,
4842                         access_tx_sbrd_ctl_state_machine_parity_err_cnt),
4843 [C_TX_RESERVED_10] = CNTR_ELEM("Tx Egress Reserved 10", 0, 0,
4844                         CNTR_NORMAL,
4845                         access_egress_reserved_10_err_cnt),
4846 [C_TX_RESERVED_9] = CNTR_ELEM("Tx Egress Reserved 9", 0, 0,
4847                         CNTR_NORMAL,
4848                         access_egress_reserved_9_err_cnt),
4849 [C_TX_SDMA_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxSdmaLaunchIntfParityErr",
4850                         0, 0, CNTR_NORMAL,
4851                         access_tx_sdma_launch_intf_parity_err_cnt),
4852 [C_TX_PIO_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxPioLaunchIntfParityErr", 0, 0,
4853                         CNTR_NORMAL,
4854                         access_tx_pio_launch_intf_parity_err_cnt),
4855 [C_TX_RESERVED_6] = CNTR_ELEM("Tx Egress Reserved 6", 0, 0,
4856                         CNTR_NORMAL,
4857                         access_egress_reserved_6_err_cnt),
4858 [C_TX_INCORRECT_LINK_STATE_ERR] = CNTR_ELEM("TxIncorrectLinkStateErr", 0, 0,
4859                         CNTR_NORMAL,
4860                         access_tx_incorrect_link_state_err_cnt),
4861 [C_TX_LINK_DOWN_ERR] = CNTR_ELEM("TxLinkdownErr", 0, 0,
4862                         CNTR_NORMAL,
4863                         access_tx_linkdown_err_cnt),
4864 [C_TX_EGRESS_FIFO_UNDERRUN_OR_PARITY_ERR] = CNTR_ELEM(
4865                         "EgressFifoUnderrunOrParityErr", 0, 0,
4866                         CNTR_NORMAL,
4867                         access_tx_egress_fifi_underrun_or_parity_err_cnt),
4868 [C_TX_RESERVED_2] = CNTR_ELEM("Tx Egress Reserved 2", 0, 0,
4869                         CNTR_NORMAL,
4870                         access_egress_reserved_2_err_cnt),
4871 [C_TX_PKT_INTEGRITY_MEM_UNC_ERR] = CNTR_ELEM("TxPktIntegrityMemUncErr", 0, 0,
4872                         CNTR_NORMAL,
4873                         access_tx_pkt_integrity_mem_unc_err_cnt),
4874 [C_TX_PKT_INTEGRITY_MEM_COR_ERR] = CNTR_ELEM("TxPktIntegrityMemCorErr", 0, 0,
4875                         CNTR_NORMAL,
4876                         access_tx_pkt_integrity_mem_cor_err_cnt),
4877 /* SendErrStatus */
4878 [C_SEND_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("SendCsrWriteBadAddrErr", 0, 0,
4879                         CNTR_NORMAL,
4880                         access_send_csr_write_bad_addr_err_cnt),
4881 [C_SEND_CSR_READ_BAD_ADD_ERR] = CNTR_ELEM("SendCsrReadBadAddrErr", 0, 0,
4882                         CNTR_NORMAL,
4883                         access_send_csr_read_bad_addr_err_cnt),
4884 [C_SEND_CSR_PARITY_ERR] = CNTR_ELEM("SendCsrParityErr", 0, 0,
4885                         CNTR_NORMAL,
4886                         access_send_csr_parity_cnt),
4887 /* SendCtxtErrStatus */
4888 [C_PIO_WRITE_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("PioWriteOutOfBoundsErr", 0, 0,
4889                         CNTR_NORMAL,
4890                         access_pio_write_out_of_bounds_err_cnt),
4891 [C_PIO_WRITE_OVERFLOW_ERR] = CNTR_ELEM("PioWriteOverflowErr", 0, 0,
4892                         CNTR_NORMAL,
4893                         access_pio_write_overflow_err_cnt),
4894 [C_PIO_WRITE_CROSSES_BOUNDARY_ERR] = CNTR_ELEM("PioWriteCrossesBoundaryErr",
4895                         0, 0, CNTR_NORMAL,
4896                         access_pio_write_crosses_boundary_err_cnt),
4897 [C_PIO_DISALLOWED_PACKET_ERR] = CNTR_ELEM("PioDisallowedPacketErr", 0, 0,
4898                         CNTR_NORMAL,
4899                         access_pio_disallowed_packet_err_cnt),
4900 [C_PIO_INCONSISTENT_SOP_ERR] = CNTR_ELEM("PioInconsistentSopErr", 0, 0,
4901                         CNTR_NORMAL,
4902                         access_pio_inconsistent_sop_err_cnt),
4903 /* SendDmaEngErrStatus */
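/*
 * These entries are served by the access_sdma_*_err_cnt() helpers
 * defined earlier in this file, each returning one slot of
 * dd->sw_send_dma_eng_err_status_cnt[].
 */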
4904 [C_SDMA_HEADER_REQUEST_FIFO_COR_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoCorErr",
4905                         0, 0, CNTR_NORMAL,
4906                         access_sdma_header_request_fifo_cor_err_cnt),
4907 [C_SDMA_HEADER_STORAGE_COR_ERR] = CNTR_ELEM("SDmaHeaderStorageCorErr", 0, 0,
4908                         CNTR_NORMAL,
4909                         access_sdma_header_storage_cor_err_cnt),
4910 [C_SDMA_PACKET_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPacketTrackingCorErr", 0, 0,
4911                         CNTR_NORMAL,
4912                         access_sdma_packet_tracking_cor_err_cnt),
4913 [C_SDMA_ASSEMBLY_COR_ERR] = CNTR_ELEM("SDmaAssemblyCorErr", 0, 0,
4914                         CNTR_NORMAL,
4915                         access_sdma_assembly_cor_err_cnt),
4916 [C_SDMA_DESC_TABLE_COR_ERR] = CNTR_ELEM("SDmaDescTableCorErr", 0, 0,
4917                         CNTR_NORMAL,
4918                         access_sdma_desc_table_cor_err_cnt),
4919 [C_SDMA_HEADER_REQUEST_FIFO_UNC_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoUncErr",
4920                         0, 0, CNTR_NORMAL,
4921                         access_sdma_header_request_fifo_unc_err_cnt),
4922 [C_SDMA_HEADER_STORAGE_UNC_ERR] = CNTR_ELEM("SDmaHeaderStorageUncErr", 0, 0,
4923                         CNTR_NORMAL,
4924                         access_sdma_header_storage_unc_err_cnt),
4925 [C_SDMA_PACKET_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPacketTrackingUncErr", 0, 0,
4926                         CNTR_NORMAL,
4927                         access_sdma_packet_tracking_unc_err_cnt),
4928 [C_SDMA_ASSEMBLY_UNC_ERR] = CNTR_ELEM("SDmaAssemblyUncErr", 0, 0,
4929                         CNTR_NORMAL,
4930                         access_sdma_assembly_unc_err_cnt),
4931 [C_SDMA_DESC_TABLE_UNC_ERR] = CNTR_ELEM("SDmaDescTableUncErr", 0, 0,
4932                         CNTR_NORMAL,
4933                         access_sdma_desc_table_unc_err_cnt),
4934 [C_SDMA_TIMEOUT_ERR] = CNTR_ELEM("SDmaTimeoutErr", 0, 0,
4935                         CNTR_NORMAL,
4936                         access_sdma_timeout_err_cnt),
4937 [C_SDMA_HEADER_LENGTH_ERR] = CNTR_ELEM("SDmaHeaderLengthErr", 0, 0,
4938                         CNTR_NORMAL,
4939                         access_sdma_header_length_err_cnt),
4940 [C_SDMA_HEADER_ADDRESS_ERR] = CNTR_ELEM("SDmaHeaderAddressErr", 0, 0,
4941                         CNTR_NORMAL,
4942                         access_sdma_header_address_err_cnt),
4943 [C_SDMA_HEADER_SELECT_ERR] = CNTR_ELEM("SDmaHeaderSelectErr", 0, 0,
4944                         CNTR_NORMAL,
4945                         access_sdma_header_select_err_cnt),
4946 [C_SMDA_RESERVED_9] = CNTR_ELEM("SDma Reserved 9", 0, 0,
4947                         CNTR_NORMAL,
4948                         access_sdma_reserved_9_err_cnt),
4949 [C_SDMA_PACKET_DESC_OVERFLOW_ERR] = CNTR_ELEM("SDmaPacketDescOverflowErr", 0, 0,
4950                         CNTR_NORMAL,
4951                         access_sdma_packet_desc_overflow_err_cnt),
4952 [C_SDMA_LENGTH_MISMATCH_ERR] = CNTR_ELEM("SDmaLengthMismatchErr", 0, 0,
4953                         CNTR_NORMAL,
4954                         access_sdma_length_mismatch_err_cnt),
4955 [C_SDMA_HALT_ERR] = CNTR_ELEM("SDmaHaltErr", 0, 0,
4956                         CNTR_NORMAL,
4957                         access_sdma_halt_err_cnt),
4958 [C_SDMA_MEM_READ_ERR] = CNTR_ELEM("SDmaMemReadErr", 0, 0,
4959                         CNTR_NORMAL,
4960                         access_sdma_mem_read_err_cnt),
4961 [C_SDMA_FIRST_DESC_ERR] = CNTR_ELEM("SDmaFirstDescErr", 0, 0,
4962                         CNTR_NORMAL,
4963                         access_sdma_first_desc_err_cnt),
4964 [C_SDMA_TAIL_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("SDmaTailOutOfBoundsErr", 0, 0,
4965                         CNTR_NORMAL,
4966                         access_sdma_tail_out_of_bounds_err_cnt),
4967 [C_SDMA_TOO_LONG_ERR] = CNTR_ELEM("SDmaTooLongErr", 0, 0,
4968                         CNTR_NORMAL,
4969                         access_sdma_too_long_err_cnt),
4970 [C_SDMA_GEN_MISMATCH_ERR] = CNTR_ELEM("SDmaGenMismatchErr", 0, 0,
4971                         CNTR_NORMAL,
4972                         access_sdma_gen_mismatch_err_cnt),
4973 [C_SDMA_WRONG_DW_ERR] = CNTR_ELEM("SDmaWrongDwErr", 0, 0,
4974                         CNTR_NORMAL,
4975                         access_sdma_wrong_dw_err_cnt),
4976 };
4977
4978 static struct cntr_entry port_cntrs[PORT_CNTR_LAST] = {
4979 [C_TX_UNSUP_VL] = TXE32_PORT_CNTR_ELEM(TxUnVLErr, SEND_UNSUP_VL_ERR_CNT,
4980                         CNTR_NORMAL),
4981 [C_TX_INVAL_LEN] = TXE32_PORT_CNTR_ELEM(TxInvalLen, SEND_LEN_ERR_CNT,
4982                         CNTR_NORMAL),
4983 [C_TX_MM_LEN_ERR] = TXE32_PORT_CNTR_ELEM(TxMMLenErr, SEND_MAX_MIN_LEN_ERR_CNT,
4984                         CNTR_NORMAL),
4985 [C_TX_UNDERRUN] = TXE32_PORT_CNTR_ELEM(TxUnderrun, SEND_UNDERRUN_CNT,
4986                         CNTR_NORMAL),
4987 [C_TX_FLOW_STALL] = TXE32_PORT_CNTR_ELEM(TxFlowStall, SEND_FLOW_STALL_CNT,
4988                         CNTR_NORMAL),
4989 [C_TX_DROPPED] = TXE32_PORT_CNTR_ELEM(TxDropped, SEND_DROPPED_PKT_CNT,
4990                         CNTR_NORMAL),
4991 [C_TX_HDR_ERR] = TXE32_PORT_CNTR_ELEM(TxHdrErr, SEND_HEADERS_ERR_CNT,
4992                         CNTR_NORMAL),
4993 [C_TX_PKT] = TXE64_PORT_CNTR_ELEM(TxPkt, SEND_DATA_PKT_CNT, CNTR_NORMAL),
4994 [C_TX_WORDS] = TXE64_PORT_CNTR_ELEM(TxWords, SEND_DWORD_CNT, CNTR_NORMAL),
4995 [C_TX_WAIT] = TXE64_PORT_CNTR_ELEM(TxWait, SEND_WAIT_CNT, CNTR_SYNTH),
4996 [C_TX_FLIT_VL] = TXE64_PORT_CNTR_ELEM(TxFlitVL, SEND_DATA_VL0_CNT,
4997                                       CNTR_SYNTH | CNTR_VL),
4998 [C_TX_PKT_VL] = TXE64_PORT_CNTR_ELEM(TxPktVL, SEND_DATA_PKT_VL0_CNT,
4999                                      CNTR_SYNTH | CNTR_VL),
5000 [C_TX_WAIT_VL] = TXE64_PORT_CNTR_ELEM(TxWaitVL, SEND_WAIT_VL0_CNT,
5001                                       CNTR_SYNTH | CNTR_VL),
5002 [C_RX_PKT] = RXE64_PORT_CNTR_ELEM(RxPkt, RCV_DATA_PKT_CNT, CNTR_NORMAL),
5003 [C_RX_WORDS] = RXE64_PORT_CNTR_ELEM(RxWords, RCV_DWORD_CNT, CNTR_NORMAL),
5004 [C_SW_LINK_DOWN] = CNTR_ELEM("SwLinkDown", 0, 0, CNTR_SYNTH | CNTR_32BIT,
5005                              access_sw_link_dn_cnt),
5006 [C_SW_LINK_UP] = CNTR_ELEM("SwLinkUp", 0, 0, CNTR_SYNTH | CNTR_32BIT,
5007                            access_sw_link_up_cnt),
5008 [C_SW_UNKNOWN_FRAME] = CNTR_ELEM("UnknownFrame", 0, 0, CNTR_NORMAL,
5009                                  access_sw_unknown_frame_cnt),
5010 [C_SW_XMIT_DSCD] = CNTR_ELEM("XmitDscd", 0, 0, CNTR_SYNTH | CNTR_32BIT,
5011                              access_sw_xmit_discards),
5012 [C_SW_XMIT_DSCD_VL] = CNTR_ELEM("XmitDscdVl", 0, 0,
5013                                 CNTR_SYNTH | CNTR_32BIT | CNTR_VL,
5014                                 access_sw_xmit_discards),
5015 [C_SW_XMIT_CSTR_ERR] = CNTR_ELEM("XmitCstrErr", 0, 0, CNTR_SYNTH,
5016                                  access_xmit_constraint_errs),
5017 [C_SW_RCV_CSTR_ERR] = CNTR_ELEM("RcvCstrErr", 0, 0, CNTR_SYNTH,
5018                                 access_rcv_constraint_errs),
5019 [C_SW_IBP_LOOP_PKTS] = SW_IBP_CNTR(LoopPkts, loop_pkts),
5020 [C_SW_IBP_RC_RESENDS] = SW_IBP_CNTR(RcResend, rc_resends),
5021 [C_SW_IBP_RNR_NAKS] = SW_IBP_CNTR(RnrNak, rnr_naks),
5022 [C_SW_IBP_OTHER_NAKS] = SW_IBP_CNTR(OtherNak, other_naks),
5023 [C_SW_IBP_RC_TIMEOUTS] = SW_IBP_CNTR(RcTimeOut, rc_timeouts),
5024 [C_SW_IBP_PKT_DROPS] = SW_IBP_CNTR(PktDrop, pkt_drops),
5025 [C_SW_IBP_DMA_WAIT] = SW_IBP_CNTR(DmaWait, dmawait),
5026 [C_SW_IBP_RC_SEQNAK] = SW_IBP_CNTR(RcSeqNak, rc_seqnak),
[C_SW_IBP_RC_DUPREQ] = SW_IBP_CNTR(RcDupReq, rc_dupreq),
5028 [C_SW_IBP_RDMA_SEQ] = SW_IBP_CNTR(RdmaSeq, rdma_seq),
5029 [C_SW_IBP_UNALIGNED] = SW_IBP_CNTR(Unaligned, unaligned),
5030 [C_SW_IBP_SEQ_NAK] = SW_IBP_CNTR(SeqNak, seq_naks),
5031 [C_SW_CPU_RC_ACKS] = CNTR_ELEM("RcAcks", 0, 0, CNTR_NORMAL,
5032                                access_sw_cpu_rc_acks),
5033 [C_SW_CPU_RC_QACKS] = CNTR_ELEM("RcQacks", 0, 0, CNTR_NORMAL,
5034                                 access_sw_cpu_rc_qacks),
5035 [C_SW_CPU_RC_DELAYED_COMP] = CNTR_ELEM("RcDelayComp", 0, 0, CNTR_NORMAL,
5036                                        access_sw_cpu_rc_delayed_comp),
5037 [OVR_LBL(0)] = OVR_ELM(0), [OVR_LBL(1)] = OVR_ELM(1),
5038 [OVR_LBL(2)] = OVR_ELM(2), [OVR_LBL(3)] = OVR_ELM(3),
5039 [OVR_LBL(4)] = OVR_ELM(4), [OVR_LBL(5)] = OVR_ELM(5),
5040 [OVR_LBL(6)] = OVR_ELM(6), [OVR_LBL(7)] = OVR_ELM(7),
5041 [OVR_LBL(8)] = OVR_ELM(8), [OVR_LBL(9)] = OVR_ELM(9),
5042 [OVR_LBL(10)] = OVR_ELM(10), [OVR_LBL(11)] = OVR_ELM(11),
5043 [OVR_LBL(12)] = OVR_ELM(12), [OVR_LBL(13)] = OVR_ELM(13),
5044 [OVR_LBL(14)] = OVR_ELM(14), [OVR_LBL(15)] = OVR_ELM(15),
5045 [OVR_LBL(16)] = OVR_ELM(16), [OVR_LBL(17)] = OVR_ELM(17),
5046 [OVR_LBL(18)] = OVR_ELM(18), [OVR_LBL(19)] = OVR_ELM(19),
5047 [OVR_LBL(20)] = OVR_ELM(20), [OVR_LBL(21)] = OVR_ELM(21),
5048 [OVR_LBL(22)] = OVR_ELM(22), [OVR_LBL(23)] = OVR_ELM(23),
5049 [OVR_LBL(24)] = OVR_ELM(24), [OVR_LBL(25)] = OVR_ELM(25),
5050 [OVR_LBL(26)] = OVR_ELM(26), [OVR_LBL(27)] = OVR_ELM(27),
5051 [OVR_LBL(28)] = OVR_ELM(28), [OVR_LBL(29)] = OVR_ELM(29),
5052 [OVR_LBL(30)] = OVR_ELM(30), [OVR_LBL(31)] = OVR_ELM(31),
5053 [OVR_LBL(32)] = OVR_ELM(32), [OVR_LBL(33)] = OVR_ELM(33),
5054 [OVR_LBL(34)] = OVR_ELM(34), [OVR_LBL(35)] = OVR_ELM(35),
5055 [OVR_LBL(36)] = OVR_ELM(36), [OVR_LBL(37)] = OVR_ELM(37),
5056 [OVR_LBL(38)] = OVR_ELM(38), [OVR_LBL(39)] = OVR_ELM(39),
5057 [OVR_LBL(40)] = OVR_ELM(40), [OVR_LBL(41)] = OVR_ELM(41),
5058 [OVR_LBL(42)] = OVR_ELM(42), [OVR_LBL(43)] = OVR_ELM(43),
5059 [OVR_LBL(44)] = OVR_ELM(44), [OVR_LBL(45)] = OVR_ELM(45),
5060 [OVR_LBL(46)] = OVR_ELM(46), [OVR_LBL(47)] = OVR_ELM(47),
5061 [OVR_LBL(48)] = OVR_ELM(48), [OVR_LBL(49)] = OVR_ELM(49),
5062 [OVR_LBL(50)] = OVR_ELM(50), [OVR_LBL(51)] = OVR_ELM(51),
5063 [OVR_LBL(52)] = OVR_ELM(52), [OVR_LBL(53)] = OVR_ELM(53),
5064 [OVR_LBL(54)] = OVR_ELM(54), [OVR_LBL(55)] = OVR_ELM(55),
5065 [OVR_LBL(56)] = OVR_ELM(56), [OVR_LBL(57)] = OVR_ELM(57),
5066 [OVR_LBL(58)] = OVR_ELM(58), [OVR_LBL(59)] = OVR_ELM(59),
5067 [OVR_LBL(60)] = OVR_ELM(60), [OVR_LBL(61)] = OVR_ELM(61),
5068 [OVR_LBL(62)] = OVR_ELM(62), [OVR_LBL(63)] = OVR_ELM(63),
5069 [OVR_LBL(64)] = OVR_ELM(64), [OVR_LBL(65)] = OVR_ELM(65),
5070 [OVR_LBL(66)] = OVR_ELM(66), [OVR_LBL(67)] = OVR_ELM(67),
5071 [OVR_LBL(68)] = OVR_ELM(68), [OVR_LBL(69)] = OVR_ELM(69),
5072 [OVR_LBL(70)] = OVR_ELM(70), [OVR_LBL(71)] = OVR_ELM(71),
5073 [OVR_LBL(72)] = OVR_ELM(72), [OVR_LBL(73)] = OVR_ELM(73),
5074 [OVR_LBL(74)] = OVR_ELM(74), [OVR_LBL(75)] = OVR_ELM(75),
5075 [OVR_LBL(76)] = OVR_ELM(76), [OVR_LBL(77)] = OVR_ELM(77),
5076 [OVR_LBL(78)] = OVR_ELM(78), [OVR_LBL(79)] = OVR_ELM(79),
5077 [OVR_LBL(80)] = OVR_ELM(80), [OVR_LBL(81)] = OVR_ELM(81),
5078 [OVR_LBL(82)] = OVR_ELM(82), [OVR_LBL(83)] = OVR_ELM(83),
5079 [OVR_LBL(84)] = OVR_ELM(84), [OVR_LBL(85)] = OVR_ELM(85),
5080 [OVR_LBL(86)] = OVR_ELM(86), [OVR_LBL(87)] = OVR_ELM(87),
5081 [OVR_LBL(88)] = OVR_ELM(88), [OVR_LBL(89)] = OVR_ELM(89),
5082 [OVR_LBL(90)] = OVR_ELM(90), [OVR_LBL(91)] = OVR_ELM(91),
5083 [OVR_LBL(92)] = OVR_ELM(92), [OVR_LBL(93)] = OVR_ELM(93),
5084 [OVR_LBL(94)] = OVR_ELM(94), [OVR_LBL(95)] = OVR_ELM(95),
5085 [OVR_LBL(96)] = OVR_ELM(96), [OVR_LBL(97)] = OVR_ELM(97),
5086 [OVR_LBL(98)] = OVR_ELM(98), [OVR_LBL(99)] = OVR_ELM(99),
5087 [OVR_LBL(100)] = OVR_ELM(100), [OVR_LBL(101)] = OVR_ELM(101),
5088 [OVR_LBL(102)] = OVR_ELM(102), [OVR_LBL(103)] = OVR_ELM(103),
5089 [OVR_LBL(104)] = OVR_ELM(104), [OVR_LBL(105)] = OVR_ELM(105),
5090 [OVR_LBL(106)] = OVR_ELM(106), [OVR_LBL(107)] = OVR_ELM(107),
5091 [OVR_LBL(108)] = OVR_ELM(108), [OVR_LBL(109)] = OVR_ELM(109),
5092 [OVR_LBL(110)] = OVR_ELM(110), [OVR_LBL(111)] = OVR_ELM(111),
5093 [OVR_LBL(112)] = OVR_ELM(112), [OVR_LBL(113)] = OVR_ELM(113),
5094 [OVR_LBL(114)] = OVR_ELM(114), [OVR_LBL(115)] = OVR_ELM(115),
5095 [OVR_LBL(116)] = OVR_ELM(116), [OVR_LBL(117)] = OVR_ELM(117),
5096 [OVR_LBL(118)] = OVR_ELM(118), [OVR_LBL(119)] = OVR_ELM(119),
5097 [OVR_LBL(120)] = OVR_ELM(120), [OVR_LBL(121)] = OVR_ELM(121),
5098 [OVR_LBL(122)] = OVR_ELM(122), [OVR_LBL(123)] = OVR_ELM(123),
5099 [OVR_LBL(124)] = OVR_ELM(124), [OVR_LBL(125)] = OVR_ELM(125),
5100 [OVR_LBL(126)] = OVR_ELM(126), [OVR_LBL(127)] = OVR_ELM(127),
5101 [OVR_LBL(128)] = OVR_ELM(128), [OVR_LBL(129)] = OVR_ELM(129),
5102 [OVR_LBL(130)] = OVR_ELM(130), [OVR_LBL(131)] = OVR_ELM(131),
5103 [OVR_LBL(132)] = OVR_ELM(132), [OVR_LBL(133)] = OVR_ELM(133),
5104 [OVR_LBL(134)] = OVR_ELM(134), [OVR_LBL(135)] = OVR_ELM(135),
5105 [OVR_LBL(136)] = OVR_ELM(136), [OVR_LBL(137)] = OVR_ELM(137),
5106 [OVR_LBL(138)] = OVR_ELM(138), [OVR_LBL(139)] = OVR_ELM(139),
5107 [OVR_LBL(140)] = OVR_ELM(140), [OVR_LBL(141)] = OVR_ELM(141),
5108 [OVR_LBL(142)] = OVR_ELM(142), [OVR_LBL(143)] = OVR_ELM(143),
5109 [OVR_LBL(144)] = OVR_ELM(144), [OVR_LBL(145)] = OVR_ELM(145),
5110 [OVR_LBL(146)] = OVR_ELM(146), [OVR_LBL(147)] = OVR_ELM(147),
5111 [OVR_LBL(148)] = OVR_ELM(148), [OVR_LBL(149)] = OVR_ELM(149),
5112 [OVR_LBL(150)] = OVR_ELM(150), [OVR_LBL(151)] = OVR_ELM(151),
5113 [OVR_LBL(152)] = OVR_ELM(152), [OVR_LBL(153)] = OVR_ELM(153),
5114 [OVR_LBL(154)] = OVR_ELM(154), [OVR_LBL(155)] = OVR_ELM(155),
5115 [OVR_LBL(156)] = OVR_ELM(156), [OVR_LBL(157)] = OVR_ELM(157),
5116 [OVR_LBL(158)] = OVR_ELM(158), [OVR_LBL(159)] = OVR_ELM(159),
5117 };
5118
5119 /* ======================================================================== */
5120
/* return true if this is chip revision A */
5122 int is_ax(struct hfi1_devdata *dd)
5123 {
5124         u8 chip_rev_minor =
5125                 dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
5126                         & CCE_REVISION_CHIP_REV_MINOR_MASK;
5127         return (chip_rev_minor & 0xf0) == 0;
5128 }
5129
/* return true if this is chip revision B */
5131 int is_bx(struct hfi1_devdata *dd)
5132 {
5133         u8 chip_rev_minor =
5134                 dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
5135                         & CCE_REVISION_CHIP_REV_MINOR_MASK;
	return (chip_rev_minor & 0xf0) == 0x10;
5137 }
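
/*
 * Illustrative decode (hypothetical field value): if ChipRevMinor read
 * back as 0x12, then
 *
 *	(0x12 & 0xf0) == 0x10	-> is_bx() returns true
 *	(0x12 & 0x0f) == 0x02	-> minor stepping 2 of that revision
 *
 * i.e. the high nibble of the field selects the major revision (A = 0,
 * B = 1) and the low nibble the stepping within it.
 */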
5138
/*
 * Append string s to buffer buf.  Arguments curp and lenp are the current
 * position and remaining length, respectively.
 *
 * Return 0 on success, 1 when out of room.
 */
5145 static int append_str(char *buf, char **curp, int *lenp, const char *s)
5146 {
5147         char *p = *curp;
5148         int len = *lenp;
5149         int result = 0; /* success */
5150         char c;
5151
	/* add a separating comma if this is not the first string in buf */
5153         if (p != buf) {
5154                 if (len == 0) {
5155                         result = 1; /* out of room */
5156                         goto done;
5157                 }
5158                 *p++ = ',';
5159                 len--;
5160         }
5161
5162         /* copy the string */
5163         while ((c = *s++) != 0) {
5164                 if (len == 0) {
5165                         result = 1; /* out of room */
5166                         goto done;
5167                 }
5168                 *p++ = c;
5169                 len--;
5170         }
5171
5172 done:
5173         /* write return values */
5174         *curp = p;
5175         *lenp = len;
5176
5177         return result;
5178 }
5179
5180 /*
5181  * Using the given flag table, print a comma separated string into
5182  * the buffer.  End in '*' if the buffer is too short.
5183  */
5184 static char *flag_string(char *buf, int buf_len, u64 flags,
5185                          struct flag_table *table, int table_size)
5186 {
5187         char extra[32];
5188         char *p = buf;
5189         int len = buf_len;
5190         int no_room = 0;
5191         int i;
5192
	/* make sure there are at least 2 bytes so we can form "*" plus a nul */
5194         if (len < 2)
5195                 return "";
5196
5197         len--;  /* leave room for a nul */
5198         for (i = 0; i < table_size; i++) {
5199                 if (flags & table[i].flag) {
5200                         no_room = append_str(buf, &p, &len, table[i].str);
5201                         if (no_room)
5202                                 break;
5203                         flags &= ~table[i].flag;
5204                 }
5205         }
5206
5207         /* any undocumented bits left? */
5208         if (!no_room && flags) {
5209                 snprintf(extra, sizeof(extra), "bits 0x%llx", flags);
5210                 no_room = append_str(buf, &p, &len, extra);
5211         }
5212
	/* add a '*' if we ran out of room */
5214         if (no_room) {
5215                 /* may need to back up to add space for a '*' */
5216                 if (len == 0)
5217                         --p;
5218                 *p++ = '*';
5219         }
5220
5221         /* add final nul - space already allocated above */
5222         *p = 0;
5223         return buf;
5224 }
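
/*
 * Usage sketch for flag_string(), with a hypothetical two-entry table
 * (the driver's real tables, e.g. cce_err_status_flags, have the same
 * shape):
 *
 *	static struct flag_table tbl[] = {
 *		{ BIT_ULL(0), "ErrA" },
 *		{ BIT_ULL(1), "ErrB" }
 *	};
 *	char buf[64];
 *
 *	flag_string(buf, sizeof(buf), 0x0b, tbl, ARRAY_SIZE(tbl));
 *
 * yields "ErrA,ErrB,bits 0x8": named bits print as strings, leftover
 * undocumented bits are reported in hex, and a trailing '*' appears
 * only when the buffer is too short.
 */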
5225
5226 /* first 8 CCE error interrupt source names */
5227 static const char * const cce_misc_names[] = {
5228         "CceErrInt",            /* 0 */
5229         "RxeErrInt",            /* 1 */
5230         "MiscErrInt",           /* 2 */
5231         "Reserved3",            /* 3 */
5232         "PioErrInt",            /* 4 */
5233         "SDmaErrInt",           /* 5 */
5234         "EgressErrInt",         /* 6 */
5235         "TxeErrInt"             /* 7 */
5236 };
5237
5238 /*
5239  * Return the miscellaneous error interrupt name.
5240  */
5241 static char *is_misc_err_name(char *buf, size_t bsize, unsigned int source)
5242 {
5243         if (source < ARRAY_SIZE(cce_misc_names))
		snprintf(buf, bsize, "%s", cce_misc_names[source]);
5245         else
5246                 snprintf(buf, bsize, "Reserved%u",
5247                          source + IS_GENERAL_ERR_START);
5248
5249         return buf;
5250 }
5251
5252 /*
5253  * Return the SDMA engine error interrupt name.
5254  */
5255 static char *is_sdma_eng_err_name(char *buf, size_t bsize, unsigned int source)
5256 {
5257         snprintf(buf, bsize, "SDmaEngErrInt%u", source);
5258         return buf;
5259 }
5260
5261 /*
5262  * Return the send context error interrupt name.
5263  */
5264 static char *is_sendctxt_err_name(char *buf, size_t bsize, unsigned int source)
5265 {
5266         snprintf(buf, bsize, "SendCtxtErrInt%u", source);
5267         return buf;
5268 }
5269
5270 static const char * const various_names[] = {
5271         "PbcInt",
5272         "GpioAssertInt",
5273         "Qsfp1Int",
5274         "Qsfp2Int",
5275         "TCritInt"
5276 };
5277
5278 /*
5279  * Return the various interrupt name.
5280  */
5281 static char *is_various_name(char *buf, size_t bsize, unsigned int source)
5282 {
5283         if (source < ARRAY_SIZE(various_names))
		snprintf(buf, bsize, "%s", various_names[source]);
5285         else
5286                 snprintf(buf, bsize, "Reserved%u", source + IS_VARIOUS_START);
5287         return buf;
5288 }
5289
5290 /*
5291  * Return the DC interrupt name.
5292  */
5293 static char *is_dc_name(char *buf, size_t bsize, unsigned int source)
5294 {
5295         static const char * const dc_int_names[] = {
5296                 "common",
5297                 "lcb",
5298                 "8051",
5299                 "lbm"   /* local block merge */
5300         };
5301
5302         if (source < ARRAY_SIZE(dc_int_names))
5303                 snprintf(buf, bsize, "dc_%s_int", dc_int_names[source]);
5304         else
5305                 snprintf(buf, bsize, "DCInt%u", source);
5306         return buf;
5307 }
5308
5309 static const char * const sdma_int_names[] = {
5310         "SDmaInt",
5311         "SdmaIdleInt",
5312         "SdmaProgressInt",
5313 };
5314
5315 /*
5316  * Return the SDMA engine interrupt name.
5317  */
5318 static char *is_sdma_eng_name(char *buf, size_t bsize, unsigned int source)
5319 {
5320         /* what interrupt */
5321         unsigned int what  = source / TXE_NUM_SDMA_ENGINES;
5322         /* which engine */
5323         unsigned int which = source % TXE_NUM_SDMA_ENGINES;
5324
	if (likely(what < ARRAY_SIZE(sdma_int_names)))
5326                 snprintf(buf, bsize, "%s%u", sdma_int_names[what], which);
5327         else
5328                 snprintf(buf, bsize, "Invalid SDMA interrupt %u", source);
5329         return buf;
5330 }
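
/*
 * Example decode (assuming TXE_NUM_SDMA_ENGINES == 16): source 18 gives
 * what = 18 / 16 = 1 and which = 18 % 16 = 2, so the name produced is
 * "SdmaIdleInt2" - the idle interrupt of SDMA engine 2.
 */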
5331
5332 /*
5333  * Return the receive available interrupt name.
5334  */
5335 static char *is_rcv_avail_name(char *buf, size_t bsize, unsigned int source)
5336 {
5337         snprintf(buf, bsize, "RcvAvailInt%u", source);
5338         return buf;
5339 }
5340
5341 /*
5342  * Return the receive urgent interrupt name.
5343  */
5344 static char *is_rcv_urgent_name(char *buf, size_t bsize, unsigned int source)
5345 {
5346         snprintf(buf, bsize, "RcvUrgentInt%u", source);
5347         return buf;
5348 }
5349
5350 /*
5351  * Return the send credit interrupt name.
5352  */
5353 static char *is_send_credit_name(char *buf, size_t bsize, unsigned int source)
5354 {
5355         snprintf(buf, bsize, "SendCreditInt%u", source);
5356         return buf;
5357 }
5358
5359 /*
5360  * Return the reserved interrupt name.
5361  */
5362 static char *is_reserved_name(char *buf, size_t bsize, unsigned int source)
5363 {
5364         snprintf(buf, bsize, "Reserved%u", source + IS_RESERVED_START);
5365         return buf;
5366 }
5367
5368 static char *cce_err_status_string(char *buf, int buf_len, u64 flags)
5369 {
5370         return flag_string(buf, buf_len, flags,
5371                            cce_err_status_flags,
5372                            ARRAY_SIZE(cce_err_status_flags));
5373 }
5374
5375 static char *rxe_err_status_string(char *buf, int buf_len, u64 flags)
5376 {
5377         return flag_string(buf, buf_len, flags,
5378                            rxe_err_status_flags,
5379                            ARRAY_SIZE(rxe_err_status_flags));
5380 }
5381
5382 static char *misc_err_status_string(char *buf, int buf_len, u64 flags)
5383 {
5384         return flag_string(buf, buf_len, flags, misc_err_status_flags,
5385                            ARRAY_SIZE(misc_err_status_flags));
5386 }
5387
5388 static char *pio_err_status_string(char *buf, int buf_len, u64 flags)
5389 {
5390         return flag_string(buf, buf_len, flags,
5391                            pio_err_status_flags,
5392                            ARRAY_SIZE(pio_err_status_flags));
5393 }
5394
5395 static char *sdma_err_status_string(char *buf, int buf_len, u64 flags)
5396 {
5397         return flag_string(buf, buf_len, flags,
5398                            sdma_err_status_flags,
5399                            ARRAY_SIZE(sdma_err_status_flags));
5400 }
5401
5402 static char *egress_err_status_string(char *buf, int buf_len, u64 flags)
5403 {
5404         return flag_string(buf, buf_len, flags,
5405                            egress_err_status_flags,
5406                            ARRAY_SIZE(egress_err_status_flags));
5407 }
5408
5409 static char *egress_err_info_string(char *buf, int buf_len, u64 flags)
5410 {
5411         return flag_string(buf, buf_len, flags,
5412                            egress_err_info_flags,
5413                            ARRAY_SIZE(egress_err_info_flags));
5414 }
5415
5416 static char *send_err_status_string(char *buf, int buf_len, u64 flags)
5417 {
5418         return flag_string(buf, buf_len, flags,
5419                            send_err_status_flags,
5420                            ARRAY_SIZE(send_err_status_flags));
5421 }
5422
5423 static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5424 {
5425         char buf[96];
5426         int i = 0;
5427
5428         /*
	 * For most of these errors, there is nothing that can be done except
5430          * report or record it.
5431          */
5432         dd_dev_info(dd, "CCE Error: %s\n",
5433                     cce_err_status_string(buf, sizeof(buf), reg));
5434
5435         if ((reg & CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK) &&
5436             is_ax(dd) && (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)) {
		/*
		 * This error requires a manual drop into SPC freeze mode
		 * and then a fix up.
		 */
5439                 start_freeze_handling(dd->pport, FREEZE_SELF);
5440         }
5441
5442         for (i = 0; i < NUM_CCE_ERR_STATUS_COUNTERS; i++) {
5443                 if (reg & (1ull << i)) {
5444                         incr_cntr64(&dd->cce_err_status_cnt[i]);
5445                         /* maintain a counter over all cce_err_status errors */
5446                         incr_cntr64(&dd->sw_cce_err_status_aggregate);
5447                 }
5448         }
5449 }
5450
5451 /*
5452  * Check counters for receive errors that do not have an interrupt
5453  * associated with them.
5454  */
5455 #define RCVERR_CHECK_TIME 10
5456 static void update_rcverr_timer(unsigned long opaque)
5457 {
5458         struct hfi1_devdata *dd = (struct hfi1_devdata *)opaque;
5459         struct hfi1_pportdata *ppd = dd->pport;
5460         u32 cur_ovfl_cnt = read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL);
5461
5462         if (dd->rcv_ovfl_cnt < cur_ovfl_cnt &&
5463             ppd->port_error_action & OPA_PI_MASK_EX_BUFFER_OVERRUN) {
5464                 dd_dev_info(dd, "%s: PortErrorAction bounce\n", __func__);
		set_link_down_reason(
			ppd, OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN, 0,
			OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN);
5468                 queue_work(ppd->hfi1_wq, &ppd->link_bounce_work);
5469         }
5470         dd->rcv_ovfl_cnt = (u32)cur_ovfl_cnt;
5471
5472         mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
5473 }
5474
5475 static int init_rcverr(struct hfi1_devdata *dd)
5476 {
5477         setup_timer(&dd->rcverr_timer, update_rcverr_timer, (unsigned long)dd);
5478         /* Assume the hardware counter has been reset */
5479         dd->rcv_ovfl_cnt = 0;
5480         return mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
5481 }
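
/*
 * Timing note: since HZ is the number of jiffies per second,
 * "jiffies + HZ * RCVERR_CHECK_TIME" arms the timer RCVERR_CHECK_TIME
 * (10) seconds in the future regardless of the kernel's tick rate.
 */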
5482
5483 static void free_rcverr(struct hfi1_devdata *dd)
5484 {
5485         if (dd->rcverr_timer.data)
5486                 del_timer_sync(&dd->rcverr_timer);
5487         dd->rcverr_timer.data = 0;
5488 }
5489
5490 static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5491 {
5492         char buf[96];
5493         int i = 0;
5494
5495         dd_dev_info(dd, "Receive Error: %s\n",
5496                     rxe_err_status_string(buf, sizeof(buf), reg));
5497
5498         if (reg & ALL_RXE_FREEZE_ERR) {
5499                 int flags = 0;
5500
5501                 /*
5502                  * Freeze mode recovery is disabled for the errors
5503                  * in RXE_FREEZE_ABORT_MASK
5504                  */
5505                 if (is_ax(dd) && (reg & RXE_FREEZE_ABORT_MASK))
5506                         flags = FREEZE_ABORT;
5507
5508                 start_freeze_handling(dd->pport, flags);
5509         }
5510
5511         for (i = 0; i < NUM_RCV_ERR_STATUS_COUNTERS; i++) {
5512                 if (reg & (1ull << i))
5513                         incr_cntr64(&dd->rcv_err_status_cnt[i]);
5514         }
5515 }
5516
5517 static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5518 {
5519         char buf[96];
5520         int i = 0;
5521
	dd_dev_info(dd, "Misc Error: %s\n",
5523                     misc_err_status_string(buf, sizeof(buf), reg));
5524         for (i = 0; i < NUM_MISC_ERR_STATUS_COUNTERS; i++) {
5525                 if (reg & (1ull << i))
5526                         incr_cntr64(&dd->misc_err_status_cnt[i]);
5527         }
5528 }
5529
5530 static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5531 {
5532         char buf[96];
5533         int i = 0;
5534
5535         dd_dev_info(dd, "PIO Error: %s\n",
5536                     pio_err_status_string(buf, sizeof(buf), reg));
5537
5538         if (reg & ALL_PIO_FREEZE_ERR)
5539                 start_freeze_handling(dd->pport, 0);
5540
5541         for (i = 0; i < NUM_SEND_PIO_ERR_STATUS_COUNTERS; i++) {
5542                 if (reg & (1ull << i))
5543                         incr_cntr64(&dd->send_pio_err_status_cnt[i]);
5544         }
5545 }
5546
5547 static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5548 {
5549         char buf[96];
5550         int i = 0;
5551
5552         dd_dev_info(dd, "SDMA Error: %s\n",
5553                     sdma_err_status_string(buf, sizeof(buf), reg));
5554
5555         if (reg & ALL_SDMA_FREEZE_ERR)
5556                 start_freeze_handling(dd->pport, 0);
5557
5558         for (i = 0; i < NUM_SEND_DMA_ERR_STATUS_COUNTERS; i++) {
5559                 if (reg & (1ull << i))
5560                         incr_cntr64(&dd->send_dma_err_status_cnt[i]);
5561         }
5562 }
5563
5564 static inline void __count_port_discards(struct hfi1_pportdata *ppd)
5565 {
5566         incr_cntr64(&ppd->port_xmit_discards);
5567 }
5568
5569 static void count_port_inactive(struct hfi1_devdata *dd)
5570 {
5571         __count_port_discards(dd->pport);
5572 }
5573
5574 /*
5575  * We have had a "disallowed packet" error during egress. Determine the
 * integrity check which failed, and update the relevant error counters, etc.
5577  *
5578  * Note that the SEND_EGRESS_ERR_INFO register has only a single
5579  * bit of state per integrity check, and so we can miss the reason for an
5580  * egress error if more than one packet fails the same integrity check
5581  * since we cleared the corresponding bit in SEND_EGRESS_ERR_INFO.
5582  */
5583 static void handle_send_egress_err_info(struct hfi1_devdata *dd,
5584                                         int vl)
5585 {
5586         struct hfi1_pportdata *ppd = dd->pport;
5587         u64 src = read_csr(dd, SEND_EGRESS_ERR_SOURCE); /* read first */
5588         u64 info = read_csr(dd, SEND_EGRESS_ERR_INFO);
5589         char buf[96];
5590
5591         /* clear down all observed info as quickly as possible after read */
5592         write_csr(dd, SEND_EGRESS_ERR_INFO, info);
5593
5594         dd_dev_info(dd,
5595                     "Egress Error Info: 0x%llx, %s Egress Error Src 0x%llx\n",
5596                     info, egress_err_info_string(buf, sizeof(buf), info), src);
5597
5598         /* Eventually add other counters for each bit */
5599         if (info & PORT_DISCARD_EGRESS_ERRS) {
5600                 int weight, i;
5601
5602                 /*
5603                  * Count all applicable bits as individual errors and
5604                  * attribute them to the packet that triggered this handler.
5605                  * This may not be completely accurate due to limitations
5606                  * on the available hardware error information.  There is
5607                  * a single information register and any number of error
5608                  * packets may have occurred and contributed to it before
5609                  * this routine is called.  This means that:
5610                  * a) If multiple packets with the same error occur before
5611                  *    this routine is called, earlier packets are missed.
5612                  *    There is only a single bit for each error type.
5613                  * b) Errors may not be attributed to the correct VL.
5614                  *    The driver is attributing all bits in the info register
5615                  *    to the packet that triggered this call, but bits
5616                  *    could be an accumulation of different packets with
5617                  *    different VLs.
5618                  * c) A single error packet may have multiple counts attached
5619                  *    to it.  There is no way for the driver to know if
5620                  *    multiple bits set in the info register are due to a
5621                  *    single packet or multiple packets.  The driver assumes
5622                  *    multiple packets.
5623                  */
5624                 weight = hweight64(info & PORT_DISCARD_EGRESS_ERRS);
5625                 for (i = 0; i < weight; i++) {
5626                         __count_port_discards(ppd);
5627                         if (vl >= 0 && vl < TXE_NUM_DATA_VL)
5628                                 incr_cntr64(&ppd->port_xmit_discards_vl[vl]);
5629                         else if (vl == 15)
5630                                 incr_cntr64(&ppd->port_xmit_discards_vl
5631                                             [C_VL_15]);
5632                 }
5633         }
5634 }
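
/*
 * Counting sketch: hweight64() returns the number of set bits, so a
 * hypothetical info value with three discard-class bits set increments
 * port_xmit_discards three times and, when the VL is known, the
 * matching per-VL counter three times as well - consistent with the
 * "assume multiple packets" policy described above.
 */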
5635
5636 /*
5637  * Input value is a bit position within the SEND_EGRESS_ERR_STATUS
5638  * register. Does it represent a 'port inactive' error?
5639  */
5640 static inline int port_inactive_err(u64 posn)
5641 {
5642         return (posn >= SEES(TX_LINKDOWN) &&
5643                 posn <= SEES(TX_INCORRECT_LINK_STATE));
5644 }
5645
5646 /*
5647  * Input value is a bit position within the SEND_EGRESS_ERR_STATUS
5648  * register. Does it represent a 'disallowed packet' error?
5649  */
5650 static inline int disallowed_pkt_err(int posn)
5651 {
5652         return (posn >= SEES(TX_SDMA0_DISALLOWED_PACKET) &&
5653                 posn <= SEES(TX_SDMA15_DISALLOWED_PACKET));
5654 }
5655
5656 /*
5657  * Input value is a bit position of one of the SDMA engine disallowed
5658  * packet errors.  Return which engine.  Use of this must be guarded by
5659  * disallowed_pkt_err().
5660  */
5661 static inline int disallowed_pkt_engine(int posn)
5662 {
5663         return posn - SEES(TX_SDMA0_DISALLOWED_PACKET);
5664 }
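
/*
 * Example (hypothetical status bit): if TxSdma3DisallowedPacketErr is
 * the bit in question, then
 *
 *	posn = SEES(TX_SDMA3_DISALLOWED_PACKET);
 *	disallowed_pkt_engine(posn)	-> 3
 *
 * because the per-engine disallowed-packet bits sit in one contiguous
 * run starting at engine 0, as the disallowed_pkt_err() range check
 * above implies.
 */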
5665
5666 /*
 * Translate an SDMA engine to a VL.  Return -1 if the translation cannot
5668  * be done.
5669  */
5670 static int engine_to_vl(struct hfi1_devdata *dd, int engine)
5671 {
5672         struct sdma_vl_map *m;
5673         int vl;
5674
5675         /* range check */
5676         if (engine < 0 || engine >= TXE_NUM_SDMA_ENGINES)
5677                 return -1;
5678
5679         rcu_read_lock();
5680         m = rcu_dereference(dd->sdma_map);
5681         vl = m->engine_to_vl[engine];
5682         rcu_read_unlock();
5683
5684         return vl;
5685 }
5686
5687 /*
 * Translate the send context (software index) into a VL.  Return -1 if the
5689  * translation cannot be done.
5690  */
5691 static int sc_to_vl(struct hfi1_devdata *dd, int sw_index)
5692 {
5693         struct send_context_info *sci;
5694         struct send_context *sc;
5695         int i;
5696
5697         sci = &dd->send_contexts[sw_index];
5698
5699         /* there is no information for user (PSM) and ack contexts */
5700         if ((sci->type != SC_KERNEL) && (sci->type != SC_VL15))
5701                 return -1;
5702
5703         sc = sci->sc;
5704         if (!sc)
5705                 return -1;
5706         if (dd->vld[15].sc == sc)
5707                 return 15;
5708         for (i = 0; i < num_vls; i++)
5709                 if (dd->vld[i].sc == sc)
5710                         return i;
5711
5712         return -1;
5713 }
5714
5715 static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5716 {
5717         u64 reg_copy = reg, handled = 0;
5718         char buf[96];
5719         int i = 0;
5720
5721         if (reg & ALL_TXE_EGRESS_FREEZE_ERR)
5722                 start_freeze_handling(dd->pport, 0);
5723         else if (is_ax(dd) &&
5724                  (reg & SEND_EGRESS_ERR_STATUS_TX_CREDIT_RETURN_VL_ERR_SMASK) &&
5725                  (dd->icode != ICODE_FUNCTIONAL_SIMULATOR))
5726                 start_freeze_handling(dd->pport, 0);
5727
5728         while (reg_copy) {
5729                 int posn = fls64(reg_copy);
5730                 /* fls64() returns a 1-based offset, we want it zero based */
5731                 int shift = posn - 1;
5732                 u64 mask = 1ULL << shift;
5733
5734                 if (port_inactive_err(shift)) {
5735                         count_port_inactive(dd);
5736                         handled |= mask;
5737                 } else if (disallowed_pkt_err(shift)) {
5738                         int vl = engine_to_vl(dd, disallowed_pkt_engine(shift));
5739
5740                         handle_send_egress_err_info(dd, vl);
5741                         handled |= mask;
5742                 }
5743                 reg_copy &= ~mask;
5744         }
5745
5746         reg &= ~handled;
5747
5748         if (reg)
5749                 dd_dev_info(dd, "Egress Error: %s\n",
5750                             egress_err_status_string(buf, sizeof(buf), reg));
5751
5752         for (i = 0; i < NUM_SEND_EGRESS_ERR_STATUS_COUNTERS; i++) {
5753                 if (reg & (1ull << i))
5754                         incr_cntr64(&dd->send_egress_err_status_cnt[i]);
5755         }
5756 }
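
/*
 * Bit-walk sketch: for a hypothetical reg_copy of 0x9 (bits 0 and 3
 * set), the loop above first sees fls64() == 4, handles shift 3 and
 * clears bit 3, then sees fls64() == 1 and handles shift 0.  Every set
 * bit is therefore visited exactly once, from most to least
 * significant.
 */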
5757
5758 static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5759 {
5760         char buf[96];
5761         int i = 0;
5762
5763         dd_dev_info(dd, "Send Error: %s\n",
5764                     send_err_status_string(buf, sizeof(buf), reg));
5765
5766         for (i = 0; i < NUM_SEND_ERR_STATUS_COUNTERS; i++) {
5767                 if (reg & (1ull << i))
5768                         incr_cntr64(&dd->send_err_status_cnt[i]);
5769         }
5770 }
5771
5772 /*
5773  * The maximum number of times the error clear down will loop before
5774  * blocking a repeating error.  This value is arbitrary.
5775  */
5776 #define MAX_CLEAR_COUNT 20
5777
5778 /*
5779  * Clear and handle an error register.  All error interrupts are funneled
5780  * through here to have a central location to correctly handle single-
5781  * or multi-shot errors.
5782  *
5783  * For non per-context registers, call this routine with a context value
5784  * of 0 so the per-context offset is zero.
5785  *
5786  * If the handler loops too many times, assume that something is wrong
5787  * and can't be fixed, so mask the error bits.
5788  */
5789 static void interrupt_clear_down(struct hfi1_devdata *dd,
5790                                  u32 context,
5791                                  const struct err_reg_info *eri)
5792 {
5793         u64 reg;
5794         u32 count;
5795
5796         /* read in a loop until no more errors are seen */
5797         count = 0;
5798         while (1) {
5799                 reg = read_kctxt_csr(dd, context, eri->status);
5800                 if (reg == 0)
5801                         break;
5802                 write_kctxt_csr(dd, context, eri->clear, reg);
5803                 if (likely(eri->handler))
5804                         eri->handler(dd, context, reg);
5805                 count++;
5806                 if (count > MAX_CLEAR_COUNT) {
5807                         u64 mask;
5808
5809                         dd_dev_err(dd, "Repeating %s bits 0x%llx - masking\n",
5810                                    eri->desc, reg);
5811                         /*
5812                          * Read-modify-write so any other masked bits
5813                          * remain masked.
5814                          */
5815                         mask = read_kctxt_csr(dd, context, eri->mask);
5816                         mask &= ~reg;
5817                         write_kctxt_csr(dd, context, eri->mask, mask);
5818                         break;
5819                 }
5820         }
5821 }
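
/*
 * Table-entry sketch (hypothetical CSR names): a caller such as
 * is_misc_err_int() below supplies an err_reg_info of the shape
 *
 *	{ .status  = MISC_ERR_STATUS,
 *	  .clear   = MISC_ERR_CLEAR,
 *	  .mask    = MISC_ERR_MASK,
 *	  .handler = handle_misc_err,
 *	  .desc    = "MiscErr" }
 *
 * so the read/clear/handle loop and the mask-on-repeat fallback work
 * identically for every second-tier error register.
 */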
5822
5823 /*
5824  * CCE block "misc" interrupt.  Source is < 16.
5825  */
5826 static void is_misc_err_int(struct hfi1_devdata *dd, unsigned int source)
5827 {
5828         const struct err_reg_info *eri = &misc_errs[source];
5829
5830         if (eri->handler) {
5831                 interrupt_clear_down(dd, 0, eri);
5832         } else {
5833                 dd_dev_err(dd, "Unexpected misc interrupt (%u) - reserved\n",
5834                            source);
5835         }
5836 }
5837
5838 static char *send_context_err_status_string(char *buf, int buf_len, u64 flags)
5839 {
5840         return flag_string(buf, buf_len, flags,
5841                            sc_err_status_flags,
5842                            ARRAY_SIZE(sc_err_status_flags));
5843 }
5844
5845 /*
5846  * Send context error interrupt.  Source (hw_context) is < 160.
5847  *
5848  * All send context errors cause the send context to halt.  The normal
5849  * clear-down mechanism cannot be used because we cannot clear the
5850  * error bits until several other long-running items are done first.
5851  * This is OK because with the context halted, nothing else is going
5852  * to happen on it anyway.
5853  */
5854 static void is_sendctxt_err_int(struct hfi1_devdata *dd,
5855                                 unsigned int hw_context)
5856 {
5857         struct send_context_info *sci;
5858         struct send_context *sc;
5859         char flags[96];
5860         u64 status;
5861         u32 sw_index;
5862         int i = 0;
5863
5864         sw_index = dd->hw_to_sw[hw_context];
5865         if (sw_index >= dd->num_send_contexts) {
5866                 dd_dev_err(dd,
5867                            "out of range sw index %u for send context %u\n",
5868                            sw_index, hw_context);
5869                 return;
5870         }
5871         sci = &dd->send_contexts[sw_index];
5872         sc = sci->sc;
5873         if (!sc) {
5874                 dd_dev_err(dd, "%s: context %u(%u): no sc?\n", __func__,
5875                            sw_index, hw_context);
5876                 return;
5877         }
5878
5879         /* tell the software that a halt has begun */
5880         sc_stop(sc, SCF_HALTED);
5881
5882         status = read_kctxt_csr(dd, hw_context, SEND_CTXT_ERR_STATUS);
5883
5884         dd_dev_info(dd, "Send Context %u(%u) Error: %s\n", sw_index, hw_context,
5885                     send_context_err_status_string(flags, sizeof(flags),
5886                                                    status));
5887
5888         if (status & SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK)
5889                 handle_send_egress_err_info(dd, sc_to_vl(dd, sw_index));
5890
5891         /*
5892          * Automatically restart halted kernel contexts out of interrupt
5893          * context.  User contexts must ask the driver to restart the context.
5894          */
5895         if (sc->type != SC_USER)
5896                 queue_work(dd->pport->hfi1_wq, &sc->halt_work);
5897
5898         /*
5899          * Update the counters for the corresponding status bits.
5900          * Note that these particular counters are aggregated over all
5901          * 160 contexts.
5902          */
5903         for (i = 0; i < NUM_SEND_CTXT_ERR_STATUS_COUNTERS; i++) {
5904                 if (status & (1ull << i))
5905                         incr_cntr64(&dd->sw_ctxt_err_status_cnt[i]);
5906         }
5907 }
5908
5909 static void handle_sdma_eng_err(struct hfi1_devdata *dd,
5910                                 unsigned int source, u64 status)
5911 {
5912         struct sdma_engine *sde;
5913         int i = 0;
5914
5915         sde = &dd->per_sdma[source];
5916 #ifdef CONFIG_SDMA_VERBOSITY
5917         dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
5918                    slashstrip(__FILE__), __LINE__, __func__);
5919         dd_dev_err(sde->dd, "CONFIG SDMA(%u) source: %u status 0x%llx\n",
5920                    sde->this_idx, source, (unsigned long long)status);
5921 #endif
5922         sde->err_cnt++;
5923         sdma_engine_error(sde, status);
5924
	/*
	 * Update the counters for the corresponding status bits.
	 * Note that these particular counters are aggregated over
	 * all 16 DMA engines.
	 */
5930         for (i = 0; i < NUM_SEND_DMA_ENG_ERR_STATUS_COUNTERS; i++) {
5931                 if (status & (1ull << i))
5932                         incr_cntr64(&dd->sw_send_dma_eng_err_status_cnt[i]);
5933         }
5934 }
5935
5936 /*
5937  * CCE block SDMA error interrupt.  Source is < 16.
5938  */
5939 static void is_sdma_eng_err_int(struct hfi1_devdata *dd, unsigned int source)
5940 {
5941 #ifdef CONFIG_SDMA_VERBOSITY
5942         struct sdma_engine *sde = &dd->per_sdma[source];
5943
5944         dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
5945                    slashstrip(__FILE__), __LINE__, __func__);
5946         dd_dev_err(dd, "CONFIG SDMA(%u) source: %u\n", sde->this_idx,
5947                    source);
5948         sdma_dumpstate(sde);
5949 #endif
5950         interrupt_clear_down(dd, source, &sdma_eng_err);
5951 }
5952
5953 /*
5954  * CCE block "various" interrupt.  Source is < 8.
5955  */
5956 static void is_various_int(struct hfi1_devdata *dd, unsigned int source)
5957 {
5958         const struct err_reg_info *eri = &various_err[source];
5959
5960         /*
5961          * TCritInt cannot go through interrupt_clear_down()
5962          * because it is not a second tier interrupt. The handler
5963          * should be called directly.
5964          */
5965         if (source == TCRIT_INT_SOURCE)
5966                 handle_temp_err(dd);
5967         else if (eri->handler)
5968                 interrupt_clear_down(dd, 0, eri);
5969         else
5970                 dd_dev_info(dd,
5971                             "%s: Unimplemented/reserved interrupt %d\n",
5972                             __func__, source);
5973 }
5974
5975 static void handle_qsfp_int(struct hfi1_devdata *dd, u32 src_ctx, u64 reg)
5976 {
5977         /* src_ctx is always zero */
5978         struct hfi1_pportdata *ppd = dd->pport;
5979         unsigned long flags;
5980         u64 qsfp_int_mgmt = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);
5981
5982         if (reg & QSFP_HFI0_MODPRST_N) {
5983                 if (!qsfp_mod_present(ppd)) {
5984                         dd_dev_info(dd, "%s: QSFP module removed\n",
5985                                     __func__);
5986
5987                         ppd->driver_link_ready = 0;
5988                         /*
5989                          * Cable removed, reset all our information about the
5990                          * cache and cable capabilities
5991                          */
5992
5993                         spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
5994                         /*
5995                          * We don't set cache_refresh_required here as we expect
5996                          * an interrupt when a cable is inserted
5997                          */
5998                         ppd->qsfp_info.cache_valid = 0;
5999                         ppd->qsfp_info.reset_needed = 0;
6000                         ppd->qsfp_info.limiting_active = 0;
6001                         spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
6002                                                flags);
6003                         /* Invert the ModPresent pin now to detect plug-in */
6004                         write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT :
6005                                   ASIC_QSFP1_INVERT, qsfp_int_mgmt);
6006
6007                         if ((ppd->offline_disabled_reason >
6008                           HFI1_ODR_MASK(
6009                           OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED)) ||
6010                           (ppd->offline_disabled_reason ==
6011                           HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE)))
6012                                 ppd->offline_disabled_reason =
6013                                 HFI1_ODR_MASK(
6014                                 OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED);
6015
6016                         if (ppd->host_link_state == HLS_DN_POLL) {
6017                                 /*
6018                                  * The link is still in POLL. This means
6019                                  * that the normal link down processing
6020                                  * will not happen. We have to do it here
6021                                  * before turning the DC off.
6022                                  */
6023                                 queue_work(ppd->hfi1_wq, &ppd->link_down_work);
6024                         }
6025                 } else {
6026                         dd_dev_info(dd, "%s: QSFP module inserted\n",
6027                                     __func__);
6028
6029                         spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
6030                         ppd->qsfp_info.cache_valid = 0;
6031                         ppd->qsfp_info.cache_refresh_required = 1;
6032                         spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
6033                                                flags);
6034
6035                         /*
6036                          * Stop inversion of ModPresent pin to detect
6037                          * removal of the cable
6038                          */
6039                         qsfp_int_mgmt &= ~(u64)QSFP_HFI0_MODPRST_N;
6040                         write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT :
6041                                   ASIC_QSFP1_INVERT, qsfp_int_mgmt);
6042
6043                         ppd->offline_disabled_reason =
6044                                 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT);
6045                 }
6046         }
6047
6048         if (reg & QSFP_HFI0_INT_N) {
6049                 dd_dev_info(dd, "%s: Interrupt received from QSFP module\n",
6050                             __func__);
6051                 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
6052                 ppd->qsfp_info.check_interrupt_flags = 1;
6053                 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock, flags);
6054         }
6055
6056         /* Schedule the QSFP work only if there is a cable attached. */
6057         if (qsfp_mod_present(ppd))
6058                 queue_work(ppd->hfi1_wq, &ppd->qsfp_info.qsfp_work);
6059 }
6060
6061 static int request_host_lcb_access(struct hfi1_devdata *dd)
6062 {
6063         int ret;
6064
6065         ret = do_8051_command(dd, HCMD_MISC,
6066                               (u64)HCMD_MISC_REQUEST_LCB_ACCESS <<
6067                               LOAD_DATA_FIELD_ID_SHIFT, NULL);
6068         if (ret != HCMD_SUCCESS) {
6069                 dd_dev_err(dd, "%s: command failed with error %d\n",
6070                            __func__, ret);
6071         }
6072         return ret == HCMD_SUCCESS ? 0 : -EBUSY;
6073 }
6074
6075 static int request_8051_lcb_access(struct hfi1_devdata *dd)
6076 {
6077         int ret;
6078
6079         ret = do_8051_command(dd, HCMD_MISC,
6080                               (u64)HCMD_MISC_GRANT_LCB_ACCESS <<
6081                               LOAD_DATA_FIELD_ID_SHIFT, NULL);
6082         if (ret != HCMD_SUCCESS) {
6083                 dd_dev_err(dd, "%s: command failed with error %d\n",
6084                            __func__, ret);
6085         }
6086         return ret == HCMD_SUCCESS ? 0 : -EBUSY;
6087 }
6088
6089 /*
6090  * Set the LCB selector - allow host access.  The DCC selector always
6091  * points to the host.
6092  */
6093 static inline void set_host_lcb_access(struct hfi1_devdata *dd)
6094 {
6095         write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
6096                   DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK |
6097                   DC_DC8051_CFG_CSR_ACCESS_SEL_LCB_SMASK);
6098 }
6099
6100 /*
6101  * Clear the LCB selector - allow 8051 access.  The DCC selector always
6102  * points to the host.
6103  */
6104 static inline void set_8051_lcb_access(struct hfi1_devdata *dd)
6105 {
6106         write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
6107                   DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK);
6108 }
6109
6110 /*
6111  * Acquire LCB access from the 8051.  If the host already has access,
6112  * just increment a counter.  Otherwise, inform the 8051 that the
6113  * host is taking access.
6114  *
6115  * Returns:
6116  *      0 on success
6117  *      -EBUSY if the 8051 has control and cannot be disturbed
6118  *      -errno if unable to acquire access from the 8051
6119  */
6120 int acquire_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
6121 {
6122         struct hfi1_pportdata *ppd = dd->pport;
6123         int ret = 0;
6124
6125         /*
6126          * Use the host link state lock so the operation of this routine
6127          * { link state check, selector change, count increment } can occur
6128          * as a unit against a link state change.  Otherwise there is a
6129          * race between the state change and the count increment.
6130          */
6131         if (sleep_ok) {
6132                 mutex_lock(&ppd->hls_lock);
6133         } else {
6134                 while (!mutex_trylock(&ppd->hls_lock))
6135                         udelay(1);
6136         }
6137
6138         /* this access is valid only when the link is up */
6139         if (ppd->host_link_state & HLS_DOWN) {
6140                 dd_dev_info(dd, "%s: link state %s not up\n",
6141                             __func__, link_state_name(ppd->host_link_state));
6142                 ret = -EBUSY;
6143                 goto done;
6144         }
6145
6146         if (dd->lcb_access_count == 0) {
6147                 ret = request_host_lcb_access(dd);
6148                 if (ret) {
6149                         dd_dev_err(dd,
6150                                    "%s: unable to acquire LCB access, err %d\n",
6151                                    __func__, ret);
6152                         goto done;
6153                 }
6154                 set_host_lcb_access(dd);
6155         }
6156         dd->lcb_access_count++;
6157 done:
6158         mutex_unlock(&ppd->hls_lock);
6159         return ret;
6160 }
6161
6162 /*
6163  * Release LCB access by decrementing the use count.  If the count is moving
 * from 1 to 0, inform the 8051 that it has control back.
6165  *
6166  * Returns:
6167  *      0 on success
6168  *      -errno if unable to release access to the 8051
6169  */
6170 int release_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
6171 {
6172         int ret = 0;
6173
6174         /*
6175          * Use the host link state lock because the acquire needed it.
6176          * Here, we only need to keep { selector change, count decrement }
6177          * as a unit.
6178          */
6179         if (sleep_ok) {
6180                 mutex_lock(&dd->pport->hls_lock);
6181         } else {
6182                 while (!mutex_trylock(&dd->pport->hls_lock))
6183                         udelay(1);
6184         }
6185
6186         if (dd->lcb_access_count == 0) {
6187                 dd_dev_err(dd, "%s: LCB access count is zero.  Skipping.\n",
6188                            __func__);
6189                 goto done;
6190         }
6191
6192         if (dd->lcb_access_count == 1) {
6193                 set_8051_lcb_access(dd);
6194                 ret = request_8051_lcb_access(dd);
6195                 if (ret) {
6196                         dd_dev_err(dd,
6197                                    "%s: unable to release LCB access, err %d\n",
6198                                    __func__, ret);
6199                         /* restore host access if the grant didn't work */
6200                         set_host_lcb_access(dd);
6201                         goto done;
6202                 }
6203         }
6204         dd->lcb_access_count--;
6205 done:
6206         mutex_unlock(&dd->pport->hls_lock);
6207         return ret;
6208 }
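
/*
 * Usage sketch: a caller needing direct LCB CSR access brackets the
 * register traffic with the pair above (DC_LCB_CFG_RUN is just an
 * arbitrary example register here):
 *
 *	if (acquire_lcb_access(dd, 1) == 0) {
 *		reg = read_csr(dd, DC_LCB_CFG_RUN);
 *		release_lcb_access(dd, 1);
 *	}
 *
 * Nesting is safe because only the 0 -> 1 and 1 -> 0 transitions of
 * lcb_access_count actually talk to the 8051.
 */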
6209
6210 /*
6211  * Initialize LCB access variables and state.  Called during driver load,
6212  * after most of the initialization is finished.
6213  *
6214  * The DC default is LCB access on for the host.  The driver defaults to
6215  * leaving access to the 8051.  Assign access now - this constrains the call
6216  * to this routine to be after all LCB set-up is done.  In particular, after
 * hfi1_init_dd() -> set_up_interrupts() -> clear_all_interrupts()
6218  */
6219 static void init_lcb_access(struct hfi1_devdata *dd)
6220 {
6221         dd->lcb_access_count = 0;
6222 }
6223
6224 /*
 * Write a response back to an 8051 request.
6226  */
6227 static void hreq_response(struct hfi1_devdata *dd, u8 return_code, u16 rsp_data)
6228 {
6229         write_csr(dd, DC_DC8051_CFG_EXT_DEV_0,
6230                   DC_DC8051_CFG_EXT_DEV_0_COMPLETED_SMASK |
6231                   (u64)return_code <<
6232                   DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT |
6233                   (u64)rsp_data << DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
6234 }
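
/*
 * Packing sketch: the single 64-bit write above sets COMPLETED and
 * places the return code and the 16-bit response data in their fields,
 * so e.g. hreq_response(dd, HREQ_SUCCESS, 0x1234), with 0x1234 as an
 * arbitrary example value, finishes the request with a success code
 * and hands 0x1234 back to the 8051 in RSP_DATA.
 */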
6235
6236 /*
6237  * Handle host requests from the 8051.
6238  */
6239 static void handle_8051_request(struct hfi1_pportdata *ppd)
6240 {
6241         struct hfi1_devdata *dd = ppd->dd;
6242         u64 reg;
6243         u16 data = 0;
6244         u8 type;
6245
6246         reg = read_csr(dd, DC_DC8051_CFG_EXT_DEV_1);
6247         if ((reg & DC_DC8051_CFG_EXT_DEV_1_REQ_NEW_SMASK) == 0)
6248                 return; /* no request */
6249
6250         /* zero out COMPLETED so the response is seen */
6251         write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, 0);
6252
6253         /* extract request details */
6254         type = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_SHIFT)
6255                         & DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_MASK;
6256         data = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT)
6257                         & DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_MASK;
6258
6259         switch (type) {
6260         case HREQ_LOAD_CONFIG:
6261         case HREQ_SAVE_CONFIG:
6262         case HREQ_READ_CONFIG:
6263         case HREQ_SET_TX_EQ_ABS:
6264         case HREQ_SET_TX_EQ_REL:
6265         case HREQ_ENABLE:
6266                 dd_dev_info(dd, "8051 request: request 0x%x not supported\n",
6267                             type);
6268                 hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
6269                 break;
6270         case HREQ_CONFIG_DONE:
6271                 hreq_response(dd, HREQ_SUCCESS, 0);
6272                 break;
6273
6274         case HREQ_INTERFACE_TEST:
6275                 hreq_response(dd, HREQ_SUCCESS, data);
6276                 break;
6277         default:
6278                 dd_dev_err(dd, "8051 request: unknown request 0x%x\n", type);
6279                 hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
6280                 break;
6281         }
6282 }
6283
6284 static void write_global_credit(struct hfi1_devdata *dd,
6285                                 u8 vau, u16 total, u16 shared)
6286 {
6287         write_csr(dd, SEND_CM_GLOBAL_CREDIT,
6288                   ((u64)total <<
6289                    SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT) |
6290                   ((u64)shared <<
6291                    SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT) |
6292                   ((u64)vau << SEND_CM_GLOBAL_CREDIT_AU_SHIFT));
6293 }
6294
6295 /*
6296  * Set up initial VL15 credits for the remote.  Assumes the rest of
6297  * the CM credit registers are zero from a previous global or credit reset.
6298  */
6299 void set_up_vl15(struct hfi1_devdata *dd, u8 vau, u16 vl15buf)
6300 {
6301         /* leave shared count at zero for both global and VL15 */
6302         write_global_credit(dd, vau, vl15buf, 0);
6303
6304         write_csr(dd, SEND_CM_CREDIT_VL15, (u64)vl15buf
6305                   << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT);
6306 }
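
/*
 * Worked example: set_up_vl15(dd, 1, 0x40) programs a global total
 * credit limit of 0x40 with a shared limit of 0 and a vAU encoding of
 * 1, then gives VL15 a dedicated limit of the same 0x40 credits.
 */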
6307
6308 /*
6309  * Zero all credit details from the previous connection and
6310  * reset the CM manager's internal counters.
6311  */
6312 void reset_link_credits(struct hfi1_devdata *dd)
6313 {
6314         int i;
6315
6316         /* remove all previous VL credit limits */
6317         for (i = 0; i < TXE_NUM_DATA_VL; i++)
6318                 write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0);
6319         write_csr(dd, SEND_CM_CREDIT_VL15, 0);
6320         write_global_credit(dd, 0, 0, 0);
6321         /* reset the CM block */
6322         pio_send_control(dd, PSC_CM_RESET);
6323 }
6324
6325 /* convert a vCU to a CU */
6326 static u32 vcu_to_cu(u8 vcu)
6327 {
6328         return 1 << vcu;
6329 }
6330
6331 /* convert a CU to a vCU */
6332 static u8 cu_to_vcu(u32 cu)
6333 {
6334         return ilog2(cu);
6335 }
6336
6337 /* convert a vAU to an AU */
6338 static u32 vau_to_au(u8 vau)
6339 {
6340         return 8 * (1 << vau);
6341 }
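
/*
 * Worked examples of the encodings above:
 *	vcu_to_cu(3) == 1 << 3       == 8
 *	cu_to_vcu(8) == ilog2(8)     == 3
 *	vau_to_au(2) == 8 * (1 << 2) == 32
 * vCU is a plain log2 encoding of CU, while vAU encodes AU in units of
 * 8, so vAU 0 means AU == 8 - matching the Z-value discussion in
 * handle_verify_cap() below.
 */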
6342
6343 static void set_linkup_defaults(struct hfi1_pportdata *ppd)
6344 {
6345         ppd->sm_trap_qp = 0x0;
6346         ppd->sa_qp = 0x1;
6347 }
6348
6349 /*
6350  * Graceful LCB shutdown.  This leaves the LCB FIFOs in reset.
6351  */
6352 static void lcb_shutdown(struct hfi1_devdata *dd, int abort)
6353 {
6354         u64 reg;
6355
6356         /* clear lcb run: LCB_CFG_RUN.EN = 0 */
6357         write_csr(dd, DC_LCB_CFG_RUN, 0);
6358         /* set tx fifo reset: LCB_CFG_TX_FIFOS_RESET.VAL = 1 */
6359         write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET,
6360                   1ull << DC_LCB_CFG_TX_FIFOS_RESET_VAL_SHIFT);
6361         /* set dcc reset csr: DCC_CFG_RESET.{reset_lcb,reset_rx_fpe} = 1 */
6362         dd->lcb_err_en = read_csr(dd, DC_LCB_ERR_EN);
6363         reg = read_csr(dd, DCC_CFG_RESET);
6364         write_csr(dd, DCC_CFG_RESET, reg |
6365                   (1ull << DCC_CFG_RESET_RESET_LCB_SHIFT) |
6366                   (1ull << DCC_CFG_RESET_RESET_RX_FPE_SHIFT));
6367         (void)read_csr(dd, DCC_CFG_RESET); /* make sure the write completed */
6368         if (!abort) {
6369                 udelay(1);    /* must hold for the longer of 16 cclks or 20ns */
6370                 write_csr(dd, DCC_CFG_RESET, reg);
6371                 write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
6372         }
6373 }
6374
6375 /*
6376  * This routine should be called after the link has been transitioned to
6377  * OFFLINE (OFFLINE state has the side effect of putting the SerDes into
6378  * reset).
6379  *
6380  * The expectation is that the caller of this routine would have taken
6381  * care of properly transitioning the link into the correct state.
6382  */
6383 static void dc_shutdown(struct hfi1_devdata *dd)
6384 {
6385         unsigned long flags;
6386
6387         spin_lock_irqsave(&dd->dc8051_lock, flags);
6388         if (dd->dc_shutdown) {
6389                 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
6390                 return;
6391         }
6392         dd->dc_shutdown = 1;
6393         spin_unlock_irqrestore(&dd->dc8051_lock, flags);
6394         /* Shutdown the LCB */
6395         lcb_shutdown(dd, 1);
6396         /*
6397          * Going to OFFLINE would have caused the 8051 to put the
6398          * SerDes into reset already.  Just need to shut down the
6399          * 8051 itself.
6400          */
6401         write_csr(dd, DC_DC8051_CFG_RST, 0x1);
6402 }
6403
6404 /*
6405  * Calling this after the DC has been brought out of reset should not
6406  * do any damage.
6407  */
6408 static void dc_start(struct hfi1_devdata *dd)
6409 {
6410         unsigned long flags;
6411         int ret;
6412
6413         spin_lock_irqsave(&dd->dc8051_lock, flags);
6414         if (!dd->dc_shutdown)
6415                 goto done;
6416         spin_unlock_irqrestore(&dd->dc8051_lock, flags);
6417         /* Take the 8051 out of reset */
6418         write_csr(dd, DC_DC8051_CFG_RST, 0ull);
6419         /* Wait until 8051 is ready */
6420         ret = wait_fm_ready(dd, TIMEOUT_8051_START);
6421         if (ret) {
6422                 dd_dev_err(dd, "%s: timeout starting 8051 firmware\n",
6423                            __func__);
6424         }
6425         /* Take away reset for LCB and RX FPE (set in lcb_shutdown). */
6426         write_csr(dd, DCC_CFG_RESET, 0x10);
6427         /* lcb_shutdown() with abort=1 does not restore these */
6428         write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
6429         spin_lock_irqsave(&dd->dc8051_lock, flags);
6430         dd->dc_shutdown = 0;
6431 done:
6432         spin_unlock_irqrestore(&dd->dc8051_lock, flags);
6433 }
6434
6435 /*
6436  * These LCB adjustments are for the Aurora SerDes core in the FPGA.
6437  */
6438 static void adjust_lcb_for_fpga_serdes(struct hfi1_devdata *dd)
6439 {
6440         u64 rx_radr, tx_radr;
6441         u32 version;
6442
6443         if (dd->icode != ICODE_FPGA_EMULATION)
6444                 return;
6445
6446         /*
6447          * These LCB defaults on the _s emulator are good, nothing to do here:
6448          *      LCB_CFG_TX_FIFOS_RADR
6449          *      LCB_CFG_RX_FIFOS_RADR
6450          *      LCB_CFG_LN_DCLK
6451          *      LCB_CFG_IGNORE_LOST_RCLK
6452          */
6453         if (is_emulator_s(dd))
6454                 return;
6455         /* else this is _p */
6456
6457         version = emulator_rev(dd);
6458         if (!is_ax(dd))
6459                 version = 0x2d; /* all B0 use 0x2d or higher settings */
6460
6461         if (version <= 0x12) {
6462                 /* release 0x12 and below */
6463
6464                 /*
6465                  * LCB_CFG_RX_FIFOS_RADR.RST_VAL = 0x9
6466                  * LCB_CFG_RX_FIFOS_RADR.OK_TO_JUMP_VAL = 0x9
6467                  * LCB_CFG_RX_FIFOS_RADR.DO_NOT_JUMP_VAL = 0xa
6468                  */
6469                 rx_radr =
6470                       0xaull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6471                     | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6472                     | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6473                 /*
6474                  * LCB_CFG_TX_FIFOS_RADR.ON_REINIT = 0 (default)
6475                  * LCB_CFG_TX_FIFOS_RADR.RST_VAL = 6
6476                  */
6477                 tx_radr = 6ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6478         } else if (version <= 0x18) {
6479                 /* release 0x13 up to 0x18 */
6480                 /* LCB_CFG_RX_FIFOS_RADR = 0x988 */
6481                 rx_radr =
6482                       0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6483                     | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6484                     | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6485                 tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6486         } else if (version == 0x19) {
6487                 /* release 0x19 */
6488                 /* LCB_CFG_RX_FIFOS_RADR = 0xa99 */
6489                 rx_radr =
6490                       0xAull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6491                     | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6492                     | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6493                 tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6494         } else if (version == 0x1a) {
6495                 /* release 0x1a */
6496                 /* LCB_CFG_RX_FIFOS_RADR = 0x988 */
6497                 rx_radr =
6498                       0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6499                     | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6500                     | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6501                 tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6502                 write_csr(dd, DC_LCB_CFG_LN_DCLK, 1ull);
6503         } else {
6504                 /* release 0x1b and higher */
6505                 /* LCB_CFG_RX_FIFOS_RADR = 0x877 */
6506                 rx_radr =
6507                       0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6508                     | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6509                     | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6510                 tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6511         }
6512
6513         write_csr(dd, DC_LCB_CFG_RX_FIFOS_RADR, rx_radr);
6514         /* LCB_CFG_IGNORE_LOST_RCLK.EN = 1 */
6515         write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK,
6516                   DC_LCB_CFG_IGNORE_LOST_RCLK_EN_SMASK);
6517         write_csr(dd, DC_LCB_CFG_TX_FIFOS_RADR, tx_radr);
6518 }
6519
6520 /*
6521  * Handle an SMA idle message
6522  *
6523  * This is a work-queue function outside of the interrupt.
6524  */
6525 void handle_sma_message(struct work_struct *work)
6526 {
6527         struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6528                                                         sma_message_work);
6529         struct hfi1_devdata *dd = ppd->dd;
6530         u64 msg;
6531         int ret;
6532
6533         /*
6534          * msg is bytes 1-4 of the 40-bit idle message - the command code
6535          * is stripped off
6536          */
6537         ret = read_idle_sma(dd, &msg);
6538         if (ret)
6539                 return;
6540         dd_dev_info(dd, "%s: SMA message 0x%llx\n", __func__, msg);
6541         /*
6542          * React to the SMA message.  Byte[1] (0 for us) is the command.
6543          */
6544         switch (msg & 0xff) {
6545         case SMA_IDLE_ARM:
6546                 /*
6547                  * See OPAv1 table 9-14 - HFI and External Switch Ports Key
6548                  * State Transitions
6549                  *
6550                  * Only expected in INIT or ARMED, discard otherwise.
6551                  */
6552                 if (ppd->host_link_state & (HLS_UP_INIT | HLS_UP_ARMED))
6553                         ppd->neighbor_normal = 1;
6554                 break;
6555         case SMA_IDLE_ACTIVE:
6556                 /*
6557                  * See OPAv1 table 9-14 - HFI and External Switch Ports Key
6558                  * State Transitions
6559                  *
6560                  * Can activate the node.  Discard otherwise.
6561                  */
6562                 if (ppd->host_link_state == HLS_UP_ARMED &&
6563                     ppd->is_active_optimize_enabled) {
6564                         ppd->neighbor_normal = 1;
6565                         ret = set_link_state(ppd, HLS_UP_ACTIVE);
6566                         if (ret)
6567                                 dd_dev_err(
6568                                         dd,
6569                                         "%s: received Active SMA idle message, couldn't set link to Active\n",
6570                                         __func__);
6571                 }
6572                 break;
6573         default:
6574                 dd_dev_err(dd,
6575                            "%s: received unexpected SMA idle message 0x%llx\n",
6576                            __func__, msg);
6577                 break;
6578         }
6579 }
6580
6581 static void adjust_rcvctrl(struct hfi1_devdata *dd, u64 add, u64 clear)
6582 {
6583         u64 rcvctrl;
6584         unsigned long flags;
6585
6586         spin_lock_irqsave(&dd->rcvctrl_lock, flags);
6587         rcvctrl = read_csr(dd, RCV_CTRL);
6588         rcvctrl |= add;
6589         rcvctrl &= ~clear;
6590         write_csr(dd, RCV_CTRL, rcvctrl);
6591         spin_unlock_irqrestore(&dd->rcvctrl_lock, flags);
6592 }
6593
6594 static inline void add_rcvctrl(struct hfi1_devdata *dd, u64 add)
6595 {
6596         adjust_rcvctrl(dd, add, 0);
6597 }
6598
6599 static inline void clear_rcvctrl(struct hfi1_devdata *dd, u64 clear)
6600 {
6601         adjust_rcvctrl(dd, 0, clear);
6602 }
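
/*
 * Example: rxe_freeze() and rxe_kernel_unfreeze() below toggle the
 * port with
 *	clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
 *	add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
 * The rcvctrl_lock in adjust_rcvctrl() keeps the read-modify-write of
 * RCV_CTRL atomic against concurrent callers.
 */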
6603
6604 /*
6605  * Called from all interrupt handlers to start handling an SPC freeze.
6606  */
6607 void start_freeze_handling(struct hfi1_pportdata *ppd, int flags)
6608 {
6609         struct hfi1_devdata *dd = ppd->dd;
6610         struct send_context *sc;
6611         int i;
6612
6613         if (flags & FREEZE_SELF)
6614                 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
6615
6616         /* enter frozen mode */
6617         dd->flags |= HFI1_FROZEN;
6618
6619         /* notify all SDMA engines that they are going into a freeze */
6620         sdma_freeze_notify(dd, !!(flags & FREEZE_LINK_DOWN));
6621
6622         /* do halt pre-handling on all enabled send contexts */
6623         for (i = 0; i < dd->num_send_contexts; i++) {
6624                 sc = dd->send_contexts[i].sc;
6625                 if (sc && (sc->flags & SCF_ENABLED))
6626                         sc_stop(sc, SCF_FROZEN | SCF_HALTED);
6627         }
6628
6629         /* Send contexts are frozen. Notify user space */
6630         hfi1_set_uevent_bits(ppd, _HFI1_EVENT_FROZEN_BIT);
6631
6632         if (flags & FREEZE_ABORT) {
6633                 dd_dev_err(dd,
6634                            "Aborted freeze recovery. Please REBOOT system\n");
6635                 return;
6636         }
6637         /* queue non-interrupt handler */
6638         queue_work(ppd->hfi1_wq, &ppd->freeze_work);
6639 }
6640
6641 /*
6642  * Wait until all 4 sub-blocks indicate that they have frozen or unfrozen,
6643  * depending on the "freeze" parameter.
6644  *
6645  * No need to return an error if it times out, our only option
6646  * is to proceed anyway.
6647  */
6648 static void wait_for_freeze_status(struct hfi1_devdata *dd, int freeze)
6649 {
6650         unsigned long timeout;
6651         u64 reg;
6652
6653         timeout = jiffies + msecs_to_jiffies(FREEZE_STATUS_TIMEOUT);
6654         while (1) {
6655                 reg = read_csr(dd, CCE_STATUS);
6656                 if (freeze) {
6657                         /* waiting until all indicators are set */
6658                         if ((reg & ALL_FROZE) == ALL_FROZE)
6659                                 return; /* all done */
6660                 } else {
6661                         /* waiting until all indicators are clear */
6662                         if ((reg & ALL_FROZE) == 0)
6663                                 return; /* all done */
6664                 }
6665
6666                 if (time_after(jiffies, timeout)) {
6667                         dd_dev_err(dd,
6668                                    "Time out waiting for SPC %sfreeze, bits 0x%llx, expecting 0x%llx, continuing",
6669                                    freeze ? "" : "un", reg & ALL_FROZE,
6670                                    freeze ? ALL_FROZE : 0ull);
6671                         return;
6672                 }
6673                 usleep_range(80, 120);
6674         }
6675 }
6676
6677 /*
6678  * Do all freeze handling for the RXE block.
6679  */
6680 static void rxe_freeze(struct hfi1_devdata *dd)
6681 {
6682         int i;
6683
6684         /* disable port */
6685         clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
6686
6687         /* disable all receive contexts */
6688         for (i = 0; i < dd->num_rcv_contexts; i++)
6689                 hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS, i);
6690 }
6691
6692 /*
6693  * Unfreeze handling for the RXE block - kernel contexts only.
6694  * This will also enable the port.  User contexts will do unfreeze
6695  * handling on a per-context basis as they call into the driver.
6696  *
6697  */
6698 static void rxe_kernel_unfreeze(struct hfi1_devdata *dd)
6699 {
6700         u32 rcvmask;
6701         int i;
6702
6703         /* enable all kernel contexts */
6704         for (i = 0; i < dd->n_krcv_queues; i++) {
6705                 rcvmask = HFI1_RCVCTRL_CTXT_ENB;
6706                 /* HFI1_RCVCTRL_TAILUPD_[ENB|DIS] needs to be set explicitly */
6707                 rcvmask |= HFI1_CAP_KGET_MASK(dd->rcd[i]->flags, DMA_RTAIL) ?
6708                         HFI1_RCVCTRL_TAILUPD_ENB : HFI1_RCVCTRL_TAILUPD_DIS;
6709                 hfi1_rcvctrl(dd, rcvmask, i);
6710         }
6711
6712         /* enable port */
6713         add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
6714 }
6715
6716 /*
6717  * Non-interrupt SPC freeze handling.
6718  *
6719  * This is a work-queue function outside of the triggering interrupt.
6720  */
6721 void handle_freeze(struct work_struct *work)
6722 {
6723         struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6724                                                                 freeze_work);
6725         struct hfi1_devdata *dd = ppd->dd;
6726
6727         /* wait for freeze indicators on all affected blocks */
6728         wait_for_freeze_status(dd, 1);
6729
6730         /* SPC is now frozen */
6731
6732         /* do send PIO freeze steps */
6733         pio_freeze(dd);
6734
6735         /* do send DMA freeze steps */
6736         sdma_freeze(dd);
6737
6738         /* do send egress freeze steps - nothing to do */
6739
6740         /* do receive freeze steps */
6741         rxe_freeze(dd);
6742
6743         /*
6744          * Unfreeze the hardware - clear the freeze, wait for each
6745          * block's frozen bit to clear, then clear the frozen flag.
6746          */
6747         write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
6748         wait_for_freeze_status(dd, 0);
6749
6750         if (is_ax(dd)) {
6751                 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
6752                 wait_for_freeze_status(dd, 1);
6753                 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
6754                 wait_for_freeze_status(dd, 0);
6755         }
6756
6757         /* do send PIO unfreeze steps for kernel contexts */
6758         pio_kernel_unfreeze(dd);
6759
6760         /* do send DMA unfreeze steps */
6761         sdma_unfreeze(dd);
6762
6763         /* do send egress unfreeze steps - nothing to do */
6764
6765         /* do receive unfreeze steps for kernel contexts */
6766         rxe_kernel_unfreeze(dd);
6767
6768         /*
6769          * The unfreeze procedure touches global device registers when
6770          * it disables and re-enables RXE. Mark the device unfrozen
6771          * after all that is done so other parts of the driver waiting
6772          * for the device to unfreeze don't do things out of order.
6773          *
6774          * The above implies that the meaning of HFI1_FROZEN flag is
6775          * "Device has gone into freeze mode and freeze mode handling
6776          * is still in progress."
6777          *
6778          * The flag will be removed when freeze mode processing has
6779          * completed.
6780          */
6781         dd->flags &= ~HFI1_FROZEN;
6782         wake_up(&dd->event_queue);
6783
6784         /* no longer frozen */
6785 }
6786
6787 /*
6788  * Handle a link up interrupt from the 8051.
6789  *
6790  * This is a work-queue function outside of the interrupt.
6791  */
6792 void handle_link_up(struct work_struct *work)
6793 {
6794         struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6795                                                   link_up_work);
6796         set_link_state(ppd, HLS_UP_INIT);
6797
6798         /* cache the read of DC_LCB_STS_ROUND_TRIP_LTP_CNT */
6799         read_ltp_rtt(ppd->dd);
6800         /*
6801          * OPA specifies that certain counters are cleared on a transition
6802          * to link up, so do that.
6803          */
6804         clear_linkup_counters(ppd->dd);
6805         /*
6806          * And (re)set link up default values.
6807          */
6808         set_linkup_defaults(ppd);
6809
6810         /* enforce link speed enabled */
6811         if ((ppd->link_speed_active & ppd->link_speed_enabled) == 0) {
6812                 /* oops - current speed is not enabled, bounce */
6813                 dd_dev_err(ppd->dd,
6814                            "Link speed active 0x%x is outside enabled 0x%x, downing link\n",
6815                            ppd->link_speed_active, ppd->link_speed_enabled);
6816                 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SPEED_POLICY, 0,
6817                                      OPA_LINKDOWN_REASON_SPEED_POLICY);
6818                 set_link_state(ppd, HLS_DN_OFFLINE);
6819                 start_link(ppd);
6820         }
6821 }
6822
6823 /*
6824  * Several pieces of LNI information were cached for SMA in ppd.
6825  * Reset these on link down.
6826  */
6827 static void reset_neighbor_info(struct hfi1_pportdata *ppd)
6828 {
6829         ppd->neighbor_guid = 0;
6830         ppd->neighbor_port_number = 0;
6831         ppd->neighbor_type = 0;
6832         ppd->neighbor_fm_security = 0;
6833 }
6834
6835 static const char * const link_down_reason_strs[] = {
6836         [OPA_LINKDOWN_REASON_NONE] = "None",
6837         [OPA_LINKDOWN_REASON_RCV_ERROR_0] = "Receive error 0",
6838         [OPA_LINKDOWN_REASON_BAD_PKT_LEN] = "Bad packet length",
6839         [OPA_LINKDOWN_REASON_PKT_TOO_LONG] = "Packet too long",
6840         [OPA_LINKDOWN_REASON_PKT_TOO_SHORT] = "Packet too short",
6841         [OPA_LINKDOWN_REASON_BAD_SLID] = "Bad SLID",
6842         [OPA_LINKDOWN_REASON_BAD_DLID] = "Bad DLID",
6843         [OPA_LINKDOWN_REASON_BAD_L2] = "Bad L2",
6844         [OPA_LINKDOWN_REASON_BAD_SC] = "Bad SC",
6845         [OPA_LINKDOWN_REASON_RCV_ERROR_8] = "Receive error 8",
6846         [OPA_LINKDOWN_REASON_BAD_MID_TAIL] = "Bad mid tail",
6847         [OPA_LINKDOWN_REASON_RCV_ERROR_10] = "Receive error 10",
6848         [OPA_LINKDOWN_REASON_PREEMPT_ERROR] = "Preempt error",
6849         [OPA_LINKDOWN_REASON_PREEMPT_VL15] = "Preempt vl15",
6850         [OPA_LINKDOWN_REASON_BAD_VL_MARKER] = "Bad VL marker",
6851         [OPA_LINKDOWN_REASON_RCV_ERROR_14] = "Receive error 14",
6852         [OPA_LINKDOWN_REASON_RCV_ERROR_15] = "Receive error 15",
6853         [OPA_LINKDOWN_REASON_BAD_HEAD_DIST] = "Bad head distance",
6854         [OPA_LINKDOWN_REASON_BAD_TAIL_DIST] = "Bad tail distance",
6855         [OPA_LINKDOWN_REASON_BAD_CTRL_DIST] = "Bad control distance",
6856         [OPA_LINKDOWN_REASON_BAD_CREDIT_ACK] = "Bad credit ack",
6857         [OPA_LINKDOWN_REASON_UNSUPPORTED_VL_MARKER] = "Unsupported VL marker",
6858         [OPA_LINKDOWN_REASON_BAD_PREEMPT] = "Bad preempt",
6859         [OPA_LINKDOWN_REASON_BAD_CONTROL_FLIT] = "Bad control flit",
6860         [OPA_LINKDOWN_REASON_EXCEED_MULTICAST_LIMIT] = "Exceed multicast limit",
6861         [OPA_LINKDOWN_REASON_RCV_ERROR_24] = "Receive error 24",
6862         [OPA_LINKDOWN_REASON_RCV_ERROR_25] = "Receive error 25",
6863         [OPA_LINKDOWN_REASON_RCV_ERROR_26] = "Receive error 26",
6864         [OPA_LINKDOWN_REASON_RCV_ERROR_27] = "Receive error 27",
6865         [OPA_LINKDOWN_REASON_RCV_ERROR_28] = "Receive error 28",
6866         [OPA_LINKDOWN_REASON_RCV_ERROR_29] = "Receive error 29",
6867         [OPA_LINKDOWN_REASON_RCV_ERROR_30] = "Receive error 30",
6868         [OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN] =
6869                                         "Excessive buffer overrun",
6870         [OPA_LINKDOWN_REASON_UNKNOWN] = "Unknown",
6871         [OPA_LINKDOWN_REASON_REBOOT] = "Reboot",
6872         [OPA_LINKDOWN_REASON_NEIGHBOR_UNKNOWN] = "Neighbor unknown",
6873         [OPA_LINKDOWN_REASON_FM_BOUNCE] = "FM bounce",
6874         [OPA_LINKDOWN_REASON_SPEED_POLICY] = "Speed policy",
6875         [OPA_LINKDOWN_REASON_WIDTH_POLICY] = "Width policy",
6876         [OPA_LINKDOWN_REASON_DISCONNECTED] = "Disconnected",
6877         [OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED] =
6878                                         "Local media not installed",
6879         [OPA_LINKDOWN_REASON_NOT_INSTALLED] = "Not installed",
6880         [OPA_LINKDOWN_REASON_CHASSIS_CONFIG] = "Chassis config",
6881         [OPA_LINKDOWN_REASON_END_TO_END_NOT_INSTALLED] =
6882                                         "End to end not installed",
6883         [OPA_LINKDOWN_REASON_POWER_POLICY] = "Power policy",
6884         [OPA_LINKDOWN_REASON_LINKSPEED_POLICY] = "Link speed policy",
6885         [OPA_LINKDOWN_REASON_LINKWIDTH_POLICY] = "Link width policy",
6886         [OPA_LINKDOWN_REASON_SWITCH_MGMT] = "Switch management",
6887         [OPA_LINKDOWN_REASON_SMA_DISABLED] = "SMA disabled",
6888         [OPA_LINKDOWN_REASON_TRANSIENT] = "Transient"
6889 };
6890
6891 /* return the neighbor link down reason string */
6892 static const char *link_down_reason_str(u8 reason)
6893 {
6894         const char *str = NULL;
6895
6896         if (reason < ARRAY_SIZE(link_down_reason_strs))
6897                 str = link_down_reason_strs[reason];
6898         if (!str)
6899                 str = "(invalid)";
6900
6901         return str;
6902 }
6903
6904 /*
6905  * Handle a link down interrupt from the 8051.
6906  *
6907  * This is a work-queue function outside of the interrupt.
6908  */
6909 void handle_link_down(struct work_struct *work)
6910 {
6911         u8 lcl_reason, neigh_reason = 0;
6912         u8 link_down_reason;
6913         struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6914                                                   link_down_work);
6915         int was_up;
6916         static const char ldr_str[] = "Link down reason: ";
6917
6918         if ((ppd->host_link_state &
6919              (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) &&
6920              ppd->port_type == PORT_TYPE_FIXED)
6921                 ppd->offline_disabled_reason =
6922                         HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NOT_INSTALLED);
6923
6924         /* Go offline first, then deal with reading/writing through 8051 */
6925         was_up = !!(ppd->host_link_state & HLS_UP);
6926         set_link_state(ppd, HLS_DN_OFFLINE);
6927
6928         if (was_up) {
6929                 lcl_reason = 0;
6930                 /* link down reason is only valid if the link was up */
6931                 read_link_down_reason(ppd->dd, &link_down_reason);
6932                 switch (link_down_reason) {
6933                 case LDR_LINK_TRANSFER_ACTIVE_LOW:
6934                         /* the link went down, no idle message reason */
6935                         dd_dev_info(ppd->dd, "%sUnexpected link down\n",
6936                                     ldr_str);
6937                         break;
6938                 case LDR_RECEIVED_LINKDOWN_IDLE_MSG:
6939                         /*
6940                          * The neighbor reason is only valid if an idle message
6941                          * was received for it.
6942                          */
6943                         read_planned_down_reason_code(ppd->dd, &neigh_reason);
6944                         dd_dev_info(ppd->dd,
6945                                     "%sNeighbor link down message %d, %s\n",
6946                                     ldr_str, neigh_reason,
6947                                     link_down_reason_str(neigh_reason));
6948                         break;
6949                 case LDR_RECEIVED_HOST_OFFLINE_REQ:
6950                         dd_dev_info(ppd->dd,
6951                                     "%sHost requested link to go offline\n",
6952                                     ldr_str);
6953                         break;
6954                 default:
6955                         dd_dev_info(ppd->dd, "%sUnknown reason 0x%x\n",
6956                                     ldr_str, link_down_reason);
6957                         break;
6958                 }
6959
6960                 /*
6961                  * If no reason, assume peer-initiated but missed
6962                  * LinkGoingDown idle flits.
6963                  */
6964                 if (neigh_reason == 0)
6965                         lcl_reason = OPA_LINKDOWN_REASON_NEIGHBOR_UNKNOWN;
6966         } else {
6967                 /* went down while polling or going up */
6968                 lcl_reason = OPA_LINKDOWN_REASON_TRANSIENT;
6969         }
6970
6971         set_link_down_reason(ppd, lcl_reason, neigh_reason, 0);
6972
6973         /* inform the SMA when the link transitions from up to down */
6974         if (was_up && ppd->local_link_down_reason.sma == 0 &&
6975             ppd->neigh_link_down_reason.sma == 0) {
6976                 ppd->local_link_down_reason.sma =
6977                                         ppd->local_link_down_reason.latest;
6978                 ppd->neigh_link_down_reason.sma =
6979                                         ppd->neigh_link_down_reason.latest;
6980         }
6981
6982         reset_neighbor_info(ppd);
6983
6984         /* disable the port */
6985         clear_rcvctrl(ppd->dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
6986
6987         /*
6988          * If there is no cable attached, turn the DC off. Otherwise,
6989          * start the link bring up.
6990          */
6991         if (ppd->port_type == PORT_TYPE_QSFP && !qsfp_mod_present(ppd))
6992                 dc_shutdown(ppd->dd);
6993         else
6994                 start_link(ppd);
6995 }
6996
6997 void handle_link_bounce(struct work_struct *work)
6998 {
6999         struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7000                                                         link_bounce_work);
7001
7002         /*
7003          * Only do something if the link is currently up.
7004          */
7005         if (ppd->host_link_state & HLS_UP) {
7006                 set_link_state(ppd, HLS_DN_OFFLINE);
7007                 start_link(ppd);
7008         } else {
7009                 dd_dev_info(ppd->dd, "%s: link not up (%s), nothing to do\n",
7010                             __func__, link_state_name(ppd->host_link_state));
7011         }
7012 }
7013
7014 /*
7015  * Mask conversion: Capability exchange to Port LTP.  The capability
7016  * exchange has an implicit 16b CRC that is mandatory.
7017  */
7018 static int cap_to_port_ltp(int cap)
7019 {
7020         int port_ltp = PORT_LTP_CRC_MODE_16; /* this mode is mandatory */
7021
7022         if (cap & CAP_CRC_14B)
7023                 port_ltp |= PORT_LTP_CRC_MODE_14;
7024         if (cap & CAP_CRC_48B)
7025                 port_ltp |= PORT_LTP_CRC_MODE_48;
7026         if (cap & CAP_CRC_12B_16B_PER_LANE)
7027                 port_ltp |= PORT_LTP_CRC_MODE_PER_LANE;
7028
7029         return port_ltp;
7030 }
7031
7032 /*
7033  * Convert an OPA Port LTP mask to a capability mask
7034  */
7035 int port_ltp_to_cap(int port_ltp)
7036 {
7037         int cap_mask = 0;
7038
7039         if (port_ltp & PORT_LTP_CRC_MODE_14)
7040                 cap_mask |= CAP_CRC_14B;
7041         if (port_ltp & PORT_LTP_CRC_MODE_48)
7042                 cap_mask |= CAP_CRC_48B;
7043         if (port_ltp & PORT_LTP_CRC_MODE_PER_LANE)
7044                 cap_mask |= CAP_CRC_12B_16B_PER_LANE;
7045
7046         return cap_mask;
7047 }
7048
7049 /*
7050  * Convert a single DC LCB CRC mode to an OPA Port LTP mask.
7051  */
7052 static int lcb_to_port_ltp(int lcb_crc)
7053 {
7054         int port_ltp = 0;
7055
7056         if (lcb_crc == LCB_CRC_12B_16B_PER_LANE)
7057                 port_ltp = PORT_LTP_CRC_MODE_PER_LANE;
7058         else if (lcb_crc == LCB_CRC_48B)
7059                 port_ltp = PORT_LTP_CRC_MODE_48;
7060         else if (lcb_crc == LCB_CRC_14B)
7061                 port_ltp = PORT_LTP_CRC_MODE_14;
7062         else
7063                 port_ltp = PORT_LTP_CRC_MODE_16;
7064
7065         return port_ltp;
7066 }
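
/*
 * Worked example for the three conversions above: a capability mask of
 * (CAP_CRC_14B | CAP_CRC_48B) maps to the Port LTP mask
 * (PORT_LTP_CRC_MODE_16 | PORT_LTP_CRC_MODE_14 | PORT_LTP_CRC_MODE_48)
 * because the mandatory 16b mode is always included.  Converting back
 * with port_ltp_to_cap() drops the implicit 16b bit and yields
 * (CAP_CRC_14B | CAP_CRC_48B) again, while a single negotiated LCB
 * mode such as LCB_CRC_48B maps to just PORT_LTP_CRC_MODE_48.
 */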
7067
7068 /*
7069  * Our neighbor has indicated that we are allowed to act as a fabric
7070  * manager, so place the full management partition key in the second
7071  * (0-based) pkey array position (see OPAv1, section 20.2.2.6.8). Note
7072  * that we should already have the limited management partition key in
7073  * array element 1, and also that the port is not yet up when
7074  * add_full_mgmt_pkey() is invoked.
7075  */
7076 static void add_full_mgmt_pkey(struct hfi1_pportdata *ppd)
7077 {
7078         struct hfi1_devdata *dd = ppd->dd;
7079
7080         /* Sanity check - ppd->pkeys[2] should be 0, or already initialized */
7081         if (!((ppd->pkeys[2] == 0) || (ppd->pkeys[2] == FULL_MGMT_P_KEY)))
7082                 dd_dev_warn(dd, "%s pkey[2] already set to 0x%x, resetting it to 0x%x\n",
7083                             __func__, ppd->pkeys[2], FULL_MGMT_P_KEY);
7084         ppd->pkeys[2] = FULL_MGMT_P_KEY;
7085         (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
7086         hfi1_event_pkey_change(ppd->dd, ppd->port);
7087 }
7088
7089 static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd)
7090 {
7091         if (ppd->pkeys[2] != 0) {
7092                 ppd->pkeys[2] = 0;
7093                 (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
7094                 hfi1_event_pkey_change(ppd->dd, ppd->port);
7095         }
7096 }
7097
7098 /*
7099  * Convert the given link width to the OPA link width bitmask.
7100  */
7101 static u16 link_width_to_bits(struct hfi1_devdata *dd, u16 width)
7102 {
7103         switch (width) {
7104         case 0:
7105                 /*
7106                  * Simulator and quick linkup do not set the width.
7107                  * Just set it to 4x without complaint.
7108                  */
7109                 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR || quick_linkup)
7110                         return OPA_LINK_WIDTH_4X;
7111                 return 0; /* no lanes up */
7112         case 1: return OPA_LINK_WIDTH_1X;
7113         case 2: return OPA_LINK_WIDTH_2X;
7114         case 3: return OPA_LINK_WIDTH_3X;
7115         default:
7116                 dd_dev_info(dd, "%s: invalid width %d, using 4\n",
7117                             __func__, width);
7118                 /* fall through */
7119         case 4: return OPA_LINK_WIDTH_4X;
7120         }
7121 }
7122
7123 /*
7124  * Do a population count on the bottom nibble.
7125  */
7126 static const u8 bit_counts[16] = {
7127         0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4
7128 };
7129
7130 static inline u8 nibble_to_count(u8 nibble)
7131 {
7132         return bit_counts[nibble & 0xf];
7133 }
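
/*
 * Example: an enable_lane nibble of 0xb (binary 1011) has three bits
 * set, so nibble_to_count(0xb) == bit_counts[0xb] == 3 active lanes.
 */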
7134
7135 /*
7136  * Read the active lane information from the 8051 registers and return
7137  * their widths.
7138  *
7139  * Active lane information is found in these 8051 registers:
7140  *      enable_lane_tx
7141  *      enable_lane_rx
7142  */
7143 static void get_link_widths(struct hfi1_devdata *dd, u16 *tx_width,
7144                             u16 *rx_width)
7145 {
7146         u16 tx, rx;
7147         u8 enable_lane_rx;
7148         u8 enable_lane_tx;
7149         u8 tx_polarity_inversion;
7150         u8 rx_polarity_inversion;
7151         u8 max_rate;
7152
7153         /* read the active lanes */
7154         read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
7155                          &rx_polarity_inversion, &max_rate);
7156         read_local_lni(dd, &enable_lane_rx);
7157
7158         /* convert to counts */
7159         tx = nibble_to_count(enable_lane_tx);
7160         rx = nibble_to_count(enable_lane_rx);
7161
7162         /*
7163          * Set link_speed_active here, overriding what was set in
7164          * handle_verify_cap().  The ASIC 8051 firmware does not correctly
7165          * set the max_rate field in handle_verify_cap until v0.19.
7166          */
7167         if ((dd->icode == ICODE_RTL_SILICON) &&
7168             (dd->dc8051_ver < dc8051_ver(0, 19))) {
7169                 /* max_rate: 0 = 12.5G, 1 = 25G */
7170                 switch (max_rate) {
7171                 case 0:
7172                         dd->pport[0].link_speed_active = OPA_LINK_SPEED_12_5G;
7173                         break;
7174                 default:
7175                         dd_dev_err(dd,
7176                                    "%s: unexpected max rate %d, using 25Gb\n",
7177                                    __func__, (int)max_rate);
7178                         /* fall through */
7179                 case 1:
7180                         dd->pport[0].link_speed_active = OPA_LINK_SPEED_25G;
7181                         break;
7182                 }
7183         }
7184
7185         dd_dev_info(dd,
7186                     "Fabric active lanes (width): tx 0x%x (%d), rx 0x%x (%d)\n",
7187                     enable_lane_tx, tx, enable_lane_rx, rx);
7188         *tx_width = link_width_to_bits(dd, tx);
7189         *rx_width = link_width_to_bits(dd, rx);
7190 }
7191
7192 /*
7193  * Read verify_cap_local_fm_link_width[1] to obtain the link widths.
7194  * Valid after the end of VerifyCap and during LinkUp.  Does not change
7195  * after link up.  That is, look elsewhere for downgrade information.
7196  *
7197  * Bits are:
7198  *      + bits [7:4] contain the number of active transmitters
7199  *      + bits [3:0] contain the number of active receivers
7200  * These are numbers 1 through 4 and can be different values if the
7201  * link is asymmetric.
7202  *
7203  * verify_cap_local_fm_link_width[0] retains its original value.
7204  */
7205 static void get_linkup_widths(struct hfi1_devdata *dd, u16 *tx_width,
7206                               u16 *rx_width)
7207 {
7208         u16 widths, tx, rx;
7209         u8 misc_bits, local_flags;
7210         u16 active_tx, active_rx;
7211
7212         read_vc_local_link_width(dd, &misc_bits, &local_flags, &widths);
7213         tx = widths >> 12;
7214         rx = (widths >> 8) & 0xf;
7215
7216         *tx_width = link_width_to_bits(dd, tx);
7217         *rx_width = link_width_to_bits(dd, rx);
7218
7219         /* print the active widths */
7220         get_link_widths(dd, &active_tx, &active_rx);
7221 }
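
/*
 * Worked example: widths == 0x4300 decodes as tx = (0x4300 >> 12) == 4
 * active transmitters and rx = ((0x4300 >> 8) & 0xf) == 3 active
 * receivers, i.e. an asymmetric link reported as 4X tx and 3X rx.
 */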
7222
7223 /*
7224  * Set ppd->link_width_active and ppd->link_width_downgrade_active using
7225  * hardware information when the link first comes up.
7226  *
7227  * The link width is not available until after VerifyCap.AllFramesReceived
7228  * (the trigger for handle_verify_cap), so this is outside that routine
7229  * and should be called when the 8051 signals linkup.
7230  */
7231 void get_linkup_link_widths(struct hfi1_pportdata *ppd)
7232 {
7233         u16 tx_width, rx_width;
7234
7235         /* get end-of-LNI link widths */
7236         get_linkup_widths(ppd->dd, &tx_width, &rx_width);
7237
7238         /* use tx_width as the link is supposed to be symmetric on link up */
7239         ppd->link_width_active = tx_width;
7240         /* link width downgrade active (LWD.A) starts out matching LW.A */
7241         ppd->link_width_downgrade_tx_active = ppd->link_width_active;
7242         ppd->link_width_downgrade_rx_active = ppd->link_width_active;
7243         /* per OPA spec, on link up LWD.E resets to LWD.S */
7244         ppd->link_width_downgrade_enabled = ppd->link_width_downgrade_supported;
7245         /* cache the active egress rate (units of 10^6 bits/sec) */
7246         ppd->current_egress_rate = active_egress_rate(ppd);
7247 }
7248
7249 /*
7250  * Handle a verify capabilities interrupt from the 8051.
7251  *
7252  * This is a work-queue function outside of the interrupt.
7253  */
7254 void handle_verify_cap(struct work_struct *work)
7255 {
7256         struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7257                                                                 link_vc_work);
7258         struct hfi1_devdata *dd = ppd->dd;
7259         u64 reg;
7260         u8 power_management;
7261         u8 continuous;
7262         u8 vcu;
7263         u8 vau;
7264         u8 z;
7265         u16 vl15buf;
7266         u16 link_widths;
7267         u16 crc_mask;
7268         u16 crc_val;
7269         u16 device_id;
7270         u16 active_tx, active_rx;
7271         u8 partner_supported_crc;
7272         u8 remote_tx_rate;
7273         u8 device_rev;
7274
7275         set_link_state(ppd, HLS_VERIFY_CAP);
7276
7277         lcb_shutdown(dd, 0);
7278         adjust_lcb_for_fpga_serdes(dd);
7279
7280         /*
7281          * These are now valid:
7282          *      remote VerifyCap fields in the general LNI config
7283          *      CSR DC8051_STS_REMOTE_GUID
7284          *      CSR DC8051_STS_REMOTE_NODE_TYPE
7285          *      CSR DC8051_STS_REMOTE_FM_SECURITY
7286          *      CSR DC8051_STS_REMOTE_PORT_NO
7287          */
7288
7289         read_vc_remote_phy(dd, &power_management, &continuous);
7290         read_vc_remote_fabric(dd, &vau, &z, &vcu, &vl15buf,
7291                               &partner_supported_crc);
7292         read_vc_remote_link_width(dd, &remote_tx_rate, &link_widths);
7293         read_remote_device_id(dd, &device_id, &device_rev);
7294         /*
7295          * And the 'MgmtAllowed' information, which is exchanged during
7296          * LNI, is also available at this point.
7297          */
7298         read_mgmt_allowed(dd, &ppd->mgmt_allowed);
7299         /* print the active widths */
7300         get_link_widths(dd, &active_tx, &active_rx);
7301         dd_dev_info(dd,
7302                     "Peer PHY: power management 0x%x, continuous updates 0x%x\n",
7303                     (int)power_management, (int)continuous);
7304         dd_dev_info(dd,
7305                     "Peer Fabric: vAU %d, Z %d, vCU %d, vl15 credits 0x%x, CRC sizes 0x%x\n",
7306                     (int)vau, (int)z, (int)vcu, (int)vl15buf,
7307                     (int)partner_supported_crc);
7308         dd_dev_info(dd, "Peer Link Width: tx rate 0x%x, widths 0x%x\n",
7309                     (u32)remote_tx_rate, (u32)link_widths);
7310         dd_dev_info(dd, "Peer Device ID: 0x%04x, Revision 0x%02x\n",
7311                     (u32)device_id, (u32)device_rev);
7312         /*
7313          * The peer vAU value just read is the peer receiver value.  HFI does
7314          * not support a transmit vAU of 0 (AU == 8).  We advertised that
7315          * with Z=1 in the fabric capabilities sent to the peer.  The peer
7316          * will see our Z=1, and, if it advertised a vAU of 0, will move its
7317          * receive to vAU of 1 (AU == 16).  Do the same here.  We do not care
7318          * about the peer Z value - our sent vAU is 3 (hardwired) and is not
7319          * subject to the Z value exception.
7320          */
7321         if (vau == 0)
7322                 vau = 1;
7323         set_up_vl15(dd, vau, vl15buf);
7324
7325         /* set up the LCB CRC mode */
7326         crc_mask = ppd->port_crc_mode_enabled & partner_supported_crc;
7327
7328         /* order is important: use the lowest bit in common */
7329         if (crc_mask & CAP_CRC_14B)
7330                 crc_val = LCB_CRC_14B;
7331         else if (crc_mask & CAP_CRC_48B)
7332                 crc_val = LCB_CRC_48B;
7333         else if (crc_mask & CAP_CRC_12B_16B_PER_LANE)
7334                 crc_val = LCB_CRC_12B_16B_PER_LANE;
7335         else
7336                 crc_val = LCB_CRC_16B;
7337
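        /*
         * Example: if our enabled modes are (CAP_CRC_14B | CAP_CRC_48B)
         * and the peer only supports CAP_CRC_48B, crc_mask is
         * CAP_CRC_48B alone; the 14b test fails and crc_val becomes
         * LCB_CRC_48B.
         */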
7338         dd_dev_info(dd, "Final LCB CRC mode: %d\n", (int)crc_val);
7339         write_csr(dd, DC_LCB_CFG_CRC_MODE,
7340                   (u64)crc_val << DC_LCB_CFG_CRC_MODE_TX_VAL_SHIFT);
7341
7342         /* set (14b only) or clear sideband credit */
7343         reg = read_csr(dd, SEND_CM_CTRL);
7344         if (crc_val == LCB_CRC_14B && crc_14b_sideband) {
7345                 write_csr(dd, SEND_CM_CTRL,
7346                           reg | SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
7347         } else {
7348                 write_csr(dd, SEND_CM_CTRL,
7349                           reg & ~SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
7350         }
7351
7352         ppd->link_speed_active = 0;     /* invalid value */
7353         if (dd->dc8051_ver < dc8051_ver(0, 20)) {
7354                 /* remote_tx_rate: 0 = 12.5G, 1 = 25G */
7355                 switch (remote_tx_rate) {
7356                 case 0:
7357                         ppd->link_speed_active = OPA_LINK_SPEED_12_5G;
7358                         break;
7359                 case 1:
7360                         ppd->link_speed_active = OPA_LINK_SPEED_25G;
7361                         break;
7362                 }
7363         } else {
7364                 /* actual rate is highest bit of the ANDed rates */
7365                 u8 rate = remote_tx_rate & ppd->local_tx_rate;
7366
7367                 if (rate & 2)
7368                         ppd->link_speed_active = OPA_LINK_SPEED_25G;
7369                 else if (rate & 1)
7370                         ppd->link_speed_active = OPA_LINK_SPEED_12_5G;
7371         }
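        /*
         * Example for the v0.20+ path above: local_tx_rate 0x3 (both
         * 12.5G and 25G) ANDed with remote_tx_rate 0x2 (25G only)
         * gives 0x2; bit 1 is set, so link_speed_active becomes
         * OPA_LINK_SPEED_25G.
         */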
7372         if (ppd->link_speed_active == 0) {
7373                 dd_dev_err(dd, "%s: unexpected remote tx rate %d, using 25Gb\n",
7374                            __func__, (int)remote_tx_rate);
7375                 ppd->link_speed_active = OPA_LINK_SPEED_25G;
7376         }
7377
7378         /*
7379          * Cache the values of the supported, enabled, and active
7380          * LTP CRC modes to return in 'portinfo' queries. But the bit
7381          * flags that are returned in the portinfo query differ from
7382          * what's in the link_crc_mask, crc_sizes, and crc_val
7383          * variables. Convert these here.
7384          */
7385         ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8;
7386                 /* supported crc modes */
7387         ppd->port_ltp_crc_mode |=
7388                 cap_to_port_ltp(ppd->port_crc_mode_enabled) << 4;
7389                 /* enabled crc modes */
7390         ppd->port_ltp_crc_mode |= lcb_to_port_ltp(crc_val);
7391                 /* active crc mode */
7392
7393         /* set up the remote credit return table */
7394         assign_remote_cm_au_table(dd, vcu);
7395
7396         /*
7397          * The LCB is reset on entry to handle_verify_cap(), so this must
7398          * be applied on every link up.
7399          *
7400          * Adjust LCB error kill enable to kill the link if
7401          * these RBUF errors are seen:
7402          *      REPLAY_BUF_MBE_SMASK
7403          *      FLIT_INPUT_BUF_MBE_SMASK
7404          */
7405         if (is_ax(dd)) {                        /* fixed in B0 */
7406                 reg = read_csr(dd, DC_LCB_CFG_LINK_KILL_EN);
7407                 reg |= DC_LCB_CFG_LINK_KILL_EN_REPLAY_BUF_MBE_SMASK
7408                         | DC_LCB_CFG_LINK_KILL_EN_FLIT_INPUT_BUF_MBE_SMASK;
7409                 write_csr(dd, DC_LCB_CFG_LINK_KILL_EN, reg);
7410         }
7411
7412         /* pull LCB fifos out of reset - all fifo clocks must be stable */
7413         write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
7414
7415         /* give 8051 access to the LCB CSRs */
7416         write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
7417         set_8051_lcb_access(dd);
7418
7419         ppd->neighbor_guid =
7420                 read_csr(dd, DC_DC8051_STS_REMOTE_GUID);
7421         ppd->neighbor_port_number = read_csr(dd, DC_DC8051_STS_REMOTE_PORT_NO) &
7422                                         DC_DC8051_STS_REMOTE_PORT_NO_VAL_SMASK;
7423         ppd->neighbor_type =
7424                 read_csr(dd, DC_DC8051_STS_REMOTE_NODE_TYPE) &
7425                 DC_DC8051_STS_REMOTE_NODE_TYPE_VAL_MASK;
7426         ppd->neighbor_fm_security =
7427                 read_csr(dd, DC_DC8051_STS_REMOTE_FM_SECURITY) &
7428                 DC_DC8051_STS_LOCAL_FM_SECURITY_DISABLED_MASK;
7429         dd_dev_info(dd,
7430                     "Neighbor Guid: %llx Neighbor type %d MgmtAllowed %d FM security bypass %d\n",
7431                     ppd->neighbor_guid, ppd->neighbor_type,
7432                     ppd->mgmt_allowed, ppd->neighbor_fm_security);
7433         if (ppd->mgmt_allowed)
7434                 add_full_mgmt_pkey(ppd);
7435
7436         /* tell the 8051 to go to LinkUp */
7437         set_link_state(ppd, HLS_GOING_UP);
7438 }
7439
7440 /*
7441  * Apply the link width downgrade enabled policy against the current active
7442  * link widths.
7443  *
7444  * Called when the enabled policy changes or the active link widths change.
7445  */
7446 void apply_link_downgrade_policy(struct hfi1_pportdata *ppd, int refresh_widths)
7447 {
7448         int do_bounce = 0;
7449         int tries;
7450         u16 lwde;
7451         u16 tx, rx;
7452
7453         /* use the hls lock to avoid a race with actual link up */
7454         tries = 0;
7455 retry:
7456         mutex_lock(&ppd->hls_lock);
7457         /* only apply if the link is up */
7458         if (ppd->host_link_state & HLS_DOWN) {
7459                 /* still going up, wait and retry */
7460                 if (ppd->host_link_state & HLS_GOING_UP) {
7461                         if (++tries < 1000) {
7462                                 mutex_unlock(&ppd->hls_lock);
7463                                 usleep_range(100, 120); /* arbitrary */
7464                                 goto retry;
7465                         }
7466                         dd_dev_err(ppd->dd,
7467                                    "%s: giving up waiting for link state change\n",
7468                                    __func__);
7469                 }
7470                 goto done;
7471         }
7472
7473         lwde = ppd->link_width_downgrade_enabled;
7474
7475         if (refresh_widths) {
7476                 get_link_widths(ppd->dd, &tx, &rx);
7477                 ppd->link_width_downgrade_tx_active = tx;
7478                 ppd->link_width_downgrade_rx_active = rx;
7479         }
7480
7481         if (ppd->link_width_downgrade_tx_active == 0 ||
7482             ppd->link_width_downgrade_rx_active == 0) {
7483                 /* the 8051 reported a dead link as a downgrade */
7484                 dd_dev_err(ppd->dd, "Link downgrade is really a link down, ignoring\n");
7485         } else if (lwde == 0) {
7486                 /* downgrade is disabled */
7487
7488                 /* bounce if not at starting active width */
7489                 if ((ppd->link_width_active !=
7490                      ppd->link_width_downgrade_tx_active) ||
7491                     (ppd->link_width_active !=
7492                      ppd->link_width_downgrade_rx_active)) {
7493                         dd_dev_err(ppd->dd,
7494                                    "Link downgrade is disabled and link has downgraded, downing link\n");
7495                         dd_dev_err(ppd->dd,
7496                                    "  original 0x%x, tx active 0x%x, rx active 0x%x\n",
7497                                    ppd->link_width_active,
7498                                    ppd->link_width_downgrade_tx_active,
7499                                    ppd->link_width_downgrade_rx_active);
7500                         do_bounce = 1;
7501                 }
7502         } else if ((lwde & ppd->link_width_downgrade_tx_active) == 0 ||
7503                    (lwde & ppd->link_width_downgrade_rx_active) == 0) {
7504                 /* Tx or Rx is outside the enabled policy */
7505                 dd_dev_err(ppd->dd,
7506                            "Link is outside of downgrade allowed, downing link\n");
7507                 dd_dev_err(ppd->dd,
7508                            "  enabled 0x%x, tx active 0x%x, rx active 0x%x\n",
7509                            lwde, ppd->link_width_downgrade_tx_active,
7510                            ppd->link_width_downgrade_rx_active);
7511                 do_bounce = 1;
7512         }
7513
7514 done:
7515         mutex_unlock(&ppd->hls_lock);
7516
7517         if (do_bounce) {
7518                 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_WIDTH_POLICY, 0,
7519                                      OPA_LINKDOWN_REASON_WIDTH_POLICY);
7520                 set_link_state(ppd, HLS_DN_OFFLINE);
7521                 start_link(ppd);
7522         }
7523 }
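
/*
 * Example: with link_width_downgrade_enabled set to
 * (OPA_LINK_WIDTH_3X | OPA_LINK_WIDTH_4X), a link whose tx or rx side
 * downgrades to OPA_LINK_WIDTH_2X fails the (lwde & active) test above
 * and is bounced offline with a width-policy link down reason.
 */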
7524
7525 /*
7526  * Handle a link downgrade interrupt from the 8051.
7527  *
7528  * This is a work-queue function outside of the interrupt.
7529  */
7530 void handle_link_downgrade(struct work_struct *work)
7531 {
7532         struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7533                                                         link_downgrade_work);
7534
7535         dd_dev_info(ppd->dd, "8051: Link width downgrade\n");
7536         apply_link_downgrade_policy(ppd, 1);
7537 }
7538
7539 static char *dcc_err_string(char *buf, int buf_len, u64 flags)
7540 {
7541         return flag_string(buf, buf_len, flags, dcc_err_flags,
7542                 ARRAY_SIZE(dcc_err_flags));
7543 }
7544
7545 static char *lcb_err_string(char *buf, int buf_len, u64 flags)
7546 {
7547         return flag_string(buf, buf_len, flags, lcb_err_flags,
7548                 ARRAY_SIZE(lcb_err_flags));
7549 }
7550
7551 static char *dc8051_err_string(char *buf, int buf_len, u64 flags)
7552 {
7553         return flag_string(buf, buf_len, flags, dc8051_err_flags,
7554                 ARRAY_SIZE(dc8051_err_flags));
7555 }
7556
7557 static char *dc8051_info_err_string(char *buf, int buf_len, u64 flags)
7558 {
7559         return flag_string(buf, buf_len, flags, dc8051_info_err_flags,
7560                 ARRAY_SIZE(dc8051_info_err_flags));
7561 }
7562
7563 static char *dc8051_info_host_msg_string(char *buf, int buf_len, u64 flags)
7564 {
7565         return flag_string(buf, buf_len, flags, dc8051_info_host_msg_flags,
7566                 ARRAY_SIZE(dc8051_info_host_msg_flags));
7567 }
7568
7569 static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg)
7570 {
7571         struct hfi1_pportdata *ppd = dd->pport;
7572         u64 info, err, host_msg;
7573         int queue_link_down = 0;
7574         char buf[96];
7575
7576         /* look at the flags */
7577         if (reg & DC_DC8051_ERR_FLG_SET_BY_8051_SMASK) {
7578                 /* 8051 information set by firmware */
7579                 /* read DC8051_DBG_ERR_INFO_SET_BY_8051 for details */
7580                 info = read_csr(dd, DC_DC8051_DBG_ERR_INFO_SET_BY_8051);
7581                 err = (info >> DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_SHIFT)
7582                         & DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_MASK;
7583                 host_msg = (info >>
7584                         DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_SHIFT)
7585                         & DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_MASK;
7586
7587                 /*
7588                  * Handle error flags.
7589                  */
7590                 if (err & FAILED_LNI) {
7591                         /*
7592                          * LNI error indications are cleared by the 8051
7593                          * only when starting polling.  Only pay attention
7594                          * to them when in the states that occur during
7595                          * LNI.
7596                          */
7597                         if (ppd->host_link_state
7598                             & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
7599                                 queue_link_down = 1;
7600                                 dd_dev_info(dd, "Link error: %s\n",
7601                                             dc8051_info_err_string(buf,
7602                                                                    sizeof(buf),
7603                                                                    err &
7604                                                                    FAILED_LNI));
7605                         }
7606                         err &= ~(u64)FAILED_LNI;
7607                 }
7608                 /* unknown frames can happen during LNI, just count */
7609                 if (err & UNKNOWN_FRAME) {
7610                         ppd->unknown_frame_count++;
7611                         err &= ~(u64)UNKNOWN_FRAME;
7612                 }
7613                 if (err) {
7614                         /* report remaining errors, but do not do anything */
7615                         dd_dev_err(dd, "8051 info error: %s\n",
7616                                    dc8051_info_err_string(buf, sizeof(buf),
7617                                                           err));
7618                 }
7619
7620                 /*
7621                  * Handle host message flags.
7622                  */
7623                 if (host_msg & HOST_REQ_DONE) {
7624                         /*
7625                          * Presently, the driver does a busy wait for
7626                          * host requests to complete.  This is only an
7627                          * informational message.
7628                          * NOTE: The 8051 clears the host message
7629                          * information *on the next 8051 command*.
7630                          * Therefore, when linkup is achieved,
7631                          * this flag will still be set.
7632                          */
7633                         host_msg &= ~(u64)HOST_REQ_DONE;
7634                 }
7635                 if (host_msg & BC_SMA_MSG) {
7636                         queue_work(ppd->hfi1_wq, &ppd->sma_message_work);
7637                         host_msg &= ~(u64)BC_SMA_MSG;
7638                 }
7639                 if (host_msg & LINKUP_ACHIEVED) {
7640                         dd_dev_info(dd, "8051: Link up\n");
7641                         queue_work(ppd->hfi1_wq, &ppd->link_up_work);
7642                         host_msg &= ~(u64)LINKUP_ACHIEVED;
7643                 }
7644                 if (host_msg & EXT_DEVICE_CFG_REQ) {
7645                         handle_8051_request(ppd);
7646                         host_msg &= ~(u64)EXT_DEVICE_CFG_REQ;
7647                 }
7648                 if (host_msg & VERIFY_CAP_FRAME) {
7649                         queue_work(ppd->hfi1_wq, &ppd->link_vc_work);
7650                         host_msg &= ~(u64)VERIFY_CAP_FRAME;
7651                 }
7652                 if (host_msg & LINK_GOING_DOWN) {
7653                         const char *extra = "";
7654                         /* no downgrade action needed if going down */
7655                         if (host_msg & LINK_WIDTH_DOWNGRADED) {
7656                                 host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED;
7657                                 extra = " (ignoring downgrade)";
7658                         }
7659                         dd_dev_info(dd, "8051: Link down%s\n", extra);
7660                         queue_link_down = 1;
7661                         host_msg &= ~(u64)LINK_GOING_DOWN;
7662                 }
7663                 if (host_msg & LINK_WIDTH_DOWNGRADED) {
7664                         queue_work(ppd->hfi1_wq, &ppd->link_downgrade_work);
7665                         host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED;
7666                 }
7667                 if (host_msg) {
7668                         /* report remaining messages, but do not do anything */
7669                         dd_dev_info(dd, "8051 info host message: %s\n",
7670                                     dc8051_info_host_msg_string(buf,
7671                                                                 sizeof(buf),
7672                                                                 host_msg));
7673                 }
7674
7675                 reg &= ~DC_DC8051_ERR_FLG_SET_BY_8051_SMASK;
7676         }
7677         if (reg & DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK) {
7678                 /*
7679                  * Lost the 8051 heartbeat.  If this happens, we
7680                  * receive constant interrupts about it.  Disable
7681                  * the interrupt after the first.
7682                  */
7683                 dd_dev_err(dd, "Lost 8051 heartbeat\n");
7684                 write_csr(dd, DC_DC8051_ERR_EN,
7685                           read_csr(dd, DC_DC8051_ERR_EN) &
7686                           ~DC_DC8051_ERR_EN_LOST_8051_HEART_BEAT_SMASK);
7687
7688                 reg &= ~DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK;
7689         }
7690         if (reg) {
7691                 /* report the error, but do not do anything */
7692                 dd_dev_err(dd, "8051 error: %s\n",
7693                            dc8051_err_string(buf, sizeof(buf), reg));
7694         }
7695
7696         if (queue_link_down) {
7697                 /*
7698                  * if the link is already going down or disabled, do not
7699                  * queue another
7700                  */
7701                 if ((ppd->host_link_state &
7702                     (HLS_GOING_OFFLINE | HLS_LINK_COOLDOWN)) ||
7703                     ppd->link_enabled == 0) {
7704                         dd_dev_info(dd, "%s: not queuing link down\n",
7705                                     __func__);
7706                 } else {
7707                         queue_work(ppd->hfi1_wq, &ppd->link_down_work);
7708                 }
7709         }
7710 }
7711
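/* DCC fmconfig error strings, indexed by the DCC_ERR_INFO_FMCONFIG code */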
7712 static const char * const fm_config_txt[] = {
7713 [0] =
7714         "BadHeadDist: Distance violation between two head flits",
7715 [1] =
7716         "BadTailDist: Distance violation between two tail flits",
7717 [2] =
7718         "BadCtrlDist: Distance violation between two credit control flits",
7719 [3] =
7720         "BadCrdAck: Credits return for unsupported VL",
7721 [4] =
7722         "UnsupportedVLMarker: Received VL Marker",
7723 [5] =
7724         "BadPreempt: Exceeded the preemption nesting level",
7725 [6] =
7726         "BadControlFlit: Received unsupported control flit",
7727 /* no 7 */
7728 [8] =
7729         "UnsupportedVLMarker: Received VL Marker for unconfigured or disabled VL",
7730 };
7731
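/* DCC port receive error strings, indexed by the DCC_ERR_INFO_PORTRCV code */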
7732 static const char * const port_rcv_txt[] = {
7733 [1] =
7734         "BadPktLen: Illegal PktLen",
7735 [2] =
7736         "PktLenTooLong: Packet longer than PktLen",
7737 [3] =
7738         "PktLenTooShort: Packet shorter than PktLen",
7739 [4] =
7740         "BadSLID: Illegal SLID (0, using multicast as SLID, does not include security validation of SLID)",
7741 [5] =
7742         "BadDLID: Illegal DLID (0, doesn't match HFI)",
7743 [6] =
7744         "BadL2: Illegal L2 opcode",
7745 [7] =
7746         "BadSC: Unsupported SC",
7747 [9] =
7748         "BadRC: Illegal RC",
7749 [11] =
7750         "PreemptError: Preempting with same VL",
7751 [12] =
7752         "PreemptVL15: Preempting a VL15 packet",
7753 };
7754
7755 #define OPA_LDR_FMCONFIG_OFFSET 16
7756 #define OPA_LDR_PORTRCV_OFFSET 0
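/*
 * Handle a DCC error interrupt.  Latch error details for the fabric
 * manager, decode fmconfig and port receive error codes, and bounce
 * the link when PortErrorAction has the matching bit set.  The
 * OPA_LDR_* offsets above locate each error class's bits within
 * ppd->port_error_action.
 */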
7757 static void handle_dcc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
7758 {
7759         u64 info, hdr0, hdr1;
7760         const char *extra;
7761         char buf[96];
7762         struct hfi1_pportdata *ppd = dd->pport;
7763         u8 lcl_reason = 0;
7764         int do_bounce = 0;
7765
7766         if (reg & DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK) {
7767                 if (!(dd->err_info_uncorrectable & OPA_EI_STATUS_SMASK)) {
7768                         info = read_csr(dd, DCC_ERR_INFO_UNCORRECTABLE);
7769                         dd->err_info_uncorrectable = info & OPA_EI_CODE_SMASK;
7770                         /* set status bit */
7771                         dd->err_info_uncorrectable |= OPA_EI_STATUS_SMASK;
7772                 }
7773                 reg &= ~DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK;
7774         }
7775
7776         if (reg & DCC_ERR_FLG_LINK_ERR_SMASK) {
7778                 /* this counter saturates at (2^32) - 1 */
7779                 if (ppd->link_downed < (u32)UINT_MAX)
7780                         ppd->link_downed++;
7781                 reg &= ~DCC_ERR_FLG_LINK_ERR_SMASK;
7782         }
7783
7784         if (reg & DCC_ERR_FLG_FMCONFIG_ERR_SMASK) {
7785                 u8 reason_valid = 1;
7786
7787                 info = read_csr(dd, DCC_ERR_INFO_FMCONFIG);
7788                 if (!(dd->err_info_fmconfig & OPA_EI_STATUS_SMASK)) {
7789                         dd->err_info_fmconfig = info & OPA_EI_CODE_SMASK;
7790                         /* set status bit */
7791                         dd->err_info_fmconfig |= OPA_EI_STATUS_SMASK;
7792                 }
7793                 switch (info) {
7794                 case 0:
7795                 case 1:
7796                 case 2:
7797                 case 3:
7798                 case 4:
7799                 case 5:
7800                 case 6:
7801                         extra = fm_config_txt[info];
7802                         break;
7803                 case 8:
7804                         extra = fm_config_txt[info];
7805                         if (ppd->port_error_action &
7806                             OPA_PI_MASK_FM_CFG_UNSUPPORTED_VL_MARKER) {
7807                                 do_bounce = 1;
7808                                 /*
7809                                  * lcl_reason cannot be derived from info
7810                                  * for this error
7811                                  */
7812                                 lcl_reason =
7813                                   OPA_LINKDOWN_REASON_UNSUPPORTED_VL_MARKER;
7814                         }
7815                         break;
7816                 default:
7817                         reason_valid = 0;
7818                         snprintf(buf, sizeof(buf), "reserved%lld", info);
7819                         extra = buf;
7820                         break;
7821                 }
7822
7823                 if (reason_valid && !do_bounce) {
7824                         do_bounce = ppd->port_error_action &
7825                                         (1 << (OPA_LDR_FMCONFIG_OFFSET + info));
7826                         lcl_reason = info + OPA_LINKDOWN_REASON_BAD_HEAD_DIST;
7827                 }
7828
7829                 /* just report this */
7830                 dd_dev_info(dd, "DCC Error: fmconfig error: %s\n", extra);
7831                 reg &= ~DCC_ERR_FLG_FMCONFIG_ERR_SMASK;
7832         }
7833
7834         if (reg & DCC_ERR_FLG_RCVPORT_ERR_SMASK) {
7835                 u8 reason_valid = 1;
7836
7837                 info = read_csr(dd, DCC_ERR_INFO_PORTRCV);
7838                 hdr0 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR0);
7839                 hdr1 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR1);
7840                 if (!(dd->err_info_rcvport.status_and_code &
7841                       OPA_EI_STATUS_SMASK)) {
7842                         dd->err_info_rcvport.status_and_code =
7843                                 info & OPA_EI_CODE_SMASK;
7844                         /* set status bit */
7845                         dd->err_info_rcvport.status_and_code |=
7846                                 OPA_EI_STATUS_SMASK;
7847                         /*
7848                          * save first 2 flits in the packet that caused
7849                          * the error
7850                          */
7851                         dd->err_info_rcvport.packet_flit1 = hdr0;
7852                         dd->err_info_rcvport.packet_flit2 = hdr1;
7853                 }
7854                 switch (info) {
7855                 case 1:
7856                 case 2:
7857                 case 3:
7858                 case 4:
7859                 case 5:
7860                 case 6:
7861                 case 7:
7862                 case 9:
7863                 case 11:
7864                 case 12:
7865                         extra = port_rcv_txt[info];
7866                         break;
7867                 default:
7868                         reason_valid = 0;
7869                         snprintf(buf, sizeof(buf), "reserved%lld", info);
7870                         extra = buf;
7871                         break;
7872                 }
7873
7874                 if (reason_valid && !do_bounce) {
7875                         do_bounce = ppd->port_error_action &
7876                                         (1 << (OPA_LDR_PORTRCV_OFFSET + info));
7877                         lcl_reason = info + OPA_LINKDOWN_REASON_RCV_ERROR_0;
7878                 }
7879
7880                 /* just report this */
7881                 dd_dev_info(dd, "DCC Error: PortRcv error: %s\n", extra);
7882                 dd_dev_info(dd, "           hdr0 0x%llx, hdr1 0x%llx\n",
7883                             hdr0, hdr1);
7884
7885                 reg &= ~DCC_ERR_FLG_RCVPORT_ERR_SMASK;
7886         }
7887
7888         if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK) {
7889                 /* informative only */
7890                 dd_dev_info(dd, "8051 access to LCB blocked\n");
7891                 reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK;
7892         }
7893         if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK) {
7894                 /* informative only */
7895                 dd_dev_info(dd, "host access to LCB blocked\n");
7896                 reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK;
7897         }
7898
7899         /* report any remaining errors */
7900         if (reg)
7901                 dd_dev_info(dd, "DCC Error: %s\n",
7902                             dcc_err_string(buf, sizeof(buf), reg));
7903
7904         if (lcl_reason == 0)
7905                 lcl_reason = OPA_LINKDOWN_REASON_UNKNOWN;
7906
7907         if (do_bounce) {
7908                 dd_dev_info(dd, "%s: PortErrorAction bounce\n", __func__);
7909                 set_link_down_reason(ppd, lcl_reason, 0, lcl_reason);
7910                 queue_work(ppd->hfi1_wq, &ppd->link_bounce_work);
7911         }
7912 }
7913
7914 static void handle_lcb_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
7915 {
7916         char buf[96];
7917
7918         dd_dev_info(dd, "LCB Error: %s\n",
7919                     lcb_err_string(buf, sizeof(buf), reg));
7920 }
7921
7922 /*
7923  * CCE block DC interrupt.  Source is < 8.
7924  */
7925 static void is_dc_int(struct hfi1_devdata *dd, unsigned int source)
7926 {
7927         const struct err_reg_info *eri = &dc_errs[source];
7928
7929         if (eri->handler) {
7930                 interrupt_clear_down(dd, 0, eri);
7931         } else if (source == 3 /* dc_lbm_int */) {
7932                 /*
7933                  * This indicates that a parity error has occurred on the
7934                  * address/control lines presented to the LBM.  The error
7935                  * is a single pulse, there is no associated error flag,
7936                  * and it is non-maskable.  This is because if a parity
7937                  * error occurs on the request the request is dropped.
7938                  * This should never occur, but it is nice to know if it
7939                  * ever does.
7940                  */
7941                 dd_dev_err(dd, "Parity error in DC LBM block\n");
7942         } else {
7943                 dd_dev_err(dd, "Invalid DC interrupt %u\n", source);
7944         }
7945 }
7946
7947 /*
7948  * TX block send credit interrupt.  Source is < 160.
7949  */
7950 static void is_send_credit_int(struct hfi1_devdata *dd, unsigned int source)
7951 {
7952         sc_group_release_update(dd, source);
7953 }
7954
7955 /*
7956  * TX block SDMA interrupt.  Source is < 48.
7957  *
7958  * SDMA interrupts are grouped by type:
7959  *
7960  *       0 -  N-1 = SDma
7961  *       N - 2N-1 = SDmaProgress
7962  *      2N - 3N-1 = SDmaIdle
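 *
 * where N = TXE_NUM_SDMA_ENGINES (16 here, given source < 48), so,
 * for example, source 19 decodes to SDmaProgress on engine 3.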
7963  */
7964 static void is_sdma_eng_int(struct hfi1_devdata *dd, unsigned int source)
7965 {
7966         /* what interrupt */
7967         unsigned int what  = source / TXE_NUM_SDMA_ENGINES;
7968         /* which engine */
7969         unsigned int which = source % TXE_NUM_SDMA_ENGINES;
7970
7971 #ifdef CONFIG_SDMA_VERBOSITY
7972         dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", which,
7973                    slashstrip(__FILE__), __LINE__, __func__);
7974         sdma_dumpstate(&dd->per_sdma[which]);
7975 #endif
7976
7977         if (likely(what < 3 && which < dd->num_sdma)) {
7978                 sdma_engine_interrupt(&dd->per_sdma[which], 1ull << source);
7979         } else {
7980                 /* should not happen */
7981                 dd_dev_err(dd, "Invalid SDMA interrupt 0x%x\n", source);
7982         }
7983 }
7984
7985 /*
7986  * RX block receive available interrupt.  Source is < 160.
7987  */
7988 static void is_rcv_avail_int(struct hfi1_devdata *dd, unsigned int source)
7989 {
7990         struct hfi1_ctxtdata *rcd;
7991         char *err_detail;
7992
7993         if (likely(source < dd->num_rcv_contexts)) {
7994                 rcd = dd->rcd[source];
7995                 if (rcd) {
7996                         if (source < dd->first_user_ctxt)
7997                                 rcd->do_interrupt(rcd, 0);
7998                         else
7999                                 handle_user_interrupt(rcd);
8000                         return; /* OK */
8001                 }
8002                 /* received an interrupt, but no rcd */
8003                 err_detail = "dataless";
8004         } else {
8005                 /* received an interrupt, but are not using that context */
8006                 err_detail = "out of range";
8007         }
8008         dd_dev_err(dd, "unexpected %s receive available context interrupt %u\n",
8009                    err_detail, source);
8010 }
8011
8012 /*
8013  * RX block receive urgent interrupt.  Source is < 160.
8014  */
8015 static void is_rcv_urgent_int(struct hfi1_devdata *dd, unsigned int source)
8016 {
8017         struct hfi1_ctxtdata *rcd;
8018         char *err_detail;
8019
8020         if (likely(source < dd->num_rcv_contexts)) {
8021                 rcd = dd->rcd[source];
8022                 if (rcd) {
8023                         /* only pay attention to user urgent interrupts */
8024                         if (source >= dd->first_user_ctxt)
8025                                 handle_user_interrupt(rcd);
8026                         return; /* OK */
8027                 }
8028                 /* received an interrupt, but no rcd */
8029                 err_detail = "dataless";
8030         } else {
8031                 /* received an interrupt, but are not using that context */
8032                 err_detail = "out of range";
8033         }
8034         dd_dev_err(dd, "unexpected %s receive urgent context interrupt %u\n",
8035                    err_detail, source);
8036 }
8037
8038 /*
8039  * Reserved range interrupt.  Should not be called in normal operation.
8040  */
8041 static void is_reserved_int(struct hfi1_devdata *dd, unsigned int source)
8042 {
8043         char name[64];
8044
8045         dd_dev_err(dd, "unexpected %s interrupt\n",
8046                    is_reserved_name(name, sizeof(name), source));
8047 }
8048
8049 static const struct is_table is_table[] = {
8050 /*
8051  * start                 end
8052  *                              name func               interrupt func
8053  */
8054 { IS_GENERAL_ERR_START,  IS_GENERAL_ERR_END,
8055                                 is_misc_err_name,       is_misc_err_int },
8056 { IS_SDMAENG_ERR_START,  IS_SDMAENG_ERR_END,
8057                                 is_sdma_eng_err_name,   is_sdma_eng_err_int },
8058 { IS_SENDCTXT_ERR_START, IS_SENDCTXT_ERR_END,
8059                                 is_sendctxt_err_name,   is_sendctxt_err_int },
8060 { IS_SDMA_START,             IS_SDMA_END,
8061                                 is_sdma_eng_name,       is_sdma_eng_int },
8062 { IS_VARIOUS_START,          IS_VARIOUS_END,
8063                                 is_various_name,        is_various_int },
8064 { IS_DC_START,       IS_DC_END,
8065                                 is_dc_name,             is_dc_int },
8066 { IS_RCVAVAIL_START,     IS_RCVAVAIL_END,
8067                                 is_rcv_avail_name,      is_rcv_avail_int },
8068 { IS_RCVURGENT_START,    IS_RCVURGENT_END,
8069                                 is_rcv_urgent_name,     is_rcv_urgent_int },
8070 { IS_SENDCREDIT_START,   IS_SENDCREDIT_END,
8071                                 is_send_credit_name,    is_send_credit_int},
8072 { IS_RESERVED_START,     IS_RESERVED_END,
8073                                 is_reserved_name,       is_reserved_int},
8074 };
8075
8076 /*
8077  * Interrupt source interrupt - called when the given source has an interrupt.
8078  * Source is a bit index into an array of 64-bit integers.
8079  */
8080 static void is_interrupt(struct hfi1_devdata *dd, unsigned int source)
8081 {
8082         const struct is_table *entry;
8083
8084         /* avoids a double compare by walking the table in-order */
8085         for (entry = &is_table[0]; entry->is_name; entry++) {
8086                 if (source < entry->end) {
8087                         trace_hfi1_interrupt(dd, entry, source);
8088                         entry->is_int(dd, source - entry->start);
8089                         return;
8090                 }
8091         }
8092         /* fell off the end */
8093         dd_dev_err(dd, "invalid interrupt source %u\n", source);
8094 }
8095
8096 /*
8097  * General interrupt handler.  This is able to correctly handle
8098  * all interrupts in case INTx is used.
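 *
 * All enabled status bits are read and cleared first; the handlers then
 * run from the saved copy, so a source that re-asserts while being
 * handled raises a fresh interrupt rather than being lost.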
8099  */
8100 static irqreturn_t general_interrupt(int irq, void *data)
8101 {
8102         struct hfi1_devdata *dd = data;
8103         u64 regs[CCE_NUM_INT_CSRS];
8104         u32 bit;
8105         int i;
8106
8107         this_cpu_inc(*dd->int_counter);
8108
8109         /* phase 1: scan and clear all handled interrupts */
8110         for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
8111                 if (dd->gi_mask[i] == 0) {
8112                         regs[i] = 0;    /* used later */
8113                         continue;
8114                 }
8115                 regs[i] = read_csr(dd, CCE_INT_STATUS + (8 * i)) &
8116                                 dd->gi_mask[i];
8117                 /* only clear if anything is set */
8118                 if (regs[i])
8119                         write_csr(dd, CCE_INT_CLEAR + (8 * i), regs[i]);
8120         }
8121
8122         /* phase 2: call the appropriate handler */
8123         for_each_set_bit(bit, (unsigned long *)&regs[0],
8124                          CCE_NUM_INT_CSRS * 64) {
8125                 is_interrupt(dd, bit);
8126         }
8127
8128         return IRQ_HANDLED;
8129 }
8130
8131 static irqreturn_t sdma_interrupt(int irq, void *data)
8132 {
8133         struct sdma_engine *sde = data;
8134         struct hfi1_devdata *dd = sde->dd;
8135         u64 status;
8136
8137 #ifdef CONFIG_SDMA_VERBOSITY
8138         dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
8139                    slashstrip(__FILE__), __LINE__, __func__);
8140         sdma_dumpstate(sde);
8141 #endif
8142
8143         this_cpu_inc(*dd->int_counter);
8144
8145         /* This read_csr is really bad in the hot path */
8146         status = read_csr(dd,
8147                           CCE_INT_STATUS + (8 * (IS_SDMA_START / 64)))
8148                           & sde->imask;
8149         if (likely(status)) {
8150                 /* clear the interrupt(s) */
8151                 write_csr(dd,
8152                           CCE_INT_CLEAR + (8 * (IS_SDMA_START / 64)),
8153                           status);
8154
8155                 /* handle the interrupt(s) */
8156                 sdma_engine_interrupt(sde, status);
8157         } else {
8158                 dd_dev_err(dd, "SDMA engine %u interrupt, but no status bits set\n",
8159                            sde->this_idx);
        }
8160
8161         return IRQ_HANDLED;
8162 }
8163
8164 /*
8165  * Clear the receive interrupt.  Use a read of the interrupt clear CSR
8166  * to ensure that the write completed.  This does NOT guarantee that
8167  * queued DMA writes to memory from the chip are pushed.
8168  */
8169 static inline void clear_recv_intr(struct hfi1_ctxtdata *rcd)
8170 {
8171         struct hfi1_devdata *dd = rcd->dd;
8172         u32 addr = CCE_INT_CLEAR + (8 * rcd->ireg);
8173
8174         mmiowb();       /* make sure everything before is written */
8175         write_csr(dd, addr, rcd->imask);
8176         /* force the above write on the chip and get a value back */
8177         (void)read_csr(dd, addr);
8178 }
8179
8180 /* force the receive interrupt */
8181 void force_recv_intr(struct hfi1_ctxtdata *rcd)
8182 {
8183         write_csr(rcd->dd, CCE_INT_FORCE + (8 * rcd->ireg), rcd->imask);
8184 }
8185
8186 /*
8187  * Return non-zero if a packet is present.
8188  *
8189  * This routine is called when rechecking for packets after the RcvAvail
8190  * interrupt has been cleared down.  First, do a quick check of memory for
8191  * a packet present.  If not found, use an expensive CSR read of the context
8192  * tail to determine the actual tail.  The CSR read is necessary because there
8193  * is no method to push pending DMAs to memory other than an interrupt and we
8194  * are trying to determine if we need to force an interrupt.
8195  */
8196 static inline int check_packet_present(struct hfi1_ctxtdata *rcd)
8197 {
8198         u32 tail;
8199         int present;
8200
8201         if (!HFI1_CAP_IS_KSET(DMA_RTAIL))
8202                 present = (rcd->seq_cnt ==
8203                                 rhf_rcv_seq(rhf_to_cpu(get_rhf_addr(rcd))));
8204         else /* is RDMA rtail */
8205                 present = (rcd->head != get_rcvhdrtail(rcd));
8206
8207         if (present)
8208                 return 1;
8209
8210         /* fall back to a CSR read, correct independent of DMA_RTAIL */
8211         tail = (u32)read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL);
8212         return rcd->head != tail;
8213 }
8214
8215 /*
8216  * Receive packet IRQ handler.  This routine expects to be on its own IRQ.
8217  * This routine will try to handle packets immediately (latency), but if
8218  * it finds too many, it will invoke the thread handler (bandwidth).  The
8219  * chip receive interrupt is *not* cleared down until this or the thread (if
8220  * invoked) is finished.  The intent is to avoid extra interrupts while we
8221  * are processing packets anyway.
8222  */
8223 static irqreturn_t receive_context_interrupt(int irq, void *data)
8224 {
8225         struct hfi1_ctxtdata *rcd = data;
8226         struct hfi1_devdata *dd = rcd->dd;
8227         int disposition;
8228         int present;
8229
8230         trace_hfi1_receive_interrupt(dd, rcd->ctxt);
8231         this_cpu_inc(*dd->int_counter);
8232         aspm_ctx_disable(rcd);
8233
8234         /* receive interrupt remains blocked while processing packets */
8235         disposition = rcd->do_interrupt(rcd, 0);
8236
8237         /*
8238          * Too many packets were seen while processing packets in this
8239          * IRQ handler.  Invoke the handler thread.  The receive interrupt
8240          * remains blocked.
8241          */
8242         if (disposition == RCV_PKT_LIMIT)
8243                 return IRQ_WAKE_THREAD;
8244
8245         /*
8246          * The packet processor detected no more packets.  Clear the receive
8247  * interrupt and recheck for a packet that may have arrived
8248          * after the previous check and interrupt clear.  If a packet arrived,
8249          * force another interrupt.
8250          */
8251         clear_recv_intr(rcd);
8252         present = check_packet_present(rcd);
8253         if (present)
8254                 force_recv_intr(rcd);
8255
8256         return IRQ_HANDLED;
8257 }
8258
8259 /*
8260  * Receive packet thread handler.  This expects to be invoked with the
8261  * receive interrupt still blocked.
8262  */
8263 static irqreturn_t receive_context_thread(int irq, void *data)
8264 {
8265         struct hfi1_ctxtdata *rcd = data;
8266         int present;
8267
8268         /* receive interrupt is still blocked from the IRQ handler */
8269         (void)rcd->do_interrupt(rcd, 1);
8270
8271         /*
8272          * The packet processor will only return if it detected no more
8273          * packets.  Hold IRQs here so we can safely clear the interrupt and
8274          * recheck for a packet that may have arrived after the previous
8275          * check and the interrupt clear.  If a packet arrived, force another
8276          * interrupt.
8277          */
8278         local_irq_disable();
8279         clear_recv_intr(rcd);
8280         present = check_packet_present(rcd);
8281         if (present)
8282                 force_recv_intr(rcd);
8283         local_irq_enable();
8284
8285         return IRQ_HANDLED;
8286 }
8287
8288 /* ========================================================================= */
8289
8290 u32 read_physical_state(struct hfi1_devdata *dd)
8291 {
8292         u64 reg;
8293
8294         reg = read_csr(dd, DC_DC8051_STS_CUR_STATE);
8295         return (reg >> DC_DC8051_STS_CUR_STATE_PORT_SHIFT)
8296                                 & DC_DC8051_STS_CUR_STATE_PORT_MASK;
8297 }
8298
8299 u32 read_logical_state(struct hfi1_devdata *dd)
8300 {
8301         u64 reg;
8302
8303         reg = read_csr(dd, DCC_CFG_PORT_CONFIG);
8304         return (reg >> DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT)
8305                                 & DCC_CFG_PORT_CONFIG_LINK_STATE_MASK;
8306 }
8307
8308 static void set_logical_state(struct hfi1_devdata *dd, u32 chip_lstate)
8309 {
8310         u64 reg;
8311
8312         reg = read_csr(dd, DCC_CFG_PORT_CONFIG);
8313         /* clear current state, set new state */
8314         reg &= ~DCC_CFG_PORT_CONFIG_LINK_STATE_SMASK;
8315         reg |= (u64)chip_lstate << DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT;
8316         write_csr(dd, DCC_CFG_PORT_CONFIG, reg);
8317 }
8318
8319 /*
8320  * Use the 8051 to read a LCB CSR.
8321  */
8322 static int read_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 *data)
8323 {
8324         u32 regno;
8325         int ret;
8326
8327         if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
8328                 if (acquire_lcb_access(dd, 0) == 0) {
8329                         *data = read_csr(dd, addr);
8330                         release_lcb_access(dd, 0);
8331                         return 0;
8332                 }
8333                 return -EBUSY;
8334         }
8335
8336         /* register is an index of LCB registers: (offset - base) / 8 */
8337         regno = (addr - DC_LCB_CFG_RUN) >> 3;
8338         ret = do_8051_command(dd, HCMD_READ_LCB_CSR, regno, data);
8339         if (ret != HCMD_SUCCESS)
8340                 return -EBUSY;
8341         return 0;
8342 }
8343
8344 /*
8345  * Read an LCB CSR.  Access may not be in host control, so check.
8346  * Return 0 on success, -EBUSY on failure.
8347  */
8348 int read_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 *data)
8349 {
8350         struct hfi1_pportdata *ppd = dd->pport;
8351
8352         /* if up, go through the 8051 for the value */
8353         if (ppd->host_link_state & HLS_UP)
8354                 return read_lcb_via_8051(dd, addr, data);
8355         /* if going up or down, no access */
8356         if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE))
8357                 return -EBUSY;
8358         /* otherwise, host has access */
8359         *data = read_csr(dd, addr);
8360         return 0;
8361 }
8362
8363 /*
8364  * Use the 8051 to write a LCB CSR.
8365  */
8366 static int write_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 data)
8367 {
8368         u32 regno;
8369         int ret;
8370
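        /*
         * In the simulator, or with 8051 firmware older than 0.20,
         * write the CSR directly under host LCB access instead of
         * issuing an 8051 command.
         */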
8371         if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR ||
8372             (dd->dc8051_ver < dc8051_ver(0, 20))) {
8373                 if (acquire_lcb_access(dd, 0) == 0) {
8374                         write_csr(dd, addr, data);
8375                         release_lcb_access(dd, 0);
8376                         return 0;
8377                 }
8378                 return -EBUSY;
8379         }
8380
8381         /* register is an index of LCB registers: (offset - base) / 8 */
8382         regno = (addr - DC_LCB_CFG_RUN) >> 3;
8383         ret = do_8051_command(dd, HCMD_WRITE_LCB_CSR, regno, &data);
8384         if (ret != HCMD_SUCCESS)
8385                 return -EBUSY;
8386         return 0;
8387 }
8388
8389 /*
8390  * Write an LCB CSR.  Access may not be in host control, so check.
8391  * Return 0 on success, -EBUSY on failure.
8392  */
8393 int write_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 data)
8394 {
8395         struct hfi1_pportdata *ppd = dd->pport;
8396
8397         /* if up, go through the 8051 for the value */
8398         if (ppd->host_link_state & HLS_UP)
8399                 return write_lcb_via_8051(dd, addr, data);
8400         /* if going up or down, no access */
8401         if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE))
8402                 return -EBUSY;
8403         /* otherwise, host has access */
8404         write_csr(dd, addr, data);
8405         return 0;
8406 }
8407
8408 /*
8409  * Returns:
8410  *      < 0 = Linux error, not able to get access
8411  *      > 0 = 8051 command RETURN_CODE
8412  */
8413 static int do_8051_command(
8414         struct hfi1_devdata *dd,
8415         u32 type,
8416         u64 in_data,
8417         u64 *out_data)
8418 {
8419         u64 reg, completed;
8420         int return_code;
8421         unsigned long flags;
8422         unsigned long timeout;
8423
8424         hfi1_cdbg(DC8051, "type %d, data 0x%012llx", type, in_data);
8425
8426         /*
8427          * Alternative to holding the lock for a long time:
8428          * - keep busy wait - have other users bounce off
8429          */
8430         spin_lock_irqsave(&dd->dc8051_lock, flags);
8431
8432         /* We can't send any commands to the 8051 if it's in reset */
8433         if (dd->dc_shutdown) {
8434                 return_code = -ENODEV;
8435                 goto fail;
8436         }
8437
8438         /*
8439          * If an 8051 host command timed out previously, then the 8051 is
8440          * stuck.
8441          *
8442          * On first timeout, attempt to reset and restart the entire DC
8443          * block (including 8051). (Is this too big of a hammer?)
8444          *
8445          * If the 8051 times out a second time, the reset did not bring it
8446          * back to healthy life. In that case, fail any subsequent commands.
8447          */
8448         if (dd->dc8051_timed_out) {
8449                 if (dd->dc8051_timed_out > 1) {
8450                         dd_dev_err(dd,
8451                                    "Previous 8051 host command timed out, skipping command %u\n",
8452                                    type);
8453                         return_code = -ENXIO;
8454                         goto fail;
8455                 }
8456                 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
8457                 dc_shutdown(dd);
8458                 dc_start(dd);
8459                 spin_lock_irqsave(&dd->dc8051_lock, flags);
8460         }
8461
8462         /*
8463          * If there is no timeout, then the 8051 command interface is
8464          * waiting for a command.
8465          */
8466
8467         /*
8468          * When writing an LCB CSR, out_data contains the full value to
8469          * be written, while in_data contains the relative LCB address
8470          * in 7:0.  Do the work of distributing the write data to where
8471          * it needs to go here, rather than in the caller:
8472          *
8473          * Write data
8474          *   39:00 -> in_data[47:8]
8475          *   47:40 -> DC8051_CFG_EXT_DEV_0.RETURN_CODE
8476          *   63:48 -> DC8051_CFG_EXT_DEV_0.RSP_DATA
8477          */
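        /*
         * For example, an LCB write of 0x1122334455667788 splits as:
         *   bits 39:0  (0x4455667788) -> in_data[47:8]
         *   bits 47:40 (0x33)         -> EXT_DEV_0.RETURN_CODE
         *   bits 63:48 (0x1122)       -> EXT_DEV_0.RSP_DATA
         */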
8478         if (type == HCMD_WRITE_LCB_CSR) {
8479                 in_data |= ((*out_data) & 0xffffffffffull) << 8;
8480                 reg = ((((*out_data) >> 40) & 0xff) <<
8481                                 DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT)
8482                       | ((((*out_data) >> 48) & 0xffff) <<
8483                                 DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
8484                 write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, reg);
8485         }
8486
8487         /*
8488          * Do two writes: the first to stabilize the type and req_data, the
8489          * second to activate.
8490          */
8491         reg = ((u64)type & DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_MASK)
8492                         << DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_SHIFT
8493                 | (in_data & DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_MASK)
8494                         << DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_SHIFT;
8495         write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);
8496         reg |= DC_DC8051_CFG_HOST_CMD_0_REQ_NEW_SMASK;
8497         write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);
8498
8499         /* wait for completion, alternate: interrupt */
8500         timeout = jiffies + msecs_to_jiffies(DC8051_COMMAND_TIMEOUT);
8501         while (1) {
8502                 reg = read_csr(dd, DC_DC8051_CFG_HOST_CMD_1);
8503                 completed = reg & DC_DC8051_CFG_HOST_CMD_1_COMPLETED_SMASK;
8504                 if (completed)
8505                         break;
8506                 if (time_after(jiffies, timeout)) {
8507                         dd->dc8051_timed_out++;
8508                         dd_dev_err(dd, "8051 host command %u timeout\n", type);
8509                         if (out_data)
8510                                 *out_data = 0;
8511                         return_code = -ETIMEDOUT;
8512                         goto fail;
8513                 }
8514                 udelay(2);
8515         }
8516
8517         if (out_data) {
8518                 *out_data = (reg >> DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_SHIFT)
8519                                 & DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_MASK;
8520                 if (type == HCMD_READ_LCB_CSR) {
8521                         /* top 16 bits are in a different register */
8522                         *out_data |= (read_csr(dd, DC_DC8051_CFG_EXT_DEV_1)
8523                                 & DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SMASK)
8524                                 << (48
8525                                     - DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT);
8526                 }
8527         }
8528         return_code = (reg >> DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_SHIFT)
8529                                 & DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_MASK;
8530         dd->dc8051_timed_out = 0;
8531         /*
8532          * Clear command for next user.
8533          */
8534         write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, 0);
8535
8536 fail:
8537         spin_unlock_irqrestore(&dd->dc8051_lock, flags);
8538
8539         return return_code;
8540 }
8541
8542 static int set_physical_link_state(struct hfi1_devdata *dd, u64 state)
8543 {
8544         return do_8051_command(dd, HCMD_CHANGE_PHY_STATE, state, NULL);
8545 }
8546
8547 int load_8051_config(struct hfi1_devdata *dd, u8 field_id,
8548                      u8 lane_id, u32 config_data)
8549 {
8550         u64 data;
8551         int ret;
8552
8553         data = (u64)field_id << LOAD_DATA_FIELD_ID_SHIFT
8554                 | (u64)lane_id << LOAD_DATA_LANE_ID_SHIFT
8555                 | (u64)config_data << LOAD_DATA_DATA_SHIFT;
8556         ret = do_8051_command(dd, HCMD_LOAD_CONFIG_DATA, data, NULL);
8557         if (ret != HCMD_SUCCESS) {
8558                 dd_dev_err(dd,
8559                            "load 8051 config: field id %d, lane %d, err %d\n",
8560                            (int)field_id, (int)lane_id, ret);
8561         }
8562         return ret;
8563 }
8564
8565 /*
8566  * Read the 8051 firmware "registers".  Use the RAM directly.  Always
8567  * set the result, even on error.
8568  * Return 0 on success, -errno on failure
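 *
 * Fields are 4 bytes wide.  A lane_id of 4 or more (GENERAL_CONFIG)
 * selects the general fields at the start of the RAM; lanes 0-3 index
 * past them.  The 8-byte read below fetches the aligned pair of fields
 * and (addr & 0x4) selects the wanted half.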
8569  */
8570 int read_8051_config(struct hfi1_devdata *dd, u8 field_id, u8 lane_id,
8571                      u32 *result)
8572 {
8573         u64 big_data;
8574         u32 addr;
8575         int ret;
8576
8577         /* address start depends on the lane_id */
8578         if (lane_id < 4)
8579                 addr = (4 * NUM_GENERAL_FIELDS)
8580                         + (lane_id * 4 * NUM_LANE_FIELDS);
8581         else
8582                 addr = 0;
8583         addr += field_id * 4;
8584
8585         /* read is in 8-byte chunks, hardware will truncate the address down */
8586         ret = read_8051_data(dd, addr, 8, &big_data);
8587
8588         if (ret == 0) {
8589                 /* extract the 4 bytes we want */
8590                 if (addr & 0x4)
8591                         *result = (u32)(big_data >> 32);
8592                 else
8593                         *result = (u32)big_data;
8594         } else {
8595                 *result = 0;
8596                 dd_dev_err(dd, "%s: direct read failed, lane %d, field %d!\n",
8597                            __func__, lane_id, field_id);
8598         }
8599
8600         return ret;
8601 }
8602
8603 static int write_vc_local_phy(struct hfi1_devdata *dd, u8 power_management,
8604                               u8 continuous)
8605 {
8606         u32 frame;
8607
8608         frame = continuous << CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT
8609                 | power_management << POWER_MANAGEMENT_SHIFT;
8610         return load_8051_config(dd, VERIFY_CAP_LOCAL_PHY,
8611                                 GENERAL_CONFIG, frame);
8612 }
8613
8614 static int write_vc_local_fabric(struct hfi1_devdata *dd, u8 vau, u8 z, u8 vcu,
8615                                  u16 vl15buf, u8 crc_sizes)
8616 {
8617         u32 frame;
8618
8619         frame = (u32)vau << VAU_SHIFT
8620                 | (u32)z << Z_SHIFT
8621                 | (u32)vcu << VCU_SHIFT
8622                 | (u32)vl15buf << VL15BUF_SHIFT
8623                 | (u32)crc_sizes << CRC_SIZES_SHIFT;
8624         return load_8051_config(dd, VERIFY_CAP_LOCAL_FABRIC,
8625                                 GENERAL_CONFIG, frame);
8626 }
8627
8628 static void read_vc_local_link_width(struct hfi1_devdata *dd, u8 *misc_bits,
8629                                      u8 *flag_bits, u16 *link_widths)
8630 {
8631         u32 frame;
8632
8633         read_8051_config(dd, VERIFY_CAP_LOCAL_LINK_WIDTH, GENERAL_CONFIG,
8634                          &frame);
8635         *misc_bits = (frame >> MISC_CONFIG_BITS_SHIFT) & MISC_CONFIG_BITS_MASK;
8636         *flag_bits = (frame >> LOCAL_FLAG_BITS_SHIFT) & LOCAL_FLAG_BITS_MASK;
8637         *link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK;
8638 }
8639
8640 static int write_vc_local_link_width(struct hfi1_devdata *dd,
8641                                      u8 misc_bits,
8642                                      u8 flag_bits,
8643                                      u16 link_widths)
8644 {
8645         u32 frame;
8646
8647         frame = (u32)misc_bits << MISC_CONFIG_BITS_SHIFT
8648                 | (u32)flag_bits << LOCAL_FLAG_BITS_SHIFT
8649                 | (u32)link_widths << LINK_WIDTH_SHIFT;
8650         return load_8051_config(dd, VERIFY_CAP_LOCAL_LINK_WIDTH, GENERAL_CONFIG,
8651                      frame);
8652 }
8653
8654 static int write_local_device_id(struct hfi1_devdata *dd, u16 device_id,
8655                                  u8 device_rev)
8656 {
8657         u32 frame;
8658
8659         frame = ((u32)device_id << LOCAL_DEVICE_ID_SHIFT)
8660                 | ((u32)device_rev << LOCAL_DEVICE_REV_SHIFT);
8661         return load_8051_config(dd, LOCAL_DEVICE_ID, GENERAL_CONFIG, frame);
8662 }
8663
8664 static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
8665                                   u8 *device_rev)
8666 {
8667         u32 frame;
8668
8669         read_8051_config(dd, REMOTE_DEVICE_ID, GENERAL_CONFIG, &frame);
8670         *device_id = (frame >> REMOTE_DEVICE_ID_SHIFT) & REMOTE_DEVICE_ID_MASK;
8671         *device_rev = (frame >> REMOTE_DEVICE_REV_SHIFT)
8672                         & REMOTE_DEVICE_REV_MASK;
8673 }
8674
8675 void read_misc_status(struct hfi1_devdata *dd, u8 *ver_a, u8 *ver_b)
8676 {
8677         u32 frame;
8678
8679         read_8051_config(dd, MISC_STATUS, GENERAL_CONFIG, &frame);
8680         *ver_a = (frame >> STS_FM_VERSION_A_SHIFT) & STS_FM_VERSION_A_MASK;
8681         *ver_b = (frame >> STS_FM_VERSION_B_SHIFT) & STS_FM_VERSION_B_MASK;
8682 }
8683
8684 static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
8685                                u8 *continuous)
8686 {
8687         u32 frame;
8688
8689         read_8051_config(dd, VERIFY_CAP_REMOTE_PHY, GENERAL_CONFIG, &frame);
8690         *power_management = (frame >> POWER_MANAGEMENT_SHIFT)
8691                                         & POWER_MANAGEMENT_MASK;
8692         *continuous = (frame >> CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT)
8693                                         & CONTINIOUS_REMOTE_UPDATE_SUPPORT_MASK;
8694 }
8695
8696 static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
8697                                   u8 *vcu, u16 *vl15buf, u8 *crc_sizes)
8698 {
8699         u32 frame;
8700
8701         read_8051_config(dd, VERIFY_CAP_REMOTE_FABRIC, GENERAL_CONFIG, &frame);
8702         *vau = (frame >> VAU_SHIFT) & VAU_MASK;
8703         *z = (frame >> Z_SHIFT) & Z_MASK;
8704         *vcu = (frame >> VCU_SHIFT) & VCU_MASK;
8705         *vl15buf = (frame >> VL15BUF_SHIFT) & VL15BUF_MASK;
8706         *crc_sizes = (frame >> CRC_SIZES_SHIFT) & CRC_SIZES_MASK;
8707 }
8708
8709 static void read_vc_remote_link_width(struct hfi1_devdata *dd,
8710                                       u8 *remote_tx_rate,
8711                                       u16 *link_widths)
8712 {
8713         u32 frame;
8714
8715         read_8051_config(dd, VERIFY_CAP_REMOTE_LINK_WIDTH, GENERAL_CONFIG,
8716                          &frame);
8717         *remote_tx_rate = (frame >> REMOTE_TX_RATE_SHIFT)
8718                                 & REMOTE_TX_RATE_MASK;
8719         *link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK;
8720 }
8721
8722 static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx)
8723 {
8724         u32 frame;
8725
8726         read_8051_config(dd, LOCAL_LNI_INFO, GENERAL_CONFIG, &frame);
8727         *enable_lane_rx = (frame >> ENABLE_LANE_RX_SHIFT) & ENABLE_LANE_RX_MASK;
8728 }
8729
8730 static void read_mgmt_allowed(struct hfi1_devdata *dd, u8 *mgmt_allowed)
8731 {
8732         u32 frame;
8733
8734         read_8051_config(dd, REMOTE_LNI_INFO, GENERAL_CONFIG, &frame);
8735         *mgmt_allowed = (frame >> MGMT_ALLOWED_SHIFT) & MGMT_ALLOWED_MASK;
8736 }
8737
8738 static void read_last_local_state(struct hfi1_devdata *dd, u32 *lls)
8739 {
8740         read_8051_config(dd, LAST_LOCAL_STATE_COMPLETE, GENERAL_CONFIG, lls);
8741 }
8742
8743 static void read_last_remote_state(struct hfi1_devdata *dd, u32 *lrs)
8744 {
8745         read_8051_config(dd, LAST_REMOTE_STATE_COMPLETE, GENERAL_CONFIG, lrs);
8746 }
8747
8748 void hfi1_read_link_quality(struct hfi1_devdata *dd, u8 *link_quality)
8749 {
8750         u32 frame;
8751         int ret;
8752
8753         *link_quality = 0;
8754         if (dd->pport->host_link_state & HLS_UP) {
8755                 ret = read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG,
8756                                        &frame);
8757                 if (ret == 0)
8758                         *link_quality = (frame >> LINK_QUALITY_SHIFT)
8759                                                 & LINK_QUALITY_MASK;
8760         }
8761 }
8762
8763 static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc)
8764 {
8765         u32 frame;
8766
8767         read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG, &frame);
8768         *pdrrc = (frame >> DOWN_REMOTE_REASON_SHIFT) & DOWN_REMOTE_REASON_MASK;
8769 }
8770
8771 static void read_link_down_reason(struct hfi1_devdata *dd, u8 *ldr)
8772 {
8773         u32 frame;
8774
8775         read_8051_config(dd, LINK_DOWN_REASON, GENERAL_CONFIG, &frame);
8776         *ldr = (frame & 0xff);
8777 }
8778
8779 static int read_tx_settings(struct hfi1_devdata *dd,
8780                             u8 *enable_lane_tx,
8781                             u8 *tx_polarity_inversion,
8782                             u8 *rx_polarity_inversion,
8783                             u8 *max_rate)
8784 {
8785         u32 frame;
8786         int ret;
8787
8788         ret = read_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, &frame);
8789         *enable_lane_tx = (frame >> ENABLE_LANE_TX_SHIFT)
8790                                 & ENABLE_LANE_TX_MASK;
8791         *tx_polarity_inversion = (frame >> TX_POLARITY_INVERSION_SHIFT)
8792                                 & TX_POLARITY_INVERSION_MASK;
8793         *rx_polarity_inversion = (frame >> RX_POLARITY_INVERSION_SHIFT)
8794                                 & RX_POLARITY_INVERSION_MASK;
8795         *max_rate = (frame >> MAX_RATE_SHIFT) & MAX_RATE_MASK;
8796         return ret;
8797 }
8798
8799 static int write_tx_settings(struct hfi1_devdata *dd,
8800                              u8 enable_lane_tx,
8801                              u8 tx_polarity_inversion,
8802                              u8 rx_polarity_inversion,
8803                              u8 max_rate)
8804 {
8805         u32 frame;
8806
8807         /* no need to mask, all variable sizes match field widths */
8808         frame = enable_lane_tx << ENABLE_LANE_TX_SHIFT
8809                 | tx_polarity_inversion << TX_POLARITY_INVERSION_SHIFT
8810                 | rx_polarity_inversion << RX_POLARITY_INVERSION_SHIFT
8811                 | max_rate << MAX_RATE_SHIFT;
8812         return load_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, frame);
8813 }
8814
8815 /*
8816  * Read an idle LCB message.
8817  *
8818  * Returns 0 on success, -EINVAL on error
8819  */
8820 static int read_idle_message(struct hfi1_devdata *dd, u64 type, u64 *data_out)
8821 {
8822         int ret;
8823
8824         ret = do_8051_command(dd, HCMD_READ_LCB_IDLE_MSG, type, data_out);
8825         if (ret != HCMD_SUCCESS) {
8826                 dd_dev_err(dd, "read idle message: type %d, err %d\n",
8827                            (u32)type, ret);
8828                 return -EINVAL;
8829         }
8830         dd_dev_info(dd, "%s: read idle message 0x%llx\n", __func__, *data_out);
8831         /* return only the payload as we already know the type */
8832         *data_out >>= IDLE_PAYLOAD_SHIFT;
8833         return 0;
8834 }
8835
8836 /*
8837  * Read an idle SMA message.  To be done in response to a notification from
8838  * the 8051.
8839  *
8840  * Returns 0 on success, -EINVAL on error
8841  */
8842 static int read_idle_sma(struct hfi1_devdata *dd, u64 *data)
8843 {
8844         return read_idle_message(dd, (u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT,
8845                                  data);
8846 }
8847
8848 /*
8849  * Send an idle LCB message.
8850  *
8851  * Returns 0 on success, -EINVAL on error
8852  */
8853 static int send_idle_message(struct hfi1_devdata *dd, u64 data)
8854 {
8855         int ret;
8856
8857         dd_dev_info(dd, "%s: sending idle message 0x%llx\n", __func__, data);
8858         ret = do_8051_command(dd, HCMD_SEND_LCB_IDLE_MSG, data, NULL);
8859         if (ret != HCMD_SUCCESS) {
8860                 dd_dev_err(dd, "send idle message: data 0x%llx, err %d\n",
8861                            data, ret);
8862                 return -EINVAL;
8863         }
8864         return 0;
8865 }
8866
8867 /*
8868  * Send an idle SMA message.
8869  *
8870  * Returns 0 on success, -EINVAL on error
8871  */
8872 int send_idle_sma(struct hfi1_devdata *dd, u64 message)
8873 {
8874         u64 data;
8875
8876         data = ((message & IDLE_PAYLOAD_MASK) << IDLE_PAYLOAD_SHIFT) |
8877                 ((u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT);
8878         return send_idle_message(dd, data);
8879 }
8880
8881 /*
8882  * Initialize the LCB then do a quick link up.  This may or may not be
8883  * in loopback.
8884  *
8885  * return 0 on success, -errno on error
8886  */
8887 static int do_quick_linkup(struct hfi1_devdata *dd)
8888 {
8889         u64 reg;
8890         unsigned long timeout;
8891         int ret;
8892
8893         lcb_shutdown(dd, 0);
8894
8895         if (loopback) {
8896                 /* LCB_CFG_LOOPBACK.VAL = 2 */
8897                 /* LCB_CFG_LANE_WIDTH.VAL = 0 */
8898                 write_csr(dd, DC_LCB_CFG_LOOPBACK,
8899                           IB_PACKET_TYPE << DC_LCB_CFG_LOOPBACK_VAL_SHIFT);
8900                 write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0);
8901         }
8902
8903         /* start the LCBs */
8904         /* LCB_CFG_TX_FIFOS_RESET.VAL = 0 */
8905         write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
8906
8907         /* simulator only loopback steps */
8908         if (loopback && dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
8909                 /* LCB_CFG_RUN.EN = 1 */
8910                 write_csr(dd, DC_LCB_CFG_RUN,
8911                           1ull << DC_LCB_CFG_RUN_EN_SHIFT);
8912
8913                 /* watch LCB_STS_LINK_TRANSFER_ACTIVE */
8914                 timeout = jiffies + msecs_to_jiffies(10);
8915                 while (1) {
8916                         reg = read_csr(dd, DC_LCB_STS_LINK_TRANSFER_ACTIVE);
8917                         if (reg)
8918                                 break;
8919                         if (time_after(jiffies, timeout)) {
8920                                 dd_dev_err(dd,
8921                                            "timeout waiting for LINK_TRANSFER_ACTIVE\n");
8922                                 return -ETIMEDOUT;
8923                         }
8924                         udelay(2);
8925                 }
8926
8927                 write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP,
8928                           1ull << DC_LCB_CFG_ALLOW_LINK_UP_VAL_SHIFT);
8929         }
8930
8931         if (!loopback) {
8932                 /*
8933                  * When doing quick linkup and not in loopback, both
8934                  * sides must be done with LCB set-up before either
8935                  * starts the quick linkup.  Put a delay here so that
8936                  * both sides can be started and have a chance to be
8937                  * done with LCB set up before resuming.
8938                  */
8939                 dd_dev_err(dd,
8940                            "Pausing for peer to be finished with LCB set up\n");
8941                 msleep(5000);
8942                 dd_dev_err(dd, "Continuing with quick linkup\n");
8943         }
8944
8945         write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
8946         set_8051_lcb_access(dd);
8947
8948         /*
8949          * State "quick" LinkUp request sets the physical link state to
8950          * LinkUp without a verify capability sequence.
8951          * This state is in simulator v37 and later.
8952          */
8953         ret = set_physical_link_state(dd, PLS_QUICK_LINKUP);
8954         if (ret != HCMD_SUCCESS) {
8955                 dd_dev_err(dd,
8956                            "%s: set physical link state to quick LinkUp failed with return %d\n",
8957                            __func__, ret);
8958
8959                 set_host_lcb_access(dd);
8960                 write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
8961
8962                 if (ret >= 0)
8963                         ret = -EINVAL;
8964                 return ret;
8965         }
8966
8967         return 0; /* success */
8968 }
8969
8970 /*
8971  * Set the SerDes to internal loopback mode.
8972  * Returns 0 on success, -errno on error.
8973  */
8974 static int set_serdes_loopback_mode(struct hfi1_devdata *dd)
8975 {
8976         int ret;
8977
8978         ret = set_physical_link_state(dd, PLS_INTERNAL_SERDES_LOOPBACK);
8979         if (ret == HCMD_SUCCESS)
8980                 return 0;
8981         dd_dev_err(dd,
8982                    "Set physical link state to SerDes Loopback failed with return %d\n",
8983                    ret);
8984         if (ret >= 0)
8985                 ret = -EINVAL;
8986         return ret;
8987 }
8988
8989 /*
8990  * Do all special steps to set up loopback.
8991  */
8992 static int init_loopback(struct hfi1_devdata *dd)
8993 {
8994         dd_dev_info(dd, "Entering loopback mode\n");
8995
8996         /* all loopbacks should disable self GUID check */
8997         write_csr(dd, DC_DC8051_CFG_MODE,
8998                   (read_csr(dd, DC_DC8051_CFG_MODE) | DISABLE_SELF_GUID_CHECK));
8999
9000         /*
9001          * The simulator has only one loopback option - LCB.  Switch
9002          * to that option, which includes quick link up.
9003          *
9004          * Accept all valid loopback values.
9005          */
9006         if ((dd->icode == ICODE_FUNCTIONAL_SIMULATOR) &&
9007             (loopback == LOOPBACK_SERDES || loopback == LOOPBACK_LCB ||
9008              loopback == LOOPBACK_CABLE)) {
9009                 loopback = LOOPBACK_LCB;
9010                 quick_linkup = 1;
9011                 return 0;
9012         }
9013
9014         /* handle serdes loopback */
9015         if (loopback == LOOPBACK_SERDES) {
9016                 /* internal serdes loopback needs quick linkup on RTL */
9017                 if (dd->icode == ICODE_RTL_SILICON)
9018                         quick_linkup = 1;
9019                 return set_serdes_loopback_mode(dd);
9020         }
9021
9022         /* LCB loopback - handled at poll time */
9023         if (loopback == LOOPBACK_LCB) {
9024                 quick_linkup = 1; /* LCB is always quick linkup */
9025
9026                 /* not supported in emulation due to emulation RTL changes */
9027                 if (dd->icode == ICODE_FPGA_EMULATION) {
9028                         dd_dev_err(dd,
9029                                    "LCB loopback not supported in emulation\n");
9030                         return -EINVAL;
9031                 }
9032                 return 0;
9033         }
9034
9035         /* external cable loopback requires no extra steps */
9036         if (loopback == LOOPBACK_CABLE)
9037                 return 0;
9038
9039         dd_dev_err(dd, "Invalid loopback mode %d\n", loopback);
9040         return -EINVAL;
9041 }
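
/*
 * Summary of the loopback module parameter handling above (a reading
 * aid derived from the branches in init_loopback()):
 *   - functional simulator + any valid mode -> forced to LOOPBACK_LCB
 *     with quick linkup
 *   - LOOPBACK_SERDES -> internal SerDes loopback; quick linkup only
 *     on RTL silicon
 *   - LOOPBACK_LCB    -> quick linkup; rejected on FPGA emulation
 *   - LOOPBACK_CABLE  -> no extra steps
 */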
9042
9043 /*
9044  * Translate from the OPA_LINK_WIDTH handed to us by the FM to bits
9045  * used in the Verify Capability link width attribute.
9046  */
9047 static u16 opa_to_vc_link_widths(u16 opa_widths)
9048 {
9049         int i;
9050         u16 result = 0;
9051
9052         static const struct link_bits {
9053                 u16 from;
9054                 u16 to;
9055         } opa_link_xlate[] = {
9056                 { OPA_LINK_WIDTH_1X, 1 << (1 - 1)  },
9057                 { OPA_LINK_WIDTH_2X, 1 << (2 - 1)  },
9058                 { OPA_LINK_WIDTH_3X, 1 << (3 - 1)  },
9059                 { OPA_LINK_WIDTH_4X, 1 << (4 - 1)  },
9060         };
9061
9062         for (i = 0; i < ARRAY_SIZE(opa_link_xlate); i++) {
9063                 if (opa_widths & opa_link_xlate[i].from)
9064                         result |= opa_link_xlate[i].to;
9065         }
9066         return result;
9067 }
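
/*
 * Worked example (illustrative only, assuming the usual one-hot
 * OPA_LINK_WIDTH_* encoding): an input of
 * OPA_LINK_WIDTH_1X | OPA_LINK_WIDTH_4X matches the first and last
 * table rows above, setting bits 0 and 3, so
 * opa_to_vc_link_widths() returns 0x9.
 */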
9068
9069 /*
9070  * Set link attributes before moving to polling.
9071  */
9072 static int set_local_link_attributes(struct hfi1_pportdata *ppd)
9073 {
9074         struct hfi1_devdata *dd = ppd->dd;
9075         u8 enable_lane_tx;
9076         u8 tx_polarity_inversion;
9077         u8 rx_polarity_inversion;
9078         int ret;
9079
9080         /* reset our fabric serdes to clear any lingering problems */
9081         fabric_serdes_reset(dd);
9082
9083         /* set the local tx rate - need to read-modify-write */
9084         ret = read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
9085                                &rx_polarity_inversion, &ppd->local_tx_rate);
9086         if (ret)
9087                 goto set_local_link_attributes_fail;
9088
9089         if (dd->dc8051_ver < dc8051_ver(0, 20)) {
9090                 /* set the tx rate to the fastest enabled */
9091                 if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G)
9092                         ppd->local_tx_rate = 1;
9093                 else
9094                         ppd->local_tx_rate = 0;
9095         } else {
9096                 /* set the tx rate to all enabled */
9097                 ppd->local_tx_rate = 0;
9098                 if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G)
9099                         ppd->local_tx_rate |= 2;
9100                 if (ppd->link_speed_enabled & OPA_LINK_SPEED_12_5G)
9101                         ppd->local_tx_rate |= 1;
9102         }
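
        /*
         * Note on the encoding above: newer 8051 firmware (>= 0.20)
         * takes a speed bitmask (bit 0 = 12.5G, bit 1 = 25G), so both
         * speeds enabled yields local_tx_rate = 0x3; older firmware
         * takes a single selector, 1 for 25G else 0 for 12.5G.
         */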
9103
9104         enable_lane_tx = 0xF; /* enable all four lanes */
9105         ret = write_tx_settings(dd, enable_lane_tx, tx_polarity_inversion,
9106                                 rx_polarity_inversion, ppd->local_tx_rate);
9107         if (ret != HCMD_SUCCESS)
9108                 goto set_local_link_attributes_fail;
9109
9110         /*
9111          * DC supports continuous updates.
9112          */
9113         ret = write_vc_local_phy(dd,
9114                                  0 /* no power management */,
9115                                  1 /* continuous updates */);
9116         if (ret != HCMD_SUCCESS)
9117                 goto set_local_link_attributes_fail;
9118
9119         /* z=1 in the next call: AU of 0 is not supported by the hardware */
9120         ret = write_vc_local_fabric(dd, dd->vau, 1, dd->vcu, dd->vl15_init,
9121                                     ppd->port_crc_mode_enabled);
9122         if (ret != HCMD_SUCCESS)
9123                 goto set_local_link_attributes_fail;
9124
9125         ret = write_vc_local_link_width(dd, 0, 0,
9126                                         opa_to_vc_link_widths(
9127                                                 ppd->link_width_enabled));
9128         if (ret != HCMD_SUCCESS)
9129                 goto set_local_link_attributes_fail;
9130
9131         /* let peer know who we are */
9132         ret = write_local_device_id(dd, dd->pcidev->device, dd->minrev);
9133         if (ret == HCMD_SUCCESS)
9134                 return 0;
9135
9136 set_local_link_attributes_fail:
9137         dd_dev_err(dd,
9138                    "Failed to set local link attributes, return 0x%x\n",
9139                    ret);
9140         return ret;
9141 }
9142
9143 /*
9144  * Call this to start the link.
9145  * Do not do anything if the link is disabled.
9146  * Returns 0 if the link is disabled or the driver is not ready; otherwise returns the result of the move to polling.
9147  */
9148 int start_link(struct hfi1_pportdata *ppd)
9149 {
9150         /*
9151          * Tune the SerDes to a ballpark setting for optimal signal and bit
9152          * error rate.  Needs to be done before starting the link.
9153          */
9154         tune_serdes(ppd);
9155
9156         if (!ppd->link_enabled) {
9157                 dd_dev_info(ppd->dd,
9158                             "%s: stopping link start because link is disabled\n",
9159                             __func__);
9160                 return 0;
9161         }
9162         if (!ppd->driver_link_ready) {
9163                 dd_dev_info(ppd->dd,
9164                             "%s: stopping link start because driver is not ready\n",
9165                             __func__);
9166                 return 0;
9167         }
9168
9169         /*
9170          * FULL_MGMT_P_KEY is cleared from the pkey table, so that the
9171          * pkey table can be configured properly if the HFI unit is connected
9172          * to a switch port with MgmtAllowed=NO.
9173          */
9174         clear_full_mgmt_pkey(ppd);
9175
9176         return set_link_state(ppd, HLS_DN_POLL);
9177 }
9178
9179 static void wait_for_qsfp_init(struct hfi1_pportdata *ppd)
9180 {
9181         struct hfi1_devdata *dd = ppd->dd;
9182         u64 mask;
9183         unsigned long timeout;
9184
9185         /*
9186          * Some QSFP cables have a quirk that asserts the IntN line as a side
9187          * effect of power up on plug-in. We ignore this false positive
9188          * interrupt until the module has finished powering up by waiting for
9189          * a minimum timeout of the module inrush initialization time of
9190          * 500 ms (SFF 8679 Table 5-6) to ensure the voltage rails in the
9191          * module have stabilized.
9192          */
9193         msleep(500);
9194
9195         /*
9196          * Check for QSFP interrupt for t_init (SFF 8679 Table 8-1)
9197          */
9198         timeout = jiffies + msecs_to_jiffies(2000);
9199         while (1) {
9200                 mask = read_csr(dd, dd->hfi1_id ?
9201                                 ASIC_QSFP2_IN : ASIC_QSFP1_IN);
9202                 if (!(mask & QSFP_HFI0_INT_N))
9203                         break;
9204                 if (time_after(jiffies, timeout)) {
9205                         dd_dev_info(dd, "%s: No IntN detected, reset complete\n",
9206                                     __func__);
9207                         break;
9208                 }
9209                 udelay(2);
9210         }
9211 }
9212
9213 static void set_qsfp_int_n(struct hfi1_pportdata *ppd, u8 enable)
9214 {
9215         struct hfi1_devdata *dd = ppd->dd;
9216         u64 mask;
9217
9218         mask = read_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK);
9219         if (enable) {
9220                 /*
9221                  * Clear the status register to avoid an immediate interrupt
9222                  * when we re-enable the IntN pin
9223                  */
9224                 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR : ASIC_QSFP1_CLEAR,
9225                           QSFP_HFI0_INT_N);
9226                 mask |= (u64)QSFP_HFI0_INT_N;
9227         } else {
9228                 mask &= ~(u64)QSFP_HFI0_INT_N;
9229         }
9230         write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK, mask);
9231 }
9232
9233 void reset_qsfp(struct hfi1_pportdata *ppd)
9234 {
9235         struct hfi1_devdata *dd = ppd->dd;
9236         u64 mask, qsfp_mask;
9237
9238         /* Disable INT_N from triggering QSFP interrupts */
9239         set_qsfp_int_n(ppd, 0);
9240
9241         /* Reset the QSFP */
9242         mask = (u64)QSFP_HFI0_RESET_N;
9243
9244         qsfp_mask = read_csr(dd,
9245                              dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT);
9246         qsfp_mask &= ~mask;
9247         write_csr(dd,
9248                   dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask);
9249
9250         udelay(10);
9251
9252         qsfp_mask |= mask;
9253         write_csr(dd,
9254                   dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask);
9255
9256         wait_for_qsfp_init(ppd);
9257
9258         /*
9259          * Allow INT_N to trigger the QSFP interrupt to watch
9260          * for alarms and warnings
9261          */
9262         set_qsfp_int_n(ppd, 1);
9263 }
9264
9265 static int handle_qsfp_error_conditions(struct hfi1_pportdata *ppd,
9266                                         u8 *qsfp_interrupt_status)
9267 {
9268         struct hfi1_devdata *dd = ppd->dd;
9269
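        /*
         * Index note (an assumption based on the SFF-8636 lower-page
         * layout): the caller reads 16 bytes starting at QSFP byte 6,
         * so qsfp_interrupt_status[0] is byte 6 (temperature flags),
         * [1] is byte 7 (Vcc flags), and [3]..[8] are bytes 9-14
         * (per-channel-pair RX power, TX bias, and TX power flags).
         */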
9270         if ((qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_ALARM) ||
9271             (qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_WARNING))
9272                 dd_dev_info(dd, "%s: QSFP cable temperature too high\n",
9273                             __func__);
9274
9275         if ((qsfp_interrupt_status[0] & QSFP_LOW_TEMP_ALARM) ||
9276             (qsfp_interrupt_status[0] & QSFP_LOW_TEMP_WARNING))
9277                 dd_dev_info(dd, "%s: QSFP cable temperature too low\n",
9278                             __func__);
9279
9280         /*
9281          * The remaining alarms/warnings don't matter if the link is down.
9282          */
9283         if (ppd->host_link_state & HLS_DOWN)
9284                 return 0;
9285
9286         if ((qsfp_interrupt_status[1] & QSFP_HIGH_VCC_ALARM) ||
9287             (qsfp_interrupt_status[1] & QSFP_HIGH_VCC_WARNING))
9288                 dd_dev_info(dd, "%s: QSFP supply voltage too high\n",
9289                             __func__);
9290
9291         if ((qsfp_interrupt_status[1] & QSFP_LOW_VCC_ALARM) ||
9292             (qsfp_interrupt_status[1] & QSFP_LOW_VCC_WARNING))
9293                 dd_dev_info(dd, "%s: QSFP supply voltage too low\n",
9294                             __func__);
9295
9296         /* Byte 2 is vendor specific */
9297
9298         if ((qsfp_interrupt_status[3] & QSFP_HIGH_POWER_ALARM) ||
9299             (qsfp_interrupt_status[3] & QSFP_HIGH_POWER_WARNING))
9300                 dd_dev_info(dd, "%s: Cable RX channel 1/2 power too high\n",
9301                             __func__);
9302
9303         if ((qsfp_interrupt_status[3] & QSFP_LOW_POWER_ALARM) ||
9304             (qsfp_interrupt_status[3] & QSFP_LOW_POWER_WARNING))
9305                 dd_dev_info(dd, "%s: Cable RX channel 1/2 power too low\n",
9306                             __func__);
9307
9308         if ((qsfp_interrupt_status[4] & QSFP_HIGH_POWER_ALARM) ||
9309             (qsfp_interrupt_status[4] & QSFP_HIGH_POWER_WARNING))
9310                 dd_dev_info(dd, "%s: Cable RX channel 3/4 power too high\n",
9311                             __func__);
9312
9313         if ((qsfp_interrupt_status[4] & QSFP_LOW_POWER_ALARM) ||
9314             (qsfp_interrupt_status[4] & QSFP_LOW_POWER_WARNING))
9315                 dd_dev_info(dd, "%s: Cable RX channel 3/4 power too low\n",
9316                             __func__);
9317
9318         if ((qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_ALARM) ||
9319             (qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_WARNING))
9320                 dd_dev_info(dd, "%s: Cable TX channel 1/2 bias too high\n",
9321                             __func__);
9322
9323         if ((qsfp_interrupt_status[5] & QSFP_LOW_BIAS_ALARM) ||
9324             (qsfp_interrupt_status[5] & QSFP_LOW_BIAS_WARNING))
9325                 dd_dev_info(dd, "%s: Cable TX channel 1/2 bias too low\n",
9326                             __func__);
9327
9328         if ((qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_ALARM) ||
9329             (qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_WARNING))
9330                 dd_dev_info(dd, "%s: Cable TX channel 3/4 bias too high\n",
9331                             __func__);
9332
9333         if ((qsfp_interrupt_status[6] & QSFP_LOW_BIAS_ALARM) ||
9334             (qsfp_interrupt_status[6] & QSFP_LOW_BIAS_WARNING))
9335                 dd_dev_info(dd, "%s: Cable TX channel 3/4 bias too low\n",
9336                             __func__);
9337
9338         if ((qsfp_interrupt_status[7] & QSFP_HIGH_POWER_ALARM) ||
9339             (qsfp_interrupt_status[7] & QSFP_HIGH_POWER_WARNING))
9340                 dd_dev_info(dd, "%s: Cable TX channel 1/2 power too high\n",
9341                             __func__);
9342
9343         if ((qsfp_interrupt_status[7] & QSFP_LOW_POWER_ALARM) ||
9344             (qsfp_interrupt_status[7] & QSFP_LOW_POWER_WARNING))
9345                 dd_dev_info(dd, "%s: Cable TX channel 1/2 power too low\n",
9346                             __func__);
9347
9348         if ((qsfp_interrupt_status[8] & QSFP_HIGH_POWER_ALARM) ||
9349             (qsfp_interrupt_status[8] & QSFP_HIGH_POWER_WARNING))
9350                 dd_dev_info(dd, "%s: Cable TX channel 3/4 power too high\n",
9351                             __func__);
9352
9353         if ((qsfp_interrupt_status[8] & QSFP_LOW_POWER_ALARM) ||
9354             (qsfp_interrupt_status[8] & QSFP_LOW_POWER_WARNING))
9355                 dd_dev_info(dd, "%s: Cable TX channel 3/4 power too low\n",
9356                             __func__);
9357
9358         /* Bytes 9-10 and 11-12 are reserved */
9359         /* Bytes 13-15 are vendor specific */
9360
9361         return 0;
9362 }
9363
9364 /* This routine is only scheduled if the QSFP module presence signal is asserted */
9365 void qsfp_event(struct work_struct *work)
9366 {
9367         struct qsfp_data *qd;
9368         struct hfi1_pportdata *ppd;
9369         struct hfi1_devdata *dd;
9370
9371         qd = container_of(work, struct qsfp_data, qsfp_work);
9372         ppd = qd->ppd;
9373         dd = ppd->dd;
9374
9375         /* Sanity check */
9376         if (!qsfp_mod_present(ppd))
9377                 return;
9378
9379         /*
9380          * Turn DC back on after cable has been re-inserted. Up until
9381          * now, the DC has been in reset to save power.
9382          */
9383         dc_start(dd);
9384
9385         if (qd->cache_refresh_required) {
9386                 set_qsfp_int_n(ppd, 0);
9387
9388                 wait_for_qsfp_init(ppd);
9389
9390                 /*
9391                  * Allow INT_N to trigger the QSFP interrupt to watch
9392                  * for alarms and warnings
9393                  */
9394                 set_qsfp_int_n(ppd, 1);
9395
9396                 start_link(ppd);
9397         }
9398
9399         if (qd->check_interrupt_flags) {
9400                 u8 qsfp_interrupt_status[16] = {0,};
9401
9402                 if (one_qsfp_read(ppd, dd->hfi1_id, 6,
9403                                   &qsfp_interrupt_status[0], 16) != 16) {
9404                         dd_dev_info(dd,
9405                                     "%s: Failed to read status of QSFP module\n",
9406                                     __func__);
9407                 } else {
9408                         unsigned long flags;
9409
9410                         handle_qsfp_error_conditions(
9411                                         ppd, qsfp_interrupt_status);
9412                         spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
9413                         ppd->qsfp_info.check_interrupt_flags = 0;
9414                         spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
9415                                                flags);
9416                 }
9417         }
9418 }
9419
9420 static void init_qsfp_int(struct hfi1_devdata *dd)
9421 {
9422         struct hfi1_pportdata *ppd = dd->pport;
9423         u64 qsfp_mask, cce_int_mask;
9424         const int qsfp1_int_smask = QSFP1_INT % 64;
9425         const int qsfp2_int_smask = QSFP2_INT % 64;
9426
9427         /*
9428          * Disable QSFP1 interrupts for HFI1, QSFP2 interrupts for HFI0.
9429          * Qsfp1Int and Qsfp2Int are adjacent bits in the same CSR,
9430          * therefore just one of QSFP1_INT/QSFP2_INT can be used to find
9431          * the index of the appropriate CSR in the CCEIntMask CSR array.
9432          */
9433         cce_int_mask = read_csr(dd, CCE_INT_MASK +
9434                                 (8 * (QSFP1_INT / 64)));
9435         if (dd->hfi1_id) {
9436                 cce_int_mask &= ~((u64)1 << qsfp1_int_smask);
9437                 write_csr(dd, CCE_INT_MASK + (8 * (QSFP1_INT / 64)),
9438                           cce_int_mask);
9439         } else {
9440                 cce_int_mask &= ~((u64)1 << qsfp2_int_smask);
9441                 write_csr(dd, CCE_INT_MASK + (8 * (QSFP2_INT / 64)),
9442                           cce_int_mask);
9443         }
9444
9445         qsfp_mask = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);
9446         /* Clear current status to avoid spurious interrupts */
9447         write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR : ASIC_QSFP1_CLEAR,
9448                   qsfp_mask);
9449         write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK,
9450                   qsfp_mask);
9451
9452         set_qsfp_int_n(ppd, 0);
9453
9454         /* Handle active low nature of INT_N and MODPRST_N pins */
9455         if (qsfp_mod_present(ppd))
9456                 qsfp_mask &= ~(u64)QSFP_HFI0_MODPRST_N;
9457         write_csr(dd,
9458                   dd->hfi1_id ? ASIC_QSFP2_INVERT : ASIC_QSFP1_INVERT,
9459                   qsfp_mask);
9460 }
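
/*
 * Worked example of the CCE_INT_MASK indexing above (hypothetical bit
 * number, for illustration only): if QSFP1_INT were interrupt source
 * bit 242, it would live in CCEIntMask CSR 242 / 64 = 3, at bit
 * 242 % 64 = 50, i.e. the read/write above would target byte offset
 * CCE_INT_MASK + 8 * 3.
 */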
9461
9462 /*
9463  * Do a one-time initialize of the LCB block.
9464  */
9465 static void init_lcb(struct hfi1_devdata *dd)
9466 {
9467         /* simulator does not correctly handle LCB cclk loopback, skip */
9468         if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
9469                 return;
9470
9471         /* the DC has been reset earlier in the driver load */
9472
9473         /* set LCB for cclk loopback on the port */
9474         write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x01);
9475         write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0x00);
9476         write_csr(dd, DC_LCB_CFG_REINIT_AS_SLAVE, 0x00);
9477         write_csr(dd, DC_LCB_CFG_CNT_FOR_SKIP_STALL, 0x110);
9478         write_csr(dd, DC_LCB_CFG_CLK_CNTR, 0x08);
9479         write_csr(dd, DC_LCB_CFG_LOOPBACK, 0x02);
9480         write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x00);
9481 }
9482
9483 /*
9484  * Perform a test read on the QSFP.  Return 0 on success, -ERRNO
9485  * on error.
9486  */
9487 static int test_qsfp_read(struct hfi1_pportdata *ppd)
9488 {
9489         int ret;
9490         u8 status;
9491
9492         /* report success if not a QSFP */
9493         if (ppd->port_type != PORT_TYPE_QSFP)
9494                 return 0;
9495
9496         /* read byte 2, the status byte */
9497         ret = one_qsfp_read(ppd, ppd->dd->hfi1_id, 2, &status, 1);
9498         if (ret < 0)
9499                 return ret;
9500         if (ret != 1)
9501                 return -EIO;
9502
9503         return 0; /* success */
9504 }
9505
9506 /*
9507  * Values for QSFP retry.
9508  *
9509  * Give up after 10s (20 x 500ms).  The overall timeout was determined
9510  * empirically from experience on a large cluster.
9511  */
9512 #define MAX_QSFP_RETRIES 20
9513 #define QSFP_RETRY_WAIT 500 /* msec */
9514
9515 /*
9516  * Try a QSFP read.  If it fails, schedule a retry for later.
9517  * Called on first link activation after driver load.
9518  */
9519 static void try_start_link(struct hfi1_pportdata *ppd)
9520 {
9521         if (test_qsfp_read(ppd)) {
9522                 /* read failed */
9523                 if (ppd->qsfp_retry_count >= MAX_QSFP_RETRIES) {
9524                         dd_dev_err(ppd->dd, "QSFP not responding, giving up\n");
9525                         return;
9526                 }
9527                 dd_dev_info(ppd->dd,
9528                             "QSFP not responding, waiting and retrying %d\n",
9529                             (int)ppd->qsfp_retry_count);
9530                 ppd->qsfp_retry_count++;
9531                 queue_delayed_work(ppd->hfi1_wq, &ppd->start_link_work,
9532                                    msecs_to_jiffies(QSFP_RETRY_WAIT));
9533                 return;
9534         }
9535         ppd->qsfp_retry_count = 0;
9536
9537         start_link(ppd);
9538 }
9539
9540 /*
9541  * Workqueue function to start the link after a delay.
9542  */
9543 void handle_start_link(struct work_struct *work)
9544 {
9545         struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
9546                                                   start_link_work.work);
9547         try_start_link(ppd);
9548 }
9549
9550 int bringup_serdes(struct hfi1_pportdata *ppd)
9551 {
9552         struct hfi1_devdata *dd = ppd->dd;
9553         u64 guid;
9554         int ret;
9555
9556         if (HFI1_CAP_IS_KSET(EXTENDED_PSN))
9557                 add_rcvctrl(dd, RCV_CTRL_RCV_EXTENDED_PSN_ENABLE_SMASK);
9558
9559         guid = ppd->guid;
9560         if (!guid) {
9561                 if (dd->base_guid)
9562                         guid = dd->base_guid + ppd->port - 1;
9563                 ppd->guid = guid;
9564         }
9565
9566         /* Set linkinit_reason on power up per OPA spec */
9567         ppd->linkinit_reason = OPA_LINKINIT_REASON_LINKUP;
9568
9569         /* one-time init of the LCB */
9570         init_lcb(dd);
9571
9572         if (loopback) {
9573                 ret = init_loopback(dd);
9574                 if (ret < 0)
9575                         return ret;
9576         }
9577
9578         get_port_type(ppd);
9579         if (ppd->port_type == PORT_TYPE_QSFP) {
9580                 set_qsfp_int_n(ppd, 0);
9581                 wait_for_qsfp_init(ppd);
9582                 set_qsfp_int_n(ppd, 1);
9583         }
9584
9585         try_start_link(ppd);
9586         return 0;
9587 }
9588
9589 void hfi1_quiet_serdes(struct hfi1_pportdata *ppd)
9590 {
9591         struct hfi1_devdata *dd = ppd->dd;
9592
9593         /*
9594          * Shut down the link and keep it down.  First clear the flag
9595          * indicating the driver wants the link up (driver_link_ready).
9596          * Then make sure the link is not automatically restarted
9597          * (link_enabled).  Cancel any pending restart.  And finally
9598          * go offline.
9599          */
9600         ppd->driver_link_ready = 0;
9601         ppd->link_enabled = 0;
9602
9603         ppd->qsfp_retry_count = MAX_QSFP_RETRIES; /* prevent more retries */
9604         flush_delayed_work(&ppd->start_link_work);
9605         cancel_delayed_work_sync(&ppd->start_link_work);
9606
9607         ppd->offline_disabled_reason =
9608                         HFI1_ODR_MASK(OPA_LINKDOWN_REASON_SMA_DISABLED);
9609         set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SMA_DISABLED, 0,
9610                              OPA_LINKDOWN_REASON_SMA_DISABLED);
9611         set_link_state(ppd, HLS_DN_OFFLINE);
9612
9613         /* disable the port */
9614         clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
9615 }
9616
9617 static inline int init_cpu_counters(struct hfi1_devdata *dd)
9618 {
9619         struct hfi1_pportdata *ppd;
9620         int i;
9621
9622         ppd = (struct hfi1_pportdata *)(dd + 1);
9623         for (i = 0; i < dd->num_pports; i++, ppd++) {
9624                 ppd->ibport_data.rvp.rc_acks = NULL;
9625                 ppd->ibport_data.rvp.rc_qacks = NULL;
9626                 ppd->ibport_data.rvp.rc_acks = alloc_percpu(u64);
9627                 ppd->ibport_data.rvp.rc_qacks = alloc_percpu(u64);
9628                 ppd->ibport_data.rvp.rc_delayed_comp = alloc_percpu(u64);
9629                 if (!ppd->ibport_data.rvp.rc_acks ||
9630                     !ppd->ibport_data.rvp.rc_delayed_comp ||
9631                     !ppd->ibport_data.rvp.rc_qacks)
9632                         return -ENOMEM;
9633         }
9634
9635         return 0;
9636 }
9637
9638 static const char * const pt_names[] = {
9639         "expected",
9640         "eager",
9641         "invalid"
9642 };
9643
9644 static const char *pt_name(u32 type)
9645 {
9646         return type >= ARRAY_SIZE(pt_names) ? "unknown" : pt_names[type];
9647 }
9648
9649 /*
9650  * index is the index into the receive array
9651  */
9652 void hfi1_put_tid(struct hfi1_devdata *dd, u32 index,
9653                   u32 type, unsigned long pa, u16 order)
9654 {
9655         u64 reg;
9656         void __iomem *base = (dd->rcvarray_wc ? dd->rcvarray_wc :
9657                               (dd->kregbase + RCV_ARRAY));
9658
9659         if (!(dd->flags & HFI1_PRESENT))
9660                 goto done;
9661
9662         if (type == PT_INVALID) {
9663                 pa = 0;
9664         } else if (type > PT_INVALID) {
9665                 dd_dev_err(dd,
9666                            "unexpected receive array type %u for index %u, not handled\n",
9667                            type, index);
9668                 goto done;
9669         }
9670
9671         hfi1_cdbg(TID, "type %s, index 0x%x, pa 0x%lx, bsize 0x%lx",
9672                   pt_name(type), index, pa, (unsigned long)order);
9673
9674 #define RT_ADDR_SHIFT 12        /* 4KB kernel address boundary */
9675         reg = RCV_ARRAY_RT_WRITE_ENABLE_SMASK
9676                 | (u64)order << RCV_ARRAY_RT_BUF_SIZE_SHIFT
9677                 | ((pa >> RT_ADDR_SHIFT) & RCV_ARRAY_RT_ADDR_MASK)
9678                                         << RCV_ARRAY_RT_ADDR_SHIFT;
9679         writeq(reg, base + (index * 8));
9680
9681         if (type == PT_EAGER)
9682                 /*
9683                  * Eager entries are written one-by-one so we have to push them
9684                  * after we write the entry.
9685                  */
9686                 flush_wc();
9687 done:
9688         return;
9689 }
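
/*
 * Worked example of the RcvArray encoding above (illustrative values):
 * for a 4KB-aligned buffer at pa = 0x12345000 with order 0, the
 * address field is pa >> RT_ADDR_SHIFT = 0x12345, placed at
 * RCV_ARRAY_RT_ADDR_SHIFT, the buffer size code is 0, and the
 * write-enable bit is set; the packed value lands at base + index * 8.
 */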
9690
9691 void hfi1_clear_tids(struct hfi1_ctxtdata *rcd)
9692 {
9693         struct hfi1_devdata *dd = rcd->dd;
9694         u32 i;
9695
9696         /* this could be optimized */
9697         for (i = rcd->eager_base; i < rcd->eager_base +
9698                      rcd->egrbufs.alloced; i++)
9699                 hfi1_put_tid(dd, i, PT_INVALID, 0, 0);
9700
9701         for (i = rcd->expected_base;
9702                         i < rcd->expected_base + rcd->expected_count; i++)
9703                 hfi1_put_tid(dd, i, PT_INVALID, 0, 0);
9704 }
9705
9706 struct ib_header *hfi1_get_msgheader(
9707         struct hfi1_devdata *dd, __le32 *rhf_addr)
9708 {
9709         u32 offset = rhf_hdrq_offset(rhf_to_cpu(rhf_addr));
9710
9711         return (struct ib_header *)
9712                 (rhf_addr - dd->rhf_offset + offset);
9713 }
9714
9715 static const char * const ib_cfg_name_strings[] = {
9716         "HFI1_IB_CFG_LIDLMC",
9717         "HFI1_IB_CFG_LWID_DG_ENB",
9718         "HFI1_IB_CFG_LWID_ENB",
9719         "HFI1_IB_CFG_LWID",
9720         "HFI1_IB_CFG_SPD_ENB",
9721         "HFI1_IB_CFG_SPD",
9722         "HFI1_IB_CFG_RXPOL_ENB",
9723         "HFI1_IB_CFG_LREV_ENB",
9724         "HFI1_IB_CFG_LINKLATENCY",
9725         "HFI1_IB_CFG_HRTBT",
9726         "HFI1_IB_CFG_OP_VLS",
9727         "HFI1_IB_CFG_VL_HIGH_CAP",
9728         "HFI1_IB_CFG_VL_LOW_CAP",
9729         "HFI1_IB_CFG_OVERRUN_THRESH",
9730         "HFI1_IB_CFG_PHYERR_THRESH",
9731         "HFI1_IB_CFG_LINKDEFAULT",
9732         "HFI1_IB_CFG_PKEYS",
9733         "HFI1_IB_CFG_MTU",
9734         "HFI1_IB_CFG_LSTATE",
9735         "HFI1_IB_CFG_VL_HIGH_LIMIT",
9736         "HFI1_IB_CFG_PMA_TICKS",
9737         "HFI1_IB_CFG_PORT"
9738 };
9739
9740 static const char *ib_cfg_name(int which)
9741 {
9742         if (which < 0 || which >= ARRAY_SIZE(ib_cfg_name_strings))
9743                 return "invalid";
9744         return ib_cfg_name_strings[which];
9745 }
9746
9747 int hfi1_get_ib_cfg(struct hfi1_pportdata *ppd, int which)
9748 {
9749         struct hfi1_devdata *dd = ppd->dd;
9750         int val = 0;
9751
9752         switch (which) {
9753         case HFI1_IB_CFG_LWID_ENB: /* allowed Link-width */
9754                 val = ppd->link_width_enabled;
9755                 break;
9756         case HFI1_IB_CFG_LWID: /* currently active Link-width */
9757                 val = ppd->link_width_active;
9758                 break;
9759         case HFI1_IB_CFG_SPD_ENB: /* allowed Link speeds */
9760                 val = ppd->link_speed_enabled;
9761                 break;
9762         case HFI1_IB_CFG_SPD: /* current Link speed */
9763                 val = ppd->link_speed_active;
9764                 break;
9765
9766         case HFI1_IB_CFG_RXPOL_ENB: /* Auto-RX-polarity enable */
9767         case HFI1_IB_CFG_LREV_ENB: /* Auto-Lane-reversal enable */
9768         case HFI1_IB_CFG_LINKLATENCY:
9769                 goto unimplemented;
9770
9771         case HFI1_IB_CFG_OP_VLS:
9772                 val = ppd->vls_operational;
9773                 break;
9774         case HFI1_IB_CFG_VL_HIGH_CAP: /* VL arb high priority table size */
9775                 val = VL_ARB_HIGH_PRIO_TABLE_SIZE;
9776                 break;
9777         case HFI1_IB_CFG_VL_LOW_CAP: /* VL arb low priority table size */
9778                 val = VL_ARB_LOW_PRIO_TABLE_SIZE;
9779                 break;
9780         case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
9781                 val = ppd->overrun_threshold;
9782                 break;
9783         case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
9784                 val = ppd->phy_error_threshold;
9785                 break;
9786         case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
9787                 val = dd->link_default;
9788                 break;
9789
9790         case HFI1_IB_CFG_HRTBT: /* Heartbeat off/enable/auto */
9791         case HFI1_IB_CFG_PMA_TICKS:
9792         default:
9793 unimplemented:
9794                 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
9795                         dd_dev_info(
9796                                 dd,
9797                                 "%s: which %s: not implemented\n",
9798                                 __func__,
9799                                 ib_cfg_name(which));
9800                 break;
9801         }
9802
9803         return val;
9804 }
9805
9806 /*
9807  * The largest MAD packet size.
9808  */
9809 #define MAX_MAD_PACKET 2048
9810
9811 /*
9812  * Return the maximum header bytes that can go on the _wire_
9813  * for this device. This count includes the ICRC which is
9814  * not part of the packet held in memory but it is appended
9815  * not part of the packet held in memory but is appended
9816  * This is dependent on the device's receive header entry size.
9817  * HFI allows this to be set per-receive context, but the
9818  * driver presently enforces a global value.
9819  */
9820 u32 lrh_max_header_bytes(struct hfi1_devdata *dd)
9821 {
9822         /*
9823          * The maximum non-payload (MTU) bytes in LRH.PktLen are
9824          * the Receive Header Entry Size minus the PBC (or RHF) size
9825          * plus one DW for the ICRC appended by HW.
9826          *
9827          * dd->rcd[0].rcvhdrqentsize is in DW.
9828          * We use rcd[0] as all contexts will have the same value. Also,
9829          * the first kernel context would have been allocated by now so
9830          * we are guaranteed a valid value.
9831          */
9832         return (dd->rcd[0]->rcvhdrqentsize - 2/*PBC/RHF*/ + 1/*ICRC*/) << 2;
9833 }
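
/*
 * Worked example (illustrative only): with a receive header queue
 * entry size of 32 DWs, the maximum wire header is
 * (32 - 2 + 1) << 2 = 124 bytes, i.e. 30 DWs of header plus the
 * 1 DW ICRC appended by HW.
 */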
9834
9835 /*
9836  * Set Send Length
9837  * @ppd - per port data
9838  *
9839  * Set the MTU by limiting how many DWs may be sent.  The SendLenCheck*
9840  * registers compare against LRH.PktLen, so use the max bytes included
9841  * in the LRH.
9842  *
9843  * This routine changes all VL values except VL15, which it maintains at
9844  * the same value.
9845  */
9846 static void set_send_length(struct hfi1_pportdata *ppd)
9847 {
9848         struct hfi1_devdata *dd = ppd->dd;
9849         u32 max_hb = lrh_max_header_bytes(dd), dcmtu;
9850         u32 maxvlmtu = dd->vld[15].mtu;
9851         u64 len1 = 0, len2 = (((dd->vld[15].mtu + max_hb) >> 2)
9852                               & SEND_LEN_CHECK1_LEN_VL15_MASK) <<
9853                 SEND_LEN_CHECK1_LEN_VL15_SHIFT;
9854         int i, j;
9855         u32 thres;
9856
9857         for (i = 0; i < ppd->vls_supported; i++) {
9858                 if (dd->vld[i].mtu > maxvlmtu)
9859                         maxvlmtu = dd->vld[i].mtu;
9860                 if (i <= 3)
9861                         len1 |= (((dd->vld[i].mtu + max_hb) >> 2)
9862                                  & SEND_LEN_CHECK0_LEN_VL0_MASK) <<
9863                                 ((i % 4) * SEND_LEN_CHECK0_LEN_VL1_SHIFT);
9864                 else
9865                         len2 |= (((dd->vld[i].mtu + max_hb) >> 2)
9866                                  & SEND_LEN_CHECK1_LEN_VL4_MASK) <<
9867                                 ((i % 4) * SEND_LEN_CHECK1_LEN_VL5_SHIFT);
9868         }
9869         write_csr(dd, SEND_LEN_CHECK0, len1);
9870         write_csr(dd, SEND_LEN_CHECK1, len2);
9871         /* adjust kernel credit return thresholds based on new MTUs */
9872         /* all kernel receive contexts have the same hdrqentsize */
9873         for (i = 0; i < ppd->vls_supported; i++) {
9874                 thres = min(sc_percent_to_threshold(dd->vld[i].sc, 50),
9875                             sc_mtu_to_threshold(dd->vld[i].sc,
9876                                                 dd->vld[i].mtu,
9877                                                 dd->rcd[0]->rcvhdrqentsize));
9878                 for (j = 0; j < INIT_SC_PER_VL; j++)
9879                         sc_set_cr_threshold(
9880                                         pio_select_send_context_vl(dd, j, i),
9881                                         thres);
9882         }
9883         thres = min(sc_percent_to_threshold(dd->vld[15].sc, 50),
9884                     sc_mtu_to_threshold(dd->vld[15].sc,
9885                                         dd->vld[15].mtu,
9886                                         dd->rcd[0]->rcvhdrqentsize));
9887         sc_set_cr_threshold(dd->vld[15].sc, thres);
9888
9889         /* Adjust maximum MTU for the port in DC */
9890         dcmtu = maxvlmtu == 10240 ? DCC_CFG_PORT_MTU_CAP_10240 :
9891                 (ilog2(maxvlmtu >> 8) + 1);
9892         len1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG);
9893         len1 &= ~DCC_CFG_PORT_CONFIG_MTU_CAP_SMASK;
9894         len1 |= ((u64)dcmtu & DCC_CFG_PORT_CONFIG_MTU_CAP_MASK) <<
9895                 DCC_CFG_PORT_CONFIG_MTU_CAP_SHIFT;
9896         write_csr(ppd->dd, DCC_CFG_PORT_CONFIG, len1);
9897 }
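
/*
 * Worked example of the DC MTU cap encoding above (illustrative):
 * maxvlmtu = 8192 gives ilog2(8192 >> 8) + 1 = ilog2(32) + 1 = 6,
 * and 4096 gives 5; 10240 is special-cased to
 * DCC_CFG_PORT_MTU_CAP_10240.
 */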
9898
9899 static void set_lidlmc(struct hfi1_pportdata *ppd)
9900 {
9901         int i;
9902         u64 sreg = 0;
9903         struct hfi1_devdata *dd = ppd->dd;
9904         u32 mask = ~((1U << ppd->lmc) - 1);
9905         u64 c1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG1);
9906
9907         c1 &= ~(DCC_CFG_PORT_CONFIG1_TARGET_DLID_SMASK
9908                 | DCC_CFG_PORT_CONFIG1_DLID_MASK_SMASK);
9909         c1 |= ((ppd->lid & DCC_CFG_PORT_CONFIG1_TARGET_DLID_MASK)
9910                         << DCC_CFG_PORT_CONFIG1_TARGET_DLID_SHIFT) |
9911               ((mask & DCC_CFG_PORT_CONFIG1_DLID_MASK_MASK)
9912                         << DCC_CFG_PORT_CONFIG1_DLID_MASK_SHIFT);
9913         write_csr(ppd->dd, DCC_CFG_PORT_CONFIG1, c1);
9914
9915         /*
9916          * Iterate over all the send contexts and set their SLID check
9917          */
9918         sreg = ((mask & SEND_CTXT_CHECK_SLID_MASK_MASK) <<
9919                         SEND_CTXT_CHECK_SLID_MASK_SHIFT) |
9920                (((ppd->lid & mask) & SEND_CTXT_CHECK_SLID_VALUE_MASK) <<
9921                         SEND_CTXT_CHECK_SLID_VALUE_SHIFT);
9922
9923         for (i = 0; i < dd->chip_send_contexts; i++) {
9924                 hfi1_cdbg(LINKVERB, "SendContext[%d].SLID_CHECK = 0x%x",
9925                           i, (u32)sreg);
9926                 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, sreg);
9927         }
9928
9929         /* Now we have to do the same thing for the sdma engines */
9930         sdma_update_lmc(dd, mask, ppd->lid);
9931 }
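
/*
 * Worked example of the LMC masking above (illustrative): with
 * lmc = 2, mask = ~((1 << 2) - 1) = ~0x3, so the SLID/DLID checks
 * ignore the low two bits and the port answers to 4 consecutive
 * LIDs starting at the (LMC-aligned) ppd->lid.
 */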
9932
9933 static int wait_phy_linkstate(struct hfi1_devdata *dd, u32 state, u32 msecs)
9934 {
9935         unsigned long timeout;
9936         u32 curr_state;
9937
9938         timeout = jiffies + msecs_to_jiffies(msecs);
9939         while (1) {
9940                 curr_state = read_physical_state(dd);
9941                 if (curr_state == state)
9942                         break;
9943                 if (time_after(jiffies, timeout)) {
9944                         dd_dev_err(dd,
9945                                    "timeout waiting for phy link state 0x%x, current state is 0x%x\n",
9946                                    state, curr_state);
9947                         return -ETIMEDOUT;
9948                 }
9949                 usleep_range(1950, 2050); /* sleep 2ms-ish */
9950         }
9951
9952         return 0;
9953 }
9954
9955 static const char *state_completed_string(u32 completed)
9956 {
9957         static const char * const state_completed[] = {
9958                 "EstablishComm",
9959                 "OptimizeEQ",
9960                 "VerifyCap"
9961         };
9962
9963         if (completed < ARRAY_SIZE(state_completed))
9964                 return state_completed[completed];
9965
9966         return "unknown";
9967 }
9968
9969 static const char all_lanes_dead_timeout_expired[] =
9970         "All lanes were inactive - was the interconnect media removed?";
9971 static const char tx_out_of_policy[] =
9972         "Passing lanes on local port do not meet the local link width policy";
9973 static const char no_state_complete[] =
9974         "State timeout occurred before link partner completed the state";
9975 static const char * const state_complete_reasons[] = {
9976         [0x00] = "Reason unknown",
9977         [0x01] = "Link was halted by driver, refer to LinkDownReason",
9978         [0x02] = "Link partner reported failure",
9979         [0x10] = "Unable to achieve frame sync on any lane",
9980         [0x11] =
9981           "Unable to find a common bit rate with the link partner",
9982         [0x12] =
9983           "Unable to achieve frame sync on sufficient lanes to meet the local link width policy",
9984         [0x13] =
9985           "Unable to identify preset equalization on sufficient lanes to meet the local link width policy",
9986         [0x14] = no_state_complete,
9987         [0x15] =
9988           "State timeout occurred before link partner identified equalization presets",
9989         [0x16] =
9990           "Link partner completed the EstablishComm state, but the passing lanes do not meet the local link width policy",
9991         [0x17] = tx_out_of_policy,
9992         [0x20] = all_lanes_dead_timeout_expired,
9993         [0x21] =
9994           "Unable to achieve acceptable BER on sufficient lanes to meet the local link width policy",
9995         [0x22] = no_state_complete,
9996         [0x23] =
9997           "Link partner completed the OptimizeEq state, but the passing lanes do not meet the local link width policy",
9998         [0x24] = tx_out_of_policy,
9999         [0x30] = all_lanes_dead_timeout_expired,
10000         [0x31] =
10001           "State timeout occurred waiting for host to process received frames",
10002         [0x32] = no_state_complete,
10003         [0x33] =
10004           "Link partner completed the VerifyCap state, but the passing lanes do not meet the local link width policy",
10005         [0x34] = tx_out_of_policy,
10006 };
10007
10008 static const char *state_complete_reason_code_string(struct hfi1_pportdata *ppd,
10009                                                      u32 code)
10010 {
10011         const char *str = NULL;
10012
10013         if (code < ARRAY_SIZE(state_complete_reasons))
10014                 str = state_complete_reasons[code];
10015
10016         if (str)
10017                 return str;
10018         return "Reserved";
10019 }
10020
10021 /* describe the given last state complete frame */
10022 static void decode_state_complete(struct hfi1_pportdata *ppd, u32 frame,
10023                                   const char *prefix)
10024 {
10025         struct hfi1_devdata *dd = ppd->dd;
10026         u32 success;
10027         u32 state;
10028         u32 reason;
10029         u32 lanes;
10030
10031         /*
10032          * Decode frame:
10033          *  [ 0: 0] - success
10034          *  [ 3: 1] - state
10035          *  [ 7: 4] - next state timeout
10036          *  [15: 8] - reason code
10037          *  [31:16] - lanes
10038          */
10039         success = frame & 0x1;
10040         state = (frame >> 1) & 0x7;
10041         reason = (frame >> 8) & 0xff;
10042         lanes = (frame >> 16) & 0xffff;
10043
10044         dd_dev_err(dd, "Last %s LNI state complete frame 0x%08x:\n",
10045                    prefix, frame);
10046         dd_dev_err(dd, "    last reported state: %s (0x%x)\n",
10047                    state_completed_string(state), state);
10048         dd_dev_err(dd, "    state successfully completed: %s\n",
10049                    success ? "yes" : "no");
10050         dd_dev_err(dd, "    fail reason 0x%x: %s\n",
10051                    reason, state_complete_reason_code_string(ppd, reason));
10052         dd_dev_err(dd, "    passing lane mask: 0x%x\n", lanes);
10053 }
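
/*
 * Worked example (illustrative frame value): 0x000f2002 decodes to
 * success = 0, state = 1 ("OptimizeEQ"), reason = 0x20 ("All lanes
 * were inactive..."), passing lane mask = 0x000f.
 */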
10054
10055 /*
10056  * Read the last state complete frames and explain them.  This routine
10057  * expects to be called if the link went down during link negotiation
10058  * and initialization (LNI).  That is, anywhere between polling and link up.
10059  */
10060 static void check_lni_states(struct hfi1_pportdata *ppd)
10061 {
10062         u32 last_local_state;
10063         u32 last_remote_state;
10064
10065         read_last_local_state(ppd->dd, &last_local_state);
10066         read_last_remote_state(ppd->dd, &last_remote_state);
10067
10068         /*
10069          * Don't report anything if there is nothing to report.  A value of
10070          * 0 means the link was taken down while polling and there was no
10071          * training in-process.
10072          * training in progress.
10073         if (last_local_state == 0 && last_remote_state == 0)
10074                 return;
10075
10076         decode_state_complete(ppd, last_local_state, "transmitted");
10077         decode_state_complete(ppd, last_remote_state, "received");
10078 }
10079
10080 /*
10081  * Helper for set_link_state().  Do not call except from that routine.
10082  * Expects ppd->hls_mutex to be held.
10083  *
10084  * @rem_reason value to be sent to the neighbor
10085  *
10086  * LinkDownReasons only set if transition succeeds.
10087  */
10088 static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason)
10089 {
10090         struct hfi1_devdata *dd = ppd->dd;
10091         u32 pstate, previous_state;
10092         int ret;
10093         int do_transition;
10094         int do_wait;
10095
10096         previous_state = ppd->host_link_state;
10097         ppd->host_link_state = HLS_GOING_OFFLINE;
10098         pstate = read_physical_state(dd);
10099         if (pstate == PLS_OFFLINE) {
10100                 do_transition = 0;      /* in right state */
10101                 do_wait = 0;            /* ...no need to wait */
10102         } else if ((pstate & 0xff) == PLS_OFFLINE) {
10103                 do_transition = 0;      /* in an offline transient state */
10104                 do_wait = 1;            /* ...wait for it to settle */
10105         } else {
10106                 do_transition = 1;      /* need to move to offline */
10107                 do_wait = 1;            /* ...will need to wait */
10108         }
10109
10110         if (do_transition) {
10111                 ret = set_physical_link_state(dd,
10112                                               (rem_reason << 8) | PLS_OFFLINE);
10113
10114                 if (ret != HCMD_SUCCESS) {
10115                         dd_dev_err(dd,
10116                                    "Failed to transition to Offline link state, return %d\n",
10117                                    ret);
10118                         return -EINVAL;
10119                 }
10120                 if (ppd->offline_disabled_reason ==
10121                                 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE))
10122                         ppd->offline_disabled_reason =
10123                         HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT);
10124         }
10125
10126         if (do_wait) {
10127                 /* it can take a while for the link to go down */
10128                 ret = wait_phy_linkstate(dd, PLS_OFFLINE, 10000);
10129                 if (ret < 0)
10130                         return ret;
10131         }
10132
10133         /* make sure the logical state is also down */
10134         wait_logical_linkstate(ppd, IB_PORT_DOWN, 1000);
10135
10136         /*
10137          * Now in charge of LCB - must be after the physical state is
10138          * offline.quiet and before host_link_state is changed.
10139          */
10140         set_host_lcb_access(dd);
10141         write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
10142         ppd->host_link_state = HLS_LINK_COOLDOWN; /* LCB access allowed */
10143
10144         if (ppd->port_type == PORT_TYPE_QSFP &&
10145             ppd->qsfp_info.limiting_active &&
10146             qsfp_mod_present(ppd)) {
10147                 int ret;
10148
10149                 ret = acquire_chip_resource(dd, qsfp_resource(dd), QSFP_WAIT);
10150                 if (ret == 0) {
10151                         set_qsfp_tx(ppd, 0);
10152                         release_chip_resource(dd, qsfp_resource(dd));
10153                 } else {
10154                         /* not fatal, but should warn */
10155                         dd_dev_err(dd,
10156                                    "Unable to acquire lock to turn off QSFP TX\n");
10157                 }
10158         }
10159
10160         /*
10161          * The LNI has a mandatory wait time after the physical state
10162          * moves to Offline.Quiet.  The wait time may be different
10163          * depending on how the link went down.  The 8051 firmware
10164          * will observe the needed wait time and only move to ready
10165          * when that is completed.  The largest of the quiet timeouts
10166          * is 6s, so wait that long and then at least 0.5s more for
10167          * other transitions, and another 0.5s for a buffer.
10168          */
10169         ret = wait_fm_ready(dd, 7000);
10170         if (ret) {
10171                 dd_dev_err(dd,
10172                            "After going offline, timed out waiting for the 8051 to become ready to accept host requests\n");
10173                 /* state is really offline, so make it so */
10174                 ppd->host_link_state = HLS_DN_OFFLINE;
10175                 return ret;
10176         }
10177
10178         /*
10179          * The state is now offline and the 8051 is ready to accept host
10180          * requests.
10181          *      - change our state
10182          *      - notify others if we were previously in a linkup state
10183          */
10184         ppd->host_link_state = HLS_DN_OFFLINE;
10185         if (previous_state & HLS_UP) {
10186                 /* went down while link was up */
10187                 handle_linkup_change(dd, 0);
10188         } else if (previous_state
10189                         & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
10190                 /* went down while attempting link up */
10191                 check_lni_states(ppd);
10192         }
10193
10194         /* the active link width (downgrade) is 0 on link down */
10195         ppd->link_width_active = 0;
10196         ppd->link_width_downgrade_tx_active = 0;
10197         ppd->link_width_downgrade_rx_active = 0;
10198         ppd->current_egress_rate = 0;
10199         return 0;
10200 }
10201
10202 /* return the link state name */
10203 static const char *link_state_name(u32 state)
10204 {
10205         const char *name;
10206         int n = ilog2(state);
10207         static const char * const names[] = {
10208                 [__HLS_UP_INIT_BP]       = "INIT",
10209                 [__HLS_UP_ARMED_BP]      = "ARMED",
10210                 [__HLS_UP_ACTIVE_BP]     = "ACTIVE",
10211                 [__HLS_DN_DOWNDEF_BP]    = "DOWNDEF",
10212                 [__HLS_DN_POLL_BP]       = "POLL",
10213                 [__HLS_DN_DISABLE_BP]    = "DISABLE",
10214                 [__HLS_DN_OFFLINE_BP]    = "OFFLINE",
10215                 [__HLS_VERIFY_CAP_BP]    = "VERIFY_CAP",
10216                 [__HLS_GOING_UP_BP]      = "GOING_UP",
10217                 [__HLS_GOING_OFFLINE_BP] = "GOING_OFFLINE",
10218                 [__HLS_LINK_COOLDOWN_BP] = "LINK_COOLDOWN"
10219         };
10220
10221         name = n < ARRAY_SIZE(names) ? names[n] : NULL;
10222         return name ? name : "unknown";
10223 }
10224
10225 /* return the link state reason name */
10226 static const char *link_state_reason_name(struct hfi1_pportdata *ppd, u32 state)
10227 {
10228         if (state == HLS_UP_INIT) {
10229                 switch (ppd->linkinit_reason) {
10230                 case OPA_LINKINIT_REASON_LINKUP:
10231                         return "(LINKUP)";
10232                 case OPA_LINKINIT_REASON_FLAPPING:
10233                         return "(FLAPPING)";
10234                 case OPA_LINKINIT_OUTSIDE_POLICY:
10235                         return "(OUTSIDE_POLICY)";
10236                 case OPA_LINKINIT_QUARANTINED:
10237                         return "(QUARANTINED)";
10238                 case OPA_LINKINIT_INSUFIC_CAPABILITY:
10239                         return "(INSUFIC_CAPABILITY)";
10240                 default:
10241                         break;
10242                 }
10243         }
10244         return "";
10245 }
10246
10247 /*
10248  * driver_physical_state - convert the driver's notion of a port's
10249  * state (an HLS_*) into a physical state (a {IB,OPA}_PORTPHYSSTATE_*).
10250  * Return -1 (converted to a u32) to indicate error.
10251  */
10252 u32 driver_physical_state(struct hfi1_pportdata *ppd)
10253 {
10254         switch (ppd->host_link_state) {
10255         case HLS_UP_INIT:
10256         case HLS_UP_ARMED:
10257         case HLS_UP_ACTIVE:
10258                 return IB_PORTPHYSSTATE_LINKUP;
10259         case HLS_DN_POLL:
10260                 return IB_PORTPHYSSTATE_POLLING;
10261         case HLS_DN_DISABLE:
10262                 return IB_PORTPHYSSTATE_DISABLED;
10263         case HLS_DN_OFFLINE:
10264                 return OPA_PORTPHYSSTATE_OFFLINE;
10265         case HLS_VERIFY_CAP:
10266                 return IB_PORTPHYSSTATE_POLLING;
10267         case HLS_GOING_UP:
10268                 return IB_PORTPHYSSTATE_POLLING;
10269         case HLS_GOING_OFFLINE:
10270                 return OPA_PORTPHYSSTATE_OFFLINE;
10271         case HLS_LINK_COOLDOWN:
10272                 return OPA_PORTPHYSSTATE_OFFLINE;
10273         case HLS_DN_DOWNDEF:
10274         default:
10275                 dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n",
10276                            ppd->host_link_state);
10277                 return -1;
10278         }
10279 }
10280
10281 /*
10282  * driver_logical_state - convert the driver's notion of a port's
10283  * state (an HLS_*) into a logical state (a IB_PORT_*). Return -1
10284  * (converted to a u32) to indicate error.
10285  */
10286 u32 driver_logical_state(struct hfi1_pportdata *ppd)
10287 {
10288         if (ppd->host_link_state && (ppd->host_link_state & HLS_DOWN))
10289                 return IB_PORT_DOWN;
10290
10291         switch (ppd->host_link_state & HLS_UP) {
10292         case HLS_UP_INIT:
10293                 return IB_PORT_INIT;
10294         case HLS_UP_ARMED:
10295                 return IB_PORT_ARMED;
10296         case HLS_UP_ACTIVE:
10297                 return IB_PORT_ACTIVE;
10298         default:
10299                 dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n",
10300                            ppd->host_link_state);
10301                 return -1;
10302         }
10303 }
10304
10305 void set_link_down_reason(struct hfi1_pportdata *ppd, u8 lcl_reason,
10306                           u8 neigh_reason, u8 rem_reason)
10307 {
10308         if (ppd->local_link_down_reason.latest == 0 &&
10309             ppd->neigh_link_down_reason.latest == 0) {
10310                 ppd->local_link_down_reason.latest = lcl_reason;
10311                 ppd->neigh_link_down_reason.latest = neigh_reason;
10312                 ppd->remote_link_down_reason = rem_reason;
10313         }
10314 }
10315
10316 /*
10317  * Change the physical and/or logical link state.
10318  *
10319  * Do not call this routine while inside an interrupt.  It contains
10320  * calls to routines that can take multiple seconds to finish.
10321  *
10322  * Returns 0 on success, -errno on failure.
10323  */
10324 int set_link_state(struct hfi1_pportdata *ppd, u32 state)
10325 {
10326         struct hfi1_devdata *dd = ppd->dd;
10327         struct ib_event event = {.device = NULL};
10328         int ret1, ret = 0;
10329         int orig_new_state, poll_bounce;
10330
10331         mutex_lock(&ppd->hls_lock);
10332
10333         orig_new_state = state;
10334         if (state == HLS_DN_DOWNDEF)
10335                 state = dd->link_default;
10336
10337         /* interpret poll -> poll as a link bounce */
10338         poll_bounce = ppd->host_link_state == HLS_DN_POLL &&
10339                       state == HLS_DN_POLL;
10340
10341         dd_dev_info(dd, "%s: current %s, new %s %s%s\n", __func__,
10342                     link_state_name(ppd->host_link_state),
10343                     link_state_name(orig_new_state),
10344                     poll_bounce ? "(bounce) " : "",
10345                     link_state_reason_name(ppd, state));
10346
10347         /*
10348          * If we're going to a (HLS_*) link state that implies the logical
10349          * link state is neither of (IB_PORT_ARMED, IB_PORT_ACTIVE), then
10350          * reset is_sm_config_started to 0.
10351          */
10352         if (!(state & (HLS_UP_ARMED | HLS_UP_ACTIVE)))
10353                 ppd->is_sm_config_started = 0;
10354
10355         /*
10356          * Do nothing if the states match.  Let a poll to poll link bounce
10357          * go through.
10358          */
10359         if (ppd->host_link_state == state && !poll_bounce)
10360                 goto done;
10361
10362         switch (state) {
10363         case HLS_UP_INIT:
10364                 if (ppd->host_link_state == HLS_DN_POLL &&
10365                     (quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR)) {
10366                         /*
10367                          * Quick link up jumps from polling to here.
10368                          *
10369                          * Whether in normal or loopback mode, the
10370                          * simulator jumps from polling to link up.
10371                          * Accept that here.
10372                          */
10373                         /* OK */
10374                 } else if (ppd->host_link_state != HLS_GOING_UP) {
10375                         goto unexpected;
10376                 }
10377
10378                 ppd->host_link_state = HLS_UP_INIT;
10379                 ret = wait_logical_linkstate(ppd, IB_PORT_INIT, 1000);
10380                 if (ret) {
10381                         /* logical state didn't change, stay at going_up */
10382                         ppd->host_link_state = HLS_GOING_UP;
10383                         dd_dev_err(dd,
10384                                    "%s: logical state did not change to INIT\n",
10385                                    __func__);
10386                 } else {
10387                         /* clear old transient LINKINIT_REASON code */
10388                         if (ppd->linkinit_reason >= OPA_LINKINIT_REASON_CLEAR)
10389                                 ppd->linkinit_reason =
10390                                         OPA_LINKINIT_REASON_LINKUP;
10391
10392                         /* enable the port */
10393                         add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
10394
10395                         handle_linkup_change(dd, 1);
10396                 }
10397                 break;
10398         case HLS_UP_ARMED:
10399                 if (ppd->host_link_state != HLS_UP_INIT)
10400                         goto unexpected;
10401
10402                 ppd->host_link_state = HLS_UP_ARMED;
10403                 set_logical_state(dd, LSTATE_ARMED);
10404                 ret = wait_logical_linkstate(ppd, IB_PORT_ARMED, 1000);
10405                 if (ret) {
10406                         /* logical state didn't change, stay at init */
10407                         ppd->host_link_state = HLS_UP_INIT;
10408                         dd_dev_err(dd,
10409                                    "%s: logical state did not change to ARMED\n",
10410                                    __func__);
10411                 }
10412                 /*
10413                  * The simulator does not currently implement SMA messages,
10414                  * so neighbor_normal is not set.  Set it here when we first
10415                  * move to Armed.
10416                  */
10417                 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
10418                         ppd->neighbor_normal = 1;
10419                 break;
10420         case HLS_UP_ACTIVE:
10421                 if (ppd->host_link_state != HLS_UP_ARMED)
10422                         goto unexpected;
10423
10424                 ppd->host_link_state = HLS_UP_ACTIVE;
10425                 set_logical_state(dd, LSTATE_ACTIVE);
10426                 ret = wait_logical_linkstate(ppd, IB_PORT_ACTIVE, 1000);
10427                 if (ret) {
10428                         /* logical state didn't change, stay at armed */
10429                         ppd->host_link_state = HLS_UP_ARMED;
10430                         dd_dev_err(dd,
10431                                    "%s: logical state did not change to ACTIVE\n",
10432                                    __func__);
10433                 } else {
10434                         /* tell all engines to go running */
10435                         sdma_all_running(dd);
10436
10437                         /* Signal the IB layer that the port has gone active */
10438                         event.device = &dd->verbs_dev.rdi.ibdev;
10439                         event.element.port_num = ppd->port;
10440                         event.event = IB_EVENT_PORT_ACTIVE;
10441                 }
10442                 break;
10443         case HLS_DN_POLL:
10444                 if ((ppd->host_link_state == HLS_DN_DISABLE ||
10445                      ppd->host_link_state == HLS_DN_OFFLINE) &&
10446                     dd->dc_shutdown)
10447                         dc_start(dd);
10448                 /* Hand LED control to the DC */
10449                 write_csr(dd, DCC_CFG_LED_CNTRL, 0);
10450
10451                 if (ppd->host_link_state != HLS_DN_OFFLINE) {
10452                         u8 tmp = ppd->link_enabled;
10453
10454                         ret = goto_offline(ppd, ppd->remote_link_down_reason);
10455                         if (ret) {
10456                                 ppd->link_enabled = tmp;
10457                                 break;
10458                         }
10459                         ppd->remote_link_down_reason = 0;
10460
10461                         if (ppd->driver_link_ready)
10462                                 ppd->link_enabled = 1;
10463                 }
10464
10465                 set_all_slowpath(ppd->dd);
10466                 ret = set_local_link_attributes(ppd);
10467                 if (ret)
10468                         break;
10469
10470                 ppd->port_error_action = 0;
10471                 ppd->host_link_state = HLS_DN_POLL;
10472
10473                 if (quick_linkup) {
10474                         /* quick linkup does not go into polling */
10475                         ret = do_quick_linkup(dd);
10476                 } else {
10477                         ret1 = set_physical_link_state(dd, PLS_POLLING);
10478                         if (ret1 != HCMD_SUCCESS) {
10479                                 dd_dev_err(dd,
10480                                            "Failed to transition to Polling link state, return 0x%x\n",
10481                                            ret1);
10482                                 ret = -EINVAL;
10483                         }
10484                 }
10485                 ppd->offline_disabled_reason =
10486                         HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE);
10487                 /*
10488                  * If an error occurred above, go back to offline.  The
10489                  * caller may reschedule another attempt.
10490                  */
10491                 if (ret)
10492                         goto_offline(ppd, 0);
10493                 break;
10494         case HLS_DN_DISABLE:
10495                 /* link is disabled */
10496                 ppd->link_enabled = 0;
10497
10498                 /* allow any state to transition to disabled */
10499
10500                 /* must transition to offline first */
10501                 if (ppd->host_link_state != HLS_DN_OFFLINE) {
10502                         ret = goto_offline(ppd, ppd->remote_link_down_reason);
10503                         if (ret)
10504                                 break;
10505                         ppd->remote_link_down_reason = 0;
10506                 }
10507
10508                 ret1 = set_physical_link_state(dd, PLS_DISABLED);
10509                 if (ret1 != HCMD_SUCCESS) {
10510                         dd_dev_err(dd,
10511                                    "Failed to transition to Disabled link state, return 0x%x\n",
10512                                    ret1);
10513                         ret = -EINVAL;
10514                         break;
10515                 }
10516                 ppd->host_link_state = HLS_DN_DISABLE;
10517                 dc_shutdown(dd);
10518                 break;
10519         case HLS_DN_OFFLINE:
10520                 if (ppd->host_link_state == HLS_DN_DISABLE)
10521                         dc_start(dd);
10522
10523                 /* allow any state to transition to offline */
10524                 ret = goto_offline(ppd, ppd->remote_link_down_reason);
10525                 if (!ret)
10526                         ppd->remote_link_down_reason = 0;
10527                 break;
10528         case HLS_VERIFY_CAP:
10529                 if (ppd->host_link_state != HLS_DN_POLL)
10530                         goto unexpected;
10531                 ppd->host_link_state = HLS_VERIFY_CAP;
10532                 break;
10533         case HLS_GOING_UP:
10534                 if (ppd->host_link_state != HLS_VERIFY_CAP)
10535                         goto unexpected;
10536
10537                 ret1 = set_physical_link_state(dd, PLS_LINKUP);
10538                 if (ret1 != HCMD_SUCCESS) {
10539                         dd_dev_err(dd,
10540                                    "Failed to transition to link up state, return 0x%x\n",
10541                                    ret1);
10542                         ret = -EINVAL;
10543                         break;
10544                 }
10545                 ppd->host_link_state = HLS_GOING_UP;
10546                 break;
10547
10548         case HLS_GOING_OFFLINE:         /* transient within goto_offline() */
10549         case HLS_LINK_COOLDOWN:         /* transient within goto_offline() */
10550         default:
10551                 dd_dev_info(dd, "%s: state 0x%x: not supported\n",
10552                             __func__, state);
10553                 ret = -EINVAL;
10554                 break;
10555         }
10556
10557         goto done;
10558
10559 unexpected:
10560         dd_dev_err(dd, "%s: unexpected state transition from %s to %s\n",
10561                    __func__, link_state_name(ppd->host_link_state),
10562                    link_state_name(state));
10563         ret = -EINVAL;
10564
10565 done:
10566         mutex_unlock(&ppd->hls_lock);
10567
10568         if (event.device)
10569                 ib_dispatch_event(&event);
10570
10571         return ret;
10572 }
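
/*
 * Example (editor's sketch, derived from the predecessor-state checks
 * above): a normal link bring-up walks the states in order, each call
 * validating the state left by the previous one:
 *
 *	set_link_state(ppd, HLS_DN_POLL);
 *	set_link_state(ppd, HLS_VERIFY_CAP);
 *	set_link_state(ppd, HLS_GOING_UP);
 *	set_link_state(ppd, HLS_UP_INIT);
 *	set_link_state(ppd, HLS_UP_ARMED);
 *	set_link_state(ppd, HLS_UP_ACTIVE);
 *
 * An out-of-order request takes the "unexpected" path and returns
 * -EINVAL.
 */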
10573
10574 int hfi1_set_ib_cfg(struct hfi1_pportdata *ppd, int which, u32 val)
10575 {
10576         u64 reg;
10577         int ret = 0;
10578
10579         switch (which) {
10580         case HFI1_IB_CFG_LIDLMC:
10581                 set_lidlmc(ppd);
10582                 break;
10583         case HFI1_IB_CFG_VL_HIGH_LIMIT:
10584                 /*
10585                  * The VL Arbitrator high limit is sent in units of 4k
10586                  * bytes, while HFI stores it in units of 64 bytes.
10587                  */
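                /*
                 * Worked example (editor's note): an incoming value of 2
                 * means 2 * 4096 = 8192 bytes on the wire, stored as
                 * 8192 / 64 = 128 units; i.e. val becomes 2 * 64 = 128.
                 */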
10588                 val *= 4096 / 64;
10589                 reg = ((u64)val & SEND_HIGH_PRIORITY_LIMIT_LIMIT_MASK)
10590                         << SEND_HIGH_PRIORITY_LIMIT_LIMIT_SHIFT;
10591                 write_csr(ppd->dd, SEND_HIGH_PRIORITY_LIMIT, reg);
10592                 break;
10593         case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
10594                 /* HFI only supports POLL as the default link down state */
10595                 if (val != HLS_DN_POLL)
10596                         ret = -EINVAL;
10597                 break;
10598         case HFI1_IB_CFG_OP_VLS:
10599                 if (ppd->vls_operational != val) {
10600                         ppd->vls_operational = val;
10601                         if (!ppd->port)
10602                                 ret = -EINVAL;
10603                 }
10604                 break;
10605         /*
10606          * For link width, link width downgrade, and speed enable, always AND
10607          * the setting with what is actually supported.  This has two benefits.
10608          * First, enabled can't have unsupported values, no matter what the
10609          * SM or FM might want.  Second, the ALL_SUPPORTED wildcards that mean
10610          * "fill in with your supported value" have all the bits in the
10611          * field set, so simply ANDing with supported has the desired result.
10612          */
10613         case HFI1_IB_CFG_LWID_ENB: /* set allowed Link-width */
10614                 ppd->link_width_enabled = val & ppd->link_width_supported;
10615                 break;
10616         case HFI1_IB_CFG_LWID_DG_ENB: /* set allowed link width downgrade */
10617                 ppd->link_width_downgrade_enabled =
10618                                 val & ppd->link_width_downgrade_supported;
10619                 break;
10620         case HFI1_IB_CFG_SPD_ENB: /* allowed Link speeds */
10621                 ppd->link_speed_enabled = val & ppd->link_speed_supported;
10622                 break;
10623         case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
10624                 /*
10625                  * HFI does not follow IB specs, save this value
10626                  * so we can report it, if asked.
10627                  */
10628                 ppd->overrun_threshold = val;
10629                 break;
10630         case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
10631                 /*
10632                  * HFI does not follow IB specs, save this value
10633                  * so we can report it, if asked.
10634                  */
10635                 ppd->phy_error_threshold = val;
10636                 break;
10637
10638         case HFI1_IB_CFG_MTU:
10639                 set_send_length(ppd);
10640                 break;
10641
10642         case HFI1_IB_CFG_PKEYS:
10643                 if (HFI1_CAP_IS_KSET(PKEY_CHECK))
10644                         set_partition_keys(ppd);
10645                 break;
10646
10647         default:
10648                 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
10649                         dd_dev_info(ppd->dd,
10650                                     "%s: which %s, val 0x%x: not implemented\n",
10651                                     __func__, ib_cfg_name(which), val);
10652                 break;
10653         }
10654         return ret;
10655 }
10656
10657 /* begin functions related to vl arbitration table caching */
10658 static void init_vl_arb_caches(struct hfi1_pportdata *ppd)
10659 {
10660         int i;
10661
10662         BUILD_BUG_ON(VL_ARB_TABLE_SIZE !=
10663                         VL_ARB_LOW_PRIO_TABLE_SIZE);
10664         BUILD_BUG_ON(VL_ARB_TABLE_SIZE !=
10665                         VL_ARB_HIGH_PRIO_TABLE_SIZE);
10666
10667         /*
10668          * Note that we always return values directly from the
10669          * 'vl_arb_cache' (and do no CSR reads) in response to a
10670          * 'Get(VLArbTable)'. This is obviously correct after a
10671          * 'Set(VLArbTable)', since the cache will then be up to
10672          * date. But it's also correct prior to any 'Set(VLArbTable)'
10673          * since then both the cache, and the relevant h/w registers
10674          * will be zeroed.
10675          */
10676
10677         for (i = 0; i < MAX_PRIO_TABLE; i++)
10678                 spin_lock_init(&ppd->vl_arb_cache[i].lock);
10679 }
10680
10681 /*
10682  * vl_arb_lock_cache
10683  *
10684  * All other vl_arb_* functions should be called only after locking
10685  * the cache.
10686  */
10687 static inline struct vl_arb_cache *
10688 vl_arb_lock_cache(struct hfi1_pportdata *ppd, int idx)
10689 {
10690         if (idx != LO_PRIO_TABLE && idx != HI_PRIO_TABLE)
10691                 return NULL;
10692         spin_lock(&ppd->vl_arb_cache[idx].lock);
10693         return &ppd->vl_arb_cache[idx];
10694 }
10695
10696 static inline void vl_arb_unlock_cache(struct hfi1_pportdata *ppd, int idx)
10697 {
10698         spin_unlock(&ppd->vl_arb_cache[idx].lock);
10699 }
10700
10701 static void vl_arb_get_cache(struct vl_arb_cache *cache,
10702                              struct ib_vl_weight_elem *vl)
10703 {
10704         memcpy(vl, cache->table, VL_ARB_TABLE_SIZE * sizeof(*vl));
10705 }
10706
10707 static void vl_arb_set_cache(struct vl_arb_cache *cache,
10708                              struct ib_vl_weight_elem *vl)
10709 {
10710         memcpy(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl));
10711 }
10712
10713 static int vl_arb_match_cache(struct vl_arb_cache *cache,
10714                               struct ib_vl_weight_elem *vl)
10715 {
10716         return !memcmp(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl));
10717 }
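
/*
 * Typical usage of these cache helpers (editor's sketch; see
 * fm_get_table() and fm_set_table() below for the real callers):
 *
 *	struct vl_arb_cache *vlc;
 *
 *	vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
 *	vl_arb_get_cache(vlc, t);
 *	vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
 */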
10718
10719 /* end functions related to vl arbitration table caching */
10720
10721 static int set_vl_weights(struct hfi1_pportdata *ppd, u32 target,
10722                           u32 size, struct ib_vl_weight_elem *vl)
10723 {
10724         struct hfi1_devdata *dd = ppd->dd;
10725         u64 reg;
10726         unsigned int i, is_up = 0;
10727         int drain, ret = 0;
10728
10729         mutex_lock(&ppd->hls_lock);
10730
10731         if (ppd->host_link_state & HLS_UP)
10732                 is_up = 1;
10733
10734         drain = !is_ax(dd) && is_up;
10735
10736         if (drain)
10737                 /*
10738                  * Before adjusting VL arbitration weights, empty per-VL
10739                  * FIFOs, otherwise a packet whose VL weight is being
10740                  * set to 0 could get stuck in a FIFO with no chance to
10741                  * egress.
10742                  */
10743                 ret = stop_drain_data_vls(dd);
10744
10745         if (ret) {
10746                 dd_dev_err(
10747                         dd,
10748                         "%s: cannot stop/drain VLs - refusing to change VL arbitration weights\n",
10749                         __func__);
10750                 goto err;
10751         }
10752
10753         for (i = 0; i < size; i++, vl++) {
10754                 /*
10755                  * NOTE: The low priority shift and mask are used here, but
10756                  * they are the same for both the low and high registers.
10757                  */
10758                 reg = (((u64)vl->vl & SEND_LOW_PRIORITY_LIST_VL_MASK)
10759                                 << SEND_LOW_PRIORITY_LIST_VL_SHIFT)
10760                       | (((u64)vl->weight
10761                                 & SEND_LOW_PRIORITY_LIST_WEIGHT_MASK)
10762                                 << SEND_LOW_PRIORITY_LIST_WEIGHT_SHIFT);
10763                 write_csr(dd, target + (i * 8), reg);
10764         }
10765         pio_send_control(dd, PSC_GLOBAL_VLARB_ENABLE);
10766
10767         if (drain)
10768                 open_fill_data_vls(dd); /* reopen all VLs */
10769
10770 err:
10771         mutex_unlock(&ppd->hls_lock);
10772
10773         return ret;
10774 }
10775
10776 /*
10777  * Read one credit merge VL register.
10778  */
10779 static void read_one_cm_vl(struct hfi1_devdata *dd, u32 csr,
10780                            struct vl_limit *vll)
10781 {
10782         u64 reg = read_csr(dd, csr);
10783
10784         vll->dedicated = cpu_to_be16(
10785                 (reg >> SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT)
10786                 & SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_MASK);
10787         vll->shared = cpu_to_be16(
10788                 (reg >> SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT)
10789                 & SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_MASK);
10790 }
10791
10792 /*
10793  * Read the current credit merge limits.
10794  */
10795 static int get_buffer_control(struct hfi1_devdata *dd,
10796                               struct buffer_control *bc, u16 *overall_limit)
10797 {
10798         u64 reg;
10799         int i;
10800
10801         /* not all entries are filled in */
10802         memset(bc, 0, sizeof(*bc));
10803
10804         /* OPA and HFI have a 1-1 mapping */
10805         for (i = 0; i < TXE_NUM_DATA_VL; i++)
10806                 read_one_cm_vl(dd, SEND_CM_CREDIT_VL + (8 * i), &bc->vl[i]);
10807
10808         /* NOTE: assumes that VL* and VL15 CSRs are bit-wise identical */
10809         read_one_cm_vl(dd, SEND_CM_CREDIT_VL15, &bc->vl[15]);
10810
10811         reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
10812         bc->overall_shared_limit = cpu_to_be16(
10813                 (reg >> SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT)
10814                 & SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_MASK);
10815         if (overall_limit)
10816                 *overall_limit = (reg
10817                         >> SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT)
10818                         & SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_MASK;
10819         return sizeof(struct buffer_control);
10820 }
10821
10822 static int get_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
10823 {
10824         u64 reg;
10825         int i;
10826
10827         /* each register contains 16 SC->VLnt mappings, 4 bits each */
10828         reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_15_0);
10829         for (i = 0; i < sizeof(u64); i++) {
10830                 u8 byte = *(((u8 *)&reg) + i);
10831
10832                 dp->vlnt[2 * i] = byte & 0xf;
10833                 dp->vlnt[(2 * i) + 1] = (byte & 0xf0) >> 4;
10834         }
10835
10836         reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_31_16);
10837         for (i = 0; i < sizeof(u64); i++) {
10838                 u8 byte = *(((u8 *)&reg) + i);
10839
10840                 dp->vlnt[16 + (2 * i)] = byte & 0xf;
10841                 dp->vlnt[16 + (2 * i) + 1] = (byte & 0xf0) >> 4;
10842         }
10843         return sizeof(struct sc2vlnt);
10844 }
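
/*
 * Unpacking example (editor's note; the byte indexing above implies a
 * little-endian host): if the least significant byte of the 15_0
 * register reads 0x21, then dp->vlnt[0] = 0x1 (low nibble) and
 * dp->vlnt[1] = 0x2 (high nibble).
 */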
10845
10846 static void get_vlarb_preempt(struct hfi1_devdata *dd, u32 nelems,
10847                               struct ib_vl_weight_elem *vl)
10848 {
10849         unsigned int i;
10850
10851         for (i = 0; i < nelems; i++, vl++) {
10852                 vl->vl = 0xf;
10853                 vl->weight = 0;
10854         }
10855 }
10856
10857 static void set_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
10858 {
10859         write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0,
10860                   DC_SC_VL_VAL(15_0,
10861                                0, dp->vlnt[0] & 0xf,
10862                                1, dp->vlnt[1] & 0xf,
10863                                2, dp->vlnt[2] & 0xf,
10864                                3, dp->vlnt[3] & 0xf,
10865                                4, dp->vlnt[4] & 0xf,
10866                                5, dp->vlnt[5] & 0xf,
10867                                6, dp->vlnt[6] & 0xf,
10868                                7, dp->vlnt[7] & 0xf,
10869                                8, dp->vlnt[8] & 0xf,
10870                                9, dp->vlnt[9] & 0xf,
10871                                10, dp->vlnt[10] & 0xf,
10872                                11, dp->vlnt[11] & 0xf,
10873                                12, dp->vlnt[12] & 0xf,
10874                                13, dp->vlnt[13] & 0xf,
10875                                14, dp->vlnt[14] & 0xf,
10876                                15, dp->vlnt[15] & 0xf));
10877         write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16,
10878                   DC_SC_VL_VAL(31_16,
10879                                16, dp->vlnt[16] & 0xf,
10880                                17, dp->vlnt[17] & 0xf,
10881                                18, dp->vlnt[18] & 0xf,
10882                                19, dp->vlnt[19] & 0xf,
10883                                20, dp->vlnt[20] & 0xf,
10884                                21, dp->vlnt[21] & 0xf,
10885                                22, dp->vlnt[22] & 0xf,
10886                                23, dp->vlnt[23] & 0xf,
10887                                24, dp->vlnt[24] & 0xf,
10888                                25, dp->vlnt[25] & 0xf,
10889                                26, dp->vlnt[26] & 0xf,
10890                                27, dp->vlnt[27] & 0xf,
10891                                28, dp->vlnt[28] & 0xf,
10892                                29, dp->vlnt[29] & 0xf,
10893                                30, dp->vlnt[30] & 0xf,
10894                                31, dp->vlnt[31] & 0xf));
10895 }
10896
10897 static void nonzero_msg(struct hfi1_devdata *dd, int idx, const char *what,
10898                         u16 limit)
10899 {
10900         if (limit != 0)
10901                 dd_dev_info(dd, "Invalid %s limit %d on VL %d, ignoring\n",
10902                             what, (int)limit, idx);
10903 }
10904
10905 /* change only the shared limit portion of SendCmGlobalCredit */
10906 static void set_global_shared(struct hfi1_devdata *dd, u16 limit)
10907 {
10908         u64 reg;
10909
10910         reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
10911         reg &= ~SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SMASK;
10912         reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT;
10913         write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
10914 }
10915
10916 /* change only the total credit limit portion of SendCmGlobalCredit */
10917 static void set_global_limit(struct hfi1_devdata *dd, u16 limit)
10918 {
10919         u64 reg;
10920
10921         reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
10922         reg &= ~SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SMASK;
10923         reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT;
10924         write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
10925 }
10926
10927 /* set the given per-VL shared limit */
10928 static void set_vl_shared(struct hfi1_devdata *dd, int vl, u16 limit)
10929 {
10930         u64 reg;
10931         u32 addr;
10932
10933         if (vl < TXE_NUM_DATA_VL)
10934                 addr = SEND_CM_CREDIT_VL + (8 * vl);
10935         else
10936                 addr = SEND_CM_CREDIT_VL15;
10937
10938         reg = read_csr(dd, addr);
10939         reg &= ~SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SMASK;
10940         reg |= (u64)limit << SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT;
10941         write_csr(dd, addr, reg);
10942 }
10943
10944 /* set the given per-VL dedicated limit */
10945 static void set_vl_dedicated(struct hfi1_devdata *dd, int vl, u16 limit)
10946 {
10947         u64 reg;
10948         u32 addr;
10949
10950         if (vl < TXE_NUM_DATA_VL)
10951                 addr = SEND_CM_CREDIT_VL + (8 * vl);
10952         else
10953                 addr = SEND_CM_CREDIT_VL15;
10954
10955         reg = read_csr(dd, addr);
10956         reg &= ~SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SMASK;
10957         reg |= (u64)limit << SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT;
10958         write_csr(dd, addr, reg);
10959 }
10960
10961 /* spin until the given per-VL status mask bits clear */
10962 static void wait_for_vl_status_clear(struct hfi1_devdata *dd, u64 mask,
10963                                      const char *which)
10964 {
10965         unsigned long timeout;
10966         u64 reg;
10967
10968         timeout = jiffies + msecs_to_jiffies(VL_STATUS_CLEAR_TIMEOUT);
10969         while (1) {
10970                 reg = read_csr(dd, SEND_CM_CREDIT_USED_STATUS) & mask;
10971
10972                 if (reg == 0)
10973                         return; /* success */
10974                 if (time_after(jiffies, timeout))
10975                         break;          /* timed out */
10976                 udelay(1);
10977         }
10978
10979         dd_dev_err(dd,
10980                    "%s credit change status not clearing after %dms, mask 0x%llx, not clear 0x%llx\n",
10981                    which, VL_STATUS_CLEAR_TIMEOUT, mask, reg);
10982         /*
10983          * If this occurs, it is likely there was a credit loss on the link.
10984          * The only recovery from that is a link bounce.
10985          */
10986         dd_dev_err(dd,
10987                    "Continuing anyway.  A credit loss may occur.  Suggest a link bounce\n");
10988 }
10989
10990 /*
10991  * The number of credits on the VLs may be changed while everything
10992  * is "live", but the following algorithm must be followed due to
10993  * how the hardware is actually implemented.  In particular,
10994  * Return_Credit_Status[] is the only correct status check.
10995  *
10996  * if (reducing Global_Shared_Credit_Limit or any shared limit changing)
10997  *     set Global_Shared_Credit_Limit = 0
10998  *     use_all_vl = 1
10999  * mask0 = all VLs that are changing either dedicated or shared limits
11000  * set Shared_Limit[mask0] = 0
11001  * spin until Return_Credit_Status[use_all_vl ? all VL : mask0] == 0
11002  * if (changing any dedicated limit)
11003  *     mask1 = all VLs that are lowering dedicated limits
11004  *     lower Dedicated_Limit[mask1]
11005  *     spin until Return_Credit_Status[mask1] == 0
11006  *     raise Dedicated_Limits
11007  * raise Shared_Limits
11008  * raise Global_Shared_Credit_Limit
11009  *
11010  * lower = if the new limit is lower, set the limit to the new value
11011  * raise = if the new limit is higher than the current value (may be changed
11012  *      earlier in the algorithm), set the new limit to the new value
11013  */
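/*
 * Worked example (editor's note): lowering only VL0's dedicated limit,
 * with every shared limit unchanged, makes changing_mask == ld_mask ==
 * the VL0 return-credit status bit and leaves use_all_mask at 0 on
 * non-A0 hardware; only VL0's shared limit is zeroed and only VL0's
 * status is waited on before the lower dedicated limit is written.
 */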
11014 int set_buffer_control(struct hfi1_pportdata *ppd,
11015                        struct buffer_control *new_bc)
11016 {
11017         struct hfi1_devdata *dd = ppd->dd;
11018         u64 changing_mask, ld_mask, stat_mask;
11019         int change_count;
11020         int i, use_all_mask;
11021         int this_shared_changing;
11022         int vl_count = 0, ret;
11023         /*
11024          * A0: add the variable any_shared_limit_changing below and in the
11025          * algorithm above.  If removing A0 support, it can be removed.
11026          */
11027         int any_shared_limit_changing;
11028         struct buffer_control cur_bc;
11029         u8 changing[OPA_MAX_VLS];
11030         u8 lowering_dedicated[OPA_MAX_VLS];
11031         u16 cur_total;
11032         u32 new_total = 0;
11033         const u64 all_mask =
11034         SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK
11035          | SEND_CM_CREDIT_USED_STATUS_VL1_RETURN_CREDIT_STATUS_SMASK
11036          | SEND_CM_CREDIT_USED_STATUS_VL2_RETURN_CREDIT_STATUS_SMASK
11037          | SEND_CM_CREDIT_USED_STATUS_VL3_RETURN_CREDIT_STATUS_SMASK
11038          | SEND_CM_CREDIT_USED_STATUS_VL4_RETURN_CREDIT_STATUS_SMASK
11039          | SEND_CM_CREDIT_USED_STATUS_VL5_RETURN_CREDIT_STATUS_SMASK
11040          | SEND_CM_CREDIT_USED_STATUS_VL6_RETURN_CREDIT_STATUS_SMASK
11041          | SEND_CM_CREDIT_USED_STATUS_VL7_RETURN_CREDIT_STATUS_SMASK
11042          | SEND_CM_CREDIT_USED_STATUS_VL15_RETURN_CREDIT_STATUS_SMASK;
11043
11044 #define valid_vl(idx) ((idx) < TXE_NUM_DATA_VL || (idx) == 15)
11045 #define NUM_USABLE_VLS 16       /* look at VL15 and less */
11046
11047         /* find the new total credits, do sanity check on unused VLs */
11048         for (i = 0; i < OPA_MAX_VLS; i++) {
11049                 if (valid_vl(i)) {
11050                         new_total += be16_to_cpu(new_bc->vl[i].dedicated);
11051                         continue;
11052                 }
11053                 nonzero_msg(dd, i, "dedicated",
11054                             be16_to_cpu(new_bc->vl[i].dedicated));
11055                 nonzero_msg(dd, i, "shared",
11056                             be16_to_cpu(new_bc->vl[i].shared));
11057                 new_bc->vl[i].dedicated = 0;
11058                 new_bc->vl[i].shared = 0;
11059         }
11060         new_total += be16_to_cpu(new_bc->overall_shared_limit);
11061
11062         /* fetch the current values */
11063         get_buffer_control(dd, &cur_bc, &cur_total);
11064
11065         /*
11066          * Create the masks we will use.
11067          */
11068         memset(changing, 0, sizeof(changing));
11069         memset(lowering_dedicated, 0, sizeof(lowering_dedicated));
11070         /*
11071          * NOTE: Assumes that the individual VL bits are adjacent and in
11072          * increasing order
11073          */
11074         stat_mask =
11075                 SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK;
11076         changing_mask = 0;
11077         ld_mask = 0;
11078         change_count = 0;
11079         any_shared_limit_changing = 0;
11080         for (i = 0; i < NUM_USABLE_VLS; i++, stat_mask <<= 1) {
11081                 if (!valid_vl(i))
11082                         continue;
11083                 this_shared_changing = new_bc->vl[i].shared
11084                                                 != cur_bc.vl[i].shared;
11085                 if (this_shared_changing)
11086                         any_shared_limit_changing = 1;
11087                 if (new_bc->vl[i].dedicated != cur_bc.vl[i].dedicated ||
11088                     this_shared_changing) {
11089                         changing[i] = 1;
11090                         changing_mask |= stat_mask;
11091                         change_count++;
11092                 }
11093                 if (be16_to_cpu(new_bc->vl[i].dedicated) <
11094                                         be16_to_cpu(cur_bc.vl[i].dedicated)) {
11095                         lowering_dedicated[i] = 1;
11096                         ld_mask |= stat_mask;
11097                 }
11098         }
11099
11100         /* bracket the credit change with a total adjustment */
11101         if (new_total > cur_total)
11102                 set_global_limit(dd, new_total);
11103
11104         /*
11105          * Start the credit change algorithm.
11106          */
11107         use_all_mask = 0;
11108         if ((be16_to_cpu(new_bc->overall_shared_limit) <
11109              be16_to_cpu(cur_bc.overall_shared_limit)) ||
11110             (is_ax(dd) && any_shared_limit_changing)) {
11111                 set_global_shared(dd, 0);
11112                 cur_bc.overall_shared_limit = 0;
11113                 use_all_mask = 1;
11114         }
11115
11116         for (i = 0; i < NUM_USABLE_VLS; i++) {
11117                 if (!valid_vl(i))
11118                         continue;
11119
11120                 if (changing[i]) {
11121                         set_vl_shared(dd, i, 0);
11122                         cur_bc.vl[i].shared = 0;
11123                 }
11124         }
11125
11126         wait_for_vl_status_clear(dd, use_all_mask ? all_mask : changing_mask,
11127                                  "shared");
11128
11129         if (change_count > 0) {
11130                 for (i = 0; i < NUM_USABLE_VLS; i++) {
11131                         if (!valid_vl(i))
11132                                 continue;
11133
11134                         if (lowering_dedicated[i]) {
11135                                 set_vl_dedicated(dd, i,
11136                                                  be16_to_cpu(new_bc->
11137                                                              vl[i].dedicated));
11138                                 cur_bc.vl[i].dedicated =
11139                                                 new_bc->vl[i].dedicated;
11140                         }
11141                 }
11142
11143                 wait_for_vl_status_clear(dd, ld_mask, "dedicated");
11144
11145                 /* now raise all dedicated that are going up */
11146                 for (i = 0; i < NUM_USABLE_VLS; i++) {
11147                         if (!valid_vl(i))
11148                                 continue;
11149
11150                         if (be16_to_cpu(new_bc->vl[i].dedicated) >
11151                                         be16_to_cpu(cur_bc.vl[i].dedicated))
11152                                 set_vl_dedicated(dd, i,
11153                                                  be16_to_cpu(new_bc->
11154                                                              vl[i].dedicated));
11155                 }
11156         }
11157
11158         /* next raise all shared that are going up */
11159         for (i = 0; i < NUM_USABLE_VLS; i++) {
11160                 if (!valid_vl(i))
11161                         continue;
11162
11163                 if (be16_to_cpu(new_bc->vl[i].shared) >
11164                                 be16_to_cpu(cur_bc.vl[i].shared))
11165                         set_vl_shared(dd, i, be16_to_cpu(new_bc->vl[i].shared));
11166         }
11167
11168         /* finally raise the global shared */
11169         if (be16_to_cpu(new_bc->overall_shared_limit) >
11170             be16_to_cpu(cur_bc.overall_shared_limit))
11171                 set_global_shared(dd,
11172                                   be16_to_cpu(new_bc->overall_shared_limit));
11173
11174         /* bracket the credit change with a total adjustment */
11175         if (new_total < cur_total)
11176                 set_global_limit(dd, new_total);
11177
11178         /*
11179          * Determine the actual number of operational VLs using the number of
11180          * dedicated and shared credits for each VL.
11181          */
11182         if (change_count > 0) {
11183                 for (i = 0; i < TXE_NUM_DATA_VL; i++)
11184                         if (be16_to_cpu(new_bc->vl[i].dedicated) > 0 ||
11185                             be16_to_cpu(new_bc->vl[i].shared) > 0)
11186                                 vl_count++;
11187                 ppd->actual_vls_operational = vl_count;
11188                 ret = sdma_map_init(dd, ppd->port - 1, vl_count ?
11189                                     ppd->actual_vls_operational :
11190                                     ppd->vls_operational,
11191                                     NULL);
11192                 if (ret == 0)
11193                         ret = pio_map_init(dd, ppd->port - 1, vl_count ?
11194                                            ppd->actual_vls_operational :
11195                                            ppd->vls_operational, NULL);
11196                 if (ret)
11197                         return ret;
11198         }
11199         return 0;
11200 }
11201
11202 /*
11203  * Read the given fabric manager table. Return the size of the
11204  * table (in bytes) on success, and a negative error code on
11205  * failure.
11206  */
11207 int fm_get_table(struct hfi1_pportdata *ppd, int which, void *t)
11209 {
11210         int size;
11211         struct vl_arb_cache *vlc;
11212
11213         switch (which) {
11214         case FM_TBL_VL_HIGH_ARB:
11215                 size = 256;
11216                 /*
11217                  * OPA specifies 128 elements (of 2 bytes each), though
11218                  * HFI supports only 16 elements in h/w.
11219                  */
11220                 vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE);
11221                 vl_arb_get_cache(vlc, t);
11222                 vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
11223                 break;
11224         case FM_TBL_VL_LOW_ARB:
11225                 size = 256;
11226                 /*
11227                  * OPA specifies 128 elements (of 2 bytes each), though
11228                  * HFI supports only 16 elements in h/w.
11229                  */
11230                 vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
11231                 vl_arb_get_cache(vlc, t);
11232                 vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
11233                 break;
11234         case FM_TBL_BUFFER_CONTROL:
11235                 size = get_buffer_control(ppd->dd, t, NULL);
11236                 break;
11237         case FM_TBL_SC2VLNT:
11238                 size = get_sc2vlnt(ppd->dd, t);
11239                 break;
11240         case FM_TBL_VL_PREEMPT_ELEMS:
11241                 size = 256;
11242                 /* OPA specifies 128 elements, of 2 bytes each */
11243                 get_vlarb_preempt(ppd->dd, OPA_MAX_VLS, t);
11244                 break;
11245         case FM_TBL_VL_PREEMPT_MATRIX:
11246                 size = 256;
11247                 /*
11248                  * OPA specifies that this is the same size as the VL
11249                  * arbitration tables (i.e., 256 bytes).
11250                  */
11251                 break;
11252         default:
11253                 return -EINVAL;
11254         }
11255         return size;
11256 }
11257
11258 /*
11259  * Write the given fabric manager table.
11260  */
11261 int fm_set_table(struct hfi1_pportdata *ppd, int which, void *t)
11262 {
11263         int ret = 0;
11264         struct vl_arb_cache *vlc;
11265
11266         switch (which) {
11267         case FM_TBL_VL_HIGH_ARB:
11268                 vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE);
11269                 if (vl_arb_match_cache(vlc, t)) {
11270                         vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
11271                         break;
11272                 }
11273                 vl_arb_set_cache(vlc, t);
11274                 vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
11275                 ret = set_vl_weights(ppd, SEND_HIGH_PRIORITY_LIST,
11276                                      VL_ARB_HIGH_PRIO_TABLE_SIZE, t);
11277                 break;
11278         case FM_TBL_VL_LOW_ARB:
11279                 vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
11280                 if (vl_arb_match_cache(vlc, t)) {
11281                         vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
11282                         break;
11283                 }
11284                 vl_arb_set_cache(vlc, t);
11285                 vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
11286                 ret = set_vl_weights(ppd, SEND_LOW_PRIORITY_LIST,
11287                                      VL_ARB_LOW_PRIO_TABLE_SIZE, t);
11288                 break;
11289         case FM_TBL_BUFFER_CONTROL:
11290                 ret = set_buffer_control(ppd, t);
11291                 break;
11292         case FM_TBL_SC2VLNT:
11293                 set_sc2vlnt(ppd->dd, t);
11294                 break;
11295         default:
11296                 ret = -EINVAL;
11297         }
11298         return ret;
11299 }
11300
11301 /*
11302  * Disable all data VLs.
11303  *
11304  * Return 0 if disabled, non-zero if the VLs cannot be disabled.
11305  */
11306 static int disable_data_vls(struct hfi1_devdata *dd)
11307 {
11308         if (is_ax(dd))
11309                 return 1;
11310
11311         pio_send_control(dd, PSC_DATA_VL_DISABLE);
11312
11313         return 0;
11314 }
11315
11316 /*
11317  * open_fill_data_vls() - the counterpart to stop_drain_data_vls().
11318  * Just re-enables all data VLs (the "fill" part happens
11319  * automatically - the name was chosen for symmetry with
11320  * stop_drain_data_vls()).
11321  *
11322  * Return 0 if successful, non-zero if the VLs cannot be enabled.
11323  */
11324 int open_fill_data_vls(struct hfi1_devdata *dd)
11325 {
11326         if (is_ax(dd))
11327                 return 1;
11328
11329         pio_send_control(dd, PSC_DATA_VL_ENABLE);
11330
11331         return 0;
11332 }
11333
11334 /*
11335  * drain_data_vls() - assumes that disable_data_vls() has been called;
11336  * waits for the occupancy of the per-VL FIFOs (for all contexts) and
11337  * of the SDMA engines to drop to 0.
11338  */
11339 static void drain_data_vls(struct hfi1_devdata *dd)
11340 {
11341         sc_wait(dd);
11342         sdma_wait(dd);
11343         pause_for_credit_return(dd);
11344 }
11345
11346 /*
11347  * stop_drain_data_vls() - disable, then drain all per-VL fifos.
11348  *
11349  * Use open_fill_data_vls() to resume using data VLs.  This pair is
11350  * meant to be used like this:
11351  *
11352  * stop_drain_data_vls(dd);
11353  * // do things with per-VL resources
11354  * open_fill_data_vls(dd);
11355  */
11356 int stop_drain_data_vls(struct hfi1_devdata *dd)
11357 {
11358         int ret;
11359
11360         ret = disable_data_vls(dd);
11361         if (ret == 0)
11362                 drain_data_vls(dd);
11363
11364         return ret;
11365 }
11366
11367 /*
11368  * Convert a nanosecond time to a cclock count.  No matter how slow
11369  * the cclock, a non-zero ns will always have a non-zero result.
11370  */
11371 u32 ns_to_cclock(struct hfi1_devdata *dd, u32 ns)
11372 {
11373         u32 cclocks;
11374
11375         if (dd->icode == ICODE_FPGA_EMULATION)
11376                 cclocks = (ns * 1000) / FPGA_CCLOCK_PS;
11377         else  /* simulation pretends to be ASIC */
11378                 cclocks = (ns * 1000) / ASIC_CCLOCK_PS;
11379         if (ns && !cclocks)     /* if ns nonzero, must be at least 1 */
11380                 cclocks = 1;
11381         return cclocks;
11382 }
11383
11384 /*
11385  * Convert a cclock count to nanoseconds. No matter how slow
11386  * the cclock, a non-zero cclocks will always have a non-zero result.
11387  */
11388 u32 cclock_to_ns(struct hfi1_devdata *dd, u32 cclocks)
11389 {
11390         u32 ns;
11391
11392         if (dd->icode == ICODE_FPGA_EMULATION)
11393                 ns = (cclocks * FPGA_CCLOCK_PS) / 1000;
11394         else  /* simulation pretends to be ASIC */
11395                 ns = (cclocks * ASIC_CCLOCK_PS) / 1000;
11396         if (cclocks && !ns)
11397                 ns = 1;
11398         return ns;
11399 }
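
/*
 * Round-trip example (editor's sketch; 1250 ps is a hypothetical
 * period, not necessarily ASIC_CCLOCK_PS): ns_to_cclock(dd, 10) =
 * (10 * 1000) / 1250 = 8 cclocks, and cclock_to_ns(dd, 8) =
 * (8 * 1250) / 1000 = 10 ns.  A request of 1 ns would truncate to 0
 * and be bumped to 1 cclock by the guard in ns_to_cclock().
 */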
11400
11401 /*
11402  * Dynamically adjust the receive interrupt timeout for a context based on
11403  * incoming packet rate.
11404  *
11405  * NOTE: Dynamic adjustment does not allow rcv_intr_count to be zero.
11406  */
11407 static void adjust_rcv_timeout(struct hfi1_ctxtdata *rcd, u32 npkts)
11408 {
11409         struct hfi1_devdata *dd = rcd->dd;
11410         u32 timeout = rcd->rcvavail_timeout;
11411
11412         /*
11413          * This algorithm doubles or halves the timeout depending on whether
11414          * the number of packets received in this interrupt was less than,
11415          * or greater than or equal to, the interrupt count.
11416          *
11417          * The calculations below do not allow a steady state to be achieved.
11418          * Only at the endpoints is it possible to have an unchanging
11419          * timeout.
11420          */
11421         if (npkts < rcv_intr_count) {
11422                 /*
11423                  * Not enough packets arrived before the timeout, adjust
11424                  * timeout downward.
11425                  */
11426                 if (timeout < 2) /* already at minimum? */
11427                         return;
11428                 timeout >>= 1;
11429         } else {
11430                 /*
11431                  * More than enough packets arrived before the timeout, adjust
11432                  * timeout upward.
11433                  */
11434                 if (timeout >= dd->rcv_intr_timeout_csr) /* already at max? */
11435                         return;
11436                 timeout = min(timeout << 1, dd->rcv_intr_timeout_csr);
11437         }
11438
11439         rcd->rcvavail_timeout = timeout;
11440         /*
11441          * timeout cannot be larger than rcv_intr_timeout_csr which has already
11442          * been verified to be in range
11443          */
11444         write_kctxt_csr(dd, rcd->ctxt, RCV_AVAIL_TIME_OUT,
11445                         (u64)timeout <<
11446                         RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);
11447 }
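
/*
 * Example trace (editor's sketch; rcv_intr_count = 16 is hypothetical):
 * starting from a timeout of 840, three consecutive interrupts with
 * npkts < 16 halve it to 420, 210, then 105; a later interrupt with
 * npkts >= 16 doubles it back to 210, never exceeding
 * dd->rcv_intr_timeout_csr.
 */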
11448
11449 void update_usrhead(struct hfi1_ctxtdata *rcd, u32 hd, u32 updegr, u32 egrhd,
11450                     u32 intr_adjust, u32 npkts)
11451 {
11452         struct hfi1_devdata *dd = rcd->dd;
11453         u64 reg;
11454         u32 ctxt = rcd->ctxt;
11455
11456         /*
11457          * Need to write timeout register before updating RcvHdrHead to ensure
11458          * that a new value is used when the HW decides to restart counting.
11459          */
11460         if (intr_adjust)
11461                 adjust_rcv_timeout(rcd, npkts);
11462         if (updegr) {
11463                 reg = (egrhd & RCV_EGR_INDEX_HEAD_HEAD_MASK)
11464                         << RCV_EGR_INDEX_HEAD_HEAD_SHIFT;
11465                 write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, reg);
11466         }
11467         mmiowb();
11468         reg = ((u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT) |
11469                 (((u64)hd & RCV_HDR_HEAD_HEAD_MASK)
11470                         << RCV_HDR_HEAD_HEAD_SHIFT);
11471         write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
11472         mmiowb();
11473 }
11474
11475 u32 hdrqempty(struct hfi1_ctxtdata *rcd)
11476 {
11477         u32 head, tail;
11478
11479         head = (read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_HEAD)
11480                 & RCV_HDR_HEAD_HEAD_SMASK) >> RCV_HDR_HEAD_HEAD_SHIFT;
11481
11482         if (rcd->rcvhdrtail_kvaddr)
11483                 tail = get_rcvhdrtail(rcd);
11484         else
11485                 tail = read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL);
11486
11487         return head == tail;
11488 }
11489
11490 /*
11491  * Context Control and Receive Array encoding for buffer size:
11492  *      0x0 invalid
11493  *      0x1   4 KB
11494  *      0x2   8 KB
11495  *      0x3  16 KB
11496  *      0x4  32 KB
11497  *      0x5  64 KB
11498  *      0x6 128 KB
11499  *      0x7 256 KB
11500  *      0x8 512 KB (Receive Array only)
11501  *      0x9   1 MB (Receive Array only)
11502  *      0xa   2 MB (Receive Array only)
11503  *
11504  *      0xB-0xF - reserved (Receive Array only)
11505  *
11506  *
11507  * This routine assumes that the value has already been sanity checked.
11508  */
11509 static u32 encoded_size(u32 size)
11510 {
11511         switch (size) {
11512         case   4 * 1024: return 0x1;
11513         case   8 * 1024: return 0x2;
11514         case  16 * 1024: return 0x3;
11515         case  32 * 1024: return 0x4;
11516         case  64 * 1024: return 0x5;
11517         case 128 * 1024: return 0x6;
11518         case 256 * 1024: return 0x7;
11519         case 512 * 1024: return 0x8;
11520         case   1 * 1024 * 1024: return 0x9;
11521         case   2 * 1024 * 1024: return 0xa;
11522         }
11523         return 0x1;     /* if invalid, go with the minimum size */
11524 }
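
/*
 * e.g. encoded_size(64 * 1024) == 0x5; any size not listed falls back
 * to 0x1 (4 KB), per the comment above.
 */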
11525
11526 void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op, int ctxt)
11527 {
11528         struct hfi1_ctxtdata *rcd;
11529         u64 rcvctrl, reg;
11530         int did_enable = 0;
11531
11532         rcd = dd->rcd[ctxt];
11533         if (!rcd)
11534                 return;
11535
11536         hfi1_cdbg(RCVCTRL, "ctxt %d op 0x%x", ctxt, op);
11537
11538         rcvctrl = read_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL);
11539         /* if the context already enabled, don't do the extra steps */
11540         if ((op & HFI1_RCVCTRL_CTXT_ENB) &&
11541             !(rcvctrl & RCV_CTXT_CTRL_ENABLE_SMASK)) {
11542                 /* reset the tail and hdr addresses, and sequence count */
11543                 write_kctxt_csr(dd, ctxt, RCV_HDR_ADDR,
11544                                 rcd->rcvhdrq_dma);
11545                 if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL))
11546                         write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
11547                                         rcd->rcvhdrqtailaddr_dma);
11548                 rcd->seq_cnt = 1;
11549
11550                 /* reset the cached receive header queue head value */
11551                 rcd->head = 0;
11552
11553                 /*
11554                  * Zero the receive header queue so we don't get false
11555                  * positives when checking the sequence number.  The
11556                  * sequence numbers could land exactly on the same spot.
11557                  * E.g. an rcd restart before the receive header queue wrapped.
11558                  */
11559                 memset(rcd->rcvhdrq, 0, rcd->rcvhdrq_size);
11560
11561                 /* starting timeout */
11562                 rcd->rcvavail_timeout = dd->rcv_intr_timeout_csr;
11563
11564                 /* enable the context */
11565                 rcvctrl |= RCV_CTXT_CTRL_ENABLE_SMASK;
11566
11567                 /* clean the egr buffer size first */
11568                 rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK;
11569                 rcvctrl |= ((u64)encoded_size(rcd->egrbufs.rcvtid_size)
11570                                 & RCV_CTXT_CTRL_EGR_BUF_SIZE_MASK)
11571                                         << RCV_CTXT_CTRL_EGR_BUF_SIZE_SHIFT;
11572
11573                 /* zero RcvHdrHead - set RcvHdrHead.Counter after enable */
11574                 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0);
11575                 did_enable = 1;
11576
11577                 /* zero RcvEgrIndexHead */
11578                 write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, 0);
11579
11580                 /* set eager count and base index */
11581                 reg = (((u64)(rcd->egrbufs.alloced >> RCV_SHIFT)
11582                         & RCV_EGR_CTRL_EGR_CNT_MASK)
11583                        << RCV_EGR_CTRL_EGR_CNT_SHIFT) |
11584                         (((rcd->eager_base >> RCV_SHIFT)
11585                           & RCV_EGR_CTRL_EGR_BASE_INDEX_MASK)
11586                          << RCV_EGR_CTRL_EGR_BASE_INDEX_SHIFT);
11587                 write_kctxt_csr(dd, ctxt, RCV_EGR_CTRL, reg);
11588
11589                 /*
11590                  * Set TID (expected) count and base index.
11591                  * rcd->expected_count is set to individual RcvArray entries,
11592                  * not pairs, and the CSR takes a pair-count in groups of
11593                  * four, so divide by 8.
11594                  */
11595                 reg = (((rcd->expected_count >> RCV_SHIFT)
11596                                         & RCV_TID_CTRL_TID_PAIR_CNT_MASK)
11597                                 << RCV_TID_CTRL_TID_PAIR_CNT_SHIFT) |
11598                       (((rcd->expected_base >> RCV_SHIFT)
11599                                         & RCV_TID_CTRL_TID_BASE_INDEX_MASK)
11600                                 << RCV_TID_CTRL_TID_BASE_INDEX_SHIFT);
11601                 write_kctxt_csr(dd, ctxt, RCV_TID_CTRL, reg);
11602                 if (ctxt == HFI1_CTRL_CTXT)
11603                         write_csr(dd, RCV_VL15, HFI1_CTRL_CTXT);
11604         }
11605         if (op & HFI1_RCVCTRL_CTXT_DIS) {
11606                 write_csr(dd, RCV_VL15, 0);
11607                 /*
11608                  * When a receive context is being disabled, turn on tail
11609                  * update with a dummy tail address, and then disable the
11610                  * receive context.
11611                  */
11612                 if (dd->rcvhdrtail_dummy_dma) {
11613                         write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
11614                                         dd->rcvhdrtail_dummy_dma);
11615                         /* Enabling RcvCtxtCtrl.TailUpd is intentional. */
11616                         rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
11617                 }
11618
11619                 rcvctrl &= ~RCV_CTXT_CTRL_ENABLE_SMASK;
11620         }
11621         if (op & HFI1_RCVCTRL_INTRAVAIL_ENB)
11622                 rcvctrl |= RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
11623         if (op & HFI1_RCVCTRL_INTRAVAIL_DIS)
11624                 rcvctrl &= ~RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
11625         if (op & HFI1_RCVCTRL_TAILUPD_ENB && rcd->rcvhdrqtailaddr_dma)
11626                 rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
11627         if (op & HFI1_RCVCTRL_TAILUPD_DIS) {
11628                 /* See comment on RcvCtxtCtrl.TailUpd above */
11629                 if (!(op & HFI1_RCVCTRL_CTXT_DIS))
11630                         rcvctrl &= ~RCV_CTXT_CTRL_TAIL_UPD_SMASK;
11631         }
11632         if (op & HFI1_RCVCTRL_TIDFLOW_ENB)
11633                 rcvctrl |= RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK;
11634         if (op & HFI1_RCVCTRL_TIDFLOW_DIS)
11635                 rcvctrl &= ~RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK;
11636         if (op & HFI1_RCVCTRL_ONE_PKT_EGR_ENB) {
11637                 /*
11638                  * In one-packet-per-eager mode, the size comes from
11639                  * the RcvArray entry.
11640                  */
11641                 rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK;
11642                 rcvctrl |= RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK;
11643         }
11644         if (op & HFI1_RCVCTRL_ONE_PKT_EGR_DIS)
11645                 rcvctrl &= ~RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK;
11646         if (op & HFI1_RCVCTRL_NO_RHQ_DROP_ENB)
11647                 rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK;
11648         if (op & HFI1_RCVCTRL_NO_RHQ_DROP_DIS)
11649                 rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK;
11650         if (op & HFI1_RCVCTRL_NO_EGR_DROP_ENB)
11651                 rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
11652         if (op & HFI1_RCVCTRL_NO_EGR_DROP_DIS)
11653                 rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
11654         rcd->rcvctrl = rcvctrl;
11655         hfi1_cdbg(RCVCTRL, "ctxt %d rcvctrl 0x%llx\n", ctxt, rcvctrl);
11656         write_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL, rcd->rcvctrl);
11657
11658         /* work around sticky RcvCtxtStatus.BlockedRHQFull */
11659         if (did_enable &&
11660             (rcvctrl & RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK)) {
11661                 reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS);
11662                 if (reg != 0) {
11663                         dd_dev_info(dd, "ctxt %d status %lld (blocked)\n",
11664                                     ctxt, reg);
11665                         read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD);
11666                         write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x10);
11667                         write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x00);
11668                         read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD);
11669                         reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS);
11670                         dd_dev_info(dd, "ctxt %d status %lld (%s blocked)\n",
11671                                     ctxt, reg, reg == 0 ? "not" : "still");
11672                 }
11673         }
11674
11675         if (did_enable) {
11676                 /*
11677                  * The interrupt timeout and count must be set after
11678                  * the context is enabled to take effect.
11679                  */
11680                 /* set interrupt timeout */
11681                 write_kctxt_csr(dd, ctxt, RCV_AVAIL_TIME_OUT,
11682                                 (u64)rcd->rcvavail_timeout <<
11683                                 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);
11684
11685                 /* set RcvHdrHead.Counter, zero RcvHdrHead.Head (again) */
11686                 reg = (u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT;
11687                 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
11688         }
11689
11690         if (op & (HFI1_RCVCTRL_TAILUPD_DIS | HFI1_RCVCTRL_CTXT_DIS))
11691                 /*
11692                  * If the context has been disabled or the Tail Update has
11693                  * been cleared, point the RCV_HDR_TAIL_ADDR CSR at the dummy
11694                  * address so it never holds a stale, invalid address.
11695                  */
11696                 write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
11697                                 dd->rcvhdrtail_dummy_dma);
11698 }
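
/*
 * Illustrative usage sketch (an assumption, compiled out): a hypothetical
 * caller tearing down a context would pass the _DIS ops together, letting
 * the code above park the tail pointer at the dummy DMA address before the
 * context, and then tail update itself, is switched off.
 */
#if 0
static void example_disable_rcv_ctxt(struct hfi1_devdata *dd, int ctxt)
{
        hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS | HFI1_RCVCTRL_INTRAVAIL_DIS |
                         HFI1_RCVCTRL_TAILUPD_DIS, ctxt);
}
#endif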
11699
11700 u32 hfi1_read_cntrs(struct hfi1_devdata *dd, char **namep, u64 **cntrp)
11701 {
11702         int ret;
11703         u64 val = 0;
11704
11705         if (namep) {
11706                 ret = dd->cntrnameslen;
11707                 *namep = dd->cntrnames;
11708         } else {
11709                 const struct cntr_entry *entry;
11710                 int i, j;
11711
11712                 ret = (dd->ndevcntrs) * sizeof(u64);
11713
11714                 /* Get the start of the block of counters */
11715                 *cntrp = dd->cntrs;
11716
11717                 /*
11718                  * Now go and fill in each counter in the block.
11719                  */
11720                 for (i = 0; i < DEV_CNTR_LAST; i++) {
11721                         entry = &dev_cntrs[i];
11722                         hfi1_cdbg(CNTR, "reading %s", entry->name);
11723                         if (entry->flags & CNTR_DISABLED) {
11724                                 /* Nothing */
11725                                 hfi1_cdbg(CNTR, "\tDisabled\n");
11726                         } else {
11727                                 if (entry->flags & CNTR_VL) {
11728                                         hfi1_cdbg(CNTR, "\tPer VL\n");
11729                                         for (j = 0; j < C_VL_COUNT; j++) {
11730                                                 val = entry->rw_cntr(entry,
11731                                                                   dd, j,
11732                                                                   CNTR_MODE_R,
11733                                                                   0);
11734                                                 hfi1_cdbg(
11735                                                    CNTR,
11736                                                    "\t\tRead 0x%llx for %d\n",
11737                                                    val, j);
11738                                                 dd->cntrs[entry->offset + j] =
11739                                                                             val;
11740                                         }
11741                                 } else if (entry->flags & CNTR_SDMA) {
11742                                         hfi1_cdbg(CNTR,
11743                                                   "\tPer SDMA Engine\n");
11744                                         for (j = 0; j < dd->chip_sdma_engines;
11745                                              j++) {
11746                                                 val =
11747                                                 entry->rw_cntr(entry, dd, j,
11748                                                                CNTR_MODE_R, 0);
11749                                                 hfi1_cdbg(CNTR,
11750                                                           "\t\tRead 0x%llx for %d\n",
11751                                                           val, j);
11752                                                 dd->cntrs[entry->offset + j] =
11753                                                                         val;
11754                                         }
11755                                 } else {
11756                                         val = entry->rw_cntr(entry, dd,
11757                                                         CNTR_INVALID_VL,
11758                                                         CNTR_MODE_R, 0);
11759                                         dd->cntrs[entry->offset] = val;
11760                                         hfi1_cdbg(CNTR, "\tRead 0x%llx", val);
11761                                 }
11762                         }
11763                 }
11764         }
11765         return ret;
11766 }
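
/*
 * Usage sketch (illustrative, compiled out): a consumer makes two calls,
 * one with namep to fetch the newline-separated name table and one with
 * cntrp to fetch the u64 value block laid out in the same order.
 */
#if 0
static void example_dump_dev_cntrs(struct hfi1_devdata *dd)
{
        char *names;
        u64 *vals;
        u32 name_bytes = hfi1_read_cntrs(dd, &names, NULL);
        u32 val_bytes = hfi1_read_cntrs(dd, NULL, &vals);

        /* name_bytes of text, val_bytes / sizeof(u64) counter values */
}
#endif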
11767
11768 /*
11769  * Used by sysfs to supply the port counter names and values it exposes
11770  */
11771 u32 hfi1_read_portcntrs(struct hfi1_pportdata *ppd, char **namep, u64 **cntrp)
11772 {
11773         int ret;
11774         u64 val = 0;
11775
11776         if (namep) {
11777                 ret = ppd->dd->portcntrnameslen;
11778                 *namep = ppd->dd->portcntrnames;
11779         } else {
11780                 const struct cntr_entry *entry;
11781                 int i, j;
11782
11783                 ret = ppd->dd->nportcntrs * sizeof(u64);
11784                 *cntrp = ppd->cntrs;
11785
11786                 for (i = 0; i < PORT_CNTR_LAST; i++) {
11787                         entry = &port_cntrs[i];
11788                         hfi1_cdbg(CNTR, "reading %s", entry->name);
11789                         if (entry->flags & CNTR_DISABLED) {
11790                                 /* Nothing */
11791                                 hfi1_cdbg(CNTR, "\tDisabled\n");
11792                                 continue;
11793                         }
11794
11795                         if (entry->flags & CNTR_VL) {
11796                                 hfi1_cdbg(CNTR, "\tPer VL");
11797                                 for (j = 0; j < C_VL_COUNT; j++) {
11798                                         val = entry->rw_cntr(entry, ppd, j,
11799                                                                CNTR_MODE_R,
11800                                                                0);
11801                                         hfi1_cdbg(
11802                                            CNTR,
11803                                            "\t\tRead 0x%llx for %d",
11804                                            val, j);
11805                                         ppd->cntrs[entry->offset + j] = val;
11806                                 }
11807                         } else {
11808                                 val = entry->rw_cntr(entry, ppd,
11809                                                        CNTR_INVALID_VL,
11810                                                        CNTR_MODE_R,
11811                                                        0);
11812                                 ppd->cntrs[entry->offset] = val;
11813                                 hfi1_cdbg(CNTR, "\tRead 0x%llx", val);
11814                         }
11815                 }
11816         }
11817         return ret;
11818 }
11819
11820 static void free_cntrs(struct hfi1_devdata *dd)
11821 {
11822         struct hfi1_pportdata *ppd;
11823         int i;
11824
11825         if (dd->synth_stats_timer.data)
11826                 del_timer_sync(&dd->synth_stats_timer);
11827         dd->synth_stats_timer.data = 0;
11828         ppd = (struct hfi1_pportdata *)(dd + 1);
11829         for (i = 0; i < dd->num_pports; i++, ppd++) {
11830                 kfree(ppd->cntrs);
11831                 kfree(ppd->scntrs);
11832                 free_percpu(ppd->ibport_data.rvp.rc_acks);
11833                 free_percpu(ppd->ibport_data.rvp.rc_qacks);
11834                 free_percpu(ppd->ibport_data.rvp.rc_delayed_comp);
11835                 ppd->cntrs = NULL;
11836                 ppd->scntrs = NULL;
11837                 ppd->ibport_data.rvp.rc_acks = NULL;
11838                 ppd->ibport_data.rvp.rc_qacks = NULL;
11839                 ppd->ibport_data.rvp.rc_delayed_comp = NULL;
11840         }
11841         kfree(dd->portcntrnames);
11842         dd->portcntrnames = NULL;
11843         kfree(dd->cntrs);
11844         dd->cntrs = NULL;
11845         kfree(dd->scntrs);
11846         dd->scntrs = NULL;
11847         kfree(dd->cntrnames);
11848         dd->cntrnames = NULL;
11849 }
11850
11851 static u64 read_dev_port_cntr(struct hfi1_devdata *dd, struct cntr_entry *entry,
11852                               u64 *psval, void *context, int vl)
11853 {
11854         u64 val;
11855         u64 sval = *psval;
11856
11857         if (entry->flags & CNTR_DISABLED) {
11858                 dd_dev_err(dd, "Counter %s not enabled", entry->name);
11859                 return 0;
11860         }
11861
11862         hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval);
11863
11864         val = entry->rw_cntr(entry, context, vl, CNTR_MODE_R, 0);
11865
11866         /* If it's a synthetic counter, there is more work to do */
11867         if (entry->flags & CNTR_SYNTH) {
11868                 if (sval == CNTR_MAX) {
11869                         /* No need to read; already saturated */
11870                         return CNTR_MAX;
11871                 }
11872
11873                 if (entry->flags & CNTR_32BIT) {
11874                         /* 32bit counters can wrap multiple times */
11875                         u64 upper = sval >> 32;
11876                         u64 lower = (sval << 32) >> 32;
11877
11878                         if (lower > val) { /* hw wrapped */
11879                                 if (upper == CNTR_32BIT_MAX)
11880                                         val = CNTR_MAX;
11881                                 else
11882                                         upper++;
11883                         }
11884
11885                         if (val != CNTR_MAX)
11886                                 val = (upper << 32) | val;
11887
11888                 } else {
11889                         /* If we rolled over, we are saturated */
11890                         if ((val < sval) || (val > CNTR_MAX))
11891                                 val = CNTR_MAX;
11892                 }
11893         }
11894
11895         *psval = val;
11896
11897         hfi1_cdbg(CNTR, "\tNew val=0x%llx", val);
11898
11899         return val;
11900 }
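
/*
 * Worked example of the 32-bit unwrap above (illustrative values): with
 * sval = 0x200000010 the split is upper = 0x2, lower = 0x10. A fresh
 * hardware read of val = 0x8 satisfies lower > val, so the counter has
 * wrapped once more: upper becomes 0x3 and the reconstructed 64-bit
 * value is (0x3 << 32) | 0x8 = 0x300000008.
 */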
11901
11902 static u64 write_dev_port_cntr(struct hfi1_devdata *dd,
11903                                struct cntr_entry *entry,
11904                                u64 *psval, void *context, int vl, u64 data)
11905 {
11906         u64 val;
11907
11908         if (entry->flags & CNTR_DISABLED) {
11909                 dd_dev_err(dd, "Counter %s not enabled", entry->name);
11910                 return 0;
11911         }
11912
11913         hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval);
11914
11915         if (entry->flags & CNTR_SYNTH) {
11916                 *psval = data;
11917                 if (entry->flags & CNTR_32BIT) {
11918                         val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W,
11919                                              (data << 32) >> 32);
11920                         val = data; /* return the full 64bit value */
11921                 } else {
11922                         val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W,
11923                                              data);
11924                 }
11925         } else {
11926                 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W, data);
11927         }
11928
11929         *psval = val;
11930
11931         hfi1_cdbg(CNTR, "\tNew val=0x%llx", val);
11932
11933         return val;
11934 }
11935
11936 u64 read_dev_cntr(struct hfi1_devdata *dd, int index, int vl)
11937 {
11938         struct cntr_entry *entry;
11939         u64 *sval;
11940
11941         entry = &dev_cntrs[index];
11942         sval = dd->scntrs + entry->offset;
11943
11944         if (vl != CNTR_INVALID_VL)
11945                 sval += vl;
11946
11947         return read_dev_port_cntr(dd, entry, sval, dd, vl);
11948 }
11949
11950 u64 write_dev_cntr(struct hfi1_devdata *dd, int index, int vl, u64 data)
11951 {
11952         struct cntr_entry *entry;
11953         u64 *sval;
11954
11955         entry = &dev_cntrs[index];
11956         sval = dd->scntrs + entry->offset;
11957
11958         if (vl != CNTR_INVALID_VL)
11959                 sval += vl;
11960
11961         return write_dev_port_cntr(dd, entry, sval, dd, vl, data);
11962 }
11963
11964 u64 read_port_cntr(struct hfi1_pportdata *ppd, int index, int vl)
11965 {
11966         struct cntr_entry *entry;
11967         u64 *sval;
11968
11969         entry = &port_cntrs[index];
11970         sval = ppd->scntrs + entry->offset;
11971
11972         if (vl != CNTR_INVALID_VL)
11973                 sval += vl;
11974
11975         if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) &&
11976             (index <= C_RCV_HDR_OVF_LAST)) {
11977                 /* We do not want to bother with disabled contexts */
11978                 return 0;
11979         }
11980
11981         return read_dev_port_cntr(ppd->dd, entry, sval, ppd, vl);
11982 }
11983
11984 u64 write_port_cntr(struct hfi1_pportdata *ppd, int index, int vl, u64 data)
11985 {
11986         struct cntr_entry *entry;
11987         u64 *sval;
11988
11989         entry = &port_cntrs[index];
11990         sval = ppd->scntrs + entry->offset;
11991
11992         if (vl != CNTR_INVALID_VL)
11993                 sval += vl;
11994
11995         if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) &&
11996             (index <= C_RCV_HDR_OVF_LAST)) {
11997                 /* We do not want to bother with disabled contexts */
11998                 return 0;
11999         }
12000
12001         return write_dev_port_cntr(ppd->dd, entry, sval, ppd, vl, data);
12002 }
12003
12004 static void update_synth_timer(unsigned long opaque)
12005 {
12006         u64 cur_tx;
12007         u64 cur_rx;
12008         u64 total_flits;
12009         u8 update = 0;
12010         int i, j, vl;
12011         struct hfi1_pportdata *ppd;
12012         struct cntr_entry *entry;
12013
12014         struct hfi1_devdata *dd = (struct hfi1_devdata *)opaque;
12015
12016         /*
12017          * Rather than keep beating on the CSRs, pick a minimal set that we
12018          * can check to watch for potential roll-over: the number of flits
12019          * sent/received. If the total flit count exceeds 32 bits, we have
12020          * to iterate over all the counters and update them.
12021          */
12022         entry = &dev_cntrs[C_DC_RCV_FLITS];
12023         cur_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);
12024
12025         entry = &dev_cntrs[C_DC_XMIT_FLITS];
12026         cur_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);
12027
12028         hfi1_cdbg(
12029             CNTR,
12030             "[%d] curr tx=0x%llx rx=0x%llx :: last tx=0x%llx rx=0x%llx\n",
12031             dd->unit, cur_tx, cur_rx, dd->last_tx, dd->last_rx);
12032
12033         if ((cur_tx < dd->last_tx) || (cur_rx < dd->last_rx)) {
12034                 /*
12035                  * It may not be strictly necessary to update, but it won't
12036                  * hurt and it simplifies the logic here.
12037                  */
12038                 update = 1;
12039                 hfi1_cdbg(CNTR, "[%d] Tripwire counter rolled, updating",
12040                           dd->unit);
12041         } else {
12042                 total_flits = (cur_tx - dd->last_tx) + (cur_rx - dd->last_rx);
12043                 hfi1_cdbg(CNTR,
12044                           "[%d] total flits 0x%llx limit 0x%llx\n", dd->unit,
12045                           total_flits, (u64)CNTR_32BIT_MAX);
12046                 if (total_flits >= CNTR_32BIT_MAX) {
12047                         hfi1_cdbg(CNTR, "[%d] 32bit limit hit, updating",
12048                                   dd->unit);
12049                         update = 1;
12050                 }
12051         }
12052
12053         if (update) {
12054                 hfi1_cdbg(CNTR, "[%d] Updating dd and ppd counters", dd->unit);
12055                 for (i = 0; i < DEV_CNTR_LAST; i++) {
12056                         entry = &dev_cntrs[i];
12057                         if (entry->flags & CNTR_VL) {
12058                                 for (vl = 0; vl < C_VL_COUNT; vl++)
12059                                         read_dev_cntr(dd, i, vl);
12060                         } else {
12061                                 read_dev_cntr(dd, i, CNTR_INVALID_VL);
12062                         }
12063                 }
12064                 ppd = (struct hfi1_pportdata *)(dd + 1);
12065                 for (i = 0; i < dd->num_pports; i++, ppd++) {
12066                         for (j = 0; j < PORT_CNTR_LAST; j++) {
12067                                 entry = &port_cntrs[j];
12068                                 if (entry->flags & CNTR_VL) {
12069                                         for (vl = 0; vl < C_VL_COUNT; vl++)
12070                                                 read_port_cntr(ppd, j, vl);
12071                                 } else {
12072                                         read_port_cntr(ppd, j, CNTR_INVALID_VL);
12073                                 }
12074                         }
12075                 }
12076
12077                 /*
12078                  * We want the value in the register. The goal is to keep track
12079                  * of the number of "ticks", not the counter value. In other
12080                  * words, if the register rolls over we want to notice it and
12081                  * force an update.
12082                  */
12083                 entry = &dev_cntrs[C_DC_XMIT_FLITS];
12084                 dd->last_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL,
12085                                                 CNTR_MODE_R, 0);
12086
12087                 entry = &dev_cntrs[C_DC_RCV_FLITS];
12088                 dd->last_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL,
12089                                                 CNTR_MODE_R, 0);
12090
12091                 hfi1_cdbg(CNTR, "[%d] setting last tx/rx to 0x%llx 0x%llx",
12092                           dd->unit, dd->last_tx, dd->last_rx);
12093
12094         } else {
12095                 hfi1_cdbg(CNTR, "[%d] No update necessary", dd->unit);
12096         }
12097
12098         mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
12099 }
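
/*
 * Tripwire sketch (illustrative, compiled out; simplified to a single
 * counter, whereas the code above sums the tx and rx flit deltas):
 */
#if 0
static bool example_tripwire_hit(u64 last, u64 cur)
{
        /* refresh before a 32-bit synthetic counter can wrap twice */
        return (cur < last) || (cur - last >= CNTR_32BIT_MAX);
}
#endif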
12100
12101 #define C_MAX_NAME 16 /* 15 chars + one for '\0' */
12102 static int init_cntrs(struct hfi1_devdata *dd)
12103 {
12104         int i, rcv_ctxts, j;
12105         size_t sz;
12106         char *p;
12107         char name[C_MAX_NAME];
12108         struct hfi1_pportdata *ppd;
12109         const char *bit_type_32 = ",32";
12110         const int bit_type_32_sz = strlen(bit_type_32);
12111
12112         /* set up the stats timer; the add_timer is done at the end */
12113         setup_timer(&dd->synth_stats_timer, update_synth_timer,
12114                     (unsigned long)dd);
12115
12116         /***********************/
12117         /* per device counters */
12118         /***********************/
12119
12120         /* size names and determine how many we have */
12121         dd->ndevcntrs = 0;
12122         sz = 0;
12123
12124         for (i = 0; i < DEV_CNTR_LAST; i++) {
12125                 if (dev_cntrs[i].flags & CNTR_DISABLED) {
12126                         hfi1_dbg_early("\tSkipping %s\n", dev_cntrs[i].name);
12127                         continue;
12128                 }
12129
12130                 if (dev_cntrs[i].flags & CNTR_VL) {
12131                         dev_cntrs[i].offset = dd->ndevcntrs;
12132                         for (j = 0; j < C_VL_COUNT; j++) {
12133                                 snprintf(name, C_MAX_NAME, "%s%d",
12134                                          dev_cntrs[i].name, vl_from_idx(j));
12135                                 sz += strlen(name);
12136                                 /* Add ",32" for 32-bit counters */
12137                                 if (dev_cntrs[i].flags & CNTR_32BIT)
12138                                         sz += bit_type_32_sz;
12139                                 sz++;
12140                                 dd->ndevcntrs++;
12141                         }
12142                 } else if (dev_cntrs[i].flags & CNTR_SDMA) {
12143                         dev_cntrs[i].offset = dd->ndevcntrs;
12144                         for (j = 0; j < dd->chip_sdma_engines; j++) {
12145                                 snprintf(name, C_MAX_NAME, "%s%d",
12146                                          dev_cntrs[i].name, j);
12147                                 sz += strlen(name);
12148                                 /* Add ",32" for 32-bit counters */
12149                                 if (dev_cntrs[i].flags & CNTR_32BIT)
12150                                         sz += bit_type_32_sz;
12151                                 sz++;
12152                                 dd->ndevcntrs++;
12153                         }
12154                 } else {
12155                         /* +1 for newline. */
12156                         sz += strlen(dev_cntrs[i].name) + 1;
12157                         /* Add ",32" for 32-bit counters */
12158                         if (dev_cntrs[i].flags & CNTR_32BIT)
12159                                 sz += bit_type_32_sz;
12160                         dev_cntrs[i].offset = dd->ndevcntrs;
12161                         dd->ndevcntrs++;
12162                 }
12163         }
12164
12165         /* allocate space for the counter values */
12166         dd->cntrs = kcalloc(dd->ndevcntrs, sizeof(u64), GFP_KERNEL);
12167         if (!dd->cntrs)
12168                 goto bail;
12169
12170         dd->scntrs = kcalloc(dd->ndevcntrs, sizeof(u64), GFP_KERNEL);
12171         if (!dd->scntrs)
12172                 goto bail;
12173
12174         /* allocate space for the counter names */
12175         dd->cntrnameslen = sz;
12176         dd->cntrnames = kmalloc(sz, GFP_KERNEL);
12177         if (!dd->cntrnames)
12178                 goto bail;
12179
12180         /* fill in the names */
12181         for (p = dd->cntrnames, i = 0; i < DEV_CNTR_LAST; i++) {
12182                 if (dev_cntrs[i].flags & CNTR_DISABLED) {
12183                         /* Nothing */
12184                 } else if (dev_cntrs[i].flags & CNTR_VL) {
12185                         for (j = 0; j < C_VL_COUNT; j++) {
12186                                 snprintf(name, C_MAX_NAME, "%s%d",
12187                                          dev_cntrs[i].name,
12188                                          vl_from_idx(j));
12189                                 memcpy(p, name, strlen(name));
12190                                 p += strlen(name);
12191
12192                                 /* Counter is 32 bits */
12193                                 if (dev_cntrs[i].flags & CNTR_32BIT) {
12194                                         memcpy(p, bit_type_32, bit_type_32_sz);
12195                                         p += bit_type_32_sz;
12196                                 }
12197
12198                                 *p++ = '\n';
12199                         }
12200                 } else if (dev_cntrs[i].flags & CNTR_SDMA) {
12201                         for (j = 0; j < dd->chip_sdma_engines; j++) {
12202                                 snprintf(name, C_MAX_NAME, "%s%d",
12203                                          dev_cntrs[i].name, j);
12204                                 memcpy(p, name, strlen(name));
12205                                 p += strlen(name);
12206
12207                                 /* Counter is 32 bits */
12208                                 if (dev_cntrs[i].flags & CNTR_32BIT) {
12209                                         memcpy(p, bit_type_32, bit_type_32_sz);
12210                                         p += bit_type_32_sz;
12211                                 }
12212
12213                                 *p++ = '\n';
12214                         }
12215                 } else {
12216                         memcpy(p, dev_cntrs[i].name, strlen(dev_cntrs[i].name));
12217                         p += strlen(dev_cntrs[i].name);
12218
12219                         /* Counter is 32 bits */
12220                         if (dev_cntrs[i].flags & CNTR_32BIT) {
12221                                 memcpy(p, bit_type_32, bit_type_32_sz);
12222                                 p += bit_type_32_sz;
12223                         }
12224
12225                         *p++ = '\n';
12226                 }
12227         }
12228
12229         /*********************/
12230         /* per port counters */
12231         /*********************/
12232
12233         /*
12234          * Go through the receive header overflow counters and disable the
12235          * ones we don't need. This varies by platform, so we need to do it
12236          * dynamically here.
12237          */
12238         rcv_ctxts = dd->num_rcv_contexts;
12239         for (i = C_RCV_HDR_OVF_FIRST + rcv_ctxts;
12240              i <= C_RCV_HDR_OVF_LAST; i++) {
12241                 port_cntrs[i].flags |= CNTR_DISABLED;
12242         }
12243
12244         /* size port counter names and determine how many we have */
12245         sz = 0;
12246         dd->nportcntrs = 0;
12247         for (i = 0; i < PORT_CNTR_LAST; i++) {
12248                 if (port_cntrs[i].flags & CNTR_DISABLED) {
12249                         hfi1_dbg_early("\tSkipping %s\n", port_cntrs[i].name);
12250                         continue;
12251                 }
12252
12253                 if (port_cntrs[i].flags & CNTR_VL) {
12254                         port_cntrs[i].offset = dd->nportcntrs;
12255                         for (j = 0; j < C_VL_COUNT; j++) {
12256                                 snprintf(name, C_MAX_NAME, "%s%d",
12257                                          port_cntrs[i].name, vl_from_idx(j));
12258                                 sz += strlen(name);
12259                                 /* Add ",32" for 32-bit counters */
12260                                 if (port_cntrs[i].flags & CNTR_32BIT)
12261                                         sz += bit_type_32_sz;
12262                                 sz++;
12263                                 dd->nportcntrs++;
12264                         }
12265                 } else {
12266                         /* +1 for newline */
12267                         sz += strlen(port_cntrs[i].name) + 1;
12268                         /* Add ",32" for 32-bit counters */
12269                         if (port_cntrs[i].flags & CNTR_32BIT)
12270                                 sz += bit_type_32_sz;
12271                         port_cntrs[i].offset = dd->nportcntrs;
12272                         dd->nportcntrs++;
12273                 }
12274         }
12275
12276         /* allocate space for the counter names */
12277         dd->portcntrnameslen = sz;
12278         dd->portcntrnames = kmalloc(sz, GFP_KERNEL);
12279         if (!dd->portcntrnames)
12280                 goto bail;
12281
12282         /* fill in port cntr names */
12283         for (p = dd->portcntrnames, i = 0; i < PORT_CNTR_LAST; i++) {
12284                 if (port_cntrs[i].flags & CNTR_DISABLED)
12285                         continue;
12286
12287                 if (port_cntrs[i].flags & CNTR_VL) {
12288                         for (j = 0; j < C_VL_COUNT; j++) {
12289                                 snprintf(name, C_MAX_NAME, "%s%d",
12290                                          port_cntrs[i].name, vl_from_idx(j));
12291                                 memcpy(p, name, strlen(name));
12292                                 p += strlen(name);
12293
12294                                 /* Counter is 32 bits */
12295                                 if (port_cntrs[i].flags & CNTR_32BIT) {
12296                                         memcpy(p, bit_type_32, bit_type_32_sz);
12297                                         p += bit_type_32_sz;
12298                                 }
12299
12300                                 *p++ = '\n';
12301                         }
12302                 } else {
12303                         memcpy(p, port_cntrs[i].name,
12304                                strlen(port_cntrs[i].name));
12305                         p += strlen(port_cntrs[i].name);
12306
12307                         /* Counter is 32 bits */
12308                         if (port_cntrs[i].flags & CNTR_32BIT) {
12309                                 memcpy(p, bit_type_32, bit_type_32_sz);
12310                                 p += bit_type_32_sz;
12311                         }
12312
12313                         *p++ = '\n';
12314                 }
12315         }
12316
12317         /* allocate per port storage for counter values */
12318         ppd = (struct hfi1_pportdata *)(dd + 1);
12319         for (i = 0; i < dd->num_pports; i++, ppd++) {
12320                 ppd->cntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL);
12321                 if (!ppd->cntrs)
12322                         goto bail;
12323
12324                 ppd->scntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL);
12325                 if (!ppd->scntrs)
12326                         goto bail;
12327         }
12328
12329         /* CPU counters need to be allocated and zeroed */
12330         if (init_cpu_counters(dd))
12331                 goto bail;
12332
12333         mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
12334         return 0;
12335 bail:
12336         free_cntrs(dd);
12337         return -ENOMEM;
12338 }
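
/*
 * Name-table layout sketch (illustrative; "RxExample" is a made-up name):
 * a per-VL 32-bit device counter contributes entries of the form
 *
 *      RxExampleVL0,32\n RxExampleVL1,32\n ...
 *
 * to dd->cntrnames, one per VL index from vl_from_idx(), plus one u64
 * slot each in dd->cntrs/dd->scntrs starting at dev_cntrs[i].offset.
 */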
12339
12340 static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate)
12341 {
12342         switch (chip_lstate) {
12343         default:
12344                 dd_dev_err(dd,
12345                            "Unknown logical state 0x%x, reporting IB_PORT_DOWN\n",
12346                            chip_lstate);
12347                 /* fall through */
12348         case LSTATE_DOWN:
12349                 return IB_PORT_DOWN;
12350         case LSTATE_INIT:
12351                 return IB_PORT_INIT;
12352         case LSTATE_ARMED:
12353                 return IB_PORT_ARMED;
12354         case LSTATE_ACTIVE:
12355                 return IB_PORT_ACTIVE;
12356         }
12357 }
12358
12359 u32 chip_to_opa_pstate(struct hfi1_devdata *dd, u32 chip_pstate)
12360 {
12361         /* look at the HFI meta-states only */
12362         switch (chip_pstate & 0xf0) {
12363         default:
12364                 dd_dev_err(dd, "Unexpected chip physical state of 0x%x\n",
12365                            chip_pstate);
12366                 /* fall through */
12367         case PLS_DISABLED:
12368                 return IB_PORTPHYSSTATE_DISABLED;
12369         case PLS_OFFLINE:
12370                 return OPA_PORTPHYSSTATE_OFFLINE;
12371         case PLS_POLLING:
12372                 return IB_PORTPHYSSTATE_POLLING;
12373         case PLS_CONFIGPHY:
12374                 return IB_PORTPHYSSTATE_TRAINING;
12375         case PLS_LINKUP:
12376                 return IB_PORTPHYSSTATE_LINKUP;
12377         case PLS_PHYTEST:
12378                 return IB_PORTPHYSSTATE_PHY_TEST;
12379         }
12380 }
12381
12382 /* return the OPA port logical state name */
12383 const char *opa_lstate_name(u32 lstate)
12384 {
12385         static const char * const port_logical_names[] = {
12386                 "PORT_NOP",
12387                 "PORT_DOWN",
12388                 "PORT_INIT",
12389                 "PORT_ARMED",
12390                 "PORT_ACTIVE",
12391                 "PORT_ACTIVE_DEFER",
12392         };
12393         if (lstate < ARRAY_SIZE(port_logical_names))
12394                 return port_logical_names[lstate];
12395         return "unknown";
12396 }
12397
12398 /* return the OPA port physical state name */
12399 const char *opa_pstate_name(u32 pstate)
12400 {
12401         static const char * const port_physical_names[] = {
12402                 "PHYS_NOP",
12403                 "reserved1",
12404                 "PHYS_POLL",
12405                 "PHYS_DISABLED",
12406                 "PHYS_TRAINING",
12407                 "PHYS_LINKUP",
12408                 "PHYS_LINK_ERR_RECOVER",
12409                 "PHYS_PHY_TEST",
12410                 "reserved8",
12411                 "PHYS_OFFLINE",
12412                 "PHYS_GANGED",
12413                 "PHYS_TEST",
12414         };
12415         if (pstate < ARRAY_SIZE(port_physical_names))
12416                 return port_physical_names[pstate];
12417         return "unknown";
12418 }
12419
12420 /*
12421  * Read the hardware link state and set the driver's cached value of it.
12422  * Return the (new) current value.
12423  */
12424 u32 get_logical_state(struct hfi1_pportdata *ppd)
12425 {
12426         u32 new_state;
12427
12428         new_state = chip_to_opa_lstate(ppd->dd, read_logical_state(ppd->dd));
12429         if (new_state != ppd->lstate) {
12430                 dd_dev_info(ppd->dd, "logical state changed to %s (0x%x)\n",
12431                             opa_lstate_name(new_state), new_state);
12432                 ppd->lstate = new_state;
12433         }
12434         /*
12435          * Set port status flags in the page mapped into userspace
12436          * memory. Do it here to ensure a reliable state - this is
12437          * the only function called by all state handling code.
12438          * Always set the flags due to the fact that the cache value
12439          * might have been changed explicitly outside of this
12440          * function.
12441          */
12442         if (ppd->statusp) {
12443                 switch (ppd->lstate) {
12444                 case IB_PORT_DOWN:
12445                 case IB_PORT_INIT:
12446                         *ppd->statusp &= ~(HFI1_STATUS_IB_CONF |
12447                                            HFI1_STATUS_IB_READY);
12448                         break;
12449                 case IB_PORT_ARMED:
12450                         *ppd->statusp |= HFI1_STATUS_IB_CONF;
12451                         break;
12452                 case IB_PORT_ACTIVE:
12453                         *ppd->statusp |= HFI1_STATUS_IB_READY;
12454                         break;
12455                 }
12456         }
12457         return ppd->lstate;
12458 }
12459
12460 /**
12461  * wait_logical_linkstate - wait for an IB link state change to occur
12462  * @ppd: port device
12463  * @state: the state to wait for
12464  * @msecs: the number of milliseconds to wait
12465  *
12466  * Wait up to msecs milliseconds for IB link state change to occur.
12467  * For now, take the easy polling route.
12468  * Returns 0 if state reached, otherwise -ETIMEDOUT.
12469  */
12470 static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
12471                                   int msecs)
12472 {
12473         unsigned long timeout;
12474
12475         timeout = jiffies + msecs_to_jiffies(msecs);
12476         while (1) {
12477                 if (get_logical_state(ppd) == state)
12478                         return 0;
12479                 if (time_after(jiffies, timeout))
12480                         break;
12481                 msleep(20);
12482         }
12483         dd_dev_err(ppd->dd, "timeout waiting for link state 0x%x\n", state);
12484
12485         return -ETIMEDOUT;
12486 }
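
/*
 * Usage sketch (illustrative values): link bring-up code waits for the
 * logical state to settle, e.g.
 *
 *      ret = wait_logical_linkstate(ppd, IB_PORT_INIT, 1000);
 *
 * which polls get_logical_state() every 20 ms for up to one second.
 */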
12487
12488 u8 hfi1_ibphys_portstate(struct hfi1_pportdata *ppd)
12489 {
12490         u32 pstate;
12491         u32 ib_pstate;
12492
12493         pstate = read_physical_state(ppd->dd);
12494         ib_pstate = chip_to_opa_pstate(ppd->dd, pstate);
12495         if (ppd->last_pstate != ib_pstate) {
12496                 dd_dev_info(ppd->dd,
12497                             "%s: physical state changed to %s (0x%x), phy 0x%x\n",
12498                             __func__, opa_pstate_name(ib_pstate), ib_pstate,
12499                             pstate);
12500                 ppd->last_pstate = ib_pstate;
12501         }
12502         return ib_pstate;
12503 }
12504
12505 #define CLEAR_STATIC_RATE_CONTROL_SMASK(r) \
12506 (r &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
12507
12508 #define SET_STATIC_RATE_CONTROL_SMASK(r) \
12509 (r |= SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
12510
12511 int hfi1_init_ctxt(struct send_context *sc)
12512 {
12513         if (sc) {
12514                 struct hfi1_devdata *dd = sc->dd;
12515                 u64 reg;
12516                 u8 set = (sc->type == SC_USER ?
12517                           HFI1_CAP_IS_USET(STATIC_RATE_CTRL) :
12518                           HFI1_CAP_IS_KSET(STATIC_RATE_CTRL));
12519                 reg = read_kctxt_csr(dd, sc->hw_context,
12520                                      SEND_CTXT_CHECK_ENABLE);
12521                 if (set)
12522                         CLEAR_STATIC_RATE_CONTROL_SMASK(reg);
12523                 else
12524                         SET_STATIC_RATE_CONTROL_SMASK(reg);
12525                 write_kctxt_csr(dd, sc->hw_context,
12526                                 SEND_CTXT_CHECK_ENABLE, reg);
12527         }
12528         return 0;
12529 }
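
/*
 * Note the inversion above: the CSR bit *disallows* PBC static rate
 * control, so when the STATIC_RATE_CTRL capability is set for the
 * context type the disallow bit is cleared, and set otherwise.
 */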
12530
12531 int hfi1_tempsense_rd(struct hfi1_devdata *dd, struct hfi1_temp *temp)
12532 {
12533         int ret = 0;
12534         u64 reg;
12535
12536         if (dd->icode != ICODE_RTL_SILICON) {
12537                 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
12538                         dd_dev_info(dd, "%s: tempsense not supported by HW\n",
12539                                     __func__);
12540                 return -EINVAL;
12541         }
12542         reg = read_csr(dd, ASIC_STS_THERM);
12543         temp->curr = ((reg >> ASIC_STS_THERM_CURR_TEMP_SHIFT) &
12544                       ASIC_STS_THERM_CURR_TEMP_MASK);
12545         temp->lo_lim = ((reg >> ASIC_STS_THERM_LO_TEMP_SHIFT) &
12546                         ASIC_STS_THERM_LO_TEMP_MASK);
12547         temp->hi_lim = ((reg >> ASIC_STS_THERM_HI_TEMP_SHIFT) &
12548                         ASIC_STS_THERM_HI_TEMP_MASK);
12549         temp->crit_lim = ((reg >> ASIC_STS_THERM_CRIT_TEMP_SHIFT) &
12550                           ASIC_STS_THERM_CRIT_TEMP_MASK);
12551         /* triggers is a 3-bit value - 1 bit per trigger. */
12552         temp->triggers = (u8)((reg >> ASIC_STS_THERM_LOW_SHIFT) & 0x7);
12553
12554         return ret;
12555 }
12556
12557 /* ========================================================================= */
12558
12559 /*
12560  * Enable/disable chip from delivering interrupts.
12561  */
12562 void set_intr_state(struct hfi1_devdata *dd, u32 enable)
12563 {
12564         int i;
12565
12566         /*
12567          * In HFI, the mask needs to be 1 to allow interrupts.
12568          */
12569         if (enable) {
12570                 /* enable all interrupts */
12571                 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
12572                         write_csr(dd, CCE_INT_MASK + (8 * i), ~(u64)0);
12573
12574                 init_qsfp_int(dd);
12575         } else {
12576                 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
12577                         write_csr(dd, CCE_INT_MASK + (8 * i), 0ull);
12578         }
12579 }
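
/*
 * CSR stride note: the interrupt mask/clear/map registers are arrays of
 * 64-bit CSRs, hence the 8-byte (8 * i) stride used above and below.
 */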
12580
12581 /*
12582  * Clear all interrupt sources on the chip.
12583  */
12584 static void clear_all_interrupts(struct hfi1_devdata *dd)
12585 {
12586         int i;
12587
12588         for (i = 0; i < CCE_NUM_INT_CSRS; i++)
12589                 write_csr(dd, CCE_INT_CLEAR + (8 * i), ~(u64)0);
12590
12591         write_csr(dd, CCE_ERR_CLEAR, ~(u64)0);
12592         write_csr(dd, MISC_ERR_CLEAR, ~(u64)0);
12593         write_csr(dd, RCV_ERR_CLEAR, ~(u64)0);
12594         write_csr(dd, SEND_ERR_CLEAR, ~(u64)0);
12595         write_csr(dd, SEND_PIO_ERR_CLEAR, ~(u64)0);
12596         write_csr(dd, SEND_DMA_ERR_CLEAR, ~(u64)0);
12597         write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~(u64)0);
12598         for (i = 0; i < dd->chip_send_contexts; i++)
12599                 write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~(u64)0);
12600         for (i = 0; i < dd->chip_sdma_engines; i++)
12601                 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~(u64)0);
12602
12603         write_csr(dd, DCC_ERR_FLG_CLR, ~(u64)0);
12604         write_csr(dd, DC_LCB_ERR_CLR, ~(u64)0);
12605         write_csr(dd, DC_DC8051_ERR_CLR, ~(u64)0);
12606 }
12607
12608 /* Move to pcie.c? */
12609 static void disable_intx(struct pci_dev *pdev)
12610 {
12611         pci_intx(pdev, 0);
12612 }
12613
12614 static void clean_up_interrupts(struct hfi1_devdata *dd)
12615 {
12616         int i;
12617
12618         /* remove irqs - must happen before disabling/turning off */
12619         if (dd->num_msix_entries) {
12620                 /* MSI-X */
12621                 struct hfi1_msix_entry *me = dd->msix_entries;
12622
12623                 for (i = 0; i < dd->num_msix_entries; i++, me++) {
12624                         if (!me->arg) /* => no irq, no affinity */
12625                                 continue;
12626                         hfi1_put_irq_affinity(dd, &dd->msix_entries[i]);
12627                         free_irq(me->msix.vector, me->arg);
12628                 }
12629         } else {
12630                 /* INTx */
12631                 if (dd->requested_intx_irq) {
12632                         free_irq(dd->pcidev->irq, dd);
12633                         dd->requested_intx_irq = 0;
12634                 }
12635         }
12636
12637         /* turn off interrupts */
12638         if (dd->num_msix_entries) {
12639                 /* MSI-X */
12640                 pci_disable_msix(dd->pcidev);
12641         } else {
12642                 /* INTx */
12643                 disable_intx(dd->pcidev);
12644         }
12645
12646         /* clean structures */
12647         kfree(dd->msix_entries);
12648         dd->msix_entries = NULL;
12649         dd->num_msix_entries = 0;
12650 }
12651
12652 /*
12653  * Remap the interrupt source from the general handler to the given MSI-X
12654  * interrupt.
12655  */
12656 static void remap_intr(struct hfi1_devdata *dd, int isrc, int msix_intr)
12657 {
12658         u64 reg;
12659         int m, n;
12660
12661         /* clear from the handled mask of the general interrupt */
12662         m = isrc / 64;
12663         n = isrc % 64;
12664         dd->gi_mask[m] &= ~((u64)1 << n);
12665
12666         /* direct the chip source to the given MSI-X interrupt */
12667         m = isrc / 8;
12668         n = isrc % 8;
12669         reg = read_csr(dd, CCE_INT_MAP + (8 * m));
12670         reg &= ~((u64)0xff << (8 * n));
12671         reg |= ((u64)msix_intr & 0xff) << (8 * n);
12672         write_csr(dd, CCE_INT_MAP + (8 * m), reg);
12673 }
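
/*
 * Mapping arithmetic example (illustrative): source isrc = 75 clears
 * bit 11 of gi_mask[1] (75 / 64 == 1, 75 % 64 == 11) and is written to
 * byte 3 of CCE_INT_MAP CSR 9 (75 / 8 == 9, 75 % 8 == 3), since each
 * 64-bit map CSR holds eight 8-bit MSI-X vector numbers.
 */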
12674
12675 static void remap_sdma_interrupts(struct hfi1_devdata *dd,
12676                                   int engine, int msix_intr)
12677 {
12678         /*
12679          * SDMA engine interrupt sources are grouped by type, rather than
12680          * by engine.  Per-engine interrupts are as follows:
12681          *      SDMA
12682          *      SDMAProgress
12683          *      SDMAIdle
12684          */
12685         remap_intr(dd, IS_SDMA_START + 0 * TXE_NUM_SDMA_ENGINES + engine,
12686                    msix_intr);
12687         remap_intr(dd, IS_SDMA_START + 1 * TXE_NUM_SDMA_ENGINES + engine,
12688                    msix_intr);
12689         remap_intr(dd, IS_SDMA_START + 2 * TXE_NUM_SDMA_ENGINES + engine,
12690                    msix_intr);
12691 }
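
/*
 * Example (illustrative): for engine 2 the three sources remapped above
 * are IS_SDMA_START + 2 (SDMA), IS_SDMA_START + TXE_NUM_SDMA_ENGINES + 2
 * (SDMAProgress), and IS_SDMA_START + 2 * TXE_NUM_SDMA_ENGINES + 2
 * (SDMAIdle), all pointed at the same MSI-X vector.
 */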
12692
12693 static int request_intx_irq(struct hfi1_devdata *dd)
12694 {
12695         int ret;
12696
12697         snprintf(dd->intx_name, sizeof(dd->intx_name), DRIVER_NAME "_%d",
12698                  dd->unit);
12699         ret = request_irq(dd->pcidev->irq, general_interrupt,
12700                           IRQF_SHARED, dd->intx_name, dd);
12701         if (ret)
12702                 dd_dev_err(dd, "unable to request INTx interrupt, err %d\n",
12703                            ret);
12704         else
12705                 dd->requested_intx_irq = 1;
12706         return ret;
12707 }
12708
12709 static int request_msix_irqs(struct hfi1_devdata *dd)
12710 {
12711         int first_general, last_general;
12712         int first_sdma, last_sdma;
12713         int first_rx, last_rx;
12714         int i, ret = 0;
12715
12716         /* calculate the ranges we are going to use */
12717         first_general = 0;
12718         last_general = first_general + 1;
12719         first_sdma = last_general;
12720         last_sdma = first_sdma + dd->num_sdma;
12721         first_rx = last_sdma;
12722         last_rx = first_rx + dd->n_krcv_queues;
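
        /*
         * Example layout (illustrative values): with num_sdma = 4 and
         * n_krcv_queues = 3, vector 0 is the general interrupt, vectors
         * 1-4 are the SDMA engines, and vectors 5-7 are the kernel
         * receive contexts.
         */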
12723
12724         /*
12725          * Sanity check - the code expects all SDMA chip source
12726          * interrupts to be in the same CSR, starting at bit 0.  Verify
12727          * that this is true by checking the bit location of the start.
12728          */
12729         BUILD_BUG_ON(IS_SDMA_START % 64);
12730
12731         for (i = 0; i < dd->num_msix_entries; i++) {
12732                 struct hfi1_msix_entry *me = &dd->msix_entries[i];
12733                 const char *err_info;
12734                 irq_handler_t handler;
12735                 irq_handler_t thread = NULL;
12736                 void *arg;
12737                 int idx;
12738                 struct hfi1_ctxtdata *rcd = NULL;
12739                 struct sdma_engine *sde = NULL;
12740
12741                 /* obtain the arguments to request_irq */
12742                 if (first_general <= i && i < last_general) {
12743                         idx = i - first_general;
12744                         handler = general_interrupt;
12745                         arg = dd;
12746                         snprintf(me->name, sizeof(me->name),
12747                                  DRIVER_NAME "_%d", dd->unit);
12748                         err_info = "general";
12749                         me->type = IRQ_GENERAL;
12750                 } else if (first_sdma <= i && i < last_sdma) {
12751                         idx = i - first_sdma;
12752                         sde = &dd->per_sdma[idx];
12753                         handler = sdma_interrupt;
12754                         arg = sde;
12755                         snprintf(me->name, sizeof(me->name),
12756                                  DRIVER_NAME "_%d sdma%d", dd->unit, idx);
12757                         err_info = "sdma";
12758                         remap_sdma_interrupts(dd, idx, i);
12759                         me->type = IRQ_SDMA;
12760                 } else if (first_rx <= i && i < last_rx) {
12761                         idx = i - first_rx;
12762                         rcd = dd->rcd[idx];
12763                         /* no interrupt if no rcd */
12764                         if (!rcd)
12765                                 continue;
12766                         /*
12767                          * Set the interrupt register and mask for this
12768                          * context's interrupt.
12769                          */
12770                         rcd->ireg = (IS_RCVAVAIL_START + idx) / 64;
12771                         rcd->imask = ((u64)1) <<
12772                                         ((IS_RCVAVAIL_START + idx) % 64);
12773                         handler = receive_context_interrupt;
12774                         thread = receive_context_thread;
12775                         arg = rcd;
12776                         snprintf(me->name, sizeof(me->name),
12777                                  DRIVER_NAME "_%d kctxt%d", dd->unit, idx);
12778                         err_info = "receive context";
12779                         remap_intr(dd, IS_RCVAVAIL_START + idx, i);
12780                         me->type = IRQ_RCVCTXT;
12781                 } else {
12782                         /* not in our expected range - complain, then
12783                          * ignore it
12784                          */
12785                         dd_dev_err(dd,
12786                                    "Unexpected extra MSI-X interrupt %d\n", i);
12787                         continue;
12788                 }
12789                 /* no argument, no interrupt */
12790                 if (!arg)
12791                         continue;
12792                 /* make sure the name is terminated */
12793                 me->name[sizeof(me->name) - 1] = 0;
12794
12795                 ret = request_threaded_irq(me->msix.vector, handler, thread, 0,
12796                                            me->name, arg);
12797                 if (ret) {
12798                         dd_dev_err(dd,
12799                                    "unable to allocate %s interrupt, vector %d, index %d, err %d\n",
12800                                    err_info, me->msix.vector, idx, ret);
12801                         return ret;
12802                 }
12803                 /*
12804                  * assign arg only after the request_irq call, so
12805                  * cleanup will free just the IRQs actually requested
12806                  */
12807                 me->arg = arg;
12808
12809                 ret = hfi1_get_irq_affinity(dd, me);
12810                 if (ret)
12811                         dd_dev_err(dd,
12812                                    "unable to pin IRQ %d\n", ret);
12813         }
12814
12815         return ret;
12816 }
12817
12818 /*
12819  * Set the general handler to accept all interrupts, remap all
12820  * chip interrupts back to MSI-X 0.
12821  */
12822 static void reset_interrupts(struct hfi1_devdata *dd)
12823 {
12824         int i;
12825
12826         /* all interrupts handled by the general handler */
12827         for (i = 0; i < CCE_NUM_INT_CSRS; i++)
12828                 dd->gi_mask[i] = ~(u64)0;
12829
12830         /* all chip interrupts map to MSI-X 0 */
12831         for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
12832                 write_csr(dd, CCE_INT_MAP + (8 * i), 0);
12833 }
12834
12835 static int set_up_interrupts(struct hfi1_devdata *dd)
12836 {
12837         struct hfi1_msix_entry *entries;
12838         u32 total, request;
12839         int i, ret;
12840         int single_interrupt = 0; /* we expect to have all the interrupts */
12841
12842         /*
12843          * Interrupt count:
12844          *      1 general, "slow path" interrupt (includes the SDMA engines
12845          *              slow source, SDMACleanupDone)
12846          *      N interrupts - one per used SDMA engine
12847          *      M interrupts - one per kernel receive context
12848          */
12849         total = 1 + dd->num_sdma + dd->n_krcv_queues;
12850
12851         entries = kcalloc(total, sizeof(*entries), GFP_KERNEL);
12852         if (!entries) {
12853                 ret = -ENOMEM;
12854                 goto fail;
12855         }
12856         /* 1-1 MSI-X entry assignment */
12857         for (i = 0; i < total; i++)
12858                 entries[i].msix.entry = i;
12859
12860         /* ask for MSI-X interrupts */
12861         request = total;
12862         request_msix(dd, &request, entries);
12863
12864         if (request == 0) {
12865                 /* using INTx */
12866                 /* dd->num_msix_entries already zero */
12867                 kfree(entries);
12868                 single_interrupt = 1;
12869                 dd_dev_err(dd, "MSI-X failed, using INTx interrupts\n");
12870         } else {
12871                 /* using MSI-X */
12872                 dd->num_msix_entries = request;
12873                 dd->msix_entries = entries;
12874
12875                 if (request != total) {
12876                         /* using MSI-X, with reduced interrupts */
12877                         dd_dev_err(
12878                                 dd,
12879                                 "cannot handle reduced interrupt case, want %u, got %u\n",
12880                                 total, request);
12881                         ret = -EINVAL;
12882                         goto fail;
12883                 }
12884                 dd_dev_info(dd, "%u MSI-X interrupts allocated\n", total);
12885         }
12886
12887         /* mask all interrupts */
12888         set_intr_state(dd, 0);
12889         /* clear all pending interrupts */
12890         clear_all_interrupts(dd);
12891
12892         /* reset general handler mask, chip MSI-X mappings */
12893         reset_interrupts(dd);
12894
12895         if (single_interrupt)
12896                 ret = request_intx_irq(dd);
12897         else
12898                 ret = request_msix_irqs(dd);
12899         if (ret)
12900                 goto fail;
12901
12902         return 0;
12903
12904 fail:
12905         clean_up_interrupts(dd);
12906         return ret;
12907 }
12908
12909 /*
12910  * Set up context values in dd.  Sets:
12911  *
12912  *      num_rcv_contexts - number of contexts being used
12913  *      n_krcv_queues - number of kernel contexts
12914  *      first_user_ctxt - first non-kernel context in array of contexts
12915  *      freectxts - number of free user contexts
12916  *      num_send_contexts - number of PIO send contexts being used
12917  */
12918 static int set_up_context_variables(struct hfi1_devdata *dd)
12919 {
12920         unsigned long num_kernel_contexts;
12921         int total_contexts;
12922         int ret;
12923         unsigned ngroups;
12924         int qos_rmt_count;
12925         int user_rmt_reduced;
12926
12927         /*
12928          * Kernel receive contexts:
12929          * - Context 0 - control context (VL15/multicast/error)
12930          * - Context 1 - first kernel context
12931          * - Context 2 - second kernel context
12932          * ...
12933          */
12934         if (n_krcvqs)
12935                 /*
12936                  * n_krcvqs is the sum of module parameter kernel receive
12937                  * contexts, krcvqs[].  It does not include the control
12938                  * context, so add that.
12939                  */
12940                 num_kernel_contexts = n_krcvqs + 1;
12941         else
12942                 num_kernel_contexts = DEFAULT_KRCVQS + 1;
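
        /*
         * Example (illustrative values): krcvqs[] summing to 5 across
         * the VLs gives n_krcvqs = 5, so num_kernel_contexts = 6: the
         * control context plus five kernel receive contexts.
         */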
12943         /*
12944          * Every kernel receive context needs an ACK send context.
12945          * One send context is allocated for each VL{0-7} and VL15.
12946          */
12947         if (num_kernel_contexts > (dd->chip_send_contexts - num_vls - 1)) {
12948                 dd_dev_err(dd,
12949                            "Reducing # kernel rcv contexts to: %d, from %lu\n",
12950                            (int)(dd->chip_send_contexts - num_vls - 1),
12951                            num_kernel_contexts);
12952                 num_kernel_contexts = dd->chip_send_contexts - num_vls - 1;
12953         }
12954         /*
12955          * User contexts:
12956          *      - default to 1 user context per real (non-HT) CPU core if
12957          *        num_user_contexts is negative
12958          */
12959         if (num_user_contexts < 0)
12960                 num_user_contexts =
12961                         cpumask_weight(&node_affinity.real_cpu_mask);
12962
12963         total_contexts = num_kernel_contexts + num_user_contexts;
12964
12965         /*
12966          * Adjust the counts given a global max.
12967          */
12968         if (total_contexts > dd->chip_rcv_contexts) {
12969                 dd_dev_err(dd,
12970                            "Reducing # user receive contexts to: %d, from %d\n",
12971                            (int)(dd->chip_rcv_contexts - num_kernel_contexts),
12972                            (int)num_user_contexts);
12973                 num_user_contexts = dd->chip_rcv_contexts - num_kernel_contexts;
12974                 /* recalculate */
12975                 total_contexts = num_kernel_contexts + num_user_contexts;
12976         }
12977
12978         /* each user context requires an entry in the RMT */
12979         qos_rmt_count = qos_rmt_entries(dd, NULL, NULL);
12980         if (qos_rmt_count + num_user_contexts > NUM_MAP_ENTRIES) {
12981                 user_rmt_reduced = NUM_MAP_ENTRIES - qos_rmt_count;
12982                 dd_dev_err(dd,
12983                            "RMT size is reducing the number of user receive contexts from %d to %d\n",
12984                            (int)num_user_contexts,
12985                            user_rmt_reduced);
12986                 /* recalculate */
12987                 num_user_contexts = user_rmt_reduced;
12988                 total_contexts = num_kernel_contexts + num_user_contexts;
12989         }
12990
12991         /* the first N are kernel contexts, the rest are user contexts */
12992         dd->num_rcv_contexts = total_contexts;
12993         dd->n_krcv_queues = num_kernel_contexts;
12994         dd->first_user_ctxt = num_kernel_contexts;
12995         dd->num_user_contexts = num_user_contexts;
12996         dd->freectxts = num_user_contexts;
12997         dd_dev_info(dd,
12998                     "rcv contexts: chip %d, used %d (kernel %d, user %d)\n",
12999                     (int)dd->chip_rcv_contexts,
13000                     (int)dd->num_rcv_contexts,
13001                     (int)dd->n_krcv_queues,
13002                     (int)dd->num_rcv_contexts - dd->n_krcv_queues);
13003
13004         /*
13005          * Receive array allocation:
13006          *   All RcvArray entries are divided into groups of 8. This
13007          *   is required by the hardware and will speed up writes to
13008          *   consecutive entries by using write-combining of the entire
13009          *   cacheline.
13010          *
13011          *   The groups are divided evenly among all contexts;
13012          *   any leftover groups are given to the first N user
13013          *   contexts.
13014          */
13015         dd->rcv_entries.group_size = RCV_INCREMENT;
13016         ngroups = dd->chip_rcv_array_count / dd->rcv_entries.group_size;
13017         dd->rcv_entries.ngroups = ngroups / dd->num_rcv_contexts;
13018         dd->rcv_entries.nctxt_extra = ngroups -
13019                 (dd->num_rcv_contexts * dd->rcv_entries.ngroups);
13020         dd_dev_info(dd, "RcvArray groups %u, ctxts extra %u\n",
13021                     dd->rcv_entries.ngroups,
13022                     dd->rcv_entries.nctxt_extra);
13023         if (dd->rcv_entries.ngroups * dd->rcv_entries.group_size >
13024             MAX_EAGER_ENTRIES * 2) {
13025                 dd->rcv_entries.ngroups = (MAX_EAGER_ENTRIES * 2) /
13026                         dd->rcv_entries.group_size;
13027                 dd_dev_info(dd,
13028                             "RcvArray group count too high, change to %u\n",
13029                             dd->rcv_entries.ngroups);
13030                 dd->rcv_entries.nctxt_extra = 0;
13031         }
13032         /*
13033          * PIO send contexts
13034          */
13035         ret = init_sc_pools_and_sizes(dd);
13036         if (ret >= 0) { /* success */
13037                 dd->num_send_contexts = ret;
13038                 dd_dev_info(
13039                         dd,
13040                         "send contexts: chip %d, used %d (kernel %d, ack %d, user %d, vl15 %d)\n",
13041                         dd->chip_send_contexts,
13042                         dd->num_send_contexts,
13043                         dd->sc_sizes[SC_KERNEL].count,
13044                         dd->sc_sizes[SC_ACK].count,
13045                         dd->sc_sizes[SC_USER].count,
13046                         dd->sc_sizes[SC_VL15].count);
13047                 ret = 0;        /* success */
13048         }
13049
13050         return ret;
13051 }
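
/*
 * Illustrative sketch only, not driver code: the RcvArray split
 * computed in set_up_context_variables() above, pulled out with
 * hypothetical names.  Entries are taken in whole groups of
 * group_size (8, per the hardware write-combining requirement),
 * every context gets an equal share of groups, and the remainder is
 * recorded so the first few user contexts can absorb one extra
 * group each.
 */
static inline void example_rcv_entry_split(u32 array_count, u32 nctxts,
					   u32 group_size,
					   u32 *groups_per_ctxt,
					   u32 *extra_groups)
{
	u32 ngroups = array_count / group_size;	/* whole groups only */

	*groups_per_ctxt = ngroups / nctxts;
	*extra_groups = ngroups - (nctxts * *groups_per_ctxt);
}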
13052
13053 /*
13054  * Set the device/port partition key table. The MAD code
13055  * will ensure that, at least, the partial management
13056  * partition key is present in the table.
13057  */
13058 static void set_partition_keys(struct hfi1_pportdata *ppd)
13059 {
13060         struct hfi1_devdata *dd = ppd->dd;
13061         u64 reg = 0;
13062         int i;
13063
13064         dd_dev_info(dd, "Setting partition keys\n");
13065         for (i = 0; i < hfi1_get_npkeys(dd); i++) {
13066                 reg |= (ppd->pkeys[i] &
13067                         RCV_PARTITION_KEY_PARTITION_KEY_A_MASK) <<
13068                         ((i % 4) *
13069                          RCV_PARTITION_KEY_PARTITION_KEY_B_SHIFT);
13070                 /* Each register holds 4 PKey values. */
13071                 if ((i % 4) == 3) {
13072                         write_csr(dd, RCV_PARTITION_KEY +
13073                                   ((i - 3) * 2), reg);
13074                         reg = 0;
13075                 }
13076         }
13077
13078         /* Always enable HW pkey check when the pkey table is set */
13079         add_rcvctrl(dd, RCV_CTRL_RCV_PARTITION_KEY_ENABLE_SMASK);
13080 }
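
/*
 * Illustrative sketch only, not driver code: how set_partition_keys()
 * above packs PKeys.  Four 16-bit keys fit in one 64-bit CSR (the
 * B shift is assumed to be 16, as the 4-per-register layout
 * suggests), and the CSR address advances 8 bytes per packed
 * register ((i - 3) * 2 with i stepping by 4).
 */
static inline u64 example_pack_four_pkeys(const u16 pkeys[4])
{
	u64 reg = 0;
	int i;

	for (i = 0; i < 4; i++)
		reg |= (u64)pkeys[i] << (i * 16); /* key i -> bits [16i+15:16i] */
	return reg;
}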
13081
13082 /*
13083  * These CSRs and memories are uninitialized on reset and must be
13084  * written before reading to set the ECC/parity bits.
13085  *
13086  * NOTE: All user context CSRs that are not mmapped write-only
13087  * (e.g. the TID flows) must be initialized even if the driver never
13088  * reads them.
13089  */
13090 static void write_uninitialized_csrs_and_memories(struct hfi1_devdata *dd)
13091 {
13092         int i, j;
13093
13094         /* CceIntMap */
13095         for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
13096                 write_csr(dd, CCE_INT_MAP + (8 * i), 0);
13097
13098         /* SendCtxtCreditReturnAddr */
13099         for (i = 0; i < dd->chip_send_contexts; i++)
13100                 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);
13101
13102         /* PIO Send buffers */
13103         /* SDMA Send buffers */
13104         /*
13105          * These are not normally read, and (presently) have no method
13106          * to be read, so are not pre-initialized
13107          */
13108
13109         /* RcvHdrAddr */
13110         /* RcvHdrTailAddr */
13111         /* RcvTidFlowTable */
13112         for (i = 0; i < dd->chip_rcv_contexts; i++) {
13113                 write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
13114                 write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
13115                 for (j = 0; j < RXE_NUM_TID_FLOWS; j++)
13116                         write_uctxt_csr(dd, i, RCV_TID_FLOW_TABLE + (8 * j), 0);
13117         }
13118
13119         /* RcvArray */
13120         for (i = 0; i < dd->chip_rcv_array_count; i++)
13121                 write_csr(dd, RCV_ARRAY + (8 * i),
13122                           RCV_ARRAY_RT_WRITE_ENABLE_SMASK);
13123
13124         /* RcvQPMapTable */
13125         for (i = 0; i < 32; i++)
13126                 write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0);
13127 }
13128
13129 /*
13130  * Use the ctrl_bits in CceCtrl to clear the status_bits in CceStatus.
13131  */
13132 static void clear_cce_status(struct hfi1_devdata *dd, u64 status_bits,
13133                              u64 ctrl_bits)
13134 {
13135         unsigned long timeout;
13136         u64 reg;
13137
13138         /* is the condition present? */
13139         reg = read_csr(dd, CCE_STATUS);
13140         if ((reg & status_bits) == 0)
13141                 return;
13142
13143         /* clear the condition */
13144         write_csr(dd, CCE_CTRL, ctrl_bits);
13145
13146         /* wait for the condition to clear */
13147         timeout = jiffies + msecs_to_jiffies(CCE_STATUS_TIMEOUT);
13148         while (1) {
13149                 reg = read_csr(dd, CCE_STATUS);
13150                 if ((reg & status_bits) == 0)
13151                         return;
13152                 if (time_after(jiffies, timeout)) {
13153                         dd_dev_err(dd,
13154                                    "Timeout waiting for CceStatus to clear bits 0x%llx, remaining 0x%llx\n",
13155                                    status_bits, reg & status_bits);
13156                         return;
13157                 }
13158                 udelay(1);
13159         }
13160 }
13161
13162 /* set CCE CSRs to chip reset defaults */
13163 static void reset_cce_csrs(struct hfi1_devdata *dd)
13164 {
13165         int i;
13166
13167         /* CCE_REVISION read-only */
13168         /* CCE_REVISION2 read-only */
13169         /* CCE_CTRL - bits clear automatically */
13170         /* CCE_STATUS read-only, use CceCtrl to clear */
13171         clear_cce_status(dd, ALL_FROZE, CCE_CTRL_SPC_UNFREEZE_SMASK);
13172         clear_cce_status(dd, ALL_TXE_PAUSE, CCE_CTRL_TXE_RESUME_SMASK);
13173         clear_cce_status(dd, ALL_RXE_PAUSE, CCE_CTRL_RXE_RESUME_SMASK);
13174         for (i = 0; i < CCE_NUM_SCRATCH; i++)
13175                 write_csr(dd, CCE_SCRATCH + (8 * i), 0);
13176         /* CCE_ERR_STATUS read-only */
13177         write_csr(dd, CCE_ERR_MASK, 0);
13178         write_csr(dd, CCE_ERR_CLEAR, ~0ull);
13179         /* CCE_ERR_FORCE leave alone */
13180         for (i = 0; i < CCE_NUM_32_BIT_COUNTERS; i++)
13181                 write_csr(dd, CCE_COUNTER_ARRAY32 + (8 * i), 0);
13182         write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_RESETCSR);
13183         /* CCE_PCIE_CTRL leave alone */
13184         for (i = 0; i < CCE_NUM_MSIX_VECTORS; i++) {
13185                 write_csr(dd, CCE_MSIX_TABLE_LOWER + (8 * i), 0);
13186                 write_csr(dd, CCE_MSIX_TABLE_UPPER + (8 * i),
13187                           CCE_MSIX_TABLE_UPPER_RESETCSR);
13188         }
13189         for (i = 0; i < CCE_NUM_MSIX_PBAS; i++) {
13190                 /* CCE_MSIX_PBA read-only */
13191                 write_csr(dd, CCE_MSIX_INT_GRANTED, ~0ull);
13192                 write_csr(dd, CCE_MSIX_VEC_CLR_WITHOUT_INT, ~0ull);
13193         }
13194         for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
13195                 write_csr(dd, CCE_INT_MAP + (8 * i), 0);
13196         for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
13197                 /* CCE_INT_STATUS read-only */
13198                 write_csr(dd, CCE_INT_MASK + (8 * i), 0);
13199                 write_csr(dd, CCE_INT_CLEAR + (8 * i), ~0ull);
13200                 /* CCE_INT_FORCE leave alone */
13201                 /* CCE_INT_BLOCKED read-only */
13202         }
13203         for (i = 0; i < CCE_NUM_32_BIT_INT_COUNTERS; i++)
13204                 write_csr(dd, CCE_INT_COUNTER_ARRAY32 + (8 * i), 0);
13205 }
13206
13207 /* set MISC CSRs to chip reset defaults */
13208 static void reset_misc_csrs(struct hfi1_devdata *dd)
13209 {
13210         int i;
13211
13212         for (i = 0; i < 32; i++) {
13213                 write_csr(dd, MISC_CFG_RSA_R2 + (8 * i), 0);
13214                 write_csr(dd, MISC_CFG_RSA_SIGNATURE + (8 * i), 0);
13215                 write_csr(dd, MISC_CFG_RSA_MODULUS + (8 * i), 0);
13216         }
13217         /*
13218          * MISC_CFG_SHA_PRELOAD leave alone - always reads 0 and can
13219          * only be written in 128-byte chunks
13220          */
13221         /* init RSA engine to clear lingering errors */
13222         write_csr(dd, MISC_CFG_RSA_CMD, 1);
13223         write_csr(dd, MISC_CFG_RSA_MU, 0);
13224         write_csr(dd, MISC_CFG_FW_CTRL, 0);
13225         /* MISC_STS_8051_DIGEST read-only */
13226         /* MISC_STS_SBM_DIGEST read-only */
13227         /* MISC_STS_PCIE_DIGEST read-only */
13228         /* MISC_STS_FAB_DIGEST read-only */
13229         /* MISC_ERR_STATUS read-only */
13230         write_csr(dd, MISC_ERR_MASK, 0);
13231         write_csr(dd, MISC_ERR_CLEAR, ~0ull);
13232         /* MISC_ERR_FORCE leave alone */
13233 }
13234
13235 /* set TXE CSRs to chip reset defaults */
13236 static void reset_txe_csrs(struct hfi1_devdata *dd)
13237 {
13238         int i;
13239
13240         /*
13241          * TXE Kernel CSRs
13242          */
13243         write_csr(dd, SEND_CTRL, 0);
13244         __cm_reset(dd, 0);      /* reset CM internal state */
13245         /* SEND_CONTEXTS read-only */
13246         /* SEND_DMA_ENGINES read-only */
13247         /* SEND_PIO_MEM_SIZE read-only */
13248         /* SEND_DMA_MEM_SIZE read-only */
13249         write_csr(dd, SEND_HIGH_PRIORITY_LIMIT, 0);
13250         pio_reset_all(dd);      /* SEND_PIO_INIT_CTXT */
13251         /* SEND_PIO_ERR_STATUS read-only */
13252         write_csr(dd, SEND_PIO_ERR_MASK, 0);
13253         write_csr(dd, SEND_PIO_ERR_CLEAR, ~0ull);
13254         /* SEND_PIO_ERR_FORCE leave alone */
13255         /* SEND_DMA_ERR_STATUS read-only */
13256         write_csr(dd, SEND_DMA_ERR_MASK, 0);
13257         write_csr(dd, SEND_DMA_ERR_CLEAR, ~0ull);
13258         /* SEND_DMA_ERR_FORCE leave alone */
13259         /* SEND_EGRESS_ERR_STATUS read-only */
13260         write_csr(dd, SEND_EGRESS_ERR_MASK, 0);
13261         write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~0ull);
13262         /* SEND_EGRESS_ERR_FORCE leave alone */
13263         write_csr(dd, SEND_BTH_QP, 0);
13264         write_csr(dd, SEND_STATIC_RATE_CONTROL, 0);
13265         write_csr(dd, SEND_SC2VLT0, 0);
13266         write_csr(dd, SEND_SC2VLT1, 0);
13267         write_csr(dd, SEND_SC2VLT2, 0);
13268         write_csr(dd, SEND_SC2VLT3, 0);
13269         write_csr(dd, SEND_LEN_CHECK0, 0);
13270         write_csr(dd, SEND_LEN_CHECK1, 0);
13271         /* SEND_ERR_STATUS read-only */
13272         write_csr(dd, SEND_ERR_MASK, 0);
13273         write_csr(dd, SEND_ERR_CLEAR, ~0ull);
13274         /* SEND_ERR_FORCE read-only */
13275         for (i = 0; i < VL_ARB_LOW_PRIO_TABLE_SIZE; i++)
13276                 write_csr(dd, SEND_LOW_PRIORITY_LIST + (8 * i), 0);
13277         for (i = 0; i < VL_ARB_HIGH_PRIO_TABLE_SIZE; i++)
13278                 write_csr(dd, SEND_HIGH_PRIORITY_LIST + (8 * i), 0);
13279         for (i = 0; i < dd->chip_send_contexts / NUM_CONTEXTS_PER_SET; i++)
13280                 write_csr(dd, SEND_CONTEXT_SET_CTRL + (8 * i), 0);
13281         for (i = 0; i < TXE_NUM_32_BIT_COUNTER; i++)
13282                 write_csr(dd, SEND_COUNTER_ARRAY32 + (8 * i), 0);
13283         for (i = 0; i < TXE_NUM_64_BIT_COUNTER; i++)
13284                 write_csr(dd, SEND_COUNTER_ARRAY64 + (8 * i), 0);
13285         write_csr(dd, SEND_CM_CTRL, SEND_CM_CTRL_RESETCSR);
13286         write_csr(dd, SEND_CM_GLOBAL_CREDIT, SEND_CM_GLOBAL_CREDIT_RESETCSR);
13287         /* SEND_CM_CREDIT_USED_STATUS read-only */
13288         write_csr(dd, SEND_CM_TIMER_CTRL, 0);
13289         write_csr(dd, SEND_CM_LOCAL_AU_TABLE0_TO3, 0);
13290         write_csr(dd, SEND_CM_LOCAL_AU_TABLE4_TO7, 0);
13291         write_csr(dd, SEND_CM_REMOTE_AU_TABLE0_TO3, 0);
13292         write_csr(dd, SEND_CM_REMOTE_AU_TABLE4_TO7, 0);
13293         for (i = 0; i < TXE_NUM_DATA_VL; i++)
13294                 write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0);
13295         write_csr(dd, SEND_CM_CREDIT_VL15, 0);
13296         /* SEND_CM_CREDIT_USED_VL read-only */
13297         /* SEND_CM_CREDIT_USED_VL15 read-only */
13298         /* SEND_EGRESS_CTXT_STATUS read-only */
13299         /* SEND_EGRESS_SEND_DMA_STATUS read-only */
13300         write_csr(dd, SEND_EGRESS_ERR_INFO, ~0ull);
13301         /* SEND_EGRESS_ERR_INFO is cleared by the write above */
13302         /* SEND_EGRESS_ERR_SOURCE read-only */
13303
13304         /*
13305          * TXE Per-Context CSRs
13306          */
13307         for (i = 0; i < dd->chip_send_contexts; i++) {
13308                 write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
13309                 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_CTRL, 0);
13310                 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);
13311                 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_FORCE, 0);
13312                 write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, 0);
13313                 write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~0ull);
13314                 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_ENABLE, 0);
13315                 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_VL, 0);
13316                 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_JOB_KEY, 0);
13317                 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_PARTITION_KEY, 0);
13318                 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, 0);
13319                 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_OPCODE, 0);
13320         }
13321
13322         /*
13323          * TXE Per-SDMA CSRs
13324          */
13325         for (i = 0; i < dd->chip_sdma_engines; i++) {
13326                 write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
13327                 /* SEND_DMA_STATUS read-only */
13328                 write_kctxt_csr(dd, i, SEND_DMA_BASE_ADDR, 0);
13329                 write_kctxt_csr(dd, i, SEND_DMA_LEN_GEN, 0);
13330                 write_kctxt_csr(dd, i, SEND_DMA_TAIL, 0);
13331                 /* SEND_DMA_HEAD read-only */
13332                 write_kctxt_csr(dd, i, SEND_DMA_HEAD_ADDR, 0);
13333                 write_kctxt_csr(dd, i, SEND_DMA_PRIORITY_THLD, 0);
13334                 /* SEND_DMA_IDLE_CNT read-only */
13335                 write_kctxt_csr(dd, i, SEND_DMA_RELOAD_CNT, 0);
13336                 write_kctxt_csr(dd, i, SEND_DMA_DESC_CNT, 0);
13337                 /* SEND_DMA_DESC_FETCHED_CNT read-only */
13338                 /* SEND_DMA_ENG_ERR_STATUS read-only */
13339                 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, 0);
13340                 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~0ull);
13341                 /* SEND_DMA_ENG_ERR_FORCE leave alone */
13342                 write_kctxt_csr(dd, i, SEND_DMA_CHECK_ENABLE, 0);
13343                 write_kctxt_csr(dd, i, SEND_DMA_CHECK_VL, 0);
13344                 write_kctxt_csr(dd, i, SEND_DMA_CHECK_JOB_KEY, 0);
13345                 write_kctxt_csr(dd, i, SEND_DMA_CHECK_PARTITION_KEY, 0);
13346                 write_kctxt_csr(dd, i, SEND_DMA_CHECK_SLID, 0);
13347                 write_kctxt_csr(dd, i, SEND_DMA_CHECK_OPCODE, 0);
13348                 write_kctxt_csr(dd, i, SEND_DMA_MEMORY, 0);
13349         }
13350 }
13351
13352 /*
13353  * Expect on entry:
13354  * o Packet ingress is disabled, i.e. RcvCtrl.RcvPortEnable == 0
13355  */
13356 static void init_rbufs(struct hfi1_devdata *dd)
13357 {
13358         u64 reg;
13359         int count;
13360
13361         /*
13362          * Wait for DMA to stop: RxRbufPktPending and RxPktInProgress are
13363          * clear.
13364          */
13365         count = 0;
13366         while (1) {
13367                 reg = read_csr(dd, RCV_STATUS);
13368                 if ((reg & (RCV_STATUS_RX_RBUF_PKT_PENDING_SMASK
13369                             | RCV_STATUS_RX_PKT_IN_PROGRESS_SMASK)) == 0)
13370                         break;
13371                 /*
13372                  * Give up after 1ms - maximum wait time.
13373                  *
13374                  * RBuf size is 136KiB.  Slowest possible is PCIe Gen1 x1 at
13375                  * 250MB/s bandwidth.  Lower rate to 66% for overhead to get:
13376                  *      136 KB / (66% * 250MB/s) = 844us
13377                  */
13378                 if (count++ > 500) {
13379                         dd_dev_err(dd,
13380                                    "%s: in-progress DMA not clearing: RcvStatus 0x%llx, continuing\n",
13381                                    __func__, reg);
13382                         break;
13383                 }
13384                 udelay(2); /* do not busy-wait the CSR */
13385         }
13386
13387         /* start the init - expect RcvCtrl to be 0 */
13388         write_csr(dd, RCV_CTRL, RCV_CTRL_RX_RBUF_INIT_SMASK);
13389
13390         /*
13391          * Read to force the write of RcvCtrl.RxRbufInit.  There is a brief
13392          * period after the write before RcvStatus.RxRbufInitDone is valid.
13393          * The delay in the first run through the loop below is sufficient and
13394          * required before the first read of RcvStatus.RxRbufInitDone.
13395          */
13396         read_csr(dd, RCV_CTRL);
13397
13398         /* wait for the init to finish */
13399         count = 0;
13400         while (1) {
13401                 /* delay is required first time through - see above */
13402                 udelay(2); /* do not busy-wait the CSR */
13403                 reg = read_csr(dd, RCV_STATUS);
13404                 if (reg & (RCV_STATUS_RX_RBUF_INIT_DONE_SMASK))
13405                         break;
13406
13407                 /* give up after 100us - slowest possible at 33MHz is 73us */
13408                 if (count++ > 50) {
13409                         dd_dev_err(dd,
13410                                    "%s: RcvStatus.RxRbufInitDone not set, continuing\n",
13411                                    __func__);
13412                         break;
13413                 }
13414         }
13415 }
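
/*
 * Illustrative sketch only, not driver code: the worst-case drain
 * bound behind the 1ms limit in init_rbufs() above.  136 KiB at 66%
 * of PCIe Gen1 x1 (250 MB/s, i.e. 250 bytes/us) gives
 * 139264 / 165 ~= 844us, so 500 iterations of udelay(2) cover it.
 */
static inline u32 example_rbuf_drain_us(u32 rbuf_bytes, u32 mbytes_per_s)
{
	/* 66% of nominal bandwidth to allow for protocol overhead */
	u32 eff_bytes_per_us = (mbytes_per_s * 66) / 100; /* MB/s == bytes/us */

	return rbuf_bytes / eff_bytes_per_us;	/* microseconds, truncated */
}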
13416
13417 /* set RXE CSRs to chip reset defaults */
13418 static void reset_rxe_csrs(struct hfi1_devdata *dd)
13419 {
13420         int i, j;
13421
13422         /*
13423          * RXE Kernel CSRs
13424          */
13425         write_csr(dd, RCV_CTRL, 0);
13426         init_rbufs(dd);
13427         /* RCV_STATUS read-only */
13428         /* RCV_CONTEXTS read-only */
13429         /* RCV_ARRAY_CNT read-only */
13430         /* RCV_BUF_SIZE read-only */
13431         write_csr(dd, RCV_BTH_QP, 0);
13432         write_csr(dd, RCV_MULTICAST, 0);
13433         write_csr(dd, RCV_BYPASS, 0);
13434         write_csr(dd, RCV_VL15, 0);
13435         /* this is a clear-down */
13436         write_csr(dd, RCV_ERR_INFO,
13437                   RCV_ERR_INFO_RCV_EXCESS_BUFFER_OVERRUN_SMASK);
13438         /* RCV_ERR_STATUS read-only */
13439         write_csr(dd, RCV_ERR_MASK, 0);
13440         write_csr(dd, RCV_ERR_CLEAR, ~0ull);
13441         /* RCV_ERR_FORCE leave alone */
13442         for (i = 0; i < 32; i++)
13443                 write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0);
13444         for (i = 0; i < 4; i++)
13445                 write_csr(dd, RCV_PARTITION_KEY + (8 * i), 0);
13446         for (i = 0; i < RXE_NUM_32_BIT_COUNTERS; i++)
13447                 write_csr(dd, RCV_COUNTER_ARRAY32 + (8 * i), 0);
13448         for (i = 0; i < RXE_NUM_64_BIT_COUNTERS; i++)
13449                 write_csr(dd, RCV_COUNTER_ARRAY64 + (8 * i), 0);
13450         for (i = 0; i < RXE_NUM_RSM_INSTANCES; i++) {
13451                 write_csr(dd, RCV_RSM_CFG + (8 * i), 0);
13452                 write_csr(dd, RCV_RSM_SELECT + (8 * i), 0);
13453                 write_csr(dd, RCV_RSM_MATCH + (8 * i), 0);
13454         }
13455         for (i = 0; i < 32; i++)
13456                 write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), 0);
13457
13458         /*
13459          * RXE Kernel and User Per-Context CSRs
13460          */
13461         for (i = 0; i < dd->chip_rcv_contexts; i++) {
13462                 /* kernel */
13463                 write_kctxt_csr(dd, i, RCV_CTXT_CTRL, 0);
13464                 /* RCV_CTXT_STATUS read-only */
13465                 write_kctxt_csr(dd, i, RCV_EGR_CTRL, 0);
13466                 write_kctxt_csr(dd, i, RCV_TID_CTRL, 0);
13467                 write_kctxt_csr(dd, i, RCV_KEY_CTRL, 0);
13468                 write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
13469                 write_kctxt_csr(dd, i, RCV_HDR_CNT, 0);
13470                 write_kctxt_csr(dd, i, RCV_HDR_ENT_SIZE, 0);
13471                 write_kctxt_csr(dd, i, RCV_HDR_SIZE, 0);
13472                 write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
13473                 write_kctxt_csr(dd, i, RCV_AVAIL_TIME_OUT, 0);
13474                 write_kctxt_csr(dd, i, RCV_HDR_OVFL_CNT, 0);
13475
13476                 /* user */
13477                 /* RCV_HDR_TAIL read-only */
13478                 write_uctxt_csr(dd, i, RCV_HDR_HEAD, 0);
13479                 /* RCV_EGR_INDEX_TAIL read-only */
13480                 write_uctxt_csr(dd, i, RCV_EGR_INDEX_HEAD, 0);
13481                 /* RCV_EGR_OFFSET_TAIL read-only */
13482                 for (j = 0; j < RXE_NUM_TID_FLOWS; j++) {
13483                         write_uctxt_csr(dd, i,
13484                                         RCV_TID_FLOW_TABLE + (8 * j), 0);
13485                 }
13486         }
13487 }
13488
13489 /*
13490  * Set sc2vl tables.
13491  *
13492  * They power on to zeros, so to avoid send context errors
13493  * they need to be set:
13494  *
13495  * SC 0-7 -> VL 0-7 (respectively)
13496  * SC 15  -> VL 15
13497  * otherwise
13498  *        -> VL 0
13499  */
13500 static void init_sc2vl_tables(struct hfi1_devdata *dd)
13501 {
13502         int i;
13503         /* init per architecture spec, constrained by hardware capability */
13504
13505         /* HFI maps sent packets */
13506         write_csr(dd, SEND_SC2VLT0, SC2VL_VAL(
13507                 0,
13508                 0, 0, 1, 1,
13509                 2, 2, 3, 3,
13510                 4, 4, 5, 5,
13511                 6, 6, 7, 7));
13512         write_csr(dd, SEND_SC2VLT1, SC2VL_VAL(
13513                 1,
13514                 8, 0, 9, 0,
13515                 10, 0, 11, 0,
13516                 12, 0, 13, 0,
13517                 14, 0, 15, 15));
13518         write_csr(dd, SEND_SC2VLT2, SC2VL_VAL(
13519                 2,
13520                 16, 0, 17, 0,
13521                 18, 0, 19, 0,
13522                 20, 0, 21, 0,
13523                 22, 0, 23, 0));
13524         write_csr(dd, SEND_SC2VLT3, SC2VL_VAL(
13525                 3,
13526                 24, 0, 25, 0,
13527                 26, 0, 27, 0,
13528                 28, 0, 29, 0,
13529                 30, 0, 31, 0));
13530
13531         /* DC maps received packets */
13532         write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0, DC_SC_VL_VAL(
13533                 15_0,
13534                 0, 0, 1, 1,  2, 2,  3, 3,  4, 4,  5, 5,  6, 6,  7,  7,
13535                 8, 0, 9, 0, 10, 0, 11, 0, 12, 0, 13, 0, 14, 0, 15, 15));
13536         write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16, DC_SC_VL_VAL(
13537                 31_16,
13538                 16, 0, 17, 0, 18, 0, 19, 0, 20, 0, 21, 0, 22, 0, 23, 0,
13539                 24, 0, 25, 0, 26, 0, 27, 0, 28, 0, 29, 0, 30, 0, 31, 0));
13540
13541         /* initialize the cached sc2vl values consistently with h/w */
13542         for (i = 0; i < 32; i++) {
13543                 if (i < 8 || i == 15)
13544                         *((u8 *)(dd->sc2vl) + i) = (u8)i;
13545                 else
13546                         *((u8 *)(dd->sc2vl) + i) = 0;
13547         }
13548 }
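
/*
 * Illustrative sketch only, not driver code: the default SC-to-VL
 * mapping that init_sc2vl_tables() above writes to the send and DC
 * tables and mirrors into dd->sc2vl.
 */
static inline u8 example_default_sc_to_vl(u8 sc)
{
	if (sc < 8)		/* SC 0-7 -> VL 0-7, respectively */
		return sc;
	if (sc == 15)		/* SC 15 -> VL 15 */
		return 15;
	return 0;		/* everything else -> VL 0 */
}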
13549
13550 /*
13551  * Read chip sizes and then reset parts to sane, disabled values.  We cannot
13552  * depend on the chip going through a power-on reset - a driver may be loaded
13553  * and unloaded many times.
13554  *
13555  * Do not write any CSR values to the chip in this routine - there may be
13556  * a reset following the (possible) FLR in this routine.
13557  *
13558  */
13559 static void init_chip(struct hfi1_devdata *dd)
13560 {
13561         int i;
13562
13563         /*
13564          * Put the HFI CSRs in a known state.
13565          * Combine this with a DC reset.
13566          *
13567          * Stop the device from doing anything while we do a
13568          * reset.  We know there are no other active users of
13569          * the device since we are now in charge.  Turn off
13570          * all outbound and inbound traffic and make sure
13571          * the device does not generate any interrupts.
13572          */
13573
13574         /* disable send contexts and SDMA engines */
13575         write_csr(dd, SEND_CTRL, 0);
13576         for (i = 0; i < dd->chip_send_contexts; i++)
13577                 write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
13578         for (i = 0; i < dd->chip_sdma_engines; i++)
13579                 write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
13580         /* disable port (turn off RXE inbound traffic) and contexts */
13581         write_csr(dd, RCV_CTRL, 0);
13582         for (i = 0; i < dd->chip_rcv_contexts; i++)
13583                 write_kctxt_csr(dd, i, RCV_CTXT_CTRL, 0);
13584         /* mask all interrupt sources */
13585         for (i = 0; i < CCE_NUM_INT_CSRS; i++)
13586                 write_csr(dd, CCE_INT_MASK + (8 * i), 0ull);
13587
13588         /*
13589          * DC Reset: do a full DC reset before the register clear.
13590          * A recommended length of time to hold is one CSR read,
13591          * so reread the CceDcCtrl.  Then, hold the DC in reset
13592          * across the clear.
13593          */
13594         write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_DC_RESET_SMASK);
13595         (void)read_csr(dd, CCE_DC_CTRL);
13596
13597         if (use_flr) {
13598                 /*
13599                  * A FLR will reset the SPC core and part of the PCIe.
13600                  * The parts that need to be restored have already been
13601                  * saved.
13602                  */
13603                 dd_dev_info(dd, "Resetting CSRs with FLR\n");
13604
13605                 /* do the FLR, the DC reset will remain */
13606                 hfi1_pcie_flr(dd);
13607
13608                 /* restore command and BARs */
13609                 restore_pci_variables(dd);
13610
13611                 if (is_ax(dd)) {
13612                         dd_dev_info(dd, "Resetting CSRs with FLR\n");
13613                         hfi1_pcie_flr(dd);
13614                         restore_pci_variables(dd);
13615                 }
13616         } else {
13617                 dd_dev_info(dd, "Resetting CSRs with writes\n");
13618                 reset_cce_csrs(dd);
13619                 reset_txe_csrs(dd);
13620                 reset_rxe_csrs(dd);
13621                 reset_misc_csrs(dd);
13622         }
13623         /* clear the DC reset */
13624         write_csr(dd, CCE_DC_CTRL, 0);
13625
13626         /* Set the LED off */
13627         setextled(dd, 0);
13628
13629         /*
13630          * Clear the QSFP reset.
13631          * An FLR enforces a 0 on all out pins. The driver does not touch
13632          * ASIC_QSFPn_OUT otherwise.  This leaves RESET_N low, holding
13633          * anything plugged in constantly in reset, if it pays attention
13634          * to RESET_N.
13635          * Prime examples of this are optical cables. Set all pins high.
13636          * I2CCLK and I2CDAT will change per direction, and INT_N and
13637          * MODPRS_N are input only and their value is ignored.
13638          */
13639         write_csr(dd, ASIC_QSFP1_OUT, 0x1f);
13640         write_csr(dd, ASIC_QSFP2_OUT, 0x1f);
13641         init_chip_resources(dd);
13642 }
13643
13644 static void init_early_variables(struct hfi1_devdata *dd)
13645 {
13646         int i;
13647
13648         /* assign link credit variables */
13649         dd->vau = CM_VAU;
13650         dd->link_credits = CM_GLOBAL_CREDITS;
13651         if (is_ax(dd))
13652                 dd->link_credits--;
13653         dd->vcu = cu_to_vcu(hfi1_cu);
13654         /* enough room for 8 MAD packets plus header - 17K */
13655         dd->vl15_init = (8 * (2048 + 128)) / vau_to_au(dd->vau);
13656         if (dd->vl15_init > dd->link_credits)
13657                 dd->vl15_init = dd->link_credits;
13658
13659         write_uninitialized_csrs_and_memories(dd);
13660
13661         if (HFI1_CAP_IS_KSET(PKEY_CHECK))
13662                 for (i = 0; i < dd->num_pports; i++) {
13663                         struct hfi1_pportdata *ppd = &dd->pport[i];
13664
13665                         set_partition_keys(ppd);
13666                 }
13667         init_sc2vl_tables(dd);
13668 }
13669
13670 static void init_kdeth_qp(struct hfi1_devdata *dd)
13671 {
13672         /* user changed the KDETH_QP */
13673         if (kdeth_qp != 0 && kdeth_qp >= 0xff) {
13674                 /* out of range or illegal value */
13675                 dd_dev_err(dd, "Invalid KDETH queue pair prefix, ignoring");
13676                 kdeth_qp = 0;
13677         }
13678         if (kdeth_qp == 0)      /* not set, or failed range check */
13679                 kdeth_qp = DEFAULT_KDETH_QP;
13680
13681         write_csr(dd, SEND_BTH_QP,
13682                   (kdeth_qp & SEND_BTH_QP_KDETH_QP_MASK) <<
13683                   SEND_BTH_QP_KDETH_QP_SHIFT);
13684
13685         write_csr(dd, RCV_BTH_QP,
13686                   (kdeth_qp & RCV_BTH_QP_KDETH_QP_MASK) <<
13687                   RCV_BTH_QP_KDETH_QP_SHIFT);
13688 }
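
/*
 * Illustrative sketch only, not driver code: the prefix written by
 * init_kdeth_qp() above sits in the upper byte of the 24-bit BTH QP
 * field (an 8-bit prefix, as the 0xff range check suggests), so
 * classifying a QPN as KDETH reduces to one compare.
 */
static inline bool example_is_kdeth_qpn(u32 qpn, u32 prefix)
{
	return ((qpn >> 16) & 0xff) == (prefix & 0xff);
}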
13689
13690 /**
13691  * init_qpmap_table
13692  * @dd - device data
13693  * @first_ctxt - first context
13694  * @last_ctxt - last context
13695  *
13696  * This routine sets the qpn mapping table that
13697  * is indexed by qpn[8:1].
13698  *
13699  * The routine will round robin the 256 settings
13700  * from first_ctxt to last_ctxt.
13701  *
13702  * The first/last looks ahead to having specialized
13703  * receive contexts for mgmt and bypass.  Normal
13704  * verbs traffic is assumed to be on a range
13705  * of receive contexts.
13706  */
13707 static void init_qpmap_table(struct hfi1_devdata *dd,
13708                              u32 first_ctxt,
13709                              u32 last_ctxt)
13710 {
13711         u64 reg = 0;
13712         u64 regno = RCV_QP_MAP_TABLE;
13713         int i;
13714         u64 ctxt = first_ctxt;
13715
13716         for (i = 0; i < 256; i++) {
13717                 reg |= ctxt << (8 * (i % 8));
13718                 ctxt++;
13719                 if (ctxt > last_ctxt)
13720                         ctxt = first_ctxt;
13721                 if (i % 8 == 7) {
13722                         write_csr(dd, regno, reg);
13723                         reg = 0;
13724                         regno += 8;
13725                 }
13726         }
13727
13728         add_rcvctrl(dd, RCV_CTRL_RCV_QP_MAP_ENABLE_SMASK
13729                         | RCV_CTRL_RCV_BYPASS_ENABLE_SMASK);
13730 }
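
/*
 * Illustrative sketch only, not driver code: the lookup implied by
 * the table init_qpmap_table() above writes - 256 one-byte context
 * entries packed eight per 64-bit register, indexed by qpn[8:1].
 */
static inline u8 example_qpmap_lookup(const u64 map[32], u32 qpn)
{
	u32 idx = (qpn >> 1) & 0xff;	/* qpn[8:1] selects the entry */

	return (map[idx / 8] >> ((idx % 8) * 8)) & 0xff;
}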
13731
13732 struct rsm_map_table {
13733         u64 map[NUM_MAP_REGS];
13734         unsigned int used;
13735 };
13736
13737 struct rsm_rule_data {
13738         u8 offset;
13739         u8 pkt_type;
13740         u32 field1_off;
13741         u32 field2_off;
13742         u32 index1_off;
13743         u32 index1_width;
13744         u32 index2_off;
13745         u32 index2_width;
13746         u32 mask1;
13747         u32 value1;
13748         u32 mask2;
13749         u32 value2;
13750 };
13751
13752 /*
13753  * Return an initialized RMT map table for users to fill in.  OK if it
13754  * returns NULL, indicating no table.
13755  */
13756 static struct rsm_map_table *alloc_rsm_map_table(struct hfi1_devdata *dd)
13757 {
13758         struct rsm_map_table *rmt;
13759         u8 rxcontext = is_ax(dd) ? 0 : 0xff;  /* 0 is the default on A0 h/w */
13760
13761         rmt = kmalloc(sizeof(*rmt), GFP_KERNEL);
13762         if (rmt) {
13763                 memset(rmt->map, rxcontext, sizeof(rmt->map));
13764                 rmt->used = 0;
13765         }
13766
13767         return rmt;
13768 }
13769
13770 /*
13771  * Write the final RMT map table to the chip and free the table.  OK if
13772  * table is NULL.
13773  */
13774 static void complete_rsm_map_table(struct hfi1_devdata *dd,
13775                                    struct rsm_map_table *rmt)
13776 {
13777         int i;
13778
13779         if (rmt) {
13780                 /* write table to chip */
13781                 for (i = 0; i < NUM_MAP_REGS; i++)
13782                         write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), rmt->map[i]);
13783
13784                 /* enable RSM */
13785                 add_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
13786         }
13787 }
13788
13789 /*
13790  * Add a receive side mapping rule.
13791  */
13792 static void add_rsm_rule(struct hfi1_devdata *dd, u8 rule_index,
13793                          struct rsm_rule_data *rrd)
13794 {
13795         write_csr(dd, RCV_RSM_CFG + (8 * rule_index),
13796                   (u64)rrd->offset << RCV_RSM_CFG_OFFSET_SHIFT |
13797                   1ull << rule_index | /* enable bit */
13798                   (u64)rrd->pkt_type << RCV_RSM_CFG_PACKET_TYPE_SHIFT);
13799         write_csr(dd, RCV_RSM_SELECT + (8 * rule_index),
13800                   (u64)rrd->field1_off << RCV_RSM_SELECT_FIELD1_OFFSET_SHIFT |
13801                   (u64)rrd->field2_off << RCV_RSM_SELECT_FIELD2_OFFSET_SHIFT |
13802                   (u64)rrd->index1_off << RCV_RSM_SELECT_INDEX1_OFFSET_SHIFT |
13803                   (u64)rrd->index1_width << RCV_RSM_SELECT_INDEX1_WIDTH_SHIFT |
13804                   (u64)rrd->index2_off << RCV_RSM_SELECT_INDEX2_OFFSET_SHIFT |
13805                   (u64)rrd->index2_width << RCV_RSM_SELECT_INDEX2_WIDTH_SHIFT);
13806         write_csr(dd, RCV_RSM_MATCH + (8 * rule_index),
13807                   (u64)rrd->mask1 << RCV_RSM_MATCH_MASK1_SHIFT |
13808                   (u64)rrd->value1 << RCV_RSM_MATCH_VALUE1_SHIFT |
13809                   (u64)rrd->mask2 << RCV_RSM_MATCH_MASK2_SHIFT |
13810                   (u64)rrd->value2 << RCV_RSM_MATCH_VALUE2_SHIFT);
13811 }
13812
13813 /* return the number of RSM map table entries that will be used for QOS */
13814 static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp,
13815                            unsigned int *np)
13816 {
13817         int i;
13818         unsigned int m, n;
13819         u8 max_by_vl = 0;
13820
13821         /* is QOS active at all? */
13822         if (dd->n_krcv_queues <= MIN_KERNEL_KCTXTS ||
13823             num_vls == 1 ||
13824             krcvqsset <= 1)
13825                 goto no_qos;
13826
13827         /* determine bits for qpn */
13828         for (i = 0; i < min_t(unsigned int, num_vls, krcvqsset); i++)
13829                 if (krcvqs[i] > max_by_vl)
13830                         max_by_vl = krcvqs[i];
13831         if (max_by_vl > 32)
13832                 goto no_qos;
13833         m = ilog2(__roundup_pow_of_two(max_by_vl));
13834
13835         /* determine bits for vl */
13836         n = ilog2(__roundup_pow_of_two(num_vls));
13837
13838         /* reject if too much is used */
13839         if ((m + n) > 7)
13840                 goto no_qos;
13841
13842         if (mp)
13843                 *mp = m;
13844         if (np)
13845                 *np = n;
13846
13847         return 1 << (m + n);
13848
13849 no_qos:
13850         if (mp)
13851                 *mp = 0;
13852         if (np)
13853                 *np = 0;
13854         return 0;
13855 }
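
/*
 * Illustrative sketch only, not driver code: the sizing arithmetic
 * of qos_rmt_entries() above, minus the limit checks.  For example,
 * a krcvqs maximum of 4 and num_vls of 8 gives m = 2, n = 3, so the
 * QOS rule consumes 1 << 5 = 32 map table entries.
 */
static inline unsigned int example_qos_rmt_size(unsigned int max_by_vl,
						unsigned int vls)
{
	unsigned int m = ilog2(__roundup_pow_of_two(max_by_vl)); /* qpn bits */
	unsigned int n = ilog2(__roundup_pow_of_two(vls));	 /* vl bits */

	return (m + n > 7) ? 0 : (1 << (m + n));	/* 0: no QOS */
}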
13856
13857 /**
13858  * init_qos - init RX qos
13859  * @dd - device data
13860  * @rmt - RSM map table
13861  *
13862  * This routine initializes Rule 0 and the RSM map table to implement
13863  * quality of service (qos).
13864  *
13865  * If all of the limit tests succeed, qos is applied based on the array
13866  * interpretation of krcvqs where entry 0 is VL0.
13867  *
13868  * The number of vl bits (n) and the number of qpn bits (m) are computed to
13869  * feed both the RSM map table and the single rule.
13870  */
13871 static void init_qos(struct hfi1_devdata *dd, struct rsm_map_table *rmt)
13872 {
13873         struct rsm_rule_data rrd;
13874         unsigned qpns_per_vl, ctxt, i, qpn, n = 1, m;
13875         unsigned int rmt_entries;
13876         u64 reg;
13877
13878         if (!rmt)
13879                 goto bail;
13880         rmt_entries = qos_rmt_entries(dd, &m, &n);
13881         if (rmt_entries == 0)
13882                 goto bail;
13883         qpns_per_vl = 1 << m;
13884
13885         /* enough room in the map table? */
13886         rmt_entries = 1 << (m + n);
13887         if (rmt->used + rmt_entries >= NUM_MAP_ENTRIES)
13888                 goto bail;
13889
13890         /* add qos entries to the RSM map table */
13891         for (i = 0, ctxt = FIRST_KERNEL_KCTXT; i < num_vls; i++) {
13892                 unsigned tctxt;
13893
13894                 for (qpn = 0, tctxt = ctxt;
13895                      krcvqs[i] && qpn < qpns_per_vl; qpn++) {
13896                         unsigned idx, regoff, regidx;
13897
13898                         /* generate the index the hardware will produce */
13899                         idx = rmt->used + ((qpn << n) ^ i);
13900                         regoff = (idx % 8) * 8;
13901                         regidx = idx / 8;
13902                         /* replace default with context number */
13903                         reg = rmt->map[regidx];
13904                         reg &= ~(RCV_RSM_MAP_TABLE_RCV_CONTEXT_A_MASK
13905                                 << regoff);
13906                         reg |= (u64)(tctxt++) << regoff;
13907                         rmt->map[regidx] = reg;
13908                         if (tctxt == ctxt + krcvqs[i])
13909                                 tctxt = ctxt;
13910                 }
13911                 ctxt += krcvqs[i];
13912         }
13913
13914         rrd.offset = rmt->used;
13915         rrd.pkt_type = 2;
13916         rrd.field1_off = LRH_BTH_MATCH_OFFSET;
13917         rrd.field2_off = LRH_SC_MATCH_OFFSET;
13918         rrd.index1_off = LRH_SC_SELECT_OFFSET;
13919         rrd.index1_width = n;
13920         rrd.index2_off = QPN_SELECT_OFFSET;
13921         rrd.index2_width = m + n;
13922         rrd.mask1 = LRH_BTH_MASK;
13923         rrd.value1 = LRH_BTH_VALUE;
13924         rrd.mask2 = LRH_SC_MASK;
13925         rrd.value2 = LRH_SC_VALUE;
13926
13927         /* add rule 0 */
13928         add_rsm_rule(dd, 0, &rrd);
13929
13930         /* mark RSM map entries as used */
13931         rmt->used += rmt_entries;
13932         /* map everything else to the mcast/err/vl15 context */
13933         init_qpmap_table(dd, HFI1_CTRL_CTXT, HFI1_CTRL_CTXT);
13934         dd->qos_shift = n + 1;
13935         return;
13936 bail:
13937         dd->qos_shift = 1;
13938         init_qpmap_table(dd, FIRST_KERNEL_KCTXT, dd->n_krcv_queues - 1);
13939 }
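
/*
 * Illustrative sketch only, not driver code: the map index built in
 * init_qos() above.  The rule extracts n SC bits and m+n QPN bits;
 * XOR-folding the vl into the low bits of the shifted qpn spreads
 * consecutive QPNs of one VL over that VL's kernel contexts.
 */
static inline unsigned int example_qos_rsm_index(unsigned int base,
						 unsigned int qpn,
						 unsigned int vl,
						 unsigned int n)
{
	/* mirrors idx = rmt->used + ((qpn << n) ^ i) in init_qos() */
	return base + ((qpn << n) ^ vl);
}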
13940
13941 static void init_user_fecn_handling(struct hfi1_devdata *dd,
13942                                     struct rsm_map_table *rmt)
13943 {
13944         struct rsm_rule_data rrd;
13945         u64 reg;
13946         int i, idx, regoff, regidx;
13947         u8 offset;
13948
13949         /* there needs to be enough room in the map table */
13950         if (rmt->used + dd->num_user_contexts >= NUM_MAP_ENTRIES) {
13951                 dd_dev_err(dd, "User FECN handling disabled - too many user contexts allocated\n");
13952                 return;
13953         }
13954
13955         /*
13956          * RSM will extract the destination context as an index into the
13957          * map table.  The destination contexts are a sequential block
13958          * in the range first_user_ctxt...num_rcv_contexts-1 (inclusive).
13959          * Map entries are accessed as offset + extracted value.  Adjust
13960          * the added offset so this sequence can be placed anywhere in
13961          * the table - as long as the entries themselves do not wrap.
13962          * There are only enough bits in offset for the table size, so
13963          * start with that to allow for a "negative" offset.
13964          */
13965         offset = (u8)(NUM_MAP_ENTRIES + (int)rmt->used -
13966                                                 (int)dd->first_user_ctxt);
13967
13968         for (i = dd->first_user_ctxt, idx = rmt->used;
13969                                 i < dd->num_rcv_contexts; i++, idx++) {
13970                 /* replace with identity mapping */
13971                 regoff = (idx % 8) * 8;
13972                 regidx = idx / 8;
13973                 reg = rmt->map[regidx];
13974                 reg &= ~(RCV_RSM_MAP_TABLE_RCV_CONTEXT_A_MASK << regoff);
13975                 reg |= (u64)i << regoff;
13976                 rmt->map[regidx] = reg;
13977         }
13978
13979         /*
13980          * For RSM intercept of Expected FECN packets:
13981          * o packet type 0 - expected
13982          * o match on F (bit 95), using select/match 1, and
13983          * o match on SH (bit 133), using select/match 2.
13984          *
13985          * Use index 1 to extract the 8-bit receive context from DestQP
13986          * (start at bit 64).  Use that as the RSM map table index.
13987          */
13988         rrd.offset = offset;
13989         rrd.pkt_type = 0;
13990         rrd.field1_off = 95;
13991         rrd.field2_off = 133;
13992         rrd.index1_off = 64;
13993         rrd.index1_width = 8;
13994         rrd.index2_off = 0;
13995         rrd.index2_width = 0;
13996         rrd.mask1 = 1;
13997         rrd.value1 = 1;
13998         rrd.mask2 = 1;
13999         rrd.value2 = 1;
14000
14001         /* add rule 1 */
14002         add_rsm_rule(dd, 1, &rrd);
14003
14004         rmt->used += dd->num_user_contexts;
14005 }
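
/*
 * Illustrative sketch only, not driver code: the "negative" offset
 * trick in init_user_fecn_handling() above, assuming a 256-entry
 * table as the u8 offset width suggests.  Because the addition wraps
 * mod 256, extracted context c lands on map entry
 * rmt->used + (c - first_user_ctxt) even though offset itself can
 * never hold a negative value.
 */
static inline u8 example_fecn_map_index(u8 offset, u8 extracted_ctxt)
{
	return offset + extracted_ctxt;	/* u8 arithmetic wraps mod 256 */
}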
14006
14007 static void init_rxe(struct hfi1_devdata *dd)
14008 {
14009         struct rsm_map_table *rmt;
14010
14011         /* enable all receive errors */
14012         write_csr(dd, RCV_ERR_MASK, ~0ull);
14013
14014         rmt = alloc_rsm_map_table(dd);
14015         /* set up QOS, including the QPN map table */
14016         init_qos(dd, rmt);
14017         init_user_fecn_handling(dd, rmt);
14018         complete_rsm_map_table(dd, rmt);
14019         kfree(rmt);
14020
14021         /*
14022          * make sure RcvCtrl.RcvWcb <= PCIe Device Control
14023          * Register Max_Payload_Size (PCI_EXP_DEVCTL in Linux PCIe config
14024          * space, PciCfgCap2.MaxPayloadSize in HFI).  There is only one
14025          * invalid configuration: RcvCtrl.RcvWcb set to its max of 256 and
14026          * Max_PayLoad_Size set to its minimum of 128.
14027          * Max_Payload_Size set to its minimum of 128.
14028          * Presently, RcvCtrl.RcvWcb is not modified from its default of 0
14029          * (64 bytes).  Max_Payload_Size is possibly modified upward in
14030          * tune_pcie_caps() which is called after this routine.
14031          */
14032 }
14033
14034 static void init_other(struct hfi1_devdata *dd)
14035 {
14036         /* enable all CCE errors */
14037         write_csr(dd, CCE_ERR_MASK, ~0ull);
14038         /* enable *some* Misc errors */
14039         write_csr(dd, MISC_ERR_MASK, DRIVER_MISC_MASK);
14040         /* enable all DC errors, except LCB */
14041         write_csr(dd, DCC_ERR_FLG_EN, ~0ull);
14042         write_csr(dd, DC_DC8051_ERR_EN, ~0ull);
14043 }
14044
14045 /*
14046  * Fill out the given AU table using the given CU.  A CU is defined in terms
14047  * of AUs.  The table is an encoding: given the index, how many AUs does that
14048  * represent?
14049  *
14050  * NOTE: Assumes that the register layout is the same for the
14051  * local and remote tables.
14052  */
14053 static void assign_cm_au_table(struct hfi1_devdata *dd, u32 cu,
14054                                u32 csr0to3, u32 csr4to7)
14055 {
14056         write_csr(dd, csr0to3,
14057                   0ull << SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE0_SHIFT |
14058                   1ull << SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE1_SHIFT |
14059                   2ull * cu <<
14060                   SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE2_SHIFT |
14061                   4ull * cu <<
14062                   SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE3_SHIFT);
14063         write_csr(dd, csr4to7,
14064                   8ull * cu <<
14065                   SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE4_SHIFT |
14066                   16ull * cu <<
14067                   SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE5_SHIFT |
14068                   32ull * cu <<
14069                   SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE6_SHIFT |
14070                   64ull * cu <<
14071                   SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE7_SHIFT);
14072 }
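
/*
 * Illustrative sketch only, not driver code: the encoding programmed
 * by assign_cm_au_table() above.  Entry 0 is 0 AUs, entry 1 is 1 AU,
 * and entries 2-7 are cu * 2^(index - 1) AUs.
 */
static inline u32 example_au_table_entry(u32 cu, u32 index)
{
	if (index < 2)
		return index;		/* entries 0 and 1 are fixed */
	return cu << (index - 1);	/* 2*cu, 4*cu, ..., 64*cu */
}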
14073
14074 static void assign_local_cm_au_table(struct hfi1_devdata *dd, u8 vcu)
14075 {
14076         assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_LOCAL_AU_TABLE0_TO3,
14077                            SEND_CM_LOCAL_AU_TABLE4_TO7);
14078 }
14079
14080 void assign_remote_cm_au_table(struct hfi1_devdata *dd, u8 vcu)
14081 {
14082         assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_REMOTE_AU_TABLE0_TO3,
14083                            SEND_CM_REMOTE_AU_TABLE4_TO7);
14084 }
14085
14086 static void init_txe(struct hfi1_devdata *dd)
14087 {
14088         int i;
14089
14090         /* enable all PIO, SDMA, general, and Egress errors */
14091         write_csr(dd, SEND_PIO_ERR_MASK, ~0ull);
14092         write_csr(dd, SEND_DMA_ERR_MASK, ~0ull);
14093         write_csr(dd, SEND_ERR_MASK, ~0ull);
14094         write_csr(dd, SEND_EGRESS_ERR_MASK, ~0ull);
14095
14096         /* enable all per-context and per-SDMA engine errors */
14097         for (i = 0; i < dd->chip_send_contexts; i++)
14098                 write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, ~0ull);
14099         for (i = 0; i < dd->chip_sdma_engines; i++)
14100                 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, ~0ull);
14101
14102         /* set the local CU to AU mapping */
14103         assign_local_cm_au_table(dd, dd->vcu);
14104
14105         /*
14106          * Set reasonable default for Credit Return Timer
14107          * Don't set on Simulator - causes it to choke.
14108          */
14109         if (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)
14110                 write_csr(dd, SEND_CM_TIMER_CTRL, HFI1_CREDIT_RETURN_RATE);
14111 }
14112
14113 int hfi1_set_ctxt_jkey(struct hfi1_devdata *dd, unsigned ctxt, u16 jkey)
14114 {
14115         struct hfi1_ctxtdata *rcd = dd->rcd[ctxt];
14116         unsigned sctxt;
14117         int ret = 0;
14118         u64 reg;
14119
14120         if (!rcd || !rcd->sc) {
14121                 ret = -EINVAL;
14122                 goto done;
14123         }
14124         sctxt = rcd->sc->hw_context;
14125         reg = SEND_CTXT_CHECK_JOB_KEY_MASK_SMASK | /* mask is always 1's */
14126                 ((jkey & SEND_CTXT_CHECK_JOB_KEY_VALUE_MASK) <<
14127                  SEND_CTXT_CHECK_JOB_KEY_VALUE_SHIFT);
14128         /* JOB_KEY_ALLOW_PERMISSIVE is not allowed by default */
14129         if (HFI1_CAP_KGET_MASK(rcd->flags, ALLOW_PERM_JKEY))
14130                 reg |= SEND_CTXT_CHECK_JOB_KEY_ALLOW_PERMISSIVE_SMASK;
14131         write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_JOB_KEY, reg);
14132         /*
14133          * Enable send-side J_KEY integrity check, unless this is A0 h/w
14134          */
14135         if (!is_ax(dd)) {
14136                 reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
14137                 reg |= SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
14138                 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
14139         }
14140
14141         /* Enable J_KEY check on receive context. */
14142         reg = RCV_KEY_CTRL_JOB_KEY_ENABLE_SMASK |
14143                 ((jkey & RCV_KEY_CTRL_JOB_KEY_VALUE_MASK) <<
14144                  RCV_KEY_CTRL_JOB_KEY_VALUE_SHIFT);
14145         write_kctxt_csr(dd, ctxt, RCV_KEY_CTRL, reg);
14146 done:
14147         return ret;
14148 }
14149
14150 int hfi1_clear_ctxt_jkey(struct hfi1_devdata *dd, unsigned ctxt)
14151 {
14152         struct hfi1_ctxtdata *rcd = dd->rcd[ctxt];
14153         unsigned sctxt;
14154         int ret = 0;
14155         u64 reg;
14156
14157         if (!rcd || !rcd->sc) {
14158                 ret = -EINVAL;
14159                 goto done;
14160         }
14161         sctxt = rcd->sc->hw_context;
14162         write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_JOB_KEY, 0);
14163         /*
14164          * Disable send-side J_KEY integrity check, unless this is A0 h/w.
14165          * This check would not have been enabled for A0 h/w, see
14166          * set_ctxt_jkey().
14167          */
14168         if (!is_ax(dd)) {
14169                 reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
14170                 reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
14171                 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
14172         }
14173         /* Turn off the J_KEY on the receive side */
14174         write_kctxt_csr(dd, ctxt, RCV_KEY_CTRL, 0);
14175 done:
14176         return ret;
14177 }
14178
14179 int hfi1_set_ctxt_pkey(struct hfi1_devdata *dd, unsigned ctxt, u16 pkey)
14180 {
14181         struct hfi1_ctxtdata *rcd;
14182         unsigned sctxt;
14183         int ret = 0;
14184         u64 reg;
14185
14186         if (ctxt < dd->num_rcv_contexts) {
14187                 rcd = dd->rcd[ctxt];
14188         } else {
14189                 ret = -EINVAL;
14190                 goto done;
14191         }
14192         if (!rcd || !rcd->sc) {
14193                 ret = -EINVAL;
14194                 goto done;
14195         }
14196         sctxt = rcd->sc->hw_context;
14197         reg = ((u64)pkey & SEND_CTXT_CHECK_PARTITION_KEY_VALUE_MASK) <<
14198                 SEND_CTXT_CHECK_PARTITION_KEY_VALUE_SHIFT;
14199         write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_PARTITION_KEY, reg);
14200         reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
14201         reg |= SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
14202         reg &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_KDETH_PACKETS_SMASK;
14203         write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
14204 done:
14205         return ret;
14206 }
14207
14208 int hfi1_clear_ctxt_pkey(struct hfi1_devdata *dd, unsigned ctxt)
14209 {
14210         struct hfi1_ctxtdata *rcd;
14211         unsigned sctxt;
14212         int ret = 0;
14213         u64 reg;
14214
14215         if (ctxt < dd->num_rcv_contexts) {
14216                 rcd = dd->rcd[ctxt];
14217         } else {
14218                 ret = -EINVAL;
14219                 goto done;
14220         }
14221         if (!rcd || !rcd->sc) {
14222                 ret = -EINVAL;
14223                 goto done;
14224         }
14225         sctxt = rcd->sc->hw_context;
14226         reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
14227         reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
14228         write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
14229         write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_PARTITION_KEY, 0);
14230 done:
14231         return ret;
14232 }
14233
14234 /*
14235  * Start cleaning up the chip. Our clean up happens in multiple
14236  * stages and this is just the first.
14237  */
14238 void hfi1_start_cleanup(struct hfi1_devdata *dd)
14239 {
14240         aspm_exit(dd);
14241         free_cntrs(dd);
14242         free_rcverr(dd);
14243         clean_up_interrupts(dd);
14244         finish_chip_resources(dd);
14245 }
14246
14247 #define HFI_BASE_GUID(dev) \
14248         ((dev)->base_guid & ~(1ULL << GUID_HFI_INDEX_SHIFT))
14249
14250 /*
14251  * Information can be shared between the two HFIs on the same ASIC
14252  * in the same OS.  This function finds the peer device and sets
14253  * up a shared structure.
14254  */
14255 static int init_asic_data(struct hfi1_devdata *dd)
14256 {
14257         unsigned long flags;
14258         struct hfi1_devdata *tmp, *peer = NULL;
14259         struct hfi1_asic_data *asic_data;
14260         int ret = 0;
14261
14262         /* pre-allocate the asic structure in case we are the first device */
14263         asic_data = kzalloc(sizeof(*dd->asic_data), GFP_KERNEL);
14264         if (!asic_data)
14265                 return -ENOMEM;
14266
14267         spin_lock_irqsave(&hfi1_devs_lock, flags);
14268         /* Find our peer device */
14269         list_for_each_entry(tmp, &hfi1_dev_list, list) {
14270                 if ((HFI_BASE_GUID(dd) == HFI_BASE_GUID(tmp)) &&
14271                     dd->unit != tmp->unit) {
14272                         peer = tmp;
14273                         break;
14274                 }
14275         }
14276
14277         if (peer) {
14278                 /* use already allocated structure */
14279                 dd->asic_data = peer->asic_data;
14280                 kfree(asic_data);
14281         } else {
14282                 dd->asic_data = asic_data;
14283                 mutex_init(&dd->asic_data->asic_resource_mutex);
14284         }
14285         dd->asic_data->dds[dd->hfi1_id] = dd; /* self back-pointer */
14286         spin_unlock_irqrestore(&hfi1_devs_lock, flags);
14287
14288         /* first one through - set up i2c devices */
14289         if (!peer)
14290                 ret = set_up_i2c(dd, dd->asic_data);
14291
14292         return ret;
14293 }
14294
14295 /*
14296  * Set dd->boardname.  Use a generic name if a name is not returned from
14297  * EFI variable space.
14298  *
14299  * Return 0 on success, -ENOMEM if space could not be allocated.
14300  */
14301 static int obtain_boardname(struct hfi1_devdata *dd)
14302 {
14303         /* generic board description */
14304         const char generic[] =
14305                 "Intel Omni-Path Host Fabric Interface Adapter 100 Series";
14306         unsigned long size;
14307         int ret;
14308
14309         ret = read_hfi1_efi_var(dd, "description", &size,
14310                                 (void **)&dd->boardname);
14311         if (ret) {
14312                 dd_dev_info(dd, "Board description not found\n");
14313                 /* use generic description */
14314                 dd->boardname = kstrdup(generic, GFP_KERNEL);
14315                 if (!dd->boardname)
14316                         return -ENOMEM;
14317         }
14318         return 0;
14319 }
14320
14321 /*
14322  * Check the interrupt registers to make sure that they are mapped correctly.
14323  * It is intended to help the user identify any mismapping by the VMM
14324  * when the driver is running in a VM. This function should only be
14325  * called before interrupts are set up.
14326  *
14327  * Return 0 on success, -EINVAL on failure.
14328  */
14329 static int check_int_registers(struct hfi1_devdata *dd)
14330 {
14331         u64 reg;
14332         u64 all_bits = ~(u64)0;
14333         u64 mask;
14334
14335         /* Clear CceIntMask[0] to avoid raising any interrupts */
14336         mask = read_csr(dd, CCE_INT_MASK);
14337         write_csr(dd, CCE_INT_MASK, 0ull);
14338         reg = read_csr(dd, CCE_INT_MASK);
14339         if (reg)
14340                 goto err_exit;
14341
14342         /* Clear all interrupt status bits */
14343         write_csr(dd, CCE_INT_CLEAR, all_bits);
14344         reg = read_csr(dd, CCE_INT_STATUS);
14345         if (reg)
14346                 goto err_exit;
14347
14348         /* Set all interrupt status bits */
14349         write_csr(dd, CCE_INT_FORCE, all_bits);
14350         reg = read_csr(dd, CCE_INT_STATUS);
14351         if (reg != all_bits)
14352                 goto err_exit;
14353
14354         /* Restore the interrupt mask */
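              /* (clear the status bits forced above so nothing fires on unmask) */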
14355         write_csr(dd, CCE_INT_CLEAR, all_bits);
14356         write_csr(dd, CCE_INT_MASK, mask);
14357
14358         return 0;
14359 err_exit:
14360         write_csr(dd, CCE_INT_MASK, mask);
14361         dd_dev_err(dd, "Interrupt registers not properly mapped by VMM\n");
14362         return -EINVAL;
14363 }
14364
14365 /**
14366  * hfi1_init_dd - allocate and initialize the device structure for the hfi
14367  * @pdev: the pci_dev for the hfi1_ib device
14368  * @ent: pci_device_id struct for this dev
14369  *
14370  * Also allocates, initializes, and returns the devdata struct for this
14371  * device instance
14372  *
14373  * This is global, and is called directly at init to set up the
14374  * chip-specific function pointers for later use.
14375  */
14376 struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
14377                                   const struct pci_device_id *ent)
14378 {
14379         struct hfi1_devdata *dd;
14380         struct hfi1_pportdata *ppd;
14381         u64 reg;
14382         int i, ret;
14383         static const char * const inames[] = { /* implementation names */
14384                 "RTL silicon",
14385                 "RTL VCS simulation",
14386                 "RTL FPGA emulation",
14387                 "Functional simulator"
14388         };
14389         struct pci_dev *parent = pdev->bus->self;
14390
14391         dd = hfi1_alloc_devdata(pdev, NUM_IB_PORTS *
14392                                 sizeof(struct hfi1_pportdata));
14393         if (IS_ERR(dd))
14394                 goto bail;
14395         ppd = dd->pport;
14396         for (i = 0; i < dd->num_pports; i++, ppd++) {
14397                 int vl;
14398                 /* init common fields */
14399                 hfi1_init_pportdata(pdev, ppd, dd, 0, 1);
14400                 /* DC supports 4 link widths */
14401                 ppd->link_width_supported =
14402                         OPA_LINK_WIDTH_1X | OPA_LINK_WIDTH_2X |
14403                         OPA_LINK_WIDTH_3X | OPA_LINK_WIDTH_4X;
14404                 ppd->link_width_downgrade_supported =
14405                         ppd->link_width_supported;
14406                 /* start out enabling only 4X */
14407                 ppd->link_width_enabled = OPA_LINK_WIDTH_4X;
14408                 ppd->link_width_downgrade_enabled =
14409                                         ppd->link_width_downgrade_supported;
14410                 /* link width active is 0 when link is down */
14411                 /* link width downgrade active is 0 when link is down */
14412
14413                 if (num_vls < HFI1_MIN_VLS_SUPPORTED ||
14414                     num_vls > HFI1_MAX_VLS_SUPPORTED) {
14415                         hfi1_early_err(&pdev->dev,
14416                                        "Invalid num_vls %u, using %u VLs\n",
14417                                        num_vls, HFI1_MAX_VLS_SUPPORTED);
14418                         num_vls = HFI1_MAX_VLS_SUPPORTED;
14419                 }
14420                 ppd->vls_supported = num_vls;
14421                 ppd->vls_operational = ppd->vls_supported;
14422                 ppd->actual_vls_operational = ppd->vls_supported;
14423                 /* Set the default MTU. */
14424                 for (vl = 0; vl < num_vls; vl++)
14425                         dd->vld[vl].mtu = hfi1_max_mtu;
14426                 dd->vld[15].mtu = MAX_MAD_PACKET;
14427                 /*
14428                  * Set the initial values to reasonable defaults; they will
14429                  * be set for real when the link comes up.
14430                  */
14431                 ppd->lstate = IB_PORT_DOWN;
14432                 ppd->overrun_threshold = 0x4;
14433                 ppd->phy_error_threshold = 0xf;
14434                 ppd->port_crc_mode_enabled = link_crc_mask;
14435                 /* initialize supported LTP CRC mode */
14436                 ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8;
14437                 /* initialize enabled LTP CRC mode */
14438                 ppd->port_ltp_crc_mode |= cap_to_port_ltp(link_crc_mask) << 4;
14439                 /* start in offline */
14440                 ppd->host_link_state = HLS_DN_OFFLINE;
14441                 init_vl_arb_caches(ppd);
14442                 ppd->last_pstate = 0xff; /* invalid value */
14443         }
14444
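              /* by default, bring a down link to polling so it can retrain */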
14445         dd->link_default = HLS_DN_POLL;
14446
14447         /*
14448          * Do remaining PCIe setup and save PCIe values in dd.
14449          * Any error printing is already done by the init code.
14450          * On return, we have the chip mapped.
14451          */
14452         ret = hfi1_pcie_ddinit(dd, pdev);
14453         if (ret < 0)
14454                 goto bail_free;
14455
14456         /* verify that reads actually work, save revision for reset check */
14457         dd->revision = read_csr(dd, CCE_REVISION);
14458         if (dd->revision == ~(u64)0) {
14459                 dd_dev_err(dd, "cannot read chip CSRs\n");
14460                 ret = -EINVAL;
14461                 goto bail_cleanup;
14462         }
14463         dd->majrev = (dd->revision >> CCE_REVISION_CHIP_REV_MAJOR_SHIFT)
14464                         & CCE_REVISION_CHIP_REV_MAJOR_MASK;
14465         dd->minrev = (dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT)
14466                         & CCE_REVISION_CHIP_REV_MINOR_MASK;
14467
14468         /*
14469          * Check interrupt registers mapping if the driver has no access to
14470          * the upstream component. In this case, it is likely that the driver
14471          * is running in a VM.
14472          */
14473         if (!parent) {
14474                 ret = check_int_registers(dd);
14475                 if (ret)
14476                         goto bail_cleanup;
14477         }
14478
14479         /*
14480          * obtain the hardware ID - NOT related to unit, which is a
14481          * software enumeration
14482          */
14483         reg = read_csr(dd, CCE_REVISION2);
14484         dd->hfi1_id = (reg >> CCE_REVISION2_HFI_ID_SHIFT)
14485                                         & CCE_REVISION2_HFI_ID_MASK;
14486         /* the narrower destination fields truncate the unwanted upper bits */
14487         dd->icode = reg >> CCE_REVISION2_IMPL_CODE_SHIFT;
14488         dd->irev = reg >> CCE_REVISION2_IMPL_REVISION_SHIFT;
14489         dd_dev_info(dd, "Implementation: %s, revision 0x%x\n",
14490                     dd->icode < ARRAY_SIZE(inames) ?
14491                     inames[dd->icode] : "unknown", (int)dd->irev);
14492
14493         /* speeds the hardware can support */
14494         dd->pport->link_speed_supported = OPA_LINK_SPEED_25G;
14495         /* speeds allowed to run at */
14496         dd->pport->link_speed_enabled = dd->pport->link_speed_supported;
14497         /* give a reasonable active value; set for real on link up */
14498         dd->pport->link_speed_active = OPA_LINK_SPEED_25G;
14499
14500         dd->chip_rcv_contexts = read_csr(dd, RCV_CONTEXTS);
14501         dd->chip_send_contexts = read_csr(dd, SEND_CONTEXTS);
14502         dd->chip_sdma_engines = read_csr(dd, SEND_DMA_ENGINES);
14503         dd->chip_pio_mem_size = read_csr(dd, SEND_PIO_MEM_SIZE);
14504         dd->chip_sdma_mem_size = read_csr(dd, SEND_DMA_MEM_SIZE);
14505         /* fix up link widths for emulation _p */
14506         ppd = dd->pport;
14507         if (dd->icode == ICODE_FPGA_EMULATION && is_emulator_p(dd)) {
14508                 ppd->link_width_supported =
14509                         ppd->link_width_enabled =
14510                         ppd->link_width_downgrade_supported =
14511                         ppd->link_width_downgrade_enabled =
14512                                 OPA_LINK_WIDTH_1X;
14513         }
14514         /* ensure num_vls isn't larger than the number of sdma engines */
14515         if (HFI1_CAP_IS_KSET(SDMA) && num_vls > dd->chip_sdma_engines) {
14516                 dd_dev_err(dd, "num_vls %u too large, using %u VLs\n",
14517                            num_vls, dd->chip_sdma_engines);
14518                 num_vls = dd->chip_sdma_engines;
14519                 ppd->vls_supported = dd->chip_sdma_engines;
14520                 ppd->vls_operational = ppd->vls_supported;
14521         }
14522
14523         /*
14524          * Convert the ns parameter to the 64 * cclocks used in the CSR.
14525          * Limit the max if larger than the field holds.  If timeout is
14526          * non-zero, then the calculated field will be at least 1.
14527          *
14528          * Must be after icode is set up - the cclock rate depends
14529          * on knowing the hardware being used.
14530          */
14531         dd->rcv_intr_timeout_csr = ns_to_cclock(dd, rcv_intr_timeout) / 64;
14532         if (dd->rcv_intr_timeout_csr >
14533                         RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK)
14534                 dd->rcv_intr_timeout_csr =
14535                         RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK;
14536         else if (dd->rcv_intr_timeout_csr == 0 && rcv_intr_timeout)
14537                 dd->rcv_intr_timeout_csr = 1;
14538
14539         /* needs to be done before we look for the peer device */
14540         read_guid(dd);
14541
14542         /* set up shared ASIC data with peer device */
14543         ret = init_asic_data(dd);
14544         if (ret)
14545                 goto bail_cleanup;
14546
14547         /* obtain chip sizes, reset chip CSRs */
14548         init_chip(dd);
14549
14550         /* read in the PCIe link speed information */
14551         ret = pcie_speeds(dd);
14552         if (ret)
14553                 goto bail_cleanup;
14554
14555         /* call before get_platform_config(), after init_chip_resources() */
14556         ret = eprom_init(dd);
14557         if (ret)
14558                 goto bail_free_rcverr;
14559
14560         /* Needs to be called before hfi1_firmware_init */
14561         get_platform_config(dd);
14562
14563         /* read in firmware */
14564         ret = hfi1_firmware_init(dd);
14565         if (ret)
14566                 goto bail_cleanup;
14567
14568         /*
14569          * In general, the PCIe Gen3 transition must occur after the
14570          * chip has been idled (so it won't initiate any PCIe transactions
14571          * e.g. an interrupt) and before the driver changes any registers
14572          * (the transition will reset the registers).
14573          *
14574          * In particular, place this call after:
14575          * - init_chip()     - the chip will not initiate any PCIe transactions
14576          * - pcie_speeds()   - reads the current link speed
14577          * - hfi1_firmware_init() - the needed firmware is ready to be
14578          *                          downloaded
14579          */
14580         ret = do_pcie_gen3_transition(dd);
14581         if (ret)
14582                 goto bail_cleanup;
14583
14584         /* start setting dd values and adjusting CSRs */
14585         init_early_variables(dd);
14586
14587         parse_platform_config(dd);
14588
14589         ret = obtain_boardname(dd);
14590         if (ret)
14591                 goto bail_cleanup;
14592
14593         snprintf(dd->boardversion, BOARD_VERS_MAX,
14594                  "ChipABI %u.%u, ChipRev %u.%u, SW Compat %llu\n",
14595                  HFI1_CHIP_VERS_MAJ, HFI1_CHIP_VERS_MIN,
14596                  (u32)dd->majrev,
14597                  (u32)dd->minrev,
14598                  (dd->revision >> CCE_REVISION_SW_SHIFT)
14599                     & CCE_REVISION_SW_MASK);
14600
14601         ret = set_up_context_variables(dd);
14602         if (ret)
14603                 goto bail_cleanup;
14604
14605         /* set initial RXE CSRs */
14606         init_rxe(dd);
14607         /* set initial TXE CSRs */
14608         init_txe(dd);
14609         /* set initial non-RXE, non-TXE CSRs */
14610         init_other(dd);
14611         /* set up KDETH QP prefix in both RX and TX CSRs */
14612         init_kdeth_qp(dd);
14613
14614         ret = hfi1_dev_affinity_init(dd);
14615         if (ret)
14616                 goto bail_cleanup;
14617
14618         /* send contexts must be set up before receive contexts */
14619         ret = init_send_contexts(dd);
14620         if (ret)
14621                 goto bail_cleanup;
14622
14623         ret = hfi1_create_ctxts(dd);
14624         if (ret)
14625                 goto bail_cleanup;
14626
14627         dd->rcvhdrsize = DEFAULT_RCVHDRSIZE;
14628         /*
14629          * rcd[0] is guaranteed to be valid by this point. Also, all
14630          * contexts use the same value, as per the module parameter.
14631          * The RHF occupies the last 2 dwords of each entry. */
14632         dd->rhf_offset = dd->rcd[0]->rcvhdrqentsize - sizeof(u64) / sizeof(u32);
14633
14634         ret = init_pervl_scs(dd);
14635         if (ret)
14636                 goto bail_cleanup;
14637
14638         /* sdma init */
14639         for (i = 0; i < dd->num_pports; ++i) {
14640                 ret = sdma_init(dd, i);
14641                 if (ret)
14642                         goto bail_cleanup;
14643         }
14644
14645         /* use contexts created by hfi1_create_ctxts */
14646         ret = set_up_interrupts(dd);
14647         if (ret)
14648                 goto bail_cleanup;
14649
14650         /* set up LCB access - must be after set_up_interrupts() */
14651         init_lcb_access(dd);
14652
14653         /*
14654          * Serial number is created from the base guid:
14655          * [27:24] = base guid [38:35]
14656          * [23: 0] = base guid [23: 0]
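               * (the '>> 11' below moves base guid bit 35 down to bit 24)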
14657          */
14658         snprintf(dd->serial, SERIAL_MAX, "0x%08llx\n",
14659                  (dd->base_guid & 0xFFFFFF) |
14660                      ((dd->base_guid >> 11) & 0xF000000));
14661
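              /* the OUI is the top three bytes of the EUI-64 base guid */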
14662         dd->oui1 = dd->base_guid >> 56 & 0xFF;
14663         dd->oui2 = dd->base_guid >> 48 & 0xFF;
14664         dd->oui3 = dd->base_guid >> 40 & 0xFF;
14665
14666         ret = load_firmware(dd); /* asymmetric with dispose_firmware() */
14667         if (ret)
14668                 goto bail_clear_intr;
14669
14670         thermal_init(dd);
14671
14672         ret = init_cntrs(dd);
14673         if (ret)
14674                 goto bail_clear_intr;
14675
14676         ret = init_rcverr(dd);
14677         if (ret)
14678                 goto bail_free_cntrs;
14679
14680         init_completion(&dd->user_comp);
14681
14682         /* The user refcount starts with one to indicate an active device */
14683         atomic_set(&dd->user_refcount, 1);
14684
14685         goto bail;
14686
14687 bail_free_rcverr:
14688         free_rcverr(dd);
14689 bail_free_cntrs:
14690         free_cntrs(dd);
14691 bail_clear_intr:
14692         clean_up_interrupts(dd);
14693 bail_cleanup:
14694         hfi1_pcie_ddcleanup(dd);
14695 bail_free:
14696         hfi1_free_devdata(dd);
14697         dd = ERR_PTR(ret);
14698 bail:
14699         return dd;
14700 }
14701
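      /*
       * Compute the PBC static rate control value: the extra cycles
       * needed so a dw_len dword packet egresses at the desired rate
       * rather than the (faster) current egress rate.
       */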
14702 static u16 delay_cycles(struct hfi1_pportdata *ppd, u32 desired_egress_rate,
14703                         u32 dw_len)
14704 {
14705         u32 delta_cycles;
14706         u32 current_egress_rate = ppd->current_egress_rate;
14707         /* rates here are in units of 10^6 bits/sec */
14708
14709         if (desired_egress_rate == -1)
14710                 return 0; /* shouldn't happen */
14711
14712         if (desired_egress_rate >= current_egress_rate)
14713                 return 0; /* we can't help go faster, only slower */
14714
14715         delta_cycles = egress_cycles(dw_len * 4, desired_egress_rate) -
14716                         egress_cycles(dw_len * 4, current_egress_rate);
14717
14718         return (u16)delta_cycles;
14719 }
14720
14721 /**
14722  * create_pbc - build a pbc for transmission
14723  * @flags: special case flags or-ed in built pbc
14724  * @srate_mbs: static rate, in units of 10^6 bits/sec
14725  * @vl: vl
14726  * @dw_len: dword length (header words + data words + pbc words)
14727  *
14728  * Create a PBC with the given flags, rate, VL, and length.
14729  *
14730  * NOTE: The PBC created will not insert any HCRC - all callers but one are
14731  * for verbs, which does not use this PSM feature.  The lone other caller
14732  * is for the diagnostic interface which calls this if the user does not
14733  * supply their own PBC.
14734  */
14735 u64 create_pbc(struct hfi1_pportdata *ppd, u64 flags, int srate_mbs, u32 vl,
14736                u32 dw_len)
14737 {
14738         u64 pbc, delay = 0;
14739
14740         if (unlikely(srate_mbs))
14741                 delay = delay_cycles(ppd, srate_mbs, dw_len);
14742
14743         pbc = flags
14744                 | (delay << PBC_STATIC_RATE_CONTROL_COUNT_SHIFT)
14745                 | ((u64)PBC_IHCRC_NONE << PBC_INSERT_HCRC_SHIFT)
14746                 | (vl & PBC_VL_MASK) << PBC_VL_SHIFT
14747                 | (dw_len & PBC_LENGTH_DWS_MASK)
14748                         << PBC_LENGTH_DWS_SHIFT;
14749
14750         return pbc;
14751 }
14752
14753 #define SBUS_THERMAL    0x4f
14754 #define SBUS_THERM_MONITOR_MODE 0x1
14755
14756 #define THERM_FAILURE(dev, ret, reason) \
14757         dd_dev_err((dev),                                               \
14758                    "Thermal sensor initialization failed: %s (%d)\n",   \
14759                    (reason), (ret))
14760
14761 /*
14762  * Initialize the thermal sensor.
14763  *
14764  * After initialization, enable polling of the thermal sensor
14765  * through the SBus interface. For this to work, the SBus Master
14766  * firmware has to be loaded, because the HW polling logic uses
14767  * SBus interrupts, which are not supported by the default
14768  * firmware. Otherwise, no data will be returned through the
14769  * ASIC_STS_THERM CSR.
14770  */
14771 static int thermal_init(struct hfi1_devdata *dd)
14772 {
14773         int ret = 0;
14774
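              /* only real silicon has the sensor; skip if already initialized */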
14775         if (dd->icode != ICODE_RTL_SILICON ||
14776             check_chip_resource(dd, CR_THERM_INIT, NULL))
14777                 return ret;
14778
14779         ret = acquire_chip_resource(dd, CR_SBUS, SBUS_TIMEOUT);
14780         if (ret) {
14781                 THERM_FAILURE(dd, ret, "Acquire SBus");
14782                 return ret;
14783         }
14784
14785         dd_dev_info(dd, "Initializing thermal sensor\n");
14786         /* Disable polling of thermal readings */
14787         write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x0);
14788         msleep(100);
14789         /* Thermal Sensor Initialization */
14790         /*    Step 1: Reset the Thermal SBus Receiver */
14791         ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
14792                                 RESET_SBUS_RECEIVER, 0);
14793         if (ret) {
14794                 THERM_FAILURE(dd, ret, "Bus Reset");
14795                 goto done;
14796         }
14797         /*    Step 2: Set Reset bit in Thermal block */
14798         ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
14799                                 WRITE_SBUS_RECEIVER, 0x1);
14800         if (ret) {
14801                 THERM_FAILURE(dd, ret, "Therm Block Reset");
14802                 goto done;
14803         }
14804         /*    Step 3: Write clock divider value (100MHz -> 2MHz) */
14805         ret = sbus_request_slow(dd, SBUS_THERMAL, 0x1,
14806                                 WRITE_SBUS_RECEIVER, 0x32);
14807         if (ret) {
14808                 THERM_FAILURE(dd, ret, "Write Clock Div");
14809                 goto done;
14810         }
14811         /*    Step 4: Select temperature mode */
14812         ret = sbus_request_slow(dd, SBUS_THERMAL, 0x3,
14813                                 WRITE_SBUS_RECEIVER,
14814                                 SBUS_THERM_MONITOR_MODE);
14815         if (ret) {
14816                 THERM_FAILURE(dd, ret, "Write Mode Sel");
14817                 goto done;
14818         }
14819         /*    Step 5: De-assert block reset and start conversion */
14820         ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
14821                                 WRITE_SBUS_RECEIVER, 0x2);
14822         if (ret) {
14823                 THERM_FAILURE(dd, ret, "Write Reset Deassert");
14824                 goto done;
14825         }
14826         /*    Step 5.1: Wait for first conversion (21.5ms per spec) */
14827         msleep(22);
14828
14829         /* Enable polling of thermal readings */
14830         write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x1);
14831
14832         /* Set initialized flag */
14833         ret = acquire_chip_resource(dd, CR_THERM_INIT, 0);
14834         if (ret)
14835                 THERM_FAILURE(dd, ret, "Unable to set thermal init flag");
14836
14837 done:
14838         release_chip_resource(dd, CR_SBUS);
14839         return ret;
14840 }
14841
14842 static void handle_temp_err(struct hfi1_devdata *dd)
14843 {
14844         struct hfi1_pportdata *ppd = &dd->pport[0];
14845         /*
14846          * Thermal Critical Interrupt
14847          * Put the device into forced freeze mode, take link down to
14848          * offline, and put DC into reset.
14849          */
14850         dd_dev_emerg(dd,
14851                      "Critical temperature reached! Forcing device into freeze mode!\n");
14852         dd->flags |= HFI1_FORCED_FREEZE;
14853         start_freeze_handling(ppd, FREEZE_SELF | FREEZE_ABORT);
14854         /*
14855          * Shut DC down as much and as quickly as possible.
14856          *
14857          * Step 1: Take the link down to OFFLINE. This will cause the
14858          *         8051 to put the Serdes in reset. However, we don't want to
14859          *         go through the entire link state machine since we want to
14860          *         shutdown ASAP. Furthermore, this is not a graceful shutdown
14861          *         but rather an attempt to save the chip.
14862          *         Code below is almost the same as quiet_serdes() but avoids
14863          *         all the extra work and the sleeps.
14864          */
14865         ppd->driver_link_ready = 0;
14866         ppd->link_enabled = 0;
14867         set_physical_link_state(dd, (OPA_LINKDOWN_REASON_SMA_DISABLED << 8) |
14868                                 PLS_OFFLINE);
14869         /*
14870          * Step 2: Shutdown LCB and 8051
14871          *         After shutdown, do not restore DC_CFG_RESET value.
14872          */
14873         dc_shutdown(dd);
14874 }