/*
 * Copyright (c) 2011 - 2017 Intel Corporation. All rights reserved.
 * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
 * All rights reserved.
 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
/*
 * This file contains all of the code that is specific to the
 * QLogic_IB 7220 chip (except that specific to the SerDes)
 */
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/io.h>
#include <rdma/ib_verbs.h>

#include "qib.h"
#include "qib_7220.h"
static void qib_setup_7220_setextled(struct qib_pportdata *, u32);
static void qib_7220_handle_hwerrors(struct qib_devdata *, char *, size_t);
static void sendctrl_7220_mod(struct qib_pportdata *ppd, u32 op);
static u32 qib_7220_iblink_state(u64);
static u8 qib_7220_phys_portstate(u64);
static void qib_sdma_update_7220_tail(struct qib_pportdata *, u16);
static void qib_set_ib_7220_lstate(struct qib_pportdata *, u16, u16);
/*
 * This file contains almost all the chip-specific register information and
 * access functions for the QLogic QLogic_IB 7220 PCI-Express chip, with the
 * exception of SerDes support, which is in qib_sd7220.c.
 */

/* Below uses machine-generated qib_chipnum_regs.h file */
#define KREG_IDX(regname) (QIB_7220_##regname##_OFFS / sizeof(u64))
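
/*
 * For example, KREG_IDX(Control) expands to
 * (QIB_7220_Control_OFFS / sizeof(u64)), i.e. the u64 index of the
 * Control register within the memory-mapped kernel register space.
 */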
/* Use defines to tie machine-generated names to lower-case names */
#define kr_control KREG_IDX(Control)
#define kr_counterregbase KREG_IDX(CntrRegBase)
#define kr_errclear KREG_IDX(ErrClear)
#define kr_errmask KREG_IDX(ErrMask)
#define kr_errstatus KREG_IDX(ErrStatus)
#define kr_extctrl KREG_IDX(EXTCtrl)
#define kr_extstatus KREG_IDX(EXTStatus)
#define kr_gpio_clear KREG_IDX(GPIOClear)
#define kr_gpio_mask KREG_IDX(GPIOMask)
#define kr_gpio_out KREG_IDX(GPIOOut)
#define kr_gpio_status KREG_IDX(GPIOStatus)
#define kr_hrtbt_guid KREG_IDX(HRTBT_GUID)
#define kr_hwdiagctrl KREG_IDX(HwDiagCtrl)
#define kr_hwerrclear KREG_IDX(HwErrClear)
#define kr_hwerrmask KREG_IDX(HwErrMask)
#define kr_hwerrstatus KREG_IDX(HwErrStatus)
#define kr_ibcctrl KREG_IDX(IBCCtrl)
#define kr_ibcddrctrl KREG_IDX(IBCDDRCtrl)
#define kr_ibcddrstatus KREG_IDX(IBCDDRStatus)
#define kr_ibcstatus KREG_IDX(IBCStatus)
#define kr_ibserdesctrl KREG_IDX(IBSerDesCtrl)
#define kr_intclear KREG_IDX(IntClear)
#define kr_intmask KREG_IDX(IntMask)
#define kr_intstatus KREG_IDX(IntStatus)
#define kr_ncmodectrl KREG_IDX(IBNCModeCtrl)
#define kr_palign KREG_IDX(PageAlign)
#define kr_partitionkey KREG_IDX(RcvPartitionKey)
#define kr_portcnt KREG_IDX(PortCnt)
#define kr_rcvbthqp KREG_IDX(RcvBTHQP)
#define kr_rcvctrl KREG_IDX(RcvCtrl)
#define kr_rcvegrbase KREG_IDX(RcvEgrBase)
#define kr_rcvegrcnt KREG_IDX(RcvEgrCnt)
#define kr_rcvhdrcnt KREG_IDX(RcvHdrCnt)
#define kr_rcvhdrentsize KREG_IDX(RcvHdrEntSize)
#define kr_rcvhdrsize KREG_IDX(RcvHdrSize)
#define kr_rcvpktledcnt KREG_IDX(RcvPktLEDCnt)
#define kr_rcvtidbase KREG_IDX(RcvTIDBase)
#define kr_rcvtidcnt KREG_IDX(RcvTIDCnt)
#define kr_revision KREG_IDX(Revision)
#define kr_scratch KREG_IDX(Scratch)
#define kr_sendbuffererror KREG_IDX(SendBufErr0)
#define kr_sendctrl KREG_IDX(SendCtrl)
#define kr_senddmabase KREG_IDX(SendDmaBase)
#define kr_senddmabufmask0 KREG_IDX(SendDmaBufMask0)
#define kr_senddmabufmask1 (KREG_IDX(SendDmaBufMask0) + 1)
#define kr_senddmabufmask2 (KREG_IDX(SendDmaBufMask0) + 2)
#define kr_senddmahead KREG_IDX(SendDmaHead)
#define kr_senddmaheadaddr KREG_IDX(SendDmaHeadAddr)
#define kr_senddmalengen KREG_IDX(SendDmaLenGen)
#define kr_senddmastatus KREG_IDX(SendDmaStatus)
#define kr_senddmatail KREG_IDX(SendDmaTail)
#define kr_sendpioavailaddr KREG_IDX(SendBufAvailAddr)
#define kr_sendpiobufbase KREG_IDX(SendBufBase)
#define kr_sendpiobufcnt KREG_IDX(SendBufCnt)
#define kr_sendpiosize KREG_IDX(SendBufSize)
#define kr_sendregbase KREG_IDX(SendRegBase)
#define kr_userregbase KREG_IDX(UserRegBase)
#define kr_xgxs_cfg KREG_IDX(XGXSCfg)

/* These must only be written via qib_write_kreg_ctxt() */
#define kr_rcvhdraddr KREG_IDX(RcvHdrAddr0)
#define kr_rcvhdrtailaddr KREG_IDX(RcvHdrTailAddr0)
#define CREG_IDX(regname) ((QIB_7220_##regname##_OFFS - \
                        QIB_7220_LBIntCnt_OFFS) / sizeof(u64))
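
/*
 * Counter registers are indexed relative to LBIntCnt rather than to the
 * start of the chip's register space; the resulting index is used with
 * dd->cspec->cregbase in the read/write_7220_creg accessors below.
 */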
#define cr_badformat CREG_IDX(RxVersionErrCnt)
#define cr_erricrc CREG_IDX(RxICRCErrCnt)
#define cr_errlink CREG_IDX(RxLinkMalformCnt)
#define cr_errlpcrc CREG_IDX(RxLPCRCErrCnt)
#define cr_errpkey CREG_IDX(RxPKeyMismatchCnt)
#define cr_rcvflowctrl_err CREG_IDX(RxFlowCtrlViolCnt)
#define cr_err_rlen CREG_IDX(RxLenErrCnt)
#define cr_errslen CREG_IDX(TxLenErrCnt)
#define cr_errtidfull CREG_IDX(RxTIDFullErrCnt)
#define cr_errtidvalid CREG_IDX(RxTIDValidErrCnt)
#define cr_errvcrc CREG_IDX(RxVCRCErrCnt)
#define cr_ibstatuschange CREG_IDX(IBStatusChangeCnt)
#define cr_lbint CREG_IDX(LBIntCnt)
#define cr_invalidrlen CREG_IDX(RxMaxMinLenErrCnt)
#define cr_invalidslen CREG_IDX(TxMaxMinLenErrCnt)
#define cr_lbflowstall CREG_IDX(LBFlowStallCnt)
#define cr_pktrcv CREG_IDX(RxDataPktCnt)
#define cr_pktrcvflowctrl CREG_IDX(RxFlowPktCnt)
#define cr_pktsend CREG_IDX(TxDataPktCnt)
#define cr_pktsendflow CREG_IDX(TxFlowPktCnt)
#define cr_portovfl CREG_IDX(RxP0HdrEgrOvflCnt)
#define cr_rcvebp CREG_IDX(RxEBPCnt)
#define cr_rcvovfl CREG_IDX(RxBufOvflCnt)
#define cr_senddropped CREG_IDX(TxDroppedPktCnt)
#define cr_sendstall CREG_IDX(TxFlowStallCnt)
#define cr_sendunderrun CREG_IDX(TxUnderrunCnt)
#define cr_wordrcv CREG_IDX(RxDwordCnt)
#define cr_wordsend CREG_IDX(TxDwordCnt)
#define cr_txunsupvl CREG_IDX(TxUnsupVLErrCnt)
#define cr_rxdroppkt CREG_IDX(RxDroppedPktCnt)
#define cr_iblinkerrrecov CREG_IDX(IBLinkErrRecoveryCnt)
#define cr_iblinkdown CREG_IDX(IBLinkDownedCnt)
#define cr_ibsymbolerr CREG_IDX(IBSymbolErrCnt)
#define cr_vl15droppedpkt CREG_IDX(RxVL15DroppedPktCnt)
#define cr_rxotherlocalphyerr CREG_IDX(RxOtherLocalPhyErrCnt)
#define cr_excessbufferovfl CREG_IDX(ExcessBufferOvflCnt)
#define cr_locallinkintegrityerr CREG_IDX(LocalLinkIntegrityErrCnt)
#define cr_rxvlerr CREG_IDX(RxVlErrCnt)
#define cr_rxdlidfltr CREG_IDX(RxDlidFltrCnt)
#define cr_psstat CREG_IDX(PSStat)
#define cr_psstart CREG_IDX(PSStart)
#define cr_psinterval CREG_IDX(PSInterval)
#define cr_psrcvdatacount CREG_IDX(PSRcvDataCount)
#define cr_psrcvpktscount CREG_IDX(PSRcvPktsCount)
#define cr_psxmitdatacount CREG_IDX(PSXmitDataCount)
#define cr_psxmitpktscount CREG_IDX(PSXmitPktsCount)
#define cr_psxmitwaitcount CREG_IDX(PSXmitWaitCount)
#define cr_txsdmadesc CREG_IDX(TxSDmaDescCnt)
#define cr_pcieretrydiag CREG_IDX(PcieRetryBufDiagQwordCnt)
#define SYM_RMASK(regname, fldname) ((u64) \
        QIB_7220_##regname##_##fldname##_RMASK)
#define SYM_MASK(regname, fldname) ((u64) \
        QIB_7220_##regname##_##fldname##_RMASK << \
        QIB_7220_##regname##_##fldname##_LSB)
#define SYM_LSB(regname, fldname) (QIB_7220_##regname##_##fldname##_LSB)
#define SYM_FIELD(value, regname, fldname) ((u64) \
        (((value) >> SYM_LSB(regname, fldname)) & \
        SYM_RMASK(regname, fldname)))
#define ERR_MASK(fldname) SYM_MASK(ErrMask, fldname##Mask)
#define HWE_MASK(fldname) SYM_MASK(HwErrMask, fldname##Mask)
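
/*
 * As an illustration, ERR_MASK(HardwareErr) expands to
 * SYM_MASK(ErrMask, HardwareErrMask): the field's right-justified RMASK
 * shifted up to its LSB position.  SYM_FIELD() inverts that, shifting a
 * register value down and masking to extract just the named field.
 */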
/* kr_ibcctrl bits */
#define QLOGIC_IB_IBCC_LINKINITCMD_DISABLE 1
/* cycle through TS1/TS2 till OK */
#define QLOGIC_IB_IBCC_LINKINITCMD_POLL 2
/* wait for TS1, then go on */
#define QLOGIC_IB_IBCC_LINKINITCMD_SLEEP 3
#define QLOGIC_IB_IBCC_LINKINITCMD_SHIFT 16

#define QLOGIC_IB_IBCC_LINKCMD_DOWN 1   /* move to 0x11 */
#define QLOGIC_IB_IBCC_LINKCMD_ARMED 2  /* move to 0x21 */
#define QLOGIC_IB_IBCC_LINKCMD_ACTIVE 3 /* move to 0x31 */

#define BLOB_7220_IBCHG 0x81
/*
 * We could have a single register get/put routine, that takes a group type,
 * but this is somewhat clearer and cleaner.  It also gives us some error
 * checking.  64 bit register reads should always work, but are inefficient
 * on opteron (the northbridge always generates 2 separate HT 32 bit reads),
 * so we use kreg32 wherever possible.  User register and counter register
 * reads are always 32 bit reads, so only one form of those routines.
 */
/**
 * qib_read_ureg32 - read 32-bit virtualized per-context register
 * @dd: device
 * @regno: register number
 * @ctxt: context number
 *
 * Return the contents of a register that is virtualized to be per context.
 * Returns -1 on errors (not distinguishable from valid contents at
 * runtime; we may add a separate error variable at some point).
 */
static inline u32 qib_read_ureg32(const struct qib_devdata *dd,
                                  enum qib_ureg regno, int ctxt)
{
        if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
                return 0;

        if (dd->userbase)
                return readl(regno + (u64 __iomem *)
                             ((char __iomem *)dd->userbase +
                              dd->ureg_align * ctxt));
        else
                return readl(regno + (u64 __iomem *)
                             (dd->uregbase +
                              (char __iomem *)dd->kregbase +
                              dd->ureg_align * ctxt));
}
/**
 * qib_write_ureg - write 32-bit virtualized per-context register
 * @dd: device
 * @regno: register number
 * @value: value
 * @ctxt: context
 *
 * Write the contents of a register that is virtualized to be per context.
 */
static inline void qib_write_ureg(const struct qib_devdata *dd,
                                  enum qib_ureg regno, u64 value, int ctxt)
{
        u64 __iomem *ubase;

        if (dd->userbase)
                ubase = (u64 __iomem *)
                        ((char __iomem *) dd->userbase +
                         dd->ureg_align * ctxt);
        else
                ubase = (u64 __iomem *)
                        (dd->uregbase +
                         (char __iomem *) dd->kregbase +
                         dd->ureg_align * ctxt);

        if (dd->kregbase && (dd->flags & QIB_PRESENT))
                writeq(value, &ubase[regno]);
}
/**
 * qib_write_kreg_ctxt - write a device's per-ctxt 64-bit kernel register
 * @dd: the qlogic_ib device
 * @regno: the register number to write
 * @ctxt: the context containing the register
 * @value: the value to write
 */
static inline void qib_write_kreg_ctxt(const struct qib_devdata *dd,
                                       const u16 regno, unsigned ctxt,
                                       u64 value)
{
        qib_write_kreg(dd, regno + ctxt, value);
}
static inline void write_7220_creg(const struct qib_devdata *dd,
                                   u16 regno, u64 value)
{
        if (dd->cspec->cregbase && (dd->flags & QIB_PRESENT))
                writeq(value, &dd->cspec->cregbase[regno]);
}

static inline u64 read_7220_creg(const struct qib_devdata *dd, u16 regno)
{
        if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))
                return 0;
        return readq(&dd->cspec->cregbase[regno]);
}

static inline u32 read_7220_creg32(const struct qib_devdata *dd, u16 regno)
{
        if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))
                return 0;
        return readl(&dd->cspec->cregbase[regno]);
}
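
/*
 * Typical use of the counter accessors, as seen later in this file:
 * snapshot the IB symbol-error counter with
 * read_7220_creg32(dd, cr_ibsymbolerr) and write an adjusted value back
 * with write_7220_creg(dd, cr_ibsymbolerr, val).
 */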
/* kr_revision bits */
#define QLOGIC_IB_R_EMULATORREV_MASK ((1ULL << 22) - 1)
#define QLOGIC_IB_R_EMULATORREV_SHIFT 40

/* kr_control bits */
#define QLOGIC_IB_C_RESET (1U << 7)

/* kr_intstatus, kr_intclear, kr_intmask bits */
#define QLOGIC_IB_I_RCVURG_MASK ((1ULL << 17) - 1)
#define QLOGIC_IB_I_RCVURG_SHIFT 32
#define QLOGIC_IB_I_RCVAVAIL_MASK ((1ULL << 17) - 1)
#define QLOGIC_IB_I_RCVAVAIL_SHIFT 0
#define QLOGIC_IB_I_SERDESTRIMDONE (1ULL << 27)

#define QLOGIC_IB_C_FREEZEMODE 0x00000002
#define QLOGIC_IB_C_LINKENABLE 0x00000004

#define QLOGIC_IB_I_SDMAINT             0x8000000000000000ULL
#define QLOGIC_IB_I_SDMADISABLED        0x4000000000000000ULL
#define QLOGIC_IB_I_ERROR               0x0000000080000000ULL
#define QLOGIC_IB_I_SPIOSENT            0x0000000040000000ULL
#define QLOGIC_IB_I_SPIOBUFAVAIL        0x0000000020000000ULL
#define QLOGIC_IB_I_GPIO                0x0000000010000000ULL
/* variables for sanity checking interrupt and errors */
#define QLOGIC_IB_I_BITSEXTANT \
        (QLOGIC_IB_I_SDMAINT | QLOGIC_IB_I_SDMADISABLED | \
        (QLOGIC_IB_I_RCVURG_MASK << QLOGIC_IB_I_RCVURG_SHIFT) | \
        (QLOGIC_IB_I_RCVAVAIL_MASK << \
         QLOGIC_IB_I_RCVAVAIL_SHIFT) | \
        QLOGIC_IB_I_ERROR | QLOGIC_IB_I_SPIOSENT | \
        QLOGIC_IB_I_SPIOBUFAVAIL | QLOGIC_IB_I_GPIO | \
        QLOGIC_IB_I_SERDESTRIMDONE)
#define IB_HWE_BITSEXTANT \
        (HWE_MASK(RXEMemParityErr) | \
         HWE_MASK(TXEMemParityErr) | \
         (QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK << \
          QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT) | \
         QLOGIC_IB_HWE_PCIE1PLLFAILED | \
         QLOGIC_IB_HWE_PCIE0PLLFAILED | \
         QLOGIC_IB_HWE_PCIEPOISONEDTLP | \
         QLOGIC_IB_HWE_PCIECPLTIMEOUT | \
         QLOGIC_IB_HWE_PCIEBUSPARITYXTLH | \
         QLOGIC_IB_HWE_PCIEBUSPARITYXADM | \
         QLOGIC_IB_HWE_PCIEBUSPARITYRADM | \
         HWE_MASK(PowerOnBISTFailed) | \
         QLOGIC_IB_HWE_COREPLL_FBSLIP | \
         QLOGIC_IB_HWE_COREPLL_RFSLIP | \
         QLOGIC_IB_HWE_SERDESPLLFAILED | \
         HWE_MASK(IBCBusToSPCParityErr) | \
         HWE_MASK(IBCBusFromSPCParityErr) | \
         QLOGIC_IB_HWE_PCIECPLDATAQUEUEERR | \
         QLOGIC_IB_HWE_PCIECPLHDRQUEUEERR | \
         QLOGIC_IB_HWE_SDMAMEMREADERR | \
         QLOGIC_IB_HWE_CLK_UC_PLLNOTLOCKED | \
         QLOGIC_IB_HWE_PCIESERDESQ0PCLKNOTDETECT | \
         QLOGIC_IB_HWE_PCIESERDESQ1PCLKNOTDETECT | \
         QLOGIC_IB_HWE_PCIESERDESQ2PCLKNOTDETECT | \
         QLOGIC_IB_HWE_PCIESERDESQ3PCLKNOTDETECT | \
         QLOGIC_IB_HWE_DDSRXEQMEMORYPARITYERR | \
         QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR | \
         QLOGIC_IB_HWE_PCIE_UC_OCT0MEMORYPARITYERR | \
         QLOGIC_IB_HWE_PCIE_UC_OCT1MEMORYPARITYERR)

#define IB_E_BITSEXTANT \
        (ERR_MASK(RcvFormatErr) | ERR_MASK(RcvVCRCErr) | \
         ERR_MASK(RcvICRCErr) | ERR_MASK(RcvMinPktLenErr) | \
         ERR_MASK(RcvMaxPktLenErr) | ERR_MASK(RcvLongPktLenErr) | \
         ERR_MASK(RcvShortPktLenErr) | ERR_MASK(RcvUnexpectedCharErr) | \
         ERR_MASK(RcvUnsupportedVLErr) | ERR_MASK(RcvEBPErr) | \
         ERR_MASK(RcvIBFlowErr) | ERR_MASK(RcvBadVersionErr) | \
         ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr) | \
         ERR_MASK(RcvBadTidErr) | ERR_MASK(RcvHdrLenErr) | \
         ERR_MASK(RcvHdrErr) | ERR_MASK(RcvIBLostLinkErr) | \
         ERR_MASK(SendSpecialTriggerErr) | \
         ERR_MASK(SDmaDisabledErr) | ERR_MASK(SendMinPktLenErr) | \
         ERR_MASK(SendMaxPktLenErr) | ERR_MASK(SendUnderRunErr) | \
         ERR_MASK(SendPktLenErr) | ERR_MASK(SendDroppedSmpPktErr) | \
         ERR_MASK(SendDroppedDataPktErr) | \
         ERR_MASK(SendPioArmLaunchErr) | \
         ERR_MASK(SendUnexpectedPktNumErr) | \
         ERR_MASK(SendUnsupportedVLErr) | ERR_MASK(SendBufMisuseErr) | \
         ERR_MASK(SDmaGenMismatchErr) | ERR_MASK(SDmaOutOfBoundErr) | \
         ERR_MASK(SDmaTailOutOfBoundErr) | ERR_MASK(SDmaBaseErr) | \
         ERR_MASK(SDma1stDescErr) | ERR_MASK(SDmaRpyTagErr) | \
         ERR_MASK(SDmaDwEnErr) | ERR_MASK(SDmaMissingDwErr) | \
         ERR_MASK(SDmaUnexpDataErr) | \
         ERR_MASK(IBStatusChanged) | ERR_MASK(InvalidAddrErr) | \
         ERR_MASK(ResetNegated) | ERR_MASK(HardwareErr) | \
         ERR_MASK(SDmaDescAddrMisalignErr) | \
         ERR_MASK(InvalidEEPCmd))
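
/*
 * Bits outside IB_E_BITSEXTANT / IB_HWE_BITSEXTANT are unexpected; the
 * error handlers below report them with an "unknown errors ... set"
 * message rather than silently dropping them.
 */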
/* kr_hwerrclear, kr_hwerrmask, kr_hwerrstatus, bits */
#define QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK  0x00000000000000ffULL
#define QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT 0
#define QLOGIC_IB_HWE_PCIEPOISONEDTLP      0x0000000010000000ULL
#define QLOGIC_IB_HWE_PCIECPLTIMEOUT       0x0000000020000000ULL
#define QLOGIC_IB_HWE_PCIEBUSPARITYXTLH    0x0000000040000000ULL
#define QLOGIC_IB_HWE_PCIEBUSPARITYXADM    0x0000000080000000ULL
#define QLOGIC_IB_HWE_PCIEBUSPARITYRADM    0x0000000100000000ULL
#define QLOGIC_IB_HWE_COREPLL_FBSLIP       0x0080000000000000ULL
#define QLOGIC_IB_HWE_COREPLL_RFSLIP       0x0100000000000000ULL
#define QLOGIC_IB_HWE_PCIE1PLLFAILED       0x0400000000000000ULL
#define QLOGIC_IB_HWE_PCIE0PLLFAILED       0x0800000000000000ULL
#define QLOGIC_IB_HWE_SERDESPLLFAILED      0x1000000000000000ULL
/* specific to this chip */
#define QLOGIC_IB_HWE_PCIECPLDATAQUEUEERR         0x0000000000000040ULL
#define QLOGIC_IB_HWE_PCIECPLHDRQUEUEERR          0x0000000000000080ULL
#define QLOGIC_IB_HWE_SDMAMEMREADERR              0x0000000010000000ULL
#define QLOGIC_IB_HWE_CLK_UC_PLLNOTLOCKED         0x2000000000000000ULL
#define QLOGIC_IB_HWE_PCIESERDESQ0PCLKNOTDETECT   0x0100000000000000ULL
#define QLOGIC_IB_HWE_PCIESERDESQ1PCLKNOTDETECT   0x0200000000000000ULL
#define QLOGIC_IB_HWE_PCIESERDESQ2PCLKNOTDETECT   0x0400000000000000ULL
#define QLOGIC_IB_HWE_PCIESERDESQ3PCLKNOTDETECT   0x0800000000000000ULL
#define QLOGIC_IB_HWE_DDSRXEQMEMORYPARITYERR      0x0000008000000000ULL
#define QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR       0x0000004000000000ULL
#define QLOGIC_IB_HWE_PCIE_UC_OCT0MEMORYPARITYERR 0x0000001000000000ULL
#define QLOGIC_IB_HWE_PCIE_UC_OCT1MEMORYPARITYERR 0x0000002000000000ULL
#define IBA7220_IBCC_LINKCMD_SHIFT 19

/* kr_ibcddrctrl bits */
#define IBA7220_IBC_DLIDLMC_MASK 0xFFFFFFFFUL
#define IBA7220_IBC_DLIDLMC_SHIFT 32

#define IBA7220_IBC_HRTBT_MASK (SYM_RMASK(IBCDDRCtrl, HRTBT_AUTO) | \
                                SYM_RMASK(IBCDDRCtrl, HRTBT_ENB))
#define IBA7220_IBC_HRTBT_SHIFT SYM_LSB(IBCDDRCtrl, HRTBT_ENB)

#define IBA7220_IBC_LANE_REV_SUPPORTED (1<<8)
#define IBA7220_IBC_LREV_MASK 1
#define IBA7220_IBC_LREV_SHIFT 8
#define IBA7220_IBC_RXPOL_MASK 1
#define IBA7220_IBC_RXPOL_SHIFT 7
#define IBA7220_IBC_WIDTH_SHIFT 5
#define IBA7220_IBC_WIDTH_MASK 0x3
#define IBA7220_IBC_WIDTH_1X_ONLY (0 << IBA7220_IBC_WIDTH_SHIFT)
#define IBA7220_IBC_WIDTH_4X_ONLY (1 << IBA7220_IBC_WIDTH_SHIFT)
#define IBA7220_IBC_WIDTH_AUTONEG (2 << IBA7220_IBC_WIDTH_SHIFT)
#define IBA7220_IBC_SPEED_AUTONEG (1 << 1)
#define IBA7220_IBC_SPEED_SDR (1 << 2)
#define IBA7220_IBC_SPEED_DDR (1 << 3)
#define IBA7220_IBC_SPEED_AUTONEG_MASK (0x7 << 1)
#define IBA7220_IBC_IBTA_1_2_MASK (1)
/* kr_ibcddrstatus */
/* link latency shift is 0, don't bother defining */
#define IBA7220_DDRSTAT_LINKLAT_MASK 0x3ffffff
/* kr_extstatus bits */
#define QLOGIC_IB_EXTS_FREQSEL 0x2
#define QLOGIC_IB_EXTS_SERDESSEL 0x4
#define QLOGIC_IB_EXTS_MEMBIST_ENDTEST 0x0000000000004000
#define QLOGIC_IB_EXTS_MEMBIST_DISABLED 0x0000000000008000

/* kr_xgxsconfig bits */
#define QLOGIC_IB_XGXS_RESET 0x5ULL
#define QLOGIC_IB_XGXS_FC_SAFE (1ULL << 63)

/* kr_rcvpktledcnt */
#define IBA7220_LEDBLINK_ON_SHIFT 32 /* 4ns period on after packet */
#define IBA7220_LEDBLINK_OFF_SHIFT 0 /* 4ns period off before next on */

#define _QIB_GPIO_SDA_NUM 1
#define _QIB_GPIO_SCL_NUM 0
#define QIB_TWSI_EEPROM_DEV 0xA2 /* All Production 7220 cards. */
#define QIB_TWSI_TEMP_DEV 0x98

/* HW counter clock is at 4nsec */
#define QIB_7220_PSXMITWAIT_CHECK_RATE 4000

#define IBA7220_R_INTRAVAIL_SHIFT 17
#define IBA7220_R_PKEY_DIS_SHIFT 34
#define IBA7220_R_TAILUPD_SHIFT 35
#define IBA7220_R_CTXTCFG_SHIFT 36

#define IBA7220_HDRHEAD_PKTINT_SHIFT 32 /* interrupt cnt in upper 32 bits */

/*
 * the size bits give us 2^N, in KB units.  0 marks as invalid,
 * and 7 is reserved.  We currently use only 2KB and 4KB
 */
#define IBA7220_TID_SZ_SHIFT 37 /* shift to 3bit size selector */
#define IBA7220_TID_SZ_2K (1UL << IBA7220_TID_SZ_SHIFT) /* 2KB */
#define IBA7220_TID_SZ_4K (2UL << IBA7220_TID_SZ_SHIFT) /* 4KB */
#define IBA7220_TID_PA_SHIFT 11U /* TID addr in chip stored w/o low bits */
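
/*
 * Illustrative only: a TID entry combines the size selector with the
 * physical address stripped of its low bits, roughly
 * IBA7220_TID_SZ_4K | (pa >> IBA7220_TID_PA_SHIFT) for a 4KB buffer.
 */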
#define PBC_7220_VL15_SEND (1ULL << 63) /* pbc; VL15, no credit check */
#define PBC_7220_VL15_SEND_CTRL (1ULL << 31) /* control version of same */

#define AUTONEG_TRIES 5 /* sequential retries to negotiate DDR */
/* packet rate matching delay multiplier */
static u8 rate_to_delay[2][2] = {
        /* 1x, 4x */
        {   8, 4 },     /* SDR */
        {   4, 2 }      /* DDR */
};

static u8 ib_rate_to_delay[IB_RATE_120_GBPS + 1] = {
        [IB_RATE_2_5_GBPS] = 8,
        [IB_RATE_5_GBPS] = 4,
        [IB_RATE_10_GBPS] = 2,
        [IB_RATE_20_GBPS] = 1
};
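
/*
 * The delay multipliers scale inversely with link rate: 2.5 Gbps (SDR 1x)
 * gets 8, each doubling of the rate halves the delay, down to 1 at
 * 20 Gbps (DDR 4x).
 */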
#define IBA7220_LINKSPEED_SHIFT SYM_LSB(IBCStatus, LinkSpeedActive)
#define IBA7220_LINKWIDTH_SHIFT SYM_LSB(IBCStatus, LinkWidthActive)
/* link training states, from IBC */
#define IB_7220_LT_STATE_DISABLED        0x00
#define IB_7220_LT_STATE_LINKUP          0x01
#define IB_7220_LT_STATE_POLLACTIVE      0x02
#define IB_7220_LT_STATE_POLLQUIET       0x03
#define IB_7220_LT_STATE_SLEEPDELAY      0x04
#define IB_7220_LT_STATE_SLEEPQUIET      0x05
#define IB_7220_LT_STATE_CFGDEBOUNCE     0x08
#define IB_7220_LT_STATE_CFGRCVFCFG      0x09
#define IB_7220_LT_STATE_CFGWAITRMT      0x0a
#define IB_7220_LT_STATE_CFGIDLE         0x0b
#define IB_7220_LT_STATE_RECOVERRETRAIN  0x0c
#define IB_7220_LT_STATE_RECOVERWAITRMT  0x0e
#define IB_7220_LT_STATE_RECOVERIDLE     0x0f
/* link state machine states from IBC */
#define IB_7220_L_STATE_DOWN             0x0
#define IB_7220_L_STATE_INIT             0x1
#define IB_7220_L_STATE_ARM              0x2
#define IB_7220_L_STATE_ACTIVE           0x3
#define IB_7220_L_STATE_ACT_DEFER        0x4
static const u8 qib_7220_physportstate[0x20] = {
        [IB_7220_LT_STATE_DISABLED] = IB_PHYSPORTSTATE_DISABLED,
        [IB_7220_LT_STATE_LINKUP] = IB_PHYSPORTSTATE_LINKUP,
        [IB_7220_LT_STATE_POLLACTIVE] = IB_PHYSPORTSTATE_POLL,
        [IB_7220_LT_STATE_POLLQUIET] = IB_PHYSPORTSTATE_POLL,
        [IB_7220_LT_STATE_SLEEPDELAY] = IB_PHYSPORTSTATE_SLEEP,
        [IB_7220_LT_STATE_SLEEPQUIET] = IB_PHYSPORTSTATE_SLEEP,
        [IB_7220_LT_STATE_CFGDEBOUNCE] =
                IB_PHYSPORTSTATE_CFG_TRAIN,
        [IB_7220_LT_STATE_CFGRCVFCFG] =
                IB_PHYSPORTSTATE_CFG_TRAIN,
        [IB_7220_LT_STATE_CFGWAITRMT] =
                IB_PHYSPORTSTATE_CFG_TRAIN,
        [IB_7220_LT_STATE_CFGIDLE] = IB_PHYSPORTSTATE_CFG_TRAIN,
        [IB_7220_LT_STATE_RECOVERRETRAIN] =
                IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
        [IB_7220_LT_STATE_RECOVERWAITRMT] =
                IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
        [IB_7220_LT_STATE_RECOVERIDLE] =
                IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
        [0x10] = IB_PHYSPORTSTATE_CFG_TRAIN,
        [0x11] = IB_PHYSPORTSTATE_CFG_TRAIN,
        [0x12] = IB_PHYSPORTSTATE_CFG_TRAIN,
        [0x13] = IB_PHYSPORTSTATE_CFG_TRAIN,
        [0x14] = IB_PHYSPORTSTATE_CFG_TRAIN,
        [0x15] = IB_PHYSPORTSTATE_CFG_TRAIN,
        [0x16] = IB_PHYSPORTSTATE_CFG_TRAIN,
        [0x17] = IB_PHYSPORTSTATE_CFG_TRAIN
};
int qib_special_trigger;
module_param_named(special_trigger, qib_special_trigger, int, S_IRUGO);
MODULE_PARM_DESC(special_trigger, "Enable SpecialTrigger arm/launch");
#define IBCBUSFRSPCPARITYERR HWE_MASK(IBCBusFromSPCParityErr)
#define IBCBUSTOSPCPARITYERR HWE_MASK(IBCBusToSPCParityErr)

#define SYM_MASK_BIT(regname, fldname, bit) ((u64) \
        (1ULL << (SYM_LSB(regname, fldname) + (bit))))

#define TXEMEMPARITYERR_PIOBUF \
        SYM_MASK_BIT(HwErrMask, TXEMemParityErrMask, 0)
#define TXEMEMPARITYERR_PIOPBC \
        SYM_MASK_BIT(HwErrMask, TXEMemParityErrMask, 1)
#define TXEMEMPARITYERR_PIOLAUNCHFIFO \
        SYM_MASK_BIT(HwErrMask, TXEMemParityErrMask, 2)

#define RXEMEMPARITYERR_RCVBUF \
        SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 0)
#define RXEMEMPARITYERR_LOOKUPQ \
        SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 1)
#define RXEMEMPARITYERR_EXPTID \
        SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 2)
#define RXEMEMPARITYERR_EAGERTID \
        SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 3)
#define RXEMEMPARITYERR_FLAGBUF \
        SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 4)
#define RXEMEMPARITYERR_DATAINFO \
        SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 5)
#define RXEMEMPARITYERR_HDRINFO \
        SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 6)
/* 7220 specific hardware errors... */
static const struct qib_hwerror_msgs qib_7220_hwerror_msgs[] = {
        /* generic hardware errors */
        QLOGIC_IB_HWE_MSG(IBCBUSFRSPCPARITYERR, "QIB2IB Parity"),
        QLOGIC_IB_HWE_MSG(IBCBUSTOSPCPARITYERR, "IB2QIB Parity"),

        QLOGIC_IB_HWE_MSG(TXEMEMPARITYERR_PIOBUF,
                          "TXE PIOBUF Memory Parity"),
        QLOGIC_IB_HWE_MSG(TXEMEMPARITYERR_PIOPBC,
                          "TXE PIOPBC Memory Parity"),
        QLOGIC_IB_HWE_MSG(TXEMEMPARITYERR_PIOLAUNCHFIFO,
                          "TXE PIOLAUNCHFIFO Memory Parity"),

        QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_RCVBUF,
                          "RXE RCVBUF Memory Parity"),
        QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_LOOKUPQ,
                          "RXE LOOKUPQ Memory Parity"),
        QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_EAGERTID,
                          "RXE EAGERTID Memory Parity"),
        QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_EXPTID,
                          "RXE EXPTID Memory Parity"),
        QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_FLAGBUF,
                          "RXE FLAGBUF Memory Parity"),
        QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_DATAINFO,
                          "RXE DATAINFO Memory Parity"),
        QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_HDRINFO,
                          "RXE HDRINFO Memory Parity"),

        /* chip-specific hardware errors */
        QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIEPOISONEDTLP,
                          "PCIe Poisoned TLP"),
        QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIECPLTIMEOUT,
                          "PCIe completion timeout"),
        /*
         * In practice, it's unlikely that we'll see PCIe PLL, or bus
         * parity or memory parity error failures, because most likely we
         * won't be able to talk to the core of the chip.  Nonetheless, we
         * might see them, if they are in parts of the PCIe core that aren't
         * essential.
         */
        QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIE1PLLFAILED,
                          "PCIePLL1"),
        QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIE0PLLFAILED,
                          "PCIePLL0"),
        QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIEBUSPARITYXTLH,
                          "PCIe XTLH core parity"),
        QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIEBUSPARITYXADM,
                          "PCIe ADM TX core parity"),
        QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIEBUSPARITYRADM,
                          "PCIe ADM RX core parity"),
        QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_SERDESPLLFAILED,
                          "SerDes PLL"),
        QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIECPLDATAQUEUEERR,
                          "PCIe cpl data queue"),
        QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIECPLHDRQUEUEERR,
                          "PCIe cpl header queue"),
        QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_SDMAMEMREADERR,
                          "Send DMA memory read"),
        QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_CLK_UC_PLLNOTLOCKED,
                          "uC PLL clock not locked"),
        QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIESERDESQ0PCLKNOTDETECT,
                          "PCIe serdes Q0 no clock"),
        QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIESERDESQ1PCLKNOTDETECT,
                          "PCIe serdes Q1 no clock"),
        QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIESERDESQ2PCLKNOTDETECT,
                          "PCIe serdes Q2 no clock"),
        QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIESERDESQ3PCLKNOTDETECT,
                          "PCIe serdes Q3 no clock"),
        QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_DDSRXEQMEMORYPARITYERR,
                          "DDS RXEQ memory parity"),
        QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR,
                          "IB uC memory parity"),
        QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIE_UC_OCT0MEMORYPARITYERR,
                          "PCIe uC oct0 memory parity"),
        QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIE_UC_OCT1MEMORYPARITYERR,
                          "PCIe uC oct1 memory parity"),
};
#define RXE_PARITY (RXEMEMPARITYERR_EAGERTID|RXEMEMPARITYERR_EXPTID)

#define QLOGIC_IB_E_PKTERRS (\
        ERR_MASK(SendPktLenErr) | \
        ERR_MASK(SendDroppedDataPktErr) | \
        ERR_MASK(RcvVCRCErr) | \
        ERR_MASK(RcvICRCErr) | \
        ERR_MASK(RcvShortPktLenErr) | \
        ERR_MASK(RcvEBPErr))
/* Convenience for decoding Send DMA errors */
#define QLOGIC_IB_E_SDMAERRS ( \
        ERR_MASK(SDmaGenMismatchErr) | \
        ERR_MASK(SDmaOutOfBoundErr) | \
        ERR_MASK(SDmaTailOutOfBoundErr) | ERR_MASK(SDmaBaseErr) | \
        ERR_MASK(SDma1stDescErr) | ERR_MASK(SDmaRpyTagErr) | \
        ERR_MASK(SDmaDwEnErr) | ERR_MASK(SDmaMissingDwErr) | \
        ERR_MASK(SDmaUnexpDataErr) | \
        ERR_MASK(SDmaDescAddrMisalignErr) | \
        ERR_MASK(SDmaDisabledErr) | \
        ERR_MASK(SendBufMisuseErr))
/* These are all rcv-related errors which we want to count for stats */
#define E_SUM_PKTERRS \
        (ERR_MASK(RcvHdrLenErr) | ERR_MASK(RcvBadTidErr) | \
         ERR_MASK(RcvBadVersionErr) | ERR_MASK(RcvHdrErr) | \
         ERR_MASK(RcvLongPktLenErr) | ERR_MASK(RcvShortPktLenErr) | \
         ERR_MASK(RcvMaxPktLenErr) | ERR_MASK(RcvMinPktLenErr) | \
         ERR_MASK(RcvFormatErr) | ERR_MASK(RcvUnsupportedVLErr) | \
         ERR_MASK(RcvUnexpectedCharErr) | ERR_MASK(RcvEBPErr))
/* These are all send-related errors which we want to count for stats */
#define E_SUM_ERRS \
        (ERR_MASK(SendPioArmLaunchErr) | ERR_MASK(SendUnexpectedPktNumErr) | \
         ERR_MASK(SendDroppedDataPktErr) | ERR_MASK(SendDroppedSmpPktErr) | \
         ERR_MASK(SendMaxPktLenErr) | ERR_MASK(SendUnsupportedVLErr) | \
         ERR_MASK(SendMinPktLenErr) | ERR_MASK(SendPktLenErr) | \
         ERR_MASK(InvalidAddrErr))
/*
 * This is similar to E_SUM_ERRS, but we can't ignore armlaunch errors,
 * and we don't ignore errors that are unrelated to freeze and cancelling
 * buffers.  We can't ignore armlaunch because more could arrive while we
 * are still cleaning up, and those need to be cancelled as they happen.
 */
#define E_SPKT_ERRS_IGNORE \
        (ERR_MASK(SendDroppedDataPktErr) | ERR_MASK(SendDroppedSmpPktErr) | \
         ERR_MASK(SendMaxPktLenErr) | ERR_MASK(SendMinPktLenErr) | \
         ERR_MASK(SendPktLenErr))
/*
 * These are errors that can occur when the link changes state while
 * a packet is being sent or received.  This doesn't cover things
 * like EBP or VCRC that can be the result of a send having the
 * link change state, so we receive a "known bad" packet.
 */
#define E_SUM_LINK_PKTERRS \
        (ERR_MASK(SendDroppedDataPktErr) | ERR_MASK(SendDroppedSmpPktErr) | \
         ERR_MASK(SendMinPktLenErr) | ERR_MASK(SendPktLenErr) | \
         ERR_MASK(RcvShortPktLenErr) | ERR_MASK(RcvMinPktLenErr) | \
         ERR_MASK(RcvUnexpectedCharErr))
static void autoneg_7220_work(struct work_struct *);
static u32 __iomem *qib_7220_getsendbuf(struct qib_pportdata *, u64, u32 *);
/*
 * Called when we might have an error that is specific to a particular
 * PIO buffer, and may need to cancel that buffer, so it can be re-used;
 * we don't need to force the update of pioavail here.
 */
static void qib_disarm_7220_senderrbufs(struct qib_pportdata *ppd)
{
        unsigned long sbuf[3];
        struct qib_devdata *dd = ppd->dd;

        /*
         * It's possible that sendbuffererror could have bits set; might
         * have already done this as a result of hardware error handling.
         */
        /* read these before writing errorclear */
        sbuf[0] = qib_read_kreg64(dd, kr_sendbuffererror);
        sbuf[1] = qib_read_kreg64(dd, kr_sendbuffererror + 1);
        sbuf[2] = qib_read_kreg64(dd, kr_sendbuffererror + 2);

        if (sbuf[0] || sbuf[1] || sbuf[2])
                qib_disarm_piobufs_set(dd, sbuf,
                                       dd->piobcnt2k + dd->piobcnt4k);
}
static void qib_7220_txe_recover(struct qib_devdata *dd)
{
        qib_devinfo(dd->pcidev, "Recovering from TXE PIO parity error\n");
        qib_disarm_7220_senderrbufs(dd->pport);
}
/*
 * This is called with interrupts disabled and sdma_lock held.
 */
static void qib_7220_sdma_sendctrl(struct qib_pportdata *ppd, unsigned op)
{
        struct qib_devdata *dd = ppd->dd;
        u64 set_sendctrl = 0;
        u64 clr_sendctrl = 0;

        if (op & QIB_SDMA_SENDCTRL_OP_ENABLE)
                set_sendctrl |= SYM_MASK(SendCtrl, SDmaEnable);
        else
                clr_sendctrl |= SYM_MASK(SendCtrl, SDmaEnable);

        if (op & QIB_SDMA_SENDCTRL_OP_INTENABLE)
                set_sendctrl |= SYM_MASK(SendCtrl, SDmaIntEnable);
        else
                clr_sendctrl |= SYM_MASK(SendCtrl, SDmaIntEnable);

        if (op & QIB_SDMA_SENDCTRL_OP_HALT)
                set_sendctrl |= SYM_MASK(SendCtrl, SDmaHalt);
        else
                clr_sendctrl |= SYM_MASK(SendCtrl, SDmaHalt);

        spin_lock(&dd->sendctrl_lock);

        dd->sendctrl |= set_sendctrl;
        dd->sendctrl &= ~clr_sendctrl;

        qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
        qib_write_kreg(dd, kr_scratch, 0);

        spin_unlock(&dd->sendctrl_lock);
}
static void qib_decode_7220_sdma_errs(struct qib_pportdata *ppd,
                                      u64 err, char *buf, size_t blen)
{
        static const struct {
                u64 err;
                const char *msg;
        } errs[] = {
                { ERR_MASK(SDmaGenMismatchErr),
                  "SDmaGenMismatch" },
                { ERR_MASK(SDmaOutOfBoundErr),
                  "SDmaOutOfBound" },
                { ERR_MASK(SDmaTailOutOfBoundErr),
                  "SDmaTailOutOfBound" },
                { ERR_MASK(SDmaBaseErr),
                  "SDmaBase" },
                { ERR_MASK(SDma1stDescErr),
                  "SDma1stDesc" },
                { ERR_MASK(SDmaRpyTagErr),
                  "SDmaRpyTag" },
                { ERR_MASK(SDmaDwEnErr),
                  "SDmaDwEn" },
                { ERR_MASK(SDmaMissingDwErr),
                  "SDmaMissingDw" },
                { ERR_MASK(SDmaUnexpDataErr),
                  "SDmaUnexpData" },
                { ERR_MASK(SDmaDescAddrMisalignErr),
                  "SDmaDescAddrMisalign" },
                { ERR_MASK(SendBufMisuseErr),
                  "SendBufMisuse" },
                { ERR_MASK(SDmaDisabledErr),
                  "SDmaDisabled" },
        };
        int i;
        size_t bidx = 0;

        for (i = 0; i < ARRAY_SIZE(errs); i++) {
                if (err & errs[i].err)
                        bidx += scnprintf(buf + bidx, blen - bidx,
                                          "%s ", errs[i].msg);
        }
}
/*
 * This is called as part of link down clean up so disarm and flush
 * all send buffers so that SMP packets can be sent.
 */
static void qib_7220_sdma_hw_clean_up(struct qib_pportdata *ppd)
{
        /* This will trigger the Abort interrupt */
        sendctrl_7220_mod(ppd, QIB_SENDCTRL_DISARM_ALL | QIB_SENDCTRL_FLUSH |
                          QIB_SENDCTRL_AVAIL_BLIP);
        ppd->dd->upd_pio_shadow = 1; /* update our idea of what's busy */
}
static void qib_sdma_7220_setlengen(struct qib_pportdata *ppd)
{
        /*
         * Set SendDmaLenGen and clear then set
         * the MSB of the generation count to enable generation checking
         * and load the internal generation counter.
         */
        qib_write_kreg(ppd->dd, kr_senddmalengen, ppd->sdma_descq_cnt);
        qib_write_kreg(ppd->dd, kr_senddmalengen,
                       ppd->sdma_descq_cnt |
                       (1ULL << QIB_7220_SendDmaLenGen_Generation_MSB));
}
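
/*
 * Note the two-step write above: the first write loads the queue length
 * with the generation MSB clear, the second sets the MSB, which (per the
 * comment) is what enables generation checking and loads the internal
 * generation counter.
 */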
static void qib_7220_sdma_hw_start_up(struct qib_pportdata *ppd)
{
        qib_sdma_7220_setlengen(ppd);
        qib_sdma_update_7220_tail(ppd, 0); /* Set SendDmaTail */
        ppd->sdma_head_dma[0] = 0;
}
#define DISABLES_SDMA ( \
        ERR_MASK(SDmaDisabledErr) | \
        ERR_MASK(SDmaBaseErr) | \
        ERR_MASK(SDmaTailOutOfBoundErr) | \
        ERR_MASK(SDmaOutOfBoundErr) | \
        ERR_MASK(SDma1stDescErr) | \
        ERR_MASK(SDmaRpyTagErr) | \
        ERR_MASK(SDmaGenMismatchErr) | \
        ERR_MASK(SDmaDescAddrMisalignErr) | \
        ERR_MASK(SDmaMissingDwErr) | \
        ERR_MASK(SDmaDwEnErr))
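
/*
 * Any of the DISABLES_SDMA errors halts the SDMA engine;
 * sdma_7220_errors() below feeds them into the SDMA state machine, e.g.
 * posting qib_sdma_event_e7220_err_halted while in the running state.
 */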
static void sdma_7220_errors(struct qib_pportdata *ppd, u64 errs)
{
        unsigned long flags;
        struct qib_devdata *dd = ppd->dd;
        char *msg;

        errs &= QLOGIC_IB_E_SDMAERRS;

        msg = dd->cspec->sdmamsgbuf;
        qib_decode_7220_sdma_errs(ppd, errs, msg,
                                  sizeof(dd->cspec->sdmamsgbuf));
        spin_lock_irqsave(&ppd->sdma_lock, flags);

        if (errs & ERR_MASK(SendBufMisuseErr)) {
                unsigned long sbuf[3];

                sbuf[0] = qib_read_kreg64(dd, kr_sendbuffererror);
                sbuf[1] = qib_read_kreg64(dd, kr_sendbuffererror + 1);
                sbuf[2] = qib_read_kreg64(dd, kr_sendbuffererror + 2);

                qib_dev_err(ppd->dd,
                            "IB%u:%u SendBufMisuse: %04lx %016lx %016lx\n",
                            ppd->dd->unit, ppd->port, sbuf[2], sbuf[1],
                            sbuf[0]);
        }

        if (errs & ERR_MASK(SDmaUnexpDataErr))
                qib_dev_err(dd, "IB%u:%u SDmaUnexpData\n", ppd->dd->unit,
                            ppd->port);

        switch (ppd->sdma_state.current_state) {
        case qib_sdma_state_s00_hw_down:
                /* not expecting any interrupts */
                break;

        case qib_sdma_state_s10_hw_start_up_wait:
                /* handled in intr path */
                break;

        case qib_sdma_state_s20_idle:
                /* not expecting any interrupts */
                break;

        case qib_sdma_state_s30_sw_clean_up_wait:
                /* not expecting any interrupts */
                break;

        case qib_sdma_state_s40_hw_clean_up_wait:
                if (errs & ERR_MASK(SDmaDisabledErr))
                        __qib_sdma_process_event(ppd,
                                qib_sdma_event_e50_hw_cleaned);
                break;

        case qib_sdma_state_s50_hw_halt_wait:
                /* handled in intr path */
                break;

        case qib_sdma_state_s99_running:
                if (errs & DISABLES_SDMA)
                        __qib_sdma_process_event(ppd,
                                qib_sdma_event_e7220_err_halted);
                break;
        }

        spin_unlock_irqrestore(&ppd->sdma_lock, flags);
}
/*
 * Decode the error status into strings, deciding whether to always
 * print it or not depending on "normal packet errors" vs everything
 * else.  Return 1 if "real" errors, otherwise 0 if only packet
 * errors, so caller can decide what to print with the string.
 */
static int qib_decode_7220_err(struct qib_devdata *dd, char *buf, size_t blen,
                               u64 err)
{
        int iserr = 1;

        *buf = '\0';
        if (err & QLOGIC_IB_E_PKTERRS) {
                if (!(err & ~QLOGIC_IB_E_PKTERRS))
                        iserr = 0;
                if ((err & ERR_MASK(RcvICRCErr)) &&
                    !(err & (ERR_MASK(RcvVCRCErr) | ERR_MASK(RcvEBPErr))))
                        strlcat(buf, "CRC ", blen);
                if (!iserr)
                        goto done;
        }
        if (err & ERR_MASK(RcvHdrLenErr))
                strlcat(buf, "rhdrlen ", blen);
        if (err & ERR_MASK(RcvBadTidErr))
                strlcat(buf, "rbadtid ", blen);
        if (err & ERR_MASK(RcvBadVersionErr))
                strlcat(buf, "rbadversion ", blen);
        if (err & ERR_MASK(RcvHdrErr))
                strlcat(buf, "rhdr ", blen);
        if (err & ERR_MASK(SendSpecialTriggerErr))
                strlcat(buf, "sendspecialtrigger ", blen);
        if (err & ERR_MASK(RcvLongPktLenErr))
                strlcat(buf, "rlongpktlen ", blen);
        if (err & ERR_MASK(RcvMaxPktLenErr))
                strlcat(buf, "rmaxpktlen ", blen);
        if (err & ERR_MASK(RcvMinPktLenErr))
                strlcat(buf, "rminpktlen ", blen);
        if (err & ERR_MASK(SendMinPktLenErr))
                strlcat(buf, "sminpktlen ", blen);
        if (err & ERR_MASK(RcvFormatErr))
                strlcat(buf, "rformaterr ", blen);
        if (err & ERR_MASK(RcvUnsupportedVLErr))
                strlcat(buf, "runsupvl ", blen);
        if (err & ERR_MASK(RcvUnexpectedCharErr))
                strlcat(buf, "runexpchar ", blen);
        if (err & ERR_MASK(RcvIBFlowErr))
                strlcat(buf, "ribflow ", blen);
        if (err & ERR_MASK(SendUnderRunErr))
                strlcat(buf, "sunderrun ", blen);
        if (err & ERR_MASK(SendPioArmLaunchErr))
                strlcat(buf, "spioarmlaunch ", blen);
        if (err & ERR_MASK(SendUnexpectedPktNumErr))
                strlcat(buf, "sunexperrpktnum ", blen);
        if (err & ERR_MASK(SendDroppedSmpPktErr))
                strlcat(buf, "sdroppedsmppkt ", blen);
        if (err & ERR_MASK(SendMaxPktLenErr))
                strlcat(buf, "smaxpktlen ", blen);
        if (err & ERR_MASK(SendUnsupportedVLErr))
                strlcat(buf, "sunsupVL ", blen);
        if (err & ERR_MASK(InvalidAddrErr))
                strlcat(buf, "invalidaddr ", blen);
        if (err & ERR_MASK(RcvEgrFullErr))
                strlcat(buf, "rcvegrfull ", blen);
        if (err & ERR_MASK(RcvHdrFullErr))
                strlcat(buf, "rcvhdrfull ", blen);
        if (err & ERR_MASK(IBStatusChanged))
                strlcat(buf, "ibcstatuschg ", blen);
        if (err & ERR_MASK(RcvIBLostLinkErr))
                strlcat(buf, "riblostlink ", blen);
        if (err & ERR_MASK(HardwareErr))
                strlcat(buf, "hardware ", blen);
        if (err & ERR_MASK(ResetNegated))
                strlcat(buf, "reset ", blen);
        if (err & QLOGIC_IB_E_SDMAERRS)
                qib_decode_7220_sdma_errs(dd->pport, err, buf, blen);
        if (err & ERR_MASK(InvalidEEPCmd))
                strlcat(buf, "invalideepromcmd ", blen);
done:
        return iserr;
}
static void reenable_7220_chase(unsigned long opaque)
{
        struct qib_pportdata *ppd = (struct qib_pportdata *)opaque;

        ppd->cpspec->chase_timer.expires = 0;
        qib_set_ib_7220_lstate(ppd, QLOGIC_IB_IBCC_LINKCMD_DOWN,
                QLOGIC_IB_IBCC_LINKINITCMD_POLL);
}
static void handle_7220_chase(struct qib_pportdata *ppd, u64 ibcst)
{
        u8 ibclt;
        unsigned long tnow;

        ibclt = (u8)SYM_FIELD(ibcst, IBCStatus, LinkTrainingState);

        /*
         * Detect and handle the state chase issue, where we can
         * get stuck if we are unlucky on timing on both sides of
         * the link.  If we are, we disable, set a timer, and
         * then re-enable.
         */
        switch (ibclt) {
        case IB_7220_LT_STATE_CFGRCVFCFG:
        case IB_7220_LT_STATE_CFGWAITRMT:
        case IB_7220_LT_STATE_TXREVLANES:
        case IB_7220_LT_STATE_CFGENH:
                tnow = jiffies;
                if (ppd->cpspec->chase_end &&
                    time_after(tnow, ppd->cpspec->chase_end)) {
                        ppd->cpspec->chase_end = 0;
                        qib_set_ib_7220_lstate(ppd,
                                QLOGIC_IB_IBCC_LINKCMD_DOWN,
                                QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
                        ppd->cpspec->chase_timer.expires = jiffies +
                                QIB_CHASE_DIS_TIME;
                        add_timer(&ppd->cpspec->chase_timer);
                } else if (!ppd->cpspec->chase_end)
                        ppd->cpspec->chase_end = tnow + QIB_CHASE_TIME;
                break;

        default:
                ppd->cpspec->chase_end = 0;
                break;
        }
}
static void handle_7220_errors(struct qib_devdata *dd, u64 errs)
{
        char *msg;
        u64 ignore_this_time = 0;
        u64 iserr = 0;
        int log_idx;
        struct qib_pportdata *ppd = dd->pport;
        u64 mask;

        /* don't report errors that are masked */
        errs &= dd->cspec->errormask;
        msg = dd->cspec->emsgbuf;

        /* do these first, they are most important */
        if (errs & ERR_MASK(HardwareErr))
                qib_7220_handle_hwerrors(dd, msg, sizeof(dd->cspec->emsgbuf));
        else
                for (log_idx = 0; log_idx < QIB_EEP_LOG_CNT; ++log_idx)
                        if (errs & dd->eep_st_masks[log_idx].errs_to_log)
                                qib_inc_eeprom_err(dd, log_idx, 1);

        if (errs & QLOGIC_IB_E_SDMAERRS)
                sdma_7220_errors(ppd, errs);

        if (errs & ~IB_E_BITSEXTANT)
                qib_dev_err(dd,
                        "error interrupt with unknown errors %llx set\n",
                        (unsigned long long) (errs & ~IB_E_BITSEXTANT));

        if (errs & E_SUM_ERRS) {
                qib_disarm_7220_senderrbufs(ppd);
                if ((errs & E_SUM_LINK_PKTERRS) &&
                    !(ppd->lflags & QIBL_LINKACTIVE)) {
                        /*
                         * This can happen when trying to bring the link
                         * up, but the IB link changes state at the "wrong"
                         * time.  The IB logic then complains that the packet
                         * isn't valid.  We don't want to confuse people, so
                         * we just don't print them, except at debug
                         */
                        ignore_this_time = errs & E_SUM_LINK_PKTERRS;
                }
        } else if ((errs & E_SUM_LINK_PKTERRS) &&
                   !(ppd->lflags & QIBL_LINKACTIVE)) {
                /*
                 * This can happen when SMA is trying to bring the link
                 * up, but the IB link changes state at the "wrong" time.
                 * The IB logic then complains that the packet isn't
                 * valid.  We don't want to confuse people, so we just
                 * don't print them, except at debug
                 */
                ignore_this_time = errs & E_SUM_LINK_PKTERRS;
        }

        qib_write_kreg(dd, kr_errclear, errs);

        errs &= ~ignore_this_time;
        if (!errs)
                goto done;

        /*
         * The ones we mask off are handled specially below
         * or above.  Also mask SDMADISABLED by default as it
         * is too chatty.
         */
        mask = ERR_MASK(IBStatusChanged) |
                ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr) |
                ERR_MASK(HardwareErr) | ERR_MASK(SDmaDisabledErr);

        qib_decode_7220_err(dd, msg, sizeof(dd->cspec->emsgbuf), errs & ~mask);

        if (errs & E_SUM_PKTERRS)
                qib_stats.sps_rcverrs++;
        if (errs & E_SUM_ERRS)
                qib_stats.sps_txerrs++;
        iserr = errs & ~(E_SUM_PKTERRS | QLOGIC_IB_E_PKTERRS |
                         ERR_MASK(SDmaDisabledErr));

        if (errs & ERR_MASK(IBStatusChanged)) {
                u64 ibcs;

                ibcs = qib_read_kreg64(dd, kr_ibcstatus);
                if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
                        handle_7220_chase(ppd, ibcs);

                /* Update our picture of width and speed from chip */
                ppd->link_width_active =
                        ((ibcs >> IBA7220_LINKWIDTH_SHIFT) & 1) ?
                        IB_WIDTH_4X : IB_WIDTH_1X;
                ppd->link_speed_active =
                        ((ibcs >> IBA7220_LINKSPEED_SHIFT) & 1) ?
                        QIB_IB_DDR : QIB_IB_SDR;

                /*
                 * Since going into a recovery state causes the link state
                 * to go down and since recovery is transitory, it is better
                 * if we "miss" ever seeing the link training state go into
                 * recovery (i.e., ignore this transition for link state
                 * special handling purposes) without updating lastibcstat.
                 */
                if (qib_7220_phys_portstate(ibcs) !=
                                IB_PHYSPORTSTATE_LINK_ERR_RECOVER)
                        qib_handle_e_ibstatuschanged(ppd, ibcs);
        }

        if (errs & ERR_MASK(ResetNegated)) {
                qib_dev_err(dd,
                        "Got reset, requires re-init (unload and reload driver)\n");
                dd->flags &= ~QIB_INITTED;  /* needs re-init */
                /* mark as having had error */
                *dd->devstatusp |= QIB_STATUS_HWERROR;
                *dd->pport->statusp &= ~QIB_STATUS_IB_CONF;
        }

        if (*msg && iserr)
                qib_dev_porterr(dd, ppd->port, "%s error\n", msg);

        if (ppd->state_wanted & ppd->lflags)
                wake_up_interruptible(&ppd->state_wait);

        /*
         * If there were hdrq or egrfull errors, wake up any processes
         * waiting in poll.  We used to try to check which contexts had
         * the overflow, but given the cost of that and the chip reads
         * to support it, it's better to just wake everybody up if we
         * get an overflow; waiters can poll again if it's not them.
         */
        if (errs & (ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr))) {
                qib_handle_urcv(dd, ~0U);
                if (errs & ERR_MASK(RcvEgrFullErr))
                        qib_stats.sps_buffull++;
                else
                        qib_stats.sps_hdrfull++;
        }
done:
        return;
}
/* enable/disable chip from delivering interrupts */
static void qib_7220_set_intr_state(struct qib_devdata *dd, u32 enable)
{
        if (enable) {
                if (dd->flags & QIB_BADINTR)
                        return;
                qib_write_kreg(dd, kr_intmask, ~0ULL);
                /* force re-interrupt of any pending interrupts. */
                qib_write_kreg(dd, kr_intclear, 0ULL);
        } else
                qib_write_kreg(dd, kr_intmask, 0ULL);
}
/*
 * Try to cleanup as much as possible for anything that might have gone
 * wrong while in freeze mode, such as pio buffers being written by user
 * processes (causing armlaunch), send errors due to going into freeze mode,
 * etc., and try to avoid causing extra interrupts while doing so.
 * Forcibly update the in-memory pioavail register copies after cleanup
 * because the chip won't do it while in freeze mode (the register values
 * themselves are kept correct).
 * Make sure that we don't lose any important interrupts by using the chip
 * feature that says that writing 0 to a bit in *clear that is set in
 * *status will cause an interrupt to be generated again (if allowed by
 * the mask value, of course).
 * This is in chip-specific code because of all of the register accesses,
 * even though the details are similar on most chips.
 */
static void qib_7220_clear_freeze(struct qib_devdata *dd)
{
        /* disable error interrupts, to avoid confusion */
        qib_write_kreg(dd, kr_errmask, 0ULL);

        /* also disable interrupts; errormask is sometimes overwritten */
        qib_7220_set_intr_state(dd, 0);

        qib_cancel_sends(dd->pport);

        /* clear the freeze, and be sure chip saw it */
        qib_write_kreg(dd, kr_control, dd->control);
        qib_read_kreg32(dd, kr_scratch);

        /* force in-memory update now we are out of freeze */
        qib_force_pio_avail_update(dd);

        /*
         * force new interrupt if any hwerr, error or interrupt bits are
         * still set, and clear "safe" send packet errors related to freeze
         * and cancelling sends.  Re-enable error interrupts before possible
         * force of re-interrupt on pending interrupts.
         */
        qib_write_kreg(dd, kr_hwerrclear, 0ULL);
        qib_write_kreg(dd, kr_errclear, E_SPKT_ERRS_IGNORE);
        qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
        qib_7220_set_intr_state(dd, 1);
}
/**
 * qib_7220_handle_hwerrors - display hardware errors.
 * @dd: the qlogic_ib device
 * @msg: the output buffer
 * @msgl: the size of the output buffer
 *
 * Most hardware errors are catastrophic, but for right now we'll print
 * them and continue.  We reuse the same message buffer as
 * handle_7220_errors() to avoid excessive stack usage.
 */
static void qib_7220_handle_hwerrors(struct qib_devdata *dd, char *msg,
                                     size_t msgl)
{
        u64 hwerrs;
        u32 bits, ctrl;
        int isfatal = 0;
        char *bitsmsg;
        int log_idx;

        hwerrs = qib_read_kreg64(dd, kr_hwerrstatus);
        if (!hwerrs)
                goto bail;
        if (hwerrs == ~0ULL) {
                qib_dev_err(dd,
                        "Read of hardware error status failed (all bits set); ignoring\n");
                goto bail;
        }
        qib_stats.sps_hwerrs++;

        /*
         * Always clear the error status register, except MEMBISTFAIL,
         * regardless of whether we continue or stop using the chip.
         * We want that set so we know it failed, even across driver reload.
         * We'll still ignore it in the hwerrmask.  We do this partly for
         * diagnostics, but also for support.
         */
        qib_write_kreg(dd, kr_hwerrclear,
                       hwerrs & ~HWE_MASK(PowerOnBISTFailed));

        hwerrs &= dd->cspec->hwerrmask;

        /* We log some errors to EEPROM, check if we have any of those. */
        for (log_idx = 0; log_idx < QIB_EEP_LOG_CNT; ++log_idx)
                if (hwerrs & dd->eep_st_masks[log_idx].hwerrs_to_log)
                        qib_inc_eeprom_err(dd, log_idx, 1);
        if (hwerrs & ~(TXEMEMPARITYERR_PIOBUF | TXEMEMPARITYERR_PIOPBC |
                       RXE_PARITY))
                qib_devinfo(dd->pcidev,
                        "Hardware error: hwerr=0x%llx (cleared)\n",
                        (unsigned long long) hwerrs);

        if (hwerrs & ~IB_HWE_BITSEXTANT)
                qib_dev_err(dd,
                        "hwerror interrupt with unknown errors %llx set\n",
                        (unsigned long long) (hwerrs & ~IB_HWE_BITSEXTANT));

        if (hwerrs & QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR)
                qib_sd7220_clr_ibpar(dd);

        ctrl = qib_read_kreg32(dd, kr_control);
        if ((ctrl & QLOGIC_IB_C_FREEZEMODE) && !dd->diag_client) {
                /*
                 * Parity errors in send memory are recoverable by h/w
                 * just do housekeeping, exit freeze mode and continue.
                 */
                if (hwerrs & (TXEMEMPARITYERR_PIOBUF |
                              TXEMEMPARITYERR_PIOPBC)) {
                        qib_7220_txe_recover(dd);
                        hwerrs &= ~(TXEMEMPARITYERR_PIOBUF |
                                    TXEMEMPARITYERR_PIOPBC);
                }
                if (hwerrs)
                        isfatal = 1;
                else
                        qib_7220_clear_freeze(dd);
        }

        if (hwerrs & HWE_MASK(PowerOnBISTFailed)) {
                isfatal = 1;
                strlcat(msg,
                        "[Memory BIST test failed, InfiniPath hardware unusable]",
                        msgl);
                /* ignore from now on, so disable until driver reloaded */
                dd->cspec->hwerrmask &= ~HWE_MASK(PowerOnBISTFailed);
                qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
        }

        qib_format_hwerrors(hwerrs, qib_7220_hwerror_msgs,
                            ARRAY_SIZE(qib_7220_hwerror_msgs), msg, msgl);

        bitsmsg = dd->cspec->bitsmsgbuf;
        if (hwerrs & (QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK <<
                      QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT)) {
                bits = (u32) ((hwerrs >>
                               QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT) &
                              QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK);
                snprintf(bitsmsg, sizeof(dd->cspec->bitsmsgbuf),
                         "[PCIe Mem Parity Errs %x] ", bits);
                strlcat(msg, bitsmsg, msgl);
        }

#define _QIB_PLL_FAIL (QLOGIC_IB_HWE_COREPLL_FBSLIP | \
                       QLOGIC_IB_HWE_COREPLL_RFSLIP)

        if (hwerrs & _QIB_PLL_FAIL) {
                isfatal = 1;
                snprintf(bitsmsg, sizeof(dd->cspec->bitsmsgbuf),
                         "[PLL failed (%llx), InfiniPath hardware unusable]",
                         (unsigned long long) hwerrs & _QIB_PLL_FAIL);
                strlcat(msg, bitsmsg, msgl);
                /* ignore from now on, so disable until driver reloaded */
                dd->cspec->hwerrmask &= ~(hwerrs & _QIB_PLL_FAIL);
                qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
        }

        if (hwerrs & QLOGIC_IB_HWE_SERDESPLLFAILED) {
                /*
                 * If it occurs, it is left masked since the external
                 * interface is unused.
                 */
                dd->cspec->hwerrmask &= ~QLOGIC_IB_HWE_SERDESPLLFAILED;
                qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
        }

        if (*msg)
                qib_dev_err(dd, "%s hardware error\n", msg);

        if (isfatal && !dd->diag_client) {
                qib_dev_err(dd,
                        "Fatal Hardware Error, no longer usable, SN %.16s\n",
                        dd->serial);
                /*
                 * For /sys status file and user programs to print; if no
                 * trailing brace is copied, we'll know it was truncated.
                 */
                if (dd->freezemsg)
                        snprintf(dd->freezemsg, dd->freezelen,
                                 "{%s}", msg);
                qib_disable_after_error(dd);
        }
bail:;
}
/**
 * qib_7220_init_hwerrors - enable hardware errors
 * @dd: the qlogic_ib device
 *
 * Now that we have finished initializing everything that might reasonably
 * cause a hardware error, and cleared those error bits as they occur,
 * we can enable hardware errors in the mask (potentially enabling
 * freeze mode), and enable hardware errors as errors (along with
 * everything else) in errormask.
 */
static void qib_7220_init_hwerrors(struct qib_devdata *dd)
{
        u64 val;
        u64 extsval;

        extsval = qib_read_kreg64(dd, kr_extstatus);

        if (!(extsval & (QLOGIC_IB_EXTS_MEMBIST_ENDTEST |
                         QLOGIC_IB_EXTS_MEMBIST_DISABLED)))
                qib_dev_err(dd, "MemBIST did not complete!\n");
        if (extsval & QLOGIC_IB_EXTS_MEMBIST_DISABLED)
                qib_devinfo(dd->pcidev, "MemBIST is disabled.\n");

        val = ~0ULL;    /* default to all hwerrors become interrupts, */

        val &= ~QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR;
        dd->cspec->hwerrmask = val;

        qib_write_kreg(dd, kr_hwerrclear, ~HWE_MASK(PowerOnBISTFailed));
        qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);

        /* clear all */
        qib_write_kreg(dd, kr_errclear, ~0ULL);
        /* enable errors that are masked, at least this first time. */
        qib_write_kreg(dd, kr_errmask, ~0ULL);
        dd->cspec->errormask = qib_read_kreg64(dd, kr_errmask);
        /* clear any interrupts up to this point (ints still not enabled) */
        qib_write_kreg(dd, kr_intclear, ~0ULL);
}
/*
 * Disable and enable the armlaunch error.  Used for PIO bandwidth testing
 * on chips that are count-based, rather than trigger-based.  There is no
 * reference counting, but that's also fine, given the intended use.
 * Only chip-specific because it's all register accesses.
 */
static void qib_set_7220_armlaunch(struct qib_devdata *dd, u32 enable)
{
        if (enable) {
                qib_write_kreg(dd, kr_errclear, ERR_MASK(SendPioArmLaunchErr));
                dd->cspec->errormask |= ERR_MASK(SendPioArmLaunchErr);
        } else
                dd->cspec->errormask &= ~ERR_MASK(SendPioArmLaunchErr);
        qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
}
/*
 * Formerly took parameter <which> in pre-shifted,
 * pre-merged form with LinkCmd and LinkInitCmd
 * together, and assuming the zero was NOP.
 */
static void qib_set_ib_7220_lstate(struct qib_pportdata *ppd, u16 linkcmd,
                                   u16 linitcmd)
{
        u64 mod_wd;
        struct qib_devdata *dd = ppd->dd;
        unsigned long flags;

        if (linitcmd == QLOGIC_IB_IBCC_LINKINITCMD_DISABLE) {
                /*
                 * If we are told to disable, note that so link-recovery
                 * code does not attempt to bring us back up.
                 */
                spin_lock_irqsave(&ppd->lflags_lock, flags);
                ppd->lflags |= QIBL_IB_LINK_DISABLED;
                spin_unlock_irqrestore(&ppd->lflags_lock, flags);
        } else if (linitcmd || linkcmd == QLOGIC_IB_IBCC_LINKCMD_DOWN) {
                /*
                 * Any other linkinitcmd will lead to LINKDOWN and then
                 * to INIT (if all is well), so clear flag to let
                 * link-recovery code attempt to bring us back up.
                 */
                spin_lock_irqsave(&ppd->lflags_lock, flags);
                ppd->lflags &= ~QIBL_IB_LINK_DISABLED;
                spin_unlock_irqrestore(&ppd->lflags_lock, flags);
        }

        mod_wd = (linkcmd << IBA7220_IBCC_LINKCMD_SHIFT) |
                 (linitcmd << QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);

        qib_write_kreg(dd, kr_ibcctrl, ppd->cpspec->ibcctrl | mod_wd);
        /* write to chip to prevent back-to-back writes of ibc reg */
        qib_write_kreg(dd, kr_scratch, 0);
}
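
/*
 * A typical call, as in reenable_7220_chase() above: take the link down
 * and return to polling with
 * qib_set_ib_7220_lstate(ppd, QLOGIC_IB_IBCC_LINKCMD_DOWN,
 *                        QLOGIC_IB_IBCC_LINKINITCMD_POLL);
 */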
/*
 * All detailed interaction with the SerDes has been moved to qib_sd7220.c
 *
 * The portion of IBA7220-specific bringup_serdes() that actually deals with
 * registers and memory within the SerDes itself is qib_sd7220_init().
 */
/**
 * qib_7220_bringup_serdes - bring up the serdes
 * @ppd: physical port on the qlogic_ib device
 */
static int qib_7220_bringup_serdes(struct qib_pportdata *ppd)
{
        struct qib_devdata *dd = ppd->dd;
        u64 val, prev_val, guid, ibc;
        int ret = 0;

        /* Put IBC in reset, sends disabled */
        dd->control &= ~QLOGIC_IB_C_LINKENABLE;
        qib_write_kreg(dd, kr_control, 0ULL);

        if (qib_compat_ddr_negotiate) {
                ppd->cpspec->ibdeltainprog = 1;
                ppd->cpspec->ibsymsnap = read_7220_creg32(dd, cr_ibsymbolerr);
                ppd->cpspec->iblnkerrsnap =
                        read_7220_creg32(dd, cr_iblinkerrrecov);
        }

        /* flowcontrolwatermark is in units of KBytes */
        ibc = 0x5ULL << SYM_LSB(IBCCtrl, FlowCtrlWaterMark);
        /*
         * How often flowctrl sent.  More or less in usecs; balance against
         * watermark value, so that in theory senders always get a flow
         * control update in time to not let the IB link go idle.
         */
        ibc |= 0x3ULL << SYM_LSB(IBCCtrl, FlowCtrlPeriod);
        /* max error tolerance */
        ibc |= 0xfULL << SYM_LSB(IBCCtrl, PhyerrThreshold);
        /* use "real" buffer space for */
        ibc |= 4ULL << SYM_LSB(IBCCtrl, CreditScale);
        /* IB credit flow control. */
        ibc |= 0xfULL << SYM_LSB(IBCCtrl, OverrunThreshold);
        /*
         * set initial max size pkt IBC will send, including ICRC; it's the
         * PIO buffer size in dwords, less 1; also see qib_set_mtu()
         */
        ibc |= ((u64)(ppd->ibmaxlen >> 2) + 1) << SYM_LSB(IBCCtrl, MaxPktLen);
        ppd->cpspec->ibcctrl = ibc; /* without linkcmd or linkinitcmd! */

        /* initially come up waiting for TS1, without sending anything. */
        val = ppd->cpspec->ibcctrl | (QLOGIC_IB_IBCC_LINKINITCMD_DISABLE <<
                QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
        qib_write_kreg(dd, kr_ibcctrl, val);
1581 if (!ppd->cpspec->ibcddrctrl) {
1582 /* not on re-init after reset */
1583 ppd->cpspec->ibcddrctrl = qib_read_kreg64(dd, kr_ibcddrctrl);
1585 if (ppd->link_speed_enabled == (QIB_IB_SDR | QIB_IB_DDR))
1586 ppd->cpspec->ibcddrctrl |=
1587 IBA7220_IBC_SPEED_AUTONEG_MASK |
1588 IBA7220_IBC_IBTA_1_2_MASK;
1590 ppd->cpspec->ibcddrctrl |=
1591 ppd->link_speed_enabled == QIB_IB_DDR ?
1592 IBA7220_IBC_SPEED_DDR : IBA7220_IBC_SPEED_SDR;
1593 if ((ppd->link_width_enabled & (IB_WIDTH_1X | IB_WIDTH_4X)) ==
1594 (IB_WIDTH_1X | IB_WIDTH_4X))
1595 ppd->cpspec->ibcddrctrl |= IBA7220_IBC_WIDTH_AUTONEG;
1597 ppd->cpspec->ibcddrctrl |=
1598 ppd->link_width_enabled == IB_WIDTH_4X ?
1599 IBA7220_IBC_WIDTH_4X_ONLY :
1600 IBA7220_IBC_WIDTH_1X_ONLY;
1602 /* always enable these on driver reload, not sticky */
1603 ppd->cpspec->ibcddrctrl |=
1604 IBA7220_IBC_RXPOL_MASK << IBA7220_IBC_RXPOL_SHIFT;
1605 ppd->cpspec->ibcddrctrl |=
1606 IBA7220_IBC_HRTBT_MASK << IBA7220_IBC_HRTBT_SHIFT;
1608 /* enable automatic lane reversal detection for receive */
1609 ppd->cpspec->ibcddrctrl |= IBA7220_IBC_LANE_REV_SUPPORTED;
1611 /* write to chip to prevent back-to-back writes of ibc reg */
1612 qib_write_kreg(dd, kr_scratch, 0);
1614 qib_write_kreg(dd, kr_ibcddrctrl, ppd->cpspec->ibcddrctrl);
1615 qib_write_kreg(dd, kr_scratch, 0);
	qib_write_kreg(dd, kr_ncmodectrl, 0ULL);
1618 qib_write_kreg(dd, kr_scratch, 0);
1620 ret = qib_sd7220_init(dd);
1622 val = qib_read_kreg64(dd, kr_xgxs_cfg);
1624 val |= QLOGIC_IB_XGXS_FC_SAFE;
1625 if (val != prev_val) {
1626 qib_write_kreg(dd, kr_xgxs_cfg, val);
1627 qib_read_kreg32(dd, kr_scratch);
1629 if (val & QLOGIC_IB_XGXS_RESET)
1630 val &= ~QLOGIC_IB_XGXS_RESET;
1631 if (val != prev_val)
1632 qib_write_kreg(dd, kr_xgxs_cfg, val);
1634 /* first time through, set port guid */
1636 ppd->guid = dd->base_guid;
1637 guid = be64_to_cpu(ppd->guid);
1639 qib_write_kreg(dd, kr_hrtbt_guid, guid);
1641 dd->control |= QLOGIC_IB_C_LINKENABLE;
1642 qib_write_kreg(dd, kr_control, dd->control);
1644 /* write to chip to prevent back-to-back writes of ibc reg */
1645 qib_write_kreg(dd, kr_scratch, 0);
1650 * qib_7220_quiet_serdes - set serdes to txidle
1651 * @ppd: physical port of the qlogic_ib device
1652 * Called when driver is being unloaded
1654 static void qib_7220_quiet_serdes(struct qib_pportdata *ppd)
1657 struct qib_devdata *dd = ppd->dd;
1658 unsigned long flags;
1661 dd->control &= ~QLOGIC_IB_C_LINKENABLE;
1662 qib_write_kreg(dd, kr_control,
1663 dd->control | QLOGIC_IB_C_FREEZEMODE);
1665 ppd->cpspec->chase_end = 0;
1666 if (ppd->cpspec->chase_timer.data) /* if initted */
1667 del_timer_sync(&ppd->cpspec->chase_timer);
1669 if (ppd->cpspec->ibsymdelta || ppd->cpspec->iblnkerrdelta ||
1670 ppd->cpspec->ibdeltainprog) {
1673 /* enable counter writes */
1674 diagc = qib_read_kreg64(dd, kr_hwdiagctrl);
1675 qib_write_kreg(dd, kr_hwdiagctrl,
1676 diagc | SYM_MASK(HwDiagCtrl, CounterWrEnable));
1678 if (ppd->cpspec->ibsymdelta || ppd->cpspec->ibdeltainprog) {
1679 val = read_7220_creg32(dd, cr_ibsymbolerr);
1680 if (ppd->cpspec->ibdeltainprog)
1681 val -= val - ppd->cpspec->ibsymsnap;
1682 val -= ppd->cpspec->ibsymdelta;
1683 write_7220_creg(dd, cr_ibsymbolerr, val);
1685 if (ppd->cpspec->iblnkerrdelta || ppd->cpspec->ibdeltainprog) {
1686 val = read_7220_creg32(dd, cr_iblinkerrrecov);
1687 if (ppd->cpspec->ibdeltainprog)
1688 val -= val - ppd->cpspec->iblnkerrsnap;
1689 val -= ppd->cpspec->iblnkerrdelta;
1690 write_7220_creg(dd, cr_iblinkerrrecov, val);
1693 /* and disable counter writes */
1694 qib_write_kreg(dd, kr_hwdiagctrl, diagc);
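	/*
	 * The snapshot/delta bookkeeping above keeps symbol-error and
	 * link-error-recovery counts accumulated during driver-initiated
	 * link training (e.g. DDR autonegotiation) from being reported to
	 * users as real link errors: counts are snapshotted at bringup,
	 * and the adjusted values are written back here at unload.
	 */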
1696 qib_set_ib_7220_lstate(ppd, 0, QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
1698 spin_lock_irqsave(&ppd->lflags_lock, flags);
1699 ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
1700 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
1701 wake_up(&ppd->cpspec->autoneg_wait);
1702 cancel_delayed_work_sync(&ppd->cpspec->autoneg_work);
1704 shutdown_7220_relock_poll(ppd->dd);
1705 val = qib_read_kreg64(ppd->dd, kr_xgxs_cfg);
1706 val |= QLOGIC_IB_XGXS_RESET;
1707 qib_write_kreg(ppd->dd, kr_xgxs_cfg, val);
1711 * qib_setup_7220_setextled - set the state of the two external LEDs
 * @ppd: the qlogic_ib port data
1713 * @on: whether the link is up or not
 * The exact combo of LEDs if on is true is determined by looking
 * at the ibcstatus.
1718 * These LEDs indicate the physical and logical state of IB link.
1719 * For this chip (at least with recommended board pinouts), LED1
1720 * is Yellow (logical state) and LED2 is Green (physical state),
1722 * Note: We try to match the Mellanox HCA LED behavior as best
1723 * we can. Green indicates physical link state is OK (something is
1724 * plugged in, and we can train).
1725 * Amber indicates the link is logically up (ACTIVE).
1726 * Mellanox further blinks the amber LED to indicate data packet
1727 * activity, but we have no hardware support for that, so it would
1728 * require waking up every 10-20 msecs and checking the counters
1729 * on the chip, and then turning the LED off if appropriate. That's
1730 * visible overhead, so not something we will do.
1733 static void qib_setup_7220_setextled(struct qib_pportdata *ppd, u32 on)
1735 struct qib_devdata *dd = ppd->dd;
1736 u64 extctl, ledblink = 0, val, lst, ltst;
1737 unsigned long flags;
1740 * The diags use the LED to indicate diag info, so we leave
1741 * the external LED alone when the diags are running.
1743 if (dd->diag_client)
1746 if (ppd->led_override) {
1747 ltst = (ppd->led_override & QIB_LED_PHYS) ?
			IB_PHYSPORTSTATE_LINKUP : IB_PHYSPORTSTATE_DISABLED;
1749 lst = (ppd->led_override & QIB_LED_LOG) ?
1750 IB_PORT_ACTIVE : IB_PORT_DOWN;
1752 val = qib_read_kreg64(dd, kr_ibcstatus);
1753 ltst = qib_7220_phys_portstate(val);
1754 lst = qib_7220_iblink_state(val);
1760 spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
1761 extctl = dd->cspec->extctrl & ~(SYM_MASK(EXTCtrl, LEDPriPortGreenOn) |
1762 SYM_MASK(EXTCtrl, LEDPriPortYellowOn));
1763 if (ltst == IB_PHYSPORTSTATE_LINKUP) {
1764 extctl |= SYM_MASK(EXTCtrl, LEDPriPortGreenOn);
1766 * counts are in chip clock (4ns) periods.
		 * This is about 66.6 ms on,
		 * 187.5 ms (3/16 sec) off, with packets rcvd
1770 ledblink = ((66600 * 1000UL / 4) << IBA7220_LEDBLINK_ON_SHIFT)
1771 | ((187500 * 1000UL / 4) << IBA7220_LEDBLINK_OFF_SHIFT);
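		/*
		 * Arithmetic: 66600 us * 1000 / 4 ns-per-clock =
		 * 16,650,000 clocks on; 187500 us gives 46,875,000
		 * clocks off, roughly a 1:3 on/off duty cycle.
		 */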
1773 if (lst == IB_PORT_ACTIVE)
1774 extctl |= SYM_MASK(EXTCtrl, LEDPriPortYellowOn);
1775 dd->cspec->extctrl = extctl;
1776 qib_write_kreg(dd, kr_extctrl, extctl);
1777 spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
1779 if (ledblink) /* blink the LED on packet receive */
1780 qib_write_kreg(dd, kr_rcvpktledcnt, ledblink);
1783 static void qib_7220_free_irq(struct qib_devdata *dd)
1785 if (dd->cspec->irq) {
1786 free_irq(dd->cspec->irq, dd);
1793 * qib_setup_7220_cleanup - clean up any per-chip chip-specific stuff
1794 * @dd: the qlogic_ib device
1796 * This is called during driver unload.
1799 static void qib_setup_7220_cleanup(struct qib_devdata *dd)
1801 qib_7220_free_irq(dd);
1802 kfree(dd->cspec->cntrs);
1803 kfree(dd->cspec->portcntrs);
1807 * This is only called for SDmaInt.
1808 * SDmaDisabled is handled on the error path.
1810 static void sdma_7220_intr(struct qib_pportdata *ppd, u64 istat)
1812 unsigned long flags;
1814 spin_lock_irqsave(&ppd->sdma_lock, flags);
1816 switch (ppd->sdma_state.current_state) {
1817 case qib_sdma_state_s00_hw_down:
1820 case qib_sdma_state_s10_hw_start_up_wait:
1821 __qib_sdma_process_event(ppd, qib_sdma_event_e20_hw_started);
1824 case qib_sdma_state_s20_idle:
1827 case qib_sdma_state_s30_sw_clean_up_wait:
1830 case qib_sdma_state_s40_hw_clean_up_wait:
1833 case qib_sdma_state_s50_hw_halt_wait:
1834 __qib_sdma_process_event(ppd, qib_sdma_event_e60_hw_halted);
1837 case qib_sdma_state_s99_running:
1838 /* too chatty to print here */
1839 __qib_sdma_intr(ppd);
1842 spin_unlock_irqrestore(&ppd->sdma_lock, flags);
1845 static void qib_wantpiobuf_7220_intr(struct qib_devdata *dd, u32 needint)
1847 unsigned long flags;
1849 spin_lock_irqsave(&dd->sendctrl_lock, flags);
1851 if (!(dd->sendctrl & SYM_MASK(SendCtrl, SendBufAvailUpd)))
		 * Blip the availupd off; the next write turns it back on,
		 * so we ensure an avail update, regardless of threshold or
		 * buffers becoming free, whenever we want an interrupt.
1858 qib_write_kreg(dd, kr_sendctrl, dd->sendctrl &
1859 ~SYM_MASK(SendCtrl, SendBufAvailUpd));
1860 qib_write_kreg(dd, kr_scratch, 0ULL);
1861 dd->sendctrl |= SYM_MASK(SendCtrl, SendIntBufAvail);
1863 dd->sendctrl &= ~SYM_MASK(SendCtrl, SendIntBufAvail);
1864 qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
1865 qib_write_kreg(dd, kr_scratch, 0ULL);
1867 spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
1871 * Handle errors and unusual events first, separate function
1872 * to improve cache hits for fast path interrupt handling.
1874 static noinline void unlikely_7220_intr(struct qib_devdata *dd, u64 istat)
1876 if (unlikely(istat & ~QLOGIC_IB_I_BITSEXTANT))
1878 "interrupt with unknown interrupts %Lx set\n",
1879 istat & ~QLOGIC_IB_I_BITSEXTANT);
1881 if (istat & QLOGIC_IB_I_GPIO) {
1885 * Boards for this chip currently don't use GPIO interrupts,
1886 * so clear by writing GPIOstatus to GPIOclear, and complain
1887 * to alert developer. To avoid endless repeats, clear
1888 * the bits in the mask, since there is some kind of
1889 * programming error or chip problem.
1891 gpiostatus = qib_read_kreg32(dd, kr_gpio_status);
1893 * In theory, writing GPIOstatus to GPIOclear could
1894 * have a bad side-effect on some diagnostic that wanted
1895 * to poll for a status-change, but the various shadows
1896 * make that problematic at best. Diags will just suppress
1897 * all GPIO interrupts during such tests.
1899 qib_write_kreg(dd, kr_gpio_clear, gpiostatus);
1902 const u32 mask = qib_read_kreg32(dd, kr_gpio_mask);
1903 u32 gpio_irq = mask & gpiostatus;
1906 * A bit set in status and (chip) Mask register
1907 * would cause an interrupt. Since we are not
1908 * expecting any, report it. Also check that the
1909 * chip reflects our shadow, report issues,
1910 * and refresh from the shadow.
			 * Clear any troublemakers, and update chip
			 * from the shadow
1916 dd->cspec->gpio_mask &= ~gpio_irq;
1917 qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
1921 if (istat & QLOGIC_IB_I_ERROR) {
1924 qib_stats.sps_errints++;
1925 estat = qib_read_kreg64(dd, kr_errstatus);
1927 qib_devinfo(dd->pcidev,
1928 "error interrupt (%Lx), but no error bits set!\n",
1931 handle_7220_errors(dd, estat);
1935 static irqreturn_t qib_7220intr(int irq, void *data)
1937 struct qib_devdata *dd = data;
1944 if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT) {
1946 * This return value is not great, but we do not want the
1947 * interrupt core code to remove our interrupt handler
1948 * because we don't appear to be handling an interrupt
1949 * during a chip reset.
1955 istat = qib_read_kreg64(dd, kr_intstatus);
1957 if (unlikely(!istat)) {
1958 ret = IRQ_NONE; /* not our interrupt, or already handled */
1961 if (unlikely(istat == -1)) {
1962 qib_bad_intrstatus(dd);
1963 /* don't know if it was our interrupt or not */
1968 this_cpu_inc(*dd->int_counter);
1969 if (unlikely(istat & (~QLOGIC_IB_I_BITSEXTANT |
1970 QLOGIC_IB_I_GPIO | QLOGIC_IB_I_ERROR)))
1971 unlikely_7220_intr(dd, istat);
1974 * Clear the interrupt bits we found set, relatively early, so we
1975 * "know" know the chip will have seen this by the time we process
1976 * the queue, and will re-interrupt if necessary. The processor
1977 * itself won't take the interrupt again until we return.
1979 qib_write_kreg(dd, kr_intclear, istat);
1982 * Handle kernel receive queues before checking for pio buffers
1983 * available since receives can overflow; piobuf waiters can afford
1984 * a few extra cycles, since they were waiting anyway.
1987 ((QLOGIC_IB_I_RCVAVAIL_MASK << QLOGIC_IB_I_RCVAVAIL_SHIFT) |
1988 (QLOGIC_IB_I_RCVURG_MASK << QLOGIC_IB_I_RCVURG_SHIFT));
1990 rmask = (1ULL << QLOGIC_IB_I_RCVAVAIL_SHIFT) |
1991 (1ULL << QLOGIC_IB_I_RCVURG_SHIFT);
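	/*
	 * Each receive context owns one RcvAvail and one RcvUrg bit
	 * (context i at bit SHIFT + i); rmask is expected to advance one
	 * bit per loop pass, so the loop below peels off the kernel
	 * contexts and leaves user-context bits for qib_handle_urcv().
	 */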
1992 for (i = 0; i < dd->first_user_ctxt; i++) {
1993 if (ctxtrbits & rmask) {
1994 ctxtrbits &= ~rmask;
1995 qib_kreceive(dd->rcd[i], NULL, NULL);
2001 (ctxtrbits >> QLOGIC_IB_I_RCVAVAIL_SHIFT) |
2002 (ctxtrbits >> QLOGIC_IB_I_RCVURG_SHIFT);
2003 qib_handle_urcv(dd, ctxtrbits);
2007 /* only call for SDmaInt */
2008 if (istat & QLOGIC_IB_I_SDMAINT)
2009 sdma_7220_intr(dd->pport, istat);
2011 if ((istat & QLOGIC_IB_I_SPIOBUFAVAIL) && (dd->flags & QIB_INITTED))
2012 qib_ib_piobufavail(dd);
2020 * Set up our chip-specific interrupt handler.
2021 * The interrupt type has already been setup, so
2022 * we just need to do the registration and error checking.
2023 * If we are using MSI interrupts, we may fall back to
2024 * INTx later, if the interrupt handler doesn't get called
2025 * within 1/2 second (see verify_interrupt()).
2027 static void qib_setup_7220_interrupt(struct qib_devdata *dd)
2029 if (!dd->cspec->irq)
2031 "irq is 0, BIOS error? Interrupts won't work\n");
2033 int ret = request_irq(dd->cspec->irq, qib_7220intr,
2034 dd->msi_lo ? 0 : IRQF_SHARED,
2039 "Couldn't setup %s interrupt (irq=%d): %d\n",
2040 dd->msi_lo ? "MSI" : "INTx",
2041 dd->cspec->irq, ret);
2046 * qib_7220_boardname - fill in the board name
2047 * @dd: the qlogic_ib device
2049 * info is based on the board revision register
2051 static void qib_7220_boardname(struct qib_devdata *dd)
2055 boardid = SYM_FIELD(dd->revision, Revision,
2060 dd->boardname = "InfiniPath_QLE7240";
2063 dd->boardname = "InfiniPath_QLE7280";
2066 qib_dev_err(dd, "Unknown 7220 board with ID %u\n", boardid);
2067 dd->boardname = "Unknown_InfiniPath_7220";
2071 if (dd->majrev != 5 || !dd->minrev || dd->minrev > 2)
2073 "Unsupported InfiniPath hardware revision %u.%u!\n",
2074 dd->majrev, dd->minrev);
2076 snprintf(dd->boardversion, sizeof(dd->boardversion),
2077 "ChipABI %u.%u, %s, InfiniPath%u %u.%u, SW Compat %u\n",
2078 QIB_CHIP_VERS_MAJ, QIB_CHIP_VERS_MIN, dd->boardname,
2079 (unsigned int)SYM_FIELD(dd->revision, Revision_R, Arch),
2080 dd->majrev, dd->minrev,
2081 (unsigned int)SYM_FIELD(dd->revision, Revision_R, SW));
2085 * This routine sleeps, so it can only be called from user context, not
2086 * from interrupt context.
2088 static int qib_setup_7220_reset(struct qib_devdata *dd)
2094 u8 int_line, clinesz;
2095 unsigned long flags;
2097 qib_pcie_getcmd(dd, &cmdval, &int_line, &clinesz);
2099 /* Use dev_err so it shows up in logs, etc. */
2100 qib_dev_err(dd, "Resetting InfiniPath unit %u\n", dd->unit);
2102 /* no interrupts till re-initted */
2103 qib_7220_set_intr_state(dd, 0);
2105 dd->pport->cpspec->ibdeltainprog = 0;
2106 dd->pport->cpspec->ibsymdelta = 0;
2107 dd->pport->cpspec->iblnkerrdelta = 0;
2110 * Keep chip from being accessed until we are ready. Use
	 * writeq() directly, to allow the write even though QIB_PRESENT
	 * isn't set.
2114 dd->flags &= ~(QIB_INITTED | QIB_PRESENT);
2115 /* so we check interrupts work again */
2116 dd->z_int_counter = qib_int_counter(dd);
2117 val = dd->control | QLOGIC_IB_C_RESET;
2118 writeq(val, &dd->kregbase[kr_control]);
2119 mb(); /* prevent compiler reordering around actual reset */
2121 for (i = 1; i <= 5; i++) {
2123 * Allow MBIST, etc. to complete; longer on each retry.
2124 * We sometimes get machine checks from bus timeout if no
2125 * response, so for now, make it *really* long.
2127 msleep(1000 + (1 + i) * 2000);
2129 qib_pcie_reenable(dd, cmdval, int_line, clinesz);
2132 * Use readq directly, so we don't need to mark it as PRESENT
2133 * until we get a successful indication that all is well.
2135 val = readq(&dd->kregbase[kr_revision]);
2136 if (val == dd->revision) {
2137 dd->flags |= QIB_PRESENT; /* it's back */
2138 ret = qib_reinit_intr(dd);
2142 ret = 0; /* failed */
2146 if (qib_pcie_params(dd, dd->lbus_width, NULL))
2148 "Reset failed to setup PCIe or interrupts; continuing anyway\n");
2150 /* hold IBC in reset, no sends, etc till later */
2151 qib_write_kreg(dd, kr_control, 0ULL);
2153 /* clear the reset error, init error/hwerror mask */
2154 qib_7220_init_hwerrors(dd);
2156 /* do setup similar to speed or link-width changes */
2157 if (dd->pport->cpspec->ibcddrctrl & IBA7220_IBC_IBTA_1_2_MASK)
2158 dd->cspec->presets_needed = 1;
2159 spin_lock_irqsave(&dd->pport->lflags_lock, flags);
2160 dd->pport->lflags |= QIBL_IB_FORCE_NOTIFY;
2161 dd->pport->lflags &= ~QIBL_IB_AUTONEG_FAILED;
2162 spin_unlock_irqrestore(&dd->pport->lflags_lock, flags);
2169 * qib_7220_put_tid - write a TID to the chip
2170 * @dd: the qlogic_ib device
2171 * @tidptr: pointer to the expected TID (in chip) to update
 * @type: 0 for eager, 1 for expected
2173 * @pa: physical address of in memory buffer; tidinvalid if freeing
2175 static void qib_7220_put_tid(struct qib_devdata *dd, u64 __iomem *tidptr,
2176 u32 type, unsigned long pa)
2178 if (pa != dd->tidinvalid) {
2179 u64 chippa = pa >> IBA7220_TID_PA_SHIFT;
2181 /* paranoia checks */
2182 if (pa != (chippa << IBA7220_TID_PA_SHIFT)) {
2183 qib_dev_err(dd, "Physaddr %lx not 2KB aligned!\n",
2187 if (chippa >= (1UL << IBA7220_TID_SZ_SHIFT)) {
2189 "Physical page address 0x%lx larger than supported\n",
2194 if (type == RCVHQ_RCV_TYPE_EAGER)
2195 chippa |= dd->tidtemplate;
2196 else /* for now, always full 4KB page */
2197 chippa |= IBA7220_TID_SZ_4K;
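		/*
		 * At this point chippa holds the 2KB-aligned physical
		 * address shifted down by IBA7220_TID_PA_SHIFT, with the
		 * buffer-size code (2K template or 4K) OR'd into the low
		 * bits, ready to be written to the TID entry.
		 */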
2205 * qib_7220_clear_tids - clear all TID entries for a ctxt, expected and eager
2206 * @dd: the qlogic_ib device
2209 * clear all TID entries for a ctxt, expected and eager.
2210 * Used from qib_close(). On this chip, TIDs are only 32 bits,
2211 * not 64, but they are still on 64 bit boundaries, so tidbase
2212 * is declared as u64 * for the pointer math, even though we write 32 bits
2214 static void qib_7220_clear_tids(struct qib_devdata *dd,
2215 struct qib_ctxtdata *rcd)
2217 u64 __iomem *tidbase;
2218 unsigned long tidinv;
2222 if (!dd->kregbase || !rcd)
2227 tidinv = dd->tidinvalid;
2228 tidbase = (u64 __iomem *)
2229 ((char __iomem *)(dd->kregbase) +
2231 ctxt * dd->rcvtidcnt * sizeof(*tidbase));
2233 for (i = 0; i < dd->rcvtidcnt; i++)
2234 qib_7220_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EXPECTED,
2237 tidbase = (u64 __iomem *)
2238 ((char __iomem *)(dd->kregbase) +
2240 rcd->rcvegr_tid_base * sizeof(*tidbase));
2242 for (i = 0; i < rcd->rcvegrcnt; i++)
2243 qib_7220_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EAGER,
2248 * qib_7220_tidtemplate - setup constants for TID updates
2249 * @dd: the qlogic_ib device
 * We set up the values we use a lot, to avoid recalculating them each time
2253 static void qib_7220_tidtemplate(struct qib_devdata *dd)
2255 if (dd->rcvegrbufsize == 2048)
2256 dd->tidtemplate = IBA7220_TID_SZ_2K;
2257 else if (dd->rcvegrbufsize == 4096)
2258 dd->tidtemplate = IBA7220_TID_SZ_4K;
2263 * qib_init_7220_get_base_info - set chip-specific flags for user code
2264 * @rcd: the qlogic_ib ctxt
 * @kinfo: qib_base_info pointer
 * We set the PCIE flag because the lower bandwidth on PCIe vs
 * HyperTransport can affect some user packet algorithms.
2270 static int qib_7220_get_base_info(struct qib_ctxtdata *rcd,
2271 struct qib_base_info *kinfo)
2273 kinfo->spi_runtime_flags |= QIB_RUNTIME_PCIE |
2274 QIB_RUNTIME_NODMA_RTAIL | QIB_RUNTIME_SDMA;
2276 if (rcd->dd->flags & QIB_USE_SPCL_TRIG)
2277 kinfo->spi_runtime_flags |= QIB_RUNTIME_SPECIAL_TRIGGER;
2282 static struct qib_message_header *
2283 qib_7220_get_msgheader(struct qib_devdata *dd, __le32 *rhf_addr)
2285 u32 offset = qib_hdrget_offset(rhf_addr);
2287 return (struct qib_message_header *)
2288 (rhf_addr - dd->rhf_offset + offset);
2291 static void qib_7220_config_ctxts(struct qib_devdata *dd)
2293 unsigned long flags;
2296 nchipctxts = qib_read_kreg32(dd, kr_portcnt);
2297 dd->cspec->numctxts = nchipctxts;
2298 if (qib_n_krcv_queues > 1) {
2299 dd->qpn_mask = 0x3e;
2300 dd->first_user_ctxt = qib_n_krcv_queues * dd->num_pports;
2301 if (dd->first_user_ctxt > nchipctxts)
2302 dd->first_user_ctxt = nchipctxts;
2304 dd->first_user_ctxt = dd->num_pports;
2305 dd->n_krcv_queues = dd->first_user_ctxt;
2307 if (!qib_cfgctxts) {
2308 int nctxts = dd->first_user_ctxt + num_online_cpus();
2312 else if (nctxts <= 9)
2314 else if (nctxts <= nchipctxts)
2315 dd->ctxtcnt = nchipctxts;
2316 } else if (qib_cfgctxts <= nchipctxts)
2317 dd->ctxtcnt = qib_cfgctxts;
2318 if (!dd->ctxtcnt) /* none of the above, set to max */
2319 dd->ctxtcnt = nchipctxts;
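	/*
	 * Illustrative example: with one kernel receive queue per port and
	 * 8 online CPUs, nctxts above is 9, selecting the 9-context chip
	 * configuration; more CPUs fall through to the 17-context mode.
	 */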
2322 * Chip can be configured for 5, 9, or 17 ctxts, and choice
2323 * affects number of eager TIDs per ctxt (1K, 2K, 4K).
2324 * Lock to be paranoid about later motion, etc.
2326 spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
2327 if (dd->ctxtcnt > 9)
2328 dd->rcvctrl |= 2ULL << IBA7220_R_CTXTCFG_SHIFT;
2329 else if (dd->ctxtcnt > 5)
2330 dd->rcvctrl |= 1ULL << IBA7220_R_CTXTCFG_SHIFT;
2331 /* else configure for default 5 receive ctxts */
2333 dd->rcvctrl |= 1ULL << QIB_7220_RcvCtrl_RcvQPMapEnable_LSB;
2334 qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl);
2335 spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
2337 /* kr_rcvegrcnt changes based on the number of contexts enabled */
2338 dd->cspec->rcvegrcnt = qib_read_kreg32(dd, kr_rcvegrcnt);
2339 dd->rcvhdrcnt = max(dd->cspec->rcvegrcnt, IBA7220_KRCVEGRCNT);
2342 static int qib_7220_get_ib_cfg(struct qib_pportdata *ppd, int which)
2345 u64 maskr; /* right-justified mask */
2348 case QIB_IB_CFG_LWID_ENB: /* Get allowed Link-width */
2349 ret = ppd->link_width_enabled;
2352 case QIB_IB_CFG_LWID: /* Get currently active Link-width */
2353 ret = ppd->link_width_active;
2356 case QIB_IB_CFG_SPD_ENB: /* Get allowed Link speeds */
2357 ret = ppd->link_speed_enabled;
2360 case QIB_IB_CFG_SPD: /* Get current Link spd */
2361 ret = ppd->link_speed_active;
2364 case QIB_IB_CFG_RXPOL_ENB: /* Get Auto-RX-polarity enable */
2365 lsb = IBA7220_IBC_RXPOL_SHIFT;
2366 maskr = IBA7220_IBC_RXPOL_MASK;
2369 case QIB_IB_CFG_LREV_ENB: /* Get Auto-Lane-reversal enable */
2370 lsb = IBA7220_IBC_LREV_SHIFT;
2371 maskr = IBA7220_IBC_LREV_MASK;
2374 case QIB_IB_CFG_LINKLATENCY:
2375 ret = qib_read_kreg64(ppd->dd, kr_ibcddrstatus)
2376 & IBA7220_DDRSTAT_LINKLAT_MASK;
2379 case QIB_IB_CFG_OP_VLS:
2380 ret = ppd->vls_operational;
2383 case QIB_IB_CFG_VL_HIGH_CAP:
2387 case QIB_IB_CFG_VL_LOW_CAP:
2391 case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
2392 ret = SYM_FIELD(ppd->cpspec->ibcctrl, IBCCtrl,
2396 case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
2397 ret = SYM_FIELD(ppd->cpspec->ibcctrl, IBCCtrl,
2401 case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
2402 /* will only take effect when the link state changes */
2403 ret = (ppd->cpspec->ibcctrl &
2404 SYM_MASK(IBCCtrl, LinkDownDefaultState)) ?
2405 IB_LINKINITCMD_SLEEP : IB_LINKINITCMD_POLL;
2408 case QIB_IB_CFG_HRTBT: /* Get Heartbeat off/enable/auto */
2409 lsb = IBA7220_IBC_HRTBT_SHIFT;
2410 maskr = IBA7220_IBC_HRTBT_MASK;
2413 case QIB_IB_CFG_PMA_TICKS:
		 * 0x00 = 10x link transfer rate, or 4 ns for 2.5 Gb/s.
2416 * Since the clock is always 250MHz, the value is 1 or 0.
2418 ret = (ppd->link_speed_active == QIB_IB_DDR);
2425 ret = (int)((ppd->cpspec->ibcddrctrl >> lsb) & maskr);
2430 static int qib_7220_set_ib_cfg(struct qib_pportdata *ppd, int which, u32 val)
2432 struct qib_devdata *dd = ppd->dd;
2433 u64 maskr; /* right-justified mask */
2434 int lsb, ret = 0, setforce = 0;
2436 unsigned long flags;
2440 case QIB_IB_CFG_LIDLMC:
		 * Set LID and LMC. Combined to avoid possible hazard;
		 * caller puts LMC in the 16 MSbits, DLID in the 16 LSbits of val
2445 lsb = IBA7220_IBC_DLIDLMC_SHIFT;
2446 maskr = IBA7220_IBC_DLIDLMC_MASK;
2449 case QIB_IB_CFG_LWID_ENB: /* set allowed Link-width */
2451 * As with speed, only write the actual register if
2452 * the link is currently down, otherwise takes effect
2453 * on next link change.
2455 ppd->link_width_enabled = val;
2456 if (!(ppd->lflags & QIBL_LINKDOWN))
		 * We set the QIBL_IB_FORCE_NOTIFY bit so updown
		 * will get called because we want to update
		 * link_width_active, and the change may not take
		 * effect for some time (if we are in POLL), so this
		 * flag will force the updown routine to be called
		 * on the next ibstatuschange down interrupt, even
		 * if it's not a down->up transition.
2467 val--; /* convert from IB to chip */
2468 maskr = IBA7220_IBC_WIDTH_MASK;
2469 lsb = IBA7220_IBC_WIDTH_SHIFT;
2473 case QIB_IB_CFG_SPD_ENB: /* set allowed Link speeds */
2475 * If we turn off IB1.2, need to preset SerDes defaults,
2476 * but not right now. Set a flag for the next time
2477 * we command the link down. As with width, only write the
2478 * actual register if the link is currently down, otherwise
2479 * takes effect on next link change. Since setting is being
2480 * explicitly requested (via MAD or sysfs), clear autoneg
2481 * failure status if speed autoneg is enabled.
2483 ppd->link_speed_enabled = val;
2484 if ((ppd->cpspec->ibcddrctrl & IBA7220_IBC_IBTA_1_2_MASK) &&
2486 dd->cspec->presets_needed = 1;
2487 if (!(ppd->lflags & QIBL_LINKDOWN))
		 * We set the QIBL_IB_FORCE_NOTIFY bit so updown
		 * will get called because we want to update
		 * link_speed_active, and the change may not take
		 * effect for some time (if we are in POLL), so this
		 * flag will force the updown routine to be called
		 * on the next ibstatuschange down interrupt, even
		 * if it's not a down->up transition.
2498 if (val == (QIB_IB_SDR | QIB_IB_DDR)) {
2499 val = IBA7220_IBC_SPEED_AUTONEG_MASK |
2500 IBA7220_IBC_IBTA_1_2_MASK;
2501 spin_lock_irqsave(&ppd->lflags_lock, flags);
2502 ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
2503 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
2505 val = val == QIB_IB_DDR ?
2506 IBA7220_IBC_SPEED_DDR : IBA7220_IBC_SPEED_SDR;
2507 maskr = IBA7220_IBC_SPEED_AUTONEG_MASK |
2508 IBA7220_IBC_IBTA_1_2_MASK;
2509 /* IBTA 1.2 mode + speed bits are contiguous */
2510 lsb = SYM_LSB(IBCDDRCtrl, IB_ENHANCED_MODE);
2514 case QIB_IB_CFG_RXPOL_ENB: /* set Auto-RX-polarity enable */
2515 lsb = IBA7220_IBC_RXPOL_SHIFT;
2516 maskr = IBA7220_IBC_RXPOL_MASK;
2519 case QIB_IB_CFG_LREV_ENB: /* set Auto-Lane-reversal enable */
2520 lsb = IBA7220_IBC_LREV_SHIFT;
2521 maskr = IBA7220_IBC_LREV_MASK;
2524 case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
2525 maskr = SYM_FIELD(ppd->cpspec->ibcctrl, IBCCtrl,
2528 ppd->cpspec->ibcctrl &=
2529 ~SYM_MASK(IBCCtrl, OverrunThreshold);
2530 ppd->cpspec->ibcctrl |= (u64) val <<
2531 SYM_LSB(IBCCtrl, OverrunThreshold);
2532 qib_write_kreg(dd, kr_ibcctrl, ppd->cpspec->ibcctrl);
2533 qib_write_kreg(dd, kr_scratch, 0);
2537 case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
2538 maskr = SYM_FIELD(ppd->cpspec->ibcctrl, IBCCtrl,
2541 ppd->cpspec->ibcctrl &=
2542 ~SYM_MASK(IBCCtrl, PhyerrThreshold);
2543 ppd->cpspec->ibcctrl |= (u64) val <<
2544 SYM_LSB(IBCCtrl, PhyerrThreshold);
2545 qib_write_kreg(dd, kr_ibcctrl, ppd->cpspec->ibcctrl);
2546 qib_write_kreg(dd, kr_scratch, 0);
2550 case QIB_IB_CFG_PKEYS: /* update pkeys */
2551 maskr = (u64) ppd->pkeys[0] | ((u64) ppd->pkeys[1] << 16) |
2552 ((u64) ppd->pkeys[2] << 32) |
2553 ((u64) ppd->pkeys[3] << 48);
2554 qib_write_kreg(dd, kr_partitionkey, maskr);
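		/*
		 * Layout of the write above: pkeys[0] in bits 15:0,
		 * pkeys[1] in 31:16, pkeys[2] in 47:32, and pkeys[3] in
		 * 63:48 of the single 64-bit partition key register.
		 */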
2557 case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
2558 /* will only take effect when the link state changes */
2559 if (val == IB_LINKINITCMD_POLL)
2560 ppd->cpspec->ibcctrl &=
2561 ~SYM_MASK(IBCCtrl, LinkDownDefaultState);
2563 ppd->cpspec->ibcctrl |=
2564 SYM_MASK(IBCCtrl, LinkDownDefaultState);
2565 qib_write_kreg(dd, kr_ibcctrl, ppd->cpspec->ibcctrl);
2566 qib_write_kreg(dd, kr_scratch, 0);
2569 case QIB_IB_CFG_MTU: /* update the MTU in IBC */
2571 * Update our housekeeping variables, and set IBC max
2572 * size, same as init code; max IBC is max we allow in
2573 * buffer, less the qword pbc, plus 1 for ICRC, in dwords
		 * Set even if it's unchanged; print a debug message only
		 * if it actually changed.
2577 val = (ppd->ibmaxlen >> 2) + 1;
2578 ppd->cpspec->ibcctrl &= ~SYM_MASK(IBCCtrl, MaxPktLen);
2579 ppd->cpspec->ibcctrl |= (u64)val << SYM_LSB(IBCCtrl, MaxPktLen);
2580 qib_write_kreg(dd, kr_ibcctrl, ppd->cpspec->ibcctrl);
2581 qib_write_kreg(dd, kr_scratch, 0);
2584 case QIB_IB_CFG_LSTATE: /* set the IB link state */
2585 switch (val & 0xffff0000) {
2586 case IB_LINKCMD_DOWN:
2587 lcmd = QLOGIC_IB_IBCC_LINKCMD_DOWN;
2588 if (!ppd->cpspec->ibdeltainprog &&
2589 qib_compat_ddr_negotiate) {
2590 ppd->cpspec->ibdeltainprog = 1;
2591 ppd->cpspec->ibsymsnap =
2592 read_7220_creg32(dd, cr_ibsymbolerr);
2593 ppd->cpspec->iblnkerrsnap =
2594 read_7220_creg32(dd, cr_iblinkerrrecov);
2598 case IB_LINKCMD_ARMED:
2599 lcmd = QLOGIC_IB_IBCC_LINKCMD_ARMED;
2602 case IB_LINKCMD_ACTIVE:
2603 lcmd = QLOGIC_IB_IBCC_LINKCMD_ACTIVE;
2608 qib_dev_err(dd, "bad linkcmd req 0x%x\n", val >> 16);
2611 switch (val & 0xffff) {
2612 case IB_LINKINITCMD_NOP:
2616 case IB_LINKINITCMD_POLL:
2617 licmd = QLOGIC_IB_IBCC_LINKINITCMD_POLL;
2620 case IB_LINKINITCMD_SLEEP:
2621 licmd = QLOGIC_IB_IBCC_LINKINITCMD_SLEEP;
2624 case IB_LINKINITCMD_DISABLE:
2625 licmd = QLOGIC_IB_IBCC_LINKINITCMD_DISABLE;
2626 ppd->cpspec->chase_end = 0;
			 * Stop state chase counter and timer, if running.
			 * Wait for pending timer, but don't clear .data (ppd)!
2631 if (ppd->cpspec->chase_timer.expires) {
2632 del_timer_sync(&ppd->cpspec->chase_timer);
2633 ppd->cpspec->chase_timer.expires = 0;
2639 qib_dev_err(dd, "bad linkinitcmd req 0x%x\n",
2643 qib_set_ib_7220_lstate(ppd, lcmd, licmd);
2645 maskr = IBA7220_IBC_WIDTH_MASK;
2646 lsb = IBA7220_IBC_WIDTH_SHIFT;
2647 tmp = (ppd->cpspec->ibcddrctrl >> lsb) & maskr;
2648 /* If the width active on the chip does not match the
2649 * width in the shadow register, write the new active
2650 * width to the chip.
2651 * We don't have to worry about speed as the speed is taken
2652 * care of by set_7220_ibspeed_fast called by ib_updown.
2654 if (ppd->link_width_enabled-1 != tmp) {
2655 ppd->cpspec->ibcddrctrl &= ~(maskr << lsb);
2656 ppd->cpspec->ibcddrctrl |=
2657 (((u64)(ppd->link_width_enabled-1) & maskr) <<
2659 qib_write_kreg(dd, kr_ibcddrctrl,
2660 ppd->cpspec->ibcddrctrl);
2661 qib_write_kreg(dd, kr_scratch, 0);
2662 spin_lock_irqsave(&ppd->lflags_lock, flags);
2663 ppd->lflags |= QIBL_IB_FORCE_NOTIFY;
2664 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
2668 case QIB_IB_CFG_HRTBT: /* set Heartbeat off/enable/auto */
2669 if (val > IBA7220_IBC_HRTBT_MASK) {
2673 lsb = IBA7220_IBC_HRTBT_SHIFT;
2674 maskr = IBA7220_IBC_HRTBT_MASK;
2681 ppd->cpspec->ibcddrctrl &= ~(maskr << lsb);
2682 ppd->cpspec->ibcddrctrl |= (((u64) val & maskr) << lsb);
2683 qib_write_kreg(dd, kr_ibcddrctrl, ppd->cpspec->ibcddrctrl);
2684 qib_write_kreg(dd, kr_scratch, 0);
2686 spin_lock_irqsave(&ppd->lflags_lock, flags);
2687 ppd->lflags |= QIBL_IB_FORCE_NOTIFY;
2688 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
2694 static int qib_7220_set_loopback(struct qib_pportdata *ppd, const char *what)
2699 if (!strncmp(what, "ibc", 3)) {
2700 ppd->cpspec->ibcctrl |= SYM_MASK(IBCCtrl, Loopback);
2701 val = 0; /* disable heart beat, so link will come up */
2702 qib_devinfo(ppd->dd->pcidev, "Enabling IB%u:%u IBC loopback\n",
2703 ppd->dd->unit, ppd->port);
2704 } else if (!strncmp(what, "off", 3)) {
2705 ppd->cpspec->ibcctrl &= ~SYM_MASK(IBCCtrl, Loopback);
2706 /* enable heart beat again */
2707 val = IBA7220_IBC_HRTBT_MASK << IBA7220_IBC_HRTBT_SHIFT;
2708 qib_devinfo(ppd->dd->pcidev,
2709 "Disabling IB%u:%u IBC loopback (normal)\n",
2710 ppd->dd->unit, ppd->port);
2714 qib_write_kreg(ppd->dd, kr_ibcctrl, ppd->cpspec->ibcctrl);
2715 ddr = ppd->cpspec->ibcddrctrl & ~(IBA7220_IBC_HRTBT_MASK
2716 << IBA7220_IBC_HRTBT_SHIFT);
2717 ppd->cpspec->ibcddrctrl = ddr | val;
2718 qib_write_kreg(ppd->dd, kr_ibcddrctrl,
2719 ppd->cpspec->ibcddrctrl);
2720 qib_write_kreg(ppd->dd, kr_scratch, 0);
2725 static void qib_update_7220_usrhead(struct qib_ctxtdata *rcd, u64 hd,
2726 u32 updegr, u32 egrhd, u32 npkts)
2729 qib_write_ureg(rcd->dd, ur_rcvegrindexhead, egrhd, rcd->ctxt);
2731 qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
2735 static u32 qib_7220_hdrqempty(struct qib_ctxtdata *rcd)
2739 head = qib_read_ureg32(rcd->dd, ur_rcvhdrhead, rcd->ctxt);
2740 if (rcd->rcvhdrtail_kvaddr)
2741 tail = qib_get_rcvhdrtail(rcd);
2743 tail = qib_read_ureg32(rcd->dd, ur_rcvhdrtail, rcd->ctxt);
2744 return head == tail;
 * Modify the RCVCTRL register in a chip-specific way. This
 * is a function because bit positions and (future) register
 * location are chip-specific, but the needed operations are
 * generic. <op> is a bit-mask because we often want to
 * do multiple modifications.
2754 static void rcvctrl_7220_mod(struct qib_pportdata *ppd, unsigned int op,
2757 struct qib_devdata *dd = ppd->dd;
2759 unsigned long flags;
2761 spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
2762 if (op & QIB_RCVCTRL_TAILUPD_ENB)
2763 dd->rcvctrl |= (1ULL << IBA7220_R_TAILUPD_SHIFT);
2764 if (op & QIB_RCVCTRL_TAILUPD_DIS)
2765 dd->rcvctrl &= ~(1ULL << IBA7220_R_TAILUPD_SHIFT);
2766 if (op & QIB_RCVCTRL_PKEY_ENB)
2767 dd->rcvctrl &= ~(1ULL << IBA7220_R_PKEY_DIS_SHIFT);
2768 if (op & QIB_RCVCTRL_PKEY_DIS)
2769 dd->rcvctrl |= (1ULL << IBA7220_R_PKEY_DIS_SHIFT);
2771 mask = (1ULL << dd->ctxtcnt) - 1;
2773 mask = (1ULL << ctxt);
2774 if (op & QIB_RCVCTRL_CTXT_ENB) {
2775 /* always done for specific ctxt */
2776 dd->rcvctrl |= (mask << SYM_LSB(RcvCtrl, PortEnable));
2777 if (!(dd->flags & QIB_NODMA_RTAIL))
2778 dd->rcvctrl |= 1ULL << IBA7220_R_TAILUPD_SHIFT;
2779 /* Write these registers before the context is enabled. */
2780 qib_write_kreg_ctxt(dd, kr_rcvhdrtailaddr, ctxt,
2781 dd->rcd[ctxt]->rcvhdrqtailaddr_phys);
2782 qib_write_kreg_ctxt(dd, kr_rcvhdraddr, ctxt,
2783 dd->rcd[ctxt]->rcvhdrq_phys);
2784 dd->rcd[ctxt]->seq_cnt = 1;
2786 if (op & QIB_RCVCTRL_CTXT_DIS)
2787 dd->rcvctrl &= ~(mask << SYM_LSB(RcvCtrl, PortEnable));
2788 if (op & QIB_RCVCTRL_INTRAVAIL_ENB)
2789 dd->rcvctrl |= (mask << IBA7220_R_INTRAVAIL_SHIFT);
2790 if (op & QIB_RCVCTRL_INTRAVAIL_DIS)
2791 dd->rcvctrl &= ~(mask << IBA7220_R_INTRAVAIL_SHIFT);
2792 qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl);
2793 if ((op & QIB_RCVCTRL_INTRAVAIL_ENB) && dd->rhdrhead_intr_off) {
2794 /* arm rcv interrupt */
2795 val = qib_read_ureg32(dd, ur_rcvhdrhead, ctxt) |
2796 dd->rhdrhead_intr_off;
2797 qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
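		/*
		 * Writing the head register with rhdrhead_intr_off OR'd in
		 * arms the chip to interrupt when the next packet arrives
		 * for this context, instead of requiring polling.
		 */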
2799 if (op & QIB_RCVCTRL_CTXT_ENB) {
2801 * Init the context registers also; if we were
2802 * disabled, tail and head should both be zero
2803 * already from the enable, but since we don't
2804 * know, we have to do it explicitly.
2806 val = qib_read_ureg32(dd, ur_rcvegrindextail, ctxt);
2807 qib_write_ureg(dd, ur_rcvegrindexhead, val, ctxt);
2809 val = qib_read_ureg32(dd, ur_rcvhdrtail, ctxt);
2810 dd->rcd[ctxt]->head = val;
2811 /* If kctxt, interrupt on next receive. */
2812 if (ctxt < dd->first_user_ctxt)
2813 val |= dd->rhdrhead_intr_off;
2814 qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
2816 if (op & QIB_RCVCTRL_CTXT_DIS) {
2818 qib_write_kreg_ctxt(dd, kr_rcvhdrtailaddr, ctxt, 0);
2819 qib_write_kreg_ctxt(dd, kr_rcvhdraddr, ctxt, 0);
2823 for (i = 0; i < dd->cfgctxts; i++) {
2824 qib_write_kreg_ctxt(dd, kr_rcvhdrtailaddr,
2826 qib_write_kreg_ctxt(dd, kr_rcvhdraddr, i, 0);
2830 spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
 * Modify the SENDCTRL register in a chip-specific way. This
 * is a function because there may be multiple such registers with
 * slightly different layouts. To start, we assume the
 * "canonical" register layout of the first chips.
 * The chip requires no back-to-back sendctrl writes, so we write
 * the scratch register after writing sendctrl.
2841 static void sendctrl_7220_mod(struct qib_pportdata *ppd, u32 op)
2843 struct qib_devdata *dd = ppd->dd;
2844 u64 tmp_dd_sendctrl;
2845 unsigned long flags;
2847 spin_lock_irqsave(&dd->sendctrl_lock, flags);
2849 /* First the ones that are "sticky", saved in shadow */
2850 if (op & QIB_SENDCTRL_CLEAR)
2852 if (op & QIB_SENDCTRL_SEND_DIS)
2853 dd->sendctrl &= ~SYM_MASK(SendCtrl, SPioEnable);
2854 else if (op & QIB_SENDCTRL_SEND_ENB) {
2855 dd->sendctrl |= SYM_MASK(SendCtrl, SPioEnable);
2856 if (dd->flags & QIB_USE_SPCL_TRIG)
2857 dd->sendctrl |= SYM_MASK(SendCtrl,
2860 if (op & QIB_SENDCTRL_AVAIL_DIS)
2861 dd->sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd);
2862 else if (op & QIB_SENDCTRL_AVAIL_ENB)
2863 dd->sendctrl |= SYM_MASK(SendCtrl, SendBufAvailUpd);
2865 if (op & QIB_SENDCTRL_DISARM_ALL) {
2868 tmp_dd_sendctrl = dd->sendctrl;
2870 * disarm any that are not yet launched, disabling sends
2871 * and updates until done.
2873 last = dd->piobcnt2k + dd->piobcnt4k;
2875 ~(SYM_MASK(SendCtrl, SPioEnable) |
2876 SYM_MASK(SendCtrl, SendBufAvailUpd));
2877 for (i = 0; i < last; i++) {
2878 qib_write_kreg(dd, kr_sendctrl,
2880 SYM_MASK(SendCtrl, Disarm) | i);
2881 qib_write_kreg(dd, kr_scratch, 0);
2885 tmp_dd_sendctrl = dd->sendctrl;
2887 if (op & QIB_SENDCTRL_FLUSH)
2888 tmp_dd_sendctrl |= SYM_MASK(SendCtrl, Abort);
2889 if (op & QIB_SENDCTRL_DISARM)
2890 tmp_dd_sendctrl |= SYM_MASK(SendCtrl, Disarm) |
2891 ((op & QIB_7220_SendCtrl_DisarmPIOBuf_RMASK) <<
2892 SYM_LSB(SendCtrl, DisarmPIOBuf));
2893 if ((op & QIB_SENDCTRL_AVAIL_BLIP) &&
2894 (dd->sendctrl & SYM_MASK(SendCtrl, SendBufAvailUpd)))
2895 tmp_dd_sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd);
2897 qib_write_kreg(dd, kr_sendctrl, tmp_dd_sendctrl);
2898 qib_write_kreg(dd, kr_scratch, 0);
2900 if (op & QIB_SENDCTRL_AVAIL_BLIP) {
2901 qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
2902 qib_write_kreg(dd, kr_scratch, 0);
2905 spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
2907 if (op & QIB_SENDCTRL_FLUSH) {
2910 * ensure writes have hit chip, then do a few
2911 * more reads, to allow DMA of pioavail registers
2912 * to occur, so in-memory copy is in sync with
2913 * the chip. Not always safe to sleep.
2915 v = qib_read_kreg32(dd, kr_scratch);
2916 qib_write_kreg(dd, kr_scratch, v);
2917 v = qib_read_kreg32(dd, kr_scratch);
2918 qib_write_kreg(dd, kr_scratch, v);
2919 qib_read_kreg32(dd, kr_scratch);
2924 * qib_portcntr_7220 - read a per-port counter
 * @ppd: the qlogic_ib port data
 * @reg: the counter to snapshot
2928 static u64 qib_portcntr_7220(struct qib_pportdata *ppd, u32 reg)
2931 struct qib_devdata *dd = ppd->dd;
2933 /* 0xffff for unimplemented or synthesized counters */
2934 static const u16 xlator[] = {
2935 [QIBPORTCNTR_PKTSEND] = cr_pktsend,
2936 [QIBPORTCNTR_WORDSEND] = cr_wordsend,
2937 [QIBPORTCNTR_PSXMITDATA] = cr_psxmitdatacount,
2938 [QIBPORTCNTR_PSXMITPKTS] = cr_psxmitpktscount,
2939 [QIBPORTCNTR_PSXMITWAIT] = cr_psxmitwaitcount,
2940 [QIBPORTCNTR_SENDSTALL] = cr_sendstall,
2941 [QIBPORTCNTR_PKTRCV] = cr_pktrcv,
2942 [QIBPORTCNTR_PSRCVDATA] = cr_psrcvdatacount,
2943 [QIBPORTCNTR_PSRCVPKTS] = cr_psrcvpktscount,
2944 [QIBPORTCNTR_RCVEBP] = cr_rcvebp,
2945 [QIBPORTCNTR_RCVOVFL] = cr_rcvovfl,
2946 [QIBPORTCNTR_WORDRCV] = cr_wordrcv,
2947 [QIBPORTCNTR_RXDROPPKT] = cr_rxdroppkt,
2948 [QIBPORTCNTR_RXLOCALPHYERR] = cr_rxotherlocalphyerr,
2949 [QIBPORTCNTR_RXVLERR] = cr_rxvlerr,
2950 [QIBPORTCNTR_ERRICRC] = cr_erricrc,
2951 [QIBPORTCNTR_ERRVCRC] = cr_errvcrc,
2952 [QIBPORTCNTR_ERRLPCRC] = cr_errlpcrc,
2953 [QIBPORTCNTR_BADFORMAT] = cr_badformat,
2954 [QIBPORTCNTR_ERR_RLEN] = cr_err_rlen,
2955 [QIBPORTCNTR_IBSYMBOLERR] = cr_ibsymbolerr,
2956 [QIBPORTCNTR_INVALIDRLEN] = cr_invalidrlen,
2957 [QIBPORTCNTR_UNSUPVL] = cr_txunsupvl,
2958 [QIBPORTCNTR_EXCESSBUFOVFL] = cr_excessbufferovfl,
2959 [QIBPORTCNTR_ERRLINK] = cr_errlink,
2960 [QIBPORTCNTR_IBLINKDOWN] = cr_iblinkdown,
2961 [QIBPORTCNTR_IBLINKERRRECOV] = cr_iblinkerrrecov,
2962 [QIBPORTCNTR_LLI] = cr_locallinkintegrityerr,
2963 [QIBPORTCNTR_PSINTERVAL] = cr_psinterval,
2964 [QIBPORTCNTR_PSSTART] = cr_psstart,
2965 [QIBPORTCNTR_PSSTAT] = cr_psstat,
2966 [QIBPORTCNTR_VL15PKTDROP] = cr_vl15droppedpkt,
2967 [QIBPORTCNTR_ERRPKEY] = cr_errpkey,
2968 [QIBPORTCNTR_KHDROVFL] = 0xffff,
2971 if (reg >= ARRAY_SIZE(xlator)) {
2972 qib_devinfo(ppd->dd->pcidev,
2973 "Unimplemented portcounter %u\n", reg);
2978 if (reg == QIBPORTCNTR_KHDROVFL) {
2981 /* sum over all kernel contexts */
2982 for (i = 0; i < dd->first_user_ctxt; i++)
2983 ret += read_7220_creg32(dd, cr_portovfl + i);
	 * only fast-incrementing counters are 64-bit; use 32-bit reads to
	 * avoid two independent reads when on Opteron
2992 if ((creg == cr_wordsend || creg == cr_wordrcv ||
2993 creg == cr_pktsend || creg == cr_pktrcv))
2994 ret = read_7220_creg(dd, creg);
2996 ret = read_7220_creg32(dd, creg);
2997 if (creg == cr_ibsymbolerr) {
2998 if (dd->pport->cpspec->ibdeltainprog)
2999 ret -= ret - ppd->cpspec->ibsymsnap;
3000 ret -= dd->pport->cpspec->ibsymdelta;
3001 } else if (creg == cr_iblinkerrrecov) {
3002 if (dd->pport->cpspec->ibdeltainprog)
3003 ret -= ret - ppd->cpspec->iblnkerrsnap;
3004 ret -= dd->pport->cpspec->iblnkerrdelta;
3011 * Device counter names (not port-specific), one line per stat,
3012 * single string. Used by utilities like ipathstats to print the stats
3013 * in a way which works for different versions of drivers, without changing
3014 * the utility. Names need to be 12 chars or less (w/o newline), for proper
3015 * display by utility.
3016 * Non-error counters are first.
 * Start of "error" counters is indicated by a leading "E " on the first
 * "error" counter, and doesn't count in label length.
3019 * The EgrOvfl list needs to be last so we truncate them at the configured
3020 * context count for the device.
3021 * cntr7220indices contains the corresponding register indices.
3023 static const char cntr7220names[] =
3046 static const size_t cntr7220indices[] = {
3071 * same as cntr7220names and cntr7220indices, but for port-specific counters.
3072 * portcntr7220indices is somewhat complicated by some registers needing
3073 * adjustments of various kinds, and those are ORed with _PORT_VIRT_FLAG
3075 static const char portcntr7220names[] =
3083 "TxDmaDesc\n" /* 7220 and 7322-only */
3084 "E RxDlidFltr\n" /* 7220 and 7322-only */
3107 "RxLclPhyErr\n" /* 7220 and 7322-only */
3108 "RxVL15Drop\n" /* 7220 and 7322-only */
3109 "RxVlErr\n" /* 7220 and 7322-only */
3110 "XcessBufOvfl\n" /* 7220 and 7322-only */
3113 #define _PORT_VIRT_FLAG 0x8000 /* "virtual", need adjustments */
3114 static const size_t portcntr7220indices[] = {
3115 QIBPORTCNTR_PKTSEND | _PORT_VIRT_FLAG,
3117 QIBPORTCNTR_WORDSEND | _PORT_VIRT_FLAG,
3118 QIBPORTCNTR_PKTRCV | _PORT_VIRT_FLAG,
3120 QIBPORTCNTR_WORDRCV | _PORT_VIRT_FLAG,
3121 QIBPORTCNTR_SENDSTALL | _PORT_VIRT_FLAG,
3125 QIBPORTCNTR_IBLINKDOWN | _PORT_VIRT_FLAG,
3126 QIBPORTCNTR_IBLINKERRRECOV | _PORT_VIRT_FLAG,
3127 QIBPORTCNTR_ERRLINK | _PORT_VIRT_FLAG,
3128 QIBPORTCNTR_IBSYMBOLERR | _PORT_VIRT_FLAG,
3129 QIBPORTCNTR_LLI | _PORT_VIRT_FLAG,
3130 QIBPORTCNTR_BADFORMAT | _PORT_VIRT_FLAG,
3131 QIBPORTCNTR_ERR_RLEN | _PORT_VIRT_FLAG,
3132 QIBPORTCNTR_RCVOVFL | _PORT_VIRT_FLAG,
3133 QIBPORTCNTR_RCVEBP | _PORT_VIRT_FLAG,
3135 QIBPORTCNTR_ERRICRC | _PORT_VIRT_FLAG,
3136 QIBPORTCNTR_ERRLPCRC | _PORT_VIRT_FLAG,
3137 QIBPORTCNTR_ERRVCRC | _PORT_VIRT_FLAG,
3138 QIBPORTCNTR_INVALIDRLEN | _PORT_VIRT_FLAG,
3139 QIBPORTCNTR_ERRPKEY | _PORT_VIRT_FLAG,
3140 QIBPORTCNTR_RXDROPPKT | _PORT_VIRT_FLAG,
3146 QIBPORTCNTR_RXLOCALPHYERR | _PORT_VIRT_FLAG,
3147 QIBPORTCNTR_VL15PKTDROP | _PORT_VIRT_FLAG,
3148 QIBPORTCNTR_RXVLERR | _PORT_VIRT_FLAG,
3149 QIBPORTCNTR_EXCESSBUFOVFL | _PORT_VIRT_FLAG,
3152 /* do all the setup to make the counter reads efficient later */
3153 static void init_7220_cntrnames(struct qib_devdata *dd)
3158 for (i = 0, s = (char *)cntr7220names; s && j <= dd->cfgctxts;
3160 /* we always have at least one counter before the egrovfl */
3161 if (!j && !strncmp("Ctxt0EgrOvfl", s + 1, 12))
3163 s = strchr(s + 1, '\n');
3167 dd->cspec->ncntrs = i;
3169 /* full list; size is without terminating null */
3170 dd->cspec->cntrnamelen = sizeof(cntr7220names) - 1;
3172 dd->cspec->cntrnamelen = 1 + s - cntr7220names;
3173 dd->cspec->cntrs = kmalloc(dd->cspec->ncntrs
3174 * sizeof(u64), GFP_KERNEL);
3176 for (i = 0, s = (char *)portcntr7220names; s; i++)
3177 s = strchr(s + 1, '\n');
3178 dd->cspec->nportcntrs = i - 1;
3179 dd->cspec->portcntrnamelen = sizeof(portcntr7220names) - 1;
3180 dd->cspec->portcntrs = kmalloc(dd->cspec->nportcntrs
3181 * sizeof(u64), GFP_KERNEL);
3184 static u32 qib_read_7220cntrs(struct qib_devdata *dd, loff_t pos, char **namep,
3189 if (!dd->cspec->cntrs) {
3195 *namep = (char *)cntr7220names;
3196 ret = dd->cspec->cntrnamelen;
3198 ret = 0; /* final read after getting everything */
3200 u64 *cntr = dd->cspec->cntrs;
3203 ret = dd->cspec->ncntrs * sizeof(u64);
3204 if (!cntr || pos >= ret) {
3205 /* everything read, or couldn't get memory */
3211 for (i = 0; i < dd->cspec->ncntrs; i++)
3212 *cntr++ = read_7220_creg32(dd, cntr7220indices[i]);
3218 static u32 qib_read_7220portcntrs(struct qib_devdata *dd, loff_t pos, u32 port,
3219 char **namep, u64 **cntrp)
3223 if (!dd->cspec->portcntrs) {
3228 *namep = (char *)portcntr7220names;
3229 ret = dd->cspec->portcntrnamelen;
3231 ret = 0; /* final read after getting everything */
3233 u64 *cntr = dd->cspec->portcntrs;
3234 struct qib_pportdata *ppd = &dd->pport[port];
3237 ret = dd->cspec->nportcntrs * sizeof(u64);
3238 if (!cntr || pos >= ret) {
3239 /* everything read, or couldn't get memory */
3244 for (i = 0; i < dd->cspec->nportcntrs; i++) {
3245 if (portcntr7220indices[i] & _PORT_VIRT_FLAG)
3246 *cntr++ = qib_portcntr_7220(ppd,
3247 portcntr7220indices[i] &
3250 *cntr++ = read_7220_creg32(dd,
3251 portcntr7220indices[i]);
3259 * qib_get_7220_faststats - get word counters from chip before they overflow
 * @opaque: contains a pointer to the qlogic_ib device qib_devdata
 *
 * This needs more work; in particular, a decision on whether we really
 * need traffic_wds done the way it is.
 * Called from add_timer.
3266 static void qib_get_7220_faststats(unsigned long opaque)
3268 struct qib_devdata *dd = (struct qib_devdata *) opaque;
3269 struct qib_pportdata *ppd = dd->pport;
3270 unsigned long flags;
	 * don't access the chip while running diags, or memory diags can
	 * fail
3277 if (!(dd->flags & QIB_INITTED) || dd->diag_client)
3278 /* but re-arm the timer, for diags case; won't hurt other */
3282 * We now try to maintain an activity timer, based on traffic
3283 * exceeding a threshold, so we need to check the word-counts
3284 * even if they are 64-bit.
3286 traffic_wds = qib_portcntr_7220(ppd, cr_wordsend) +
3287 qib_portcntr_7220(ppd, cr_wordrcv);
3288 spin_lock_irqsave(&dd->eep_st_lock, flags);
3289 traffic_wds -= dd->traffic_wds;
3290 dd->traffic_wds += traffic_wds;
3291 spin_unlock_irqrestore(&dd->eep_st_lock, flags);
3293 mod_timer(&dd->stats_timer, jiffies + HZ * ACTIVITY_TIMER);
 * If we are using MSI, try to fall back to INTx.
3299 static int qib_7220_intr_fallback(struct qib_devdata *dd)
3304 qib_devinfo(dd->pcidev,
3305 "MSI interrupt not detected, trying INTx interrupts\n");
3306 qib_7220_free_irq(dd);
3307 qib_enable_intx(dd);
	 * Some newer kernels require free_irq before disable_msi,
	 * and the irq can be changed during disable and INTx enable,
	 * so we therefore need to use the pcidev->irq value,
	 * not our saved MSI value.
3314 dd->cspec->irq = dd->pcidev->irq;
3315 qib_setup_7220_interrupt(dd);
3320 * Reset the XGXS (between serdes and IBC). Slightly less intrusive
3321 * than resetting the IBC or external link state, and useful in some
 * cases to cause some retraining. To do this right, we reset the IBC
 * as well.
3325 static void qib_7220_xgxs_reset(struct qib_pportdata *ppd)
3328 struct qib_devdata *dd = ppd->dd;
3330 prev_val = qib_read_kreg64(dd, kr_xgxs_cfg);
3331 val = prev_val | QLOGIC_IB_XGXS_RESET;
3332 prev_val &= ~QLOGIC_IB_XGXS_RESET; /* be sure */
3333 qib_write_kreg(dd, kr_control,
3334 dd->control & ~QLOGIC_IB_C_LINKENABLE);
3335 qib_write_kreg(dd, kr_xgxs_cfg, val);
3336 qib_read_kreg32(dd, kr_scratch);
3337 qib_write_kreg(dd, kr_xgxs_cfg, prev_val);
3338 qib_write_kreg(dd, kr_control, dd->control);
3342 * For this chip, we want to use the same buffer every time
3343 * when we are trying to bring the link up (they are always VL15
3344 * packets). At that link state the packet should always go out immediately
3345 * (or at least be discarded at the tx interface if the link is down).
3346 * If it doesn't, and the buffer isn't available, that means some other
3347 * sender has gotten ahead of us, and is preventing our packet from going
3348 * out. In that case, we flush all packets, and try again. If that still
3349 * fails, we fail the request, and hope things work the next time around.
3351 * We don't need very complicated heuristics on whether the packet had
3352 * time to go out or not, since even at SDR 1X, it goes out in very short
 * time periods, covered by the chip reads done here and as part of the
 * flush.
3356 static u32 __iomem *get_7220_link_buf(struct qib_pportdata *ppd, u32 *bnum)
3359 u32 lbuf = ppd->dd->cspec->lastbuf_for_pio;
3361 unsigned long flags;
3364 * always blip to get avail list updated, since it's almost
3365 * always needed, and is fairly cheap.
3367 sendctrl_7220_mod(ppd->dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
3368 qib_read_kreg64(ppd->dd, kr_scratch); /* extra chip flush */
3369 buf = qib_getsendbuf_range(ppd->dd, bnum, lbuf, lbuf);
3373 spin_lock_irqsave(&ppd->sdma_lock, flags);
3374 if (ppd->sdma_state.current_state == qib_sdma_state_s20_idle &&
3375 ppd->sdma_state.current_state != qib_sdma_state_s00_hw_down) {
3376 __qib_sdma_process_event(ppd, qib_sdma_event_e00_go_hw_down);
3380 qib_7220_sdma_hw_clean_up(ppd);
3382 spin_unlock_irqrestore(&ppd->sdma_lock, flags);
3385 qib_read_kreg64(ppd->dd, kr_scratch); /* extra chip flush */
3386 buf = qib_getsendbuf_range(ppd->dd, bnum, lbuf, lbuf);
3393 * This code for non-IBTA-compliant IB speed negotiation is only known to
3394 * work for the SDR to DDR transition, and only between an HCA and a switch
3395 * with recent firmware. It is based on observed heuristics, rather than
3396 * actual knowledge of the non-compliant speed negotiation.
3397 * It has a number of hard-coded fields, since the hope is to rewrite this
 * when a spec is available on how the negotiation is intended to work.
3400 static void autoneg_7220_sendpkt(struct qib_pportdata *ppd, u32 *hdr,
3401 u32 dcnt, u32 *data)
3405 u32 __iomem *piobuf;
3407 struct qib_devdata *dd = ppd->dd;
3410 pbc = 7 + dcnt + 1; /* 7 dword header, dword data, icrc */
3411 pbc |= PBC_7220_VL15_SEND;
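	/*
	 * E.g. with the 0x40-dword MAD payload used by the autoneg code,
	 * the pbc length is 7 + 64 + 1 = 72 dwords; PBC_7220_VL15_SEND is
	 * OR'd in so the packet goes out on VL15.
	 */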
3412 while (!(piobuf = get_7220_link_buf(ppd, &pnum))) {
3417 sendctrl_7220_mod(dd->pport, QIB_SENDCTRL_DISARM_BUF(pnum));
3418 writeq(pbc, piobuf);
3420 qib_pio_copy(piobuf + 2, hdr, 7);
3421 qib_pio_copy(piobuf + 9, data, dcnt);
3422 if (dd->flags & QIB_USE_SPCL_TRIG) {
3423 u32 spcl_off = (pnum >= dd->piobcnt2k) ? 2047 : 1023;
3426 __raw_writel(0xaebecede, piobuf + spcl_off);
3429 qib_sendbuf_done(dd, pnum);
3433 * _start packet gets sent twice at start, _done gets sent twice at end
3435 static void autoneg_7220_send(struct qib_pportdata *ppd, int which)
3437 struct qib_devdata *dd = ppd->dd;
3439 u32 dw, i, hcnt, dcnt, *data;
3440 static u32 hdr[7] = { 0xf002ffff, 0x48ffff, 0x6400abba };
3441 static u32 madpayload_start[0x40] = {
3442 0x1810103, 0x1, 0x0, 0x0, 0x2c90000, 0x2c9, 0x0, 0x0,
3443 0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
3444 0x1, 0x1388, 0x15e, 0x1, /* rest 0's */
3446 static u32 madpayload_done[0x40] = {
3447 0x1810103, 0x1, 0x0, 0x0, 0x2c90000, 0x2c9, 0x0, 0x0,
3448 0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
3449 0x40000001, 0x1388, 0x15e, /* rest 0's */
3452 dcnt = ARRAY_SIZE(madpayload_start);
3453 hcnt = ARRAY_SIZE(hdr);
3455 /* for maintainability, do it at runtime */
3456 for (i = 0; i < hcnt; i++) {
3457 dw = (__force u32) cpu_to_be32(hdr[i]);
3460 for (i = 0; i < dcnt; i++) {
3461 dw = (__force u32) cpu_to_be32(madpayload_start[i]);
3462 madpayload_start[i] = dw;
3463 dw = (__force u32) cpu_to_be32(madpayload_done[i]);
3464 madpayload_done[i] = dw;
3469 data = which ? madpayload_done : madpayload_start;
3471 autoneg_7220_sendpkt(ppd, hdr, dcnt, data);
3472 qib_read_kreg64(dd, kr_scratch);
3474 autoneg_7220_sendpkt(ppd, hdr, dcnt, data);
3475 qib_read_kreg64(dd, kr_scratch);
3480 * Do the absolute minimum to cause an IB speed change, and make it
3481 * ready, but don't actually trigger the change. The caller will
3482 * do that when ready (if link is in Polling training state, it will
3483 * happen immediately, otherwise when link next goes down)
 * This routine should only be used as part of the DDR autonegotiation
3486 * code for devices that are not compliant with IB 1.2 (or code that
3487 * fixes things up for same).
 * When the link has gone down, and autoneg is enabled or autoneg has
 * failed and we give up until next time, we set both speeds, and
 * then we want IBTA enabled as well as "use max enabled speed".
3493 static void set_7220_ibspeed_fast(struct qib_pportdata *ppd, u32 speed)
3495 ppd->cpspec->ibcddrctrl &= ~(IBA7220_IBC_SPEED_AUTONEG_MASK |
3496 IBA7220_IBC_IBTA_1_2_MASK);
3498 if (speed == (QIB_IB_SDR | QIB_IB_DDR))
3499 ppd->cpspec->ibcddrctrl |= IBA7220_IBC_SPEED_AUTONEG_MASK |
3500 IBA7220_IBC_IBTA_1_2_MASK;
3502 ppd->cpspec->ibcddrctrl |= speed == QIB_IB_DDR ?
3503 IBA7220_IBC_SPEED_DDR : IBA7220_IBC_SPEED_SDR;
3505 qib_write_kreg(ppd->dd, kr_ibcddrctrl, ppd->cpspec->ibcddrctrl);
3506 qib_write_kreg(ppd->dd, kr_scratch, 0);
3510 * This routine is only used when we are not talking to another
3511 * IB 1.2-compliant device that we think can do DDR.
3512 * (This includes all existing switch chips as of Oct 2007.)
3513 * 1.2-compliant devices go directly to DDR prior to reaching INIT
3515 static void try_7220_autoneg(struct qib_pportdata *ppd)
3517 unsigned long flags;
3520 * Required for older non-IB1.2 DDR switches. Newer
3521 * non-IB-compliant switches don't need it, but so far,
3522 * aren't bothered by it either. "Magic constant"
3524 qib_write_kreg(ppd->dd, kr_ncmodectrl, 0x3b9dc07);
3526 spin_lock_irqsave(&ppd->lflags_lock, flags);
3527 ppd->lflags |= QIBL_IB_AUTONEG_INPROG;
3528 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
3529 autoneg_7220_send(ppd, 0);
3530 set_7220_ibspeed_fast(ppd, QIB_IB_DDR);
3532 toggle_7220_rclkrls(ppd->dd);
3533 /* 2 msec is minimum length of a poll cycle */
3534 queue_delayed_work(ib_wq, &ppd->cpspec->autoneg_work,
3535 msecs_to_jiffies(2));
3539 * Handle the empirically determined mechanism for auto-negotiation
3540 * of DDR speed with switches.
3542 static void autoneg_7220_work(struct work_struct *work)
3544 struct qib_pportdata *ppd;
3545 struct qib_devdata *dd;
3548 unsigned long flags;
3550 ppd = &container_of(work, struct qib_chippport_specific,
3551 autoneg_work.work)->pportdata;
3554 startms = jiffies_to_msecs(jiffies);
	 * Busy-wait for this first part; it should be at most a
	 * few hundred usec, since we scheduled ourselves for 2 msec.
3560 for (i = 0; i < 25; i++) {
3561 if (SYM_FIELD(ppd->lastibcstat, IBCStatus, LinkTrainingState)
3562 == IB_7220_LT_STATE_POLLQUIET) {
3563 qib_set_linkstate(ppd, QIB_IB_LINKDOWN_DISABLE);
3569 if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
3570 goto done; /* we got there early or told to stop */
3572 /* we expect this to timeout */
3573 if (wait_event_timeout(ppd->cpspec->autoneg_wait,
3574 !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
3575 msecs_to_jiffies(90)))
3578 toggle_7220_rclkrls(dd);
3580 /* we expect this to timeout */
3581 if (wait_event_timeout(ppd->cpspec->autoneg_wait,
3582 !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
3583 msecs_to_jiffies(1700)))
3586 set_7220_ibspeed_fast(ppd, QIB_IB_SDR);
3587 toggle_7220_rclkrls(dd);
3590 * Wait up to 250 msec for link to train and get to INIT at DDR;
3591 * this should terminate early.
3593 wait_event_timeout(ppd->cpspec->autoneg_wait,
3594 !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
3595 msecs_to_jiffies(250));
3597 if (ppd->lflags & QIBL_IB_AUTONEG_INPROG) {
3598 spin_lock_irqsave(&ppd->lflags_lock, flags);
3599 ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
3600 if (dd->cspec->autoneg_tries == AUTONEG_TRIES) {
3601 ppd->lflags |= QIBL_IB_AUTONEG_FAILED;
3602 dd->cspec->autoneg_tries = 0;
3604 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
3605 set_7220_ibspeed_fast(ppd, ppd->link_speed_enabled);
3609 static u32 qib_7220_iblink_state(u64 ibcs)
3611 u32 state = (u32)SYM_FIELD(ibcs, IBCStatus, LinkState);
3614 case IB_7220_L_STATE_INIT:
3615 state = IB_PORT_INIT;
3617 case IB_7220_L_STATE_ARM:
3618 state = IB_PORT_ARMED;
3620 case IB_7220_L_STATE_ACTIVE:
3622 case IB_7220_L_STATE_ACT_DEFER:
3623 state = IB_PORT_ACTIVE;
3625 default: /* fall through */
3626 case IB_7220_L_STATE_DOWN:
3627 state = IB_PORT_DOWN;
3633 /* returns the IBTA port state, rather than the IBC link training state */
3634 static u8 qib_7220_phys_portstate(u64 ibcs)
3636 u8 state = (u8)SYM_FIELD(ibcs, IBCStatus, LinkTrainingState);
3637 return qib_7220_physportstate[state];
3640 static int qib_7220_ib_updown(struct qib_pportdata *ppd, int ibup, u64 ibcs)
3642 int ret = 0, symadj = 0;
3643 struct qib_devdata *dd = ppd->dd;
3644 unsigned long flags;
3646 spin_lock_irqsave(&ppd->lflags_lock, flags);
3647 ppd->lflags &= ~QIBL_IB_FORCE_NOTIFY;
3648 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
3652 * When the link goes down we don't want AEQ running, so it
3653 * won't interfere with IBC training, etc., and we need
3654 * to go back to the static SerDes preset values.
3656 if (!(ppd->lflags & (QIBL_IB_AUTONEG_FAILED |
3657 QIBL_IB_AUTONEG_INPROG)))
3658 set_7220_ibspeed_fast(ppd, ppd->link_speed_enabled);
3659 if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
3660 qib_sd7220_presets(dd);
3661 qib_cancel_sends(ppd); /* initial disarm, etc. */
3662 spin_lock_irqsave(&ppd->sdma_lock, flags);
3663 if (__qib_sdma_running(ppd))
3664 __qib_sdma_process_event(ppd,
3665 qib_sdma_event_e70_go_idle);
3666 spin_unlock_irqrestore(&ppd->sdma_lock, flags);
		/* this might be better in qib_sd7220_presets() */
3669 set_7220_relock_poll(dd, ibup);
3671 if (qib_compat_ddr_negotiate &&
3672 !(ppd->lflags & (QIBL_IB_AUTONEG_FAILED |
3673 QIBL_IB_AUTONEG_INPROG)) &&
3674 ppd->link_speed_active == QIB_IB_SDR &&
3675 (ppd->link_speed_enabled & (QIB_IB_DDR | QIB_IB_SDR)) ==
3676 (QIB_IB_DDR | QIB_IB_SDR) &&
3677 dd->cspec->autoneg_tries < AUTONEG_TRIES) {
3678 /* we are SDR, and DDR auto-negotiation enabled */
3679 ++dd->cspec->autoneg_tries;
3680 if (!ppd->cpspec->ibdeltainprog) {
3681 ppd->cpspec->ibdeltainprog = 1;
3682 ppd->cpspec->ibsymsnap = read_7220_creg32(dd,
3684 ppd->cpspec->iblnkerrsnap = read_7220_creg32(dd,
3687 try_7220_autoneg(ppd);
3688 ret = 1; /* no other IB status change processing */
3689 } else if ((ppd->lflags & QIBL_IB_AUTONEG_INPROG) &&
3690 ppd->link_speed_active == QIB_IB_SDR) {
3691 autoneg_7220_send(ppd, 1);
3692 set_7220_ibspeed_fast(ppd, QIB_IB_DDR);
3694 toggle_7220_rclkrls(dd);
3695 ret = 1; /* no other IB status change processing */
3697 if ((ppd->lflags & QIBL_IB_AUTONEG_INPROG) &&
3698 (ppd->link_speed_active & QIB_IB_DDR)) {
3699 spin_lock_irqsave(&ppd->lflags_lock, flags);
3700 ppd->lflags &= ~(QIBL_IB_AUTONEG_INPROG |
3701 QIBL_IB_AUTONEG_FAILED);
3702 spin_unlock_irqrestore(&ppd->lflags_lock,
3704 dd->cspec->autoneg_tries = 0;
3705 /* re-enable SDR, for next link down */
3706 set_7220_ibspeed_fast(ppd,
3707 ppd->link_speed_enabled);
3708 wake_up(&ppd->cpspec->autoneg_wait);
3710 } else if (ppd->lflags & QIBL_IB_AUTONEG_FAILED) {
3712 * Clear autoneg failure flag, and do setup
3713 * so we'll try next time link goes down and
3714 * back to INIT (possibly connected to a
3715 * different device).
3717 spin_lock_irqsave(&ppd->lflags_lock, flags);
3718 ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
3719 spin_unlock_irqrestore(&ppd->lflags_lock,
3721 ppd->cpspec->ibcddrctrl |=
3722 IBA7220_IBC_IBTA_1_2_MASK;
3723 qib_write_kreg(dd, kr_ncmodectrl, 0);
3728 if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
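			/*
			 * rate_to_delay is indexed [speed][width] (0 = SDR,
			 * 1 = DDR; 0 = 1X, 1 = 4X), so a faster link gets a
			 * smaller delay multiplier.
			 */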
			ppd->delay_mult = rate_to_delay
			    [(ibcs >> IBA7220_LINKSPEED_SHIFT) & 1]
			    [(ibcs >> IBA7220_LINKWIDTH_SHIFT) & 1];

			set_7220_relock_poll(dd, ibup);
			spin_lock_irqsave(&ppd->sdma_lock, flags);
			/*
			 * Unlike the 7322, the 7220 needs this, due to lack
			 * of an interrupt in some cases when we have SDMA
			 * active when the link goes down.
			 */
			if (ppd->sdma_state.current_state !=
			    qib_sdma_state_s20_idle)
				__qib_sdma_process_event(ppd,
					qib_sdma_event_e00_go_hw_down);
			spin_unlock_irqrestore(&ppd->sdma_lock, flags);
		}
	}

	if (symadj) {
		if (ppd->cpspec->ibdeltainprog) {
			ppd->cpspec->ibdeltainprog = 0;
			ppd->cpspec->ibsymdelta += read_7220_creg32(ppd->dd,
				cr_ibsymbolerr) - ppd->cpspec->ibsymsnap;
			ppd->cpspec->iblnkerrdelta += read_7220_creg32(ppd->dd,
				cr_iblinkerrrecov) - ppd->cpspec->iblnkerrsnap;
		}
	} else if (!ibup && qib_compat_ddr_negotiate &&
		   !ppd->cpspec->ibdeltainprog &&
		   !(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
		ppd->cpspec->ibdeltainprog = 1;
		ppd->cpspec->ibsymsnap = read_7220_creg32(ppd->dd,
							  cr_ibsymbolerr);
		ppd->cpspec->iblnkerrsnap = read_7220_creg32(ppd->dd,
							     cr_iblinkerrrecov);
	}

	if (!ret)
		qib_setup_7220_setextled(ppd, ibup);
	return ret;
}

/*
 * Does read/modify/write to appropriate registers to
 * set output and direction bits selected by mask.
 * These are in their canonical positions (e.g. lsb of
 * dir will end up in D48 of extctrl on existing chips).
 * Returns contents of GP Inputs.
 */
static int gpio_7220_mod(struct qib_devdata *dd, u32 out, u32 dir, u32 mask)
{
	u64 read_val, new_out;
	unsigned long flags;

	if (mask) {
		/* some bits being written, lock access to GPIO */
		dir &= mask;
		out &= mask;
		spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
		dd->cspec->extctrl &= ~((u64)mask << SYM_LSB(EXTCtrl, GPIOOe));
		dd->cspec->extctrl |= ((u64) dir << SYM_LSB(EXTCtrl, GPIOOe));
		new_out = (dd->cspec->gpio_out & ~mask) | out;

		qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
		qib_write_kreg(dd, kr_gpio_out, new_out);
		dd->cspec->gpio_out = new_out;
		spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
	}
	/*
	 * It is unlikely that a read at this time would get valid
	 * data on a pin whose direction line was set in the same
	 * call to this function. We include the read here because
	 * that allows us to potentially combine a change on one pin with
	 * a read on another, and because the old code did something like
	 * this.
	 */
	read_val = qib_read_kreg64(dd, kr_extstatus);
	return SYM_FIELD(read_val, EXTStatus, GPIOIn);
}

/*
 * Read fundamental info we need to use the chip. These are
 * the registers that describe chip capabilities, and are
 * saved in shadow registers.
 */
static void get_7220_chip_params(struct qib_devdata *dd)
{
	u64 val;
	u32 piobufs;
	int mtu;

	dd->uregbase = qib_read_kreg32(dd, kr_userregbase);

	dd->rcvtidcnt = qib_read_kreg32(dd, kr_rcvtidcnt);
	dd->rcvtidbase = qib_read_kreg32(dd, kr_rcvtidbase);
	dd->rcvegrbase = qib_read_kreg32(dd, kr_rcvegrbase);
	dd->palign = qib_read_kreg32(dd, kr_palign);
	dd->piobufbase = qib_read_kreg64(dd, kr_sendpiobufbase);
	dd->pio2k_bufbase = dd->piobufbase & 0xffffffff;

	val = qib_read_kreg64(dd, kr_sendpiosize);
	dd->piosize2k = val & ~0U;
	dd->piosize4k = val >> 32;

	mtu = ib_mtu_enum_to_int(qib_ibmtu);
	if (mtu == -1)
		mtu = QIB_DEFAULT_MTU;
	dd->pport->ibmtu = (u32)mtu;

	val = qib_read_kreg64(dd, kr_sendpiobufcnt);
	dd->piobcnt2k = val & ~0U;
	dd->piobcnt4k = val >> 32;
	/* these may be adjusted in init_chip_wc_pat() */
	dd->pio2kbase = (u32 __iomem *)
		((char __iomem *) dd->kregbase + dd->pio2k_bufbase);
	if (dd->piobcnt4k) {
		dd->pio4kbase = (u32 __iomem *)
			((char __iomem *) dd->kregbase +
			 (dd->piobufbase >> 32));
		/*
		 * 4K buffers take 2 pages; we use roundup just to be
		 * paranoid; we calculate it once here, rather than on
		 * each call.
		 */
		dd->align4k = ALIGN(dd->piosize4k, dd->palign);
	}

	piobufs = dd->piobcnt4k + dd->piobcnt2k;
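
	/*
	 * The PIO-available shadow keeps 2 bits of state per buffer,
	 * i.e. 32 buffers per 64-bit register, so round piobufs up to
	 * a whole number of registers.
	 */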
	dd->pioavregs = ALIGN(piobufs, sizeof(u64) * BITS_PER_BYTE / 2) /
		(sizeof(u64) * BITS_PER_BYTE / 2);
}

/*
 * The chip base addresses in cspec and cpspec have to be set
 * after possible init_chip_wc_pat(), rather than in
 * get_7220_chip_params(), so split out as a separate function.
 */
static void set_7220_baseaddrs(struct qib_devdata *dd)
{
	u32 cregbase;

	/* init after possible re-map in init_chip_wc_pat() */
	cregbase = qib_read_kreg32(dd, kr_counterregbase);
	dd->cspec->cregbase = (u64 __iomem *)
		((char __iomem *) dd->kregbase + cregbase);

	dd->egrtidbase = (u64 __iomem *)
		((char __iomem *) dd->kregbase + dd->rcvegrbase);
}

#define SENDCTRL_SHADOWED (SYM_MASK(SendCtrl, SendIntBufAvail) |	\
			   SYM_MASK(SendCtrl, SPioEnable) |		\
			   SYM_MASK(SendCtrl, SSpecialTriggerEn) |	\
			   SYM_MASK(SendCtrl, SendBufAvailUpd) |	\
			   SYM_MASK(SendCtrl, AvailUpdThld) |		\
			   SYM_MASK(SendCtrl, SDmaEnable) |		\
			   SYM_MASK(SendCtrl, SDmaIntEnable) |		\
			   SYM_MASK(SendCtrl, SDmaHalt) |		\
			   SYM_MASK(SendCtrl, SDmaSingleDescriptor))
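
/*
 * SendCtrl is mostly maintained in a software shadow (dd->sendctrl),
 * since the register also carries operation bits that must not be
 * replayed by a blind read-modify-write. The diag observer below keeps
 * diagnostic reads and writes of SendCtrl coherent with that shadow.
 */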
static int sendctrl_hook(struct qib_devdata *dd,
			 const struct diag_observer *op,
			 u32 offs, u64 *data, u64 mask, int only_32)
{
	unsigned long flags;
	unsigned idx = offs / sizeof(u64);
	u64 local_data, all_bits;

	if (idx != kr_sendctrl) {
		qib_dev_err(dd, "SendCtrl Hook called with offs %X, %s-bit\n",
			    offs, only_32 ? "32" : "64");
		return 0;
	}

	all_bits = ~0ULL;
	if (only_32)
		all_bits >>= 32;
	spin_lock_irqsave(&dd->sendctrl_lock, flags);
	if ((mask & all_bits) != all_bits) {
		/*
		 * At least some mask bits are zero, so we need
		 * to read. The judgement call is whether from
		 * reg or shadow. First-cut: read reg, and complain
		 * if any bits which should be shadowed are different
		 * from their shadowed value.
		 */
		if (only_32)
			local_data = (u64)qib_read_kreg32(dd, idx);
		else
			local_data = qib_read_kreg64(dd, idx);
		qib_dev_err(dd, "Sendctrl -> %X, Shad -> %X\n",
			    (u32)local_data, (u32)dd->sendctrl);
		if ((local_data & SENDCTRL_SHADOWED) !=
		    (dd->sendctrl & SENDCTRL_SHADOWED))
			qib_dev_err(dd, "Sendctrl read: %X shadow is %X\n",
				    (u32)local_data, (u32)dd->sendctrl);
		*data = (local_data & ~mask) | (*data & mask);
	}
	if (mask) {
		/*
		 * At least some mask bits are one, so we need
		 * to write, but only shadow some bits.
		 */
		u64 sval, tval; /* Shadowed, transient */

		/*
		 * New shadow val is bits we don't want to touch,
		 * ORed with bits we do, that are intended for shadow.
		 */
		sval = (dd->sendctrl & ~mask);
		sval |= *data & SENDCTRL_SHADOWED & mask;
		dd->sendctrl = sval;
		tval = sval | (*data & ~SENDCTRL_SHADOWED & mask);
		qib_dev_err(dd, "Sendctrl <- %X, Shad <- %X\n",
			    (u32)tval, (u32)sval);
		qib_write_kreg(dd, kr_sendctrl, tval);
		qib_write_kreg(dd, kr_scratch, 0ULL);
	}
	spin_unlock_irqrestore(&dd->sendctrl_lock, flags);

	return only_32 ? 4 : 8;
}
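
/*
 * The observer claims exactly the 8-byte SendCtrl register (bottom and
 * top of the observed range are the same offset); the hook's return
 * value is the number of bytes it serviced, 4 for 32-bit accesses and
 * 8 otherwise.
 */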
static const struct diag_observer sendctrl_observer = {
	sendctrl_hook, kr_sendctrl * sizeof(u64),
	kr_sendctrl * sizeof(u64)
};

/*
 * Write the final few registers that depend on some of the
 * init setup. Done late in init, just before bringing up
 * the serdes.
 */
static int qib_late_7220_initreg(struct qib_devdata *dd)
{
	int ret = 0;
	u64 val;

	qib_write_kreg(dd, kr_rcvhdrentsize, dd->rcvhdrentsize);
	qib_write_kreg(dd, kr_rcvhdrsize, dd->rcvhdrsize);
	qib_write_kreg(dd, kr_rcvhdrcnt, dd->rcvhdrcnt);
	qib_write_kreg(dd, kr_sendpioavailaddr, dd->pioavailregs_phys);
	val = qib_read_kreg64(dd, kr_sendpioavailaddr);
	if (val != dd->pioavailregs_phys) {
		qib_dev_err(dd,
			"Catastrophic software error, SendPIOAvailAddr written as %lx, read back as %llx\n",
			(unsigned long) dd->pioavailregs_phys,
			(unsigned long long) val);
		ret = -EINVAL;
	}
	qib_register_observer(dd, &sendctrl_observer);
	return ret;
}

static int qib_init_7220_variables(struct qib_devdata *dd)
{
	struct qib_chippport_specific *cpspec;
	struct qib_pportdata *ppd;
	int ret = 0;
	u32 sbufs, updthresh;

	cpspec = (struct qib_chippport_specific *)(dd + 1);
	ppd = &cpspec->pportdata;
	dd->pport = ppd;
	dd->num_pports = 1;

	dd->cspec = (struct qib_chip_specific *)(cpspec + dd->num_pports);
	ppd->cpspec = cpspec;

	spin_lock_init(&dd->cspec->sdepb_lock);
	spin_lock_init(&dd->cspec->rcvmod_lock);
	spin_lock_init(&dd->cspec->gpio_lock);

	/* we haven't yet set QIB_PRESENT, so use read directly */
	dd->revision = readq(&dd->kregbase[kr_revision]);

	if ((dd->revision & 0xffffffffU) == 0xffffffffU) {
		qib_dev_err(dd,
			"Revision register read failure, giving up initialization\n");
		ret = -ENODEV;
		goto bail;
	}
	dd->flags |= QIB_PRESENT;	/* now register routines work */

	dd->majrev = (u8) SYM_FIELD(dd->revision, Revision_R,
				    ChipRevMajor);
	dd->minrev = (u8) SYM_FIELD(dd->revision, Revision_R,
				    ChipRevMinor);

	get_7220_chip_params(dd);
	qib_7220_boardname(dd);

	/*
	 * GPIO bits for TWSI data and clock,
	 * used for serial EEPROM.
	 */
	dd->gpio_sda_num = _QIB_GPIO_SDA_NUM;
	dd->gpio_scl_num = _QIB_GPIO_SCL_NUM;
	dd->twsi_eeprom_dev = QIB_TWSI_EEPROM_DEV;

	dd->flags |= QIB_HAS_INTX | QIB_HAS_LINK_LATENCY |
		QIB_NODMA_RTAIL | QIB_HAS_THRESH_UPDATE;
	dd->flags |= qib_special_trigger ?
		QIB_USE_SPCL_TRIG : QIB_HAS_SEND_DMA;

	/*
	 * EEPROM error log 0 is TXE Parity errors, 1 is RXE Parity,
	 * 2 is miscellaneous, and 3 is reserved for future use.
	 */
	dd->eep_st_masks[0].hwerrs_to_log = HWE_MASK(TXEMemParityErr);

	dd->eep_st_masks[1].hwerrs_to_log = HWE_MASK(RXEMemParityErr);

	dd->eep_st_masks[2].errs_to_log = ERR_MASK(ResetNegated);

	init_waitqueue_head(&cpspec->autoneg_wait);
	INIT_DELAYED_WORK(&cpspec->autoneg_work, autoneg_7220_work);

	ret = qib_init_pportdata(ppd, dd, 0, 1);
	if (ret)
		goto bail;
	ppd->link_width_supported = IB_WIDTH_1X | IB_WIDTH_4X;
	ppd->link_speed_supported = QIB_IB_SDR | QIB_IB_DDR;

	ppd->link_width_enabled = ppd->link_width_supported;
	ppd->link_speed_enabled = ppd->link_speed_supported;
	/*
	 * Set the initial values to reasonable defaults; they will be
	 * set for real when the link comes up.
	 */
	ppd->link_width_active = IB_WIDTH_4X;
	ppd->link_speed_active = QIB_IB_SDR;
	ppd->delay_mult = rate_to_delay[0][1];
	ppd->vls_supported = IB_VL_VL0;
	ppd->vls_operational = ppd->vls_supported;

	if (!qib_mini_init)
		qib_write_kreg(dd, kr_rcvbthqp, QIB_KD_QP);

	setup_timer(&ppd->cpspec->chase_timer, reenable_7220_chase,
		    (unsigned long)ppd);

	qib_num_cfg_vls = 1; /* if any 7220s are present, only one VL */

	dd->rcvhdrentsize = QIB_RCVHDR_ENTSIZE;
	dd->rcvhdrsize = QIB_DFLT_RCVHDRSIZE;
	dd->rhf_offset =
		dd->rcvhdrentsize - sizeof(u64) / sizeof(u32);

	/* we always allocate at least 2048 bytes for eager buffers */
	ret = ib_mtu_enum_to_int(qib_ibmtu);
	dd->rcvegrbufsize = ret != -1 ? max(ret, 2048) : QIB_DEFAULT_MTU;
	BUG_ON(!is_power_of_2(dd->rcvegrbufsize));
	dd->rcvegrbufsize_shift = ilog2(dd->rcvegrbufsize);

	qib_7220_tidtemplate(dd);

	/*
	 * We can request a receive interrupt for 1 or
	 * more packets from current offset. For now, we set this
	 * up for a single packet.
	 */
	dd->rhdrhead_intr_off = 1ULL << 32;
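
	/*
	 * This value is ORed into rcvhdrhead writes; bit 32 asks the chip
	 * to interrupt when a packet arrives at or after the given offset.
	 */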

	/* set up the stats timer; the add_timer is done at end of init */
	init_timer(&dd->stats_timer);
	dd->stats_timer.function = qib_get_7220_faststats;
	dd->stats_timer.data = (unsigned long) dd;
	dd->stats_timer.expires = jiffies + ACTIVITY_TIMER * HZ;

	/*
	 * Control[4] has been added to change the arbitration within
	 * the SDMA engine between favoring data fetches over descriptor
	 * fetches. qib_sdma_fetch_arb==0 gives data fetches priority.
	 */
	if (qib_sdma_fetch_arb)
		dd->control |= 1 << 4;

	dd->ureg_align = 0x10000;  /* 64KB alignment */

	dd->piosize2kmax_dwords = (dd->piosize2k >> 2) - 1;
	qib_7220_config_ctxts(dd);
	qib_set_ctxtcnt(dd);  /* needed for PAT setup */

	ret = init_chip_wc_pat(dd, 0);
	if (ret)
		goto bail;
	set_7220_baseaddrs(dd); /* set chip access pointers now */

	ret = 0;
	if (qib_mini_init)
		goto bail;

	ret = qib_create_ctxts(dd);
	init_7220_cntrnames(dd);

	/*
	 * Use all of the 4KB buffers for the kernel SDMA, zero if !SDMA.
	 * Reserve the update threshold amount for other kernel use, such
	 * as sending SMI, MAD, and ACKs, or 3, whichever is greater,
	 * unless we aren't enabling SDMA, in which case we want to use
	 * all the 4k bufs for the kernel.
	 * If this were less than the update threshold, we could wait
	 * a long time for an update. Coded this way because we
	 * sometimes change the update threshold for various reasons,
	 * and we want this to remain robust.
	 */
	updthresh = 8U; /* update threshold */
	if (dd->flags & QIB_HAS_SEND_DMA) {
		dd->cspec->sdmabufcnt = dd->piobcnt4k;
		sbufs = updthresh > 3 ? updthresh : 3;
	} else {
		dd->cspec->sdmabufcnt = 0;
		sbufs = dd->piobcnt4k;
	}

	dd->cspec->lastbuf_for_pio = dd->piobcnt2k + dd->piobcnt4k -
		dd->cspec->sdmabufcnt;
	dd->lastctxt_piobuf = dd->cspec->lastbuf_for_pio - sbufs;
	dd->cspec->lastbuf_for_pio--; /* range is <= , not < */
	dd->last_pio = dd->cspec->lastbuf_for_pio;
	dd->pbufsctxt = dd->lastctxt_piobuf /
		(dd->cfgctxts - dd->first_user_ctxt);

	/*
	 * If we have 16 user contexts, we will have 7 sbufs
	 * per context, so drop the update threshold to match. We
	 * want to update before we actually run out, at low pbufs/ctxt,
	 * so give ourselves some margin.
	 */
	if ((dd->pbufsctxt - 2) < updthresh)
		updthresh = dd->pbufsctxt - 2;

	dd->cspec->updthresh_dflt = updthresh;
	dd->cspec->updthresh = updthresh;

	/* before full enable, no interrupts, no locking needed */
	dd->sendctrl |= (updthresh & SYM_RMASK(SendCtrl, AvailUpdThld))
		<< SYM_LSB(SendCtrl, AvailUpdThld);

	dd->psxmitwait_supported = 1;
	dd->psxmitwait_check_rate = QIB_7220_PSXMITWAIT_CHECK_RATE;
bail:
	return ret;
}

static u32 __iomem *qib_7220_getsendbuf(struct qib_pportdata *ppd, u64 pbc,
					u32 *pbufnum)
{
	u32 first, last, plen = pbc & QIB_PBC_LENGTH_MASK;
	struct qib_devdata *dd = ppd->dd;
	u32 __iomem *buf;

	if (((pbc >> 32) & PBC_7220_VL15_SEND_CTRL) &&
	    !(ppd->lflags & (QIBL_IB_AUTONEG_INPROG | QIBL_LINKACTIVE)))
		buf = get_7220_link_buf(ppd, pbufnum);
	else {
		if ((plen + 1) > dd->piosize2kmax_dwords)
			first = dd->piobcnt2k;
		else
			first = 0;
		/* try 4k if all 2k busy, so same last for both sizes */
		last = dd->cspec->lastbuf_for_pio;
		buf = qib_getsendbuf_range(dd, pbufnum, first, last);
	}
	return buf;
}

/* these two "counters" are really control registers, and are always RW */
static void qib_set_cntr_7220_sample(struct qib_pportdata *ppd, u32 intv,
				     u32 start)
{
	write_7220_creg(ppd->dd, cr_psinterval, intv);
	write_7220_creg(ppd->dd, cr_psstart, start);
}

/*
 * NOTE: no real attempt is made to generalize the SDMA stuff.
 * At some point "soon" we will have a new, more generalized
 * set of SDMA interfaces, and then we'll clean this up.
 */

/* Must be called with sdma_lock held, or before init finished */
static void qib_sdma_update_7220_tail(struct qib_pportdata *ppd, u16 tail)
{
	/* Commit writes to memory and advance the tail on the chip */
	wmb();
	ppd->sdma_descq_tail = tail;
	qib_write_kreg(ppd->dd, kr_senddmatail, tail);
}

static void qib_sdma_set_7220_desc_cnt(struct qib_pportdata *ppd, unsigned cnt)
{
}
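
/*
 * The empty stub above appears deliberate: the 7220 has no programmable
 * SDMA descriptor-count control, so the hook only satisfies the common
 * SDMA interface (an inference from the stub, not chip documentation).
 *
 * The table below holds the per-state settings applied by the generic
 * SDMA state machine on each transition; the op_* fields mirror the
 * SendDmaCtrl enable, interrupt-enable, and halt controls.
 */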
static struct sdma_set_state_action sdma_7220_action_table[] = {
	[qib_sdma_state_s00_hw_down] = {
		.op_enable = 0,
		.op_intenable = 0,
		.op_halt = 0,
		.go_s99_running_tofalse = 1,
	},
	[qib_sdma_state_s10_hw_start_up_wait] = {
		.op_enable = 1,
		.op_intenable = 1,
		.op_halt = 1,
	},
	[qib_sdma_state_s20_idle] = {
		.op_enable = 1,
		.op_intenable = 1,
		.op_halt = 1,
	},
	[qib_sdma_state_s30_sw_clean_up_wait] = {
		.op_enable = 0,
		.op_intenable = 1,
		.op_halt = 0,
	},
	[qib_sdma_state_s40_hw_clean_up_wait] = {
		.op_enable = 1,
		.op_intenable = 1,
		.op_halt = 1,
	},
	[qib_sdma_state_s50_hw_halt_wait] = {
		.op_enable = 1,
		.op_intenable = 1,
		.op_halt = 1,
	},
	[qib_sdma_state_s99_running] = {
		.op_enable = 1,
		.op_intenable = 1,
		.op_halt = 0,
		.go_s99_running_totrue = 1,
	},
};

static void qib_7220_sdma_init_early(struct qib_pportdata *ppd)
{
	ppd->sdma_state.set_state_action = sdma_7220_action_table;
}

static int init_sdma_7220_regs(struct qib_pportdata *ppd)
{
	struct qib_devdata *dd = ppd->dd;
	unsigned i, n;
	u64 senddmabufmask[3] = { 0 };

	/* Set SendDmaBase */
	qib_write_kreg(dd, kr_senddmabase, ppd->sdma_descq_phys);
	qib_sdma_7220_setlengen(ppd);
	qib_sdma_update_7220_tail(ppd, 0); /* Set SendDmaTail */
	/* Set SendDmaHeadAddr */
	qib_write_kreg(dd, kr_senddmaheadaddr, ppd->sdma_head_phys);

	/*
	 * Reserve all the former "kernel" piobufs, using high number range
	 * so we get as many 4K buffers as possible.
	 */
	n = dd->piobcnt2k + dd->piobcnt4k;
	i = n - dd->cspec->sdmabufcnt;

	for (; i < n; ++i) {
		unsigned word = i / 64;
		unsigned bit = i & 63;

		senddmabufmask[word] |= 1ULL << bit;
	}
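
	/*
	 * Worked example with hypothetical counts: if n == 144 and
	 * sdmabufcnt == 24, buffers 120..143 are reserved; buffer 120
	 * sets bit 56 of senddmabufmask[1] (120 / 64 == 1, 120 & 63 == 56).
	 */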
	qib_write_kreg(dd, kr_senddmabufmask0, senddmabufmask[0]);
	qib_write_kreg(dd, kr_senddmabufmask1, senddmabufmask[1]);
	qib_write_kreg(dd, kr_senddmabufmask2, senddmabufmask[2]);

	ppd->sdma_state.first_sendbuf = i;
	ppd->sdma_state.last_sendbuf = n;

	return 0;
}

/* sdma_lock must be held */
static u16 qib_sdma_7220_gethead(struct qib_pportdata *ppd)
{
	struct qib_devdata *dd = ppd->dd;
	int sane;
	int use_dmahead;
	u16 swhead;
	u16 swtail;
	u16 cnt;
	u16 hwhead;

	use_dmahead = __qib_sdma_running(ppd) &&
		(dd->flags & QIB_HAS_SDMA_TIMEOUT);
retry:
	hwhead = use_dmahead ?
		(u16)le64_to_cpu(*ppd->sdma_head_dma) :
		(u16)qib_read_kreg32(dd, kr_senddmahead);

	swhead = ppd->sdma_descq_head;
	swtail = ppd->sdma_descq_tail;
	cnt = ppd->sdma_descq_cnt;

	if (swhead < swtail) {
		/* not wrapped */
		sane = (hwhead >= swhead) & (hwhead <= swtail);
	} else if (swhead > swtail) {
		/* wrapped around */
		sane = ((hwhead >= swhead) && (hwhead < cnt)) ||
			(hwhead <= swtail);
	} else {
		/* empty */
		sane = (hwhead == swhead);
	}
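
	/*
	 * Example with hypothetical values: cnt == 256, swhead == 200,
	 * swtail == 10 means the ring has wrapped, so a sane hardware
	 * head is anywhere in 200..255 or 0..10.
	 */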
	if (unlikely(!sane)) {
		if (use_dmahead) {
			/* try one more time, directly from the register */
			use_dmahead = 0;
			goto retry;
		}
		/* assume no progress */
		hwhead = swhead;
	}

	return hwhead;
}

static int qib_sdma_7220_busy(struct qib_pportdata *ppd)
{
	u64 hwstatus = qib_read_kreg64(ppd->dd, kr_senddmastatus);

	return (hwstatus & SYM_MASK(SendDmaStatus, ScoreBoardDrainInProg)) ||
	       (hwstatus & SYM_MASK(SendDmaStatus, AbortInProg)) ||
	       (hwstatus & SYM_MASK(SendDmaStatus, InternalSDmaEnable)) ||
	       !(hwstatus & SYM_MASK(SendDmaStatus, ScbEmpty));
}

/*
 * Compute the amount of delay before sending the next packet if the
 * port's send rate differs from the static rate set for the QP.
 * Since the delay affects this packet but the amount of the delay is
 * based on the length of the previous packet, use the last delay computed
 * and save the delay count for this packet to be used next time.
 */
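
/*
 * Worked example with hypothetical values: if the QP's static rate calls
 * for a delay multiplier one greater than the port's (rcv_mult - snd_mult
 * == 1) and the previous packet was 600 dwords, the delay carried in the
 * next PBC is (600 * 1 + 1) >> 1 == 300.
 */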
static u32 qib_7220_setpbc_control(struct qib_pportdata *ppd, u32 plen,
				   u8 srate, u8 vl)
{
	u8 snd_mult = ppd->delay_mult;
	u8 rcv_mult = ib_rate_to_delay[srate];
	u32 ret = ppd->cpspec->last_delay_mult;

	ppd->cpspec->last_delay_mult = (rcv_mult > snd_mult) ?
		(plen * (rcv_mult - snd_mult) + 1) >> 1 : 0;

	/* Indicate VL15, if necessary */
	if (vl == 15)
		ret |= PBC_7220_VL15_SEND_CTRL;
	return ret;
}

static void qib_7220_initvl15_bufs(struct qib_devdata *dd)
{
}
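
/*
 * Eager-TID layout: the kernel context (0) owns eager TIDs
 * 0..IBA7220_KRCVEGRCNT-1; user context N then gets rcvegrcnt TIDs
 * starting at IBA7220_KRCVEGRCNT + (N - 1) * rcvegrcnt.
 */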
static void qib_7220_init_ctxt(struct qib_ctxtdata *rcd)
{
	if (!rcd->ctxt) {
		rcd->rcvegrcnt = IBA7220_KRCVEGRCNT;
		rcd->rcvegr_tid_base = 0;
	} else {
		rcd->rcvegrcnt = rcd->dd->cspec->rcvegrcnt;
		rcd->rcvegr_tid_base = IBA7220_KRCVEGRCNT +
			(rcd->ctxt - 1) * rcd->rcvegrcnt;
	}
}

static void qib_7220_txchk_change(struct qib_devdata *dd, u32 start,
				  u32 len, u32 which, struct qib_ctxtdata *rcd)
{
	int i;
	unsigned long flags;

	switch (which) {
	case TXCHK_CHG_TYPE_KERN:
		/* see if we need to raise avail update threshold */
		spin_lock_irqsave(&dd->uctxt_lock, flags);
		for (i = dd->first_user_ctxt;
		     dd->cspec->updthresh != dd->cspec->updthresh_dflt
		     && i < dd->cfgctxts; i++)
			if (dd->rcd[i] && dd->rcd[i]->subctxt_cnt &&
			    ((dd->rcd[i]->piocnt / dd->rcd[i]->subctxt_cnt) - 1)
			    < dd->cspec->updthresh_dflt)
				break;
		spin_unlock_irqrestore(&dd->uctxt_lock, flags);
		if (i == dd->cfgctxts) {
			spin_lock_irqsave(&dd->sendctrl_lock, flags);
			dd->cspec->updthresh = dd->cspec->updthresh_dflt;
			dd->sendctrl &= ~SYM_MASK(SendCtrl, AvailUpdThld);
			dd->sendctrl |= (dd->cspec->updthresh &
					 SYM_RMASK(SendCtrl, AvailUpdThld)) <<
					SYM_LSB(SendCtrl, AvailUpdThld);
			spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
			sendctrl_7220_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
		}
		break;
	case TXCHK_CHG_TYPE_USER:
		spin_lock_irqsave(&dd->sendctrl_lock, flags);
		if (rcd && rcd->subctxt_cnt && ((rcd->piocnt
		    / rcd->subctxt_cnt) - 1) < dd->cspec->updthresh) {
			dd->cspec->updthresh = (rcd->piocnt /
						rcd->subctxt_cnt) - 1;
			dd->sendctrl &= ~SYM_MASK(SendCtrl, AvailUpdThld);
			dd->sendctrl |= (dd->cspec->updthresh &
					 SYM_RMASK(SendCtrl, AvailUpdThld))
					<< SYM_LSB(SendCtrl, AvailUpdThld);
			spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
			sendctrl_7220_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
		} else
			spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
		break;
	}
}

static void writescratch(struct qib_devdata *dd, u32 val)
{
	qib_write_kreg(dd, kr_scratch, val);
}

#define VALID_TS_RD_REG_MASK 0xBF
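/* 0xBF == 1011 1111b: registers 0..7 are valid except register 6 */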

/**
 * qib_7220_tempsense_rd - read register of temp sensor via TWSI
 * @dd: the qlogic_ib device
 * @regnum: register to read from
 *
 * returns reg contents (0..255) or < 0 for error
 */
static int qib_7220_tempsense_rd(struct qib_devdata *dd, int regnum)
{
	int ret;
	u8 rdata;

	if (regnum > 7) {
		ret = -EINVAL;
		goto bail;
	}

	/* return a bogus value for (the one) register we do not have */
	if (!((1 << regnum) & VALID_TS_RD_REG_MASK)) {
		ret = 0;
		goto bail;
	}

	ret = mutex_lock_interruptible(&dd->eep_lock);
	if (ret)
		goto bail;

	ret = qib_twsi_blk_rd(dd, QIB_TWSI_TEMP_DEV, regnum, &rdata, 1);
	if (!ret)
		ret = rdata;

	mutex_unlock(&dd->eep_lock);

	/*
	 * There are three possibilities here:
	 * ret is the actual value (0..255),
	 * ret is -ENXIO or -EINVAL from the TWSI code or this file, or
	 * ret is -EINTR from mutex_lock_interruptible.
	 */
bail:
	return ret;
}

#ifdef CONFIG_INFINIBAND_QIB_DCA
static int qib_7220_notify_dca(struct qib_devdata *dd, unsigned long event)
{
	return 0;
}
#endif

/* Dummy function, as 7220 boards never disable EEPROM Write */
static int qib_7220_eeprom_wen(struct qib_devdata *dd, int wen)
{
	return 1;
}

/**
 * qib_init_iba7220_funcs - set up the chip-specific function pointers
 * @pdev: the pci_dev for the qlogic_ib device
 * @ent: pci_device_id struct for this dev
 *
 * This is global, and is called directly at init to set up the
 * chip-specific function pointers for later use.
 */
struct qib_devdata *qib_init_iba7220_funcs(struct pci_dev *pdev,
					   const struct pci_device_id *ent)
{
	struct qib_devdata *dd;
	int ret;
	u32 boardid, minwidth;

	dd = qib_alloc_devdata(pdev, sizeof(struct qib_chip_specific) +
			       sizeof(struct qib_chippport_specific));
	if (IS_ERR(dd))
		goto bail;

	dd->f_bringup_serdes = qib_7220_bringup_serdes;
	dd->f_cleanup = qib_setup_7220_cleanup;
	dd->f_clear_tids = qib_7220_clear_tids;
	dd->f_free_irq = qib_7220_free_irq;
	dd->f_get_base_info = qib_7220_get_base_info;
	dd->f_get_msgheader = qib_7220_get_msgheader;
	dd->f_getsendbuf = qib_7220_getsendbuf;
	dd->f_gpio_mod = gpio_7220_mod;
	dd->f_eeprom_wen = qib_7220_eeprom_wen;
	dd->f_hdrqempty = qib_7220_hdrqempty;
	dd->f_ib_updown = qib_7220_ib_updown;
	dd->f_init_ctxt = qib_7220_init_ctxt;
	dd->f_initvl15_bufs = qib_7220_initvl15_bufs;
	dd->f_intr_fallback = qib_7220_intr_fallback;
	dd->f_late_initreg = qib_late_7220_initreg;
	dd->f_setpbc_control = qib_7220_setpbc_control;
	dd->f_portcntr = qib_portcntr_7220;
	dd->f_put_tid = qib_7220_put_tid;
	dd->f_quiet_serdes = qib_7220_quiet_serdes;
	dd->f_rcvctrl = rcvctrl_7220_mod;
	dd->f_read_cntrs = qib_read_7220cntrs;
	dd->f_read_portcntrs = qib_read_7220portcntrs;
	dd->f_reset = qib_setup_7220_reset;
	dd->f_init_sdma_regs = init_sdma_7220_regs;
	dd->f_sdma_busy = qib_sdma_7220_busy;
	dd->f_sdma_gethead = qib_sdma_7220_gethead;
	dd->f_sdma_sendctrl = qib_7220_sdma_sendctrl;
	dd->f_sdma_set_desc_cnt = qib_sdma_set_7220_desc_cnt;
	dd->f_sdma_update_tail = qib_sdma_update_7220_tail;
	dd->f_sdma_hw_clean_up = qib_7220_sdma_hw_clean_up;
	dd->f_sdma_hw_start_up = qib_7220_sdma_hw_start_up;
	dd->f_sdma_init_early = qib_7220_sdma_init_early;
	dd->f_sendctrl = sendctrl_7220_mod;
	dd->f_set_armlaunch = qib_set_7220_armlaunch;
	dd->f_set_cntr_sample = qib_set_cntr_7220_sample;
	dd->f_iblink_state = qib_7220_iblink_state;
	dd->f_ibphys_portstate = qib_7220_phys_portstate;
	dd->f_get_ib_cfg = qib_7220_get_ib_cfg;
	dd->f_set_ib_cfg = qib_7220_set_ib_cfg;
	dd->f_set_ib_loopback = qib_7220_set_loopback;
	dd->f_set_intr_state = qib_7220_set_intr_state;
	dd->f_setextled = qib_setup_7220_setextled;
	dd->f_txchk_change = qib_7220_txchk_change;
	dd->f_update_usrhead = qib_update_7220_usrhead;
	dd->f_wantpiobuf_intr = qib_wantpiobuf_7220_intr;
	dd->f_xgxs_reset = qib_7220_xgxs_reset;
	dd->f_writescratch = writescratch;
	dd->f_tempsense_rd = qib_7220_tempsense_rd;
#ifdef CONFIG_INFINIBAND_QIB_DCA
	dd->f_notify_dca = qib_7220_notify_dca;
#endif

	/*
	 * Do remaining PCIe setup and save PCIe values in dd.
	 * Any error printing is already done by the init code.
	 * On return, we have the chip mapped, but chip registers
	 * are not set up until the start of qib_init_7220_variables.
	 */
	ret = qib_pcie_ddinit(dd, pdev, ent);
	if (ret < 0)
		goto bail_free;

	/* initialize chip-specific variables */
	ret = qib_init_7220_variables(dd);
	if (ret)
		goto bail_cleanup;

	if (qib_mini_init)
		goto bail;

	boardid = SYM_FIELD(dd->revision, Revision,
			    BoardID);
	switch (boardid) {
	case 0:
	case 2:
	case 10:
	case 12:
		minwidth = 16; /* x16 capable boards */
		break;
	default:
		minwidth = 8; /* x8 capable boards */
		break;
	}
	if (qib_pcie_params(dd, minwidth, NULL))
		qib_dev_err(dd,
			"Failed to setup PCIe or interrupts; continuing anyway\n");

	/* save IRQ for possible later use */
	dd->cspec->irq = pdev->irq;

	if (qib_read_kreg64(dd, kr_hwerrstatus) &
	    QLOGIC_IB_HWE_SERDESPLLFAILED)
		qib_write_kreg(dd, kr_hwerrclear,
			       QLOGIC_IB_HWE_SERDESPLLFAILED);

	/* set up the interrupt handler (interrupt type handled above) */
	qib_setup_7220_interrupt(dd);
	qib_7220_init_hwerrors(dd);

	/* clear diagctrl register, in case diags were running and crashed */
	qib_write_kreg(dd, kr_hwdiagctrl, 0);

	goto bail;

bail_cleanup:
	qib_pcie_ddcleanup(dd);
bail_free:
	qib_free_devdata(dd);
	dd = ERR_PTR(ret);
bail:
	return dd;
}