1 /* bnx2.c: QLogic bnx2 network driver.
3 * Copyright (c) 2004-2014 Broadcom Corporation
4 * Copyright (c) 2014-2015 QLogic Corporation
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation.
10 * Written by: Michael Chan (mchan@broadcom.com)
13 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
15 #include <linux/module.h>
16 #include <linux/moduleparam.h>
18 #include <linux/stringify.h>
19 #include <linux/kernel.h>
20 #include <linux/timer.h>
21 #include <linux/errno.h>
22 #include <linux/ioport.h>
23 #include <linux/slab.h>
24 #include <linux/vmalloc.h>
25 #include <linux/interrupt.h>
26 #include <linux/pci.h>
27 #include <linux/netdevice.h>
28 #include <linux/etherdevice.h>
29 #include <linux/skbuff.h>
30 #include <linux/dma-mapping.h>
31 #include <linux/bitops.h>
34 #include <linux/delay.h>
35 #include <asm/byteorder.h>
37 #include <linux/time.h>
38 #include <linux/ethtool.h>
39 #include <linux/mii.h>
41 #include <linux/if_vlan.h>
44 #include <net/checksum.h>
45 #include <linux/workqueue.h>
46 #include <linux/crc32.h>
47 #include <linux/prefetch.h>
48 #include <linux/cache.h>
49 #include <linux/firmware.h>
50 #include <linux/log2.h>
51 #include <linux/aer.h>
52 #include <linux/crash_dump.h>
54 #if IS_ENABLED(CONFIG_CNIC)
61 #define DRV_MODULE_NAME "bnx2"
62 #define DRV_MODULE_VERSION "2.2.6"
63 #define DRV_MODULE_RELDATE "January 29, 2014"
64 #define FW_MIPS_FILE_06 "/*(DEBLOBBED)*/"
65 #define FW_RV2P_FILE_06 "/*(DEBLOBBED)*/"
66 #define FW_MIPS_FILE_09 "/*(DEBLOBBED)*/"
67 #define FW_RV2P_FILE_09_Ax "/*(DEBLOBBED)*/"
68 #define FW_RV2P_FILE_09 "/*(DEBLOBBED)*/"
70 #define RUN_AT(x) (jiffies + (x))
72 /* Time in jiffies before concluding the transmitter is hung. */
73 #define TX_TIMEOUT (5*HZ)
75 static char version[] =
76 "QLogic " DRV_MODULE_NAME " Gigabit Ethernet Driver v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
78 MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
79 MODULE_DESCRIPTION("QLogic BCM5706/5708/5709/5716 Driver");
80 MODULE_LICENSE("GPL");
81 MODULE_VERSION(DRV_MODULE_VERSION);
84 static int disable_msi = 0;
86 module_param(disable_msi, int, S_IRUGO);
87 MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
103 /* indexed by board_t, above */
107 { "Broadcom NetXtreme II BCM5706 1000Base-T" },
108 { "HP NC370T Multifunction Gigabit Server Adapter" },
109 { "HP NC370i Multifunction Gigabit Server Adapter" },
110 { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
111 { "HP NC370F Multifunction Gigabit Server Adapter" },
112 { "Broadcom NetXtreme II BCM5708 1000Base-T" },
113 { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
114 { "Broadcom NetXtreme II BCM5709 1000Base-T" },
115 { "Broadcom NetXtreme II BCM5709 1000Base-SX" },
116 { "Broadcom NetXtreme II BCM5716 1000Base-T" },
117 { "Broadcom NetXtreme II BCM5716 1000Base-SX" },
120 static const struct pci_device_id bnx2_pci_tbl[] = {
121 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
122 PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
123 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
124 PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
125 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
126 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
127 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
128 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
129 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
130 PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
131 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
132 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
133 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
134 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
135 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
136 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
137 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
138 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
139 { PCI_VENDOR_ID_BROADCOM, 0x163b,
140 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716 },
141 { PCI_VENDOR_ID_BROADCOM, 0x163c,
142 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716S },
146 static const struct flash_spec flash_table[] =
148 #define BUFFERED_FLAGS (BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
149 #define NONBUFFERED_FLAGS (BNX2_NV_WREN)
151 {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
152 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
153 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
155 /* Expansion entry 0001 */
156 {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
157 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
158 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
160 /* Saifun SA25F010 (non-buffered flash) */
161 /* strap, cfg1, & write1 need updates */
162 {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
163 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
164 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
165 "Non-buffered flash (128kB)"},
166 /* Saifun SA25F020 (non-buffered flash) */
167 /* strap, cfg1, & write1 need updates */
168 {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
169 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
170 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
171 "Non-buffered flash (256kB)"},
172 /* Expansion entry 0100 */
173 {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
174 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
175 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
177 /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
178 {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
179 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
180 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
181 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
182 /* Entry 0110: ST M45PE20 (non-buffered flash)*/
183 {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
184 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
185 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
186 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
187 /* Saifun SA25F005 (non-buffered flash) */
188 /* strap, cfg1, & write1 need updates */
189 {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
190 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
191 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
192 "Non-buffered flash (64kB)"},
194 {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
195 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
196 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
198 /* Expansion entry 1001 */
199 {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
200 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
201 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
203 /* Expansion entry 1010 */
204 {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
205 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
206 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
208 /* ATMEL AT45DB011B (buffered flash) */
209 {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
210 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
211 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
212 "Buffered flash (128kB)"},
213 /* Expansion entry 1100 */
214 {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
215 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
216 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
218 /* Expansion entry 1101 */
219 {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
220 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
221 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
223 /* Atmel Expansion entry 1110 */
224 {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
225 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
226 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
227 "Entry 1110 (Atmel)"},
228 /* ATMEL AT45DB021B (buffered flash) */
229 {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
230 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
231 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
232 "Buffered flash (256kB)"},
235 static const struct flash_spec flash_5709 = {
236 .flags = BNX2_NV_BUFFERED,
237 .page_bits = BCM5709_FLASH_PAGE_BITS,
238 .page_size = BCM5709_FLASH_PAGE_SIZE,
239 .addr_mask = BCM5709_FLASH_BYTE_ADDR_MASK,
240 .total_size = BUFFERED_FLASH_TOTAL_SIZE*2,
241 .name = "5709 Buffered flash (256kB)",
244 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
246 static void bnx2_init_napi(struct bnx2 *bp);
247 static void bnx2_del_napi(struct bnx2 *bp);
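/* Editorial note (added; not from the original source): bnx2_tx_avail()
 * below relies on wrap-around arithmetic on the producer/consumer indices
 * (assumed here to be 16-bit; their declarations are not shown in this
 * excerpt).  For example, if tx_prod has wrapped to 5 while tx_cons is
 * still 65530, (u16)(5 - 65530) == 11, i.e. 11 descriptors are in flight.
 * Because the ring spends one index per page on a chain pointer (the
 * "256 indices for 255 entries" comment below), a difference of exactly
 * BNX2_TX_DESC_CNT is clamped to BNX2_MAX_TX_DESC_CNT before being
 * subtracted from tx_ring_size.
 */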
249 static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
253 /* Tell compiler to fetch tx_prod and tx_cons from memory. */
256 /* The ring uses 256 indices for 255 entries, one of them
257 * needs to be skipped.
259 diff = txr->tx_prod - txr->tx_cons;
260 if (unlikely(diff >= BNX2_TX_DESC_CNT)) {
262 if (diff == BNX2_TX_DESC_CNT)
263 diff = BNX2_MAX_TX_DESC_CNT;
265 return bp->tx_ring_size - diff;
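/* Editorial note (added): the two helpers below implement indirect register
 * access through the PCI configuration window.  The target offset is written
 * to BNX2_PCICFG_REG_WINDOW_ADDRESS and the data is then read from or
 * written to BNX2_PCICFG_REG_WINDOW; bp->indirect_lock is held across the
 * address/data pair so concurrent indirect accesses cannot interleave.
 */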
269 bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
274 spin_lock_irqsave(&bp->indirect_lock, flags);
275 BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
276 val = BNX2_RD(bp, BNX2_PCICFG_REG_WINDOW);
277 spin_unlock_irqrestore(&bp->indirect_lock, flags);
282 bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
286 spin_lock_irqsave(&bp->indirect_lock, flags);
287 BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
288 BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
289 spin_unlock_irqrestore(&bp->indirect_lock, flags);
293 bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
295 bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
299 bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
301 return bnx2_reg_rd_ind(bp, bp->shmem_base + offset);
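/* Editorial note (added): bnx2_ctx_wr() below has two context-write paths.
 * On the 5709 the value goes through BNX2_CTX_CTX_DATA/BNX2_CTX_CTX_CTRL and
 * the WRITE_REQ bit is polled for a few iterations until the hardware clears
 * it; on earlier chips the write goes directly through
 * BNX2_CTX_DATA_ADR/BNX2_CTX_DATA.  Both paths run under indirect_lock.
 */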
305 bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
310 spin_lock_irqsave(&bp->indirect_lock, flags);
311 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
314 BNX2_WR(bp, BNX2_CTX_CTX_DATA, val);
315 BNX2_WR(bp, BNX2_CTX_CTX_CTRL,
316 offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
317 for (i = 0; i < 5; i++) {
318 val = BNX2_RD(bp, BNX2_CTX_CTX_CTRL);
319 if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
324 BNX2_WR(bp, BNX2_CTX_DATA_ADR, offset);
325 BNX2_WR(bp, BNX2_CTX_DATA, val);
327 spin_unlock_irqrestore(&bp->indirect_lock, flags);
332 bnx2_drv_ctl(struct net_device *dev, struct drv_ctl_info *info)
334 struct bnx2 *bp = netdev_priv(dev);
335 struct drv_ctl_io *io = &info->data.io;
338 case DRV_CTL_IO_WR_CMD:
339 bnx2_reg_wr_ind(bp, io->offset, io->data);
341 case DRV_CTL_IO_RD_CMD:
342 io->data = bnx2_reg_rd_ind(bp, io->offset);
344 case DRV_CTL_CTX_WR_CMD:
345 bnx2_ctx_wr(bp, io->cid_addr, io->offset, io->data);
353 static void bnx2_setup_cnic_irq_info(struct bnx2 *bp)
355 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
356 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
359 if (bp->flags & BNX2_FLAG_USING_MSIX) {
360 cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
361 bnapi->cnic_present = 0;
362 sb_id = bp->irq_nvecs;
363 cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
365 cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
366 bnapi->cnic_tag = bnapi->last_status_idx;
367 bnapi->cnic_present = 1;
369 cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
372 cp->irq_arr[0].vector = bp->irq_tbl[sb_id].vector;
373 cp->irq_arr[0].status_blk = (void *)
374 ((unsigned long) bnapi->status_blk.msi +
375 (BNX2_SBLK_MSIX_ALIGN_SIZE * sb_id));
376 cp->irq_arr[0].status_blk_num = sb_id;
380 static int bnx2_register_cnic(struct net_device *dev, struct cnic_ops *ops,
383 struct bnx2 *bp = netdev_priv(dev);
384 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
389 if (cp->drv_state & CNIC_DRV_STATE_REGD)
392 if (!bnx2_reg_rd_ind(bp, BNX2_FW_MAX_ISCSI_CONN))
395 bp->cnic_data = data;
396 rcu_assign_pointer(bp->cnic_ops, ops);
399 cp->drv_state = CNIC_DRV_STATE_REGD;
401 bnx2_setup_cnic_irq_info(bp);
406 static int bnx2_unregister_cnic(struct net_device *dev)
408 struct bnx2 *bp = netdev_priv(dev);
409 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
410 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
412 mutex_lock(&bp->cnic_lock);
414 bnapi->cnic_present = 0;
415 RCU_INIT_POINTER(bp->cnic_ops, NULL);
416 mutex_unlock(&bp->cnic_lock);
421 static struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev)
423 struct bnx2 *bp = netdev_priv(dev);
424 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
426 if (!cp->max_iscsi_conn)
429 cp->drv_owner = THIS_MODULE;
430 cp->chip_id = bp->chip_id;
432 cp->io_base = bp->regview;
433 cp->drv_ctl = bnx2_drv_ctl;
434 cp->drv_register_cnic = bnx2_register_cnic;
435 cp->drv_unregister_cnic = bnx2_unregister_cnic;
441 bnx2_cnic_stop(struct bnx2 *bp)
443 struct cnic_ops *c_ops;
444 struct cnic_ctl_info info;
446 mutex_lock(&bp->cnic_lock);
447 c_ops = rcu_dereference_protected(bp->cnic_ops,
448 lockdep_is_held(&bp->cnic_lock));
450 info.cmd = CNIC_CTL_STOP_CMD;
451 c_ops->cnic_ctl(bp->cnic_data, &info);
453 mutex_unlock(&bp->cnic_lock);
457 bnx2_cnic_start(struct bnx2 *bp)
459 struct cnic_ops *c_ops;
460 struct cnic_ctl_info info;
462 mutex_lock(&bp->cnic_lock);
463 c_ops = rcu_dereference_protected(bp->cnic_ops,
464 lockdep_is_held(&bp->cnic_lock));
466 if (!(bp->flags & BNX2_FLAG_USING_MSIX)) {
467 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
469 bnapi->cnic_tag = bnapi->last_status_idx;
471 info.cmd = CNIC_CTL_START_CMD;
472 c_ops->cnic_ctl(bp->cnic_data, &info);
474 mutex_unlock(&bp->cnic_lock);
480 bnx2_cnic_stop(struct bnx2 *bp)
485 bnx2_cnic_start(struct bnx2 *bp)
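/* Editorial note (added): in the MDIO helpers below, the command word written
 * to BNX2_EMAC_MDIO_COMM packs the PHY address at bit 21 and the register
 * number at bit 16, combined with the READ or WRITE command, DISEXT and
 * START_BUSY flags.  START_BUSY is then polled (up to 50 iterations) until
 * the EMAC clears it.  If the PHY is in auto-polling mode, auto-poll is
 * temporarily disabled around the transaction and restored afterwards.
 */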
492 bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
497 if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
498 val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
499 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
501 BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
502 BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
507 val1 = (bp->phy_addr << 21) | (reg << 16) |
508 BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
509 BNX2_EMAC_MDIO_COMM_START_BUSY;
510 BNX2_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
512 for (i = 0; i < 50; i++) {
515 val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
516 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
519 val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
520 val1 &= BNX2_EMAC_MDIO_COMM_DATA;
526 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
535 if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
536 val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
537 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
539 BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
540 BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
549 bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
554 if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
555 val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
556 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
558 BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
559 BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
564 val1 = (bp->phy_addr << 21) | (reg << 16) | val |
565 BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
566 BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
567 BNX2_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
569 for (i = 0; i < 50; i++) {
572 val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
573 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
579 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
584 if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
585 val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
586 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
588 BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
589 BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
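/* Editorial note (added): the interrupt helpers below drive
 * BNX2_PCICFG_INT_ACK_CMD per vector.  Writing MASK_INT masks a vector;
 * writing INDEX_VALID together with the vector's last_status_idx
 * acknowledges the status block and unmasks it.  bnx2_enable_int() ends
 * with a BNX2_HC_COMMAND_COAL_NOW write, presumably to force an immediate
 * host-coalescing event rather than waiting for the next coalescing tick.
 */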
598 bnx2_disable_int(struct bnx2 *bp)
601 struct bnx2_napi *bnapi;
603 for (i = 0; i < bp->irq_nvecs; i++) {
604 bnapi = &bp->bnx2_napi[i];
605 BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
606 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
608 BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
612 bnx2_enable_int(struct bnx2 *bp)
615 struct bnx2_napi *bnapi;
617 for (i = 0; i < bp->irq_nvecs; i++) {
618 bnapi = &bp->bnx2_napi[i];
620 BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
621 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
622 BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
623 bnapi->last_status_idx);
625 BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
626 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
627 bnapi->last_status_idx);
629 BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
633 bnx2_disable_int_sync(struct bnx2 *bp)
637 atomic_inc(&bp->intr_sem);
638 if (!netif_running(bp->dev))
641 bnx2_disable_int(bp);
642 for (i = 0; i < bp->irq_nvecs; i++)
643 synchronize_irq(bp->irq_tbl[i].vector);
647 bnx2_napi_disable(struct bnx2 *bp)
651 for (i = 0; i < bp->irq_nvecs; i++)
652 napi_disable(&bp->bnx2_napi[i].napi);
656 bnx2_napi_enable(struct bnx2 *bp)
660 for (i = 0; i < bp->irq_nvecs; i++)
661 napi_enable(&bp->bnx2_napi[i].napi);
665 bnx2_netif_stop(struct bnx2 *bp, bool stop_cnic)
669 if (netif_running(bp->dev)) {
670 bnx2_napi_disable(bp);
671 netif_tx_disable(bp->dev);
673 bnx2_disable_int_sync(bp);
674 netif_carrier_off(bp->dev); /* prevent tx timeout */
678 bnx2_netif_start(struct bnx2 *bp, bool start_cnic)
680 if (atomic_dec_and_test(&bp->intr_sem)) {
681 if (netif_running(bp->dev)) {
682 netif_tx_wake_all_queues(bp->dev);
683 spin_lock_bh(&bp->phy_lock);
685 netif_carrier_on(bp->dev);
686 spin_unlock_bh(&bp->phy_lock);
687 bnx2_napi_enable(bp);
696 bnx2_free_tx_mem(struct bnx2 *bp)
700 for (i = 0; i < bp->num_tx_rings; i++) {
701 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
702 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
704 if (txr->tx_desc_ring) {
705 dma_free_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
707 txr->tx_desc_mapping);
708 txr->tx_desc_ring = NULL;
710 kfree(txr->tx_buf_ring);
711 txr->tx_buf_ring = NULL;
716 bnx2_free_rx_mem(struct bnx2 *bp)
720 for (i = 0; i < bp->num_rx_rings; i++) {
721 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
722 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
725 for (j = 0; j < bp->rx_max_ring; j++) {
726 if (rxr->rx_desc_ring[j])
727 dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
728 rxr->rx_desc_ring[j],
729 rxr->rx_desc_mapping[j]);
730 rxr->rx_desc_ring[j] = NULL;
732 vfree(rxr->rx_buf_ring);
733 rxr->rx_buf_ring = NULL;
735 for (j = 0; j < bp->rx_max_pg_ring; j++) {
736 if (rxr->rx_pg_desc_ring[j])
737 dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
738 rxr->rx_pg_desc_ring[j],
739 rxr->rx_pg_desc_mapping[j]);
740 rxr->rx_pg_desc_ring[j] = NULL;
742 vfree(rxr->rx_pg_ring);
743 rxr->rx_pg_ring = NULL;
748 bnx2_alloc_tx_mem(struct bnx2 *bp)
752 for (i = 0; i < bp->num_tx_rings; i++) {
753 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
754 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
756 txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
757 if (txr->tx_buf_ring == NULL)
761 dma_alloc_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
762 &txr->tx_desc_mapping, GFP_KERNEL);
763 if (txr->tx_desc_ring == NULL)
770 bnx2_alloc_rx_mem(struct bnx2 *bp)
774 for (i = 0; i < bp->num_rx_rings; i++) {
775 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
776 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
780 vzalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
781 if (rxr->rx_buf_ring == NULL)
784 for (j = 0; j < bp->rx_max_ring; j++) {
785 rxr->rx_desc_ring[j] =
786 dma_alloc_coherent(&bp->pdev->dev,
788 &rxr->rx_desc_mapping[j],
790 if (rxr->rx_desc_ring[j] == NULL)
795 if (bp->rx_pg_ring_size) {
796 rxr->rx_pg_ring = vzalloc(SW_RXPG_RING_SIZE *
798 if (rxr->rx_pg_ring == NULL)
803 for (j = 0; j < bp->rx_max_pg_ring; j++) {
804 rxr->rx_pg_desc_ring[j] =
805 dma_alloc_coherent(&bp->pdev->dev,
807 &rxr->rx_pg_desc_mapping[j],
809 if (rxr->rx_pg_desc_ring[j] == NULL)
818 bnx2_free_stats_blk(struct net_device *dev)
820 struct bnx2 *bp = netdev_priv(dev);
822 if (bp->status_blk) {
823 dma_free_coherent(&bp->pdev->dev, bp->status_stats_size,
825 bp->status_blk_mapping);
826 bp->status_blk = NULL;
827 bp->stats_blk = NULL;
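/* Editorial note (added): in the allocation below, the status block(s) and
 * the statistics block share a single DMA-coherent region.  The layout
 * implied by the code is an L1-cache-aligned status area first -- one
 * struct status_block, or BNX2_MAX_MSIX_HW_VEC slots of
 * BNX2_SBLK_MSIX_ALIGN_SIZE on MSI-X capable parts -- followed by
 * struct statistics_block; stats_blk and stats_blk_mapping are offset
 * from the status block base by status_blk_size.
 */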
832 bnx2_alloc_stats_blk(struct net_device *dev)
836 struct bnx2 *bp = netdev_priv(dev);
838 /* Combine status and statistics blocks into one allocation. */
839 status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
840 if (bp->flags & BNX2_FLAG_MSIX_CAP)
841 status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
842 BNX2_SBLK_MSIX_ALIGN_SIZE);
843 bp->status_stats_size = status_blk_size +
844 sizeof(struct statistics_block);
845 status_blk = dma_zalloc_coherent(&bp->pdev->dev, bp->status_stats_size,
846 &bp->status_blk_mapping, GFP_KERNEL);
847 if (status_blk == NULL)
850 bp->status_blk = status_blk;
851 bp->stats_blk = status_blk + status_blk_size;
852 bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;
858 bnx2_free_mem(struct bnx2 *bp)
861 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
863 bnx2_free_tx_mem(bp);
864 bnx2_free_rx_mem(bp);
866 for (i = 0; i < bp->ctx_pages; i++) {
867 if (bp->ctx_blk[i]) {
868 dma_free_coherent(&bp->pdev->dev, BNX2_PAGE_SIZE,
870 bp->ctx_blk_mapping[i]);
871 bp->ctx_blk[i] = NULL;
875 if (bnapi->status_blk.msi)
876 bnapi->status_blk.msi = NULL;
880 bnx2_alloc_mem(struct bnx2 *bp)
883 struct bnx2_napi *bnapi;
885 bnapi = &bp->bnx2_napi[0];
886 bnapi->status_blk.msi = bp->status_blk;
887 bnapi->hw_tx_cons_ptr =
888 &bnapi->status_blk.msi->status_tx_quick_consumer_index0;
889 bnapi->hw_rx_cons_ptr =
890 &bnapi->status_blk.msi->status_rx_quick_consumer_index0;
891 if (bp->flags & BNX2_FLAG_MSIX_CAP) {
892 for (i = 1; i < bp->irq_nvecs; i++) {
893 struct status_block_msix *sblk;
895 bnapi = &bp->bnx2_napi[i];
897 sblk = (bp->status_blk + BNX2_SBLK_MSIX_ALIGN_SIZE * i);
898 bnapi->status_blk.msix = sblk;
899 bnapi->hw_tx_cons_ptr =
900 &sblk->status_tx_quick_consumer_index;
901 bnapi->hw_rx_cons_ptr =
902 &sblk->status_rx_quick_consumer_index;
903 bnapi->int_num = i << 24;
907 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
908 bp->ctx_pages = 0x2000 / BNX2_PAGE_SIZE;
909 if (bp->ctx_pages == 0)
911 for (i = 0; i < bp->ctx_pages; i++) {
912 bp->ctx_blk[i] = dma_alloc_coherent(&bp->pdev->dev,
914 &bp->ctx_blk_mapping[i],
916 if (bp->ctx_blk[i] == NULL)
921 err = bnx2_alloc_rx_mem(bp);
925 err = bnx2_alloc_tx_mem(bp);
937 bnx2_report_fw_link(struct bnx2 *bp)
939 u32 fw_link_status = 0;
941 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
947 switch (bp->line_speed) {
949 if (bp->duplex == DUPLEX_HALF)
950 fw_link_status = BNX2_LINK_STATUS_10HALF;
952 fw_link_status = BNX2_LINK_STATUS_10FULL;
955 if (bp->duplex == DUPLEX_HALF)
956 fw_link_status = BNX2_LINK_STATUS_100HALF;
958 fw_link_status = BNX2_LINK_STATUS_100FULL;
961 if (bp->duplex == DUPLEX_HALF)
962 fw_link_status = BNX2_LINK_STATUS_1000HALF;
964 fw_link_status = BNX2_LINK_STATUS_1000FULL;
967 if (bp->duplex == DUPLEX_HALF)
968 fw_link_status = BNX2_LINK_STATUS_2500HALF;
970 fw_link_status = BNX2_LINK_STATUS_2500FULL;
974 fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
977 fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
979 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
980 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
982 if (!(bmsr & BMSR_ANEGCOMPLETE) ||
983 bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
984 fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
986 fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
990 fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
992 bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
996 bnx2_xceiver_str(struct bnx2 *bp)
998 return (bp->phy_port == PORT_FIBRE) ? "SerDes" :
999 ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
1004 bnx2_report_link(struct bnx2 *bp)
1007 netif_carrier_on(bp->dev);
1008 netdev_info(bp->dev, "NIC %s Link is Up, %d Mbps %s duplex",
1009 bnx2_xceiver_str(bp),
1011 bp->duplex == DUPLEX_FULL ? "full" : "half");
1013 if (bp->flow_ctrl) {
1014 if (bp->flow_ctrl & FLOW_CTRL_RX) {
1015 pr_cont(", receive ");
1016 if (bp->flow_ctrl & FLOW_CTRL_TX)
1017 pr_cont("& transmit ");
1020 pr_cont(", transmit ");
1022 pr_cont("flow control ON");
1026 netif_carrier_off(bp->dev);
1027 netdev_err(bp->dev, "NIC %s Link is Down\n",
1028 bnx2_xceiver_str(bp));
1031 bnx2_report_fw_link(bp);
1035 bnx2_resolve_flow_ctrl(struct bnx2 *bp)
1037 u32 local_adv, remote_adv;
1040 if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
1041 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
1043 if (bp->duplex == DUPLEX_FULL) {
1044 bp->flow_ctrl = bp->req_flow_ctrl;
1049 if (bp->duplex != DUPLEX_FULL) {
1053 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1054 (BNX2_CHIP(bp) == BNX2_CHIP_5708)) {
1057 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
1058 if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
1059 bp->flow_ctrl |= FLOW_CTRL_TX;
1060 if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
1061 bp->flow_ctrl |= FLOW_CTRL_RX;
1065 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1066 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1068 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1069 u32 new_local_adv = 0;
1070 u32 new_remote_adv = 0;
1072 if (local_adv & ADVERTISE_1000XPAUSE)
1073 new_local_adv |= ADVERTISE_PAUSE_CAP;
1074 if (local_adv & ADVERTISE_1000XPSE_ASYM)
1075 new_local_adv |= ADVERTISE_PAUSE_ASYM;
1076 if (remote_adv & ADVERTISE_1000XPAUSE)
1077 new_remote_adv |= ADVERTISE_PAUSE_CAP;
1078 if (remote_adv & ADVERTISE_1000XPSE_ASYM)
1079 new_remote_adv |= ADVERTISE_PAUSE_ASYM;
1081 local_adv = new_local_adv;
1082 remote_adv = new_remote_adv;
1085 /* See Table 28B-3 of 802.3ab-1999 spec. */
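/* Editorial restatement (added) of the resolution implemented below:
 *   local PAUSE      + remote PAUSE            -> TX and RX pause
 *   local PAUSE+ASYM + remote ASYM only        -> RX pause only
 *   local ASYM only  + remote PAUSE+ASYM       -> TX pause only
 *   any other combination                      -> no pause
 */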
1086 if (local_adv & ADVERTISE_PAUSE_CAP) {
1087 if (local_adv & ADVERTISE_PAUSE_ASYM) {
1088 if (remote_adv & ADVERTISE_PAUSE_CAP) {
1089 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
1091 else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
1092 bp->flow_ctrl = FLOW_CTRL_RX;
1096 if (remote_adv & ADVERTISE_PAUSE_CAP) {
1097 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
1101 else if (local_adv & ADVERTISE_PAUSE_ASYM) {
1102 if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
1103 (remote_adv & ADVERTISE_PAUSE_ASYM)) {
1105 bp->flow_ctrl = FLOW_CTRL_TX;
1111 bnx2_5709s_linkup(struct bnx2 *bp)
1117 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
1118 bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
1119 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1121 if ((bp->autoneg & AUTONEG_SPEED) == 0) {
1122 bp->line_speed = bp->req_line_speed;
1123 bp->duplex = bp->req_duplex;
1126 speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
1128 case MII_BNX2_GP_TOP_AN_SPEED_10:
1129 bp->line_speed = SPEED_10;
1131 case MII_BNX2_GP_TOP_AN_SPEED_100:
1132 bp->line_speed = SPEED_100;
1134 case MII_BNX2_GP_TOP_AN_SPEED_1G:
1135 case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
1136 bp->line_speed = SPEED_1000;
1138 case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
1139 bp->line_speed = SPEED_2500;
1142 if (val & MII_BNX2_GP_TOP_AN_FD)
1143 bp->duplex = DUPLEX_FULL;
1145 bp->duplex = DUPLEX_HALF;
1150 bnx2_5708s_linkup(struct bnx2 *bp)
1155 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
1156 switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
1157 case BCM5708S_1000X_STAT1_SPEED_10:
1158 bp->line_speed = SPEED_10;
1160 case BCM5708S_1000X_STAT1_SPEED_100:
1161 bp->line_speed = SPEED_100;
1163 case BCM5708S_1000X_STAT1_SPEED_1G:
1164 bp->line_speed = SPEED_1000;
1166 case BCM5708S_1000X_STAT1_SPEED_2G5:
1167 bp->line_speed = SPEED_2500;
1170 if (val & BCM5708S_1000X_STAT1_FD)
1171 bp->duplex = DUPLEX_FULL;
1173 bp->duplex = DUPLEX_HALF;
1179 bnx2_5706s_linkup(struct bnx2 *bp)
1181 u32 bmcr, local_adv, remote_adv, common;
1184 bp->line_speed = SPEED_1000;
1186 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1187 if (bmcr & BMCR_FULLDPLX) {
1188 bp->duplex = DUPLEX_FULL;
1191 bp->duplex = DUPLEX_HALF;
1194 if (!(bmcr & BMCR_ANENABLE)) {
1198 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1199 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1201 common = local_adv & remote_adv;
1202 if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
1204 if (common & ADVERTISE_1000XFULL) {
1205 bp->duplex = DUPLEX_FULL;
1208 bp->duplex = DUPLEX_HALF;
1216 bnx2_copper_linkup(struct bnx2 *bp)
1220 bp->phy_flags &= ~BNX2_PHY_FLAG_MDIX;
1222 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1223 if (bmcr & BMCR_ANENABLE) {
1224 u32 local_adv, remote_adv, common;
1226 bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
1227 bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
1229 common = local_adv & (remote_adv >> 2);
1230 if (common & ADVERTISE_1000FULL) {
1231 bp->line_speed = SPEED_1000;
1232 bp->duplex = DUPLEX_FULL;
1234 else if (common & ADVERTISE_1000HALF) {
1235 bp->line_speed = SPEED_1000;
1236 bp->duplex = DUPLEX_HALF;
1239 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1240 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1242 common = local_adv & remote_adv;
1243 if (common & ADVERTISE_100FULL) {
1244 bp->line_speed = SPEED_100;
1245 bp->duplex = DUPLEX_FULL;
1247 else if (common & ADVERTISE_100HALF) {
1248 bp->line_speed = SPEED_100;
1249 bp->duplex = DUPLEX_HALF;
1251 else if (common & ADVERTISE_10FULL) {
1252 bp->line_speed = SPEED_10;
1253 bp->duplex = DUPLEX_FULL;
1255 else if (common & ADVERTISE_10HALF) {
1256 bp->line_speed = SPEED_10;
1257 bp->duplex = DUPLEX_HALF;
1266 if (bmcr & BMCR_SPEED100) {
1267 bp->line_speed = SPEED_100;
1270 bp->line_speed = SPEED_10;
1272 if (bmcr & BMCR_FULLDPLX) {
1273 bp->duplex = DUPLEX_FULL;
1276 bp->duplex = DUPLEX_HALF;
1283 bnx2_read_phy(bp, MII_BNX2_EXT_STATUS, &ext_status);
1284 if (ext_status & EXT_STATUS_MDIX)
1285 bp->phy_flags |= BNX2_PHY_FLAG_MDIX;
1292 bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
1294 u32 val, rx_cid_addr = GET_CID_ADDR(cid);
1296 val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
1297 val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
1300 if (bp->flow_ctrl & FLOW_CTRL_TX)
1301 val |= BNX2_L2CTX_FLOW_CTRL_ENABLE;
1303 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
1307 bnx2_init_all_rx_contexts(struct bnx2 *bp)
1312 for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) {
1315 bnx2_init_rx_context(bp, cid);
1320 bnx2_set_mac_link(struct bnx2 *bp)
1324 BNX2_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
1325 if (bp->link_up && (bp->line_speed == SPEED_1000) &&
1326 (bp->duplex == DUPLEX_HALF)) {
1327 BNX2_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
1330 /* Configure the EMAC mode register. */
1331 val = BNX2_RD(bp, BNX2_EMAC_MODE);
1333 val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
1334 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
1335 BNX2_EMAC_MODE_25G_MODE);
1338 switch (bp->line_speed) {
1340 if (BNX2_CHIP(bp) != BNX2_CHIP_5706) {
1341 val |= BNX2_EMAC_MODE_PORT_MII_10M;
1346 val |= BNX2_EMAC_MODE_PORT_MII;
1349 val |= BNX2_EMAC_MODE_25G_MODE;
1352 val |= BNX2_EMAC_MODE_PORT_GMII;
1357 val |= BNX2_EMAC_MODE_PORT_GMII;
1360 /* Set the MAC to operate in the appropriate duplex mode. */
1361 if (bp->duplex == DUPLEX_HALF)
1362 val |= BNX2_EMAC_MODE_HALF_DUPLEX;
1363 BNX2_WR(bp, BNX2_EMAC_MODE, val);
1365 /* Enable/disable rx PAUSE. */
1366 bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;
1368 if (bp->flow_ctrl & FLOW_CTRL_RX)
1369 bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
1370 BNX2_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);
1372 /* Enable/disable tx PAUSE. */
1373 val = BNX2_RD(bp, BNX2_EMAC_TX_MODE);
1374 val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;
1376 if (bp->flow_ctrl & FLOW_CTRL_TX)
1377 val |= BNX2_EMAC_TX_MODE_FLOW_EN;
1378 BNX2_WR(bp, BNX2_EMAC_TX_MODE, val);
1380 /* Acknowledge the interrupt. */
1381 BNX2_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);
1383 bnx2_init_all_rx_contexts(bp);
1387 bnx2_enable_bmsr1(struct bnx2 *bp)
1389 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1390 (BNX2_CHIP(bp) == BNX2_CHIP_5709))
1391 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1392 MII_BNX2_BLK_ADDR_GP_STATUS);
1396 bnx2_disable_bmsr1(struct bnx2 *bp)
1398 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1399 (BNX2_CHIP(bp) == BNX2_CHIP_5709))
1400 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1401 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1405 bnx2_test_and_enable_2g5(struct bnx2 *bp)
1410 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1413 if (bp->autoneg & AUTONEG_SPEED)
1414 bp->advertising |= ADVERTISED_2500baseX_Full;
1416 if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
1417 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1419 bnx2_read_phy(bp, bp->mii_up1, &up1);
1420 if (!(up1 & BCM5708S_UP1_2G5)) {
1421 up1 |= BCM5708S_UP1_2G5;
1422 bnx2_write_phy(bp, bp->mii_up1, up1);
1426 if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
1427 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1428 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1434 bnx2_test_and_disable_2g5(struct bnx2 *bp)
1439 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1442 if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
1443 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1445 bnx2_read_phy(bp, bp->mii_up1, &up1);
1446 if (up1 & BCM5708S_UP1_2G5) {
1447 up1 &= ~BCM5708S_UP1_2G5;
1448 bnx2_write_phy(bp, bp->mii_up1, up1);
1452 if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
1453 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1454 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1460 bnx2_enable_forced_2g5(struct bnx2 *bp)
1462 u32 uninitialized_var(bmcr);
1465 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1468 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
1471 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1472 MII_BNX2_BLK_ADDR_SERDES_DIG);
1473 if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
1474 val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
1475 val |= MII_BNX2_SD_MISC1_FORCE |
1476 MII_BNX2_SD_MISC1_FORCE_2_5G;
1477 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1480 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1481 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1482 err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1484 } else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
1485 err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1487 bmcr |= BCM5708S_BMCR_FORCE_2500;
1495 if (bp->autoneg & AUTONEG_SPEED) {
1496 bmcr &= ~BMCR_ANENABLE;
1497 if (bp->req_duplex == DUPLEX_FULL)
1498 bmcr |= BMCR_FULLDPLX;
1500 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1504 bnx2_disable_forced_2g5(struct bnx2 *bp)
1506 u32 uninitialized_var(bmcr);
1509 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1512 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
1515 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1516 MII_BNX2_BLK_ADDR_SERDES_DIG);
1517 if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
1518 val &= ~MII_BNX2_SD_MISC1_FORCE;
1519 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1522 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1523 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1524 err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1526 } else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
1527 err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1529 bmcr &= ~BCM5708S_BMCR_FORCE_2500;
1537 if (bp->autoneg & AUTONEG_SPEED)
1538 bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
1539 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1543 bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
1547 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
1548 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
1550 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
1552 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
1556 bnx2_set_link(struct bnx2 *bp)
1561 if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
1566 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
1569 link_up = bp->link_up;
1571 bnx2_enable_bmsr1(bp);
1572 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
1573 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
1574 bnx2_disable_bmsr1(bp);
1576 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1577 (BNX2_CHIP(bp) == BNX2_CHIP_5706)) {
1580 if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
1581 bnx2_5706s_force_link_dn(bp, 0);
1582 bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
1584 val = BNX2_RD(bp, BNX2_EMAC_STATUS);
1586 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
1587 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
1588 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
1590 if ((val & BNX2_EMAC_STATUS_LINK) &&
1591 !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
1592 bmsr |= BMSR_LSTATUS;
1594 bmsr &= ~BMSR_LSTATUS;
1597 if (bmsr & BMSR_LSTATUS) {
1600 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1601 if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
1602 bnx2_5706s_linkup(bp);
1603 else if (BNX2_CHIP(bp) == BNX2_CHIP_5708)
1604 bnx2_5708s_linkup(bp);
1605 else if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
1606 bnx2_5709s_linkup(bp);
1609 bnx2_copper_linkup(bp);
1611 bnx2_resolve_flow_ctrl(bp);
1614 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1615 (bp->autoneg & AUTONEG_SPEED))
1616 bnx2_disable_forced_2g5(bp);
1618 if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
1621 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1622 bmcr |= BMCR_ANENABLE;
1623 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1625 bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
1630 if (bp->link_up != link_up) {
1631 bnx2_report_link(bp);
1634 bnx2_set_mac_link(bp);
1640 bnx2_reset_phy(struct bnx2 *bp)
1645 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
1647 #define PHY_RESET_MAX_WAIT 100
1648 for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1651 bnx2_read_phy(bp, bp->mii_bmcr, &reg);
1652 if (!(reg & BMCR_RESET)) {
1657 if (i == PHY_RESET_MAX_WAIT) {
1664 bnx2_phy_get_pause_adv(struct bnx2 *bp)
1668 if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1669 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1671 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1672 adv = ADVERTISE_1000XPAUSE;
1675 adv = ADVERTISE_PAUSE_CAP;
1678 else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1679 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1680 adv = ADVERTISE_1000XPSE_ASYM;
1683 adv = ADVERTISE_PAUSE_ASYM;
1686 else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1687 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1688 adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1691 adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1697 static int bnx2_fw_sync(struct bnx2 *, u32, int, int);
1700 bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
1701 __releases(&bp->phy_lock)
1702 __acquires(&bp->phy_lock)
1704 u32 speed_arg = 0, pause_adv;
1706 pause_adv = bnx2_phy_get_pause_adv(bp);
1708 if (bp->autoneg & AUTONEG_SPEED) {
1709 speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
1710 if (bp->advertising & ADVERTISED_10baseT_Half)
1711 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1712 if (bp->advertising & ADVERTISED_10baseT_Full)
1713 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1714 if (bp->advertising & ADVERTISED_100baseT_Half)
1715 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1716 if (bp->advertising & ADVERTISED_100baseT_Full)
1717 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1718 if (bp->advertising & ADVERTISED_1000baseT_Full)
1719 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1720 if (bp->advertising & ADVERTISED_2500baseX_Full)
1721 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1723 if (bp->req_line_speed == SPEED_2500)
1724 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1725 else if (bp->req_line_speed == SPEED_1000)
1726 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1727 else if (bp->req_line_speed == SPEED_100) {
1728 if (bp->req_duplex == DUPLEX_FULL)
1729 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1731 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1732 } else if (bp->req_line_speed == SPEED_10) {
1733 if (bp->req_duplex == DUPLEX_FULL)
1734 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1736 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1740 if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
1741 speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
1742 if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
1743 speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;
1745 if (port == PORT_TP)
1746 speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
1747 BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;
1749 bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);
1751 spin_unlock_bh(&bp->phy_lock);
1752 bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0);
1753 spin_lock_bh(&bp->phy_lock);
1759 bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
1760 __releases(&bp->phy_lock)
1761 __acquires(&bp->phy_lock)
1766 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
1767 return bnx2_setup_remote_phy(bp, port);
1769 if (!(bp->autoneg & AUTONEG_SPEED)) {
1771 int force_link_down = 0;
1773 if (bp->req_line_speed == SPEED_2500) {
1774 if (!bnx2_test_and_enable_2g5(bp))
1775 force_link_down = 1;
1776 } else if (bp->req_line_speed == SPEED_1000) {
1777 if (bnx2_test_and_disable_2g5(bp))
1778 force_link_down = 1;
1780 bnx2_read_phy(bp, bp->mii_adv, &adv);
1781 adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);
1783 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1784 new_bmcr = bmcr & ~BMCR_ANENABLE;
1785 new_bmcr |= BMCR_SPEED1000;
1787 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
1788 if (bp->req_line_speed == SPEED_2500)
1789 bnx2_enable_forced_2g5(bp);
1790 else if (bp->req_line_speed == SPEED_1000) {
1791 bnx2_disable_forced_2g5(bp);
1792 new_bmcr &= ~0x2000;
1795 } else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
1796 if (bp->req_line_speed == SPEED_2500)
1797 new_bmcr |= BCM5708S_BMCR_FORCE_2500;
1799 new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
1802 if (bp->req_duplex == DUPLEX_FULL) {
1803 adv |= ADVERTISE_1000XFULL;
1804 new_bmcr |= BMCR_FULLDPLX;
1807 adv |= ADVERTISE_1000XHALF;
1808 new_bmcr &= ~BMCR_FULLDPLX;
1810 if ((new_bmcr != bmcr) || (force_link_down)) {
1811 /* Force a link down visible on the other side */
1813 bnx2_write_phy(bp, bp->mii_adv, adv &
1814 ~(ADVERTISE_1000XFULL |
1815 ADVERTISE_1000XHALF));
1816 bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
1817 BMCR_ANRESTART | BMCR_ANENABLE);
1820 netif_carrier_off(bp->dev);
1821 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1822 bnx2_report_link(bp);
1824 bnx2_write_phy(bp, bp->mii_adv, adv);
1825 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1827 bnx2_resolve_flow_ctrl(bp);
1828 bnx2_set_mac_link(bp);
1833 bnx2_test_and_enable_2g5(bp);
1835 if (bp->advertising & ADVERTISED_1000baseT_Full)
1836 new_adv |= ADVERTISE_1000XFULL;
1838 new_adv |= bnx2_phy_get_pause_adv(bp);
1840 bnx2_read_phy(bp, bp->mii_adv, &adv);
1841 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1843 bp->serdes_an_pending = 0;
1844 if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
1845 /* Force a link down visible on the other side */
1847 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
1848 spin_unlock_bh(&bp->phy_lock);
1850 spin_lock_bh(&bp->phy_lock);
1853 bnx2_write_phy(bp, bp->mii_adv, new_adv);
1854 bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
1856 /* Speed up link-up time when the link partner
1857 * does not autonegotiate which is very common
1858 * in blade servers. Some blade servers use
1859 * IPMI for keyboard input and it's important
1860 * to minimize link disruptions. Autoneg. involves
1861 * exchanging base pages plus 3 next pages and
1862 * normally completes in about 120 msec.
1864 bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
1865 bp->serdes_an_pending = 1;
1866 mod_timer(&bp->timer, jiffies + bp->current_interval);
1868 bnx2_resolve_flow_ctrl(bp);
1869 bnx2_set_mac_link(bp);
1875 #define ETHTOOL_ALL_FIBRE_SPEED \
1876 (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ? \
1877 (ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
1878 (ADVERTISED_1000baseT_Full)
1880 #define ETHTOOL_ALL_COPPER_SPEED \
1881 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1882 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1883 ADVERTISED_1000baseT_Full)
1885 #define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
1886 ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)
1888 #define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1891 bnx2_set_default_remote_link(struct bnx2 *bp)
1895 if (bp->phy_port == PORT_TP)
1896 link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
1898 link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);
1900 if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
1901 bp->req_line_speed = 0;
1902 bp->autoneg |= AUTONEG_SPEED;
1903 bp->advertising = ADVERTISED_Autoneg;
1904 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1905 bp->advertising |= ADVERTISED_10baseT_Half;
1906 if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
1907 bp->advertising |= ADVERTISED_10baseT_Full;
1908 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1909 bp->advertising |= ADVERTISED_100baseT_Half;
1910 if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
1911 bp->advertising |= ADVERTISED_100baseT_Full;
1912 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1913 bp->advertising |= ADVERTISED_1000baseT_Full;
1914 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1915 bp->advertising |= ADVERTISED_2500baseX_Full;
1918 bp->advertising = 0;
1919 bp->req_duplex = DUPLEX_FULL;
1920 if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
1921 bp->req_line_speed = SPEED_10;
1922 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1923 bp->req_duplex = DUPLEX_HALF;
1925 if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
1926 bp->req_line_speed = SPEED_100;
1927 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1928 bp->req_duplex = DUPLEX_HALF;
1930 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1931 bp->req_line_speed = SPEED_1000;
1932 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1933 bp->req_line_speed = SPEED_2500;
1938 bnx2_set_default_link(struct bnx2 *bp)
1940 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
1941 bnx2_set_default_remote_link(bp);
1945 bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1946 bp->req_line_speed = 0;
1947 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1950 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1952 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
1953 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1954 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1956 bp->req_line_speed = bp->line_speed = SPEED_1000;
1957 bp->req_duplex = DUPLEX_FULL;
1960 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1964 bnx2_send_heart_beat(struct bnx2 *bp)
1969 spin_lock(&bp->indirect_lock);
1970 msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
1971 addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
1972 BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
1973 BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
1974 spin_unlock(&bp->indirect_lock);
1978 bnx2_remote_phy_event(struct bnx2 *bp)
1981 u8 link_up = bp->link_up;
1984 msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
1986 if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
1987 bnx2_send_heart_beat(bp);
1989 msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;
1991 if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
1997 speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
1998 bp->duplex = DUPLEX_FULL;
2000 case BNX2_LINK_STATUS_10HALF:
2001 bp->duplex = DUPLEX_HALF;
2003 case BNX2_LINK_STATUS_10FULL:
2004 bp->line_speed = SPEED_10;
2006 case BNX2_LINK_STATUS_100HALF:
2007 bp->duplex = DUPLEX_HALF;
2009 case BNX2_LINK_STATUS_100BASE_T4:
2010 case BNX2_LINK_STATUS_100FULL:
2011 bp->line_speed = SPEED_100;
2013 case BNX2_LINK_STATUS_1000HALF:
2014 bp->duplex = DUPLEX_HALF;
2016 case BNX2_LINK_STATUS_1000FULL:
2017 bp->line_speed = SPEED_1000;
2019 case BNX2_LINK_STATUS_2500HALF:
2020 bp->duplex = DUPLEX_HALF;
2022 case BNX2_LINK_STATUS_2500FULL:
2023 bp->line_speed = SPEED_2500;
2031 if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
2032 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
2033 if (bp->duplex == DUPLEX_FULL)
2034 bp->flow_ctrl = bp->req_flow_ctrl;
2036 if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
2037 bp->flow_ctrl |= FLOW_CTRL_TX;
2038 if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
2039 bp->flow_ctrl |= FLOW_CTRL_RX;
2042 old_port = bp->phy_port;
2043 if (msg & BNX2_LINK_STATUS_SERDES_LINK)
2044 bp->phy_port = PORT_FIBRE;
2046 bp->phy_port = PORT_TP;
2048 if (old_port != bp->phy_port)
2049 bnx2_set_default_link(bp);
2052 if (bp->link_up != link_up)
2053 bnx2_report_link(bp);
2055 bnx2_set_mac_link(bp);
2059 bnx2_set_remote_link(struct bnx2 *bp)
2063 evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
2065 case BNX2_FW_EVT_CODE_LINK_EVENT:
2066 bnx2_remote_phy_event(bp);
2068 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
2070 bnx2_send_heart_beat(bp);
2077 bnx2_setup_copper_phy(struct bnx2 *bp)
2078 __releases(&bp->phy_lock)
2079 __acquires(&bp->phy_lock)
2081 u32 bmcr, adv_reg, new_adv = 0;
2084 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
2086 bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
2087 adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
2088 ADVERTISE_PAUSE_ASYM);
2090 new_adv = ADVERTISE_CSMA | ethtool_adv_to_mii_adv_t(bp->advertising);
2092 if (bp->autoneg & AUTONEG_SPEED) {
2094 u32 new_adv1000 = 0;
2096 new_adv |= bnx2_phy_get_pause_adv(bp);
2098 bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
2099 adv1000_reg &= PHY_ALL_1000_SPEED;
2101 new_adv1000 |= ethtool_adv_to_mii_ctrl1000_t(bp->advertising);
2102 if ((adv1000_reg != new_adv1000) ||
2103 (adv_reg != new_adv) ||
2104 ((bmcr & BMCR_ANENABLE) == 0)) {
2106 bnx2_write_phy(bp, bp->mii_adv, new_adv);
2107 bnx2_write_phy(bp, MII_CTRL1000, new_adv1000);
2108 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
2111 else if (bp->link_up) {
2112 /* Flow ctrl may have changed from auto to forced */
2113 /* or vice-versa. */
2115 bnx2_resolve_flow_ctrl(bp);
2116 bnx2_set_mac_link(bp);
2121 /* advertise nothing when forcing speed */
2122 if (adv_reg != new_adv)
2123 bnx2_write_phy(bp, bp->mii_adv, new_adv);
2126 if (bp->req_line_speed == SPEED_100) {
2127 new_bmcr |= BMCR_SPEED100;
2129 if (bp->req_duplex == DUPLEX_FULL) {
2130 new_bmcr |= BMCR_FULLDPLX;
2132 if (new_bmcr != bmcr) {
2135 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2136 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2138 if (bmsr & BMSR_LSTATUS) {
2139 /* Force link down */
2140 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
2141 spin_unlock_bh(&bp->phy_lock);
2143 spin_lock_bh(&bp->phy_lock);
2145 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2146 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2149 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
2151 /* Normally, the new speed is setup after the link has
2152 * gone down and up again. In some cases, link will not go
2153 * down so we need to set up the new speed here.
2155 if (bmsr & BMSR_LSTATUS) {
2156 bp->line_speed = bp->req_line_speed;
2157 bp->duplex = bp->req_duplex;
2158 bnx2_resolve_flow_ctrl(bp);
2159 bnx2_set_mac_link(bp);
2162 bnx2_resolve_flow_ctrl(bp);
2163 bnx2_set_mac_link(bp);
2169 bnx2_setup_phy(struct bnx2 *bp, u8 port)
2170 __releases(&bp->phy_lock)
2171 __acquires(&bp->phy_lock)
2173 if (bp->loopback == MAC_LOOPBACK)
2176 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2177 return bnx2_setup_serdes_phy(bp, port);
2180 return bnx2_setup_copper_phy(bp);
2185 bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
2189 bp->mii_bmcr = MII_BMCR + 0x10;
2190 bp->mii_bmsr = MII_BMSR + 0x10;
2191 bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
2192 bp->mii_adv = MII_ADVERTISE + 0x10;
2193 bp->mii_lpa = MII_LPA + 0x10;
2194 bp->mii_up1 = MII_BNX2_OVER1G_UP1;
2196 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
2197 bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);
2199 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
2203 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);
2205 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
2206 val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
2207 val |= MII_BNX2_SD_1000XCTL1_FIBER;
2208 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);
2210 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
2211 bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
2212 if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
2213 val |= BCM5708S_UP1_2G5;
2215 val &= ~BCM5708S_UP1_2G5;
2216 bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);
2218 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
2219 bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
2220 val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
2221 bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);
2223 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);
2225 val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
2226 MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
2227 bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);
2229 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
2235 bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
2242 bp->mii_up1 = BCM5708S_UP1;
2244 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
2245 bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
2246 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
2248 bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
2249 val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
2250 bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);
2252 bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
2253 val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
2254 bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);
2256 if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
2257 bnx2_read_phy(bp, BCM5708S_UP1, &val);
2258 val |= BCM5708S_UP1_2G5;
2259 bnx2_write_phy(bp, BCM5708S_UP1, val);
2262 if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0) ||
2263 (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B0) ||
2264 (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B1)) {
2265 /* increase tx signal amplitude */
2266 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2267 BCM5708S_BLK_ADDR_TX_MISC);
2268 bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
2269 val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
2270 bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
2271 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
2274 val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
2275 BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;
2280 is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
2281 if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
2282 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2283 BCM5708S_BLK_ADDR_TX_MISC);
2284 bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
2285 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2286 BCM5708S_BLK_ADDR_DIG);
2293 bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
2298 bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
2300 if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
2301 BNX2_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);
2303 if (bp->dev->mtu > 1500) {
2306 /* Set extended packet length bit */
2307 bnx2_write_phy(bp, 0x18, 0x7);
2308 bnx2_read_phy(bp, 0x18, &val);
2309 bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);
2311 bnx2_write_phy(bp, 0x1c, 0x6c00);
2312 bnx2_read_phy(bp, 0x1c, &val);
2313 bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
2318 bnx2_write_phy(bp, 0x18, 0x7);
2319 bnx2_read_phy(bp, 0x18, &val);
2320 bnx2_write_phy(bp, 0x18, val & ~0x4007);
2322 bnx2_write_phy(bp, 0x1c, 0x6c00);
2323 bnx2_read_phy(bp, 0x1c, &val);
2324 bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
2331 bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
2338 if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
2339 bnx2_write_phy(bp, 0x18, 0x0c00);
2340 bnx2_write_phy(bp, 0x17, 0x000a);
2341 bnx2_write_phy(bp, 0x15, 0x310b);
2342 bnx2_write_phy(bp, 0x17, 0x201f);
2343 bnx2_write_phy(bp, 0x15, 0x9506);
2344 bnx2_write_phy(bp, 0x17, 0x401f);
2345 bnx2_write_phy(bp, 0x15, 0x14e2);
2346 bnx2_write_phy(bp, 0x18, 0x0400);
2349 if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
2350 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
2351 MII_BNX2_DSP_EXPAND_REG | 0x8);
2352 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
2354 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
2357 if (bp->dev->mtu > 1500) {
2358 /* Set extended packet length bit */
2359 bnx2_write_phy(bp, 0x18, 0x7);
2360 bnx2_read_phy(bp, 0x18, &val);
2361 bnx2_write_phy(bp, 0x18, val | 0x4000);
2363 bnx2_read_phy(bp, 0x10, &val);
2364 bnx2_write_phy(bp, 0x10, val | 0x1);
2367 bnx2_write_phy(bp, 0x18, 0x7);
2368 bnx2_read_phy(bp, 0x18, &val);
2369 bnx2_write_phy(bp, 0x18, val & ~0x4007);
2371 bnx2_read_phy(bp, 0x10, &val);
2372 bnx2_write_phy(bp, 0x10, val & ~0x1);
2375 /* ethernet@wirespeed */
2376 bnx2_write_phy(bp, MII_BNX2_AUX_CTL, AUX_CTL_MISC_CTL);
2377 bnx2_read_phy(bp, MII_BNX2_AUX_CTL, &val);
2378 val |= AUX_CTL_MISC_CTL_WR | AUX_CTL_MISC_CTL_WIRESPEED;
2381 if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
2382 val |= AUX_CTL_MISC_CTL_AUTOMDIX;
2384 bnx2_write_phy(bp, MII_BNX2_AUX_CTL, val);
2390 bnx2_init_phy(struct bnx2 *bp, int reset_phy)
2391 __releases(&bp->phy_lock)
2392 __acquires(&bp->phy_lock)
2397 bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
2398 bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;
2400 bp->mii_bmcr = MII_BMCR;
2401 bp->mii_bmsr = MII_BMSR;
2402 bp->mii_bmsr1 = MII_BMSR;
2403 bp->mii_adv = MII_ADVERTISE;
2404 bp->mii_lpa = MII_LPA;
2406 BNX2_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
2408 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
2411 bnx2_read_phy(bp, MII_PHYSID1, &val);
2412 bp->phy_id = val << 16;
2413 bnx2_read_phy(bp, MII_PHYSID2, &val);
2414 bp->phy_id |= val & 0xffff;
2416 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2417 if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
2418 rc = bnx2_init_5706s_phy(bp, reset_phy);
2419 else if (BNX2_CHIP(bp) == BNX2_CHIP_5708)
2420 rc = bnx2_init_5708s_phy(bp, reset_phy);
2421 else if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
2422 rc = bnx2_init_5709s_phy(bp, reset_phy);
2425 rc = bnx2_init_copper_phy(bp, reset_phy);
2430 rc = bnx2_setup_phy(bp, bp->phy_port);
2436 bnx2_set_mac_loopback(struct bnx2 *bp)
2440 mac_mode = BNX2_RD(bp, BNX2_EMAC_MODE);
2441 mac_mode &= ~BNX2_EMAC_MODE_PORT;
2442 mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2443 BNX2_WR(bp, BNX2_EMAC_MODE, mac_mode);
2448 static int bnx2_test_link(struct bnx2 *);
2451 bnx2_set_phy_loopback(struct bnx2 *bp)
2456 spin_lock_bh(&bp->phy_lock);
2457 rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
2459 spin_unlock_bh(&bp->phy_lock);
2463 for (i = 0; i < 10; i++) {
2464 if (bnx2_test_link(bp) == 0)
2469 mac_mode = BNX2_RD(bp, BNX2_EMAC_MODE);
2470 mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
2471 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
2472 BNX2_EMAC_MODE_25G_MODE);
2474 mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
2475 BNX2_WR(bp, BNX2_EMAC_MODE, mac_mode);
2481 bnx2_dump_mcp_state(struct bnx2 *bp)
2483 struct net_device *dev = bp->dev;
2486 netdev_err(dev, "<--- start MCP states dump --->\n");
2487 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
2488 mcp_p0 = BNX2_MCP_STATE_P0;
2489 mcp_p1 = BNX2_MCP_STATE_P1;
2491 mcp_p0 = BNX2_MCP_STATE_P0_5708;
2492 mcp_p1 = BNX2_MCP_STATE_P1_5708;
2494 netdev_err(dev, "DEBUG: MCP_STATE_P0[%08x] MCP_STATE_P1[%08x]\n",
2495 bnx2_reg_rd_ind(bp, mcp_p0), bnx2_reg_rd_ind(bp, mcp_p1));
2496 netdev_err(dev, "DEBUG: MCP mode[%08x] state[%08x] evt_mask[%08x]\n",
2497 bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_MODE),
2498 bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_STATE),
2499 bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_EVENT_MASK));
2500 netdev_err(dev, "DEBUG: pc[%08x] pc[%08x] instr[%08x]\n",
2501 bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_PROGRAM_COUNTER),
2502 bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_PROGRAM_COUNTER),
2503 bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_INSTRUCTION));
2504 netdev_err(dev, "DEBUG: shmem states:\n");
2505 netdev_err(dev, "DEBUG: drv_mb[%08x] fw_mb[%08x] link_status[%08x]",
2506 bnx2_shmem_rd(bp, BNX2_DRV_MB),
2507 bnx2_shmem_rd(bp, BNX2_FW_MB),
2508 bnx2_shmem_rd(bp, BNX2_LINK_STATUS));
2509 pr_cont(" drv_pulse_mb[%08x]\n", bnx2_shmem_rd(bp, BNX2_DRV_PULSE_MB));
2510 netdev_err(dev, "DEBUG: dev_info_signature[%08x] reset_type[%08x]",
2511 bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE),
2512 bnx2_shmem_rd(bp, BNX2_BC_STATE_RESET_TYPE));
2513 pr_cont(" condition[%08x]\n",
2514 bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION));
2515 DP_SHMEM_LINE(bp, BNX2_BC_RESET_TYPE);
2516 DP_SHMEM_LINE(bp, 0x3cc);
2517 DP_SHMEM_LINE(bp, 0x3dc);
2518 DP_SHMEM_LINE(bp, 0x3ec);
2519 netdev_err(dev, "DEBUG: 0x3fc[%08x]\n", bnx2_shmem_rd(bp, 0x3fc));
2520 netdev_err(dev, "<--- end MCP states dump --->\n");
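/* Driver/firmware mailbox handshake: the message (tagged with the driver
 * sequence number) is posted to the BNX2_DRV_MB shared-memory mailbox and
 * BNX2_FW_MB is then polled for a matching acknowledgement.  On timeout a
 * FW_TIMEOUT code is written back, the failure is logged and the MCP state
 * above is dumped.
 */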
2524 bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
2530 msg_data |= bp->fw_wr_seq;
2531 bp->fw_last_msg = msg_data;
2533 bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
2538 /* wait for an acknowledgement. */
2539 for (i = 0; i < (BNX2_FW_ACK_TIME_OUT_MS / 10); i++) {
2542 val = bnx2_shmem_rd(bp, BNX2_FW_MB);
2544 if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
2547 if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
2550 /* If we timed out, inform the firmware that this is the case. */
2551 if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
2552 msg_data &= ~BNX2_DRV_MSG_CODE;
2553 msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
2555 bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
2557 pr_err("fw sync timeout, reset code = %x\n", msg_data);
2558 bnx2_dump_mcp_state(bp);
2564 if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
2571 bnx2_init_5709_context(struct bnx2 *bp)
2576 val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
2577 val |= (BNX2_PAGE_BITS - 8) << 16;
2578 BNX2_WR(bp, BNX2_CTX_COMMAND, val);
2579 for (i = 0; i < 10; i++) {
2580 val = BNX2_RD(bp, BNX2_CTX_COMMAND);
2581 if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
2585 if (val & BNX2_CTX_COMMAND_MEM_INIT)
2588 for (i = 0; i < bp->ctx_pages; i++) {
2592 memset(bp->ctx_blk[i], 0, BNX2_PAGE_SIZE);
2596 BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
2597 (bp->ctx_blk_mapping[i] & 0xffffffff) |
2598 BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
2599 BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
2600 (u64) bp->ctx_blk_mapping[i] >> 32);
2601 BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
2602 BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
2603 for (j = 0; j < 10; j++) {
2605 val = BNX2_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
2606 if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
2610 if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
2619 bnx2_init_context(struct bnx2 *bp)
2625 u32 vcid_addr, pcid_addr, offset;
2630 if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
2633 vcid_addr = GET_PCID_ADDR(vcid);
2635 new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
2640 pcid_addr = GET_PCID_ADDR(new_vcid);
2643 vcid_addr = GET_CID_ADDR(vcid);
2644 pcid_addr = vcid_addr;
2647 for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
2648 vcid_addr += (i << PHY_CTX_SHIFT);
2649 pcid_addr += (i << PHY_CTX_SHIFT);
2651 BNX2_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
2652 BNX2_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
2654 /* Zero out the context. */
2655 for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
2656 bnx2_ctx_wr(bp, vcid_addr, offset, 0);
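/* Bad RX buffer workaround (used for the 5706 A0 below): firmware-allocated
 * mbuf clusters are requested one at a time; clusters whose address has
 * bit 9 set are bad and are simply kept allocated, while the good ones are
 * handed back to the free pool.
 */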
2662 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
2668 good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
2669 if (good_mbuf == NULL)
2672 BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2673 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
2677 /* Allocate a bunch of mbufs and save the good ones in an array. */
2678 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2679 while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
2680 bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
2681 BNX2_RBUF_COMMAND_ALLOC_REQ);
2683 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);
2685 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
2687 /* The addresses with Bit 9 set are bad memory blocks. */
2688 if (!(val & (1 << 9))) {
2689 good_mbuf[good_mbuf_cnt] = (u16) val;
2693 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2696 /* Free the good ones back to the mbuf pool thus discarding
2697 * all the bad ones. */
2698 while (good_mbuf_cnt) {
2701 val = good_mbuf[good_mbuf_cnt];
2702 val = (val << 9) | val | 1;
2704 bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
2711 bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos)
2715 val = (mac_addr[0] << 8) | mac_addr[1];
2717 BNX2_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val);
2719 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2720 (mac_addr[4] << 8) | mac_addr[5];
2722 BNX2_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val);
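/* Fill one entry of the RX page ring: allocate a page, map it for DMA,
 * remember the mapping in the software ring and program the 64-bit bus
 * address into the hi/lo halves of the buffer descriptor.
 */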
2726 bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
2729 struct bnx2_sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2730 struct bnx2_rx_bd *rxbd =
2731 &rxr->rx_pg_desc_ring[BNX2_RX_RING(index)][BNX2_RX_IDX(index)];
2732 struct page *page = alloc_page(gfp);
2736 mapping = dma_map_page(&bp->pdev->dev, page, 0, PAGE_SIZE,
2737 PCI_DMA_FROMDEVICE);
2738 if (dma_mapping_error(&bp->pdev->dev, mapping)) {
2744 dma_unmap_addr_set(rx_pg, mapping, mapping);
2745 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2746 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2751 bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2753 struct bnx2_sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2754 struct page *page = rx_pg->page;
2759 dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(rx_pg, mapping),
2760 PAGE_SIZE, PCI_DMA_FROMDEVICE);
2767 bnx2_alloc_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
2770 struct bnx2_sw_bd *rx_buf = &rxr->rx_buf_ring[index];
2772 struct bnx2_rx_bd *rxbd =
2773 &rxr->rx_desc_ring[BNX2_RX_RING(index)][BNX2_RX_IDX(index)];
2775 data = kmalloc(bp->rx_buf_size, gfp);
2779 mapping = dma_map_single(&bp->pdev->dev,
2781 bp->rx_buf_use_size,
2782 PCI_DMA_FROMDEVICE);
2783 if (dma_mapping_error(&bp->pdev->dev, mapping)) {
2788 rx_buf->data = data;
2789 dma_unmap_addr_set(rx_buf, mapping, mapping);
2791 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2792 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2794 rxr->rx_prod_bseq += bp->rx_buf_use_size;
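/* An attention event is pending when the event bit and its acknowledge bit
 * in the status block disagree.  The helper below re-syncs the ack bit
 * through the STATUS_BIT_SET/CLEAR_CMD registers and reports whether the
 * event fired.
 */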
2800 bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
2802 struct status_block *sblk = bnapi->status_blk.msi;
2803 u32 new_link_state, old_link_state;
2806 new_link_state = sblk->status_attn_bits & event;
2807 old_link_state = sblk->status_attn_bits_ack & event;
2808 if (new_link_state != old_link_state) {
2810 BNX2_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2812 BNX2_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2820 bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
2822 spin_lock(&bp->phy_lock);
2824 if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
2826 if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
2827 bnx2_set_remote_link(bp);
2829 spin_unlock(&bp->phy_lock);
2834 bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
2838 /* Tell compiler that status block fields can change. */
2840 cons = *bnapi->hw_tx_cons_ptr;
2842 if (unlikely((cons & BNX2_MAX_TX_DESC_CNT) == BNX2_MAX_TX_DESC_CNT))
2848 bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
2850 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
2851 u16 hw_cons, sw_cons, sw_ring_cons;
2852 int tx_pkt = 0, index;
2853 unsigned int tx_bytes = 0;
2854 struct netdev_queue *txq;
2856 index = (bnapi - bp->bnx2_napi);
2857 txq = netdev_get_tx_queue(bp->dev, index);
2859 hw_cons = bnx2_get_hw_tx_cons(bnapi);
2860 sw_cons = txr->tx_cons;
2862 while (sw_cons != hw_cons) {
2863 struct bnx2_sw_tx_bd *tx_buf;
2864 struct sk_buff *skb;
2867 sw_ring_cons = BNX2_TX_RING_IDX(sw_cons);
2869 tx_buf = &txr->tx_buf_ring[sw_ring_cons];
2872 /* prefetch skb_end_pointer() to speed up skb_shinfo(skb) */
2873 prefetch(&skb->end);
2875 /* partial BD completions possible with TSO packets */
2876 if (tx_buf->is_gso) {
2877 u16 last_idx, last_ring_idx;
2879 last_idx = sw_cons + tx_buf->nr_frags + 1;
2880 last_ring_idx = sw_ring_cons + tx_buf->nr_frags + 1;
2881 if (unlikely(last_ring_idx >= BNX2_MAX_TX_DESC_CNT)) {
2884 if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
2889 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
2890 skb_headlen(skb), PCI_DMA_TODEVICE);
2893 last = tx_buf->nr_frags;
2895 for (i = 0; i < last; i++) {
2896 struct bnx2_sw_tx_bd *tx_buf;
2898 sw_cons = BNX2_NEXT_TX_BD(sw_cons);
2900 tx_buf = &txr->tx_buf_ring[BNX2_TX_RING_IDX(sw_cons)];
2901 dma_unmap_page(&bp->pdev->dev,
2902 dma_unmap_addr(tx_buf, mapping),
2903 skb_frag_size(&skb_shinfo(skb)->frags[i]),
2907 sw_cons = BNX2_NEXT_TX_BD(sw_cons);
2909 tx_bytes += skb->len;
2910 dev_kfree_skb_any(skb);
2912 if (tx_pkt == budget)
2915 if (hw_cons == sw_cons)
2916 hw_cons = bnx2_get_hw_tx_cons(bnapi);
2919 netdev_tx_completed_queue(txq, tx_pkt, tx_bytes);
2920 txr->hw_tx_cons = hw_cons;
2921 txr->tx_cons = sw_cons;
2923 /* Need to make the tx_cons update visible to bnx2_start_xmit()
2924 * before checking for netif_tx_queue_stopped(). Without the
2925 * memory barrier, there is a small possibility that bnx2_start_xmit()
2926 * will miss it and cause the queue to be stopped forever.
2930 if (unlikely(netif_tx_queue_stopped(txq)) &&
2931 (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
2932 __netif_tx_lock(txq, smp_processor_id());
2933 if ((netif_tx_queue_stopped(txq)) &&
2934 (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
2935 netif_tx_wake_queue(txq);
2936 __netif_tx_unlock(txq);
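/* Recycle 'count' RX pages from the consumer side of the page ring back to
 * the producer side when replacement pages could not be allocated, carrying
 * the page pointers, DMA mappings and buffer descriptor addresses across.
 */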
2943 bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
2944 struct sk_buff *skb, int count)
2946 struct bnx2_sw_pg *cons_rx_pg, *prod_rx_pg;
2947 struct bnx2_rx_bd *cons_bd, *prod_bd;
2950 u16 cons = rxr->rx_pg_cons;
2952 cons_rx_pg = &rxr->rx_pg_ring[cons];
2954 /* The caller was unable to allocate a new page to replace the
2955 * last one in the frags array, so we need to recycle that page
2956 * and then free the skb.
2960 struct skb_shared_info *shinfo;
2962 shinfo = skb_shinfo(skb);
2964 page = skb_frag_page(&shinfo->frags[shinfo->nr_frags]);
2965 __skb_frag_set_page(&shinfo->frags[shinfo->nr_frags], NULL);
2967 cons_rx_pg->page = page;
2971 hw_prod = rxr->rx_pg_prod;
2973 for (i = 0; i < count; i++) {
2974 prod = BNX2_RX_PG_RING_IDX(hw_prod);
2976 prod_rx_pg = &rxr->rx_pg_ring[prod];
2977 cons_rx_pg = &rxr->rx_pg_ring[cons];
2978 cons_bd = &rxr->rx_pg_desc_ring[BNX2_RX_RING(cons)]
2979 [BNX2_RX_IDX(cons)];
2980 prod_bd = &rxr->rx_pg_desc_ring[BNX2_RX_RING(prod)]
2981 [BNX2_RX_IDX(prod)];
2984 prod_rx_pg->page = cons_rx_pg->page;
2985 cons_rx_pg->page = NULL;
2986 dma_unmap_addr_set(prod_rx_pg, mapping,
2987 dma_unmap_addr(cons_rx_pg, mapping));
2989 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2990 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2993 cons = BNX2_RX_PG_RING_IDX(BNX2_NEXT_RX_BD(cons));
2994 hw_prod = BNX2_NEXT_RX_BD(hw_prod);
2996 rxr->rx_pg_prod = hw_prod;
2997 rxr->rx_pg_cons = cons;
3001 bnx2_reuse_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
3002 u8 *data, u16 cons, u16 prod)
3004 struct bnx2_sw_bd *cons_rx_buf, *prod_rx_buf;
3005 struct bnx2_rx_bd *cons_bd, *prod_bd;
3007 cons_rx_buf = &rxr->rx_buf_ring[cons];
3008 prod_rx_buf = &rxr->rx_buf_ring[prod];
3010 dma_sync_single_for_device(&bp->pdev->dev,
3011 dma_unmap_addr(cons_rx_buf, mapping),
3012 BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
3014 rxr->rx_prod_bseq += bp->rx_buf_use_size;
3016 prod_rx_buf->data = data;
3021 dma_unmap_addr_set(prod_rx_buf, mapping,
3022 dma_unmap_addr(cons_rx_buf, mapping));
3024 cons_bd = &rxr->rx_desc_ring[BNX2_RX_RING(cons)][BNX2_RX_IDX(cons)];
3025 prod_bd = &rxr->rx_desc_ring[BNX2_RX_RING(prod)][BNX2_RX_IDX(prod)];
3026 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
3027 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
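/* Turn a completed RX buffer into an sk_buff.  A replacement buffer is
 * allocated first; on failure the old data (and any pages) are recycled.
 * The header comes from the kmalloc'ed data buffer via build_skb(), and for
 * split or jumbo frames the rest of the payload is attached as page
 * fragments taken from the page ring.
 */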
3030 static struct sk_buff *
3031 bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u8 *data,
3032 unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
3036 u16 prod = ring_idx & 0xffff;
3037 struct sk_buff *skb;
3039 err = bnx2_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
3040 if (unlikely(err)) {
3041 bnx2_reuse_rx_data(bp, rxr, data, (u16) (ring_idx >> 16), prod);
3044 unsigned int raw_len = len + 4;
3045 int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;
3047 bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
3052 dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
3053 PCI_DMA_FROMDEVICE);
3054 skb = build_skb(data, 0);
3059 skb_reserve(skb, ((u8 *)get_l2_fhdr(data) - data) + BNX2_RX_OFFSET);
3064 unsigned int i, frag_len, frag_size, pages;
3065 struct bnx2_sw_pg *rx_pg;
3066 u16 pg_cons = rxr->rx_pg_cons;
3067 u16 pg_prod = rxr->rx_pg_prod;
3069 frag_size = len + 4 - hdr_len;
3070 pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
3071 skb_put(skb, hdr_len);
3073 for (i = 0; i < pages; i++) {
3074 dma_addr_t mapping_old;
3076 frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
3077 if (unlikely(frag_len <= 4)) {
3078 unsigned int tail = 4 - frag_len;
3080 rxr->rx_pg_cons = pg_cons;
3081 rxr->rx_pg_prod = pg_prod;
3082 bnx2_reuse_rx_skb_pages(bp, rxr, NULL,
3089 &skb_shinfo(skb)->frags[i - 1];
3090 skb_frag_size_sub(frag, tail);
3091 skb->data_len -= tail;
3095 rx_pg = &rxr->rx_pg_ring[pg_cons];
3097 /* Don't unmap yet. If we're unable to allocate a new
3098 * page, we need to recycle the page and the DMA addr.
3100 mapping_old = dma_unmap_addr(rx_pg, mapping);
3104 skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
3107 err = bnx2_alloc_rx_page(bp, rxr,
3108 BNX2_RX_PG_RING_IDX(pg_prod),
3110 if (unlikely(err)) {
3111 rxr->rx_pg_cons = pg_cons;
3112 rxr->rx_pg_prod = pg_prod;
3113 bnx2_reuse_rx_skb_pages(bp, rxr, skb,
3118 dma_unmap_page(&bp->pdev->dev, mapping_old,
3119 PAGE_SIZE, PCI_DMA_FROMDEVICE);
3121 frag_size -= frag_len;
3122 skb->data_len += frag_len;
3123 skb->truesize += PAGE_SIZE;
3124 skb->len += frag_len;
3126 pg_prod = BNX2_NEXT_RX_BD(pg_prod);
3127 pg_cons = BNX2_RX_PG_RING_IDX(BNX2_NEXT_RX_BD(pg_cons));
3129 rxr->rx_pg_prod = pg_prod;
3130 rxr->rx_pg_cons = pg_cons;
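/* RX completion path: bnx2_rx_int() walks the ring from the software
 * consumer to the hardware consumer index, checks the l2_fhdr status bits,
 * copies small packets and hands larger ones to bnx2_rx_skb(), then writes
 * the new producer/consumer indices and byte sequence back to the chip.
 */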
3136 bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
3140 /* Tell compiler that status block fields can change. */
3142 cons = *bnapi->hw_rx_cons_ptr;
3144 if (unlikely((cons & BNX2_MAX_RX_DESC_CNT) == BNX2_MAX_RX_DESC_CNT))
3150 bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
3152 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3153 u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
3154 struct l2_fhdr *rx_hdr;
3155 int rx_pkt = 0, pg_ring_used = 0;
3160 hw_cons = bnx2_get_hw_rx_cons(bnapi);
3161 sw_cons = rxr->rx_cons;
3162 sw_prod = rxr->rx_prod;
3164 /* Memory barrier necessary as speculative reads of the rx
3165 * buffer can be ahead of the index in the status block
3168 while (sw_cons != hw_cons) {
3169 unsigned int len, hdr_len;
3171 struct bnx2_sw_bd *rx_buf, *next_rx_buf;
3172 struct sk_buff *skb;
3173 dma_addr_t dma_addr;
3177 sw_ring_cons = BNX2_RX_RING_IDX(sw_cons);
3178 sw_ring_prod = BNX2_RX_RING_IDX(sw_prod);
3180 rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
3181 data = rx_buf->data;
3182 rx_buf->data = NULL;
3184 rx_hdr = get_l2_fhdr(data);
3187 dma_addr = dma_unmap_addr(rx_buf, mapping);
3189 dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr,
3190 BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
3191 PCI_DMA_FROMDEVICE);
3193 next_ring_idx = BNX2_RX_RING_IDX(BNX2_NEXT_RX_BD(sw_cons));
3194 next_rx_buf = &rxr->rx_buf_ring[next_ring_idx];
3195 prefetch(get_l2_fhdr(next_rx_buf->data));
3197 len = rx_hdr->l2_fhdr_pkt_len;
3198 status = rx_hdr->l2_fhdr_status;
3201 if (status & L2_FHDR_STATUS_SPLIT) {
3202 hdr_len = rx_hdr->l2_fhdr_ip_xsum;
3204 } else if (len > bp->rx_jumbo_thresh) {
3205 hdr_len = bp->rx_jumbo_thresh;
3209 if (unlikely(status & (L2_FHDR_ERRORS_BAD_CRC |
3210 L2_FHDR_ERRORS_PHY_DECODE |
3211 L2_FHDR_ERRORS_ALIGNMENT |
3212 L2_FHDR_ERRORS_TOO_SHORT |
3213 L2_FHDR_ERRORS_GIANT_FRAME))) {
3215 bnx2_reuse_rx_data(bp, rxr, data, sw_ring_cons,
3220 pages = PAGE_ALIGN(len - hdr_len) >> PAGE_SHIFT;
3222 bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
3229 if (len <= bp->rx_copy_thresh) {
3230 skb = netdev_alloc_skb(bp->dev, len + 6);
3232 bnx2_reuse_rx_data(bp, rxr, data, sw_ring_cons,
3239 (u8 *)rx_hdr + BNX2_RX_OFFSET - 6,
3241 skb_reserve(skb, 6);
3244 bnx2_reuse_rx_data(bp, rxr, data,
3245 sw_ring_cons, sw_ring_prod);
3248 skb = bnx2_rx_skb(bp, rxr, data, len, hdr_len, dma_addr,
3249 (sw_ring_cons << 16) | sw_ring_prod);
3253 if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
3254 !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG))
3255 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rx_hdr->l2_fhdr_vlan_tag);
3257 skb->protocol = eth_type_trans(skb, bp->dev);
3259 if (len > (bp->dev->mtu + ETH_HLEN) &&
3260 skb->protocol != htons(ETH_P_8021Q) &&
3261 skb->protocol != htons(ETH_P_8021AD)) {
3268 skb_checksum_none_assert(skb);
3269 if ((bp->dev->features & NETIF_F_RXCSUM) &&
3270 (status & (L2_FHDR_STATUS_TCP_SEGMENT |
3271 L2_FHDR_STATUS_UDP_DATAGRAM))) {
3273 if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
3274 L2_FHDR_ERRORS_UDP_XSUM)) == 0))
3275 skb->ip_summed = CHECKSUM_UNNECESSARY;
3277 if ((bp->dev->features & NETIF_F_RXHASH) &&
3278 ((status & L2_FHDR_STATUS_USE_RXHASH) ==
3279 L2_FHDR_STATUS_USE_RXHASH))
3280 skb_set_hash(skb, rx_hdr->l2_fhdr_hash,
3283 skb_record_rx_queue(skb, bnapi - &bp->bnx2_napi[0]);
3284 napi_gro_receive(&bnapi->napi, skb);
3288 sw_cons = BNX2_NEXT_RX_BD(sw_cons);
3289 sw_prod = BNX2_NEXT_RX_BD(sw_prod);
3291 if ((rx_pkt == budget))
3294 /* Refresh hw_cons to see if there is new work */
3295 if (sw_cons == hw_cons) {
3296 hw_cons = bnx2_get_hw_rx_cons(bnapi);
3300 rxr->rx_cons = sw_cons;
3301 rxr->rx_prod = sw_prod;
3304 BNX2_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
3306 BNX2_WR16(bp, rxr->rx_bidx_addr, sw_prod);
3308 BNX2_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
3316 /* MSI ISR - The only difference between this and the INTx ISR
3317 * is that the MSI interrupt is always serviced.
3320 bnx2_msi(int irq, void *dev_instance)
3322 struct bnx2_napi *bnapi = dev_instance;
3323 struct bnx2 *bp = bnapi->bp;
3325 prefetch(bnapi->status_blk.msi);
3326 BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3327 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
3328 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3330 /* Return here if interrupt is disabled. */
3331 if (unlikely(atomic_read(&bp->intr_sem) != 0))
3334 napi_schedule(&bnapi->napi);
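/* One-shot MSI variant: it differs from bnx2_msi() above only in that no
 * INT_ACK_CMD mask write is issued before scheduling NAPI.
 */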
3340 bnx2_msi_1shot(int irq, void *dev_instance)
3342 struct bnx2_napi *bnapi = dev_instance;
3343 struct bnx2 *bp = bnapi->bp;
3345 prefetch(bnapi->status_blk.msi);
3347 /* Return here if interrupt is disabled. */
3348 if (unlikely(atomic_read(&bp->intr_sem) != 0))
3351 napi_schedule(&bnapi->napi);
3357 bnx2_interrupt(int irq, void *dev_instance)
3359 struct bnx2_napi *bnapi = dev_instance;
3360 struct bnx2 *bp = bnapi->bp;
3361 struct status_block *sblk = bnapi->status_blk.msi;
3363 /* When using INTx, it is possible for the interrupt to arrive
3364 * at the CPU before the status block posted prior to the
3365 * interrupt. Reading a register will flush the status block.
3366 * When using MSI, the MSI message will always complete after
3367 * the status block write.
3369 if ((sblk->status_idx == bnapi->last_status_idx) &&
3370 (BNX2_RD(bp, BNX2_PCICFG_MISC_STATUS) &
3371 BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
3374 BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3375 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
3376 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3378 /* Read back to deassert IRQ immediately to avoid too many
3379 * spurious interrupts.
3381 BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
3383 /* Return here if interrupt is shared and is disabled. */
3384 if (unlikely(atomic_read(&bp->intr_sem) != 0))
3387 if (napi_schedule_prep(&bnapi->napi)) {
3388 bnapi->last_status_idx = sblk->status_idx;
3389 __napi_schedule(&bnapi->napi);
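/* "Fast" work means RX or TX completions, i.e. a hardware consumer index in
 * the status block that differs from what the driver has processed.
 * bnx2_has_work() below additionally checks the CNIC tag and the link/timer
 * attention bits.
 */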
3396 bnx2_has_fast_work(struct bnx2_napi *bnapi)
3398 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3399 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3401 if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
3402 (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
3407 #define STATUS_ATTN_EVENTS (STATUS_ATTN_BITS_LINK_STATE | \
3408 STATUS_ATTN_BITS_TIMER_ABORT)
3411 bnx2_has_work(struct bnx2_napi *bnapi)
3413 struct status_block *sblk = bnapi->status_blk.msi;
3415 if (bnx2_has_fast_work(bnapi))
3419 if (bnapi->cnic_present && (bnapi->cnic_tag != sblk->status_idx))
3423 if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
3424 (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
3431 bnx2_chk_missed_msi(struct bnx2 *bp)
3433 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
3436 if (bnx2_has_work(bnapi)) {
3437 msi_ctrl = BNX2_RD(bp, BNX2_PCICFG_MSI_CONTROL);
3438 if (!(msi_ctrl & BNX2_PCICFG_MSI_CONTROL_ENABLE))
3441 if (bnapi->last_status_idx == bp->idle_chk_status_idx) {
3442 BNX2_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl &
3443 ~BNX2_PCICFG_MSI_CONTROL_ENABLE);
3444 BNX2_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl);
3445 bnx2_msi(bp->irq_tbl[0].vector, bnapi);
3449 bp->idle_chk_status_idx = bnapi->last_status_idx;
3453 static void bnx2_poll_cnic(struct bnx2 *bp, struct bnx2_napi *bnapi)
3455 struct cnic_ops *c_ops;
3457 if (!bnapi->cnic_present)
3461 c_ops = rcu_dereference(bp->cnic_ops);
3463 bnapi->cnic_tag = c_ops->cnic_handler(bp->cnic_data,
3464 bnapi->status_blk.msi);
3469 static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
3471 struct status_block *sblk = bnapi->status_blk.msi;
3472 u32 status_attn_bits = sblk->status_attn_bits;
3473 u32 status_attn_bits_ack = sblk->status_attn_bits_ack;
3475 if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
3476 (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {
3478 bnx2_phy_int(bp, bnapi);
3480 /* This is needed to take care of transient status
3481 * during link changes.
3483 BNX2_WR(bp, BNX2_HC_COMMAND,
3484 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
3485 BNX2_RD(bp, BNX2_HC_COMMAND);
3489 static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
3490 int work_done, int budget)
3492 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3493 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3495 if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
3496 bnx2_tx_int(bp, bnapi, 0);
3498 if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons)
3499 work_done += bnx2_rx_int(bp, bnapi, budget - work_done);
3504 static int bnx2_poll_msix(struct napi_struct *napi, int budget)
3506 struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
3507 struct bnx2 *bp = bnapi->bp;
3509 struct status_block_msix *sblk = bnapi->status_blk.msix;
3512 work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
3513 if (unlikely(work_done >= budget))
3516 bnapi->last_status_idx = sblk->status_idx;
3517 /* status idx must be read before checking for more work. */
3519 if (likely(!bnx2_has_fast_work(bnapi))) {
3521 napi_complete(napi);
3522 BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
3523 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3524 bnapi->last_status_idx);
3531 static int bnx2_poll(struct napi_struct *napi, int budget)
3533 struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
3534 struct bnx2 *bp = bnapi->bp;
3536 struct status_block *sblk = bnapi->status_blk.msi;
3539 bnx2_poll_link(bp, bnapi);
3541 work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
3544 bnx2_poll_cnic(bp, bnapi);
3547 /* bnapi->last_status_idx is used below to tell the hw how
3548 * much work has been processed, so we must read it before
3549 * checking for more work.
3551 bnapi->last_status_idx = sblk->status_idx;
3553 if (unlikely(work_done >= budget))
3557 if (likely(!bnx2_has_work(bnapi))) {
3558 napi_complete(napi);
3559 if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
3560 BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3561 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3562 bnapi->last_status_idx);
3565 BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3566 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3567 BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
3568 bnapi->last_status_idx);
3570 BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3571 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3572 bnapi->last_status_idx);
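/* RX filtering: IFF_PROMISC puts the EMAC in promiscuous mode, IFF_ALLMULTI
 * sets every multicast hash register, otherwise a hash filter is built from
 * the CRC of each multicast address.  Too many unicast addresses also force
 * promiscuous mode; otherwise each one is loaded into a MAC match register.
 * The sort-user register is reprogrammed last.
 */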
3580 /* Called with rtnl_lock from vlan functions and also netif_tx_lock
3581 * from set_multicast.
3584 bnx2_set_rx_mode(struct net_device *dev)
3586 struct bnx2 *bp = netdev_priv(dev);
3587 u32 rx_mode, sort_mode;
3588 struct netdev_hw_addr *ha;
3591 if (!netif_running(dev))
3594 spin_lock_bh(&bp->phy_lock);
3596 rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
3597 BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
3598 sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
3599 if (!(dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
3600 (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
3601 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
3602 if (dev->flags & IFF_PROMISC) {
3603 /* Promiscuous mode. */
3604 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
3605 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
3606 BNX2_RPM_SORT_USER0_PROM_VLAN;
3608 else if (dev->flags & IFF_ALLMULTI) {
3609 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3610 BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3613 sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
3616 /* Accept one or more multicast addresses. */
3617 u32 mc_filter[NUM_MC_HASH_REGISTERS];
3622 memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
3624 netdev_for_each_mc_addr(ha, dev) {
3625 crc = ether_crc_le(ETH_ALEN, ha->addr);
3627 regidx = (bit & 0xe0) >> 5;
3629 mc_filter[regidx] |= (1 << bit);
3632 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3633 BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3637 sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
3640 if (netdev_uc_count(dev) > BNX2_MAX_UNICAST_ADDRESSES) {
3641 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
3642 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
3643 BNX2_RPM_SORT_USER0_PROM_VLAN;
3644 } else if (!(dev->flags & IFF_PROMISC)) {
3645 /* Add all entries to the match filter list */
3647 netdev_for_each_uc_addr(ha, dev) {
3648 bnx2_set_mac_addr(bp, ha->addr,
3649 i + BNX2_START_UNICAST_ADDRESS_INDEX);
3651 (i + BNX2_START_UNICAST_ADDRESS_INDEX));
3657 if (rx_mode != bp->rx_mode) {
3658 bp->rx_mode = rx_mode;
3659 BNX2_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
3662 BNX2_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3663 BNX2_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
3664 BNX2_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
3666 spin_unlock_bh(&bp->phy_lock);
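/* Firmware image sanity checks: each section descriptor must lie inside the
 * firmware blob, have a 4-byte aligned offset, satisfy the caller's length
 * alignment, and be non-empty when required.
 */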
3670 check_fw_section(const struct firmware *fw,
3671 const struct bnx2_fw_file_section *section,
3672 u32 alignment, bool non_empty)
3674 u32 offset = be32_to_cpu(section->offset);
3675 u32 len = be32_to_cpu(section->len);
3677 if ((offset == 0 && len != 0) || offset >= fw->size || offset & 3)
3679 if ((non_empty && len == 0) || len > fw->size - offset ||
3680 len & (alignment - 1))
3686 check_mips_fw_entry(const struct firmware *fw,
3687 const struct bnx2_mips_fw_file_entry *entry)
3689 if (check_fw_section(fw, &entry->text, 4, true) ||
3690 check_fw_section(fw, &entry->data, 4, false) ||
3691 check_fw_section(fw, &entry->rodata, 4, false))
3696 static void bnx2_release_firmware(struct bnx2 *bp)
3698 if (bp->rv2p_firmware) {
3699 release_firmware(bp->mips_firmware);
3700 release_firmware(bp->rv2p_firmware);
3701 bp->rv2p_firmware = NULL;
3705 static int bnx2_request_uncached_firmware(struct bnx2 *bp)
3707 const char *mips_fw_file, *rv2p_fw_file;
3708 const struct bnx2_mips_fw_file *mips_fw;
3709 const struct bnx2_rv2p_fw_file *rv2p_fw;
3712 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
3713 mips_fw_file = FW_MIPS_FILE_09;
3714 if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5709_A0) ||
3715 (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5709_A1))
3716 rv2p_fw_file = FW_RV2P_FILE_09_Ax;
3718 rv2p_fw_file = FW_RV2P_FILE_09;
3720 mips_fw_file = FW_MIPS_FILE_06;
3721 rv2p_fw_file = FW_RV2P_FILE_06;
3724 rc = reject_firmware(&bp->mips_firmware, mips_fw_file, &bp->pdev->dev);
3726 pr_err("Can't load firmware file \"%s\"\n", mips_fw_file);
3730 rc = reject_firmware(&bp->rv2p_firmware, rv2p_fw_file, &bp->pdev->dev);
3732 pr_err("Can't load firmware file \"%s\"\n", rv2p_fw_file);
3733 goto err_release_mips_firmware;
3735 mips_fw = (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3736 rv2p_fw = (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3737 if (bp->mips_firmware->size < sizeof(*mips_fw) ||
3738 check_mips_fw_entry(bp->mips_firmware, &mips_fw->com) ||
3739 check_mips_fw_entry(bp->mips_firmware, &mips_fw->cp) ||
3740 check_mips_fw_entry(bp->mips_firmware, &mips_fw->rxp) ||
3741 check_mips_fw_entry(bp->mips_firmware, &mips_fw->tpat) ||
3742 check_mips_fw_entry(bp->mips_firmware, &mips_fw->txp)) {
3743 pr_err("Firmware file \"%s\" is invalid\n", mips_fw_file);
3745 goto err_release_firmware;
3747 if (bp->rv2p_firmware->size < sizeof(*rv2p_fw) ||
3748 check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc1.rv2p, 8, true) ||
3749 check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc2.rv2p, 8, true)) {
3750 pr_err("Firmware file \"%s\" is invalid\n", rv2p_fw_file);
3752 goto err_release_firmware;
3757 err_release_firmware:
3758 release_firmware(bp->rv2p_firmware);
3759 bp->rv2p_firmware = NULL;
3760 err_release_mips_firmware:
3761 release_firmware(bp->mips_firmware);
3765 static int bnx2_request_firmware(struct bnx2 *bp)
3767 return bp->rv2p_firmware ? 0 : bnx2_request_uncached_firmware(bp);
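/* RV2P firmware download: instructions are written as 64-bit pairs through
 * BNX2_RV2P_INSTR_HIGH/LOW and committed via the per-processor address/cmd
 * register.  A small fixup table is then replayed (e.g. patching the BD
 * page size) before the processor is reset; the un-stall happens later.
 */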
3771 rv2p_fw_fixup(u32 rv2p_proc, int idx, u32 loc, u32 rv2p_code)
3774 case RV2P_P1_FIXUP_PAGE_SIZE_IDX:
3775 rv2p_code &= ~RV2P_BD_PAGE_SIZE_MSK;
3776 rv2p_code |= RV2P_BD_PAGE_SIZE;
3783 load_rv2p_fw(struct bnx2 *bp, u32 rv2p_proc,
3784 const struct bnx2_rv2p_fw_file_entry *fw_entry)
3786 u32 rv2p_code_len, file_offset;
3791 rv2p_code_len = be32_to_cpu(fw_entry->rv2p.len);
3792 file_offset = be32_to_cpu(fw_entry->rv2p.offset);
3794 rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
3796 if (rv2p_proc == RV2P_PROC1) {
3797 cmd = BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
3798 addr = BNX2_RV2P_PROC1_ADDR_CMD;
3800 cmd = BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
3801 addr = BNX2_RV2P_PROC2_ADDR_CMD;
3804 for (i = 0; i < rv2p_code_len; i += 8) {
3805 BNX2_WR(bp, BNX2_RV2P_INSTR_HIGH, be32_to_cpu(*rv2p_code));
3807 BNX2_WR(bp, BNX2_RV2P_INSTR_LOW, be32_to_cpu(*rv2p_code));
3810 val = (i / 8) | cmd;
3811 BNX2_WR(bp, addr, val);
3814 rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
3815 for (i = 0; i < 8; i++) {
3818 loc = be32_to_cpu(fw_entry->fixup[i]);
3819 if (loc && ((loc * 4) < rv2p_code_len)) {
3820 code = be32_to_cpu(*(rv2p_code + loc - 1));
3821 BNX2_WR(bp, BNX2_RV2P_INSTR_HIGH, code);
3822 code = be32_to_cpu(*(rv2p_code + loc));
3823 code = rv2p_fw_fixup(rv2p_proc, i, loc, code);
3824 BNX2_WR(bp, BNX2_RV2P_INSTR_LOW, code);
3826 val = (loc / 2) | cmd;
3827 BNX2_WR(bp, addr, val);
3831 /* Reset the processor; un-stall is done later. */
3832 if (rv2p_proc == RV2P_PROC1) {
3833 BNX2_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
3836 BNX2_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
3843 load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg,
3844 const struct bnx2_mips_fw_file_entry *fw_entry)
3846 u32 addr, len, file_offset;
3852 val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3853 val |= cpu_reg->mode_value_halt;
3854 bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3855 bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3857 /* Load the Text area. */
3858 addr = be32_to_cpu(fw_entry->text.addr);
3859 len = be32_to_cpu(fw_entry->text.len);
3860 file_offset = be32_to_cpu(fw_entry->text.offset);
3861 data = (__be32 *)(bp->mips_firmware->data + file_offset);
3863 offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3867 for (j = 0; j < (len / 4); j++, offset += 4)
3868 bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3871 /* Load the Data area. */
3872 addr = be32_to_cpu(fw_entry->data.addr);
3873 len = be32_to_cpu(fw_entry->data.len);
3874 file_offset = be32_to_cpu(fw_entry->data.offset);
3875 data = (__be32 *)(bp->mips_firmware->data + file_offset);
3877 offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3881 for (j = 0; j < (len / 4); j++, offset += 4)
3882 bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3885 /* Load the Read-Only area. */
3886 addr = be32_to_cpu(fw_entry->rodata.addr);
3887 len = be32_to_cpu(fw_entry->rodata.len);
3888 file_offset = be32_to_cpu(fw_entry->rodata.offset);
3889 data = (__be32 *)(bp->mips_firmware->data + file_offset);
3891 offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3895 for (j = 0; j < (len / 4); j++, offset += 4)
3896 bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3899 /* Clear the pre-fetch instruction. */
3900 bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
3902 val = be32_to_cpu(fw_entry->start_addr);
3903 bnx2_reg_wr_ind(bp, cpu_reg->pc, val);
3905 /* Start the CPU. */
3906 val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3907 val &= ~cpu_reg->mode_value_halt;
3908 bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3909 bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3915 bnx2_init_cpus(struct bnx2 *bp)
3917 const struct bnx2_mips_fw_file *mips_fw =
3918 (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3919 const struct bnx2_rv2p_fw_file *rv2p_fw =
3920 (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3923 /* Initialize the RV2P processor. */
3924 load_rv2p_fw(bp, RV2P_PROC1, &rv2p_fw->proc1);
3925 load_rv2p_fw(bp, RV2P_PROC2, &rv2p_fw->proc2);
3927 /* Initialize the RX Processor. */
3928 rc = load_cpu_fw(bp, &cpu_reg_rxp, &mips_fw->rxp);
3932 /* Initialize the TX Processor. */
3933 rc = load_cpu_fw(bp, &cpu_reg_txp, &mips_fw->txp);
3937 /* Initialize the TX Patch-up Processor. */
3938 rc = load_cpu_fw(bp, &cpu_reg_tpat, &mips_fw->tpat);
3942 /* Initialize the Completion Processor. */
3943 rc = load_cpu_fw(bp, &cpu_reg_com, &mips_fw->com);
3947 /* Initialize the Command Processor. */
3948 rc = load_cpu_fw(bp, &cpu_reg_cp, &mips_fw->cp);
3955 bnx2_setup_wol(struct bnx2 *bp)
3964 autoneg = bp->autoneg;
3965 advertising = bp->advertising;
3967 if (bp->phy_port == PORT_TP) {
3968 bp->autoneg = AUTONEG_SPEED;
3969 bp->advertising = ADVERTISED_10baseT_Half |
3970 ADVERTISED_10baseT_Full |
3971 ADVERTISED_100baseT_Half |
3972 ADVERTISED_100baseT_Full |
3976 spin_lock_bh(&bp->phy_lock);
3977 bnx2_setup_phy(bp, bp->phy_port);
3978 spin_unlock_bh(&bp->phy_lock);
3980 bp->autoneg = autoneg;
3981 bp->advertising = advertising;
3983 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
3985 val = BNX2_RD(bp, BNX2_EMAC_MODE);
3987 /* Enable port mode. */
3988 val &= ~BNX2_EMAC_MODE_PORT;
3989 val |= BNX2_EMAC_MODE_MPKT_RCVD |
3990 BNX2_EMAC_MODE_ACPI_RCVD |
3991 BNX2_EMAC_MODE_MPKT;
3992 if (bp->phy_port == PORT_TP) {
3993 val |= BNX2_EMAC_MODE_PORT_MII;
3995 val |= BNX2_EMAC_MODE_PORT_GMII;
3996 if (bp->line_speed == SPEED_2500)
3997 val |= BNX2_EMAC_MODE_25G_MODE;
4000 BNX2_WR(bp, BNX2_EMAC_MODE, val);
4002 /* Receive all multicast frames. */
4003 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
4004 BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
4007 BNX2_WR(bp, BNX2_EMAC_RX_MODE, BNX2_EMAC_RX_MODE_SORT_MODE);
4009 val = 1 | BNX2_RPM_SORT_USER0_BC_EN | BNX2_RPM_SORT_USER0_MC_EN;
4010 BNX2_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
4011 BNX2_WR(bp, BNX2_RPM_SORT_USER0, val);
4012 BNX2_WR(bp, BNX2_RPM_SORT_USER0, val | BNX2_RPM_SORT_USER0_ENA);
4014 /* Need to enable EMAC and RPM for WOL. */
4015 BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
4016 BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
4017 BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
4018 BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
4020 val = BNX2_RD(bp, BNX2_RPM_CONFIG);
4021 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
4022 BNX2_WR(bp, BNX2_RPM_CONFIG, val);
4024 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
4026 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
4029 if (!(bp->flags & BNX2_FLAG_NO_WOL)) {
4032 wol_msg |= BNX2_DRV_MSG_DATA_WAIT3;
4033 if (bp->fw_last_msg || BNX2_CHIP(bp) != BNX2_CHIP_5709) {
4034 bnx2_fw_sync(bp, wol_msg, 1, 0);
4037 /* Tell firmware not to power down the PHY yet, otherwise
4038 * the chip will take a long time to respond to MMIO reads.
4040 val = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
4041 bnx2_shmem_wr(bp, BNX2_PORT_FEATURE,
4042 val | BNX2_PORT_FEATURE_ASF_ENABLED);
4043 bnx2_fw_sync(bp, wol_msg, 1, 0);
4044 bnx2_shmem_wr(bp, BNX2_PORT_FEATURE, val);
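/* Power state handling: returning to D0 disarms PCI wake-up and clears
 * magic-packet mode in the EMAC and the ACPI enable in the RPM block.
 * Entering D3hot arms PCI wake from D3 and, on the 5709 when no firmware
 * message is outstanding, first records an "unprepared" PM state in shared
 * memory so the bootcode does not power the PHY down too early.
 */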
4050 bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
4056 pci_enable_wake(bp->pdev, PCI_D0, false);
4057 pci_set_power_state(bp->pdev, PCI_D0);
4059 val = BNX2_RD(bp, BNX2_EMAC_MODE);
4060 val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
4061 val &= ~BNX2_EMAC_MODE_MPKT;
4062 BNX2_WR(bp, BNX2_EMAC_MODE, val);
4064 val = BNX2_RD(bp, BNX2_RPM_CONFIG);
4065 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
4066 BNX2_WR(bp, BNX2_RPM_CONFIG, val);
4071 pci_wake_from_d3(bp->pdev, bp->wol);
4072 if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) ||
4073 (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1)) {
4076 pci_set_power_state(bp->pdev, PCI_D3hot);
4080 if (!bp->fw_last_msg && BNX2_CHIP(bp) == BNX2_CHIP_5709) {
4083 /* Tell firmware not to power down the PHY yet,
4084 * otherwise the other port may not respond to MMIO reads.
4087 val = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
4088 val &= ~BNX2_CONDITION_PM_STATE_MASK;
4089 val |= BNX2_CONDITION_PM_STATE_UNPREP;
4090 bnx2_shmem_wr(bp, BNX2_BC_STATE_CONDITION, val);
4092 pci_set_power_state(bp->pdev, PCI_D3hot);
4094 /* No more memory access after this point until
4095 * device is brought back to D0.
4106 bnx2_acquire_nvram_lock(struct bnx2 *bp)
4111 /* Request access to the flash interface. */
4112 BNX2_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
4113 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4114 val = BNX2_RD(bp, BNX2_NVM_SW_ARB);
4115 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
4121 if (j >= NVRAM_TIMEOUT_COUNT)
4128 bnx2_release_nvram_lock(struct bnx2 *bp)
4133 /* Relinquish the NVRAM interface. */
4134 BNX2_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
4136 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4137 val = BNX2_RD(bp, BNX2_NVM_SW_ARB);
4138 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
4144 if (j >= NVRAM_TIMEOUT_COUNT)
4152 bnx2_enable_nvram_write(struct bnx2 *bp)
4156 val = BNX2_RD(bp, BNX2_MISC_CFG);
4157 BNX2_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
4159 if (bp->flash_info->flags & BNX2_NV_WREN) {
4162 BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4163 BNX2_WR(bp, BNX2_NVM_COMMAND,
4164 BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
4166 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4169 val = BNX2_RD(bp, BNX2_NVM_COMMAND);
4170 if (val & BNX2_NVM_COMMAND_DONE)
4174 if (j >= NVRAM_TIMEOUT_COUNT)
4181 bnx2_disable_nvram_write(struct bnx2 *bp)
4185 val = BNX2_RD(bp, BNX2_MISC_CFG);
4186 BNX2_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
4191 bnx2_enable_nvram_access(struct bnx2 *bp)
4195 val = BNX2_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4196 /* Enable both bits, even on read. */
4197 BNX2_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4198 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
4202 bnx2_disable_nvram_access(struct bnx2 *bp)
4206 val = BNX2_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4207 /* Disable both bits, even after read. */
4208 BNX2_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4209 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
4210 BNX2_NVM_ACCESS_ENABLE_WR_EN));
4214 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
4219 if (bp->flash_info->flags & BNX2_NV_BUFFERED)
4220 /* Buffered flash, no erase needed */
4223 /* Build an erase command */
4224 cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
4225 BNX2_NVM_COMMAND_DOIT;
4227 /* Need to clear DONE bit separately. */
4228 BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4230 /* Address of the NVRAM to read from. */
4231 BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4233 /* Issue an erase command. */
4234 BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);
4236 /* Wait for completion. */
4237 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4242 val = BNX2_RD(bp, BNX2_NVM_COMMAND);
4243 if (val & BNX2_NVM_COMMAND_DONE)
4247 if (j >= NVRAM_TIMEOUT_COUNT)
4254 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
4259 /* Build the command word. */
4260 cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
4262 /* Calculate the offset within a buffered flash; not needed for the 5709. */
4263 if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4264 offset = ((offset / bp->flash_info->page_size) <<
4265 bp->flash_info->page_bits) +
4266 (offset % bp->flash_info->page_size);
4269 /* Need to clear DONE bit separately. */
4270 BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4272 /* Address of the NVRAM to read from. */
4273 BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4275 /* Issue a read command. */
4276 BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);
4278 /* Wait for completion. */
4279 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4284 val = BNX2_RD(bp, BNX2_NVM_COMMAND);
4285 if (val & BNX2_NVM_COMMAND_DONE) {
4286 __be32 v = cpu_to_be32(BNX2_RD(bp, BNX2_NVM_READ));
4287 memcpy(ret_val, &v, 4);
4291 if (j >= NVRAM_TIMEOUT_COUNT)
4299 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
4305 /* Build the command word. */
4306 cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
4308 /* Calculate the offset within a buffered flash; not needed for the 5709. */
4309 if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4310 offset = ((offset / bp->flash_info->page_size) <<
4311 bp->flash_info->page_bits) +
4312 (offset % bp->flash_info->page_size);
4315 /* Need to clear DONE bit separately. */
4316 BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4318 memcpy(&val32, val, 4);
4320 /* Write the data. */
4321 BNX2_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));
4323 /* Address of the NVRAM to write to. */
4324 BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4326 /* Issue the write command. */
4327 BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);
4329 /* Wait for completion. */
4330 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4333 if (BNX2_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
4336 if (j >= NVRAM_TIMEOUT_COUNT)
4343 bnx2_init_nvram(struct bnx2 *bp)
4346 int j, entry_count, rc = 0;
4347 const struct flash_spec *flash;
4349 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
4350 bp->flash_info = &flash_5709;
4351 goto get_flash_size;
4354 /* Determine the selected interface. */
4355 val = BNX2_RD(bp, BNX2_NVM_CFG1);
4357 entry_count = ARRAY_SIZE(flash_table);
4359 if (val & 0x40000000) {
4361 /* Flash interface has been reconfigured */
4362 for (j = 0, flash = &flash_table[0]; j < entry_count;
4364 if ((val & FLASH_BACKUP_STRAP_MASK) ==
4365 (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
4366 bp->flash_info = flash;
4373 /* Not yet reconfigured */
4375 if (val & (1 << 23))
4376 mask = FLASH_BACKUP_STRAP_MASK;
4378 mask = FLASH_STRAP_MASK;
4380 for (j = 0, flash = &flash_table[0]; j < entry_count;
4383 if ((val & mask) == (flash->strapping & mask)) {
4384 bp->flash_info = flash;
4386 /* Request access to the flash interface. */
4387 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4390 /* Enable access to flash interface */
4391 bnx2_enable_nvram_access(bp);
4393 /* Reconfigure the flash interface */
4394 BNX2_WR(bp, BNX2_NVM_CFG1, flash->config1);
4395 BNX2_WR(bp, BNX2_NVM_CFG2, flash->config2);
4396 BNX2_WR(bp, BNX2_NVM_CFG3, flash->config3);
4397 BNX2_WR(bp, BNX2_NVM_WRITE1, flash->write1);
4399 /* Disable access to flash interface */
4400 bnx2_disable_nvram_access(bp);
4401 bnx2_release_nvram_lock(bp);
4406 } /* if (val & 0x40000000) */
4408 if (j == entry_count) {
4409 bp->flash_info = NULL;
4410 pr_alert("Unknown flash/EEPROM type\n");
4415 val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
4416 val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
4418 bp->flash_size = val;
4420 bp->flash_size = bp->flash_info->total_size;
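/* NVRAM reads are performed in 4-byte dwords.  Unaligned offsets and
 * lengths are handled by reading the surrounding dwords with the
 * FIRST/LAST command flags and copying out only the requested bytes.
 */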
4426 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
4430 u32 cmd_flags, offset32, len32, extra;
4435 /* Request access to the flash interface. */
4436 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4439 /* Enable access to flash interface */
4440 bnx2_enable_nvram_access(bp);
4453 pre_len = 4 - (offset & 3);
4455 if (pre_len >= len32) {
4457 cmd_flags = BNX2_NVM_COMMAND_FIRST |
4458 BNX2_NVM_COMMAND_LAST;
4461 cmd_flags = BNX2_NVM_COMMAND_FIRST;
4464 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4469 memcpy(ret_buf, buf + (offset & 3), pre_len);
4476 extra = 4 - (len32 & 3);
4477 len32 = (len32 + 4) & ~3;
4484 cmd_flags = BNX2_NVM_COMMAND_LAST;
4486 cmd_flags = BNX2_NVM_COMMAND_FIRST |
4487 BNX2_NVM_COMMAND_LAST;
4489 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4491 memcpy(ret_buf, buf, 4 - extra);
4493 else if (len32 > 0) {
4496 /* Read the first word. */
4500 cmd_flags = BNX2_NVM_COMMAND_FIRST;
4502 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
4504 /* Advance to the next dword. */
4509 while (len32 > 4 && rc == 0) {
4510 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
4512 /* Advance to the next dword. */
4521 cmd_flags = BNX2_NVM_COMMAND_LAST;
4522 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4524 memcpy(ret_buf, buf, 4 - extra);
4527 /* Disable access to flash interface */
4528 bnx2_disable_nvram_access(bp);
4530 bnx2_release_nvram_lock(bp);
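/* NVRAM writes are read-modify-write at dword granularity, one flash page
 * at a time.  Non-buffered flash requires reading the whole page into a
 * scratch buffer, erasing it, and writing back both the preserved and the
 * new data; buffered flash skips the erase.
 */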
4536 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
4539 u32 written, offset32, len32;
4540 u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
4542 int align_start, align_end;
4547 align_start = align_end = 0;
4549 if ((align_start = (offset32 & 3))) {
4551 len32 += align_start;
4554 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
4559 align_end = 4 - (len32 & 3);
4561 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
4565 if (align_start || align_end) {
4566 align_buf = kmalloc(len32, GFP_KERNEL);
4567 if (align_buf == NULL)
4570 memcpy(align_buf, start, 4);
4573 memcpy(align_buf + len32 - 4, end, 4);
4575 memcpy(align_buf + align_start, data_buf, buf_size);
4579 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4580 flash_buffer = kmalloc(264, GFP_KERNEL);
4581 if (flash_buffer == NULL) {
4583 goto nvram_write_end;
4588 while ((written < len32) && (rc == 0)) {
4589 u32 page_start, page_end, data_start, data_end;
4590 u32 addr, cmd_flags;
4593 /* Find the page_start addr */
4594 page_start = offset32 + written;
4595 page_start -= (page_start % bp->flash_info->page_size);
4596 /* Find the page_end addr */
4597 page_end = page_start + bp->flash_info->page_size;
4598 /* Find the data_start addr */
4599 data_start = (written == 0) ? offset32 : page_start;
4600 /* Find the data_end addr */
4601 data_end = (page_end > offset32 + len32) ?
4602 (offset32 + len32) : page_end;
4604 /* Request access to the flash interface. */
4605 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4606 goto nvram_write_end;
4608 /* Enable access to flash interface */
4609 bnx2_enable_nvram_access(bp);
4611 cmd_flags = BNX2_NVM_COMMAND_FIRST;
4612 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4615 /* Read the whole page into the buffer
4616 * (non-buffered flash only) */
4617 for (j = 0; j < bp->flash_info->page_size; j += 4) {
4618 if (j == (bp->flash_info->page_size - 4)) {
4619 cmd_flags |= BNX2_NVM_COMMAND_LAST;
4621 rc = bnx2_nvram_read_dword(bp,
4627 goto nvram_write_end;
4633 /* Enable writes to flash interface (unlock write-protect) */
4634 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
4635 goto nvram_write_end;
4637 /* Loop to write back the buffer data from page_start to data_start. */
4640 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4641 /* Erase the page */
4642 if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
4643 goto nvram_write_end;
4645 /* Re-enable NVRAM writes for the actual write pass */
4646 bnx2_enable_nvram_write(bp);
4648 for (addr = page_start; addr < data_start;
4649 addr += 4, i += 4) {
4651 rc = bnx2_nvram_write_dword(bp, addr,
4652 &flash_buffer[i], cmd_flags);
4655 goto nvram_write_end;
4661 /* Loop to write the new data from data_start to data_end */
4662 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
4663 if ((addr == page_end - 4) ||
4664 ((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
4665 (addr == data_end - 4))) {
4667 cmd_flags |= BNX2_NVM_COMMAND_LAST;
4669 rc = bnx2_nvram_write_dword(bp, addr, buf,
4673 goto nvram_write_end;
4679 /* Loop to write back the buffer data from data_end to page_end. */
4681 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4682 for (addr = data_end; addr < page_end;
4683 addr += 4, i += 4) {
4685 if (addr == page_end-4) {
4686 cmd_flags = BNX2_NVM_COMMAND_LAST;
4688 rc = bnx2_nvram_write_dword(bp, addr,
4689 &flash_buffer[i], cmd_flags);
4692 goto nvram_write_end;
4698 /* Disable writes to flash interface (lock write-protect) */
4699 bnx2_disable_nvram_write(bp);
4701 /* Disable access to flash interface */
4702 bnx2_disable_nvram_access(bp);
4703 bnx2_release_nvram_lock(bp);
4705 /* Increment written */
4706 written += data_end - data_start;
4710 kfree(flash_buffer);
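/* Query the firmware capability mailbox: when the signature matches, the
 * "keep VLAN tag" and remote-PHY capabilities are recorded, the PHY port
 * (TP or FIBRE) is taken from the link status word, and an acknowledgement
 * signature is written back while the interface is running.
 */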
4716 bnx2_init_fw_cap(struct bnx2 *bp)
4720 bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4721 bp->flags &= ~BNX2_FLAG_CAN_KEEP_VLAN;
4723 if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
4724 bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4726 val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
4727 if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
4730 if ((val & BNX2_FW_CAP_CAN_KEEP_VLAN) == BNX2_FW_CAP_CAN_KEEP_VLAN) {
4731 bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4732 sig |= BNX2_DRV_ACK_CAP_SIGNATURE | BNX2_FW_CAP_CAN_KEEP_VLAN;
4735 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
4736 (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE)) {
4739 bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4741 link = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
4742 if (link & BNX2_LINK_STATUS_SERDES_LINK)
4743 bp->phy_port = PORT_FIBRE;
4745 bp->phy_port = PORT_TP;
4747 sig |= BNX2_DRV_ACK_CAP_SIGNATURE |
4748 BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
4751 if (netif_running(bp->dev) && sig)
4752 bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
4756 bnx2_setup_msix_tbl(struct bnx2 *bp)
4758 BNX2_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);
4760 BNX2_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
4761 BNX2_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
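/* Quiesce DMA before a reset: the 5706/5708 clear the TX/RX/DMA-engine
 * enable bits through MISC_ENABLE_CLR_BITS, while the 5709 clears the DMA
 * enable in MISC_NEW_CORE_CTL and polls the PCICFG device status for
 * pending transactions to drain.
 */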
4765 bnx2_wait_dma_complete(struct bnx2 *bp)
4771 * Wait for the current PCI transaction to complete before issuing a reset.
4774 if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) ||
4775 (BNX2_CHIP(bp) == BNX2_CHIP_5708)) {
4776 BNX2_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
4777 BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
4778 BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
4779 BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
4780 BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
4781 val = BNX2_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
4784 val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4785 val &= ~BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
4786 BNX2_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
4787 val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4789 for (i = 0; i < 100; i++) {
4791 val = BNX2_RD(bp, BNX2_PCICFG_DEVICE_CONTROL);
4792 if (!(val & BNX2_PCICFG_DEVICE_STATUS_NO_PEND))
4802 bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
4808 /* Wait for the current PCI transaction to complete before
4809 * issuing a reset. */
4810 bnx2_wait_dma_complete(bp);
4812 /* Wait for the firmware to tell us it is ok to issue a reset. */
4813 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);
4815 /* Deposit a driver reset signature so the firmware knows that
4816 * this is a soft reset. */
4817 bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
4818 BNX2_DRV_RESET_SIGNATURE_MAGIC);
4820 /* Do a dummy read to force the chip to complete all current transactions
4821 * before we issue a reset. */
4822 val = BNX2_RD(bp, BNX2_MISC_ID);
4824 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
4825 BNX2_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
4826 BNX2_RD(bp, BNX2_MISC_COMMAND);
4829 val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4830 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4832 BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
4835 val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4836 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4837 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4840 BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
4842 /* Reading back any register after chip reset will hang the
4843 * bus on 5706 A0 and A1. The msleep below provides plenty
4844 * of margin for write posting.
4846 if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) ||
4847 (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1))
4850 /* Reset takes approximately 30 usec */
4851 for (i = 0; i < 10; i++) {
4852 val = BNX2_RD(bp, BNX2_PCICFG_MISC_CONFIG);
4853 if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4854 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
4859 if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4860 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
4861 pr_err("Chip reset did not complete\n");
4866 /* Make sure byte swapping is properly configured. */
4867 val = BNX2_RD(bp, BNX2_PCI_SWAP_DIAG0);
4868 if (val != 0x01020304) {
4869 pr_err("Chip not in correct endian mode\n");
4873 /* Wait for the firmware to finish its initialization. */
4874 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 1, 0);
4878 spin_lock_bh(&bp->phy_lock);
4879 old_port = bp->phy_port;
4880 bnx2_init_fw_cap(bp);
4881 if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
4882 old_port != bp->phy_port)
4883 bnx2_set_default_remote_link(bp);
4884 spin_unlock_bh(&bp->phy_lock);
4886 if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
4887 /* Adjust the voltage regulator to two steps lower. The default
4888 * of this register is 0x0000000e. */
4889 BNX2_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
4891 /* Remove bad rbuf memory from the free pool. */
4892 rc = bnx2_alloc_bad_rbuf(bp);
4895 if (bp->flags & BNX2_FLAG_USING_MSIX) {
4896 bnx2_setup_msix_tbl(bp);
4897 /* Prevent MSIX table reads and writes from timing out */
4898 BNX2_WR(bp, BNX2_MISC_ECO_HW_CTL,
4899 BNX2_MISC_ECO_HW_CTL_LARGE_GRC_TMOUT_EN);
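/* bnx2_init_chip() reprograms the device after a reset: DMA byte/word
 * swapping and channel counts, context memory, the on-chip CPUs and their
 * firmware, NVRAM, the MAC address, MTU/RBUF sizing, the status and
 * statistics block addresses, and the host coalescing parameters.
 */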
4906 bnx2_init_chip(struct bnx2 *bp)
4911 /* Make sure the interrupt is not active. */
4912 BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
4914 val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
4915 BNX2_DMA_CONFIG_DATA_WORD_SWAP |
4917 BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
4919 BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
4920 DMA_READ_CHANS << 12 |
4921 DMA_WRITE_CHANS << 16;
4923 val |= (0x2 << 20) | (1 << 11);
4925 if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
4928 if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) &&
4929 (BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A0) &&
4930 !(bp->flags & BNX2_FLAG_PCIX))
4931 val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
4933 BNX2_WR(bp, BNX2_DMA_CONFIG, val);
4935 if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
4936 val = BNX2_RD(bp, BNX2_TDMA_CONFIG);
4937 val |= BNX2_TDMA_CONFIG_ONE_DMA;
4938 BNX2_WR(bp, BNX2_TDMA_CONFIG, val);
4941 if (bp->flags & BNX2_FLAG_PCIX) {
4944 pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4946 pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4947 val16 & ~PCI_X_CMD_ERO);
4950 BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
4951 BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
4952 BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
4953 BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
4955 /* Initialize context mapping and zero out the quick contexts. The
4956 * context block must have already been enabled. */
4957 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
4958 rc = bnx2_init_5709_context(bp);
4962 bnx2_init_context(bp);
4964 if ((rc = bnx2_init_cpus(bp)) != 0)
4967 bnx2_init_nvram(bp);
4969 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
4971 val = BNX2_RD(bp, BNX2_MQ_CONFIG);
4972 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4973 val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
4974 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
4975 val |= BNX2_MQ_CONFIG_BIN_MQ_MODE;
4976 if (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax)
4977 val |= BNX2_MQ_CONFIG_HALT_DIS;
4980 BNX2_WR(bp, BNX2_MQ_CONFIG, val);
4982 val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
4983 BNX2_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
4984 BNX2_WR(bp, BNX2_MQ_KNL_WIND_END, val);
4986 val = (BNX2_PAGE_BITS - 8) << 24;
4987 BNX2_WR(bp, BNX2_RV2P_CONFIG, val);
4989 /* Configure page size. */
4990 val = BNX2_RD(bp, BNX2_TBDR_CONFIG);
4991 val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
4992 val |= (BNX2_PAGE_BITS - 8) << 24 | 0x40;
4993 BNX2_WR(bp, BNX2_TBDR_CONFIG, val);
4995 val = bp->mac_addr[0] +
4996 (bp->mac_addr[1] << 8) +
4997 (bp->mac_addr[2] << 16) +
4999 (bp->mac_addr[4] << 8) +
5000 (bp->mac_addr[5] << 16);
5001 BNX2_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
5003 /* Program the MTU. Also include 4 bytes for CRC32. */
5005 val = mtu + ETH_HLEN + ETH_FCS_LEN;
5006 if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
5007 val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
5008 BNX2_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
5013 bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG, BNX2_RBUF_CONFIG_VAL(mtu));
5014 bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG2, BNX2_RBUF_CONFIG2_VAL(mtu));
5015 bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG3, BNX2_RBUF_CONFIG3_VAL(mtu));
5017 memset(bp->bnx2_napi[0].status_blk.msi, 0, bp->status_stats_size);
5018 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
5019 bp->bnx2_napi[i].last_status_idx = 0;
5021 bp->idle_chk_status_idx = 0xffff;
5023 /* Set up how to generate a link change interrupt. */
5024 BNX2_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
5026 BNX2_WR(bp, BNX2_HC_STATUS_ADDR_L,
5027 (u64) bp->status_blk_mapping & 0xffffffff);
5028 BNX2_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
5030 BNX2_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
5031 (u64) bp->stats_blk_mapping & 0xffffffff);
5032 BNX2_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
5033 (u64) bp->stats_blk_mapping >> 32);
5035 BNX2_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
5036 (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
5038 BNX2_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
5039 (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
5041 BNX2_WR(bp, BNX2_HC_COMP_PROD_TRIP,
5042 (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
5044 BNX2_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
5046 BNX2_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
5048 BNX2_WR(bp, BNX2_HC_COM_TICKS,
5049 (bp->com_ticks_int << 16) | bp->com_ticks);
5051 BNX2_WR(bp, BNX2_HC_CMD_TICKS,
5052 (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
5054 if (bp->flags & BNX2_FLAG_BROKEN_STATS)
5055 BNX2_WR(bp, BNX2_HC_STATS_TICKS, 0);
5057 BNX2_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
5058 BNX2_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */
5060 if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1)
5061 val = BNX2_HC_CONFIG_COLLECT_STATS;
5063 val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
5064 BNX2_HC_CONFIG_COLLECT_STATS;
5067 if (bp->flags & BNX2_FLAG_USING_MSIX) {
5068 BNX2_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
5069 BNX2_HC_MSIX_BIT_VECTOR_VAL);
5071 val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
5074 if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
5075 val |= BNX2_HC_CONFIG_ONE_SHOT | BNX2_HC_CONFIG_USE_INT_PARAM;
5077 BNX2_WR(bp, BNX2_HC_CONFIG, val);
5079 if (bp->rx_ticks < 25)
5080 bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 1);
5082 bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 0);
5084 for (i = 1; i < bp->irq_nvecs; i++) {
5085 u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) +
5086 BNX2_HC_SB_CONFIG_1;
5089 BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
5090 BNX2_HC_SB_CONFIG_1_RX_TMR_MODE |
5091 BNX2_HC_SB_CONFIG_1_ONE_SHOT);
5093 BNX2_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
5094 (bp->tx_quick_cons_trip_int << 16) |
5095 bp->tx_quick_cons_trip);
5097 BNX2_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
5098 (bp->tx_ticks_int << 16) | bp->tx_ticks);
5100 BNX2_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF,
5101 (bp->rx_quick_cons_trip_int << 16) |
5102 bp->rx_quick_cons_trip);
5104 BNX2_WR(bp, base + BNX2_HC_RX_TICKS_OFF,
5105 (bp->rx_ticks_int << 16) | bp->rx_ticks);
5108 /* Clear internal stats counters. */
5109 BNX2_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
5111 BNX2_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
5113 /* Initialize the receive filter. */
5114 bnx2_set_rx_mode(bp->dev);
5116 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
5117 val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);
5118 val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
5119 BNX2_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
5121 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
5124 BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
5125 BNX2_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
5129 bp->hc_cmd = BNX2_RD(bp, BNX2_HC_COMMAND);
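/* Reset the driver's software view of all TX/RX rings (producer/consumer
 * indices and byte sequence counters) before the rings are re-initialized.
 */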
5135 bnx2_clear_ring_states(struct bnx2 *bp)
5137 struct bnx2_napi *bnapi;
5138 struct bnx2_tx_ring_info *txr;
5139 struct bnx2_rx_ring_info *rxr;
5142 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
5143 bnapi = &bp->bnx2_napi[i];
5144 txr = &bnapi->tx_ring;
5145 rxr = &bnapi->rx_ring;
5148 txr->hw_tx_cons = 0;
5149 rxr->rx_prod_bseq = 0;
5152 rxr->rx_pg_prod = 0;
5153 rxr->rx_pg_cons = 0;
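/* Write the TX ring's L2 context: ring type/size, command type, and the
 * 64-bit host address of the TX descriptor ring (the register offsets
 * differ on the 5709 family).
 */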
5158 bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
5160 u32 val, offset0, offset1, offset2, offset3;
5161 u32 cid_addr = GET_CID_ADDR(cid);
5163 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
5164 offset0 = BNX2_L2CTX_TYPE_XI;
5165 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
5166 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
5167 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
5169 offset0 = BNX2_L2CTX_TYPE;
5170 offset1 = BNX2_L2CTX_CMD_TYPE;
5171 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
5172 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
5174 val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
5175 bnx2_ctx_wr(bp, cid_addr, offset0, val);
5177 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
5178 bnx2_ctx_wr(bp, cid_addr, offset1, val);
5180 val = (u64) txr->tx_desc_mapping >> 32;
5181 bnx2_ctx_wr(bp, cid_addr, offset2, val);
5183 val = (u64) txr->tx_desc_mapping & 0xffffffff;
5184 bnx2_ctx_wr(bp, cid_addr, offset3, val);
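/* Set up one TX ring: pick its context ID, link the last BD back to the
 * start of the ring, reset the producer state, and program the context.
 */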
5188 bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
5190 struct bnx2_tx_bd *txbd;
5192 struct bnx2_napi *bnapi;
5193 struct bnx2_tx_ring_info *txr;
5195 bnapi = &bp->bnx2_napi[ring_num];
5196 txr = &bnapi->tx_ring;
5201 cid = TX_TSS_CID + ring_num - 1;
5203 bp->tx_wake_thresh = bp->tx_ring_size / 2;
5205 txbd = &txr->tx_desc_ring[BNX2_MAX_TX_DESC_CNT];
5207 txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
5208 txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;
5211 txr->tx_prod_bseq = 0;
5213 txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
5214 txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
5216 bnx2_init_tx_context(bp, cid, txr);
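/* Initialize a chain of RX BD pages: every BD gets the buffer size and
 * START/END flags, and the last BD of each page points to the next page,
 * with the final page wrapping back to the first.
 */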
5220 bnx2_init_rxbd_rings(struct bnx2_rx_bd *rx_ring[], dma_addr_t dma[],
5221 u32 buf_size, int num_rings)
5224 struct bnx2_rx_bd *rxbd;
5226 for (i = 0; i < num_rings; i++) {
5229 rxbd = &rx_ring[i][0];
5230 for (j = 0; j < BNX2_MAX_RX_DESC_CNT; j++, rxbd++) {
5231 rxbd->rx_bd_len = buf_size;
5232 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
5234 if (i == (num_rings - 1))
5238 rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
5239 rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
5244 bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
5247 u16 prod, ring_prod;
5248 u32 cid, rx_cid_addr, val;
5249 struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num];
5250 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5255 cid = RX_RSS_CID + ring_num - 1;
5257 rx_cid_addr = GET_CID_ADDR(cid);
5259 bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping,
5260 bp->rx_buf_use_size, bp->rx_max_ring);
5262 bnx2_init_rx_context(bp, cid);
5264 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
5265 val = BNX2_RD(bp, BNX2_MQ_MAP_L2_5);
5266 BNX2_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
5269 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
5270 if (bp->rx_pg_ring_size) {
5271 bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring,
5272 rxr->rx_pg_desc_mapping,
5273 PAGE_SIZE, bp->rx_max_pg_ring);
5274 val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
5275 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
5276 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
5277 BNX2_L2CTX_RBDC_JUMBO_KEY - ring_num);
5279 val = (u64) rxr->rx_pg_desc_mapping[0] >> 32;
5280 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);
5282 val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff;
5283 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);
5285 if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
5286 BNX2_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
5289 val = (u64) rxr->rx_desc_mapping[0] >> 32;
5290 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
5292 val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff;
5293 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
5295 ring_prod = prod = rxr->rx_pg_prod;
5296 for (i = 0; i < bp->rx_pg_ring_size; i++) {
5297 if (bnx2_alloc_rx_page(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
5298 netdev_warn(bp->dev, "init'ed rx page ring %d with %d/%d pages only\n",
5299 ring_num, i, bp->rx_pg_ring_size);
5302 prod = BNX2_NEXT_RX_BD(prod);
5303 ring_prod = BNX2_RX_PG_RING_IDX(prod);
5305 rxr->rx_pg_prod = prod;
5307 ring_prod = prod = rxr->rx_prod;
5308 for (i = 0; i < bp->rx_ring_size; i++) {
5309 if (bnx2_alloc_rx_data(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
5310 netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n",
5311 ring_num, i, bp->rx_ring_size);
5314 prod = BNX2_NEXT_RX_BD(prod);
5315 ring_prod = BNX2_RX_RING_IDX(prod);
5317 rxr->rx_prod = prod;
5319 rxr->rx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BDIDX;
5320 rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ;
5321 rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX;
5323 BNX2_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
5324 BNX2_WR16(bp, rxr->rx_bidx_addr, prod);
5326 BNX2_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
5330 bnx2_init_all_rings(struct bnx2 *bp)
5335 bnx2_clear_ring_states(bp);
5337 BNX2_WR(bp, BNX2_TSCH_TSS_CFG, 0);
5338 for (i = 0; i < bp->num_tx_rings; i++)
5339 bnx2_init_tx_ring(bp, i);
5341 if (bp->num_tx_rings > 1)
5342 BNX2_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
5345 BNX2_WR(bp, BNX2_RLUP_RSS_CONFIG, 0);
5346 bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0);
5348 for (i = 0; i < bp->num_rx_rings; i++)
5349 bnx2_init_rx_ring(bp, i);
5351 if (bp->num_rx_rings > 1) {
5354 for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
5355 int shift = (i % 8) << 2;
5357 tbl_32 |= (i % (bp->num_rx_rings - 1)) << shift;
5359 BNX2_WR(bp, BNX2_RLUP_RSS_DATA, tbl_32);
5360 BNX2_WR(bp, BNX2_RLUP_RSS_COMMAND, (i >> 3) |
5361 BNX2_RLUP_RSS_COMMAND_RSS_WRITE_MASK |
5362 BNX2_RLUP_RSS_COMMAND_WRITE |
5363 BNX2_RLUP_RSS_COMMAND_HASH_MASK);
5368 val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI |
5369 BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI;
5371 BNX2_WR(bp, BNX2_RLUP_RSS_CONFIG, val);
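/* Return the number of BD pages needed to hold ring_size descriptors,
 * rounded up to a power of two and capped at max_size.
 */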
5376 static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
5378 u32 max, num_rings = 1;
5380 while (ring_size > BNX2_MAX_RX_DESC_CNT) {
5381 ring_size -= BNX2_MAX_RX_DESC_CNT;
5384 /* round to next power of 2 */
5386 while ((max & num_rings) == 0)
5389 if (num_rings != max)
5396 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
5398 u32 rx_size, rx_space, jumbo_size;
5400 /* 8 for CRC and VLAN */
5401 rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;
5403 rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
5404 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
5406 bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
5407 bp->rx_pg_ring_size = 0;
5408 bp->rx_max_pg_ring = 0;
5409 bp->rx_max_pg_ring_idx = 0;
5410 if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
5411 int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
5413 jumbo_size = size * pages;
5414 if (jumbo_size > BNX2_MAX_TOTAL_RX_PG_DESC_CNT)
5415 jumbo_size = BNX2_MAX_TOTAL_RX_PG_DESC_CNT;
5417 bp->rx_pg_ring_size = jumbo_size;
5418 bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
5419 BNX2_MAX_RX_PG_RINGS);
5420 bp->rx_max_pg_ring_idx =
5421 (bp->rx_max_pg_ring * BNX2_RX_DESC_CNT) - 1;
5422 rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
5423 bp->rx_copy_thresh = 0;
5426 bp->rx_buf_use_size = rx_size;
5427 	/* hw alignment + build_skb() overhead */
5428 bp->rx_buf_size = SKB_DATA_ALIGN(bp->rx_buf_use_size + BNX2_RX_ALIGN) +
5429 NET_SKB_PAD + SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
5430 bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
5431 bp->rx_ring_size = size;
5432 bp->rx_max_ring = bnx2_find_max_ring(size, BNX2_MAX_RX_RINGS);
5433 bp->rx_max_ring_idx = (bp->rx_max_ring * BNX2_RX_DESC_CNT) - 1;
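/* Unmap and free every skb still held in the TX rings, then reset the
 * BQL state for each TX queue.
 */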
5437 bnx2_free_tx_skbs(struct bnx2 *bp)
5441 for (i = 0; i < bp->num_tx_rings; i++) {
5442 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5443 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
5446 if (txr->tx_buf_ring == NULL)
5449 for (j = 0; j < BNX2_TX_DESC_CNT; ) {
5450 struct bnx2_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
5451 struct sk_buff *skb = tx_buf->skb;
5455 j = BNX2_NEXT_TX_BD(j);
5459 dma_unmap_single(&bp->pdev->dev,
5460 dma_unmap_addr(tx_buf, mapping),
5466 last = tx_buf->nr_frags;
5467 j = BNX2_NEXT_TX_BD(j);
5468 for (k = 0; k < last; k++, j = BNX2_NEXT_TX_BD(j)) {
5469 tx_buf = &txr->tx_buf_ring[BNX2_TX_RING_IDX(j)];
5470 dma_unmap_page(&bp->pdev->dev,
5471 dma_unmap_addr(tx_buf, mapping),
5472 skb_frag_size(&skb_shinfo(skb)->frags[k]),
5477 netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
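/* Unmap and free all RX data buffers and RX pages still owned by the
 * driver.
 */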
5482 bnx2_free_rx_skbs(struct bnx2 *bp)
5486 for (i = 0; i < bp->num_rx_rings; i++) {
5487 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5488 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5491 if (rxr->rx_buf_ring == NULL)
5494 for (j = 0; j < bp->rx_max_ring_idx; j++) {
5495 struct bnx2_sw_bd *rx_buf = &rxr->rx_buf_ring[j];
5496 u8 *data = rx_buf->data;
5501 dma_unmap_single(&bp->pdev->dev,
5502 dma_unmap_addr(rx_buf, mapping),
5503 bp->rx_buf_use_size,
5504 PCI_DMA_FROMDEVICE);
5506 rx_buf->data = NULL;
5510 for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
5511 bnx2_free_rx_page(bp, rxr, j);
5516 bnx2_free_skbs(struct bnx2 *bp)
5518 bnx2_free_tx_skbs(bp);
5519 bnx2_free_rx_skbs(bp);
5523 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
5527 rc = bnx2_reset_chip(bp, reset_code);
5532 if ((rc = bnx2_init_chip(bp)) != 0)
5535 bnx2_init_all_rings(bp);
5540 bnx2_init_nic(struct bnx2 *bp, int reset_phy)
5544 if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
5547 spin_lock_bh(&bp->phy_lock);
5548 bnx2_init_phy(bp, reset_phy);
5550 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5551 bnx2_remote_phy_event(bp);
5552 spin_unlock_bh(&bp->phy_lock);
5557 bnx2_shutdown_chip(struct bnx2 *bp)
5561 if (bp->flags & BNX2_FLAG_NO_WOL)
5562 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
5564 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5566 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5568 return bnx2_reset_chip(bp, reset_code);
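/* Ethtool self-test: walk a table of registers, checking that read/write
 * bits toggle and read-only bits are preserved when 0 and 0xffffffff are
 * written.  Entries flagged BNX2_FL_NOT_5709 are skipped on the 5709/5716.
 */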
5572 bnx2_test_registers(struct bnx2 *bp)
5576 static const struct {
5579 #define BNX2_FL_NOT_5709 1
5583 { 0x006c, 0, 0x00000000, 0x0000003f },
5584 { 0x0090, 0, 0xffffffff, 0x00000000 },
5585 { 0x0094, 0, 0x00000000, 0x00000000 },
5587 { 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
5588 { 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5589 { 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5590 { 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
5591 { 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
5592 { 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
5593 { 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
5594 { 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5595 { 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5597 { 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5598 { 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5599 { 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5600 { 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5601 { 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5602 { 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5604 { 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
5605 { 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
5606 { 0x0c08, BNX2_FL_NOT_5709, 0x0f0ff073, 0x00000000 },
5608 { 0x1000, 0, 0x00000000, 0x00000001 },
5609 { 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 },
5611 { 0x1408, 0, 0x01c00800, 0x00000000 },
5612 { 0x149c, 0, 0x8000ffff, 0x00000000 },
5613 { 0x14a8, 0, 0x00000000, 0x000001ff },
5614 { 0x14ac, 0, 0x0fffffff, 0x10000000 },
5615 { 0x14b0, 0, 0x00000002, 0x00000001 },
5616 { 0x14b8, 0, 0x00000000, 0x00000000 },
5617 { 0x14c0, 0, 0x00000000, 0x00000009 },
5618 { 0x14c4, 0, 0x00003fff, 0x00000000 },
5619 { 0x14cc, 0, 0x00000000, 0x00000001 },
5620 { 0x14d0, 0, 0xffffffff, 0x00000000 },
5622 { 0x1800, 0, 0x00000000, 0x00000001 },
5623 { 0x1804, 0, 0x00000000, 0x00000003 },
5625 { 0x2800, 0, 0x00000000, 0x00000001 },
5626 { 0x2804, 0, 0x00000000, 0x00003f01 },
5627 { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
5628 { 0x2810, 0, 0xffff0000, 0x00000000 },
5629 { 0x2814, 0, 0xffff0000, 0x00000000 },
5630 { 0x2818, 0, 0xffff0000, 0x00000000 },
5631 { 0x281c, 0, 0xffff0000, 0x00000000 },
5632 { 0x2834, 0, 0xffffffff, 0x00000000 },
5633 { 0x2840, 0, 0x00000000, 0xffffffff },
5634 { 0x2844, 0, 0x00000000, 0xffffffff },
5635 { 0x2848, 0, 0xffffffff, 0x00000000 },
5636 { 0x284c, 0, 0xf800f800, 0x07ff07ff },
5638 { 0x2c00, 0, 0x00000000, 0x00000011 },
5639 { 0x2c04, 0, 0x00000000, 0x00030007 },
5641 { 0x3c00, 0, 0x00000000, 0x00000001 },
5642 { 0x3c04, 0, 0x00000000, 0x00070000 },
5643 { 0x3c08, 0, 0x00007f71, 0x07f00000 },
5644 { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
5645 { 0x3c10, 0, 0xffffffff, 0x00000000 },
5646 { 0x3c14, 0, 0x00000000, 0xffffffff },
5647 { 0x3c18, 0, 0x00000000, 0xffffffff },
5648 { 0x3c1c, 0, 0xfffff000, 0x00000000 },
5649 { 0x3c20, 0, 0xffffff00, 0x00000000 },
5651 { 0x5004, 0, 0x00000000, 0x0000007f },
5652 { 0x5008, 0, 0x0f0007ff, 0x00000000 },
5654 { 0x5c00, 0, 0x00000000, 0x00000001 },
5655 { 0x5c04, 0, 0x00000000, 0x0003000f },
5656 { 0x5c08, 0, 0x00000003, 0x00000000 },
5657 { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
5658 { 0x5c10, 0, 0x00000000, 0xffffffff },
5659 { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
5660 { 0x5c84, 0, 0x00000000, 0x0000f333 },
5661 { 0x5c88, 0, 0x00000000, 0x00077373 },
5662 { 0x5c8c, 0, 0x00000000, 0x0007f737 },
5664 { 0x6808, 0, 0x0000ff7f, 0x00000000 },
5665 { 0x680c, 0, 0xffffffff, 0x00000000 },
5666 { 0x6810, 0, 0xffffffff, 0x00000000 },
5667 { 0x6814, 0, 0xffffffff, 0x00000000 },
5668 { 0x6818, 0, 0xffffffff, 0x00000000 },
5669 { 0x681c, 0, 0xffffffff, 0x00000000 },
5670 { 0x6820, 0, 0x00ff00ff, 0x00000000 },
5671 { 0x6824, 0, 0x00ff00ff, 0x00000000 },
5672 { 0x6828, 0, 0x00ff00ff, 0x00000000 },
5673 { 0x682c, 0, 0x03ff03ff, 0x00000000 },
5674 { 0x6830, 0, 0x03ff03ff, 0x00000000 },
5675 { 0x6834, 0, 0x03ff03ff, 0x00000000 },
5676 { 0x6838, 0, 0x03ff03ff, 0x00000000 },
5677 { 0x683c, 0, 0x0000ffff, 0x00000000 },
5678 { 0x6840, 0, 0x00000ff0, 0x00000000 },
5679 { 0x6844, 0, 0x00ffff00, 0x00000000 },
5680 { 0x684c, 0, 0xffffffff, 0x00000000 },
5681 { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
5682 { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
5683 { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
5684 { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
5685 { 0x6908, 0, 0x00000000, 0x0001ff0f },
5686 { 0x690c, 0, 0x00000000, 0x0ffe00f0 },
5688 { 0xffff, 0, 0x00000000, 0x00000000 },
5693 if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
5696 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
5697 u32 offset, rw_mask, ro_mask, save_val, val;
5698 u16 flags = reg_tbl[i].flags;
5700 if (is_5709 && (flags & BNX2_FL_NOT_5709))
5703 offset = (u32) reg_tbl[i].offset;
5704 rw_mask = reg_tbl[i].rw_mask;
5705 ro_mask = reg_tbl[i].ro_mask;
5707 save_val = readl(bp->regview + offset);
5709 writel(0, bp->regview + offset);
5711 val = readl(bp->regview + offset);
5712 if ((val & rw_mask) != 0) {
5716 if ((val & ro_mask) != (save_val & ro_mask)) {
5720 writel(0xffffffff, bp->regview + offset);
5722 val = readl(bp->regview + offset);
5723 if ((val & rw_mask) != rw_mask) {
5727 if ((val & ro_mask) != (save_val & ro_mask)) {
5731 writel(save_val, bp->regview + offset);
5735 writel(save_val, bp->regview + offset);
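/* Write a set of test patterns through the indirect register interface to
 * an on-chip memory range and verify that each word reads back correctly.
 */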
5743 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
5745 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
5746 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
5749 for (i = 0; i < sizeof(test_pattern) / 4; i++) {
5752 for (offset = 0; offset < size; offset += 4) {
5754 bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);
5756 if (bnx2_reg_rd_ind(bp, start + offset) !=
5766 bnx2_test_memory(struct bnx2 *bp)
5770 static struct mem_entry {
5773 } mem_tbl_5706[] = {
5774 { 0x60000, 0x4000 },
5775 { 0xa0000, 0x3000 },
5776 { 0xe0000, 0x4000 },
5777 { 0x120000, 0x4000 },
5778 { 0x1a0000, 0x4000 },
5779 { 0x160000, 0x4000 },
5783 { 0x60000, 0x4000 },
5784 { 0xa0000, 0x3000 },
5785 { 0xe0000, 0x4000 },
5786 { 0x120000, 0x4000 },
5787 { 0x1a0000, 0x4000 },
5790 struct mem_entry *mem_tbl;
5792 if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
5793 mem_tbl = mem_tbl_5709;
5795 mem_tbl = mem_tbl_5706;
5797 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
5798 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
5799 mem_tbl[i].len)) != 0) {
5807 #define BNX2_MAC_LOOPBACK 0
5808 #define BNX2_PHY_LOOPBACK 1
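/* Send one self-addressed packet in MAC or PHY loopback mode and verify
 * that it is received intact with no errors flagged in the L2 frame
 * header.  Returns non-zero on any failure.
 */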
5811 bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
5813 unsigned int pkt_size, num_pkts, i;
5814 struct sk_buff *skb;
5816 unsigned char *packet;
5817 u16 rx_start_idx, rx_idx;
5819 struct bnx2_tx_bd *txbd;
5820 struct bnx2_sw_bd *rx_buf;
5821 struct l2_fhdr *rx_hdr;
5823 struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
5824 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
5825 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5829 txr = &tx_napi->tx_ring;
5830 rxr = &bnapi->rx_ring;
5831 if (loopback_mode == BNX2_MAC_LOOPBACK) {
5832 bp->loopback = MAC_LOOPBACK;
5833 bnx2_set_mac_loopback(bp);
5835 else if (loopback_mode == BNX2_PHY_LOOPBACK) {
5836 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5839 bp->loopback = PHY_LOOPBACK;
5840 bnx2_set_phy_loopback(bp);
5845 pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
5846 skb = netdev_alloc_skb(bp->dev, pkt_size);
5849 packet = skb_put(skb, pkt_size);
5850 memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
5851 memset(packet + ETH_ALEN, 0x0, 8);
5852 for (i = 14; i < pkt_size; i++)
5853 packet[i] = (unsigned char) (i & 0xff);
5855 map = dma_map_single(&bp->pdev->dev, skb->data, pkt_size,
5857 if (dma_mapping_error(&bp->pdev->dev, map)) {
5862 BNX2_WR(bp, BNX2_HC_COMMAND,
5863 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5865 BNX2_RD(bp, BNX2_HC_COMMAND);
5868 rx_start_idx = bnx2_get_hw_rx_cons(bnapi);
5872 txbd = &txr->tx_desc_ring[BNX2_TX_RING_IDX(txr->tx_prod)];
5874 txbd->tx_bd_haddr_hi = (u64) map >> 32;
5875 txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
5876 txbd->tx_bd_mss_nbytes = pkt_size;
5877 txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
5880 txr->tx_prod = BNX2_NEXT_TX_BD(txr->tx_prod);
5881 txr->tx_prod_bseq += pkt_size;
5883 BNX2_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
5884 BNX2_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
5888 BNX2_WR(bp, BNX2_HC_COMMAND,
5889 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5891 BNX2_RD(bp, BNX2_HC_COMMAND);
5895 dma_unmap_single(&bp->pdev->dev, map, pkt_size, PCI_DMA_TODEVICE);
5898 if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
5899 goto loopback_test_done;
5901 rx_idx = bnx2_get_hw_rx_cons(bnapi);
5902 if (rx_idx != rx_start_idx + num_pkts) {
5903 goto loopback_test_done;
5906 rx_buf = &rxr->rx_buf_ring[rx_start_idx];
5907 data = rx_buf->data;
5909 rx_hdr = get_l2_fhdr(data);
5910 data = (u8 *)rx_hdr + BNX2_RX_OFFSET;
5912 dma_sync_single_for_cpu(&bp->pdev->dev,
5913 dma_unmap_addr(rx_buf, mapping),
5914 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
5916 if (rx_hdr->l2_fhdr_status &
5917 (L2_FHDR_ERRORS_BAD_CRC |
5918 L2_FHDR_ERRORS_PHY_DECODE |
5919 L2_FHDR_ERRORS_ALIGNMENT |
5920 L2_FHDR_ERRORS_TOO_SHORT |
5921 L2_FHDR_ERRORS_GIANT_FRAME)) {
5923 goto loopback_test_done;
5926 if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
5927 goto loopback_test_done;
5930 for (i = 14; i < pkt_size; i++) {
5931 if (*(data + i) != (unsigned char) (i & 0xff)) {
5932 goto loopback_test_done;
5943 #define BNX2_MAC_LOOPBACK_FAILED 1
5944 #define BNX2_PHY_LOOPBACK_FAILED 2
5945 #define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
5946 BNX2_PHY_LOOPBACK_FAILED)
5949 bnx2_test_loopback(struct bnx2 *bp)
5953 if (!netif_running(bp->dev))
5954 return BNX2_LOOPBACK_FAILED;
5956 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5957 spin_lock_bh(&bp->phy_lock);
5958 bnx2_init_phy(bp, 1);
5959 spin_unlock_bh(&bp->phy_lock);
5960 if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5961 rc |= BNX2_MAC_LOOPBACK_FAILED;
5962 if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5963 rc |= BNX2_PHY_LOOPBACK_FAILED;
5967 #define NVRAM_SIZE 0x200
5968 #define CRC32_RESIDUAL 0xdebb20e3
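/* Verify the NVRAM magic value and check the CRC32 residual over the two
 * 256-byte configuration blocks read from offset 0x100.
 */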
5971 bnx2_test_nvram(struct bnx2 *bp)
5973 __be32 buf[NVRAM_SIZE / 4];
5974 u8 *data = (u8 *) buf;
5978 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5979 goto test_nvram_done;
5981 magic = be32_to_cpu(buf[0]);
5982 if (magic != 0x669955aa) {
5984 goto test_nvram_done;
5987 if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5988 goto test_nvram_done;
5990 csum = ether_crc_le(0x100, data);
5991 if (csum != CRC32_RESIDUAL) {
5993 goto test_nvram_done;
5996 csum = ether_crc_le(0x100, data + 0x100);
5997 if (csum != CRC32_RESIDUAL) {
6006 bnx2_test_link(struct bnx2 *bp)
6010 if (!netif_running(bp->dev))
6013 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
6018 spin_lock_bh(&bp->phy_lock);
6019 bnx2_enable_bmsr1(bp);
6020 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
6021 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
6022 bnx2_disable_bmsr1(bp);
6023 spin_unlock_bh(&bp->phy_lock);
6025 if (bmsr & BMSR_LSTATUS) {
6032 bnx2_test_intr(struct bnx2 *bp)
6037 if (!netif_running(bp->dev))
6040 status_idx = BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
6042 /* This register is not touched during run-time. */
6043 BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
6044 BNX2_RD(bp, BNX2_HC_COMMAND);
6046 for (i = 0; i < 10; i++) {
6047 if ((BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
6053 msleep_interruptible(10);
6061 /* Determine link for parallel detection. */
6063 bnx2_5706_serdes_has_link(struct bnx2 *bp)
6065 u32 mode_ctl, an_dbg, exp;
6067 if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
6070 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
6071 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);
6073 if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
6076 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
6077 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
6078 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
6080 if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
6083 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
6084 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
6085 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
6087 if (exp & MII_EXPAND_REG1_RUDI_C) /* receiving CONFIG */
6094 bnx2_5706_serdes_timer(struct bnx2 *bp)
6098 spin_lock(&bp->phy_lock);
6099 if (bp->serdes_an_pending) {
6100 bp->serdes_an_pending--;
6102 } else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
6105 bp->current_interval = BNX2_TIMER_INTERVAL;
6107 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6109 if (bmcr & BMCR_ANENABLE) {
6110 if (bnx2_5706_serdes_has_link(bp)) {
6111 bmcr &= ~BMCR_ANENABLE;
6112 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
6113 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
6114 bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
6118 else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
6119 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
6122 bnx2_write_phy(bp, 0x17, 0x0f01);
6123 bnx2_read_phy(bp, 0x15, &phy2);
6127 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6128 bmcr |= BMCR_ANENABLE;
6129 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
6131 bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
6134 bp->current_interval = BNX2_TIMER_INTERVAL;
6139 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
6140 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
6141 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
6143 if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
6144 if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
6145 bnx2_5706s_force_link_dn(bp, 1);
6146 bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
6149 } else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
6152 spin_unlock(&bp->phy_lock);
6156 bnx2_5708_serdes_timer(struct bnx2 *bp)
6158 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
6161 if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
6162 bp->serdes_an_pending = 0;
6166 spin_lock(&bp->phy_lock);
6167 if (bp->serdes_an_pending)
6168 bp->serdes_an_pending--;
6169 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
6172 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6173 if (bmcr & BMCR_ANENABLE) {
6174 bnx2_enable_forced_2g5(bp);
6175 bp->current_interval = BNX2_SERDES_FORCED_TIMEOUT;
6177 bnx2_disable_forced_2g5(bp);
6178 bp->serdes_an_pending = 2;
6179 bp->current_interval = BNX2_TIMER_INTERVAL;
6183 bp->current_interval = BNX2_TIMER_INTERVAL;
6185 spin_unlock(&bp->phy_lock);
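/* Periodic driver timer: send the management firmware heartbeat, work
 * around broken statistics and missed MSI interrupts, run the SerDes
 * link state machines, then rearm itself.
 */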
6189 bnx2_timer(unsigned long data)
6191 struct bnx2 *bp = (struct bnx2 *) data;
6193 if (!netif_running(bp->dev))
6196 if (atomic_read(&bp->intr_sem) != 0)
6197 goto bnx2_restart_timer;
6199 if ((bp->flags & (BNX2_FLAG_USING_MSI | BNX2_FLAG_ONE_SHOT_MSI)) ==
6200 BNX2_FLAG_USING_MSI)
6201 bnx2_chk_missed_msi(bp);
6203 bnx2_send_heart_beat(bp);
6205 bp->stats_blk->stat_FwRxDrop =
6206 bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT);
6208 	/* work around occasionally corrupted counters */
6209 if ((bp->flags & BNX2_FLAG_BROKEN_STATS) && bp->stats_ticks)
6210 BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
6211 BNX2_HC_COMMAND_STATS_NOW);
6213 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
6214 if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
6215 bnx2_5706_serdes_timer(bp);
6217 bnx2_5708_serdes_timer(bp);
6221 mod_timer(&bp->timer, jiffies + bp->current_interval);
6225 bnx2_request_irq(struct bnx2 *bp)
6227 unsigned long flags;
6228 struct bnx2_irq *irq;
6231 if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)
6234 flags = IRQF_SHARED;
6236 for (i = 0; i < bp->irq_nvecs; i++) {
6237 irq = &bp->irq_tbl[i];
6238 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
6248 __bnx2_free_irq(struct bnx2 *bp)
6250 struct bnx2_irq *irq;
6253 for (i = 0; i < bp->irq_nvecs; i++) {
6254 irq = &bp->irq_tbl[i];
6256 free_irq(irq->vector, &bp->bnx2_napi[i]);
6262 bnx2_free_irq(struct bnx2 *bp)
6265 __bnx2_free_irq(bp);
6266 if (bp->flags & BNX2_FLAG_USING_MSI)
6267 pci_disable_msi(bp->pdev);
6268 else if (bp->flags & BNX2_FLAG_USING_MSIX)
6269 pci_disable_msix(bp->pdev);
6271 bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI);
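/* Program the MSI-X table location in the chip and try to enable between
 * BNX2_MIN_MSIX_VEC and msix_vecs vectors; on success record the vectors
 * and switch the driver to one-shot MSI-X mode.
 */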
6275 bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
6278 struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
6279 struct net_device *dev = bp->dev;
6280 const int len = sizeof(bp->irq_tbl[0].name);
6282 bnx2_setup_msix_tbl(bp);
6283 BNX2_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
6284 BNX2_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
6285 BNX2_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);
6287 /* Need to flush the previous three writes to ensure MSI-X
6288 	 * is set up properly */
6289 BNX2_RD(bp, BNX2_PCI_MSIX_CONTROL);
6291 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
6292 msix_ent[i].entry = i;
6293 msix_ent[i].vector = 0;
6296 total_vecs = msix_vecs;
6300 total_vecs = pci_enable_msix_range(bp->pdev, msix_ent,
6301 BNX2_MIN_MSIX_VEC, total_vecs);
6305 msix_vecs = total_vecs;
6309 bp->irq_nvecs = msix_vecs;
6310 bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
6311 for (i = 0; i < total_vecs; i++) {
6312 bp->irq_tbl[i].vector = msix_ent[i].vector;
6313 snprintf(bp->irq_tbl[i].name, len, "%s-%d", dev->name, i);
6314 bp->irq_tbl[i].handler = bnx2_msi_1shot;
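/* Choose the interrupt mode (MSI-X, MSI, or INTx) based on hardware
 * capabilities and module parameters, and size the TX/RX ring counts to
 * the number of vectors obtained.
 */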
6319 bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
6321 int cpus = netif_get_num_default_rss_queues();
6324 if (!bp->num_req_rx_rings)
6325 msix_vecs = max(cpus + 1, bp->num_req_tx_rings);
6326 else if (!bp->num_req_tx_rings)
6327 msix_vecs = max(cpus, bp->num_req_rx_rings);
6329 msix_vecs = max(bp->num_req_rx_rings, bp->num_req_tx_rings);
6331 msix_vecs = min(msix_vecs, RX_MAX_RINGS);
6333 bp->irq_tbl[0].handler = bnx2_interrupt;
6334 strcpy(bp->irq_tbl[0].name, bp->dev->name);
6336 bp->irq_tbl[0].vector = bp->pdev->irq;
6338 if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi)
6339 bnx2_enable_msix(bp, msix_vecs);
6341 if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
6342 !(bp->flags & BNX2_FLAG_USING_MSIX)) {
6343 if (pci_enable_msi(bp->pdev) == 0) {
6344 bp->flags |= BNX2_FLAG_USING_MSI;
6345 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
6346 bp->flags |= BNX2_FLAG_ONE_SHOT_MSI;
6347 bp->irq_tbl[0].handler = bnx2_msi_1shot;
6349 bp->irq_tbl[0].handler = bnx2_msi;
6351 bp->irq_tbl[0].vector = bp->pdev->irq;
6355 if (!bp->num_req_tx_rings)
6356 bp->num_tx_rings = rounddown_pow_of_two(bp->irq_nvecs);
6358 bp->num_tx_rings = min(bp->irq_nvecs, bp->num_req_tx_rings);
6360 if (!bp->num_req_rx_rings)
6361 bp->num_rx_rings = bp->irq_nvecs;
6363 bp->num_rx_rings = min(bp->irq_nvecs, bp->num_req_rx_rings);
6365 netif_set_real_num_tx_queues(bp->dev, bp->num_tx_rings);
6367 return netif_set_real_num_rx_queues(bp->dev, bp->num_rx_rings);
6370 /* Called with rtnl_lock */
6372 bnx2_open(struct net_device *dev)
6374 struct bnx2 *bp = netdev_priv(dev);
6377 rc = bnx2_request_firmware(bp);
6381 netif_carrier_off(dev);
6383 bnx2_disable_int(bp);
6385 rc = bnx2_setup_int_mode(bp, disable_msi);
6389 bnx2_napi_enable(bp);
6390 rc = bnx2_alloc_mem(bp);
6394 rc = bnx2_request_irq(bp);
6398 rc = bnx2_init_nic(bp, 1);
6402 mod_timer(&bp->timer, jiffies + bp->current_interval);
6404 atomic_set(&bp->intr_sem, 0);
6406 memset(bp->temp_stats_blk, 0, sizeof(struct statistics_block));
6408 bnx2_enable_int(bp);
6410 if (bp->flags & BNX2_FLAG_USING_MSI) {
6411 		/* Test MSI to make sure it is working.
6412 		 * If the MSI test fails, go back to INTx mode.
6414 if (bnx2_test_intr(bp) != 0) {
6415 netdev_warn(bp->dev, "No interrupt was generated using MSI, switching to INTx mode. Please report this failure to the PCI maintainer and include system chipset information.\n");
6417 bnx2_disable_int(bp);
6420 bnx2_setup_int_mode(bp, 1);
6422 rc = bnx2_init_nic(bp, 0);
6425 rc = bnx2_request_irq(bp);
6428 del_timer_sync(&bp->timer);
6431 bnx2_enable_int(bp);
6434 if (bp->flags & BNX2_FLAG_USING_MSI)
6435 netdev_info(dev, "using MSI\n");
6436 else if (bp->flags & BNX2_FLAG_USING_MSIX)
6437 netdev_info(dev, "using MSIX\n");
6439 netif_tx_start_all_queues(dev);
6444 bnx2_napi_disable(bp);
6449 bnx2_release_firmware(bp);
6454 bnx2_reset_task(struct work_struct *work)
6456 struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
6461 if (!netif_running(bp->dev)) {
6466 bnx2_netif_stop(bp, true);
6468 pci_read_config_word(bp->pdev, PCI_COMMAND, &pcicmd);
6469 if (!(pcicmd & PCI_COMMAND_MEMORY)) {
6470 		/* in case the PCI block has been reset */
6471 pci_restore_state(bp->pdev);
6472 pci_save_state(bp->pdev);
6474 rc = bnx2_init_nic(bp, 1);
6476 netdev_err(bp->dev, "failed to reset NIC, closing\n");
6477 bnx2_napi_enable(bp);
6483 atomic_set(&bp->intr_sem, 1);
6484 bnx2_netif_start(bp, true);
6488 #define BNX2_FTQ_ENTRY(ftq) { __stringify(ftq##FTQ_CTL), BNX2_##ftq##FTQ_CTL }
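/* Debug dump of the flow-through queue control registers, internal CPU
 * states, and the TBDC CAM, printed when the TX path appears hung.
 */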
6491 bnx2_dump_ftq(struct bnx2 *bp)
6494 u32 reg, bdidx, cid, valid;
6495 struct net_device *dev = bp->dev;
6496 static const struct ftq_reg {
6500 BNX2_FTQ_ENTRY(RV2P_P),
6501 BNX2_FTQ_ENTRY(RV2P_T),
6502 BNX2_FTQ_ENTRY(RV2P_M),
6503 BNX2_FTQ_ENTRY(TBDR_),
6504 BNX2_FTQ_ENTRY(TDMA_),
6505 BNX2_FTQ_ENTRY(TXP_),
6506 BNX2_FTQ_ENTRY(TXP_),
6507 BNX2_FTQ_ENTRY(TPAT_),
6508 BNX2_FTQ_ENTRY(RXP_C),
6509 BNX2_FTQ_ENTRY(RXP_),
6510 BNX2_FTQ_ENTRY(COM_COMXQ_),
6511 BNX2_FTQ_ENTRY(COM_COMTQ_),
6512 BNX2_FTQ_ENTRY(COM_COMQ_),
6513 BNX2_FTQ_ENTRY(CP_CPQ_),
6516 netdev_err(dev, "<--- start FTQ dump --->\n");
6517 for (i = 0; i < ARRAY_SIZE(ftq_arr); i++)
6518 netdev_err(dev, "%s %08x\n", ftq_arr[i].name,
6519 bnx2_reg_rd_ind(bp, ftq_arr[i].off));
6521 netdev_err(dev, "CPU states:\n");
6522 for (reg = BNX2_TXP_CPU_MODE; reg <= BNX2_CP_CPU_MODE; reg += 0x40000)
6523 netdev_err(dev, "%06x mode %x state %x evt_mask %x pc %x pc %x instr %x\n",
6524 reg, bnx2_reg_rd_ind(bp, reg),
6525 bnx2_reg_rd_ind(bp, reg + 4),
6526 bnx2_reg_rd_ind(bp, reg + 8),
6527 bnx2_reg_rd_ind(bp, reg + 0x1c),
6528 bnx2_reg_rd_ind(bp, reg + 0x1c),
6529 bnx2_reg_rd_ind(bp, reg + 0x20));
6531 netdev_err(dev, "<--- end FTQ dump --->\n");
6532 netdev_err(dev, "<--- start TBDC dump --->\n");
6533 netdev_err(dev, "TBDC free cnt: %ld\n",
6534 BNX2_RD(bp, BNX2_TBDC_STATUS) & BNX2_TBDC_STATUS_FREE_CNT);
6535 netdev_err(dev, "LINE CID BIDX CMD VALIDS\n");
6536 for (i = 0; i < 0x20; i++) {
6539 BNX2_WR(bp, BNX2_TBDC_BD_ADDR, i);
6540 BNX2_WR(bp, BNX2_TBDC_CAM_OPCODE,
6541 BNX2_TBDC_CAM_OPCODE_OPCODE_CAM_READ);
6542 BNX2_WR(bp, BNX2_TBDC_COMMAND, BNX2_TBDC_COMMAND_CMD_REG_ARB);
6543 while ((BNX2_RD(bp, BNX2_TBDC_COMMAND) &
6544 BNX2_TBDC_COMMAND_CMD_REG_ARB) && j < 100)
6547 cid = BNX2_RD(bp, BNX2_TBDC_CID);
6548 bdidx = BNX2_RD(bp, BNX2_TBDC_BIDX);
6549 valid = BNX2_RD(bp, BNX2_TBDC_CAM_OPCODE);
6550 netdev_err(dev, "%02x %06x %04lx %02x [%x]\n",
6551 i, cid, bdidx & BNX2_TBDC_BDIDX_BDIDX,
6552 bdidx >> 24, (valid >> 8) & 0x0ff);
6554 netdev_err(dev, "<--- end TBDC dump --->\n");
6558 bnx2_dump_state(struct bnx2 *bp)
6560 struct net_device *dev = bp->dev;
6563 pci_read_config_dword(bp->pdev, PCI_COMMAND, &val1);
6564 netdev_err(dev, "DEBUG: intr_sem[%x] PCI_CMD[%08x]\n",
6565 atomic_read(&bp->intr_sem), val1);
6566 pci_read_config_dword(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &val1);
6567 pci_read_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, &val2);
6568 netdev_err(dev, "DEBUG: PCI_PM[%08x] PCI_MISC_CFG[%08x]\n", val1, val2);
6569 netdev_err(dev, "DEBUG: EMAC_TX_STATUS[%08x] EMAC_RX_STATUS[%08x]\n",
6570 BNX2_RD(bp, BNX2_EMAC_TX_STATUS),
6571 BNX2_RD(bp, BNX2_EMAC_RX_STATUS));
6572 netdev_err(dev, "DEBUG: RPM_MGMT_PKT_CTRL[%08x]\n",
6573 BNX2_RD(bp, BNX2_RPM_MGMT_PKT_CTRL));
6574 netdev_err(dev, "DEBUG: HC_STATS_INTERRUPT_STATUS[%08x]\n",
6575 BNX2_RD(bp, BNX2_HC_STATS_INTERRUPT_STATUS));
6576 if (bp->flags & BNX2_FLAG_USING_MSIX)
6577 netdev_err(dev, "DEBUG: PBA[%08x]\n",
6578 BNX2_RD(bp, BNX2_PCI_GRC_WINDOW3_BASE));
6582 bnx2_tx_timeout(struct net_device *dev)
6584 struct bnx2 *bp = netdev_priv(dev);
6587 bnx2_dump_state(bp);
6588 bnx2_dump_mcp_state(bp);
6590 	/* This allows the netif to be shut down gracefully before resetting */
6591 schedule_work(&bp->reset_task);
6594 /* Called with netif_tx_lock.
6595 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
6596 * netif_wake_queue().
6599 bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
6601 struct bnx2 *bp = netdev_priv(dev);
6603 struct bnx2_tx_bd *txbd;
6604 struct bnx2_sw_tx_bd *tx_buf;
6605 u32 len, vlan_tag_flags, last_frag, mss;
6606 u16 prod, ring_prod;
6608 struct bnx2_napi *bnapi;
6609 struct bnx2_tx_ring_info *txr;
6610 struct netdev_queue *txq;
6612 	/* Determine which tx ring this skb will be placed on */
6613 i = skb_get_queue_mapping(skb);
6614 bnapi = &bp->bnx2_napi[i];
6615 txr = &bnapi->tx_ring;
6616 txq = netdev_get_tx_queue(dev, i);
6618 if (unlikely(bnx2_tx_avail(bp, txr) <
6619 (skb_shinfo(skb)->nr_frags + 1))) {
6620 netif_tx_stop_queue(txq);
6621 netdev_err(dev, "BUG! Tx ring full when queue awake!\n");
6623 return NETDEV_TX_BUSY;
6625 len = skb_headlen(skb);
6626 prod = txr->tx_prod;
6627 ring_prod = BNX2_TX_RING_IDX(prod);
6630 if (skb->ip_summed == CHECKSUM_PARTIAL) {
6631 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
6634 if (skb_vlan_tag_present(skb)) {
6636 (TX_BD_FLAGS_VLAN_TAG | (skb_vlan_tag_get(skb) << 16));
6639 if ((mss = skb_shinfo(skb)->gso_size)) {
6643 vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
6645 tcp_opt_len = tcp_optlen(skb);
6647 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
6648 u32 tcp_off = skb_transport_offset(skb) -
6649 sizeof(struct ipv6hdr) - ETH_HLEN;
6651 vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
6652 TX_BD_FLAGS_SW_FLAGS;
6653 if (likely(tcp_off == 0))
6654 vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
6657 vlan_tag_flags |= ((tcp_off & 0x3) <<
6658 TX_BD_FLAGS_TCP6_OFF0_SHL) |
6659 ((tcp_off & 0x10) <<
6660 TX_BD_FLAGS_TCP6_OFF4_SHL);
6661 mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
6665 if (tcp_opt_len || (iph->ihl > 5)) {
6666 vlan_tag_flags |= ((iph->ihl - 5) +
6667 (tcp_opt_len >> 2)) << 8;
6673 mapping = dma_map_single(&bp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE);
6674 if (dma_mapping_error(&bp->pdev->dev, mapping)) {
6675 dev_kfree_skb_any(skb);
6676 return NETDEV_TX_OK;
6679 tx_buf = &txr->tx_buf_ring[ring_prod];
6681 dma_unmap_addr_set(tx_buf, mapping, mapping);
6683 txbd = &txr->tx_desc_ring[ring_prod];
6685 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
6686 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
6687 txbd->tx_bd_mss_nbytes = len | (mss << 16);
6688 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
6690 last_frag = skb_shinfo(skb)->nr_frags;
6691 tx_buf->nr_frags = last_frag;
6692 tx_buf->is_gso = skb_is_gso(skb);
6694 for (i = 0; i < last_frag; i++) {
6695 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6697 prod = BNX2_NEXT_TX_BD(prod);
6698 ring_prod = BNX2_TX_RING_IDX(prod);
6699 txbd = &txr->tx_desc_ring[ring_prod];
6701 len = skb_frag_size(frag);
6702 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0, len,
6704 if (dma_mapping_error(&bp->pdev->dev, mapping))
6706 dma_unmap_addr_set(&txr->tx_buf_ring[ring_prod], mapping,
6709 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
6710 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
6711 txbd->tx_bd_mss_nbytes = len | (mss << 16);
6712 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
6715 txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
6717 /* Sync BD data before updating TX mailbox */
6720 netdev_tx_sent_queue(txq, skb->len);
6722 prod = BNX2_NEXT_TX_BD(prod);
6723 txr->tx_prod_bseq += skb->len;
6725 BNX2_WR16(bp, txr->tx_bidx_addr, prod);
6726 BNX2_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
6730 txr->tx_prod = prod;
6732 if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
6733 netif_tx_stop_queue(txq);
6735 /* netif_tx_stop_queue() must be done before checking
6736 * tx index in bnx2_tx_avail() below, because in
6737 * bnx2_tx_int(), we update tx index before checking for
6738 * netif_tx_queue_stopped().
6741 if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)
6742 netif_tx_wake_queue(txq);
6745 return NETDEV_TX_OK;
6747 /* save value of frag that failed */
6750 /* start back at beginning and unmap skb */
6751 prod = txr->tx_prod;
6752 ring_prod = BNX2_TX_RING_IDX(prod);
6753 tx_buf = &txr->tx_buf_ring[ring_prod];
6755 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
6756 skb_headlen(skb), PCI_DMA_TODEVICE);
6758 /* unmap remaining mapped pages */
6759 for (i = 0; i < last_frag; i++) {
6760 prod = BNX2_NEXT_TX_BD(prod);
6761 ring_prod = BNX2_TX_RING_IDX(prod);
6762 tx_buf = &txr->tx_buf_ring[ring_prod];
6763 dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
6764 skb_frag_size(&skb_shinfo(skb)->frags[i]),
6768 dev_kfree_skb_any(skb);
6769 return NETDEV_TX_OK;
6772 /* Called with rtnl_lock */
6774 bnx2_close(struct net_device *dev)
6776 struct bnx2 *bp = netdev_priv(dev);
6778 bnx2_disable_int_sync(bp);
6779 bnx2_napi_disable(bp);
6780 netif_tx_disable(dev);
6781 del_timer_sync(&bp->timer);
6782 bnx2_shutdown_chip(bp);
6788 netif_carrier_off(bp->dev);
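/* Accumulate the current hardware statistics block into temp_stats_blk so
 * the counters survive a chip reset; the first ten counters are 64-bit
 * (hi/lo pairs), the rest are 32-bit.
 */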
6793 bnx2_save_stats(struct bnx2 *bp)
6795 u32 *hw_stats = (u32 *) bp->stats_blk;
6796 u32 *temp_stats = (u32 *) bp->temp_stats_blk;
6799 /* The 1st 10 counters are 64-bit counters */
6800 for (i = 0; i < 20; i += 2) {
6804 hi = temp_stats[i] + hw_stats[i];
6805 lo = (u64) temp_stats[i + 1] + (u64) hw_stats[i + 1];
6806 if (lo > 0xffffffff)
6809 temp_stats[i + 1] = lo & 0xffffffff;
6812 for ( ; i < sizeof(struct statistics_block) / 4; i++)
6813 temp_stats[i] += hw_stats[i];
6816 #define GET_64BIT_NET_STATS64(ctr) \
6817 (((u64) (ctr##_hi) << 32) + (u64) (ctr##_lo))
6819 #define GET_64BIT_NET_STATS(ctr) \
6820 GET_64BIT_NET_STATS64(bp->stats_blk->ctr) + \
6821 GET_64BIT_NET_STATS64(bp->temp_stats_blk->ctr)
6823 #define GET_32BIT_NET_STATS(ctr) \
6824 (unsigned long) (bp->stats_blk->ctr + \
6825 bp->temp_stats_blk->ctr)
6827 static struct rtnl_link_stats64 *
6828 bnx2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
6830 struct bnx2 *bp = netdev_priv(dev);
6832 if (bp->stats_blk == NULL)
6835 net_stats->rx_packets =
6836 GET_64BIT_NET_STATS(stat_IfHCInUcastPkts) +
6837 GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts) +
6838 GET_64BIT_NET_STATS(stat_IfHCInBroadcastPkts);
6840 net_stats->tx_packets =
6841 GET_64BIT_NET_STATS(stat_IfHCOutUcastPkts) +
6842 GET_64BIT_NET_STATS(stat_IfHCOutMulticastPkts) +
6843 GET_64BIT_NET_STATS(stat_IfHCOutBroadcastPkts);
6845 net_stats->rx_bytes =
6846 GET_64BIT_NET_STATS(stat_IfHCInOctets);
6848 net_stats->tx_bytes =
6849 GET_64BIT_NET_STATS(stat_IfHCOutOctets);
6851 net_stats->multicast =
6852 GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts);
6854 net_stats->collisions =
6855 GET_32BIT_NET_STATS(stat_EtherStatsCollisions);
6857 net_stats->rx_length_errors =
6858 GET_32BIT_NET_STATS(stat_EtherStatsUndersizePkts) +
6859 GET_32BIT_NET_STATS(stat_EtherStatsOverrsizePkts);
6861 net_stats->rx_over_errors =
6862 GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
6863 GET_32BIT_NET_STATS(stat_IfInMBUFDiscards);
6865 net_stats->rx_frame_errors =
6866 GET_32BIT_NET_STATS(stat_Dot3StatsAlignmentErrors);
6868 net_stats->rx_crc_errors =
6869 GET_32BIT_NET_STATS(stat_Dot3StatsFCSErrors);
6871 net_stats->rx_errors = net_stats->rx_length_errors +
6872 net_stats->rx_over_errors + net_stats->rx_frame_errors +
6873 net_stats->rx_crc_errors;
6875 net_stats->tx_aborted_errors =
6876 GET_32BIT_NET_STATS(stat_Dot3StatsExcessiveCollisions) +
6877 GET_32BIT_NET_STATS(stat_Dot3StatsLateCollisions);
6879 if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) ||
6880 (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0))
6881 net_stats->tx_carrier_errors = 0;
6883 net_stats->tx_carrier_errors =
6884 GET_32BIT_NET_STATS(stat_Dot3StatsCarrierSenseErrors);
6887 net_stats->tx_errors =
6888 GET_32BIT_NET_STATS(stat_emac_tx_stat_dot3statsinternalmactransmiterrors) +
6889 net_stats->tx_aborted_errors +
6890 net_stats->tx_carrier_errors;
6892 net_stats->rx_missed_errors =
6893 GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
6894 GET_32BIT_NET_STATS(stat_IfInMBUFDiscards) +
6895 GET_32BIT_NET_STATS(stat_FwRxDrop);
6900 /* All ethtool functions called with rtnl_lock */
6903 bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6905 struct bnx2 *bp = netdev_priv(dev);
6906 int support_serdes = 0, support_copper = 0;
6908 cmd->supported = SUPPORTED_Autoneg;
6909 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
6912 } else if (bp->phy_port == PORT_FIBRE)
6917 if (support_serdes) {
6918 cmd->supported |= SUPPORTED_1000baseT_Full |
6920 if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
6921 cmd->supported |= SUPPORTED_2500baseX_Full;
6924 if (support_copper) {
6925 cmd->supported |= SUPPORTED_10baseT_Half |
6926 SUPPORTED_10baseT_Full |
6927 SUPPORTED_100baseT_Half |
6928 SUPPORTED_100baseT_Full |
6929 SUPPORTED_1000baseT_Full |
6934 spin_lock_bh(&bp->phy_lock);
6935 cmd->port = bp->phy_port;
6936 cmd->advertising = bp->advertising;
6938 if (bp->autoneg & AUTONEG_SPEED) {
6939 cmd->autoneg = AUTONEG_ENABLE;
6941 cmd->autoneg = AUTONEG_DISABLE;
6944 if (netif_carrier_ok(dev)) {
6945 ethtool_cmd_speed_set(cmd, bp->line_speed);
6946 cmd->duplex = bp->duplex;
6947 if (!(bp->phy_flags & BNX2_PHY_FLAG_SERDES)) {
6948 if (bp->phy_flags & BNX2_PHY_FLAG_MDIX)
6949 cmd->eth_tp_mdix = ETH_TP_MDI_X;
6951 cmd->eth_tp_mdix = ETH_TP_MDI;
6955 ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
6956 cmd->duplex = DUPLEX_UNKNOWN;
6958 spin_unlock_bh(&bp->phy_lock);
6960 cmd->transceiver = XCVR_INTERNAL;
6961 cmd->phy_address = bp->phy_addr;
6967 bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6969 struct bnx2 *bp = netdev_priv(dev);
6970 u8 autoneg = bp->autoneg;
6971 u8 req_duplex = bp->req_duplex;
6972 u16 req_line_speed = bp->req_line_speed;
6973 u32 advertising = bp->advertising;
6976 spin_lock_bh(&bp->phy_lock);
6978 if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
6979 goto err_out_unlock;
6981 if (cmd->port != bp->phy_port &&
6982 !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP))
6983 goto err_out_unlock;
6985 /* If device is down, we can store the settings only if the user
6986 * is setting the currently active port.
6988 if (!netif_running(dev) && cmd->port != bp->phy_port)
6989 goto err_out_unlock;
6991 if (cmd->autoneg == AUTONEG_ENABLE) {
6992 autoneg |= AUTONEG_SPEED;
6994 advertising = cmd->advertising;
6995 if (cmd->port == PORT_TP) {
6996 advertising &= ETHTOOL_ALL_COPPER_SPEED;
6998 advertising = ETHTOOL_ALL_COPPER_SPEED;
7000 advertising &= ETHTOOL_ALL_FIBRE_SPEED;
7002 advertising = ETHTOOL_ALL_FIBRE_SPEED;
7004 advertising |= ADVERTISED_Autoneg;
7007 u32 speed = ethtool_cmd_speed(cmd);
7008 if (cmd->port == PORT_FIBRE) {
7009 if ((speed != SPEED_1000 &&
7010 speed != SPEED_2500) ||
7011 (cmd->duplex != DUPLEX_FULL))
7012 goto err_out_unlock;
7014 if (speed == SPEED_2500 &&
7015 !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
7016 goto err_out_unlock;
7017 } else if (speed == SPEED_1000 || speed == SPEED_2500)
7018 goto err_out_unlock;
7020 autoneg &= ~AUTONEG_SPEED;
7021 req_line_speed = speed;
7022 req_duplex = cmd->duplex;
7026 bp->autoneg = autoneg;
7027 bp->advertising = advertising;
7028 bp->req_line_speed = req_line_speed;
7029 bp->req_duplex = req_duplex;
7032 /* If device is down, the new settings will be picked up when it is
7035 if (netif_running(dev))
7036 err = bnx2_setup_phy(bp, cmd->port);
7039 spin_unlock_bh(&bp->phy_lock);
7045 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
7047 struct bnx2 *bp = netdev_priv(dev);
7049 strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
7050 strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
7051 strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
7052 strlcpy(info->fw_version, bp->fw_version, sizeof(info->fw_version));
7055 #define BNX2_REGDUMP_LEN (32 * 1024)
7058 bnx2_get_regs_len(struct net_device *dev)
7060 return BNX2_REGDUMP_LEN;
7064 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
7066 u32 *p = _p, i, offset;
7068 struct bnx2 *bp = netdev_priv(dev);
7069 static const u32 reg_boundaries[] = {
7070 0x0000, 0x0098, 0x0400, 0x045c,
7071 0x0800, 0x0880, 0x0c00, 0x0c10,
7072 0x0c30, 0x0d08, 0x1000, 0x101c,
7073 0x1040, 0x1048, 0x1080, 0x10a4,
7074 0x1400, 0x1490, 0x1498, 0x14f0,
7075 0x1500, 0x155c, 0x1580, 0x15dc,
7076 0x1600, 0x1658, 0x1680, 0x16d8,
7077 0x1800, 0x1820, 0x1840, 0x1854,
7078 0x1880, 0x1894, 0x1900, 0x1984,
7079 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
7080 0x1c80, 0x1c94, 0x1d00, 0x1d84,
7081 0x2000, 0x2030, 0x23c0, 0x2400,
7082 0x2800, 0x2820, 0x2830, 0x2850,
7083 0x2b40, 0x2c10, 0x2fc0, 0x3058,
7084 0x3c00, 0x3c94, 0x4000, 0x4010,
7085 0x4080, 0x4090, 0x43c0, 0x4458,
7086 0x4c00, 0x4c18, 0x4c40, 0x4c54,
7087 0x4fc0, 0x5010, 0x53c0, 0x5444,
7088 0x5c00, 0x5c18, 0x5c80, 0x5c90,
7089 0x5fc0, 0x6000, 0x6400, 0x6428,
7090 0x6800, 0x6848, 0x684c, 0x6860,
7091 0x6888, 0x6910, 0x8000
7096 memset(p, 0, BNX2_REGDUMP_LEN);
7098 if (!netif_running(bp->dev))
7102 offset = reg_boundaries[0];
7104 while (offset < BNX2_REGDUMP_LEN) {
7105 *p++ = BNX2_RD(bp, offset);
7107 if (offset == reg_boundaries[i + 1]) {
7108 offset = reg_boundaries[i + 2];
7109 p = (u32 *) (orig_p + offset);
7116 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7118 struct bnx2 *bp = netdev_priv(dev);
7120 if (bp->flags & BNX2_FLAG_NO_WOL) {
7125 wol->supported = WAKE_MAGIC;
7127 wol->wolopts = WAKE_MAGIC;
7131 memset(&wol->sopass, 0, sizeof(wol->sopass));
7135 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7137 struct bnx2 *bp = netdev_priv(dev);
7139 if (wol->wolopts & ~WAKE_MAGIC)
7142 if (wol->wolopts & WAKE_MAGIC) {
7143 if (bp->flags & BNX2_FLAG_NO_WOL)
7152 device_set_wakeup_enable(&bp->pdev->dev, bp->wol);
7158 bnx2_nway_reset(struct net_device *dev)
7160 struct bnx2 *bp = netdev_priv(dev);
7163 if (!netif_running(dev))
7166 if (!(bp->autoneg & AUTONEG_SPEED)) {
7170 spin_lock_bh(&bp->phy_lock);
7172 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
7175 rc = bnx2_setup_remote_phy(bp, bp->phy_port);
7176 spin_unlock_bh(&bp->phy_lock);
7180 /* Force a link down visible on the other side */
7181 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
7182 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
7183 spin_unlock_bh(&bp->phy_lock);
7187 spin_lock_bh(&bp->phy_lock);
7189 bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
7190 bp->serdes_an_pending = 1;
7191 mod_timer(&bp->timer, jiffies + bp->current_interval);
7194 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
7195 bmcr &= ~BMCR_LOOPBACK;
7196 bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);
7198 spin_unlock_bh(&bp->phy_lock);
7204 bnx2_get_link(struct net_device *dev)
7206 struct bnx2 *bp = netdev_priv(dev);
7212 bnx2_get_eeprom_len(struct net_device *dev)
7214 struct bnx2 *bp = netdev_priv(dev);
7216 if (bp->flash_info == NULL)
7219 return (int) bp->flash_size;
7223 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
7226 struct bnx2 *bp = netdev_priv(dev);
7229 /* parameters already validated in ethtool_get_eeprom */
7231 rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
7237 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
7240 struct bnx2 *bp = netdev_priv(dev);
7243 /* parameters already validated in ethtool_set_eeprom */
7245 rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
7251 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
7253 struct bnx2 *bp = netdev_priv(dev);
7255 memset(coal, 0, sizeof(struct ethtool_coalesce));
7257 coal->rx_coalesce_usecs = bp->rx_ticks;
7258 coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
7259 coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
7260 coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
7262 coal->tx_coalesce_usecs = bp->tx_ticks;
7263 coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
7264 coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
7265 coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
7267 coal->stats_block_coalesce_usecs = bp->stats_ticks;
7273 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
7275 struct bnx2 *bp = netdev_priv(dev);
7277 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
7278 if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
7280 bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
7281 if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
7283 bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
7284 if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
7286 bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
7287 if (bp->rx_quick_cons_trip_int > 0xff)
7288 bp->rx_quick_cons_trip_int = 0xff;
7290 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
7291 if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
7293 bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
7294 if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
7296 bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
7297 if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
7299 bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
7300 if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
7303 bp->stats_ticks = coal->stats_block_coalesce_usecs;
7304 if (bp->flags & BNX2_FLAG_BROKEN_STATS) {
7305 if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
7306 bp->stats_ticks = USEC_PER_SEC;
7308 if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
7309 bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7310 bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7312 if (netif_running(bp->dev)) {
7313 bnx2_netif_stop(bp, true);
7314 bnx2_init_nic(bp, 0);
7315 bnx2_netif_start(bp, true);
7322 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7324 struct bnx2 *bp = netdev_priv(dev);
7326 ering->rx_max_pending = BNX2_MAX_TOTAL_RX_DESC_CNT;
7327 ering->rx_jumbo_max_pending = BNX2_MAX_TOTAL_RX_PG_DESC_CNT;
7329 ering->rx_pending = bp->rx_ring_size;
7330 ering->rx_jumbo_pending = bp->rx_pg_ring_size;
7332 ering->tx_max_pending = BNX2_MAX_TX_DESC_CNT;
7333 ering->tx_pending = bp->tx_ring_size;
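/* Apply new RX/TX ring sizes.  If the interface is up, the chip statistics
 * are saved first (a chip reset clears them), the NIC is torn down, memory
 * and interrupts are re-allocated for the new sizes, and the NIC is then
 * re-initialized and restarted.
 */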
7337 bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx, bool reset_irq)
7339 if (netif_running(bp->dev)) {
7340 /* Reset will erase chipset stats; save them */
7341 bnx2_save_stats(bp);
7343 bnx2_netif_stop(bp, true);
7344 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
7349 __bnx2_free_irq(bp);
7355 bnx2_set_rx_ring_size(bp, rx);
7356 bp->tx_ring_size = tx;
7358 if (netif_running(bp->dev)) {
7362 rc = bnx2_setup_int_mode(bp, disable_msi);
7367 rc = bnx2_alloc_mem(bp);
7370 rc = bnx2_request_irq(bp);
7373 rc = bnx2_init_nic(bp, 0);
7376 bnx2_napi_enable(bp);
7381 mutex_lock(&bp->cnic_lock);
7382 /* Let cnic know about the new status block. */
7383 if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD)
7384 bnx2_setup_cnic_irq_info(bp);
7385 mutex_unlock(&bp->cnic_lock);
7387 bnx2_netif_start(bp, true);
7393 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7395 struct bnx2 *bp = netdev_priv(dev);
7398 if ((ering->rx_pending > BNX2_MAX_TOTAL_RX_DESC_CNT) ||
7399 (ering->tx_pending > BNX2_MAX_TX_DESC_CNT) ||
7400 (ering->tx_pending <= MAX_SKB_FRAGS)) {
7404 rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending,
7410 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7412 struct bnx2 *bp = netdev_priv(dev);
7414 epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
7415 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
7416 epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
7420 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7422 struct bnx2 *bp = netdev_priv(dev);
7424 bp->req_flow_ctrl = 0;
7425 if (epause->rx_pause)
7426 bp->req_flow_ctrl |= FLOW_CTRL_RX;
7427 if (epause->tx_pause)
7428 bp->req_flow_ctrl |= FLOW_CTRL_TX;
7430 if (epause->autoneg) {
7431 bp->autoneg |= AUTONEG_FLOW_CTRL;
7434 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
7437 if (netif_running(dev)) {
7438 spin_lock_bh(&bp->phy_lock);
7439 bnx2_setup_phy(bp, bp->phy_port);
7440 spin_unlock_bh(&bp->phy_lock);
7447 char string[ETH_GSTRING_LEN];
7448 } bnx2_stats_str_arr[] = {
7450 { "rx_error_bytes" },
7452 { "tx_error_bytes" },
7453 { "rx_ucast_packets" },
7454 { "rx_mcast_packets" },
7455 { "rx_bcast_packets" },
7456 { "tx_ucast_packets" },
7457 { "tx_mcast_packets" },
7458 { "tx_bcast_packets" },
7459 { "tx_mac_errors" },
7460 { "tx_carrier_errors" },
7461 { "rx_crc_errors" },
7462 { "rx_align_errors" },
7463 { "tx_single_collisions" },
7464 { "tx_multi_collisions" },
7466 { "tx_excess_collisions" },
7467 { "tx_late_collisions" },
7468 { "tx_total_collisions" },
7471 { "rx_undersize_packets" },
7472 { "rx_oversize_packets" },
7473 { "rx_64_byte_packets" },
7474 { "rx_65_to_127_byte_packets" },
7475 { "rx_128_to_255_byte_packets" },
7476 { "rx_256_to_511_byte_packets" },
7477 { "rx_512_to_1023_byte_packets" },
7478 { "rx_1024_to_1522_byte_packets" },
7479 { "rx_1523_to_9022_byte_packets" },
7480 { "tx_64_byte_packets" },
7481 { "tx_65_to_127_byte_packets" },
7482 { "tx_128_to_255_byte_packets" },
7483 { "tx_256_to_511_byte_packets" },
7484 { "tx_512_to_1023_byte_packets" },
7485 { "tx_1024_to_1522_byte_packets" },
7486 { "tx_1523_to_9022_byte_packets" },
7487 { "rx_xon_frames" },
7488 { "rx_xoff_frames" },
7489 { "tx_xon_frames" },
7490 { "tx_xoff_frames" },
7491 { "rx_mac_ctrl_frames" },
7492 { "rx_filtered_packets" },
7493 { "rx_ftq_discards" },
7495 { "rx_fw_discards" },
7498 #define BNX2_NUM_STATS ARRAY_SIZE(bnx2_stats_str_arr)
7500 #define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
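/* The string, offset, and per-chip length arrays below are indexed in
 * lockstep: entry i of bnx2_stats_offset_arr gives the 32-bit word offset
 * of the counter named by bnx2_stats_str_arr[i], and the *_stats_len_arr
 * entries give its width in bytes (0 means the counter is not reported on
 * that chip).
 */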
7502 static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
7503 STATS_OFFSET32(stat_IfHCInOctets_hi),
7504 STATS_OFFSET32(stat_IfHCInBadOctets_hi),
7505 STATS_OFFSET32(stat_IfHCOutOctets_hi),
7506 STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
7507 STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
7508 STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
7509 STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
7510 STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
7511 STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
7512 STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
7513 STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
7514 STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
7515 STATS_OFFSET32(stat_Dot3StatsFCSErrors),
7516 STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
7517 STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
7518 STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
7519 STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
7520 STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
7521 STATS_OFFSET32(stat_Dot3StatsLateCollisions),
7522 STATS_OFFSET32(stat_EtherStatsCollisions),
7523 STATS_OFFSET32(stat_EtherStatsFragments),
7524 STATS_OFFSET32(stat_EtherStatsJabbers),
7525 STATS_OFFSET32(stat_EtherStatsUndersizePkts),
7526 STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
7527 STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
7528 STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
7529 STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
7530 STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
7531 STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
7532 STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
7533 STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
7534 STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
7535 STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
7536 STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
7537 STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
7538 STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
7539 STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
7540 STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
7541 STATS_OFFSET32(stat_XonPauseFramesReceived),
7542 STATS_OFFSET32(stat_XoffPauseFramesReceived),
7543 STATS_OFFSET32(stat_OutXonSent),
7544 STATS_OFFSET32(stat_OutXoffSent),
7545 STATS_OFFSET32(stat_MacControlFramesReceived),
7546 STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
7547 STATS_OFFSET32(stat_IfInFTQDiscards),
7548 STATS_OFFSET32(stat_IfInMBUFDiscards),
7549 STATS_OFFSET32(stat_FwRxDrop),
7552 /* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
7553 * skipped because of errata.
7555 static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
7556 8,0,8,8,8,8,8,8,8,8,
7557 4,0,4,4,4,4,4,4,4,4,
7558 4,4,4,4,4,4,4,4,4,4,
7559 4,4,4,4,4,4,4,4,4,4,
7563 static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
7564 8,0,8,8,8,8,8,8,8,8,
7565 4,4,4,4,4,4,4,4,4,4,
7566 4,4,4,4,4,4,4,4,4,4,
7567 4,4,4,4,4,4,4,4,4,4,
7571 #define BNX2_NUM_TESTS 6
7574 char string[ETH_GSTRING_LEN];
7575 } bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
7576 { "register_test (offline)" },
7577 { "memory_test (offline)" },
7578 { "loopback_test (offline)" },
7579 { "nvram_test (online)" },
7580 { "interrupt_test (online)" },
7581 { "link_test (online)" },
7585 bnx2_get_sset_count(struct net_device *dev, int sset)
7589 return BNX2_NUM_TESTS;
7591 return BNX2_NUM_STATS;
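/* ethtool self-test.  The offline tests (register, memory, loopback)
 * require resetting the chip into diagnostic mode, so the NIC is stopped
 * and later re-initialized; the online tests (NVRAM, interrupt, link) run
 * against the live device.
 */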
7598 bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
7600 struct bnx2 *bp = netdev_priv(dev);
7602 memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
7603 if (etest->flags & ETH_TEST_FL_OFFLINE) {
7606 bnx2_netif_stop(bp, true);
7607 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
7610 if (bnx2_test_registers(bp) != 0) {
7612 etest->flags |= ETH_TEST_FL_FAILED;
7614 if (bnx2_test_memory(bp) != 0) {
7616 etest->flags |= ETH_TEST_FL_FAILED;
7618 if ((buf[2] = bnx2_test_loopback(bp)) != 0)
7619 etest->flags |= ETH_TEST_FL_FAILED;
7621 if (!netif_running(bp->dev))
7622 bnx2_shutdown_chip(bp);
7624 bnx2_init_nic(bp, 1);
7625 bnx2_netif_start(bp, true);
7628 /* wait for link up */
7629 for (i = 0; i < 7; i++) {
7632 msleep_interruptible(1000);
7636 if (bnx2_test_nvram(bp) != 0) {
7638 etest->flags |= ETH_TEST_FL_FAILED;
7640 if (bnx2_test_intr(bp) != 0) {
7642 etest->flags |= ETH_TEST_FL_FAILED;
7645 if (bnx2_test_link(bp) != 0) {
7647 etest->flags |= ETH_TEST_FL_FAILED;
7653 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
7655 switch (stringset) {
7657 memcpy(buf, bnx2_stats_str_arr,
7658 sizeof(bnx2_stats_str_arr));
7661 memcpy(buf, bnx2_tests_str_arr,
7662 sizeof(bnx2_tests_str_arr));
7668 bnx2_get_ethtool_stats(struct net_device *dev,
7669 struct ethtool_stats *stats, u64 *buf)
7671 struct bnx2 *bp = netdev_priv(dev);
7673 u32 *hw_stats = (u32 *) bp->stats_blk;
7674 u32 *temp_stats = (u32 *) bp->temp_stats_blk;
7675 u8 *stats_len_arr = NULL;
7677 if (hw_stats == NULL) {
7678 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
7682 if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) ||
7683 (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1) ||
7684 (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A2) ||
7685 (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0))
7686 stats_len_arr = bnx2_5706_stats_len_arr;
7688 stats_len_arr = bnx2_5708_stats_len_arr;
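/* Each counter is either a single 32-bit word or a 64-bit value stored as
 * two 32-bit words with the high word first.  The values in temp_stats_blk
 * (saved before the last chip reset) are added in so the reported totals
 * include traffic counted before the reset.
 */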
7690 for (i = 0; i < BNX2_NUM_STATS; i++) {
7691 unsigned long offset;
7693 if (stats_len_arr[i] == 0) {
7694 /* skip this counter */
7699 offset = bnx2_stats_offset_arr[i];
7700 if (stats_len_arr[i] == 4) {
7701 /* 4-byte counter */
7702 buf[i] = (u64) *(hw_stats + offset) +
7703 *(temp_stats + offset);
7706 /* 8-byte counter */
7707 buf[i] = (((u64) *(hw_stats + offset)) << 32) +
7708 *(hw_stats + offset + 1) +
7709 (((u64) *(temp_stats + offset)) << 32) +
7710 *(temp_stats + offset + 1);
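/* ethtool "identify" LED blinking: switch the LED mode to MAC control,
 * drive the speed/traffic LED overrides on or off once per second, and
 * restore the saved BNX2_MISC_CFG value when identification ends.
 */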
7715 bnx2_set_phys_id(struct net_device *dev, enum ethtool_phys_id_state state)
7717 struct bnx2 *bp = netdev_priv(dev);
7720 case ETHTOOL_ID_ACTIVE:
7721 bp->leds_save = BNX2_RD(bp, BNX2_MISC_CFG);
7722 BNX2_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
7723 return 1; /* cycle on/off once per second */
7726 BNX2_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
7727 BNX2_EMAC_LED_1000MB_OVERRIDE |
7728 BNX2_EMAC_LED_100MB_OVERRIDE |
7729 BNX2_EMAC_LED_10MB_OVERRIDE |
7730 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
7731 BNX2_EMAC_LED_TRAFFIC);
7734 case ETHTOOL_ID_OFF:
7735 BNX2_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
7738 case ETHTOOL_ID_INACTIVE:
7739 BNX2_WR(bp, BNX2_EMAC_LED, 0);
7740 BNX2_WR(bp, BNX2_MISC_CFG, bp->leds_save);
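/* ndo_set_features handler.  TSO is left enabled on VLAN devices only when
 * the hardware inserts the tag (CTAG_TX offload on), since the firmware
 * cannot do TSO on frames that already carry a VLAN tag.  Toggling RX VLAN
 * stripping requires reprogramming the RX mode and telling the firmware
 * whether to keep tags, so the NIC is briefly stopped.
 */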
7748 bnx2_set_features(struct net_device *dev, netdev_features_t features)
7750 struct bnx2 *bp = netdev_priv(dev);
7752 /* TSO with VLAN tag won't work with current firmware */
7753 if (features & NETIF_F_HW_VLAN_CTAG_TX)
7754 dev->vlan_features |= (dev->hw_features & NETIF_F_ALL_TSO);
7756 dev->vlan_features &= ~NETIF_F_ALL_TSO;
7758 if ((!!(features & NETIF_F_HW_VLAN_CTAG_RX) !=
7759 !!(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) &&
7760 netif_running(dev)) {
7761 bnx2_netif_stop(bp, false);
7762 dev->features = features;
7763 bnx2_set_rx_mode(dev);
7764 bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);
7765 bnx2_netif_start(bp, false);
7772 static void bnx2_get_channels(struct net_device *dev,
7773 struct ethtool_channels *channels)
7775 struct bnx2 *bp = netdev_priv(dev);
7776 u32 max_rx_rings = 1;
7777 u32 max_tx_rings = 1;
7779 if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !disable_msi) {
7780 max_rx_rings = RX_MAX_RINGS;
7781 max_tx_rings = TX_MAX_RINGS;
7784 channels->max_rx = max_rx_rings;
7785 channels->max_tx = max_tx_rings;
7786 channels->max_other = 0;
7787 channels->max_combined = 0;
7788 channels->rx_count = bp->num_rx_rings;
7789 channels->tx_count = bp->num_tx_rings;
7790 channels->other_count = 0;
7791 channels->combined_count = 0;
7794 static int bnx2_set_channels(struct net_device *dev,
7795 struct ethtool_channels *channels)
7797 struct bnx2 *bp = netdev_priv(dev);
7798 u32 max_rx_rings = 1;
7799 u32 max_tx_rings = 1;
7802 if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !disable_msi) {
7803 max_rx_rings = RX_MAX_RINGS;
7804 max_tx_rings = TX_MAX_RINGS;
7806 if (channels->rx_count > max_rx_rings ||
7807 channels->tx_count > max_tx_rings)
7810 bp->num_req_rx_rings = channels->rx_count;
7811 bp->num_req_tx_rings = channels->tx_count;
7813 if (netif_running(dev))
7814 rc = bnx2_change_ring_size(bp, bp->rx_ring_size,
7815 bp->tx_ring_size, true);
7820 static const struct ethtool_ops bnx2_ethtool_ops = {
7821 .get_settings = bnx2_get_settings,
7822 .set_settings = bnx2_set_settings,
7823 .get_drvinfo = bnx2_get_drvinfo,
7824 .get_regs_len = bnx2_get_regs_len,
7825 .get_regs = bnx2_get_regs,
7826 .get_wol = bnx2_get_wol,
7827 .set_wol = bnx2_set_wol,
7828 .nway_reset = bnx2_nway_reset,
7829 .get_link = bnx2_get_link,
7830 .get_eeprom_len = bnx2_get_eeprom_len,
7831 .get_eeprom = bnx2_get_eeprom,
7832 .set_eeprom = bnx2_set_eeprom,
7833 .get_coalesce = bnx2_get_coalesce,
7834 .set_coalesce = bnx2_set_coalesce,
7835 .get_ringparam = bnx2_get_ringparam,
7836 .set_ringparam = bnx2_set_ringparam,
7837 .get_pauseparam = bnx2_get_pauseparam,
7838 .set_pauseparam = bnx2_set_pauseparam,
7839 .self_test = bnx2_self_test,
7840 .get_strings = bnx2_get_strings,
7841 .set_phys_id = bnx2_set_phys_id,
7842 .get_ethtool_stats = bnx2_get_ethtool_stats,
7843 .get_sset_count = bnx2_get_sset_count,
7844 .get_channels = bnx2_get_channels,
7845 .set_channels = bnx2_set_channels,
7848 /* Called with rtnl_lock */
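/* MII ioctl handler: reports the PHY address and reads/writes PHY registers
 * under phy_lock.  Register access is refused when the PHY is managed by
 * remote firmware (BNX2_PHY_FLAG_REMOTE_PHY_CAP) or the interface is down.
 */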
7850 bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
7852 struct mii_ioctl_data *data = if_mii(ifr);
7853 struct bnx2 *bp = netdev_priv(dev);
7858 data->phy_id = bp->phy_addr;
7864 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
7867 if (!netif_running(dev))
7870 spin_lock_bh(&bp->phy_lock);
7871 err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
7872 spin_unlock_bh(&bp->phy_lock);
7874 data->val_out = mii_regval;
7880 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
7883 if (!netif_running(dev))
7886 spin_lock_bh(&bp->phy_lock);
7887 err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
7888 spin_unlock_bh(&bp->phy_lock);
7899 /* Called with rtnl_lock */
7901 bnx2_change_mac_addr(struct net_device *dev, void *p)
7903 struct sockaddr *addr = p;
7904 struct bnx2 *bp = netdev_priv(dev);
7906 if (!is_valid_ether_addr(addr->sa_data))
7907 return -EADDRNOTAVAIL;
7909 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7910 if (netif_running(dev))
7911 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
7916 /* Called with rtnl_lock */
7918 bnx2_change_mtu(struct net_device *dev, int new_mtu)
7920 struct bnx2 *bp = netdev_priv(dev);
7922 if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
7923 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
7927 return bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size,
7931 #ifdef CONFIG_NET_POLL_CONTROLLER
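/* Netpoll entry point: with each vector's IRQ line disabled, invoke its
 * interrupt handler directly so the stack can still send and receive (e.g.
 * for netconsole) without relying on hardware interrupt delivery.
 */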
7933 poll_bnx2(struct net_device *dev)
7935 struct bnx2 *bp = netdev_priv(dev);
7938 for (i = 0; i < bp->irq_nvecs; i++) {
7939 struct bnx2_irq *irq = &bp->irq_tbl[i];
7941 disable_irq(irq->vector);
7942 irq->handler(irq->vector, &bp->bnx2_napi[i]);
7943 enable_irq(irq->vector);
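/* Determine the media type of a 5709-family device.  A bond ID of
 * ..._BOND_ID_C or ..._BOND_ID_S in the dual-media control register forces
 * copper or SerDes respectively; otherwise the PHY strap (or its override)
 * selects the media for this PCI function.
 */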
7949 bnx2_get_5709_media(struct bnx2 *bp)
7951 u32 val = BNX2_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
7952 u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
7955 if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
7957 else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
7958 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7962 if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
7963 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
7965 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
7967 if (bp->func == 0) {
7972 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7980 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
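/* Decode the bus type and clock from BNX2_PCICFG_MISC_STATUS and the
 * PCI clock-control register: set BNX2_FLAG_PCIX and bus_speed_mhz for
 * PCI-X buses, 33/66 MHz for conventional PCI, and BNX2_FLAG_PCI_32BIT
 * when the 32-bit detect bit is set.
 */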
7987 bnx2_get_pci_speed(struct bnx2 *bp)
7991 reg = BNX2_RD(bp, BNX2_PCICFG_MISC_STATUS);
7992 if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
7995 bp->flags |= BNX2_FLAG_PCIX;
7997 clkreg = BNX2_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
7999 clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
8001 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
8002 bp->bus_speed_mhz = 133;
8005 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
8006 bp->bus_speed_mhz = 100;
8009 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
8010 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
8011 bp->bus_speed_mhz = 66;
8014 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
8015 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
8016 bp->bus_speed_mhz = 50;
8019 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
8020 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
8021 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
8022 bp->bus_speed_mhz = 33;
8027 if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
8028 bp->bus_speed_mhz = 66;
8030 bp->bus_speed_mhz = 33;
8033 if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
8034 bp->flags |= BNX2_FLAG_PCI_32BIT;
8039 bnx2_read_vpd_fw_ver(struct bnx2 *bp)
8043 unsigned int block_end, rosize, len;
8045 #define BNX2_VPD_NVRAM_OFFSET 0x300
8046 #define BNX2_VPD_LEN 128
8047 #define BNX2_MAX_VER_SLEN 30
8049 data = kmalloc(256, GFP_KERNEL);
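/* Read the 128-byte VPD image from NVRAM into the upper half of the
 * buffer, undo the byte swap within each 32-bit word while copying it
 * down to the start of the buffer, then parse the read-only VPD section:
 * check the manufacturer ID keyword and copy the vendor-specific version
 * string into bp->fw_version.
 */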
8053 rc = bnx2_nvram_read(bp, BNX2_VPD_NVRAM_OFFSET, data + BNX2_VPD_LEN,
8058 for (i = 0; i < BNX2_VPD_LEN; i += 4) {
8059 data[i] = data[i + BNX2_VPD_LEN + 3];
8060 data[i + 1] = data[i + BNX2_VPD_LEN + 2];
8061 data[i + 2] = data[i + BNX2_VPD_LEN + 1];
8062 data[i + 3] = data[i + BNX2_VPD_LEN];
8065 i = pci_vpd_find_tag(data, 0, BNX2_VPD_LEN, PCI_VPD_LRDT_RO_DATA);
8069 rosize = pci_vpd_lrdt_size(&data[i]);
8070 i += PCI_VPD_LRDT_TAG_SIZE;
8071 block_end = i + rosize;
8073 if (block_end > BNX2_VPD_LEN)
8076 j = pci_vpd_find_info_keyword(data, i, rosize,
8077 PCI_VPD_RO_KEYWORD_MFR_ID);
8081 len = pci_vpd_info_field_size(&data[j]);
8083 j += PCI_VPD_INFO_FLD_HDR_SIZE;
8084 if (j + len > block_end || len != 4 ||
8085 memcmp(&data[j], "1028", 4))
8088 j = pci_vpd_find_info_keyword(data, i, rosize,
8089 PCI_VPD_RO_KEYWORD_VENDOR0);
8093 len = pci_vpd_info_field_size(&data[j]);
8095 j += PCI_VPD_INFO_FLD_HDR_SIZE;
8096 if (j + len > block_end || len > BNX2_MAX_VER_SLEN)
8099 memcpy(bp->fw_version, &data[j], len);
8100 bp->fw_version[len] = ' ';
8107 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
8112 u64 dma_mask, persist_dma_mask;
8115 SET_NETDEV_DEV(dev, &pdev->dev);
8116 bp = netdev_priv(dev);
8121 bp->temp_stats_blk =
8122 kzalloc(sizeof(struct statistics_block), GFP_KERNEL);
8124 if (bp->temp_stats_blk == NULL) {
8129 /* enable device (incl. PCI PM wakeup), and bus-mastering */
8130 rc = pci_enable_device(pdev);
8132 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
8136 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
8138 "Cannot find PCI device base address, aborting\n");
8140 goto err_out_disable;
8143 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
8145 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
8146 goto err_out_disable;
8149 pci_set_master(pdev);
8151 bp->pm_cap = pdev->pm_cap;
8152 if (bp->pm_cap == 0) {
8154 "Cannot find power management capability, aborting\n");
8156 goto err_out_release;
8162 spin_lock_init(&bp->phy_lock);
8163 spin_lock_init(&bp->indirect_lock);
8165 mutex_init(&bp->cnic_lock);
8167 INIT_WORK(&bp->reset_task, bnx2_reset_task);
8169 bp->regview = pci_iomap(pdev, 0, MB_GET_CID_ADDR(TX_TSS_CID +
8170 TX_MAX_TSS_RINGS + 1));
8172 dev_err(&pdev->dev, "Cannot map register space, aborting\n");
8174 goto err_out_release;
8177 /* Configure byte swap and enable write to the reg_window registers.
8178 * Rely on the CPU to do target byte swapping on big-endian systems;
8179 * the chip's target access swapping will not swap all accesses.
8181 BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG,
8182 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
8183 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
8185 bp->chip_id = BNX2_RD(bp, BNX2_MISC_ID);
8187 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
8188 if (!pci_is_pcie(pdev)) {
8189 dev_err(&pdev->dev, "Not PCIE, aborting\n");
8193 bp->flags |= BNX2_FLAG_PCIE;
8194 if (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax)
8195 bp->flags |= BNX2_FLAG_JUMBO_BROKEN;
8197 /* AER (Advanced Error Reporting) hooks */
8198 err = pci_enable_pcie_error_reporting(pdev);
8200 bp->flags |= BNX2_FLAG_AER_ENABLED;
8203 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
8204 if (bp->pcix_cap == 0) {
8206 "Cannot find PCIX capability, aborting\n");
8210 bp->flags |= BNX2_FLAG_BROKEN_STATS;
8213 if (BNX2_CHIP(bp) == BNX2_CHIP_5709 &&
8214 BNX2_CHIP_REV(bp) != BNX2_CHIP_REV_Ax) {
8216 bp->flags |= BNX2_FLAG_MSIX_CAP;
8219 if (BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A0 &&
8220 BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A1) {
8222 bp->flags |= BNX2_FLAG_MSI_CAP;
8225 /* 5708 cannot support DMA addresses > 40-bit. */
8226 if (BNX2_CHIP(bp) == BNX2_CHIP_5708)
8227 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
8229 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
8231 /* Configure DMA attributes. */
8232 if (pci_set_dma_mask(pdev, dma_mask) == 0) {
8233 dev->features |= NETIF_F_HIGHDMA;
8234 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
8237 "pci_set_consistent_dma_mask failed, aborting\n");
8240 } else if ((rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
8241 dev_err(&pdev->dev, "System does not support DMA, aborting\n");
8245 if (!(bp->flags & BNX2_FLAG_PCIE))
8246 bnx2_get_pci_speed(bp);
8248 /* 5706A0 may falsely detect SERR and PERR. */
8249 if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
8250 reg = BNX2_RD(bp, PCI_COMMAND);
8251 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
8252 BNX2_WR(bp, PCI_COMMAND, reg);
8253 } else if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1) &&
8254 !(bp->flags & BNX2_FLAG_PCIX)) {
8257 "5706 A1 can only be used in a PCIX bus, aborting\n");
8261 bnx2_init_nvram(bp);
8263 reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);
8265 if (bnx2_reg_rd_ind(bp, BNX2_MCP_TOE_ID) & BNX2_MCP_TOE_ID_FUNCTION_ID)
8268 if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
8269 BNX2_SHM_HDR_SIGNATURE_SIG) {
8270 u32 off = bp->func << 2;
8272 bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
8274 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
8276 /* Get the permanent MAC address. First we need to make sure the
8277 * firmware is actually running.
8279 reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE);
8281 if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
8282 BNX2_DEV_INFO_SIGNATURE_MAGIC) {
8283 dev_err(&pdev->dev, "Firmware not running, aborting\n");
8288 bnx2_read_vpd_fw_ver(bp);
8290 j = strlen(bp->fw_version);
8291 reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV);
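/* Append the bootcode version as "bc x.y.z": each of the top three bytes
 * of BNX2_DEV_INFO_BC_REV holds one decimal field, printed without
 * leading zeros.
 */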
8292 for (i = 0; i < 3 && j < 24; i++) {
8296 bp->fw_version[j++] = 'b';
8297 bp->fw_version[j++] = 'c';
8298 bp->fw_version[j++] = ' ';
8300 num = (u8) (reg >> (24 - (i * 8)));
8301 for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
8302 if (num >= k || !skip0 || k == 1) {
8303 bp->fw_version[j++] = (num / k) + '0';
8308 bp->fw_version[j++] = '.';
8310 reg = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
8311 if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
8314 if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
8315 bp->flags |= BNX2_FLAG_ASF_ENABLE;
8317 for (i = 0; i < 30; i++) {
8318 reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
8319 if (reg & BNX2_CONDITION_MFW_RUN_MASK)
8324 reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
8325 reg &= BNX2_CONDITION_MFW_RUN_MASK;
8326 if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
8327 reg != BNX2_CONDITION_MFW_RUN_NONE) {
8328 u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);
8331 bp->fw_version[j++] = ' ';
8332 for (i = 0; i < 3 && j < 28; i++) {
8333 reg = bnx2_reg_rd_ind(bp, addr + i * 4);
8334 reg = be32_to_cpu(reg);
8335 memcpy(&bp->fw_version[j], &reg, 4);
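/* The permanent MAC address lives in shared memory: the upper two bytes in
 * BNX2_PORT_HW_CFG_MAC_UPPER and the lower four bytes in
 * BNX2_PORT_HW_CFG_MAC_LOWER, most significant byte first.
 */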
8340 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_UPPER);
8341 bp->mac_addr[0] = (u8) (reg >> 8);
8342 bp->mac_addr[1] = (u8) reg;
8344 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_LOWER);
8345 bp->mac_addr[2] = (u8) (reg >> 24);
8346 bp->mac_addr[3] = (u8) (reg >> 16);
8347 bp->mac_addr[4] = (u8) (reg >> 8);
8348 bp->mac_addr[5] = (u8) reg;
8350 bp->tx_ring_size = BNX2_MAX_TX_DESC_CNT;
8351 bnx2_set_rx_ring_size(bp, 255);
8353 bp->tx_quick_cons_trip_int = 2;
8354 bp->tx_quick_cons_trip = 20;
8355 bp->tx_ticks_int = 18;
8358 bp->rx_quick_cons_trip_int = 2;
8359 bp->rx_quick_cons_trip = 12;
8360 bp->rx_ticks_int = 18;
8363 bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
8365 bp->current_interval = BNX2_TIMER_INTERVAL;
8369 /* allocate stats_blk */
8370 rc = bnx2_alloc_stats_blk(dev);
8374 /* Disable WOL support if we are running on a SERDES chip. */
8375 if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
8376 bnx2_get_5709_media(bp);
8377 else if (BNX2_CHIP_BOND(bp) & BNX2_CHIP_BOND_SERDES_BIT)
8378 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
8380 bp->phy_port = PORT_TP;
8381 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
8382 bp->phy_port = PORT_FIBRE;
8383 reg = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
8384 if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
8385 bp->flags |= BNX2_FLAG_NO_WOL;
8388 if (BNX2_CHIP(bp) == BNX2_CHIP_5706) {
8389 /* Don't do parallel detect on this board because of
8390 * some board problems. The link will not go down
8391 * if we do parallel detect.
8393 if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
8394 pdev->subsystem_device == 0x310c)
8395 bp->phy_flags |= BNX2_PHY_FLAG_NO_PARALLEL;
8398 if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
8399 bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
8401 } else if (BNX2_CHIP(bp) == BNX2_CHIP_5706 ||
8402 BNX2_CHIP(bp) == BNX2_CHIP_5708)
8403 bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX;
8404 else if (BNX2_CHIP(bp) == BNX2_CHIP_5709 &&
8405 (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax ||
8406 BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Bx))
8407 bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC;
8409 bnx2_init_fw_cap(bp);
8411 if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0) ||
8412 (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B0) ||
8413 (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B1) ||
8414 !(BNX2_RD(bp, BNX2_PCI_CONFIG_3) & BNX2_PCI_CONFIG_3_VAUX_PRESET)) {
8415 bp->flags |= BNX2_FLAG_NO_WOL;
8419 if (bp->flags & BNX2_FLAG_NO_WOL)
8420 device_set_wakeup_capable(&bp->pdev->dev, false);
8422 device_set_wakeup_enable(&bp->pdev->dev, bp->wol);
8424 if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
8425 bp->tx_quick_cons_trip_int =
8426 bp->tx_quick_cons_trip;
8427 bp->tx_ticks_int = bp->tx_ticks;
8428 bp->rx_quick_cons_trip_int =
8429 bp->rx_quick_cons_trip;
8430 bp->rx_ticks_int = bp->rx_ticks;
8431 bp->comp_prod_trip_int = bp->comp_prod_trip;
8432 bp->com_ticks_int = bp->com_ticks;
8433 bp->cmd_ticks_int = bp->cmd_ticks;
8436 /* Disable MSI on 5706 if AMD 8132 bridge is found.
8438 * MSI is defined to be 32-bit write. The 5706 does 64-bit MSI writes
8439 * with byte enables disabled on the unused 32-bit word. This is legal
8440 * but causes problems on the AMD 8132 which will eventually stop
8441 * responding after a while.
8443 * AMD believes this incompatibility is unique to the 5706, and
8444 * prefers to locally disable MSI rather than globally disabling it.
8446 if (BNX2_CHIP(bp) == BNX2_CHIP_5706 && disable_msi == 0) {
8447 struct pci_dev *amd_8132 = NULL;
8449 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
8450 PCI_DEVICE_ID_AMD_8132_BRIDGE,
8453 if (amd_8132->revision >= 0x10 &&
8454 amd_8132->revision <= 0x13) {
8456 pci_dev_put(amd_8132);
8462 bnx2_set_default_link(bp);
8463 bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
8465 init_timer(&bp->timer);
8466 bp->timer.expires = RUN_AT(BNX2_TIMER_INTERVAL);
8467 bp->timer.data = (unsigned long) bp;
8468 bp->timer.function = bnx2_timer;
8471 if (bnx2_shmem_rd(bp, BNX2_ISCSI_INITIATOR) & BNX2_ISCSI_INITIATOR_EN)
8472 bp->cnic_eth_dev.max_iscsi_conn =
8473 (bnx2_shmem_rd(bp, BNX2_ISCSI_MAX_CONN) &
8474 BNX2_ISCSI_MAX_CONN_MASK) >> BNX2_ISCSI_MAX_CONN_SHIFT;
8475 bp->cnic_probe = bnx2_cnic_probe;
8477 pci_save_state(pdev);
8482 if (bp->flags & BNX2_FLAG_AER_ENABLED) {
8483 pci_disable_pcie_error_reporting(pdev);
8484 bp->flags &= ~BNX2_FLAG_AER_ENABLED;
8487 pci_iounmap(pdev, bp->regview);
8491 pci_release_regions(pdev);
8494 pci_disable_device(pdev);
8497 kfree(bp->temp_stats_blk);
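/* Format a human-readable bus description (e.g. "PCI Express" or
 * "PCI-X 64-bit 133MHz") into str for the probe log message.
 */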
8503 bnx2_bus_string(struct bnx2 *bp, char *str)
8507 if (bp->flags & BNX2_FLAG_PCIE) {
8508 s += sprintf(s, "PCI Express");
8510 s += sprintf(s, "PCI");
8511 if (bp->flags & BNX2_FLAG_PCIX)
8512 s += sprintf(s, "-X");
8513 if (bp->flags & BNX2_FLAG_PCI_32BIT)
8514 s += sprintf(s, " 32-bit");
8516 s += sprintf(s, " 64-bit");
8517 s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
8523 bnx2_del_napi(struct bnx2 *bp)
8527 for (i = 0; i < bp->irq_nvecs; i++)
8528 netif_napi_del(&bp->bnx2_napi[i].napi);
8532 bnx2_init_napi(struct bnx2 *bp)
8536 for (i = 0; i < bp->irq_nvecs; i++) {
8537 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
8538 int (*poll)(struct napi_struct *, int);
8543 poll = bnx2_poll_msix;
8545 netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll, 64);
8550 static const struct net_device_ops bnx2_netdev_ops = {
8551 .ndo_open = bnx2_open,
8552 .ndo_start_xmit = bnx2_start_xmit,
8553 .ndo_stop = bnx2_close,
8554 .ndo_get_stats64 = bnx2_get_stats64,
8555 .ndo_set_rx_mode = bnx2_set_rx_mode,
8556 .ndo_do_ioctl = bnx2_ioctl,
8557 .ndo_validate_addr = eth_validate_addr,
8558 .ndo_set_mac_address = bnx2_change_mac_addr,
8559 .ndo_change_mtu = bnx2_change_mtu,
8560 .ndo_set_features = bnx2_set_features,
8561 .ndo_tx_timeout = bnx2_tx_timeout,
8562 #ifdef CONFIG_NET_POLL_CONTROLLER
8563 .ndo_poll_controller = poll_bnx2,
8568 bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
8570 static int version_printed = 0;
8571 struct net_device *dev;
8576 if (version_printed++ == 0)
8577 pr_info("%s", version);
8579 /* dev zeroed in init_etherdev */
8580 dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS);
8584 rc = bnx2_init_board(pdev, dev);
8588 dev->netdev_ops = &bnx2_netdev_ops;
8589 dev->watchdog_timeo = TX_TIMEOUT;
8590 dev->ethtool_ops = &bnx2_ethtool_ops;
8592 bp = netdev_priv(dev);
8594 pci_set_drvdata(pdev, dev);
8597 * In-flight DMA from the first kernel could still be running in the kdump kernel.
8598 * A new io-page table has been created before bnx2 resets the chip at open time.
8599 * We have to wait for the in-flight DMA to complete to keep it from looking
8600 * up entries in the newly created io-page table.
8602 if (is_kdump_kernel())
8603 bnx2_wait_dma_complete(bp);
8605 memcpy(dev->dev_addr, bp->mac_addr, ETH_ALEN);
8607 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
8608 NETIF_F_TSO | NETIF_F_TSO_ECN |
8609 NETIF_F_RXHASH | NETIF_F_RXCSUM;
8611 if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
8612 dev->hw_features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6;
8614 dev->vlan_features = dev->hw_features;
8615 dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
8616 dev->features |= dev->hw_features;
8617 dev->priv_flags |= IFF_UNICAST_FLT;
8619 if (!(bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
8620 dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_RX;
8622 if ((rc = register_netdev(dev))) {
8623 dev_err(&pdev->dev, "Cannot register net device\n");
8627 netdev_info(dev, "%s (%c%d) %s found at mem %lx, IRQ %d, "
8628 "node addr %pM\n", board_info[ent->driver_data].name,
8629 ((BNX2_CHIP_ID(bp) & 0xf000) >> 12) + 'A',
8630 ((BNX2_CHIP_ID(bp) & 0x0ff0) >> 4),
8631 bnx2_bus_string(bp, str), (long)pci_resource_start(pdev, 0),
8632 pdev->irq, dev->dev_addr);
8637 pci_iounmap(pdev, bp->regview);
8638 pci_release_regions(pdev);
8639 pci_disable_device(pdev);
8641 bnx2_free_stats_blk(dev);
8647 bnx2_remove_one(struct pci_dev *pdev)
8649 struct net_device *dev = pci_get_drvdata(pdev);
8650 struct bnx2 *bp = netdev_priv(dev);
8652 unregister_netdev(dev);
8654 del_timer_sync(&bp->timer);
8655 cancel_work_sync(&bp->reset_task);
8657 pci_iounmap(bp->pdev, bp->regview);
8659 bnx2_free_stats_blk(dev);
8660 kfree(bp->temp_stats_blk);
8662 if (bp->flags & BNX2_FLAG_AER_ENABLED) {
8663 pci_disable_pcie_error_reporting(pdev);
8664 bp->flags &= ~BNX2_FLAG_AER_ENABLED;
8667 bnx2_release_firmware(bp);
8671 pci_release_regions(pdev);
8672 pci_disable_device(pdev);
8675 #ifdef CONFIG_PM_SLEEP
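/* Suspend/resume hooks (dev_pm_ops).  Suspend cancels the reset task,
 * stops the NIC, detaches the netdev, and shuts down the chip; resume
 * restores D0 power state, re-attaches the device, and re-initializes the
 * NIC if the interface was running.
 */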
8677 bnx2_suspend(struct device *device)
8679 struct pci_dev *pdev = to_pci_dev(device);
8680 struct net_device *dev = pci_get_drvdata(pdev);
8681 struct bnx2 *bp = netdev_priv(dev);
8683 if (netif_running(dev)) {
8684 cancel_work_sync(&bp->reset_task);
8685 bnx2_netif_stop(bp, true);
8686 netif_device_detach(dev);
8687 del_timer_sync(&bp->timer);
8688 bnx2_shutdown_chip(bp);
8689 __bnx2_free_irq(bp);
8697 bnx2_resume(struct device *device)
8699 struct pci_dev *pdev = to_pci_dev(device);
8700 struct net_device *dev = pci_get_drvdata(pdev);
8701 struct bnx2 *bp = netdev_priv(dev);
8703 if (!netif_running(dev))
8706 bnx2_set_power_state(bp, PCI_D0);
8707 netif_device_attach(dev);
8708 bnx2_request_irq(bp);
8709 bnx2_init_nic(bp, 1);
8710 bnx2_netif_start(bp, true);
8714 static SIMPLE_DEV_PM_OPS(bnx2_pm_ops, bnx2_suspend, bnx2_resume);
8715 #define BNX2_PM_OPS (&bnx2_pm_ops)
8719 #define BNX2_PM_OPS NULL
8721 #endif /* CONFIG_PM_SLEEP */
8723 * bnx2_io_error_detected - called when PCI error is detected
8724 * @pdev: Pointer to PCI device
8725 * @state: The current pci connection state
8727 * This function is called after a PCI bus error affecting
8728 * this device has been detected.
8730 static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
8731 pci_channel_state_t state)
8733 struct net_device *dev = pci_get_drvdata(pdev);
8734 struct bnx2 *bp = netdev_priv(dev);
8737 netif_device_detach(dev);
8739 if (state == pci_channel_io_perm_failure) {
8741 return PCI_ERS_RESULT_DISCONNECT;
8744 if (netif_running(dev)) {
8745 bnx2_netif_stop(bp, true);
8746 del_timer_sync(&bp->timer);
8747 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
8750 pci_disable_device(pdev);
8753 /* Request a slot reset. */
8754 return PCI_ERS_RESULT_NEED_RESET;
8758 * bnx2_io_slot_reset - called after the pci bus has been reset.
8759 * @pdev: Pointer to PCI device
8761 * Restart the card from scratch, as if from a cold-boot.
8763 static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
8765 struct net_device *dev = pci_get_drvdata(pdev);
8766 struct bnx2 *bp = netdev_priv(dev);
8767 pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
8771 if (pci_enable_device(pdev)) {
8773 "Cannot re-enable PCI device after reset\n");
8775 pci_set_master(pdev);
8776 pci_restore_state(pdev);
8777 pci_save_state(pdev);
8779 if (netif_running(dev))
8780 err = bnx2_init_nic(bp, 1);
8783 result = PCI_ERS_RESULT_RECOVERED;
8786 if (result != PCI_ERS_RESULT_RECOVERED && netif_running(dev)) {
8787 bnx2_napi_enable(bp);
8792 if (!(bp->flags & BNX2_FLAG_AER_ENABLED))
8795 err = pci_cleanup_aer_uncorrect_error_status(pdev);
8798 "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
8799 err); /* non-fatal, continue */
8806 * bnx2_io_resume - called when traffic can start flowing again.
8807 * @pdev: Pointer to PCI device
8809 * This callback is called when the error recovery driver tells us that
8810 * it's OK to resume normal operation.
8812 static void bnx2_io_resume(struct pci_dev *pdev)
8814 struct net_device *dev = pci_get_drvdata(pdev);
8815 struct bnx2 *bp = netdev_priv(dev);
8818 if (netif_running(dev))
8819 bnx2_netif_start(bp, true);
8821 netif_device_attach(dev);
8825 static void bnx2_shutdown(struct pci_dev *pdev)
8827 struct net_device *dev = pci_get_drvdata(pdev);
8833 bp = netdev_priv(dev);
8838 if (netif_running(dev))
8841 if (system_state == SYSTEM_POWER_OFF)
8842 bnx2_set_power_state(bp, PCI_D3hot);
8847 static const struct pci_error_handlers bnx2_err_handler = {
8848 .error_detected = bnx2_io_error_detected,
8849 .slot_reset = bnx2_io_slot_reset,
8850 .resume = bnx2_io_resume,
8853 static struct pci_driver bnx2_pci_driver = {
8854 .name = DRV_MODULE_NAME,
8855 .id_table = bnx2_pci_tbl,
8856 .probe = bnx2_init_one,
8857 .remove = bnx2_remove_one,
8858 .driver.pm = BNX2_PM_OPS,
8859 .err_handler = &bnx2_err_handler,
8860 .shutdown = bnx2_shutdown,
8863 module_pci_driver(bnx2_pci_driver);