GNU Linux-libre 4.19.264-gnu1
[releases.git] / drivers / net / ethernet / broadcom / bnx2.c
1 /* bnx2.c: QLogic bnx2 network driver.
2  *
3  * Copyright (c) 2004-2014 Broadcom Corporation
4  * Copyright (c) 2014-2015 QLogic Corporation
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License as published by
8  * the Free Software Foundation.
9  *
10  * Written by: Michael Chan  (mchan@broadcom.com)
11  */
12
13 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14
15 #include <linux/module.h>
16 #include <linux/moduleparam.h>
17
18 #include <linux/stringify.h>
19 #include <linux/kernel.h>
20 #include <linux/timer.h>
21 #include <linux/errno.h>
22 #include <linux/ioport.h>
23 #include <linux/slab.h>
24 #include <linux/vmalloc.h>
25 #include <linux/interrupt.h>
26 #include <linux/pci.h>
27 #include <linux/netdevice.h>
28 #include <linux/etherdevice.h>
29 #include <linux/skbuff.h>
30 #include <linux/dma-mapping.h>
31 #include <linux/bitops.h>
32 #include <asm/io.h>
33 #include <asm/irq.h>
34 #include <linux/delay.h>
35 #include <asm/byteorder.h>
36 #include <asm/page.h>
37 #include <linux/time.h>
38 #include <linux/ethtool.h>
39 #include <linux/mii.h>
40 #include <linux/if.h>
41 #include <linux/if_vlan.h>
42 #include <net/ip.h>
43 #include <net/tcp.h>
44 #include <net/checksum.h>
45 #include <linux/workqueue.h>
46 #include <linux/crc32.h>
47 #include <linux/prefetch.h>
48 #include <linux/cache.h>
49 #include <linux/firmware.h>
50 #include <linux/log2.h>
51 #include <linux/aer.h>
52 #include <linux/crash_dump.h>
53
54 #if IS_ENABLED(CONFIG_CNIC)
55 #define BCM_CNIC 1
56 #include "cnic_if.h"
57 #endif
58 #include "bnx2.h"
59 #include "bnx2_fw.h"
60
/* Driver identity strings and firmware file names.
 * NOTE(review): the FW_* names appear to have been blanked by the GNU
 * Linux-libre "deblob" process (see the (DEBLOBBED) markers) — confirm
 * against the upstream kernel if firmware loading is needed.
 */
61 #define DRV_MODULE_NAME         "bnx2"
62 #define DRV_MODULE_VERSION      "2.2.6"
63 #define DRV_MODULE_RELDATE      "January 29, 2014"
64 #define FW_MIPS_FILE_06         "/*(DEBLOBBED)*/"
65 #define FW_RV2P_FILE_06         "/*(DEBLOBBED)*/"
66 #define FW_MIPS_FILE_09         "/*(DEBLOBBED)*/"
67 #define FW_RV2P_FILE_09_Ax      "/*(DEBLOBBED)*/"
68 #define FW_RV2P_FILE_09         "/*(DEBLOBBED)*/"
69
70 #define RUN_AT(x) (jiffies + (x))
71
72 /* Time in jiffies before concluding the transmitter is hung. */
73 #define TX_TIMEOUT  (5*HZ)
74
75 static char version[] =
76         "QLogic " DRV_MODULE_NAME " Gigabit Ethernet Driver v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
77
78 MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
79 MODULE_DESCRIPTION("QLogic BCM5706/5708/5709/5716 Driver");
80 MODULE_LICENSE("GPL");
81 MODULE_VERSION(DRV_MODULE_VERSION);
82 /*(DEBLOBBED)*/
83
/* Module parameter: set disable_msi=1 to force legacy INTx interrupts.
 * Read-only via sysfs (mode 0444).
 */
84 static int disable_msi = 0;
85
86 module_param(disable_msi, int, 0444);
87 MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
88
/* Supported board variants.  The enumerator order MUST match the row
 * order of board_info[] below and the driver_data values in
 * bnx2_pci_tbl, since the enum value is used as an index.
 */
89 typedef enum {
90         BCM5706 = 0,
91         NC370T,
92         NC370I,
93         BCM5706S,
94         NC370F,
95         BCM5708,
96         BCM5708S,
97         BCM5709,
98         BCM5709S,
99         BCM5716,
100         BCM5716S,
101 } board_t;
102
103 /* indexed by board_t, above */
104 static struct {
105         char *name;
106 } board_info[] = {
107         { "Broadcom NetXtreme II BCM5706 1000Base-T" },
108         { "HP NC370T Multifunction Gigabit Server Adapter" },
109         { "HP NC370i Multifunction Gigabit Server Adapter" },
110         { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
111         { "HP NC370F Multifunction Gigabit Server Adapter" },
112         { "Broadcom NetXtreme II BCM5708 1000Base-T" },
113         { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
114         { "Broadcom NetXtreme II BCM5709 1000Base-T" },
115         { "Broadcom NetXtreme II BCM5709 1000Base-SX" },
116         { "Broadcom NetXtreme II BCM5716 1000Base-T" },
117         { "Broadcom NetXtreme II BCM5716 1000Base-SX" },
118         };
119
/* PCI ID match table.  The last field (driver_data) is a board_t index
 * into board_info[].  HP OEM entries (specific subsystem vendor/device)
 * are listed before the catch-all PCI_ANY_ID rows for the same chip so
 * that they match first.
 */
120 static const struct pci_device_id bnx2_pci_tbl[] = {
121         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
122           PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
123         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
124           PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
125         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
126           PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
127         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
128           PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
129         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
130           PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
131         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
132           PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
133         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
134           PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
135         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
136           PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
137         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
138           PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
139         { PCI_VENDOR_ID_BROADCOM, 0x163b,
140           PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716 },
141         { PCI_VENDOR_ID_BROADCOM, 0x163c,
142           PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716S },
143         { 0, }
144 };
145
/* NVRAM (flash/EEPROM) descriptor table.  Each entry's first word is a
 * strapping value used to identify the part at probe time; the remaining
 * fields describe access registers, geometry, and a human-readable name.
 * "Expansion entry" rows are placeholders for strapping codes that have
 * no known part.
 */
146 static const struct flash_spec flash_table[] =
147 {
148 #define BUFFERED_FLAGS          (BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
149 #define NONBUFFERED_FLAGS       (BNX2_NV_WREN)
150         /* Slow EEPROM */
151         {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
152          BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
153          SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
154          "EEPROM - slow"},
155         /* Expansion entry 0001 */
156         {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
157          NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
158          SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
159          "Entry 0001"},
160         /* Saifun SA25F010 (non-buffered flash) */
161         /* strap, cfg1, & write1 need updates */
162         {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
163          NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
164          SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
165          "Non-buffered flash (128kB)"},
166         /* Saifun SA25F020 (non-buffered flash) */
167         /* strap, cfg1, & write1 need updates */
168         {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
169          NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
170          SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
171          "Non-buffered flash (256kB)"},
172         /* Expansion entry 0100 */
173         {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
174          NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
175          SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
176          "Entry 0100"},
177         /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
178         {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
179          NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
180          ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
181          "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
182         /* Entry 0110: ST M45PE20 (non-buffered flash)*/
183         {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
184          NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
185          ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
186          "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
187         /* Saifun SA25F005 (non-buffered flash) */
188         /* strap, cfg1, & write1 need updates */
189         {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
190          NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
191          SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
192          "Non-buffered flash (64kB)"},
193         /* Fast EEPROM */
194         {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
195          BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
196          SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
197          "EEPROM - fast"},
198         /* Expansion entry 1001 */
199         {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
200          NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
201          SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
202          "Entry 1001"},
203         /* Expansion entry 1010 */
204         {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
205          NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
206          SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
207          "Entry 1010"},
208         /* ATMEL AT45DB011B (buffered flash) */
209         {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
210          BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
211          BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
212          "Buffered flash (128kB)"},
213         /* Expansion entry 1100 */
214         {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
215          NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
216          SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
217          "Entry 1100"},
218         /* Expansion entry 1101 */
219         {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
220          NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
221          SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
222          "Entry 1101"},
223         /* Ateml Expansion entry 1110 */
224         {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
225          BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
226          BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
227          "Entry 1110 (Atmel)"},
228         /* ATMEL AT45DB021B (buffered flash) */
229         {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
230          BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
231          BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
232          "Buffered flash (256kB)"},
233 };
234
/* The 5709 chip family has a single, fixed NVRAM configuration, so it
 * bypasses the strap-based flash_table lookup and uses this descriptor.
 */
235 static const struct flash_spec flash_5709 = {
236         .flags          = BNX2_NV_BUFFERED,
237         .page_bits      = BCM5709_FLASH_PAGE_BITS,
238         .page_size      = BCM5709_FLASH_PAGE_SIZE,
239         .addr_mask      = BCM5709_FLASH_BYTE_ADDR_MASK,
240         .total_size     = BUFFERED_FLASH_TOTAL_SIZE*2,
241         .name           = "5709 Buffered flash (256kB)",
242 };
243
244 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
245
/* Forward declarations for NAPI setup/teardown helpers defined later. */
246 static void bnx2_init_napi(struct bnx2 *bp);
247 static void bnx2_del_napi(struct bnx2 *bp);
248
/* Return the number of free TX descriptors in @txr.
 * Called locklessly from both the xmit and completion paths, hence the
 * READ_ONCE() snapshot of the 16-bit producer/consumer indices; the
 * unsigned subtraction wraps correctly modulo 2^32 and is then masked
 * back to 16 bits when it exceeds one full ring.
 */
249 static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
250 {
251         u32 diff;
252
253         /* The ring uses 256 indices for 255 entries, one of them
254          * needs to be skipped.
255          */
256         diff = READ_ONCE(txr->tx_prod) - READ_ONCE(txr->tx_cons);
257         if (unlikely(diff >= BNX2_TX_DESC_CNT)) {
258                 diff &= 0xffff;
259                 if (diff == BNX2_TX_DESC_CNT)
260                         diff = BNX2_MAX_TX_DESC_CNT;
261         }
262         return bp->tx_ring_size - diff;
263 }
264
265 static u32
266 bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
267 {
268         unsigned long flags;
269         u32 val;
270
271         spin_lock_irqsave(&bp->indirect_lock, flags);
272         BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
273         val = BNX2_RD(bp, BNX2_PCICFG_REG_WINDOW);
274         spin_unlock_irqrestore(&bp->indirect_lock, flags);
275         return val;
276 }
277
278 static void
279 bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
280 {
281         unsigned long flags;
282
283         spin_lock_irqsave(&bp->indirect_lock, flags);
284         BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
285         BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
286         spin_unlock_irqrestore(&bp->indirect_lock, flags);
287 }
288
289 static void
290 bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
291 {
292         bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
293 }
294
295 static u32
296 bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
297 {
298         return bnx2_reg_rd_ind(bp, bp->shmem_base + offset);
299 }
300
/* Write @val into the chip's context memory at @cid_addr + @offset.
 * The 5709 uses a request/acknowledge interface: post the data, issue a
 * WRITE_REQ, then poll (up to 5 x 5us) for the hardware to clear the
 * request bit.  Older chips expose context memory through a simple
 * address/data window.  indirect_lock serializes either sequence.
 */
301 static void
302 bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
303 {
304         unsigned long flags;
305
306         offset += cid_addr;
307         spin_lock_irqsave(&bp->indirect_lock, flags);
308         if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
309                 int i;
310
311                 BNX2_WR(bp, BNX2_CTX_CTX_DATA, val);
312                 BNX2_WR(bp, BNX2_CTX_CTX_CTRL,
313                         offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
314                 for (i = 0; i < 5; i++) {
315                         val = BNX2_RD(bp, BNX2_CTX_CTX_CTRL);
316                         if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
317                                 break;
318                         udelay(5);
319                 }
320         } else {
321                 BNX2_WR(bp, BNX2_CTX_DATA_ADR, offset);
322                 BNX2_WR(bp, BNX2_CTX_DATA, val);
323         }
324         spin_unlock_irqrestore(&bp->indirect_lock, flags);
}
326
327 #ifdef BCM_CNIC
328 static int
329 bnx2_drv_ctl(struct net_device *dev, struct drv_ctl_info *info)
330 {
331         struct bnx2 *bp = netdev_priv(dev);
332         struct drv_ctl_io *io = &info->data.io;
333
334         switch (info->cmd) {
335         case DRV_CTL_IO_WR_CMD:
336                 bnx2_reg_wr_ind(bp, io->offset, io->data);
337                 break;
338         case DRV_CTL_IO_RD_CMD:
339                 io->data = bnx2_reg_rd_ind(bp, io->offset);
340                 break;
341         case DRV_CTL_CTX_WR_CMD:
342                 bnx2_ctx_wr(bp, io->cid_addr, io->offset, io->data);
343                 break;
344         default:
345                 return -EINVAL;
346         }
347         return 0;
348 }
349
/* Fill in the CNIC driver's IRQ information.  With MSI-X the CNIC gets
 * its own vector (the one after the last net-driver vector, index
 * bp->irq_nvecs) and its own status block; otherwise it shares vector 0
 * and the net driver polls for CNIC events (cnic_present = 1).
 */
350 static void bnx2_setup_cnic_irq_info(struct bnx2 *bp)
351 {
352         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
353         struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
354         int sb_id;
355
356         if (bp->flags & BNX2_FLAG_USING_MSIX) {
357                 cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
358                 bnapi->cnic_present = 0;
359                 sb_id = bp->irq_nvecs;
360                 cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
361         } else {
362                 cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
363                 bnapi->cnic_tag = bnapi->last_status_idx;
364                 bnapi->cnic_present = 1;
365                 sb_id = 0;
366                 cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
367         }
368
369         cp->irq_arr[0].vector = bp->irq_tbl[sb_id].vector;
370         cp->irq_arr[0].status_blk = (void *)
371                 ((unsigned long) bnapi->status_blk.msi +
372                 (BNX2_SBLK_MSIX_ALIGN_SIZE * sb_id));
373         cp->irq_arr[0].status_blk_num = sb_id;
374         cp->num_irq = 1;
}
376
/* Register the CNIC (iSCSI offload) driver with this device.
 * Fails if @ops is NULL, a CNIC driver is already registered, or the
 * firmware reports no iSCSI connections available.  cnic_data is set
 * before rcu_assign_pointer() publishes @ops so RCU readers never see
 * the ops pointer with stale private data.
 */
377 static int bnx2_register_cnic(struct net_device *dev, struct cnic_ops *ops,
378                               void *data)
379 {
380         struct bnx2 *bp = netdev_priv(dev);
381         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
382
383         if (!ops)
384                 return -EINVAL;
385
386         if (cp->drv_state & CNIC_DRV_STATE_REGD)
387                 return -EBUSY;
388
389         if (!bnx2_reg_rd_ind(bp, BNX2_FW_MAX_ISCSI_CONN))
390                 return -ENODEV;
391
392         bp->cnic_data = data;
393         rcu_assign_pointer(bp->cnic_ops, ops);
394
395         cp->num_irq = 0;
396         cp->drv_state = CNIC_DRV_STATE_REGD;
397
398         bnx2_setup_cnic_irq_info(bp);
399
400         return 0;
401 }
402
/* Unregister the CNIC driver.  State is cleared under cnic_lock, then
 * synchronize_rcu() guarantees no RCU reader still holds the old
 * cnic_ops pointer before the caller may free it.  Always returns 0.
 */
403 static int bnx2_unregister_cnic(struct net_device *dev)
404 {
405         struct bnx2 *bp = netdev_priv(dev);
406         struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
407         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
408
409         mutex_lock(&bp->cnic_lock);
410         cp->drv_state = 0;
411         bnapi->cnic_present = 0;
412         RCU_INIT_POINTER(bp->cnic_ops, NULL);
413         mutex_unlock(&bp->cnic_lock);
414         synchronize_rcu();
415         return 0;
416 }
417
418 static struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev)
419 {
420         struct bnx2 *bp = netdev_priv(dev);
421         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
422
423         if (!cp->max_iscsi_conn)
424                 return NULL;
425
426         cp->drv_owner = THIS_MODULE;
427         cp->chip_id = bp->chip_id;
428         cp->pdev = bp->pdev;
429         cp->io_base = bp->regview;
430         cp->drv_ctl = bnx2_drv_ctl;
431         cp->drv_register_cnic = bnx2_register_cnic;
432         cp->drv_unregister_cnic = bnx2_unregister_cnic;
433
434         return cp;
435 }
436
437 static void
438 bnx2_cnic_stop(struct bnx2 *bp)
439 {
440         struct cnic_ops *c_ops;
441         struct cnic_ctl_info info;
442
443         mutex_lock(&bp->cnic_lock);
444         c_ops = rcu_dereference_protected(bp->cnic_ops,
445                                           lockdep_is_held(&bp->cnic_lock));
446         if (c_ops) {
447                 info.cmd = CNIC_CTL_STOP_CMD;
448                 c_ops->cnic_ctl(bp->cnic_data, &info);
449         }
450         mutex_unlock(&bp->cnic_lock);
451 }
452
453 static void
454 bnx2_cnic_start(struct bnx2 *bp)
455 {
456         struct cnic_ops *c_ops;
457         struct cnic_ctl_info info;
458
459         mutex_lock(&bp->cnic_lock);
460         c_ops = rcu_dereference_protected(bp->cnic_ops,
461                                           lockdep_is_held(&bp->cnic_lock));
462         if (c_ops) {
463                 if (!(bp->flags & BNX2_FLAG_USING_MSIX)) {
464                         struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
465
466                         bnapi->cnic_tag = bnapi->last_status_idx;
467                 }
468                 info.cmd = CNIC_CTL_START_CMD;
469                 c_ops->cnic_ctl(bp->cnic_data, &info);
470         }
471         mutex_unlock(&bp->cnic_lock);
472 }
473
474 #else
475
/* No-op stubs used when CNIC support (CONFIG_CNIC) is compiled out. */
476 static void
477 bnx2_cnic_stop(struct bnx2 *bp)
478 {
479 }
480
481 static void
482 bnx2_cnic_start(struct bnx2 *bp)
483 {
484 }
485
486 #endif
487
/* Read PHY register @reg over MDIO into *@val.
 * If hardware auto-polling of the PHY is active it must be suspended
 * first (and restored afterwards) so the manual MDIO transaction does
 * not collide with it.  The transaction is polled for completion for up
 * to 50 x 10us; on timeout *@val is zeroed and -EBUSY is returned.
 */
488 static int
489 bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
490 {
491         u32 val1;
492         int i, ret;
493
494         if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
495                 val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
496                 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
497
498                 BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
499                 BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
500
501                 udelay(40);
502         }
503
/* Compose the MDIO read command: PHY address in bits 25:21, register in
 * bits 20:16, START_BUSY kicks off the transaction.
 */
504         val1 = (bp->phy_addr << 21) | (reg << 16) |
505                 BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
506                 BNX2_EMAC_MDIO_COMM_START_BUSY;
507         BNX2_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
508
509         for (i = 0; i < 50; i++) {
510                 udelay(10);
511
512                 val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
513                 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
514                         udelay(5);
515
516                         val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
517                         val1 &= BNX2_EMAC_MDIO_COMM_DATA;
518
519                         break;
520                 }
521         }
522
523         if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
524                 *val = 0x0;
525                 ret = -EBUSY;
526         }
527         else {
528                 *val = val1;
529                 ret = 0;
530         }
531
532         if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
533                 val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
534                 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
535
536                 BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
537                 BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
538
539                 udelay(40);
540         }
541
542         return ret;
543 }
544
/* Write @val to PHY register @reg over MDIO.
 * Mirrors bnx2_read_phy(): auto-polling is suspended around the manual
 * transaction and completion is polled for up to 50 x 10us.
 * Returns 0 on success, -EBUSY if the transaction never completed.
 */
545 static int
546 bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
547 {
548         u32 val1;
549         int i, ret;
550
551         if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
552                 val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
553                 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
554
555                 BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
556                 BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
557
558                 udelay(40);
559         }
560
561         val1 = (bp->phy_addr << 21) | (reg << 16) | val |
562                 BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
563                 BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
564         BNX2_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
565
566         for (i = 0; i < 50; i++) {
567                 udelay(10);
568
569                 val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
570                 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
571                         udelay(5);
572                         break;
573                 }
574         }
575
576         if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
577                 ret = -EBUSY;
578         else
579                 ret = 0;
580
581         if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
582                 val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
583                 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
584
585                 BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
586                 BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
587
588                 udelay(40);
589         }
590
591         return ret;
592 }
593
594 static void
595 bnx2_disable_int(struct bnx2 *bp)
596 {
597         int i;
598         struct bnx2_napi *bnapi;
599
600         for (i = 0; i < bp->irq_nvecs; i++) {
601                 bnapi = &bp->bnx2_napi[i];
602                 BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
603                        BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
604         }
605         BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
606 }
607
/* Re-enable interrupts on every vector.  For each vector the current
 * status index is acknowledged twice: first with MASK_INT still set,
 * then without it to actually unmask.  A final COAL_NOW forces the host
 * coalescing block to generate an interrupt for any events that arrived
 * while interrupts were masked.
 */
608 static void
609 bnx2_enable_int(struct bnx2 *bp)
610 {
611         int i;
612         struct bnx2_napi *bnapi;
613
614         for (i = 0; i < bp->irq_nvecs; i++) {
615                 bnapi = &bp->bnx2_napi[i];
616
617                 BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
618                         BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
619                         BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
620                         bnapi->last_status_idx);
621
622                 BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
623                         BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
624                         bnapi->last_status_idx);
625         }
626         BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
627 }
628
/* Disable interrupts and wait for any in-flight handlers to finish.
 * intr_sem is bumped first so the ISR/NAPI paths see the device as
 * quiescing even if the device is not running.
 */
629 static void
630 bnx2_disable_int_sync(struct bnx2 *bp)
631 {
632         int i;
633
634         atomic_inc(&bp->intr_sem);
635         if (!netif_running(bp->dev))
636                 return;
637
638         bnx2_disable_int(bp);
639         for (i = 0; i < bp->irq_nvecs; i++)
640                 synchronize_irq(bp->irq_tbl[i].vector);
641 }
642
643 static void
644 bnx2_napi_disable(struct bnx2 *bp)
645 {
646         int i;
647
648         for (i = 0; i < bp->irq_nvecs; i++)
649                 napi_disable(&bp->bnx2_napi[i].napi);
650 }
651
652 static void
653 bnx2_napi_enable(struct bnx2 *bp)
654 {
655         int i;
656
657         for (i = 0; i < bp->irq_nvecs; i++)
658                 napi_enable(&bp->bnx2_napi[i].napi);
659 }
660
/* Quiesce the interface: optionally stop the CNIC driver first, then
 * (if running) stop NAPI and the TX queues, disable and synchronize
 * interrupts, and finally drop the carrier so the stack does not
 * trigger a TX watchdog while the device is down.
 */
661 static void
662 bnx2_netif_stop(struct bnx2 *bp, bool stop_cnic)
663 {
664         if (stop_cnic)
665                 bnx2_cnic_stop(bp);
666         if (netif_running(bp->dev)) {
667                 bnx2_napi_disable(bp);
668                 netif_tx_disable(bp->dev);
669         }
670         bnx2_disable_int_sync(bp);
671         netif_carrier_off(bp->dev);     /* prevent tx timeout */
672 }
673
/* Undo bnx2_netif_stop().  intr_sem counts nested stop requests; only
 * the final matching start (when the counter drops to zero) re-enables
 * the TX queues, carrier, NAPI, interrupts, and optionally the CNIC.
 * phy_lock protects the link_up check against concurrent link changes.
 */
674 static void
675 bnx2_netif_start(struct bnx2 *bp, bool start_cnic)
676 {
677         if (atomic_dec_and_test(&bp->intr_sem)) {
678                 if (netif_running(bp->dev)) {
679                         netif_tx_wake_all_queues(bp->dev);
680                         spin_lock_bh(&bp->phy_lock);
681                         if (bp->link_up)
682                                 netif_carrier_on(bp->dev);
683                         spin_unlock_bh(&bp->phy_lock);
684                         bnx2_napi_enable(bp);
685                         bnx2_enable_int(bp);
686                         if (start_cnic)
687                                 bnx2_cnic_start(bp);
688                 }
689         }
690 }
691
692 static void
693 bnx2_free_tx_mem(struct bnx2 *bp)
694 {
695         int i;
696
697         for (i = 0; i < bp->num_tx_rings; i++) {
698                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
699                 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
700
701                 if (txr->tx_desc_ring) {
702                         dma_free_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
703                                           txr->tx_desc_ring,
704                                           txr->tx_desc_mapping);
705                         txr->tx_desc_ring = NULL;
706                 }
707                 kfree(txr->tx_buf_ring);
708                 txr->tx_buf_ring = NULL;
709         }
710 }
711
712 static void
713 bnx2_free_rx_mem(struct bnx2 *bp)
714 {
715         int i;
716
717         for (i = 0; i < bp->num_rx_rings; i++) {
718                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
719                 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
720                 int j;
721
722                 for (j = 0; j < bp->rx_max_ring; j++) {
723                         if (rxr->rx_desc_ring[j])
724                                 dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
725                                                   rxr->rx_desc_ring[j],
726                                                   rxr->rx_desc_mapping[j]);
727                         rxr->rx_desc_ring[j] = NULL;
728                 }
729                 vfree(rxr->rx_buf_ring);
730                 rxr->rx_buf_ring = NULL;
731
732                 for (j = 0; j < bp->rx_max_pg_ring; j++) {
733                         if (rxr->rx_pg_desc_ring[j])
734                                 dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
735                                                   rxr->rx_pg_desc_ring[j],
736                                                   rxr->rx_pg_desc_mapping[j]);
737                         rxr->rx_pg_desc_ring[j] = NULL;
738                 }
739                 vfree(rxr->rx_pg_ring);
740                 rxr->rx_pg_ring = NULL;
741         }
742 }
743
744 static int
745 bnx2_alloc_tx_mem(struct bnx2 *bp)
746 {
747         int i;
748
749         for (i = 0; i < bp->num_tx_rings; i++) {
750                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
751                 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
752
753                 txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
754                 if (!txr->tx_buf_ring)
755                         return -ENOMEM;
756
757                 txr->tx_desc_ring =
758                         dma_alloc_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
759                                            &txr->tx_desc_mapping, GFP_KERNEL);
760                 if (!txr->tx_desc_ring)
761                         return -ENOMEM;
762         }
763         return 0;
764 }
765
/* Allocate RX memory for every RX ring: a vmalloc'd software buffer
 * ring plus rx_max_ring DMA descriptor pages, and (when jumbo page
 * rings are in use, rx_pg_ring_size != 0) a page-buffer ring plus
 * rx_max_pg_ring DMA page-descriptor pages.  Returns -ENOMEM on any
 * failure; the caller unwinds with bnx2_free_rx_mem(), which tolerates
 * partial allocation.
 */
766 static int
767 bnx2_alloc_rx_mem(struct bnx2 *bp)
768 {
769         int i;
770
771         for (i = 0; i < bp->num_rx_rings; i++) {
772                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
773                 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
774                 int j;
775
776                 rxr->rx_buf_ring =
777                         vzalloc(array_size(SW_RXBD_RING_SIZE, bp->rx_max_ring));
778                 if (!rxr->rx_buf_ring)
779                         return -ENOMEM;
780
781                 for (j = 0; j < bp->rx_max_ring; j++) {
782                         rxr->rx_desc_ring[j] =
783                                 dma_alloc_coherent(&bp->pdev->dev,
784                                                    RXBD_RING_SIZE,
785                                                    &rxr->rx_desc_mapping[j],
786                                                    GFP_KERNEL);
787                         if (!rxr->rx_desc_ring[j])
788                                 return -ENOMEM;
789
790                 }
791
792                 if (bp->rx_pg_ring_size) {
793                         rxr->rx_pg_ring =
794                                 vzalloc(array_size(SW_RXPG_RING_SIZE,
795                                                    bp->rx_max_pg_ring));
796                         if (!rxr->rx_pg_ring)
797                                 return -ENOMEM;
798
799                 }
800
801                 for (j = 0; j < bp->rx_max_pg_ring; j++) {
802                         rxr->rx_pg_desc_ring[j] =
803                                 dma_alloc_coherent(&bp->pdev->dev,
804                                                    RXBD_RING_SIZE,
805                                                    &rxr->rx_pg_desc_mapping[j],
806                                                    GFP_KERNEL);
807                         if (!rxr->rx_pg_desc_ring[j])
808                                 return -ENOMEM;
809
810                 }
811         }
812         return 0;
813 }
814
815 static void
816 bnx2_free_stats_blk(struct net_device *dev)
817 {
818         struct bnx2 *bp = netdev_priv(dev);
819
820         if (bp->status_blk) {
821                 dma_free_coherent(&bp->pdev->dev, bp->status_stats_size,
822                                   bp->status_blk,
823                                   bp->status_blk_mapping);
824                 bp->status_blk = NULL;
825                 bp->stats_blk = NULL;
826         }
827 }
828
/* Allocate one zeroed DMA-coherent region holding the status block(s)
 * followed by the statistics block.  With MSI-X capability, room is
 * reserved for BNX2_MAX_MSIX_HW_VEC per-vector status blocks at
 * BNX2_SBLK_MSIX_ALIGN_SIZE spacing.  Returns 0 or -ENOMEM.
 * (dma_zalloc_coherent is the zeroing allocator of this kernel series.)
 */
829 static int
830 bnx2_alloc_stats_blk(struct net_device *dev)
831 {
832         int status_blk_size;
833         void *status_blk;
834         struct bnx2 *bp = netdev_priv(dev);
835
836         /* Combine status and statistics blocks into one allocation. */
837         status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
838         if (bp->flags & BNX2_FLAG_MSIX_CAP)
839                 status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
840                                                  BNX2_SBLK_MSIX_ALIGN_SIZE);
841         bp->status_stats_size = status_blk_size +
842                                 sizeof(struct statistics_block);
843         status_blk = dma_zalloc_coherent(&bp->pdev->dev, bp->status_stats_size,
844                                          &bp->status_blk_mapping, GFP_KERNEL);
845         if (!status_blk)
846                 return -ENOMEM;
847
848         bp->status_blk = status_blk;
849         bp->stats_blk = status_blk + status_blk_size;
850         bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;
851
852         return 0;
853 }
854
855 static void
856 bnx2_free_mem(struct bnx2 *bp)
857 {
858         int i;
859         struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
860
861         bnx2_free_tx_mem(bp);
862         bnx2_free_rx_mem(bp);
863
864         for (i = 0; i < bp->ctx_pages; i++) {
865                 if (bp->ctx_blk[i]) {
866                         dma_free_coherent(&bp->pdev->dev, BNX2_PAGE_SIZE,
867                                           bp->ctx_blk[i],
868                                           bp->ctx_blk_mapping[i]);
869                         bp->ctx_blk[i] = NULL;
870                 }
871         }
872
873         if (bnapi->status_blk.msi)
874                 bnapi->status_blk.msi = NULL;
875 }
876
/* Allocate all runtime memory for the device: wire the per-vector
 * NAPI status-block pointers into the region allocated earlier by
 * bnx2_alloc_stats_blk(), allocate 5709 context pages, then the RX
 * and TX rings.  Returns 0 or -ENOMEM; on failure everything already
 * allocated here is released via bnx2_free_mem().
 */
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
        int i, err;
        struct bnx2_napi *bnapi;

        /* Vector 0 uses the base (INTx/MSI) status block layout. */
        bnapi = &bp->bnx2_napi[0];
        bnapi->status_blk.msi = bp->status_blk;
        bnapi->hw_tx_cons_ptr =
                &bnapi->status_blk.msi->status_tx_quick_consumer_index0;
        bnapi->hw_rx_cons_ptr =
                &bnapi->status_blk.msi->status_rx_quick_consumer_index0;
        if (bp->flags & BNX2_FLAG_MSIX_CAP) {
                /* Remaining vectors get per-vector MSI-X status blocks
                 * at fixed BNX2_SBLK_MSIX_ALIGN_SIZE offsets inside the
                 * same allocation.
                 */
                for (i = 1; i < bp->irq_nvecs; i++) {
                        struct status_block_msix *sblk;

                        bnapi = &bp->bnx2_napi[i];

                        sblk = (bp->status_blk + BNX2_SBLK_MSIX_ALIGN_SIZE * i);
                        bnapi->status_blk.msix = sblk;
                        bnapi->hw_tx_cons_ptr =
                                &sblk->status_tx_quick_consumer_index;
                        bnapi->hw_rx_cons_ptr =
                                &sblk->status_rx_quick_consumer_index;
                        /* Vector id in the top byte, as the HW expects. */
                        bnapi->int_num = i << 24;
                }
        }

        if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
                /* 5709 context memory: 8 KB split into host pages
                 * (at least one page even if BNX2_PAGE_SIZE > 8 KB).
                 */
                bp->ctx_pages = 0x2000 / BNX2_PAGE_SIZE;
                if (bp->ctx_pages == 0)
                        bp->ctx_pages = 1;
                for (i = 0; i < bp->ctx_pages; i++) {
                        bp->ctx_blk[i] = dma_alloc_coherent(&bp->pdev->dev,
                                                BNX2_PAGE_SIZE,
                                                &bp->ctx_blk_mapping[i],
                                                GFP_KERNEL);
                        if (!bp->ctx_blk[i])
                                goto alloc_mem_err;
                }
        }

        err = bnx2_alloc_rx_mem(bp);
        if (err)
                goto alloc_mem_err;

        err = bnx2_alloc_tx_mem(bp);
        if (err)
                goto alloc_mem_err;

        return 0;

alloc_mem_err:
        bnx2_free_mem(bp);
        return -ENOMEM;
}
933
/* Report the current link state to the bootcode firmware via the
 * BNX2_LINK_STATUS shared-memory word.  Skipped when the PHY is
 * owned by the remote (management) processor.
 */
static void
bnx2_report_fw_link(struct bnx2 *bp)
{
        u32 fw_link_status = 0;

        if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
                return;

        if (bp->link_up) {
                u32 bmsr;

                /* Encode speed/duplex in the firmware's link format. */
                switch (bp->line_speed) {
                case SPEED_10:
                        if (bp->duplex == DUPLEX_HALF)
                                fw_link_status = BNX2_LINK_STATUS_10HALF;
                        else
                                fw_link_status = BNX2_LINK_STATUS_10FULL;
                        break;
                case SPEED_100:
                        if (bp->duplex == DUPLEX_HALF)
                                fw_link_status = BNX2_LINK_STATUS_100HALF;
                        else
                                fw_link_status = BNX2_LINK_STATUS_100FULL;
                        break;
                case SPEED_1000:
                        if (bp->duplex == DUPLEX_HALF)
                                fw_link_status = BNX2_LINK_STATUS_1000HALF;
                        else
                                fw_link_status = BNX2_LINK_STATUS_1000FULL;
                        break;
                case SPEED_2500:
                        if (bp->duplex == DUPLEX_HALF)
                                fw_link_status = BNX2_LINK_STATUS_2500HALF;
                        else
                                fw_link_status = BNX2_LINK_STATUS_2500FULL;
                        break;
                }

                fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

                if (bp->autoneg) {
                        fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

                        /* BMSR bits are latched; read twice so the second
                         * read reflects the current state.
                         */
                        bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
                        bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

                        if (!(bmsr & BMSR_ANEGCOMPLETE) ||
                            bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
                                fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
                        else
                                fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
                }
        }
        else
                fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

        bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
}
992
993 static char *
994 bnx2_xceiver_str(struct bnx2 *bp)
995 {
996         return (bp->phy_port == PORT_FIBRE) ? "SerDes" :
997                 ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
998                  "Copper");
999 }
1000
/* Log a link transition, mirror the carrier state to the net stack,
 * and forward the new state to the firmware.
 */
static void
bnx2_report_link(struct bnx2 *bp)
{
        if (bp->link_up) {
                netif_carrier_on(bp->dev);
                /* No trailing newline here: flow-control details are
                 * appended with pr_cont() before the line is closed.
                 */
                netdev_info(bp->dev, "NIC %s Link is Up, %d Mbps %s duplex",
                            bnx2_xceiver_str(bp),
                            bp->line_speed,
                            bp->duplex == DUPLEX_FULL ? "full" : "half");

                if (bp->flow_ctrl) {
                        if (bp->flow_ctrl & FLOW_CTRL_RX) {
                                pr_cont(", receive ");
                                if (bp->flow_ctrl & FLOW_CTRL_TX)
                                        pr_cont("& transmit ");
                        }
                        else {
                                pr_cont(", transmit ");
                        }
                        pr_cont("flow control ON");
                }
                pr_cont("\n");
        } else {
                netif_carrier_off(bp->dev);
                netdev_err(bp->dev, "NIC %s Link is Down\n",
                           bnx2_xceiver_str(bp));
        }

        bnx2_report_fw_link(bp);
}
1031
/* Resolve the pause (flow-control) configuration into bp->flow_ctrl.
 * When speed or flow control is not autonegotiated, the requested
 * setting is applied directly (full duplex only).  Otherwise the
 * result is derived from the local/partner advertisements per the
 * IEEE 802.3 pause resolution table; the 5708 SerDes reports the
 * resolved result directly in a status register.
 */
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
        u32 local_adv, remote_adv;

        bp->flow_ctrl = 0;
        if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
                (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

                /* Not fully autonegotiated: honor the requested mode,
                 * but pause only makes sense at full duplex.
                 */
                if (bp->duplex == DUPLEX_FULL) {
                        bp->flow_ctrl = bp->req_flow_ctrl;
                }
                return;
        }

        if (bp->duplex != DUPLEX_FULL) {
                return;
        }

        /* 5708 SerDes: hardware already resolved pause; read it back. */
        if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
            (BNX2_CHIP(bp) == BNX2_CHIP_5708)) {
                u32 val;

                bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
                if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
                        bp->flow_ctrl |= FLOW_CTRL_TX;
                if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
                        bp->flow_ctrl |= FLOW_CTRL_RX;
                return;
        }

        bnx2_read_phy(bp, bp->mii_adv, &local_adv);
        bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

        /* SerDes uses 1000BASE-X pause bits; translate them to the
         * standard PAUSE_CAP/PAUSE_ASYM encoding so one resolution
         * table below handles both PHY types.
         */
        if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
                u32 new_local_adv = 0;
                u32 new_remote_adv = 0;

                if (local_adv & ADVERTISE_1000XPAUSE)
                        new_local_adv |= ADVERTISE_PAUSE_CAP;
                if (local_adv & ADVERTISE_1000XPSE_ASYM)
                        new_local_adv |= ADVERTISE_PAUSE_ASYM;
                if (remote_adv & ADVERTISE_1000XPAUSE)
                        new_remote_adv |= ADVERTISE_PAUSE_CAP;
                if (remote_adv & ADVERTISE_1000XPSE_ASYM)
                        new_remote_adv |= ADVERTISE_PAUSE_ASYM;

                local_adv = new_local_adv;
                remote_adv = new_remote_adv;
        }

        /* See Table 28B-3 of 802.3ab-1999 spec. */
        if (local_adv & ADVERTISE_PAUSE_CAP) {
                if(local_adv & ADVERTISE_PAUSE_ASYM) {
                        if (remote_adv & ADVERTISE_PAUSE_CAP) {
                                bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
                        }
                        else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
                                bp->flow_ctrl = FLOW_CTRL_RX;
                        }
                }
                else {
                        if (remote_adv & ADVERTISE_PAUSE_CAP) {
                                bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
                        }
                }
        }
        else if (local_adv & ADVERTISE_PAUSE_ASYM) {
                if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
                        (remote_adv & ADVERTISE_PAUSE_ASYM)) {

                        bp->flow_ctrl = FLOW_CTRL_TX;
                }
        }
}
1107
/* Fill in line_speed/duplex for a 5709 SerDes PHY that just linked
 * up.  The autonegotiated result is read from the GP_STATUS block;
 * when speed autoneg is off, the requested values are used instead.
 * Always returns 0.
 */
static int
bnx2_5709s_linkup(struct bnx2 *bp)
{
        u32 val, speed;

        bp->link_up = 1;

        /* Select the GP_STATUS register block, read the AN result,
         * then restore the default COMBO_IEEEB0 block.
         */
        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
        bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

        /* Forced mode: report what was requested. */
        if ((bp->autoneg & AUTONEG_SPEED) == 0) {
                bp->line_speed = bp->req_line_speed;
                bp->duplex = bp->req_duplex;
                return 0;
        }
        speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
        switch (speed) {
                case MII_BNX2_GP_TOP_AN_SPEED_10:
                        bp->line_speed = SPEED_10;
                        break;
                case MII_BNX2_GP_TOP_AN_SPEED_100:
                        bp->line_speed = SPEED_100;
                        break;
                case MII_BNX2_GP_TOP_AN_SPEED_1G:
                case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
                        bp->line_speed = SPEED_1000;
                        break;
                case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
                        bp->line_speed = SPEED_2500;
                        break;
        }
        if (val & MII_BNX2_GP_TOP_AN_FD)
                bp->duplex = DUPLEX_FULL;
        else
                bp->duplex = DUPLEX_HALF;
        return 0;
}
1146
/* Fill in line_speed/duplex for a 5708 SerDes PHY that just linked
 * up, decoded from the 1000X_STAT1 status register.  Always
 * returns 0.
 */
static int
bnx2_5708s_linkup(struct bnx2 *bp)
{
        u32 val;

        bp->link_up = 1;
        bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
        switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
                case BCM5708S_1000X_STAT1_SPEED_10:
                        bp->line_speed = SPEED_10;
                        break;
                case BCM5708S_1000X_STAT1_SPEED_100:
                        bp->line_speed = SPEED_100;
                        break;
                case BCM5708S_1000X_STAT1_SPEED_1G:
                        bp->line_speed = SPEED_1000;
                        break;
                case BCM5708S_1000X_STAT1_SPEED_2G5:
                        bp->line_speed = SPEED_2500;
                        break;
        }
        if (val & BCM5708S_1000X_STAT1_FD)
                bp->duplex = DUPLEX_FULL;
        else
                bp->duplex = DUPLEX_HALF;

        return 0;
}
1175
1176 static int
1177 bnx2_5706s_linkup(struct bnx2 *bp)
1178 {
1179         u32 bmcr, local_adv, remote_adv, common;
1180
1181         bp->link_up = 1;
1182         bp->line_speed = SPEED_1000;
1183
1184         bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1185         if (bmcr & BMCR_FULLDPLX) {
1186                 bp->duplex = DUPLEX_FULL;
1187         }
1188         else {
1189                 bp->duplex = DUPLEX_HALF;
1190         }
1191
1192         if (!(bmcr & BMCR_ANENABLE)) {
1193                 return 0;
1194         }
1195
1196         bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1197         bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1198
1199         common = local_adv & remote_adv;
1200         if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
1201
1202                 if (common & ADVERTISE_1000XFULL) {
1203                         bp->duplex = DUPLEX_FULL;
1204                 }
1205                 else {
1206                         bp->duplex = DUPLEX_HALF;
1207                 }
1208         }
1209
1210         return 0;
1211 }
1212
/* Fill in link parameters for a copper PHY that just linked up.
 * With autoneg enabled, the speed/duplex are resolved from the
 * common subset of local and partner advertisements, checking
 * 1000BASE-T first, then 100/10.  In forced mode BMCR decides.
 * Also records MDI-X status.  Always returns 0.
 */
static int
bnx2_copper_linkup(struct bnx2 *bp)
{
        u32 bmcr;

        bp->phy_flags &= ~BNX2_PHY_FLAG_MDIX;

        bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
        if (bmcr & BMCR_ANENABLE) {
                u32 local_adv, remote_adv, common;

                /* 1000BASE-T: local advertisement is in CTRL1000, the
                 * partner's in STAT1000.
                 */
                bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
                bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

                /* Shift STAT1000 LP ability bits down by 2 so they line
                 * up with the CTRL1000 advertisement bit positions.
                 */
                common = local_adv & (remote_adv >> 2);
                if (common & ADVERTISE_1000FULL) {
                        bp->line_speed = SPEED_1000;
                        bp->duplex = DUPLEX_FULL;
                }
                else if (common & ADVERTISE_1000HALF) {
                        bp->line_speed = SPEED_1000;
                        bp->duplex = DUPLEX_HALF;
                }
                else {
                        /* No gigabit match; fall back to 10/100 from the
                         * standard ADV/LPA registers.
                         */
                        bnx2_read_phy(bp, bp->mii_adv, &local_adv);
                        bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

                        common = local_adv & remote_adv;
                        if (common & ADVERTISE_100FULL) {
                                bp->line_speed = SPEED_100;
                                bp->duplex = DUPLEX_FULL;
                        }
                        else if (common & ADVERTISE_100HALF) {
                                bp->line_speed = SPEED_100;
                                bp->duplex = DUPLEX_HALF;
                        }
                        else if (common & ADVERTISE_10FULL) {
                                bp->line_speed = SPEED_10;
                                bp->duplex = DUPLEX_FULL;
                        }
                        else if (common & ADVERTISE_10HALF) {
                                bp->line_speed = SPEED_10;
                                bp->duplex = DUPLEX_HALF;
                        }
                        else {
                                /* Nothing in common: treat as link down. */
                                bp->line_speed = 0;
                                bp->link_up = 0;
                        }
                }
        }
        else {
                /* Forced mode: decode speed/duplex straight from BMCR. */
                if (bmcr & BMCR_SPEED100) {
                        bp->line_speed = SPEED_100;
                }
                else {
                        bp->line_speed = SPEED_10;
                }
                if (bmcr & BMCR_FULLDPLX) {
                        bp->duplex = DUPLEX_FULL;
                }
                else {
                        bp->duplex = DUPLEX_HALF;
                }
        }

        if (bp->link_up) {
                u32 ext_status;

                /* Record whether the PHY resolved to crossover (MDI-X). */
                bnx2_read_phy(bp, MII_BNX2_EXT_STATUS, &ext_status);
                if (ext_status & EXT_STATUS_MDIX)
                        bp->phy_flags |= BNX2_PHY_FLAG_MDIX;
        }

        return 0;
}
1288
1289 static void
1290 bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
1291 {
1292         u32 val, rx_cid_addr = GET_CID_ADDR(cid);
1293
1294         val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
1295         val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
1296         val |= 0x02 << 8;
1297
1298         if (bp->flow_ctrl & FLOW_CTRL_TX)
1299                 val |= BNX2_L2CTX_FLOW_CTRL_ENABLE;
1300
1301         bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
1302 }
1303
1304 static void
1305 bnx2_init_all_rx_contexts(struct bnx2 *bp)
1306 {
1307         int i;
1308         u32 cid;
1309
1310         for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) {
1311                 if (i == 1)
1312                         cid = RX_RSS_CID;
1313                 bnx2_init_rx_context(bp, cid);
1314         }
1315 }
1316
/* Program the MAC (EMAC) to match the current PHY link state:
 * port mode (MII/GMII/2.5G), duplex, and RX/TX pause enables,
 * then acknowledge the link-change interrupt and refresh the RX
 * contexts so their flow-control bit tracks the new state.
 */
static void
bnx2_set_mac_link(struct bnx2 *bp)
{
        u32 val;

        /* TX inter-packet gap / slot-time values; 0x26ff is the
         * vendor-specified setting for 1 Gb/s half duplex, 0x2620
         * for everything else.
         */
        BNX2_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
        if (bp->link_up && (bp->line_speed == SPEED_1000) &&
                (bp->duplex == DUPLEX_HALF)) {
                BNX2_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
        }

        /* Configure the EMAC mode register. */
        val = BNX2_RD(bp, BNX2_EMAC_MODE);

        val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
                BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
                BNX2_EMAC_MODE_25G_MODE);

        if (bp->link_up) {
                switch (bp->line_speed) {
                        case SPEED_10:
                                /* 5706 has no dedicated 10M mode; it falls
                                 * through to plain MII like 100M.
                                 */
                                if (BNX2_CHIP(bp) != BNX2_CHIP_5706) {
                                        val |= BNX2_EMAC_MODE_PORT_MII_10M;
                                        break;
                                }
                                /* fall through */
                        case SPEED_100:
                                val |= BNX2_EMAC_MODE_PORT_MII;
                                break;
                        case SPEED_2500:
                                /* 2.5G is GMII plus the 25G mode bit. */
                                val |= BNX2_EMAC_MODE_25G_MODE;
                                /* fall through */
                        case SPEED_1000:
                                val |= BNX2_EMAC_MODE_PORT_GMII;
                                break;
                }
        }
        else {
                val |= BNX2_EMAC_MODE_PORT_GMII;
        }

        /* Set the MAC to operate in the appropriate duplex mode. */
        if (bp->duplex == DUPLEX_HALF)
                val |= BNX2_EMAC_MODE_HALF_DUPLEX;
        BNX2_WR(bp, BNX2_EMAC_MODE, val);

        /* Enable/disable rx PAUSE. */
        bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

        if (bp->flow_ctrl & FLOW_CTRL_RX)
                bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
        BNX2_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

        /* Enable/disable tx PAUSE. */
        val = BNX2_RD(bp, BNX2_EMAC_TX_MODE);
        val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

        if (bp->flow_ctrl & FLOW_CTRL_TX)
                val |= BNX2_EMAC_TX_MODE_FLOW_EN;
        BNX2_WR(bp, BNX2_EMAC_TX_MODE, val);

        /* Acknowledge the interrupt. */
        BNX2_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

        bnx2_init_all_rx_contexts(bp);
}
1383
1384 static void
1385 bnx2_enable_bmsr1(struct bnx2 *bp)
1386 {
1387         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1388             (BNX2_CHIP(bp) == BNX2_CHIP_5709))
1389                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1390                                MII_BNX2_BLK_ADDR_GP_STATUS);
1391 }
1392
1393 static void
1394 bnx2_disable_bmsr1(struct bnx2 *bp)
1395 {
1396         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1397             (BNX2_CHIP(bp) == BNX2_CHIP_5709))
1398                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1399                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1400 }
1401
/* Make sure 2.5G is advertised in the UP1 register on a
 * 2.5G-capable SerDes PHY.  Returns 1 when 2.5G was already being
 * advertised, 0 when this call had to turn it on (caller may then
 * restart autoneg) or the PHY is not 2.5G-capable.
 */
static int
bnx2_test_and_enable_2g5(struct bnx2 *bp)
{
        u32 up1;
        int ret = 1;

        if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
                return 0;

        if (bp->autoneg & AUTONEG_SPEED)
                bp->advertising |= ADVERTISED_2500baseX_Full;

        /* On 5709, UP1 lives in the OVER1G block; select it first and
         * restore the default block afterwards.
         */
        if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
                bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

        bnx2_read_phy(bp, bp->mii_up1, &up1);
        if (!(up1 & BCM5708S_UP1_2G5)) {
                up1 |= BCM5708S_UP1_2G5;
                bnx2_write_phy(bp, bp->mii_up1, up1);
                ret = 0;
        }

        if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
                bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
                               MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

        return ret;
}
1430
/* Remove the 2.5G advertisement from the UP1 register on a
 * 2.5G-capable SerDes PHY.  Returns 1 when this call actually
 * cleared the bit (a change was made), 0 when it was already off
 * or the PHY is not 2.5G-capable.  Note the return convention is
 * inverted relative to bnx2_test_and_enable_2g5().
 */
static int
bnx2_test_and_disable_2g5(struct bnx2 *bp)
{
        u32 up1;
        int ret = 0;

        if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
                return 0;

        /* On 5709, UP1 lives in the OVER1G block; select it first and
         * restore the default block afterwards.
         */
        if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
                bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

        bnx2_read_phy(bp, bp->mii_up1, &up1);
        if (up1 & BCM5708S_UP1_2G5) {
                up1 &= ~BCM5708S_UP1_2G5;
                bnx2_write_phy(bp, bp->mii_up1, up1);
                ret = 1;
        }

        if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
                bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
                               MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

        return ret;
}
1456
1457 static void
1458 bnx2_enable_forced_2g5(struct bnx2 *bp)
1459 {
1460         u32 uninitialized_var(bmcr);
1461         int err;
1462
1463         if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1464                 return;
1465
1466         if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
1467                 u32 val;
1468
1469                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1470                                MII_BNX2_BLK_ADDR_SERDES_DIG);
1471                 if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
1472                         val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
1473                         val |= MII_BNX2_SD_MISC1_FORCE |
1474                                 MII_BNX2_SD_MISC1_FORCE_2_5G;
1475                         bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1476                 }
1477
1478                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1479                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1480                 err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1481
1482         } else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
1483                 err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1484                 if (!err)
1485                         bmcr |= BCM5708S_BMCR_FORCE_2500;
1486         } else {
1487                 return;
1488         }
1489
1490         if (err)
1491                 return;
1492
1493         if (bp->autoneg & AUTONEG_SPEED) {
1494                 bmcr &= ~BMCR_ANENABLE;
1495                 if (bp->req_duplex == DUPLEX_FULL)
1496                         bmcr |= BMCR_FULLDPLX;
1497         }
1498         bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1499 }
1500
/* Undo bnx2_enable_forced_2g5(): clear the forced-2.5G state on a
 * 2.5G-capable SerDes PHY (5709 MISC1 force bit or 5708 BMCR bit)
 * and, when speed autoneg was requested, re-enable and restart
 * autonegotiation at 1000 Mb/s.
 */
static void
bnx2_disable_forced_2g5(struct bnx2 *bp)
{
        /* See bnx2_enable_forced_2g5(): bmcr is only used when the
         * preceding read succeeded (err == 0).
         */
        u32 uninitialized_var(bmcr);
        int err;

        if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
                return;

        if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
                u32 val;

                bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
                               MII_BNX2_BLK_ADDR_SERDES_DIG);
                if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
                        val &= ~MII_BNX2_SD_MISC1_FORCE;
                        bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
                }

                bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
                               MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
                err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

        } else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
                err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
                if (!err)
                        bmcr &= ~BCM5708S_BMCR_FORCE_2500;
        } else {
                return;
        }

        if (err)
                return;

        if (bp->autoneg & AUTONEG_SPEED)
                bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
        bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}
1539
1540 static void
1541 bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
1542 {
1543         u32 val;
1544
1545         bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
1546         bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
1547         if (start)
1548                 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
1549         else
1550                 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
1551 }
1552
/* Central link-state machine: sample the PHY, update bp->link_up
 * and the derived speed/duplex/flow-control state, report any
 * transition, and reprogram the MAC.  Caller holds bp->phy_lock.
 * Always returns 0.
 */
static int
bnx2_set_link(struct bnx2 *bp)
{
        u32 bmsr;
        u8 link_up;

        /* In loopback modes the link is considered up by definition. */
        if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
                bp->link_up = 1;
                return 0;
        }

        /* Remote-PHY configurations are managed by firmware events. */
        if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
                return 0;

        link_up = bp->link_up;

        /* BMSR bits are latched; read twice (with the 5709 block
         * switch around it) to get the current state.
         */
        bnx2_enable_bmsr1(bp);
        bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
        bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
        bnx2_disable_bmsr1(bp);

        /* 5706 SerDes workaround: derive link from EMAC status and
         * the AN debug shadow register instead of trusting BMSR.
         */
        if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
            (BNX2_CHIP(bp) == BNX2_CHIP_5706)) {
                u32 val, an_dbg;

                if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
                        bnx2_5706s_force_link_dn(bp, 0);
                        bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
                }
                val = BNX2_RD(bp, BNX2_EMAC_STATUS);

                /* Shadow register is latched as well; read it twice. */
                bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
                bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
                bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

                if ((val & BNX2_EMAC_STATUS_LINK) &&
                    !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
                        bmsr |= BMSR_LSTATUS;
                else
                        bmsr &= ~BMSR_LSTATUS;
        }

        if (bmsr & BMSR_LSTATUS) {
                bp->link_up = 1;

                /* Decode speed/duplex with the chip-specific helper. */
                if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
                        if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
                                bnx2_5706s_linkup(bp);
                        else if (BNX2_CHIP(bp) == BNX2_CHIP_5708)
                                bnx2_5708s_linkup(bp);
                        else if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
                                bnx2_5709s_linkup(bp);
                }
                else {
                        bnx2_copper_linkup(bp);
                }
                bnx2_resolve_flow_ctrl(bp);
        }
        else {
                if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
                    (bp->autoneg & AUTONEG_SPEED))
                        bnx2_disable_forced_2g5(bp);

                /* Link lost while in parallel-detect: go back to
                 * normal autonegotiation.
                 */
                if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
                        u32 bmcr;

                        bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
                        bmcr |= BMCR_ANENABLE;
                        bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

                        bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
                }
                bp->link_up = 0;
        }

        if (bp->link_up != link_up) {
                bnx2_report_link(bp);
        }

        bnx2_set_mac_link(bp);

        return 0;
}
1636
1637 static int
1638 bnx2_reset_phy(struct bnx2 *bp)
1639 {
1640         int i;
1641         u32 reg;
1642
1643         bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
1644
1645 #define PHY_RESET_MAX_WAIT 100
1646         for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1647                 udelay(10);
1648
1649                 bnx2_read_phy(bp, bp->mii_bmcr, &reg);
1650                 if (!(reg & BMCR_RESET)) {
1651                         udelay(20);
1652                         break;
1653                 }
1654         }
1655         if (i == PHY_RESET_MAX_WAIT) {
1656                 return -EBUSY;
1657         }
1658         return 0;
1659 }
1660
1661 static u32
1662 bnx2_phy_get_pause_adv(struct bnx2 *bp)
1663 {
1664         u32 adv = 0;
1665
1666         if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1667                 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1668
1669                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1670                         adv = ADVERTISE_1000XPAUSE;
1671                 }
1672                 else {
1673                         adv = ADVERTISE_PAUSE_CAP;
1674                 }
1675         }
1676         else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1677                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1678                         adv = ADVERTISE_1000XPSE_ASYM;
1679                 }
1680                 else {
1681                         adv = ADVERTISE_PAUSE_ASYM;
1682                 }
1683         }
1684         else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1685                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1686                         adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1687                 }
1688                 else {
1689                         adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1690                 }
1691         }
1692         return adv;
1693 }
1694
1695 static int bnx2_fw_sync(struct bnx2 *, u32, int, int);
1696
/* Configure link parameters on a firmware-managed (remote) PHY by
 * encoding the speed/duplex/pause request into the netlink-style
 * speed_arg word and issuing the SET_LINK mailbox command.  The
 * sparse annotations record that phy_lock is dropped around the
 * firmware handshake.  Always returns 0.
 */
static int
bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
        u32 speed_arg = 0, pause_adv;

        pause_adv = bnx2_phy_get_pause_adv(bp);

        if (bp->autoneg & AUTONEG_SPEED) {
                /* Autoneg: advertise every enabled speed/duplex. */
                speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
                if (bp->advertising & ADVERTISED_10baseT_Half)
                        speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
                if (bp->advertising & ADVERTISED_10baseT_Full)
                        speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
                if (bp->advertising & ADVERTISED_100baseT_Half)
                        speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
                if (bp->advertising & ADVERTISED_100baseT_Full)
                        speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
                if (bp->advertising & ADVERTISED_1000baseT_Full)
                        speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
                if (bp->advertising & ADVERTISED_2500baseX_Full)
                        speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
        } else {
                /* Forced mode: encode the single requested setting.
                 * 2.5G and 1G are full duplex only.
                 */
                if (bp->req_line_speed == SPEED_2500)
                        speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
                else if (bp->req_line_speed == SPEED_1000)
                        speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
                else if (bp->req_line_speed == SPEED_100) {
                        if (bp->req_duplex == DUPLEX_FULL)
                                speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
                        else
                                speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
                } else if (bp->req_line_speed == SPEED_10) {
                        if (bp->req_duplex == DUPLEX_FULL)
                                speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
                        else
                                speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
                }
        }

        if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
                speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
        if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
                speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;

        /* Copper ports additionally enable the remote-PHY application
         * and wirespeed features.
         */
        if (port == PORT_TP)
                speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
                             BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;

        bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);

        /* Drop phy_lock while waiting for the firmware to process the
         * command; bnx2_fw_sync() may sleep/poll.
         */
        spin_unlock_bh(&bp->phy_lock);
        bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0);
        spin_lock_bh(&bp->phy_lock);

        return 0;
}
1755
/* Configure the SerDes PHY from bp->autoneg / bp->req_* / bp->advertising.
 * Remote-PHY capable devices delegate to the management firmware via
 * bnx2_setup_remote_phy().  Called with bp->phy_lock held; the lock may
 * be dropped and re-acquired while waiting for the link to drop.
 * Always returns 0.
 */
static int
bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 adv, bmcr;
	u32 new_adv = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return bnx2_setup_remote_phy(bp, port);

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		/* Forced speed/duplex: build the new BMCR and 1000X
		 * advertisement, then bounce the link if anything changed.
		 */
		u32 new_bmcr;
		int force_link_down = 0;

		if (bp->req_line_speed == SPEED_2500) {
			if (!bnx2_test_and_enable_2g5(bp))
				force_link_down = 1;
		} else if (bp->req_line_speed == SPEED_1000) {
			if (bnx2_test_and_disable_2g5(bp))
				force_link_down = 1;
		}
		bnx2_read_phy(bp, bp->mii_adv, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;

		if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
			if (bp->req_line_speed == SPEED_2500)
				bnx2_enable_forced_2g5(bp);
			else if (bp->req_line_speed == SPEED_1000) {
				bnx2_disable_forced_2g5(bp);
				/* 0x2000 is BMCR_SPEED100; clearing it drops
				 * the forced-2.5G speed selection back to 1G.
				 */
				new_bmcr &= ~0x2000;
			}

		} else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
			if (bp->req_line_speed == SPEED_2500)
				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			else
				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				/* Advertise nothing and restart autoneg so
				 * the partner sees the link go away before
				 * the new forced settings take effect.
				 */
				bnx2_write_phy(bp, bp->mii_adv, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, bp->mii_adv, adv);
			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
		} else {
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Autoneg path: advertise 1000X-full (if requested) plus pause. */
	bnx2_test_and_enable_2g5(bp);

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, bp->mii_adv, &adv);
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			/* msleep() may schedule; drop the lock around it. */
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, bp->mii_adv, new_adv);
		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}

	return 0;
}
1872
/* Fibre modes to advertise: 2.5G is included only when the PHY is
 * 2.5G capable.  Note: expands using the local variable 'bp'.
 */
#define ETHTOOL_ALL_FIBRE_SPEED						\
	(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ?			\
		(ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
		(ADVERTISED_1000baseT_Full)

/* All copper modes the driver advertises (no 1000half). */
#define ETHTOOL_ALL_COPPER_SPEED					\
	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |		\
	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |		\
	ADVERTISED_1000baseT_Full)

/* MII advertisement-register bits covering all 10/100 modes. */
#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

/* MII 1000BASE-T control-register bits for both 1000 modes. */
#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1887
1888 static void
1889 bnx2_set_default_remote_link(struct bnx2 *bp)
1890 {
1891         u32 link;
1892
1893         if (bp->phy_port == PORT_TP)
1894                 link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
1895         else
1896                 link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);
1897
1898         if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
1899                 bp->req_line_speed = 0;
1900                 bp->autoneg |= AUTONEG_SPEED;
1901                 bp->advertising = ADVERTISED_Autoneg;
1902                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1903                         bp->advertising |= ADVERTISED_10baseT_Half;
1904                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
1905                         bp->advertising |= ADVERTISED_10baseT_Full;
1906                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1907                         bp->advertising |= ADVERTISED_100baseT_Half;
1908                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
1909                         bp->advertising |= ADVERTISED_100baseT_Full;
1910                 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1911                         bp->advertising |= ADVERTISED_1000baseT_Full;
1912                 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1913                         bp->advertising |= ADVERTISED_2500baseX_Full;
1914         } else {
1915                 bp->autoneg = 0;
1916                 bp->advertising = 0;
1917                 bp->req_duplex = DUPLEX_FULL;
1918                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
1919                         bp->req_line_speed = SPEED_10;
1920                         if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1921                                 bp->req_duplex = DUPLEX_HALF;
1922                 }
1923                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
1924                         bp->req_line_speed = SPEED_100;
1925                         if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1926                                 bp->req_duplex = DUPLEX_HALF;
1927                 }
1928                 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1929                         bp->req_line_speed = SPEED_1000;
1930                 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1931                         bp->req_line_speed = SPEED_2500;
1932         }
1933 }
1934
1935 static void
1936 bnx2_set_default_link(struct bnx2 *bp)
1937 {
1938         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
1939                 bnx2_set_default_remote_link(bp);
1940                 return;
1941         }
1942
1943         bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1944         bp->req_line_speed = 0;
1945         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1946                 u32 reg;
1947
1948                 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1949
1950                 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
1951                 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1952                 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1953                         bp->autoneg = 0;
1954                         bp->req_line_speed = bp->line_speed = SPEED_1000;
1955                         bp->req_duplex = DUPLEX_FULL;
1956                 }
1957         } else
1958                 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1959 }
1960
1961 static void
1962 bnx2_send_heart_beat(struct bnx2 *bp)
1963 {
1964         u32 msg;
1965         u32 addr;
1966
1967         spin_lock(&bp->indirect_lock);
1968         msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
1969         addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
1970         BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
1971         BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
1972         spin_unlock(&bp->indirect_lock);
1973 }
1974
/* Handle a link-status event from the management firmware on remote-PHY
 * devices: refresh link state, speed, duplex, flow control and media
 * port from the BNX2_LINK_STATUS shmem word, then reprogram the MAC.
 */
static void
bnx2_remote_phy_event(struct bnx2 *bp)
{
	u32 msg;
	u8 link_up = bp->link_up;
	u8 old_port;

	msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);

	/* Firmware expects a pulse ack when its heartbeat has expired. */
	if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
		bnx2_send_heart_beat(bp);

	msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;

	if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
		bp->link_up = 0;
	else {
		u32 speed;

		bp->link_up = 1;
		speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
		bp->duplex = DUPLEX_FULL;
		/* Each *HALF case sets half duplex and falls through to the
		 * matching *FULL case to pick up the line speed.
		 */
		switch (speed) {
			case BNX2_LINK_STATUS_10HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_10FULL:
				bp->line_speed = SPEED_10;
				break;
			case BNX2_LINK_STATUS_100HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_100BASE_T4:
			case BNX2_LINK_STATUS_100FULL:
				bp->line_speed = SPEED_100;
				break;
			case BNX2_LINK_STATUS_1000HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_1000FULL:
				bp->line_speed = SPEED_1000;
				break;
			case BNX2_LINK_STATUS_2500HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_2500FULL:
				bp->line_speed = SPEED_2500;
				break;
			default:
				bp->line_speed = 0;
				break;
		}

		/* Without full speed+flow-control autoneg, apply the locally
		 * requested pause settings (full duplex only); otherwise take
		 * the firmware-resolved TX/RX pause bits.
		 */
		bp->flow_ctrl = 0;
		if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
			if (bp->duplex == DUPLEX_FULL)
				bp->flow_ctrl = bp->req_flow_ctrl;
		} else {
			if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_TX;
			if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_RX;
		}

		/* A media change (serdes <-> TP) resets the link defaults. */
		old_port = bp->phy_port;
		if (msg & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		if (old_port != bp->phy_port)
			bnx2_set_default_link(bp);

	}
	if (bp->link_up != link_up)
		bnx2_report_link(bp);

	bnx2_set_mac_link(bp);
}
2055
2056 static int
2057 bnx2_set_remote_link(struct bnx2 *bp)
2058 {
2059         u32 evt_code;
2060
2061         evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
2062         switch (evt_code) {
2063                 case BNX2_FW_EVT_CODE_LINK_EVENT:
2064                         bnx2_remote_phy_event(bp);
2065                         break;
2066                 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
2067                 default:
2068                         bnx2_send_heart_beat(bp);
2069                         break;
2070         }
2071         return 0;
2072 }
2073
/* Configure the copper PHY from bp->autoneg / bp->req_* settings.
 * Either (re)starts autonegotiation with the requested advertisement,
 * or forces speed/duplex through BMCR, bouncing the link if needed.
 * Called with bp->phy_lock held; may drop and re-acquire it around the
 * forced link-down delay.  Always returns 0.
 */
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 bmcr, adv_reg, new_adv = 0;
	u32 new_bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	/* Keep only the 10/100 and pause bits of the current advert. */
	bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
	adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
		    ADVERTISE_PAUSE_ASYM);

	new_adv = ADVERTISE_CSMA | ethtool_adv_to_mii_adv_t(bp->advertising);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv1000_reg;
		u32 new_adv1000 = 0;

		new_adv |= bnx2_phy_get_pause_adv(bp);

		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		new_adv1000 |= ethtool_adv_to_mii_ctrl1000_t(bp->advertising);
		/* Only restart autoneg when the advertisement actually
		 * changed or autoneg was disabled.
		 */
		if ((adv1000_reg != new_adv1000) ||
			(adv_reg != new_adv) ||
			((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, bp->mii_adv, new_adv);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000);
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
				BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* advertise nothing when forcing speed */
	if (adv_reg != new_adv)
		bnx2_write_phy(bp, bp->mii_adv, new_adv);

	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;

		/* BMSR link status is latched-low; read it twice to get
		 * the current state.
		 */
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(50);
			spin_lock_bh(&bp->phy_lock);

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		}

		bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}
	return 0;
}
2165
2166 static int
2167 bnx2_setup_phy(struct bnx2 *bp, u8 port)
2168 __releases(&bp->phy_lock)
2169 __acquires(&bp->phy_lock)
2170 {
2171         if (bp->loopback == MAC_LOOPBACK)
2172                 return 0;
2173
2174         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2175                 return bnx2_setup_serdes_phy(bp, port);
2176         }
2177         else {
2178                 return bnx2_setup_copper_phy(bp);
2179         }
2180 }
2181
/* Initialize the 5709 SerDes PHY.  The 5709S exposes its registers in
 * blocks selected through MII_BNX2_BLK_ADDR; the IEEE-standard
 * registers live at an offset of 0x10 within the combo IEEE block.
 * Register write ordering follows the hardware init sequence.
 */
static int
bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	/* IEEE-compatible MII registers are at base + 0x10 on this PHY. */
	bp->mii_bmcr = MII_BMCR + 0x10;
	bp->mii_bmsr = MII_BMSR + 0x10;
	bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
	bp->mii_adv = MII_ADVERTISE + 0x10;
	bp->mii_lpa = MII_LPA + 0x10;
	bp->mii_up1 = MII_BNX2_OVER1G_UP1;

	/* Select the autoneg MMD through the AER block first. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
	bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
	if (reset_phy)
		bnx2_reset_phy(bp);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);

	/* Force fiber mode and disable media auto-detection. */
	bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
	val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
	val |= MII_BNX2_SD_1000XCTL1_FIBER;
	bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);

	/* Advertise 2.5G in the over-1G UP1 register only when capable. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
	bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
		val |= BCM5708S_UP1_2G5;
	else
		val &= ~BCM5708S_UP1_2G5;
	bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);

	/* Enable BAM and T2 next-page exchange. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
	bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
	val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
	bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);

	/* Enable clause-73 BAM station manager and next-page handling. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);

	val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
	      MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
	bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);

	/* Leave the block address pointing at the IEEE registers. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return 0;
}
2231
/* Initialize the 5708 SerDes PHY: enable fiber mode with auto-detect
 * and PLL early-link detect, optionally advertise 2.5G, and apply
 * board-specific TX amplitude adjustments (early-revision errata and
 * NVRAM-configured backplane TX control).
 */
static int
bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	if (reset_phy)
		bnx2_reset_phy(bp);

	bp->mii_up1 = BCM5708S_UP1;

	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	/* Fiber mode with automatic media detection. */
	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	/* Enable parallel (PLL early) link detection. */
	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	/* Advertise 2.5G in UP1 when the PHY supports it. */
	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

	if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0) ||
	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B0) ||
	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B1)) {
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

	/* Apply NVRAM-provided TX control on backplane designs. */
	val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	if (val) {
		u32 is_backplane;

		is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
	return 0;
}
2289
/* Initialize the 5706 SerDes PHY.  The raw 0x18/0x1c register writes
 * are vendor shadow/expansion-register sequences from the hardware
 * init recipe; their bit meanings are not documented here -- presumably
 * they set or clear the extended packet length mode for jumbo frames,
 * as the branch on MTU suggests.
 */
static int
bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
{
	if (reset_phy)
		bnx2_reset_phy(bp);

	bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;

	/* 5706-specific GPIO/hardware control setting. */
	if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
		BNX2_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);

	if (bp->dev->mtu > ETH_DATA_LEN) {
		u32 val;

		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
	}
	else {
		u32 val;

		/* Clear the extended packet length settings. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
	}

	return 0;
}
2327
/* Initialize the copper PHY: apply errata workarounds (CRC fix, early
 * DAC disable), program extended packet length for jumbo MTU, and
 * enable ethernet@wirespeed plus auto-MDIX on the 5709.
 */
static int
bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	if (reset_phy)
		bnx2_reset_phy(bp);

	if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
		/* Vendor CRC workaround written through the shadow/DSP
		 * registers (0x17 address, 0x15 data); values are from the
		 * hardware init recipe and not otherwise documented here.
		 */
		bnx2_write_phy(bp, 0x18, 0x0c00);
		bnx2_write_phy(bp, 0x17, 0x000a);
		bnx2_write_phy(bp, 0x15, 0x310b);
		bnx2_write_phy(bp, 0x17, 0x201f);
		bnx2_write_phy(bp, 0x15, 0x9506);
		bnx2_write_phy(bp, 0x17, 0x401f);
		bnx2_write_phy(bp, 0x15, 0x14e2);
		bnx2_write_phy(bp, 0x18, 0x0400);
	}

	if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
		/* Clear bit 8 of DSP expand register 8 (early DAC). */
		bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
			       MII_BNX2_DSP_EXPAND_REG | 0x8);
		bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
		val &= ~(1 << 8);
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
	}

	if (bp->dev->mtu > ETH_DATA_LEN) {
		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val | 0x4000);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val | 0x1);
	}
	else {
		/* Clear the extended packet length settings. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val & ~0x1);
	}

	/* ethernet@wirespeed */
	bnx2_write_phy(bp, MII_BNX2_AUX_CTL, AUX_CTL_MISC_CTL);
	bnx2_read_phy(bp, MII_BNX2_AUX_CTL, &val);
	val |=  AUX_CTL_MISC_CTL_WR | AUX_CTL_MISC_CTL_WIRESPEED;

	/* auto-mdix */
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
		val |=  AUX_CTL_MISC_CTL_AUTOMDIX;

	bnx2_write_phy(bp, MII_BNX2_AUX_CTL, val);
	return 0;
}
2385
2386
2387 static int
2388 bnx2_init_phy(struct bnx2 *bp, int reset_phy)
2389 __releases(&bp->phy_lock)
2390 __acquires(&bp->phy_lock)
2391 {
2392         u32 val;
2393         int rc = 0;
2394
2395         bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
2396         bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;
2397
2398         bp->mii_bmcr = MII_BMCR;
2399         bp->mii_bmsr = MII_BMSR;
2400         bp->mii_bmsr1 = MII_BMSR;
2401         bp->mii_adv = MII_ADVERTISE;
2402         bp->mii_lpa = MII_LPA;
2403
2404         BNX2_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
2405
2406         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
2407                 goto setup_phy;
2408
2409         bnx2_read_phy(bp, MII_PHYSID1, &val);
2410         bp->phy_id = val << 16;
2411         bnx2_read_phy(bp, MII_PHYSID2, &val);
2412         bp->phy_id |= val & 0xffff;
2413
2414         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2415                 if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
2416                         rc = bnx2_init_5706s_phy(bp, reset_phy);
2417                 else if (BNX2_CHIP(bp) == BNX2_CHIP_5708)
2418                         rc = bnx2_init_5708s_phy(bp, reset_phy);
2419                 else if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
2420                         rc = bnx2_init_5709s_phy(bp, reset_phy);
2421         }
2422         else {
2423                 rc = bnx2_init_copper_phy(bp, reset_phy);
2424         }
2425
2426 setup_phy:
2427         if (!rc)
2428                 rc = bnx2_setup_phy(bp, bp->phy_port);
2429
2430         return rc;
2431 }
2432
2433 static int
2434 bnx2_set_mac_loopback(struct bnx2 *bp)
2435 {
2436         u32 mac_mode;
2437
2438         mac_mode = BNX2_RD(bp, BNX2_EMAC_MODE);
2439         mac_mode &= ~BNX2_EMAC_MODE_PORT;
2440         mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2441         BNX2_WR(bp, BNX2_EMAC_MODE, mac_mode);
2442         bp->link_up = 1;
2443         return 0;
2444 }
2445
2446 static int bnx2_test_link(struct bnx2 *);
2447
2448 static int
2449 bnx2_set_phy_loopback(struct bnx2 *bp)
2450 {
2451         u32 mac_mode;
2452         int rc, i;
2453
2454         spin_lock_bh(&bp->phy_lock);
2455         rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
2456                             BMCR_SPEED1000);
2457         spin_unlock_bh(&bp->phy_lock);
2458         if (rc)
2459                 return rc;
2460
2461         for (i = 0; i < 10; i++) {
2462                 if (bnx2_test_link(bp) == 0)
2463                         break;
2464                 msleep(100);
2465         }
2466
2467         mac_mode = BNX2_RD(bp, BNX2_EMAC_MODE);
2468         mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
2469                       BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
2470                       BNX2_EMAC_MODE_25G_MODE);
2471
2472         mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
2473         BNX2_WR(bp, BNX2_EMAC_MODE, mac_mode);
2474         bp->link_up = 1;
2475         return 0;
2476 }
2477
/* Dump management CPU (MCP) registers and shared-memory state to the
 * kernel log to help diagnose firmware hangs.
 */
static void
bnx2_dump_mcp_state(struct bnx2 *bp)
{
	struct net_device *dev = bp->dev;
	u32 mcp_p0, mcp_p1;

	netdev_err(dev, "<--- start MCP states dump --->\n");
	/* The MCP state registers moved on the 5709. */
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		mcp_p0 = BNX2_MCP_STATE_P0;
		mcp_p1 = BNX2_MCP_STATE_P1;
	} else {
		mcp_p0 = BNX2_MCP_STATE_P0_5708;
		mcp_p1 = BNX2_MCP_STATE_P1_5708;
	}
	netdev_err(dev, "DEBUG: MCP_STATE_P0[%08x] MCP_STATE_P1[%08x]\n",
		   bnx2_reg_rd_ind(bp, mcp_p0), bnx2_reg_rd_ind(bp, mcp_p1));
	netdev_err(dev, "DEBUG: MCP mode[%08x] state[%08x] evt_mask[%08x]\n",
		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_MODE),
		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_STATE),
		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_EVENT_MASK));
	/* The program counter is sampled twice on purpose -- presumably so
	 * the two values show whether the MCP is still advancing; confirm
	 * against firmware documentation.
	 */
	netdev_err(dev, "DEBUG: pc[%08x] pc[%08x] instr[%08x]\n",
		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_PROGRAM_COUNTER),
		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_PROGRAM_COUNTER),
		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_INSTRUCTION));
	netdev_err(dev, "DEBUG: shmem states:\n");
	netdev_err(dev, "DEBUG: drv_mb[%08x] fw_mb[%08x] link_status[%08x]",
		   bnx2_shmem_rd(bp, BNX2_DRV_MB),
		   bnx2_shmem_rd(bp, BNX2_FW_MB),
		   bnx2_shmem_rd(bp, BNX2_LINK_STATUS));
	pr_cont(" drv_pulse_mb[%08x]\n", bnx2_shmem_rd(bp, BNX2_DRV_PULSE_MB));
	netdev_err(dev, "DEBUG: dev_info_signature[%08x] reset_type[%08x]",
		   bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE),
		   bnx2_shmem_rd(bp, BNX2_BC_STATE_RESET_TYPE));
	pr_cont(" condition[%08x]\n",
		bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION));
	DP_SHMEM_LINE(bp, BNX2_BC_RESET_TYPE);
	DP_SHMEM_LINE(bp, 0x3cc);
	DP_SHMEM_LINE(bp, 0x3dc);
	DP_SHMEM_LINE(bp, 0x3ec);
	netdev_err(dev, "DEBUG: 0x3fc[%08x]\n", bnx2_shmem_rd(bp, 0x3fc));
	netdev_err(dev, "<--- end MCP states dump --->\n");
}
2520
/* Post a message to the bootcode firmware through the shared-memory
 * driver mailbox and optionally wait for the acknowledgement.
 *
 * @msg_data: message code/data; the low bits are overwritten with the
 *	driver sequence number so the firmware can detect a new message
 * @ack: if zero, fire-and-forget; otherwise poll for the firmware ack
 * @silent: suppress the error log and MCP dump on timeout
 *
 * Returns 0 on success, -EBUSY if the ack timed out, or -EIO if the
 * firmware acked with a failure status.
 */
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
{
	int i;
	u32 val;

	/* Advance the sequence number and tag the message with it. */
	bp->fw_wr_seq++;
	msg_data |= bp->fw_wr_seq;
	bp->fw_last_msg = msg_data;

	bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);

	if (!ack)
		return 0;

	/* wait for an acknowledgement. */
	for (i = 0; i < (BNX2_FW_ACK_TIME_OUT_MS / 10); i++) {
		msleep(10);

		val = bnx2_shmem_rd(bp, BNX2_FW_MB);

		/* The firmware echoes our sequence number in its ack. */
		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
			break;
	}
	/* WAIT0 messages are not checked for completion status. */
	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
		return 0;

	/* If we timed out, inform the firmware that this is the case. */
	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
		msg_data &= ~BNX2_DRV_MSG_CODE;
		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

		bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
		if (!silent) {
			pr_err("fw sync timeout, reset code = %x\n", msg_data);
			bnx2_dump_mcp_state(bp);
		}

		return -EBUSY;
	}

	/* Ack received in time; check the status the firmware reported. */
	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
		return -EIO;

	return 0;
}
2567
/* Initialize the host-resident context memory used by the 5709 family:
 * kick the chip's context memory init, then program the host page
 * table with the DMA address of every pre-allocated context page.
 * Returns 0 on success, -EBUSY if the hardware did not complete an
 * operation in time, or -ENOMEM if a context page was never allocated.
 */
static int
bnx2_init_5709_context(struct bnx2 *bp)
{
	int i, ret = 0;
	u32 val;

	/* Enable the context block, request memory init, and program
	 * the page size (log2 of page size, minus 8, in bits 16+).
	 */
	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
	val |= (BNX2_PAGE_BITS - 8) << 16;
	BNX2_WR(bp, BNX2_CTX_COMMAND, val);
	/* MEM_INIT self-clears when the hardware is done. */
	for (i = 0; i < 10; i++) {
		val = BNX2_RD(bp, BNX2_CTX_COMMAND);
		if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
			break;
		udelay(2);
	}
	if (val & BNX2_CTX_COMMAND_MEM_INIT)
		return -EBUSY;

	for (i = 0; i < bp->ctx_pages; i++) {
		int j;

		if (bp->ctx_blk[i])
			memset(bp->ctx_blk[i], 0, BNX2_PAGE_SIZE);
		else
			return -ENOMEM;

		/* Program one page-table entry: low dword plus valid
		 * bit, high dword, then trigger the write request.
		 */
		BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
			(bp->ctx_blk_mapping[i] & 0xffffffff) |
			BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
		BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
			(u64) bp->ctx_blk_mapping[i] >> 32);
		BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
			BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
		/* WRITE_REQ self-clears when the entry has been taken. */
		for (j = 0; j < 10; j++) {

			val = BNX2_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
				break;
			udelay(5);
		}
		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}
2615
/* Zero out the on-chip context memory for all 96 contexts.  Used on
 * chips where context lives on-chip (the 5709 family uses
 * bnx2_init_5709_context() with host memory instead).
 */
static void
bnx2_init_context(struct bnx2 *bp)
{
	u32 vcid;

	vcid = 96;
	while (vcid) {
		u32 vcid_addr, pcid_addr, offset;
		int i;

		vcid--;

		if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
			u32 new_vcid;

			/* 5706 A0 workaround: certain virtual context
			 * IDs must be remapped to different physical
			 * context IDs.  NOTE(review): remap formula
			 * taken as-is from the original workaround.
			 */
			vcid_addr = GET_PCID_ADDR(vcid);
			if (vcid & 0x8) {
				new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
			}
			else {
				new_vcid = vcid;
			}
			pcid_addr = GET_PCID_ADDR(new_vcid);
		}
		else {
			vcid_addr = GET_CID_ADDR(vcid);
			pcid_addr = vcid_addr;
		}

		/* A context spans several physical pages; map and clear
		 * each one in turn.
		 */
		for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
			vcid_addr += (i << PHY_CTX_SHIFT);
			pcid_addr += (i << PHY_CTX_SHIFT);

			BNX2_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
			BNX2_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);

			/* Zero out the context. */
			for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
				bnx2_ctx_wr(bp, vcid_addr, offset, 0);
		}
	}
}
2658
/* Workaround for bad on-chip rx buffer memory: allocate every free rx
 * mbuf from the chip, remember the good ones, then free only the good
 * ones back to the pool.  Buffers whose address has bit 9 set are bad
 * blocks and remain permanently allocated (quarantined).
 * Returns 0 on success or -ENOMEM.
 */
static int
bnx2_alloc_bad_rbuf(struct bnx2 *bp)
{
	u16 *good_mbuf;
	u32 good_mbuf_cnt;
	u32 val;

	/* 512 entries is large enough for the entire mbuf pool. */
	good_mbuf = kmalloc_array(512, sizeof(u16), GFP_KERNEL);
	if (!good_mbuf)
		return -ENOMEM;

	BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
		BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);

	good_mbuf_cnt = 0;

	/* Allocate a bunch of mbufs and save the good ones in an array. */
	val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
	while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
		bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
				BNX2_RBUF_COMMAND_ALLOC_REQ);

		val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);

		val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;

		/* The addresses with Bit 9 set are bad memory blocks. */
		if (!(val & (1 << 9))) {
			good_mbuf[good_mbuf_cnt] = (u16) val;
			good_mbuf_cnt++;
		}

		val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
	}

	/* Free the good ones back to the mbuf pool thus discarding
	 * all the bad ones. */
	while (good_mbuf_cnt) {
		good_mbuf_cnt--;

		val = good_mbuf[good_mbuf_cnt];
		/* Encode the buffer handle in the format the firmware
		 * free register expects.
		 */
		val = (val << 9) | val | 1;

		bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
	}
	kfree(good_mbuf);
	return 0;
}
2707
2708 static void
2709 bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos)
2710 {
2711         u32 val;
2712
2713         val = (mac_addr[0] << 8) | mac_addr[1];
2714
2715         BNX2_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val);
2716
2717         val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2718                 (mac_addr[4] << 8) | mac_addr[5];
2719
2720         BNX2_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val);
2721 }
2722
2723 static inline int
2724 bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
2725 {
2726         dma_addr_t mapping;
2727         struct bnx2_sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2728         struct bnx2_rx_bd *rxbd =
2729                 &rxr->rx_pg_desc_ring[BNX2_RX_RING(index)][BNX2_RX_IDX(index)];
2730         struct page *page = alloc_page(gfp);
2731
2732         if (!page)
2733                 return -ENOMEM;
2734         mapping = dma_map_page(&bp->pdev->dev, page, 0, PAGE_SIZE,
2735                                PCI_DMA_FROMDEVICE);
2736         if (dma_mapping_error(&bp->pdev->dev, mapping)) {
2737                 __free_page(page);
2738                 return -EIO;
2739         }
2740
2741         rx_pg->page = page;
2742         dma_unmap_addr_set(rx_pg, mapping, mapping);
2743         rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2744         rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2745         return 0;
2746 }
2747
2748 static void
2749 bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2750 {
2751         struct bnx2_sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2752         struct page *page = rx_pg->page;
2753
2754         if (!page)
2755                 return;
2756
2757         dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(rx_pg, mapping),
2758                        PAGE_SIZE, PCI_DMA_FROMDEVICE);
2759
2760         __free_page(page);
2761         rx_pg->page = NULL;
2762 }
2763
2764 static inline int
2765 bnx2_alloc_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
2766 {
2767         u8 *data;
2768         struct bnx2_sw_bd *rx_buf = &rxr->rx_buf_ring[index];
2769         dma_addr_t mapping;
2770         struct bnx2_rx_bd *rxbd =
2771                 &rxr->rx_desc_ring[BNX2_RX_RING(index)][BNX2_RX_IDX(index)];
2772
2773         data = kmalloc(bp->rx_buf_size, gfp);
2774         if (!data)
2775                 return -ENOMEM;
2776
2777         mapping = dma_map_single(&bp->pdev->dev,
2778                                  get_l2_fhdr(data),
2779                                  bp->rx_buf_use_size,
2780                                  PCI_DMA_FROMDEVICE);
2781         if (dma_mapping_error(&bp->pdev->dev, mapping)) {
2782                 kfree(data);
2783                 return -EIO;
2784         }
2785
2786         rx_buf->data = data;
2787         dma_unmap_addr_set(rx_buf, mapping, mapping);
2788
2789         rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2790         rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2791
2792         rxr->rx_prod_bseq += bp->rx_buf_use_size;
2793
2794         return 0;
2795 }
2796
2797 static int
2798 bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
2799 {
2800         struct status_block *sblk = bnapi->status_blk.msi;
2801         u32 new_link_state, old_link_state;
2802         int is_set = 1;
2803
2804         new_link_state = sblk->status_attn_bits & event;
2805         old_link_state = sblk->status_attn_bits_ack & event;
2806         if (new_link_state != old_link_state) {
2807                 if (new_link_state)
2808                         BNX2_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2809                 else
2810                         BNX2_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2811         } else
2812                 is_set = 0;
2813
2814         return is_set;
2815 }
2816
2817 static void
2818 bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
2819 {
2820         spin_lock(&bp->phy_lock);
2821
2822         if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
2823                 bnx2_set_link(bp);
2824         if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
2825                 bnx2_set_remote_link(bp);
2826
2827         spin_unlock(&bp->phy_lock);
2828
2829 }
2830
2831 static inline u16
2832 bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
2833 {
2834         u16 cons;
2835
2836         cons = READ_ONCE(*bnapi->hw_tx_cons_ptr);
2837
2838         if (unlikely((cons & BNX2_MAX_TX_DESC_CNT) == BNX2_MAX_TX_DESC_CNT))
2839                 cons++;
2840         return cons;
2841 }
2842
/* Reclaim completed tx descriptors for this NAPI instance, up to
 * @budget packets: unmap and free each transmitted skb, update the
 * BQL counters, and wake the tx queue if it was stopped and enough
 * ring space has become available.
 * Returns the number of packets reclaimed.
 */
static int
bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
	u16 hw_cons, sw_cons, sw_ring_cons;
	int tx_pkt = 0, index;
	unsigned int tx_bytes = 0;
	struct netdev_queue *txq;

	/* One tx queue per NAPI instance. */
	index = (bnapi - bp->bnx2_napi);
	txq = netdev_get_tx_queue(bp->dev, index);

	hw_cons = bnx2_get_hw_tx_cons(bnapi);
	sw_cons = txr->tx_cons;

	while (sw_cons != hw_cons) {
		struct bnx2_sw_tx_bd *tx_buf;
		struct sk_buff *skb;
		int i, last;

		sw_ring_cons = BNX2_TX_RING_IDX(sw_cons);

		tx_buf = &txr->tx_buf_ring[sw_ring_cons];
		skb = tx_buf->skb;

		/* prefetch skb_end_pointer() to speedup skb_shinfo(skb) */
		prefetch(&skb->end);

		/* partial BD completions possible with TSO packets */
		if (tx_buf->is_gso) {
			u16 last_idx, last_ring_idx;

			/* Index of the last BD of this packet; the extra
			 * increment accounts for the skipped end-of-ring
			 * (chain) entry.
			 */
			last_idx = sw_cons + tx_buf->nr_frags + 1;
			last_ring_idx = sw_ring_cons + tx_buf->nr_frags + 1;
			if (unlikely(last_ring_idx >= BNX2_MAX_TX_DESC_CNT)) {
				last_idx++;
			}
			/* Stop if the packet's last BD has not completed
			 * yet; the signed 16-bit difference handles index
			 * wrap-around.
			 */
			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
				break;
			}
		}

		dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;
		last = tx_buf->nr_frags;

		/* Unmap each fragment's BD. */
		for (i = 0; i < last; i++) {
			struct bnx2_sw_tx_bd *tx_buf;

			sw_cons = BNX2_NEXT_TX_BD(sw_cons);

			tx_buf = &txr->tx_buf_ring[BNX2_TX_RING_IDX(sw_cons)];
			dma_unmap_page(&bp->pdev->dev,
				dma_unmap_addr(tx_buf, mapping),
				skb_frag_size(&skb_shinfo(skb)->frags[i]),
				PCI_DMA_TODEVICE);
		}

		sw_cons = BNX2_NEXT_TX_BD(sw_cons);

		tx_bytes += skb->len;
		dev_kfree_skb_any(skb);
		tx_pkt++;
		if (tx_pkt == budget)
			break;

		/* Refresh hw_cons to pick up newly completed packets. */
		if (hw_cons == sw_cons)
			hw_cons = bnx2_get_hw_tx_cons(bnapi);
	}

	/* Report completed work to byte queue limits. */
	netdev_tx_completed_queue(txq, tx_pkt, tx_bytes);
	txr->hw_tx_cons = hw_cons;
	txr->tx_cons = sw_cons;

	/* Need to make the tx_cons update visible to bnx2_start_xmit()
	 * before checking for netif_tx_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnx2_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq)) &&
		     (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
		__netif_tx_lock(txq, smp_processor_id());
		/* Re-check under the tx lock to avoid racing with a
		 * concurrent queue stop in the transmit path.
		 */
		if ((netif_tx_queue_stopped(txq)) &&
		    (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}

	return tx_pkt;
}
2937
/* Recycle @count page-ring entries from the consumer side back to the
 * producer side without allocating new pages.  If @skb is non-NULL,
 * the last frag page could not be replaced, so that page is reclaimed
 * from the skb (which is then freed) and recycled as well.
 */
static void
bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
			struct sk_buff *skb, int count)
{
	struct bnx2_sw_pg *cons_rx_pg, *prod_rx_pg;
	struct bnx2_rx_bd *cons_bd, *prod_bd;
	int i;
	u16 hw_prod, prod;
	u16 cons = rxr->rx_pg_cons;

	cons_rx_pg = &rxr->rx_pg_ring[cons];

	/* The caller was unable to allocate a new page to replace the
	 * last one in the frags array, so we need to recycle that page
	 * and then free the skb.
	 */
	if (skb) {
		struct page *page;
		struct skb_shared_info *shinfo;

		/* Detach the last frag page from the skb and put it
		 * back into the consumer slot.
		 */
		shinfo = skb_shinfo(skb);
		shinfo->nr_frags--;
		page = skb_frag_page(&shinfo->frags[shinfo->nr_frags]);
		__skb_frag_set_page(&shinfo->frags[shinfo->nr_frags], NULL);

		cons_rx_pg->page = page;
		dev_kfree_skb(skb);
	}

	hw_prod = rxr->rx_pg_prod;

	/* Move each consumer page (and its DMA mapping and BD address)
	 * to the corresponding producer slot.
	 */
	for (i = 0; i < count; i++) {
		prod = BNX2_RX_PG_RING_IDX(hw_prod);

		prod_rx_pg = &rxr->rx_pg_ring[prod];
		cons_rx_pg = &rxr->rx_pg_ring[cons];
		cons_bd = &rxr->rx_pg_desc_ring[BNX2_RX_RING(cons)]
						[BNX2_RX_IDX(cons)];
		prod_bd = &rxr->rx_pg_desc_ring[BNX2_RX_RING(prod)]
						[BNX2_RX_IDX(prod)];

		if (prod != cons) {
			prod_rx_pg->page = cons_rx_pg->page;
			cons_rx_pg->page = NULL;
			dma_unmap_addr_set(prod_rx_pg, mapping,
				dma_unmap_addr(cons_rx_pg, mapping));

			prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
			prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;

		}
		cons = BNX2_RX_PG_RING_IDX(BNX2_NEXT_RX_BD(cons));
		hw_prod = BNX2_NEXT_RX_BD(hw_prod);
	}
	rxr->rx_pg_prod = hw_prod;
	rxr->rx_pg_cons = cons;
}
2995
/* Recycle the rx data buffer @data from ring slot @cons to slot @prod
 * without reallocating: the data pointer, DMA mapping and BD address
 * move over to the producer slot.  Used on error paths and for
 * copy-break packets where the original buffer is kept.
 */
static inline void
bnx2_reuse_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
		   u8 *data, u16 cons, u16 prod)
{
	struct bnx2_sw_bd *cons_rx_buf, *prod_rx_buf;
	struct bnx2_rx_bd *cons_bd, *prod_bd;

	cons_rx_buf = &rxr->rx_buf_ring[cons];
	prod_rx_buf = &rxr->rx_buf_ring[prod];

	/* Give the header/copy-break region back to the device; only
	 * that much was synced for the CPU in the rx path.
	 */
	dma_sync_single_for_device(&bp->pdev->dev,
		dma_unmap_addr(cons_rx_buf, mapping),
		BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	rxr->rx_prod_bseq += bp->rx_buf_use_size;

	prod_rx_buf->data = data;

	if (cons == prod)
		return;

	/* Different slots: carry the mapping and BD address across. */
	dma_unmap_addr_set(prod_rx_buf, mapping,
			dma_unmap_addr(cons_rx_buf, mapping));

	cons_bd = &rxr->rx_desc_ring[BNX2_RX_RING(cons)][BNX2_RX_IDX(cons)];
	prod_bd = &rxr->rx_desc_ring[BNX2_RX_RING(prod)][BNX2_RX_IDX(prod)];
	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}
3025
/* Build an skb around the received buffer @data.
 *
 * @len: packet length excluding the trailing 4-byte CRC
 * @hdr_len: non-zero for split-header/jumbo packets, in which case the
 *	first @hdr_len bytes are in @data and the rest of the packet is
 *	in page-ring pages that become skb frags
 * @dma_addr: DMA mapping of @data
 * @ring_idx: packed indices — consumer in the high 16 bits, producer
 *	in the low 16 bits
 *
 * Returns the skb, or NULL on allocation failure (in which case all
 * buffers and pages are recycled back onto their rings).
 */
static struct sk_buff *
bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u8 *data,
	    unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
	    u32 ring_idx)
{
	int err;
	u16 prod = ring_idx & 0xffff;
	struct sk_buff *skb;

	/* Replace the consumed buffer before taking ownership of @data. */
	err = bnx2_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
	if (unlikely(err)) {
		bnx2_reuse_rx_data(bp, rxr, data, (u16) (ring_idx >> 16), prod);
error:
		/* For split packets also recycle the page-ring pages
		 * that held the rest of the frame.
		 */
		if (hdr_len) {
			unsigned int raw_len = len + 4;
			int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;

			bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
		}
		return NULL;
	}

	dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
			 PCI_DMA_FROMDEVICE);
	skb = build_skb(data, 0);
	if (!skb) {
		kfree(data);
		goto error;
	}
	skb_reserve(skb, ((u8 *)get_l2_fhdr(data) - data) + BNX2_RX_OFFSET);
	if (hdr_len == 0) {
		/* Entire packet fits in the data buffer. */
		skb_put(skb, len);
		return skb;
	} else {
		unsigned int i, frag_len, frag_size, pages;
		struct bnx2_sw_pg *rx_pg;
		u16 pg_cons = rxr->rx_pg_cons;
		u16 pg_prod = rxr->rx_pg_prod;

		/* Remaining bytes (including CRC) live in the page ring. */
		frag_size = len + 4 - hdr_len;
		pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
		skb_put(skb, hdr_len);

		for (i = 0; i < pages; i++) {
			dma_addr_t mapping_old;

			frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
			/* If what is left is only (part of) the CRC,
			 * trim it off the skb, recycle the remaining
			 * pages, and finish.
			 */
			if (unlikely(frag_len <= 4)) {
				unsigned int tail = 4 - frag_len;

				rxr->rx_pg_cons = pg_cons;
				rxr->rx_pg_prod = pg_prod;
				bnx2_reuse_rx_skb_pages(bp, rxr, NULL,
							pages - i);
				skb->len -= tail;
				if (i == 0) {
					skb->tail -= tail;
				} else {
					skb_frag_t *frag =
						&skb_shinfo(skb)->frags[i - 1];
					skb_frag_size_sub(frag, tail);
					skb->data_len -= tail;
				}
				return skb;
			}
			rx_pg = &rxr->rx_pg_ring[pg_cons];

			/* Don't unmap yet.  If we're unable to allocate a new
			 * page, we need to recycle the page and the DMA addr.
			 */
			mapping_old = dma_unmap_addr(rx_pg, mapping);
			if (i == pages - 1)
				frag_len -= 4;	/* strip the CRC */

			skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
			rx_pg->page = NULL;

			err = bnx2_alloc_rx_page(bp, rxr,
						 BNX2_RX_PG_RING_IDX(pg_prod),
						 GFP_ATOMIC);
			if (unlikely(err)) {
				/* Recycle everything, including the page
				 * just attached to the skb; the skb is
				 * freed by the helper.
				 */
				rxr->rx_pg_cons = pg_cons;
				rxr->rx_pg_prod = pg_prod;
				bnx2_reuse_rx_skb_pages(bp, rxr, skb,
							pages - i);
				return NULL;
			}

			dma_unmap_page(&bp->pdev->dev, mapping_old,
				       PAGE_SIZE, PCI_DMA_FROMDEVICE);

			frag_size -= frag_len;
			skb->data_len += frag_len;
			skb->truesize += PAGE_SIZE;
			skb->len += frag_len;

			pg_prod = BNX2_NEXT_RX_BD(pg_prod);
			pg_cons = BNX2_RX_PG_RING_IDX(BNX2_NEXT_RX_BD(pg_cons));
		}
		rxr->rx_pg_prod = pg_prod;
		rxr->rx_pg_cons = pg_cons;
	}
	return skb;
}
3130
3131 static inline u16
3132 bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
3133 {
3134         u16 cons;
3135
3136         cons = READ_ONCE(*bnapi->hw_rx_cons_ptr);
3137
3138         if (unlikely((cons & BNX2_MAX_RX_DESC_CNT) == BNX2_MAX_RX_DESC_CNT))
3139                 cons++;
3140         return cons;
3141 }
3142
3143 static int
3144 bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
3145 {
3146         struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3147         u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
3148         struct l2_fhdr *rx_hdr;
3149         int rx_pkt = 0, pg_ring_used = 0;
3150
3151         if (budget <= 0)
3152                 return rx_pkt;
3153
3154         hw_cons = bnx2_get_hw_rx_cons(bnapi);
3155         sw_cons = rxr->rx_cons;
3156         sw_prod = rxr->rx_prod;
3157
3158         /* Memory barrier necessary as speculative reads of the rx
3159          * buffer can be ahead of the index in the status block
3160          */
3161         rmb();
3162         while (sw_cons != hw_cons) {
3163                 unsigned int len, hdr_len;
3164                 u32 status;
3165                 struct bnx2_sw_bd *rx_buf, *next_rx_buf;
3166                 struct sk_buff *skb;
3167                 dma_addr_t dma_addr;
3168                 u8 *data;
3169                 u16 next_ring_idx;
3170
3171                 sw_ring_cons = BNX2_RX_RING_IDX(sw_cons);
3172                 sw_ring_prod = BNX2_RX_RING_IDX(sw_prod);
3173
3174                 rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
3175                 data = rx_buf->data;
3176                 rx_buf->data = NULL;
3177
3178                 rx_hdr = get_l2_fhdr(data);
3179                 prefetch(rx_hdr);
3180
3181                 dma_addr = dma_unmap_addr(rx_buf, mapping);
3182
3183                 dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr,
3184                         BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
3185                         PCI_DMA_FROMDEVICE);
3186
3187                 next_ring_idx = BNX2_RX_RING_IDX(BNX2_NEXT_RX_BD(sw_cons));
3188                 next_rx_buf = &rxr->rx_buf_ring[next_ring_idx];
3189                 prefetch(get_l2_fhdr(next_rx_buf->data));
3190
3191                 len = rx_hdr->l2_fhdr_pkt_len;
3192                 status = rx_hdr->l2_fhdr_status;
3193
3194                 hdr_len = 0;
3195                 if (status & L2_FHDR_STATUS_SPLIT) {
3196                         hdr_len = rx_hdr->l2_fhdr_ip_xsum;
3197                         pg_ring_used = 1;
3198                 } else if (len > bp->rx_jumbo_thresh) {
3199                         hdr_len = bp->rx_jumbo_thresh;
3200                         pg_ring_used = 1;
3201                 }
3202
3203                 if (unlikely(status & (L2_FHDR_ERRORS_BAD_CRC |
3204                                        L2_FHDR_ERRORS_PHY_DECODE |
3205                                        L2_FHDR_ERRORS_ALIGNMENT |
3206                                        L2_FHDR_ERRORS_TOO_SHORT |
3207                                        L2_FHDR_ERRORS_GIANT_FRAME))) {
3208
3209                         bnx2_reuse_rx_data(bp, rxr, data, sw_ring_cons,
3210                                           sw_ring_prod);
3211                         if (pg_ring_used) {
3212                                 int pages;
3213
3214                                 pages = PAGE_ALIGN(len - hdr_len) >> PAGE_SHIFT;
3215
3216                                 bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
3217                         }
3218                         goto next_rx;
3219                 }
3220
3221                 len -= 4;
3222
3223                 if (len <= bp->rx_copy_thresh) {
3224                         skb = netdev_alloc_skb(bp->dev, len + 6);
3225                         if (!skb) {
3226                                 bnx2_reuse_rx_data(bp, rxr, data, sw_ring_cons,
3227                                                   sw_ring_prod);
3228                                 goto next_rx;
3229                         }
3230
3231                         /* aligned copy */
3232                         memcpy(skb->data,
3233                                (u8 *)rx_hdr + BNX2_RX_OFFSET - 6,
3234                                len + 6);
3235                         skb_reserve(skb, 6);
3236                         skb_put(skb, len);
3237
3238                         bnx2_reuse_rx_data(bp, rxr, data,
3239                                 sw_ring_cons, sw_ring_prod);
3240
3241                 } else {
3242                         skb = bnx2_rx_skb(bp, rxr, data, len, hdr_len, dma_addr,
3243                                           (sw_ring_cons << 16) | sw_ring_prod);
3244                         if (!skb)
3245                                 goto next_rx;
3246                 }
3247                 if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
3248                     !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG))
3249                         __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rx_hdr->l2_fhdr_vlan_tag);
3250
3251                 skb->protocol = eth_type_trans(skb, bp->dev);
3252
3253                 if (len > (bp->dev->mtu + ETH_HLEN) &&
3254                     skb->protocol != htons(0x8100) &&
3255                     skb->protocol != htons(ETH_P_8021AD)) {
3256
3257                         dev_kfree_skb(skb);
3258                         goto next_rx;
3259
3260                 }
3261
3262                 skb_checksum_none_assert(skb);
3263                 if ((bp->dev->features & NETIF_F_RXCSUM) &&
3264                         (status & (L2_FHDR_STATUS_TCP_SEGMENT |
3265                         L2_FHDR_STATUS_UDP_DATAGRAM))) {
3266
3267                         if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
3268                                               L2_FHDR_ERRORS_UDP_XSUM)) == 0))
3269                                 skb->ip_summed = CHECKSUM_UNNECESSARY;
3270                 }
3271                 if ((bp->dev->features & NETIF_F_RXHASH) &&
3272                     ((status & L2_FHDR_STATUS_USE_RXHASH) ==
3273                      L2_FHDR_STATUS_USE_RXHASH))
3274                         skb_set_hash(skb, rx_hdr->l2_fhdr_hash,
3275                                      PKT_HASH_TYPE_L3);
3276
3277                 skb_record_rx_queue(skb, bnapi - &bp->bnx2_napi[0]);
3278                 napi_gro_receive(&bnapi->napi, skb);
3279                 rx_pkt++;
3280
3281 next_rx:
3282                 sw_cons = BNX2_NEXT_RX_BD(sw_cons);
3283                 sw_prod = BNX2_NEXT_RX_BD(sw_prod);
3284
3285                 if (rx_pkt == budget)
3286                         break;
3287
3288                 /* Refresh hw_cons to see if there is new work */
3289                 if (sw_cons == hw_cons) {
3290                         hw_cons = bnx2_get_hw_rx_cons(bnapi);
3291                         rmb();
3292                 }
3293         }
3294         rxr->rx_cons = sw_cons;
3295         rxr->rx_prod = sw_prod;
3296
3297         if (pg_ring_used)
3298                 BNX2_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
3299
3300         BNX2_WR16(bp, rxr->rx_bidx_addr, sw_prod);
3301
3302         BNX2_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
3303
3304         mmiowb();
3305
3306         return rx_pkt;
3307
3308 }
3309
3310 /* MSI ISR - The only difference between this and the INTx ISR
3311  * is that the MSI interrupt is always serviced.
3312  */
static irqreturn_t
bnx2_msi(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;

	/* Warm the cache with the status block before touching it in NAPI. */
	prefetch(bnapi->status_blk.msi);
	/* Ack the interrupt and keep further interrupts masked until NAPI
	 * processing re-enables them.
	 */
	BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	napi_schedule(&bnapi->napi);

	return IRQ_HANDLED;
}
3332
/* One-shot MSI handler: unlike bnx2_msi(), no explicit ack/mask register
 * write is needed before scheduling NAPI.
 */
static irqreturn_t
bnx2_msi_1shot(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;

	prefetch(bnapi->status_blk.msi);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	napi_schedule(&bnapi->napi);

	return IRQ_HANDLED;
}
3349
/* INTx (legacy) interrupt handler; also used when sharing the line. */
static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;
	struct status_block *sblk = bnapi->status_blk.msi;

	/* When using INTx, it is possible for the interrupt to arrive
	 * at the CPU before the status block posted prior to the
	 * interrupt. Reading a register will flush the status block.
	 * When using MSI, the MSI message will always complete after
	 * the status block write.
	 */
	if ((sblk->status_idx == bnapi->last_status_idx) &&
	    (BNX2_RD(bp, BNX2_PCICFG_MISC_STATUS) &
	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
		return IRQ_NONE;

	/* Ack and mask further interrupts until NAPI re-enables them. */
	BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Read back to deassert IRQ immediately to avoid too many
	 * spurious interrupts.
	 */
	BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD);

	/* Return here if interrupt is shared and is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	/* Record the status index we have seen only if NAPI was not
	 * already scheduled, so the poll loop acks the right index.
	 */
	if (napi_schedule_prep(&bnapi->napi)) {
		bnapi->last_status_idx = sblk->status_idx;
		__napi_schedule(&bnapi->napi);
	}

	return IRQ_HANDLED;
}
3388
3389 static inline int
3390 bnx2_has_fast_work(struct bnx2_napi *bnapi)
3391 {
3392         struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3393         struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3394
3395         if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
3396             (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
3397                 return 1;
3398         return 0;
3399 }
3400
3401 #define STATUS_ATTN_EVENTS      (STATUS_ATTN_BITS_LINK_STATE | \
3402                                  STATUS_ATTN_BITS_TIMER_ABORT)
3403
/* Like bnx2_has_fast_work() but additionally checks for pending CNIC
 * events and unacknowledged attention (link/timer) events.
 */
static inline int
bnx2_has_work(struct bnx2_napi *bnapi)
{
	struct status_block *sblk = bnapi->status_blk.msi;

	if (bnx2_has_fast_work(bnapi))
		return 1;

#ifdef BCM_CNIC
	/* CNIC work is pending while its handler has not caught up with
	 * the latest status block index.
	 */
	if (bnapi->cnic_present && (bnapi->cnic_tag != sblk->status_idx))
		return 1;
#endif

	/* An attention event is pending while its bit and ack bit differ. */
	if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
	    (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
		return 1;

	return 0;
}
3423
/* Workaround for occasionally lost MSIs: if work is pending but
 * last_status_idx has not advanced since the previous check
 * (NOTE(review): presumably called periodically from a timer — confirm
 * against the caller), pulse the MSI enable bit off and back on and
 * invoke the MSI handler by hand to kick NAPI.
 */
static void
bnx2_chk_missed_msi(struct bnx2 *bp)
{
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	u32 msi_ctrl;

	if (bnx2_has_work(bnapi)) {
		msi_ctrl = BNX2_RD(bp, BNX2_PCICFG_MSI_CONTROL);
		if (!(msi_ctrl & BNX2_PCICFG_MSI_CONTROL_ENABLE))
			return;

		if (bnapi->last_status_idx == bp->idle_chk_status_idx) {
			/* Toggle MSI enable, then service the interrupt
			 * directly since the MSI itself was missed.
			 */
			BNX2_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl &
				~BNX2_PCICFG_MSI_CONTROL_ENABLE);
			BNX2_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl);
			bnx2_msi(bp->irq_tbl[0].vector, bnapi);
		}
	}

	/* Remember the index so the next check can detect "no progress". */
	bp->idle_chk_status_idx = bnapi->last_status_idx;
}
3445
3446 #ifdef BCM_CNIC
/* Hand the status block to the registered CNIC offload driver and record
 * the tag it returns (used by bnx2_has_work() to detect pending CNIC work).
 */
static void bnx2_poll_cnic(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	struct cnic_ops *c_ops;

	if (!bnapi->cnic_present)
		return;

	/* cnic_ops is RCU-protected; the CNIC driver may unregister
	 * concurrently, so dereference only under rcu_read_lock().
	 */
	rcu_read_lock();
	c_ops = rcu_dereference(bp->cnic_ops);
	if (c_ops)
		bnapi->cnic_tag = c_ops->cnic_handler(bp->cnic_data,
						      bnapi->status_blk.msi);
	rcu_read_unlock();
}
3461 #endif
3462
/* Service unacknowledged attention (link/timer) events from the status
 * block, then force a coalescing pass to pick up transient state.
 */
static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	struct status_block *sblk = bnapi->status_blk.msi;
	u32 status_attn_bits = sblk->status_attn_bits;
	u32 status_attn_bits_ack = sblk->status_attn_bits_ack;

	/* An event is pending while its bit and its ack bit disagree. */
	if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
	    (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {

		bnx2_phy_int(bp, bnapi);

		/* This is needed to take care of transient status
		 * during link changes.
		 */
		BNX2_WR(bp, BNX2_HC_COMMAND,
			bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
		BNX2_RD(bp, BNX2_HC_COMMAND);
	}
}
3482
3483 static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
3484                           int work_done, int budget)
3485 {
3486         struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3487         struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3488
3489         if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
3490                 bnx2_tx_int(bp, bnapi, 0);
3491
3492         if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons)
3493                 work_done += bnx2_rx_int(bp, bnapi, budget - work_done);
3494
3495         return work_done;
3496 }
3497
/* NAPI poll handler for MSI-X vectors: fast-path (TX/RX) work only —
 * compare bnx2_poll(), which also handles link and CNIC events.
 */
static int bnx2_poll_msix(struct napi_struct *napi, int budget)
{
	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
	struct bnx2 *bp = bnapi->bp;
	int work_done = 0;
	struct status_block_msix *sblk = bnapi->status_blk.msix;

	while (1) {
		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
		if (unlikely(work_done >= budget))
			break;

		bnapi->last_status_idx = sblk->status_idx;
		/* status idx must be read before checking for more work. */
		rmb();
		if (likely(!bnx2_has_fast_work(bnapi))) {

			/* Done: complete NAPI and ack the status index,
			 * which re-enables this vector's interrupt.
			 */
			napi_complete_done(napi, work_done);
			BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
				BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
				bnapi->last_status_idx);
			break;
		}
	}
	return work_done;
}
3524
/* NAPI poll handler for INTx/MSI: services link events, fast-path TX/RX
 * work, and (if present) the CNIC offload handler.
 */
static int bnx2_poll(struct napi_struct *napi, int budget)
{
	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
	struct bnx2 *bp = bnapi->bp;
	int work_done = 0;
	struct status_block *sblk = bnapi->status_blk.msi;

	while (1) {
		bnx2_poll_link(bp, bnapi);

		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);

#ifdef BCM_CNIC
		bnx2_poll_cnic(bp, bnapi);
#endif

		/* bnapi->last_status_idx is used below to tell the hw how
		 * much work has been processed, so we must read it before
		 * checking for more work.
		 */
		bnapi->last_status_idx = sblk->status_idx;

		if (unlikely(work_done >= budget))
			break;

		rmb();
		if (likely(!bnx2_has_work(bnapi))) {
			napi_complete_done(napi, work_done);
			/* MSI/MSI-X: a single ack of the status index
			 * re-enables the interrupt.
			 */
			if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
				BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
					BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
					bnapi->last_status_idx);
				break;
			}
			/* INTx: first update the index with the interrupt
			 * still masked, then write again without MASK_INT
			 * to re-enable it.
			 */
			BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
				BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
				BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
				bnapi->last_status_idx);

			BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
				BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
				bnapi->last_status_idx);
			break;
		}
	}

	return work_done;
}
3573
3574 /* Called with rtnl_lock from vlan functions and also netif_tx_lock
3575  * from set_multicast.
3576  */
/* Program the EMAC RX mode and the RPM sort-user filter from the netdev's
 * flags (promiscuous/allmulti), multicast list, and unicast address list.
 */
static void
bnx2_set_rx_mode(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 rx_mode, sort_mode;
	struct netdev_hw_addr *ha;
	int i;

	if (!netif_running(dev))
		return;

	spin_lock_bh(&bp->phy_lock);

	/* Start from the current mode with the bits we recompute cleared. */
	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
	/* Keep VLAN tags in the frame only when HW VLAN stripping is off
	 * and the chip/firmware combination allows it.
	 */
	if (!(dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
	     (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	}
	else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast: set every hash filter bit. */
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				0xffffffff);
		}
		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
	}
	else {
		/* Accept one or more multicast(s). */
		u32 mc_filter[NUM_MC_HASH_REGISTERS];
		u32 regidx;
		u32 bit;
		u32 crc;

		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);

		/* Hash each address into one bit of the 256-bit filter:
		 * low CRC byte selects register (top 3 bits) and bit (low
		 * 5 bits).
		 */
		netdev_for_each_mc_addr(ha, dev) {
			crc = ether_crc_le(ETH_ALEN, ha->addr);
			bit = crc & 0xff;
			regidx = (bit & 0xe0) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				mc_filter[i]);
		}

		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
	}

	/* Too many unicast addresses to match exactly: fall back to
	 * promiscuous mode.
	 */
	if (netdev_uc_count(dev) > BNX2_MAX_UNICAST_ADDRESSES) {
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	} else if (!(dev->flags & IFF_PROMISC)) {
		/* Add all entries into the match filter list */
		i = 0;
		netdev_for_each_uc_addr(ha, dev) {
			bnx2_set_mac_addr(bp, ha->addr,
					  i + BNX2_START_UNICAST_ADDRESS_INDEX);
			sort_mode |= (1 <<
				      (i + BNX2_START_UNICAST_ADDRESS_INDEX));
			i++;
		}

	}

	if (rx_mode != bp->rx_mode) {
		bp->rx_mode = rx_mode;
		BNX2_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
	}

	/* Disable, program, then re-enable the sort filter. */
	BNX2_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
	BNX2_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
	BNX2_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);

	spin_unlock_bh(&bp->phy_lock);
}
3662
3663 static int
3664 check_fw_section(const struct firmware *fw,
3665                  const struct bnx2_fw_file_section *section,
3666                  u32 alignment, bool non_empty)
3667 {
3668         u32 offset = be32_to_cpu(section->offset);
3669         u32 len = be32_to_cpu(section->len);
3670
3671         if ((offset == 0 && len != 0) || offset >= fw->size || offset & 3)
3672                 return -EINVAL;
3673         if ((non_empty && len == 0) || len > fw->size - offset ||
3674             len & (alignment - 1))
3675                 return -EINVAL;
3676         return 0;
3677 }
3678
3679 static int
3680 check_mips_fw_entry(const struct firmware *fw,
3681                     const struct bnx2_mips_fw_file_entry *entry)
3682 {
3683         if (check_fw_section(fw, &entry->text, 4, true) ||
3684             check_fw_section(fw, &entry->data, 4, false) ||
3685             check_fw_section(fw, &entry->rodata, 4, false))
3686                 return -EINVAL;
3687         return 0;
3688 }
3689
3690 static void bnx2_release_firmware(struct bnx2 *bp)
3691 {
3692         if (bp->rv2p_firmware) {
3693                 release_firmware(bp->mips_firmware);
3694                 release_firmware(bp->rv2p_firmware);
3695                 bp->rv2p_firmware = NULL;
3696         }
3697 }
3698
/* Request and sanity-check the MIPS and RV2P firmware images for this
 * chip revision.  On success both bp->mips_firmware and bp->rv2p_firmware
 * are held; on failure both are released and an error is returned.
 */
static int bnx2_request_uncached_firmware(struct bnx2 *bp)
{
	const char *mips_fw_file, *rv2p_fw_file;
	const struct bnx2_mips_fw_file *mips_fw;
	const struct bnx2_rv2p_fw_file *rv2p_fw;
	int rc;

	/* Pick the image set by chip family; 5709 A0/A1 need a special
	 * RV2P build.
	 */
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		mips_fw_file = FW_MIPS_FILE_09;
		if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5709_A0) ||
		    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5709_A1))
			rv2p_fw_file = FW_RV2P_FILE_09_Ax;
		else
			rv2p_fw_file = FW_RV2P_FILE_09;
	} else {
		mips_fw_file = FW_MIPS_FILE_06;
		rv2p_fw_file = FW_RV2P_FILE_06;
	}

	rc = reject_firmware(&bp->mips_firmware, mips_fw_file, &bp->pdev->dev);
	if (rc) {
		pr_err("Can't load firmware file \"%s\"\n", mips_fw_file);
		goto out;
	}

	rc = reject_firmware(&bp->rv2p_firmware, rv2p_fw_file, &bp->pdev->dev);
	if (rc) {
		pr_err("Can't load firmware file \"%s\"\n", rv2p_fw_file);
		goto err_release_mips_firmware;
	}
	mips_fw = (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
	rv2p_fw = (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
	/* Validate every section descriptor before any of it is trusted. */
	if (bp->mips_firmware->size < sizeof(*mips_fw) ||
	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->com) ||
	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->cp) ||
	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->rxp) ||
	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->tpat) ||
	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->txp)) {
		pr_err("Firmware file \"%s\" is invalid\n", mips_fw_file);
		rc = -EINVAL;
		goto err_release_firmware;
	}
	if (bp->rv2p_firmware->size < sizeof(*rv2p_fw) ||
	    check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc1.rv2p, 8, true) ||
	    check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc2.rv2p, 8, true)) {
		pr_err("Firmware file \"%s\" is invalid\n", rv2p_fw_file);
		rc = -EINVAL;
		goto err_release_firmware;
	}
out:
	return rc;

err_release_firmware:
	release_firmware(bp->rv2p_firmware);
	bp->rv2p_firmware = NULL;
err_release_mips_firmware:
	release_firmware(bp->mips_firmware);
	goto out;
}
3758
3759 static int bnx2_request_firmware(struct bnx2 *bp)
3760 {
3761         return bp->rv2p_firmware ? 0 : bnx2_request_uncached_firmware(bp);
3762 }
3763
3764 static u32
3765 rv2p_fw_fixup(u32 rv2p_proc, int idx, u32 loc, u32 rv2p_code)
3766 {
3767         switch (idx) {
3768         case RV2P_P1_FIXUP_PAGE_SIZE_IDX:
3769                 rv2p_code &= ~RV2P_BD_PAGE_SIZE_MSK;
3770                 rv2p_code |= RV2P_BD_PAGE_SIZE;
3771                 break;
3772         }
3773         return rv2p_code;
3774 }
3775
/* Load one RV2P processor's firmware: stream the 64-bit instructions into
 * the instruction memory through the HIGH/LOW register pair, apply the
 * fixup table, then hold the processor in reset (un-stalled later).
 * Always returns 0.
 */
static int
load_rv2p_fw(struct bnx2 *bp, u32 rv2p_proc,
	     const struct bnx2_rv2p_fw_file_entry *fw_entry)
{
	u32 rv2p_code_len, file_offset;
	__be32 *rv2p_code;
	int i;
	u32 val, cmd, addr;

	rv2p_code_len = be32_to_cpu(fw_entry->rv2p.len);
	file_offset = be32_to_cpu(fw_entry->rv2p.offset);

	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);

	/* Each processor has its own address/command register. */
	if (rv2p_proc == RV2P_PROC1) {
		cmd = BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
		addr = BNX2_RV2P_PROC1_ADDR_CMD;
	} else {
		cmd = BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
		addr = BNX2_RV2P_PROC2_ADDR_CMD;
	}

	/* Write one 8-byte instruction per iteration: latch the two words,
	 * then commit them to instruction slot i/8.
	 */
	for (i = 0; i < rv2p_code_len; i += 8) {
		BNX2_WR(bp, BNX2_RV2P_INSTR_HIGH, be32_to_cpu(*rv2p_code));
		rv2p_code++;
		BNX2_WR(bp, BNX2_RV2P_INSTR_LOW, be32_to_cpu(*rv2p_code));
		rv2p_code++;

		val = (i / 8) | cmd;
		BNX2_WR(bp, addr, val);
	}

	/* Re-apply the fixup table: each entry names a word location whose
	 * instruction must be patched (e.g. with the runtime BD page size)
	 * and rewritten.
	 */
	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
	for (i = 0; i < 8; i++) {
		u32 loc, code;

		loc = be32_to_cpu(fw_entry->fixup[i]);
		if (loc && ((loc * 4) < rv2p_code_len)) {
			code = be32_to_cpu(*(rv2p_code + loc - 1));
			BNX2_WR(bp, BNX2_RV2P_INSTR_HIGH, code);
			code = be32_to_cpu(*(rv2p_code + loc));
			code = rv2p_fw_fixup(rv2p_proc, i, loc, code);
			BNX2_WR(bp, BNX2_RV2P_INSTR_LOW, code);

			val = (loc / 2) | cmd;
			BNX2_WR(bp, addr, val);
		}
	}

	/* Reset the processor, un-stall is done later. */
	if (rv2p_proc == RV2P_PROC1) {
		BNX2_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
	}
	else {
		BNX2_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
	}

	return 0;
}
3835
3836 static int
3837 load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg,
3838             const struct bnx2_mips_fw_file_entry *fw_entry)
3839 {
3840         u32 addr, len, file_offset;
3841         __be32 *data;
3842         u32 offset;
3843         u32 val;
3844
3845         /* Halt the CPU. */
3846         val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3847         val |= cpu_reg->mode_value_halt;
3848         bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3849         bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3850
3851         /* Load the Text area. */
3852         addr = be32_to_cpu(fw_entry->text.addr);
3853         len = be32_to_cpu(fw_entry->text.len);
3854         file_offset = be32_to_cpu(fw_entry->text.offset);
3855         data = (__be32 *)(bp->mips_firmware->data + file_offset);
3856
3857         offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3858         if (len) {
3859                 int j;
3860
3861                 for (j = 0; j < (len / 4); j++, offset += 4)
3862                         bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3863         }
3864
3865         /* Load the Data area. */
3866         addr = be32_to_cpu(fw_entry->data.addr);
3867         len = be32_to_cpu(fw_entry->data.len);
3868         file_offset = be32_to_cpu(fw_entry->data.offset);
3869         data = (__be32 *)(bp->mips_firmware->data + file_offset);
3870
3871         offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3872         if (len) {
3873                 int j;
3874
3875                 for (j = 0; j < (len / 4); j++, offset += 4)
3876                         bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3877         }
3878
3879         /* Load the Read-Only area. */
3880         addr = be32_to_cpu(fw_entry->rodata.addr);
3881         len = be32_to_cpu(fw_entry->rodata.len);
3882         file_offset = be32_to_cpu(fw_entry->rodata.offset);
3883         data = (__be32 *)(bp->mips_firmware->data + file_offset);
3884
3885         offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3886         if (len) {
3887                 int j;
3888
3889                 for (j = 0; j < (len / 4); j++, offset += 4)
3890                         bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3891         }
3892
3893         /* Clear the pre-fetch instruction. */
3894         bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
3895
3896         val = be32_to_cpu(fw_entry->start_addr);
3897         bnx2_reg_wr_ind(bp, cpu_reg->pc, val);
3898
3899         /* Start the CPU. */
3900         val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3901         val &= ~cpu_reg->mode_value_halt;
3902         bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3903         bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3904
3905         return 0;
3906 }
3907
/* Load firmware into both RV2P processors and all five on-chip MIPS CPUs
 * (RX, TX, TX patch-up, completion, command).  The images were validated
 * by bnx2_request_uncached_firmware() before this is called.
 */
static int
bnx2_init_cpus(struct bnx2 *bp)
{
	const struct bnx2_mips_fw_file *mips_fw =
		(const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
	const struct bnx2_rv2p_fw_file *rv2p_fw =
		(const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
	int rc;

	/* Initialize the RV2P processor. */
	load_rv2p_fw(bp, RV2P_PROC1, &rv2p_fw->proc1);
	load_rv2p_fw(bp, RV2P_PROC2, &rv2p_fw->proc2);

	/* Initialize the RX Processor. */
	rc = load_cpu_fw(bp, &cpu_reg_rxp, &mips_fw->rxp);
	if (rc)
		goto init_cpu_err;

	/* Initialize the TX Processor. */
	rc = load_cpu_fw(bp, &cpu_reg_txp, &mips_fw->txp);
	if (rc)
		goto init_cpu_err;

	/* Initialize the TX Patch-up Processor. */
	rc = load_cpu_fw(bp, &cpu_reg_tpat, &mips_fw->tpat);
	if (rc)
		goto init_cpu_err;

	/* Initialize the Completion Processor. */
	rc = load_cpu_fw(bp, &cpu_reg_com, &mips_fw->com);
	if (rc)
		goto init_cpu_err;

	/* Initialize the Command Processor. */
	rc = load_cpu_fw(bp, &cpu_reg_cp, &mips_fw->cp);

init_cpu_err:
	return rc;
}
3947
/* Configure the chip for Wake-on-LAN before suspend/power-down: force the
 * PHY to a WoL-capable link mode, enable magic/ACPI packet reception in
 * the EMAC, open the multicast filters, and notify the firmware.
 */
static void
bnx2_setup_wol(struct bnx2 *bp)
{
	int i;
	u32 val, wol_msg;

	if (bp->wol) {
		u32 advertising;
		u8 autoneg;

		/* Temporarily force 10/100 autoneg on copper so the link
		 * stays up at low power; restore settings afterwards.
		 */
		autoneg = bp->autoneg;
		advertising = bp->advertising;

		if (bp->phy_port == PORT_TP) {
			bp->autoneg = AUTONEG_SPEED;
			bp->advertising = ADVERTISED_10baseT_Half |
				ADVERTISED_10baseT_Full |
				ADVERTISED_100baseT_Half |
				ADVERTISED_100baseT_Full |
				ADVERTISED_Autoneg;
		}

		spin_lock_bh(&bp->phy_lock);
		bnx2_setup_phy(bp, bp->phy_port);
		spin_unlock_bh(&bp->phy_lock);

		bp->autoneg = autoneg;
		bp->advertising = advertising;

		bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);

		val = BNX2_RD(bp, BNX2_EMAC_MODE);

		/* Enable port mode. */
		val &= ~BNX2_EMAC_MODE_PORT;
		val |= BNX2_EMAC_MODE_MPKT_RCVD |
		       BNX2_EMAC_MODE_ACPI_RCVD |
		       BNX2_EMAC_MODE_MPKT;
		if (bp->phy_port == PORT_TP) {
			val |= BNX2_EMAC_MODE_PORT_MII;
		} else {
			val |= BNX2_EMAC_MODE_PORT_GMII;
			if (bp->line_speed == SPEED_2500)
				val |= BNX2_EMAC_MODE_25G_MODE;
		}

		BNX2_WR(bp, BNX2_EMAC_MODE, val);

		/* receive all multicast */
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				0xffffffff);
		}
		BNX2_WR(bp, BNX2_EMAC_RX_MODE, BNX2_EMAC_RX_MODE_SORT_MODE);

		/* Disable, program, then enable the sort filter
		 * (broadcast + multicast).
		 */
		val = 1 | BNX2_RPM_SORT_USER0_BC_EN | BNX2_RPM_SORT_USER0_MC_EN;
		BNX2_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
		BNX2_WR(bp, BNX2_RPM_SORT_USER0, val);
		BNX2_WR(bp, BNX2_RPM_SORT_USER0, val | BNX2_RPM_SORT_USER0_ENA);

		/* Need to enable EMAC and RPM for WOL. */
		BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
			BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
			BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
			BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

		val = BNX2_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		BNX2_WR(bp, BNX2_RPM_CONFIG, val);

		wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	} else {
			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	}

	if (!(bp->flags & BNX2_FLAG_NO_WOL)) {
		u32 val;

		wol_msg |= BNX2_DRV_MSG_DATA_WAIT3;
		if (bp->fw_last_msg || BNX2_CHIP(bp) != BNX2_CHIP_5709) {
			bnx2_fw_sync(bp, wol_msg, 1, 0);
			return;
		}
		/* Tell firmware not to power down the PHY yet, otherwise
		 * the chip will take a long time to respond to MMIO reads.
		 */
		val = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
		bnx2_shmem_wr(bp, BNX2_PORT_FEATURE,
			      val | BNX2_PORT_FEATURE_ASF_ENABLED);
		bnx2_fw_sync(bp, wol_msg, 1, 0);
		bnx2_shmem_wr(bp, BNX2_PORT_FEATURE, val);
	}

}
4042
/* Transition the device between PCI power states.  Only D0 (full power)
 * and D3hot (suspend, optionally with WoL armed) are supported; any other
 * state returns -EINVAL.
 */
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
	switch (state) {
	case PCI_D0: {
		u32 val;

		pci_enable_wake(bp->pdev, PCI_D0, false);
		pci_set_power_state(bp->pdev, PCI_D0);

		/* Undo the WoL packet-match configuration set for D3. */
		val = BNX2_RD(bp, BNX2_EMAC_MODE);
		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
		val &= ~BNX2_EMAC_MODE_MPKT;
		BNX2_WR(bp, BNX2_EMAC_MODE, val);

		val = BNX2_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		BNX2_WR(bp, BNX2_RPM_CONFIG, val);
		break;
	}
	case PCI_D3hot: {
		bnx2_setup_wol(bp);
		pci_wake_from_d3(bp->pdev, bp->wol);
		/* 5706 A0/A1: only enter D3hot when WoL is armed. */
		if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) ||
		    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1)) {

			if (bp->wol)
				pci_set_power_state(bp->pdev, PCI_D3hot);
			break;

		}
		if (!bp->fw_last_msg && BNX2_CHIP(bp) == BNX2_CHIP_5709) {
			u32 val;

			/* Tell firmware not to power down the PHY yet,
			 * otherwise the other port may not respond to
			 * MMIO reads.
			 */
			val = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
			val &= ~BNX2_CONDITION_PM_STATE_MASK;
			val |= BNX2_CONDITION_PM_STATE_UNPREP;
			bnx2_shmem_wr(bp, BNX2_BC_STATE_CONDITION, val);
		}
		pci_set_power_state(bp->pdev, PCI_D3hot);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
4098
4099 static int
4100 bnx2_acquire_nvram_lock(struct bnx2 *bp)
4101 {
4102         u32 val;
4103         int j;
4104
4105         /* Request access to the flash interface. */
4106         BNX2_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
4107         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4108                 val = BNX2_RD(bp, BNX2_NVM_SW_ARB);
4109                 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
4110                         break;
4111
4112                 udelay(5);
4113         }
4114
4115         if (j >= NVRAM_TIMEOUT_COUNT)
4116                 return -EBUSY;
4117
4118         return 0;
4119 }
4120
4121 static int
4122 bnx2_release_nvram_lock(struct bnx2 *bp)
4123 {
4124         int j;
4125         u32 val;
4126
4127         /* Relinquish nvram interface. */
4128         BNX2_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
4129
4130         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4131                 val = BNX2_RD(bp, BNX2_NVM_SW_ARB);
4132                 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
4133                         break;
4134
4135                 udelay(5);
4136         }
4137
4138         if (j >= NVRAM_TIMEOUT_COUNT)
4139                 return -EBUSY;
4140
4141         return 0;
4142 }
4143
4144
4145 static int
4146 bnx2_enable_nvram_write(struct bnx2 *bp)
4147 {
4148         u32 val;
4149
4150         val = BNX2_RD(bp, BNX2_MISC_CFG);
4151         BNX2_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
4152
4153         if (bp->flash_info->flags & BNX2_NV_WREN) {
4154                 int j;
4155
4156                 BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4157                 BNX2_WR(bp, BNX2_NVM_COMMAND,
4158                         BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
4159
4160                 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4161                         udelay(5);
4162
4163                         val = BNX2_RD(bp, BNX2_NVM_COMMAND);
4164                         if (val & BNX2_NVM_COMMAND_DONE)
4165                                 break;
4166                 }
4167
4168                 if (j >= NVRAM_TIMEOUT_COUNT)
4169                         return -EBUSY;
4170         }
4171         return 0;
4172 }
4173
4174 static void
4175 bnx2_disable_nvram_write(struct bnx2 *bp)
4176 {
4177         u32 val;
4178
4179         val = BNX2_RD(bp, BNX2_MISC_CFG);
4180         BNX2_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
4181 }
4182
4183
4184 static void
4185 bnx2_enable_nvram_access(struct bnx2 *bp)
4186 {
4187         u32 val;
4188
4189         val = BNX2_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4190         /* Enable both bits, even on read. */
4191         BNX2_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4192                 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
4193 }
4194
4195 static void
4196 bnx2_disable_nvram_access(struct bnx2 *bp)
4197 {
4198         u32 val;
4199
4200         val = BNX2_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4201         /* Disable both bits, even after read. */
4202         BNX2_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4203                 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
4204                         BNX2_NVM_ACCESS_ENABLE_WR_EN));
4205 }
4206
4207 static int
4208 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
4209 {
4210         u32 cmd;
4211         int j;
4212
4213         if (bp->flash_info->flags & BNX2_NV_BUFFERED)
4214                 /* Buffered flash, no erase needed */
4215                 return 0;
4216
4217         /* Build an erase command */
4218         cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
4219               BNX2_NVM_COMMAND_DOIT;
4220
4221         /* Need to clear DONE bit separately. */
4222         BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4223
4224         /* Address of the NVRAM to read from. */
4225         BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4226
4227         /* Issue an erase command. */
4228         BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);
4229
4230         /* Wait for completion. */
4231         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4232                 u32 val;
4233
4234                 udelay(5);
4235
4236                 val = BNX2_RD(bp, BNX2_NVM_COMMAND);
4237                 if (val & BNX2_NVM_COMMAND_DONE)
4238                         break;
4239         }
4240
4241         if (j >= NVRAM_TIMEOUT_COUNT)
4242                 return -EBUSY;
4243
4244         return 0;
4245 }
4246
4247 static int
4248 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
4249 {
4250         u32 cmd;
4251         int j;
4252
4253         /* Build the command word. */
4254         cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
4255
4256         /* Calculate an offset of a buffered flash, not needed for 5709. */
4257         if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4258                 offset = ((offset / bp->flash_info->page_size) <<
4259                            bp->flash_info->page_bits) +
4260                           (offset % bp->flash_info->page_size);
4261         }
4262
4263         /* Need to clear DONE bit separately. */
4264         BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4265
4266         /* Address of the NVRAM to read from. */
4267         BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4268
4269         /* Issue a read command. */
4270         BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);
4271
4272         /* Wait for completion. */
4273         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4274                 u32 val;
4275
4276                 udelay(5);
4277
4278                 val = BNX2_RD(bp, BNX2_NVM_COMMAND);
4279                 if (val & BNX2_NVM_COMMAND_DONE) {
4280                         __be32 v = cpu_to_be32(BNX2_RD(bp, BNX2_NVM_READ));
4281                         memcpy(ret_val, &v, 4);
4282                         break;
4283                 }
4284         }
4285         if (j >= NVRAM_TIMEOUT_COUNT)
4286                 return -EBUSY;
4287
4288         return 0;
4289 }
4290
4291
4292 static int
4293 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
4294 {
4295         u32 cmd;
4296         __be32 val32;
4297         int j;
4298
4299         /* Build the command word. */
4300         cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
4301
4302         /* Calculate an offset of a buffered flash, not needed for 5709. */
4303         if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4304                 offset = ((offset / bp->flash_info->page_size) <<
4305                           bp->flash_info->page_bits) +
4306                          (offset % bp->flash_info->page_size);
4307         }
4308
4309         /* Need to clear DONE bit separately. */
4310         BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4311
4312         memcpy(&val32, val, 4);
4313
4314         /* Write the data. */
4315         BNX2_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));
4316
4317         /* Address of the NVRAM to write to. */
4318         BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4319
4320         /* Issue the write command. */
4321         BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);
4322
4323         /* Wait for completion. */
4324         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4325                 udelay(5);
4326
4327                 if (BNX2_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
4328                         break;
4329         }
4330         if (j >= NVRAM_TIMEOUT_COUNT)
4331                 return -EBUSY;
4332
4333         return 0;
4334 }
4335
/* Identify the attached flash/EEPROM part, record it in bp->flash_info,
 * and determine the usable flash size.
 *
 * On the 5709 the flash spec is fixed (flash_5709) and the table search
 * is skipped entirely — the goto also skips the "j == entry_count" check,
 * which would otherwise read the uninitialized loop counter.  On older
 * chips the NVM_CFG1 strapping value is matched against flash_table[];
 * a not-yet-reconfigured interface is additionally programmed with the
 * matched entry's config registers.
 *
 * Returns 0 on success, -ENODEV for an unknown part, or the error from
 * bnx2_acquire_nvram_lock().
 */
static int
bnx2_init_nvram(struct bnx2 *bp)
{
	u32 val;
	int j, entry_count, rc = 0;
	const struct flash_spec *flash;

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		bp->flash_info = &flash_5709;
		goto get_flash_size;
	}

	/* Determine the selected interface. */
	val = BNX2_RD(bp, BNX2_NVM_CFG1);

	entry_count = ARRAY_SIZE(flash_table);

	if (val & 0x40000000) {

		/* Flash interface has been reconfigured */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			/* Already programmed: match the backup-strap bits
			 * of config1 rather than the raw strapping. */
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				bp->flash_info = flash;
				break;
			}
		}
	}
	else {
		u32 mask;
		/* Not yet been reconfigured */

		/* Bit 23 selects which strapping encoding to match. */
		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		for (j = 0, flash = &flash_table[0]; j < entry_count;
			j++, flash++) {

			if ((val & mask) == (flash->strapping & mask)) {
				bp->flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
					return rc;

				/* Enable access to flash interface */
				bnx2_enable_nvram_access(bp);

				/* Reconfigure the flash interface */
				BNX2_WR(bp, BNX2_NVM_CFG1, flash->config1);
				BNX2_WR(bp, BNX2_NVM_CFG2, flash->config2);
				BNX2_WR(bp, BNX2_NVM_CFG3, flash->config3);
				BNX2_WR(bp, BNX2_NVM_WRITE1, flash->write1);

				/* Disable access to flash interface */
				bnx2_disable_nvram_access(bp);
				bnx2_release_nvram_lock(bp);

				break;
			}
		}
	} /* if (val & 0x40000000) */

	/* Whichever loop ran, j == entry_count means no table entry
	 * matched the strapping. */
	if (j == entry_count) {
		bp->flash_info = NULL;
		pr_alert("Unknown flash/EEPROM type\n");
		return -ENODEV;
	}

get_flash_size:
	/* Prefer the size reported by firmware in shared memory; fall
	 * back to the table entry's total_size when it is zero. */
	val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		bp->flash_size = val;
	else
		bp->flash_size = bp->flash_info->total_size;

	return rc;
}
4418
4419 static int
4420 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
4421                 int buf_size)
4422 {
4423         int rc = 0;
4424         u32 cmd_flags, offset32, len32, extra;
4425
4426         if (buf_size == 0)
4427                 return 0;
4428
4429         /* Request access to the flash interface. */
4430         if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4431                 return rc;
4432
4433         /* Enable access to flash interface */
4434         bnx2_enable_nvram_access(bp);
4435
4436         len32 = buf_size;
4437         offset32 = offset;
4438         extra = 0;
4439
4440         cmd_flags = 0;
4441
4442         if (offset32 & 3) {
4443                 u8 buf[4];
4444                 u32 pre_len;
4445
4446                 offset32 &= ~3;
4447                 pre_len = 4 - (offset & 3);
4448
4449                 if (pre_len >= len32) {
4450                         pre_len = len32;
4451                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
4452                                     BNX2_NVM_COMMAND_LAST;
4453                 }
4454                 else {
4455                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
4456                 }
4457
4458                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4459
4460                 if (rc)
4461                         return rc;
4462
4463                 memcpy(ret_buf, buf + (offset & 3), pre_len);
4464
4465                 offset32 += 4;
4466                 ret_buf += pre_len;
4467                 len32 -= pre_len;
4468         }
4469         if (len32 & 3) {
4470                 extra = 4 - (len32 & 3);
4471                 len32 = (len32 + 4) & ~3;
4472         }
4473
4474         if (len32 == 4) {
4475                 u8 buf[4];
4476
4477                 if (cmd_flags)
4478                         cmd_flags = BNX2_NVM_COMMAND_LAST;
4479                 else
4480                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
4481                                     BNX2_NVM_COMMAND_LAST;
4482
4483                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4484
4485                 memcpy(ret_buf, buf, 4 - extra);
4486         }
4487         else if (len32 > 0) {
4488                 u8 buf[4];
4489
4490                 /* Read the first word. */
4491                 if (cmd_flags)
4492                         cmd_flags = 0;
4493                 else
4494                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
4495
4496                 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
4497
4498                 /* Advance to the next dword. */
4499                 offset32 += 4;
4500                 ret_buf += 4;
4501                 len32 -= 4;
4502
4503                 while (len32 > 4 && rc == 0) {
4504                         rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
4505
4506                         /* Advance to the next dword. */
4507                         offset32 += 4;
4508                         ret_buf += 4;
4509                         len32 -= 4;
4510                 }
4511
4512                 if (rc)
4513                         return rc;
4514
4515                 cmd_flags = BNX2_NVM_COMMAND_LAST;
4516                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4517
4518                 memcpy(ret_buf, buf, 4 - extra);
4519         }
4520
4521         /* Disable access to flash interface */
4522         bnx2_disable_nvram_access(bp);
4523
4524         bnx2_release_nvram_lock(bp);
4525
4526         return rc;
4527 }
4528
/* Write @buf_size bytes from @data_buf to NVRAM at @offset.
 *
 * Writes are widened to aligned 32-bit dwords: partially-overwritten
 * head/tail dwords are first read back via bnx2_nvram_read() and merged
 * with the caller's data in a temporary buffer.  The merged data is then
 * written one flash page at a time; for non-buffered parts each page is
 * read into flash_buffer, erased, and fully rewritten.
 *
 * NOTE(review): the "goto nvram_write_end" error paths taken after
 * bnx2_acquire_nvram_lock() succeeds inside the page loop exit without
 * disabling NVRAM access or releasing the arbitration lock — verify
 * whether that is intended.
 *
 * Returns 0 on success or a negative errno.
 */
static int
bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
		int buf_size)
{
	u32 written, offset32, len32;
	u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
	int rc = 0;
	int align_start, align_end;

	buf = data_buf;
	offset32 = offset;
	len32 = buf_size;
	align_start = align_end = 0;

	/* Unaligned start: widen to the containing dword and read back
	 * the bytes that must be preserved. */
	if ((align_start = (offset32 & 3))) {
		offset32 &= ~3;
		len32 += align_start;
		if (len32 < 4)
			len32 = 4;
		if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
			return rc;
	}

	/* Unaligned end: same treatment for the trailing dword. */
	if (len32 & 3) {
		align_end = 4 - (len32 & 3);
		len32 += align_end;
		if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
			return rc;
	}

	/* Merge the preserved head/tail bytes with the caller's data. */
	if (align_start || align_end) {
		align_buf = kmalloc(len32, GFP_KERNEL);
		if (!align_buf)
			return -ENOMEM;
		if (align_start) {
			memcpy(align_buf, start, 4);
		}
		if (align_end) {
			memcpy(align_buf + len32 - 4, end, 4);
		}
		memcpy(align_buf + align_start, data_buf, buf_size);
		buf = align_buf;
	}

	/* Non-buffered flash needs a scratch buffer for the whole-page
	 * read/erase/rewrite cycle.  264 bytes presumably covers the
	 * largest page_size in flash_table — TODO confirm. */
	if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
		flash_buffer = kmalloc(264, GFP_KERNEL);
		if (!flash_buffer) {
			rc = -ENOMEM;
			goto nvram_write_end;
		}
	}

	written = 0;
	while ((written < len32) && (rc == 0)) {
		u32 page_start, page_end, data_start, data_end;
		u32 addr, cmd_flags;
		int i;

		/* Find the page_start addr */
		page_start = offset32 + written;
		page_start -= (page_start % bp->flash_info->page_size);
		/* Find the page_end addr */
		page_end = page_start + bp->flash_info->page_size;
		/* Find the data_start addr */
		data_start = (written == 0) ? offset32 : page_start;
		/* Find the data_end addr */
		data_end = (page_end > offset32 + len32) ?
			(offset32 + len32) : page_end;

		/* Request access to the flash interface. */
		if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
			goto nvram_write_end;

		/* Enable access to flash interface */
		bnx2_enable_nvram_access(bp);

		cmd_flags = BNX2_NVM_COMMAND_FIRST;
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			int j;

			/* Read the whole page into the buffer
			 * (non-buffer flash only) */
			for (j = 0; j < bp->flash_info->page_size; j += 4) {
				if (j == (bp->flash_info->page_size - 4)) {
					cmd_flags |= BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_read_dword(bp,
					page_start + j,
					&flash_buffer[j],
					cmd_flags);

				if (rc)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Enable writes to flash interface (unlock write-protect) */
		if ((rc = bnx2_enable_nvram_write(bp)) != 0)
			goto nvram_write_end;

		/* Loop to write back the buffer data from page_start to
		 * data_start */
		i = 0;
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			/* Erase the page */
			if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
				goto nvram_write_end;

			/* Re-enable the write again for the actual write */
			bnx2_enable_nvram_write(bp);

			for (addr = page_start; addr < data_start;
				addr += 4, i += 4) {

				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Loop to write the new data from data_start to data_end */
		for (addr = data_start; addr < data_end; addr += 4, i += 4) {
			if ((addr == page_end - 4) ||
				((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
				 (addr == data_end - 4))) {

				cmd_flags |= BNX2_NVM_COMMAND_LAST;
			}
			rc = bnx2_nvram_write_dword(bp, addr, buf,
				cmd_flags);

			if (rc != 0)
				goto nvram_write_end;

			cmd_flags = 0;
			buf += 4;
		}

		/* Loop to write back the buffer data from data_end
		 * to page_end */
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			for (addr = data_end; addr < page_end;
				addr += 4, i += 4) {

				if (addr == page_end-4) {
					cmd_flags = BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Disable writes to flash interface (lock write-protect) */
		bnx2_disable_nvram_write(bp);

		/* Disable access to flash interface */
		bnx2_disable_nvram_access(bp);
		bnx2_release_nvram_lock(bp);

		/* Increment written */
		written += data_end - data_start;
	}

nvram_write_end:
	kfree(flash_buffer);
	kfree(align_buf);
	return rc;
}
4708
4709 static void
4710 bnx2_init_fw_cap(struct bnx2 *bp)
4711 {
4712         u32 val, sig = 0;
4713
4714         bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4715         bp->flags &= ~BNX2_FLAG_CAN_KEEP_VLAN;
4716
4717         if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
4718                 bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4719
4720         val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
4721         if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
4722                 return;
4723
4724         if ((val & BNX2_FW_CAP_CAN_KEEP_VLAN) == BNX2_FW_CAP_CAN_KEEP_VLAN) {
4725                 bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4726                 sig |= BNX2_DRV_ACK_CAP_SIGNATURE | BNX2_FW_CAP_CAN_KEEP_VLAN;
4727         }
4728
4729         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
4730             (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE)) {
4731                 u32 link;
4732
4733                 bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4734
4735                 link = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
4736                 if (link & BNX2_LINK_STATUS_SERDES_LINK)
4737                         bp->phy_port = PORT_FIBRE;
4738                 else
4739                         bp->phy_port = PORT_TP;
4740
4741                 sig |= BNX2_DRV_ACK_CAP_SIGNATURE |
4742                        BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
4743         }
4744
4745         if (netif_running(bp->dev) && sig)
4746                 bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
4747 }
4748
/* Point GRC window 2 at the MSI-X table and window 3 at the PBA after
 * switching GRC windowing to separate-window mode.
 */
static void
bnx2_setup_msix_tbl(struct bnx2 *bp)
{
	/* Select separate-window addressing before programming windows. */
	BNX2_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);

	BNX2_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
	BNX2_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
}
4757
4758 static void
4759 bnx2_wait_dma_complete(struct bnx2 *bp)
4760 {
4761         u32 val;
4762         int i;
4763
4764         /*
4765          * Wait for the current PCI transaction to complete before
4766          * issuing a reset.
4767          */
4768         if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) ||
4769             (BNX2_CHIP(bp) == BNX2_CHIP_5708)) {
4770                 BNX2_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
4771                         BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
4772                         BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
4773                         BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
4774                         BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
4775                 val = BNX2_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
4776                 udelay(5);
4777         } else {  /* 5709 */
4778                 val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4779                 val &= ~BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
4780                 BNX2_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
4781                 val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4782
4783                 for (i = 0; i < 100; i++) {
4784                         msleep(1);
4785                         val = BNX2_RD(bp, BNX2_PCICFG_DEVICE_CONTROL);
4786                         if (!(val & BNX2_PCICFG_DEVICE_STATUS_NO_PEND))
4787                                 break;
4788                 }
4789         }
4790
4791         return;
4792 }
4793
4794
/* Perform a coordinated soft reset of the chip.
 *
 * Quiesces DMA, handshakes with the bootcode firmware (WAIT0 before the
 * reset, WAIT1 afterwards), issues the chip-specific reset, verifies the
 * byte-swap configuration, re-reads firmware capabilities, and applies
 * 5706 A0 and MSI-X workarounds.
 *
 * @reset_code: BNX2_DRV_MSG_CODE_* value OR'd into the firmware sync.
 *
 * Returns 0 on success, -EBUSY if the reset does not complete, -ENODEV
 * for a byte-swap misconfiguration, or the bnx2_fw_sync() error.
 */
static int
bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
{
	u32 val;
	int i, rc = 0;
	u8 old_port;

	/* Wait for the current PCI transaction to complete before
	 * issuing a reset. */
	bnx2_wait_dma_complete(bp);

	/* Wait for the firmware to tell us it is ok to issue a reset. */
	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);

	/* Deposit a driver reset signature so the firmware knows that
	 * this is a soft reset. */
	bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
		      BNX2_DRV_RESET_SIGNATURE_MAGIC);

	/* Do a dummy read to force the chip to complete all current transaction
	 * before we issue a reset. */
	val = BNX2_RD(bp, BNX2_MISC_ID);

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		/* 5709 resets via MISC_COMMAND; the read-back flushes the
		 * posted write before the settling delay. */
		BNX2_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
		BNX2_RD(bp, BNX2_MISC_COMMAND);
		udelay(5);

		/* Restore register window and word-swap settings. */
		val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

	} else {
		/* Older chips reset via a core-reset request in PCICFG. */
		val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		/* Chip reset. */
		BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

		/* Reading back any register after chip reset will hang the
		 * bus on 5706 A0 and A1.  The msleep below provides plenty
		 * of margin for write posting.
		 */
		if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) ||
		    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1))
			msleep(20);

		/* Reset takes approximate 30 usec */
		for (i = 0; i < 10; i++) {
			val = BNX2_RD(bp, BNX2_PCICFG_MISC_CONFIG);
			if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
				    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
				break;
			udelay(10);
		}

		if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
			   BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
			pr_err("Chip reset did not complete\n");
			return -EBUSY;
		}
	}

	/* Make sure byte swapping is properly configured. */
	val = BNX2_RD(bp, BNX2_PCI_SWAP_DIAG0);
	if (val != 0x01020304) {
		pr_err("Chip not in correct endian mode\n");
		return -ENODEV;
	}

	/* Wait for the firmware to finish its initialization. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 1, 0);
	if (rc)
		return rc;

	/* Re-read firmware capabilities; if the PHY port changed across
	 * the reset, reprogram the default remote link settings. */
	spin_lock_bh(&bp->phy_lock);
	old_port = bp->phy_port;
	bnx2_init_fw_cap(bp);
	if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
	    old_port != bp->phy_port)
		bnx2_set_default_remote_link(bp);
	spin_unlock_bh(&bp->phy_lock);

	if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
		/* Adjust the voltage regular to two steps lower.  The default
		 * of this register is 0x0000000e. */
		BNX2_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);

		/* Remove bad rbuf memory from the free pool. */
		rc = bnx2_alloc_bad_rbuf(bp);
	}

	if (bp->flags & BNX2_FLAG_USING_MSIX) {
		bnx2_setup_msix_tbl(bp);
		/* Prevent MSIX table reads and write from timing out */
		BNX2_WR(bp, BNX2_MISC_ECO_HW_CTL,
			BNX2_MISC_ECO_HW_CTL_LARGE_GRC_TMOUT_EN);
	}

	return rc;
}
4898
/* Bring the chip from post-reset state to fully operational: program the
 * DMA engine, contexts, on-chip CPUs, MAC, MTU, host coalescing and the
 * status/statistics block DMA addresses, then hand off to the bootcode.
 * Returns 0 on success or a negative errno from firmware/CPU init.
 * NOTE(review): the register write ordering below follows the hardware
 * bring-up sequence and must not be rearranged.
 */
static int
bnx2_init_chip(struct bnx2 *bp)
{
	u32 val, mtu;
	int rc, i;

	/* Make sure the interrupt is not active. */
	BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* DMA engine byte/word swapping plus read/write channel counts. */
	val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
	      BNX2_DMA_CONFIG_DATA_WORD_SWAP |
#ifdef __BIG_ENDIAN
	      BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
#endif
	      BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
	      DMA_READ_CHANS << 12 |
	      DMA_WRITE_CHANS << 16;

	val |= (0x2 << 20) | (1 << 11);

	/* Extra tuning bit for 133 MHz PCI-X buses. */
	if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
		val |= (1 << 23);

	/* Ping-pong DMA only on 5706 (not A0) when not in PCI-X mode. */
	if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) &&
	    (BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A0) &&
	    !(bp->flags & BNX2_FLAG_PCIX))
		val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;

	BNX2_WR(bp, BNX2_DMA_CONFIG, val);

	/* 5706 A0 workaround: restrict TX DMA to a single channel. */
	if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
		val = BNX2_RD(bp, BNX2_TDMA_CONFIG);
		val |= BNX2_TDMA_CONFIG_ONE_DMA;
		BNX2_WR(bp, BNX2_TDMA_CONFIG, val);
	}

	/* On PCI-X, disable relaxed ordering in the PCI-X command register. */
	if (bp->flags & BNX2_FLAG_PCIX) {
		u16 val16;

		pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				     &val16);
		pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				      val16 & ~PCI_X_CMD_ERO);
	}

	/* Enable the internal blocks needed before context init below. */
	BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
		BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
		BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
		BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);

	/* Initialize context mapping and zero out the quick contexts.  The
	 * context block must have already been enabled. */
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		rc = bnx2_init_5709_context(bp);
		if (rc)
			return rc;
	} else
		bnx2_init_context(bp);

	/* Load and start the on-chip RISC processors (firmware). */
	if ((rc = bnx2_init_cpus(bp)) != 0)
		return rc;

	bnx2_init_nvram(bp);

	bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);

	/* MQ: set kernel bypass block size to 256 bytes. */
	val = BNX2_RD(bp, BNX2_MQ_CONFIG);
	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		val |= BNX2_MQ_CONFIG_BIN_MQ_MODE;
		if (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax)
			val |= BNX2_MQ_CONFIG_HALT_DIS;
	}

	BNX2_WR(bp, BNX2_MQ_CONFIG, val);

	/* Kernel mailbox window ends where the bypass window starts. */
	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
	BNX2_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
	BNX2_WR(bp, BNX2_MQ_KNL_WIND_END, val);

	/* Tell RV2P the host BD page size (encoded as log2(size) - 8). */
	val = (BNX2_PAGE_BITS - 8) << 24;
	BNX2_WR(bp, BNX2_RV2P_CONFIG, val);

	/* Configure page size. */
	val = BNX2_RD(bp, BNX2_TBDR_CONFIG);
	val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
	val |= (BNX2_PAGE_BITS - 8) << 24 | 0x40;
	BNX2_WR(bp, BNX2_TBDR_CONFIG, val);

	/* Seed the transmit backoff from the MAC address. */
	val = bp->mac_addr[0] +
	      (bp->mac_addr[1] << 8) +
	      (bp->mac_addr[2] << 16) +
	      bp->mac_addr[3] +
	      (bp->mac_addr[4] << 8) +
	      (bp->mac_addr[5] << 16);
	BNX2_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);

	/* Program the MTU.  Also include 4 bytes for CRC32. */
	mtu = bp->dev->mtu;
	val = mtu + ETH_HLEN + ETH_FCS_LEN;
	if (val > (MAX_ETHERNET_PACKET_SIZE + ETH_HLEN + 4))
		val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
	BNX2_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);

	/* RX buffer configuration always assumes at least a standard MTU. */
	if (mtu < ETH_DATA_LEN)
		mtu = ETH_DATA_LEN;

	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG, BNX2_RBUF_CONFIG_VAL(mtu));
	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG2, BNX2_RBUF_CONFIG2_VAL(mtu));
	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG3, BNX2_RBUF_CONFIG3_VAL(mtu));

	/* Clear the shared status block and per-vector last-seen indices. */
	memset(bp->bnx2_napi[0].status_blk.msi, 0, bp->status_stats_size);
	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
		bp->bnx2_napi[i].last_status_idx = 0;

	bp->idle_chk_status_idx = 0xffff;

	/* Set up how to generate a link change interrupt. */
	BNX2_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	/* DMA addresses of the host status and statistics blocks. */
	BNX2_WR(bp, BNX2_HC_STATUS_ADDR_L,
		(u64) bp->status_blk_mapping & 0xffffffff);
	BNX2_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);

	BNX2_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
		(u64) bp->stats_blk_mapping & 0xffffffff);
	BNX2_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
		(u64) bp->stats_blk_mapping >> 32);

	/* Host coalescing thresholds: high half is the in-interrupt value. */
	BNX2_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
		(bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);

	BNX2_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
		(bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);

	BNX2_WR(bp, BNX2_HC_COMP_PROD_TRIP,
		(bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);

	BNX2_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);

	BNX2_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);

	BNX2_WR(bp, BNX2_HC_COM_TICKS,
		(bp->com_ticks_int << 16) | bp->com_ticks);

	BNX2_WR(bp, BNX2_HC_CMD_TICKS,
		(bp->cmd_ticks_int << 16) | bp->cmd_ticks);

	/* Some chips cannot DMA statistics reliably; disable the timer. */
	if (bp->flags & BNX2_FLAG_BROKEN_STATS)
		BNX2_WR(bp, BNX2_HC_STATS_TICKS, 0);
	else
		BNX2_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
	BNX2_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */

	/* 5706 A1 workaround: no timer-mode coalescing. */
	if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1)
		val = BNX2_HC_CONFIG_COLLECT_STATS;
	else {
		val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
		      BNX2_HC_CONFIG_COLLECT_STATS;
	}

	if (bp->flags & BNX2_FLAG_USING_MSIX) {
		BNX2_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
			BNX2_HC_MSIX_BIT_VECTOR_VAL);

		val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
	}

	if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
		val |= BNX2_HC_CONFIG_ONE_SHOT | BNX2_HC_CONFIG_USE_INT_PARAM;

	BNX2_WR(bp, BNX2_HC_CONFIG, val);

	/* Hint the firmware about latency expectations from rx_ticks. */
	if (bp->rx_ticks < 25)
		bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 1);
	else
		bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 0);

	/* Per-vector coalescing for the additional MSI-X status blocks. */
	for (i = 1; i < bp->irq_nvecs; i++) {
		u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) +
			   BNX2_HC_SB_CONFIG_1;

		BNX2_WR(bp, base,
			BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
			BNX2_HC_SB_CONFIG_1_RX_TMR_MODE |
			BNX2_HC_SB_CONFIG_1_ONE_SHOT);

		BNX2_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
			(bp->tx_quick_cons_trip_int << 16) |
			 bp->tx_quick_cons_trip);

		BNX2_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
			(bp->tx_ticks_int << 16) | bp->tx_ticks);

		BNX2_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF,
			(bp->rx_quick_cons_trip_int << 16) |
			bp->rx_quick_cons_trip);

		BNX2_WR(bp, base + BNX2_HC_RX_TICKS_OFF,
			(bp->rx_ticks_int << 16) | bp->rx_ticks);
	}

	/* Clear internal stats counters. */
	BNX2_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);

	BNX2_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);

	/* Initialize the receive filter. */
	bnx2_set_rx_mode(bp->dev);

	/* 5709: enable DMA in the new core control block. */
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);
		val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
		BNX2_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
	}
	/* Tell the bootcode that init is (almost) complete. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
			  1, 0);

	BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
	BNX2_RD(bp, BNX2_MISC_ENABLE_SET_BITS);	/* read back to flush */

	udelay(20);

	/* Cache the host coalescing command register for fast interrupt use. */
	bp->hc_cmd = BNX2_RD(bp, BNX2_HC_COMMAND);

	return rc;
}
5127
5128 static void
5129 bnx2_clear_ring_states(struct bnx2 *bp)
5130 {
5131         struct bnx2_napi *bnapi;
5132         struct bnx2_tx_ring_info *txr;
5133         struct bnx2_rx_ring_info *rxr;
5134         int i;
5135
5136         for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
5137                 bnapi = &bp->bnx2_napi[i];
5138                 txr = &bnapi->tx_ring;
5139                 rxr = &bnapi->rx_ring;
5140
5141                 txr->tx_cons = 0;
5142                 txr->hw_tx_cons = 0;
5143                 rxr->rx_prod_bseq = 0;
5144                 rxr->rx_prod = 0;
5145                 rxr->rx_cons = 0;
5146                 rxr->rx_pg_prod = 0;
5147                 rxr->rx_pg_cons = 0;
5148         }
5149 }
5150
/* Program the TX L2 context for @cid: ring type/size, command type, and
 * the 64-bit DMA address of the TX descriptor ring.  The 5709 uses a
 * different set of context offsets (the _XI variants) than 5706/5708.
 */
static void
bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
{
	u32 val, offset0, offset1, offset2, offset3;
	u32 cid_addr = GET_CID_ADDR(cid);

	/* Select chip-specific context field offsets. */
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		offset0 = BNX2_L2CTX_TYPE_XI;
		offset1 = BNX2_L2CTX_CMD_TYPE_XI;
		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
	} else {
		offset0 = BNX2_L2CTX_TYPE;
		offset1 = BNX2_L2CTX_CMD_TYPE;
		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
	}
	val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
	bnx2_ctx_wr(bp, cid_addr, offset0, val);

	/* Command type with BD reserve count in bits 16+. */
	val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
	bnx2_ctx_wr(bp, cid_addr, offset1, val);

	/* High then low half of the TX BD ring DMA address. */
	val = (u64) txr->tx_desc_mapping >> 32;
	bnx2_ctx_wr(bp, cid_addr, offset2, val);

	val = (u64) txr->tx_desc_mapping & 0xffffffff;
	bnx2_ctx_wr(bp, cid_addr, offset3, val);
}
5180
5181 static void
5182 bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
5183 {
5184         struct bnx2_tx_bd *txbd;
5185         u32 cid = TX_CID;
5186         struct bnx2_napi *bnapi;
5187         struct bnx2_tx_ring_info *txr;
5188
5189         bnapi = &bp->bnx2_napi[ring_num];
5190         txr = &bnapi->tx_ring;
5191
5192         if (ring_num == 0)
5193                 cid = TX_CID;
5194         else
5195                 cid = TX_TSS_CID + ring_num - 1;
5196
5197         bp->tx_wake_thresh = bp->tx_ring_size / 2;
5198
5199         txbd = &txr->tx_desc_ring[BNX2_MAX_TX_DESC_CNT];
5200
5201         txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
5202         txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;
5203
5204         txr->tx_prod = 0;
5205         txr->tx_prod_bseq = 0;
5206
5207         txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
5208         txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
5209
5210         bnx2_init_tx_context(bp, cid, txr);
5211 }
5212
5213 static void
5214 bnx2_init_rxbd_rings(struct bnx2_rx_bd *rx_ring[], dma_addr_t dma[],
5215                      u32 buf_size, int num_rings)
5216 {
5217         int i;
5218         struct bnx2_rx_bd *rxbd;
5219
5220         for (i = 0; i < num_rings; i++) {
5221                 int j;
5222
5223                 rxbd = &rx_ring[i][0];
5224                 for (j = 0; j < BNX2_MAX_RX_DESC_CNT; j++, rxbd++) {
5225                         rxbd->rx_bd_len = buf_size;
5226                         rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
5227                 }
5228                 if (i == (num_rings - 1))
5229                         j = 0;
5230                 else
5231                         j = i + 1;
5232                 rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
5233                 rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
5234         }
5235 }
5236
/* Initialize RX ring @ring_num: build the BD chains, program the RX L2
 * context (including the optional jumbo page ring), pre-fill the rings
 * with buffers, and write the initial producer indices to the hardware
 * mailboxes.
 */
static void
bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
{
	int i;
	u16 prod, ring_prod;
	u32 cid, rx_cid_addr, val;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num];
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

	/* Ring 0 uses the base RX CID; additional RSS rings follow. */
	if (ring_num == 0)
		cid = RX_CID;
	else
		cid = RX_RSS_CID + ring_num - 1;

	rx_cid_addr = GET_CID_ADDR(cid);

	/* Chain the normal RX BD pages into a circular ring. */
	bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping,
			     bp->rx_buf_use_size, bp->rx_max_ring);

	bnx2_init_rx_context(bp, cid);

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		val = BNX2_RD(bp, BNX2_MQ_MAP_L2_5);
		BNX2_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
	}

	/* Default: no page ring; overwritten below if jumbo pages are used. */
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
	if (bp->rx_pg_ring_size) {
		/* Jumbo frames: set up the secondary page-sized BD ring. */
		bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring,
				     rxr->rx_pg_desc_mapping,
				     PAGE_SIZE, bp->rx_max_pg_ring);
		val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
		       BNX2_L2CTX_RBDC_JUMBO_KEY - ring_num);

		val = (u64) rxr->rx_pg_desc_mapping[0] >> 32;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);

		val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);

		if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
			BNX2_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
	}

	/* Point the context at the first page of the normal BD ring. */
	val = (u64) rxr->rx_desc_mapping[0] >> 32;
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);

	val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff;
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);

	/* Pre-fill the page ring; a partial fill is tolerated with a warning. */
	ring_prod = prod = rxr->rx_pg_prod;
	for (i = 0; i < bp->rx_pg_ring_size; i++) {
		if (bnx2_alloc_rx_page(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
			netdev_warn(bp->dev, "init'ed rx page ring %d with %d/%d pages only\n",
				    ring_num, i, bp->rx_pg_ring_size);
			break;
		}
		prod = BNX2_NEXT_RX_BD(prod);
		ring_prod = BNX2_RX_PG_RING_IDX(prod);
	}
	rxr->rx_pg_prod = prod;

	/* Pre-fill the normal RX ring with data buffers. */
	ring_prod = prod = rxr->rx_prod;
	for (i = 0; i < bp->rx_ring_size; i++) {
		if (bnx2_alloc_rx_data(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
			netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n",
				    ring_num, i, bp->rx_ring_size);
			break;
		}
		prod = BNX2_NEXT_RX_BD(prod);
		ring_prod = BNX2_RX_RING_IDX(prod);
	}
	rxr->rx_prod = prod;

	/* Mailbox addresses for ringing the RX doorbells. */
	rxr->rx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BDIDX;
	rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ;
	rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX;

	/* Publish the initial producer indices to the hardware. */
	BNX2_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
	BNX2_WR16(bp, rxr->rx_bidx_addr, prod);

	BNX2_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
}
5322
/* Initialize every TX and RX ring and, when multiple RX rings are in
 * use, program the RSS indirection table and hash configuration.
 */
static void
bnx2_init_all_rings(struct bnx2 *bp)
{
	int i;
	u32 val;

	bnx2_clear_ring_states(bp);

	/* Disable TSS while (re)initializing the TX rings. */
	BNX2_WR(bp, BNX2_TSCH_TSS_CFG, 0);
	for (i = 0; i < bp->num_tx_rings; i++)
		bnx2_init_tx_ring(bp, i);

	if (bp->num_tx_rings > 1)
		BNX2_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
			(TX_TSS_CID << 7));

	/* Disable RSS while (re)initializing the RX rings. */
	BNX2_WR(bp, BNX2_RLUP_RSS_CONFIG, 0);
	bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0);

	for (i = 0; i < bp->num_rx_rings; i++)
		bnx2_init_rx_ring(bp, i);

	if (bp->num_rx_rings > 1) {
		u32 tbl_32 = 0;

		/* Build the indirection table, 8 four-bit entries per
		 * 32-bit word, spreading entries round-robin over the
		 * non-default RX rings.
		 */
		for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
			int shift = (i % 8) << 2;

			tbl_32 |= (i % (bp->num_rx_rings - 1)) << shift;
			if ((i % 8) == 7) {
				BNX2_WR(bp, BNX2_RLUP_RSS_DATA, tbl_32);
				BNX2_WR(bp, BNX2_RLUP_RSS_COMMAND, (i >> 3) |
					BNX2_RLUP_RSS_COMMAND_RSS_WRITE_MASK |
					BNX2_RLUP_RSS_COMMAND_WRITE |
					BNX2_RLUP_RSS_COMMAND_HASH_MASK);
				tbl_32 = 0;
			}
		}

		/* Enable RSS hashing for all IPv4 and IPv6 packet types. */
		val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI |
		      BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI;

		BNX2_WR(bp, BNX2_RLUP_RSS_CONFIG, val);

	}
}
5369
5370 static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
5371 {
5372         u32 max, num_rings = 1;
5373
5374         while (ring_size > BNX2_MAX_RX_DESC_CNT) {
5375                 ring_size -= BNX2_MAX_RX_DESC_CNT;
5376                 num_rings++;
5377         }
5378         /* round to next power of 2 */
5379         max = max_size;
5380         while ((max & num_rings) == 0)
5381                 max >>= 1;
5382
5383         if (num_rings != max)
5384                 max <<= 1;
5385
5386         return max;
5387 }
5388
/* Compute all RX buffer and ring sizing fields in @bp for a ring of
 * @size entries at the current MTU.  When a single buffer (plus skb
 * overhead) would exceed one page, switch to jumbo mode: small header
 * buffers in the normal ring plus a secondary page ring for the rest
 * of each frame.
 */
static void
bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
{
	u32 rx_size, rx_space, jumbo_size;

	/* 8 for CRC and VLAN */
	rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;

	/* Total memory one receive buffer would need, including alignment
	 * and the shared-info overhead added by build_skb().
	 */
	rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
	bp->rx_pg_ring_size = 0;
	bp->rx_max_pg_ring = 0;
	bp->rx_max_pg_ring_idx = 0;
	if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
		/* Pages needed per frame; the "- 40" appears to discount
		 * headers kept in the normal ring buffer — TODO confirm.
		 */
		int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;

		jumbo_size = size * pages;
		if (jumbo_size > BNX2_MAX_TOTAL_RX_PG_DESC_CNT)
			jumbo_size = BNX2_MAX_TOTAL_RX_PG_DESC_CNT;

		bp->rx_pg_ring_size = jumbo_size;
		bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
							BNX2_MAX_RX_PG_RINGS);
		bp->rx_max_pg_ring_idx =
			(bp->rx_max_pg_ring * BNX2_RX_DESC_CNT) - 1;
		/* In jumbo mode the normal ring only holds small headers. */
		rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
		bp->rx_copy_thresh = 0;
	}

	bp->rx_buf_use_size = rx_size;
	/* hw alignment + build_skb() overhead*/
	bp->rx_buf_size = SKB_DATA_ALIGN(bp->rx_buf_use_size + BNX2_RX_ALIGN) +
		NET_SKB_PAD + SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
	bp->rx_ring_size = size;
	bp->rx_max_ring = bnx2_find_max_ring(size, BNX2_MAX_RX_RINGS);
	bp->rx_max_ring_idx = (bp->rx_max_ring * BNX2_RX_DESC_CNT) - 1;
}
5429
/* Free every pending TX skb on all TX rings: unmap the head buffer and
 * each fragment from DMA, then release the skb and reset the BQL state
 * of the corresponding queue.
 */
static void
bnx2_free_tx_skbs(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_tx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
		int j;

		if (!txr->tx_buf_ring)
			continue;

		for (j = 0; j < BNX2_TX_DESC_CNT; ) {
			struct bnx2_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
			struct sk_buff *skb = tx_buf->skb;
			int k, last;

			/* Empty slot — advance to the next BD. */
			if (!skb) {
				j = BNX2_NEXT_TX_BD(j);
				continue;
			}

			/* Unmap the linear part of the skb. */
			dma_unmap_single(&bp->pdev->dev,
					 dma_unmap_addr(tx_buf, mapping),
					 skb_headlen(skb),
					 PCI_DMA_TODEVICE);

			tx_buf->skb = NULL;

			/* Each fragment occupies its own BD after the head;
			 * unmap them all, wrapping around the ring as needed.
			 */
			last = tx_buf->nr_frags;
			j = BNX2_NEXT_TX_BD(j);
			for (k = 0; k < last; k++, j = BNX2_NEXT_TX_BD(j)) {
				tx_buf = &txr->tx_buf_ring[BNX2_TX_RING_IDX(j)];
				dma_unmap_page(&bp->pdev->dev,
					dma_unmap_addr(tx_buf, mapping),
					skb_frag_size(&skb_shinfo(skb)->frags[k]),
					PCI_DMA_TODEVICE);
			}
			dev_kfree_skb(skb);
		}
		/* Reset byte-queue-limit accounting for this queue. */
		netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
	}
}
5474
/* Free every RX data buffer and RX page on all RX rings, unmapping each
 * from DMA first.
 */
static void
bnx2_free_rx_skbs(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_rx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
		int j;

		/* NOTE(review): this returns rather than continuing like
		 * bnx2_free_tx_skbs() does; presumably rings are allocated
		 * in order so a NULL ring implies no later rings — verify.
		 */
		if (!rxr->rx_buf_ring)
			return;

		for (j = 0; j < bp->rx_max_ring_idx; j++) {
			struct bnx2_sw_bd *rx_buf = &rxr->rx_buf_ring[j];
			u8 *data = rx_buf->data;

			if (!data)
				continue;

			dma_unmap_single(&bp->pdev->dev,
					 dma_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_use_size,
					 PCI_DMA_FROMDEVICE);

			rx_buf->data = NULL;

			kfree(data);
		}
		/* Release the jumbo page-ring pages, if any. */
		for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
			bnx2_free_rx_page(bp, rxr, j);
	}
}
5508
/* Release all TX and RX buffers held by the driver. */
static void
bnx2_free_skbs(struct bnx2 *bp)
{
	bnx2_free_tx_skbs(bp);
	bnx2_free_rx_skbs(bp);
}
5515
5516 static int
5517 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
5518 {
5519         int rc;
5520
5521         rc = bnx2_reset_chip(bp, reset_code);
5522         bnx2_free_skbs(bp);
5523         if (rc)
5524                 return rc;
5525
5526         if ((rc = bnx2_init_chip(bp)) != 0)
5527                 return rc;
5528
5529         bnx2_init_all_rings(bp);
5530         return 0;
5531 }
5532
5533 static int
5534 bnx2_init_nic(struct bnx2 *bp, int reset_phy)
5535 {
5536         int rc;
5537
5538         if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
5539                 return rc;
5540
5541         spin_lock_bh(&bp->phy_lock);
5542         bnx2_init_phy(bp, reset_phy);
5543         bnx2_set_link(bp);
5544         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5545                 bnx2_remote_phy_event(bp);
5546         spin_unlock_bh(&bp->phy_lock);
5547         return 0;
5548 }
5549
5550 static int
5551 bnx2_shutdown_chip(struct bnx2 *bp)
5552 {
5553         u32 reset_code;
5554
5555         if (bp->flags & BNX2_FLAG_NO_WOL)
5556                 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
5557         else if (bp->wol)
5558                 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5559         else
5560                 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5561
5562         return bnx2_reset_chip(bp, reset_code);
5563 }
5564
/* Ethtool self-test: for each register in the table, write 0 and then
 * all-ones, checking that the read/write bits (rw_mask) take the written
 * value while the read-only bits (ro_mask) keep their original value.
 * The original register value is restored in every path.  Returns 0 on
 * success or -ENODEV on the first mismatch.
 */
static int
bnx2_test_registers(struct bnx2 *bp)
{
	int ret;
	int i, is_5709;
	/* offset: register address; flags: chip applicability;
	 * rw_mask: bits that must be writable; ro_mask: bits that must
	 * hold their value.  Terminated by offset 0xffff.
	 */
	static const struct {
		u16   offset;
		u16   flags;
#define BNX2_FL_NOT_5709        1
		u32   rw_mask;
		u32   ro_mask;
	} reg_tbl[] = {
		{ 0x006c, 0, 0x00000000, 0x0000003f },
		{ 0x0090, 0, 0xffffffff, 0x00000000 },
		{ 0x0094, 0, 0x00000000, 0x00000000 },

		{ 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
		{ 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
		{ 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
		{ 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
		{ 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },

		{ 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },

		{ 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
		{ 0x0c08, BNX2_FL_NOT_5709,  0x0f0ff073, 0x00000000 },

		{ 0x1000, 0, 0x00000000, 0x00000001 },
		{ 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 },

		{ 0x1408, 0, 0x01c00800, 0x00000000 },
		{ 0x149c, 0, 0x8000ffff, 0x00000000 },
		{ 0x14a8, 0, 0x00000000, 0x000001ff },
		{ 0x14ac, 0, 0x0fffffff, 0x10000000 },
		{ 0x14b0, 0, 0x00000002, 0x00000001 },
		{ 0x14b8, 0, 0x00000000, 0x00000000 },
		{ 0x14c0, 0, 0x00000000, 0x00000009 },
		{ 0x14c4, 0, 0x00003fff, 0x00000000 },
		{ 0x14cc, 0, 0x00000000, 0x00000001 },
		{ 0x14d0, 0, 0xffffffff, 0x00000000 },

		{ 0x1800, 0, 0x00000000, 0x00000001 },
		{ 0x1804, 0, 0x00000000, 0x00000003 },

		{ 0x2800, 0, 0x00000000, 0x00000001 },
		{ 0x2804, 0, 0x00000000, 0x00003f01 },
		{ 0x2808, 0, 0x0f3f3f03, 0x00000000 },
		{ 0x2810, 0, 0xffff0000, 0x00000000 },
		{ 0x2814, 0, 0xffff0000, 0x00000000 },
		{ 0x2818, 0, 0xffff0000, 0x00000000 },
		{ 0x281c, 0, 0xffff0000, 0x00000000 },
		{ 0x2834, 0, 0xffffffff, 0x00000000 },
		{ 0x2840, 0, 0x00000000, 0xffffffff },
		{ 0x2844, 0, 0x00000000, 0xffffffff },
		{ 0x2848, 0, 0xffffffff, 0x00000000 },
		{ 0x284c, 0, 0xf800f800, 0x07ff07ff },

		{ 0x2c00, 0, 0x00000000, 0x00000011 },
		{ 0x2c04, 0, 0x00000000, 0x00030007 },

		{ 0x3c00, 0, 0x00000000, 0x00000001 },
		{ 0x3c04, 0, 0x00000000, 0x00070000 },
		{ 0x3c08, 0, 0x00007f71, 0x07f00000 },
		{ 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
		{ 0x3c10, 0, 0xffffffff, 0x00000000 },
		{ 0x3c14, 0, 0x00000000, 0xffffffff },
		{ 0x3c18, 0, 0x00000000, 0xffffffff },
		{ 0x3c1c, 0, 0xfffff000, 0x00000000 },
		{ 0x3c20, 0, 0xffffff00, 0x00000000 },

		{ 0x5004, 0, 0x00000000, 0x0000007f },
		{ 0x5008, 0, 0x0f0007ff, 0x00000000 },

		{ 0x5c00, 0, 0x00000000, 0x00000001 },
		{ 0x5c04, 0, 0x00000000, 0x0003000f },
		{ 0x5c08, 0, 0x00000003, 0x00000000 },
		{ 0x5c0c, 0, 0x0000fff8, 0x00000000 },
		{ 0x5c10, 0, 0x00000000, 0xffffffff },
		{ 0x5c80, 0, 0x00000000, 0x0f7113f1 },
		{ 0x5c84, 0, 0x00000000, 0x0000f333 },
		{ 0x5c88, 0, 0x00000000, 0x00077373 },
		{ 0x5c8c, 0, 0x00000000, 0x0007f737 },

		{ 0x6808, 0, 0x0000ff7f, 0x00000000 },
		{ 0x680c, 0, 0xffffffff, 0x00000000 },
		{ 0x6810, 0, 0xffffffff, 0x00000000 },
		{ 0x6814, 0, 0xffffffff, 0x00000000 },
		{ 0x6818, 0, 0xffffffff, 0x00000000 },
		{ 0x681c, 0, 0xffffffff, 0x00000000 },
		{ 0x6820, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6824, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6828, 0, 0x00ff00ff, 0x00000000 },
		{ 0x682c, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6830, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6834, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6838, 0, 0x03ff03ff, 0x00000000 },
		{ 0x683c, 0, 0x0000ffff, 0x00000000 },
		{ 0x6840, 0, 0x00000ff0, 0x00000000 },
		{ 0x6844, 0, 0x00ffff00, 0x00000000 },
		{ 0x684c, 0, 0xffffffff, 0x00000000 },
		{ 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6908, 0, 0x00000000, 0x0001ff0f },
		{ 0x690c, 0, 0x00000000, 0x0ffe00f0 },

		{ 0xffff, 0, 0x00000000, 0x00000000 },
	};

	ret = 0;
	is_5709 = 0;
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
		is_5709 = 1;

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		u32 offset, rw_mask, ro_mask, save_val, val;
		u16 flags = reg_tbl[i].flags;

		/* Skip registers that do not exist on the 5709. */
		if (is_5709 && (flags & BNX2_FL_NOT_5709))
			continue;

		offset = (u32) reg_tbl[i].offset;
		rw_mask = reg_tbl[i].rw_mask;
		ro_mask = reg_tbl[i].ro_mask;

		save_val = readl(bp->regview + offset);

		/* Write 0: rw bits must read back 0, ro bits unchanged. */
		writel(0, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != 0) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		/* Write all-ones: rw bits must all set, ro bits unchanged. */
		writel(0xffffffff, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != rw_mask) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		writel(save_val, bp->regview + offset);
		continue;

reg_test_err:
		/* Restore the register even on failure, then stop. */
		writel(save_val, bp->regview + offset);
		ret = -ENODEV;
		break;
	}
	return ret;
}
5735
5736 static int
5737 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
5738 {
5739         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
5740                 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
5741         int i;
5742
5743         for (i = 0; i < sizeof(test_pattern) / 4; i++) {
5744                 u32 offset;
5745
5746                 for (offset = 0; offset < size; offset += 4) {
5747
5748                         bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);
5749
5750                         if (bnx2_reg_rd_ind(bp, start + offset) !=
5751                                 test_pattern[i]) {
5752                                 return -ENODEV;
5753                         }
5754                 }
5755         }
5756         return 0;
5757 }
5758
5759 static int
5760 bnx2_test_memory(struct bnx2 *bp)
5761 {
5762         int ret = 0;
5763         int i;
5764         static struct mem_entry {
5765                 u32   offset;
5766                 u32   len;
5767         } mem_tbl_5706[] = {
5768                 { 0x60000,  0x4000 },
5769                 { 0xa0000,  0x3000 },
5770                 { 0xe0000,  0x4000 },
5771                 { 0x120000, 0x4000 },
5772                 { 0x1a0000, 0x4000 },
5773                 { 0x160000, 0x4000 },
5774                 { 0xffffffff, 0    },
5775         },
5776         mem_tbl_5709[] = {
5777                 { 0x60000,  0x4000 },
5778                 { 0xa0000,  0x3000 },
5779                 { 0xe0000,  0x4000 },
5780                 { 0x120000, 0x4000 },
5781                 { 0x1a0000, 0x4000 },
5782                 { 0xffffffff, 0    },
5783         };
5784         struct mem_entry *mem_tbl;
5785
5786         if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
5787                 mem_tbl = mem_tbl_5709;
5788         else
5789                 mem_tbl = mem_tbl_5706;
5790
5791         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
5792                 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
5793                         mem_tbl[i].len)) != 0) {
5794                         return ret;
5795                 }
5796         }
5797
5798         return ret;
5799 }
5800
#define BNX2_MAC_LOOPBACK       0
#define BNX2_PHY_LOOPBACK       1

/* Send one self-addressed test frame through the chip with either the
 * MAC or the PHY looped back on itself, then verify that the frame
 * arrives intact on the first rx ring.
 *
 * Returns 0 on success (PHY loopback is skipped and reports success
 * when a remote PHY is in use), -EINVAL for an unknown mode, -ENOMEM /
 * -EIO on skb allocation or DMA mapping failure, and -ENODEV when the
 * frame is not received or comes back corrupted.
 */
static int
bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb;
	u8 *data;
	unsigned char *packet;
	u16 rx_start_idx, rx_idx;
	dma_addr_t map;
	struct bnx2_tx_bd *txbd;
	struct bnx2_sw_bd *rx_buf;
	struct l2_fhdr *rx_hdr;
	int ret = -ENODEV;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
	struct bnx2_tx_ring_info *txr;
	struct bnx2_rx_ring_info *rxr;

	tx_napi = bnapi;

	txr = &tx_napi->tx_ring;
	rxr = &bnapi->rx_ring;
	if (loopback_mode == BNX2_MAC_LOOPBACK) {
		bp->loopback = MAC_LOOPBACK;
		bnx2_set_mac_loopback(bp);
	}
	else if (loopback_mode == BNX2_PHY_LOOPBACK) {
		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return 0;

		bp->loopback = PHY_LOOPBACK;
		bnx2_set_phy_loopback(bp);
	}
	else
		return -EINVAL;

	/* Build the test frame: our own MAC as destination, 8 zero bytes,
	 * then an incrementing byte pattern from offset 14 onward.
	 */
	pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
	skb = netdev_alloc_skb(bp->dev, pkt_size);
	if (!skb)
		return -ENOMEM;
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
	memset(packet + ETH_ALEN, 0x0, 8);
	for (i = 14; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	map = dma_map_single(&bp->pdev->dev, skb->data, pkt_size,
			     PCI_DMA_TODEVICE);
	if (dma_mapping_error(&bp->pdev->dev, map)) {
		dev_kfree_skb(skb);
		return -EIO;
	}

	/* Force a coalesce-now (no interrupt) so the status block gives a
	 * stable rx consumer snapshot before we transmit.
	 */
	BNX2_WR(bp, BNX2_HC_COMMAND,
		bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	BNX2_RD(bp, BNX2_HC_COMMAND);

	udelay(5);
	rx_start_idx = bnx2_get_hw_rx_cons(bnapi);

	num_pkts = 0;

	/* Post the single frame on the tx ring and ring the doorbell. */
	txbd = &txr->tx_desc_ring[BNX2_TX_RING_IDX(txr->tx_prod)];

	txbd->tx_bd_haddr_hi = (u64) map >> 32;
	txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
	txbd->tx_bd_mss_nbytes = pkt_size;
	txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;

	num_pkts++;
	txr->tx_prod = BNX2_NEXT_TX_BD(txr->tx_prod);
	txr->tx_prod_bseq += pkt_size;

	BNX2_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
	BNX2_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);

	/* Give the chip time to loop the frame back, then force another
	 * status block update to publish the new tx/rx indices.
	 */
	udelay(100);

	BNX2_WR(bp, BNX2_HC_COMMAND,
		bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	BNX2_RD(bp, BNX2_HC_COMMAND);

	udelay(5);

	dma_unmap_single(&bp->pdev->dev, map, pkt_size, PCI_DMA_TODEVICE);
	dev_kfree_skb(skb);

	/* The tx consumer must have caught up with our single frame. */
	if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
		goto loopback_test_done;

	/* Exactly one new frame must have arrived on the rx ring. */
	rx_idx = bnx2_get_hw_rx_cons(bnapi);
	if (rx_idx != rx_start_idx + num_pkts) {
		goto loopback_test_done;
	}

	rx_buf = &rxr->rx_buf_ring[rx_start_idx];
	data = rx_buf->data;

	rx_hdr = get_l2_fhdr(data);
	data = (u8 *)rx_hdr + BNX2_RX_OFFSET;

	dma_sync_single_for_cpu(&bp->pdev->dev,
		dma_unmap_addr(rx_buf, mapping),
		bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);

	/* Reject the frame if the chip flagged any receive error. */
	if (rx_hdr->l2_fhdr_status &
		(L2_FHDR_ERRORS_BAD_CRC |
		L2_FHDR_ERRORS_PHY_DECODE |
		L2_FHDR_ERRORS_ALIGNMENT |
		L2_FHDR_ERRORS_TOO_SHORT |
		L2_FHDR_ERRORS_GIANT_FRAME)) {

		goto loopback_test_done;
	}

	/* Received length includes the 4-byte CRC; strip it for compare. */
	if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
		goto loopback_test_done;
	}

	/* Verify the payload byte pattern survived the round trip. */
	for (i = 14; i < pkt_size; i++) {
		if (*(data + i) != (unsigned char) (i & 0xff)) {
			goto loopback_test_done;
		}
	}

	ret = 0;

loopback_test_done:
	bp->loopback = 0;
	return ret;
}
5936
5937 #define BNX2_MAC_LOOPBACK_FAILED        1
5938 #define BNX2_PHY_LOOPBACK_FAILED        2
5939 #define BNX2_LOOPBACK_FAILED            (BNX2_MAC_LOOPBACK_FAILED |     \
5940                                          BNX2_PHY_LOOPBACK_FAILED)
5941
5942 static int
5943 bnx2_test_loopback(struct bnx2 *bp)
5944 {
5945         int rc = 0;
5946
5947         if (!netif_running(bp->dev))
5948                 return BNX2_LOOPBACK_FAILED;
5949
5950         bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5951         spin_lock_bh(&bp->phy_lock);
5952         bnx2_init_phy(bp, 1);
5953         spin_unlock_bh(&bp->phy_lock);
5954         if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5955                 rc |= BNX2_MAC_LOOPBACK_FAILED;
5956         if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5957                 rc |= BNX2_PHY_LOOPBACK_FAILED;
5958         return rc;
5959 }
5960
5961 #define NVRAM_SIZE 0x200
5962 #define CRC32_RESIDUAL 0xdebb20e3
5963
5964 static int
5965 bnx2_test_nvram(struct bnx2 *bp)
5966 {
5967         __be32 buf[NVRAM_SIZE / 4];
5968         u8 *data = (u8 *) buf;
5969         int rc = 0;
5970         u32 magic, csum;
5971
5972         if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5973                 goto test_nvram_done;
5974
5975         magic = be32_to_cpu(buf[0]);
5976         if (magic != 0x669955aa) {
5977                 rc = -ENODEV;
5978                 goto test_nvram_done;
5979         }
5980
5981         if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5982                 goto test_nvram_done;
5983
5984         csum = ether_crc_le(0x100, data);
5985         if (csum != CRC32_RESIDUAL) {
5986                 rc = -ENODEV;
5987                 goto test_nvram_done;
5988         }
5989
5990         csum = ether_crc_le(0x100, data + 0x100);
5991         if (csum != CRC32_RESIDUAL) {
5992                 rc = -ENODEV;
5993         }
5994
5995 test_nvram_done:
5996         return rc;
5997 }
5998
5999 static int
6000 bnx2_test_link(struct bnx2 *bp)
6001 {
6002         u32 bmsr;
6003
6004         if (!netif_running(bp->dev))
6005                 return -ENODEV;
6006
6007         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
6008                 if (bp->link_up)
6009                         return 0;
6010                 return -ENODEV;
6011         }
6012         spin_lock_bh(&bp->phy_lock);
6013         bnx2_enable_bmsr1(bp);
6014         bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
6015         bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
6016         bnx2_disable_bmsr1(bp);
6017         spin_unlock_bh(&bp->phy_lock);
6018
6019         if (bmsr & BMSR_LSTATUS) {
6020                 return 0;
6021         }
6022         return -ENODEV;
6023 }
6024
6025 static int
6026 bnx2_test_intr(struct bnx2 *bp)
6027 {
6028         int i;
6029         u16 status_idx;
6030
6031         if (!netif_running(bp->dev))
6032                 return -ENODEV;
6033
6034         status_idx = BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
6035
6036         /* This register is not touched during run-time. */
6037         BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
6038         BNX2_RD(bp, BNX2_HC_COMMAND);
6039
6040         for (i = 0; i < 10; i++) {
6041                 if ((BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
6042                         status_idx) {
6043
6044                         break;
6045                 }
6046
6047                 msleep_interruptible(10);
6048         }
6049         if (i < 10)
6050                 return 0;
6051
6052         return -ENODEV;
6053 }
6054
/* Determining link for parallel detection. */
/* Probe the 5706 SerDes PHY shadow and expansion registers to decide
 * whether a non-autonegotiating link partner is present.  Returns 1
 * when the signal looks usable for parallel detection, 0 otherwise.
 * Called from bnx2_5706_serdes_timer() under bp->phy_lock.
 */
static int
bnx2_5706_serdes_has_link(struct bnx2 *bp)
{
	u32 mode_ctl, an_dbg, exp;

	if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
		return 0;

	/* No link possible without signal detect. */
	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);

	if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
		return 0;

	/* Double read -- NOTE(review): the NOSYNC/RUDI bits appear to be
	 * latched so the second read reflects current state; confirm with
	 * the PHY documentation.
	 */
	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

	if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
		return 0;

	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);

	if (exp & MII_EXPAND_REG1_RUDI_C)	/* receiving CONFIG */
		return 0;

	return 1;
}
6086
/* Per-tick state machine for the 5706 SerDes PHY, handling parallel
 * detection of link partners that do not autonegotiate, and recovery
 * when a parallel-detected partner later enables autoneg.  Runs in
 * timer context; takes bp->phy_lock.
 */
static void
bnx2_5706_serdes_timer(struct bnx2 *bp)
{
	int check_link = 1;

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending) {
		/* Autoneg was restarted recently; give it more ticks. */
		bp->serdes_an_pending--;
		check_link = 0;
	} else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bp->current_interval = BNX2_TIMER_INTERVAL;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

		if (bmcr & BMCR_ANENABLE) {
			/* Autoneg has not brought the link up: if the
			 * partner signal looks like a forced-speed link,
			 * force 1G full duplex ourselves (parallel detect).
			 */
			if (bnx2_5706_serdes_has_link(bp)) {
				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
				bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
			}
		}
	}
	else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
		 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
		u32 phy2;

		/* Link is up via parallel detection: if the partner now
		 * signals autoneg ability (NOTE(review): bit 0x20 of
		 * shadow register 0x15 -- meaning inferred from usage,
		 * confirm against the PHY datasheet), re-enable autoneg.
		 */
		bnx2_write_phy(bp, 0x17, 0x0f01);
		bnx2_read_phy(bp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
	} else
		bp->current_interval = BNX2_TIMER_INTERVAL;

	if (check_link) {
		u32 val;

		/* Double read of AN_DBG -- NOTE(review): NOSYNC appears to
		 * be a latched bit; confirm.
		 */
		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);

		if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
			/* Link reported up but the PHY lost sync: force the
			 * link down once, then let bnx2_set_link() settle it
			 * on the next tick.
			 */
			if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
				bnx2_5706s_force_link_dn(bp, 1);
				bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
			} else
				bnx2_set_link(bp);
		} else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
			bnx2_set_link(bp);
	}
	spin_unlock(&bp->phy_lock);
}
6148
/* Per-tick handling for the 5708 SerDes PHY: while the link is down and
 * autoneg is enabled, alternate between forced 2.5G and autoneg until
 * one of them brings the link up.  Runs in timer context; takes
 * bp->phy_lock.  No-op with a remote PHY or a non-2.5G-capable part.
 */
static void
bnx2_5708_serdes_timer(struct bnx2 *bp)
{
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return;

	if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
		bp->serdes_an_pending = 0;
		return;
	}

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending)
		/* Recently restarted autoneg; give it more ticks. */
		bp->serdes_an_pending--;
	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			/* Autoneg did not converge; try forced 2.5G with a
			 * shorter re-check interval.
			 */
			bnx2_enable_forced_2g5(bp);
			bp->current_interval = BNX2_SERDES_FORCED_TIMEOUT;
		} else {
			/* Forced 2.5G did not link either; fall back to
			 * autoneg and wait two ticks before re-evaluating.
			 */
			bnx2_disable_forced_2g5(bp);
			bp->serdes_an_pending = 2;
			bp->current_interval = BNX2_TIMER_INTERVAL;
		}

	} else
		bp->current_interval = BNX2_TIMER_INTERVAL;

	spin_unlock(&bp->phy_lock);
}
6181
/* Periodic housekeeping timer (fires every bp->current_interval):
 * firmware heartbeat, missed-MSI workaround, firmware rx-drop counter
 * refresh, broken-stats workaround, and SerDes link polling.
 * Re-arms itself for as long as the interface is running.
 */
static void
bnx2_timer(struct timer_list *t)
{
	struct bnx2 *bp = from_timer(bp, t, timer);

	if (!netif_running(bp->dev))
		return;

	if (atomic_read(&bp->intr_sem) != 0)
		/* Device is quiesced (intr_sem held non-zero, e.g. by the
		 * reset path); skip the work but keep the timer alive.
		 */
		goto bnx2_restart_timer;

	if ((bp->flags & (BNX2_FLAG_USING_MSI | BNX2_FLAG_ONE_SHOT_MSI)) ==
	     BNX2_FLAG_USING_MSI)
		/* Only plain (non one-shot) MSI needs the missed-MSI check. */
		bnx2_chk_missed_msi(bp);

	bnx2_send_heart_beat(bp);

	bp->stats_blk->stat_FwRxDrop =
		bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT);

	/* workaround occasional corrupted counters */
	if ((bp->flags & BNX2_FLAG_BROKEN_STATS) && bp->stats_ticks)
		BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
			BNX2_HC_COMMAND_STATS_NOW);

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
			bnx2_5706_serdes_timer(bp);
		else
			bnx2_5708_serdes_timer(bp);
	}

bnx2_restart_timer:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}
6217
6218 static int
6219 bnx2_request_irq(struct bnx2 *bp)
6220 {
6221         unsigned long flags;
6222         struct bnx2_irq *irq;
6223         int rc = 0, i;
6224
6225         if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)
6226                 flags = 0;
6227         else
6228                 flags = IRQF_SHARED;
6229
6230         for (i = 0; i < bp->irq_nvecs; i++) {
6231                 irq = &bp->irq_tbl[i];
6232                 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
6233                                  &bp->bnx2_napi[i]);
6234                 if (rc)
6235                         break;
6236                 irq->requested = 1;
6237         }
6238         return rc;
6239 }
6240
6241 static void
6242 __bnx2_free_irq(struct bnx2 *bp)
6243 {
6244         struct bnx2_irq *irq;
6245         int i;
6246
6247         for (i = 0; i < bp->irq_nvecs; i++) {
6248                 irq = &bp->irq_tbl[i];
6249                 if (irq->requested)
6250                         free_irq(irq->vector, &bp->bnx2_napi[i]);
6251                 irq->requested = 0;
6252         }
6253 }
6254
6255 static void
6256 bnx2_free_irq(struct bnx2 *bp)
6257 {
6258
6259         __bnx2_free_irq(bp);
6260         if (bp->flags & BNX2_FLAG_USING_MSI)
6261                 pci_disable_msi(bp->pdev);
6262         else if (bp->flags & BNX2_FLAG_USING_MSIX)
6263                 pci_disable_msix(bp->pdev);
6264
6265         bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI);
6266 }
6267
/* Try to switch the device to MSI-X with @msix_vecs vectors (plus one
 * extra for the CNIC driver when built in).  On success, sets
 * BNX2_FLAG_USING_MSIX / BNX2_FLAG_ONE_SHOT_MSI and fills bp->irq_tbl;
 * on failure returns with the flags untouched so the caller falls back
 * to MSI or INTx.
 */
static void
bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
{
	int i, total_vecs;
	struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
	struct net_device *dev = bp->dev;
	const int len = sizeof(bp->irq_tbl[0].name);

	/* Point the chip's MSI-X table and PBA at GRC windows 2 and 3. */
	bnx2_setup_msix_tbl(bp);
	BNX2_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
	BNX2_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
	BNX2_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);

	/*  Need to flush the previous three writes to ensure MSI-X
	 *  is setup properly */
	BNX2_RD(bp, BNX2_PCI_MSIX_CONTROL);

	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
		msix_ent[i].entry = i;
		msix_ent[i].vector = 0;
	}

	total_vecs = msix_vecs;
#ifdef BCM_CNIC
	/* One extra vector reserved for the CNIC driver. */
	total_vecs++;
#endif
	/* May allocate fewer vectors than requested (>= BNX2_MIN_MSIX_VEC);
	 * negative return means MSI-X could not be enabled at all.
	 */
	total_vecs = pci_enable_msix_range(bp->pdev, msix_ent,
					   BNX2_MIN_MSIX_VEC, total_vecs);
	if (total_vecs < 0)
		return;

	/* irq_nvecs excludes the CNIC vector, but irq_tbl below is still
	 * populated for all total_vecs entries.
	 */
	msix_vecs = total_vecs;
#ifdef BCM_CNIC
	msix_vecs--;
#endif
	bp->irq_nvecs = msix_vecs;
	bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
	for (i = 0; i < total_vecs; i++) {
		bp->irq_tbl[i].vector = msix_ent[i].vector;
		snprintf(bp->irq_tbl[i].name, len, "%s-%d", dev->name, i);
		bp->irq_tbl[i].handler = bnx2_msi_1shot;
	}
}
6311
/* Select the interrupt mode (MSI-X, then MSI, then INTx) and derive the
 * rx/tx ring counts from it.  @dis_msi forces legacy INTx.  Returns the
 * result of netif_set_real_num_rx_queues() (0 on success).
 */
static int
bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
{
	int cpus = netif_get_num_default_rss_queues();
	int msix_vecs;

	/* Size the MSI-X request from the user-requested ring counts,
	 * defaulting from the RSS queue count when a count is unset,
	 * capped at RX_MAX_RINGS.
	 */
	if (!bp->num_req_rx_rings)
		msix_vecs = max(cpus + 1, bp->num_req_tx_rings);
	else if (!bp->num_req_tx_rings)
		msix_vecs = max(cpus, bp->num_req_rx_rings);
	else
		msix_vecs = max(bp->num_req_rx_rings, bp->num_req_tx_rings);

	msix_vecs = min(msix_vecs, RX_MAX_RINGS);

	/* INTx defaults; overridden below when MSI-X or MSI succeeds. */
	bp->irq_tbl[0].handler = bnx2_interrupt;
	strcpy(bp->irq_tbl[0].name, bp->dev->name);
	bp->irq_nvecs = 1;
	bp->irq_tbl[0].vector = bp->pdev->irq;

	if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi)
		bnx2_enable_msix(bp, msix_vecs);

	/* MSI fallback, only when MSI-X did not get enabled above. */
	if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
	    !(bp->flags & BNX2_FLAG_USING_MSIX)) {
		if (pci_enable_msi(bp->pdev) == 0) {
			bp->flags |= BNX2_FLAG_USING_MSI;
			if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
				/* 5709 uses the one-shot MSI handler. */
				bp->flags |= BNX2_FLAG_ONE_SHOT_MSI;
				bp->irq_tbl[0].handler = bnx2_msi_1shot;
			} else
				bp->irq_tbl[0].handler = bnx2_msi;

			bp->irq_tbl[0].vector = bp->pdev->irq;
		}
	}

	/* Auto-sized tx ring count is rounded down to a power of two. */
	if (!bp->num_req_tx_rings)
		bp->num_tx_rings = rounddown_pow_of_two(bp->irq_nvecs);
	else
		bp->num_tx_rings = min(bp->irq_nvecs, bp->num_req_tx_rings);

	if (!bp->num_req_rx_rings)
		bp->num_rx_rings = bp->irq_nvecs;
	else
		bp->num_rx_rings = min(bp->irq_nvecs, bp->num_req_rx_rings);

	netif_set_real_num_tx_queues(bp->dev, bp->num_tx_rings);

	return netif_set_real_num_rx_queues(bp->dev, bp->num_rx_rings);
}
6363
/* Called with rtnl_lock */
/* ndo_open handler: bring the interface up.  Loads firmware, selects
 * the interrupt mode, sets up NAPI, allocates memory, requests IRQs,
 * and initializes the chip.  When plain MSI is in use, a test interrupt
 * is generated; if it never arrives the driver falls back to INTx and
 * re-initializes.  Returns 0 or a negative errno, with full unwind on
 * failure.
 */
static int
bnx2_open(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	rc = bnx2_request_firmware(bp);
	if (rc < 0)
		goto out;

	netif_carrier_off(dev);

	bnx2_disable_int(bp);

	rc = bnx2_setup_int_mode(bp, disable_msi);
	if (rc)
		goto open_err;
	bnx2_init_napi(bp);
	bnx2_napi_enable(bp);
	rc = bnx2_alloc_mem(bp);
	if (rc)
		goto open_err;

	rc = bnx2_request_irq(bp);
	if (rc)
		goto open_err;

	rc = bnx2_init_nic(bp, 1);
	if (rc)
		goto open_err;

	mod_timer(&bp->timer, jiffies + bp->current_interval);

	atomic_set(&bp->intr_sem, 0);

	memset(bp->temp_stats_blk, 0, sizeof(struct statistics_block));

	bnx2_enable_int(bp);

	if (bp->flags & BNX2_FLAG_USING_MSI) {
		/* Test MSI to make sure it is working
		 * If MSI test fails, go back to INTx mode
		 */
		if (bnx2_test_intr(bp) != 0) {
			netdev_warn(bp->dev, "No interrupt was generated using MSI, switching to INTx mode. Please report this failure to the PCI maintainer and include system chipset information.\n");

			bnx2_disable_int(bp);
			bnx2_free_irq(bp);

			/* Force INTx (dis_msi = 1) and redo chip init. */
			bnx2_setup_int_mode(bp, 1);

			rc = bnx2_init_nic(bp, 0);

			if (!rc)
				rc = bnx2_request_irq(bp);

			if (rc) {
				/* Timer was armed above; stop it before
				 * unwinding.
				 */
				del_timer_sync(&bp->timer);
				goto open_err;
			}
			bnx2_enable_int(bp);
		}
	}
	if (bp->flags & BNX2_FLAG_USING_MSI)
		netdev_info(dev, "using MSI\n");
	else if (bp->flags & BNX2_FLAG_USING_MSIX)
		netdev_info(dev, "using MSIX\n");

	netif_tx_start_all_queues(dev);
out:
	return rc;

open_err:
	/* Error unwind: release everything acquired above. */
	bnx2_napi_disable(bp);
	bnx2_free_skbs(bp);
	bnx2_free_irq(bp);
	bnx2_free_mem(bp);
	bnx2_del_napi(bp);
	bnx2_release_firmware(bp);
	goto out;
}
6446
/* Work handler for bp->reset_task (scheduled from bnx2_tx_timeout()):
 * fully re-initialize the NIC after a failure.  Takes rtnl_lock and
 * bails out if the device was closed in the meantime.  Restores PCI
 * config space when memory access was lost (PCI block reset).
 */
static void
bnx2_reset_task(struct work_struct *work)
{
	struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
	int rc;
	u16 pcicmd;

	rtnl_lock();
	if (!netif_running(bp->dev)) {
		rtnl_unlock();
		return;
	}

	bnx2_netif_stop(bp, true);

	pci_read_config_word(bp->pdev, PCI_COMMAND, &pcicmd);
	if (!(pcicmd & PCI_COMMAND_MEMORY)) {
		/* in case PCI block has reset */
		pci_restore_state(bp->pdev);
		pci_save_state(bp->pdev);
	}
	rc = bnx2_init_nic(bp, 1);
	if (rc) {
		netdev_err(bp->dev, "failed to reset NIC, closing\n");
		/* NOTE(review): NAPI was disabled by bnx2_netif_stop()
		 * above; re-enable so dev_close() runs a normal shutdown.
		 */
		bnx2_napi_enable(bp);
		dev_close(bp->dev);
		rtnl_unlock();
		return;
	}

	atomic_set(&bp->intr_sem, 1);
	bnx2_netif_start(bp, true);
	rtnl_unlock();
}
6481
/* Build a { "name", register-offset } table entry for one FTQ control
 * register from its BNX2_*FTQ_CTL symbol.
 */
#define BNX2_FTQ_ENTRY(ftq) { __stringify(ftq##FTQ_CTL), BNX2_##ftq##FTQ_CTL }

/* Dump all flow-through-queue control registers, the on-chip CPU
 * states, and the TBDC CAM to the kernel log for post-mortem debugging
 * (called from bnx2_tx_timeout()).
 */
static void
bnx2_dump_ftq(struct bnx2 *bp)
{
	int i;
	u32 reg, bdidx, cid, valid;
	struct net_device *dev = bp->dev;
	static const struct ftq_reg {
		char *name;
		u32 off;
	} ftq_arr[] = {
		BNX2_FTQ_ENTRY(RV2P_P),
		BNX2_FTQ_ENTRY(RV2P_T),
		BNX2_FTQ_ENTRY(RV2P_M),
		BNX2_FTQ_ENTRY(TBDR_),
		BNX2_FTQ_ENTRY(TDMA_),
		BNX2_FTQ_ENTRY(TXP_),
		/* NOTE(review): TXP_ appears twice -- looks like a
		 * duplicate entry; confirm against the register map
		 * before changing.
		 */
		BNX2_FTQ_ENTRY(TXP_),
		BNX2_FTQ_ENTRY(TPAT_),
		BNX2_FTQ_ENTRY(RXP_C),
		BNX2_FTQ_ENTRY(RXP_),
		BNX2_FTQ_ENTRY(COM_COMXQ_),
		BNX2_FTQ_ENTRY(COM_COMTQ_),
		BNX2_FTQ_ENTRY(COM_COMQ_),
		BNX2_FTQ_ENTRY(CP_CPQ_),
	};

	netdev_err(dev, "<--- start FTQ dump --->\n");
	for (i = 0; i < ARRAY_SIZE(ftq_arr); i++)
		netdev_err(dev, "%s %08x\n", ftq_arr[i].name,
			   bnx2_reg_rd_ind(bp, ftq_arr[i].off));

	netdev_err(dev, "CPU states:\n");
	/* reg + 0x1c is read twice, matching the "pc %x pc %x" format --
	 * presumably to show whether each CPU's pc is advancing.
	 */
	for (reg = BNX2_TXP_CPU_MODE; reg <= BNX2_CP_CPU_MODE; reg += 0x40000)
		netdev_err(dev, "%06x mode %x state %x evt_mask %x pc %x pc %x instr %x\n",
			   reg, bnx2_reg_rd_ind(bp, reg),
			   bnx2_reg_rd_ind(bp, reg + 4),
			   bnx2_reg_rd_ind(bp, reg + 8),
			   bnx2_reg_rd_ind(bp, reg + 0x1c),
			   bnx2_reg_rd_ind(bp, reg + 0x1c),
			   bnx2_reg_rd_ind(bp, reg + 0x20));

	netdev_err(dev, "<--- end FTQ dump --->\n");
	netdev_err(dev, "<--- start TBDC dump --->\n");
	netdev_err(dev, "TBDC free cnt: %ld\n",
		   BNX2_RD(bp, BNX2_TBDC_STATUS) & BNX2_TBDC_STATUS_FREE_CNT);
	netdev_err(dev, "LINE     CID  BIDX   CMD  VALIDS\n");
	/* Read all 0x20 TBDC CAM lines through the command interface. */
	for (i = 0; i < 0x20; i++) {
		int j = 0;

		BNX2_WR(bp, BNX2_TBDC_BD_ADDR, i);
		BNX2_WR(bp, BNX2_TBDC_CAM_OPCODE,
			BNX2_TBDC_CAM_OPCODE_OPCODE_CAM_READ);
		BNX2_WR(bp, BNX2_TBDC_COMMAND, BNX2_TBDC_COMMAND_CMD_REG_ARB);
		/* Bounded busy-wait for the CAM read to complete. */
		while ((BNX2_RD(bp, BNX2_TBDC_COMMAND) &
			BNX2_TBDC_COMMAND_CMD_REG_ARB) && j < 100)
			j++;

		cid = BNX2_RD(bp, BNX2_TBDC_CID);
		bdidx = BNX2_RD(bp, BNX2_TBDC_BIDX);
		valid = BNX2_RD(bp, BNX2_TBDC_CAM_OPCODE);
		netdev_err(dev, "%02x    %06x  %04lx   %02x    [%x]\n",
			   i, cid, bdidx & BNX2_TBDC_BDIDX_BDIDX,
			   bdidx >> 24, (valid >> 8) & 0x0ff);
	}
	netdev_err(dev, "<--- end TBDC dump --->\n");
}
6550
/* Log a snapshot of PCI config space and key EMAC/HC registers for
 * debugging (called from bnx2_tx_timeout()).
 */
static void
bnx2_dump_state(struct bnx2 *bp)
{
	struct net_device *dev = bp->dev;
	u32 val1, val2;

	pci_read_config_dword(bp->pdev, PCI_COMMAND, &val1);
	netdev_err(dev, "DEBUG: intr_sem[%x] PCI_CMD[%08x]\n",
		   atomic_read(&bp->intr_sem), val1);
	pci_read_config_dword(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &val1);
	pci_read_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, &val2);
	netdev_err(dev, "DEBUG: PCI_PM[%08x] PCI_MISC_CFG[%08x]\n", val1, val2);
	netdev_err(dev, "DEBUG: EMAC_TX_STATUS[%08x] EMAC_RX_STATUS[%08x]\n",
		   BNX2_RD(bp, BNX2_EMAC_TX_STATUS),
		   BNX2_RD(bp, BNX2_EMAC_RX_STATUS));
	netdev_err(dev, "DEBUG: RPM_MGMT_PKT_CTRL[%08x]\n",
		   BNX2_RD(bp, BNX2_RPM_MGMT_PKT_CTRL));
	netdev_err(dev, "DEBUG: HC_STATS_INTERRUPT_STATUS[%08x]\n",
		   BNX2_RD(bp, BNX2_HC_STATS_INTERRUPT_STATUS));
	/* The pending-bit array is only dumped when MSI-X is in use. */
	if (bp->flags & BNX2_FLAG_USING_MSIX)
		netdev_err(dev, "DEBUG: PBA[%08x]\n",
			   BNX2_RD(bp, BNX2_PCI_GRC_WINDOW3_BASE));
}
6574
/* ndo_tx_timeout handler: dump diagnostic state to the log, then
 * schedule bnx2_reset_task() to re-initialize the NIC.
 */
static void
bnx2_tx_timeout(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	bnx2_dump_ftq(bp);
	bnx2_dump_state(bp);
	bnx2_dump_mcp_state(bp);

	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_work(&bp->reset_task);
}
6587
6588 /* Called with netif_tx_lock.
6589  * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
6590  * netif_wake_queue().
6591  */
6592 static netdev_tx_t
6593 bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
6594 {
6595         struct bnx2 *bp = netdev_priv(dev);
6596         dma_addr_t mapping;
6597         struct bnx2_tx_bd *txbd;
6598         struct bnx2_sw_tx_bd *tx_buf;
6599         u32 len, vlan_tag_flags, last_frag, mss;
6600         u16 prod, ring_prod;
6601         int i;
6602         struct bnx2_napi *bnapi;
6603         struct bnx2_tx_ring_info *txr;
6604         struct netdev_queue *txq;
6605
6606         /*  Determine which tx ring we will be placed on */
6607         i = skb_get_queue_mapping(skb);
6608         bnapi = &bp->bnx2_napi[i];
6609         txr = &bnapi->tx_ring;
6610         txq = netdev_get_tx_queue(dev, i);
6611
6612         if (unlikely(bnx2_tx_avail(bp, txr) <
6613             (skb_shinfo(skb)->nr_frags + 1))) {
6614                 netif_tx_stop_queue(txq);
6615                 netdev_err(dev, "BUG! Tx ring full when queue awake!\n");
6616
6617                 return NETDEV_TX_BUSY;
6618         }
6619         len = skb_headlen(skb);
6620         prod = txr->tx_prod;
6621         ring_prod = BNX2_TX_RING_IDX(prod);
6622
6623         vlan_tag_flags = 0;
6624         if (skb->ip_summed == CHECKSUM_PARTIAL) {
6625                 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
6626         }
6627
6628         if (skb_vlan_tag_present(skb)) {
6629                 vlan_tag_flags |=
6630                         (TX_BD_FLAGS_VLAN_TAG | (skb_vlan_tag_get(skb) << 16));
6631         }
6632
6633         if ((mss = skb_shinfo(skb)->gso_size)) {
6634                 u32 tcp_opt_len;
6635                 struct iphdr *iph;
6636
6637                 vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
6638
6639                 tcp_opt_len = tcp_optlen(skb);
6640
6641                 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
6642                         u32 tcp_off = skb_transport_offset(skb) -
6643                                       sizeof(struct ipv6hdr) - ETH_HLEN;
6644
6645                         vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
6646                                           TX_BD_FLAGS_SW_FLAGS;
6647                         if (likely(tcp_off == 0))
6648                                 vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
6649                         else {
6650                                 tcp_off >>= 3;
6651                                 vlan_tag_flags |= ((tcp_off & 0x3) <<
6652                                                    TX_BD_FLAGS_TCP6_OFF0_SHL) |
6653                                                   ((tcp_off & 0x10) <<
6654                                                    TX_BD_FLAGS_TCP6_OFF4_SHL);
6655                                 mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
6656                         }
6657                 } else {
6658                         iph = ip_hdr(skb);
6659                         if (tcp_opt_len || (iph->ihl > 5)) {
6660                                 vlan_tag_flags |= ((iph->ihl - 5) +
6661                                                    (tcp_opt_len >> 2)) << 8;
6662                         }
6663                 }
6664         } else
6665                 mss = 0;
6666
6667         mapping = dma_map_single(&bp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE);
6668         if (dma_mapping_error(&bp->pdev->dev, mapping)) {
6669                 dev_kfree_skb_any(skb);
6670                 return NETDEV_TX_OK;
6671         }
6672
6673         tx_buf = &txr->tx_buf_ring[ring_prod];
6674         tx_buf->skb = skb;
6675         dma_unmap_addr_set(tx_buf, mapping, mapping);
6676
6677         txbd = &txr->tx_desc_ring[ring_prod];
6678
6679         txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
6680         txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
6681         txbd->tx_bd_mss_nbytes = len | (mss << 16);
6682         txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
6683
6684         last_frag = skb_shinfo(skb)->nr_frags;
6685         tx_buf->nr_frags = last_frag;
6686         tx_buf->is_gso = skb_is_gso(skb);
6687
6688         for (i = 0; i < last_frag; i++) {
6689                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6690
6691                 prod = BNX2_NEXT_TX_BD(prod);
6692                 ring_prod = BNX2_TX_RING_IDX(prod);
6693                 txbd = &txr->tx_desc_ring[ring_prod];
6694
6695                 len = skb_frag_size(frag);
6696                 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0, len,
6697                                            DMA_TO_DEVICE);
6698                 if (dma_mapping_error(&bp->pdev->dev, mapping))
6699                         goto dma_error;
6700                 dma_unmap_addr_set(&txr->tx_buf_ring[ring_prod], mapping,
6701                                    mapping);
6702
6703                 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
6704                 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
6705                 txbd->tx_bd_mss_nbytes = len | (mss << 16);
6706                 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
6707
6708         }
6709         txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
6710
6711         /* Sync BD data before updating TX mailbox */
6712         wmb();
6713
6714         netdev_tx_sent_queue(txq, skb->len);
6715
6716         prod = BNX2_NEXT_TX_BD(prod);
6717         txr->tx_prod_bseq += skb->len;
6718
6719         BNX2_WR16(bp, txr->tx_bidx_addr, prod);
6720         BNX2_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
6721
6722         mmiowb();
6723
6724         txr->tx_prod = prod;
6725
6726         if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
6727                 netif_tx_stop_queue(txq);
6728
6729                 /* netif_tx_stop_queue() must be done before checking
6730                  * tx index in bnx2_tx_avail() below, because in
6731                  * bnx2_tx_int(), we update tx index before checking for
6732                  * netif_tx_queue_stopped().
6733                  */
6734                 smp_mb();
6735                 if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)
6736                         netif_tx_wake_queue(txq);
6737         }
6738
6739         return NETDEV_TX_OK;
6740 dma_error:
6741         /* save value of frag that failed */
6742         last_frag = i;
6743
6744         /* start back at beginning and unmap skb */
6745         prod = txr->tx_prod;
6746         ring_prod = BNX2_TX_RING_IDX(prod);
6747         tx_buf = &txr->tx_buf_ring[ring_prod];
6748         tx_buf->skb = NULL;
6749         dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
6750                          skb_headlen(skb), PCI_DMA_TODEVICE);
6751
6752         /* unmap remaining mapped pages */
6753         for (i = 0; i < last_frag; i++) {
6754                 prod = BNX2_NEXT_TX_BD(prod);
6755                 ring_prod = BNX2_TX_RING_IDX(prod);
6756                 tx_buf = &txr->tx_buf_ring[ring_prod];
6757                 dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
6758                                skb_frag_size(&skb_shinfo(skb)->frags[i]),
6759                                PCI_DMA_TODEVICE);
6760         }
6761
6762         dev_kfree_skb_any(skb);
6763         return NETDEV_TX_OK;
6764 }
6765
/* Called with rtnl_lock */
static int
bnx2_close(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* Quiesce in strict order: disable interrupts and NAPI, stop
	 * the tx queues and the periodic timer, reset the chip, then
	 * release IRQs, buffers and descriptor memory.
	 */
	bnx2_disable_int_sync(bp);
	bnx2_napi_disable(bp);
	netif_tx_disable(dev);
	del_timer_sync(&bp->timer);
	bnx2_shutdown_chip(bp);
	bnx2_free_irq(bp);
	bnx2_free_skbs(bp);
	bnx2_free_mem(bp);
	bnx2_del_napi(bp);
	bp->link_up = 0;
	netif_carrier_off(bp->dev);
	return 0;
}
6785
6786 static void
6787 bnx2_save_stats(struct bnx2 *bp)
6788 {
6789         u32 *hw_stats = (u32 *) bp->stats_blk;
6790         u32 *temp_stats = (u32 *) bp->temp_stats_blk;
6791         int i;
6792
6793         /* The 1st 10 counters are 64-bit counters */
6794         for (i = 0; i < 20; i += 2) {
6795                 u32 hi;
6796                 u64 lo;
6797
6798                 hi = temp_stats[i] + hw_stats[i];
6799                 lo = (u64) temp_stats[i + 1] + (u64) hw_stats[i + 1];
6800                 if (lo > 0xffffffff)
6801                         hi++;
6802                 temp_stats[i] = hi;
6803                 temp_stats[i + 1] = lo & 0xffffffff;
6804         }
6805
6806         for ( ; i < sizeof(struct statistics_block) / 4; i++)
6807                 temp_stats[i] += hw_stats[i];
6808 }
6809
/* Reassemble a 64-bit counter from its _hi/_lo u32 halves. */
#define GET_64BIT_NET_STATS64(ctr)              \
	(((u64) (ctr##_hi) << 32) + (u64) (ctr##_lo))

/* Live hardware counter plus the copy saved across chip resets. */
#define GET_64BIT_NET_STATS(ctr)                                \
	GET_64BIT_NET_STATS64(bp->stats_blk->ctr) +             \
	GET_64BIT_NET_STATS64(bp->temp_stats_blk->ctr)

/* 32-bit counter: live value plus the saved copy. */
#define GET_32BIT_NET_STATS(ctr)                                \
	(unsigned long) (bp->stats_blk->ctr +                   \
			 bp->temp_stats_blk->ctr)
6820
/* ndo_get_stats64: translate the chip statistics block (combined
 * with the counters saved across resets in temp_stats_blk) into
 * struct rtnl_link_stats64.
 */
static void
bnx2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* No stats block allocated yet - nothing to report. */
	if (!bp->stats_blk)
		return;

	net_stats->rx_packets =
		GET_64BIT_NET_STATS(stat_IfHCInUcastPkts) +
		GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts) +
		GET_64BIT_NET_STATS(stat_IfHCInBroadcastPkts);

	net_stats->tx_packets =
		GET_64BIT_NET_STATS(stat_IfHCOutUcastPkts) +
		GET_64BIT_NET_STATS(stat_IfHCOutMulticastPkts) +
		GET_64BIT_NET_STATS(stat_IfHCOutBroadcastPkts);

	net_stats->rx_bytes =
		GET_64BIT_NET_STATS(stat_IfHCInOctets);

	net_stats->tx_bytes =
		GET_64BIT_NET_STATS(stat_IfHCOutOctets);

	net_stats->multicast =
		GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts);

	net_stats->collisions =
		GET_32BIT_NET_STATS(stat_EtherStatsCollisions);

	net_stats->rx_length_errors =
		GET_32BIT_NET_STATS(stat_EtherStatsUndersizePkts) +
		GET_32BIT_NET_STATS(stat_EtherStatsOverrsizePkts);

	net_stats->rx_over_errors =
		GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
		GET_32BIT_NET_STATS(stat_IfInMBUFDiscards);

	net_stats->rx_frame_errors =
		GET_32BIT_NET_STATS(stat_Dot3StatsAlignmentErrors);

	net_stats->rx_crc_errors =
		GET_32BIT_NET_STATS(stat_Dot3StatsFCSErrors);

	/* rx_errors is the sum of the error categories above. */
	net_stats->rx_errors = net_stats->rx_length_errors +
		net_stats->rx_over_errors + net_stats->rx_frame_errors +
		net_stats->rx_crc_errors;

	net_stats->tx_aborted_errors =
		GET_32BIT_NET_STATS(stat_Dot3StatsExcessiveCollisions) +
		GET_32BIT_NET_STATS(stat_Dot3StatsLateCollisions);

	/* Carrier sense errors are not reported for 5706 or 5708 A0. */
	if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) ||
	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0))
		net_stats->tx_carrier_errors = 0;
	else {
		net_stats->tx_carrier_errors =
			GET_32BIT_NET_STATS(stat_Dot3StatsCarrierSenseErrors);
	}

	net_stats->tx_errors =
		GET_32BIT_NET_STATS(stat_emac_tx_stat_dot3statsinternalmactransmiterrors) +
		net_stats->tx_aborted_errors +
		net_stats->tx_carrier_errors;

	net_stats->rx_missed_errors =
		GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
		GET_32BIT_NET_STATS(stat_IfInMBUFDiscards) +
		GET_32BIT_NET_STATS(stat_FwRxDrop);

}
6892
6893 /* All ethtool functions called with rtnl_lock */
6894
/* ethtool get_link_ksettings: report supported/advertised modes and
 * the current speed/duplex.  Supported modes depend on whether the
 * port is copper, fibre, or a remote-PHY-capable device (which may
 * be either).  Link state fields are read under phy_lock.
 */
static int
bnx2_get_link_ksettings(struct net_device *dev,
			struct ethtool_link_ksettings *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	int support_serdes = 0, support_copper = 0;
	u32 supported, advertising;

	supported = SUPPORTED_Autoneg;
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
		/* Remote PHY can be either media type. */
		support_serdes = 1;
		support_copper = 1;
	} else if (bp->phy_port == PORT_FIBRE)
		support_serdes = 1;
	else
		support_copper = 1;

	if (support_serdes) {
		supported |= SUPPORTED_1000baseT_Full |
			SUPPORTED_FIBRE;
		if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
			supported |= SUPPORTED_2500baseX_Full;
	}
	if (support_copper) {
		supported |= SUPPORTED_10baseT_Half |
			SUPPORTED_10baseT_Full |
			SUPPORTED_100baseT_Half |
			SUPPORTED_100baseT_Full |
			SUPPORTED_1000baseT_Full |
			SUPPORTED_TP;
	}

	spin_lock_bh(&bp->phy_lock);
	cmd->base.port = bp->phy_port;
	advertising = bp->advertising;

	if (bp->autoneg & AUTONEG_SPEED) {
		cmd->base.autoneg = AUTONEG_ENABLE;
	} else {
		cmd->base.autoneg = AUTONEG_DISABLE;
	}

	/* Speed/duplex (and MDI-X for copper) are only meaningful
	 * while the link is up.
	 */
	if (netif_carrier_ok(dev)) {
		cmd->base.speed = bp->line_speed;
		cmd->base.duplex = bp->duplex;
		if (!(bp->phy_flags & BNX2_PHY_FLAG_SERDES)) {
			if (bp->phy_flags & BNX2_PHY_FLAG_MDIX)
				cmd->base.eth_tp_mdix = ETH_TP_MDI_X;
			else
				cmd->base.eth_tp_mdix = ETH_TP_MDI;
		}
	}
	else {
		cmd->base.speed = SPEED_UNKNOWN;
		cmd->base.duplex = DUPLEX_UNKNOWN;
	}
	spin_unlock_bh(&bp->phy_lock);

	cmd->base.phy_address = bp->phy_addr;

	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						supported);
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
						advertising);

	return 0;
}
6962
/* ethtool set_link_ksettings: validate and store the requested
 * autoneg/speed/duplex settings under phy_lock, then apply them via
 * bnx2_setup_phy() if the device is running.  Returns -EINVAL for
 * unsupported port/speed combinations.
 */
static int
bnx2_set_link_ksettings(struct net_device *dev,
			const struct ethtool_link_ksettings *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	/* Work on local copies so nothing is committed on error. */
	u8 autoneg = bp->autoneg;
	u8 req_duplex = bp->req_duplex;
	u16 req_line_speed = bp->req_line_speed;
	u32 advertising = bp->advertising;
	int err = -EINVAL;

	spin_lock_bh(&bp->phy_lock);

	if (cmd->base.port != PORT_TP && cmd->base.port != PORT_FIBRE)
		goto err_out_unlock;

	/* Switching ports requires remote-PHY capability. */
	if (cmd->base.port != bp->phy_port &&
	    !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP))
		goto err_out_unlock;

	/* If device is down, we can store the settings only if the user
	 * is setting the currently active port.
	 */
	if (!netif_running(dev) && cmd->base.port != bp->phy_port)
		goto err_out_unlock;

	if (cmd->base.autoneg == AUTONEG_ENABLE) {
		autoneg |= AUTONEG_SPEED;

		ethtool_convert_link_mode_to_legacy_u32(
			&advertising, cmd->link_modes.advertising);

		/* Restrict the advertised modes to the selected medium;
		 * fall back to advertising everything for that medium.
		 */
		if (cmd->base.port == PORT_TP) {
			advertising &= ETHTOOL_ALL_COPPER_SPEED;
			if (!advertising)
				advertising = ETHTOOL_ALL_COPPER_SPEED;
		} else {
			advertising &= ETHTOOL_ALL_FIBRE_SPEED;
			if (!advertising)
				advertising = ETHTOOL_ALL_FIBRE_SPEED;
		}
		advertising |= ADVERTISED_Autoneg;
	}
	else {
		u32 speed = cmd->base.speed;

		if (cmd->base.port == PORT_FIBRE) {
			/* Fibre only supports 1000/2500 full duplex,
			 * and 2500 only on 2.5G-capable PHYs.
			 */
			if ((speed != SPEED_1000 &&
			     speed != SPEED_2500) ||
			    (cmd->base.duplex != DUPLEX_FULL))
				goto err_out_unlock;

			if (speed == SPEED_2500 &&
			    !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
				goto err_out_unlock;
		} else if (speed == SPEED_1000 || speed == SPEED_2500)
			goto err_out_unlock;

		autoneg &= ~AUTONEG_SPEED;
		req_line_speed = speed;
		req_duplex = cmd->base.duplex;
		advertising = 0;
	}

	/* All checks passed - commit the new settings. */
	bp->autoneg = autoneg;
	bp->advertising = advertising;
	bp->req_line_speed = req_line_speed;
	bp->req_duplex = req_duplex;

	err = 0;
	/* If device is down, the new settings will be picked up when it is
	 * brought up.
	 */
	if (netif_running(dev))
		err = bnx2_setup_phy(bp, cmd->base.port);

err_out_unlock:
	spin_unlock_bh(&bp->phy_lock);

	return err;
}
7044
7045 static void
7046 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
7047 {
7048         struct bnx2 *bp = netdev_priv(dev);
7049
7050         strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
7051         strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
7052         strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
7053         strlcpy(info->fw_version, bp->fw_version, sizeof(info->fw_version));
7054 }
7055
/* Size of the register dump produced by bnx2_get_regs() below. */
#define BNX2_REGDUMP_LEN                (32 * 1024)

static int
bnx2_get_regs_len(struct net_device *dev)
{
	return BNX2_REGDUMP_LEN;
}
7063
/* ethtool register dump: read the register ranges listed in
 * reg_boundaries into the output buffer at their natural offsets,
 * leaving the gaps between ranges zero-filled.  Does nothing (all
 * zeros) when the interface is down.
 */
static void
bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p, i, offset;
	u8 *orig_p = _p;
	struct bnx2 *bp = netdev_priv(dev);
	/* Consecutive pairs: [start, end) of each readable range;
	 * terminated by the 0x8000 sentinel (>= BNX2_REGDUMP_LEN).
	 */
	static const u32 reg_boundaries[] = {
		0x0000, 0x0098, 0x0400, 0x045c,
		0x0800, 0x0880, 0x0c00, 0x0c10,
		0x0c30, 0x0d08, 0x1000, 0x101c,
		0x1040, 0x1048, 0x1080, 0x10a4,
		0x1400, 0x1490, 0x1498, 0x14f0,
		0x1500, 0x155c, 0x1580, 0x15dc,
		0x1600, 0x1658, 0x1680, 0x16d8,
		0x1800, 0x1820, 0x1840, 0x1854,
		0x1880, 0x1894, 0x1900, 0x1984,
		0x1c00, 0x1c0c, 0x1c40, 0x1c54,
		0x1c80, 0x1c94, 0x1d00, 0x1d84,
		0x2000, 0x2030, 0x23c0, 0x2400,
		0x2800, 0x2820, 0x2830, 0x2850,
		0x2b40, 0x2c10, 0x2fc0, 0x3058,
		0x3c00, 0x3c94, 0x4000, 0x4010,
		0x4080, 0x4090, 0x43c0, 0x4458,
		0x4c00, 0x4c18, 0x4c40, 0x4c54,
		0x4fc0, 0x5010, 0x53c0, 0x5444,
		0x5c00, 0x5c18, 0x5c80, 0x5c90,
		0x5fc0, 0x6000, 0x6400, 0x6428,
		0x6800, 0x6848, 0x684c, 0x6860,
		0x6888, 0x6910, 0x8000
	};

	regs->version = 0;

	memset(p, 0, BNX2_REGDUMP_LEN);

	if (!netif_running(bp->dev))
		return;

	i = 0;
	offset = reg_boundaries[0];
	p += offset;
	while (offset < BNX2_REGDUMP_LEN) {
		*p++ = BNX2_RD(bp, offset);
		offset += 4;
		/* End of this range: jump to the start of the next one. */
		if (offset == reg_boundaries[i + 1]) {
			offset = reg_boundaries[i + 2];
			p = (u32 *) (orig_p + offset);
			i += 2;
		}
	}
}
7115
7116 static void
7117 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7118 {
7119         struct bnx2 *bp = netdev_priv(dev);
7120
7121         if (bp->flags & BNX2_FLAG_NO_WOL) {
7122                 wol->supported = 0;
7123                 wol->wolopts = 0;
7124         }
7125         else {
7126                 wol->supported = WAKE_MAGIC;
7127                 if (bp->wol)
7128                         wol->wolopts = WAKE_MAGIC;
7129                 else
7130                         wol->wolopts = 0;
7131         }
7132         memset(&wol->sopass, 0, sizeof(wol->sopass));
7133 }
7134
7135 static int
7136 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7137 {
7138         struct bnx2 *bp = netdev_priv(dev);
7139
7140         if (wol->wolopts & ~WAKE_MAGIC)
7141                 return -EINVAL;
7142
7143         if (wol->wolopts & WAKE_MAGIC) {
7144                 if (bp->flags & BNX2_FLAG_NO_WOL)
7145                         return -EINVAL;
7146
7147                 bp->wol = 1;
7148         }
7149         else {
7150                 bp->wol = 0;
7151         }
7152
7153         device_set_wakeup_enable(&bp->pdev->dev, bp->wol);
7154
7155         return 0;
7156 }
7157
/* ethtool nway_reset: restart autonegotiation.  Requires the device
 * to be up (-EAGAIN) and autoneg enabled (-EINVAL).  Remote-PHY
 * devices delegate to bnx2_setup_remote_phy(); SerDes PHYs first
 * force the link down (loopback) so the peer notices the restart.
 */
static int
bnx2_nway_reset(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 bmcr;

	if (!netif_running(dev))
		return -EAGAIN;

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		return -EINVAL;
	}

	spin_lock_bh(&bp->phy_lock);

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
		int rc;

		rc = bnx2_setup_remote_phy(bp, bp->phy_port);
		spin_unlock_bh(&bp->phy_lock);
		return rc;
	}

	/* Force a link down visible on the other side */
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
		/* Drop the lock while the forced link-down settles. */
		spin_unlock_bh(&bp->phy_lock);

		msleep(20);

		spin_lock_bh(&bp->phy_lock);

		/* Let the timer supervise the SerDes autoneg restart. */
		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	}

	/* Clear loopback and restart autonegotiation. */
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	bmcr &= ~BMCR_LOOPBACK;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
7203
/* ethtool get_link: report the driver's cached link state. */
static u32
bnx2_get_link(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	return bp->link_up;
}
7211
7212 static int
7213 bnx2_get_eeprom_len(struct net_device *dev)
7214 {
7215         struct bnx2 *bp = netdev_priv(dev);
7216
7217         if (!bp->flash_info)
7218                 return 0;
7219
7220         return (int) bp->flash_size;
7221 }
7222
7223 static int
7224 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
7225                 u8 *eebuf)
7226 {
7227         struct bnx2 *bp = netdev_priv(dev);
7228         int rc;
7229
7230         /* parameters already validated in ethtool_get_eeprom */
7231
7232         rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
7233
7234         return rc;
7235 }
7236
7237 static int
7238 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
7239                 u8 *eebuf)
7240 {
7241         struct bnx2 *bp = netdev_priv(dev);
7242         int rc;
7243
7244         /* parameters already validated in ethtool_set_eeprom */
7245
7246         rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
7247
7248         return rc;
7249 }
7250
7251 static int
7252 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
7253 {
7254         struct bnx2 *bp = netdev_priv(dev);
7255
7256         memset(coal, 0, sizeof(struct ethtool_coalesce));
7257
7258         coal->rx_coalesce_usecs = bp->rx_ticks;
7259         coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
7260         coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
7261         coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
7262
7263         coal->tx_coalesce_usecs = bp->tx_ticks;
7264         coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
7265         coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
7266         coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
7267
7268         coal->stats_block_coalesce_usecs = bp->stats_ticks;
7269
7270         return 0;
7271 }
7272
/* ethtool -C: store new coalescing parameters.  Each value is first
 * truncated to u16, then clamped (ticks to 0x3ff, frame counts to
 * 0xff) - note the truncate-before-clamp order is deliberate.  If
 * the device is running, restart the NIC so they take effect.
 */
static int
bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
{
	struct bnx2 *bp = netdev_priv(dev);

	bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
	if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;

	bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
	if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;

	bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
	if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;

	bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
	if (bp->rx_quick_cons_trip_int > 0xff)
		bp->rx_quick_cons_trip_int = 0xff;

	bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
	if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;

	bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
	if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;

	bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
	if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;

	bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
	if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
		0xff;

	bp->stats_ticks = coal->stats_block_coalesce_usecs;
	/* BROKEN_STATS chips only support a stats period of 0 or 1s. */
	if (bp->flags & BNX2_FLAG_BROKEN_STATS) {
		if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
			bp->stats_ticks = USEC_PER_SEC;
	}
	if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
		bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
	bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;

	/* Restart the NIC so the new values are programmed. */
	if (netif_running(bp->dev)) {
		bnx2_netif_stop(bp, true);
		bnx2_init_nic(bp, 0);
		bnx2_netif_start(bp, true);
	}

	return 0;
}
7321
7322 static void
7323 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7324 {
7325         struct bnx2 *bp = netdev_priv(dev);
7326
7327         ering->rx_max_pending = BNX2_MAX_TOTAL_RX_DESC_CNT;
7328         ering->rx_jumbo_max_pending = BNX2_MAX_TOTAL_RX_PG_DESC_CNT;
7329
7330         ering->rx_pending = bp->rx_ring_size;
7331         ering->rx_jumbo_pending = bp->rx_pg_ring_size;
7332
7333         ering->tx_max_pending = BNX2_MAX_TX_DESC_CNT;
7334         ering->tx_pending = bp->tx_ring_size;
7335 }
7336
/* Resize the rx/tx rings.  If the device is running, the chip is
 * quiesced and reset, memory is reallocated with the new sizes, and
 * the NIC is reinitialized.  @reset_irq additionally tears down and
 * re-requests IRQs/NAPI (needed when the vector count may change).
 * On reinit failure the device is closed and the error returned.
 */
static int
bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx, bool reset_irq)
{
	if (netif_running(bp->dev)) {
		/* Reset will erase chipset stats; save them */
		bnx2_save_stats(bp);

		bnx2_netif_stop(bp, true);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		if (reset_irq) {
			bnx2_free_irq(bp);
			bnx2_del_napi(bp);
		} else {
			__bnx2_free_irq(bp);
		}
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
	}

	/* Record the new sizes; used by the allocation below. */
	bnx2_set_rx_ring_size(bp, rx);
	bp->tx_ring_size = tx;

	if (netif_running(bp->dev)) {
		int rc = 0;

		if (reset_irq) {
			rc = bnx2_setup_int_mode(bp, disable_msi);
			bnx2_init_napi(bp);
		}

		if (!rc)
			rc = bnx2_alloc_mem(bp);

		if (!rc)
			rc = bnx2_request_irq(bp);

		if (!rc)
			rc = bnx2_init_nic(bp, 0);

		if (rc) {
			/* dev_close() needs NAPI enabled to tear down. */
			bnx2_napi_enable(bp);
			dev_close(bp->dev);
			return rc;
		}
#ifdef BCM_CNIC
		mutex_lock(&bp->cnic_lock);
		/* Let cnic know about the new status block. */
		if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD)
			bnx2_setup_cnic_irq_info(bp);
		mutex_unlock(&bp->cnic_lock);
#endif
		bnx2_netif_start(bp, true);
	}
	return 0;
}
7392
7393 static int
7394 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7395 {
7396         struct bnx2 *bp = netdev_priv(dev);
7397         int rc;
7398
7399         if ((ering->rx_pending > BNX2_MAX_TOTAL_RX_DESC_CNT) ||
7400                 (ering->tx_pending > BNX2_MAX_TX_DESC_CNT) ||
7401                 (ering->tx_pending <= MAX_SKB_FRAGS)) {
7402
7403                 return -EINVAL;
7404         }
7405         rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending,
7406                                    false);
7407         return rc;
7408 }
7409
7410 static void
7411 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7412 {
7413         struct bnx2 *bp = netdev_priv(dev);
7414
7415         epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
7416         epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
7417         epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
7418 }
7419
7420 static int
7421 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7422 {
7423         struct bnx2 *bp = netdev_priv(dev);
7424
7425         bp->req_flow_ctrl = 0;
7426         if (epause->rx_pause)
7427                 bp->req_flow_ctrl |= FLOW_CTRL_RX;
7428         if (epause->tx_pause)
7429                 bp->req_flow_ctrl |= FLOW_CTRL_TX;
7430
7431         if (epause->autoneg) {
7432                 bp->autoneg |= AUTONEG_FLOW_CTRL;
7433         }
7434         else {
7435                 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
7436         }
7437
7438         if (netif_running(dev)) {
7439                 spin_lock_bh(&bp->phy_lock);
7440                 bnx2_setup_phy(bp, bp->phy_port);
7441                 spin_unlock_bh(&bp->phy_lock);
7442         }
7443
7444         return 0;
7445 }
7446
/* ethtool -S statistic names.  The order must match the counter
 * offsets in bnx2_stats_offset_arr below.
 */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_stats_str_arr[] = {
	{ "rx_bytes" },
	{ "rx_error_bytes" },
	{ "tx_bytes" },
	{ "tx_error_bytes" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_mac_errors" },
	{ "tx_carrier_errors" },
	{ "rx_crc_errors" },
	{ "rx_align_errors" },
	{ "tx_single_collisions" },
	{ "tx_multi_collisions" },
	{ "tx_deferred" },
	{ "tx_excess_collisions" },
	{ "tx_late_collisions" },
	{ "tx_total_collisions" },
	{ "rx_fragments" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_oversize_packets" },
	{ "rx_64_byte_packets" },
	{ "rx_65_to_127_byte_packets" },
	{ "rx_128_to_255_byte_packets" },
	{ "rx_256_to_511_byte_packets" },
	{ "rx_512_to_1023_byte_packets" },
	{ "rx_1024_to_1522_byte_packets" },
	{ "rx_1523_to_9022_byte_packets" },
	{ "tx_64_byte_packets" },
	{ "tx_65_to_127_byte_packets" },
	{ "tx_128_to_255_byte_packets" },
	{ "tx_256_to_511_byte_packets" },
	{ "tx_512_to_1023_byte_packets" },
	{ "tx_1024_to_1522_byte_packets" },
	{ "tx_1523_to_9022_byte_packets" },
	{ "rx_xon_frames" },
	{ "rx_xoff_frames" },
	{ "tx_xon_frames" },
	{ "tx_xoff_frames" },
	{ "rx_mac_ctrl_frames" },
	{ "rx_filtered_packets" },
	{ "rx_ftq_discards" },
	{ "rx_discards" },
	{ "rx_fw_discards" },
};
7498
/* Number of ethtool statistics exported by this driver. */
#define BNX2_NUM_STATS ARRAY_SIZE(bnx2_stats_str_arr)

/* Convert a struct statistics_block member offset into a 32-bit word index. */
#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)

/* 32-bit word offsets of each counter inside the hardware statistics
 * block.  Entry order must match bnx2_stats_str_arr[] above.
 */
static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
    STATS_OFFSET32(stat_IfHCInOctets_hi),
    STATS_OFFSET32(stat_IfHCInBadOctets_hi),
    STATS_OFFSET32(stat_IfHCOutOctets_hi),
    STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
    STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
    STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
    STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
    STATS_OFFSET32(stat_Dot3StatsFCSErrors),
    STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
    STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
    STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
    STATS_OFFSET32(stat_Dot3StatsLateCollisions),
    STATS_OFFSET32(stat_EtherStatsCollisions),
    STATS_OFFSET32(stat_EtherStatsFragments),
    STATS_OFFSET32(stat_EtherStatsJabbers),
    STATS_OFFSET32(stat_EtherStatsUndersizePkts),
    STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
    STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_XonPauseFramesReceived),
    STATS_OFFSET32(stat_XoffPauseFramesReceived),
    STATS_OFFSET32(stat_OutXonSent),
    STATS_OFFSET32(stat_OutXoffSent),
    STATS_OFFSET32(stat_MacControlFramesReceived),
    STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
    STATS_OFFSET32(stat_IfInFTQDiscards),
    STATS_OFFSET32(stat_IfInMBUFDiscards),
    STATS_OFFSET32(stat_FwRxDrop),
};
7552
/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
 * skipped because of errata.
 */
/* Per-counter width in bytes for early 5706/5708 steppings;
 * 0 means the counter is skipped (reported as zero).
 */
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,0,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,
};
7563
/* Per-counter width in bytes for later chips; only stat_IfHCInBadOctets
 * (index 1) is skipped.
 */
static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,
};
7571
/* Number of ethtool self-test entries; must match bnx2_tests_str_arr[]
 * and the buf[] slots filled in by bnx2_self_test().
 */
#define BNX2_NUM_TESTS 6

static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
};
7584
7585 static int
7586 bnx2_get_sset_count(struct net_device *dev, int sset)
7587 {
7588         switch (sset) {
7589         case ETH_SS_TEST:
7590                 return BNX2_NUM_TESTS;
7591         case ETH_SS_STATS:
7592                 return BNX2_NUM_STATS;
7593         default:
7594                 return -EOPNOTSUPP;
7595         }
7596 }
7597
/* ethtool self-test handler.  Offline tests reset the chip and disrupt
 * traffic; the online tests are non-disruptive.  Each buf[] slot is
 * nonzero when the corresponding test (see bnx2_tests_str_arr) failed.
 */
static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);

	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int i;

		/* Quiesce the NIC and put the chip into diagnostic mode. */
		bnx2_netif_stop(bp, true);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
		bnx2_free_skbs(bp);

		if (bnx2_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		/* buf[2] records the loopback test's return value
		 * (nonzero on failure).
		 */
		if ((buf[2] = bnx2_test_loopback(bp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		/* Restore normal operation, or power the chip down again
		 * if the interface was closed while we were testing.
		 */
		if (!netif_running(bp->dev))
			bnx2_shutdown_chip(bp);
		else {
			bnx2_init_nic(bp, 1);
			bnx2_netif_start(bp, true);
		}

		/* wait for link up */
		for (i = 0; i < 7; i++) {
			if (bp->link_up)
				break;
			msleep_interruptible(1000);
		}
	}

	if (bnx2_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}

	if (bnx2_test_link(bp) != 0) {
		buf[5] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;

	}
}
7652
7653 static void
7654 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
7655 {
7656         switch (stringset) {
7657         case ETH_SS_STATS:
7658                 memcpy(buf, bnx2_stats_str_arr,
7659                         sizeof(bnx2_stats_str_arr));
7660                 break;
7661         case ETH_SS_TEST:
7662                 memcpy(buf, bnx2_tests_str_arr,
7663                         sizeof(bnx2_tests_str_arr));
7664                 break;
7665         }
7666 }
7667
7668 static void
7669 bnx2_get_ethtool_stats(struct net_device *dev,
7670                 struct ethtool_stats *stats, u64 *buf)
7671 {
7672         struct bnx2 *bp = netdev_priv(dev);
7673         int i;
7674         u32 *hw_stats = (u32 *) bp->stats_blk;
7675         u32 *temp_stats = (u32 *) bp->temp_stats_blk;
7676         u8 *stats_len_arr = NULL;
7677
7678         if (!hw_stats) {
7679                 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
7680                 return;
7681         }
7682
7683         if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) ||
7684             (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1) ||
7685             (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A2) ||
7686             (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0))
7687                 stats_len_arr = bnx2_5706_stats_len_arr;
7688         else
7689                 stats_len_arr = bnx2_5708_stats_len_arr;
7690
7691         for (i = 0; i < BNX2_NUM_STATS; i++) {
7692                 unsigned long offset;
7693
7694                 if (stats_len_arr[i] == 0) {
7695                         /* skip this counter */
7696                         buf[i] = 0;
7697                         continue;
7698                 }
7699
7700                 offset = bnx2_stats_offset_arr[i];
7701                 if (stats_len_arr[i] == 4) {
7702                         /* 4-byte counter */
7703                         buf[i] = (u64) *(hw_stats + offset) +
7704                                  *(temp_stats + offset);
7705                         continue;
7706                 }
7707                 /* 8-byte counter */
7708                 buf[i] = (((u64) *(hw_stats + offset)) << 32) +
7709                          *(hw_stats + offset + 1) +
7710                          (((u64) *(temp_stats + offset)) << 32) +
7711                          *(temp_stats + offset + 1);
7712         }
7713 }
7714
/* ethtool port-identify callback: blink the port LEDs so the physical
 * port can be located.  ethtool drives the ON/OFF states using the
 * interval returned from ETHTOOL_ID_ACTIVE.
 */
static int
bnx2_set_phys_id(struct net_device *dev, enum ethtool_phys_id_state state)
{
	struct bnx2 *bp = netdev_priv(dev);

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		/* Save the LED config and take manual control of the LEDs. */
		bp->leds_save = BNX2_RD(bp, BNX2_MISC_CFG);
		BNX2_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
		return 1;	/* cycle on/off once per second */

	case ETHTOOL_ID_ON:
		/* Override every speed/traffic LED to force them on. */
		BNX2_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
			BNX2_EMAC_LED_1000MB_OVERRIDE |
			BNX2_EMAC_LED_100MB_OVERRIDE |
			BNX2_EMAC_LED_10MB_OVERRIDE |
			BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
			BNX2_EMAC_LED_TRAFFIC);
		break;

	case ETHTOOL_ID_OFF:
		/* Override with all LED bits clear: everything off. */
		BNX2_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
		break;

	case ETHTOOL_ID_INACTIVE:
		/* Drop the override and restore the saved LED mode. */
		BNX2_WR(bp, BNX2_EMAC_LED, 0);
		BNX2_WR(bp, BNX2_MISC_CFG, bp->leds_save);
		break;
	}

	return 0;
}
7747
/* ndo_set_features: apply offload feature changes requested by the
 * networking core.
 */
static int
bnx2_set_features(struct net_device *dev, netdev_features_t features)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* TSO with VLAN tag won't work with current firmware */
	if (features & NETIF_F_HW_VLAN_CTAG_TX)
		dev->vlan_features |= (dev->hw_features & NETIF_F_ALL_TSO);
	else
		dev->vlan_features &= ~NETIF_F_ALL_TSO;

	/* If VLAN rx stripping changed while the device is up, the rx
	 * mode must be reprogrammed and the firmware notified.
	 */
	if ((!!(features & NETIF_F_HW_VLAN_CTAG_RX) !=
	    !!(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) &&
	    netif_running(dev)) {
		bnx2_netif_stop(bp, false);
		dev->features = features;
		bnx2_set_rx_mode(dev);
		bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);
		bnx2_netif_start(bp, false);
		/* Returning 1 tells the core that the driver has already
		 * updated dev->features itself.
		 */
		return 1;
	}

	return 0;
}
7772
7773 static void bnx2_get_channels(struct net_device *dev,
7774                               struct ethtool_channels *channels)
7775 {
7776         struct bnx2 *bp = netdev_priv(dev);
7777         u32 max_rx_rings = 1;
7778         u32 max_tx_rings = 1;
7779
7780         if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !disable_msi) {
7781                 max_rx_rings = RX_MAX_RINGS;
7782                 max_tx_rings = TX_MAX_RINGS;
7783         }
7784
7785         channels->max_rx = max_rx_rings;
7786         channels->max_tx = max_tx_rings;
7787         channels->max_other = 0;
7788         channels->max_combined = 0;
7789         channels->rx_count = bp->num_rx_rings;
7790         channels->tx_count = bp->num_tx_rings;
7791         channels->other_count = 0;
7792         channels->combined_count = 0;
7793 }
7794
7795 static int bnx2_set_channels(struct net_device *dev,
7796                               struct ethtool_channels *channels)
7797 {
7798         struct bnx2 *bp = netdev_priv(dev);
7799         u32 max_rx_rings = 1;
7800         u32 max_tx_rings = 1;
7801         int rc = 0;
7802
7803         if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !disable_msi) {
7804                 max_rx_rings = RX_MAX_RINGS;
7805                 max_tx_rings = TX_MAX_RINGS;
7806         }
7807         if (channels->rx_count > max_rx_rings ||
7808             channels->tx_count > max_tx_rings)
7809                 return -EINVAL;
7810
7811         bp->num_req_rx_rings = channels->rx_count;
7812         bp->num_req_tx_rings = channels->tx_count;
7813
7814         if (netif_running(dev))
7815                 rc = bnx2_change_ring_size(bp, bp->rx_ring_size,
7816                                            bp->tx_ring_size, true);
7817
7818         return rc;
7819 }
7820
/* ethtool operations table registered via dev->ethtool_ops. */
static const struct ethtool_ops bnx2_ethtool_ops = {
	.get_drvinfo		= bnx2_get_drvinfo,
	.get_regs_len		= bnx2_get_regs_len,
	.get_regs		= bnx2_get_regs,
	.get_wol		= bnx2_get_wol,
	.set_wol		= bnx2_set_wol,
	.nway_reset		= bnx2_nway_reset,
	.get_link		= bnx2_get_link,
	.get_eeprom_len		= bnx2_get_eeprom_len,
	.get_eeprom		= bnx2_get_eeprom,
	.set_eeprom		= bnx2_set_eeprom,
	.get_coalesce		= bnx2_get_coalesce,
	.set_coalesce		= bnx2_set_coalesce,
	.get_ringparam		= bnx2_get_ringparam,
	.set_ringparam		= bnx2_set_ringparam,
	.get_pauseparam		= bnx2_get_pauseparam,
	.set_pauseparam		= bnx2_set_pauseparam,
	.self_test		= bnx2_self_test,
	.get_strings		= bnx2_get_strings,
	.set_phys_id		= bnx2_set_phys_id,
	.get_ethtool_stats	= bnx2_get_ethtool_stats,
	.get_sset_count		= bnx2_get_sset_count,
	.get_channels		= bnx2_get_channels,
	.set_channels		= bnx2_set_channels,
	.get_link_ksettings	= bnx2_get_link_ksettings,
	.set_link_ksettings	= bnx2_set_link_ksettings,
};
7848
/* Called with rtnl_lock */
/* ndo_do_ioctl: MII ioctls (SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG).
 * Direct MII access is refused when a remote PHY owns the link, and
 * requires the interface to be up since the chip may be powered down.
 */
static int
bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2 *bp = netdev_priv(dev);
	int err;

	switch(cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return -EOPNOTSUPP;

		if (!netif_running(dev))
			return -EAGAIN;

		/* phy_lock serializes MDIO access with the rest of the driver. */
		spin_lock_bh(&bp->phy_lock);
		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&bp->phy_lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return -EOPNOTSUPP;

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&bp->phy_lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
7899
7900 /* Called with rtnl_lock */
7901 static int
7902 bnx2_change_mac_addr(struct net_device *dev, void *p)
7903 {
7904         struct sockaddr *addr = p;
7905         struct bnx2 *bp = netdev_priv(dev);
7906
7907         if (!is_valid_ether_addr(addr->sa_data))
7908                 return -EADDRNOTAVAIL;
7909
7910         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7911         if (netif_running(dev))
7912                 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
7913
7914         return 0;
7915 }
7916
/* Called with rtnl_lock */
static int
bnx2_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* Rebuild the rx/tx rings so rx buffer sizes match the new MTU. */
	dev->mtu = new_mtu;
	return bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size,
				     false);
}
7927
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Netpoll entry point: service every interrupt vector by calling its
 * handler directly with the hardware interrupt masked.
 */
static void
poll_bnx2(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;

	for (i = 0; i < bp->irq_nvecs; i++) {
		struct bnx2_irq *irq = &bp->irq_tbl[i];

		disable_irq(irq->vector);
		irq->handler(irq->vector, &bp->bnx2_napi[i]);
		enable_irq(irq->vector);
	}
}
#endif
7944
/* Determine whether a 5709 port uses a SerDes (fiber) PHY and set
 * BNX2_PHY_FLAG_SERDES accordingly.  The bond ID is checked first;
 * otherwise the PHY strap value is decoded per PCI function.
 */
static void
bnx2_get_5709_media(struct bnx2 *bp)
{
	u32 val = BNX2_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
	u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
	u32 strap;

	/* Bond ID "C" leaves the default (copper); "S" forces SerDes. */
	if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
		return;
	else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
		bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
		return;
	}

	/* A software override of the strap takes precedence over the
	 * hardware strap pins.
	 */
	if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
	else
		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;

	/* The strap values indicating SerDes differ between function 0
	 * and function 1 (values per Broadcom hardware definition).
	 */
	if (bp->func == 0) {
		switch (strap) {
		case 0x4:
		case 0x5:
		case 0x6:
			bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
			return;
		}
	} else {
		switch (strap) {
		case 0x1:
		case 0x2:
		case 0x4:
			bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
			return;
		}
	}
}
7982
/* Detect the PCI/PCI-X bus type, width and clock speed from the chip's
 * status registers and record them in bp->flags and bp->bus_speed_mhz.
 */
static void
bnx2_get_pci_speed(struct bnx2 *bp)
{
	u32 reg;

	reg = BNX2_RD(bp, BNX2_PCICFG_MISC_STATUS);
	if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
		u32 clkreg;

		bp->flags |= BNX2_FLAG_PCIX;

		/* PCI-X mode: decode the detected clock frequency. */
		clkreg = BNX2_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);

		clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
		switch (clkreg) {
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
			bp->bus_speed_mhz = 133;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
			bp->bus_speed_mhz = 100;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
			bp->bus_speed_mhz = 66;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
			bp->bus_speed_mhz = 50;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
			bp->bus_speed_mhz = 33;
			break;
		}
	}
	else {
		/* Conventional PCI: M66EN distinguishes 66 vs 33 MHz. */
		if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
			bp->bus_speed_mhz = 66;
		else
			bp->bus_speed_mhz = 33;
	}

	if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
		bp->flags |= BNX2_FLAG_PCI_32BIT;

}
8034
/* Read the board's PCI VPD out of NVRAM and, when the manufacturer ID
 * keyword matches "1028" (presumably the Dell PCI vendor ID — TODO
 * confirm), append the V0 vendor version string to bp->fw_version.
 * Best effort: any parse failure just leaves bp->fw_version untouched.
 */
static void
bnx2_read_vpd_fw_ver(struct bnx2 *bp)
{
	int rc, i, j;
	u8 *data;
	unsigned int block_end, rosize, len;

#define BNX2_VPD_NVRAM_OFFSET	0x300
#define BNX2_VPD_LEN		128
#define BNX2_MAX_VER_SLEN	30

	data = kmalloc(256, GFP_KERNEL);
	if (!data)
		return;

	/* Read the raw VPD image into the upper half of the buffer. */
	rc = bnx2_nvram_read(bp, BNX2_VPD_NVRAM_OFFSET, data + BNX2_VPD_LEN,
			     BNX2_VPD_LEN);
	if (rc)
		goto vpd_done;

	/* Byte-swap each 32-bit word from the upper half into the lower
	 * half, which the pci_vpd_* helpers then parse.
	 */
	for (i = 0; i < BNX2_VPD_LEN; i += 4) {
		data[i] = data[i + BNX2_VPD_LEN + 3];
		data[i + 1] = data[i + BNX2_VPD_LEN + 2];
		data[i + 2] = data[i + BNX2_VPD_LEN + 1];
		data[i + 3] = data[i + BNX2_VPD_LEN];
	}

	/* Locate the read-only VPD section and bound-check it. */
	i = pci_vpd_find_tag(data, 0, BNX2_VPD_LEN, PCI_VPD_LRDT_RO_DATA);
	if (i < 0)
		goto vpd_done;

	rosize = pci_vpd_lrdt_size(&data[i]);
	i += PCI_VPD_LRDT_TAG_SIZE;
	block_end = i + rosize;

	if (block_end > BNX2_VPD_LEN)
		goto vpd_done;

	/* Only accept the version keyword when the MFR ID is "1028". */
	j = pci_vpd_find_info_keyword(data, i, rosize,
				      PCI_VPD_RO_KEYWORD_MFR_ID);
	if (j < 0)
		goto vpd_done;

	len = pci_vpd_info_field_size(&data[j]);

	j += PCI_VPD_INFO_FLD_HDR_SIZE;
	if (j + len > block_end || len != 4 ||
	    memcmp(&data[j], "1028", 4))
		goto vpd_done;

	j = pci_vpd_find_info_keyword(data, i, rosize,
				      PCI_VPD_RO_KEYWORD_VENDOR0);
	if (j < 0)
		goto vpd_done;

	len = pci_vpd_info_field_size(&data[j]);

	j += PCI_VPD_INFO_FLD_HDR_SIZE;
	if (j + len > block_end || len > BNX2_MAX_VER_SLEN)
		goto vpd_done;

	/* Copy the version string and terminate the field with a space;
	 * the caller appends further version info after it.
	 */
	memcpy(bp->fw_version, &data[j], len);
	bp->fw_version[len] = ' ';

vpd_done:
	kfree(data);
}
8102
8103 static int
8104 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
8105 {
8106         struct bnx2 *bp;
8107         int rc, i, j;
8108         u32 reg;
8109         u64 dma_mask, persist_dma_mask;
8110         int err;
8111
8112         SET_NETDEV_DEV(dev, &pdev->dev);
8113         bp = netdev_priv(dev);
8114
8115         bp->flags = 0;
8116         bp->phy_flags = 0;
8117
8118         bp->temp_stats_blk =
8119                 kzalloc(sizeof(struct statistics_block), GFP_KERNEL);
8120
8121         if (!bp->temp_stats_blk) {
8122                 rc = -ENOMEM;
8123                 goto err_out;
8124         }
8125
8126         /* enable device (incl. PCI PM wakeup), and bus-mastering */
8127         rc = pci_enable_device(pdev);
8128         if (rc) {
8129                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
8130                 goto err_out;
8131         }
8132
8133         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
8134                 dev_err(&pdev->dev,
8135                         "Cannot find PCI device base address, aborting\n");
8136                 rc = -ENODEV;
8137                 goto err_out_disable;
8138         }
8139
8140         rc = pci_request_regions(pdev, DRV_MODULE_NAME);
8141         if (rc) {
8142                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
8143                 goto err_out_disable;
8144         }
8145
8146         pci_set_master(pdev);
8147
8148         bp->pm_cap = pdev->pm_cap;
8149         if (bp->pm_cap == 0) {
8150                 dev_err(&pdev->dev,
8151                         "Cannot find power management capability, aborting\n");
8152                 rc = -EIO;
8153                 goto err_out_release;
8154         }
8155
8156         bp->dev = dev;
8157         bp->pdev = pdev;
8158
8159         spin_lock_init(&bp->phy_lock);
8160         spin_lock_init(&bp->indirect_lock);
8161 #ifdef BCM_CNIC
8162         mutex_init(&bp->cnic_lock);
8163 #endif
8164         INIT_WORK(&bp->reset_task, bnx2_reset_task);
8165
8166         bp->regview = pci_iomap(pdev, 0, MB_GET_CID_ADDR(TX_TSS_CID +
8167                                                          TX_MAX_TSS_RINGS + 1));
8168         if (!bp->regview) {
8169                 dev_err(&pdev->dev, "Cannot map register space, aborting\n");
8170                 rc = -ENOMEM;
8171                 goto err_out_release;
8172         }
8173
8174         /* Configure byte swap and enable write to the reg_window registers.
8175          * Rely on CPU to do target byte swapping on big endian systems
8176          * The chip's target access swapping will not swap all accesses
8177          */
8178         BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG,
8179                 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
8180                 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
8181
8182         bp->chip_id = BNX2_RD(bp, BNX2_MISC_ID);
8183
8184         if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
8185                 if (!pci_is_pcie(pdev)) {
8186                         dev_err(&pdev->dev, "Not PCIE, aborting\n");
8187                         rc = -EIO;
8188                         goto err_out_unmap;
8189                 }
8190                 bp->flags |= BNX2_FLAG_PCIE;
8191                 if (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax)
8192                         bp->flags |= BNX2_FLAG_JUMBO_BROKEN;
8193
8194                 /* AER (Advanced Error Reporting) hooks */
8195                 err = pci_enable_pcie_error_reporting(pdev);
8196                 if (!err)
8197                         bp->flags |= BNX2_FLAG_AER_ENABLED;
8198
8199         } else {
8200                 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
8201                 if (bp->pcix_cap == 0) {
8202                         dev_err(&pdev->dev,
8203                                 "Cannot find PCIX capability, aborting\n");
8204                         rc = -EIO;
8205                         goto err_out_unmap;
8206                 }
8207                 bp->flags |= BNX2_FLAG_BROKEN_STATS;
8208         }
8209
8210         if (BNX2_CHIP(bp) == BNX2_CHIP_5709 &&
8211             BNX2_CHIP_REV(bp) != BNX2_CHIP_REV_Ax) {
8212                 if (pdev->msix_cap)
8213                         bp->flags |= BNX2_FLAG_MSIX_CAP;
8214         }
8215
8216         if (BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A0 &&
8217             BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A1) {
8218                 if (pdev->msi_cap)
8219                         bp->flags |= BNX2_FLAG_MSI_CAP;
8220         }
8221
8222         /* 5708 cannot support DMA addresses > 40-bit.  */
8223         if (BNX2_CHIP(bp) == BNX2_CHIP_5708)
8224                 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
8225         else
8226                 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
8227
8228         /* Configure DMA attributes. */
8229         if (pci_set_dma_mask(pdev, dma_mask) == 0) {
8230                 dev->features |= NETIF_F_HIGHDMA;
8231                 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
8232                 if (rc) {
8233                         dev_err(&pdev->dev,
8234                                 "pci_set_consistent_dma_mask failed, aborting\n");
8235                         goto err_out_unmap;
8236                 }
8237         } else if ((rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
8238                 dev_err(&pdev->dev, "System does not support DMA, aborting\n");
8239                 goto err_out_unmap;
8240         }
8241
8242         if (!(bp->flags & BNX2_FLAG_PCIE))
8243                 bnx2_get_pci_speed(bp);
8244
8245         /* 5706A0 may falsely detect SERR and PERR. */
8246         if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
8247                 reg = BNX2_RD(bp, PCI_COMMAND);
8248                 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
8249                 BNX2_WR(bp, PCI_COMMAND, reg);
8250         } else if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1) &&
8251                 !(bp->flags & BNX2_FLAG_PCIX)) {
8252                 dev_err(&pdev->dev,
8253                         "5706 A1 can only be used in a PCIX bus, aborting\n");
8254                 rc = -EPERM;
8255                 goto err_out_unmap;
8256         }
8257
8258         bnx2_init_nvram(bp);
8259
8260         reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);
8261
8262         if (bnx2_reg_rd_ind(bp, BNX2_MCP_TOE_ID) & BNX2_MCP_TOE_ID_FUNCTION_ID)
8263                 bp->func = 1;
8264
8265         if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
8266             BNX2_SHM_HDR_SIGNATURE_SIG) {
8267                 u32 off = bp->func << 2;
8268
8269                 bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
8270         } else
8271                 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
8272
8273         /* Get the permanent MAC address.  First we need to make sure the
8274          * firmware is actually running.
8275          */
8276         reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE);
8277
8278         if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
8279             BNX2_DEV_INFO_SIGNATURE_MAGIC) {
8280                 dev_err(&pdev->dev, "Firmware not running, aborting\n");
8281                 rc = -ENODEV;
8282                 goto err_out_unmap;
8283         }
8284
8285         bnx2_read_vpd_fw_ver(bp);
8286
8287         j = strlen(bp->fw_version);
8288         reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV);
8289         for (i = 0; i < 3 && j < 24; i++) {
8290                 u8 num, k, skip0;
8291
8292                 if (i == 0) {
8293                         bp->fw_version[j++] = 'b';
8294                         bp->fw_version[j++] = 'c';
8295                         bp->fw_version[j++] = ' ';
8296                 }
8297                 num = (u8) (reg >> (24 - (i * 8)));
8298                 for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
8299                         if (num >= k || !skip0 || k == 1) {
8300                                 bp->fw_version[j++] = (num / k) + '0';
8301                                 skip0 = 0;
8302                         }
8303                 }
8304                 if (i != 2)
8305                         bp->fw_version[j++] = '.';
8306         }
8307         reg = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
8308         if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
8309                 bp->wol = 1;
8310
8311         if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
8312                 bp->flags |= BNX2_FLAG_ASF_ENABLE;
8313
8314                 for (i = 0; i < 30; i++) {
8315                         reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
8316                         if (reg & BNX2_CONDITION_MFW_RUN_MASK)
8317                                 break;
8318                         msleep(10);
8319                 }
8320         }
8321         reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
8322         reg &= BNX2_CONDITION_MFW_RUN_MASK;
8323         if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
8324             reg != BNX2_CONDITION_MFW_RUN_NONE) {
8325                 u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);
8326
8327                 if (j < 32)
8328                         bp->fw_version[j++] = ' ';
8329                 for (i = 0; i < 3 && j < 28; i++) {
8330                         reg = bnx2_reg_rd_ind(bp, addr + i * 4);
8331                         reg = be32_to_cpu(reg);
8332                         memcpy(&bp->fw_version[j], &reg, 4);
8333                         j += 4;
8334                 }
8335         }
8336
8337         reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_UPPER);
8338         bp->mac_addr[0] = (u8) (reg >> 8);
8339         bp->mac_addr[1] = (u8) reg;
8340
8341         reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_LOWER);
8342         bp->mac_addr[2] = (u8) (reg >> 24);
8343         bp->mac_addr[3] = (u8) (reg >> 16);
8344         bp->mac_addr[4] = (u8) (reg >> 8);
8345         bp->mac_addr[5] = (u8) reg;
8346
8347         bp->tx_ring_size = BNX2_MAX_TX_DESC_CNT;
8348         bnx2_set_rx_ring_size(bp, 255);
8349
8350         bp->tx_quick_cons_trip_int = 2;
8351         bp->tx_quick_cons_trip = 20;
8352         bp->tx_ticks_int = 18;
8353         bp->tx_ticks = 80;
8354
8355         bp->rx_quick_cons_trip_int = 2;
8356         bp->rx_quick_cons_trip = 12;
8357         bp->rx_ticks_int = 18;
8358         bp->rx_ticks = 18;
8359
8360         bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
8361
8362         bp->current_interval = BNX2_TIMER_INTERVAL;
8363
8364         bp->phy_addr = 1;
8365
8366         /* allocate stats_blk */
8367         rc = bnx2_alloc_stats_blk(dev);
8368         if (rc)
8369                 goto err_out_unmap;
8370
8371         /* Disable WOL support if we are running on a SERDES chip. */
8372         if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
8373                 bnx2_get_5709_media(bp);
8374         else if (BNX2_CHIP_BOND(bp) & BNX2_CHIP_BOND_SERDES_BIT)
8375                 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
8376
8377         bp->phy_port = PORT_TP;
8378         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
8379                 bp->phy_port = PORT_FIBRE;
8380                 reg = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
8381                 if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
8382                         bp->flags |= BNX2_FLAG_NO_WOL;
8383                         bp->wol = 0;
8384                 }
8385                 if (BNX2_CHIP(bp) == BNX2_CHIP_5706) {
8386                         /* Don't do parallel detect on this board because of
8387                          * some board problems.  The link will not go down
8388                          * if we do parallel detect.
8389                          */
8390                         if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
8391                             pdev->subsystem_device == 0x310c)
8392                                 bp->phy_flags |= BNX2_PHY_FLAG_NO_PARALLEL;
8393                 } else {
8394                         bp->phy_addr = 2;
8395                         if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
8396                                 bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
8397                 }
8398         } else if (BNX2_CHIP(bp) == BNX2_CHIP_5706 ||
8399                    BNX2_CHIP(bp) == BNX2_CHIP_5708)
8400                 bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX;
8401         else if (BNX2_CHIP(bp) == BNX2_CHIP_5709 &&
8402                  (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax ||
8403                   BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Bx))
8404                 bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC;
8405
8406         bnx2_init_fw_cap(bp);
8407
8408         if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0) ||
8409             (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B0) ||
8410             (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B1) ||
8411             !(BNX2_RD(bp, BNX2_PCI_CONFIG_3) & BNX2_PCI_CONFIG_3_VAUX_PRESET)) {
8412                 bp->flags |= BNX2_FLAG_NO_WOL;
8413                 bp->wol = 0;
8414         }
8415
8416         if (bp->flags & BNX2_FLAG_NO_WOL)
8417                 device_set_wakeup_capable(&bp->pdev->dev, false);
8418         else
8419                 device_set_wakeup_enable(&bp->pdev->dev, bp->wol);
8420
8421         if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
8422                 bp->tx_quick_cons_trip_int =
8423                         bp->tx_quick_cons_trip;
8424                 bp->tx_ticks_int = bp->tx_ticks;
8425                 bp->rx_quick_cons_trip_int =
8426                         bp->rx_quick_cons_trip;
8427                 bp->rx_ticks_int = bp->rx_ticks;
8428                 bp->comp_prod_trip_int = bp->comp_prod_trip;
8429                 bp->com_ticks_int = bp->com_ticks;
8430                 bp->cmd_ticks_int = bp->cmd_ticks;
8431         }
8432
8433         /* Disable MSI on 5706 if AMD 8132 bridge is found.
8434          *
8435          * MSI is defined to be 32-bit write.  The 5706 does 64-bit MSI writes
8436          * with byte enables disabled on the unused 32-bit word.  This is legal
8437          * but causes problems on the AMD 8132 which will eventually stop
8438          * responding after a while.
8439          *
8440          * AMD believes this incompatibility is unique to the 5706, and
8441          * prefers to locally disable MSI rather than globally disabling it.
8442          */
8443         if (BNX2_CHIP(bp) == BNX2_CHIP_5706 && disable_msi == 0) {
8444                 struct pci_dev *amd_8132 = NULL;
8445
8446                 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
8447                                                   PCI_DEVICE_ID_AMD_8132_BRIDGE,
8448                                                   amd_8132))) {
8449
8450                         if (amd_8132->revision >= 0x10 &&
8451                             amd_8132->revision <= 0x13) {
8452                                 disable_msi = 1;
8453                                 pci_dev_put(amd_8132);
8454                                 break;
8455                         }
8456                 }
8457         }
8458
8459         bnx2_set_default_link(bp);
8460         bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
8461
8462         timer_setup(&bp->timer, bnx2_timer, 0);
8463         bp->timer.expires = RUN_AT(BNX2_TIMER_INTERVAL);
8464
8465 #ifdef BCM_CNIC
8466         if (bnx2_shmem_rd(bp, BNX2_ISCSI_INITIATOR) & BNX2_ISCSI_INITIATOR_EN)
8467                 bp->cnic_eth_dev.max_iscsi_conn =
8468                         (bnx2_shmem_rd(bp, BNX2_ISCSI_MAX_CONN) &
8469                          BNX2_ISCSI_MAX_CONN_MASK) >> BNX2_ISCSI_MAX_CONN_SHIFT;
8470         bp->cnic_probe = bnx2_cnic_probe;
8471 #endif
8472         pci_save_state(pdev);
8473
8474         return 0;
8475
8476 err_out_unmap:
8477         if (bp->flags & BNX2_FLAG_AER_ENABLED) {
8478                 pci_disable_pcie_error_reporting(pdev);
8479                 bp->flags &= ~BNX2_FLAG_AER_ENABLED;
8480         }
8481
8482         pci_iounmap(pdev, bp->regview);
8483         bp->regview = NULL;
8484
8485 err_out_release:
8486         pci_release_regions(pdev);
8487
8488 err_out_disable:
8489         pci_disable_device(pdev);
8490
8491 err_out:
8492         kfree(bp->temp_stats_blk);
8493
8494         return rc;
8495 }
8496
8497 static char *
8498 bnx2_bus_string(struct bnx2 *bp, char *str)
8499 {
8500         char *s = str;
8501
8502         if (bp->flags & BNX2_FLAG_PCIE) {
8503                 s += sprintf(s, "PCI Express");
8504         } else {
8505                 s += sprintf(s, "PCI");
8506                 if (bp->flags & BNX2_FLAG_PCIX)
8507                         s += sprintf(s, "-X");
8508                 if (bp->flags & BNX2_FLAG_PCI_32BIT)
8509                         s += sprintf(s, " 32-bit");
8510                 else
8511                         s += sprintf(s, " 64-bit");
8512                 s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
8513         }
8514         return str;
8515 }
8516
8517 static void
8518 bnx2_del_napi(struct bnx2 *bp)
8519 {
8520         int i;
8521
8522         for (i = 0; i < bp->irq_nvecs; i++)
8523                 netif_napi_del(&bp->bnx2_napi[i].napi);
8524 }
8525
8526 static void
8527 bnx2_init_napi(struct bnx2 *bp)
8528 {
8529         int i;
8530
8531         for (i = 0; i < bp->irq_nvecs; i++) {
8532                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
8533                 int (*poll)(struct napi_struct *, int);
8534
8535                 if (i == 0)
8536                         poll = bnx2_poll;
8537                 else
8538                         poll = bnx2_poll_msix;
8539
8540                 netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll, 64);
8541                 bnapi->bp = bp;
8542         }
8543 }
8544
/* Net-device operations for the bnx2 interface; wired up in
 * bnx2_init_one() before register_netdev().
 */
static const struct net_device_ops bnx2_netdev_ops = {
	.ndo_open		= bnx2_open,
	.ndo_start_xmit		= bnx2_start_xmit,
	.ndo_stop		= bnx2_close,
	.ndo_get_stats64	= bnx2_get_stats64,
	.ndo_set_rx_mode	= bnx2_set_rx_mode,
	.ndo_do_ioctl		= bnx2_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= bnx2_change_mac_addr,
	.ndo_change_mtu		= bnx2_change_mtu,
	.ndo_set_features	= bnx2_set_features,
	.ndo_tx_timeout		= bnx2_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= poll_bnx2,
#endif
};
8561
/* PCI probe entry point: allocate the netdev, initialize the board via
 * bnx2_init_board(), set up features/ops, and register the netdev.
 * Returns 0 on success or a negative errno; all resources acquired
 * here are unwound on the error paths below.
 */
static int
bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int version_printed = 0;
	struct net_device *dev;
	struct bnx2 *bp;
	int rc;
	char str[40];

	/* Print the driver banner only on the first probed device. */
	if (version_printed++ == 0)
		pr_info("%s", version);

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS);
	if (!dev)
		return -ENOMEM;

	rc = bnx2_init_board(pdev, dev);
	if (rc < 0)
		goto err_free;

	dev->netdev_ops = &bnx2_netdev_ops;
	dev->watchdog_timeo = TX_TIMEOUT;
	dev->ethtool_ops = &bnx2_ethtool_ops;

	bp = netdev_priv(dev);

	pci_set_drvdata(pdev, dev);

	/*
	 * In-flight DMA from 1st kernel could continue going in kdump kernel.
	 * New io-page table has been created before bnx2 does reset at open stage.
	 * We have to wait for the in-flight DMA to complete to avoid it look up
	 * into the newly created io-page table.
	 */
	if (is_kdump_kernel())
		bnx2_wait_dma_complete(bp);

	/* Permanent MAC was read from shmem by bnx2_init_board(). */
	memcpy(dev->dev_addr, bp->mac_addr, ETH_ALEN);

	dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
		NETIF_F_TSO | NETIF_F_TSO_ECN |
		NETIF_F_RXHASH | NETIF_F_RXCSUM;

	/* Only the 5709 supports IPv6 checksum offload and TSO6. */
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
		dev->hw_features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6;

	dev->vlan_features = dev->hw_features;
	dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
	dev->features |= dev->hw_features;
	dev->priv_flags |= IFF_UNICAST_FLT;
	dev->min_mtu = MIN_ETHERNET_PACKET_SIZE;
	dev->max_mtu = MAX_ETHERNET_JUMBO_PACKET_SIZE;

	/* Management firmware may need to see VLAN-tagged frames; in that
	 * case HW VLAN stripping must stay off.
	 */
	if (!(bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
		dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_RX;

	if ((rc = register_netdev(dev))) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto error;
	}

	netdev_info(dev, "%s (%c%d) %s found at mem %lx, IRQ %d, "
		    "node addr %pM\n", board_info[ent->driver_data].name,
		    ((BNX2_CHIP_ID(bp) & 0xf000) >> 12) + 'A',
		    ((BNX2_CHIP_ID(bp) & 0x0ff0) >> 4),
		    bnx2_bus_string(bp, str), (long)pci_resource_start(pdev, 0),
		    pdev->irq, dev->dev_addr);

	return 0;

error:
	pci_iounmap(pdev, bp->regview);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
err_free:
	bnx2_free_stats_blk(dev);
	free_netdev(dev);
	return rc;
}
8642
/* PCI remove entry point: tear down in the reverse order of probe.
 * The netdev is unregistered first so no new activity starts, then the
 * timer and deferred reset work are quiesced before resources go away.
 */
static void
bnx2_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	unregister_netdev(dev);

	/* Stop the periodic timer and any pending reset_task before
	 * freeing anything they might touch.
	 */
	del_timer_sync(&bp->timer);
	cancel_work_sync(&bp->reset_task);

	pci_iounmap(bp->pdev, bp->regview);

	bnx2_free_stats_blk(dev);
	kfree(bp->temp_stats_blk);

	if (bp->flags & BNX2_FLAG_AER_ENABLED) {
		pci_disable_pcie_error_reporting(pdev);
		bp->flags &= ~BNX2_FLAG_AER_ENABLED;
	}

	bnx2_release_firmware(bp);

	free_netdev(dev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);
}
8671
8672 #ifdef CONFIG_PM_SLEEP
/* System-sleep suspend handler: quiesce the interface (if running),
 * shut the chip down, and arm wake-on-LAN state.  Always returns 0.
 */
static int
bnx2_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	if (netif_running(dev)) {
		/* Cancel deferred reset work first so it cannot restart
		 * the NIC while we are tearing it down.
		 */
		cancel_work_sync(&bp->reset_task);
		bnx2_netif_stop(bp, true);
		netif_device_detach(dev);
		del_timer_sync(&bp->timer);
		bnx2_shutdown_chip(bp);
		__bnx2_free_irq(bp);
		bnx2_free_skbs(bp);
	}
	/* WoL setup happens even when the interface is down. */
	bnx2_setup_wol(bp);
	return 0;
}
8692
/* System-sleep resume handler: restore power state and, if the
 * interface was running at suspend time, reinitialize the NIC and
 * restart traffic.
 * NOTE(review): the return values of bnx2_request_irq() and
 * bnx2_init_nic() are not checked here — a resume failure is silently
 * ignored; confirm whether that is intentional.
 */
static int
bnx2_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	if (!netif_running(dev))
		return 0;

	bnx2_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);
	bnx2_request_irq(bp);
	bnx2_init_nic(bp, 1);
	bnx2_netif_start(bp, true);
	return 0;
}
8710
8711 static SIMPLE_DEV_PM_OPS(bnx2_pm_ops, bnx2_suspend, bnx2_resume);
8712 #define BNX2_PM_OPS (&bnx2_pm_ops)
8713
8714 #else
8715
8716 #define BNX2_PM_OPS NULL
8717
8718 #endif /* CONFIG_PM_SLEEP */
8719 /**
8720  * bnx2_io_error_detected - called when PCI error is detected
8721  * @pdev: Pointer to PCI device
8722  * @state: The current pci connection state
8723  *
8724  * This function is called after a PCI bus error affecting
8725  * this device has been detected.
8726  */
static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	rtnl_lock();
	netif_device_detach(dev);

	/* A permanent failure means the device is gone; tell the AER
	 * core to disconnect rather than attempt a reset.
	 */
	if (state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (netif_running(dev)) {
		bnx2_netif_stop(bp, true);
		del_timer_sync(&bp->timer);
		bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
	}

	pci_disable_device(pdev);
	rtnl_unlock();

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
8753
8754 /**
8755  * bnx2_io_slot_reset - called after the pci bus has been reset.
8756  * @pdev: Pointer to PCI device
8757  *
8758  * Restart the card from scratch, as if from a cold-boot.
8759  */
static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);
	pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
	int err = 0;

	rtnl_lock();
	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset\n");
	} else {
		/* Restore config space saved at probe time, then re-save
		 * so a later restore starts from this known-good state.
		 */
		pci_set_master(pdev);
		pci_restore_state(pdev);
		pci_save_state(pdev);

		if (netif_running(dev))
			err = bnx2_init_nic(bp, 1);

		if (!err)
			result = PCI_ERS_RESULT_RECOVERED;
	}

	/* Recovery failed: re-enable NAPI so dev_close() can complete
	 * its teardown, then take the interface down.
	 */
	if (result != PCI_ERS_RESULT_RECOVERED && netif_running(dev)) {
		bnx2_napi_enable(bp);
		dev_close(dev);
	}
	rtnl_unlock();

	if (!(bp->flags & BNX2_FLAG_AER_ENABLED))
		return result;

	err = pci_cleanup_aer_uncorrect_error_status(pdev);
	if (err) {
		dev_err(&pdev->dev,
			"pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
			 err); /* non-fatal, continue */
	}

	return result;
}
8801
8802 /**
8803  * bnx2_io_resume - called when traffic can start flowing again.
8804  * @pdev: Pointer to PCI device
8805  *
8806  * This callback is called when the error recovery driver tells us that
8807  * its OK to resume normal operation.
8808  */
8809 static void bnx2_io_resume(struct pci_dev *pdev)
8810 {
8811         struct net_device *dev = pci_get_drvdata(pdev);
8812         struct bnx2 *bp = netdev_priv(dev);
8813
8814         rtnl_lock();
8815         if (netif_running(dev))
8816                 bnx2_netif_start(bp, true);
8817
8818         netif_device_attach(dev);
8819         rtnl_unlock();
8820 }
8821
8822 static void bnx2_shutdown(struct pci_dev *pdev)
8823 {
8824         struct net_device *dev = pci_get_drvdata(pdev);
8825         struct bnx2 *bp;
8826
8827         if (!dev)
8828                 return;
8829
8830         bp = netdev_priv(dev);
8831         if (!bp)
8832                 return;
8833
8834         rtnl_lock();
8835         if (netif_running(dev))
8836                 dev_close(bp->dev);
8837
8838         if (system_state == SYSTEM_POWER_OFF)
8839                 bnx2_set_power_state(bp, PCI_D3hot);
8840
8841         rtnl_unlock();
8842 }
8843
/* AER recovery callbacks: detect -> slot reset -> resume. */
static const struct pci_error_handlers bnx2_err_handler = {
	.error_detected	= bnx2_io_error_detected,
	.slot_reset	= bnx2_io_slot_reset,
	.resume		= bnx2_io_resume,
};

/* Top-level PCI driver descriptor tying together probe/remove, power
 * management, error handling, and shutdown.
 */
static struct pci_driver bnx2_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnx2_pci_tbl,
	.probe		= bnx2_init_one,
	.remove		= bnx2_remove_one,
	.driver.pm	= BNX2_PM_OPS,
	.err_handler	= &bnx2_err_handler,
	.shutdown	= bnx2_shutdown,
};
8859
8860 module_pci_driver(bnx2_pci_driver);