GNU Linux-libre 4.9-gnu1
[releases.git] / drivers / net / ethernet / broadcom / bnx2.c
1 /* bnx2.c: QLogic bnx2 network driver.
2  *
3  * Copyright (c) 2004-2014 Broadcom Corporation
4  * Copyright (c) 2014-2015 QLogic Corporation
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License as published by
8  * the Free Software Foundation.
9  *
10  * Written by: Michael Chan  (mchan@broadcom.com)
11  */
12
13 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14
15 #include <linux/module.h>
16 #include <linux/moduleparam.h>
17
18 #include <linux/stringify.h>
19 #include <linux/kernel.h>
20 #include <linux/timer.h>
21 #include <linux/errno.h>
22 #include <linux/ioport.h>
23 #include <linux/slab.h>
24 #include <linux/vmalloc.h>
25 #include <linux/interrupt.h>
26 #include <linux/pci.h>
27 #include <linux/netdevice.h>
28 #include <linux/etherdevice.h>
29 #include <linux/skbuff.h>
30 #include <linux/dma-mapping.h>
31 #include <linux/bitops.h>
32 #include <asm/io.h>
33 #include <asm/irq.h>
34 #include <linux/delay.h>
35 #include <asm/byteorder.h>
36 #include <asm/page.h>
37 #include <linux/time.h>
38 #include <linux/ethtool.h>
39 #include <linux/mii.h>
40 #include <linux/if.h>
41 #include <linux/if_vlan.h>
42 #include <net/ip.h>
43 #include <net/tcp.h>
44 #include <net/checksum.h>
45 #include <linux/workqueue.h>
46 #include <linux/crc32.h>
47 #include <linux/prefetch.h>
48 #include <linux/cache.h>
49 #include <linux/firmware.h>
50 #include <linux/log2.h>
51 #include <linux/aer.h>
52 #include <linux/crash_dump.h>
53
54 #if IS_ENABLED(CONFIG_CNIC)
55 #define BCM_CNIC 1
56 #include "cnic_if.h"
57 #endif
58 #include "bnx2.h"
59 #include "bnx2_fw.h"
60
#define DRV_MODULE_NAME         "bnx2"
#define DRV_MODULE_VERSION      "2.2.6"
#define DRV_MODULE_RELDATE      "January 29, 2014"
/* Firmware file names.  In this Linux-libre tree the original blob
 * names have been removed ("DEBLOBBED"), so firmware loading by name
 * is not functional here.
 */
#define FW_MIPS_FILE_06         "/*(DEBLOBBED)*/"
#define FW_RV2P_FILE_06         "/*(DEBLOBBED)*/"
#define FW_MIPS_FILE_09         "/*(DEBLOBBED)*/"
#define FW_RV2P_FILE_09_Ax      "/*(DEBLOBBED)*/"
#define FW_RV2P_FILE_09         "/*(DEBLOBBED)*/"

#define RUN_AT(x) (jiffies + (x))

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5*HZ)

/* Banner printed once at module load / first probe. */
static char version[] =
        "QLogic " DRV_MODULE_NAME " Gigabit Ethernet Driver v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
MODULE_DESCRIPTION("QLogic BCM5706/5708/5709/5716 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
/*(DEBLOBBED)*/

/* Module parameter: set non-zero to force legacy INTx instead of MSI. */
static int disable_msi = 0;

module_param(disable_msi, int, S_IRUGO);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
88
/* Board identifiers.  The value is stored in the driver_data field of
 * bnx2_pci_tbl[] below and used as an index into board_info[].
 */
typedef enum {
        BCM5706 = 0,
        NC370T,
        NC370I,
        BCM5706S,
        NC370F,
        BCM5708,
        BCM5708S,
        BCM5709,
        BCM5709S,
        BCM5716,
        BCM5716S,
} board_t;
102
/* Human-readable board names, indexed by board_t above.  The order of
 * entries must match the enum exactly.
 */
static struct {
        char *name;
} board_info[] = {
        { "Broadcom NetXtreme II BCM5706 1000Base-T" },
        { "HP NC370T Multifunction Gigabit Server Adapter" },
        { "HP NC370i Multifunction Gigabit Server Adapter" },
        { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
        { "HP NC370F Multifunction Gigabit Server Adapter" },
        { "Broadcom NetXtreme II BCM5708 1000Base-T" },
        { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
        { "Broadcom NetXtreme II BCM5709 1000Base-T" },
        { "Broadcom NetXtreme II BCM5709 1000Base-SX" },
        { "Broadcom NetXtreme II BCM5716 1000Base-T" },
        { "Broadcom NetXtreme II BCM5716 1000Base-SX" },
        };
119
/* PCI device match table.  HP OEM variants are matched first by
 * subsystem vendor/device ID; the generic Broadcom entries use
 * PCI_ANY_ID for the subsystem and therefore act as catch-alls.
 * The last field (driver_data) is the board_t index.
 */
static const struct pci_device_id bnx2_pci_tbl[] = {
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
          PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
          PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
          PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
        /* 0x163b/0x163c: BCM5716 / BCM5716S (no PCI_DEVICE_ID_* macro). */
        { PCI_VENDOR_ID_BROADCOM, 0x163b,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716 },
        { PCI_VENDOR_ID_BROADCOM, 0x163c,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716S },
        { 0, }
};
145
/* NVRAM device table.  Each entry describes one supported flash/EEPROM
 * part.  NOTE(review): the five leading hex words appear to be the
 * strapping value followed by NVRAM config/command register values —
 * confirm against struct flash_spec in bnx2.h before relying on this.
 * The remaining fields are flags, page geometry, byte-address mask,
 * total size, and a name used in log messages.
 */
static const struct flash_spec flash_table[] =
{
#define BUFFERED_FLAGS          (BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
#define NONBUFFERED_FLAGS       (BNX2_NV_WREN)
        /* Slow EEPROM */
        {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
         BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
         SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
         "EEPROM - slow"},
        /* Expansion entry 0001 */
        {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 0001"},
        /* Saifun SA25F010 (non-buffered flash) */
        /* strap, cfg1, & write1 need updates */
        {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
         "Non-buffered flash (128kB)"},
        /* Saifun SA25F020 (non-buffered flash) */
        /* strap, cfg1, & write1 need updates */
        {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
         "Non-buffered flash (256kB)"},
        /* Expansion entry 0100 */
        {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 0100"},
        /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
        {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
         ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
         "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
        /* Entry 0110: ST M45PE20 (non-buffered flash)*/
        {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
         ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
         "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
        /* Saifun SA25F005 (non-buffered flash) */
        /* strap, cfg1, & write1 need updates */
        {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
         "Non-buffered flash (64kB)"},
        /* Fast EEPROM */
        {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
         BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
         SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
         "EEPROM - fast"},
        /* Expansion entry 1001 */
        {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1001"},
        /* Expansion entry 1010 */
        {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1010"},
        /* ATMEL AT45DB011B (buffered flash) */
        {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
         BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
         BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
         "Buffered flash (128kB)"},
        /* Expansion entry 1100 */
        {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1100"},
        /* Expansion entry 1101 */
        {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1101"},
        /* Ateml Expansion entry 1110 */
        {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
         BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
         BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1110 (Atmel)"},
        /* ATMEL AT45DB021B (buffered flash) */
        {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
         BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
         BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
         "Buffered flash (256kB)"},
};
234
/* The 5709 family has a single fixed NVRAM layout, so it gets its own
 * flash_spec instead of going through the strap-matched table above.
 */
static const struct flash_spec flash_5709 = {
        .flags          = BNX2_NV_BUFFERED,
        .page_bits      = BCM5709_FLASH_PAGE_BITS,
        .page_size      = BCM5709_FLASH_PAGE_SIZE,
        .addr_mask      = BCM5709_FLASH_BYTE_ADDR_MASK,
        .total_size     = BUFFERED_FLASH_TOTAL_SIZE*2,
        .name           = "5709 Buffered flash (256kB)",
};
243
244 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
245
246 static void bnx2_init_napi(struct bnx2 *bp);
247 static void bnx2_del_napi(struct bnx2 *bp);
248
249 static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
250 {
251         u32 diff;
252
253         /* Tell compiler to fetch tx_prod and tx_cons from memory. */
254         barrier();
255
256         /* The ring uses 256 indices for 255 entries, one of them
257          * needs to be skipped.
258          */
259         diff = txr->tx_prod - txr->tx_cons;
260         if (unlikely(diff >= BNX2_TX_DESC_CNT)) {
261                 diff &= 0xffff;
262                 if (diff == BNX2_TX_DESC_CNT)
263                         diff = BNX2_MAX_TX_DESC_CNT;
264         }
265         return bp->tx_ring_size - diff;
266 }
267
/* Read a chip register indirectly through the PCI config window.
 * The window is a shared address/data register pair, so indirect_lock
 * must be held across both accesses to keep them paired.
 */
static u32
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&bp->indirect_lock, flags);
        BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
        val = BNX2_RD(bp, BNX2_PCICFG_REG_WINDOW);
        spin_unlock_irqrestore(&bp->indirect_lock, flags);
        return val;
}
280
/* Write @val to a chip register indirectly through the PCI config
 * window.  indirect_lock keeps the address and data writes paired.
 */
static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
        unsigned long flags;

        spin_lock_irqsave(&bp->indirect_lock, flags);
        BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
        BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
        spin_unlock_irqrestore(&bp->indirect_lock, flags);
}
291
/* Write a word into the firmware shared-memory region (offset is
 * relative to shmem_base).
 */
static void
bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
{
        bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
}
297
/* Read a word from the firmware shared-memory region (offset is
 * relative to shmem_base).
 */
static u32
bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
{
        return bnx2_reg_rd_ind(bp, bp->shmem_base + offset);
}
303
/* Write @val into context memory at @cid_addr + @offset.
 *
 * On the 5709 the write goes through the CTX data/ctrl registers and
 * completes asynchronously: the WRITE_REQ bit is polled (up to 5 * 5
 * usec) until the chip clears it.  Older chips use a simple
 * address/data register pair.  indirect_lock serializes the shared
 * register accesses in both paths.
 */
static void
bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
{
        unsigned long flags;

        offset += cid_addr;
        spin_lock_irqsave(&bp->indirect_lock, flags);
        if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
                int i;

                BNX2_WR(bp, BNX2_CTX_CTX_DATA, val);
                BNX2_WR(bp, BNX2_CTX_CTX_CTRL,
                        offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
                /* Wait for the chip to consume the write request.  A
                 * timeout here is silently ignored.
                 */
                for (i = 0; i < 5; i++) {
                        val = BNX2_RD(bp, BNX2_CTX_CTX_CTRL);
                        if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
                                break;
                        udelay(5);
                }
        } else {
                BNX2_WR(bp, BNX2_CTX_DATA_ADR, offset);
                BNX2_WR(bp, BNX2_CTX_DATA, val);
        }
        spin_unlock_irqrestore(&bp->indirect_lock, flags);
}
329
330 #ifdef BCM_CNIC
331 static int
332 bnx2_drv_ctl(struct net_device *dev, struct drv_ctl_info *info)
333 {
334         struct bnx2 *bp = netdev_priv(dev);
335         struct drv_ctl_io *io = &info->data.io;
336
337         switch (info->cmd) {
338         case DRV_CTL_IO_WR_CMD:
339                 bnx2_reg_wr_ind(bp, io->offset, io->data);
340                 break;
341         case DRV_CTL_IO_RD_CMD:
342                 io->data = bnx2_reg_rd_ind(bp, io->offset);
343                 break;
344         case DRV_CTL_CTX_WR_CMD:
345                 bnx2_ctx_wr(bp, io->cid_addr, io->offset, io->data);
346                 break;
347         default:
348                 return -EINVAL;
349         }
350         return 0;
351 }
352
/* Fill in the single IRQ slot handed to the cnic driver.
 *
 * With MSI-X, cnic gets its own vector (index bp->irq_nvecs) and its
 * own MSI-X status block; otherwise it shares vector 0 and the base
 * status block, and cnic_present/cnic_tag let the shared NAPI poll
 * path service it.
 */
static void bnx2_setup_cnic_irq_info(struct bnx2 *bp)
{
        struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
        struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
        int sb_id;

        if (bp->flags & BNX2_FLAG_USING_MSIX) {
                cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
                bnapi->cnic_present = 0;
                sb_id = bp->irq_nvecs;
                cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
        } else {
                cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
                bnapi->cnic_tag = bnapi->last_status_idx;
                bnapi->cnic_present = 1;
                sb_id = 0;
                cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
        }

        cp->irq_arr[0].vector = bp->irq_tbl[sb_id].vector;
        /* Status blocks are laid out contiguously at MSIX alignment;
         * pick the one belonging to sb_id.
         */
        cp->irq_arr[0].status_blk = (void *)
                ((unsigned long) bnapi->status_blk.msi +
                (BNX2_SBLK_MSIX_ALIGN_SIZE * sb_id));
        cp->irq_arr[0].status_blk_num = sb_id;
        cp->num_irq = 1;
}
379
/* Register the cnic driver's ops with this device.
 *
 * Returns -EINVAL for NULL ops, -EBUSY if already registered, and
 * -ENODEV when firmware reports no iSCSI connections available.
 * cnic_ops is published with rcu_assign_pointer so readers see a
 * fully-initialized cnic_data.
 */
static int bnx2_register_cnic(struct net_device *dev, struct cnic_ops *ops,
                              void *data)
{
        struct bnx2 *bp = netdev_priv(dev);
        struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

        if (ops == NULL)
                return -EINVAL;

        if (cp->drv_state & CNIC_DRV_STATE_REGD)
                return -EBUSY;

        if (!bnx2_reg_rd_ind(bp, BNX2_FW_MAX_ISCSI_CONN))
                return -ENODEV;

        bp->cnic_data = data;
        rcu_assign_pointer(bp->cnic_ops, ops);

        cp->num_irq = 0;
        cp->drv_state = CNIC_DRV_STATE_REGD;

        bnx2_setup_cnic_irq_info(bp);

        return 0;
}
405
/* Unregister the cnic driver.  The ops pointer is cleared under
 * cnic_lock and synchronize_rcu() then waits for any in-flight RCU
 * readers of cnic_ops to drain before returning.
 */
static int bnx2_unregister_cnic(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
        struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

        mutex_lock(&bp->cnic_lock);
        cp->drv_state = 0;
        bnapi->cnic_present = 0;
        RCU_INIT_POINTER(bp->cnic_ops, NULL);
        mutex_unlock(&bp->cnic_lock);
        synchronize_rcu();
        return 0;
}
420
/* Probe hook called by the cnic driver: describe this device's
 * offload capabilities.  Returns NULL when firmware reported no
 * iSCSI connections (max_iscsi_conn == 0), i.e. no cnic support.
 */
static struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

        if (!cp->max_iscsi_conn)
                return NULL;

        cp->drv_owner = THIS_MODULE;
        cp->chip_id = bp->chip_id;
        cp->pdev = bp->pdev;
        cp->io_base = bp->regview;
        cp->drv_ctl = bnx2_drv_ctl;
        cp->drv_register_cnic = bnx2_register_cnic;
        cp->drv_unregister_cnic = bnx2_unregister_cnic;

        return cp;
}
439
/* Tell a registered cnic driver (if any) to stop.  cnic_lock both
 * protects the ops pointer and keeps cnic_data valid for the call.
 */
static void
bnx2_cnic_stop(struct bnx2 *bp)
{
        struct cnic_ops *c_ops;
        struct cnic_ctl_info info;

        mutex_lock(&bp->cnic_lock);
        c_ops = rcu_dereference_protected(bp->cnic_ops,
                                          lockdep_is_held(&bp->cnic_lock));
        if (c_ops) {
                info.cmd = CNIC_CTL_STOP_CMD;
                c_ops->cnic_ctl(bp->cnic_data, &info);
        }
        mutex_unlock(&bp->cnic_lock);
}
455
/* Tell a registered cnic driver (if any) to start.  In non-MSI-X mode
 * cnic shares status block 0, so cnic_tag is resynchronized to the
 * current last_status_idx before the start command.
 */
static void
bnx2_cnic_start(struct bnx2 *bp)
{
        struct cnic_ops *c_ops;
        struct cnic_ctl_info info;

        mutex_lock(&bp->cnic_lock);
        c_ops = rcu_dereference_protected(bp->cnic_ops,
                                          lockdep_is_held(&bp->cnic_lock));
        if (c_ops) {
                if (!(bp->flags & BNX2_FLAG_USING_MSIX)) {
                        struct bnx2_napi *bnapi = &bp->bnx2_napi[0];

                        bnapi->cnic_tag = bnapi->last_status_idx;
                }
                info.cmd = CNIC_CTL_START_CMD;
                c_ops->cnic_ctl(bp->cnic_data, &info);
        }
        mutex_unlock(&bp->cnic_lock);
}
476
477 #else
478
/* No-op stub used when the kernel is built without CONFIG_CNIC. */
static void
bnx2_cnic_stop(struct bnx2 *bp)
{
}
483
/* No-op stub used when the kernel is built without CONFIG_CNIC. */
static void
bnx2_cnic_start(struct bnx2 *bp)
{
}
488
489 #endif
490
/* Read PHY register @reg over the MDIO interface.
 *
 * If the chip is auto-polling the PHY, auto-poll is disabled around
 * the access and restored afterwards, with a 40 usec settle delay
 * after each mode change.  Completion is detected by polling the
 * START_BUSY bit for up to 50 * 10 usec.
 *
 * Returns 0 with *val set on success; -EBUSY with *val = 0 if the
 * transaction did not complete in time.
 */
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
        u32 val1;
        int i, ret;

        if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
                val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                /* Read-back after the write — presumably a posted-write
                 * flush before the settle delay.
                 */
                BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);

                udelay(40);
        }

        /* Start the read: PHY address, register number, read command. */
        val1 = (bp->phy_addr << 21) | (reg << 16) |
                BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
                BNX2_EMAC_MDIO_COMM_START_BUSY;
        BNX2_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

        for (i = 0; i < 50; i++) {
                udelay(10);

                val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
                if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
                        udelay(5);

                        /* Re-read and keep only the data field; this
                         * also clears START_BUSY in val1 for the check
                         * below.
                         */
                        val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
                        val1 &= BNX2_EMAC_MDIO_COMM_DATA;

                        break;
                }
        }

        if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
                /* Timed out: START_BUSY never cleared. */
                *val = 0x0;
                ret = -EBUSY;
        }
        else {
                *val = val1;
                ret = 0;
        }

        if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
                val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);

                udelay(40);
        }

        return ret;
}
547
/* Write @val to PHY register @reg over the MDIO interface.
 *
 * Mirrors bnx2_read_phy(): auto-poll is disabled around the access if
 * active, and completion is detected by polling START_BUSY for up to
 * 50 * 10 usec.  Returns 0 on success, -EBUSY on timeout.
 */
static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
        u32 val1;
        int i, ret;

        if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
                val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);

                udelay(40);
        }

        /* Start the write: PHY address, register number, data, write
         * command.
         */
        val1 = (bp->phy_addr << 21) | (reg << 16) | val |
                BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
                BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
        BNX2_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

        for (i = 0; i < 50; i++) {
                udelay(10);

                val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
                if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
                        udelay(5);
                        break;
                }
        }

        if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
                ret = -EBUSY;
        else
                ret = 0;

        if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
                val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);

                udelay(40);
        }

        return ret;
}
596
/* Mask interrupts on every IRQ vector.  The trailing read flushes the
 * posted writes to the chip.
 */
static void
bnx2_disable_int(struct bnx2 *bp)
{
        int i;
        struct bnx2_napi *bnapi;

        for (i = 0; i < bp->irq_nvecs; i++) {
                bnapi = &bp->bnx2_napi[i];
                BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
                       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
        }
        BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
}
610
/* Unmask interrupts on every IRQ vector, acking up to each vector's
 * last seen status index.  NOTE(review): each vector is written twice
 * — first with MASK_INT still set, then without — presumably a chip
 * requirement for the ack/unmask sequence; confirm against the chip
 * programming manual.  The final COAL_NOW forces an immediate
 * interrupt so no events are missed across the re-enable.
 */
static void
bnx2_enable_int(struct bnx2 *bp)
{
        int i;
        struct bnx2_napi *bnapi;

        for (i = 0; i < bp->irq_nvecs; i++) {
                bnapi = &bp->bnx2_napi[i];

                BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
                        BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
                        BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
                        bnapi->last_status_idx);

                BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
                        BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
                        bnapi->last_status_idx);
        }
        BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}
631
/* Disable interrupts and wait for any running handlers to finish.
 * intr_sem is raised first so a racing handler that runs before the
 * mask takes effect will see interrupts as logically disabled.
 */
static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
        int i;

        atomic_inc(&bp->intr_sem);
        if (!netif_running(bp->dev))
                return;

        bnx2_disable_int(bp);
        for (i = 0; i < bp->irq_nvecs; i++)
                synchronize_irq(bp->irq_tbl[i].vector);
}
645
646 static void
647 bnx2_napi_disable(struct bnx2 *bp)
648 {
649         int i;
650
651         for (i = 0; i < bp->irq_nvecs; i++)
652                 napi_disable(&bp->bnx2_napi[i].napi);
653 }
654
655 static void
656 bnx2_napi_enable(struct bnx2 *bp)
657 {
658         int i;
659
660         for (i = 0; i < bp->irq_nvecs; i++)
661                 napi_enable(&bp->bnx2_napi[i].napi);
662 }
663
/* Quiesce the interface: optionally stop cnic, then NAPI, the TX
 * queues, and finally interrupts (synchronously).  Carrier is forced
 * off so the stack does not declare a TX timeout while we are down.
 */
static void
bnx2_netif_stop(struct bnx2 *bp, bool stop_cnic)
{
        if (stop_cnic)
                bnx2_cnic_stop(bp);
        if (netif_running(bp->dev)) {
                bnx2_napi_disable(bp);
                netif_tx_disable(bp->dev);
        }
        bnx2_disable_int_sync(bp);
        netif_carrier_off(bp->dev);     /* prevent tx timeout */
}
676
/* Undo bnx2_netif_stop().  intr_sem counts nested stops; only the
 * call that drops it to zero actually restarts the interface.
 * Carrier is restored (under phy_lock) only if link is still up.
 */
static void
bnx2_netif_start(struct bnx2 *bp, bool start_cnic)
{
        if (atomic_dec_and_test(&bp->intr_sem)) {
                if (netif_running(bp->dev)) {
                        netif_tx_wake_all_queues(bp->dev);
                        spin_lock_bh(&bp->phy_lock);
                        if (bp->link_up)
                                netif_carrier_on(bp->dev);
                        spin_unlock_bh(&bp->phy_lock);
                        bnx2_napi_enable(bp);
                        bnx2_enable_int(bp);
                        if (start_cnic)
                                bnx2_cnic_start(bp);
                }
        }
}
694
695 static void
696 bnx2_free_tx_mem(struct bnx2 *bp)
697 {
698         int i;
699
700         for (i = 0; i < bp->num_tx_rings; i++) {
701                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
702                 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
703
704                 if (txr->tx_desc_ring) {
705                         dma_free_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
706                                           txr->tx_desc_ring,
707                                           txr->tx_desc_mapping);
708                         txr->tx_desc_ring = NULL;
709                 }
710                 kfree(txr->tx_buf_ring);
711                 txr->tx_buf_ring = NULL;
712         }
713 }
714
/* Release all RX memory: per-page DMA descriptor rings, the vmalloc'd
 * software buffer ring, and (if used) the page-mode rings.  Safe on
 * partially-allocated state — NULL entries are skipped and pointers
 * cleared, and vfree(NULL) is a no-op.
 */
static void
bnx2_free_rx_mem(struct bnx2 *bp)
{
        int i;

        for (i = 0; i < bp->num_rx_rings; i++) {
                struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
                struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
                int j;

                for (j = 0; j < bp->rx_max_ring; j++) {
                        if (rxr->rx_desc_ring[j])
                                dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
                                                  rxr->rx_desc_ring[j],
                                                  rxr->rx_desc_mapping[j]);
                        rxr->rx_desc_ring[j] = NULL;
                }
                vfree(rxr->rx_buf_ring);
                rxr->rx_buf_ring = NULL;

                for (j = 0; j < bp->rx_max_pg_ring; j++) {
                        if (rxr->rx_pg_desc_ring[j])
                                dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
                                                  rxr->rx_pg_desc_ring[j],
                                                  rxr->rx_pg_desc_mapping[j]);
                        rxr->rx_pg_desc_ring[j] = NULL;
                }
                vfree(rxr->rx_pg_ring);
                rxr->rx_pg_ring = NULL;
        }
}
746
747 static int
748 bnx2_alloc_tx_mem(struct bnx2 *bp)
749 {
750         int i;
751
752         for (i = 0; i < bp->num_tx_rings; i++) {
753                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
754                 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
755
756                 txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
757                 if (txr->tx_buf_ring == NULL)
758                         return -ENOMEM;
759
760                 txr->tx_desc_ring =
761                         dma_alloc_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
762                                            &txr->tx_desc_mapping, GFP_KERNEL);
763                 if (txr->tx_desc_ring == NULL)
764                         return -ENOMEM;
765         }
766         return 0;
767 }
768
/* Allocate RX memory for every RX queue: a vmalloc'd software buffer
 * ring, one DMA descriptor ring per page, and (when rx_pg_ring_size
 * is non-zero) the page-mode rings.  Returns 0 on success or -ENOMEM
 * on the first failure; earlier allocations are left for the caller
 * to unwind (presumably via bnx2_free_rx_mem() — confirm at call
 * sites).
 */
static int
bnx2_alloc_rx_mem(struct bnx2 *bp)
{
        int i;

        for (i = 0; i < bp->num_rx_rings; i++) {
                struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
                struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
                int j;

                rxr->rx_buf_ring =
                        vzalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
                if (rxr->rx_buf_ring == NULL)
                        return -ENOMEM;

                for (j = 0; j < bp->rx_max_ring; j++) {
                        rxr->rx_desc_ring[j] =
                                dma_alloc_coherent(&bp->pdev->dev,
                                                   RXBD_RING_SIZE,
                                                   &rxr->rx_desc_mapping[j],
                                                   GFP_KERNEL);
                        if (rxr->rx_desc_ring[j] == NULL)
                                return -ENOMEM;

                }

                if (bp->rx_pg_ring_size) {
                        rxr->rx_pg_ring = vzalloc(SW_RXPG_RING_SIZE *
                                                  bp->rx_max_pg_ring);
                        if (rxr->rx_pg_ring == NULL)
                                return -ENOMEM;

                }

                /* rx_max_pg_ring is 0 when page mode is unused, so this
                 * loop is a no-op in that case.
                 */
                for (j = 0; j < bp->rx_max_pg_ring; j++) {
                        rxr->rx_pg_desc_ring[j] =
                                dma_alloc_coherent(&bp->pdev->dev,
                                                   RXBD_RING_SIZE,
                                                   &rxr->rx_pg_desc_mapping[j],
                                                   GFP_KERNEL);
                        if (rxr->rx_pg_desc_ring[j] == NULL)
                                return -ENOMEM;

                }
        }
        return 0;
}
816
/* Free the combined status + statistics block allocation made by
 * bnx2_alloc_stats_blk().  stats_blk points into the same buffer, so
 * both pointers are cleared after the single free.
 */
static void
bnx2_free_stats_blk(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);

        if (bp->status_blk) {
                dma_free_coherent(&bp->pdev->dev, bp->status_stats_size,
                                  bp->status_blk,
                                  bp->status_blk_mapping);
                bp->status_blk = NULL;
                bp->stats_blk = NULL;
        }
}
830
/* Allocate the status block and statistics block as one zeroed DMA-
 * coherent buffer: status block(s) first (sized for all MSI-X vectors
 * when the chip supports MSI-X), statistics block appended at the next
 * cache-line-aligned offset.  Returns 0 or -ENOMEM.
 */
static int
bnx2_alloc_stats_blk(struct net_device *dev)
{
        int status_blk_size;
        void *status_blk;
        struct bnx2 *bp = netdev_priv(dev);

        /* Combine status and statistics blocks into one allocation. */
        status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
        if (bp->flags & BNX2_FLAG_MSIX_CAP)
                status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
                                                 BNX2_SBLK_MSIX_ALIGN_SIZE);
        bp->status_stats_size = status_blk_size +
                                sizeof(struct statistics_block);
        status_blk = dma_zalloc_coherent(&bp->pdev->dev, bp->status_stats_size,
                                         &bp->status_blk_mapping, GFP_KERNEL);
        if (status_blk == NULL)
                return -ENOMEM;

        bp->status_blk = status_blk;
        bp->stats_blk = status_blk + status_blk_size;
        bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

        return 0;
}
856
857 static void
858 bnx2_free_mem(struct bnx2 *bp)
859 {
860         int i;
861         struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
862
863         bnx2_free_tx_mem(bp);
864         bnx2_free_rx_mem(bp);
865
866         for (i = 0; i < bp->ctx_pages; i++) {
867                 if (bp->ctx_blk[i]) {
868                         dma_free_coherent(&bp->pdev->dev, BNX2_PAGE_SIZE,
869                                           bp->ctx_blk[i],
870                                           bp->ctx_blk_mapping[i]);
871                         bp->ctx_blk[i] = NULL;
872                 }
873         }
874
875         if (bnapi->status_blk.msi)
876                 bnapi->status_blk.msi = NULL;
877 }
878
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
	/* Allocate all remaining per-device DMA memory and wire each NAPI
	 * vector to its slice of the (already allocated) status block.
	 * Returns 0 or -ENOMEM; on failure everything allocated here is
	 * unwound via bnx2_free_mem().
	 */
	int i, err;
	struct bnx2_napi *bnapi;

	/* Vector 0 always uses the base status block (INTx/MSI layout). */
	bnapi = &bp->bnx2_napi[0];
	bnapi->status_blk.msi = bp->status_blk;
	bnapi->hw_tx_cons_ptr =
		&bnapi->status_blk.msi->status_tx_quick_consumer_index0;
	bnapi->hw_rx_cons_ptr =
		&bnapi->status_blk.msi->status_rx_quick_consumer_index0;
	if (bp->flags & BNX2_FLAG_MSIX_CAP) {
		/* Additional MSI-X vectors each get their own status-block
		 * slice, spaced BNX2_SBLK_MSIX_ALIGN_SIZE bytes apart in
		 * the same allocation.
		 */
		for (i = 1; i < bp->irq_nvecs; i++) {
			struct status_block_msix *sblk;

			bnapi = &bp->bnx2_napi[i];

			sblk = (bp->status_blk + BNX2_SBLK_MSIX_ALIGN_SIZE * i);
			bnapi->status_blk.msix = sblk;
			bnapi->hw_tx_cons_ptr =
				&sblk->status_tx_quick_consumer_index;
			bnapi->hw_rx_cons_ptr =
				&sblk->status_rx_quick_consumer_index;
			/* Vector id placed in bits 24+ as used by the
			 * interrupt ack register writes for this vector.
			 */
			bnapi->int_num = i << 24;
		}
	}

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		/* 5709 needs 8KB (0x2000) of host-resident context memory,
		 * split into BNX2_PAGE_SIZE chunks (at least one page).
		 */
		bp->ctx_pages = 0x2000 / BNX2_PAGE_SIZE;
		if (bp->ctx_pages == 0)
			bp->ctx_pages = 1;
		for (i = 0; i < bp->ctx_pages; i++) {
			bp->ctx_blk[i] = dma_alloc_coherent(&bp->pdev->dev,
						BNX2_PAGE_SIZE,
						&bp->ctx_blk_mapping[i],
						GFP_KERNEL);
			if (bp->ctx_blk[i] == NULL)
				goto alloc_mem_err;
		}
	}

	err = bnx2_alloc_rx_mem(bp);
	if (err)
		goto alloc_mem_err;

	err = bnx2_alloc_tx_mem(bp);
	if (err)
		goto alloc_mem_err;

	return 0;

alloc_mem_err:
	/* Unwind any partial allocations made above. */
	bnx2_free_mem(bp);
	return -ENOMEM;
}
935
static void
bnx2_report_fw_link(struct bnx2 *bp)
{
	/* Report the current link speed/duplex/autoneg state to the
	 * bootcode firmware via the BNX2_LINK_STATUS shared-memory word.
	 */
	u32 fw_link_status = 0;

	/* With a remote (firmware-managed) PHY the firmware already owns
	 * the link state; nothing to report.
	 */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return;

	if (bp->link_up) {
		u32 bmsr;

		switch (bp->line_speed) {
		case SPEED_10:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_10HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_10FULL;
			break;
		case SPEED_100:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_100HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_100FULL;
			break;
		case SPEED_1000:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_1000HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_1000FULL;
			break;
		case SPEED_2500:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_2500HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_2500FULL;
			break;
		}

		fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

		if (bp->autoneg) {
			fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

			/* BMSR link/AN bits are latched per the MII spec;
			 * read twice to get the current value.
			 */
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

			/* Link without a completed autoneg means the PHY
			 * came up via parallel detection.
			 */
			if (!(bmsr & BMSR_ANEGCOMPLETE) ||
			    bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
				fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
			else
				fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
		}
	}
	else
		fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

	bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
}
994
995 static char *
996 bnx2_xceiver_str(struct bnx2 *bp)
997 {
998         return (bp->phy_port == PORT_FIBRE) ? "SerDes" :
999                 ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
1000                  "Copper");
1001 }
1002
static void
bnx2_report_link(struct bnx2 *bp)
{
	/* Sync the net-core carrier state with bp->link_up, log the
	 * transition, and forward the state to the firmware.
	 */
	if (bp->link_up) {
		netif_carrier_on(bp->dev);
		/* No trailing newline: flow-control details are appended
		 * with pr_cont() below before the final "\n".
		 */
		netdev_info(bp->dev, "NIC %s Link is Up, %d Mbps %s duplex",
			    bnx2_xceiver_str(bp),
			    bp->line_speed,
			    bp->duplex == DUPLEX_FULL ? "full" : "half");

		if (bp->flow_ctrl) {
			if (bp->flow_ctrl & FLOW_CTRL_RX) {
				pr_cont(", receive ");
				if (bp->flow_ctrl & FLOW_CTRL_TX)
					pr_cont("& transmit ");
			}
			else {
				pr_cont(", transmit ");
			}
			pr_cont("flow control ON");
		}
		pr_cont("\n");
	} else {
		netif_carrier_off(bp->dev);
		netdev_err(bp->dev, "NIC %s Link is Down\n",
			   bnx2_xceiver_str(bp));
	}

	bnx2_report_fw_link(bp);
}
1033
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	/* Resolve the effective pause configuration into bp->flow_ctrl
	 * (FLOW_CTRL_TX / FLOW_CTRL_RX bits), either from the manually
	 * requested settings or from the autoneg result.
	 */
	u32 local_adv, remote_adv;

	bp->flow_ctrl = 0;
	/* Unless both speed and flow-control autoneg are enabled, use the
	 * requested settings directly (full duplex only).
	 */
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		(AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;
		}
		return;
	}

	/* Pause is only meaningful on full-duplex links. */
	if (bp->duplex != DUPLEX_FULL) {
		return;
	}

	/* The 5708 SerDes PHY reports the already-resolved pause result
	 * directly in its 1000X status register.
	 */
	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (BNX2_CHIP(bp) == BNX2_CHIP_5708)) {
		u32 val;

		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
		return;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	/* Translate the 1000BASE-X pause advertisement encoding to the
	 * copper encoding so a single resolution table below covers both
	 * PHY types.
	 */
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if(local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				/* Partner sends pause but cannot receive it:
				 * we may only honor (RX) pause frames.
				 */
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		}
		else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	}
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
			(remote_adv & ADVERTISE_PAUSE_ASYM)) {

			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
}
1109
1110 static int
1111 bnx2_5709s_linkup(struct bnx2 *bp)
1112 {
1113         u32 val, speed;
1114
1115         bp->link_up = 1;
1116
1117         bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
1118         bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
1119         bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1120
1121         if ((bp->autoneg & AUTONEG_SPEED) == 0) {
1122                 bp->line_speed = bp->req_line_speed;
1123                 bp->duplex = bp->req_duplex;
1124                 return 0;
1125         }
1126         speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
1127         switch (speed) {
1128                 case MII_BNX2_GP_TOP_AN_SPEED_10:
1129                         bp->line_speed = SPEED_10;
1130                         break;
1131                 case MII_BNX2_GP_TOP_AN_SPEED_100:
1132                         bp->line_speed = SPEED_100;
1133                         break;
1134                 case MII_BNX2_GP_TOP_AN_SPEED_1G:
1135                 case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
1136                         bp->line_speed = SPEED_1000;
1137                         break;
1138                 case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
1139                         bp->line_speed = SPEED_2500;
1140                         break;
1141         }
1142         if (val & MII_BNX2_GP_TOP_AN_FD)
1143                 bp->duplex = DUPLEX_FULL;
1144         else
1145                 bp->duplex = DUPLEX_HALF;
1146         return 0;
1147 }
1148
1149 static int
1150 bnx2_5708s_linkup(struct bnx2 *bp)
1151 {
1152         u32 val;
1153
1154         bp->link_up = 1;
1155         bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
1156         switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
1157                 case BCM5708S_1000X_STAT1_SPEED_10:
1158                         bp->line_speed = SPEED_10;
1159                         break;
1160                 case BCM5708S_1000X_STAT1_SPEED_100:
1161                         bp->line_speed = SPEED_100;
1162                         break;
1163                 case BCM5708S_1000X_STAT1_SPEED_1G:
1164                         bp->line_speed = SPEED_1000;
1165                         break;
1166                 case BCM5708S_1000X_STAT1_SPEED_2G5:
1167                         bp->line_speed = SPEED_2500;
1168                         break;
1169         }
1170         if (val & BCM5708S_1000X_STAT1_FD)
1171                 bp->duplex = DUPLEX_FULL;
1172         else
1173                 bp->duplex = DUPLEX_HALF;
1174
1175         return 0;
1176 }
1177
1178 static int
1179 bnx2_5706s_linkup(struct bnx2 *bp)
1180 {
1181         u32 bmcr, local_adv, remote_adv, common;
1182
1183         bp->link_up = 1;
1184         bp->line_speed = SPEED_1000;
1185
1186         bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1187         if (bmcr & BMCR_FULLDPLX) {
1188                 bp->duplex = DUPLEX_FULL;
1189         }
1190         else {
1191                 bp->duplex = DUPLEX_HALF;
1192         }
1193
1194         if (!(bmcr & BMCR_ANENABLE)) {
1195                 return 0;
1196         }
1197
1198         bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1199         bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1200
1201         common = local_adv & remote_adv;
1202         if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
1203
1204                 if (common & ADVERTISE_1000XFULL) {
1205                         bp->duplex = DUPLEX_FULL;
1206                 }
1207                 else {
1208                         bp->duplex = DUPLEX_HALF;
1209                 }
1210         }
1211
1212         return 0;
1213 }
1214
static int
bnx2_copper_linkup(struct bnx2 *bp)
{
	/* Resolve speed/duplex (and MDI-X state) for a copper link-up
	 * event.  May clear bp->link_up if autoneg produced no common
	 * ability.  Always returns 0.
	 */
	u32 bmcr;

	bp->phy_flags &= ~BNX2_PHY_FLAG_MDIX;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_ANENABLE) {
		u32 local_adv, remote_adv, common;

		/* Check gigabit abilities first: MII_STAT1000 link-partner
		 * bits sit two positions above the MII_CTRL1000
		 * advertisement bits, hence the >> 2 to align them.
		 */
		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

		common = local_adv & (remote_adv >> 2);
		if (common & ADVERTISE_1000FULL) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_FULL;
		}
		else if (common & ADVERTISE_1000HALF) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_HALF;
		}
		else {
			/* No gigabit match; fall back to the 10/100
			 * advertisement registers, highest common
			 * ability first.
			 */
			bnx2_read_phy(bp, bp->mii_adv, &local_adv);
			bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

			common = local_adv & remote_adv;
			if (common & ADVERTISE_100FULL) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_100HALF) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_HALF;
			}
			else if (common & ADVERTISE_10FULL) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_10HALF) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_HALF;
			}
			else {
				/* Nothing in common: treat as link down. */
				bp->line_speed = 0;
				bp->link_up = 0;
			}
		}
	}
	else {
		/* Autoneg disabled: speed/duplex are forced via BMCR. */
		if (bmcr & BMCR_SPEED100) {
			bp->line_speed = SPEED_100;
		}
		else {
			bp->line_speed = SPEED_10;
		}
		if (bmcr & BMCR_FULLDPLX) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	if (bp->link_up) {
		u32 ext_status;

		/* Record whether the PHY resolved to crossover (MDI-X). */
		bnx2_read_phy(bp, MII_BNX2_EXT_STATUS, &ext_status);
		if (ext_status & EXT_STATUS_MDIX)
			bp->phy_flags |= BNX2_PHY_FLAG_MDIX;
	}

	return 0;
}
1290
1291 static void
1292 bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
1293 {
1294         u32 val, rx_cid_addr = GET_CID_ADDR(cid);
1295
1296         val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
1297         val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
1298         val |= 0x02 << 8;
1299
1300         if (bp->flow_ctrl & FLOW_CTRL_TX)
1301                 val |= BNX2_L2CTX_FLOW_CTRL_ENABLE;
1302
1303         bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
1304 }
1305
1306 static void
1307 bnx2_init_all_rx_contexts(struct bnx2 *bp)
1308 {
1309         int i;
1310         u32 cid;
1311
1312         for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) {
1313                 if (i == 1)
1314                         cid = RX_RSS_CID;
1315                 bnx2_init_rx_context(bp, cid);
1316         }
1317 }
1318
static void
bnx2_set_mac_link(struct bnx2 *bp)
{
	/* Program the EMAC to match the resolved link parameters
	 * (port mode, duplex, pause) and re-init the RX contexts so
	 * flow-control changes take effect.
	 */
	u32 val;

	/* TX inter-frame gap / slot-time tuning; 0x26ff is the value for
	 * the 1G half-duplex corner case (exact field meanings are not
	 * visible here - values carried over from the original driver).
	 */
	BNX2_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
		(bp->duplex == DUPLEX_HALF)) {
		BNX2_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = BNX2_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
			case SPEED_10:
				/* Only chips newer than the 5706 have a
				 * dedicated 10M MII mode; the 5706 falls
				 * through to plain MII.
				 */
				if (BNX2_CHIP(bp) != BNX2_CHIP_5706) {
					val |= BNX2_EMAC_MODE_PORT_MII_10M;
					break;
				}
				/* fall through */
			case SPEED_100:
				val |= BNX2_EMAC_MODE_PORT_MII;
				break;
			case SPEED_2500:
				/* 2.5G is GMII mode plus the 25G flag. */
				val |= BNX2_EMAC_MODE_25G_MODE;
				/* fall through */
			case SPEED_1000:
				val |= BNX2_EMAC_MODE_PORT_GMII;
				break;
		}
	}
	else {
		/* Link down: park the port in GMII mode. */
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	BNX2_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	BNX2_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = BNX2_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	BNX2_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	BNX2_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	/* Re-init RX contexts so the flow-control enable bit is updated. */
	bnx2_init_all_rx_contexts(bp);
}
1385
1386 static void
1387 bnx2_enable_bmsr1(struct bnx2 *bp)
1388 {
1389         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1390             (BNX2_CHIP(bp) == BNX2_CHIP_5709))
1391                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1392                                MII_BNX2_BLK_ADDR_GP_STATUS);
1393 }
1394
1395 static void
1396 bnx2_disable_bmsr1(struct bnx2 *bp)
1397 {
1398         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1399             (BNX2_CHIP(bp) == BNX2_CHIP_5709))
1400                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1401                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1402 }
1403
1404 static int
1405 bnx2_test_and_enable_2g5(struct bnx2 *bp)
1406 {
1407         u32 up1;
1408         int ret = 1;
1409
1410         if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1411                 return 0;
1412
1413         if (bp->autoneg & AUTONEG_SPEED)
1414                 bp->advertising |= ADVERTISED_2500baseX_Full;
1415
1416         if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
1417                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1418
1419         bnx2_read_phy(bp, bp->mii_up1, &up1);
1420         if (!(up1 & BCM5708S_UP1_2G5)) {
1421                 up1 |= BCM5708S_UP1_2G5;
1422                 bnx2_write_phy(bp, bp->mii_up1, up1);
1423                 ret = 0;
1424         }
1425
1426         if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
1427                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1428                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1429
1430         return ret;
1431 }
1432
1433 static int
1434 bnx2_test_and_disable_2g5(struct bnx2 *bp)
1435 {
1436         u32 up1;
1437         int ret = 0;
1438
1439         if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1440                 return 0;
1441
1442         if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
1443                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1444
1445         bnx2_read_phy(bp, bp->mii_up1, &up1);
1446         if (up1 & BCM5708S_UP1_2G5) {
1447                 up1 &= ~BCM5708S_UP1_2G5;
1448                 bnx2_write_phy(bp, bp->mii_up1, up1);
1449                 ret = 1;
1450         }
1451
1452         if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
1453                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1454                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1455
1456         return ret;
1457 }
1458
static void
bnx2_enable_forced_2g5(struct bnx2 *bp)
{
	/* Force the SerDes PHY to 2.5G (non-autoneg) mode.  Chip-specific:
	 * 5709 uses the SERDES_DIG MISC1 force bits, 5708 a BMCR bit.
	 * No-op for non-2.5G-capable PHYs and other chips.
	 */
	u32 uninitialized_var(bmcr);
	int err;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return;

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		u32 val;

		/* Set the force-speed field in the SERDES_DIG block. */
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
			val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
			val |= MII_BNX2_SD_MISC1_FORCE |
				MII_BNX2_SD_MISC1_FORCE_2_5G;
			bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
		}

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (!err)
			bmcr |= BCM5708S_BMCR_FORCE_2500;
	} else {
		return;
	}

	/* bmcr is only valid when the read above succeeded. */
	if (err)
		return;

	/* When speed was autonegotiated, disable autoneg so the forced
	 * speed sticks, keeping the requested duplex.
	 */
	if (bp->autoneg & AUTONEG_SPEED) {
		bmcr &= ~BMCR_ANENABLE;
		if (bp->req_duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;
	}
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}
1502
static void
bnx2_disable_forced_2g5(struct bnx2 *bp)
{
	/* Undo bnx2_enable_forced_2g5(): clear the chip-specific forced
	 * 2.5G mode and (when speed autoneg is configured) restart
	 * autonegotiation.
	 */
	u32 uninitialized_var(bmcr);
	int err;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return;

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		u32 val;

		/* Clear the force bit in the SERDES_DIG block. */
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
			val &= ~MII_BNX2_SD_MISC1_FORCE;
			bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
		}

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (!err)
			bmcr &= ~BCM5708S_BMCR_FORCE_2500;
	} else {
		return;
	}

	/* bmcr is only valid when the read above succeeded. */
	if (err)
		return;

	/* Re-enable and restart autoneg at 1G if speed autoneg is on. */
	if (bp->autoneg & AUTONEG_SPEED)
		bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}
1541
static void
bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
{
	/* Toggle the forced-link-down state of the 5706 SerDes by a
	 * read-modify-write of the expansion SERDES control register,
	 * accessed via the DSP address/data port pair.
	 */
	u32 val;

	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
	if (start)
		/* Clear bits 4-7 (mask 0xff0f).  NOTE(review): exact bit
		 * semantics come from Broadcom PHY documentation not
		 * visible here - confirm before changing these masks.
		 */
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
	else
		/* Set bits 6-7 (0xc0) - see NOTE above. */
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
}
1554
static int
bnx2_set_link(struct bnx2 *bp)
{
	/* Re-evaluate the link state from the PHY, update bp->link_up and
	 * the resolved speed/duplex/flow-control, reprogram the MAC, and
	 * report any transition.  Always returns 0.
	 */
	u32 bmsr;
	u8 link_up;

	/* In loopback modes the link is up by definition. */
	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	/* A remote (firmware-managed) PHY handles link state itself. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return 0;

	link_up = bp->link_up;

	/* BMSR1 is latched; read twice for the current value, with the
	 * 5709 register-block switch around it.
	 */
	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);

	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (BNX2_CHIP(bp) == BNX2_CHIP_5706)) {
		u32 val, an_dbg;

		/* Release a previously forced-down 5706 SerDes link. */
		if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
			bnx2_5706s_force_link_dn(bp, 0);
			bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
		}
		val = BNX2_RD(bp, BNX2_EMAC_STATUS);

		/* Cross-check the PHY's view against the EMAC link status
		 * and the AN debug NOSYNC indication (read twice - the
		 * shadow register is latched) to get a reliable 5706
		 * SerDes link indication.
		 */
		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

		if ((val & BNX2_EMAC_STATUS_LINK) &&
		    !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		/* Resolve speed/duplex with the chip-specific handler. */
		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
				bnx2_5706s_linkup(bp);
			else if (BNX2_CHIP(bp) == BNX2_CHIP_5708)
				bnx2_5708s_linkup(bp);
			else if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
				bnx2_5709s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		/* Link down: drop any forced 2.5G mode so autoneg can
		 * run, and leave parallel-detect mode by re-enabling
		 * autoneg.
		 */
		if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
		    (bp->autoneg & AUTONEG_SPEED))
			bnx2_disable_forced_2g5(bp);

		if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
		bp->link_up = 0;
	}

	/* Only log/report when the state actually changed. */
	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}
1638
1639 static int
1640 bnx2_reset_phy(struct bnx2 *bp)
1641 {
1642         int i;
1643         u32 reg;
1644
1645         bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
1646
1647 #define PHY_RESET_MAX_WAIT 100
1648         for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1649                 udelay(10);
1650
1651                 bnx2_read_phy(bp, bp->mii_bmcr, &reg);
1652                 if (!(reg & BMCR_RESET)) {
1653                         udelay(20);
1654                         break;
1655                 }
1656         }
1657         if (i == PHY_RESET_MAX_WAIT) {
1658                 return -EBUSY;
1659         }
1660         return 0;
1661 }
1662
1663 static u32
1664 bnx2_phy_get_pause_adv(struct bnx2 *bp)
1665 {
1666         u32 adv = 0;
1667
1668         if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1669                 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1670
1671                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1672                         adv = ADVERTISE_1000XPAUSE;
1673                 }
1674                 else {
1675                         adv = ADVERTISE_PAUSE_CAP;
1676                 }
1677         }
1678         else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1679                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1680                         adv = ADVERTISE_1000XPSE_ASYM;
1681                 }
1682                 else {
1683                         adv = ADVERTISE_PAUSE_ASYM;
1684                 }
1685         }
1686         else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1687                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1688                         adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1689                 }
1690                 else {
1691                         adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1692                 }
1693         }
1694         return adv;
1695 }
1696
1697 static int bnx2_fw_sync(struct bnx2 *, u32, int, int);
1698
static int
bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	/* Configure a firmware-managed (remote) PHY: encode the requested
	 * speed/duplex/pause settings into the BNX2_DRV_MB_ARG0 shared
	 * memory word and issue a SET_LINK command to the firmware.
	 * Temporarily drops bp->phy_lock around the firmware handshake
	 * (see the __releases/__acquires annotations).  Always returns 0.
	 */
	u32 speed_arg = 0, pause_adv;

	pause_adv = bnx2_phy_get_pause_adv(bp);

	if (bp->autoneg & AUTONEG_SPEED) {
		/* Autoneg: advertise every enabled speed/duplex. */
		speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
		if (bp->advertising & ADVERTISED_10baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		if (bp->advertising & ADVERTISED_2500baseX_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
	} else {
		/* Forced mode: encode exactly the requested speed/duplex
		 * (1G and 2.5G are full duplex only).
		 */
		if (bp->req_line_speed == SPEED_2500)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
		else if (bp->req_line_speed == SPEED_1000)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		else if (bp->req_line_speed == SPEED_100) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		} else if (bp->req_line_speed == SPEED_10) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		}
	}

	/* Fold the pause advertisement (either encoding) into the
	 * firmware's symmetric/asymmetric flags.
	 */
	if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
	if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;

	if (port == PORT_TP)
		speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
			     BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;

	bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);

	/* bnx2_fw_sync() sleeps; drop the phy_lock around it. */
	spin_unlock_bh(&bp->phy_lock);
	bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0);
	spin_lock_bh(&bp->phy_lock);

	return 0;
}
1757
/* Configure a SerDes (fibre) PHY from bp->autoneg and the requested
 * speed/duplex/advertising settings.
 *
 * Hands off to bnx2_setup_remote_phy() when the PHY is managed by
 * firmware.  Otherwise either forces speed/duplex via BMCR, or
 * (re)starts autonegotiation with the appropriate 1000X
 * advertisement.  Called with bp->phy_lock held; the lock is dropped
 * briefly while waiting for a forced link-down to propagate.
 * Always returns 0.
 */
static int
bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 adv, bmcr;
	u32 new_adv = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return bnx2_setup_remote_phy(bp, port);

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		/* Forced speed/duplex path. */
		u32 new_bmcr;
		int force_link_down = 0;

		/* Changing the 2.5G capability needs a link bounce so the
		 * partner re-evaluates the advertisement.
		 */
		if (bp->req_line_speed == SPEED_2500) {
			if (!bnx2_test_and_enable_2g5(bp))
				force_link_down = 1;
		} else if (bp->req_line_speed == SPEED_1000) {
			if (bnx2_test_and_disable_2g5(bp))
				force_link_down = 1;
		}
		bnx2_read_phy(bp, bp->mii_adv, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;

		if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
			if (bp->req_line_speed == SPEED_2500)
				bnx2_enable_forced_2g5(bp);
			else if (bp->req_line_speed == SPEED_1000) {
				bnx2_disable_forced_2g5(bp);
				/* Clear bit 13 of BMCR as well when dropping
				 * back to 1G -- NOTE(review): presumably part
				 * of the 5709 2.5G speed encoding; confirm
				 * against chip documentation.
				 */
				new_bmcr &= ~0x2000;
			}

		} else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
			if (bp->req_line_speed == SPEED_2500)
				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			else
				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				bnx2_write_phy(bp, bp->mii_adv, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, bp->mii_adv, adv);
			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
		} else {
			/* Nothing changed in BMCR; just re-resolve flow
			 * control and refresh the MAC settings.
			 */
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Autonegotiation path. */
	bnx2_test_and_enable_2g5(bp);

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, bp->mii_adv, &adv);
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, bp->mii_adv, new_adv);
		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}

	return 0;
}
1874
1875 #define ETHTOOL_ALL_FIBRE_SPEED                                         \
1876         (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ?                  \
1877                 (ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
1878                 (ADVERTISED_1000baseT_Full)
1879
1880 #define ETHTOOL_ALL_COPPER_SPEED                                        \
1881         (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |            \
1882         ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |           \
1883         ADVERTISED_1000baseT_Full)
1884
1885 #define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
1886         ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)
1887
1888 #define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1889
1890 static void
1891 bnx2_set_default_remote_link(struct bnx2 *bp)
1892 {
1893         u32 link;
1894
1895         if (bp->phy_port == PORT_TP)
1896                 link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
1897         else
1898                 link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);
1899
1900         if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
1901                 bp->req_line_speed = 0;
1902                 bp->autoneg |= AUTONEG_SPEED;
1903                 bp->advertising = ADVERTISED_Autoneg;
1904                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1905                         bp->advertising |= ADVERTISED_10baseT_Half;
1906                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
1907                         bp->advertising |= ADVERTISED_10baseT_Full;
1908                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1909                         bp->advertising |= ADVERTISED_100baseT_Half;
1910                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
1911                         bp->advertising |= ADVERTISED_100baseT_Full;
1912                 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1913                         bp->advertising |= ADVERTISED_1000baseT_Full;
1914                 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1915                         bp->advertising |= ADVERTISED_2500baseX_Full;
1916         } else {
1917                 bp->autoneg = 0;
1918                 bp->advertising = 0;
1919                 bp->req_duplex = DUPLEX_FULL;
1920                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
1921                         bp->req_line_speed = SPEED_10;
1922                         if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1923                                 bp->req_duplex = DUPLEX_HALF;
1924                 }
1925                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
1926                         bp->req_line_speed = SPEED_100;
1927                         if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1928                                 bp->req_duplex = DUPLEX_HALF;
1929                 }
1930                 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1931                         bp->req_line_speed = SPEED_1000;
1932                 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1933                         bp->req_line_speed = SPEED_2500;
1934         }
1935 }
1936
1937 static void
1938 bnx2_set_default_link(struct bnx2 *bp)
1939 {
1940         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
1941                 bnx2_set_default_remote_link(bp);
1942                 return;
1943         }
1944
1945         bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1946         bp->req_line_speed = 0;
1947         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1948                 u32 reg;
1949
1950                 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1951
1952                 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
1953                 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1954                 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1955                         bp->autoneg = 0;
1956                         bp->req_line_speed = bp->line_speed = SPEED_1000;
1957                         bp->req_duplex = DUPLEX_FULL;
1958                 }
1959         } else
1960                 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1961 }
1962
/* Publish a driver heart-beat pulse for the bootcode firmware.
 *
 * Increments the pulse sequence number (masked to the field width)
 * and writes it to the DRV_PULSE_MB shared-memory mailbox through the
 * PCICFG register window.  indirect_lock serializes the window
 * address/data register pair against other indirect accesses.
 */
static void
bnx2_send_heart_beat(struct bnx2 *bp)
{
	u32 msg;
	u32 addr;

	spin_lock(&bp->indirect_lock);
	msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
	addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
	spin_unlock(&bp->indirect_lock);
}
1976
/* Handle a link-status event posted by the firmware-managed PHY.
 *
 * Reads BNX2_LINK_STATUS from shared memory and decodes link state,
 * speed, duplex, flow control and media type into bp->*.  Reports a
 * link change to the stack if the up/down state changed, and always
 * reprograms the MAC for the current link parameters.
 */
static void
bnx2_remote_phy_event(struct bnx2 *bp)
{
	u32 msg;
	u8 link_up = bp->link_up;	/* previous state, for change detection */
	u8 old_port;

	msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);

	/* The firmware flags heart-beat expiry in the status word; answer
	 * with a pulse and strip the bit before decoding.
	 */
	if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
		bnx2_send_heart_beat(bp);

	msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;

	if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
		bp->link_up = 0;
	else {
		u32 speed;

		bp->link_up = 1;
		speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
		bp->duplex = DUPLEX_FULL;
		/* Each *HALF case sets half duplex then falls through to
		 * the matching *FULL case for the line speed.
		 */
		switch (speed) {
			case BNX2_LINK_STATUS_10HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_10FULL:
				bp->line_speed = SPEED_10;
				break;
			case BNX2_LINK_STATUS_100HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_100BASE_T4:
			case BNX2_LINK_STATUS_100FULL:
				bp->line_speed = SPEED_100;
				break;
			case BNX2_LINK_STATUS_1000HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_1000FULL:
				bp->line_speed = SPEED_1000;
				break;
			case BNX2_LINK_STATUS_2500HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_2500FULL:
				bp->line_speed = SPEED_2500;
				break;
			default:
				bp->line_speed = 0;
				break;
		}

		bp->flow_ctrl = 0;
		if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
			/* Flow control was forced rather than negotiated. */
			if (bp->duplex == DUPLEX_FULL)
				bp->flow_ctrl = bp->req_flow_ctrl;
		} else {
			if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_TX;
			if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_RX;
		}

		/* If the media type changed, reload the per-media link
		 * defaults.
		 */
		old_port = bp->phy_port;
		if (msg & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		if (old_port != bp->phy_port)
			bnx2_set_default_link(bp);

	}
	if (bp->link_up != link_up)
		bnx2_report_link(bp);

	bnx2_set_mac_link(bp);
}
2057
2058 static int
2059 bnx2_set_remote_link(struct bnx2 *bp)
2060 {
2061         u32 evt_code;
2062
2063         evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
2064         switch (evt_code) {
2065                 case BNX2_FW_EVT_CODE_LINK_EVENT:
2066                         bnx2_remote_phy_event(bp);
2067                         break;
2068                 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
2069                 default:
2070                         bnx2_send_heart_beat(bp);
2071                         break;
2072         }
2073         return 0;
2074 }
2075
/* Configure the copper (TP) PHY from bp->autoneg and the requested
 * speed/duplex/advertising settings.
 *
 * In autoneg mode the 10/100 and 1000BASE-T advertisement registers
 * are rewritten and autonegotiation restarted only when something
 * actually changed.  In forced mode the advertisement is cleared and
 * BMCR is written with the requested speed/duplex, bouncing the link
 * if it was up so the partner notices.  Called with bp->phy_lock
 * held; the lock is dropped around the forced link-down delay.
 * Always returns 0.
 */
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 bmcr, adv_reg, new_adv = 0;
	u32 new_bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	/* Keep only the advertisement bits this driver manages. */
	bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
	adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
		    ADVERTISE_PAUSE_ASYM);

	new_adv = ADVERTISE_CSMA | ethtool_adv_to_mii_adv_t(bp->advertising);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv1000_reg;
		u32 new_adv1000 = 0;

		new_adv |= bnx2_phy_get_pause_adv(bp);

		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		new_adv1000 |= ethtool_adv_to_mii_ctrl1000_t(bp->advertising);
		/* Restart autonegotiation only if the advertisement
		 * changed or autoneg was previously disabled.
		 */
		if ((adv1000_reg != new_adv1000) ||
			(adv_reg != new_adv) ||
			((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, bp->mii_adv, new_adv);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000);
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
				BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* advertise nothing when forcing speed */
	if (adv_reg != new_adv)
		bnx2_write_phy(bp, bp->mii_adv, new_adv);

	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;

		/* BMSR link status is latched-low; read it twice to get
		 * the current state.
		 */
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(50);
			spin_lock_bh(&bp->phy_lock);

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		}

		bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}
	return 0;
}
2167
2168 static int
2169 bnx2_setup_phy(struct bnx2 *bp, u8 port)
2170 __releases(&bp->phy_lock)
2171 __acquires(&bp->phy_lock)
2172 {
2173         if (bp->loopback == MAC_LOOPBACK)
2174                 return 0;
2175
2176         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2177                 return bnx2_setup_serdes_phy(bp, port);
2178         }
2179         else {
2180                 return bnx2_setup_copper_phy(bp);
2181         }
2182 }
2183
/* One-time initialization of the 5709 SerDes PHY.
 *
 * The 5709S exposes the IEEE-standard MII registers at an offset of
 * +0x10 within its AN MMD block, so the bp->mii_* shortcuts are
 * repointed first.  The SerDes digital, over-1G, BAM next-page and
 * CL73 user blocks are then programmed in sequence; block selection
 * is via the BLK_ADDR register, so the write order matters.
 * Returns 0.
 */
static int
bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	/* IEEE registers live at +0x10 in this PHY's register map. */
	bp->mii_bmcr = MII_BMCR + 0x10;
	bp->mii_bmsr = MII_BMSR + 0x10;
	bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
	bp->mii_adv = MII_ADVERTISE + 0x10;
	bp->mii_lpa = MII_LPA + 0x10;
	bp->mii_up1 = MII_BNX2_OVER1G_UP1;

	/* Select the AN MMD through the AER block. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
	bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
	if (reset_phy)
		bnx2_reset_phy(bp);

	/* Force fibre mode, no auto-detection. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);

	bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
	val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
	val |= MII_BNX2_SD_1000XCTL1_FIBER;
	bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);

	/* Advertise 2.5G only when the device is capable of it. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
	bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
		val |= BCM5708S_UP1_2G5;
	else
		val &= ~BCM5708S_UP1_2G5;
	bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
	bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
	val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
	bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);

	val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
	      MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
	bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);

	/* Leave the block address pointing at the IEEE combo block. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return 0;
}
2233
/* One-time initialization of the 5708 SerDes PHY.
 *
 * Selects fibre mode with auto-detection, enables PLL early-detect,
 * optionally advertises 2.5G, raises the TX signal amplitude on
 * A0/B0/B1 silicon, and programs the backplane TX control value from
 * NVRAM configuration when one is present.  Returns 0.
 */
static int
bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	if (reset_phy)
		bnx2_reset_phy(bp);

	bp->mii_up1 = BCM5708S_UP1;

	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

	if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0) ||
	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B0) ||
	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B1)) {
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

	/* NVRAM can supply a board-specific TX control value; apply it
	 * only on backplane designs.
	 */
	val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	if (val) {
		u32 is_backplane;

		is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
	return 0;
}
2291
/* One-time initialization of the 5706 SerDes PHY.
 *
 * Adjusts vendor shadow/expansion registers (0x18/0x1c) for jumbo
 * vs. standard MTU.  NOTE(review): the magic register values are
 * Broadcom-specified and not publicly documented; do not alter them
 * without vendor guidance.  Returns 0.
 */
static int
bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
{
	if (reset_phy)
		bnx2_reset_phy(bp);

	bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;

	if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
		BNX2_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);

	if (bp->dev->mtu > 1500) {
		u32 val;

		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
	}
	else {
		u32 val;

		/* Standard MTU: clear the extended-length settings. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
	}

	return 0;
}
2329
/* One-time initialization of the copper PHY.
 *
 * Applies the CRC-fix DSP write sequence when flagged, disables early
 * DAC when flagged, sets/clears the extended packet length bits for
 * jumbo vs. standard MTU, and enables ethernet@wirespeed (plus
 * auto-MDIX on the 5709).  NOTE(review): the raw register numbers
 * (0x10/0x15/0x17/0x18) are vendor shadow registers; values come from
 * Broadcom and are not publicly documented.  Returns 0.
 */
static int
bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	if (reset_phy)
		bnx2_reset_phy(bp);

	if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
		/* DSP workaround sequence for the CRC erratum. */
		bnx2_write_phy(bp, 0x18, 0x0c00);
		bnx2_write_phy(bp, 0x17, 0x000a);
		bnx2_write_phy(bp, 0x15, 0x310b);
		bnx2_write_phy(bp, 0x17, 0x201f);
		bnx2_write_phy(bp, 0x15, 0x9506);
		bnx2_write_phy(bp, 0x17, 0x401f);
		bnx2_write_phy(bp, 0x15, 0x14e2);
		bnx2_write_phy(bp, 0x18, 0x0400);
	}

	if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
		bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
			       MII_BNX2_DSP_EXPAND_REG | 0x8);
		bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
		val &= ~(1 << 8);
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
	}

	if (bp->dev->mtu > 1500) {
		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val | 0x4000);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val | 0x1);
	}
	else {
		/* Standard MTU: clear the extended-length settings. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val & ~0x1);
	}

	/* ethernet@wirespeed */
	bnx2_write_phy(bp, MII_BNX2_AUX_CTL, AUX_CTL_MISC_CTL);
	bnx2_read_phy(bp, MII_BNX2_AUX_CTL, &val);
	val |=  AUX_CTL_MISC_CTL_WR | AUX_CTL_MISC_CTL_WIRESPEED;

	/* auto-mdix */
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
		val |=  AUX_CTL_MISC_CTL_AUTOMDIX;

	bnx2_write_phy(bp, MII_BNX2_AUX_CTL, val);
	return 0;
}
2387
2388
/* (Re)initialize the PHY and set up the link.
 *
 * Points the bp->mii_* register shortcuts at the IEEE defaults,
 * enables the EMAC link attention, reads the PHY ID, and dispatches
 * to the chip-specific SerDes or copper init routine.  Firmware-
 * managed (remote) PHYs skip the local init entirely.  Finishes by
 * calling bnx2_setup_phy().  Called with bp->phy_lock held (the
 * setup callees drop and re-take it).  Returns 0 on success or the
 * status of the failing init helper.
 */
static int
bnx2_init_phy(struct bnx2 *bp, int reset_phy)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 val;
	int rc = 0;

	bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
	bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;

	/* Default IEEE MII register map; chip-specific init below may
	 * repoint these (e.g. the 5709S uses offset registers).
	 */
	bp->mii_bmcr = MII_BMCR;
	bp->mii_bmsr = MII_BMSR;
	bp->mii_bmsr1 = MII_BMSR;
	bp->mii_adv = MII_ADVERTISE;
	bp->mii_lpa = MII_LPA;

	BNX2_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		goto setup_phy;

	/* PHY ID is split across two 16-bit registers. */
	bnx2_read_phy(bp, MII_PHYSID1, &val);
	bp->phy_id = val << 16;
	bnx2_read_phy(bp, MII_PHYSID2, &val);
	bp->phy_id |= val & 0xffff;

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
			rc = bnx2_init_5706s_phy(bp, reset_phy);
		else if (BNX2_CHIP(bp) == BNX2_CHIP_5708)
			rc = bnx2_init_5708s_phy(bp, reset_phy);
		else if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
			rc = bnx2_init_5709s_phy(bp, reset_phy);
	}
	else {
		rc = bnx2_init_copper_phy(bp, reset_phy);
	}

setup_phy:
	if (!rc)
		rc = bnx2_setup_phy(bp, bp->phy_port);

	return rc;
}
2434
2435 static int
2436 bnx2_set_mac_loopback(struct bnx2 *bp)
2437 {
2438         u32 mac_mode;
2439
2440         mac_mode = BNX2_RD(bp, BNX2_EMAC_MODE);
2441         mac_mode &= ~BNX2_EMAC_MODE_PORT;
2442         mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2443         BNX2_WR(bp, BNX2_EMAC_MODE, mac_mode);
2444         bp->link_up = 1;
2445         return 0;
2446 }
2447
2448 static int bnx2_test_link(struct bnx2 *);
2449
/* Enable PHY-level loopback at forced 1000/full for self-test.
 *
 * Writes BMCR under phy_lock, polls up to ~1 second for the loopback
 * link to come up, then programs the MAC for GMII with the MAC
 * loopback and force-link bits cleared (the loopback happens in the
 * PHY).  Returns 0 on success or the BMCR write status.
 */
static int
bnx2_set_phy_loopback(struct bnx2 *bp)
{
	u32 mac_mode;
	int rc, i;

	spin_lock_bh(&bp->phy_lock);
	rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
			    BMCR_SPEED1000);
	spin_unlock_bh(&bp->phy_lock);
	if (rc)
		return rc;

	/* Give the PHY up to 10 x 100ms to report link. */
	for (i = 0; i < 10; i++) {
		if (bnx2_test_link(bp) == 0)
			break;
		msleep(100);
	}

	mac_mode = BNX2_RD(bp, BNX2_EMAC_MODE);
	mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		      BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		      BNX2_EMAC_MODE_25G_MODE);

	mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
	BNX2_WR(bp, BNX2_EMAC_MODE, mac_mode);
	bp->link_up = 1;
	return 0;
}
2479
/* Dump management CPU (MCP) and shared-memory state to the kernel log
 * to aid firmware debugging (typically after a bnx2_fw_sync timeout).
 * The MCP state register locations differ on the 5709.
 */
static void
bnx2_dump_mcp_state(struct bnx2 *bp)
{
	struct net_device *dev = bp->dev;
	u32 mcp_p0, mcp_p1;

	netdev_err(dev, "<--- start MCP states dump --->\n");
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		mcp_p0 = BNX2_MCP_STATE_P0;
		mcp_p1 = BNX2_MCP_STATE_P1;
	} else {
		mcp_p0 = BNX2_MCP_STATE_P0_5708;
		mcp_p1 = BNX2_MCP_STATE_P1_5708;
	}
	netdev_err(dev, "DEBUG: MCP_STATE_P0[%08x] MCP_STATE_P1[%08x]\n",
		   bnx2_reg_rd_ind(bp, mcp_p0), bnx2_reg_rd_ind(bp, mcp_p1));
	netdev_err(dev, "DEBUG: MCP mode[%08x] state[%08x] evt_mask[%08x]\n",
		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_MODE),
		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_STATE),
		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_EVENT_MASK));
	/* The program counter is deliberately sampled twice --
	 * presumably so the log shows whether the MCP is advancing;
	 * confirm against bootcode documentation.
	 */
	netdev_err(dev, "DEBUG: pc[%08x] pc[%08x] instr[%08x]\n",
		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_PROGRAM_COUNTER),
		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_PROGRAM_COUNTER),
		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_INSTRUCTION));
	netdev_err(dev, "DEBUG: shmem states:\n");
	netdev_err(dev, "DEBUG: drv_mb[%08x] fw_mb[%08x] link_status[%08x]",
		   bnx2_shmem_rd(bp, BNX2_DRV_MB),
		   bnx2_shmem_rd(bp, BNX2_FW_MB),
		   bnx2_shmem_rd(bp, BNX2_LINK_STATUS));
	pr_cont(" drv_pulse_mb[%08x]\n", bnx2_shmem_rd(bp, BNX2_DRV_PULSE_MB));
	netdev_err(dev, "DEBUG: dev_info_signature[%08x] reset_type[%08x]",
		   bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE),
		   bnx2_shmem_rd(bp, BNX2_BC_STATE_RESET_TYPE));
	pr_cont(" condition[%08x]\n",
		bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION));
	DP_SHMEM_LINE(bp, BNX2_BC_RESET_TYPE);
	DP_SHMEM_LINE(bp, 0x3cc);
	DP_SHMEM_LINE(bp, 0x3dc);
	DP_SHMEM_LINE(bp, 0x3ec);
	netdev_err(dev, "DEBUG: 0x3fc[%08x]\n", bnx2_shmem_rd(bp, 0x3fc));
	netdev_err(dev, "<--- end MCP states dump --->\n");
}
2522
/* Send a message to the bootcode firmware through the shared-memory
 * driver mailbox and optionally wait for its acknowledgement.
 *
 * @msg_data: message code/data; a fresh sequence number is OR'ed in
 *            so the ack can be matched to this request.
 * @ack:      when zero, fire-and-forget (return 0 immediately).
 * @silent:   suppress the error message and MCP dump on timeout.
 *
 * Returns 0 on success, -EBUSY if the firmware did not acknowledge
 * within BNX2_FW_ACK_TIME_OUT_MS, or -EIO if the acknowledged status
 * is not BNX2_FW_MSG_STATUS_OK.
 */
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
{
	int i;
	u32 val;

	bp->fw_wr_seq++;
	msg_data |= bp->fw_wr_seq;
	bp->fw_last_msg = msg_data;

	bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);

	if (!ack)
		return 0;

	/* wait for an acknowledgement. */
	for (i = 0; i < (BNX2_FW_ACK_TIME_OUT_MS / 10); i++) {
		msleep(10);

		val = bnx2_shmem_rd(bp, BNX2_FW_MB);

		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
			break;
	}
	/* WAIT0 messages succeed without checking the ack status. */
	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
		return 0;

	/* If we timed out, inform the firmware that this is the case. */
	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
		msg_data &= ~BNX2_DRV_MSG_CODE;
		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

		bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
		if (!silent) {
			pr_err("fw sync timeout, reset code = %x\n", msg_data);
			bnx2_dump_mcp_state(bp);
		}

		return -EBUSY;
	}

	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
		return -EIO;

	return 0;
}
2569
/* Initialize 5709 context memory: kick the hardware context-memory
 * init, then load the host page table with the bus address of every
 * pre-allocated context block (bp->ctx_blk[]), zeroing each block.
 *
 * Returns 0 on success, -EBUSY if a hardware operation does not
 * complete in time, or -ENOMEM if a context block was never
 * allocated.
 */
static int
bnx2_init_5709_context(struct bnx2 *bp)
{
	int i, ret = 0;
	u32 val;

	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
	val |= (BNX2_PAGE_BITS - 8) << 16;
	BNX2_WR(bp, BNX2_CTX_COMMAND, val);
	/* MEM_INIT self-clears when the init is done; poll for it. */
	for (i = 0; i < 10; i++) {
		val = BNX2_RD(bp, BNX2_CTX_COMMAND);
		if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
			break;
		udelay(2);
	}
	if (val & BNX2_CTX_COMMAND_MEM_INIT)
		return -EBUSY;

	for (i = 0; i < bp->ctx_pages; i++) {
		int j;

		if (bp->ctx_blk[i])
			memset(bp->ctx_blk[i], 0, BNX2_PAGE_SIZE);
		else
			return -ENOMEM;

		/* Write the 64-bit block address into page-table entry i
		 * (low word + valid bit, then high word, then the write
		 * request for that entry).
		 */
		BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
			(bp->ctx_blk_mapping[i] & 0xffffffff) |
			BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
		BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
			(u64) bp->ctx_blk_mapping[i] >> 32);
		BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
			BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
		/* WRITE_REQ clears once the hardware consumes the entry. */
		for (j = 0; j < 10; j++) {

			val = BNX2_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
				break;
			udelay(5);
		}
		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}
2617
2618 static void
2619 bnx2_init_context(struct bnx2 *bp)
2620 {
2621         u32 vcid;
2622
2623         vcid = 96;
2624         while (vcid) {
2625                 u32 vcid_addr, pcid_addr, offset;
2626                 int i;
2627
2628                 vcid--;
2629
2630                 if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
2631                         u32 new_vcid;
2632
2633                         vcid_addr = GET_PCID_ADDR(vcid);
2634                         if (vcid & 0x8) {
2635                                 new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
2636                         }
2637                         else {
2638                                 new_vcid = vcid;
2639                         }
2640                         pcid_addr = GET_PCID_ADDR(new_vcid);
2641                 }
2642                 else {
2643                         vcid_addr = GET_CID_ADDR(vcid);
2644                         pcid_addr = vcid_addr;
2645                 }
2646
2647                 for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
2648                         vcid_addr += (i << PHY_CTX_SHIFT);
2649                         pcid_addr += (i << PHY_CTX_SHIFT);
2650
2651                         BNX2_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
2652                         BNX2_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
2653
2654                         /* Zero out the context. */
2655                         for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
2656                                 bnx2_ctx_wr(bp, vcid_addr, offset, 0);
2657                 }
2658         }
2659 }
2660
2661 static int
2662 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
2663 {
2664         u16 *good_mbuf;
2665         u32 good_mbuf_cnt;
2666         u32 val;
2667
2668         good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
2669         if (good_mbuf == NULL)
2670                 return -ENOMEM;
2671
2672         BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2673                 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
2674
2675         good_mbuf_cnt = 0;
2676
2677         /* Allocate a bunch of mbufs and save the good ones in an array. */
2678         val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2679         while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
2680                 bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
2681                                 BNX2_RBUF_COMMAND_ALLOC_REQ);
2682
2683                 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);
2684
2685                 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
2686
2687                 /* The addresses with Bit 9 set are bad memory blocks. */
2688                 if (!(val & (1 << 9))) {
2689                         good_mbuf[good_mbuf_cnt] = (u16) val;
2690                         good_mbuf_cnt++;
2691                 }
2692
2693                 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2694         }
2695
2696         /* Free the good ones back to the mbuf pool thus discarding
2697          * all the bad ones. */
2698         while (good_mbuf_cnt) {
2699                 good_mbuf_cnt--;
2700
2701                 val = good_mbuf[good_mbuf_cnt];
2702                 val = (val << 9) | val | 1;
2703
2704                 bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
2705         }
2706         kfree(good_mbuf);
2707         return 0;
2708 }
2709
2710 static void
2711 bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos)
2712 {
2713         u32 val;
2714
2715         val = (mac_addr[0] << 8) | mac_addr[1];
2716
2717         BNX2_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val);
2718
2719         val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2720                 (mac_addr[4] << 8) | mac_addr[5];
2721
2722         BNX2_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val);
2723 }
2724
2725 static inline int
2726 bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
2727 {
2728         dma_addr_t mapping;
2729         struct bnx2_sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2730         struct bnx2_rx_bd *rxbd =
2731                 &rxr->rx_pg_desc_ring[BNX2_RX_RING(index)][BNX2_RX_IDX(index)];
2732         struct page *page = alloc_page(gfp);
2733
2734         if (!page)
2735                 return -ENOMEM;
2736         mapping = dma_map_page(&bp->pdev->dev, page, 0, PAGE_SIZE,
2737                                PCI_DMA_FROMDEVICE);
2738         if (dma_mapping_error(&bp->pdev->dev, mapping)) {
2739                 __free_page(page);
2740                 return -EIO;
2741         }
2742
2743         rx_pg->page = page;
2744         dma_unmap_addr_set(rx_pg, mapping, mapping);
2745         rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2746         rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2747         return 0;
2748 }
2749
2750 static void
2751 bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2752 {
2753         struct bnx2_sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2754         struct page *page = rx_pg->page;
2755
2756         if (!page)
2757                 return;
2758
2759         dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(rx_pg, mapping),
2760                        PAGE_SIZE, PCI_DMA_FROMDEVICE);
2761
2762         __free_page(page);
2763         rx_pg->page = NULL;
2764 }
2765
2766 static inline int
2767 bnx2_alloc_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
2768 {
2769         u8 *data;
2770         struct bnx2_sw_bd *rx_buf = &rxr->rx_buf_ring[index];
2771         dma_addr_t mapping;
2772         struct bnx2_rx_bd *rxbd =
2773                 &rxr->rx_desc_ring[BNX2_RX_RING(index)][BNX2_RX_IDX(index)];
2774
2775         data = kmalloc(bp->rx_buf_size, gfp);
2776         if (!data)
2777                 return -ENOMEM;
2778
2779         mapping = dma_map_single(&bp->pdev->dev,
2780                                  get_l2_fhdr(data),
2781                                  bp->rx_buf_use_size,
2782                                  PCI_DMA_FROMDEVICE);
2783         if (dma_mapping_error(&bp->pdev->dev, mapping)) {
2784                 kfree(data);
2785                 return -EIO;
2786         }
2787
2788         rx_buf->data = data;
2789         dma_unmap_addr_set(rx_buf, mapping, mapping);
2790
2791         rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2792         rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2793
2794         rxr->rx_prod_bseq += bp->rx_buf_use_size;
2795
2796         return 0;
2797 }
2798
2799 static int
2800 bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
2801 {
2802         struct status_block *sblk = bnapi->status_blk.msi;
2803         u32 new_link_state, old_link_state;
2804         int is_set = 1;
2805
2806         new_link_state = sblk->status_attn_bits & event;
2807         old_link_state = sblk->status_attn_bits_ack & event;
2808         if (new_link_state != old_link_state) {
2809                 if (new_link_state)
2810                         BNX2_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2811                 else
2812                         BNX2_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2813         } else
2814                 is_set = 0;
2815
2816         return is_set;
2817 }
2818
/* Handle PHY attention events under the PHY lock: a link-state
 * change, or a timer-abort event signalling a remote (firmware
 * driven) link change.
 */
static void
bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	spin_lock(&bp->phy_lock);

	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
		bnx2_set_link(bp);
	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
		bnx2_set_remote_link(bp);

	spin_unlock(&bp->phy_lock);

}
2832
/* Read the hardware TX consumer index from the status block.
 *
 * When the raw index lands exactly on the end-of-page marker entry
 * it is bumped past it, since that descriptor slot never carries a
 * packet.
 */
static inline u16
bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
{
	u16 cons;

	/* Tell compiler that status block fields can change. */
	barrier();
	cons = *bnapi->hw_tx_cons_ptr;
	barrier();
	if (unlikely((cons & BNX2_MAX_TX_DESC_CNT) == BNX2_MAX_TX_DESC_CNT))
		cons++;
	return cons;
}
2846
/* Service TX completions on @bnapi's ring, reaping at most @budget
 * packets: unmap and free each completed skb, update BQL accounting,
 * and wake the TX queue if it was stopped and enough descriptors are
 * free again.  Returns the number of packets reaped.
 */
static int
bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
	u16 hw_cons, sw_cons, sw_ring_cons;
	int tx_pkt = 0, index;
	unsigned int tx_bytes = 0;
	struct netdev_queue *txq;

	/* Each bnapi instance services the TX queue of the same index. */
	index = (bnapi - bp->bnx2_napi);
	txq = netdev_get_tx_queue(bp->dev, index);

	hw_cons = bnx2_get_hw_tx_cons(bnapi);
	sw_cons = txr->tx_cons;

	while (sw_cons != hw_cons) {
		struct bnx2_sw_tx_bd *tx_buf;
		struct sk_buff *skb;
		int i, last;

		sw_ring_cons = BNX2_TX_RING_IDX(sw_cons);

		tx_buf = &txr->tx_buf_ring[sw_ring_cons];
		skb = tx_buf->skb;

		/* prefetch skb_end_pointer() to speedup skb_shinfo(skb) */
		prefetch(&skb->end);

		/* partial BD completions possible with TSO packets */
		if (tx_buf->is_gso) {
			u16 last_idx, last_ring_idx;

			last_idx = sw_cons + tx_buf->nr_frags + 1;
			last_ring_idx = sw_ring_cons + tx_buf->nr_frags + 1;
			/* Account for the unused end-of-page marker slot
			 * when the packet's BDs wrap past it.
			 */
			if (unlikely(last_ring_idx >= BNX2_MAX_TX_DESC_CNT)) {
				last_idx++;
			}
			/* Defer this packet until all of its BDs have
			 * completed (signed delta handles index wrap).
			 */
			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
				break;
			}
		}

		/* Unmap the linear part, then each fragment page. */
		dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;
		last = tx_buf->nr_frags;

		for (i = 0; i < last; i++) {
			struct bnx2_sw_tx_bd *tx_buf;

			sw_cons = BNX2_NEXT_TX_BD(sw_cons);

			tx_buf = &txr->tx_buf_ring[BNX2_TX_RING_IDX(sw_cons)];
			dma_unmap_page(&bp->pdev->dev,
				dma_unmap_addr(tx_buf, mapping),
				skb_frag_size(&skb_shinfo(skb)->frags[i]),
				PCI_DMA_TODEVICE);
		}

		sw_cons = BNX2_NEXT_TX_BD(sw_cons);

		tx_bytes += skb->len;
		dev_kfree_skb_any(skb);
		tx_pkt++;
		if (tx_pkt == budget)
			break;

		/* Re-read the hardware index to pick up completions that
		 * arrived while we were reaping.
		 */
		if (hw_cons == sw_cons)
			hw_cons = bnx2_get_hw_tx_cons(bnapi);
	}

	netdev_tx_completed_queue(txq, tx_pkt, tx_bytes);
	txr->hw_tx_cons = hw_cons;
	txr->tx_cons = sw_cons;

	/* Need to make the tx_cons update visible to bnx2_start_xmit()
	 * before checking for netif_tx_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnx2_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq)) &&
		     (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
		__netif_tx_lock(txq, smp_processor_id());
		/* Re-check under the queue lock to avoid racing with the
		 * transmit path stopping the queue again.
		 */
		if ((netif_tx_queue_stopped(txq)) &&
		    (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}

	return tx_pkt;
}
2941
/* Recycle @count RX page-ring entries from the consumer side back to
 * the producer side without allocating new pages.  If @skb is
 * non-NULL, its last fragment page could not be replaced: strip that
 * page back into the ring and free the skb.
 */
static void
bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
			struct sk_buff *skb, int count)
{
	struct bnx2_sw_pg *cons_rx_pg, *prod_rx_pg;
	struct bnx2_rx_bd *cons_bd, *prod_bd;
	int i;
	u16 hw_prod, prod;
	u16 cons = rxr->rx_pg_cons;

	cons_rx_pg = &rxr->rx_pg_ring[cons];

	/* The caller was unable to allocate a new page to replace the
	 * last one in the frags array, so we need to recycle that page
	 * and then free the skb.
	 */
	if (skb) {
		struct page *page;
		struct skb_shared_info *shinfo;

		shinfo = skb_shinfo(skb);
		shinfo->nr_frags--;
		page = skb_frag_page(&shinfo->frags[shinfo->nr_frags]);
		__skb_frag_set_page(&shinfo->frags[shinfo->nr_frags], NULL);

		cons_rx_pg->page = page;
		dev_kfree_skb(skb);
	}

	hw_prod = rxr->rx_pg_prod;

	for (i = 0; i < count; i++) {
		prod = BNX2_RX_PG_RING_IDX(hw_prod);

		prod_rx_pg = &rxr->rx_pg_ring[prod];
		cons_rx_pg = &rxr->rx_pg_ring[cons];
		cons_bd = &rxr->rx_pg_desc_ring[BNX2_RX_RING(cons)]
						[BNX2_RX_IDX(cons)];
		prod_bd = &rxr->rx_pg_desc_ring[BNX2_RX_RING(prod)]
						[BNX2_RX_IDX(prod)];

		/* Move the page plus its DMA address and descriptor
		 * address to the producer slot; when prod == cons it is
		 * already in place.
		 */
		if (prod != cons) {
			prod_rx_pg->page = cons_rx_pg->page;
			cons_rx_pg->page = NULL;
			dma_unmap_addr_set(prod_rx_pg, mapping,
				dma_unmap_addr(cons_rx_pg, mapping));

			prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
			prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;

		}
		cons = BNX2_RX_PG_RING_IDX(BNX2_NEXT_RX_BD(cons));
		hw_prod = BNX2_NEXT_RX_BD(hw_prod);
	}
	rxr->rx_pg_prod = hw_prod;
	rxr->rx_pg_cons = cons;
}
2999
/* Recycle RX data buffer @data from consumer slot @cons into
 * producer slot @prod (used when a packet is dropped or was copied).
 * The DMA mapping and descriptor address move with it unless the
 * slots coincide.
 */
static inline void
bnx2_reuse_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
		   u8 *data, u16 cons, u16 prod)
{
	struct bnx2_sw_bd *cons_rx_buf, *prod_rx_buf;
	struct bnx2_rx_bd *cons_bd, *prod_bd;

	cons_rx_buf = &rxr->rx_buf_ring[cons];
	prod_rx_buf = &rxr->rx_buf_ring[prod];

	/* Hand the CPU-synced header area back to the device before
	 * the buffer is reposted.
	 */
	dma_sync_single_for_device(&bp->pdev->dev,
		dma_unmap_addr(cons_rx_buf, mapping),
		BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	rxr->rx_prod_bseq += bp->rx_buf_use_size;

	prod_rx_buf->data = data;

	if (cons == prod)
		return;

	dma_unmap_addr_set(prod_rx_buf, mapping,
			dma_unmap_addr(cons_rx_buf, mapping));

	cons_bd = &rxr->rx_desc_ring[BNX2_RX_RING(cons)][BNX2_RX_IDX(cons)];
	prod_bd = &rxr->rx_desc_ring[BNX2_RX_RING(prod)][BNX2_RX_IDX(prod)];
	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}
3029
/* Build an skb for a received frame too large to copy.
 *
 * @data:     RX buffer holding the l2_fhdr and the head of the frame
 *            (up to @hdr_len bytes when the page ring is in use).
 * @len:      frame length, already reduced by the 4 trailing CRC
 *            bytes by the caller.
 * @hdr_len:  0 for a fully linear frame; otherwise the number of
 *            head bytes in @data, the remainder being in page-ring
 *            pages.
 * @ring_idx: producer index in the low 16 bits, consumer index in
 *            the high 16 bits.
 *
 * A replacement buffer is allocated before @data is consumed; on any
 * failure the old buffer (and any frame pages) are recycled and NULL
 * is returned.  Returns the assembled skb on success.
 */
static struct sk_buff *
bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u8 *data,
	    unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
	    u32 ring_idx)
{
	int err;
	u16 prod = ring_idx & 0xffff;
	struct sk_buff *skb;

	err = bnx2_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
	if (unlikely(err)) {
		bnx2_reuse_rx_data(bp, rxr, data, (u16) (ring_idx >> 16), prod);
error:
		/* For a split frame, also recycle its page-ring pages
		 * (raw_len restores the 4 CRC bytes the caller stripped).
		 */
		if (hdr_len) {
			unsigned int raw_len = len + 4;
			int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;

			bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
		}
		return NULL;
	}

	dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
			 PCI_DMA_FROMDEVICE);
	skb = build_skb(data, 0);
	if (!skb) {
		kfree(data);
		goto error;
	}
	/* Point skb->data just past the l2_fhdr and RX offset padding. */
	skb_reserve(skb, ((u8 *)get_l2_fhdr(data) - data) + BNX2_RX_OFFSET);
	if (hdr_len == 0) {
		skb_put(skb, len);
		return skb;
	} else {
		unsigned int i, frag_len, frag_size, pages;
		struct bnx2_sw_pg *rx_pg;
		u16 pg_cons = rxr->rx_pg_cons;
		u16 pg_prod = rxr->rx_pg_prod;

		/* Attach the rest of the frame as page fragments.
		 * frag_size includes the 4 CRC bytes, which are trimmed
		 * from the final fragment below.
		 */
		frag_size = len + 4 - hdr_len;
		pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
		skb_put(skb, hdr_len);

		for (i = 0; i < pages; i++) {
			dma_addr_t mapping_old;

			frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
			/* The remaining bytes are all (part of) the CRC:
			 * trim the tail off what is already attached,
			 * recycle the unused pages, and finish early.
			 */
			if (unlikely(frag_len <= 4)) {
				unsigned int tail = 4 - frag_len;

				rxr->rx_pg_cons = pg_cons;
				rxr->rx_pg_prod = pg_prod;
				bnx2_reuse_rx_skb_pages(bp, rxr, NULL,
							pages - i);
				skb->len -= tail;
				if (i == 0) {
					skb->tail -= tail;
				} else {
					skb_frag_t *frag =
						&skb_shinfo(skb)->frags[i - 1];
					skb_frag_size_sub(frag, tail);
					skb->data_len -= tail;
				}
				return skb;
			}
			rx_pg = &rxr->rx_pg_ring[pg_cons];

			/* Don't unmap yet.  If we're unable to allocate a new
			 * page, we need to recycle the page and the DMA addr.
			 */
			mapping_old = dma_unmap_addr(rx_pg, mapping);
			if (i == pages - 1)
				frag_len -= 4;

			skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
			rx_pg->page = NULL;

			err = bnx2_alloc_rx_page(bp, rxr,
						 BNX2_RX_PG_RING_IDX(pg_prod),
						 GFP_ATOMIC);
			if (unlikely(err)) {
				rxr->rx_pg_cons = pg_cons;
				rxr->rx_pg_prod = pg_prod;
				bnx2_reuse_rx_skb_pages(bp, rxr, skb,
							pages - i);
				return NULL;
			}

			dma_unmap_page(&bp->pdev->dev, mapping_old,
				       PAGE_SIZE, PCI_DMA_FROMDEVICE);

			frag_size -= frag_len;
			skb->data_len += frag_len;
			skb->truesize += PAGE_SIZE;
			skb->len += frag_len;

			pg_prod = BNX2_NEXT_RX_BD(pg_prod);
			pg_cons = BNX2_RX_PG_RING_IDX(BNX2_NEXT_RX_BD(pg_cons));
		}
		rxr->rx_pg_prod = pg_prod;
		rxr->rx_pg_cons = pg_cons;
	}
	return skb;
}
3134
/* Read the hardware RX consumer index from the status block,
 * skipping over the unused end-of-page marker entry (RX-side
 * counterpart of bnx2_get_hw_tx_cons()).
 */
static inline u16
bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
{
	u16 cons;

	/* Tell compiler that status block fields can change. */
	barrier();
	cons = *bnapi->hw_rx_cons_ptr;
	barrier();
	if (unlikely((cons & BNX2_MAX_RX_DESC_CNT) == BNX2_MAX_RX_DESC_CNT))
		cons++;
	return cons;
}
3148
/* NAPI RX handler: process up to @budget received packets on
 * @bnapi's RX ring.
 *
 * For each completed descriptor, the l2_fhdr at the front of the
 * buffer is examined: errored frames are dropped with their buffers
 * recycled, small frames are copied into a freshly allocated skb,
 * and large frames are handed to bnx2_rx_skb().  VLAN tag, checksum
 * and RX hash offload results are applied before the skb goes to
 * napi_gro_receive().  Finally the producer indices are written back
 * to the chip.  Returns the number of packets processed.
 */
static int
bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
	struct l2_fhdr *rx_hdr;
	int rx_pkt = 0, pg_ring_used = 0;

	if (budget <= 0)
		return rx_pkt;

	hw_cons = bnx2_get_hw_rx_cons(bnapi);
	sw_cons = rxr->rx_cons;
	sw_prod = rxr->rx_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();
	while (sw_cons != hw_cons) {
		unsigned int len, hdr_len;
		u32 status;
		struct bnx2_sw_bd *rx_buf, *next_rx_buf;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u8 *data;
		u16 next_ring_idx;

		sw_ring_cons = BNX2_RX_RING_IDX(sw_cons);
		sw_ring_prod = BNX2_RX_RING_IDX(sw_prod);

		rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
		data = rx_buf->data;
		rx_buf->data = NULL;

		rx_hdr = get_l2_fhdr(data);
		prefetch(rx_hdr);

		dma_addr = dma_unmap_addr(rx_buf, mapping);

		/* Sync only the header area to the CPU; the full buffer
		 * is unmapped later if the frame is kept.
		 */
		dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr,
			BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
			PCI_DMA_FROMDEVICE);

		next_ring_idx = BNX2_RX_RING_IDX(BNX2_NEXT_RX_BD(sw_cons));
		next_rx_buf = &rxr->rx_buf_ring[next_ring_idx];
		prefetch(get_l2_fhdr(next_rx_buf->data));

		len = rx_hdr->l2_fhdr_pkt_len;
		status = rx_hdr->l2_fhdr_status;

		/* Determine how much of the frame is in this buffer vs.
		 * the page ring (split frames and jumbo frames).
		 */
		hdr_len = 0;
		if (status & L2_FHDR_STATUS_SPLIT) {
			hdr_len = rx_hdr->l2_fhdr_ip_xsum;
			pg_ring_used = 1;
		} else if (len > bp->rx_jumbo_thresh) {
			hdr_len = bp->rx_jumbo_thresh;
			pg_ring_used = 1;
		}

		if (unlikely(status & (L2_FHDR_ERRORS_BAD_CRC |
				       L2_FHDR_ERRORS_PHY_DECODE |
				       L2_FHDR_ERRORS_ALIGNMENT |
				       L2_FHDR_ERRORS_TOO_SHORT |
				       L2_FHDR_ERRORS_GIANT_FRAME))) {

			/* Drop the frame; recycle its buffer and pages. */
			bnx2_reuse_rx_data(bp, rxr, data, sw_ring_cons,
					  sw_ring_prod);
			if (pg_ring_used) {
				int pages;

				pages = PAGE_ALIGN(len - hdr_len) >> PAGE_SHIFT;

				bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
			}
			goto next_rx;
		}

		len -= 4;	/* strip the 4 trailing frame-check bytes */

		if (len <= bp->rx_copy_thresh) {
			/* Small frame: copy into a new skb and recycle
			 * the original buffer in place.
			 */
			skb = netdev_alloc_skb(bp->dev, len + 6);
			if (skb == NULL) {
				bnx2_reuse_rx_data(bp, rxr, data, sw_ring_cons,
						  sw_ring_prod);
				goto next_rx;
			}

			/* aligned copy */
			memcpy(skb->data,
			       (u8 *)rx_hdr + BNX2_RX_OFFSET - 6,
			       len + 6);
			skb_reserve(skb, 6);
			skb_put(skb, len);

			bnx2_reuse_rx_data(bp, rxr, data,
				sw_ring_cons, sw_ring_prod);

		} else {
			skb = bnx2_rx_skb(bp, rxr, data, len, hdr_len, dma_addr,
					  (sw_ring_cons << 16) | sw_ring_prod);
			if (!skb)
				goto next_rx;
		}
		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
		    !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rx_hdr->l2_fhdr_vlan_tag);

		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Over-MTU frames are dropped unless VLAN-tagged
		 * (0x8100 is the 802.1Q ethertype), which may
		 * legitimately exceed the MTU.
		 */
		if (len > (bp->dev->mtu + ETH_HLEN) &&
		    skb->protocol != htons(0x8100) &&
		    skb->protocol != htons(ETH_P_8021AD)) {

			dev_kfree_skb(skb);
			goto next_rx;

		}

		/* Apply hardware checksum result, if valid. */
		skb_checksum_none_assert(skb);
		if ((bp->dev->features & NETIF_F_RXCSUM) &&
			(status & (L2_FHDR_STATUS_TCP_SEGMENT |
			L2_FHDR_STATUS_UDP_DATAGRAM))) {

			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
		}
		if ((bp->dev->features & NETIF_F_RXHASH) &&
		    ((status & L2_FHDR_STATUS_USE_RXHASH) ==
		     L2_FHDR_STATUS_USE_RXHASH))
			skb_set_hash(skb, rx_hdr->l2_fhdr_hash,
				     PKT_HASH_TYPE_L3);

		skb_record_rx_queue(skb, bnapi - &bp->bnx2_napi[0]);
		napi_gro_receive(&bnapi->napi, skb);
		rx_pkt++;

next_rx:
		sw_cons = BNX2_NEXT_RX_BD(sw_cons);
		sw_prod = BNX2_NEXT_RX_BD(sw_prod);

		if ((rx_pkt == budget))
			break;

		/* Refresh hw_cons to see if there is new work */
		if (sw_cons == hw_cons) {
			hw_cons = bnx2_get_hw_rx_cons(bnapi);
			rmb();
		}
	}
	rxr->rx_cons = sw_cons;
	rxr->rx_prod = sw_prod;

	/* Publish the new producer indices to the chip. */
	if (pg_ring_used)
		BNX2_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);

	BNX2_WR16(bp, rxr->rx_bidx_addr, sw_prod);

	BNX2_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);

	mmiowb();

	return rx_pkt;

}
3315
3316 /* MSI ISR - The only difference between this and the INTx ISR
3317  * is that the MSI interrupt is always serviced.
3318  */
static irqreturn_t
bnx2_msi(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;

	prefetch(bnapi->status_blk.msi);
	/* Ack the interrupt and mask further host-coalescing interrupts;
	 * the NAPI poll routine re-enables them when it completes.
	 */
	BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	napi_schedule(&bnapi->napi);

	return IRQ_HANDLED;
}
3338
3339 static irqreturn_t
3340 bnx2_msi_1shot(int irq, void *dev_instance)
3341 {
3342         struct bnx2_napi *bnapi = dev_instance;
3343         struct bnx2 *bp = bnapi->bp;
3344
3345         prefetch(bnapi->status_blk.msi);
3346
3347         /* Return here if interrupt is disabled. */
3348         if (unlikely(atomic_read(&bp->intr_sem) != 0))
3349                 return IRQ_HANDLED;
3350
3351         napi_schedule(&bnapi->napi);
3352
3353         return IRQ_HANDLED;
3354 }
3355
static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;
	struct status_block *sblk = bnapi->status_blk.msi;

	/* When using INTx, it is possible for the interrupt to arrive
	 * at the CPU before the status block posted prior to the
	 * interrupt. Reading a register will flush the status block.
	 * When using MSI, the MSI message will always complete after
	 * the status block write.
	 */
	if ((sblk->status_idx == bnapi->last_status_idx) &&
	    (BNX2_RD(bp, BNX2_PCICFG_MISC_STATUS) &
	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
		return IRQ_NONE;	/* shared line; not our interrupt */

	/* Ack and mask further HC interrupts until NAPI completes. */
	BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Read back to deassert IRQ immediately to avoid too many
	 * spurious interrupts.
	 */
	BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD);

	/* Return here if interrupt is shared and is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	/* Record the status index being processed so the poll loop can
	 * detect work posted after this point.
	 */
	if (napi_schedule_prep(&bnapi->napi)) {
		bnapi->last_status_idx = sblk->status_idx;
		__napi_schedule(&bnapi->napi);
	}

	return IRQ_HANDLED;
}
3394
3395 static inline int
3396 bnx2_has_fast_work(struct bnx2_napi *bnapi)
3397 {
3398         struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3399         struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3400
3401         if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
3402             (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
3403                 return 1;
3404         return 0;
3405 }
3406
3407 #define STATUS_ATTN_EVENTS      (STATUS_ATTN_BITS_LINK_STATE | \
3408                                  STATUS_ATTN_BITS_TIMER_ABORT)
3409
3410 static inline int
3411 bnx2_has_work(struct bnx2_napi *bnapi)
3412 {
3413         struct status_block *sblk = bnapi->status_blk.msi;
3414
3415         if (bnx2_has_fast_work(bnapi))
3416                 return 1;
3417
3418 #ifdef BCM_CNIC
3419         if (bnapi->cnic_present && (bnapi->cnic_tag != sblk->status_idx))
3420                 return 1;
3421 #endif
3422
3423         if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
3424             (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
3425                 return 1;
3426
3427         return 0;
3428 }
3429
static void
bnx2_chk_missed_msi(struct bnx2 *bp)
{
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	u32 msi_ctrl;

	if (bnx2_has_work(bnapi)) {
		msi_ctrl = BNX2_RD(bp, BNX2_PCICFG_MSI_CONTROL);
		if (!(msi_ctrl & BNX2_PCICFG_MSI_CONTROL_ENABLE))
			return;

		/* Work is pending but the status index has not moved
		 * since the previous idle check: assume an MSI was lost.
		 * Pulse the MSI enable bit off/on and invoke the MSI
		 * handler directly to restart processing.
		 */
		if (bnapi->last_status_idx == bp->idle_chk_status_idx) {
			BNX2_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl &
				~BNX2_PCICFG_MSI_CONTROL_ENABLE);
			BNX2_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl);
			bnx2_msi(bp->irq_tbl[0].vector, bnapi);
		}
	}

	/* Remember where we were for the next idle check. */
	bp->idle_chk_status_idx = bnapi->last_status_idx;
}
3451
#ifdef BCM_CNIC
/* Forward status-block events to the registered CNIC handler, if any,
 * and record the tag it returns so bnx2_has_work() can detect new
 * CNIC events.
 */
static void bnx2_poll_cnic(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	struct cnic_ops *ops;

	if (!bnapi->cnic_present)
		return;

	rcu_read_lock();
	ops = rcu_dereference(bp->cnic_ops);
	if (ops)
		bnapi->cnic_tag = ops->cnic_handler(bp->cnic_data,
						    bnapi->status_blk.msi);
	rcu_read_unlock();
}
#endif
3468
static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	struct status_block *sblk = bnapi->status_blk.msi;
	/* Snapshot both attention words; the hardware updates them
	 * asynchronously via the status block.
	 */
	u32 status_attn_bits = sblk->status_attn_bits;
	u32 status_attn_bits_ack = sblk->status_attn_bits_ack;

	/* An event is pending when raw and acked bits disagree. */
	if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
	    (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {

		bnx2_phy_int(bp, bnapi);

		/* This is needed to take care of transient status
		 * during link changes.
		 */
		BNX2_WR(bp, BNX2_HC_COMMAND,
			bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
		BNX2_RD(bp, BNX2_HC_COMMAND);	/* flush the posted write */
	}
}
3488
3489 static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
3490                           int work_done, int budget)
3491 {
3492         struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3493         struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3494
3495         if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
3496                 bnx2_tx_int(bp, bnapi, 0);
3497
3498         if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons)
3499                 work_done += bnx2_rx_int(bp, bnapi, budget - work_done);
3500
3501         return work_done;
3502 }
3503
static int bnx2_poll_msix(struct napi_struct *napi, int budget)
{
	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
	struct bnx2 *bp = bnapi->bp;
	int work_done = 0;
	struct status_block_msix *sblk = bnapi->status_blk.msix;

	while (1) {
		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
		if (unlikely(work_done >= budget))
			break;

		bnapi->last_status_idx = sblk->status_idx;
		/* status idx must be read before checking for more work. */
		rmb();
		if (likely(!bnx2_has_fast_work(bnapi))) {

			napi_complete(napi);
			/* Ack work processed up to last_status_idx for
			 * this vector.
			 */
			BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
				BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
				bnapi->last_status_idx);
			break;
		}
	}
	return work_done;
}
3530
/* NAPI poll routine for the default (non-MSI-X) vector: handles link
 * attention, ring work, and optional CNIC events.
 */
static int bnx2_poll(struct napi_struct *napi, int budget)
{
	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
	struct bnx2 *bp = bnapi->bp;
	int work_done = 0;
	struct status_block *sblk = bnapi->status_blk.msi;

	while (1) {
		bnx2_poll_link(bp, bnapi);

		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);

#ifdef BCM_CNIC
		bnx2_poll_cnic(bp, bnapi);
#endif

		/* bnapi->last_status_idx is used below to tell the hw how
		 * much work has been processed, so we must read it before
		 * checking for more work.
		 */
		bnapi->last_status_idx = sblk->status_idx;

		if (unlikely(work_done >= budget))
			break;

		rmb();
		if (likely(!bnx2_has_work(bnapi))) {
			napi_complete(napi);
			if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
				/* MSI/MSI-X: a single unmasked ack
				 * re-arms the interrupt.
				 */
				BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
					BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
					bnapi->last_status_idx);
				break;
			}
			/* INTx: first ack with the line still masked,
			 * then ack again unmasked.
			 */
			BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
				BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
				BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
				bnapi->last_status_idx);

			BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
				BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
				bnapi->last_status_idx);
			break;
		}
	}

	return work_done;
}
3579
3580 /* Called with rtnl_lock from vlan functions and also netif_tx_lock
3581  * from set_multicast.
3582  */
3583 static void
3584 bnx2_set_rx_mode(struct net_device *dev)
3585 {
3586         struct bnx2 *bp = netdev_priv(dev);
3587         u32 rx_mode, sort_mode;
3588         struct netdev_hw_addr *ha;
3589         int i;
3590
3591         if (!netif_running(dev))
3592                 return;
3593
3594         spin_lock_bh(&bp->phy_lock);
3595
3596         rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
3597                                   BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
3598         sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
3599         if (!(dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
3600              (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
3601                 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
3602         if (dev->flags & IFF_PROMISC) {
3603                 /* Promiscuous mode. */
3604                 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
3605                 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
3606                              BNX2_RPM_SORT_USER0_PROM_VLAN;
3607         }
3608         else if (dev->flags & IFF_ALLMULTI) {
3609                 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3610                         BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3611                                 0xffffffff);
3612                 }
3613                 sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
3614         }
3615         else {
3616                 /* Accept one or more multicast(s). */
3617                 u32 mc_filter[NUM_MC_HASH_REGISTERS];
3618                 u32 regidx;
3619                 u32 bit;
3620                 u32 crc;
3621
3622                 memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
3623
3624                 netdev_for_each_mc_addr(ha, dev) {
3625                         crc = ether_crc_le(ETH_ALEN, ha->addr);
3626                         bit = crc & 0xff;
3627                         regidx = (bit & 0xe0) >> 5;
3628                         bit &= 0x1f;
3629                         mc_filter[regidx] |= (1 << bit);
3630                 }
3631
3632                 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3633                         BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3634                                 mc_filter[i]);
3635                 }
3636
3637                 sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
3638         }
3639
3640         if (netdev_uc_count(dev) > BNX2_MAX_UNICAST_ADDRESSES) {
3641                 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
3642                 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
3643                              BNX2_RPM_SORT_USER0_PROM_VLAN;
3644         } else if (!(dev->flags & IFF_PROMISC)) {
3645                 /* Add all entries into to the match filter list */
3646                 i = 0;
3647                 netdev_for_each_uc_addr(ha, dev) {
3648                         bnx2_set_mac_addr(bp, ha->addr,
3649                                           i + BNX2_START_UNICAST_ADDRESS_INDEX);
3650                         sort_mode |= (1 <<
3651                                       (i + BNX2_START_UNICAST_ADDRESS_INDEX));
3652                         i++;
3653                 }
3654
3655         }
3656
3657         if (rx_mode != bp->rx_mode) {
3658                 bp->rx_mode = rx_mode;
3659                 BNX2_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
3660         }
3661
3662         BNX2_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3663         BNX2_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
3664         BNX2_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
3665
3666         spin_unlock_bh(&bp->phy_lock);
3667 }
3668
3669 static int
3670 check_fw_section(const struct firmware *fw,
3671                  const struct bnx2_fw_file_section *section,
3672                  u32 alignment, bool non_empty)
3673 {
3674         u32 offset = be32_to_cpu(section->offset);
3675         u32 len = be32_to_cpu(section->len);
3676
3677         if ((offset == 0 && len != 0) || offset >= fw->size || offset & 3)
3678                 return -EINVAL;
3679         if ((non_empty && len == 0) || len > fw->size - offset ||
3680             len & (alignment - 1))
3681                 return -EINVAL;
3682         return 0;
3683 }
3684
3685 static int
3686 check_mips_fw_entry(const struct firmware *fw,
3687                     const struct bnx2_mips_fw_file_entry *entry)
3688 {
3689         if (check_fw_section(fw, &entry->text, 4, true) ||
3690             check_fw_section(fw, &entry->data, 4, false) ||
3691             check_fw_section(fw, &entry->rodata, 4, false))
3692                 return -EINVAL;
3693         return 0;
3694 }
3695
3696 static void bnx2_release_firmware(struct bnx2 *bp)
3697 {
3698         if (bp->rv2p_firmware) {
3699                 release_firmware(bp->mips_firmware);
3700                 release_firmware(bp->rv2p_firmware);
3701                 bp->rv2p_firmware = NULL;
3702         }
3703 }
3704
/* Fetch and validate the MIPS and RV2P firmware images for this chip.
 * On failure the goto chain below releases whatever was acquired and
 * leaves bp->rv2p_firmware NULL so bnx2_request_firmware() will retry.
 */
static int bnx2_request_uncached_firmware(struct bnx2 *bp)
{
	const char *mips_fw_file, *rv2p_fw_file;
	const struct bnx2_mips_fw_file *mips_fw;
	const struct bnx2_rv2p_fw_file *rv2p_fw;
	int rc;

	/* Pick the files for this chip generation; 5709 A0/A1 need a
	 * different RV2P image.
	 */
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		mips_fw_file = FW_MIPS_FILE_09;
		if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5709_A0) ||
		    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5709_A1))
			rv2p_fw_file = FW_RV2P_FILE_09_Ax;
		else
			rv2p_fw_file = FW_RV2P_FILE_09;
	} else {
		mips_fw_file = FW_MIPS_FILE_06;
		rv2p_fw_file = FW_RV2P_FILE_06;
	}

	rc = reject_firmware(&bp->mips_firmware, mips_fw_file, &bp->pdev->dev);
	if (rc) {
		pr_err("Can't load firmware file \"%s\"\n", mips_fw_file);
		goto out;
	}

	rc = reject_firmware(&bp->rv2p_firmware, rv2p_fw_file, &bp->pdev->dev);
	if (rc) {
		pr_err("Can't load firmware file \"%s\"\n", rv2p_fw_file);
		goto err_release_mips_firmware;
	}
	/* Sanity-check every section header before anything is written
	 * to the chip.
	 */
	mips_fw = (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
	rv2p_fw = (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
	if (bp->mips_firmware->size < sizeof(*mips_fw) ||
	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->com) ||
	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->cp) ||
	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->rxp) ||
	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->tpat) ||
	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->txp)) {
		pr_err("Firmware file \"%s\" is invalid\n", mips_fw_file);
		rc = -EINVAL;
		goto err_release_firmware;
	}
	if (bp->rv2p_firmware->size < sizeof(*rv2p_fw) ||
	    check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc1.rv2p, 8, true) ||
	    check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc2.rv2p, 8, true)) {
		pr_err("Firmware file \"%s\" is invalid\n", rv2p_fw_file);
		rc = -EINVAL;
		goto err_release_firmware;
	}
out:
	return rc;

err_release_firmware:
	release_firmware(bp->rv2p_firmware);
	bp->rv2p_firmware = NULL;
err_release_mips_firmware:
	release_firmware(bp->mips_firmware);
	goto out;
}
3764
3765 static int bnx2_request_firmware(struct bnx2 *bp)
3766 {
3767         return bp->rv2p_firmware ? 0 : bnx2_request_uncached_firmware(bp);
3768 }
3769
3770 static u32
3771 rv2p_fw_fixup(u32 rv2p_proc, int idx, u32 loc, u32 rv2p_code)
3772 {
3773         switch (idx) {
3774         case RV2P_P1_FIXUP_PAGE_SIZE_IDX:
3775                 rv2p_code &= ~RV2P_BD_PAGE_SIZE_MSK;
3776                 rv2p_code |= RV2P_BD_PAGE_SIZE;
3777                 break;
3778         }
3779         return rv2p_code;
3780 }
3781
/* Download one RV2P processor's code image into its instruction
 * memory and leave the processor held in reset (un-stalled later).
 */
static int
load_rv2p_fw(struct bnx2 *bp, u32 rv2p_proc,
	     const struct bnx2_rv2p_fw_file_entry *fw_entry)
{
	u32 rv2p_code_len, file_offset;
	__be32 *rv2p_code;
	int i;
	u32 val, cmd, addr;

	rv2p_code_len = be32_to_cpu(fw_entry->rv2p.len);
	file_offset = be32_to_cpu(fw_entry->rv2p.offset);

	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);

	/* Select the command/address registers for this processor. */
	if (rv2p_proc == RV2P_PROC1) {
		cmd = BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
		addr = BNX2_RV2P_PROC1_ADDR_CMD;
	} else {
		cmd = BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
		addr = BNX2_RV2P_PROC2_ADDR_CMD;
	}

	/* Write the image one 64-bit instruction at a time: high word,
	 * low word, then the write command carrying the instruction
	 * index (i / 8).
	 */
	for (i = 0; i < rv2p_code_len; i += 8) {
		BNX2_WR(bp, BNX2_RV2P_INSTR_HIGH, be32_to_cpu(*rv2p_code));
		rv2p_code++;
		BNX2_WR(bp, BNX2_RV2P_INSTR_LOW, be32_to_cpu(*rv2p_code));
		rv2p_code++;

		val = (i / 8) | cmd;
		BNX2_WR(bp, addr, val);
	}

	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
	/* Apply up to 8 fixups: rewrite the instruction at each fixup
	 * location with a patched low word (e.g. the BD page size).
	 */
	for (i = 0; i < 8; i++) {
		u32 loc, code;

		loc = be32_to_cpu(fw_entry->fixup[i]);
		if (loc && ((loc * 4) < rv2p_code_len)) {
			code = be32_to_cpu(*(rv2p_code + loc - 1));
			BNX2_WR(bp, BNX2_RV2P_INSTR_HIGH, code);
			code = be32_to_cpu(*(rv2p_code + loc));
			code = rv2p_fw_fixup(rv2p_proc, i, loc, code);
			BNX2_WR(bp, BNX2_RV2P_INSTR_LOW, code);

			val = (loc / 2) | cmd;
			BNX2_WR(bp, addr, val);
		}
	}

	/* Reset the processor, un-stall is done later. */
	if (rv2p_proc == RV2P_PROC1) {
		BNX2_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
	}
	else {
		BNX2_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
	}

	return 0;
}
3841
3842 static int
3843 load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg,
3844             const struct bnx2_mips_fw_file_entry *fw_entry)
3845 {
3846         u32 addr, len, file_offset;
3847         __be32 *data;
3848         u32 offset;
3849         u32 val;
3850
3851         /* Halt the CPU. */
3852         val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3853         val |= cpu_reg->mode_value_halt;
3854         bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3855         bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3856
3857         /* Load the Text area. */
3858         addr = be32_to_cpu(fw_entry->text.addr);
3859         len = be32_to_cpu(fw_entry->text.len);
3860         file_offset = be32_to_cpu(fw_entry->text.offset);
3861         data = (__be32 *)(bp->mips_firmware->data + file_offset);
3862
3863         offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3864         if (len) {
3865                 int j;
3866
3867                 for (j = 0; j < (len / 4); j++, offset += 4)
3868                         bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3869         }
3870
3871         /* Load the Data area. */
3872         addr = be32_to_cpu(fw_entry->data.addr);
3873         len = be32_to_cpu(fw_entry->data.len);
3874         file_offset = be32_to_cpu(fw_entry->data.offset);
3875         data = (__be32 *)(bp->mips_firmware->data + file_offset);
3876
3877         offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3878         if (len) {
3879                 int j;
3880
3881                 for (j = 0; j < (len / 4); j++, offset += 4)
3882                         bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3883         }
3884
3885         /* Load the Read-Only area. */
3886         addr = be32_to_cpu(fw_entry->rodata.addr);
3887         len = be32_to_cpu(fw_entry->rodata.len);
3888         file_offset = be32_to_cpu(fw_entry->rodata.offset);
3889         data = (__be32 *)(bp->mips_firmware->data + file_offset);
3890
3891         offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3892         if (len) {
3893                 int j;
3894
3895                 for (j = 0; j < (len / 4); j++, offset += 4)
3896                         bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3897         }
3898
3899         /* Clear the pre-fetch instruction. */
3900         bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
3901
3902         val = be32_to_cpu(fw_entry->start_addr);
3903         bnx2_reg_wr_ind(bp, cpu_reg->pc, val);
3904
3905         /* Start the CPU. */
3906         val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3907         val &= ~cpu_reg->mode_value_halt;
3908         bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3909         bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3910
3911         return 0;
3912 }
3913
/* Download and start all on-chip firmware: both RV2P processors and
 * the five MIPS CPUs.  The images must already have been fetched by
 * bnx2_request_firmware().  Note load_rv2p_fw() always returns 0, so
 * its result is intentionally not checked.
 */
static int
bnx2_init_cpus(struct bnx2 *bp)
{
	const struct bnx2_mips_fw_file *mips_fw =
		(const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
	const struct bnx2_rv2p_fw_file *rv2p_fw =
		(const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
	int rc;

	/* Initialize the RV2P processor. */
	load_rv2p_fw(bp, RV2P_PROC1, &rv2p_fw->proc1);
	load_rv2p_fw(bp, RV2P_PROC2, &rv2p_fw->proc2);

	/* Initialize the RX Processor. */
	rc = load_cpu_fw(bp, &cpu_reg_rxp, &mips_fw->rxp);
	if (rc)
		goto init_cpu_err;

	/* Initialize the TX Processor. */
	rc = load_cpu_fw(bp, &cpu_reg_txp, &mips_fw->txp);
	if (rc)
		goto init_cpu_err;

	/* Initialize the TX Patch-up Processor. */
	rc = load_cpu_fw(bp, &cpu_reg_tpat, &mips_fw->tpat);
	if (rc)
		goto init_cpu_err;

	/* Initialize the Completion Processor. */
	rc = load_cpu_fw(bp, &cpu_reg_com, &mips_fw->com);
	if (rc)
		goto init_cpu_err;

	/* Initialize the Command Processor. */
	rc = load_cpu_fw(bp, &cpu_reg_cp, &mips_fw->cp);

init_cpu_err:
	return rc;
}
3953
3954 static void
3955 bnx2_setup_wol(struct bnx2 *bp)
3956 {
3957         int i;
3958         u32 val, wol_msg;
3959
3960         if (bp->wol) {
3961                 u32 advertising;
3962                 u8 autoneg;
3963
3964                 autoneg = bp->autoneg;
3965                 advertising = bp->advertising;
3966
3967                 if (bp->phy_port == PORT_TP) {
3968                         bp->autoneg = AUTONEG_SPEED;
3969                         bp->advertising = ADVERTISED_10baseT_Half |
3970                                 ADVERTISED_10baseT_Full |
3971                                 ADVERTISED_100baseT_Half |
3972                                 ADVERTISED_100baseT_Full |
3973                                 ADVERTISED_Autoneg;
3974                 }
3975
3976                 spin_lock_bh(&bp->phy_lock);
3977                 bnx2_setup_phy(bp, bp->phy_port);
3978                 spin_unlock_bh(&bp->phy_lock);
3979
3980                 bp->autoneg = autoneg;
3981                 bp->advertising = advertising;
3982
3983                 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
3984
3985                 val = BNX2_RD(bp, BNX2_EMAC_MODE);
3986
3987                 /* Enable port mode. */
3988                 val &= ~BNX2_EMAC_MODE_PORT;
3989                 val |= BNX2_EMAC_MODE_MPKT_RCVD |
3990                        BNX2_EMAC_MODE_ACPI_RCVD |
3991                        BNX2_EMAC_MODE_MPKT;
3992                 if (bp->phy_port == PORT_TP) {
3993                         val |= BNX2_EMAC_MODE_PORT_MII;
3994                 } else {
3995                         val |= BNX2_EMAC_MODE_PORT_GMII;
3996                         if (bp->line_speed == SPEED_2500)
3997                                 val |= BNX2_EMAC_MODE_25G_MODE;
3998                 }
3999
4000                 BNX2_WR(bp, BNX2_EMAC_MODE, val);
4001
4002                 /* receive all multicast */
4003                 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
4004                         BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
4005                                 0xffffffff);
4006                 }
4007                 BNX2_WR(bp, BNX2_EMAC_RX_MODE, BNX2_EMAC_RX_MODE_SORT_MODE);
4008
4009                 val = 1 | BNX2_RPM_SORT_USER0_BC_EN | BNX2_RPM_SORT_USER0_MC_EN;
4010                 BNX2_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
4011                 BNX2_WR(bp, BNX2_RPM_SORT_USER0, val);
4012                 BNX2_WR(bp, BNX2_RPM_SORT_USER0, val | BNX2_RPM_SORT_USER0_ENA);
4013
4014                 /* Need to enable EMAC and RPM for WOL. */
4015                 BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
4016                         BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
4017                         BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
4018                         BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
4019
4020                 val = BNX2_RD(bp, BNX2_RPM_CONFIG);
4021                 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
4022                 BNX2_WR(bp, BNX2_RPM_CONFIG, val);
4023
4024                 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
4025         } else {
4026                         wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
4027         }
4028
4029         if (!(bp->flags & BNX2_FLAG_NO_WOL)) {
4030                 u32 val;
4031
4032                 wol_msg |= BNX2_DRV_MSG_DATA_WAIT3;
4033                 if (bp->fw_last_msg || BNX2_CHIP(bp) != BNX2_CHIP_5709) {
4034                         bnx2_fw_sync(bp, wol_msg, 1, 0);
4035                         return;
4036                 }
4037                 /* Tell firmware not to power down the PHY yet, otherwise
4038                  * the chip will take a long time to respond to MMIO reads.
4039                  */
4040                 val = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
4041                 bnx2_shmem_wr(bp, BNX2_PORT_FEATURE,
4042                               val | BNX2_PORT_FEATURE_ASF_ENABLED);
4043                 bnx2_fw_sync(bp, wol_msg, 1, 0);
4044                 bnx2_shmem_wr(bp, BNX2_PORT_FEATURE, val);
4045         }
4046
4047 }
4048
/* Transition the device between PCI power states.  Only D0 and D3hot
 * are supported; anything else returns -EINVAL.
 */
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
	switch (state) {
	case PCI_D0: {
		u32 val;

		pci_enable_wake(bp->pdev, PCI_D0, false);
		pci_set_power_state(bp->pdev, PCI_D0);

		/* Undo the WOL packet-reception configuration set up
		 * by bnx2_setup_wol().
		 */
		val = BNX2_RD(bp, BNX2_EMAC_MODE);
		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
		val &= ~BNX2_EMAC_MODE_MPKT;
		BNX2_WR(bp, BNX2_EMAC_MODE, val);

		val = BNX2_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		BNX2_WR(bp, BNX2_RPM_CONFIG, val);
		break;
	}
	case PCI_D3hot: {
		bnx2_setup_wol(bp);
		pci_wake_from_d3(bp->pdev, bp->wol);
		/* 5706 A0/A1: only enter D3hot when WOL is armed. */
		if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) ||
		    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1)) {

			if (bp->wol)
				pci_set_power_state(bp->pdev, PCI_D3hot);
			break;

		}
		if (!bp->fw_last_msg && BNX2_CHIP(bp) == BNX2_CHIP_5709) {
			u32 val;

			/* Tell firmware not to power down the PHY yet,
			 * otherwise the other port may not respond to
			 * MMIO reads.
			 */
			val = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
			val &= ~BNX2_CONDITION_PM_STATE_MASK;
			val |= BNX2_CONDITION_PM_STATE_UNPREP;
			bnx2_shmem_wr(bp, BNX2_BC_STATE_CONDITION, val);
		}
		pci_set_power_state(bp->pdev, PCI_D3hot);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
4104
4105 static int
4106 bnx2_acquire_nvram_lock(struct bnx2 *bp)
4107 {
4108         u32 val;
4109         int j;
4110
4111         /* Request access to the flash interface. */
4112         BNX2_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
4113         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4114                 val = BNX2_RD(bp, BNX2_NVM_SW_ARB);
4115                 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
4116                         break;
4117
4118                 udelay(5);
4119         }
4120
4121         if (j >= NVRAM_TIMEOUT_COUNT)
4122                 return -EBUSY;
4123
4124         return 0;
4125 }
4126
4127 static int
4128 bnx2_release_nvram_lock(struct bnx2 *bp)
4129 {
4130         int j;
4131         u32 val;
4132
4133         /* Relinquish nvram interface. */
4134         BNX2_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
4135
4136         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4137                 val = BNX2_RD(bp, BNX2_NVM_SW_ARB);
4138                 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
4139                         break;
4140
4141                 udelay(5);
4142         }
4143
4144         if (j >= NVRAM_TIMEOUT_COUNT)
4145                 return -EBUSY;
4146
4147         return 0;
4148 }
4149
4150
4151 static int
4152 bnx2_enable_nvram_write(struct bnx2 *bp)
4153 {
4154         u32 val;
4155
4156         val = BNX2_RD(bp, BNX2_MISC_CFG);
4157         BNX2_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
4158
4159         if (bp->flash_info->flags & BNX2_NV_WREN) {
4160                 int j;
4161
4162                 BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4163                 BNX2_WR(bp, BNX2_NVM_COMMAND,
4164                         BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
4165
4166                 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4167                         udelay(5);
4168
4169                         val = BNX2_RD(bp, BNX2_NVM_COMMAND);
4170                         if (val & BNX2_NVM_COMMAND_DONE)
4171                                 break;
4172                 }
4173
4174                 if (j >= NVRAM_TIMEOUT_COUNT)
4175                         return -EBUSY;
4176         }
4177         return 0;
4178 }
4179
4180 static void
4181 bnx2_disable_nvram_write(struct bnx2 *bp)
4182 {
4183         u32 val;
4184
4185         val = BNX2_RD(bp, BNX2_MISC_CFG);
4186         BNX2_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
4187 }
4188
4189
4190 static void
4191 bnx2_enable_nvram_access(struct bnx2 *bp)
4192 {
4193         u32 val;
4194
4195         val = BNX2_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4196         /* Enable both bits, even on read. */
4197         BNX2_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4198                 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
4199 }
4200
4201 static void
4202 bnx2_disable_nvram_access(struct bnx2 *bp)
4203 {
4204         u32 val;
4205
4206         val = BNX2_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4207         /* Disable both bits, even after read. */
4208         BNX2_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4209                 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
4210                         BNX2_NVM_ACCESS_ENABLE_WR_EN));
4211 }
4212
4213 static int
4214 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
4215 {
4216         u32 cmd;
4217         int j;
4218
4219         if (bp->flash_info->flags & BNX2_NV_BUFFERED)
4220                 /* Buffered flash, no erase needed */
4221                 return 0;
4222
4223         /* Build an erase command */
4224         cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
4225               BNX2_NVM_COMMAND_DOIT;
4226
4227         /* Need to clear DONE bit separately. */
4228         BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4229
4230         /* Address of the NVRAM to read from. */
4231         BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4232
4233         /* Issue an erase command. */
4234         BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);
4235
4236         /* Wait for completion. */
4237         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4238                 u32 val;
4239
4240                 udelay(5);
4241
4242                 val = BNX2_RD(bp, BNX2_NVM_COMMAND);
4243                 if (val & BNX2_NVM_COMMAND_DONE)
4244                         break;
4245         }
4246
4247         if (j >= NVRAM_TIMEOUT_COUNT)
4248                 return -EBUSY;
4249
4250         return 0;
4251 }
4252
4253 static int
4254 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
4255 {
4256         u32 cmd;
4257         int j;
4258
4259         /* Build the command word. */
4260         cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
4261
4262         /* Calculate an offset of a buffered flash, not needed for 5709. */
4263         if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4264                 offset = ((offset / bp->flash_info->page_size) <<
4265                            bp->flash_info->page_bits) +
4266                           (offset % bp->flash_info->page_size);
4267         }
4268
4269         /* Need to clear DONE bit separately. */
4270         BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4271
4272         /* Address of the NVRAM to read from. */
4273         BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4274
4275         /* Issue a read command. */
4276         BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);
4277
4278         /* Wait for completion. */
4279         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4280                 u32 val;
4281
4282                 udelay(5);
4283
4284                 val = BNX2_RD(bp, BNX2_NVM_COMMAND);
4285                 if (val & BNX2_NVM_COMMAND_DONE) {
4286                         __be32 v = cpu_to_be32(BNX2_RD(bp, BNX2_NVM_READ));
4287                         memcpy(ret_val, &v, 4);
4288                         break;
4289                 }
4290         }
4291         if (j >= NVRAM_TIMEOUT_COUNT)
4292                 return -EBUSY;
4293
4294         return 0;
4295 }
4296
4297
4298 static int
4299 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
4300 {
4301         u32 cmd;
4302         __be32 val32;
4303         int j;
4304
4305         /* Build the command word. */
4306         cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
4307
4308         /* Calculate an offset of a buffered flash, not needed for 5709. */
4309         if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4310                 offset = ((offset / bp->flash_info->page_size) <<
4311                           bp->flash_info->page_bits) +
4312                          (offset % bp->flash_info->page_size);
4313         }
4314
4315         /* Need to clear DONE bit separately. */
4316         BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4317
4318         memcpy(&val32, val, 4);
4319
4320         /* Write the data. */
4321         BNX2_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));
4322
4323         /* Address of the NVRAM to write to. */
4324         BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4325
4326         /* Issue the write command. */
4327         BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);
4328
4329         /* Wait for completion. */
4330         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4331                 udelay(5);
4332
4333                 if (BNX2_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
4334                         break;
4335         }
4336         if (j >= NVRAM_TIMEOUT_COUNT)
4337                 return -EBUSY;
4338
4339         return 0;
4340 }
4341
/* Identify the attached flash/EEPROM part and record it in
 * bp->flash_info, plus the usable size in bp->flash_size.
 *
 * The 5709 has a fixed flash interface (flash_5709).  Older chips are
 * matched against flash_table using the strapping bits in NVM_CFG1; if
 * the interface has not been reconfigured yet, it is programmed with
 * the matched entry's config registers.
 *
 * Returns 0 on success, -ENODEV for an unrecognized part, or the error
 * from acquiring the NVRAM lock.
 */
static int
bnx2_init_nvram(struct bnx2 *bp)
{
	u32 val;
	int j, entry_count, rc = 0;
	const struct flash_spec *flash;

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		bp->flash_info = &flash_5709;
		goto get_flash_size;
	}

	/* Determine the selected interface. */
	val = BNX2_RD(bp, BNX2_NVM_CFG1);

	entry_count = ARRAY_SIZE(flash_table);

	if (val & 0x40000000) {

		/* Flash interface has been reconfigured */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			/* Match on the backup-strap bits of config1. */
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				bp->flash_info = flash;
				break;
			}
		}
	}
	else {
		u32 mask;
		/* Not yet been reconfigured */

		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		for (j = 0, flash = &flash_table[0]; j < entry_count;
			j++, flash++) {

			if ((val & mask) == (flash->strapping & mask)) {
				bp->flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
					return rc;

				/* Enable access to flash interface */
				bnx2_enable_nvram_access(bp);

				/* Reconfigure the flash interface */
				BNX2_WR(bp, BNX2_NVM_CFG1, flash->config1);
				BNX2_WR(bp, BNX2_NVM_CFG2, flash->config2);
				BNX2_WR(bp, BNX2_NVM_CFG3, flash->config3);
				BNX2_WR(bp, BNX2_NVM_WRITE1, flash->write1);

				/* Disable access to flash interface */
				bnx2_disable_nvram_access(bp);
				bnx2_release_nvram_lock(bp);

				break;
			}
		}
	} /* if (val & 0x40000000) */

	/* j == entry_count means neither loop above found a match. */
	if (j == entry_count) {
		bp->flash_info = NULL;
		pr_alert("Unknown flash/EEPROM type\n");
		return -ENODEV;
	}

get_flash_size:
	/* Prefer the size reported via shared hw config; fall back to
	 * the table entry's total size when firmware reports none. */
	val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		bp->flash_size = val;
	else
		bp->flash_size = bp->flash_info->total_size;

	return rc;
}
4424
4425 static int
4426 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
4427                 int buf_size)
4428 {
4429         int rc = 0;
4430         u32 cmd_flags, offset32, len32, extra;
4431
4432         if (buf_size == 0)
4433                 return 0;
4434
4435         /* Request access to the flash interface. */
4436         if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4437                 return rc;
4438
4439         /* Enable access to flash interface */
4440         bnx2_enable_nvram_access(bp);
4441
4442         len32 = buf_size;
4443         offset32 = offset;
4444         extra = 0;
4445
4446         cmd_flags = 0;
4447
4448         if (offset32 & 3) {
4449                 u8 buf[4];
4450                 u32 pre_len;
4451
4452                 offset32 &= ~3;
4453                 pre_len = 4 - (offset & 3);
4454
4455                 if (pre_len >= len32) {
4456                         pre_len = len32;
4457                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
4458                                     BNX2_NVM_COMMAND_LAST;
4459                 }
4460                 else {
4461                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
4462                 }
4463
4464                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4465
4466                 if (rc)
4467                         return rc;
4468
4469                 memcpy(ret_buf, buf + (offset & 3), pre_len);
4470
4471                 offset32 += 4;
4472                 ret_buf += pre_len;
4473                 len32 -= pre_len;
4474         }
4475         if (len32 & 3) {
4476                 extra = 4 - (len32 & 3);
4477                 len32 = (len32 + 4) & ~3;
4478         }
4479
4480         if (len32 == 4) {
4481                 u8 buf[4];
4482
4483                 if (cmd_flags)
4484                         cmd_flags = BNX2_NVM_COMMAND_LAST;
4485                 else
4486                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
4487                                     BNX2_NVM_COMMAND_LAST;
4488
4489                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4490
4491                 memcpy(ret_buf, buf, 4 - extra);
4492         }
4493         else if (len32 > 0) {
4494                 u8 buf[4];
4495
4496                 /* Read the first word. */
4497                 if (cmd_flags)
4498                         cmd_flags = 0;
4499                 else
4500                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
4501
4502                 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
4503
4504                 /* Advance to the next dword. */
4505                 offset32 += 4;
4506                 ret_buf += 4;
4507                 len32 -= 4;
4508
4509                 while (len32 > 4 && rc == 0) {
4510                         rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
4511
4512                         /* Advance to the next dword. */
4513                         offset32 += 4;
4514                         ret_buf += 4;
4515                         len32 -= 4;
4516                 }
4517
4518                 if (rc)
4519                         return rc;
4520
4521                 cmd_flags = BNX2_NVM_COMMAND_LAST;
4522                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4523
4524                 memcpy(ret_buf, buf, 4 - extra);
4525         }
4526
4527         /* Disable access to flash interface */
4528         bnx2_disable_nvram_access(bp);
4529
4530         bnx2_release_nvram_lock(bp);
4531
4532         return rc;
4533 }
4534
/* Write @buf_size bytes from @data_buf to NVRAM at @offset.
 *
 * Unaligned head/tail bytes are handled by reading the surrounding
 * dwords first and merging everything into a temporary dword-aligned
 * buffer.  For non-buffered flash parts, each affected page is read
 * into a scratch buffer, erased, and rewritten with the untouched
 * portions restored around the new data.
 *
 * Returns 0 on success or a negative errno.
 */
static int
bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
		int buf_size)
{
	u32 written, offset32, len32;
	u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
	int rc = 0;
	int align_start, align_end;

	buf = data_buf;
	offset32 = offset;
	len32 = buf_size;
	align_start = align_end = 0;

	/* Unaligned start: widen to the dword boundary and fetch the
	 * existing leading bytes so they can be preserved. */
	if ((align_start = (offset32 & 3))) {
		offset32 &= ~3;
		len32 += align_start;
		if (len32 < 4)
			len32 = 4;
		if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
			return rc;
	}

	/* Unaligned end: same treatment for the trailing bytes. */
	if (len32 & 3) {
		align_end = 4 - (len32 & 3);
		len32 += align_end;
		if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
			return rc;
	}

	/* Build a dword-aligned image combining the preserved edge
	 * bytes with the caller's data. */
	if (align_start || align_end) {
		align_buf = kmalloc(len32, GFP_KERNEL);
		if (align_buf == NULL)
			return -ENOMEM;
		if (align_start) {
			memcpy(align_buf, start, 4);
		}
		if (align_end) {
			memcpy(align_buf + len32 - 4, end, 4);
		}
		memcpy(align_buf + align_start, data_buf, buf_size);
		buf = align_buf;
	}

	/* Non-buffered flash needs a page-sized scratch buffer; 264
	 * bytes covers the largest page handled here. */
	if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
		flash_buffer = kmalloc(264, GFP_KERNEL);
		if (flash_buffer == NULL) {
			rc = -ENOMEM;
			goto nvram_write_end;
		}
	}

	/* Process one flash page per iteration. */
	written = 0;
	while ((written < len32) && (rc == 0)) {
		u32 page_start, page_end, data_start, data_end;
		u32 addr, cmd_flags;
		int i;

		/* Find the page_start addr */
		page_start = offset32 + written;
		page_start -= (page_start % bp->flash_info->page_size);
		/* Find the page_end addr */
		page_end = page_start + bp->flash_info->page_size;
		/* Find the data_start addr */
		data_start = (written == 0) ? offset32 : page_start;
		/* Find the data_end addr */
		data_end = (page_end > offset32 + len32) ?
			(offset32 + len32) : page_end;

		/* Request access to the flash interface. */
		if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
			goto nvram_write_end;

		/* Enable access to flash interface */
		bnx2_enable_nvram_access(bp);

		cmd_flags = BNX2_NVM_COMMAND_FIRST;
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			int j;

			/* Read the whole page into the buffer
			 * (non-buffer flash only) */
			for (j = 0; j < bp->flash_info->page_size; j += 4) {
				if (j == (bp->flash_info->page_size - 4)) {
					cmd_flags |= BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_read_dword(bp,
					page_start + j,
					&flash_buffer[j],
					cmd_flags);

				if (rc)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Enable writes to flash interface (unlock write-protect) */
		if ((rc = bnx2_enable_nvram_write(bp)) != 0)
			goto nvram_write_end;

		/* Loop to write back the buffer data from page_start to
		 * data_start */
		i = 0;
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			/* Erase the page */
			if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
				goto nvram_write_end;

			/* Re-enable the write again for the actual write.
			 * NOTE(review): the return value is ignored here,
			 * unlike the first bnx2_enable_nvram_write() call
			 * above — presumably safe after a successful
			 * erase, but worth confirming. */
			bnx2_enable_nvram_write(bp);

			for (addr = page_start; addr < data_start;
				addr += 4, i += 4) {

				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Loop to write the new data from data_start to data_end */
		for (addr = data_start; addr < data_end; addr += 4, i += 4) {
			if ((addr == page_end - 4) ||
				((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
				 (addr == data_end - 4))) {

				cmd_flags |= BNX2_NVM_COMMAND_LAST;
			}
			rc = bnx2_nvram_write_dword(bp, addr, buf,
				cmd_flags);

			if (rc != 0)
				goto nvram_write_end;

			cmd_flags = 0;
			buf += 4;
		}

		/* Loop to write back the buffer data from data_end
		 * to page_end */
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			for (addr = data_end; addr < page_end;
				addr += 4, i += 4) {

				if (addr == page_end-4) {
					cmd_flags = BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Disable writes to flash interface (lock write-protect) */
		bnx2_disable_nvram_write(bp);

		/* Disable access to flash interface */
		bnx2_disable_nvram_access(bp);
		bnx2_release_nvram_lock(bp);

		/* Increment written */
		written += data_end - data_start;
	}

nvram_write_end:
	kfree(flash_buffer);
	kfree(align_buf);
	return rc;
}
4714
4715 static void
4716 bnx2_init_fw_cap(struct bnx2 *bp)
4717 {
4718         u32 val, sig = 0;
4719
4720         bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4721         bp->flags &= ~BNX2_FLAG_CAN_KEEP_VLAN;
4722
4723         if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
4724                 bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4725
4726         val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
4727         if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
4728                 return;
4729
4730         if ((val & BNX2_FW_CAP_CAN_KEEP_VLAN) == BNX2_FW_CAP_CAN_KEEP_VLAN) {
4731                 bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4732                 sig |= BNX2_DRV_ACK_CAP_SIGNATURE | BNX2_FW_CAP_CAN_KEEP_VLAN;
4733         }
4734
4735         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
4736             (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE)) {
4737                 u32 link;
4738
4739                 bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4740
4741                 link = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
4742                 if (link & BNX2_LINK_STATUS_SERDES_LINK)
4743                         bp->phy_port = PORT_FIBRE;
4744                 else
4745                         bp->phy_port = PORT_TP;
4746
4747                 sig |= BNX2_DRV_ACK_CAP_SIGNATURE |
4748                        BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
4749         }
4750
4751         if (netif_running(bp->dev) && sig)
4752                 bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
4753 }
4754
/* Map GRC windows 2 and 3 over the chip's MSI-X vector table and PBA
 * so those structures are reachable through the register BAR.
 */
static void
bnx2_setup_msix_tbl(struct bnx2 *bp)
{
	/* Select separate-window mapping mode first. */
	BNX2_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);

	BNX2_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
	BNX2_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
}
4763
4764 static void
4765 bnx2_wait_dma_complete(struct bnx2 *bp)
4766 {
4767         u32 val;
4768         int i;
4769
4770         /*
4771          * Wait for the current PCI transaction to complete before
4772          * issuing a reset.
4773          */
4774         if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) ||
4775             (BNX2_CHIP(bp) == BNX2_CHIP_5708)) {
4776                 BNX2_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
4777                         BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
4778                         BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
4779                         BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
4780                         BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
4781                 val = BNX2_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
4782                 udelay(5);
4783         } else {  /* 5709 */
4784                 val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4785                 val &= ~BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
4786                 BNX2_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
4787                 val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4788
4789                 for (i = 0; i < 100; i++) {
4790                         msleep(1);
4791                         val = BNX2_RD(bp, BNX2_PCICFG_DEVICE_CONTROL);
4792                         if (!(val & BNX2_PCICFG_DEVICE_STATUS_NO_PEND))
4793                                 break;
4794                 }
4795         }
4796
4797         return;
4798 }
4799
4800
/* Soft-reset the chip and wait for the bootcode to finish its
 * re-initialization.  @reset_code is the BNX2_DRV_MSG_* reason passed
 * to the firmware before and after the reset.
 *
 * Returns 0 on success, or a negative errno: -EBUSY if the reset does
 * not complete, -ENODEV if byte swapping comes up misconfigured, or
 * the error from the post-reset firmware handshake.
 */
static int
bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
{
	u32 val;
	int i, rc = 0;
	u8 old_port;

	/* Wait for the current PCI transaction to complete before
	 * issuing a reset. */
	bnx2_wait_dma_complete(bp);

	/* Wait for the firmware to tell us it is ok to issue a reset. */
	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);

	/* Deposit a driver reset signature so the firmware knows that
	 * this is a soft reset. */
	bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
		      BNX2_DRV_RESET_SIGNATURE_MAGIC);

	/* Do a dummy read to force the chip to complete all current transaction
	 * before we issue a reset. */
	val = BNX2_RD(bp, BNX2_MISC_ID);

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		/* 5709 resets via MISC_COMMAND; the read-back flushes the
		 * posted write before the settle delay. */
		BNX2_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
		BNX2_RD(bp, BNX2_MISC_COMMAND);
		udelay(5);

		val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

	} else {
		/* Older chips reset via CORE_RST_REQ in PCICFG. */
		val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		/* Chip reset. */
		BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

		/* Reading back any register after chip reset will hang the
		 * bus on 5706 A0 and A1.  The msleep below provides plenty
		 * of margin for write posting.
		 */
		if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) ||
		    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1))
			msleep(20);

		/* Reset takes approximate 30 usec */
		for (i = 0; i < 10; i++) {
			val = BNX2_RD(bp, BNX2_PCICFG_MISC_CONFIG);
			if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
				    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
				break;
			udelay(10);
		}

		if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
			   BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
			pr_err("Chip reset did not complete\n");
			return -EBUSY;
		}
	}

	/* Make sure byte swapping is properly configured. */
	val = BNX2_RD(bp, BNX2_PCI_SWAP_DIAG0);
	if (val != 0x01020304) {
		pr_err("Chip not in correct endian mode\n");
		return -ENODEV;
	}

	/* Wait for the firmware to finish its initialization. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 1, 0);
	if (rc)
		return rc;

	spin_lock_bh(&bp->phy_lock);
	old_port = bp->phy_port;
	bnx2_init_fw_cap(bp);
	/* Re-apply remote-PHY link defaults if the reset changed the
	 * reported port type. */
	if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
	    old_port != bp->phy_port)
		bnx2_set_default_remote_link(bp);
	spin_unlock_bh(&bp->phy_lock);

	if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
		/* Adjust the voltage regular to two steps lower.  The default
		 * of this register is 0x0000000e. */
		BNX2_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);

		/* Remove bad rbuf memory from the free pool. */
		rc = bnx2_alloc_bad_rbuf(bp);
	}

	if (bp->flags & BNX2_FLAG_USING_MSIX) {
		bnx2_setup_msix_tbl(bp);
		/* Prevent MSIX table reads and write from timing out */
		BNX2_WR(bp, BNX2_MISC_ECO_HW_CTL,
			BNX2_MISC_ECO_HW_CTL_LARGE_GRC_TMOUT_EN);
	}

	return rc;
}
4904
/* Bring the chip from post-reset state to operational: program the DMA
 * engine, load the on-chip CPU firmware, set the MAC address, MTU and
 * rx buffer sizing registers, program the host coalescing (HC) block
 * and per-vector status blocks, then complete the WAIT2 handshake with
 * the bootcode.  Returns 0 on success or a negative errno from the
 * context init / firmware load / bootcode sync steps.
 */
static int
bnx2_init_chip(struct bnx2 *bp)
{
	u32 val, mtu;
	int rc, i;

	/* Make sure the interrupt is not active. */
	BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* DMA byte/word swap modes plus read/write channel counts. */
	val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
	      BNX2_DMA_CONFIG_DATA_WORD_SWAP |
#ifdef __BIG_ENDIAN
	      BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
#endif
	      BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
	      DMA_READ_CHANS << 12 |
	      DMA_WRITE_CHANS << 16;

	/* NOTE(review): magic DMA_CONFIG bits without named macros;
	 * kept verbatim — meaning not derivable from this file. */
	val |= (0x2 << 20) | (1 << 11);

	if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
		val |= (1 << 23);

	if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) &&
	    (BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A0) &&
	    !(bp->flags & BNX2_FLAG_PCIX))
		val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;

	BNX2_WR(bp, BNX2_DMA_CONFIG, val);

	if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
		val = BNX2_RD(bp, BNX2_TDMA_CONFIG);
		val |= BNX2_TDMA_CONFIG_ONE_DMA;
		BNX2_WR(bp, BNX2_TDMA_CONFIG, val);
	}

	if (bp->flags & BNX2_FLAG_PCIX) {
		u16 val16;

		/* Clear the PCI-X "enable relaxed ordering" command bit. */
		pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				     &val16);
		pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				      val16 & ~PCI_X_CMD_ERO);
	}

	BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
		BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
		BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
		BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);

	/* Initialize context mapping and zero out the quick contexts.  The
	 * context block must have already been enabled. */
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		rc = bnx2_init_5709_context(bp);
		if (rc)
			return rc;
	} else
		bnx2_init_context(bp);

	/* Load firmware into the on-chip processors. */
	if ((rc = bnx2_init_cpus(bp)) != 0)
		return rc;

	bnx2_init_nvram(bp);

	bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);

	val = BNX2_RD(bp, BNX2_MQ_CONFIG);
	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		val |= BNX2_MQ_CONFIG_BIN_MQ_MODE;
		if (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax)
			val |= BNX2_MQ_CONFIG_HALT_DIS;
	}

	BNX2_WR(bp, BNX2_MQ_CONFIG, val);

	/* Kernel mailbox window bounds. */
	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
	BNX2_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
	BNX2_WR(bp, BNX2_MQ_KNL_WIND_END, val);

	/* Tell the RV2P processors the host page size. */
	val = (BNX2_PAGE_BITS - 8) << 24;
	BNX2_WR(bp, BNX2_RV2P_CONFIG, val);

	/* Configure page size. */
	val = BNX2_RD(bp, BNX2_TBDR_CONFIG);
	val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
	val |= (BNX2_PAGE_BITS - 8) << 24 | 0x40;
	BNX2_WR(bp, BNX2_TBDR_CONFIG, val);

	/* Seed the EMAC backoff engine from the MAC address bytes. */
	val = bp->mac_addr[0] +
	      (bp->mac_addr[1] << 8) +
	      (bp->mac_addr[2] << 16) +
	      bp->mac_addr[3] +
	      (bp->mac_addr[4] << 8) +
	      (bp->mac_addr[5] << 16);
	BNX2_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);

	/* Program the MTU.  Also include 4 bytes for CRC32. */
	mtu = bp->dev->mtu;
	val = mtu + ETH_HLEN + ETH_FCS_LEN;
	if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
		val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
	BNX2_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);

	/* The RBUF config values are computed from at least a 1500 MTU. */
	if (mtu < 1500)
		mtu = 1500;

	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG, BNX2_RBUF_CONFIG_VAL(mtu));
	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG2, BNX2_RBUF_CONFIG2_VAL(mtu));
	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG3, BNX2_RBUF_CONFIG3_VAL(mtu));

	/* Zero the host status/statistics block and per-vector indices. */
	memset(bp->bnx2_napi[0].status_blk.msi, 0, bp->status_stats_size);
	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
		bp->bnx2_napi[i].last_status_idx = 0;

	bp->idle_chk_status_idx = 0xffff;

	/* Set up how to generate a link change interrupt. */
	BNX2_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	/* DMA addresses of the status and statistics blocks. */
	BNX2_WR(bp, BNX2_HC_STATUS_ADDR_L,
		(u64) bp->status_blk_mapping & 0xffffffff);
	BNX2_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);

	BNX2_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
		(u64) bp->stats_blk_mapping & 0xffffffff);
	BNX2_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
		(u64) bp->stats_blk_mapping >> 32);

	/* Interrupt coalescing parameters; the *_int variants occupy the
	 * upper 16 bits of each register. */
	BNX2_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
		(bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);

	BNX2_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
		(bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);

	BNX2_WR(bp, BNX2_HC_COMP_PROD_TRIP,
		(bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);

	BNX2_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);

	BNX2_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);

	BNX2_WR(bp, BNX2_HC_COM_TICKS,
		(bp->com_ticks_int << 16) | bp->com_ticks);

	BNX2_WR(bp, BNX2_HC_CMD_TICKS,
		(bp->cmd_ticks_int << 16) | bp->cmd_ticks);

	if (bp->flags & BNX2_FLAG_BROKEN_STATS)
		BNX2_WR(bp, BNX2_HC_STATS_TICKS, 0);
	else
		BNX2_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
	BNX2_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */

	if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1)
		val = BNX2_HC_CONFIG_COLLECT_STATS;
	else {
		val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
		      BNX2_HC_CONFIG_COLLECT_STATS;
	}

	if (bp->flags & BNX2_FLAG_USING_MSIX) {
		BNX2_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
			BNX2_HC_MSIX_BIT_VECTOR_VAL);

		val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
	}

	if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
		val |= BNX2_HC_CONFIG_ONE_SHOT | BNX2_HC_CONFIG_USE_INT_PARAM;

	BNX2_WR(bp, BNX2_HC_CONFIG, val);

	/* Give the firmware a low-latency hint based on the rx timer. */
	if (bp->rx_ticks < 25)
		bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 1);
	else
		bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 0);

	/* Per-vector status block config for MSI-X vectors 1..N-1
	 * (vector 0 was programmed by the registers above). */
	for (i = 1; i < bp->irq_nvecs; i++) {
		u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) +
			   BNX2_HC_SB_CONFIG_1;

		BNX2_WR(bp, base,
			BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
			BNX2_HC_SB_CONFIG_1_RX_TMR_MODE |
			BNX2_HC_SB_CONFIG_1_ONE_SHOT);

		BNX2_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
			(bp->tx_quick_cons_trip_int << 16) |
			 bp->tx_quick_cons_trip);

		BNX2_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
			(bp->tx_ticks_int << 16) | bp->tx_ticks);

		BNX2_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF,
			(bp->rx_quick_cons_trip_int << 16) |
			bp->rx_quick_cons_trip);

		BNX2_WR(bp, base + BNX2_HC_RX_TICKS_OFF,
			(bp->rx_ticks_int << 16) | bp->rx_ticks);
	}

	/* Clear internal stats counters. */
	BNX2_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);

	BNX2_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);

	/* Initialize the receive filter. */
	bnx2_set_rx_mode(bp->dev);

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);
		val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
		BNX2_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
	}
	/* Tell the bootcode the driver has finished its reset sequence. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
			  1, 0);

	BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
	/* Read back — presumably flushes the posted write; confirm. */
	BNX2_RD(bp, BNX2_MISC_ENABLE_SET_BITS);

	udelay(20);

	bp->hc_cmd = BNX2_RD(bp, BNX2_HC_COMMAND);

	return rc;
}
5133
5134 static void
5135 bnx2_clear_ring_states(struct bnx2 *bp)
5136 {
5137         struct bnx2_napi *bnapi;
5138         struct bnx2_tx_ring_info *txr;
5139         struct bnx2_rx_ring_info *rxr;
5140         int i;
5141
5142         for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
5143                 bnapi = &bp->bnx2_napi[i];
5144                 txr = &bnapi->tx_ring;
5145                 rxr = &bnapi->rx_ring;
5146
5147                 txr->tx_cons = 0;
5148                 txr->hw_tx_cons = 0;
5149                 rxr->rx_prod_bseq = 0;
5150                 rxr->rx_prod = 0;
5151                 rxr->rx_cons = 0;
5152                 rxr->rx_pg_prod = 0;
5153                 rxr->rx_pg_cons = 0;
5154         }
5155 }
5156
/* Program the L2 tx context for @cid: context type/size, command type,
 * and the 64-bit host address of the tx descriptor ring.  The 5709
 * uses a different (XI) set of context offsets than the older chips,
 * hence the offset selection up front.
 */
static void
bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
{
	u32 val, offset0, offset1, offset2, offset3;
	u32 cid_addr = GET_CID_ADDR(cid);

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		offset0 = BNX2_L2CTX_TYPE_XI;
		offset1 = BNX2_L2CTX_CMD_TYPE_XI;
		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
	} else {
		offset0 = BNX2_L2CTX_TYPE;
		offset1 = BNX2_L2CTX_CMD_TYPE;
		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
	}
	val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
	bnx2_ctx_wr(bp, cid_addr, offset0, val);

	/* NOTE(review): the (8 << 16) field has no named macro here —
	 * meaning not derivable from this file. */
	val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
	bnx2_ctx_wr(bp, cid_addr, offset1, val);

	/* High then low half of the descriptor ring's DMA address. */
	val = (u64) txr->tx_desc_mapping >> 32;
	bnx2_ctx_wr(bp, cid_addr, offset2, val);

	val = (u64) txr->tx_desc_mapping & 0xffffffff;
	bnx2_ctx_wr(bp, cid_addr, offset3, val);
}
5186
5187 static void
5188 bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
5189 {
5190         struct bnx2_tx_bd *txbd;
5191         u32 cid = TX_CID;
5192         struct bnx2_napi *bnapi;
5193         struct bnx2_tx_ring_info *txr;
5194
5195         bnapi = &bp->bnx2_napi[ring_num];
5196         txr = &bnapi->tx_ring;
5197
5198         if (ring_num == 0)
5199                 cid = TX_CID;
5200         else
5201                 cid = TX_TSS_CID + ring_num - 1;
5202
5203         bp->tx_wake_thresh = bp->tx_ring_size / 2;
5204
5205         txbd = &txr->tx_desc_ring[BNX2_MAX_TX_DESC_CNT];
5206
5207         txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
5208         txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;
5209
5210         txr->tx_prod = 0;
5211         txr->tx_prod_bseq = 0;
5212
5213         txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
5214         txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
5215
5216         bnx2_init_tx_context(bp, cid, txr);
5217 }
5218
5219 static void
5220 bnx2_init_rxbd_rings(struct bnx2_rx_bd *rx_ring[], dma_addr_t dma[],
5221                      u32 buf_size, int num_rings)
5222 {
5223         int i;
5224         struct bnx2_rx_bd *rxbd;
5225
5226         for (i = 0; i < num_rings; i++) {
5227                 int j;
5228
5229                 rxbd = &rx_ring[i][0];
5230                 for (j = 0; j < BNX2_MAX_RX_DESC_CNT; j++, rxbd++) {
5231                         rxbd->rx_bd_len = buf_size;
5232                         rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
5233                 }
5234                 if (i == (num_rings - 1))
5235                         j = 0;
5236                 else
5237                         j = i + 1;
5238                 rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
5239                 rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
5240         }
5241 }
5242
/* Initialize rx ring @ring_num: build the bd page chain, program the
 * rx context, optionally set up the jumbo page ring, pre-fill the
 * rings with buffers, and publish the initial producer indices to the
 * chip's mailboxes.
 */
static void
bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
{
	int i;
	u16 prod, ring_prod;
	u32 cid, rx_cid_addr, val;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num];
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

	/* Ring 0 uses the base rx CID; additional (RSS) rings follow. */
	if (ring_num == 0)
		cid = RX_CID;
	else
		cid = RX_RSS_CID + ring_num - 1;

	rx_cid_addr = GET_CID_ADDR(cid);

	bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping,
			     bp->rx_buf_use_size, bp->rx_max_ring);

	bnx2_init_rx_context(bp, cid);

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		val = BNX2_RD(bp, BNX2_MQ_MAP_L2_5);
		BNX2_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
	}

	/* Page buffer size 0 unless the jumbo page ring is in use. */
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
	if (bp->rx_pg_ring_size) {
		bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring,
				     rxr->rx_pg_desc_mapping,
				     PAGE_SIZE, bp->rx_max_pg_ring);
		val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
		       BNX2_L2CTX_RBDC_JUMBO_KEY - ring_num);

		/* DMA address of the first page-ring descriptor page. */
		val = (u64) rxr->rx_pg_desc_mapping[0] >> 32;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);

		val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);

		if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
			BNX2_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
	}

	/* DMA address of the first regular descriptor page. */
	val = (u64) rxr->rx_desc_mapping[0] >> 32;
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);

	val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff;
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);

	/* Pre-fill the page ring; a partial fill only warns. */
	ring_prod = prod = rxr->rx_pg_prod;
	for (i = 0; i < bp->rx_pg_ring_size; i++) {
		if (bnx2_alloc_rx_page(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
			netdev_warn(bp->dev, "init'ed rx page ring %d with %d/%d pages only\n",
				    ring_num, i, bp->rx_pg_ring_size);
			break;
		}
		prod = BNX2_NEXT_RX_BD(prod);
		ring_prod = BNX2_RX_PG_RING_IDX(prod);
	}
	rxr->rx_pg_prod = prod;

	/* Pre-fill the regular rx ring with data buffers. */
	ring_prod = prod = rxr->rx_prod;
	for (i = 0; i < bp->rx_ring_size; i++) {
		if (bnx2_alloc_rx_data(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
			netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n",
				    ring_num, i, bp->rx_ring_size);
			break;
		}
		prod = BNX2_NEXT_RX_BD(prod);
		ring_prod = BNX2_RX_RING_IDX(prod);
	}
	rxr->rx_prod = prod;

	/* Mailbox addresses for publishing producer indices. */
	rxr->rx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BDIDX;
	rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ;
	rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX;

	BNX2_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
	BNX2_WR16(bp, rxr->rx_bidx_addr, prod);

	BNX2_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
}
5328
/* (Re)initialize every tx and rx ring, then program TSS/RSS steering
 * when more than one ring of the respective kind is in use.
 */
static void
bnx2_init_all_rings(struct bnx2 *bp)
{
	int i;
	u32 val;

	bnx2_clear_ring_states(bp);

	BNX2_WR(bp, BNX2_TSCH_TSS_CFG, 0);
	for (i = 0; i < bp->num_tx_rings; i++)
		bnx2_init_tx_ring(bp, i);

	if (bp->num_tx_rings > 1)
		BNX2_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
			(TX_TSS_CID << 7));

	BNX2_WR(bp, BNX2_RLUP_RSS_CONFIG, 0);
	bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0);

	for (i = 0; i < bp->num_rx_rings; i++)
		bnx2_init_rx_ring(bp, i);

	if (bp->num_rx_rings > 1) {
		u32 tbl_32 = 0;

		/* Build the RSS indirection table, packing eight 4-bit
		 * ring indices per 32-bit word and flushing each full
		 * word to the chip. */
		for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
			int shift = (i % 8) << 2;

			tbl_32 |= (i % (bp->num_rx_rings - 1)) << shift;
			if ((i % 8) == 7) {
				BNX2_WR(bp, BNX2_RLUP_RSS_DATA, tbl_32);
				BNX2_WR(bp, BNX2_RLUP_RSS_COMMAND, (i >> 3) |
					BNX2_RLUP_RSS_COMMAND_RSS_WRITE_MASK |
					BNX2_RLUP_RSS_COMMAND_WRITE |
					BNX2_RLUP_RSS_COMMAND_HASH_MASK);
				tbl_32 = 0;
			}
		}

		/* Enable RSS hashing for IPv4 and IPv6. */
		val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI |
		      BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI;

		BNX2_WR(bp, BNX2_RLUP_RSS_CONFIG, val);

	}
}
5375
5376 static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
5377 {
5378         u32 max, num_rings = 1;
5379
5380         while (ring_size > BNX2_MAX_RX_DESC_CNT) {
5381                 ring_size -= BNX2_MAX_RX_DESC_CNT;
5382                 num_rings++;
5383         }
5384         /* round to next power of 2 */
5385         max = max_size;
5386         while ((max & num_rings) == 0)
5387                 max >>= 1;
5388
5389         if (num_rings != max)
5390                 max <<= 1;
5391
5392         return max;
5393 }
5394
/* Derive all rx buffer/ring sizing fields in @bp from the current MTU
 * and the requested ring size @size, enabling the jumbo page ring when
 * a full frame (plus skb overhead) no longer fits in one page.
 */
static void
bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
{
	u32 rx_size, rx_space, jumbo_size;

	/* 8 for CRC and VLAN */
	rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;

	/* Total allocation a single rx buffer would need, including
	 * alignment and shared-info overhead for build_skb(). */
	rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
	bp->rx_pg_ring_size = 0;
	bp->rx_max_pg_ring = 0;
	bp->rx_max_pg_ring_idx = 0;
	if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
		/* NOTE(review): the 40 looks like IP + TCP header bytes
		 * excluded from the paged portion — confirm. */
		int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;

		jumbo_size = size * pages;
		if (jumbo_size > BNX2_MAX_TOTAL_RX_PG_DESC_CNT)
			jumbo_size = BNX2_MAX_TOTAL_RX_PG_DESC_CNT;

		bp->rx_pg_ring_size = jumbo_size;
		bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
							BNX2_MAX_RX_PG_RINGS);
		bp->rx_max_pg_ring_idx =
			(bp->rx_max_pg_ring * BNX2_RX_DESC_CNT) - 1;
		/* In jumbo mode the first buffer holds only the header. */
		rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
		bp->rx_copy_thresh = 0;
	}

	bp->rx_buf_use_size = rx_size;
	/* hw alignment + build_skb() overhead*/
	bp->rx_buf_size = SKB_DATA_ALIGN(bp->rx_buf_use_size + BNX2_RX_ALIGN) +
		NET_SKB_PAD + SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
	bp->rx_ring_size = size;
	bp->rx_max_ring = bnx2_find_max_ring(size, BNX2_MAX_RX_RINGS);
	bp->rx_max_ring_idx = (bp->rx_max_ring * BNX2_RX_DESC_CNT) - 1;
}
5435
/* Unmap and free every tx skb still held on the tx rings, then reset
 * each tx queue's accounting via netdev_tx_reset_queue().  Assumes the
 * hardware is quiesced (no concurrent tx completion).
 */
static void
bnx2_free_tx_skbs(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_tx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
		int j;

		/* Ring not allocated — nothing to free on it. */
		if (txr->tx_buf_ring == NULL)
			continue;

		for (j = 0; j < BNX2_TX_DESC_CNT; ) {
			struct bnx2_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
			struct sk_buff *skb = tx_buf->skb;
			int k, last;

			if (skb == NULL) {
				j = BNX2_NEXT_TX_BD(j);
				continue;
			}

			/* Unmap the linear part first... */
			dma_unmap_single(&bp->pdev->dev,
					 dma_unmap_addr(tx_buf, mapping),
					 skb_headlen(skb),
					 PCI_DMA_TODEVICE);

			tx_buf->skb = NULL;

			/* ...then each fragment bd that follows it. */
			last = tx_buf->nr_frags;
			j = BNX2_NEXT_TX_BD(j);
			for (k = 0; k < last; k++, j = BNX2_NEXT_TX_BD(j)) {
				tx_buf = &txr->tx_buf_ring[BNX2_TX_RING_IDX(j)];
				dma_unmap_page(&bp->pdev->dev,
					dma_unmap_addr(tx_buf, mapping),
					skb_frag_size(&skb_shinfo(skb)->frags[k]),
					PCI_DMA_TODEVICE);
			}
			dev_kfree_skb(skb);
		}
		netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
	}
}
5480
5481 static void
5482 bnx2_free_rx_skbs(struct bnx2 *bp)
5483 {
5484         int i;
5485
5486         for (i = 0; i < bp->num_rx_rings; i++) {
5487                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5488                 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5489                 int j;
5490
5491                 if (rxr->rx_buf_ring == NULL)
5492                         return;
5493
5494                 for (j = 0; j < bp->rx_max_ring_idx; j++) {
5495                         struct bnx2_sw_bd *rx_buf = &rxr->rx_buf_ring[j];
5496                         u8 *data = rx_buf->data;
5497
5498                         if (data == NULL)
5499                                 continue;
5500
5501                         dma_unmap_single(&bp->pdev->dev,
5502                                          dma_unmap_addr(rx_buf, mapping),
5503                                          bp->rx_buf_use_size,
5504                                          PCI_DMA_FROMDEVICE);
5505
5506                         rx_buf->data = NULL;
5507
5508                         kfree(data);
5509                 }
5510                 for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
5511                         bnx2_free_rx_page(bp, rxr, j);
5512         }
5513 }
5514
/* Free all tx and rx buffers owned by the driver (used around resets). */
static void
bnx2_free_skbs(struct bnx2 *bp)
{
	bnx2_free_tx_skbs(bp);
	bnx2_free_rx_skbs(bp);
}
5521
5522 static int
5523 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
5524 {
5525         int rc;
5526
5527         rc = bnx2_reset_chip(bp, reset_code);
5528         bnx2_free_skbs(bp);
5529         if (rc)
5530                 return rc;
5531
5532         if ((rc = bnx2_init_chip(bp)) != 0)
5533                 return rc;
5534
5535         bnx2_init_all_rings(bp);
5536         return 0;
5537 }
5538
5539 static int
5540 bnx2_init_nic(struct bnx2 *bp, int reset_phy)
5541 {
5542         int rc;
5543
5544         if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
5545                 return rc;
5546
5547         spin_lock_bh(&bp->phy_lock);
5548         bnx2_init_phy(bp, reset_phy);
5549         bnx2_set_link(bp);
5550         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5551                 bnx2_remote_phy_event(bp);
5552         spin_unlock_bh(&bp->phy_lock);
5553         return 0;
5554 }
5555
5556 static int
5557 bnx2_shutdown_chip(struct bnx2 *bp)
5558 {
5559         u32 reset_code;
5560
5561         if (bp->flags & BNX2_FLAG_NO_WOL)
5562                 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
5563         else if (bp->wol)
5564                 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5565         else
5566                 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5567
5568         return bnx2_reset_chip(bp, reset_code);
5569 }
5570
/* Ethtool self-test: exercise selected chip registers.  Each table
 * entry carries a read/write mask (bits that must follow what is
 * written) and a read-only mask (bits that must keep their original
 * value).  Entries flagged BNX2_FL_NOT_5709 are skipped on the 5709.
 * Returns 0, or -ENODEV on the first mismatch (the register's original
 * value is restored either way).
 */
static int
bnx2_test_registers(struct bnx2 *bp)
{
	int ret;
	int i, is_5709;
	static const struct {
		u16   offset;
		u16   flags;
#define BNX2_FL_NOT_5709        1
		u32   rw_mask;
		u32   ro_mask;
	} reg_tbl[] = {
		{ 0x006c, 0, 0x00000000, 0x0000003f },
		{ 0x0090, 0, 0xffffffff, 0x00000000 },
		{ 0x0094, 0, 0x00000000, 0x00000000 },

		{ 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
		{ 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
		{ 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
		{ 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
		{ 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },

		{ 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },

		{ 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
		{ 0x0c08, BNX2_FL_NOT_5709,  0x0f0ff073, 0x00000000 },

		{ 0x1000, 0, 0x00000000, 0x00000001 },
		{ 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 },

		{ 0x1408, 0, 0x01c00800, 0x00000000 },
		{ 0x149c, 0, 0x8000ffff, 0x00000000 },
		{ 0x14a8, 0, 0x00000000, 0x000001ff },
		{ 0x14ac, 0, 0x0fffffff, 0x10000000 },
		{ 0x14b0, 0, 0x00000002, 0x00000001 },
		{ 0x14b8, 0, 0x00000000, 0x00000000 },
		{ 0x14c0, 0, 0x00000000, 0x00000009 },
		{ 0x14c4, 0, 0x00003fff, 0x00000000 },
		{ 0x14cc, 0, 0x00000000, 0x00000001 },
		{ 0x14d0, 0, 0xffffffff, 0x00000000 },

		{ 0x1800, 0, 0x00000000, 0x00000001 },
		{ 0x1804, 0, 0x00000000, 0x00000003 },

		{ 0x2800, 0, 0x00000000, 0x00000001 },
		{ 0x2804, 0, 0x00000000, 0x00003f01 },
		{ 0x2808, 0, 0x0f3f3f03, 0x00000000 },
		{ 0x2810, 0, 0xffff0000, 0x00000000 },
		{ 0x2814, 0, 0xffff0000, 0x00000000 },
		{ 0x2818, 0, 0xffff0000, 0x00000000 },
		{ 0x281c, 0, 0xffff0000, 0x00000000 },
		{ 0x2834, 0, 0xffffffff, 0x00000000 },
		{ 0x2840, 0, 0x00000000, 0xffffffff },
		{ 0x2844, 0, 0x00000000, 0xffffffff },
		{ 0x2848, 0, 0xffffffff, 0x00000000 },
		{ 0x284c, 0, 0xf800f800, 0x07ff07ff },

		{ 0x2c00, 0, 0x00000000, 0x00000011 },
		{ 0x2c04, 0, 0x00000000, 0x00030007 },

		{ 0x3c00, 0, 0x00000000, 0x00000001 },
		{ 0x3c04, 0, 0x00000000, 0x00070000 },
		{ 0x3c08, 0, 0x00007f71, 0x07f00000 },
		{ 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
		{ 0x3c10, 0, 0xffffffff, 0x00000000 },
		{ 0x3c14, 0, 0x00000000, 0xffffffff },
		{ 0x3c18, 0, 0x00000000, 0xffffffff },
		{ 0x3c1c, 0, 0xfffff000, 0x00000000 },
		{ 0x3c20, 0, 0xffffff00, 0x00000000 },

		{ 0x5004, 0, 0x00000000, 0x0000007f },
		{ 0x5008, 0, 0x0f0007ff, 0x00000000 },

		{ 0x5c00, 0, 0x00000000, 0x00000001 },
		{ 0x5c04, 0, 0x00000000, 0x0003000f },
		{ 0x5c08, 0, 0x00000003, 0x00000000 },
		{ 0x5c0c, 0, 0x0000fff8, 0x00000000 },
		{ 0x5c10, 0, 0x00000000, 0xffffffff },
		{ 0x5c80, 0, 0x00000000, 0x0f7113f1 },
		{ 0x5c84, 0, 0x00000000, 0x0000f333 },
		{ 0x5c88, 0, 0x00000000, 0x00077373 },
		{ 0x5c8c, 0, 0x00000000, 0x0007f737 },

		{ 0x6808, 0, 0x0000ff7f, 0x00000000 },
		{ 0x680c, 0, 0xffffffff, 0x00000000 },
		{ 0x6810, 0, 0xffffffff, 0x00000000 },
		{ 0x6814, 0, 0xffffffff, 0x00000000 },
		{ 0x6818, 0, 0xffffffff, 0x00000000 },
		{ 0x681c, 0, 0xffffffff, 0x00000000 },
		{ 0x6820, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6824, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6828, 0, 0x00ff00ff, 0x00000000 },
		{ 0x682c, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6830, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6834, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6838, 0, 0x03ff03ff, 0x00000000 },
		{ 0x683c, 0, 0x0000ffff, 0x00000000 },
		{ 0x6840, 0, 0x00000ff0, 0x00000000 },
		{ 0x6844, 0, 0x00ffff00, 0x00000000 },
		{ 0x684c, 0, 0xffffffff, 0x00000000 },
		{ 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6908, 0, 0x00000000, 0x0001ff0f },
		{ 0x690c, 0, 0x00000000, 0x0ffe00f0 },

		/* Sentinel terminating the table. */
		{ 0xffff, 0, 0x00000000, 0x00000000 },
	};

	ret = 0;
	is_5709 = 0;
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
		is_5709 = 1;

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		u32 offset, rw_mask, ro_mask, save_val, val;
		u16 flags = reg_tbl[i].flags;

		if (is_5709 && (flags & BNX2_FL_NOT_5709))
			continue;

		offset = (u32) reg_tbl[i].offset;
		rw_mask = reg_tbl[i].rw_mask;
		ro_mask = reg_tbl[i].ro_mask;

		save_val = readl(bp->regview + offset);

		/* Pass 1: write zero — rw bits must read back 0, ro bits
		 * must keep their saved value. */
		writel(0, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != 0) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		/* Pass 2: write all ones — rw bits must read back 1, ro
		 * bits must still keep their saved value. */
		writel(0xffffffff, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != rw_mask) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		writel(save_val, bp->regview + offset);
		continue;

reg_test_err:
		/* Restore the register before reporting the failure. */
		writel(save_val, bp->regview + offset);
		ret = -ENODEV;
		break;
	}
	return ret;
}
5741
5742 static int
5743 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
5744 {
5745         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
5746                 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
5747         int i;
5748
5749         for (i = 0; i < sizeof(test_pattern) / 4; i++) {
5750                 u32 offset;
5751
5752                 for (offset = 0; offset < size; offset += 4) {
5753
5754                         bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);
5755
5756                         if (bnx2_reg_rd_ind(bp, start + offset) !=
5757                                 test_pattern[i]) {
5758                                 return -ENODEV;
5759                         }
5760                 }
5761         }
5762         return 0;
5763 }
5764
5765 static int
5766 bnx2_test_memory(struct bnx2 *bp)
5767 {
5768         int ret = 0;
5769         int i;
5770         static struct mem_entry {
5771                 u32   offset;
5772                 u32   len;
5773         } mem_tbl_5706[] = {
5774                 { 0x60000,  0x4000 },
5775                 { 0xa0000,  0x3000 },
5776                 { 0xe0000,  0x4000 },
5777                 { 0x120000, 0x4000 },
5778                 { 0x1a0000, 0x4000 },
5779                 { 0x160000, 0x4000 },
5780                 { 0xffffffff, 0    },
5781         },
5782         mem_tbl_5709[] = {
5783                 { 0x60000,  0x4000 },
5784                 { 0xa0000,  0x3000 },
5785                 { 0xe0000,  0x4000 },
5786                 { 0x120000, 0x4000 },
5787                 { 0x1a0000, 0x4000 },
5788                 { 0xffffffff, 0    },
5789         };
5790         struct mem_entry *mem_tbl;
5791
5792         if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
5793                 mem_tbl = mem_tbl_5709;
5794         else
5795                 mem_tbl = mem_tbl_5706;
5796
5797         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
5798                 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
5799                         mem_tbl[i].len)) != 0) {
5800                         return ret;
5801                 }
5802         }
5803
5804         return ret;
5805 }
5806
5807 #define BNX2_MAC_LOOPBACK       0
5808 #define BNX2_PHY_LOOPBACK       1
5809
/* Send one self-addressed test frame through MAC or PHY loopback and
 * verify it comes back intact on RX ring 0.
 *
 * @bp:            driver context
 * @loopback_mode: BNX2_MAC_LOOPBACK or BNX2_PHY_LOOPBACK
 *
 * Returns 0 on success, -EINVAL for an unknown mode, -ENOMEM/-EIO on
 * allocation/mapping failure, and -ENODEV when the frame is not seen
 * or is corrupted.  PHY loopback is skipped (returns 0) on remote-PHY
 * devices.  Caller must have the NIC initialized and interrupts quiet;
 * completion is polled via forced coalescing, not interrupts.
 */
static int
bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb;
	u8 *data;
	unsigned char *packet;
	u16 rx_start_idx, rx_idx;
	dma_addr_t map;
	struct bnx2_tx_bd *txbd;
	struct bnx2_sw_bd *rx_buf;
	struct l2_fhdr *rx_hdr;
	int ret = -ENODEV;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

	/* The test always runs on vector 0's rings. */
	tx_napi = bnapi;

	txr = &tx_napi->tx_ring;
	rxr = &bnapi->rx_ring;
	if (loopback_mode == BNX2_MAC_LOOPBACK) {
		bp->loopback = MAC_LOOPBACK;
		bnx2_set_mac_loopback(bp);
	}
	else if (loopback_mode == BNX2_PHY_LOOPBACK) {
		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return 0;

		bp->loopback = PHY_LOOPBACK;
		bnx2_set_phy_loopback(bp);
	}
	else
		return -EINVAL;

	/* Keep the frame below the jumbo threshold (minus CRC). */
	pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
	skb = netdev_alloc_skb(bp->dev, pkt_size);
	if (!skb)
		return -ENOMEM;
	packet = skb_put(skb, pkt_size);
	/* DA = our own MAC, SA/type zeroed, payload = index pattern
	 * starting after the 14-byte Ethernet header.
	 */
	memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
	memset(packet + ETH_ALEN, 0x0, 8);
	for (i = 14; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	map = dma_map_single(&bp->pdev->dev, skb->data, pkt_size,
			     PCI_DMA_TODEVICE);
	if (dma_mapping_error(&bp->pdev->dev, map)) {
		dev_kfree_skb(skb);
		return -EIO;
	}

	/* Force a coalescing pass (no interrupt) so the status block
	 * reflects the current RX consumer index before we send.
	 */
	BNX2_WR(bp, BNX2_HC_COMMAND,
		bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	BNX2_RD(bp, BNX2_HC_COMMAND);

	udelay(5);
	rx_start_idx = bnx2_get_hw_rx_cons(bnapi);

	num_pkts = 0;

	/* Hand-build a single start+end TX descriptor for the frame. */
	txbd = &txr->tx_desc_ring[BNX2_TX_RING_IDX(txr->tx_prod)];

	txbd->tx_bd_haddr_hi = (u64) map >> 32;
	txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
	txbd->tx_bd_mss_nbytes = pkt_size;
	txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;

	num_pkts++;
	txr->tx_prod = BNX2_NEXT_TX_BD(txr->tx_prod);
	txr->tx_prod_bseq += pkt_size;

	/* Ring the TX doorbell. */
	BNX2_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
	BNX2_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);

	/* Give the chip time to loop the frame back. */
	udelay(100);

	/* Second forced coalescing pass to refresh TX/RX indices. */
	BNX2_WR(bp, BNX2_HC_COMMAND,
		bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	BNX2_RD(bp, BNX2_HC_COMMAND);

	udelay(5);

	dma_unmap_single(&bp->pdev->dev, map, pkt_size, PCI_DMA_TODEVICE);
	dev_kfree_skb(skb);

	/* TX must have completed... */
	if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
		goto loopback_test_done;

	/* ...and exactly num_pkts frames must have arrived. */
	rx_idx = bnx2_get_hw_rx_cons(bnapi);
	if (rx_idx != rx_start_idx + num_pkts) {
		goto loopback_test_done;
	}

	rx_buf = &rxr->rx_buf_ring[rx_start_idx];
	data = rx_buf->data;

	/* Frame header precedes the payload in the RX buffer. */
	rx_hdr = get_l2_fhdr(data);
	data = (u8 *)rx_hdr + BNX2_RX_OFFSET;

	dma_sync_single_for_cpu(&bp->pdev->dev,
		dma_unmap_addr(rx_buf, mapping),
		bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);

	/* Any RX error bit fails the test. */
	if (rx_hdr->l2_fhdr_status &
		(L2_FHDR_ERRORS_BAD_CRC |
		L2_FHDR_ERRORS_PHY_DECODE |
		L2_FHDR_ERRORS_ALIGNMENT |
		L2_FHDR_ERRORS_TOO_SHORT |
		L2_FHDR_ERRORS_GIANT_FRAME)) {

		goto loopback_test_done;
	}

	/* Length check: received length includes the 4-byte CRC. */
	if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
		goto loopback_test_done;
	}

	/* Verify the payload pattern byte-for-byte. */
	for (i = 14; i < pkt_size; i++) {
		if (*(data + i) != (unsigned char) (i & 0xff)) {
			goto loopback_test_done;
		}
	}

	ret = 0;

loopback_test_done:
	bp->loopback = 0;
	return ret;
}
5942
5943 #define BNX2_MAC_LOOPBACK_FAILED        1
5944 #define BNX2_PHY_LOOPBACK_FAILED        2
5945 #define BNX2_LOOPBACK_FAILED            (BNX2_MAC_LOOPBACK_FAILED |     \
5946                                          BNX2_PHY_LOOPBACK_FAILED)
5947
5948 static int
5949 bnx2_test_loopback(struct bnx2 *bp)
5950 {
5951         int rc = 0;
5952
5953         if (!netif_running(bp->dev))
5954                 return BNX2_LOOPBACK_FAILED;
5955
5956         bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5957         spin_lock_bh(&bp->phy_lock);
5958         bnx2_init_phy(bp, 1);
5959         spin_unlock_bh(&bp->phy_lock);
5960         if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5961                 rc |= BNX2_MAC_LOOPBACK_FAILED;
5962         if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5963                 rc |= BNX2_PHY_LOOPBACK_FAILED;
5964         return rc;
5965 }
5966
5967 #define NVRAM_SIZE 0x200
5968 #define CRC32_RESIDUAL 0xdebb20e3
5969
5970 static int
5971 bnx2_test_nvram(struct bnx2 *bp)
5972 {
5973         __be32 buf[NVRAM_SIZE / 4];
5974         u8 *data = (u8 *) buf;
5975         int rc = 0;
5976         u32 magic, csum;
5977
5978         if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5979                 goto test_nvram_done;
5980
5981         magic = be32_to_cpu(buf[0]);
5982         if (magic != 0x669955aa) {
5983                 rc = -ENODEV;
5984                 goto test_nvram_done;
5985         }
5986
5987         if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5988                 goto test_nvram_done;
5989
5990         csum = ether_crc_le(0x100, data);
5991         if (csum != CRC32_RESIDUAL) {
5992                 rc = -ENODEV;
5993                 goto test_nvram_done;
5994         }
5995
5996         csum = ether_crc_le(0x100, data + 0x100);
5997         if (csum != CRC32_RESIDUAL) {
5998                 rc = -ENODEV;
5999         }
6000
6001 test_nvram_done:
6002         return rc;
6003 }
6004
6005 static int
6006 bnx2_test_link(struct bnx2 *bp)
6007 {
6008         u32 bmsr;
6009
6010         if (!netif_running(bp->dev))
6011                 return -ENODEV;
6012
6013         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
6014                 if (bp->link_up)
6015                         return 0;
6016                 return -ENODEV;
6017         }
6018         spin_lock_bh(&bp->phy_lock);
6019         bnx2_enable_bmsr1(bp);
6020         bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
6021         bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
6022         bnx2_disable_bmsr1(bp);
6023         spin_unlock_bh(&bp->phy_lock);
6024
6025         if (bmsr & BMSR_LSTATUS) {
6026                 return 0;
6027         }
6028         return -ENODEV;
6029 }
6030
6031 static int
6032 bnx2_test_intr(struct bnx2 *bp)
6033 {
6034         int i;
6035         u16 status_idx;
6036
6037         if (!netif_running(bp->dev))
6038                 return -ENODEV;
6039
6040         status_idx = BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
6041
6042         /* This register is not touched during run-time. */
6043         BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
6044         BNX2_RD(bp, BNX2_HC_COMMAND);
6045
6046         for (i = 0; i < 10; i++) {
6047                 if ((BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
6048                         status_idx) {
6049
6050                         break;
6051                 }
6052
6053                 msleep_interruptible(10);
6054         }
6055         if (i < 10)
6056                 return 0;
6057
6058         return -ENODEV;
6059 }
6060
/* Determining link for parallel detection.
 *
 * Decide whether the 5706 SerDes can treat the partner as a usable
 * forced-mode link: signal must be detected, the AN debug register
 * must show neither NOSYNC nor invalid RUDI, and the partner must not
 * be sending CONFIG words.  The AN_DBG and expansion registers are
 * each read twice; presumably the first read returns latched state and
 * the second the current state — TODO confirm against PHY docs.
 *
 * Returns 1 when parallel-detect link is present, 0 otherwise (always
 * 0 when parallel detection is disabled via BNX2_PHY_FLAG_NO_PARALLEL).
 */
static int
bnx2_5706_serdes_has_link(struct bnx2 *bp)
{
	u32 mode_ctl, an_dbg, exp;

	if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
		return 0;

	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);

	/* No signal detected means no link at all. */
	if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
		return 0;

	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

	if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
		return 0;

	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);

	if (exp & MII_EXPAND_REG1_RUDI_C)	/* receiving CONFIG */
		return 0;

	return 1;
}
6092
/* Per-tick SerDes link state machine for the 5706, called from
 * bnx2_timer() under softirq; takes bp->phy_lock.  Handles parallel
 * detection (falling back to forced 1G when the partner does not
 * autoneg, and returning to autoneg when it later does) and forces
 * the link down on loss of sync.
 */
static void
bnx2_5706_serdes_timer(struct bnx2 *bp)
{
	int check_link = 1;

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending) {
		/* Autoneg was recently (re)started; give it more time
		 * before second-guessing the link state.
		 */
		bp->serdes_an_pending--;
		check_link = 0;
	} else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bp->current_interval = BNX2_TIMER_INTERVAL;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

		if (bmcr & BMCR_ANENABLE) {
			/* Autoneg hasn't brought the link up; if the
			 * partner looks like a forced-mode link, switch
			 * to forced 1G full duplex (parallel detect).
			 */
			if (bnx2_5706_serdes_has_link(bp)) {
				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
				bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
			}
		}
	}
	else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
		 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
		u32 phy2;

		/* Link is up via parallel detect.  NOTE(review): raw
		 * registers 0x17/0x15 and bit 0x20 are undocumented
		 * here; presumably this detects the partner starting
		 * autoneg — confirm against the PHY datasheet.
		 */
		bnx2_write_phy(bp, 0x17, 0x0f01);
		bnx2_read_phy(bp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			/* Partner negotiates again: re-enable autoneg. */
			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
	} else
		bp->current_interval = BNX2_TIMER_INTERVAL;

	if (check_link) {
		u32 val;

		/* AN_DBG is read twice; the first read may return
		 * latched state — the second reflects "now".
		 */
		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);

		if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
			/* Sync lost: force the link down once, then let
			 * bnx2_set_link() re-evaluate on later ticks.
			 */
			if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
				bnx2_5706s_force_link_dn(bp, 1);
				bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
			} else
				bnx2_set_link(bp);
		} else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
			bnx2_set_link(bp);
	}
	spin_unlock(&bp->phy_lock);
}
6154
/* Per-tick SerDes link state machine for the 5708, called from
 * bnx2_timer() under softirq.  Only relevant for local, 2.5G-capable
 * PHYs: while the link stays down with autoneg enabled, alternates
 * between forced 2.5G and autoneg so partners in either mode can
 * eventually connect.
 */
static void
bnx2_5708_serdes_timer(struct bnx2 *bp)
{
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return;

	if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
		bp->serdes_an_pending = 0;
		return;
	}

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending)
		/* Recently switched back to autoneg; wait it out. */
		bp->serdes_an_pending--;
	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			/* Try forced 2.5G with a shorter retry timer. */
			bnx2_enable_forced_2g5(bp);
			bp->current_interval = BNX2_SERDES_FORCED_TIMEOUT;
		} else {
			/* Forced mode failed too; go back to autoneg
			 * and give it two normal ticks.
			 */
			bnx2_disable_forced_2g5(bp);
			bp->serdes_an_pending = 2;
			bp->current_interval = BNX2_TIMER_INTERVAL;
		}

	} else
		bp->current_interval = BNX2_TIMER_INTERVAL;

	spin_unlock(&bp->phy_lock);
}
6187
/* Periodic driver timer (softirq context).  Sends the firmware
 * heartbeat, refreshes the firmware RX-drop counter, applies the
 * broken-stats and missed-MSI workarounds, runs the chip-specific
 * SerDes state machine, and re-arms itself at bp->current_interval.
 */
static void
bnx2_timer(unsigned long data)
{
	struct bnx2 *bp = (struct bnx2 *) data;

	if (!netif_running(bp->dev))
		return;

	/* intr_sem is non-zero while a reset is pending (see
	 * bnx2_reset_task); skip the work but keep the timer alive.
	 */
	if (atomic_read(&bp->intr_sem) != 0)
		goto bnx2_restart_timer;

	/* Missed-MSI check applies only to plain MSI, not one-shot. */
	if ((bp->flags & (BNX2_FLAG_USING_MSI | BNX2_FLAG_ONE_SHOT_MSI)) ==
	     BNX2_FLAG_USING_MSI)
		bnx2_chk_missed_msi(bp);

	bnx2_send_heart_beat(bp);

	bp->stats_blk->stat_FwRxDrop =
		bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT);

	/* workaround occasional corrupted counters */
	if ((bp->flags & BNX2_FLAG_BROKEN_STATS) && bp->stats_ticks)
		BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
			BNX2_HC_COMMAND_STATS_NOW);

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
			bnx2_5706_serdes_timer(bp);
		else
			bnx2_5708_serdes_timer(bp);
	}

bnx2_restart_timer:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}
6223
6224 static int
6225 bnx2_request_irq(struct bnx2 *bp)
6226 {
6227         unsigned long flags;
6228         struct bnx2_irq *irq;
6229         int rc = 0, i;
6230
6231         if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)
6232                 flags = 0;
6233         else
6234                 flags = IRQF_SHARED;
6235
6236         for (i = 0; i < bp->irq_nvecs; i++) {
6237                 irq = &bp->irq_tbl[i];
6238                 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
6239                                  &bp->bnx2_napi[i]);
6240                 if (rc)
6241                         break;
6242                 irq->requested = 1;
6243         }
6244         return rc;
6245 }
6246
6247 static void
6248 __bnx2_free_irq(struct bnx2 *bp)
6249 {
6250         struct bnx2_irq *irq;
6251         int i;
6252
6253         for (i = 0; i < bp->irq_nvecs; i++) {
6254                 irq = &bp->irq_tbl[i];
6255                 if (irq->requested)
6256                         free_irq(irq->vector, &bp->bnx2_napi[i]);
6257                 irq->requested = 0;
6258         }
6259 }
6260
6261 static void
6262 bnx2_free_irq(struct bnx2 *bp)
6263 {
6264
6265         __bnx2_free_irq(bp);
6266         if (bp->flags & BNX2_FLAG_USING_MSI)
6267                 pci_disable_msi(bp->pdev);
6268         else if (bp->flags & BNX2_FLAG_USING_MSIX)
6269                 pci_disable_msix(bp->pdev);
6270
6271         bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI);
6272 }
6273
/* Try to put the device into MSI-X mode with up to @msix_vecs vectors
 * (plus one extra for CNIC when compiled in).  On success, sets
 * BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI and fills bp->irq_tbl;
 * on failure returns silently, leaving the INTx defaults installed by
 * bnx2_setup_int_mode() untouched.
 */
static void
bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
{
	int i, total_vecs;
	struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
	struct net_device *dev = bp->dev;
	const int len = sizeof(bp->irq_tbl[0].name);

	bnx2_setup_msix_tbl(bp);
	BNX2_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
	BNX2_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
	BNX2_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);

	/*  Need to flush the previous three writes to ensure MSI-X
	 *  is setup properly */
	BNX2_RD(bp, BNX2_PCI_MSIX_CONTROL);

	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
		msix_ent[i].entry = i;
		msix_ent[i].vector = 0;
	}

	/* CNIC claims one extra vector beyond the net-device rings. */
	total_vecs = msix_vecs;
#ifdef BCM_CNIC
	total_vecs++;
#endif
	total_vecs = pci_enable_msix_range(bp->pdev, msix_ent,
					   BNX2_MIN_MSIX_VEC, total_vecs);
	if (total_vecs < 0)
		return;

	/* We may have been granted fewer vectors than requested; the
	 * CNIC vector (if any) is carved back out of the total.
	 */
	msix_vecs = total_vecs;
#ifdef BCM_CNIC
	msix_vecs--;
#endif
	bp->irq_nvecs = msix_vecs;
	bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
	for (i = 0; i < total_vecs; i++) {
		bp->irq_tbl[i].vector = msix_ent[i].vector;
		snprintf(bp->irq_tbl[i].name, len, "%s-%d", dev->name, i);
		bp->irq_tbl[i].handler = bnx2_msi_1shot;
	}
}
6317
6318 static int
6319 bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
6320 {
6321         int cpus = netif_get_num_default_rss_queues();
6322         int msix_vecs;
6323
6324         if (!bp->num_req_rx_rings)
6325                 msix_vecs = max(cpus + 1, bp->num_req_tx_rings);
6326         else if (!bp->num_req_tx_rings)
6327                 msix_vecs = max(cpus, bp->num_req_rx_rings);
6328         else
6329                 msix_vecs = max(bp->num_req_rx_rings, bp->num_req_tx_rings);
6330
6331         msix_vecs = min(msix_vecs, RX_MAX_RINGS);
6332
6333         bp->irq_tbl[0].handler = bnx2_interrupt;
6334         strcpy(bp->irq_tbl[0].name, bp->dev->name);
6335         bp->irq_nvecs = 1;
6336         bp->irq_tbl[0].vector = bp->pdev->irq;
6337
6338         if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi)
6339                 bnx2_enable_msix(bp, msix_vecs);
6340
6341         if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
6342             !(bp->flags & BNX2_FLAG_USING_MSIX)) {
6343                 if (pci_enable_msi(bp->pdev) == 0) {
6344                         bp->flags |= BNX2_FLAG_USING_MSI;
6345                         if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
6346                                 bp->flags |= BNX2_FLAG_ONE_SHOT_MSI;
6347                                 bp->irq_tbl[0].handler = bnx2_msi_1shot;
6348                         } else
6349                                 bp->irq_tbl[0].handler = bnx2_msi;
6350
6351                         bp->irq_tbl[0].vector = bp->pdev->irq;
6352                 }
6353         }
6354
6355         if (!bp->num_req_tx_rings)
6356                 bp->num_tx_rings = rounddown_pow_of_two(bp->irq_nvecs);
6357         else
6358                 bp->num_tx_rings = min(bp->irq_nvecs, bp->num_req_tx_rings);
6359
6360         if (!bp->num_req_rx_rings)
6361                 bp->num_rx_rings = bp->irq_nvecs;
6362         else
6363                 bp->num_rx_rings = min(bp->irq_nvecs, bp->num_req_rx_rings);
6364
6365         netif_set_real_num_tx_queues(bp->dev, bp->num_tx_rings);
6366
6367         return netif_set_real_num_rx_queues(bp->dev, bp->num_rx_rings);
6368 }
6369
/* Called with rtnl_lock */
/* ndo_open: bring the interface up.  Loads firmware, picks an
 * interrupt mode, allocates rings/IRQs, initializes the NIC, and
 * starts the periodic timer.  If MSI is in use, a one-shot interrupt
 * self-test is run and, on failure, the device is torn back to INTx
 * and re-initialized.  On any error, everything acquired so far is
 * released via the open_err path.  Returns 0 or a negative errno.
 */
static int
bnx2_open(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	rc = bnx2_request_firmware(bp);
	if (rc < 0)
		goto out;

	netif_carrier_off(dev);

	bnx2_disable_int(bp);

	rc = bnx2_setup_int_mode(bp, disable_msi);
	if (rc)
		goto open_err;
	bnx2_init_napi(bp);
	bnx2_napi_enable(bp);
	rc = bnx2_alloc_mem(bp);
	if (rc)
		goto open_err;

	rc = bnx2_request_irq(bp);
	if (rc)
		goto open_err;

	rc = bnx2_init_nic(bp, 1);
	if (rc)
		goto open_err;

	mod_timer(&bp->timer, jiffies + bp->current_interval);

	atomic_set(&bp->intr_sem, 0);

	memset(bp->temp_stats_blk, 0, sizeof(struct statistics_block));

	bnx2_enable_int(bp);

	if (bp->flags & BNX2_FLAG_USING_MSI) {
		/* Test MSI to make sure it is working
		 * If MSI test fails, go back to INTx mode
		 */
		if (bnx2_test_intr(bp) != 0) {
			netdev_warn(bp->dev, "No interrupt was generated using MSI, switching to INTx mode. Please report this failure to the PCI maintainer and include system chipset information.\n");

			bnx2_disable_int(bp);
			bnx2_free_irq(bp);

			/* dis_msi=1: force legacy INTx this time. */
			bnx2_setup_int_mode(bp, 1);

			rc = bnx2_init_nic(bp, 0);

			if (!rc)
				rc = bnx2_request_irq(bp);

			if (rc) {
				/* Timer was already started above; stop
				 * it before unwinding.
				 */
				del_timer_sync(&bp->timer);
				goto open_err;
			}
			bnx2_enable_int(bp);
		}
	}
	if (bp->flags & BNX2_FLAG_USING_MSI)
		netdev_info(dev, "using MSI\n");
	else if (bp->flags & BNX2_FLAG_USING_MSIX)
		netdev_info(dev, "using MSIX\n");

	netif_tx_start_all_queues(dev);
out:
	return rc;

open_err:
	/* Unwind everything acquired above (safe to call even for
	 * resources not yet allocated).
	 */
	bnx2_napi_disable(bp);
	bnx2_free_skbs(bp);
	bnx2_free_irq(bp);
	bnx2_free_mem(bp);
	bnx2_del_napi(bp);
	bnx2_release_firmware(bp);
	goto out;
}
6452
/* Deferred reset handler (scheduled from bnx2_tx_timeout()).  Runs in
 * process context under rtnl_lock: stops the netif, restores PCI
 * config space if the PCI block was reset out from under us, and
 * re-initializes the NIC.  Closes the device if the re-init fails.
 */
static void
bnx2_reset_task(struct work_struct *work)
{
	struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
	int rc;
	u16 pcicmd;

	rtnl_lock();
	if (!netif_running(bp->dev)) {
		rtnl_unlock();
		return;
	}

	bnx2_netif_stop(bp, true);

	pci_read_config_word(bp->pdev, PCI_COMMAND, &pcicmd);
	if (!(pcicmd & PCI_COMMAND_MEMORY)) {
		/* in case PCI block has reset */
		pci_restore_state(bp->pdev);
		pci_save_state(bp->pdev);
	}
	rc = bnx2_init_nic(bp, 1);
	if (rc) {
		netdev_err(bp->dev, "failed to reset NIC, closing\n");
		/* Re-enable NAPI so dev_close() can disable it cleanly. */
		bnx2_napi_enable(bp);
		dev_close(bp->dev);
		rtnl_unlock();
		return;
	}

	/* intr_sem = 1 keeps bnx2_timer() idle until interrupts are
	 * re-enabled by bnx2_netif_start().
	 */
	atomic_set(&bp->intr_sem, 1);
	bnx2_netif_start(bp, true);
	rtnl_unlock();
}
6487
6488 #define BNX2_FTQ_ENTRY(ftq) { __stringify(ftq##FTQ_CTL), BNX2_##ftq##FTQ_CTL }
6489
/* Debug dump of the flow-through queue (FTQ) control registers, the
 * on-chip CPU states, and the TX buffer descriptor cache (TBDC).
 * Called from bnx2_tx_timeout(); output goes to the kernel log.
 */
static void
bnx2_dump_ftq(struct bnx2 *bp)
{
	int i;
	u32 reg, bdidx, cid, valid;
	struct net_device *dev = bp->dev;
	static const struct ftq_reg {
		char *name;
		u32 off;
	} ftq_arr[] = {
		BNX2_FTQ_ENTRY(RV2P_P),
		BNX2_FTQ_ENTRY(RV2P_T),
		BNX2_FTQ_ENTRY(RV2P_M),
		BNX2_FTQ_ENTRY(TBDR_),
		BNX2_FTQ_ENTRY(TDMA_),
		BNX2_FTQ_ENTRY(TXP_),
		/* NOTE(review): TXP_ is listed twice — possibly a
		 * different queue was intended; confirm against the
		 * chip register map before changing.
		 */
		BNX2_FTQ_ENTRY(TXP_),
		BNX2_FTQ_ENTRY(TPAT_),
		BNX2_FTQ_ENTRY(RXP_C),
		BNX2_FTQ_ENTRY(RXP_),
		BNX2_FTQ_ENTRY(COM_COMXQ_),
		BNX2_FTQ_ENTRY(COM_COMTQ_),
		BNX2_FTQ_ENTRY(COM_COMQ_),
		BNX2_FTQ_ENTRY(CP_CPQ_),
	};

	netdev_err(dev, "<--- start FTQ dump --->\n");
	for (i = 0; i < ARRAY_SIZE(ftq_arr); i++)
		netdev_err(dev, "%s %08x\n", ftq_arr[i].name,
			   bnx2_reg_rd_ind(bp, ftq_arr[i].off));

	/* One CPU block every 0x40000; reg+0x1c (pc) is read twice —
	 * presumably to show whether the CPU is making progress
	 * between the two reads; confirm.
	 */
	netdev_err(dev, "CPU states:\n");
	for (reg = BNX2_TXP_CPU_MODE; reg <= BNX2_CP_CPU_MODE; reg += 0x40000)
		netdev_err(dev, "%06x mode %x state %x evt_mask %x pc %x pc %x instr %x\n",
			   reg, bnx2_reg_rd_ind(bp, reg),
			   bnx2_reg_rd_ind(bp, reg + 4),
			   bnx2_reg_rd_ind(bp, reg + 8),
			   bnx2_reg_rd_ind(bp, reg + 0x1c),
			   bnx2_reg_rd_ind(bp, reg + 0x1c),
			   bnx2_reg_rd_ind(bp, reg + 0x20));

	netdev_err(dev, "<--- end FTQ dump --->\n");
	netdev_err(dev, "<--- start TBDC dump --->\n");
	netdev_err(dev, "TBDC free cnt: %ld\n",
		   BNX2_RD(bp, BNX2_TBDC_STATUS) & BNX2_TBDC_STATUS_FREE_CNT);
	netdev_err(dev, "LINE     CID  BIDX   CMD  VALIDS\n");
	/* Read each of the 32 TBDC CAM lines, polling (bounded at 100
	 * iterations) for the register-arbitration bit to clear.
	 */
	for (i = 0; i < 0x20; i++) {
		int j = 0;

		BNX2_WR(bp, BNX2_TBDC_BD_ADDR, i);
		BNX2_WR(bp, BNX2_TBDC_CAM_OPCODE,
			BNX2_TBDC_CAM_OPCODE_OPCODE_CAM_READ);
		BNX2_WR(bp, BNX2_TBDC_COMMAND, BNX2_TBDC_COMMAND_CMD_REG_ARB);
		while ((BNX2_RD(bp, BNX2_TBDC_COMMAND) &
			BNX2_TBDC_COMMAND_CMD_REG_ARB) && j < 100)
			j++;

		cid = BNX2_RD(bp, BNX2_TBDC_CID);
		bdidx = BNX2_RD(bp, BNX2_TBDC_BIDX);
		valid = BNX2_RD(bp, BNX2_TBDC_CAM_OPCODE);
		netdev_err(dev, "%02x    %06x  %04lx   %02x    [%x]\n",
			   i, cid, bdidx & BNX2_TBDC_BDIDX_BDIDX,
			   bdidx >> 24, (valid >> 8) & 0x0ff);
	}
	netdev_err(dev, "<--- end TBDC dump --->\n");
}
6556
/* Debug dump of PCI config and MAC/host-coalescing status registers,
 * used from bnx2_tx_timeout() for post-mortem diagnosis.  Output goes
 * to the kernel log.
 */
static void
bnx2_dump_state(struct bnx2 *bp)
{
	struct net_device *dev = bp->dev;
	u32 val1, val2;

	pci_read_config_dword(bp->pdev, PCI_COMMAND, &val1);
	netdev_err(dev, "DEBUG: intr_sem[%x] PCI_CMD[%08x]\n",
		   atomic_read(&bp->intr_sem), val1);
	pci_read_config_dword(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &val1);
	pci_read_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, &val2);
	netdev_err(dev, "DEBUG: PCI_PM[%08x] PCI_MISC_CFG[%08x]\n", val1, val2);
	netdev_err(dev, "DEBUG: EMAC_TX_STATUS[%08x] EMAC_RX_STATUS[%08x]\n",
		   BNX2_RD(bp, BNX2_EMAC_TX_STATUS),
		   BNX2_RD(bp, BNX2_EMAC_RX_STATUS));
	netdev_err(dev, "DEBUG: RPM_MGMT_PKT_CTRL[%08x]\n",
		   BNX2_RD(bp, BNX2_RPM_MGMT_PKT_CTRL));
	netdev_err(dev, "DEBUG: HC_STATS_INTERRUPT_STATUS[%08x]\n",
		   BNX2_RD(bp, BNX2_HC_STATS_INTERRUPT_STATUS));
	/* The MSI-X pending-bit array lives behind GRC window 3 (set up
	 * in bnx2_enable_msix()).
	 */
	if (bp->flags & BNX2_FLAG_USING_MSIX)
		netdev_err(dev, "DEBUG: PBA[%08x]\n",
			   BNX2_RD(bp, BNX2_PCI_GRC_WINDOW3_BASE));
}
6580
/* ndo_tx_timeout: the stack detected a stalled TX queue.  Dump the
 * FTQ/CPU, general, and management-CPU state for diagnosis, then
 * defer the actual reset to bnx2_reset_task() in process context.
 */
static void
bnx2_tx_timeout(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	bnx2_dump_ftq(bp);
	bnx2_dump_state(bp);
	bnx2_dump_mcp_state(bp);

	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_work(&bp->reset_task);
}
6593
/* Called with netif_tx_lock.
 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue().
 *
 * Queue one skb on the TX ring selected by the skb's queue mapping:
 * build the flag word (checksum/VLAN/LSO), DMA-map the linear part and
 * every fragment into a chain of buffer descriptors, then ring the TX
 * doorbell.  On a DMA mapping failure the chain built so far is
 * unmapped and the skb is dropped (NETDEV_TX_OK).
 */
static netdev_tx_t
bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        dma_addr_t mapping;
        struct bnx2_tx_bd *txbd;
        struct bnx2_sw_tx_bd *tx_buf;
        u32 len, vlan_tag_flags, last_frag, mss;
        u16 prod, ring_prod;
        int i;
        struct bnx2_napi *bnapi;
        struct bnx2_tx_ring_info *txr;
        struct netdev_queue *txq;

        /*  Determine which tx ring we will be placed on */
        i = skb_get_queue_mapping(skb);
        bnapi = &bp->bnx2_napi[i];
        txr = &bnapi->tx_ring;
        txq = netdev_get_tx_queue(dev, i);

        /* The stack should never hand us a packet when fewer than
         * nr_frags + 1 descriptors are free (see the stop/wake logic
         * at the end of this function); treat it as a driver bug.
         */
        if (unlikely(bnx2_tx_avail(bp, txr) <
            (skb_shinfo(skb)->nr_frags + 1))) {
                netif_tx_stop_queue(txq);
                netdev_err(dev, "BUG! Tx ring full when queue awake!\n");

                return NETDEV_TX_BUSY;
        }
        len = skb_headlen(skb);
        prod = txr->tx_prod;
        ring_prod = BNX2_TX_RING_IDX(prod);

        /* Build the per-packet BD flag word. */
        vlan_tag_flags = 0;
        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
        }

        if (skb_vlan_tag_present(skb)) {
                vlan_tag_flags |=
                        (TX_BD_FLAGS_VLAN_TAG | (skb_vlan_tag_get(skb) << 16));
        }

        if ((mss = skb_shinfo(skb)->gso_size)) {
                u32 tcp_opt_len;
                struct iphdr *iph;

                vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;

                tcp_opt_len = tcp_optlen(skb);

                if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
                        /* IPv6 LSO: the transport-header offset beyond the
                         * fixed ipv6hdr is encoded in 8-byte units, split
                         * across three chip-specific bit fields (OFF0/OFF2
                         * in the flags word, OFF4 folded into mss).
                         */
                        u32 tcp_off = skb_transport_offset(skb) -
                                      sizeof(struct ipv6hdr) - ETH_HLEN;

                        vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
                                          TX_BD_FLAGS_SW_FLAGS;
                        if (likely(tcp_off == 0))
                                vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
                        else {
                                tcp_off >>= 3;
                                vlan_tag_flags |= ((tcp_off & 0x3) <<
                                                   TX_BD_FLAGS_TCP6_OFF0_SHL) |
                                                  ((tcp_off & 0x10) <<
                                                   TX_BD_FLAGS_TCP6_OFF4_SHL);
                                mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
                        }
                } else {
                        /* IPv4 LSO: encode IP and TCP option lengths
                         * (in 32-bit words) in bits 8+ of the flags.
                         */
                        iph = ip_hdr(skb);
                        if (tcp_opt_len || (iph->ihl > 5)) {
                                vlan_tag_flags |= ((iph->ihl - 5) +
                                                   (tcp_opt_len >> 2)) << 8;
                        }
                }
        } else
                mss = 0;

        /* Map the linear (header) part; drop the packet on failure. */
        mapping = dma_map_single(&bp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE);
        if (dma_mapping_error(&bp->pdev->dev, mapping)) {
                dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
        }

        tx_buf = &txr->tx_buf_ring[ring_prod];
        tx_buf->skb = skb;
        dma_unmap_addr_set(tx_buf, mapping, mapping);

        txbd = &txr->tx_desc_ring[ring_prod];

        /* First BD carries the start flag and the packet-wide fields. */
        txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
        txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
        txbd->tx_bd_mss_nbytes = len | (mss << 16);
        txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;

        last_frag = skb_shinfo(skb)->nr_frags;
        tx_buf->nr_frags = last_frag;
        tx_buf->is_gso = skb_is_gso(skb);

        /* One additional BD per page fragment. */
        for (i = 0; i < last_frag; i++) {
                const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                prod = BNX2_NEXT_TX_BD(prod);
                ring_prod = BNX2_TX_RING_IDX(prod);
                txbd = &txr->tx_desc_ring[ring_prod];

                len = skb_frag_size(frag);
                mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0, len,
                                           DMA_TO_DEVICE);
                if (dma_mapping_error(&bp->pdev->dev, mapping))
                        goto dma_error;
                dma_unmap_addr_set(&txr->tx_buf_ring[ring_prod], mapping,
                                   mapping);

                txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
                txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
                txbd->tx_bd_mss_nbytes = len | (mss << 16);
                txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;

        }
        /* Mark the last BD of the chain. */
        txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;

        /* Sync BD data before updating TX mailbox */
        wmb();

        netdev_tx_sent_queue(txq, skb->len);

        prod = BNX2_NEXT_TX_BD(prod);
        txr->tx_prod_bseq += skb->len;

        /* Ring the doorbell: new producer index and byte sequence. */
        BNX2_WR16(bp, txr->tx_bidx_addr, prod);
        BNX2_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);

        mmiowb();

        txr->tx_prod = prod;

        if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
                netif_tx_stop_queue(txq);

                /* netif_tx_stop_queue() must be done before checking
                 * tx index in bnx2_tx_avail() below, because in
                 * bnx2_tx_int(), we update tx index before checking for
                 * netif_tx_queue_stopped().
                 */
                smp_mb();
                if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)
                        netif_tx_wake_queue(txq);
        }

        return NETDEV_TX_OK;
dma_error:
        /* save value of frag that failed */
        last_frag = i;

        /* start back at beginning and unmap skb */
        prod = txr->tx_prod;
        ring_prod = BNX2_TX_RING_IDX(prod);
        tx_buf = &txr->tx_buf_ring[ring_prod];
        tx_buf->skb = NULL;
        dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
                         skb_headlen(skb), PCI_DMA_TODEVICE);

        /* unmap remaining mapped pages */
        for (i = 0; i < last_frag; i++) {
                prod = BNX2_NEXT_TX_BD(prod);
                ring_prod = BNX2_TX_RING_IDX(prod);
                tx_buf = &txr->tx_buf_ring[ring_prod];
                dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
                               skb_frag_size(&skb_shinfo(skb)->frags[i]),
                               PCI_DMA_TODEVICE);
        }

        dev_kfree_skb_any(skb);
        return NETDEV_TX_OK;
}
6771
/* Called with rtnl_lock */
static int
bnx2_close(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);

        /* Teardown order matters: quiesce interrupts and NAPI polling
         * first, then stop TX and the periodic timer, reset the chip,
         * and finally release IRQs, ring buffers and DMA memory.
         */
        bnx2_disable_int_sync(bp);
        bnx2_napi_disable(bp);
        netif_tx_disable(dev);
        del_timer_sync(&bp->timer);
        bnx2_shutdown_chip(bp);
        bnx2_free_irq(bp);
        bnx2_free_skbs(bp);
        bnx2_free_mem(bp);
        bnx2_del_napi(bp);
        bp->link_up = 0;
        netif_carrier_off(bp->dev);
        return 0;
}
6791
6792 static void
6793 bnx2_save_stats(struct bnx2 *bp)
6794 {
6795         u32 *hw_stats = (u32 *) bp->stats_blk;
6796         u32 *temp_stats = (u32 *) bp->temp_stats_blk;
6797         int i;
6798
6799         /* The 1st 10 counters are 64-bit counters */
6800         for (i = 0; i < 20; i += 2) {
6801                 u32 hi;
6802                 u64 lo;
6803
6804                 hi = temp_stats[i] + hw_stats[i];
6805                 lo = (u64) temp_stats[i + 1] + (u64) hw_stats[i + 1];
6806                 if (lo > 0xffffffff)
6807                         hi++;
6808                 temp_stats[i] = hi;
6809                 temp_stats[i + 1] = lo & 0xffffffff;
6810         }
6811
6812         for ( ; i < sizeof(struct statistics_block) / 4; i++)
6813                 temp_stats[i] += hw_stats[i];
6814 }
6815
/* Reassemble a 64-bit HW counter stored as separate ctr_hi/ctr_lo words. */
#define GET_64BIT_NET_STATS64(ctr)              \
        (((u64) (ctr##_hi) << 32) + (u64) (ctr##_lo))

/* Sum the live hardware counter with the copy saved across chip resets
 * (see bnx2_save_stats); relies on a local 'bp' being in scope.
 */
#define GET_64BIT_NET_STATS(ctr)                                \
        GET_64BIT_NET_STATS64(bp->stats_blk->ctr) +             \
        GET_64BIT_NET_STATS64(bp->temp_stats_blk->ctr)

/* Same as above for plain 32-bit counters. */
#define GET_32BIT_NET_STATS(ctr)                                \
        (unsigned long) (bp->stats_blk->ctr +                   \
                         bp->temp_stats_blk->ctr)
6826
/* Fill in netdev statistics by combining the live hardware statistics
 * block with the counters saved across the last chip reset (the
 * GET_*_NET_STATS macros sum both).  Returns net_stats unchanged if the
 * statistics block has not been allocated yet.
 */
static struct rtnl_link_stats64 *
bnx2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
{
        struct bnx2 *bp = netdev_priv(dev);

        if (bp->stats_blk == NULL)
                return net_stats;

        net_stats->rx_packets =
                GET_64BIT_NET_STATS(stat_IfHCInUcastPkts) +
                GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts) +
                GET_64BIT_NET_STATS(stat_IfHCInBroadcastPkts);

        net_stats->tx_packets =
                GET_64BIT_NET_STATS(stat_IfHCOutUcastPkts) +
                GET_64BIT_NET_STATS(stat_IfHCOutMulticastPkts) +
                GET_64BIT_NET_STATS(stat_IfHCOutBroadcastPkts);

        net_stats->rx_bytes =
                GET_64BIT_NET_STATS(stat_IfHCInOctets);

        net_stats->tx_bytes =
                GET_64BIT_NET_STATS(stat_IfHCOutOctets);

        net_stats->multicast =
                GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts);

        net_stats->collisions =
                GET_32BIT_NET_STATS(stat_EtherStatsCollisions);

        net_stats->rx_length_errors =
                GET_32BIT_NET_STATS(stat_EtherStatsUndersizePkts) +
                GET_32BIT_NET_STATS(stat_EtherStatsOverrsizePkts);

        /* FTQ and MBUF discards both indicate packets dropped for lack
         * of receive resources.
         */
        net_stats->rx_over_errors =
                GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
                GET_32BIT_NET_STATS(stat_IfInMBUFDiscards);

        net_stats->rx_frame_errors =
                GET_32BIT_NET_STATS(stat_Dot3StatsAlignmentErrors);

        net_stats->rx_crc_errors =
                GET_32BIT_NET_STATS(stat_Dot3StatsFCSErrors);

        net_stats->rx_errors = net_stats->rx_length_errors +
                net_stats->rx_over_errors + net_stats->rx_frame_errors +
                net_stats->rx_crc_errors;

        net_stats->tx_aborted_errors =
                GET_32BIT_NET_STATS(stat_Dot3StatsExcessiveCollisions) +
                GET_32BIT_NET_STATS(stat_Dot3StatsLateCollisions);

        /* The carrier-sense counter is ignored on 5706 and 5708 A0 —
         * presumably a hardware erratum on those chips; TODO confirm.
         */
        if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) ||
            (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0))
                net_stats->tx_carrier_errors = 0;
        else {
                net_stats->tx_carrier_errors =
                        GET_32BIT_NET_STATS(stat_Dot3StatsCarrierSenseErrors);
        }

        net_stats->tx_errors =
                GET_32BIT_NET_STATS(stat_emac_tx_stat_dot3statsinternalmactransmiterrors) +
                net_stats->tx_aborted_errors +
                net_stats->tx_carrier_errors;

        net_stats->rx_missed_errors =
                GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
                GET_32BIT_NET_STATS(stat_IfInMBUFDiscards) +
                GET_32BIT_NET_STATS(stat_FwRxDrop);

        return net_stats;
}
6899
/* All ethtool functions called with rtnl_lock */

/* ethtool get_settings: report supported/advertised modes, the active
 * port, autoneg state and (if link is up) speed/duplex/MDI-X.
 */
static int
bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct bnx2 *bp = netdev_priv(dev);
        int support_serdes = 0, support_copper = 0;

        cmd->supported = SUPPORTED_Autoneg;
        /* A remote-PHY-capable device can drive either medium; otherwise
         * the supported modes follow the fixed PHY port type.
         */
        if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
                support_serdes = 1;
                support_copper = 1;
        } else if (bp->phy_port == PORT_FIBRE)
                support_serdes = 1;
        else
                support_copper = 1;

        if (support_serdes) {
                cmd->supported |= SUPPORTED_1000baseT_Full |
                        SUPPORTED_FIBRE;
                if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
                        cmd->supported |= SUPPORTED_2500baseX_Full;

        }
        if (support_copper) {
                cmd->supported |= SUPPORTED_10baseT_Half |
                        SUPPORTED_10baseT_Full |
                        SUPPORTED_100baseT_Half |
                        SUPPORTED_100baseT_Full |
                        SUPPORTED_1000baseT_Full |
                        SUPPORTED_TP;

        }

        /* phy_lock protects the link-state fields read below. */
        spin_lock_bh(&bp->phy_lock);
        cmd->port = bp->phy_port;
        cmd->advertising = bp->advertising;

        if (bp->autoneg & AUTONEG_SPEED) {
                cmd->autoneg = AUTONEG_ENABLE;
        } else {
                cmd->autoneg = AUTONEG_DISABLE;
        }

        if (netif_carrier_ok(dev)) {
                ethtool_cmd_speed_set(cmd, bp->line_speed);
                cmd->duplex = bp->duplex;
                /* MDI/MDI-X is only meaningful on copper PHYs. */
                if (!(bp->phy_flags & BNX2_PHY_FLAG_SERDES)) {
                        if (bp->phy_flags & BNX2_PHY_FLAG_MDIX)
                                cmd->eth_tp_mdix = ETH_TP_MDI_X;
                        else
                                cmd->eth_tp_mdix = ETH_TP_MDI;
                }
        }
        else {
                /* No link: speed and duplex are indeterminate. */
                ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
                cmd->duplex = DUPLEX_UNKNOWN;
        }
        spin_unlock_bh(&bp->phy_lock);

        cmd->transceiver = XCVR_INTERNAL;
        cmd->phy_address = bp->phy_addr;

        return 0;
}
6965
/* ethtool set_settings: validate the requested port/speed/duplex,
 * store the new autoneg/advertising/speed settings under phy_lock,
 * and apply them immediately via bnx2_setup_phy() if the interface
 * is up.  Returns -EINVAL on any invalid combination.
 */
static int
bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct bnx2 *bp = netdev_priv(dev);
        u8 autoneg = bp->autoneg;
        u8 req_duplex = bp->req_duplex;
        u16 req_line_speed = bp->req_line_speed;
        u32 advertising = bp->advertising;
        int err = -EINVAL;

        spin_lock_bh(&bp->phy_lock);

        if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
                goto err_out_unlock;

        /* Switching ports is only possible with remote-PHY capability. */
        if (cmd->port != bp->phy_port &&
            !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP))
                goto err_out_unlock;

        /* If device is down, we can store the settings only if the user
         * is setting the currently active port.
         */
        if (!netif_running(dev) && cmd->port != bp->phy_port)
                goto err_out_unlock;

        if (cmd->autoneg == AUTONEG_ENABLE) {
                autoneg |= AUTONEG_SPEED;

                /* Restrict advertising to what the chosen medium
                 * supports; an empty mask means advertise everything.
                 */
                advertising = cmd->advertising;
                if (cmd->port == PORT_TP) {
                        advertising &= ETHTOOL_ALL_COPPER_SPEED;
                        if (!advertising)
                                advertising = ETHTOOL_ALL_COPPER_SPEED;
                } else {
                        advertising &= ETHTOOL_ALL_FIBRE_SPEED;
                        if (!advertising)
                                advertising = ETHTOOL_ALL_FIBRE_SPEED;
                }
                advertising |= ADVERTISED_Autoneg;
        }
        else {
                u32 speed = ethtool_cmd_speed(cmd);
                if (cmd->port == PORT_FIBRE) {
                        /* Fibre supports only forced 1G/2.5G full duplex,
                         * and 2.5G only on chips that advertise it.
                         */
                        if ((speed != SPEED_1000 &&
                             speed != SPEED_2500) ||
                            (cmd->duplex != DUPLEX_FULL))
                                goto err_out_unlock;

                        if (speed == SPEED_2500 &&
                            !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
                                goto err_out_unlock;
                } else if (speed == SPEED_1000 || speed == SPEED_2500)
                        /* Copper cannot be forced to 1G/2.5G. */
                        goto err_out_unlock;

                autoneg &= ~AUTONEG_SPEED;
                req_line_speed = speed;
                req_duplex = cmd->duplex;
                advertising = 0;
        }

        /* All checks passed: commit the new settings. */
        bp->autoneg = autoneg;
        bp->advertising = advertising;
        bp->req_line_speed = req_line_speed;
        bp->req_duplex = req_duplex;

        err = 0;
        /* If device is down, the new settings will be picked up when it is
         * brought up.
         */
        if (netif_running(dev))
                err = bnx2_setup_phy(bp, cmd->port);

err_out_unlock:
        spin_unlock_bh(&bp->phy_lock);

        return err;
}
7043
7044 static void
7045 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
7046 {
7047         struct bnx2 *bp = netdev_priv(dev);
7048
7049         strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
7050         strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
7051         strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
7052         strlcpy(info->fw_version, bp->fw_version, sizeof(info->fw_version));
7053 }
7054
/* Size of the register dump produced by bnx2_get_regs(). */
#define BNX2_REGDUMP_LEN                (32 * 1024)

static int
bnx2_get_regs_len(struct net_device *dev)
{
        return BNX2_REGDUMP_LEN;
}
7062
/* ethtool get_regs: dump 32 KB of chip register space into _p.
 * Registers are read only within the ranges listed in reg_boundaries;
 * the holes between ranges are left zero-filled by the initial memset.
 */
static void
bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
{
        u32 *p = _p, i, offset;
        u8 *orig_p = _p;
        struct bnx2 *bp = netdev_priv(dev);
        /* Pairs of (start, end) offsets delimiting the register ranges
         * that are dumped — presumably the readable ranges on this chip;
         * the final start of 0x8000 equals BNX2_REGDUMP_LEN and
         * terminates the walk.
         */
        static const u32 reg_boundaries[] = {
                0x0000, 0x0098, 0x0400, 0x045c,
                0x0800, 0x0880, 0x0c00, 0x0c10,
                0x0c30, 0x0d08, 0x1000, 0x101c,
                0x1040, 0x1048, 0x1080, 0x10a4,
                0x1400, 0x1490, 0x1498, 0x14f0,
                0x1500, 0x155c, 0x1580, 0x15dc,
                0x1600, 0x1658, 0x1680, 0x16d8,
                0x1800, 0x1820, 0x1840, 0x1854,
                0x1880, 0x1894, 0x1900, 0x1984,
                0x1c00, 0x1c0c, 0x1c40, 0x1c54,
                0x1c80, 0x1c94, 0x1d00, 0x1d84,
                0x2000, 0x2030, 0x23c0, 0x2400,
                0x2800, 0x2820, 0x2830, 0x2850,
                0x2b40, 0x2c10, 0x2fc0, 0x3058,
                0x3c00, 0x3c94, 0x4000, 0x4010,
                0x4080, 0x4090, 0x43c0, 0x4458,
                0x4c00, 0x4c18, 0x4c40, 0x4c54,
                0x4fc0, 0x5010, 0x53c0, 0x5444,
                0x5c00, 0x5c18, 0x5c80, 0x5c90,
                0x5fc0, 0x6000, 0x6400, 0x6428,
                0x6800, 0x6848, 0x684c, 0x6860,
                0x6888, 0x6910, 0x8000
        };

        regs->version = 0;

        memset(p, 0, BNX2_REGDUMP_LEN);

        /* Registers can only be read while the chip is up. */
        if (!netif_running(bp->dev))
                return;

        i = 0;
        offset = reg_boundaries[0];
        p += offset;
        while (offset < BNX2_REGDUMP_LEN) {
                *p++ = BNX2_RD(bp, offset);
                offset += 4;
                /* End of the current range: jump to the next start and
                 * reposition the output pointer at the matching offset.
                 */
                if (offset == reg_boundaries[i + 1]) {
                        offset = reg_boundaries[i + 2];
                        p = (u32 *) (orig_p + offset);
                        i += 2;
                }
        }
}
7114
7115 static void
7116 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7117 {
7118         struct bnx2 *bp = netdev_priv(dev);
7119
7120         if (bp->flags & BNX2_FLAG_NO_WOL) {
7121                 wol->supported = 0;
7122                 wol->wolopts = 0;
7123         }
7124         else {
7125                 wol->supported = WAKE_MAGIC;
7126                 if (bp->wol)
7127                         wol->wolopts = WAKE_MAGIC;
7128                 else
7129                         wol->wolopts = 0;
7130         }
7131         memset(&wol->sopass, 0, sizeof(wol->sopass));
7132 }
7133
7134 static int
7135 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7136 {
7137         struct bnx2 *bp = netdev_priv(dev);
7138
7139         if (wol->wolopts & ~WAKE_MAGIC)
7140                 return -EINVAL;
7141
7142         if (wol->wolopts & WAKE_MAGIC) {
7143                 if (bp->flags & BNX2_FLAG_NO_WOL)
7144                         return -EINVAL;
7145
7146                 bp->wol = 1;
7147         }
7148         else {
7149                 bp->wol = 0;
7150         }
7151
7152         device_set_wakeup_enable(&bp->pdev->dev, bp->wol);
7153
7154         return 0;
7155 }
7156
/* ethtool nway_reset: restart link autonegotiation.
 * Returns -EAGAIN if the interface is down and -EINVAL if speed
 * autonegotiation is not enabled.
 */
static int
bnx2_nway_reset(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        u32 bmcr;

        if (!netif_running(dev))
                return -EAGAIN;

        if (!(bp->autoneg & AUTONEG_SPEED)) {
                return -EINVAL;
        }

        spin_lock_bh(&bp->phy_lock);

        /* With a remote PHY, the restart is delegated to the
         * management firmware via bnx2_setup_remote_phy().
         */
        if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
                int rc;

                rc = bnx2_setup_remote_phy(bp, bp->phy_port);
                spin_unlock_bh(&bp->phy_lock);
                return rc;
        }

        /* Force a link down visible on the other side */
        if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
                /* Briefly put the PHY in loopback (dropping phy_lock
                 * across the sleep), then arm the SerDes autoneg timer.
                 */
                bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
                spin_unlock_bh(&bp->phy_lock);

                msleep(20);

                spin_lock_bh(&bp->phy_lock);

                bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
                bp->serdes_an_pending = 1;
                mod_timer(&bp->timer, jiffies + bp->current_interval);
        }

        /* Clear loopback and restart autonegotiation. */
        bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
        bmcr &= ~BMCR_LOOPBACK;
        bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);

        spin_unlock_bh(&bp->phy_lock);

        return 0;
}
7202
7203 static u32
7204 bnx2_get_link(struct net_device *dev)
7205 {
7206         struct bnx2 *bp = netdev_priv(dev);
7207
7208         return bp->link_up;
7209 }
7210
7211 static int
7212 bnx2_get_eeprom_len(struct net_device *dev)
7213 {
7214         struct bnx2 *bp = netdev_priv(dev);
7215
7216         if (bp->flash_info == NULL)
7217                 return 0;
7218
7219         return (int) bp->flash_size;
7220 }
7221
7222 static int
7223 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
7224                 u8 *eebuf)
7225 {
7226         struct bnx2 *bp = netdev_priv(dev);
7227         int rc;
7228
7229         /* parameters already validated in ethtool_get_eeprom */
7230
7231         rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
7232
7233         return rc;
7234 }
7235
7236 static int
7237 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
7238                 u8 *eebuf)
7239 {
7240         struct bnx2 *bp = netdev_priv(dev);
7241         int rc;
7242
7243         /* parameters already validated in ethtool_set_eeprom */
7244
7245         rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
7246
7247         return rc;
7248 }
7249
7250 static int
7251 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
7252 {
7253         struct bnx2 *bp = netdev_priv(dev);
7254
7255         memset(coal, 0, sizeof(struct ethtool_coalesce));
7256
7257         coal->rx_coalesce_usecs = bp->rx_ticks;
7258         coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
7259         coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
7260         coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
7261
7262         coal->tx_coalesce_usecs = bp->tx_ticks;
7263         coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
7264         coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
7265         coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
7266
7267         coal->stats_block_coalesce_usecs = bp->stats_ticks;
7268
7269         return 0;
7270 }
7271
7272 static int
7273 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
7274 {
7275         struct bnx2 *bp = netdev_priv(dev);
7276
7277         bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
7278         if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
7279
7280         bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
7281         if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
7282
7283         bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
7284         if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
7285
7286         bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
7287         if (bp->rx_quick_cons_trip_int > 0xff)
7288                 bp->rx_quick_cons_trip_int = 0xff;
7289
7290         bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
7291         if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
7292
7293         bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
7294         if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
7295
7296         bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
7297         if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
7298
7299         bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
7300         if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
7301                 0xff;
7302
7303         bp->stats_ticks = coal->stats_block_coalesce_usecs;
7304         if (bp->flags & BNX2_FLAG_BROKEN_STATS) {
7305                 if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
7306                         bp->stats_ticks = USEC_PER_SEC;
7307         }
7308         if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
7309                 bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7310         bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7311
7312         if (netif_running(bp->dev)) {
7313                 bnx2_netif_stop(bp, true);
7314                 bnx2_init_nic(bp, 0);
7315                 bnx2_netif_start(bp, true);
7316         }
7317
7318         return 0;
7319 }
7320
7321 static void
7322 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7323 {
7324         struct bnx2 *bp = netdev_priv(dev);
7325
7326         ering->rx_max_pending = BNX2_MAX_TOTAL_RX_DESC_CNT;
7327         ering->rx_jumbo_max_pending = BNX2_MAX_TOTAL_RX_PG_DESC_CNT;
7328
7329         ering->rx_pending = bp->rx_ring_size;
7330         ering->rx_jumbo_pending = bp->rx_pg_ring_size;
7331
7332         ering->tx_max_pending = BNX2_MAX_TX_DESC_CNT;
7333         ering->tx_pending = bp->tx_ring_size;
7334 }
7335
/* Resize the RX and TX rings.  If the interface is running, the chip
 * is reset and fully reinitialized around the change; the device is
 * closed if reinitialization fails.  reset_irq additionally tears down
 * and re-establishes the interrupt/NAPI setup.
 */
static int
bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx, bool reset_irq)
{
        if (netif_running(bp->dev)) {
                /* Reset will erase chipset stats; save them */
                bnx2_save_stats(bp);

                bnx2_netif_stop(bp, true);
                bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
                if (reset_irq) {
                        bnx2_free_irq(bp);
                        bnx2_del_napi(bp);
                } else {
                        __bnx2_free_irq(bp);
                }
                bnx2_free_skbs(bp);
                bnx2_free_mem(bp);
        }

        /* Record the new sizes; they take effect at (re)initialization. */
        bnx2_set_rx_ring_size(bp, rx);
        bp->tx_ring_size = tx;

        if (netif_running(bp->dev)) {
                int rc = 0;

                if (reset_irq) {
                        rc = bnx2_setup_int_mode(bp, disable_msi);
                        bnx2_init_napi(bp);
                }

                if (!rc)
                        rc = bnx2_alloc_mem(bp);

                if (!rc)
                        rc = bnx2_request_irq(bp);

                if (!rc)
                        rc = bnx2_init_nic(bp, 0);

                /* Any failure leaves the device unusable: bring it down.
                 * NAPI must be re-enabled first so dev_close can disable it.
                 */
                if (rc) {
                        bnx2_napi_enable(bp);
                        dev_close(bp->dev);
                        return rc;
                }
#ifdef BCM_CNIC
                mutex_lock(&bp->cnic_lock);
                /* Let cnic know about the new status block. */
                if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD)
                        bnx2_setup_cnic_irq_info(bp);
                mutex_unlock(&bp->cnic_lock);
#endif
                bnx2_netif_start(bp, true);
        }
        return 0;
}
7391
7392 static int
7393 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7394 {
7395         struct bnx2 *bp = netdev_priv(dev);
7396         int rc;
7397
7398         if ((ering->rx_pending > BNX2_MAX_TOTAL_RX_DESC_CNT) ||
7399                 (ering->tx_pending > BNX2_MAX_TX_DESC_CNT) ||
7400                 (ering->tx_pending <= MAX_SKB_FRAGS)) {
7401
7402                 return -EINVAL;
7403         }
7404         rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending,
7405                                    false);
7406         return rc;
7407 }
7408
7409 static void
7410 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7411 {
7412         struct bnx2 *bp = netdev_priv(dev);
7413
7414         epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
7415         epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
7416         epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
7417 }
7418
7419 static int
7420 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7421 {
7422         struct bnx2 *bp = netdev_priv(dev);
7423
7424         bp->req_flow_ctrl = 0;
7425         if (epause->rx_pause)
7426                 bp->req_flow_ctrl |= FLOW_CTRL_RX;
7427         if (epause->tx_pause)
7428                 bp->req_flow_ctrl |= FLOW_CTRL_TX;
7429
7430         if (epause->autoneg) {
7431                 bp->autoneg |= AUTONEG_FLOW_CTRL;
7432         }
7433         else {
7434                 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
7435         }
7436
7437         if (netif_running(dev)) {
7438                 spin_lock_bh(&bp->phy_lock);
7439                 bnx2_setup_phy(bp, bp->phy_port);
7440                 spin_unlock_bh(&bp->phy_lock);
7441         }
7442
7443         return 0;
7444 }
7445
/* Names reported for `ethtool -S`, index-matched to the counter
 * offsets in bnx2_stats_offset_arr below.
 */
static struct {
        char string[ETH_GSTRING_LEN];
} bnx2_stats_str_arr[] = {
        { "rx_bytes" },
        { "rx_error_bytes" },
        { "tx_bytes" },
        { "tx_error_bytes" },
        { "rx_ucast_packets" },
        { "rx_mcast_packets" },
        { "rx_bcast_packets" },
        { "tx_ucast_packets" },
        { "tx_mcast_packets" },
        { "tx_bcast_packets" },
        { "tx_mac_errors" },
        { "tx_carrier_errors" },
        { "rx_crc_errors" },
        { "rx_align_errors" },
        { "tx_single_collisions" },
        { "tx_multi_collisions" },
        { "tx_deferred" },
        { "tx_excess_collisions" },
        { "tx_late_collisions" },
        { "tx_total_collisions" },
        { "rx_fragments" },
        { "rx_jabbers" },
        { "rx_undersize_packets" },
        { "rx_oversize_packets" },
        { "rx_64_byte_packets" },
        { "rx_65_to_127_byte_packets" },
        { "rx_128_to_255_byte_packets" },
        { "rx_256_to_511_byte_packets" },
        { "rx_512_to_1023_byte_packets" },
        { "rx_1024_to_1522_byte_packets" },
        { "rx_1523_to_9022_byte_packets" },
        { "tx_64_byte_packets" },
        { "tx_65_to_127_byte_packets" },
        { "tx_128_to_255_byte_packets" },
        { "tx_256_to_511_byte_packets" },
        { "tx_512_to_1023_byte_packets" },
        { "tx_1024_to_1522_byte_packets" },
        { "tx_1523_to_9022_byte_packets" },
        { "rx_xon_frames" },
        { "rx_xoff_frames" },
        { "tx_xon_frames" },
        { "tx_xoff_frames" },
        { "rx_mac_ctrl_frames" },
        { "rx_filtered_packets" },
        { "rx_ftq_discards" },
        { "rx_discards" },
        { "rx_fw_discards" },
};
7497
7498 #define BNX2_NUM_STATS ARRAY_SIZE(bnx2_stats_str_arr)
7499
7500 #define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
7501
/* 32-bit word offset into struct statistics_block for each counter in
 * bnx2_stats_str_arr.  64-bit counters point at their _hi word; the
 * _lo word is read from the following offset (see
 * bnx2_get_ethtool_stats).
 */
static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
    STATS_OFFSET32(stat_IfHCInOctets_hi),
    STATS_OFFSET32(stat_IfHCInBadOctets_hi),
    STATS_OFFSET32(stat_IfHCOutOctets_hi),
    STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
    STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
    STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
    STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
    STATS_OFFSET32(stat_Dot3StatsFCSErrors),
    STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
    STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
    STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
    STATS_OFFSET32(stat_Dot3StatsLateCollisions),
    STATS_OFFSET32(stat_EtherStatsCollisions),
    STATS_OFFSET32(stat_EtherStatsFragments),
    STATS_OFFSET32(stat_EtherStatsJabbers),
    STATS_OFFSET32(stat_EtherStatsUndersizePkts),
    STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
    STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_XonPauseFramesReceived),
    STATS_OFFSET32(stat_XoffPauseFramesReceived),
    STATS_OFFSET32(stat_OutXonSent),
    STATS_OFFSET32(stat_OutXoffSent),
    STATS_OFFSET32(stat_MacControlFramesReceived),
    STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
    STATS_OFFSET32(stat_IfInFTQDiscards),
    STATS_OFFSET32(stat_IfInMBUFDiscards),
    STATS_OFFSET32(stat_FwRxDrop),
};
7551
/* Per-counter widths in bytes for 5706 (all steppings) and 5708 A0.
 * stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
 * skipped (width 0, reported as zero) because of errata.
 */
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,0,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,
};
7562
/* Per-counter widths in bytes for all other chip revisions; only
 * stat_IfHCInBadOctets is skipped (width 0) because of errata.
 */
static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,
};
7570
/* Number of ethtool self-test cases. */
#define BNX2_NUM_TESTS 6

/* ethtool self-test names (ETH_SS_TEST string set).  "offline" tests
 * stop traffic and reset the chip; "online" tests run on a live
 * interface (see bnx2_self_test).
 */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
};
7583
7584 static int
7585 bnx2_get_sset_count(struct net_device *dev, int sset)
7586 {
7587         switch (sset) {
7588         case ETH_SS_TEST:
7589                 return BNX2_NUM_TESTS;
7590         case ETH_SS_STATS:
7591                 return BNX2_NUM_STATS;
7592         default:
7593                 return -EOPNOTSUPP;
7594         }
7595 }
7596
/* ethtool self-test handler.  Results land in buf[] (one u64 per
 * test, 0 = pass) in bnx2_tests_str_arr order; any failure also sets
 * ETH_TEST_FL_FAILED in etest->flags.
 */
static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);

	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int i;

		/* Offline tests need exclusive access to the chip:
		 * stop traffic, put the chip into diagnostic mode and
		 * release all posted buffers.
		 */
		bnx2_netif_stop(bp, true);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
		bnx2_free_skbs(bp);

		if (bnx2_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if ((buf[2] = bnx2_test_loopback(bp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		/* Restore normal operation, or shut the chip back
		 * down if the interface was not up to begin with.
		 */
		if (!netif_running(bp->dev))
			bnx2_shutdown_chip(bp);
		else {
			bnx2_init_nic(bp, 1);
			bnx2_netif_start(bp, true);
		}

		/* wait for link up (up to ~7 seconds) */
		for (i = 0; i < 7; i++) {
			if (bp->link_up)
				break;
			msleep_interruptible(1000);
		}
	}

	/* Online tests run without disturbing the interface. */
	if (bnx2_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}

	if (bnx2_test_link(bp) != 0) {
		buf[5] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;

	}
}
7651
7652 static void
7653 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
7654 {
7655         switch (stringset) {
7656         case ETH_SS_STATS:
7657                 memcpy(buf, bnx2_stats_str_arr,
7658                         sizeof(bnx2_stats_str_arr));
7659                 break;
7660         case ETH_SS_TEST:
7661                 memcpy(buf, bnx2_tests_str_arr,
7662                         sizeof(bnx2_tests_str_arr));
7663                 break;
7664         }
7665 }
7666
7667 static void
7668 bnx2_get_ethtool_stats(struct net_device *dev,
7669                 struct ethtool_stats *stats, u64 *buf)
7670 {
7671         struct bnx2 *bp = netdev_priv(dev);
7672         int i;
7673         u32 *hw_stats = (u32 *) bp->stats_blk;
7674         u32 *temp_stats = (u32 *) bp->temp_stats_blk;
7675         u8 *stats_len_arr = NULL;
7676
7677         if (hw_stats == NULL) {
7678                 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
7679                 return;
7680         }
7681
7682         if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) ||
7683             (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1) ||
7684             (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A2) ||
7685             (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0))
7686                 stats_len_arr = bnx2_5706_stats_len_arr;
7687         else
7688                 stats_len_arr = bnx2_5708_stats_len_arr;
7689
7690         for (i = 0; i < BNX2_NUM_STATS; i++) {
7691                 unsigned long offset;
7692
7693                 if (stats_len_arr[i] == 0) {
7694                         /* skip this counter */
7695                         buf[i] = 0;
7696                         continue;
7697                 }
7698
7699                 offset = bnx2_stats_offset_arr[i];
7700                 if (stats_len_arr[i] == 4) {
7701                         /* 4-byte counter */
7702                         buf[i] = (u64) *(hw_stats + offset) +
7703                                  *(temp_stats + offset);
7704                         continue;
7705                 }
7706                 /* 8-byte counter */
7707                 buf[i] = (((u64) *(hw_stats + offset)) << 32) +
7708                          *(hw_stats + offset + 1) +
7709                          (((u64) *(temp_stats + offset)) << 32) +
7710                          *(temp_stats + offset + 1);
7711         }
7712 }
7713
7714 static int
7715 bnx2_set_phys_id(struct net_device *dev, enum ethtool_phys_id_state state)
7716 {
7717         struct bnx2 *bp = netdev_priv(dev);
7718
7719         switch (state) {
7720         case ETHTOOL_ID_ACTIVE:
7721                 bp->leds_save = BNX2_RD(bp, BNX2_MISC_CFG);
7722                 BNX2_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
7723                 return 1;       /* cycle on/off once per second */
7724
7725         case ETHTOOL_ID_ON:
7726                 BNX2_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
7727                         BNX2_EMAC_LED_1000MB_OVERRIDE |
7728                         BNX2_EMAC_LED_100MB_OVERRIDE |
7729                         BNX2_EMAC_LED_10MB_OVERRIDE |
7730                         BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
7731                         BNX2_EMAC_LED_TRAFFIC);
7732                 break;
7733
7734         case ETHTOOL_ID_OFF:
7735                 BNX2_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
7736                 break;
7737
7738         case ETHTOOL_ID_INACTIVE:
7739                 BNX2_WR(bp, BNX2_EMAC_LED, 0);
7740                 BNX2_WR(bp, BNX2_MISC_CFG, bp->leds_save);
7741                 break;
7742         }
7743
7744         return 0;
7745 }
7746
7747 static int
7748 bnx2_set_features(struct net_device *dev, netdev_features_t features)
7749 {
7750         struct bnx2 *bp = netdev_priv(dev);
7751
7752         /* TSO with VLAN tag won't work with current firmware */
7753         if (features & NETIF_F_HW_VLAN_CTAG_TX)
7754                 dev->vlan_features |= (dev->hw_features & NETIF_F_ALL_TSO);
7755         else
7756                 dev->vlan_features &= ~NETIF_F_ALL_TSO;
7757
7758         if ((!!(features & NETIF_F_HW_VLAN_CTAG_RX) !=
7759             !!(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) &&
7760             netif_running(dev)) {
7761                 bnx2_netif_stop(bp, false);
7762                 dev->features = features;
7763                 bnx2_set_rx_mode(dev);
7764                 bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);
7765                 bnx2_netif_start(bp, false);
7766                 return 1;
7767         }
7768
7769         return 0;
7770 }
7771
7772 static void bnx2_get_channels(struct net_device *dev,
7773                               struct ethtool_channels *channels)
7774 {
7775         struct bnx2 *bp = netdev_priv(dev);
7776         u32 max_rx_rings = 1;
7777         u32 max_tx_rings = 1;
7778
7779         if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !disable_msi) {
7780                 max_rx_rings = RX_MAX_RINGS;
7781                 max_tx_rings = TX_MAX_RINGS;
7782         }
7783
7784         channels->max_rx = max_rx_rings;
7785         channels->max_tx = max_tx_rings;
7786         channels->max_other = 0;
7787         channels->max_combined = 0;
7788         channels->rx_count = bp->num_rx_rings;
7789         channels->tx_count = bp->num_tx_rings;
7790         channels->other_count = 0;
7791         channels->combined_count = 0;
7792 }
7793
7794 static int bnx2_set_channels(struct net_device *dev,
7795                               struct ethtool_channels *channels)
7796 {
7797         struct bnx2 *bp = netdev_priv(dev);
7798         u32 max_rx_rings = 1;
7799         u32 max_tx_rings = 1;
7800         int rc = 0;
7801
7802         if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !disable_msi) {
7803                 max_rx_rings = RX_MAX_RINGS;
7804                 max_tx_rings = TX_MAX_RINGS;
7805         }
7806         if (channels->rx_count > max_rx_rings ||
7807             channels->tx_count > max_tx_rings)
7808                 return -EINVAL;
7809
7810         bp->num_req_rx_rings = channels->rx_count;
7811         bp->num_req_tx_rings = channels->tx_count;
7812
7813         if (netif_running(dev))
7814                 rc = bnx2_change_ring_size(bp, bp->rx_ring_size,
7815                                            bp->tx_ring_size, true);
7816
7817         return rc;
7818 }
7819
/* ethtool entry points for the bnx2 driver. */
static const struct ethtool_ops bnx2_ethtool_ops = {
	.get_settings		= bnx2_get_settings,
	.set_settings		= bnx2_set_settings,
	.get_drvinfo		= bnx2_get_drvinfo,
	.get_regs_len		= bnx2_get_regs_len,
	.get_regs		= bnx2_get_regs,
	.get_wol		= bnx2_get_wol,
	.set_wol		= bnx2_set_wol,
	.nway_reset		= bnx2_nway_reset,
	.get_link		= bnx2_get_link,
	.get_eeprom_len		= bnx2_get_eeprom_len,
	.get_eeprom		= bnx2_get_eeprom,
	.set_eeprom		= bnx2_set_eeprom,
	.get_coalesce		= bnx2_get_coalesce,
	.set_coalesce		= bnx2_set_coalesce,
	.get_ringparam		= bnx2_get_ringparam,
	.set_ringparam		= bnx2_set_ringparam,
	.get_pauseparam		= bnx2_get_pauseparam,
	.set_pauseparam		= bnx2_set_pauseparam,
	.self_test		= bnx2_self_test,
	.get_strings		= bnx2_get_strings,
	.set_phys_id		= bnx2_set_phys_id,
	.get_ethtool_stats	= bnx2_get_ethtool_stats,
	.get_sset_count		= bnx2_get_sset_count,
	.get_channels		= bnx2_get_channels,
	.set_channels		= bnx2_set_channels,
};
7847
/* Called with rtnl_lock */
static int
bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2 *bp = netdev_priv(dev);
	int err;

	/* MII register access ioctls; everything else is unsupported. */
	switch(cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		/* Direct MII access is unavailable when the PHY is
		 * managed by remote (management) firmware.
		 */
		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return -EOPNOTSUPP;

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&bp->phy_lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return -EOPNOTSUPP;

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&bp->phy_lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
7898
7899 /* Called with rtnl_lock */
7900 static int
7901 bnx2_change_mac_addr(struct net_device *dev, void *p)
7902 {
7903         struct sockaddr *addr = p;
7904         struct bnx2 *bp = netdev_priv(dev);
7905
7906         if (!is_valid_ether_addr(addr->sa_data))
7907                 return -EADDRNOTAVAIL;
7908
7909         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7910         if (netif_running(dev))
7911                 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
7912
7913         return 0;
7914 }
7915
7916 /* Called with rtnl_lock */
7917 static int
7918 bnx2_change_mtu(struct net_device *dev, int new_mtu)
7919 {
7920         struct bnx2 *bp = netdev_priv(dev);
7921
7922         if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
7923                 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
7924                 return -EINVAL;
7925
7926         dev->mtu = new_mtu;
7927         return bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size,
7928                                      false);
7929 }
7930
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook: run every interrupt vector's handler with its IRQ
 * disabled, so netconsole/netdump traffic can make progress without
 * normal interrupt delivery.
 */
static void
poll_bnx2(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;

	for (i = 0; i < bp->irq_nvecs; i++) {
		struct bnx2_irq *irq = &bp->irq_tbl[i];

		disable_irq(irq->vector);
		irq->handler(irq->vector, &bp->bnx2_napi[i]);
		enable_irq(irq->vector);
	}
}
#endif
7947
7948 static void
7949 bnx2_get_5709_media(struct bnx2 *bp)
7950 {
7951         u32 val = BNX2_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
7952         u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
7953         u32 strap;
7954
7955         if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
7956                 return;
7957         else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
7958                 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7959                 return;
7960         }
7961
7962         if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
7963                 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
7964         else
7965                 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
7966
7967         if (bp->func == 0) {
7968                 switch (strap) {
7969                 case 0x4:
7970                 case 0x5:
7971                 case 0x6:
7972                         bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7973                         return;
7974                 }
7975         } else {
7976                 switch (strap) {
7977                 case 0x1:
7978                 case 0x2:
7979                 case 0x4:
7980                         bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7981                         return;
7982                 }
7983         }
7984 }
7985
/* Detect PCI vs PCI-X mode, bus speed and bus width from the chip's
 * PCI config status registers; results go to bp->flags and
 * bp->bus_speed_mhz.
 */
static void
bnx2_get_pci_speed(struct bnx2 *bp)
{
	u32 reg;

	reg = BNX2_RD(bp, BNX2_PCICFG_MISC_STATUS);
	if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
		u32 clkreg;

		bp->flags |= BNX2_FLAG_PCIX;

		/* In PCI-X mode the clock-control register reports
		 * the detected bus clock.
		 */
		clkreg = BNX2_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);

		clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
		switch (clkreg) {
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
			bp->bus_speed_mhz = 133;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
			bp->bus_speed_mhz = 100;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
			bp->bus_speed_mhz = 66;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
			bp->bus_speed_mhz = 50;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
			bp->bus_speed_mhz = 33;
			break;
		}
	}
	else {
		/* Conventional PCI: just 33 vs 66 MHz (M66EN). */
		if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
			bp->bus_speed_mhz = 66;
		else
			bp->bus_speed_mhz = 33;
	}

	if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
		bp->flags |= BNX2_FLAG_PCI_32BIT;

}
8037
/* Read the VPD (vital product data) area from NVRAM and, when the
 * board's read-only VPD section carries a vendor version keyword,
 * store that version string at the start of bp->fw_version (the
 * caller appends the bootcode version after it).
 */
static void
bnx2_read_vpd_fw_ver(struct bnx2 *bp)
{
	int rc, i, j;
	u8 *data;
	unsigned int block_end, rosize, len;

#define BNX2_VPD_NVRAM_OFFSET	0x300	/* VPD location in NVRAM */
#define BNX2_VPD_LEN		128	/* bytes of VPD parsed */
#define BNX2_MAX_VER_SLEN	30	/* max version string length */

	/* Buffer holds the raw NVRAM image in the upper half and the
	 * byte-swapped copy in the lower half.
	 */
	data = kmalloc(256, GFP_KERNEL);
	if (!data)
		return;

	rc = bnx2_nvram_read(bp, BNX2_VPD_NVRAM_OFFSET, data + BNX2_VPD_LEN,
			     BNX2_VPD_LEN);
	if (rc)
		goto vpd_done;

	/* Reverse the byte order within each 32-bit word to get the
	 * VPD byte stream in the order the PCI VPD helpers expect.
	 */
	for (i = 0; i < BNX2_VPD_LEN; i += 4) {
		data[i] = data[i + BNX2_VPD_LEN + 3];
		data[i + 1] = data[i + BNX2_VPD_LEN + 2];
		data[i + 2] = data[i + BNX2_VPD_LEN + 1];
		data[i + 3] = data[i + BNX2_VPD_LEN];
	}

	/* Locate the large-resource read-only section. */
	i = pci_vpd_find_tag(data, 0, BNX2_VPD_LEN, PCI_VPD_LRDT_RO_DATA);
	if (i < 0)
		goto vpd_done;

	rosize = pci_vpd_lrdt_size(&data[i]);
	i += PCI_VPD_LRDT_TAG_SIZE;
	block_end = i + rosize;

	if (block_end > BNX2_VPD_LEN)
		goto vpd_done;

	/* Only proceed when the manufacturer-id keyword is exactly
	 * "1028" (presumably Dell's PCI vendor id - TODO confirm).
	 */
	j = pci_vpd_find_info_keyword(data, i, rosize,
				      PCI_VPD_RO_KEYWORD_MFR_ID);
	if (j < 0)
		goto vpd_done;

	len = pci_vpd_info_field_size(&data[j]);

	j += PCI_VPD_INFO_FLD_HDR_SIZE;
	if (j + len > block_end || len != 4 ||
	    memcmp(&data[j], "1028", 4))
		goto vpd_done;

	/* Fetch the vendor-specific version keyword (V0). */
	j = pci_vpd_find_info_keyword(data, i, rosize,
				      PCI_VPD_RO_KEYWORD_VENDOR0);
	if (j < 0)
		goto vpd_done;

	len = pci_vpd_info_field_size(&data[j]);

	j += PCI_VPD_INFO_FLD_HDR_SIZE;
	if (j + len > block_end || len > BNX2_MAX_VER_SLEN)
		goto vpd_done;

	/* Store the version followed by a space separator. */
	memcpy(bp->fw_version, &data[j], len);
	bp->fw_version[len] = ' ';

vpd_done:
	kfree(data);
}
8105
8106 static int
8107 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
8108 {
8109         struct bnx2 *bp;
8110         int rc, i, j;
8111         u32 reg;
8112         u64 dma_mask, persist_dma_mask;
8113         int err;
8114
8115         SET_NETDEV_DEV(dev, &pdev->dev);
8116         bp = netdev_priv(dev);
8117
8118         bp->flags = 0;
8119         bp->phy_flags = 0;
8120
8121         bp->temp_stats_blk =
8122                 kzalloc(sizeof(struct statistics_block), GFP_KERNEL);
8123
8124         if (bp->temp_stats_blk == NULL) {
8125                 rc = -ENOMEM;
8126                 goto err_out;
8127         }
8128
8129         /* enable device (incl. PCI PM wakeup), and bus-mastering */
8130         rc = pci_enable_device(pdev);
8131         if (rc) {
8132                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
8133                 goto err_out;
8134         }
8135
8136         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
8137                 dev_err(&pdev->dev,
8138                         "Cannot find PCI device base address, aborting\n");
8139                 rc = -ENODEV;
8140                 goto err_out_disable;
8141         }
8142
8143         rc = pci_request_regions(pdev, DRV_MODULE_NAME);
8144         if (rc) {
8145                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
8146                 goto err_out_disable;
8147         }
8148
8149         pci_set_master(pdev);
8150
8151         bp->pm_cap = pdev->pm_cap;
8152         if (bp->pm_cap == 0) {
8153                 dev_err(&pdev->dev,
8154                         "Cannot find power management capability, aborting\n");
8155                 rc = -EIO;
8156                 goto err_out_release;
8157         }
8158
8159         bp->dev = dev;
8160         bp->pdev = pdev;
8161
8162         spin_lock_init(&bp->phy_lock);
8163         spin_lock_init(&bp->indirect_lock);
8164 #ifdef BCM_CNIC
8165         mutex_init(&bp->cnic_lock);
8166 #endif
8167         INIT_WORK(&bp->reset_task, bnx2_reset_task);
8168
8169         bp->regview = pci_iomap(pdev, 0, MB_GET_CID_ADDR(TX_TSS_CID +
8170                                                          TX_MAX_TSS_RINGS + 1));
8171         if (!bp->regview) {
8172                 dev_err(&pdev->dev, "Cannot map register space, aborting\n");
8173                 rc = -ENOMEM;
8174                 goto err_out_release;
8175         }
8176
8177         /* Configure byte swap and enable write to the reg_window registers.
8178          * Rely on CPU to do target byte swapping on big endian systems
8179          * The chip's target access swapping will not swap all accesses
8180          */
8181         BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG,
8182                 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
8183                 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
8184
8185         bp->chip_id = BNX2_RD(bp, BNX2_MISC_ID);
8186
8187         if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
8188                 if (!pci_is_pcie(pdev)) {
8189                         dev_err(&pdev->dev, "Not PCIE, aborting\n");
8190                         rc = -EIO;
8191                         goto err_out_unmap;
8192                 }
8193                 bp->flags |= BNX2_FLAG_PCIE;
8194                 if (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax)
8195                         bp->flags |= BNX2_FLAG_JUMBO_BROKEN;
8196
8197                 /* AER (Advanced Error Reporting) hooks */
8198                 err = pci_enable_pcie_error_reporting(pdev);
8199                 if (!err)
8200                         bp->flags |= BNX2_FLAG_AER_ENABLED;
8201
8202         } else {
8203                 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
8204                 if (bp->pcix_cap == 0) {
8205                         dev_err(&pdev->dev,
8206                                 "Cannot find PCIX capability, aborting\n");
8207                         rc = -EIO;
8208                         goto err_out_unmap;
8209                 }
8210                 bp->flags |= BNX2_FLAG_BROKEN_STATS;
8211         }
8212
8213         if (BNX2_CHIP(bp) == BNX2_CHIP_5709 &&
8214             BNX2_CHIP_REV(bp) != BNX2_CHIP_REV_Ax) {
8215                 if (pdev->msix_cap)
8216                         bp->flags |= BNX2_FLAG_MSIX_CAP;
8217         }
8218
8219         if (BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A0 &&
8220             BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A1) {
8221                 if (pdev->msi_cap)
8222                         bp->flags |= BNX2_FLAG_MSI_CAP;
8223         }
8224
8225         /* 5708 cannot support DMA addresses > 40-bit.  */
8226         if (BNX2_CHIP(bp) == BNX2_CHIP_5708)
8227                 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
8228         else
8229                 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
8230
8231         /* Configure DMA attributes. */
8232         if (pci_set_dma_mask(pdev, dma_mask) == 0) {
8233                 dev->features |= NETIF_F_HIGHDMA;
8234                 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
8235                 if (rc) {
8236                         dev_err(&pdev->dev,
8237                                 "pci_set_consistent_dma_mask failed, aborting\n");
8238                         goto err_out_unmap;
8239                 }
8240         } else if ((rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
8241                 dev_err(&pdev->dev, "System does not support DMA, aborting\n");
8242                 goto err_out_unmap;
8243         }
8244
8245         if (!(bp->flags & BNX2_FLAG_PCIE))
8246                 bnx2_get_pci_speed(bp);
8247
8248         /* 5706A0 may falsely detect SERR and PERR. */
8249         if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
8250                 reg = BNX2_RD(bp, PCI_COMMAND);
8251                 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
8252                 BNX2_WR(bp, PCI_COMMAND, reg);
8253         } else if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1) &&
8254                 !(bp->flags & BNX2_FLAG_PCIX)) {
8255
8256                 dev_err(&pdev->dev,
8257                         "5706 A1 can only be used in a PCIX bus, aborting\n");
8258                 goto err_out_unmap;
8259         }
8260
8261         bnx2_init_nvram(bp);
8262
8263         reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);
8264
8265         if (bnx2_reg_rd_ind(bp, BNX2_MCP_TOE_ID) & BNX2_MCP_TOE_ID_FUNCTION_ID)
8266                 bp->func = 1;
8267
8268         if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
8269             BNX2_SHM_HDR_SIGNATURE_SIG) {
8270                 u32 off = bp->func << 2;
8271
8272                 bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
8273         } else
8274                 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
8275
8276         /* Get the permanent MAC address.  First we need to make sure the
8277          * firmware is actually running.
8278          */
8279         reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE);
8280
8281         if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
8282             BNX2_DEV_INFO_SIGNATURE_MAGIC) {
8283                 dev_err(&pdev->dev, "Firmware not running, aborting\n");
8284                 rc = -ENODEV;
8285                 goto err_out_unmap;
8286         }
8287
8288         bnx2_read_vpd_fw_ver(bp);
8289
8290         j = strlen(bp->fw_version);
8291         reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV);
8292         for (i = 0; i < 3 && j < 24; i++) {
8293                 u8 num, k, skip0;
8294
8295                 if (i == 0) {
8296                         bp->fw_version[j++] = 'b';
8297                         bp->fw_version[j++] = 'c';
8298                         bp->fw_version[j++] = ' ';
8299                 }
8300                 num = (u8) (reg >> (24 - (i * 8)));
8301                 for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
8302                         if (num >= k || !skip0 || k == 1) {
8303                                 bp->fw_version[j++] = (num / k) + '0';
8304                                 skip0 = 0;
8305                         }
8306                 }
8307                 if (i != 2)
8308                         bp->fw_version[j++] = '.';
8309         }
8310         reg = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
8311         if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
8312                 bp->wol = 1;
8313
8314         if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
8315                 bp->flags |= BNX2_FLAG_ASF_ENABLE;
8316
8317                 for (i = 0; i < 30; i++) {
8318                         reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
8319                         if (reg & BNX2_CONDITION_MFW_RUN_MASK)
8320                                 break;
8321                         msleep(10);
8322                 }
8323         }
8324         reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
8325         reg &= BNX2_CONDITION_MFW_RUN_MASK;
8326         if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
8327             reg != BNX2_CONDITION_MFW_RUN_NONE) {
8328                 u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);
8329
8330                 if (j < 32)
8331                         bp->fw_version[j++] = ' ';
8332                 for (i = 0; i < 3 && j < 28; i++) {
8333                         reg = bnx2_reg_rd_ind(bp, addr + i * 4);
8334                         reg = be32_to_cpu(reg);
8335                         memcpy(&bp->fw_version[j], &reg, 4);
8336                         j += 4;
8337                 }
8338         }
8339
8340         reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_UPPER);
8341         bp->mac_addr[0] = (u8) (reg >> 8);
8342         bp->mac_addr[1] = (u8) reg;
8343
8344         reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_LOWER);
8345         bp->mac_addr[2] = (u8) (reg >> 24);
8346         bp->mac_addr[3] = (u8) (reg >> 16);
8347         bp->mac_addr[4] = (u8) (reg >> 8);
8348         bp->mac_addr[5] = (u8) reg;
8349
8350         bp->tx_ring_size = BNX2_MAX_TX_DESC_CNT;
8351         bnx2_set_rx_ring_size(bp, 255);
8352
8353         bp->tx_quick_cons_trip_int = 2;
8354         bp->tx_quick_cons_trip = 20;
8355         bp->tx_ticks_int = 18;
8356         bp->tx_ticks = 80;
8357
8358         bp->rx_quick_cons_trip_int = 2;
8359         bp->rx_quick_cons_trip = 12;
8360         bp->rx_ticks_int = 18;
8361         bp->rx_ticks = 18;
8362
8363         bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
8364
8365         bp->current_interval = BNX2_TIMER_INTERVAL;
8366
8367         bp->phy_addr = 1;
8368
8369         /* allocate stats_blk */
8370         rc = bnx2_alloc_stats_blk(dev);
8371         if (rc)
8372                 goto err_out_unmap;
8373
8374         /* Disable WOL support if we are running on a SERDES chip. */
8375         if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
8376                 bnx2_get_5709_media(bp);
8377         else if (BNX2_CHIP_BOND(bp) & BNX2_CHIP_BOND_SERDES_BIT)
8378                 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
8379
8380         bp->phy_port = PORT_TP;
8381         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
8382                 bp->phy_port = PORT_FIBRE;
8383                 reg = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
8384                 if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
8385                         bp->flags |= BNX2_FLAG_NO_WOL;
8386                         bp->wol = 0;
8387                 }
8388                 if (BNX2_CHIP(bp) == BNX2_CHIP_5706) {
8389                         /* Don't do parallel detect on this board because of
8390                          * some board problems.  The link will not go down
8391                          * if we do parallel detect.
8392                          */
8393                         if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
8394                             pdev->subsystem_device == 0x310c)
8395                                 bp->phy_flags |= BNX2_PHY_FLAG_NO_PARALLEL;
8396                 } else {
8397                         bp->phy_addr = 2;
8398                         if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
8399                                 bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
8400                 }
8401         } else if (BNX2_CHIP(bp) == BNX2_CHIP_5706 ||
8402                    BNX2_CHIP(bp) == BNX2_CHIP_5708)
8403                 bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX;
8404         else if (BNX2_CHIP(bp) == BNX2_CHIP_5709 &&
8405                  (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax ||
8406                   BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Bx))
8407                 bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC;
8408
8409         bnx2_init_fw_cap(bp);
8410
8411         if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0) ||
8412             (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B0) ||
8413             (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B1) ||
8414             !(BNX2_RD(bp, BNX2_PCI_CONFIG_3) & BNX2_PCI_CONFIG_3_VAUX_PRESET)) {
8415                 bp->flags |= BNX2_FLAG_NO_WOL;
8416                 bp->wol = 0;
8417         }
8418
8419         if (bp->flags & BNX2_FLAG_NO_WOL)
8420                 device_set_wakeup_capable(&bp->pdev->dev, false);
8421         else
8422                 device_set_wakeup_enable(&bp->pdev->dev, bp->wol);
8423
8424         if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
8425                 bp->tx_quick_cons_trip_int =
8426                         bp->tx_quick_cons_trip;
8427                 bp->tx_ticks_int = bp->tx_ticks;
8428                 bp->rx_quick_cons_trip_int =
8429                         bp->rx_quick_cons_trip;
8430                 bp->rx_ticks_int = bp->rx_ticks;
8431                 bp->comp_prod_trip_int = bp->comp_prod_trip;
8432                 bp->com_ticks_int = bp->com_ticks;
8433                 bp->cmd_ticks_int = bp->cmd_ticks;
8434         }
8435
8436         /* Disable MSI on 5706 if AMD 8132 bridge is found.
8437          *
8438          * MSI is defined to be 32-bit write.  The 5706 does 64-bit MSI writes
8439          * with byte enables disabled on the unused 32-bit word.  This is legal
8440          * but causes problems on the AMD 8132 which will eventually stop
8441          * responding after a while.
8442          *
8443          * AMD believes this incompatibility is unique to the 5706, and
8444          * prefers to locally disable MSI rather than globally disabling it.
8445          */
8446         if (BNX2_CHIP(bp) == BNX2_CHIP_5706 && disable_msi == 0) {
8447                 struct pci_dev *amd_8132 = NULL;
8448
8449                 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
8450                                                   PCI_DEVICE_ID_AMD_8132_BRIDGE,
8451                                                   amd_8132))) {
8452
8453                         if (amd_8132->revision >= 0x10 &&
8454                             amd_8132->revision <= 0x13) {
8455                                 disable_msi = 1;
8456                                 pci_dev_put(amd_8132);
8457                                 break;
8458                         }
8459                 }
8460         }
8461
8462         bnx2_set_default_link(bp);
8463         bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
8464
8465         init_timer(&bp->timer);
8466         bp->timer.expires = RUN_AT(BNX2_TIMER_INTERVAL);
8467         bp->timer.data = (unsigned long) bp;
8468         bp->timer.function = bnx2_timer;
8469
8470 #ifdef BCM_CNIC
8471         if (bnx2_shmem_rd(bp, BNX2_ISCSI_INITIATOR) & BNX2_ISCSI_INITIATOR_EN)
8472                 bp->cnic_eth_dev.max_iscsi_conn =
8473                         (bnx2_shmem_rd(bp, BNX2_ISCSI_MAX_CONN) &
8474                          BNX2_ISCSI_MAX_CONN_MASK) >> BNX2_ISCSI_MAX_CONN_SHIFT;
8475         bp->cnic_probe = bnx2_cnic_probe;
8476 #endif
8477         pci_save_state(pdev);
8478
8479         return 0;
8480
8481 err_out_unmap:
8482         if (bp->flags & BNX2_FLAG_AER_ENABLED) {
8483                 pci_disable_pcie_error_reporting(pdev);
8484                 bp->flags &= ~BNX2_FLAG_AER_ENABLED;
8485         }
8486
8487         pci_iounmap(pdev, bp->regview);
8488         bp->regview = NULL;
8489
8490 err_out_release:
8491         pci_release_regions(pdev);
8492
8493 err_out_disable:
8494         pci_disable_device(pdev);
8495
8496 err_out:
8497         kfree(bp->temp_stats_blk);
8498
8499         return rc;
8500 }
8501
8502 static char *
8503 bnx2_bus_string(struct bnx2 *bp, char *str)
8504 {
8505         char *s = str;
8506
8507         if (bp->flags & BNX2_FLAG_PCIE) {
8508                 s += sprintf(s, "PCI Express");
8509         } else {
8510                 s += sprintf(s, "PCI");
8511                 if (bp->flags & BNX2_FLAG_PCIX)
8512                         s += sprintf(s, "-X");
8513                 if (bp->flags & BNX2_FLAG_PCI_32BIT)
8514                         s += sprintf(s, " 32-bit");
8515                 else
8516                         s += sprintf(s, " 64-bit");
8517                 s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
8518         }
8519         return str;
8520 }
8521
8522 static void
8523 bnx2_del_napi(struct bnx2 *bp)
8524 {
8525         int i;
8526
8527         for (i = 0; i < bp->irq_nvecs; i++)
8528                 netif_napi_del(&bp->bnx2_napi[i].napi);
8529 }
8530
8531 static void
8532 bnx2_init_napi(struct bnx2 *bp)
8533 {
8534         int i;
8535
8536         for (i = 0; i < bp->irq_nvecs; i++) {
8537                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
8538                 int (*poll)(struct napi_struct *, int);
8539
8540                 if (i == 0)
8541                         poll = bnx2_poll;
8542                 else
8543                         poll = bnx2_poll_msix;
8544
8545                 netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll, 64);
8546                 bnapi->bp = bp;
8547         }
8548 }
8549
/* Network stack entry points for a bnx2 netdev; installed on the device
 * in bnx2_init_one().
 */
static const struct net_device_ops bnx2_netdev_ops = {
	.ndo_open		= bnx2_open,
	.ndo_start_xmit		= bnx2_start_xmit,
	.ndo_stop		= bnx2_close,
	.ndo_get_stats64	= bnx2_get_stats64,
	.ndo_set_rx_mode	= bnx2_set_rx_mode,
	.ndo_do_ioctl		= bnx2_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= bnx2_change_mac_addr,
	.ndo_change_mtu		= bnx2_change_mtu,
	.ndo_set_features	= bnx2_set_features,
	.ndo_tx_timeout		= bnx2_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= poll_bnx2,
#endif
};
8566
/* PCI probe entry point: allocate the netdev, initialize the board via
 * bnx2_init_board(), configure offload features and register the netdev.
 * Returns 0 on success or a negative errno, unwinding all acquired
 * resources on failure.
 */
static int
bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int version_printed = 0;
	struct net_device *dev;
	struct bnx2 *bp;
	int rc;
	char str[40];

	/* Print the driver banner only on the first probed device. */
	if (version_printed++ == 0)
		pr_info("%s", version);

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS);
	if (!dev)
		return -ENOMEM;

	rc = bnx2_init_board(pdev, dev);
	if (rc < 0)
		goto err_free;

	dev->netdev_ops = &bnx2_netdev_ops;
	dev->watchdog_timeo = TX_TIMEOUT;
	dev->ethtool_ops = &bnx2_ethtool_ops;

	bp = netdev_priv(dev);

	pci_set_drvdata(pdev, dev);

	/*
	 * In-flight DMA from 1st kernel could continue going in kdump kernel.
	 * New io-page table has been created before bnx2 does reset at open stage.
	 * We have to wait for the in-flight DMA to complete to avoid it look up
	 * into the newly created io-page table.
	 */
	if (is_kdump_kernel())
		bnx2_wait_dma_complete(bp);

	/* Permanent MAC was read from shared memory by bnx2_init_board(). */
	memcpy(dev->dev_addr, bp->mac_addr, ETH_ALEN);

	dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
		NETIF_F_TSO | NETIF_F_TSO_ECN |
		NETIF_F_RXHASH | NETIF_F_RXCSUM;

	/* Only the 5709 supports IPv6 checksum offload and TSO6. */
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
		dev->hw_features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6;

	dev->vlan_features = dev->hw_features;
	dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
	dev->features |= dev->hw_features;
	dev->priv_flags |= IFF_UNICAST_FLT;

	/* HW VLAN stripping cannot be turned off on boards that must
	 * keep VLAN tags (e.g. for management firmware traffic).
	 */
	if (!(bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
		dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_RX;

	if ((rc = register_netdev(dev))) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto error;
	}

	netdev_info(dev, "%s (%c%d) %s found at mem %lx, IRQ %d, "
		    "node addr %pM\n", board_info[ent->driver_data].name,
		    ((BNX2_CHIP_ID(bp) & 0xf000) >> 12) + 'A',
		    ((BNX2_CHIP_ID(bp) & 0x0ff0) >> 4),
		    bnx2_bus_string(bp, str), (long)pci_resource_start(pdev, 0),
		    pdev->irq, dev->dev_addr);

	return 0;

error:
	/* Undo what bnx2_init_board() acquired. */
	pci_iounmap(pdev, bp->regview);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
err_free:
	bnx2_free_stats_blk(dev);
	free_netdev(dev);
	return rc;
}
8645
/* PCI remove entry point: unregister the netdev, stop deferred work,
 * and release all resources acquired during probe (in reverse order).
 */
static void
bnx2_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	/* Detach from the stack first so no new work arrives. */
	unregister_netdev(dev);

	del_timer_sync(&bp->timer);
	cancel_work_sync(&bp->reset_task);

	pci_iounmap(bp->pdev, bp->regview);

	bnx2_free_stats_blk(dev);
	kfree(bp->temp_stats_blk);

	if (bp->flags & BNX2_FLAG_AER_ENABLED) {
		pci_disable_pcie_error_reporting(pdev);
		bp->flags &= ~BNX2_FLAG_AER_ENABLED;
	}

	bnx2_release_firmware(bp);

	free_netdev(dev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);
}
8674
8675 #ifdef CONFIG_PM_SLEEP
/* System-sleep suspend handler: quiesce the interface if it is running,
 * shut the chip down, and arm Wake-on-LAN state.
 */
static int
bnx2_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	if (netif_running(dev)) {
		/* Stop deferred resets before tearing anything down. */
		cancel_work_sync(&bp->reset_task);
		bnx2_netif_stop(bp, true);
		netif_device_detach(dev);
		del_timer_sync(&bp->timer);
		bnx2_shutdown_chip(bp);
		__bnx2_free_irq(bp);
		bnx2_free_skbs(bp);
	}
	/* Configure WOL even when the interface is down. */
	bnx2_setup_wol(bp);
	return 0;
}
8695
/* System-sleep resume handler: restore power state and, if the interface
 * was running at suspend time, reinitialize the NIC and restart traffic.
 */
static int
bnx2_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	/* Nothing was torn down in suspend if the interface was down. */
	if (!netif_running(dev))
		return 0;

	bnx2_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);
	bnx2_request_irq(bp);
	bnx2_init_nic(bp, 1);
	bnx2_netif_start(bp, true);
	return 0;
}
8713
8714 static SIMPLE_DEV_PM_OPS(bnx2_pm_ops, bnx2_suspend, bnx2_resume);
8715 #define BNX2_PM_OPS (&bnx2_pm_ops)
8716
8717 #else
8718
8719 #define BNX2_PM_OPS NULL
8720
8721 #endif /* CONFIG_PM_SLEEP */
/**
 * bnx2_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	rtnl_lock();
	netif_device_detach(dev);

	if (state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		/* The link is dead for good; tell the AER core to give up. */
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (netif_running(dev)) {
		bnx2_netif_stop(bp, true);
		del_timer_sync(&bp->timer);
		bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
	}

	pci_disable_device(pdev);
	rtnl_unlock();

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
8756
/**
 * bnx2_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);
	pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
	int err = 0;

	rtnl_lock();
	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset\n");
	} else {
		pci_set_master(pdev);
		/* Restore config space saved at probe, then re-save so a
		 * later restore sees the post-reset state.
		 */
		pci_restore_state(pdev);
		pci_save_state(pdev);

		if (netif_running(dev))
			err = bnx2_init_nic(bp, 1);

		if (!err)
			result = PCI_ERS_RESULT_RECOVERED;
	}

	if (result != PCI_ERS_RESULT_RECOVERED && netif_running(dev)) {
		/* dev_close() needs NAPI enabled to quiesce cleanly. */
		bnx2_napi_enable(bp);
		dev_close(dev);
	}
	rtnl_unlock();

	if (!(bp->flags & BNX2_FLAG_AER_ENABLED))
		return result;

	err = pci_cleanup_aer_uncorrect_error_status(pdev);
	if (err) {
		dev_err(&pdev->dev,
			"pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
			 err); /* non-fatal, continue */
	}

	return result;
}
8804
/**
 * bnx2_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * its OK to resume normal operation.
 */
static void bnx2_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	rtnl_lock();
	if (netif_running(dev))
		bnx2_netif_start(bp, true);

	/* Undo the detach done in bnx2_io_error_detected(). */
	netif_device_attach(dev);
	rtnl_unlock();
}
8824
8825 static void bnx2_shutdown(struct pci_dev *pdev)
8826 {
8827         struct net_device *dev = pci_get_drvdata(pdev);
8828         struct bnx2 *bp;
8829
8830         if (!dev)
8831                 return;
8832
8833         bp = netdev_priv(dev);
8834         if (!bp)
8835                 return;
8836
8837         rtnl_lock();
8838         if (netif_running(dev))
8839                 dev_close(bp->dev);
8840
8841         if (system_state == SYSTEM_POWER_OFF)
8842                 bnx2_set_power_state(bp, PCI_D3hot);
8843
8844         rtnl_unlock();
8845 }
8846
/* AER error-recovery callbacks for the PCI core. */
static const struct pci_error_handlers bnx2_err_handler = {
	.error_detected	= bnx2_io_error_detected,
	.slot_reset	= bnx2_io_slot_reset,
	.resume		= bnx2_io_resume,
};

/* PCI driver registration: probe/remove, power management,
 * error handling and shutdown hooks.
 */
static struct pci_driver bnx2_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnx2_pci_tbl,
	.probe		= bnx2_init_one,
	.remove		= bnx2_remove_one,
	.driver.pm	= BNX2_PM_OPS,
	.err_handler	= &bnx2_err_handler,
	.shutdown	= bnx2_shutdown,
};

/* Generates module init/exit that register/unregister the PCI driver. */
module_pci_driver(bnx2_pci_driver);