GNU Linux-libre 4.14.266-gnu1
drivers/net/ethernet/intel/e1000/e1000_main.c
1 /*******************************************************************************
2
3   Intel PRO/1000 Linux driver
4   Copyright(c) 1999 - 2006 Intel Corporation.
5
6   This program is free software; you can redistribute it and/or modify it
7   under the terms and conditions of the GNU General Public License,
8   version 2, as published by the Free Software Foundation.
9
10   This program is distributed in the hope it will be useful, but WITHOUT
11   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
13   more details.
14
15   You should have received a copy of the GNU General Public License along with
16   this program; if not, write to the Free Software Foundation, Inc.,
17   51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19   The full GNU General Public License is included in this distribution in
20   the file called "COPYING".
21
22   Contact Information:
23   Linux NICS <linux.nics@intel.com>
24   e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27 *******************************************************************************/
28
29 #include "e1000.h"
30 #include <net/ip6_checksum.h>
31 #include <linux/io.h>
32 #include <linux/prefetch.h>
33 #include <linux/bitops.h>
34 #include <linux/if_vlan.h>
35
36 char e1000_driver_name[] = "e1000";
37 static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
38 #define DRV_VERSION "7.3.21-k8-NAPI"
39 const char e1000_driver_version[] = DRV_VERSION;
40 static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
41
42 /* e1000_pci_tbl - PCI Device ID Table
43  *
44  * Last entry must be all 0s
45  *
46  * Macro expands to...
47  *   {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
48  */
49 static const struct pci_device_id e1000_pci_tbl[] = {
50         INTEL_E1000_ETHERNET_DEVICE(0x1000),
51         INTEL_E1000_ETHERNET_DEVICE(0x1001),
52         INTEL_E1000_ETHERNET_DEVICE(0x1004),
53         INTEL_E1000_ETHERNET_DEVICE(0x1008),
54         INTEL_E1000_ETHERNET_DEVICE(0x1009),
55         INTEL_E1000_ETHERNET_DEVICE(0x100C),
56         INTEL_E1000_ETHERNET_DEVICE(0x100D),
57         INTEL_E1000_ETHERNET_DEVICE(0x100E),
58         INTEL_E1000_ETHERNET_DEVICE(0x100F),
59         INTEL_E1000_ETHERNET_DEVICE(0x1010),
60         INTEL_E1000_ETHERNET_DEVICE(0x1011),
61         INTEL_E1000_ETHERNET_DEVICE(0x1012),
62         INTEL_E1000_ETHERNET_DEVICE(0x1013),
63         INTEL_E1000_ETHERNET_DEVICE(0x1014),
64         INTEL_E1000_ETHERNET_DEVICE(0x1015),
65         INTEL_E1000_ETHERNET_DEVICE(0x1016),
66         INTEL_E1000_ETHERNET_DEVICE(0x1017),
67         INTEL_E1000_ETHERNET_DEVICE(0x1018),
68         INTEL_E1000_ETHERNET_DEVICE(0x1019),
69         INTEL_E1000_ETHERNET_DEVICE(0x101A),
70         INTEL_E1000_ETHERNET_DEVICE(0x101D),
71         INTEL_E1000_ETHERNET_DEVICE(0x101E),
72         INTEL_E1000_ETHERNET_DEVICE(0x1026),
73         INTEL_E1000_ETHERNET_DEVICE(0x1027),
74         INTEL_E1000_ETHERNET_DEVICE(0x1028),
75         INTEL_E1000_ETHERNET_DEVICE(0x1075),
76         INTEL_E1000_ETHERNET_DEVICE(0x1076),
77         INTEL_E1000_ETHERNET_DEVICE(0x1077),
78         INTEL_E1000_ETHERNET_DEVICE(0x1078),
79         INTEL_E1000_ETHERNET_DEVICE(0x1079),
80         INTEL_E1000_ETHERNET_DEVICE(0x107A),
81         INTEL_E1000_ETHERNET_DEVICE(0x107B),
82         INTEL_E1000_ETHERNET_DEVICE(0x107C),
83         INTEL_E1000_ETHERNET_DEVICE(0x108A),
84         INTEL_E1000_ETHERNET_DEVICE(0x1099),
85         INTEL_E1000_ETHERNET_DEVICE(0x10B5),
86         INTEL_E1000_ETHERNET_DEVICE(0x2E6E),
87         /* required last entry */
88         {0,}
89 };
90
91 MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
92
93 int e1000_up(struct e1000_adapter *adapter);
94 void e1000_down(struct e1000_adapter *adapter);
95 void e1000_reinit_locked(struct e1000_adapter *adapter);
96 void e1000_reset(struct e1000_adapter *adapter);
97 int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
98 int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
99 void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
100 void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
101 static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
102                                     struct e1000_tx_ring *txdr);
103 static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
104                                     struct e1000_rx_ring *rxdr);
105 static void e1000_free_tx_resources(struct e1000_adapter *adapter,
106                                     struct e1000_tx_ring *tx_ring);
107 static void e1000_free_rx_resources(struct e1000_adapter *adapter,
108                                     struct e1000_rx_ring *rx_ring);
109 void e1000_update_stats(struct e1000_adapter *adapter);
110
111 static int e1000_init_module(void);
112 static void e1000_exit_module(void);
113 static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
114 static void e1000_remove(struct pci_dev *pdev);
115 static int e1000_alloc_queues(struct e1000_adapter *adapter);
116 static int e1000_sw_init(struct e1000_adapter *adapter);
117 int e1000_open(struct net_device *netdev);
118 int e1000_close(struct net_device *netdev);
119 static void e1000_configure_tx(struct e1000_adapter *adapter);
120 static void e1000_configure_rx(struct e1000_adapter *adapter);
121 static void e1000_setup_rctl(struct e1000_adapter *adapter);
122 static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter);
123 static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter);
124 static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
125                                 struct e1000_tx_ring *tx_ring);
126 static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
127                                 struct e1000_rx_ring *rx_ring);
128 static void e1000_set_rx_mode(struct net_device *netdev);
129 static void e1000_update_phy_info_task(struct work_struct *work);
130 static void e1000_watchdog(struct work_struct *work);
131 static void e1000_82547_tx_fifo_stall_task(struct work_struct *work);
132 static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
133                                     struct net_device *netdev);
134 static int e1000_change_mtu(struct net_device *netdev, int new_mtu);
135 static int e1000_set_mac(struct net_device *netdev, void *p);
136 static irqreturn_t e1000_intr(int irq, void *data);
137 static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
138                                struct e1000_tx_ring *tx_ring);
139 static int e1000_clean(struct napi_struct *napi, int budget);
140 static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
141                                struct e1000_rx_ring *rx_ring,
142                                int *work_done, int work_to_do);
143 static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
144                                      struct e1000_rx_ring *rx_ring,
145                                      int *work_done, int work_to_do);
146 static void e1000_alloc_dummy_rx_buffers(struct e1000_adapter *adapter,
147                                          struct e1000_rx_ring *rx_ring,
148                                          int cleaned_count)
149 {
150 }
151 static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
152                                    struct e1000_rx_ring *rx_ring,
153                                    int cleaned_count);
154 static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
155                                          struct e1000_rx_ring *rx_ring,
156                                          int cleaned_count);
157 static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
158 static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
159                            int cmd);
160 static void e1000_enter_82542_rst(struct e1000_adapter *adapter);
161 static void e1000_leave_82542_rst(struct e1000_adapter *adapter);
162 static void e1000_tx_timeout(struct net_device *dev);
163 static void e1000_reset_task(struct work_struct *work);
164 static void e1000_smartspeed(struct e1000_adapter *adapter);
165 static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
166                                        struct sk_buff *skb);
167
168 static bool e1000_vlan_used(struct e1000_adapter *adapter);
169 static void e1000_vlan_mode(struct net_device *netdev,
170                             netdev_features_t features);
171 static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
172                                      bool filter_on);
173 static int e1000_vlan_rx_add_vid(struct net_device *netdev,
174                                  __be16 proto, u16 vid);
175 static int e1000_vlan_rx_kill_vid(struct net_device *netdev,
176                                   __be16 proto, u16 vid);
177 static void e1000_restore_vlan(struct e1000_adapter *adapter);
178
179 #ifdef CONFIG_PM
180 static int e1000_suspend(struct pci_dev *pdev, pm_message_t state);
181 static int e1000_resume(struct pci_dev *pdev);
182 #endif
183 static void e1000_shutdown(struct pci_dev *pdev);
184
185 #ifdef CONFIG_NET_POLL_CONTROLLER
186 /* for netdump / net console */
187 static void e1000_netpoll(struct net_device *netdev);
188 #endif
189
190 #define COPYBREAK_DEFAULT 256
191 static unsigned int copybreak __read_mostly = COPYBREAK_DEFAULT;
192 module_param(copybreak, uint, 0644);
193 MODULE_PARM_DESC(copybreak,
194         "Maximum size of packet that is copied to a new buffer on receive");
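/* Illustrative usage (values are examples, not recommendations): copybreak can
 * be set at load time, e.g. "modprobe e1000 copybreak=128", and because it is
 * registered with 0644 permissions it can also be changed at runtime through
 * /sys/module/e1000/parameters/copybreak.  Packets no longer than copybreak
 * bytes are copied into a freshly allocated skb on receive so the original
 * receive buffer can be reused.
 */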
195
196 static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
197                                                 pci_channel_state_t state);
198 static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev);
199 static void e1000_io_resume(struct pci_dev *pdev);
200
201 static const struct pci_error_handlers e1000_err_handler = {
202         .error_detected = e1000_io_error_detected,
203         .slot_reset = e1000_io_slot_reset,
204         .resume = e1000_io_resume,
205 };
206
207 static struct pci_driver e1000_driver = {
208         .name     = e1000_driver_name,
209         .id_table = e1000_pci_tbl,
210         .probe    = e1000_probe,
211         .remove   = e1000_remove,
212 #ifdef CONFIG_PM
213         /* Power Management Hooks */
214         .suspend  = e1000_suspend,
215         .resume   = e1000_resume,
216 #endif
217         .shutdown = e1000_shutdown,
218         .err_handler = &e1000_err_handler
219 };
220
221 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
222 MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
223 MODULE_LICENSE("GPL");
224 MODULE_VERSION(DRV_VERSION);
225
226 #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
227 static int debug = -1;
228 module_param(debug, int, 0);
229 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
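/* Example: "debug" is handed to netif_msg_init() in e1000_probe(); the default
 * of -1 (or any out-of-range value) keeps DEFAULT_MSG_ENABLE, i.e. driver,
 * probe and link messages, while e.g. "modprobe e1000 debug=16" (an
 * illustrative value) enables all message classes.
 */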
230
231 /**
232  * e1000_get_hw_dev - return device
233  * @hw: pointer to the HW struct
234  * used by hardware layer to print debugging information
235  **/
236 struct net_device *e1000_get_hw_dev(struct e1000_hw *hw)
237 {
238         struct e1000_adapter *adapter = hw->back;
239         return adapter->netdev;
240 }
241
242 /**
243  * e1000_init_module - Driver Registration Routine
244  *
245  * e1000_init_module is the first routine called when the driver is
246  * loaded. All it does is register with the PCI subsystem.
247  **/
248 static int __init e1000_init_module(void)
249 {
250         int ret;
251         pr_info("%s - version %s\n", e1000_driver_string, e1000_driver_version);
252
253         pr_info("%s\n", e1000_copyright);
254
255         ret = pci_register_driver(&e1000_driver);
256         if (copybreak != COPYBREAK_DEFAULT) {
257                 if (copybreak == 0)
258                         pr_info("copybreak disabled\n");
259                 else
260                         pr_info("copybreak enabled for packets <= %u bytes\n",
261                                 copybreak);
262         }
263         return ret;
264 }
265
266 module_init(e1000_init_module);
267
268 /**
269  * e1000_exit_module - Driver Exit Cleanup Routine
270  *
271  * e1000_exit_module is called just before the driver is removed
272  * from memory.
273  **/
274 static void __exit e1000_exit_module(void)
275 {
276         pci_unregister_driver(&e1000_driver);
277 }
278
279 module_exit(e1000_exit_module);
280
281 static int e1000_request_irq(struct e1000_adapter *adapter)
282 {
283         struct net_device *netdev = adapter->netdev;
284         irq_handler_t handler = e1000_intr;
285         int irq_flags = IRQF_SHARED;
286         int err;
287
288         err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name,
289                           netdev);
290         if (err) {
291                 e_err(probe, "Unable to allocate interrupt Error: %d\n", err);
292         }
293
294         return err;
295 }
296
297 static void e1000_free_irq(struct e1000_adapter *adapter)
298 {
299         struct net_device *netdev = adapter->netdev;
300
301         free_irq(adapter->pdev->irq, netdev);
302 }
303
304 /**
305  * e1000_irq_disable - Mask off interrupt generation on the NIC
306  * @adapter: board private structure
307  **/
308 static void e1000_irq_disable(struct e1000_adapter *adapter)
309 {
310         struct e1000_hw *hw = &adapter->hw;
311
312         ew32(IMC, ~0);
313         E1000_WRITE_FLUSH();
314         synchronize_irq(adapter->pdev->irq);
315 }
316
317 /**
318  * e1000_irq_enable - Enable default interrupt generation settings
319  * @adapter: board private structure
320  **/
321 static void e1000_irq_enable(struct e1000_adapter *adapter)
322 {
323         struct e1000_hw *hw = &adapter->hw;
324
325         ew32(IMS, IMS_ENABLE_MASK);
326         E1000_WRITE_FLUSH();
327 }
328
329 static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
330 {
331         struct e1000_hw *hw = &adapter->hw;
332         struct net_device *netdev = adapter->netdev;
333         u16 vid = hw->mng_cookie.vlan_id;
334         u16 old_vid = adapter->mng_vlan_id;
335
336         if (!e1000_vlan_used(adapter))
337                 return;
338
339         if (!test_bit(vid, adapter->active_vlans)) {
340                 if (hw->mng_cookie.status &
341                     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) {
342                         e1000_vlan_rx_add_vid(netdev, htons(ETH_P_8021Q), vid);
343                         adapter->mng_vlan_id = vid;
344                 } else {
345                         adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
346                 }
347                 if ((old_vid != (u16)E1000_MNG_VLAN_NONE) &&
348                     (vid != old_vid) &&
349                     !test_bit(old_vid, adapter->active_vlans))
350                         e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q),
351                                                old_vid);
352         } else {
353                 adapter->mng_vlan_id = vid;
354         }
355 }
356
357 static void e1000_init_manageability(struct e1000_adapter *adapter)
358 {
359         struct e1000_hw *hw = &adapter->hw;
360
361         if (adapter->en_mng_pt) {
362                 u32 manc = er32(MANC);
363
364                 /* disable hardware interception of ARP */
365                 manc &= ~(E1000_MANC_ARP_EN);
366
367                 ew32(MANC, manc);
368         }
369 }
370
371 static void e1000_release_manageability(struct e1000_adapter *adapter)
372 {
373         struct e1000_hw *hw = &adapter->hw;
374
375         if (adapter->en_mng_pt) {
376                 u32 manc = er32(MANC);
377
378                 /* re-enable hardware interception of ARP */
379                 manc |= E1000_MANC_ARP_EN;
380
381                 ew32(MANC, manc);
382         }
383 }
384
385 /**
386  * e1000_configure - configure the hardware for RX and TX
387  * @adapter: board private structure
388  **/
389 static void e1000_configure(struct e1000_adapter *adapter)
390 {
391         struct net_device *netdev = adapter->netdev;
392         int i;
393
394         e1000_set_rx_mode(netdev);
395
396         e1000_restore_vlan(adapter);
397         e1000_init_manageability(adapter);
398
399         e1000_configure_tx(adapter);
400         e1000_setup_rctl(adapter);
401         e1000_configure_rx(adapter);
402         /* call E1000_DESC_UNUSED which always leaves
403          * at least 1 descriptor unused to make sure
404          * next_to_use != next_to_clean
405          */
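        /* Illustrative example, assuming the usual E1000_DESC_UNUSED()
         * definition in e1000.h: with ring->count == 256 and
         * next_to_use == next_to_clean (a freshly reset, empty ring) the
         * macro evaluates to 255, so alloc_rx_buf() fills all but one
         * descriptor and next_to_use can never wrap around onto
         * next_to_clean.
         */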
406         for (i = 0; i < adapter->num_rx_queues; i++) {
407                 struct e1000_rx_ring *ring = &adapter->rx_ring[i];
408                 adapter->alloc_rx_buf(adapter, ring,
409                                       E1000_DESC_UNUSED(ring));
410         }
411 }
412
413 int e1000_up(struct e1000_adapter *adapter)
414 {
415         struct e1000_hw *hw = &adapter->hw;
416
417         /* hardware has been reset, we need to reload some things */
418         e1000_configure(adapter);
419
420         clear_bit(__E1000_DOWN, &adapter->flags);
421
422         napi_enable(&adapter->napi);
423
424         e1000_irq_enable(adapter);
425
426         netif_wake_queue(adapter->netdev);
427
428         /* fire a link change interrupt to start the watchdog */
429         ew32(ICS, E1000_ICS_LSC);
430         return 0;
431 }
432
433 /**
434  * e1000_power_up_phy - restore link in case the phy was powered down
435  * @adapter: address of board private structure
436  *
437  * The phy may be powered down to save power and turn off link when the
438  * driver is unloaded and wake on lan is not enabled (among others)
439  * *** this routine MUST be followed by a call to e1000_reset ***
440  **/
441 void e1000_power_up_phy(struct e1000_adapter *adapter)
442 {
443         struct e1000_hw *hw = &adapter->hw;
444         u16 mii_reg = 0;
445
446         /* Just clear the power down bit to wake the phy back up */
447         if (hw->media_type == e1000_media_type_copper) {
448                 /* according to the manual, the phy will retain its
449                  * settings across a power-down/up cycle
450                  */
451                 e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
452                 mii_reg &= ~MII_CR_POWER_DOWN;
453                 e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
454         }
455 }
456
457 static void e1000_power_down_phy(struct e1000_adapter *adapter)
458 {
459         struct e1000_hw *hw = &adapter->hw;
460
461         /* Power down the PHY so no link is implied when interface is down.
462          * The PHY cannot be powered down if any of the following is true:
463          * (a) WoL is enabled
464          * (b) AMT is active
465          * (c) SoL/IDER session is active
466          */
467         if (!adapter->wol && hw->mac_type >= e1000_82540 &&
468            hw->media_type == e1000_media_type_copper) {
469                 u16 mii_reg = 0;
470
471                 switch (hw->mac_type) {
472                 case e1000_82540:
473                 case e1000_82545:
474                 case e1000_82545_rev_3:
475                 case e1000_82546:
476                 case e1000_ce4100:
477                 case e1000_82546_rev_3:
478                 case e1000_82541:
479                 case e1000_82541_rev_2:
480                 case e1000_82547:
481                 case e1000_82547_rev_2:
482                         if (er32(MANC) & E1000_MANC_SMBUS_EN)
483                                 goto out;
484                         break;
485                 default:
486                         goto out;
487                 }
488                 e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
489                 mii_reg |= MII_CR_POWER_DOWN;
490                 e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
491                 msleep(1);
492         }
493 out:
494         return;
495 }
496
497 static void e1000_down_and_stop(struct e1000_adapter *adapter)
498 {
499         set_bit(__E1000_DOWN, &adapter->flags);
500
501         cancel_delayed_work_sync(&adapter->watchdog_task);
502
503         /*
504          * Since the watchdog task can reschedule other tasks, we should cancel
505          * it first; otherwise we can end up with a work item that is still
506          * running after the adapter has been turned down.
507          */
508
509         cancel_delayed_work_sync(&adapter->phy_info_task);
510         cancel_delayed_work_sync(&adapter->fifo_stall_task);
511
512         /* Only kill reset task if adapter is not resetting */
513         if (!test_bit(__E1000_RESETTING, &adapter->flags))
514                 cancel_work_sync(&adapter->reset_task);
515 }
516
517 void e1000_down(struct e1000_adapter *adapter)
518 {
519         struct e1000_hw *hw = &adapter->hw;
520         struct net_device *netdev = adapter->netdev;
521         u32 rctl, tctl;
522
523         /* disable receives in the hardware */
524         rctl = er32(RCTL);
525         ew32(RCTL, rctl & ~E1000_RCTL_EN);
526         /* flush and sleep below */
527
528         netif_tx_disable(netdev);
529
530         /* disable transmits in the hardware */
531         tctl = er32(TCTL);
532         tctl &= ~E1000_TCTL_EN;
533         ew32(TCTL, tctl);
534         /* flush both disables and wait for them to finish */
535         E1000_WRITE_FLUSH();
536         msleep(10);
537
538         /* Set the carrier off after transmits have been disabled in the
539          * hardware, to avoid race conditions with e1000_watchdog() (which
540          * may be running concurrently to us, checking for the carrier
541          * bit to decide whether it should enable transmits again). Such
542          * a race condition would result in transmission being disabled
543          * in the hardware until the next IFF_DOWN+IFF_UP cycle.
544          */
545         netif_carrier_off(netdev);
546
547         napi_disable(&adapter->napi);
548
549         e1000_irq_disable(adapter);
550
551         /* Setting DOWN must be after irq_disable to prevent
552          * a screaming interrupt.  Setting DOWN also prevents
553          * tasks from rescheduling.
554          */
555         e1000_down_and_stop(adapter);
556
557         adapter->link_speed = 0;
558         adapter->link_duplex = 0;
559
560         e1000_reset(adapter);
561         e1000_clean_all_tx_rings(adapter);
562         e1000_clean_all_rx_rings(adapter);
563 }
564
565 void e1000_reinit_locked(struct e1000_adapter *adapter)
566 {
567         WARN_ON(in_interrupt());
568         while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
569                 msleep(1);
570
571         /* only run the task if not already down */
572         if (!test_bit(__E1000_DOWN, &adapter->flags)) {
573                 e1000_down(adapter);
574                 e1000_up(adapter);
575         }
576
577         clear_bit(__E1000_RESETTING, &adapter->flags);
578 }
579
580 void e1000_reset(struct e1000_adapter *adapter)
581 {
582         struct e1000_hw *hw = &adapter->hw;
583         u32 pba = 0, tx_space, min_tx_space, min_rx_space;
584         bool legacy_pba_adjust = false;
585         u16 hwm;
586
587         /* Repartition PBA for MTUs greater than 9k.
588          * CTRL.RST is required for the change to take effect.
589          */
590
591         switch (hw->mac_type) {
592         case e1000_82542_rev2_0:
593         case e1000_82542_rev2_1:
594         case e1000_82543:
595         case e1000_82544:
596         case e1000_82540:
597         case e1000_82541:
598         case e1000_82541_rev_2:
599                 legacy_pba_adjust = true;
600                 pba = E1000_PBA_48K;
601                 break;
602         case e1000_82545:
603         case e1000_82545_rev_3:
604         case e1000_82546:
605         case e1000_ce4100:
606         case e1000_82546_rev_3:
607                 pba = E1000_PBA_48K;
608                 break;
609         case e1000_82547:
610         case e1000_82547_rev_2:
611                 legacy_pba_adjust = true;
612                 pba = E1000_PBA_30K;
613                 break;
614         case e1000_undefined:
615         case e1000_num_macs:
616                 break;
617         }
618
619         if (legacy_pba_adjust) {
620                 if (hw->max_frame_size > E1000_RXBUFFER_8192)
621                         pba -= 8; /* allocate more FIFO for Tx */
622
623                 if (hw->mac_type == e1000_82547) {
624                         adapter->tx_fifo_head = 0;
625                         adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT;
626                         adapter->tx_fifo_size =
627                                 (E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT;
628                         atomic_set(&adapter->tx_fifo_stall, 0);
629                 }
630         } else if (hw->max_frame_size >  ETH_FRAME_LEN + ETH_FCS_LEN) {
631                 /* adjust PBA for jumbo frames */
632                 ew32(PBA, pba);
633
634                 /* To maintain wire speed transmits, the Tx FIFO should be
635                  * large enough to accommodate two full transmit packets,
636                  * rounded up to the next 1KB and expressed in KB.  Likewise,
637                  * the Rx FIFO should be large enough to accommodate at least
638                  * one full receive packet and is similarly rounded up and
639                  * expressed in KB.
640                  */
641                 pba = er32(PBA);
642                 /* upper 16 bits has Tx packet buffer allocation size in KB */
643                 tx_space = pba >> 16;
644                 /* lower 16 bits has Rx packet buffer allocation size in KB */
645                 pba &= 0xffff;
646                 /* the Tx FIFO also stores 16 bytes of information per packet,
647                  * but doesn't include the Ethernet FCS because hardware appends it
648                  */
649                 min_tx_space = (hw->max_frame_size +
650                                 sizeof(struct e1000_tx_desc) -
651                                 ETH_FCS_LEN) * 2;
652                 min_tx_space = ALIGN(min_tx_space, 1024);
653                 min_tx_space >>= 10;
654                 /* software strips receive CRC, so leave room for it */
655                 min_rx_space = hw->max_frame_size;
656                 min_rx_space = ALIGN(min_rx_space, 1024);
657                 min_rx_space >>= 10;
658
659                 /* If current Tx allocation is less than the min Tx FIFO size,
660                  * and the min Tx FIFO size is less than the current Rx FIFO
661                  * allocation, take space away from current Rx allocation
662                  */
663                 if (tx_space < min_tx_space &&
664                     ((min_tx_space - tx_space) < pba)) {
665                         pba = pba - (min_tx_space - tx_space);
666
667                         /* PCI/PCIx hardware has PBA alignment constraints */
668                         switch (hw->mac_type) {
669                         case e1000_82545 ... e1000_82546_rev_3:
670                                 pba &= ~(E1000_PBA_8K - 1);
671                                 break;
672                         default:
673                                 break;
674                         }
675
676                         /* if short on Rx space, Rx wins and must trump Tx
677                          * adjustment or use Early Receive if available
678                          */
679                         if (pba < min_rx_space)
680                                 pba = min_rx_space;
681                 }
682         }
683
684         ew32(PBA, pba);
685
686         /* flow control settings:
687          * The high water mark must be low enough to fit one full frame
688          * (or the size used for early receive) above it in the Rx FIFO.
689          * Set it to the lower of:
690          * - 90% of the Rx FIFO size, and
691          * - the full Rx FIFO size minus the early receive size (for parts
692          *   with ERT support assuming ERT set to E1000_ERT_2048), or
693          * - the full Rx FIFO size minus one full frame
694          */
695         hwm = min(((pba << 10) * 9 / 10),
696                   ((pba << 10) - hw->max_frame_size));
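        /* Worked example (illustrative numbers): with pba == 40 (KB) and
         * max_frame_size == 1522, pba << 10 == 40960 bytes, so
         * hwm = min(36864, 39438) = 36864; masking with 0xFFF8 below keeps
         * the 8-byte granularity the hardware requires, and the low-water
         * mark sits one 8-byte granule underneath the high-water mark.
         */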
697
698         hw->fc_high_water = hwm & 0xFFF8;       /* 8-byte granularity */
699         hw->fc_low_water = hw->fc_high_water - 8;
700         hw->fc_pause_time = E1000_FC_PAUSE_TIME;
701         hw->fc_send_xon = 1;
702         hw->fc = hw->original_fc;
703
704         /* Allow time for pending master requests to run */
705         e1000_reset_hw(hw);
706         if (hw->mac_type >= e1000_82544)
707                 ew32(WUC, 0);
708
709         if (e1000_init_hw(hw))
710                 e_dev_err("Hardware Error\n");
711         e1000_update_mng_vlan(adapter);
712
713         /* if (adapter->hwflags & HWFLAGS_PHY_PWR_BIT) { */
714         if (hw->mac_type >= e1000_82544 &&
715             hw->autoneg == 1 &&
716             hw->autoneg_advertised == ADVERTISE_1000_FULL) {
717                 u32 ctrl = er32(CTRL);
718                 /* clear phy power management bit if we are in gig only mode,
719                  * which if enabled will attempt negotiation to 100Mb, which
720                  * can cause a loss of link at power off or driver unload
721                  */
722                 ctrl &= ~E1000_CTRL_SWDPIN3;
723                 ew32(CTRL, ctrl);
724         }
725
726         /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
727         ew32(VET, ETHERNET_IEEE_VLAN_TYPE);
728
729         e1000_reset_adaptive(hw);
730         e1000_phy_get_info(hw, &adapter->phy_info);
731
732         e1000_release_manageability(adapter);
733 }
734
735 /* Dump the eeprom for users having checksum issues */
736 static void e1000_dump_eeprom(struct e1000_adapter *adapter)
737 {
738         struct net_device *netdev = adapter->netdev;
739         struct ethtool_eeprom eeprom;
740         const struct ethtool_ops *ops = netdev->ethtool_ops;
741         u8 *data;
742         int i;
743         u16 csum_old, csum_new = 0;
744
745         eeprom.len = ops->get_eeprom_len(netdev);
746         eeprom.offset = 0;
747
748         data = kmalloc(eeprom.len, GFP_KERNEL);
749         if (!data)
750                 return;
751
752         ops->get_eeprom(netdev, &eeprom, data);
753
754         csum_old = (data[EEPROM_CHECKSUM_REG * 2]) +
755                    (data[EEPROM_CHECKSUM_REG * 2 + 1] << 8);
756         for (i = 0; i < EEPROM_CHECKSUM_REG * 2; i += 2)
757                 csum_new += data[i] + (data[i + 1] << 8);
758         csum_new = EEPROM_SUM - csum_new;
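        /* Illustrative note, assuming the usual constants in e1000_hw.h
         * (EEPROM_CHECKSUM_REG == 0x3F, EEPROM_SUM == 0xBABA): the
         * little-endian words 0x00..0x3E are summed, and the checksum word
         * at 0x3F should make the total equal 0xBABA, so csum_new is the
         * value the checksum word ought to hold while csum_old is what the
         * EEPROM actually contains.
         */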
759
760         pr_err("/*********************/\n");
761         pr_err("Current EEPROM Checksum : 0x%04x\n", csum_old);
762         pr_err("Calculated              : 0x%04x\n", csum_new);
763
764         pr_err("Offset    Values\n");
765         pr_err("========  ======\n");
766         print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, data, 128, 0);
767
768         pr_err("Include this output when contacting your support provider.\n");
769         pr_err("This is not a software error! Something bad happened to\n");
770         pr_err("your hardware or EEPROM image. Ignoring this problem could\n");
771         pr_err("result in further problems, possibly loss of data,\n");
772         pr_err("corruption or system hangs!\n");
773         pr_err("The MAC Address will be reset to 00:00:00:00:00:00,\n");
774         pr_err("which is invalid and requires you to set the proper MAC\n");
775         pr_err("address manually before continuing to enable this network\n");
776         pr_err("device. Please inspect the EEPROM dump and report the\n");
777         pr_err("issue to your hardware vendor or Intel Customer Support.\n");
778         pr_err("/*********************/\n");
779
780         kfree(data);
781 }
782
783 /**
784  * e1000_is_need_ioport - determine if an adapter needs ioport resources or not
785  * @pdev: PCI device information struct
786  *
787  * Return true if an adapter needs ioport resources
788  **/
789 static int e1000_is_need_ioport(struct pci_dev *pdev)
790 {
791         switch (pdev->device) {
792         case E1000_DEV_ID_82540EM:
793         case E1000_DEV_ID_82540EM_LOM:
794         case E1000_DEV_ID_82540EP:
795         case E1000_DEV_ID_82540EP_LOM:
796         case E1000_DEV_ID_82540EP_LP:
797         case E1000_DEV_ID_82541EI:
798         case E1000_DEV_ID_82541EI_MOBILE:
799         case E1000_DEV_ID_82541ER:
800         case E1000_DEV_ID_82541ER_LOM:
801         case E1000_DEV_ID_82541GI:
802         case E1000_DEV_ID_82541GI_LF:
803         case E1000_DEV_ID_82541GI_MOBILE:
804         case E1000_DEV_ID_82544EI_COPPER:
805         case E1000_DEV_ID_82544EI_FIBER:
806         case E1000_DEV_ID_82544GC_COPPER:
807         case E1000_DEV_ID_82544GC_LOM:
808         case E1000_DEV_ID_82545EM_COPPER:
809         case E1000_DEV_ID_82545EM_FIBER:
810         case E1000_DEV_ID_82546EB_COPPER:
811         case E1000_DEV_ID_82546EB_FIBER:
812         case E1000_DEV_ID_82546EB_QUAD_COPPER:
813                 return true;
814         default:
815                 return false;
816         }
817 }
818
819 static netdev_features_t e1000_fix_features(struct net_device *netdev,
820         netdev_features_t features)
821 {
822         /* Since there is no support for separate Rx/Tx vlan accel
823          * enable/disable make sure Tx flag is always in same state as Rx.
824          */
825         if (features & NETIF_F_HW_VLAN_CTAG_RX)
826                 features |= NETIF_F_HW_VLAN_CTAG_TX;
827         else
828                 features &= ~NETIF_F_HW_VLAN_CTAG_TX;
829
830         return features;
831 }
832
833 static int e1000_set_features(struct net_device *netdev,
834         netdev_features_t features)
835 {
836         struct e1000_adapter *adapter = netdev_priv(netdev);
837         netdev_features_t changed = features ^ netdev->features;
838
839         if (changed & NETIF_F_HW_VLAN_CTAG_RX)
840                 e1000_vlan_mode(netdev, features);
841
842         if (!(changed & (NETIF_F_RXCSUM | NETIF_F_RXALL)))
843                 return 0;
844
845         netdev->features = features;
846         adapter->rx_csum = !!(features & NETIF_F_RXCSUM);
847
848         if (netif_running(netdev))
849                 e1000_reinit_locked(adapter);
850         else
851                 e1000_reset(adapter);
852
853         return 0;
854 }
855
856 static const struct net_device_ops e1000_netdev_ops = {
857         .ndo_open               = e1000_open,
858         .ndo_stop               = e1000_close,
859         .ndo_start_xmit         = e1000_xmit_frame,
860         .ndo_set_rx_mode        = e1000_set_rx_mode,
861         .ndo_set_mac_address    = e1000_set_mac,
862         .ndo_tx_timeout         = e1000_tx_timeout,
863         .ndo_change_mtu         = e1000_change_mtu,
864         .ndo_do_ioctl           = e1000_ioctl,
865         .ndo_validate_addr      = eth_validate_addr,
866         .ndo_vlan_rx_add_vid    = e1000_vlan_rx_add_vid,
867         .ndo_vlan_rx_kill_vid   = e1000_vlan_rx_kill_vid,
868 #ifdef CONFIG_NET_POLL_CONTROLLER
869         .ndo_poll_controller    = e1000_netpoll,
870 #endif
871         .ndo_fix_features       = e1000_fix_features,
872         .ndo_set_features       = e1000_set_features,
873 };
874
875 /**
876  * e1000_init_hw_struct - initialize members of hw struct
877  * @adapter: board private struct
878  * @hw: structure used by e1000_hw.c
879  *
880  * Factors out initialization of the e1000_hw struct to its own function
881  * that can be called very early at init (just after struct allocation).
882  * Fields are initialized based on PCI device information and
883  * OS network device settings (MTU size).
884  * Returns negative error codes if MAC type setup fails.
885  */
886 static int e1000_init_hw_struct(struct e1000_adapter *adapter,
887                                 struct e1000_hw *hw)
888 {
889         struct pci_dev *pdev = adapter->pdev;
890
891         /* PCI config space info */
892         hw->vendor_id = pdev->vendor;
893         hw->device_id = pdev->device;
894         hw->subsystem_vendor_id = pdev->subsystem_vendor;
895         hw->subsystem_id = pdev->subsystem_device;
896         hw->revision_id = pdev->revision;
897
898         pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);
899
900         hw->max_frame_size = adapter->netdev->mtu +
901                              ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
902         hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE;
903
904         /* identify the MAC */
905         if (e1000_set_mac_type(hw)) {
906                 e_err(probe, "Unknown MAC Type\n");
907                 return -EIO;
908         }
909
910         switch (hw->mac_type) {
911         default:
912                 break;
913         case e1000_82541:
914         case e1000_82547:
915         case e1000_82541_rev_2:
916         case e1000_82547_rev_2:
917                 hw->phy_init_script = 1;
918                 break;
919         }
920
921         e1000_set_media_type(hw);
922         e1000_get_bus_info(hw);
923
924         hw->wait_autoneg_complete = false;
925         hw->tbi_compatibility_en = true;
926         hw->adaptive_ifs = true;
927
928         /* Copper options */
929
930         if (hw->media_type == e1000_media_type_copper) {
931                 hw->mdix = AUTO_ALL_MODES;
932                 hw->disable_polarity_correction = false;
933                 hw->master_slave = E1000_MASTER_SLAVE;
934         }
935
936         return 0;
937 }
938
939 /**
940  * e1000_probe - Device Initialization Routine
941  * @pdev: PCI device information struct
942  * @ent: entry in e1000_pci_tbl
943  *
944  * Returns 0 on success, negative on failure
945  *
946  * e1000_probe initializes an adapter identified by a pci_dev structure.
947  * The OS initialization, configuring of the adapter private structure,
948  * and a hardware reset occur.
949  **/
950 static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
951 {
952         struct net_device *netdev;
953         struct e1000_adapter *adapter = NULL;
954         struct e1000_hw *hw;
955
956         static int cards_found;
957         static int global_quad_port_a; /* global ksp3 port a indication */
958         int i, err, pci_using_dac;
959         u16 eeprom_data = 0;
960         u16 tmp = 0;
961         u16 eeprom_apme_mask = E1000_EEPROM_APME;
962         int bars, need_ioport;
963         bool disable_dev = false;
964
965         /* do not allocate ioport bars when not needed */
966         need_ioport = e1000_is_need_ioport(pdev);
967         if (need_ioport) {
968                 bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO);
969                 err = pci_enable_device(pdev);
970         } else {
971                 bars = pci_select_bars(pdev, IORESOURCE_MEM);
972                 err = pci_enable_device_mem(pdev);
973         }
974         if (err)
975                 return err;
976
977         err = pci_request_selected_regions(pdev, bars, e1000_driver_name);
978         if (err)
979                 goto err_pci_reg;
980
981         pci_set_master(pdev);
982         err = pci_save_state(pdev);
983         if (err)
984                 goto err_alloc_etherdev;
985
986         err = -ENOMEM;
987         netdev = alloc_etherdev(sizeof(struct e1000_adapter));
988         if (!netdev)
989                 goto err_alloc_etherdev;
990
991         SET_NETDEV_DEV(netdev, &pdev->dev);
992
993         pci_set_drvdata(pdev, netdev);
994         adapter = netdev_priv(netdev);
995         adapter->netdev = netdev;
996         adapter->pdev = pdev;
997         adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
998         adapter->bars = bars;
999         adapter->need_ioport = need_ioport;
1000
1001         hw = &adapter->hw;
1002         hw->back = adapter;
1003
1004         err = -EIO;
1005         hw->hw_addr = pci_ioremap_bar(pdev, BAR_0);
1006         if (!hw->hw_addr)
1007                 goto err_ioremap;
1008
1009         if (adapter->need_ioport) {
1010                 for (i = BAR_1; i <= BAR_5; i++) {
1011                         if (pci_resource_len(pdev, i) == 0)
1012                                 continue;
1013                         if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
1014                                 hw->io_base = pci_resource_start(pdev, i);
1015                                 break;
1016                         }
1017                 }
1018         }
1019
1020         /* make ready for any if (hw->...) below */
1021         err = e1000_init_hw_struct(adapter, hw);
1022         if (err)
1023                 goto err_sw_init;
1024
1025         /* there is a workaround being applied below that limits
1026          * 64-bit DMA addresses to 64-bit hardware.  There are some
1027          * 32-bit adapters that hang on Tx when given 64-bit DMA addresses
1028          */
1029         pci_using_dac = 0;
1030         if ((hw->bus_type == e1000_bus_type_pcix) &&
1031             !dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
1032                 pci_using_dac = 1;
1033         } else {
1034                 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
1035                 if (err) {
1036                         pr_err("No usable DMA config, aborting\n");
1037                         goto err_dma;
1038                 }
1039         }
1040
1041         netdev->netdev_ops = &e1000_netdev_ops;
1042         e1000_set_ethtool_ops(netdev);
1043         netdev->watchdog_timeo = 5 * HZ;
1044         netif_napi_add(netdev, &adapter->napi, e1000_clean, 64);
1045
1046         strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
1047
1048         adapter->bd_number = cards_found;
1049
1050         /* setup the private structure */
1051
1052         err = e1000_sw_init(adapter);
1053         if (err)
1054                 goto err_sw_init;
1055
1056         err = -EIO;
1057         if (hw->mac_type == e1000_ce4100) {
1058                 hw->ce4100_gbe_mdio_base_virt =
1059                                         ioremap(pci_resource_start(pdev, BAR_1),
1060                                                 pci_resource_len(pdev, BAR_1));
1061
1062                 if (!hw->ce4100_gbe_mdio_base_virt)
1063                         goto err_mdio_ioremap;
1064         }
1065
1066         if (hw->mac_type >= e1000_82543) {
1067                 netdev->hw_features = NETIF_F_SG |
1068                                    NETIF_F_HW_CSUM |
1069                                    NETIF_F_HW_VLAN_CTAG_RX;
1070                 netdev->features = NETIF_F_HW_VLAN_CTAG_TX |
1071                                    NETIF_F_HW_VLAN_CTAG_FILTER;
1072         }
1073
1074         if ((hw->mac_type >= e1000_82544) &&
1075            (hw->mac_type != e1000_82547))
1076                 netdev->hw_features |= NETIF_F_TSO;
1077
1078         netdev->priv_flags |= IFF_SUPP_NOFCS;
1079
1080         netdev->features |= netdev->hw_features;
1081         netdev->hw_features |= (NETIF_F_RXCSUM |
1082                                 NETIF_F_RXALL |
1083                                 NETIF_F_RXFCS);
1084
1085         if (pci_using_dac) {
1086                 netdev->features |= NETIF_F_HIGHDMA;
1087                 netdev->vlan_features |= NETIF_F_HIGHDMA;
1088         }
1089
1090         netdev->vlan_features |= (NETIF_F_TSO |
1091                                   NETIF_F_HW_CSUM |
1092                                   NETIF_F_SG);
1093
1094         /* Do not set IFF_UNICAST_FLT for VMware's 82545EM */
1095         if (hw->device_id != E1000_DEV_ID_82545EM_COPPER ||
1096             hw->subsystem_vendor_id != PCI_VENDOR_ID_VMWARE)
1097                 netdev->priv_flags |= IFF_UNICAST_FLT;
1098
1099         /* MTU range: 46 - 16110 */
1100         netdev->min_mtu = ETH_ZLEN - ETH_HLEN;
1101         netdev->max_mtu = MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN);
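        /* Arithmetic behind the range noted above, assuming the usual
         * constants (ETH_ZLEN 60, ETH_HLEN 14, ETH_FCS_LEN 4, and
         * MAX_JUMBO_FRAME_SIZE 0x3F00 == 16128 in e1000.h):
         * min_mtu = 60 - 14 = 46 and max_mtu = 16128 - (14 + 4) = 16110.
         */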
1102
1103         adapter->en_mng_pt = e1000_enable_mng_pass_thru(hw);
1104
1105         /* initialize eeprom parameters */
1106         if (e1000_init_eeprom_params(hw)) {
1107                 e_err(probe, "EEPROM initialization failed\n");
1108                 goto err_eeprom;
1109         }
1110
1111         /* before reading the EEPROM, reset the controller to
1112          * put the device in a known good starting state
1113          */
1114
1115         e1000_reset_hw(hw);
1116
1117         /* make sure the EEPROM is good */
1118         if (e1000_validate_eeprom_checksum(hw) < 0) {
1119                 e_err(probe, "The EEPROM Checksum Is Not Valid\n");
1120                 e1000_dump_eeprom(adapter);
1121                 /* set MAC address to all zeroes to invalidate and temporarily
1122                  * disable this device for the user. This blocks regular
1123                  * traffic while still permitting ethtool ioctls from reaching
1124                  * the hardware as well as allowing the user to run the
1125                  * interface after manually setting a hw addr using
1126                  * `ip link set address`
1127                  */
1128                 memset(hw->mac_addr, 0, netdev->addr_len);
1129         } else {
1130                 /* copy the MAC address out of the EEPROM */
1131                 if (e1000_read_mac_addr(hw))
1132                         e_err(probe, "EEPROM Read Error\n");
1133         }
1134         /* don't block initialization here due to bad MAC address */
1135         memcpy(netdev->dev_addr, hw->mac_addr, netdev->addr_len);
1136
1137         if (!is_valid_ether_addr(netdev->dev_addr))
1138                 e_err(probe, "Invalid MAC Address\n");
1139
1140
1141         INIT_DELAYED_WORK(&adapter->watchdog_task, e1000_watchdog);
1142         INIT_DELAYED_WORK(&adapter->fifo_stall_task,
1143                           e1000_82547_tx_fifo_stall_task);
1144         INIT_DELAYED_WORK(&adapter->phy_info_task, e1000_update_phy_info_task);
1145         INIT_WORK(&adapter->reset_task, e1000_reset_task);
1146
1147         e1000_check_options(adapter);
1148
1149         /* Initial Wake on LAN setting
1150          * If APM wake is enabled in the EEPROM,
1151          * enable the ACPI Magic Packet filter
1152          */
1153
1154         switch (hw->mac_type) {
1155         case e1000_82542_rev2_0:
1156         case e1000_82542_rev2_1:
1157         case e1000_82543:
1158                 break;
1159         case e1000_82544:
1160                 e1000_read_eeprom(hw,
1161                         EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data);
1162                 eeprom_apme_mask = E1000_EEPROM_82544_APM;
1163                 break;
1164         case e1000_82546:
1165         case e1000_82546_rev_3:
1166                 if (er32(STATUS) & E1000_STATUS_FUNC_1) {
1167                         e1000_read_eeprom(hw,
1168                                 EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
1169                         break;
1170                 }
1171                 /* Fall Through */
1172         default:
1173                 e1000_read_eeprom(hw,
1174                         EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
1175                 break;
1176         }
1177         if (eeprom_data & eeprom_apme_mask)
1178                 adapter->eeprom_wol |= E1000_WUFC_MAG;
1179
1180         /* now that we have the eeprom settings, apply the special cases
1181          * where the eeprom may be wrong or the board simply won't support
1182          * wake on lan on a particular port
1183          */
1184         switch (pdev->device) {
1185         case E1000_DEV_ID_82546GB_PCIE:
1186                 adapter->eeprom_wol = 0;
1187                 break;
1188         case E1000_DEV_ID_82546EB_FIBER:
1189         case E1000_DEV_ID_82546GB_FIBER:
1190                 /* Wake events only supported on port A for dual fiber
1191                  * regardless of eeprom setting
1192                  */
1193                 if (er32(STATUS) & E1000_STATUS_FUNC_1)
1194                         adapter->eeprom_wol = 0;
1195                 break;
1196         case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
1197                 /* if quad port adapter, disable WoL on all but port A */
1198                 if (global_quad_port_a != 0)
1199                         adapter->eeprom_wol = 0;
1200                 else
1201                         adapter->quad_port_a = true;
1202                 /* Reset for multiple quad port adapters */
1203                 if (++global_quad_port_a == 4)
1204                         global_quad_port_a = 0;
1205                 break;
1206         }
1207
1208         /* initialize the wol settings based on the eeprom settings */
1209         adapter->wol = adapter->eeprom_wol;
1210         device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
1211
1212         /* Auto detect PHY address */
1213         if (hw->mac_type == e1000_ce4100) {
1214                 for (i = 0; i < 32; i++) {
1215                         hw->phy_addr = i;
1216                         e1000_read_phy_reg(hw, PHY_ID2, &tmp);
1217
1218                         if (tmp != 0 && tmp != 0xFF)
1219                                 break;
1220                 }
1221
1222                 if (i >= 32)
1223                         goto err_eeprom;
1224         }
1225
1226         /* reset the hardware with the new settings */
1227         e1000_reset(adapter);
1228
1229         strcpy(netdev->name, "eth%d");
1230         err = register_netdev(netdev);
1231         if (err)
1232                 goto err_register;
1233
1234         e1000_vlan_filter_on_off(adapter, false);
1235
1236         /* print bus type/speed/width info */
1237         e_info(probe, "(PCI%s:%dMHz:%d-bit) %pM\n",
1238                ((hw->bus_type == e1000_bus_type_pcix) ? "-X" : ""),
1239                ((hw->bus_speed == e1000_bus_speed_133) ? 133 :
1240                 (hw->bus_speed == e1000_bus_speed_120) ? 120 :
1241                 (hw->bus_speed == e1000_bus_speed_100) ? 100 :
1242                 (hw->bus_speed == e1000_bus_speed_66) ? 66 : 33),
1243                ((hw->bus_width == e1000_bus_width_64) ? 64 : 32),
1244                netdev->dev_addr);
1245
1246         /* carrier off reporting is important to ethtool even BEFORE open */
1247         netif_carrier_off(netdev);
1248
1249         e_info(probe, "Intel(R) PRO/1000 Network Connection\n");
1250
1251         cards_found++;
1252         return 0;
1253
1254 err_register:
1255 err_eeprom:
1256         e1000_phy_hw_reset(hw);
1257
1258         if (hw->flash_address)
1259                 iounmap(hw->flash_address);
1260         kfree(adapter->tx_ring);
1261         kfree(adapter->rx_ring);
1262 err_dma:
1263 err_sw_init:
1264 err_mdio_ioremap:
1265         iounmap(hw->ce4100_gbe_mdio_base_virt);
1266         iounmap(hw->hw_addr);
1267 err_ioremap:
1268         disable_dev = !test_and_set_bit(__E1000_DISABLED, &adapter->flags);
1269         free_netdev(netdev);
1270 err_alloc_etherdev:
1271         pci_release_selected_regions(pdev, bars);
1272 err_pci_reg:
1273         if (!adapter || disable_dev)
1274                 pci_disable_device(pdev);
1275         return err;
1276 }
1277
1278 /**
1279  * e1000_remove - Device Removal Routine
1280  * @pdev: PCI device information struct
1281  *
1282  * e1000_remove is called by the PCI subsystem to alert the driver
1283  * that it should release a PCI device. That could be caused by a
1284  * Hot-Plug event, or because the driver is going to be removed from
1285  * memory.
1286  **/
1287 static void e1000_remove(struct pci_dev *pdev)
1288 {
1289         struct net_device *netdev = pci_get_drvdata(pdev);
1290         struct e1000_adapter *adapter = netdev_priv(netdev);
1291         struct e1000_hw *hw = &adapter->hw;
1292         bool disable_dev;
1293
1294         e1000_down_and_stop(adapter);
1295         e1000_release_manageability(adapter);
1296
1297         unregister_netdev(netdev);
1298
1299         e1000_phy_hw_reset(hw);
1300
1301         kfree(adapter->tx_ring);
1302         kfree(adapter->rx_ring);
1303
1304         if (hw->mac_type == e1000_ce4100)
1305                 iounmap(hw->ce4100_gbe_mdio_base_virt);
1306         iounmap(hw->hw_addr);
1307         if (hw->flash_address)
1308                 iounmap(hw->flash_address);
1309         pci_release_selected_regions(pdev, adapter->bars);
1310
1311         disable_dev = !test_and_set_bit(__E1000_DISABLED, &adapter->flags);
1312         free_netdev(netdev);
1313
1314         if (disable_dev)
1315                 pci_disable_device(pdev);
1316 }
1317
1318 /**
1319  * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
1320  * @adapter: board private structure to initialize
1321  *
1322  * e1000_sw_init initializes the Adapter private data structure.
1323  * e1000_init_hw_struct MUST be called before this function
1324  **/
1325 static int e1000_sw_init(struct e1000_adapter *adapter)
1326 {
1327         adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
1328
1329         adapter->num_tx_queues = 1;
1330         adapter->num_rx_queues = 1;
1331
1332         if (e1000_alloc_queues(adapter)) {
1333                 e_err(probe, "Unable to allocate memory for queues\n");
1334                 return -ENOMEM;
1335         }
1336
1337         /* Explicitly disable IRQ since the NIC can be in any state. */
1338         e1000_irq_disable(adapter);
1339
1340         spin_lock_init(&adapter->stats_lock);
1341
1342         set_bit(__E1000_DOWN, &adapter->flags);
1343
1344         return 0;
1345 }
1346
1347 /**
1348  * e1000_alloc_queues - Allocate memory for all rings
1349  * @adapter: board private structure to initialize
1350  *
1351  * We allocate one ring per queue at run-time since we don't know the
1352  * number of queues at compile-time.
1353  **/
1354 static int e1000_alloc_queues(struct e1000_adapter *adapter)
1355 {
1356         adapter->tx_ring = kcalloc(adapter->num_tx_queues,
1357                                    sizeof(struct e1000_tx_ring), GFP_KERNEL);
1358         if (!adapter->tx_ring)
1359                 return -ENOMEM;
1360
1361         adapter->rx_ring = kcalloc(adapter->num_rx_queues,
1362                                    sizeof(struct e1000_rx_ring), GFP_KERNEL);
1363         if (!adapter->rx_ring) {
1364                 kfree(adapter->tx_ring);
1365                 return -ENOMEM;
1366         }
1367
1368         return E1000_SUCCESS;
1369 }
1370
1371 /**
1372  * e1000_open - Called when a network interface is made active
1373  * @netdev: network interface device structure
1374  *
1375  * Returns 0 on success, negative value on failure
1376  *
1377  * The open entry point is called when a network interface is made
1378  * active by the system (IFF_UP).  At this point all resources needed
1379  * for transmit and receive operations are allocated, the interrupt
1380  * handler is registered with the OS, the watchdog task is started,
1381  * and the stack is notified that the interface is ready.
1382  **/
1383 int e1000_open(struct net_device *netdev)
1384 {
1385         struct e1000_adapter *adapter = netdev_priv(netdev);
1386         struct e1000_hw *hw = &adapter->hw;
1387         int err;
1388
1389         /* disallow open during test */
1390         if (test_bit(__E1000_TESTING, &adapter->flags))
1391                 return -EBUSY;
1392
1393         netif_carrier_off(netdev);
1394
1395         /* allocate transmit descriptors */
1396         err = e1000_setup_all_tx_resources(adapter);
1397         if (err)
1398                 goto err_setup_tx;
1399
1400         /* allocate receive descriptors */
1401         err = e1000_setup_all_rx_resources(adapter);
1402         if (err)
1403                 goto err_setup_rx;
1404
1405         e1000_power_up_phy(adapter);
1406
1407         adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
1408         if ((hw->mng_cookie.status &
1409                           E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
1410                 e1000_update_mng_vlan(adapter);
1411         }
1412
1413         /* before we allocate an interrupt, we must be ready to handle it.
1414          * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
1415          * as soon as we call request_irq, so we have to setup our
1416          * clean_rx handler before we do so.
1417          */
1418         e1000_configure(adapter);
1419
1420         err = e1000_request_irq(adapter);
1421         if (err)
1422                 goto err_req_irq;
1423
1424         /* From here on the code is the same as e1000_up() */
1425         clear_bit(__E1000_DOWN, &adapter->flags);
1426
1427         napi_enable(&adapter->napi);
1428
1429         e1000_irq_enable(adapter);
1430
1431         netif_start_queue(netdev);
1432
1433         /* fire a link status change interrupt to start the watchdog */
1434         ew32(ICS, E1000_ICS_LSC);
1435
1436         return E1000_SUCCESS;
1437
1438 err_req_irq:
1439         e1000_power_down_phy(adapter);
1440         e1000_free_all_rx_resources(adapter);
1441 err_setup_rx:
1442         e1000_free_all_tx_resources(adapter);
1443 err_setup_tx:
1444         e1000_reset(adapter);
1445
1446         return err;
1447 }
1448
1449 /**
1450  * e1000_close - Disables a network interface
1451  * @netdev: network interface device structure
1452  *
1453  * Returns 0, this is not allowed to fail
1454  *
1455  * The close entry point is called when an interface is de-activated
1456  * by the OS.  The hardware is still under the driver's control, but
1457  * needs to be disabled.  A global MAC reset is issued to stop the
1458  * hardware, and all transmit and receive resources are freed.
1459  **/
1460 int e1000_close(struct net_device *netdev)
1461 {
1462         struct e1000_adapter *adapter = netdev_priv(netdev);
1463         struct e1000_hw *hw = &adapter->hw;
1464         int count = E1000_CHECK_RESET_COUNT;
1465
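	/* wait (bounded by E1000_CHECK_RESET_COUNT iterations) for any
	 * in-progress reset to release __E1000_RESETTING before tearing down
	 */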
1466         while (test_and_set_bit(__E1000_RESETTING, &adapter->flags) && count--)
1467                 usleep_range(10000, 20000);
1468
1469         WARN_ON(count < 0);
1470
1471         /* signal that we're down so that the reset task will no longer run */
1472         set_bit(__E1000_DOWN, &adapter->flags);
1473         clear_bit(__E1000_RESETTING, &adapter->flags);
1474
1475         e1000_down(adapter);
1476         e1000_power_down_phy(adapter);
1477         e1000_free_irq(adapter);
1478
1479         e1000_free_all_tx_resources(adapter);
1480         e1000_free_all_rx_resources(adapter);
1481
1482         /* kill manageability vlan ID if supported, but not if a vlan with
1483          * the same ID is registered on the host OS (let 8021q kill it)
1484          */
1485         if ((hw->mng_cookie.status &
1486              E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
1487             !test_bit(adapter->mng_vlan_id, adapter->active_vlans)) {
1488                 e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q),
1489                                        adapter->mng_vlan_id);
1490         }
1491
1492         return 0;
1493 }
1494
1495 /**
1496  * e1000_check_64k_bound - check that memory doesn't cross 64kB boundary
1497  * @adapter: address of board private structure
1498  * @start: address of beginning of memory
1499  * @len: length of memory
1500  **/
1501 static bool e1000_check_64k_bound(struct e1000_adapter *adapter, void *start,
1502                                   unsigned long len)
1503 {
1504         struct e1000_hw *hw = &adapter->hw;
1505         unsigned long begin = (unsigned long)start;
1506         unsigned long end = begin + len;
1507
1508         /* First-rev 82545 and 82546 parts must not have any memory
1509          * write location cross a 64kB boundary (errata 23)
1510          */
1511         if (hw->mac_type == e1000_82545 ||
1512             hw->mac_type == e1000_ce4100 ||
1513             hw->mac_type == e1000_82546) {
1514                 return ((begin ^ (end - 1)) >> 16) == 0;
1515         }
1516
1517         return true;
1518 }
1519
1520 /**
1521  * e1000_setup_tx_resources - allocate Tx resources (Descriptors)
1522  * @adapter: board private structure
1523  * @txdr:    tx descriptor ring (for a specific queue) to setup
1524  *
1525  * Return 0 on success, negative on failure
1526  **/
1527 static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
1528                                     struct e1000_tx_ring *txdr)
1529 {
1530         struct pci_dev *pdev = adapter->pdev;
1531         int size;
1532
1533         size = sizeof(struct e1000_tx_buffer) * txdr->count;
1534         txdr->buffer_info = vzalloc(size);
1535         if (!txdr->buffer_info)
1536                 return -ENOMEM;
1537
1538         /* round up to nearest 4K */
1539
1540         txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
1541         txdr->size = ALIGN(txdr->size, 4096);
1542
1543         txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
1544                                         GFP_KERNEL);
1545         if (!txdr->desc) {
1546 setup_tx_desc_die:
1547                 vfree(txdr->buffer_info);
1548                 return -ENOMEM;
1549         }
1550
1551         /* Fix for errata 23, can't cross 64kB boundary */
1552         if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
1553                 void *olddesc = txdr->desc;
1554                 dma_addr_t olddma = txdr->dma;
1555                 e_err(tx_err, "txdr align check failed: %u bytes at %p\n",
1556                       txdr->size, txdr->desc);
1557                 /* Try again, without freeing the previous */
1558                 txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size,
1559                                                 &txdr->dma, GFP_KERNEL);
1560                 /* Failed allocation, critical failure */
1561                 if (!txdr->desc) {
1562                         dma_free_coherent(&pdev->dev, txdr->size, olddesc,
1563                                           olddma);
1564                         goto setup_tx_desc_die;
1565                 }
1566
1567                 if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
1568                         /* give up */
1569                         dma_free_coherent(&pdev->dev, txdr->size, txdr->desc,
1570                                           txdr->dma);
1571                         dma_free_coherent(&pdev->dev, txdr->size, olddesc,
1572                                           olddma);
1573                         e_err(probe, "Unable to allocate aligned memory "
1574                               "for the transmit descriptor ring\n");
1575                         vfree(txdr->buffer_info);
1576                         return -ENOMEM;
1577                 } else {
1578                         /* Free old allocation, new allocation was successful */
1579                         dma_free_coherent(&pdev->dev, txdr->size, olddesc,
1580                                           olddma);
1581                 }
1582         }
1583         memset(txdr->desc, 0, txdr->size);
1584
1585         txdr->next_to_use = 0;
1586         txdr->next_to_clean = 0;
1587
1588         return 0;
1589 }
1590
1591 /**
1592  * e1000_setup_all_tx_resources - wrapper to allocate Tx resources
1593  *                                (Descriptors) for all queues
1594  * @adapter: board private structure
1595  *
1596  * Return 0 on success, negative on failure
1597  **/
1598 int e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
1599 {
1600         int i, err = 0;
1601
1602         for (i = 0; i < adapter->num_tx_queues; i++) {
1603                 err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]);
1604                 if (err) {
1605                         e_err(probe, "Allocation for Tx Queue %u failed\n", i);
1606                         for (i--; i >= 0; i--)
1607                                 e1000_free_tx_resources(adapter,
1608                                                         &adapter->tx_ring[i]);
1609                         break;
1610                 }
1611         }
1612
1613         return err;
1614 }
1615
1616 /**
1617  * e1000_configure_tx - Configure 8254x Transmit Unit after Reset
1618  * @adapter: board private structure
1619  *
1620  * Configure the Tx unit of the MAC after a reset.
1621  **/
1622 static void e1000_configure_tx(struct e1000_adapter *adapter)
1623 {
1624         u64 tdba;
1625         struct e1000_hw *hw = &adapter->hw;
1626         u32 tdlen, tctl, tipg;
1627         u32 ipgr1, ipgr2;
1628
1629         /* Setup the HW Tx Head and Tail descriptor pointers */
1630
1631         switch (adapter->num_tx_queues) {
1632         case 1:
1633         default:
1634                 tdba = adapter->tx_ring[0].dma;
1635                 tdlen = adapter->tx_ring[0].count *
1636                         sizeof(struct e1000_tx_desc);
1637                 ew32(TDLEN, tdlen);
1638                 ew32(TDBAH, (tdba >> 32));
1639                 ew32(TDBAL, (tdba & 0x00000000ffffffffULL));
1640                 ew32(TDT, 0);
1641                 ew32(TDH, 0);
1642                 adapter->tx_ring[0].tdh = ((hw->mac_type >= e1000_82543) ?
1643                                            E1000_TDH : E1000_82542_TDH);
1644                 adapter->tx_ring[0].tdt = ((hw->mac_type >= e1000_82543) ?
1645                                            E1000_TDT : E1000_82542_TDT);
1646                 break;
1647         }
1648
1649         /* Set the default values for the Tx Inter Packet Gap timer */
1650         if ((hw->media_type == e1000_media_type_fiber ||
1651              hw->media_type == e1000_media_type_internal_serdes))
1652                 tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
1653         else
1654                 tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
1655
1656         switch (hw->mac_type) {
1657         case e1000_82542_rev2_0:
1658         case e1000_82542_rev2_1:
1659                 tipg = DEFAULT_82542_TIPG_IPGT;
1660                 ipgr1 = DEFAULT_82542_TIPG_IPGR1;
1661                 ipgr2 = DEFAULT_82542_TIPG_IPGR2;
1662                 break;
1663         default:
1664                 ipgr1 = DEFAULT_82543_TIPG_IPGR1;
1665                 ipgr2 = DEFAULT_82543_TIPG_IPGR2;
1666                 break;
1667         }
1668         tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT;
1669         tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT;
1670         ew32(TIPG, tipg);
1671
1672         /* Set the Tx Interrupt Delay register */
1673
1674         ew32(TIDV, adapter->tx_int_delay);
1675         if (hw->mac_type >= e1000_82540)
1676                 ew32(TADV, adapter->tx_abs_int_delay);
1677
1678         /* Program the Transmit Control Register */
1679
1680         tctl = er32(TCTL);
1681         tctl &= ~E1000_TCTL_CT;
1682         tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
1683                 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
1684
1685         e1000_config_collision_dist(hw);
1686
1687         /* Setup Transmit Descriptor Settings for eop descriptor */
1688         adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS;
1689
1690         /* only set IDE if we are delaying interrupts using the timers */
1691         if (adapter->tx_int_delay)
1692                 adapter->txd_cmd |= E1000_TXD_CMD_IDE;
1693
1694         if (hw->mac_type < e1000_82543)
1695                 adapter->txd_cmd |= E1000_TXD_CMD_RPS;
1696         else
1697                 adapter->txd_cmd |= E1000_TXD_CMD_RS;
1698
1699         /* Cache if we're 82544 running in PCI-X because we'll
1700          * need this to apply a workaround later in the send path.
1701          */
1702         if (hw->mac_type == e1000_82544 &&
1703             hw->bus_type == e1000_bus_type_pcix)
1704                 adapter->pcix_82544 = true;
1705
1706         ew32(TCTL, tctl);
1707
1708 }
1709
1710 /**
1711  * e1000_setup_rx_resources - allocate Rx resources (Descriptors)
1712  * @adapter: board private structure
1713  * @rxdr:    rx descriptor ring (for a specific queue) to setup
1714  *
1715  * Returns 0 on success, negative on failure
1716  **/
1717 static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
1718                                     struct e1000_rx_ring *rxdr)
1719 {
1720         struct pci_dev *pdev = adapter->pdev;
1721         int size, desc_len;
1722
1723         size = sizeof(struct e1000_rx_buffer) * rxdr->count;
1724         rxdr->buffer_info = vzalloc(size);
1725         if (!rxdr->buffer_info)
1726                 return -ENOMEM;
1727
1728         desc_len = sizeof(struct e1000_rx_desc);
1729
1730         /* Round up to nearest 4K */
1731
1732         rxdr->size = rxdr->count * desc_len;
1733         rxdr->size = ALIGN(rxdr->size, 4096);
1734
1735         rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
1736                                         GFP_KERNEL);
1737         if (!rxdr->desc) {
1738 setup_rx_desc_die:
1739                 vfree(rxdr->buffer_info);
1740                 return -ENOMEM;
1741         }
1742
1743         /* Fix for errata 23, can't cross 64kB boundary */
1744         if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
1745                 void *olddesc = rxdr->desc;
1746                 dma_addr_t olddma = rxdr->dma;
1747                 e_err(rx_err, "rxdr align check failed: %u bytes at %p\n",
1748                       rxdr->size, rxdr->desc);
1749                 /* Try again, without freeing the previous */
1750                 rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size,
1751                                                 &rxdr->dma, GFP_KERNEL);
1752                 /* Failed allocation, critical failure */
1753                 if (!rxdr->desc) {
1754                         dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
1755                                           olddma);
1756                         goto setup_rx_desc_die;
1757                 }
1758
1759                 if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
1760                         /* give up */
1761                         dma_free_coherent(&pdev->dev, rxdr->size, rxdr->desc,
1762                                           rxdr->dma);
1763                         dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
1764                                           olddma);
1765                         e_err(probe, "Unable to allocate aligned memory for "
1766                               "the Rx descriptor ring\n");
1767                         goto setup_rx_desc_die;
1768                 } else {
1769                         /* Free old allocation, new allocation was successful */
1770                         dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
1771                                           olddma);
1772                 }
1773         }
1774         memset(rxdr->desc, 0, rxdr->size);
1775
1776         rxdr->next_to_clean = 0;
1777         rxdr->next_to_use = 0;
1778         rxdr->rx_skb_top = NULL;
1779
1780         return 0;
1781 }
1782
1783 /**
1784  * e1000_setup_all_rx_resources - wrapper to allocate Rx resources
1785  *                                (Descriptors) for all queues
1786  * @adapter: board private structure
1787  *
1788  * Return 0 on success, negative on failure
1789  **/
1790 int e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
1791 {
1792         int i, err = 0;
1793
1794         for (i = 0; i < adapter->num_rx_queues; i++) {
1795                 err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]);
1796                 if (err) {
1797                         e_err(probe, "Allocation for Rx Queue %u failed\n", i);
1798                         for (i--; i >= 0; i--)
1799                                 e1000_free_rx_resources(adapter,
1800                                                         &adapter->rx_ring[i]);
1801                         break;
1802                 }
1803         }
1804
1805         return err;
1806 }
1807
1808 /**
1809  * e1000_setup_rctl - configure the receive control registers
1810  * @adapter: Board private structure
1811  **/
1812 static void e1000_setup_rctl(struct e1000_adapter *adapter)
1813 {
1814         struct e1000_hw *hw = &adapter->hw;
1815         u32 rctl;
1816
1817         rctl = er32(RCTL);
1818
1819         rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
1820
1821         rctl |= E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
1822                 E1000_RCTL_RDMTS_HALF |
1823                 (hw->mc_filter_type << E1000_RCTL_MO_SHIFT);
1824
1825         if (hw->tbi_compatibility_on == 1)
1826                 rctl |= E1000_RCTL_SBP;
1827         else
1828                 rctl &= ~E1000_RCTL_SBP;
1829
1830         if (adapter->netdev->mtu <= ETH_DATA_LEN)
1831                 rctl &= ~E1000_RCTL_LPE;
1832         else
1833                 rctl |= E1000_RCTL_LPE;
1834
1835         /* Setup buffer sizes */
1836         rctl &= ~E1000_RCTL_SZ_4096;
1837         rctl |= E1000_RCTL_BSEX;
1838         switch (adapter->rx_buffer_len) {
1839         case E1000_RXBUFFER_2048:
1840         default:
1841                 rctl |= E1000_RCTL_SZ_2048;
1842                 rctl &= ~E1000_RCTL_BSEX;
1843                 break;
1844         case E1000_RXBUFFER_4096:
1845                 rctl |= E1000_RCTL_SZ_4096;
1846                 break;
1847         case E1000_RXBUFFER_8192:
1848                 rctl |= E1000_RCTL_SZ_8192;
1849                 break;
1850         case E1000_RXBUFFER_16384:
1851                 rctl |= E1000_RCTL_SZ_16384;
1852                 break;
1853         }
1854
1855         /* This is useful for sniffing bad packets. */
1856         if (adapter->netdev->features & NETIF_F_RXALL) {
1857                 /* UPE and MPE will be handled by normal PROMISC logic
1858                  * in e1000_set_rx_mode
1859                  */
1860                 rctl |= (E1000_RCTL_SBP | /* Receive bad packets */
1861                          E1000_RCTL_BAM | /* RX All Bcast Pkts */
1862                          E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */
1863
1864                 rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */
1865                           E1000_RCTL_DPF | /* Allow filtered pause */
1866                           E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */
1867                 /* Do not mess with E1000_CTRL_VME, it affects transmit as well,
1868                  * and that breaks VLANs.
1869                  */
1870         }
1871
1872         ew32(RCTL, rctl);
1873 }
1874
1875 /**
1876  * e1000_configure_rx - Configure 8254x Receive Unit after Reset
1877  * @adapter: board private structure
1878  *
1879  * Configure the Rx unit of the MAC after a reset.
1880  **/
1881 static void e1000_configure_rx(struct e1000_adapter *adapter)
1882 {
1883         u64 rdba;
1884         struct e1000_hw *hw = &adapter->hw;
1885         u32 rdlen, rctl, rxcsum;
1886
1887         if (adapter->netdev->mtu > ETH_DATA_LEN) {
1888                 rdlen = adapter->rx_ring[0].count *
1889                         sizeof(struct e1000_rx_desc);
1890                 adapter->clean_rx = e1000_clean_jumbo_rx_irq;
1891                 adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers;
1892         } else {
1893                 rdlen = adapter->rx_ring[0].count *
1894                         sizeof(struct e1000_rx_desc);
1895                 adapter->clean_rx = e1000_clean_rx_irq;
1896                 adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
1897         }
1898
1899         /* disable receives while setting up the descriptors */
1900         rctl = er32(RCTL);
1901         ew32(RCTL, rctl & ~E1000_RCTL_EN);
1902
1903         /* set the Receive Delay Timer Register */
1904         ew32(RDTR, adapter->rx_int_delay);
1905
1906         if (hw->mac_type >= e1000_82540) {
1907                 ew32(RADV, adapter->rx_abs_int_delay);
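		/* ITR counts in units of 256ns; convert the requested
		 * interrupts/sec into that interval
		 */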
1908                 if (adapter->itr_setting != 0)
1909                         ew32(ITR, 1000000000 / (adapter->itr * 256));
1910         }
1911
1912         /* Setup the HW Rx Head and Tail Descriptor Pointers and
1913          * the Base and Length of the Rx Descriptor Ring
1914          */
1915         switch (adapter->num_rx_queues) {
1916         case 1:
1917         default:
1918                 rdba = adapter->rx_ring[0].dma;
1919                 ew32(RDLEN, rdlen);
1920                 ew32(RDBAH, (rdba >> 32));
1921                 ew32(RDBAL, (rdba & 0x00000000ffffffffULL));
1922                 ew32(RDT, 0);
1923                 ew32(RDH, 0);
1924                 adapter->rx_ring[0].rdh = ((hw->mac_type >= e1000_82543) ?
1925                                            E1000_RDH : E1000_82542_RDH);
1926                 adapter->rx_ring[0].rdt = ((hw->mac_type >= e1000_82543) ?
1927                                            E1000_RDT : E1000_82542_RDT);
1928                 break;
1929         }
1930
1931         /* Enable 82543 Receive Checksum Offload for TCP and UDP */
1932         if (hw->mac_type >= e1000_82543) {
1933                 rxcsum = er32(RXCSUM);
1934                 if (adapter->rx_csum)
1935                         rxcsum |= E1000_RXCSUM_TUOFL;
1936                 else
1937                         /* don't need to clear IPPCSE as it defaults to 0 */
1938                         rxcsum &= ~E1000_RXCSUM_TUOFL;
1939                 ew32(RXCSUM, rxcsum);
1940         }
1941
1942         /* Enable Receives */
1943         ew32(RCTL, rctl | E1000_RCTL_EN);
1944 }
1945
1946 /**
1947  * e1000_free_tx_resources - Free Tx Resources per Queue
1948  * @adapter: board private structure
1949  * @tx_ring: Tx descriptor ring for a specific queue
1950  *
1951  * Free all transmit software resources
1952  **/
1953 static void e1000_free_tx_resources(struct e1000_adapter *adapter,
1954                                     struct e1000_tx_ring *tx_ring)
1955 {
1956         struct pci_dev *pdev = adapter->pdev;
1957
1958         e1000_clean_tx_ring(adapter, tx_ring);
1959
1960         vfree(tx_ring->buffer_info);
1961         tx_ring->buffer_info = NULL;
1962
1963         dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
1964                           tx_ring->dma);
1965
1966         tx_ring->desc = NULL;
1967 }
1968
1969 /**
1970  * e1000_free_all_tx_resources - Free Tx Resources for All Queues
1971  * @adapter: board private structure
1972  *
1973  * Free all transmit software resources
1974  **/
1975 void e1000_free_all_tx_resources(struct e1000_adapter *adapter)
1976 {
1977         int i;
1978
1979         for (i = 0; i < adapter->num_tx_queues; i++)
1980                 e1000_free_tx_resources(adapter, &adapter->tx_ring[i]);
1981 }
1982
1983 static void
1984 e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
1985                                  struct e1000_tx_buffer *buffer_info)
1986 {
1987         if (buffer_info->dma) {
1988                 if (buffer_info->mapped_as_page)
1989                         dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
1990                                        buffer_info->length, DMA_TO_DEVICE);
1991                 else
1992                         dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
1993                                          buffer_info->length,
1994                                          DMA_TO_DEVICE);
1995                 buffer_info->dma = 0;
1996         }
1997         if (buffer_info->skb) {
1998                 dev_kfree_skb_any(buffer_info->skb);
1999                 buffer_info->skb = NULL;
2000         }
2001         buffer_info->time_stamp = 0;
2002         /* buffer_info must be completely set up in the transmit path */
2003 }
2004
2005 /**
2006  * e1000_clean_tx_ring - Free Tx Buffers
2007  * @adapter: board private structure
2008  * @tx_ring: ring to be cleaned
2009  **/
2010 static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
2011                                 struct e1000_tx_ring *tx_ring)
2012 {
2013         struct e1000_hw *hw = &adapter->hw;
2014         struct e1000_tx_buffer *buffer_info;
2015         unsigned long size;
2016         unsigned int i;
2017
2018         /* Free all the Tx ring sk_buffs */
2019
2020         for (i = 0; i < tx_ring->count; i++) {
2021                 buffer_info = &tx_ring->buffer_info[i];
2022                 e1000_unmap_and_free_tx_resource(adapter, buffer_info);
2023         }
2024
2025         netdev_reset_queue(adapter->netdev);
2026         size = sizeof(struct e1000_tx_buffer) * tx_ring->count;
2027         memset(tx_ring->buffer_info, 0, size);
2028
2029         /* Zero out the descriptor ring */
2030
2031         memset(tx_ring->desc, 0, tx_ring->size);
2032
2033         tx_ring->next_to_use = 0;
2034         tx_ring->next_to_clean = 0;
2035         tx_ring->last_tx_tso = false;
2036
2037         writel(0, hw->hw_addr + tx_ring->tdh);
2038         writel(0, hw->hw_addr + tx_ring->tdt);
2039 }
2040
2041 /**
2042  * e1000_clean_all_tx_rings - Free Tx Buffers for all queues
2043  * @adapter: board private structure
2044  **/
2045 static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter)
2046 {
2047         int i;
2048
2049         for (i = 0; i < adapter->num_tx_queues; i++)
2050                 e1000_clean_tx_ring(adapter, &adapter->tx_ring[i]);
2051 }
2052
2053 /**
2054  * e1000_free_rx_resources - Free Rx Resources
2055  * @adapter: board private structure
2056  * @rx_ring: ring to clean the resources from
2057  *
2058  * Free all receive software resources
2059  **/
2060 static void e1000_free_rx_resources(struct e1000_adapter *adapter,
2061                                     struct e1000_rx_ring *rx_ring)
2062 {
2063         struct pci_dev *pdev = adapter->pdev;
2064
2065         e1000_clean_rx_ring(adapter, rx_ring);
2066
2067         vfree(rx_ring->buffer_info);
2068         rx_ring->buffer_info = NULL;
2069
2070         dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
2071                           rx_ring->dma);
2072
2073         rx_ring->desc = NULL;
2074 }
2075
2076 /**
2077  * e1000_free_all_rx_resources - Free Rx Resources for All Queues
2078  * @adapter: board private structure
2079  *
2080  * Free all receive software resources
2081  **/
2082 void e1000_free_all_rx_resources(struct e1000_adapter *adapter)
2083 {
2084         int i;
2085
2086         for (i = 0; i < adapter->num_rx_queues; i++)
2087                 e1000_free_rx_resources(adapter, &adapter->rx_ring[i]);
2088 }
2089
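/* extra space reserved at the start of every Rx frag: NET_SKB_PAD of
 * headroom for the stack plus NET_IP_ALIGN to align the IP header
 */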
2090 #define E1000_HEADROOM (NET_SKB_PAD + NET_IP_ALIGN)
2091 static unsigned int e1000_frag_len(const struct e1000_adapter *a)
2092 {
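	/* the frag must hold the headroom, the Rx buffer itself, and the
	 * skb_shared_info the skb built from this buffer needs at the end
	 */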
2093         return SKB_DATA_ALIGN(a->rx_buffer_len + E1000_HEADROOM) +
2094                 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2095 }
2096
2097 static void *e1000_alloc_frag(const struct e1000_adapter *a)
2098 {
2099         unsigned int len = e1000_frag_len(a);
2100         u8 *data = netdev_alloc_frag(len);
2101
2102         if (likely(data))
2103                 data += E1000_HEADROOM;
2104         return data;
2105 }
2106
2107 /**
2108  * e1000_clean_rx_ring - Free Rx Buffers per Queue
2109  * @adapter: board private structure
2110  * @rx_ring: ring to free buffers from
2111  **/
2112 static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
2113                                 struct e1000_rx_ring *rx_ring)
2114 {
2115         struct e1000_hw *hw = &adapter->hw;
2116         struct e1000_rx_buffer *buffer_info;
2117         struct pci_dev *pdev = adapter->pdev;
2118         unsigned long size;
2119         unsigned int i;
2120
2121         /* Free all the Rx netfrags */
2122         for (i = 0; i < rx_ring->count; i++) {
2123                 buffer_info = &rx_ring->buffer_info[i];
2124                 if (adapter->clean_rx == e1000_clean_rx_irq) {
2125                         if (buffer_info->dma)
2126                                 dma_unmap_single(&pdev->dev, buffer_info->dma,
2127                                                  adapter->rx_buffer_len,
2128                                                  DMA_FROM_DEVICE);
2129                         if (buffer_info->rxbuf.data) {
2130                                 skb_free_frag(buffer_info->rxbuf.data);
2131                                 buffer_info->rxbuf.data = NULL;
2132                         }
2133                 } else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq) {
2134                         if (buffer_info->dma)
2135                                 dma_unmap_page(&pdev->dev, buffer_info->dma,
2136                                                adapter->rx_buffer_len,
2137                                                DMA_FROM_DEVICE);
2138                         if (buffer_info->rxbuf.page) {
2139                                 put_page(buffer_info->rxbuf.page);
2140                                 buffer_info->rxbuf.page = NULL;
2141                         }
2142                 }
2143
2144                 buffer_info->dma = 0;
2145         }
2146
2147         /* there also may be some cached data from a chained receive */
2148         napi_free_frags(&adapter->napi);
2149         rx_ring->rx_skb_top = NULL;
2150
2151         size = sizeof(struct e1000_rx_buffer) * rx_ring->count;
2152         memset(rx_ring->buffer_info, 0, size);
2153
2154         /* Zero out the descriptor ring */
2155         memset(rx_ring->desc, 0, rx_ring->size);
2156
2157         rx_ring->next_to_clean = 0;
2158         rx_ring->next_to_use = 0;
2159
2160         writel(0, hw->hw_addr + rx_ring->rdh);
2161         writel(0, hw->hw_addr + rx_ring->rdt);
2162 }
2163
2164 /**
2165  * e1000_clean_all_rx_rings - Free Rx Buffers for all queues
2166  * @adapter: board private structure
2167  **/
2168 static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter)
2169 {
2170         int i;
2171
2172         for (i = 0; i < adapter->num_rx_queues; i++)
2173                 e1000_clean_rx_ring(adapter, &adapter->rx_ring[i]);
2174 }
2175
2176 /* The 82542 2.0 (revision 2) needs to have the receive unit in reset
2177  * and memory write and invalidate disabled for certain operations
2178  */
2179 static void e1000_enter_82542_rst(struct e1000_adapter *adapter)
2180 {
2181         struct e1000_hw *hw = &adapter->hw;
2182         struct net_device *netdev = adapter->netdev;
2183         u32 rctl;
2184
2185         e1000_pci_clear_mwi(hw);
2186
2187         rctl = er32(RCTL);
2188         rctl |= E1000_RCTL_RST;
2189         ew32(RCTL, rctl);
2190         E1000_WRITE_FLUSH();
2191         mdelay(5);
2192
2193         if (netif_running(netdev))
2194                 e1000_clean_all_rx_rings(adapter);
2195 }
2196
2197 static void e1000_leave_82542_rst(struct e1000_adapter *adapter)
2198 {
2199         struct e1000_hw *hw = &adapter->hw;
2200         struct net_device *netdev = adapter->netdev;
2201         u32 rctl;
2202
2203         rctl = er32(RCTL);
2204         rctl &= ~E1000_RCTL_RST;
2205         ew32(RCTL, rctl);
2206         E1000_WRITE_FLUSH();
2207         mdelay(5);
2208
2209         if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE)
2210                 e1000_pci_set_mwi(hw);
2211
2212         if (netif_running(netdev)) {
2213                 /* No need to loop, because 82542 supports only 1 queue */
2214                 struct e1000_rx_ring *ring = &adapter->rx_ring[0];
2215                 e1000_configure_rx(adapter);
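		/* Rx buffers were freed on entry to reset; refill the ring */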
2216                 adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring));
2217         }
2218 }
2219
2220 /**
2221  * e1000_set_mac - Change the Ethernet Address of the NIC
2222  * @netdev: network interface device structure
2223  * @p: pointer to an address structure
2224  *
2225  * Returns 0 on success, negative on failure
2226  **/
2227 static int e1000_set_mac(struct net_device *netdev, void *p)
2228 {
2229         struct e1000_adapter *adapter = netdev_priv(netdev);
2230         struct e1000_hw *hw = &adapter->hw;
2231         struct sockaddr *addr = p;
2232
2233         if (!is_valid_ether_addr(addr->sa_data))
2234                 return -EADDRNOTAVAIL;
2235
2236         /* 82542 2.0 needs to be in reset to write receive address registers */
2237
2238         if (hw->mac_type == e1000_82542_rev2_0)
2239                 e1000_enter_82542_rst(adapter);
2240
2241         memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2242         memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len);
2243
2244         e1000_rar_set(hw, hw->mac_addr, 0);
2245
2246         if (hw->mac_type == e1000_82542_rev2_0)
2247                 e1000_leave_82542_rst(adapter);
2248
2249         return 0;
2250 }
2251
2252 /**
2253  * e1000_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
2254  * @netdev: network interface device structure
2255  *
2256  * The set_rx_mode entry point is called whenever the unicast or multicast
2257  * address lists or the network interface flags are updated. This routine is
2258  * responsible for configuring the hardware for proper unicast, multicast,
2259  * promiscuous mode, and all-multi behavior.
2260  **/
2261 static void e1000_set_rx_mode(struct net_device *netdev)
2262 {
2263         struct e1000_adapter *adapter = netdev_priv(netdev);
2264         struct e1000_hw *hw = &adapter->hw;
2265         struct netdev_hw_addr *ha;
2266         bool use_uc = false;
2267         u32 rctl;
2268         u32 hash_value;
2269         int i, rar_entries = E1000_RAR_ENTRIES;
2270         int mta_reg_count = E1000_NUM_MTA_REGISTERS;
2271         u32 *mcarray = kcalloc(mta_reg_count, sizeof(u32), GFP_ATOMIC);
2272
2273         if (!mcarray)
2274                 return;
2275
2276         /* Check for Promiscuous and All Multicast modes */
2277
2278         rctl = er32(RCTL);
2279
2280         if (netdev->flags & IFF_PROMISC) {
2281                 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
2282                 rctl &= ~E1000_RCTL_VFE;
2283         } else {
2284                 if (netdev->flags & IFF_ALLMULTI)
2285                         rctl |= E1000_RCTL_MPE;
2286                 else
2287                         rctl &= ~E1000_RCTL_MPE;
2288                 /* Enable VLAN filter if there is a VLAN */
2289                 if (e1000_vlan_used(adapter))
2290                         rctl |= E1000_RCTL_VFE;
2291         }
2292
2293         if (netdev_uc_count(netdev) > rar_entries - 1) {
2294                 rctl |= E1000_RCTL_UPE;
2295         } else if (!(netdev->flags & IFF_PROMISC)) {
2296                 rctl &= ~E1000_RCTL_UPE;
2297                 use_uc = true;
2298         }
2299
2300         ew32(RCTL, rctl);
2301
2302         /* 82542 2.0 needs to be in reset to write receive address registers */
2303
2304         if (hw->mac_type == e1000_82542_rev2_0)
2305                 e1000_enter_82542_rst(adapter);
2306
2307         /* load the first 14 addresses into the exact filters 1-14. Unicast
2308          * addresses take precedence to avoid disabling unicast filtering
2309          * when possible.
2310          *
2311          * RAR 0 is used for the station MAC address; if there are fewer
2312          * than 14 addresses, go ahead and clear the remaining filters
2313          */
2314         i = 1;
2315         if (use_uc)
2316                 netdev_for_each_uc_addr(ha, netdev) {
2317                         if (i == rar_entries)
2318                                 break;
2319                         e1000_rar_set(hw, ha->addr, i++);
2320                 }
2321
2322         netdev_for_each_mc_addr(ha, netdev) {
2323                 if (i == rar_entries) {
2324                         /* load any remaining addresses into the hash table */
2325                         u32 hash_reg, hash_bit, mta;
2326                         hash_value = e1000_hash_mc_addr(hw, ha->addr);
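			/* bits 11:5 of the hash select one of the 128 MTA
			 * registers, bits 4:0 select the bit within it
			 */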
2327                         hash_reg = (hash_value >> 5) & 0x7F;
2328                         hash_bit = hash_value & 0x1F;
2329                         mta = (1 << hash_bit);
2330                         mcarray[hash_reg] |= mta;
2331                 } else {
2332                         e1000_rar_set(hw, ha->addr, i++);
2333                 }
2334         }
2335
2336         for (; i < rar_entries; i++) {
2337                 E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0);
2338                 E1000_WRITE_FLUSH();
2339                 E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0);
2340                 E1000_WRITE_FLUSH();
2341         }
2342
2343         /* write the hash table completely, write from bottom to avoid
2344          * both stupid write combining chipsets, and flushing each write
2345          */
2346         for (i = mta_reg_count - 1; i >= 0; i--) {
2347                 /* The 82544 has an erratum where writing odd MTA offsets
2348                  * overwrites the previous even offset; writing the table
2349                  * backwards over the whole range works around this by
2350                  * always writing the odd offset first
2351                  */
2352                 E1000_WRITE_REG_ARRAY(hw, MTA, i, mcarray[i]);
2353         }
2354         E1000_WRITE_FLUSH();
2355
2356         if (hw->mac_type == e1000_82542_rev2_0)
2357                 e1000_leave_82542_rst(adapter);
2358
2359         kfree(mcarray);
2360 }
2361
2362 /**
2363  * e1000_update_phy_info_task - get phy info
2364  * @work: work struct contained inside adapter struct
2365  *
2366  * Need to wait a few seconds after link up to get diagnostic information from
2367  * the phy
2368  */
2369 static void e1000_update_phy_info_task(struct work_struct *work)
2370 {
2371         struct e1000_adapter *adapter = container_of(work,
2372                                                      struct e1000_adapter,
2373                                                      phy_info_task.work);
2374
2375         e1000_phy_get_info(&adapter->hw, &adapter->phy_info);
2376 }
2377
2378 /**
2379  * e1000_82547_tx_fifo_stall_task - task to complete work
2380  * @work: work struct contained inside adapter struct
2381  **/
2382 static void e1000_82547_tx_fifo_stall_task(struct work_struct *work)
2383 {
2384         struct e1000_adapter *adapter = container_of(work,
2385                                                      struct e1000_adapter,
2386                                                      fifo_stall_task.work);
2387         struct e1000_hw *hw = &adapter->hw;
2388         struct net_device *netdev = adapter->netdev;
2389         u32 tctl;
2390
2391         if (atomic_read(&adapter->tx_fifo_stall)) {
2392                 if ((er32(TDT) == er32(TDH)) &&
2393                    (er32(TDFT) == er32(TDFH)) &&
2394                    (er32(TDFTS) == er32(TDFHS))) {
2395                         tctl = er32(TCTL);
2396                         ew32(TCTL, tctl & ~E1000_TCTL_EN);
2397                         ew32(TDFT, adapter->tx_head_addr);
2398                         ew32(TDFH, adapter->tx_head_addr);
2399                         ew32(TDFTS, adapter->tx_head_addr);
2400                         ew32(TDFHS, adapter->tx_head_addr);
2401                         ew32(TCTL, tctl);
2402                         E1000_WRITE_FLUSH();
2403
2404                         adapter->tx_fifo_head = 0;
2405                         atomic_set(&adapter->tx_fifo_stall, 0);
2406                         netif_wake_queue(netdev);
2407                 } else if (!test_bit(__E1000_DOWN, &adapter->flags)) {
2408                         schedule_delayed_work(&adapter->fifo_stall_task, 1);
2409                 }
2410         }
2411 }
2412
2413 bool e1000_has_link(struct e1000_adapter *adapter)
2414 {
2415         struct e1000_hw *hw = &adapter->hw;
2416         bool link_active = false;
2417
2418         /* get_link_status is set on LSC (link status) interrupt or rx
2419          * sequence error interrupt (except on intel ce4100).
2420          * get_link_status will stay true until the
2421          * e1000_check_for_link establishes link for copper adapters
2422          * ONLY
2423          */
2424         switch (hw->media_type) {
2425         case e1000_media_type_copper:
2426                 if (hw->mac_type == e1000_ce4100)
2427                         hw->get_link_status = 1;
2428                 if (hw->get_link_status) {
2429                         e1000_check_for_link(hw);
2430                         link_active = !hw->get_link_status;
2431                 } else {
2432                         link_active = true;
2433                 }
2434                 break;
2435         case e1000_media_type_fiber:
2436                 e1000_check_for_link(hw);
2437                 link_active = !!(er32(STATUS) & E1000_STATUS_LU);
2438                 break;
2439         case e1000_media_type_internal_serdes:
2440                 e1000_check_for_link(hw);
2441                 link_active = hw->serdes_has_link;
2442                 break;
2443         default:
2444                 break;
2445         }
2446
2447         return link_active;
2448 }
2449
2450 /**
2451  * e1000_watchdog - work function
2452  * @work: work struct contained inside adapter struct
2453  **/
2454 static void e1000_watchdog(struct work_struct *work)
2455 {
2456         struct e1000_adapter *adapter = container_of(work,
2457                                                      struct e1000_adapter,
2458                                                      watchdog_task.work);
2459         struct e1000_hw *hw = &adapter->hw;
2460         struct net_device *netdev = adapter->netdev;
2461         struct e1000_tx_ring *txdr = adapter->tx_ring;
2462         u32 link, tctl;
2463
2464         link = e1000_has_link(adapter);
2465         if ((netif_carrier_ok(netdev)) && link)
2466                 goto link_up;
2467
2468         if (link) {
2469                 if (!netif_carrier_ok(netdev)) {
2470                         u32 ctrl;
2471                         bool txb2b = true;
2472                         /* update snapshot of PHY registers on LSC */
2473                         e1000_get_speed_and_duplex(hw,
2474                                                    &adapter->link_speed,
2475                                                    &adapter->link_duplex);
2476
2477                         ctrl = er32(CTRL);
2478                         pr_info("%s NIC Link is Up %d Mbps %s, "
2479                                 "Flow Control: %s\n",
2480                                 netdev->name,
2481                                 adapter->link_speed,
2482                                 adapter->link_duplex == FULL_DUPLEX ?
2483                                 "Full Duplex" : "Half Duplex",
2484                                 ((ctrl & E1000_CTRL_TFCE) && (ctrl &
2485                                 E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl &
2486                                 E1000_CTRL_RFCE) ? "RX" : ((ctrl &
2487                                 E1000_CTRL_TFCE) ? "TX" : "None")));
2488
2489                         /* adjust timeout factor according to speed/duplex */
2490                         adapter->tx_timeout_factor = 1;
2491                         switch (adapter->link_speed) {
2492                         case SPEED_10:
2493                                 txb2b = false;
2494                                 adapter->tx_timeout_factor = 16;
2495                                 break;
2496                         case SPEED_100:
2497                                 txb2b = false;
2498                                 /* maybe add some timeout factor ? */
2499                                 break;
2500                         }
2501
2502                         /* enable transmits in the hardware */
2503                         tctl = er32(TCTL);
2504                         tctl |= E1000_TCTL_EN;
2505                         ew32(TCTL, tctl);
2506
2507                         netif_carrier_on(netdev);
2508                         if (!test_bit(__E1000_DOWN, &adapter->flags))
2509                                 schedule_delayed_work(&adapter->phy_info_task,
2510                                                       2 * HZ);
2511                         adapter->smartspeed = 0;
2512                 }
2513         } else {
2514                 if (netif_carrier_ok(netdev)) {
2515                         adapter->link_speed = 0;
2516                         adapter->link_duplex = 0;
2517                         pr_info("%s NIC Link is Down\n",
2518                                 netdev->name);
2519                         netif_carrier_off(netdev);
2520
2521                         if (!test_bit(__E1000_DOWN, &adapter->flags))
2522                                 schedule_delayed_work(&adapter->phy_info_task,
2523                                                       2 * HZ);
2524                 }
2525
2526                 e1000_smartspeed(adapter);
2527         }
2528
2529 link_up:
2530         e1000_update_stats(adapter);
2531
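	/* keep per-interval deltas: they feed e1000_update_adaptive() and
	 * the simple ITR calculation further down
	 */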
2532         hw->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
2533         adapter->tpt_old = adapter->stats.tpt;
2534         hw->collision_delta = adapter->stats.colc - adapter->colc_old;
2535         adapter->colc_old = adapter->stats.colc;
2536
2537         adapter->gorcl = adapter->stats.gorcl - adapter->gorcl_old;
2538         adapter->gorcl_old = adapter->stats.gorcl;
2539         adapter->gotcl = adapter->stats.gotcl - adapter->gotcl_old;
2540         adapter->gotcl_old = adapter->stats.gotcl;
2541
2542         e1000_update_adaptive(hw);
2543
2544         if (!netif_carrier_ok(netdev)) {
2545                 if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) {
2546                         /* We've lost link, so the controller stops DMA,
2547                          * but we've got queued Tx work that's never going
2548                          * to get done, so reset controller to flush Tx.
2549                          * (Do the reset outside of interrupt context).
2550                          */
2551                         adapter->tx_timeout_count++;
2552                         schedule_work(&adapter->reset_task);
2553                         /* exit immediately since reset is imminent */
2554                         return;
2555                 }
2556         }
2557
2558         /* Simple mode for Interrupt Throttle Rate (ITR) */
2559         if (hw->mac_type >= e1000_82540 && adapter->itr_setting == 4) {
2560                 /* Symmetric Tx/Rx gets a reduced ITR=2000;
2561                  * Total asymmetrical Tx or Rx gets ITR=8000;
2562                  * everyone else is between 2000-8000.
2563                  */
2564                 u32 goc = (adapter->gotcl + adapter->gorcl) / 10000;
2565                 u32 dif = (adapter->gotcl > adapter->gorcl ?
2566                             adapter->gotcl - adapter->gorcl :
2567                             adapter->gorcl - adapter->gotcl) / 10000;
2568                 u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
2569
2570                 ew32(ITR, 1000000000 / (itr * 256));
2571         }
2572
2573         /* Cause software interrupt to ensure rx ring is cleaned */
2574         ew32(ICS, E1000_ICS_RXDMT0);
2575
2576         /* Force detection of hung controller every watchdog period */
2577         adapter->detect_tx_hung = true;
2578
2579         /* Reschedule the task */
2580         if (!test_bit(__E1000_DOWN, &adapter->flags))
2581                 schedule_delayed_work(&adapter->watchdog_task, 2 * HZ);
2582 }
2583
2584 enum latency_range {
2585         lowest_latency = 0,
2586         low_latency = 1,
2587         bulk_latency = 2,
2588         latency_invalid = 255
2589 };
2590
2591 /**
2592  * e1000_update_itr - update the dynamic ITR value based on statistics
2593  * @adapter: pointer to adapter
2594  * @itr_setting: current adapter->itr
2595  * @packets: the number of packets during this measurement interval
2596  * @bytes: the number of bytes during this measurement interval
2597  *
2598  *      Stores a new ITR value based on packets and byte
2599  *      counts during the last interrupt.  The advantage of per interrupt
2600  *      computation is faster updates and more accurate ITR for the current
2601  *      traffic pattern.  Constants in this function were computed
2602  *      based on theoretical maximum wire speed and thresholds were set based
2603  *      on testing data as well as attempting to minimize response time
2604  *      while increasing bulk throughput.
2605  *      this functionality is controlled by the InterruptThrottleRate module
2606  *      This functionality is controlled by the InterruptThrottleRate module
2607  **/
2608 static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
2609                                      u16 itr_setting, int packets, int bytes)
2610 {
2611         unsigned int retval = itr_setting;
2612         struct e1000_hw *hw = &adapter->hw;
2613
2614         if (unlikely(hw->mac_type < e1000_82540))
2615                 goto update_itr_done;
2616
2617         if (packets == 0)
2618                 goto update_itr_done;
2619
2620         switch (itr_setting) {
2621         case lowest_latency:
2622                 /* jumbo frames get bulk treatment */
2623                 if (bytes/packets > 8000)
2624                         retval = bulk_latency;
2625                 else if ((packets < 5) && (bytes > 512))
2626                         retval = low_latency;
2627                 break;
2628         case low_latency:  /* 50 usec aka 20000 ints/s */
2629                 if (bytes > 10000) {
2630                         /* jumbo frames need bulk latency setting */
2631                         if (bytes/packets > 8000)
2632                                 retval = bulk_latency;
2633                         else if ((packets < 10) || ((bytes/packets) > 1200))
2634                                 retval = bulk_latency;
2635                         else if ((packets > 35))
2636                                 retval = lowest_latency;
2637                 } else if (bytes/packets > 2000)
2638                         retval = bulk_latency;
2639                 else if (packets <= 2 && bytes < 512)
2640                         retval = lowest_latency;
2641                 break;
2642         case bulk_latency: /* 250 usec aka 4000 ints/s */
2643                 if (bytes > 25000) {
2644                         if (packets > 35)
2645                                 retval = low_latency;
2646                 } else if (bytes < 6000) {
2647                         retval = low_latency;
2648                 }
2649                 break;
2650         }
2651
2652 update_itr_done:
2653         return retval;
2654 }
2655
2656 static void e1000_set_itr(struct e1000_adapter *adapter)
2657 {
2658         struct e1000_hw *hw = &adapter->hw;
2659         u16 current_itr;
2660         u32 new_itr = adapter->itr;
2661
2662         if (unlikely(hw->mac_type < e1000_82540))
2663                 return;
2664
2665         /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
2666         if (unlikely(adapter->link_speed != SPEED_1000)) {
2667                 current_itr = 0;
2668                 new_itr = 4000;
2669                 goto set_itr_now;
2670         }
2671
2672         adapter->tx_itr = e1000_update_itr(adapter, adapter->tx_itr,
2673                                            adapter->total_tx_packets,
2674                                            adapter->total_tx_bytes);
2675         /* conservative mode (itr 3) eliminates the lowest_latency setting */
2676         if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
2677                 adapter->tx_itr = low_latency;
2678
2679         adapter->rx_itr = e1000_update_itr(adapter, adapter->rx_itr,
2680                                            adapter->total_rx_packets,
2681                                            adapter->total_rx_bytes);
2682         /* conservative mode (itr 3) eliminates the lowest_latency setting */
2683         if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
2684                 adapter->rx_itr = low_latency;
2685
2686         current_itr = max(adapter->rx_itr, adapter->tx_itr);
2687
2688         switch (current_itr) {
2689         /* counts and packets in update_itr are dependent on these numbers */
2690         case lowest_latency:
2691                 new_itr = 70000;
2692                 break;
2693         case low_latency:
2694                 new_itr = 20000; /* aka hwitr = ~200 */
2695                 break;
2696         case bulk_latency:
2697                 new_itr = 4000;
2698                 break;
2699         default:
2700                 break;
2701         }
2702
2703 set_itr_now:
2704         if (new_itr != adapter->itr) {
2705                 /* this attempts to bias the interrupt rate towards Bulk
2706                  * by adding intermediate steps when interrupt rate is
2707                  * increasing
2708                  */
2709                 new_itr = new_itr > adapter->itr ?
2710                           min(adapter->itr + (new_itr >> 2), new_itr) :
2711                           new_itr;
2712                 adapter->itr = new_itr;
2713                 ew32(ITR, 1000000000 / (new_itr * 256));
2714         }
2715 }
2716
2717 #define E1000_TX_FLAGS_CSUM             0x00000001
2718 #define E1000_TX_FLAGS_VLAN             0x00000002
2719 #define E1000_TX_FLAGS_TSO              0x00000004
2720 #define E1000_TX_FLAGS_IPV4             0x00000008
2721 #define E1000_TX_FLAGS_NO_FCS           0x00000010
2722 #define E1000_TX_FLAGS_VLAN_MASK        0xffff0000
2723 #define E1000_TX_FLAGS_VLAN_SHIFT       16
2724
2725 static int e1000_tso(struct e1000_adapter *adapter,
2726                      struct e1000_tx_ring *tx_ring, struct sk_buff *skb,
2727                      __be16 protocol)
2728 {
2729         struct e1000_context_desc *context_desc;
2730         struct e1000_tx_buffer *buffer_info;
2731         unsigned int i;
2732         u32 cmd_length = 0;
2733         u16 ipcse = 0, tucse, mss;
2734         u8 ipcss, ipcso, tucss, tucso, hdr_len;
2735
2736         if (skb_is_gso(skb)) {
2737                 int err;
2738
2739                 err = skb_cow_head(skb, 0);
2740                 if (err < 0)
2741                         return err;
2742
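		/* bytes of headers (link + network + TCP) that get replicated
		 * in front of every segment the hardware produces
		 */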
2743                 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2744                 mss = skb_shinfo(skb)->gso_size;
2745                 if (protocol == htons(ETH_P_IP)) {
2746                         struct iphdr *iph = ip_hdr(skb);
2747                         iph->tot_len = 0;
2748                         iph->check = 0;
2749                         tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2750                                                                  iph->daddr, 0,
2751                                                                  IPPROTO_TCP,
2752                                                                  0);
2753                         cmd_length = E1000_TXD_CMD_IP;
2754                         ipcse = skb_transport_offset(skb) - 1;
2755                 } else if (skb_is_gso_v6(skb)) {
2756                         ipv6_hdr(skb)->payload_len = 0;
2757                         tcp_hdr(skb)->check =
2758                                 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2759                                                  &ipv6_hdr(skb)->daddr,
2760                                                  0, IPPROTO_TCP, 0);
2761                         ipcse = 0;
2762                 }
2763                 ipcss = skb_network_offset(skb);
2764                 ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data;
2765                 tucss = skb_transport_offset(skb);
2766                 tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
2767                 tucse = 0;
2768
2769                 cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
2770                                E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));
2771
2772                 i = tx_ring->next_to_use;
2773                 context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
2774                 buffer_info = &tx_ring->buffer_info[i];
2775
2776                 context_desc->lower_setup.ip_fields.ipcss  = ipcss;
2777                 context_desc->lower_setup.ip_fields.ipcso  = ipcso;
2778                 context_desc->lower_setup.ip_fields.ipcse  = cpu_to_le16(ipcse);
2779                 context_desc->upper_setup.tcp_fields.tucss = tucss;
2780                 context_desc->upper_setup.tcp_fields.tucso = tucso;
2781                 context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse);
2782                 context_desc->tcp_seg_setup.fields.mss     = cpu_to_le16(mss);
2783                 context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
2784                 context_desc->cmd_and_length = cpu_to_le32(cmd_length);
2785
2786                 buffer_info->time_stamp = jiffies;
2787                 buffer_info->next_to_watch = i;
2788
2789                 if (++i == tx_ring->count)
2790                         i = 0;
2791
2792                 tx_ring->next_to_use = i;
2793
2794                 return true;
2795         }
2796         return false;
2797 }
2798
2799 static bool e1000_tx_csum(struct e1000_adapter *adapter,
2800                           struct e1000_tx_ring *tx_ring, struct sk_buff *skb,
2801                           __be16 protocol)
2802 {
2803         struct e1000_context_desc *context_desc;
2804         struct e1000_tx_buffer *buffer_info;
2805         unsigned int i;
2806         u8 css;
2807         u32 cmd_len = E1000_TXD_CMD_DEXT;
2808
2809         if (skb->ip_summed != CHECKSUM_PARTIAL)
2810                 return false;
2811
2812         switch (protocol) {
2813         case cpu_to_be16(ETH_P_IP):
2814                 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
2815                         cmd_len |= E1000_TXD_CMD_TCP;
2816                 break;
2817         case cpu_to_be16(ETH_P_IPV6):
2818                 /* XXX not handling all IPV6 headers */
2819                 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
2820                         cmd_len |= E1000_TXD_CMD_TCP;
2821                 break;
2822         default:
2823                 if (unlikely(net_ratelimit()))
2824                         e_warn(drv, "checksum_partial proto=%x!\n",
2825                                skb->protocol);
2826                 break;
2827         }
2828
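        /* tucss is where hardware checksumming starts; tucso (start offset
         * plus csum_offset from the stack) is where the result is stored.
         */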
2829         css = skb_checksum_start_offset(skb);
2830
2831         i = tx_ring->next_to_use;
2832         buffer_info = &tx_ring->buffer_info[i];
2833         context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
2834
2835         context_desc->lower_setup.ip_config = 0;
2836         context_desc->upper_setup.tcp_fields.tucss = css;
2837         context_desc->upper_setup.tcp_fields.tucso =
2838                 css + skb->csum_offset;
2839         context_desc->upper_setup.tcp_fields.tucse = 0;
2840         context_desc->tcp_seg_setup.data = 0;
2841         context_desc->cmd_and_length = cpu_to_le32(cmd_len);
2842
2843         buffer_info->time_stamp = jiffies;
2844         buffer_info->next_to_watch = i;
2845
2846         if (unlikely(++i == tx_ring->count))
2847                 i = 0;
2848
2849         tx_ring->next_to_use = i;
2850
2851         return true;
2852 }
2853
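/* Each data descriptor can carry at most 2^E1000_MAX_TXD_PWR = 4096 bytes of
 * payload; e1000_tx_map() splits larger buffers across several descriptors.
 */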
2854 #define E1000_MAX_TXD_PWR       12
2855 #define E1000_MAX_DATA_PER_TXD  (1<<E1000_MAX_TXD_PWR)
2856
2857 static int e1000_tx_map(struct e1000_adapter *adapter,
2858                         struct e1000_tx_ring *tx_ring,
2859                         struct sk_buff *skb, unsigned int first,
2860                         unsigned int max_per_txd, unsigned int nr_frags,
2861                         unsigned int mss)
2862 {
2863         struct e1000_hw *hw = &adapter->hw;
2864         struct pci_dev *pdev = adapter->pdev;
2865         struct e1000_tx_buffer *buffer_info;
2866         unsigned int len = skb_headlen(skb);
2867         unsigned int offset = 0, size, count = 0, i;
2868         unsigned int f, bytecount, segs;
2869
2870         i = tx_ring->next_to_use;
2871
2872         while (len) {
2873                 buffer_info = &tx_ring->buffer_info[i];
2874                 size = min(len, max_per_txd);
2875                 /* Workaround for controller erratum:
2876                  * the descriptor for a non-TSO packet in a linear skb that
2877                  * follows a TSO packet gets written back prematurely,
2878                  * before the data is fully DMA'd to the controller.
2879                  */
2880                 if (!skb->data_len && tx_ring->last_tx_tso &&
2881                     !skb_is_gso(skb)) {
2882                         tx_ring->last_tx_tso = false;
2883                         size -= 4;
2884                 }
2885
2886                 /* Workaround for premature desc write-backs
2887                  * in TSO mode.  Append 4-byte sentinel desc
2888                  */
2889                 if (unlikely(mss && !nr_frags && size == len && size > 8))
2890                         size -= 4;
2891                 /* Workaround for errata 10, which applies to all
2892                  * controllers in PCI-X mode.
2893                  * The fix is to make sure that the first descriptor of a
2894                  * packet is smaller than 2048 - 16 - 16 (or 2016) bytes.
2895                  */
2896                 if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
2897                              (size > 2015) && count == 0))
2898                         size = 2015;
2899
2900                 /* Workaround for potential 82544 hang in PCI-X.  Avoid
2901                  * terminating buffers within evenly-aligned dwords.
2902                  */
2903                 if (unlikely(adapter->pcix_82544 &&
2904                    !((unsigned long)(skb->data + offset + size - 1) & 4) &&
2905                    size > 4))
2906                         size -= 4;
2907
2908                 buffer_info->length = size;
2909                 /* set time_stamp *before* dma to help avoid a possible race */
2910                 buffer_info->time_stamp = jiffies;
2911                 buffer_info->mapped_as_page = false;
2912                 buffer_info->dma = dma_map_single(&pdev->dev,
2913                                                   skb->data + offset,
2914                                                   size, DMA_TO_DEVICE);
2915                 if (dma_mapping_error(&pdev->dev, buffer_info->dma))
2916                         goto dma_error;
2917                 buffer_info->next_to_watch = i;
2918
2919                 len -= size;
2920                 offset += size;
2921                 count++;
2922                 if (len) {
2923                         i++;
2924                         if (unlikely(i == tx_ring->count))
2925                                 i = 0;
2926                 }
2927         }
2928
2929         for (f = 0; f < nr_frags; f++) {
2930                 const struct skb_frag_struct *frag;
2931
2932                 frag = &skb_shinfo(skb)->frags[f];
2933                 len = skb_frag_size(frag);
2934                 offset = 0;
2935
2936                 while (len) {
2937                         unsigned long bufend;
2938                         i++;
2939                         if (unlikely(i == tx_ring->count))
2940                                 i = 0;
2941
2942                         buffer_info = &tx_ring->buffer_info[i];
2943                         size = min(len, max_per_txd);
2944                         /* Workaround for premature desc write-backs
2945                          * in TSO mode.  Append 4-byte sentinel desc
2946                          */
2947                         if (unlikely(mss && f == (nr_frags-1) &&
2948                             size == len && size > 8))
2949                                 size -= 4;
2950                         /* Workaround for potential 82544 hang in PCI-X.
2951                          * Avoid terminating buffers within evenly-aligned
2952                          * dwords.
2953                          */
2954                         bufend = (unsigned long)
2955                                 page_to_phys(skb_frag_page(frag));
2956                         bufend += offset + size - 1;
2957                         if (unlikely(adapter->pcix_82544 &&
2958                                      !(bufend & 4) &&
2959                                      size > 4))
2960                                 size -= 4;
2961
2962                         buffer_info->length = size;
2963                         buffer_info->time_stamp = jiffies;
2964                         buffer_info->mapped_as_page = true;
2965                         buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag,
2966                                                 offset, size, DMA_TO_DEVICE);
2967                         if (dma_mapping_error(&pdev->dev, buffer_info->dma))
2968                                 goto dma_error;
2969                         buffer_info->next_to_watch = i;
2970
2971                         len -= size;
2972                         offset += size;
2973                         count++;
2974                 }
2975         }
2976
2977         segs = skb_shinfo(skb)->gso_segs ?: 1;
2978         /* multiply data chunks by size of headers */
2979         bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len;
2980
2981         tx_ring->buffer_info[i].skb = skb;
2982         tx_ring->buffer_info[i].segs = segs;
2983         tx_ring->buffer_info[i].bytecount = bytecount;
2984         tx_ring->buffer_info[first].next_to_watch = i;
2985
2986         return count;
2987
2988 dma_error:
2989         dev_err(&pdev->dev, "TX DMA map failed\n");
2990         buffer_info->dma = 0;
2991         if (count)
2992                 count--;
2993
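        /* Unwind: walk backwards from the failed descriptor and release any
         * DMA mappings already created for this skb.
         */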
2994         while (count--) {
2995                 if (i == 0)
2996                         i += tx_ring->count;
2997                 i--;
2998                 buffer_info = &tx_ring->buffer_info[i];
2999                 e1000_unmap_and_free_tx_resource(adapter, buffer_info);
3000         }
3001
3002         return 0;
3003 }
3004
3005 static void e1000_tx_queue(struct e1000_adapter *adapter,
3006                            struct e1000_tx_ring *tx_ring, int tx_flags,
3007                            int count)
3008 {
3009         struct e1000_tx_desc *tx_desc = NULL;
3010         struct e1000_tx_buffer *buffer_info;
3011         u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
3012         unsigned int i;
3013
3014         if (likely(tx_flags & E1000_TX_FLAGS_TSO)) {
3015                 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
3016                              E1000_TXD_CMD_TSE;
3017                 txd_upper |= E1000_TXD_POPTS_TXSM << 8;
3018
3019                 if (likely(tx_flags & E1000_TX_FLAGS_IPV4))
3020                         txd_upper |= E1000_TXD_POPTS_IXSM << 8;
3021         }
3022
3023         if (likely(tx_flags & E1000_TX_FLAGS_CSUM)) {
3024                 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
3025                 txd_upper |= E1000_TXD_POPTS_TXSM << 8;
3026         }
3027
3028         if (unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) {
3029                 txd_lower |= E1000_TXD_CMD_VLE;
3030                 txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
3031         }
3032
3033         if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
3034                 txd_lower &= ~(E1000_TXD_CMD_IFCS);
3035
3036         i = tx_ring->next_to_use;
3037
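        /* Write one data descriptor per mapped buffer; only the last one gets
         * the final command bits from adapter->txd_cmd below (typically EOP
         * plus report-status/interrupt-delay bits, depending on configuration).
         */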
3038         while (count--) {
3039                 buffer_info = &tx_ring->buffer_info[i];
3040                 tx_desc = E1000_TX_DESC(*tx_ring, i);
3041                 tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
3042                 tx_desc->lower.data =
3043                         cpu_to_le32(txd_lower | buffer_info->length);
3044                 tx_desc->upper.data = cpu_to_le32(txd_upper);
3045                 if (unlikely(++i == tx_ring->count))
3046                         i = 0;
3047         }
3048
3049         tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);
3050
3051         /* txd_cmd re-enables FCS, so we'll re-disable it here as desired. */
3052         if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
3053                 tx_desc->lower.data &= ~(cpu_to_le32(E1000_TXD_CMD_IFCS));
3054
3055         /* Force memory writes to complete before letting h/w
3056          * know there are new descriptors to fetch.  (Only
3057          * applicable for weak-ordered memory model archs,
3058          * such as IA-64).
3059          */
3060         wmb();
3061
3062         tx_ring->next_to_use = i;
3063 }
3064
3065 /* 82547 workaround to avoid controller hang in half-duplex environment.
3066  * The workaround is to avoid queuing a large packet that would span
3067  * the internal Tx FIFO ring boundary by notifying the stack to resend
3068  * the packet at a later time.  This gives the Tx FIFO an opportunity to
3069  * flush all packets.  When that occurs, we reset the Tx FIFO pointers
3070  * to the beginning of the Tx FIFO.
3071  */
3072
3073 #define E1000_FIFO_HDR                  0x10
3074 #define E1000_82547_PAD_LEN             0x3E0
3075
3076 static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
3077                                        struct sk_buff *skb)
3078 {
3079         u32 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
3080         u32 skb_fifo_len = skb->len + E1000_FIFO_HDR;
3081
3082         skb_fifo_len = ALIGN(skb_fifo_len, E1000_FIFO_HDR);
3083
3084         if (adapter->link_duplex != HALF_DUPLEX)
3085                 goto no_fifo_stall_required;
3086
3087         if (atomic_read(&adapter->tx_fifo_stall))
3088                 return 1;
3089
3090         if (skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) {
3091                 atomic_set(&adapter->tx_fifo_stall, 1);
3092                 return 1;
3093         }
3094
3095 no_fifo_stall_required:
3096         adapter->tx_fifo_head += skb_fifo_len;
3097         if (adapter->tx_fifo_head >= adapter->tx_fifo_size)
3098                 adapter->tx_fifo_head -= adapter->tx_fifo_size;
3099         return 0;
3100 }
3101
3102 static int __e1000_maybe_stop_tx(struct net_device *netdev, int size)
3103 {
3104         struct e1000_adapter *adapter = netdev_priv(netdev);
3105         struct e1000_tx_ring *tx_ring = adapter->tx_ring;
3106
3107         netif_stop_queue(netdev);
3108         /* Herbert's original patch had:
3109          *  smp_mb__after_netif_stop_queue();
3110          * but since that doesn't exist yet, just open code it.
3111          */
3112         smp_mb();
3113
3114         /* We need to check again in a case another CPU has just
3115          * made room available.
3116          */
3117         if (likely(E1000_DESC_UNUSED(tx_ring) < size))
3118                 return -EBUSY;
3119
3120         /* A reprieve! */
3121         netif_start_queue(netdev);
3122         ++adapter->restart_queue;
3123         return 0;
3124 }
3125
3126 static int e1000_maybe_stop_tx(struct net_device *netdev,
3127                                struct e1000_tx_ring *tx_ring, int size)
3128 {
3129         if (likely(E1000_DESC_UNUSED(tx_ring) >= size))
3130                 return 0;
3131         return __e1000_maybe_stop_tx(netdev, size);
3132 }
3133
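/* TXD_USE_COUNT(S, X) computes ceil(S / 2^X), i.e. how many descriptors a
 * buffer of S bytes needs when each descriptor holds 2^X bytes.  For example,
 * a 9000-byte buffer with max_txd_pwr == 12 (4096-byte chunks) needs
 * TXD_USE_COUNT(9000, 12) == 3 descriptors.
 */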
3134 #define TXD_USE_COUNT(S, X) (((S) + ((1 << (X)) - 1)) >> (X))
3135 static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
3136                                     struct net_device *netdev)
3137 {
3138         struct e1000_adapter *adapter = netdev_priv(netdev);
3139         struct e1000_hw *hw = &adapter->hw;
3140         struct e1000_tx_ring *tx_ring;
3141         unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD;
3142         unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
3143         unsigned int tx_flags = 0;
3144         unsigned int len = skb_headlen(skb);
3145         unsigned int nr_frags;
3146         unsigned int mss;
3147         int count = 0;
3148         int tso;
3149         unsigned int f;
3150         __be16 protocol = vlan_get_protocol(skb);
3151
3152         /* This goes back to the question of how to logically map a Tx queue
3153          * to a flow.  Right now, performance is impacted slightly negatively
3154          * if using multiple Tx queues.  If the stack breaks away from a
3155          * single qdisc implementation, we can look at this again.
3156          */
3157         tx_ring = adapter->tx_ring;
3158
3159         /* On PCI/PCI-X HW, if packet size is less than ETH_ZLEN,
3160          * packets may get corrupted during padding by HW.
3161          * To work around this issue, pad all small packets manually.
3162          */
3163         if (eth_skb_pad(skb))
3164                 return NETDEV_TX_OK;
3165
3166         mss = skb_shinfo(skb)->gso_size;
3167         /* The controller does a simple calculation to
3168          * make sure there is enough room in the FIFO before
3169          * initiating the DMA for each buffer.  The calculation
3170          * assumes ceil(buffer len / mss) <= 4, so cap the max
3171          * buffer length at 4 * mss to make sure we don't overrun
3172          * the FIFO when mss drops.
3173          */
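        /* Example: with mss == 536 the cap becomes min(536 << 2, 4096) == 2144
         * bytes per buffer, and max_txd_pwr drops to fls(2144) - 1 == 11.
         */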
3174         if (mss) {
3175                 u8 hdr_len;
3176                 max_per_txd = min(mss << 2, max_per_txd);
3177                 max_txd_pwr = fls(max_per_txd) - 1;
3178
3179                 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
3180                 if (skb->data_len && hdr_len == len) {
3181                         switch (hw->mac_type) {
3182                         case e1000_82544: {
3183                                 unsigned int pull_size;
3184
3185                                 /* Make sure we have room to chop off 4 bytes,
3186                                  * and that the end alignment will work out to
3187                                  * this hardware's requirements
3188                                  * NOTE: this is a TSO only workaround
3189                                  * if end byte alignment not correct move us
3190                                  * into the next dword
3191                                  */
3192                                 if ((unsigned long)(skb_tail_pointer(skb) - 1)
3193                                     & 4)
3194                                         break;
3195                                 /* pull 4 bytes to move the end into the next dword */
3196                                 pull_size = min((unsigned int)4, skb->data_len);
3197                                 if (!__pskb_pull_tail(skb, pull_size)) {
3198                                         e_err(drv,
3199                                               "__pskb_pull_tail failed.\n");
3200                                         dev_kfree_skb_any(skb);
3201                                         return NETDEV_TX_OK;
3202                                 }
3203                                 len = skb_headlen(skb);
3204                                 break;
3205                         }
3206                         default:
3207                                 /* do nothing */
3208                                 break;
3209                         }
3210                 }
3211         }
3212
3213         /* reserve a descriptor for the offload context */
3214         if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL))
3215                 count++;
3216         count++;
3217
3218         /* Controller Erratum workaround */
3219         if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb))
3220                 count++;
3221
3222         count += TXD_USE_COUNT(len, max_txd_pwr);
3223
3224         if (adapter->pcix_82544)
3225                 count++;
3226
3227         /* work-around for errata 10 and it applies to all controllers
3228          * in PCI-X mode, so add one more descriptor to the count
3229          */
3230         if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
3231                         (len > 2015)))
3232                 count++;
3233
3234         nr_frags = skb_shinfo(skb)->nr_frags;
3235         for (f = 0; f < nr_frags; f++)
3236                 count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]),
3237                                        max_txd_pwr);
3238         if (adapter->pcix_82544)
3239                 count += nr_frags;
3240
3241         /* need: count + 2 desc gap to keep tail from touching
3242          * head, otherwise try next time
3243          */
3244         if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2)))
3245                 return NETDEV_TX_BUSY;
3246
3247         if (unlikely((hw->mac_type == e1000_82547) &&
3248                      (e1000_82547_fifo_workaround(adapter, skb)))) {
3249                 netif_stop_queue(netdev);
3250                 if (!test_bit(__E1000_DOWN, &adapter->flags))
3251                         schedule_delayed_work(&adapter->fifo_stall_task, 1);
3252                 return NETDEV_TX_BUSY;
3253         }
3254
3255         if (skb_vlan_tag_present(skb)) {
3256                 tx_flags |= E1000_TX_FLAGS_VLAN;
3257                 tx_flags |= (skb_vlan_tag_get(skb) <<
3258                              E1000_TX_FLAGS_VLAN_SHIFT);
3259         }
3260
3261         first = tx_ring->next_to_use;
3262
3263         tso = e1000_tso(adapter, tx_ring, skb, protocol);
3264         if (tso < 0) {
3265                 dev_kfree_skb_any(skb);
3266                 return NETDEV_TX_OK;
3267         }
3268
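        /* If this was a TSO packet, remember it (except on 82544);
         * e1000_tx_map() uses last_tx_tso to shrink the first descriptor of
         * the next non-TSO linear packet (premature write-back erratum).
         */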
3269         if (likely(tso)) {
3270                 if (likely(hw->mac_type != e1000_82544))
3271                         tx_ring->last_tx_tso = true;
3272                 tx_flags |= E1000_TX_FLAGS_TSO;
3273         } else if (likely(e1000_tx_csum(adapter, tx_ring, skb, protocol)))
3274                 tx_flags |= E1000_TX_FLAGS_CSUM;
3275
3276         if (protocol == htons(ETH_P_IP))
3277                 tx_flags |= E1000_TX_FLAGS_IPV4;
3278
3279         if (unlikely(skb->no_fcs))
3280                 tx_flags |= E1000_TX_FLAGS_NO_FCS;
3281
3282         count = e1000_tx_map(adapter, tx_ring, skb, first, max_per_txd,
3283                              nr_frags, mss);
3284
3285         if (count) {
3286                 /* The number of descriptors needed is higher than in other
3287                  * Intel drivers due to a number of workarounds.  The breakdown:
3288                  * Data descriptors: MAX_SKB_FRAGS + 1
3289                  * Context Descriptor: 1
3290                  * Keep head from touching tail: 2
3291                  * Workarounds: 3
3292                  */
3293                 int desc_needed = MAX_SKB_FRAGS + 7;
3294
3295                 netdev_sent_queue(netdev, skb->len);
3296                 skb_tx_timestamp(skb);
3297
3298                 e1000_tx_queue(adapter, tx_ring, tx_flags, count);
3299
3300                 /* 82544 potentially requires twice as many data descriptors
3301                  * in order to guarantee buffers don't end on evenly-aligned
3302                  * dwords
3303                  */
3304                 if (adapter->pcix_82544)
3305                         desc_needed += MAX_SKB_FRAGS + 1;
3306
3307                 /* Make sure there is space in the ring for the next send. */
3308                 e1000_maybe_stop_tx(netdev, tx_ring, desc_needed);
3309
3310                 if (!skb->xmit_more ||
3311                     netif_xmit_stopped(netdev_get_tx_queue(netdev, 0))) {
3312                         writel(tx_ring->next_to_use, hw->hw_addr + tx_ring->tdt);
3313                         /* we need this if more than one processor can write to
3314                          * our tail at a time, it synchronizes IO on IA64/Altix
3315                          * systems
3316                          */
3317                         mmiowb();
3318                 }
3319         } else {
3320                 dev_kfree_skb_any(skb);
3321                 tx_ring->buffer_info[first].time_stamp = 0;
3322                 tx_ring->next_to_use = first;
3323         }
3324
3325         return NETDEV_TX_OK;
3326 }
3327
3328 #define NUM_REGS 38 /* 1 based count */
3329 static void e1000_regdump(struct e1000_adapter *adapter)
3330 {
3331         struct e1000_hw *hw = &adapter->hw;
3332         u32 regs[NUM_REGS];
3333         u32 *regs_buff = regs;
3334         int i = 0;
3335
3336         static const char * const reg_name[] = {
3337                 "CTRL",  "STATUS",
3338                 "RCTL", "RDLEN", "RDH", "RDT", "RDTR",
3339                 "TCTL", "TDBAL", "TDBAH", "TDLEN", "TDH", "TDT",
3340                 "TIDV", "TXDCTL", "TADV", "TARC0",
3341                 "TDBAL1", "TDBAH1", "TDLEN1", "TDH1", "TDT1",
3342                 "TXDCTL1", "TARC1",
3343                 "CTRL_EXT", "ERT", "RDBAL", "RDBAH",
3344                 "TDFH", "TDFT", "TDFHS", "TDFTS", "TDFPC",
3345                 "RDFH", "RDFT", "RDFHS", "RDFTS", "RDFPC"
3346         };
3347
3348         regs_buff[0]  = er32(CTRL);
3349         regs_buff[1]  = er32(STATUS);
3350
3351         regs_buff[2]  = er32(RCTL);
3352         regs_buff[3]  = er32(RDLEN);
3353         regs_buff[4]  = er32(RDH);
3354         regs_buff[5]  = er32(RDT);
3355         regs_buff[6]  = er32(RDTR);
3356
3357         regs_buff[7]  = er32(TCTL);
3358         regs_buff[8]  = er32(TDBAL);
3359         regs_buff[9]  = er32(TDBAH);
3360         regs_buff[10] = er32(TDLEN);
3361         regs_buff[11] = er32(TDH);
3362         regs_buff[12] = er32(TDT);
3363         regs_buff[13] = er32(TIDV);
3364         regs_buff[14] = er32(TXDCTL);
3365         regs_buff[15] = er32(TADV);
3366         regs_buff[16] = er32(TARC0);
3367
3368         regs_buff[17] = er32(TDBAL1);
3369         regs_buff[18] = er32(TDBAH1);
3370         regs_buff[19] = er32(TDLEN1);
3371         regs_buff[20] = er32(TDH1);
3372         regs_buff[21] = er32(TDT1);
3373         regs_buff[22] = er32(TXDCTL1);
3374         regs_buff[23] = er32(TARC1);
3375         regs_buff[24] = er32(CTRL_EXT);
3376         regs_buff[25] = er32(ERT);
3377         regs_buff[26] = er32(RDBAL0);
3378         regs_buff[27] = er32(RDBAH0);
3379         regs_buff[28] = er32(TDFH);
3380         regs_buff[29] = er32(TDFT);
3381         regs_buff[30] = er32(TDFHS);
3382         regs_buff[31] = er32(TDFTS);
3383         regs_buff[32] = er32(TDFPC);
3384         regs_buff[33] = er32(RDFH);
3385         regs_buff[34] = er32(RDFT);
3386         regs_buff[35] = er32(RDFHS);
3387         regs_buff[36] = er32(RDFTS);
3388         regs_buff[37] = er32(RDFPC);
3389
3390         pr_info("Register dump\n");
3391         for (i = 0; i < NUM_REGS; i++)
3392                 pr_info("%-15s  %08x\n", reg_name[i], regs_buff[i]);
3393 }
3394
3395 /*
3396  * e1000_dump: Print registers, tx ring and rx ring
3397  */
3398 static void e1000_dump(struct e1000_adapter *adapter)
3399 {
3400         /* this code doesn't handle multiple rings */
3401         struct e1000_tx_ring *tx_ring = adapter->tx_ring;
3402         struct e1000_rx_ring *rx_ring = adapter->rx_ring;
3403         int i;
3404
3405         if (!netif_msg_hw(adapter))
3406                 return;
3407
3408         /* Print Registers */
3409         e1000_regdump(adapter);
3410
3411         /* transmit dump */
3412         pr_info("TX Desc ring0 dump\n");
3413
3414         /* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended)
3415          *
3416          * Legacy Transmit Descriptor
3417          *   +--------------------------------------------------------------+
3418          * 0 |         Buffer Address [63:0] (Reserved on Write Back)       |
3419          *   +--------------------------------------------------------------+
3420          * 8 | Special  |    CSS     | Status |  CMD    |  CSO   |  Length  |
3421          *   +--------------------------------------------------------------+
3422          *   63       48 47        36 35    32 31     24 23    16 15        0
3423          *
3424          * Extended Context Descriptor (DTYP=0x0) for TSO or checksum offload
3425          *   63      48 47    40 39       32 31             16 15    8 7      0
3426          *   +----------------------------------------------------------------+
3427          * 0 |  TUCSE  | TUCS0  |   TUCSS   |     IPCSE       | IPCS0 | IPCSS |
3428          *   +----------------------------------------------------------------+
3429          * 8 |   MSS   | HDRLEN | RSV | STA | TUCMD | DTYP |      PAYLEN      |
3430          *   +----------------------------------------------------------------+
3431          *   63      48 47    40 39 36 35 32 31   24 23  20 19                0
3432          *
3433          * Extended Data Descriptor (DTYP=0x1)
3434          *   +----------------------------------------------------------------+
3435          * 0 |                     Buffer Address [63:0]                      |
3436          *   +----------------------------------------------------------------+
3437          * 8 | VLAN tag |  POPTS  | Rsvd | Status | Command | DTYP |  DTALEN  |
3438          *   +----------------------------------------------------------------+
3439          *   63       48 47     40 39  36 35    32 31     24 23  20 19        0
3440          */
3441         pr_info("Tc[desc]     [Ce CoCsIpceCoS] [MssHlRSCm0Plen] [bi->dma       ] leng  ntw timestmp         bi->skb\n");
3442         pr_info("Td[desc]     [address 63:0  ] [VlaPoRSCm1Dlen] [bi->dma       ] leng  ntw timestmp         bi->skb\n");
3443
3444         if (!netif_msg_tx_done(adapter))
3445                 goto rx_ring_summary;
3446
3447         for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
3448                 struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i);
3449                 struct e1000_tx_buffer *buffer_info = &tx_ring->buffer_info[i];
3450                 struct my_u { __le64 a; __le64 b; };
3451                 struct my_u *u = (struct my_u *)tx_desc;
3452                 const char *type;
3453
3454                 if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean)
3455                         type = "NTC/U";
3456                 else if (i == tx_ring->next_to_use)
3457                         type = "NTU";
3458                 else if (i == tx_ring->next_to_clean)
3459                         type = "NTC";
3460                 else
3461                         type = "";
3462
3463                 pr_info("T%c[0x%03X]    %016llX %016llX %016llX %04X  %3X %016llX %p %s\n",
3464                         ((le64_to_cpu(u->b) & (1<<20)) ? 'd' : 'c'), i,
3465                         le64_to_cpu(u->a), le64_to_cpu(u->b),
3466                         (u64)buffer_info->dma, buffer_info->length,
3467                         buffer_info->next_to_watch,
3468                         (u64)buffer_info->time_stamp, buffer_info->skb, type);
3469         }
3470
3471 rx_ring_summary:
3472         /* receive dump */
3473         pr_info("\nRX Desc ring dump\n");
3474
3475         /* Legacy Receive Descriptor Format
3476          *
3477          * +-----------------------------------------------------+
3478          * |                Buffer Address [63:0]                |
3479          * +-----------------------------------------------------+
3480          * | VLAN Tag | Errors | Status 0 | Packet csum | Length |
3481          * +-----------------------------------------------------+
3482          * 63       48 47    40 39      32 31         16 15      0
3483          */
3484         pr_info("R[desc]      [address 63:0  ] [vl er S cks ln] [bi->dma       ] [bi->skb]\n");
3485
3486         if (!netif_msg_rx_status(adapter))
3487                 goto exit;
3488
3489         for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) {
3490                 struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rx_ring, i);
3491                 struct e1000_rx_buffer *buffer_info = &rx_ring->buffer_info[i];
3492                 struct my_u { __le64 a; __le64 b; };
3493                 struct my_u *u = (struct my_u *)rx_desc;
3494                 const char *type;
3495
3496                 if (i == rx_ring->next_to_use)
3497                         type = "NTU";
3498                 else if (i == rx_ring->next_to_clean)
3499                         type = "NTC";
3500                 else
3501                         type = "";
3502
3503                 pr_info("R[0x%03X]     %016llX %016llX %016llX %p %s\n",
3504                         i, le64_to_cpu(u->a), le64_to_cpu(u->b),
3505                         (u64)buffer_info->dma, buffer_info->rxbuf.data, type);
3506         } /* for */
3507
3508         /* dump the descriptor caches */
3509         /* rx */
3510         pr_info("Rx descriptor cache in 64bit format\n");
3511         for (i = 0x6000; i <= 0x63FF ; i += 0x10) {
3512                 pr_info("R%04X: %08X|%08X %08X|%08X\n",
3513                         i,
3514                         readl(adapter->hw.hw_addr + i+4),
3515                         readl(adapter->hw.hw_addr + i),
3516                         readl(adapter->hw.hw_addr + i+12),
3517                         readl(adapter->hw.hw_addr + i+8));
3518         }
3519         /* tx */
3520         pr_info("Tx descriptor cache in 64bit format\n");
3521         for (i = 0x7000; i <= 0x73FF ; i += 0x10) {
3522                 pr_info("T%04X: %08X|%08X %08X|%08X\n",
3523                         i,
3524                         readl(adapter->hw.hw_addr + i+4),
3525                         readl(adapter->hw.hw_addr + i),
3526                         readl(adapter->hw.hw_addr + i+12),
3527                         readl(adapter->hw.hw_addr + i+8));
3528         }
3529 exit:
3530         return;
3531 }
3532
3533 /**
3534  * e1000_tx_timeout - Respond to a Tx Hang
3535  * @netdev: network interface device structure
3536  **/
3537 static void e1000_tx_timeout(struct net_device *netdev)
3538 {
3539         struct e1000_adapter *adapter = netdev_priv(netdev);
3540
3541         /* Do the reset outside of interrupt context */
3542         adapter->tx_timeout_count++;
3543         schedule_work(&adapter->reset_task);
3544 }
3545
3546 static void e1000_reset_task(struct work_struct *work)
3547 {
3548         struct e1000_adapter *adapter =
3549                 container_of(work, struct e1000_adapter, reset_task);
3550
3551         e_err(drv, "Reset adapter\n");
3552         e1000_reinit_locked(adapter);
3553 }
3554
3555 /**
3556  * e1000_change_mtu - Change the Maximum Transfer Unit
3557  * @netdev: network interface device structure
3558  * @new_mtu: new value for maximum frame size
3559  *
3560  * Returns 0 on success, negative on failure
3561  **/
3562 static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
3563 {
3564         struct e1000_adapter *adapter = netdev_priv(netdev);
3565         struct e1000_hw *hw = &adapter->hw;
3566         int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
3567
3568         /* Adapter-specific max frame size limits. */
3569         switch (hw->mac_type) {
3570         case e1000_undefined ... e1000_82542_rev2_1:
3571                 if (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)) {
3572                         e_err(probe, "Jumbo Frames not supported.\n");
3573                         return -EINVAL;
3574                 }
3575                 break;
3576         default:
3577                 /* Capable of supporting up to MAX_JUMBO_FRAME_SIZE limit. */
3578                 break;
3579         }
3580
3581         while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
3582                 msleep(1);
3583         /* e1000_down has a dependency on max_frame_size */
3584         hw->max_frame_size = max_frame;
3585         if (netif_running(netdev)) {
3586                 /* prevent buffers from being reallocated */
3587                 adapter->alloc_rx_buf = e1000_alloc_dummy_rx_buffers;
3588                 e1000_down(adapter);
3589         }
3590
3591         /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
3592          * means we reserve 2 more, this pushes us to allocate from the next
3593          * larger slab size.
3594          * i.e. RXBUFFER_2048 --> size-4096 slab
3595          * however with the new *_jumbo_rx* routines, jumbo receives will use
3596          * fragmented skbs
3597          */
3598
3599         if (max_frame <= E1000_RXBUFFER_2048)
3600                 adapter->rx_buffer_len = E1000_RXBUFFER_2048;
3601         else
3602 #if (PAGE_SIZE >= E1000_RXBUFFER_16384)
3603                 adapter->rx_buffer_len = E1000_RXBUFFER_16384;
3604 #elif (PAGE_SIZE >= E1000_RXBUFFER_4096)
3605                 adapter->rx_buffer_len = PAGE_SIZE;
3606 #endif
3607
3608         /* adjust allocation if LPE protects us, and we aren't using SBP */
3609         if (!hw->tbi_compatibility_on &&
3610             ((max_frame == (ETH_FRAME_LEN + ETH_FCS_LEN)) ||
3611              (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE)))
3612                 adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
3613
3614         pr_info("%s changing MTU from %d to %d\n",
3615                 netdev->name, netdev->mtu, new_mtu);
3616         netdev->mtu = new_mtu;
3617
3618         if (netif_running(netdev))
3619                 e1000_up(adapter);
3620         else
3621                 e1000_reset(adapter);
3622
3623         clear_bit(__E1000_RESETTING, &adapter->flags);
3624
3625         return 0;
3626 }
3627
3628 /**
3629  * e1000_update_stats - Update the board statistics counters
3630  * @adapter: board private structure
3631  **/
3632 void e1000_update_stats(struct e1000_adapter *adapter)
3633 {
3634         struct net_device *netdev = adapter->netdev;
3635         struct e1000_hw *hw = &adapter->hw;
3636         struct pci_dev *pdev = adapter->pdev;
3637         unsigned long flags;
3638         u16 phy_tmp;
3639
3640 #define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
3641
3642         /* Prevent stats update while adapter is being reset, or if the pci
3643          * connection is down.
3644          */
3645         if (adapter->link_speed == 0)
3646                 return;
3647         if (pci_channel_offline(pdev))
3648                 return;
3649
3650         spin_lock_irqsave(&adapter->stats_lock, flags);
3651
3652         /* these counters are modified by e1000_tbi_adjust_stats(),
3653          * which is called from interrupt context, so they must only
3654          * be written while holding adapter->stats_lock
3655          */
3656
3657         adapter->stats.crcerrs += er32(CRCERRS);
3658         adapter->stats.gprc += er32(GPRC);
3659         adapter->stats.gorcl += er32(GORCL);
3660         adapter->stats.gorch += er32(GORCH);
3661         adapter->stats.bprc += er32(BPRC);
3662         adapter->stats.mprc += er32(MPRC);
3663         adapter->stats.roc += er32(ROC);
3664
3665         adapter->stats.prc64 += er32(PRC64);
3666         adapter->stats.prc127 += er32(PRC127);
3667         adapter->stats.prc255 += er32(PRC255);
3668         adapter->stats.prc511 += er32(PRC511);
3669         adapter->stats.prc1023 += er32(PRC1023);
3670         adapter->stats.prc1522 += er32(PRC1522);
3671
3672         adapter->stats.symerrs += er32(SYMERRS);
3673         adapter->stats.mpc += er32(MPC);
3674         adapter->stats.scc += er32(SCC);
3675         adapter->stats.ecol += er32(ECOL);
3676         adapter->stats.mcc += er32(MCC);
3677         adapter->stats.latecol += er32(LATECOL);
3678         adapter->stats.dc += er32(DC);
3679         adapter->stats.sec += er32(SEC);
3680         adapter->stats.rlec += er32(RLEC);
3681         adapter->stats.xonrxc += er32(XONRXC);
3682         adapter->stats.xontxc += er32(XONTXC);
3683         adapter->stats.xoffrxc += er32(XOFFRXC);
3684         adapter->stats.xofftxc += er32(XOFFTXC);
3685         adapter->stats.fcruc += er32(FCRUC);
3686         adapter->stats.gptc += er32(GPTC);
3687         adapter->stats.gotcl += er32(GOTCL);
3688         adapter->stats.gotch += er32(GOTCH);
3689         adapter->stats.rnbc += er32(RNBC);
3690         adapter->stats.ruc += er32(RUC);
3691         adapter->stats.rfc += er32(RFC);
3692         adapter->stats.rjc += er32(RJC);
3693         adapter->stats.torl += er32(TORL);
3694         adapter->stats.torh += er32(TORH);
3695         adapter->stats.totl += er32(TOTL);
3696         adapter->stats.toth += er32(TOTH);
3697         adapter->stats.tpr += er32(TPR);
3698
3699         adapter->stats.ptc64 += er32(PTC64);
3700         adapter->stats.ptc127 += er32(PTC127);
3701         adapter->stats.ptc255 += er32(PTC255);
3702         adapter->stats.ptc511 += er32(PTC511);
3703         adapter->stats.ptc1023 += er32(PTC1023);
3704         adapter->stats.ptc1522 += er32(PTC1522);
3705
3706         adapter->stats.mptc += er32(MPTC);
3707         adapter->stats.bptc += er32(BPTC);
3708
3709         /* used for adaptive IFS */
3710
3711         hw->tx_packet_delta = er32(TPT);
3712         adapter->stats.tpt += hw->tx_packet_delta;
3713         hw->collision_delta = er32(COLC);
3714         adapter->stats.colc += hw->collision_delta;
3715
3716         if (hw->mac_type >= e1000_82543) {
3717                 adapter->stats.algnerrc += er32(ALGNERRC);
3718                 adapter->stats.rxerrc += er32(RXERRC);
3719                 adapter->stats.tncrs += er32(TNCRS);
3720                 adapter->stats.cexterr += er32(CEXTERR);
3721                 adapter->stats.tsctc += er32(TSCTC);
3722                 adapter->stats.tsctfc += er32(TSCTFC);
3723         }
3724
3725         /* Fill out the OS statistics structure */
3726         netdev->stats.multicast = adapter->stats.mprc;
3727         netdev->stats.collisions = adapter->stats.colc;
3728
3729         /* Rx Errors */
3730
3731         /* RLEC on some newer hardware can be incorrect so build
3732          * our own version based on RUC and ROC
3733          */
3734         netdev->stats.rx_errors = adapter->stats.rxerrc +
3735                 adapter->stats.crcerrs + adapter->stats.algnerrc +
3736                 adapter->stats.ruc + adapter->stats.roc +
3737                 adapter->stats.cexterr;
3738         adapter->stats.rlerrc = adapter->stats.ruc + adapter->stats.roc;
3739         netdev->stats.rx_length_errors = adapter->stats.rlerrc;
3740         netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
3741         netdev->stats.rx_frame_errors = adapter->stats.algnerrc;
3742         netdev->stats.rx_missed_errors = adapter->stats.mpc;
3743
3744         /* Tx Errors */
3745         adapter->stats.txerrc = adapter->stats.ecol + adapter->stats.latecol;
3746         netdev->stats.tx_errors = adapter->stats.txerrc;
3747         netdev->stats.tx_aborted_errors = adapter->stats.ecol;
3748         netdev->stats.tx_window_errors = adapter->stats.latecol;
3749         netdev->stats.tx_carrier_errors = adapter->stats.tncrs;
3750         if (hw->bad_tx_carr_stats_fd &&
3751             adapter->link_duplex == FULL_DUPLEX) {
3752                 netdev->stats.tx_carrier_errors = 0;
3753                 adapter->stats.tncrs = 0;
3754         }
3755
3756         /* Tx Dropped needs to be maintained elsewhere */
3757
3758         /* Phy Stats */
3759         if (hw->media_type == e1000_media_type_copper) {
3760                 if ((adapter->link_speed == SPEED_1000) &&
3761                    (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
3762                         phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
3763                         adapter->phy_stats.idle_errors += phy_tmp;
3764                 }
3765
3766                 if ((hw->mac_type <= e1000_82546) &&
3767                    (hw->phy_type == e1000_phy_m88) &&
3768                    !e1000_read_phy_reg(hw, M88E1000_RX_ERR_CNTR, &phy_tmp))
3769                         adapter->phy_stats.receive_errors += phy_tmp;
3770         }
3771
3772         /* Management Stats */
3773         if (hw->has_smbus) {
3774                 adapter->stats.mgptc += er32(MGTPTC);
3775                 adapter->stats.mgprc += er32(MGTPRC);
3776                 adapter->stats.mgpdc += er32(MGTPDC);
3777         }
3778
3779         spin_unlock_irqrestore(&adapter->stats_lock, flags);
3780 }
3781
3782 /**
3783  * e1000_intr - Interrupt Handler
3784  * @irq: interrupt number
3785  * @data: pointer to a network interface device structure
3786  **/
3787 static irqreturn_t e1000_intr(int irq, void *data)
3788 {
3789         struct net_device *netdev = data;
3790         struct e1000_adapter *adapter = netdev_priv(netdev);
3791         struct e1000_hw *hw = &adapter->hw;
3792         u32 icr = er32(ICR);
3793
3794         if (unlikely(!icr))
3795                 return IRQ_NONE;  /* Not our interrupt */
3796
3797         /* we might have caused the interrupt, but the above
3798          * read cleared it, and just in case the driver is
3799          * down there is nothing to do so return handled
3800          */
3801         if (unlikely(test_bit(__E1000_DOWN, &adapter->flags)))
3802                 return IRQ_HANDLED;
3803
3804         if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) {
3805                 hw->get_link_status = 1;
3806                 /* guard against interrupt when we're going down */
3807                 if (!test_bit(__E1000_DOWN, &adapter->flags))
3808                         schedule_delayed_work(&adapter->watchdog_task, 1);
3809         }
3810
3811         /* disable interrupts, without the synchronize_irq bit */
3812         ew32(IMC, ~0);
3813         E1000_WRITE_FLUSH();
3814
3815         if (likely(napi_schedule_prep(&adapter->napi))) {
3816                 adapter->total_tx_bytes = 0;
3817                 adapter->total_tx_packets = 0;
3818                 adapter->total_rx_bytes = 0;
3819                 adapter->total_rx_packets = 0;
3820                 __napi_schedule(&adapter->napi);
3821         } else {
3822                 /* this really should not happen! if it does it is basically a
3823                  * bug, but not a hard error, so enable ints and continue
3824                  */
3825                 if (!test_bit(__E1000_DOWN, &adapter->flags))
3826                         e1000_irq_enable(adapter);
3827         }
3828
3829         return IRQ_HANDLED;
3830 }
3831
3832 /**
3833  * e1000_clean - NAPI Rx polling callback
3834  * @napi: napi struct embedded in the board private structure
3835  **/
3836 static int e1000_clean(struct napi_struct *napi, int budget)
3837 {
3838         struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter,
3839                                                      napi);
3840         int tx_clean_complete = 0, work_done = 0;
3841
3842         tx_clean_complete = e1000_clean_tx_irq(adapter, &adapter->tx_ring[0]);
3843
3844         adapter->clean_rx(adapter, &adapter->rx_ring[0], &work_done, budget);
3845
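        /* If Tx cleaning did not finish, claim the full budget so NAPI keeps
         * polling instead of re-enabling interrupts.
         */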
3846         if (!tx_clean_complete)
3847                 work_done = budget;
3848
3849         /* If budget not fully consumed, exit the polling mode */
3850         if (work_done < budget) {
3851                 if (likely(adapter->itr_setting & 3))
3852                         e1000_set_itr(adapter);
3853                 napi_complete_done(napi, work_done);
3854                 if (!test_bit(__E1000_DOWN, &adapter->flags))
3855                         e1000_irq_enable(adapter);
3856         }
3857
3858         return work_done;
3859 }
3860
3861 /**
3862  * e1000_clean_tx_irq - Reclaim resources after transmit completes
3863  * @adapter: board private structure
3864  **/
3865 static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
3866                                struct e1000_tx_ring *tx_ring)
3867 {
3868         struct e1000_hw *hw = &adapter->hw;
3869         struct net_device *netdev = adapter->netdev;
3870         struct e1000_tx_desc *tx_desc, *eop_desc;
3871         struct e1000_tx_buffer *buffer_info;
3872         unsigned int i, eop;
3873         unsigned int count = 0;
3874         unsigned int total_tx_bytes = 0, total_tx_packets = 0;
3875         unsigned int bytes_compl = 0, pkts_compl = 0;
3876
3877         i = tx_ring->next_to_clean;
3878         eop = tx_ring->buffer_info[i].next_to_watch;
3879         eop_desc = E1000_TX_DESC(*tx_ring, eop);
3880
3881         while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
3882                (count < tx_ring->count)) {
3883                 bool cleaned = false;
3884                 dma_rmb();      /* read buffer_info after eop_desc */
3885                 for ( ; !cleaned; count++) {
3886                         tx_desc = E1000_TX_DESC(*tx_ring, i);
3887                         buffer_info = &tx_ring->buffer_info[i];
3888                         cleaned = (i == eop);
3889
3890                         if (cleaned) {
3891                                 total_tx_packets += buffer_info->segs;
3892                                 total_tx_bytes += buffer_info->bytecount;
3893                                 if (buffer_info->skb) {
3894                                         bytes_compl += buffer_info->skb->len;
3895                                         pkts_compl++;
3896                                 }
3897
3898                         }
3899                         e1000_unmap_and_free_tx_resource(adapter, buffer_info);
3900                         tx_desc->upper.data = 0;
3901
3902                         if (unlikely(++i == tx_ring->count))
3903                                 i = 0;
3904                 }
3905
3906                 eop = tx_ring->buffer_info[i].next_to_watch;
3907                 eop_desc = E1000_TX_DESC(*tx_ring, eop);
3908         }
3909
3910         /* Synchronize with E1000_DESC_UNUSED called from e1000_xmit_frame,
3911          * which will reuse the cleaned buffers.
3912          */
3913         smp_store_release(&tx_ring->next_to_clean, i);
3914
3915         netdev_completed_queue(netdev, pkts_compl, bytes_compl);
3916
3917 #define TX_WAKE_THRESHOLD 32
3918         if (unlikely(count && netif_carrier_ok(netdev) &&
3919                      E1000_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) {
3920                 /* Make sure that anybody stopping the queue after this
3921                  * sees the new next_to_clean.
3922                  */
3923                 smp_mb();
3924
3925                 if (netif_queue_stopped(netdev) &&
3926                     !(test_bit(__E1000_DOWN, &adapter->flags))) {
3927                         netif_wake_queue(netdev);
3928                         ++adapter->restart_queue;
3929                 }
3930         }
3931
3932         if (adapter->detect_tx_hung) {
3933                 /* Detect a transmit hang in hardware; this serializes the
3934                  * check with the clearing of time_stamp and movement of i
3935                  */
3936                 adapter->detect_tx_hung = false;
3937                 if (tx_ring->buffer_info[eop].time_stamp &&
3938                     time_after(jiffies, tx_ring->buffer_info[eop].time_stamp +
3939                                (adapter->tx_timeout_factor * HZ)) &&
3940                     !(er32(STATUS) & E1000_STATUS_TXOFF)) {
3941
3942                         /* detected Tx unit hang */
3943                         e_err(drv, "Detected Tx Unit Hang\n"
3944                               "  Tx Queue             <%lu>\n"
3945                               "  TDH                  <%x>\n"
3946                               "  TDT                  <%x>\n"
3947                               "  next_to_use          <%x>\n"
3948                               "  next_to_clean        <%x>\n"
3949                               "buffer_info[next_to_clean]\n"
3950                               "  time_stamp           <%lx>\n"
3951                               "  next_to_watch        <%x>\n"
3952                               "  jiffies              <%lx>\n"
3953                               "  next_to_watch.status <%x>\n",
3954                                 (unsigned long)(tx_ring - adapter->tx_ring),
3955                                 readl(hw->hw_addr + tx_ring->tdh),
3956                                 readl(hw->hw_addr + tx_ring->tdt),
3957                                 tx_ring->next_to_use,
3958                                 tx_ring->next_to_clean,
3959                                 tx_ring->buffer_info[eop].time_stamp,
3960                                 eop,
3961                                 jiffies,
3962                                 eop_desc->upper.fields.status);
3963                         e1000_dump(adapter);
3964                         netif_stop_queue(netdev);
3965                 }
3966         }
3967         adapter->total_tx_bytes += total_tx_bytes;
3968         adapter->total_tx_packets += total_tx_packets;
3969         netdev->stats.tx_bytes += total_tx_bytes;
3970         netdev->stats.tx_packets += total_tx_packets;
3971         return count < tx_ring->count;
3972 }
3973
3974 /**
3975  * e1000_rx_checksum - Receive Checksum Offload for 82543
3976  * @adapter:     board private structure
3977  * @status_err:  receive descriptor status and error fields
3978  * @csum:        receive descriptor csum field
3979  * @skb:         socket buffer with received data
3980  **/
3981 static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
3982                               u32 csum, struct sk_buff *skb)
3983 {
3984         struct e1000_hw *hw = &adapter->hw;
3985         u16 status = (u16)status_err;
3986         u8 errors = (u8)(status_err >> 24);
3987
3988         skb_checksum_none_assert(skb);
3989
3990         /* 82543 or newer only */
3991         if (unlikely(hw->mac_type < e1000_82543))
3992                 return;
3993         /* Ignore Checksum bit is set */
3994         if (unlikely(status & E1000_RXD_STAT_IXSM))
3995                 return;
3996         /* TCP/UDP checksum error bit is set */
3997         if (unlikely(errors & E1000_RXD_ERR_TCPE)) {
3998                 /* let the stack verify checksum errors */
3999                 adapter->hw_csum_err++;
4000                 return;
4001         }
4002         /* TCP/UDP Checksum has not been calculated */
4003         if (!(status & E1000_RXD_STAT_TCPCS))
4004                 return;
4005
4006         /* It must be a TCP or UDP packet with a valid checksum;
4007          * E1000_RXD_STAT_TCPCS was checked above, so the hardware has
4008          * already verified the TCP/UDP checksum.
4009          */
4010         skb->ip_summed = CHECKSUM_UNNECESSARY;
4011         adapter->hw_csum_good++;
4012 }
4013
4014 /**
4015  * e1000_consume_page - helper function for jumbo Rx path
4016  **/
4017 static void e1000_consume_page(struct e1000_rx_buffer *bi, struct sk_buff *skb,
4018                                u16 length)
4019 {
4020         bi->rxbuf.page = NULL;
4021         skb->len += length;
4022         skb->data_len += length;
4023         skb->truesize += PAGE_SIZE;
4024 }
4025
4026 /**
4027  * e1000_receive_skb - helper function to handle rx indications
4028  * @adapter: board private structure
4029  * @status: descriptor status field as written by hardware
4030  * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
4031  * @skb: pointer to sk_buff to be indicated to stack
4032  */
4033 static void e1000_receive_skb(struct e1000_adapter *adapter, u8 status,
4034                               __le16 vlan, struct sk_buff *skb)
4035 {
4036         skb->protocol = eth_type_trans(skb, adapter->netdev);
4037
4038         if (status & E1000_RXD_STAT_VP) {
4039                 u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
4040
4041                 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
4042         }
4043         napi_gro_receive(&adapter->napi, skb);
4044 }
4045
4046 /**
4047  * e1000_tbi_adjust_stats - adjust stats when a frame is accepted by TBI_ACCEPT
4048  * @hw: Struct containing variables accessed by shared code
4049  * @stats: Struct containing the hardware statistics counters to adjust
4050  * @frame_len: The length of the frame in question
4051  * @mac_addr: The Ethernet destination address of the frame in question
4052  *
4053  */
4054 static void e1000_tbi_adjust_stats(struct e1000_hw *hw,
4055                                    struct e1000_hw_stats *stats,
4056                                    u32 frame_len, const u8 *mac_addr)
4057 {
4058         u64 carry_bit;
4059
4060         /* First adjust the frame length. */
4061         frame_len--;
4062         /* We need to adjust the statistics counters, since the hardware
4063          * counters overcount this packet as a CRC error and undercount
4064          * the packet as a good packet
4065          */
4066         /* This packet should not be counted as a CRC error. */
4067         stats->crcerrs--;
4068         /* This packet does count as a Good Packet Received. */
4069         stats->gprc++;
4070
4071         /* Adjust the Good Octets received counters */
4072         carry_bit = 0x80000000 & stats->gorcl;
4073         stats->gorcl += frame_len;
4074         /* If the high bit of Gorcl (the low 32 bits of the Good Octets
4075          * Received Count) was one before the addition,
4076          * AND it is zero after, then we lost the carry out,
4077          * need to add one to Gorch (Good Octets Received Count High).
4078          * This could be simplified if all environments supported
4079          * 64-bit integers.
4080          */
4081         if (carry_bit && ((stats->gorcl & 0x80000000) == 0))
4082                 stats->gorch++;
4083         /* Is this a broadcast or multicast?  Check broadcast first,
4084          * since the test for a multicast frame will test positive on
4085          * a broadcast frame.
4086          */
4087         if (is_broadcast_ether_addr(mac_addr))
4088                 stats->bprc++;
4089         else if (is_multicast_ether_addr(mac_addr))
4090                 stats->mprc++;
4091
4092         if (frame_len == hw->max_frame_size) {
4093                 /* In this case, the hardware has overcounted the number of
4094                  * oversize frames.
4095                  */
4096                 if (stats->roc > 0)
4097                         stats->roc--;
4098         }
4099
4100         /* Adjust the bin counters when the extra byte put the frame in the
4101          * wrong bin. Remember that the frame_len was adjusted above.
4102          */
4103         if (frame_len == 64) {
4104                 stats->prc64++;
4105                 stats->prc127--;
4106         } else if (frame_len == 127) {
4107                 stats->prc127++;
4108                 stats->prc255--;
4109         } else if (frame_len == 255) {
4110                 stats->prc255++;
4111                 stats->prc511--;
4112         } else if (frame_len == 511) {
4113                 stats->prc511++;
4114                 stats->prc1023--;
4115         } else if (frame_len == 1023) {
4116                 stats->prc1023++;
4117                 stats->prc1522--;
4118         } else if (frame_len == 1522) {
4119                 stats->prc1522++;
4120         }
4121 }
4122
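     /* On TBI/serdes links the hardware can flag an otherwise good frame as a
      * receive error (see the TBI_ACCEPT workaround in the shared code).  When
      * such a frame is accepted here, the statistics are corrected under
      * stats_lock and the caller is expected to drop the trailing byte.
      */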
4123 static bool e1000_tbi_should_accept(struct e1000_adapter *adapter,
4124                                     u8 status, u8 errors,
4125                                     u32 length, const u8 *data)
4126 {
4127         struct e1000_hw *hw = &adapter->hw;
4128         u8 last_byte = *(data + length - 1);
4129
4130         if (TBI_ACCEPT(hw, status, errors, length, last_byte)) {
4131                 unsigned long irq_flags;
4132
4133                 spin_lock_irqsave(&adapter->stats_lock, irq_flags);
4134                 e1000_tbi_adjust_stats(hw, &adapter->stats, length, data);
4135                 spin_unlock_irqrestore(&adapter->stats_lock, irq_flags);
4136
4137                 return true;
4138         }
4139
4140         return false;
4141 }
4142
4143 static struct sk_buff *e1000_alloc_rx_skb(struct e1000_adapter *adapter,
4144                                           unsigned int bufsz)
4145 {
4146         struct sk_buff *skb = napi_alloc_skb(&adapter->napi, bufsz);
4147
4148         if (unlikely(!skb))
4149                 adapter->alloc_rx_buff_failed++;
4150         return skb;
4151 }
4152
4153 /**
4154  * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy
4155  * @adapter: board private structure
4156  * @rx_ring: ring to clean
4157  * @work_done: amount of napi work completed this call
4158  * @work_to_do: max amount of work allowed for this call to do
4159  *
4160  * the return value indicates whether actual cleaning was done; there
4161  * is no guarantee that everything was cleaned
4162  */
4163 static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
4164                                      struct e1000_rx_ring *rx_ring,
4165                                      int *work_done, int work_to_do)
4166 {
4167         struct net_device *netdev = adapter->netdev;
4168         struct pci_dev *pdev = adapter->pdev;
4169         struct e1000_rx_desc *rx_desc, *next_rxd;
4170         struct e1000_rx_buffer *buffer_info, *next_buffer;
4171         u32 length;
4172         unsigned int i;
4173         int cleaned_count = 0;
4174         bool cleaned = false;
4175         unsigned int total_rx_bytes = 0, total_rx_packets = 0;
4176
4177         i = rx_ring->next_to_clean;
4178         rx_desc = E1000_RX_DESC(*rx_ring, i);
4179         buffer_info = &rx_ring->buffer_info[i];
4180
4181         while (rx_desc->status & E1000_RXD_STAT_DD) {
4182                 struct sk_buff *skb;
4183                 u8 status;
4184
4185                 if (*work_done >= work_to_do)
4186                         break;
4187                 (*work_done)++;
4188                 dma_rmb(); /* read descriptor and rx_buffer_info after status DD */
4189
4190                 status = rx_desc->status;
4191
4192                 if (++i == rx_ring->count)
4193                         i = 0;
4194
4195                 next_rxd = E1000_RX_DESC(*rx_ring, i);
4196                 prefetch(next_rxd);
4197
4198                 next_buffer = &rx_ring->buffer_info[i];
4199
4200                 cleaned = true;
4201                 cleaned_count++;
4202                 dma_unmap_page(&pdev->dev, buffer_info->dma,
4203                                adapter->rx_buffer_len, DMA_FROM_DEVICE);
4204                 buffer_info->dma = 0;
4205
4206                 length = le16_to_cpu(rx_desc->length);
4207
4208                 /* errors is only valid for DD + EOP descriptors */
4209                 if (unlikely((status & E1000_RXD_STAT_EOP) &&
4210                     (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK))) {
4211                         u8 *mapped = page_address(buffer_info->rxbuf.page);
4212
4213                         if (e1000_tbi_should_accept(adapter, status,
4214                                                     rx_desc->errors,
4215                                                     length, mapped)) {
4216                                 length--;
4217                         } else if (netdev->features & NETIF_F_RXALL) {
4218                                 goto process_skb;
4219                         } else {
4220                                 /* an error means any chain goes out the window
4221                                  * too
4222                                  */
4223                                 if (rx_ring->rx_skb_top)
4224                                         dev_kfree_skb(rx_ring->rx_skb_top);
4225                                 rx_ring->rx_skb_top = NULL;
4226                                 goto next_desc;
4227                         }
4228                 }
4229
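                     /* A jumbo frame can span several descriptors.  Partial
                      * frames are parked in rx_ring->rx_skb_top (aliased to
                      * rxtop below): each non-EOP buffer page is attached as a
                      * page fragment, and the skb is only handed to the stack
                      * once the descriptor carrying EOP arrives.
                      */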
4230 #define rxtop rx_ring->rx_skb_top
4231 process_skb:
4232                 if (!(status & E1000_RXD_STAT_EOP)) {
4233                         /* this descriptor is only the beginning (or middle) */
4234                         if (!rxtop) {
4235                                 /* this is the beginning of a chain */
4236                                 rxtop = napi_get_frags(&adapter->napi);
4237                                 if (!rxtop)
4238                                         break;
4239
4240                                 skb_fill_page_desc(rxtop, 0,
4241                                                    buffer_info->rxbuf.page,
4242                                                    0, length);
4243                         } else {
4244                                 /* this is the middle of a chain */
4245                                 skb_fill_page_desc(rxtop,
4246                                     skb_shinfo(rxtop)->nr_frags,
4247                                     buffer_info->rxbuf.page, 0, length);
4248                         }
4249                         e1000_consume_page(buffer_info, rxtop, length);
4250                         goto next_desc;
4251                 } else {
4252                         if (rxtop) {
4253                                 /* end of the chain */
4254                                 skb_fill_page_desc(rxtop,
4255                                     skb_shinfo(rxtop)->nr_frags,
4256                                     buffer_info->rxbuf.page, 0, length);
4257                                 skb = rxtop;
4258                                 rxtop = NULL;
4259                                 e1000_consume_page(buffer_info, skb, length);
4260                         } else {
4261                                 struct page *p;
4262                                 /* no chain, got EOP, this buf is the whole packet;
4263                                  * use copybreak to save the put_page/alloc_page
4264                                  */
4265                                 p = buffer_info->rxbuf.page;
4266                                 if (length <= copybreak) {
4267                                         u8 *vaddr;
4268
4269                                         if (likely(!(netdev->features & NETIF_F_RXFCS)))
4270                                                 length -= 4;
4271                                         skb = e1000_alloc_rx_skb(adapter,
4272                                                                  length);
4273                                         if (!skb)
4274                                                 break;
4275
4276                                         vaddr = kmap_atomic(p);
4277                                         memcpy(skb_tail_pointer(skb), vaddr,
4278                                                length);
4279                                         kunmap_atomic(vaddr);
4280                                         /* re-use the page, so don't erase
4281                                          * buffer_info->rxbuf.page
4282                                          */
4283                                         skb_put(skb, length);
4284                                         e1000_rx_checksum(adapter,
4285                                                           status | rx_desc->errors << 24,
4286                                                           le16_to_cpu(rx_desc->csum), skb);
4287
4288                                         total_rx_bytes += skb->len;
4289                                         total_rx_packets++;
4290
4291                                         e1000_receive_skb(adapter, status,
4292                                                           rx_desc->special, skb);
4293                                         goto next_desc;
4294                                 } else {
4295                                         skb = napi_get_frags(&adapter->napi);
4296                                         if (!skb) {
4297                                                 adapter->alloc_rx_buff_failed++;
4298                                                 break;
4299                                         }
4300                                         skb_fill_page_desc(skb, 0, p, 0,
4301                                                            length);
4302                                         e1000_consume_page(buffer_info, skb,
4303                                                            length);
4304                                 }
4305                         }
4306                 }
4307
4308                 /* Receive Checksum Offload XXX recompute due to CRC strip? */
4309                 e1000_rx_checksum(adapter,
4310                                   (u32)(status) |
4311                                   ((u32)(rx_desc->errors) << 24),
4312                                   le16_to_cpu(rx_desc->csum), skb);
4313
4314                 total_rx_bytes += (skb->len - 4); /* don't count FCS */
4315                 if (likely(!(netdev->features & NETIF_F_RXFCS)))
4316                         pskb_trim(skb, skb->len - 4);
4317                 total_rx_packets++;
4318
4319                 if (status & E1000_RXD_STAT_VP) {
4320                         __le16 vlan = rx_desc->special;
4321                         u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
4322
4323                         __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
4324                 }
4325
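                     /* Frag-based skbs obtained from napi_get_frags() are
                      * completed here; napi_gro_frags() reads the Ethernet
                      * header out of the first fragment and sets the packet
                      * protocol before running GRO on the frame.
                      */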
4326                 napi_gro_frags(&adapter->napi);
4327
4328 next_desc:
4329                 rx_desc->status = 0;
4330
4331                 /* return some buffers to hardware, one at a time is too slow */
4332                 if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
4333                         adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4334                         cleaned_count = 0;
4335                 }
4336
4337                 /* use prefetched values */
4338                 rx_desc = next_rxd;
4339                 buffer_info = next_buffer;
4340         }
4341         rx_ring->next_to_clean = i;
4342
4343         cleaned_count = E1000_DESC_UNUSED(rx_ring);
4344         if (cleaned_count)
4345                 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4346
4347         adapter->total_rx_packets += total_rx_packets;
4348         adapter->total_rx_bytes += total_rx_bytes;
4349         netdev->stats.rx_bytes += total_rx_bytes;
4350         netdev->stats.rx_packets += total_rx_packets;
4351         return cleaned;
4352 }
4353
4354 /* this should improve performance for small packets with large amounts
4355  * of reassembly being done in the stack
4356  */
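     /* Frames no longer than the copybreak threshold (a module parameter,
      * 256 bytes by default) are copied into a small freshly allocated skb so
      * the original DMA buffer stays mapped and can be handed straight back to
      * the hardware.
      */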
4357 static struct sk_buff *e1000_copybreak(struct e1000_adapter *adapter,
4358                                        struct e1000_rx_buffer *buffer_info,
4359                                        u32 length, const void *data)
4360 {
4361         struct sk_buff *skb;
4362
4363         if (length > copybreak)
4364                 return NULL;
4365
4366         skb = e1000_alloc_rx_skb(adapter, length);
4367         if (!skb)
4368                 return NULL;
4369
4370         dma_sync_single_for_cpu(&adapter->pdev->dev, buffer_info->dma,
4371                                 length, DMA_FROM_DEVICE);
4372
4373         skb_put_data(skb, data, length);
4374
4375         return skb;
4376 }
4377
4378 /**
4379  * e1000_clean_rx_irq - Send received data up the network stack; legacy
4380  * @adapter: board private structure
4381  * @rx_ring: ring to clean
4382  * @work_done: amount of napi work completed this call
4383  * @work_to_do: max amount of work allowed for this call to do
4384  */
4385 static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
4386                                struct e1000_rx_ring *rx_ring,
4387                                int *work_done, int work_to_do)
4388 {
4389         struct net_device *netdev = adapter->netdev;
4390         struct pci_dev *pdev = adapter->pdev;
4391         struct e1000_rx_desc *rx_desc, *next_rxd;
4392         struct e1000_rx_buffer *buffer_info, *next_buffer;
4393         u32 length;
4394         unsigned int i;
4395         int cleaned_count = 0;
4396         bool cleaned = false;
4397         unsigned int total_rx_bytes = 0, total_rx_packets = 0;
4398
4399         i = rx_ring->next_to_clean;
4400         rx_desc = E1000_RX_DESC(*rx_ring, i);
4401         buffer_info = &rx_ring->buffer_info[i];
4402
4403         while (rx_desc->status & E1000_RXD_STAT_DD) {
4404                 struct sk_buff *skb;
4405                 u8 *data;
4406                 u8 status;
4407
4408                 if (*work_done >= work_to_do)
4409                         break;
4410                 (*work_done)++;
4411                 dma_rmb(); /* read descriptor and rx_buffer_info after status DD */
4412
4413                 status = rx_desc->status;
4414                 length = le16_to_cpu(rx_desc->length);
4415
4416                 data = buffer_info->rxbuf.data;
4417                 prefetch(data);
4418                 skb = e1000_copybreak(adapter, buffer_info, length, data);
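                     /* No copybreak: wrap the existing receive buffer in an
                      * skb with build_skb() (no copy), unmap it, and let the
                      * refill path allocate a replacement fragment later.
                      */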
4419                 if (!skb) {
4420                         unsigned int frag_len = e1000_frag_len(adapter);
4421
4422                         skb = build_skb(data - E1000_HEADROOM, frag_len);
4423                         if (!skb) {
4424                                 adapter->alloc_rx_buff_failed++;
4425                                 break;
4426                         }
4427
4428                         skb_reserve(skb, E1000_HEADROOM);
4429                         dma_unmap_single(&pdev->dev, buffer_info->dma,
4430                                          adapter->rx_buffer_len,
4431                                          DMA_FROM_DEVICE);
4432                         buffer_info->dma = 0;
4433                         buffer_info->rxbuf.data = NULL;
4434                 }
4435
4436                 if (++i == rx_ring->count)
4437                         i = 0;
4438
4439                 next_rxd = E1000_RX_DESC(*rx_ring, i);
4440                 prefetch(next_rxd);
4441
4442                 next_buffer = &rx_ring->buffer_info[i];
4443
4444                 cleaned = true;
4445                 cleaned_count++;
4446
4447                 /* !EOP means multiple descriptors were used to store a single
4448                  * packet, if that's the case we need to toss it.  In fact, we
4449                  * need to toss every packet with the EOP bit clear and the next
4450                  * frame that _does_ have the EOP bit set, as it is by
4451                  * definition only a frame fragment
4452                  */
4453                 if (unlikely(!(status & E1000_RXD_STAT_EOP)))
4454                         adapter->discarding = true;
4455
4456                 if (adapter->discarding) {
4457                         /* All receives must fit into a single buffer */
4458                         netdev_dbg(netdev, "Receive packet consumed multiple buffers\n");
4459                         dev_kfree_skb(skb);
4460                         if (status & E1000_RXD_STAT_EOP)
4461                                 adapter->discarding = false;
4462                         goto next_desc;
4463                 }
4464
4465                 if (unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) {
4466                         if (e1000_tbi_should_accept(adapter, status,
4467                                                     rx_desc->errors,
4468                                                     length, data)) {
4469                                 length--;
4470                         } else if (netdev->features & NETIF_F_RXALL) {
4471                                 goto process_skb;
4472                         } else {
4473                                 dev_kfree_skb(skb);
4474                                 goto next_desc;
4475                         }
4476                 }
4477
4478 process_skb:
4479                 total_rx_bytes += (length - 4); /* don't count FCS */
4480                 total_rx_packets++;
4481
4482                 if (likely(!(netdev->features & NETIF_F_RXFCS)))
4483                         /* adjust length to remove Ethernet CRC, this must be
4484                          * done after the TBI_ACCEPT workaround above
4485                          */
4486                         length -= 4;
4487
4488                 if (buffer_info->rxbuf.data == NULL)
4489                         skb_put(skb, length);
4490                 else /* copybreak skb */
4491                         skb_trim(skb, length);
4492
4493                 /* Receive Checksum Offload */
4494                 e1000_rx_checksum(adapter,
4495                                   (u32)(status) |
4496                                   ((u32)(rx_desc->errors) << 24),
4497                                   le16_to_cpu(rx_desc->csum), skb);
4498
4499                 e1000_receive_skb(adapter, status, rx_desc->special, skb);
4500
4501 next_desc:
4502                 rx_desc->status = 0;
4503
4504                 /* return some buffers to hardware, one at a time is too slow */
4505                 if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
4506                         adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4507                         cleaned_count = 0;
4508                 }
4509
4510                 /* use prefetched values */
4511                 rx_desc = next_rxd;
4512                 buffer_info = next_buffer;
4513         }
4514         rx_ring->next_to_clean = i;
4515
4516         cleaned_count = E1000_DESC_UNUSED(rx_ring);
4517         if (cleaned_count)
4518                 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4519
4520         adapter->total_rx_packets += total_rx_packets;
4521         adapter->total_rx_bytes += total_rx_bytes;
4522         netdev->stats.rx_bytes += total_rx_bytes;
4523         netdev->stats.rx_packets += total_rx_packets;
4524         return cleaned;
4525 }
4526
4527 /**
4528  * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers
4529  * @adapter: address of board private structure
4530  * @rx_ring: pointer to receive ring structure
4531  * @cleaned_count: number of buffers to allocate this pass
4532  **/
4533 static void
4534 e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
4535                              struct e1000_rx_ring *rx_ring, int cleaned_count)
4536 {
4537         struct pci_dev *pdev = adapter->pdev;
4538         struct e1000_rx_desc *rx_desc;
4539         struct e1000_rx_buffer *buffer_info;
4540         unsigned int i;
4541
4542         i = rx_ring->next_to_use;
4543         buffer_info = &rx_ring->buffer_info[i];
4544
4545         while (cleaned_count--) {
4546                 /* allocate a new page if necessary */
4547                 if (!buffer_info->rxbuf.page) {
4548                         buffer_info->rxbuf.page = alloc_page(GFP_ATOMIC);
4549                         if (unlikely(!buffer_info->rxbuf.page)) {
4550                                 adapter->alloc_rx_buff_failed++;
4551                                 break;
4552                         }
4553                 }
4554
4555                 if (!buffer_info->dma) {
4556                         buffer_info->dma = dma_map_page(&pdev->dev,
4557                                                         buffer_info->rxbuf.page, 0,
4558                                                         adapter->rx_buffer_len,
4559                                                         DMA_FROM_DEVICE);
4560                         if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
4561                                 put_page(buffer_info->rxbuf.page);
4562                                 buffer_info->rxbuf.page = NULL;
4563                                 buffer_info->dma = 0;
4564                                 adapter->alloc_rx_buff_failed++;
4565                                 break;
4566                         }
4567                 }
4568
4569                 rx_desc = E1000_RX_DESC(*rx_ring, i);
4570                 rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
4571
4572                 if (unlikely(++i == rx_ring->count))
4573                         i = 0;
4574                 buffer_info = &rx_ring->buffer_info[i];
4575         }
4576
4577         if (likely(rx_ring->next_to_use != i)) {
4578                 rx_ring->next_to_use = i;
4579                 if (unlikely(i-- == 0))
4580                         i = (rx_ring->count - 1);
4581
4582                 /* Force memory writes to complete before letting h/w
4583                  * know there are new descriptors to fetch.  (Only
4584                  * applicable for weak-ordered memory model archs,
4585                  * such as IA-64).
4586                  */
4587                 wmb();
4588                 writel(i, adapter->hw.hw_addr + rx_ring->rdt);
4589         }
4590 }
4591
4592 /**
4593  * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended
4594  * @adapter: address of board private structure
      * @rx_ring: pointer to receive ring structure
      * @cleaned_count: number of buffers to allocate this pass
4595  **/
4596 static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
4597                                    struct e1000_rx_ring *rx_ring,
4598                                    int cleaned_count)
4599 {
4600         struct e1000_hw *hw = &adapter->hw;
4601         struct pci_dev *pdev = adapter->pdev;
4602         struct e1000_rx_desc *rx_desc;
4603         struct e1000_rx_buffer *buffer_info;
4604         unsigned int i;
4605         unsigned int bufsz = adapter->rx_buffer_len;
4606
4607         i = rx_ring->next_to_use;
4608         buffer_info = &rx_ring->buffer_info[i];
4609
4610         while (cleaned_count--) {
4611                 void *data;
4612
4613                 if (buffer_info->rxbuf.data)
4614                         goto skip;
4615
4616                 data = e1000_alloc_frag(adapter);
4617                 if (!data) {
4618                         /* Better luck next round */
4619                         adapter->alloc_rx_buff_failed++;
4620                         break;
4621                 }
4622
4623                 /* Fix for errata 23, can't cross 64kB boundary */
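                     /* Both the CPU address of the fragment and, further down,
                      * the DMA address it maps to are checked against the 64kB
                      * rule; on a failure a second fragment is allocated before
                      * the first is freed, so the allocator cannot hand back
                      * the same offending region.
                      */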
4624                 if (!e1000_check_64k_bound(adapter, data, bufsz)) {
4625                         void *olddata = data;
4626                         e_err(rx_err, "skb align check failed: %u bytes at %p\n",
4627                               bufsz, data);
4628                         /* Try again, without freeing the previous */
4629                         data = e1000_alloc_frag(adapter);
4630                         /* Failed allocation, critical failure */
4631                         if (!data) {
4632                                 skb_free_frag(olddata);
4633                                 adapter->alloc_rx_buff_failed++;
4634                                 break;
4635                         }
4636
4637                         if (!e1000_check_64k_bound(adapter, data, bufsz)) {
4638                                 /* give up */
4639                                 skb_free_frag(data);
4640                                 skb_free_frag(olddata);
4641                                 adapter->alloc_rx_buff_failed++;
4642                                 break;
4643                         }
4644
4645                         /* Use new allocation */
4646                         skb_free_frag(olddata);
4647                 }
4648                 buffer_info->dma = dma_map_single(&pdev->dev,
4649                                                   data,
4650                                                   adapter->rx_buffer_len,
4651                                                   DMA_FROM_DEVICE);
4652                 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
4653                         skb_free_frag(data);
4654                         buffer_info->dma = 0;
4655                         adapter->alloc_rx_buff_failed++;
4656                         break;
4657                 }
4658
4659                 /* XXX if it was allocated cleanly it will never map to a
4660                  * boundary crossing
4661                  */
4662
4663                 /* Fix for errata 23, can't cross 64kB boundary */
4664                 if (!e1000_check_64k_bound(adapter,
4665                                         (void *)(unsigned long)buffer_info->dma,
4666                                         adapter->rx_buffer_len)) {
4667                         e_err(rx_err, "dma align check failed: %u bytes at %p\n",
4668                               adapter->rx_buffer_len,
4669                               (void *)(unsigned long)buffer_info->dma);
4670
4671                         dma_unmap_single(&pdev->dev, buffer_info->dma,
4672                                          adapter->rx_buffer_len,
4673                                          DMA_FROM_DEVICE);
4674
4675                         skb_free_frag(data);
4676                         buffer_info->rxbuf.data = NULL;
4677                         buffer_info->dma = 0;
4678
4679                         adapter->alloc_rx_buff_failed++;
4680                         break;
4681                 }
4682                 buffer_info->rxbuf.data = data;
4683  skip:
4684                 rx_desc = E1000_RX_DESC(*rx_ring, i);
4685                 rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
4686
4687                 if (unlikely(++i == rx_ring->count))
4688                         i = 0;
4689                 buffer_info = &rx_ring->buffer_info[i];
4690         }
4691
4692         if (likely(rx_ring->next_to_use != i)) {
4693                 rx_ring->next_to_use = i;
4694                 if (unlikely(i-- == 0))
4695                         i = (rx_ring->count - 1);
4696
4697                 /* Force memory writes to complete before letting h/w
4698                  * know there are new descriptors to fetch.  (Only
4699                  * applicable for weak-ordered memory model archs,
4700                  * such as IA-64).
4701                  */
4702                 wmb();
4703                 writel(i, hw->hw_addr + rx_ring->rdt);
4704         }
4705 }
4706
4707 /**
4708  * e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers.
4709  * @adapter: board private structure
4710  **/
4711 static void e1000_smartspeed(struct e1000_adapter *adapter)
4712 {
4713         struct e1000_hw *hw = &adapter->hw;
4714         u16 phy_status;
4715         u16 phy_ctrl;
4716
4717         if ((hw->phy_type != e1000_phy_igp) || !hw->autoneg ||
4718            !(hw->autoneg_advertised & ADVERTISE_1000_FULL))
4719                 return;
4720
4721         if (adapter->smartspeed == 0) {
4722                 /* If Master/Slave config fault is asserted twice,
4723                  * we assume back-to-back
4724                  */
4725                 e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
4726                 if (!(phy_status & SR_1000T_MS_CONFIG_FAULT))
4727                         return;
4728                 e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
4729                 if (!(phy_status & SR_1000T_MS_CONFIG_FAULT))
4730                         return;
4731                 e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
4732                 if (phy_ctrl & CR_1000T_MS_ENABLE) {
4733                         phy_ctrl &= ~CR_1000T_MS_ENABLE;
4734                         e1000_write_phy_reg(hw, PHY_1000T_CTRL,
4735                                             phy_ctrl);
4736                         adapter->smartspeed++;
4737                         if (!e1000_phy_setup_autoneg(hw) &&
4738                            !e1000_read_phy_reg(hw, PHY_CTRL,
4739                                                &phy_ctrl)) {
4740                                 phy_ctrl |= (MII_CR_AUTO_NEG_EN |
4741                                              MII_CR_RESTART_AUTO_NEG);
4742                                 e1000_write_phy_reg(hw, PHY_CTRL,
4743                                                     phy_ctrl);
4744                         }
4745                 }
4746                 return;
4747         } else if (adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) {
4748                 /* If still no link, perhaps using 2/3 pair cable */
4749                 e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
4750                 phy_ctrl |= CR_1000T_MS_ENABLE;
4751                 e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_ctrl);
4752                 if (!e1000_phy_setup_autoneg(hw) &&
4753                    !e1000_read_phy_reg(hw, PHY_CTRL, &phy_ctrl)) {
4754                         phy_ctrl |= (MII_CR_AUTO_NEG_EN |
4755                                      MII_CR_RESTART_AUTO_NEG);
4756                         e1000_write_phy_reg(hw, PHY_CTRL, phy_ctrl);
4757                 }
4758         }
4759         /* Restart process after E1000_SMARTSPEED_MAX iterations */
4760         if (adapter->smartspeed++ == E1000_SMARTSPEED_MAX)
4761                 adapter->smartspeed = 0;
4762 }
4763
4764 /**
4765  * e1000_ioctl - handle ioctl requests
4766  * @netdev: pointer to the network interface device structure
4767  * @ifr: pointer to the interface request structure
4768  * @cmd: ioctl command to execute
4769  **/
4770 static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
4771 {
4772         switch (cmd) {
4773         case SIOCGMIIPHY:
4774         case SIOCGMIIREG:
4775         case SIOCSMIIREG:
4776                 return e1000_mii_ioctl(netdev, ifr, cmd);
4777         default:
4778                 return -EOPNOTSUPP;
4779         }
4780 }
4781
4782 /**
4783  * e1000_mii_ioctl - handle the MII register ioctls
4784  * @netdev: pointer to the network interface device structure
4785  * @ifr: pointer to the interface request structure
4786  * @cmd: ioctl command to execute (SIOCGMIIPHY, SIOCGMIIREG, SIOCSMIIREG)
4787  **/
4788 static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
4789                            int cmd)
4790 {
4791         struct e1000_adapter *adapter = netdev_priv(netdev);
4792         struct e1000_hw *hw = &adapter->hw;
4793         struct mii_ioctl_data *data = if_mii(ifr);
4794         int retval;
4795         u16 mii_reg;
4796         unsigned long flags;
4797
4798         if (hw->media_type != e1000_media_type_copper)
4799                 return -EOPNOTSUPP;
4800
4801         switch (cmd) {
4802         case SIOCGMIIPHY:
4803                 data->phy_id = hw->phy_addr;
4804                 break;
4805         case SIOCGMIIREG:
4806                 spin_lock_irqsave(&adapter->stats_lock, flags);
4807                 if (e1000_read_phy_reg(hw, data->reg_num & 0x1F,
4808                                    &data->val_out)) {
4809                         spin_unlock_irqrestore(&adapter->stats_lock, flags);
4810                         return -EIO;
4811                 }
4812                 spin_unlock_irqrestore(&adapter->stats_lock, flags);
4813                 break;
4814         case SIOCSMIIREG:
4815                 if (data->reg_num & ~(0x1F))
4816                         return -EFAULT;
4817                 mii_reg = data->val_in;
4818                 spin_lock_irqsave(&adapter->stats_lock, flags);
4819                 if (e1000_write_phy_reg(hw, data->reg_num,
4820                                         mii_reg)) {
4821                         spin_unlock_irqrestore(&adapter->stats_lock, flags);
4822                         return -EIO;
4823                 }
4824                 spin_unlock_irqrestore(&adapter->stats_lock, flags);
4825                 if (hw->media_type == e1000_media_type_copper) {
4826                         switch (data->reg_num) {
4827                         case PHY_CTRL:
4828                                 if (mii_reg & MII_CR_POWER_DOWN)
4829                                         break;
4830                                 if (mii_reg & MII_CR_AUTO_NEG_EN) {
4831                                         hw->autoneg = 1;
4832                                         hw->autoneg_advertised = 0x2F;
4833                                 } else {
4834                                         u32 speed;
4835                                         if (mii_reg & 0x40)
4836                                                 speed = SPEED_1000;
4837                                         else if (mii_reg & 0x2000)
4838                                                 speed = SPEED_100;
4839                                         else
4840                                                 speed = SPEED_10;
4841                                         retval = e1000_set_spd_dplx(
4842                                                 adapter, speed,
4843                                                 ((mii_reg & 0x100)
4844                                                  ? DUPLEX_FULL :
4845                                                  DUPLEX_HALF));
4846                                         if (retval)
4847                                                 return retval;
4848                                 }
4849                                 if (netif_running(adapter->netdev))
4850                                         e1000_reinit_locked(adapter);
4851                                 else
4852                                         e1000_reset(adapter);
4853                                 break;
4854                         case M88E1000_PHY_SPEC_CTRL:
4855                         case M88E1000_EXT_PHY_SPEC_CTRL:
4856                                 if (e1000_phy_reset(hw))
4857                                         return -EIO;
4858                                 break;
4859                         }
4860                 } else {
4861                         switch (data->reg_num) {
4862                         case PHY_CTRL:
4863                                 if (mii_reg & MII_CR_POWER_DOWN)
4864                                         break;
4865                                 if (netif_running(adapter->netdev))
4866                                         e1000_reinit_locked(adapter);
4867                                 else
4868                                         e1000_reset(adapter);
4869                                 break;
4870                         }
4871                 }
4872                 break;
4873         default:
4874                 return -EOPNOTSUPP;
4875         }
4876         return E1000_SUCCESS;
4877 }
4878
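     /* The wrappers below let the shared e1000_hw code, which only carries a
      * struct e1000_hw, reach PCI configuration helpers through hw->back (the
      * owning adapter) without knowing about struct pci_dev.
      */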
4879 void e1000_pci_set_mwi(struct e1000_hw *hw)
4880 {
4881         struct e1000_adapter *adapter = hw->back;
4882         int ret_val = pci_set_mwi(adapter->pdev);
4883
4884         if (ret_val)
4885                 e_err(probe, "Error in setting MWI\n");
4886 }
4887
4888 void e1000_pci_clear_mwi(struct e1000_hw *hw)
4889 {
4890         struct e1000_adapter *adapter = hw->back;
4891
4892         pci_clear_mwi(adapter->pdev);
4893 }
4894
4895 int e1000_pcix_get_mmrbc(struct e1000_hw *hw)
4896 {
4897         struct e1000_adapter *adapter = hw->back;
4898         return pcix_get_mmrbc(adapter->pdev);
4899 }
4900
4901 void e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc)
4902 {
4903         struct e1000_adapter *adapter = hw->back;
4904         pcix_set_mmrbc(adapter->pdev, mmrbc);
4905 }
4906
4907 void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value)
4908 {
4909         outl(value, port);
4910 }
4911
4912 static bool e1000_vlan_used(struct e1000_adapter *adapter)
4913 {
4914         u16 vid;
4915
4916         for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
4917                 return true;
4918         return false;
4919 }
4920
4921 static void __e1000_vlan_mode(struct e1000_adapter *adapter,
4922                               netdev_features_t features)
4923 {
4924         struct e1000_hw *hw = &adapter->hw;
4925         u32 ctrl;
4926
4927         ctrl = er32(CTRL);
4928         if (features & NETIF_F_HW_VLAN_CTAG_RX) {
4929                 /* enable VLAN tag insert/strip */
4930                 ctrl |= E1000_CTRL_VME;
4931         } else {
4932                 /* disable VLAN tag insert/strip */
4933                 ctrl &= ~E1000_CTRL_VME;
4934         }
4935         ew32(CTRL, ctrl);
4936 }
4937 static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
4938                                      bool filter_on)
4939 {
4940         struct e1000_hw *hw = &adapter->hw;
4941         u32 rctl;
4942
4943         if (!test_bit(__E1000_DOWN, &adapter->flags))
4944                 e1000_irq_disable(adapter);
4945
4946         __e1000_vlan_mode(adapter, adapter->netdev->features);
4947         if (filter_on) {
4948                 /* enable VLAN receive filtering */
4949                 rctl = er32(RCTL);
4950                 rctl &= ~E1000_RCTL_CFIEN;
4951                 if (!(adapter->netdev->flags & IFF_PROMISC))
4952                         rctl |= E1000_RCTL_VFE;
4953                 ew32(RCTL, rctl);
4954                 e1000_update_mng_vlan(adapter);
4955         } else {
4956                 /* disable VLAN receive filtering */
4957                 rctl = er32(RCTL);
4958                 rctl &= ~E1000_RCTL_VFE;
4959                 ew32(RCTL, rctl);
4960         }
4961
4962         if (!test_bit(__E1000_DOWN, &adapter->flags))
4963                 e1000_irq_enable(adapter);
4964 }
4965
4966 static void e1000_vlan_mode(struct net_device *netdev,
4967                             netdev_features_t features)
4968 {
4969         struct e1000_adapter *adapter = netdev_priv(netdev);
4970
4971         if (!test_bit(__E1000_DOWN, &adapter->flags))
4972                 e1000_irq_disable(adapter);
4973
4974         __e1000_vlan_mode(adapter, features);
4975
4976         if (!test_bit(__E1000_DOWN, &adapter->flags))
4977                 e1000_irq_enable(adapter);
4978 }
4979
4980 static int e1000_vlan_rx_add_vid(struct net_device *netdev,
4981                                  __be16 proto, u16 vid)
4982 {
4983         struct e1000_adapter *adapter = netdev_priv(netdev);
4984         struct e1000_hw *hw = &adapter->hw;
4985         u32 vfta, index;
4986
4987         if ((hw->mng_cookie.status &
4988              E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
4989             (vid == adapter->mng_vlan_id))
4990                 return 0;
4991
4992         if (!e1000_vlan_used(adapter))
4993                 e1000_vlan_filter_on_off(adapter, true);
4994
4995         /* add VID to filter table */
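             /* The VFTA is a 128-entry array of 32-bit registers, one bit per
              * VLAN id: e.g. VID 100 lives in register 3 (100 >> 5), bit 4
              * (100 & 0x1f).
              */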
4996         index = (vid >> 5) & 0x7F;
4997         vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
4998         vfta |= (1 << (vid & 0x1F));
4999         e1000_write_vfta(hw, index, vfta);
5000
5001         set_bit(vid, adapter->active_vlans);
5002
5003         return 0;
5004 }
5005
5006 static int e1000_vlan_rx_kill_vid(struct net_device *netdev,
5007                                   __be16 proto, u16 vid)
5008 {
5009         struct e1000_adapter *adapter = netdev_priv(netdev);
5010         struct e1000_hw *hw = &adapter->hw;
5011         u32 vfta, index;
5012
5013         if (!test_bit(__E1000_DOWN, &adapter->flags))
5014                 e1000_irq_disable(adapter);
5015         if (!test_bit(__E1000_DOWN, &adapter->flags))
5016                 e1000_irq_enable(adapter);
5017
5018         /* remove VID from filter table */
5019         index = (vid >> 5) & 0x7F;
5020         vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
5021         vfta &= ~(1 << (vid & 0x1F));
5022         e1000_write_vfta(hw, index, vfta);
5023
5024         clear_bit(vid, adapter->active_vlans);
5025
5026         if (!e1000_vlan_used(adapter))
5027                 e1000_vlan_filter_on_off(adapter, false);
5028
5029         return 0;
5030 }
5031
5032 static void e1000_restore_vlan(struct e1000_adapter *adapter)
5033 {
5034         u16 vid;
5035
5036         if (!e1000_vlan_used(adapter))
5037                 return;
5038
5039         e1000_vlan_filter_on_off(adapter, true);
5040         for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
5041                 e1000_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
5042 }
5043
5044 int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx)
5045 {
5046         struct e1000_hw *hw = &adapter->hw;
5047
5048         hw->autoneg = 0;
5049
5050         /* Make sure dplx is at most 1 bit and lsb of speed is not set
5051          * for the switch() below to work
5052          */
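             /* With those constraints spd + dplx is unambiguous: for example
              * SPEED_100 + DUPLEX_FULL == 101 can only mean 100 Mb/s full
              * duplex, matching the case label below.
              */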
5053         if ((spd & 1) || (dplx & ~1))
5054                 goto err_inval;
5055
5056         /* Fiber NICs only allow 1000 Mbps full duplex */
5057         if ((hw->media_type == e1000_media_type_fiber) &&
5058             spd != SPEED_1000 &&
5059             dplx != DUPLEX_FULL)
5060                 goto err_inval;
5061
5062         switch (spd + dplx) {
5063         case SPEED_10 + DUPLEX_HALF:
5064                 hw->forced_speed_duplex = e1000_10_half;
5065                 break;
5066         case SPEED_10 + DUPLEX_FULL:
5067                 hw->forced_speed_duplex = e1000_10_full;
5068                 break;
5069         case SPEED_100 + DUPLEX_HALF:
5070                 hw->forced_speed_duplex = e1000_100_half;
5071                 break;
5072         case SPEED_100 + DUPLEX_FULL:
5073                 hw->forced_speed_duplex = e1000_100_full;
5074                 break;
5075         case SPEED_1000 + DUPLEX_FULL:
5076                 hw->autoneg = 1;
5077                 hw->autoneg_advertised = ADVERTISE_1000_FULL;
5078                 break;
5079         case SPEED_1000 + DUPLEX_HALF: /* not supported */
5080         default:
5081                 goto err_inval;
5082         }
5083
5084         /* clear MDI, MDI(-X) override is only allowed when autoneg enabled */
5085         hw->mdix = AUTO_ALL_MODES;
5086
5087         return 0;
5088
5089 err_inval:
5090         e_err(probe, "Unsupported Speed/Duplex configuration\n");
5091         return -EINVAL;
5092 }
5093
5094 static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
5095 {
5096         struct net_device *netdev = pci_get_drvdata(pdev);
5097         struct e1000_adapter *adapter = netdev_priv(netdev);
5098         struct e1000_hw *hw = &adapter->hw;
5099         u32 ctrl, ctrl_ext, rctl, status;
5100         u32 wufc = adapter->wol;
5101 #ifdef CONFIG_PM
5102         int retval = 0;
5103 #endif
5104
5105         netif_device_detach(netdev);
5106
5107         if (netif_running(netdev)) {
5108                 int count = E1000_CHECK_RESET_COUNT;
5109
5110                 while (test_bit(__E1000_RESETTING, &adapter->flags) && count--)
5111                         usleep_range(10000, 20000);
5112
5113                 WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
5114                 e1000_down(adapter);
5115         }
5116
5117 #ifdef CONFIG_PM
5118         retval = pci_save_state(pdev);
5119         if (retval)
5120                 return retval;
5121 #endif
5122
5123         status = er32(STATUS);
5124         if (status & E1000_STATUS_LU)
5125                 wufc &= ~E1000_WUFC_LNKC;
5126
5127         if (wufc) {
5128                 e1000_setup_rctl(adapter);
5129                 e1000_set_rx_mode(netdev);
5130
5131                 rctl = er32(RCTL);
5132
5133                 /* turn on all-multi mode if wake on multicast is enabled */
5134                 if (wufc & E1000_WUFC_MC)
5135                         rctl |= E1000_RCTL_MPE;
5136
5137                 /* enable receives in the hardware */
5138                 ew32(RCTL, rctl | E1000_RCTL_EN);
5139
5140                 if (hw->mac_type >= e1000_82540) {
5141                         ctrl = er32(CTRL);
5142                         /* advertise wake from D3Cold */
5143                         #define E1000_CTRL_ADVD3WUC 0x00100000
5144                         /* phy power management enable */
5145                         #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
5146                         ctrl |= E1000_CTRL_ADVD3WUC |
5147                                 E1000_CTRL_EN_PHY_PWR_MGMT;
5148                         ew32(CTRL, ctrl);
5149                 }
5150
5151                 if (hw->media_type == e1000_media_type_fiber ||
5152                     hw->media_type == e1000_media_type_internal_serdes) {
5153                         /* keep the laser running in D3 */
5154                         ctrl_ext = er32(CTRL_EXT);
5155                         ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA;
5156                         ew32(CTRL_EXT, ctrl_ext);
5157                 }
5158
5159                 ew32(WUC, E1000_WUC_PME_EN);
5160                 ew32(WUFC, wufc);
5161         } else {
5162                 ew32(WUC, 0);
5163                 ew32(WUFC, 0);
5164         }
5165
5166         e1000_release_manageability(adapter);
5167
5168         *enable_wake = !!wufc;
5169
5170         /* make sure adapter isn't asleep if manageability is enabled */
5171         if (adapter->en_mng_pt)
5172                 *enable_wake = true;
5173
5174         if (netif_running(netdev))
5175                 e1000_free_irq(adapter);
5176
5177         if (!test_and_set_bit(__E1000_DISABLED, &adapter->flags))
5178                 pci_disable_device(pdev);
5179
5180         return 0;
5181 }
5182
5183 #ifdef CONFIG_PM
5184 static int e1000_suspend(struct pci_dev *pdev, pm_message_t state)
5185 {
5186         int retval;
5187         bool wake;
5188
5189         retval = __e1000_shutdown(pdev, &wake);
5190         if (retval)
5191                 return retval;
5192
5193         if (wake) {
5194                 pci_prepare_to_sleep(pdev);
5195         } else {
5196                 pci_wake_from_d3(pdev, false);
5197                 pci_set_power_state(pdev, PCI_D3hot);
5198         }
5199
5200         return 0;
5201 }
5202
5203 static int e1000_resume(struct pci_dev *pdev)
5204 {
5205         struct net_device *netdev = pci_get_drvdata(pdev);
5206         struct e1000_adapter *adapter = netdev_priv(netdev);
5207         struct e1000_hw *hw = &adapter->hw;
5208         int err;
5209
5210         pci_set_power_state(pdev, PCI_D0);
5211         pci_restore_state(pdev);
5212         pci_save_state(pdev);
5213
5214         if (adapter->need_ioport)
5215                 err = pci_enable_device(pdev);
5216         else
5217                 err = pci_enable_device_mem(pdev);
5218         if (err) {
5219                 pr_err("Cannot enable PCI device from suspend\n");
5220                 return err;
5221         }
5222
5223         /* flush memory to make sure state is correct */
5224         smp_mb__before_atomic();
5225         clear_bit(__E1000_DISABLED, &adapter->flags);
5226         pci_set_master(pdev);
5227
5228         pci_enable_wake(pdev, PCI_D3hot, 0);
5229         pci_enable_wake(pdev, PCI_D3cold, 0);
5230
5231         if (netif_running(netdev)) {
5232                 err = e1000_request_irq(adapter);
5233                 if (err)
5234                         return err;
5235         }
5236
5237         e1000_power_up_phy(adapter);
5238         e1000_reset(adapter);
5239         ew32(WUS, ~0);
5240
5241         e1000_init_manageability(adapter);
5242
5243         if (netif_running(netdev))
5244                 e1000_up(adapter);
5245
5246         netif_device_attach(netdev);
5247
5248         return 0;
5249 }
5250 #endif
5251
5252 static void e1000_shutdown(struct pci_dev *pdev)
5253 {
5254         bool wake;
5255
5256         __e1000_shutdown(pdev, &wake);
5257
5258         if (system_state == SYSTEM_POWER_OFF) {
5259                 pci_wake_from_d3(pdev, wake);
5260                 pci_set_power_state(pdev, PCI_D3hot);
5261         }
5262 }
5263
5264 #ifdef CONFIG_NET_POLL_CONTROLLER
5265 /* Polling 'interrupt' - used by things like netconsole to send skbs
5266  * without having to re-enable interrupts. It's not called while
5267  * the interrupt routine is executing.
5268  */
5269 static void e1000_netpoll(struct net_device *netdev)
5270 {
5271         struct e1000_adapter *adapter = netdev_priv(netdev);
5272
5273         if (disable_hardirq(adapter->pdev->irq))
5274                 e1000_intr(adapter->pdev->irq, netdev);
5275         enable_irq(adapter->pdev->irq);
5276 }
5277 #endif
5278
5279 /**
5280  * e1000_io_error_detected - called when PCI error is detected
5281  * @pdev: Pointer to PCI device
5282  * @state: The current pci connection state
5283  *
5284  * This function is called after a PCI bus error affecting
5285  * this device has been detected.
5286  */
5287 static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
5288                                                 pci_channel_state_t state)
5289 {
5290         struct net_device *netdev = pci_get_drvdata(pdev);
5291         struct e1000_adapter *adapter = netdev_priv(netdev);
5292
5293         netif_device_detach(netdev);
5294
5295         if (state == pci_channel_io_perm_failure)
5296                 return PCI_ERS_RESULT_DISCONNECT;
5297
5298         if (netif_running(netdev))
5299                 e1000_down(adapter);
5300
5301         if (!test_and_set_bit(__E1000_DISABLED, &adapter->flags))
5302                 pci_disable_device(pdev);
5303
5304         /* Request a slot reset. */
5305         return PCI_ERS_RESULT_NEED_RESET;
5306 }
5307
5308 /**
5309  * e1000_io_slot_reset - called after the pci bus has been reset.
5310  * @pdev: Pointer to PCI device
5311  *
5312  * Restart the card from scratch, as if from a cold-boot. Implementation
5313  * resembles the first-half of the e1000_resume routine.
5314  */
5315 static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
5316 {
5317         struct net_device *netdev = pci_get_drvdata(pdev);
5318         struct e1000_adapter *adapter = netdev_priv(netdev);
5319         struct e1000_hw *hw = &adapter->hw;
5320         int err;
5321
5322         if (adapter->need_ioport)
5323                 err = pci_enable_device(pdev);
5324         else
5325                 err = pci_enable_device_mem(pdev);
5326         if (err) {
5327                 pr_err("Cannot re-enable PCI device after reset.\n");
5328                 return PCI_ERS_RESULT_DISCONNECT;
5329         }
5330
5331         /* flush memory to make sure state is correct */
5332         smp_mb__before_atomic();
5333         clear_bit(__E1000_DISABLED, &adapter->flags);
5334         pci_set_master(pdev);
5335
5336         pci_enable_wake(pdev, PCI_D3hot, 0);
5337         pci_enable_wake(pdev, PCI_D3cold, 0);
5338
5339         e1000_reset(adapter);
5340         ew32(WUS, ~0);
5341
5342         return PCI_ERS_RESULT_RECOVERED;
5343 }
5344
5345 /**
5346  * e1000_io_resume - called when traffic can start flowing again.
5347  * @pdev: Pointer to PCI device
5348  *
5349  * This callback is called when the error recovery driver tells us that
5350  * it's OK to resume normal operation. Implementation resembles the
5351  * second-half of the e1000_resume routine.
5352  */
5353 static void e1000_io_resume(struct pci_dev *pdev)
5354 {
5355         struct net_device *netdev = pci_get_drvdata(pdev);
5356         struct e1000_adapter *adapter = netdev_priv(netdev);
5357
5358         e1000_init_manageability(adapter);
5359
5360         if (netif_running(netdev)) {
5361                 if (e1000_up(adapter)) {
5362                         pr_info("can't bring device back up after reset\n");
5363                         return;
5364                 }
5365         }
5366
5367         netif_device_attach(netdev);
5368 }
5369
5370 /* e1000_main.c */