/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2016 Broadcom Corporation.
 * Copyright (C) 2016-2017 Broadcom Limited.
 * Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
 * refers to Broadcom Inc. and/or its subsidiaries.
 *
/*(DEBLOBBED)*/


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/ssb/ssb_driver_gige.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/crc32poly.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#include <uapi/linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0   0
#define BAR_2   2

#include "tg3.h"

/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
        return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
        set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
        clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)                              \
        _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)                          \
        _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)                        \
        _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)

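/* Illustrative usage of the accessors above (example only, not driver
 * code): the TG3_FLAG_##flag token pasting lets callers name flags
 * without the TG3_FLAG_ prefix, and the helpers expand to atomic bitops
 * on the tp->tg3_flags bitmap:
 *
 *      if (tg3_flag(tp, ENABLE_APE))
 *              tg3_flag_set(tp, APE_HAS_NCSI);
 *      tg3_flag_clear(tp, APE_HAS_NCSI);
 */
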
#define DRV_MODULE_NAME         "tg3"
#define TG3_MAJ_NUM                     3
#define TG3_MIN_NUM                     137
#define DRV_MODULE_VERSION      \
        __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE      "May 11, 2014"

#define RESET_KIND_SHUTDOWN     0
#define RESET_KIND_INIT         1
#define RESET_KIND_SUSPEND      2

#define TG3_DEF_RX_MODE         0
#define TG3_DEF_TX_MODE         0
#define TG3_DEF_MSG_ENABLE        \
        (NETIF_MSG_DRV          | \
         NETIF_MSG_PROBE        | \
         NETIF_MSG_LINK         | \
         NETIF_MSG_TIMER        | \
         NETIF_MSG_IFDOWN       | \
         NETIF_MSG_IFUP         | \
         NETIF_MSG_RX_ERR       | \
         NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY      100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT                  (5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU                     ETH_ZLEN
#define TG3_MAX_MTU(tp) \
        (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING         200
#define TG3_RX_JMB_RING_SIZE(tp) \
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING   100

/* Do not place this n-ring-entries value into the tp struct itself;
 * we really want to expose these constants to GCC so that modulo and
 * similar operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE                512
#define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
        (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
                                 TG3_TX_RING_SIZE)
#define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))

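/* Example (illustrative only): because TG3_TX_RING_SIZE is a power of
 * two, NEXT_TX() wraps with a mask instead of a modulo, exactly as the
 * comment above suggests:
 *
 *      NEXT_TX(510) == 511
 *      NEXT_TX(511) == 0       (512 & 511 == 0, back to the ring start)
 */
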
#define TG3_DMA_BYTE_ENAB               64

#define TG3_RX_STD_DMA_SZ               1536
#define TG3_RX_JMB_DMA_SZ               9046

#define TG3_RX_DMA_TO_MAP_SZ(x)         ((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
        (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
        (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD           256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
        #define TG3_RX_COPY_THRESH(tp)  TG3_RX_COPY_THRESHOLD
#else
        #define TG3_RX_COPY_THRESH(tp)  ((tp)->rx_copy_thresh)
#endif
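
/* Illustrative effect (not driver code): on an arch with cheap unaligned
 * access, TG3_RX_COPY_THRESH(tp) collapses to the constant 256, so an rx
 * path test such as
 *
 *      if (len > TG3_RX_COPY_THRESH(tp))
 *              ... pass the mapped buffer up the stack ...
 *      else
 *              ... copy the small frame into a fresh skb ...
 *
 * needs no tp->rx_copy_thresh dereference at runtime.
 */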

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)       ((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)       (NET_SKB_PAD)
#endif

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)             ((tnapi)->tx_pending / 4)
#define TG3_TX_BD_DMA_MAX_2K            2048
#define TG3_TX_BD_DMA_MAX_4K            4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_MAX_UCAST_ADDR(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 3)
#define TG3_UCAST_ADDR_IDX(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 1)

#define TG3_FW_UPDATE_TIMEOUT_SEC       5
#define TG3_FW_UPDATE_FREQ_SEC          (TG3_FW_UPDATE_TIMEOUT_SEC / 2)

#define FIRMWARE_TG3            "/*(DEBLOBBED)*/"
#define FIRMWARE_TG357766       "/*(DEBLOBBED)*/"
#define FIRMWARE_TG3TSO         "/*(DEBLOBBED)*/"
#define FIRMWARE_TG3TSO5        "/*(DEBLOBBED)*/"

static char version[] =
        DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
/*(DEBLOBBED)*/

static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
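
/* Example (illustrative): tg3_debug is a NETIF_MSG_* bitmask, so e.g.
 *
 *      modprobe tg3 tg3_debug=0x7
 *
 * enables NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK, while the
 * default of -1 falls back to TG3_DEF_MSG_ENABLE above.
 */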

#define TG3_DRV_DATA_FLAG_10_100_ONLY   0x0001
#define TG3_DRV_DATA_FLAG_5705_10_100   0x0002

static const struct pci_device_id tg3_pci_tbl[] = {
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
                        TG3_DRV_DATA_FLAG_5705_10_100},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
                        TG3_DRV_DATA_FLAG_5705_10_100},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
                        TG3_DRV_DATA_FLAG_5705_10_100},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
        {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
                        PCI_VENDOR_ID_LENOVO,
                        TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
        {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
                        PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
                        PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57764)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57767)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57787)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57782)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57786)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
        {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
        {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
        {}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
        { "rx_octets" },
        { "rx_fragments" },
        { "rx_ucast_packets" },
        { "rx_mcast_packets" },
        { "rx_bcast_packets" },
        { "rx_fcs_errors" },
        { "rx_align_errors" },
        { "rx_xon_pause_rcvd" },
        { "rx_xoff_pause_rcvd" },
        { "rx_mac_ctrl_rcvd" },
        { "rx_xoff_entered" },
        { "rx_frame_too_long_errors" },
        { "rx_jabbers" },
        { "rx_undersize_packets" },
        { "rx_in_length_errors" },
        { "rx_out_length_errors" },
        { "rx_64_or_less_octet_packets" },
        { "rx_65_to_127_octet_packets" },
        { "rx_128_to_255_octet_packets" },
        { "rx_256_to_511_octet_packets" },
        { "rx_512_to_1023_octet_packets" },
        { "rx_1024_to_1522_octet_packets" },
        { "rx_1523_to_2047_octet_packets" },
        { "rx_2048_to_4095_octet_packets" },
        { "rx_4096_to_8191_octet_packets" },
        { "rx_8192_to_9022_octet_packets" },

        { "tx_octets" },
        { "tx_collisions" },

        { "tx_xon_sent" },
        { "tx_xoff_sent" },
        { "tx_flow_control" },
        { "tx_mac_errors" },
        { "tx_single_collisions" },
        { "tx_mult_collisions" },
        { "tx_deferred" },
        { "tx_excessive_collisions" },
        { "tx_late_collisions" },
        { "tx_collide_2times" },
        { "tx_collide_3times" },
        { "tx_collide_4times" },
        { "tx_collide_5times" },
        { "tx_collide_6times" },
        { "tx_collide_7times" },
        { "tx_collide_8times" },
        { "tx_collide_9times" },
        { "tx_collide_10times" },
        { "tx_collide_11times" },
        { "tx_collide_12times" },
        { "tx_collide_13times" },
        { "tx_collide_14times" },
        { "tx_collide_15times" },
        { "tx_ucast_packets" },
        { "tx_mcast_packets" },
        { "tx_bcast_packets" },
        { "tx_carrier_sense_errors" },
        { "tx_discards" },
        { "tx_errors" },

        { "dma_writeq_full" },
        { "dma_write_prioq_full" },
        { "rxbds_empty" },
        { "rx_discards" },
        { "rx_errors" },
        { "rx_threshold_hit" },

        { "dma_readq_full" },
        { "dma_read_prioq_full" },
        { "tx_comp_queue_full" },

        { "ring_set_send_prod_index" },
        { "ring_status_update" },
        { "nic_irqs" },
        { "nic_avoided_irqs" },
        { "nic_tx_threshold_hit" },

        { "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS   ARRAY_SIZE(ethtool_stats_keys)
#define TG3_NVRAM_TEST          0
#define TG3_LINK_TEST           1
#define TG3_REGISTER_TEST       2
#define TG3_MEMORY_TEST         3
#define TG3_MAC_LOOPB_TEST      4
#define TG3_PHY_LOOPB_TEST      5
#define TG3_EXT_LOOPB_TEST      6
#define TG3_INTERRUPT_TEST      7


static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
        [TG3_NVRAM_TEST]        = { "nvram test        (online) " },
        [TG3_LINK_TEST]         = { "link test         (online) " },
        [TG3_REGISTER_TEST]     = { "register test     (offline)" },
        [TG3_MEMORY_TEST]       = { "memory test       (offline)" },
        [TG3_MAC_LOOPB_TEST]    = { "mac loopback test (offline)" },
        [TG3_PHY_LOOPB_TEST]    = { "phy loopback test (offline)" },
        [TG3_EXT_LOOPB_TEST]    = { "ext loopback test (offline)" },
        [TG3_INTERRUPT_TEST]    = { "interrupt test    (offline)" },
};

#define TG3_NUM_TEST    ARRAY_SIZE(ethtool_test_keys)


static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->aperegs + off);
}

static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
        readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
                pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }
        if (off == TG3_RX_STD_PROD_IDX_REG) {
                pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);

        /* In indirect mode when disabling interrupts, we also need
         * to clear the interrupt bit in the GRC local ctrl register.
         */
        if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
            (val == 0x1)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
                                       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
        }
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
        if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
                /* Non-posted methods */
                tp->write32(tp, off, val);
        else {
                /* Posted method */
                tg3_write32(tp, off, val);
                if (usec_wait)
                        udelay(usec_wait);
                tp->read32(tp, off);
        }
        /* Wait again after the read for the posted method to guarantee that
         * the wait time is met.
         */
        if (usec_wait)
                udelay(usec_wait);
}
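
/* Typical use (illustrative, per the comment above): GPIO toggles in
 * GRC_LOCAL_CTRL go through the waiting variant, e.g.
 *
 *      tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
 *                  TG3_GRC_LCLCTL_PWRSW_DELAY);
 *
 * so the write is flushed and the power switch gets 100 usec to settle
 * before the caller proceeds.
 */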

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
        tp->write32_mbox(tp, off, val);
        if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
            (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
             !tg3_flag(tp, ICH_WORKAROUND)))
                tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
        void __iomem *mbox = tp->regs + off;
        writel(val, mbox);
        if (tg3_flag(tp, TXD_MBOX_HWBUG))
                writel(val, mbox);
        if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
            tg3_flag(tp, FLUSH_POSTED_WRITES))
                readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)          tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)          tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)          tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)               tp->read32_mbox(tp, reg)

#define tw32(reg, val)                  tp->write32(tp, reg, val)
#define tw32_f(reg, val)                _tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)       _tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)                       tp->read32(tp, reg)

static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
                return;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                tw32_f(TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
        unsigned long flags;

        if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
                *val = 0;
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                *val = tr32(TG3PCI_MEM_WIN_DATA);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_ape_lock_init(struct tg3 *tp)
{
        int i;
        u32 regbase, bit;

        if (tg3_asic_rev(tp) == ASIC_REV_5761)
                regbase = TG3_APE_LOCK_GRANT;
        else
                regbase = TG3_APE_PER_LOCK_GRANT;

        /* Make sure the driver doesn't hold any stale locks. */
        for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
                switch (i) {
                case TG3_APE_LOCK_PHY0:
                case TG3_APE_LOCK_PHY1:
                case TG3_APE_LOCK_PHY2:
                case TG3_APE_LOCK_PHY3:
                        bit = APE_LOCK_GRANT_DRIVER;
                        break;
                default:
                        if (!tp->pci_fn)
                                bit = APE_LOCK_GRANT_DRIVER;
                        else
                                bit = 1 << tp->pci_fn;
                }
                tg3_ape_write32(tp, regbase + 4 * i, bit);
        }

}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
        int i, off;
        int ret = 0;
        u32 status, req, gnt, bit;

        if (!tg3_flag(tp, ENABLE_APE))
                return 0;

        switch (locknum) {
        case TG3_APE_LOCK_GPIO:
                if (tg3_asic_rev(tp) == ASIC_REV_5761)
                        return 0;
                /* else: fall through */
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                if (!tp->pci_fn)
                        bit = APE_LOCK_REQ_DRIVER;
                else
                        bit = 1 << tp->pci_fn;
                break;
        case TG3_APE_LOCK_PHY0:
        case TG3_APE_LOCK_PHY1:
        case TG3_APE_LOCK_PHY2:
        case TG3_APE_LOCK_PHY3:
                bit = APE_LOCK_REQ_DRIVER;
                break;
        default:
                return -EINVAL;
        }

        if (tg3_asic_rev(tp) == ASIC_REV_5761) {
                req = TG3_APE_LOCK_REQ;
                gnt = TG3_APE_LOCK_GRANT;
        } else {
                req = TG3_APE_PER_LOCK_REQ;
                gnt = TG3_APE_PER_LOCK_GRANT;
        }

        off = 4 * locknum;

        tg3_ape_write32(tp, req + off, bit);

        /* Wait for up to 1 millisecond to acquire lock. */
        for (i = 0; i < 100; i++) {
                status = tg3_ape_read32(tp, gnt + off);
                if (status == bit)
                        break;
                if (pci_channel_offline(tp->pdev))
                        break;

                udelay(10);
        }

        if (status != bit) {
                /* Revoke the lock request. */
                tg3_ape_write32(tp, gnt + off, bit);
                ret = -EBUSY;
        }

        return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
        u32 gnt, bit;

        if (!tg3_flag(tp, ENABLE_APE))
                return;

        switch (locknum) {
        case TG3_APE_LOCK_GPIO:
                if (tg3_asic_rev(tp) == ASIC_REV_5761)
                        return;
                /* else: fall through */
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                if (!tp->pci_fn)
                        bit = APE_LOCK_GRANT_DRIVER;
                else
                        bit = 1 << tp->pci_fn;
                break;
        case TG3_APE_LOCK_PHY0:
        case TG3_APE_LOCK_PHY1:
        case TG3_APE_LOCK_PHY2:
        case TG3_APE_LOCK_PHY3:
                bit = APE_LOCK_GRANT_DRIVER;
                break;
        default:
                return;
        }

        if (tg3_asic_rev(tp) == ASIC_REV_5761)
                gnt = TG3_APE_LOCK_GRANT;
        else
                gnt = TG3_APE_PER_LOCK_GRANT;

        tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}
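
/* Illustrative locking pattern (not new driver logic): callers bracket
 * accesses to APE-shared resources with an acquire/release pair and must
 * check the acquire result, e.g.
 *
 *      if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
 *              return -EBUSY;
 *      ... touch APE shared memory ...
 *      tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
 *
 * tg3_ape_lock() polls the grant register for up to 1 ms and revokes its
 * own request on failure.
 */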

static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
{
        u32 apedata;

        while (timeout_us) {
                if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
                        return -EBUSY;

                apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
                if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                        break;

                tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

                udelay(10);
                timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
        }

        return timeout_us ? 0 : -EBUSY;
}

#ifdef CONFIG_TIGON3_HWMON
static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
{
        u32 i, apedata;

        for (i = 0; i < timeout_us / 10; i++) {
                apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

                if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                        break;

                udelay(10);
        }

        return i == timeout_us / 10;
}

static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
                                   u32 len)
{
        int err;
        u32 i, bufoff, msgoff, maxlen, apedata;

        if (!tg3_flag(tp, APE_HAS_NCSI))
                return 0;

        apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
        if (apedata != APE_SEG_SIG_MAGIC)
                return -ENODEV;

        apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
        if (!(apedata & APE_FW_STATUS_READY))
                return -EAGAIN;

        bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
                 TG3_APE_SHMEM_BASE;
        msgoff = bufoff + 2 * sizeof(u32);
        maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);

        while (len) {
                u32 length;

                /* Cap xfer sizes to scratchpad limits. */
                length = (len > maxlen) ? maxlen : len;
                len -= length;

                apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
                if (!(apedata & APE_FW_STATUS_READY))
                        return -EAGAIN;

                /* Wait for up to 1 msec for APE to service previous event. */
                err = tg3_ape_event_lock(tp, 1000);
                if (err)
                        return err;

                apedata = APE_EVENT_STATUS_DRIVER_EVNT |
                          APE_EVENT_STATUS_SCRTCHPD_READ |
                          APE_EVENT_STATUS_EVENT_PENDING;
                tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);

                tg3_ape_write32(tp, bufoff, base_off);
                tg3_ape_write32(tp, bufoff + sizeof(u32), length);

                tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
                tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

                base_off += length;

                if (tg3_ape_wait_for_event(tp, 30000))
                        return -EAGAIN;

                for (i = 0; length; i += 4, length -= 4) {
                        u32 val = tg3_ape_read32(tp, msgoff + i);
                        memcpy(data, &val, sizeof(u32));
                        data++;
                }
        }

        return 0;
}
#endif

static int tg3_ape_send_event(struct tg3 *tp, u32 event)
{
        int err;
        u32 apedata;

        apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
        if (apedata != APE_SEG_SIG_MAGIC)
                return -EAGAIN;

        apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
        if (!(apedata & APE_FW_STATUS_READY))
                return -EAGAIN;

        /* Wait for up to 20 milliseconds for APE to service the previous event. */
        err = tg3_ape_event_lock(tp, 20000);
        if (err)
                return err;

        tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
                        event | APE_EVENT_STATUS_EVENT_PENDING);

        tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
        tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

        return 0;
}

static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
        u32 event;
        u32 apedata;

        if (!tg3_flag(tp, ENABLE_APE))
                return;

        switch (kind) {
        case RESET_KIND_INIT:
                tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
                                APE_HOST_SEG_SIG_MAGIC);
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
                                APE_HOST_SEG_LEN_MAGIC);
                apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
                tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
                tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
                        APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
                tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
                                APE_HOST_BEHAV_NO_PHYLOCK);
                tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
                                    TG3_APE_HOST_DRVR_STATE_START);

                event = APE_EVENT_STATUS_STATE_START;
                break;
        case RESET_KIND_SHUTDOWN:
                if (device_may_wakeup(&tp->pdev->dev) &&
                    tg3_flag(tp, WOL_ENABLE)) {
                        tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
                                            TG3_APE_HOST_WOL_SPEED_AUTO);
                        apedata = TG3_APE_HOST_DRVR_STATE_WOL;
                } else
                        apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

                tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

                event = APE_EVENT_STATUS_STATE_UNLOAD;
                break;
        default:
                return;
        }

        event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

        tg3_ape_send_event(tp, event);
}

static void tg3_send_ape_heartbeat(struct tg3 *tp,
                                   unsigned long interval)
{
        /* Skip unless the heartbeat interval has elapsed. */
        if (!tg3_flag(tp, ENABLE_APE) ||
            time_before(jiffies, tp->ape_hb_jiffies + interval))
                return;

        tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
        tp->ape_hb_jiffies = jiffies;
}

static void tg3_disable_ints(struct tg3 *tp)
{
        int i;

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
        for (i = 0; i < tp->irq_max; i++)
                tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
        int i;

        tp->irq_sync = 0;
        wmb();

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

        tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
        for (i = 0; i < tp->irq_cnt; i++) {
                struct tg3_napi *tnapi = &tp->napi[i];

                tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
                if (tg3_flag(tp, 1SHOT_MSI))
                        tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

                tp->coal_now |= tnapi->coal_now;
        }

        /* Force an initial interrupt */
        if (!tg3_flag(tp, TAGGED_STATUS) &&
            (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
                tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
        else
                tw32(HOSTCC_MODE, tp->coal_now);

        tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}

static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;
        struct tg3_hw_status *sblk = tnapi->hw_status;
        unsigned int work_exists = 0;

        /* check for phy events */
        if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
                if (sblk->status & SD_STATUS_LINK_CHG)
                        work_exists = 1;
        }

        /* check for TX work to do */
        if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
                work_exists = 1;

        /* check for RX work to do */
        if (tnapi->rx_rcb_prod_idx &&
            *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
                work_exists = 1;

        return work_exists;
}

/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;

        tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
        mmiowb();

        /* When doing tagged status, this work check is unnecessary.
         * The last_tag we write above tells the chip which piece of
         * work we've completed.
         */
        if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
                tw32(HOSTCC_MODE, tp->coalesce_mode |
                     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
        u32 clock_ctrl;
        u32 orig_clock_ctrl;

        if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
                return;

        clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

        orig_clock_ctrl = clock_ctrl;
        clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
                       CLOCK_CTRL_CLKRUN_OENABLE |
                       0x1f);
        tp->pci_clock_ctrl = clock_ctrl;

        if (tg3_flag(tp, 5705_PLUS)) {
                if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
                        tw32_wait_f(TG3PCI_CLOCK_CTRL,
                                    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
                }
        } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl |
                            (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
                            40);
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl | (CLOCK_CTRL_ALTCLK),
                            40);
        }
        tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS  5000

static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
                         u32 *val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        tg3_ape_lock(tp, tp->phy_ape_lock);

        *val = 0x0;

        frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (MI_COM_CMD_READ | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);

                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0) {
                *val = frame_val & MI_COM_DATA_MASK;
                ret = 0;
        }

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        tg3_ape_unlock(tp, tp->phy_ape_lock);

        return ret;
}

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
        return __tg3_readphy(tp, tp->phy_addr, reg, val);
}

static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
                          u32 val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
            (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
                return 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        tg3_ape_lock(tp, tp->phy_ape_lock);

        frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (val & MI_COM_DATA_MASK);
        frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);
                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0)
                ret = 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        tg3_ape_unlock(tp, tp->phy_ape_lock);

        return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
        return __tg3_writephy(tp, tp->phy_addr, reg, val);
}
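
/* Illustrative example (not driver code): the helpers above implement
 * clause-22 MDIO through MAC_MI_COM, so standard MII registers from
 * <linux/mii.h> can be accessed as, e.g.:
 *
 *      u32 bmsr;
 *
 *      if (!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS))
 *              ... link is up ...
 *      tg3_writephy(tp, MII_BMCR, BMCR_ANENABLE | BMCR_ANRESTART);
 *
 * Both return 0 on success and -EBUSY if MI_COM_BUSY never clears.
 */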

static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
                           MII_TG3_MMD_CTRL_DATA_NOINC | devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
        return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
                           MII_TG3_MMD_CTRL_DATA_NOINC | devad);
        if (err)
                goto done;

        err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
        return err;
}
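
/* Illustrative example (not driver code): the two helpers above tunnel
 * clause-45 MMD accesses through the clause-22 MII_TG3_MMD_CTRL /
 * MII_TG3_MMD_ADDRESS registers.  Reading an EEE register in the
 * autonegotiation device (devad 7) would look like:
 *
 *      u32 val;
 *      int err = tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val);
 *
 * with MDIO_MMD_AN and MDIO_AN_EEE_ADV coming from <linux/mdio.h>.
 */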
1278
1279 static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
1280 {
1281         int err;
1282
1283         err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1284         if (!err)
1285                 err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
1286
1287         return err;
1288 }
1289
1290 static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1291 {
1292         int err;
1293
1294         err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1295         if (!err)
1296                 err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
1297
1298         return err;
1299 }
1300
1301 static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
1302 {
1303         int err;
1304
1305         err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
1306                            (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
1307                            MII_TG3_AUXCTL_SHDWSEL_MISC);
1308         if (!err)
1309                 err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
1310
1311         return err;
1312 }
1313
1314 static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
1315 {
1316         if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
1317                 set |= MII_TG3_AUXCTL_MISC_WREN;
1318
1319         return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
1320 }
1321
1322 static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
1323 {
1324         u32 val;
1325         int err;
1326
1327         err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
1328
1329         if (err)
1330                 return err;
1331
1332         if (enable)
1333                 val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1334         else
1335                 val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1336
1337         err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
1338                                    val | MII_TG3_AUXCTL_ACTL_TX_6DB);
1339
1340         return err;
1341 }
1342
1343 static int tg3_phy_shdw_write(struct tg3 *tp, int reg, u32 val)
1344 {
1345         return tg3_writephy(tp, MII_TG3_MISC_SHDW,
1346                             reg | val | MII_TG3_MISC_SHDW_WREN);
1347 }
1348
1349 static int tg3_bmcr_reset(struct tg3 *tp)
1350 {
1351         u32 phy_control;
1352         int limit, err;
1353
1354         /* OK, reset it, and poll the BMCR_RESET bit until it
1355          * clears or we time out.
1356          */
1357         phy_control = BMCR_RESET;
1358         err = tg3_writephy(tp, MII_BMCR, phy_control);
1359         if (err != 0)
1360                 return -EBUSY;
1361
1362         limit = 5000;
1363         while (limit--) {
1364                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
1365                 if (err != 0)
1366                         return -EBUSY;
1367
1368                 if ((phy_control & BMCR_RESET) == 0) {
1369                         udelay(40);
1370                         break;
1371                 }
1372                 udelay(10);
1373         }
1374         if (limit < 0)
1375                 return -EBUSY;
1376
1377         return 0;
1378 }
1379
1380 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
1381 {
1382         struct tg3 *tp = bp->priv;
1383         u32 val;
1384
1385         spin_lock_bh(&tp->lock);
1386
1387         if (__tg3_readphy(tp, mii_id, reg, &val))
1388                 val = -EIO;
1389
1390         spin_unlock_bh(&tp->lock);
1391
1392         return val;
1393 }
1394
1395 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
1396 {
1397         struct tg3 *tp = bp->priv;
1398         u32 ret = 0;
1399
1400         spin_lock_bh(&tp->lock);
1401
1402         if (__tg3_writephy(tp, mii_id, reg, val))
1403                 ret = -EIO;
1404
1405         spin_unlock_bh(&tp->lock);
1406
1407         return ret;
1408 }
1409
1410 static void tg3_mdio_config_5785(struct tg3 *tp)
1411 {
1412         u32 val;
1413         struct phy_device *phydev;
1414
1415         phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
1416         switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1417         case PHY_ID_BCM50610:
1418         case PHY_ID_BCM50610M:
1419                 val = MAC_PHYCFG2_50610_LED_MODES;
1420                 break;
1421         case PHY_ID_BCMAC131:
1422                 val = MAC_PHYCFG2_AC131_LED_MODES;
1423                 break;
1424         case PHY_ID_RTL8211C:
1425                 val = MAC_PHYCFG2_RTL8211C_LED_MODES;
1426                 break;
1427         case PHY_ID_RTL8201E:
1428                 val = MAC_PHYCFG2_RTL8201E_LED_MODES;
1429                 break;
1430         default:
1431                 return;
1432         }
1433
1434         if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
1435                 tw32(MAC_PHYCFG2, val);
1436
1437                 val = tr32(MAC_PHYCFG1);
1438                 val &= ~(MAC_PHYCFG1_RGMII_INT |
1439                          MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
1440                 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
1441                 tw32(MAC_PHYCFG1, val);
1442
1443                 return;
1444         }
1445
1446         if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
1447                 val |= MAC_PHYCFG2_EMODE_MASK_MASK |
1448                        MAC_PHYCFG2_FMODE_MASK_MASK |
1449                        MAC_PHYCFG2_GMODE_MASK_MASK |
1450                        MAC_PHYCFG2_ACT_MASK_MASK   |
1451                        MAC_PHYCFG2_QUAL_MASK_MASK |
1452                        MAC_PHYCFG2_INBAND_ENABLE;
1453
1454         tw32(MAC_PHYCFG2, val);
1455
1456         val = tr32(MAC_PHYCFG1);
1457         val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
1458                  MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
1459         if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1460                 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1461                         val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
1462                 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1463                         val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
1464         }
1465         val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
1466                MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
1467         tw32(MAC_PHYCFG1, val);
1468
1469         val = tr32(MAC_EXT_RGMII_MODE);
1470         val &= ~(MAC_RGMII_MODE_RX_INT_B |
1471                  MAC_RGMII_MODE_RX_QUALITY |
1472                  MAC_RGMII_MODE_RX_ACTIVITY |
1473                  MAC_RGMII_MODE_RX_ENG_DET |
1474                  MAC_RGMII_MODE_TX_ENABLE |
1475                  MAC_RGMII_MODE_TX_LOWPWR |
1476                  MAC_RGMII_MODE_TX_RESET);
1477         if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1478                 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1479                         val |= MAC_RGMII_MODE_RX_INT_B |
1480                                MAC_RGMII_MODE_RX_QUALITY |
1481                                MAC_RGMII_MODE_RX_ACTIVITY |
1482                                MAC_RGMII_MODE_RX_ENG_DET;
1483                 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1484                         val |= MAC_RGMII_MODE_TX_ENABLE |
1485                                MAC_RGMII_MODE_TX_LOWPWR |
1486                                MAC_RGMII_MODE_TX_RESET;
1487         }
1488         tw32(MAC_EXT_RGMII_MODE, val);
1489 }
1490
1491 static void tg3_mdio_start(struct tg3 *tp)
1492 {
1493         tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
1494         tw32_f(MAC_MI_MODE, tp->mi_mode);
1495         udelay(80);
1496
1497         if (tg3_flag(tp, MDIOBUS_INITED) &&
1498             tg3_asic_rev(tp) == ASIC_REV_5785)
1499                 tg3_mdio_config_5785(tp);
1500 }
1501
1502 static int tg3_mdio_init(struct tg3 *tp)
1503 {
1504         int i;
1505         u32 reg;
1506         struct phy_device *phydev;
1507
1508         if (tg3_flag(tp, 5717_PLUS)) {
1509                 u32 is_serdes;
1510
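                /* Each PCI function owns its own PHY: copper PHYs sit
                 * at MDIO address (function + 1), and serdes-strapped
                 * devices sit 7 addresses higher.
                 */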
1511                 tp->phy_addr = tp->pci_fn + 1;
1512
1513                 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
1514                         is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
1515                 else
1516                         is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
1517                                     TG3_CPMU_PHY_STRAP_IS_SERDES;
1518                 if (is_serdes)
1519                         tp->phy_addr += 7;
1520         } else if (tg3_flag(tp, IS_SSB_CORE) && tg3_flag(tp, ROBOSWITCH)) {
1521                 int addr;
1522
1523                 addr = ssb_gige_get_phyaddr(tp->pdev);
1524                 if (addr < 0)
1525                         return addr;
1526                 tp->phy_addr = addr;
1527         } else
1528                 tp->phy_addr = TG3_PHY_MII_ADDR;
1529
1530         tg3_mdio_start(tp);
1531
1532         if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
1533                 return 0;
1534
1535         tp->mdio_bus = mdiobus_alloc();
1536         if (tp->mdio_bus == NULL)
1537                 return -ENOMEM;
1538
1539         tp->mdio_bus->name     = "tg3 mdio bus";
1540         snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
1541                  (tp->pdev->bus->number << 8) | tp->pdev->devfn);
1542         tp->mdio_bus->priv     = tp;
1543         tp->mdio_bus->parent   = &tp->pdev->dev;
1544         tp->mdio_bus->read     = &tg3_mdio_read;
1545         tp->mdio_bus->write    = &tg3_mdio_write;
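        /* Mask off every MDIO address but our own so the bus scan
         * probes only this device's PHY.
         */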
1546         tp->mdio_bus->phy_mask = ~(1 << tp->phy_addr);
1547
1548         /* The bus registration will look for all the PHYs on the mdio bus.
1549          * Unfortunately, it does not ensure the PHY is powered up before
1550          * accessing the PHY ID registers.  A chip reset is the
1551          * quickest way to bring the device back to an operational state.
1552          */
1553         if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
1554                 tg3_bmcr_reset(tp);
1555
1556         i = mdiobus_register(tp->mdio_bus);
1557         if (i) {
1558                 dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
1559                 mdiobus_free(tp->mdio_bus);
1560                 return i;
1561         }
1562
1563         phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
1564
1565         if (!phydev || !phydev->drv) {
1566                 dev_warn(&tp->pdev->dev, "No PHY devices\n");
1567                 mdiobus_unregister(tp->mdio_bus);
1568                 mdiobus_free(tp->mdio_bus);
1569                 return -ENODEV;
1570         }
1571
1572         switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1573         case PHY_ID_BCM57780:
1574                 phydev->interface = PHY_INTERFACE_MODE_GMII;
1575                 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1576                 break;
1577         case PHY_ID_BCM50610:
1578         case PHY_ID_BCM50610M:
1579                 phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
1580                                      PHY_BRCM_RX_REFCLK_UNUSED |
1581                                      PHY_BRCM_DIS_TXCRXC_NOENRGY |
1582                                      PHY_BRCM_AUTO_PWRDWN_ENABLE;
1583                 if (tg3_flag(tp, RGMII_INBAND_DISABLE))
1584                         phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
1585                 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1586                         phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
1587                 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1588                         phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
1589                 /* fallthru */
1590         case PHY_ID_RTL8211C:
1591                 phydev->interface = PHY_INTERFACE_MODE_RGMII;
1592                 break;
1593         case PHY_ID_RTL8201E:
1594         case PHY_ID_BCMAC131:
1595                 phydev->interface = PHY_INTERFACE_MODE_MII;
1596                 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1597                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
1598                 break;
1599         }
1600
1601         tg3_flag_set(tp, MDIOBUS_INITED);
1602
1603         if (tg3_asic_rev(tp) == ASIC_REV_5785)
1604                 tg3_mdio_config_5785(tp);
1605
1606         return 0;
1607 }
1608
1609 static void tg3_mdio_fini(struct tg3 *tp)
1610 {
1611         if (tg3_flag(tp, MDIOBUS_INITED)) {
1612                 tg3_flag_clear(tp, MDIOBUS_INITED);
1613                 mdiobus_unregister(tp->mdio_bus);
1614                 mdiobus_free(tp->mdio_bus);
1615         }
1616 }
1617
1618 /* tp->lock is held. */
1619 static inline void tg3_generate_fw_event(struct tg3 *tp)
1620 {
1621         u32 val;
1622
1623         val = tr32(GRC_RX_CPU_EVENT);
1624         val |= GRC_RX_CPU_DRIVER_EVENT;
1625         tw32_f(GRC_RX_CPU_EVENT, val);
1626
1627         tp->last_event_jiffies = jiffies;
1628 }
1629
1630 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1631
1632 /* tp->lock is held. */
1633 static void tg3_wait_for_event_ack(struct tg3 *tp)
1634 {
1635         int i;
1636         unsigned int delay_cnt;
1637         long time_remain;
1638
1639         /* If enough time has passed, no wait is necessary. */
1640         time_remain = (long)(tp->last_event_jiffies + 1 +
1641                       usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1642                       (long)jiffies;
1643         if (time_remain < 0)
1644                 return;
1645
1646         /* Check if we can shorten the wait time. */
1647         delay_cnt = jiffies_to_usecs(time_remain);
1648         if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1649                 delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
1650         delay_cnt = (delay_cnt >> 3) + 1;
1651
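        /* Poll in ~8 usec steps to match the udelay(8) below, i.e.
         * delay_cnt = remaining_usecs / 8 + 1.
         */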
1652         for (i = 0; i < delay_cnt; i++) {
1653                 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1654                         break;
1655                 if (pci_channel_offline(tp->pdev))
1656                         break;
1657
1658                 udelay(8);
1659         }
1660 }
1661
1662 /* tp->lock is held. */
1663 static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
1664 {
1665         u32 reg, val;
1666
1667         val = 0;
1668         if (!tg3_readphy(tp, MII_BMCR, &reg))
1669                 val = reg << 16;
1670         if (!tg3_readphy(tp, MII_BMSR, &reg))
1671                 val |= (reg & 0xffff);
1672         *data++ = val;
1673
1674         val = 0;
1675         if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1676                 val = reg << 16;
1677         if (!tg3_readphy(tp, MII_LPA, &reg))
1678                 val |= (reg & 0xffff);
1679         *data++ = val;
1680
1681         val = 0;
1682         if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1683                 if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1684                         val = reg << 16;
1685                 if (!tg3_readphy(tp, MII_STAT1000, &reg))
1686                         val |= (reg & 0xffff);
1687         }
1688         *data++ = val;
1689
1690         if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1691                 val = reg << 16;
1692         else
1693                 val = 0;
1694         *data++ = val;
1695 }
1696
1697 /* tp->lock is held. */
1698 static void tg3_ump_link_report(struct tg3 *tp)
1699 {
1700         u32 data[4];
1701
1702         if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1703                 return;
1704
1705         tg3_phy_gather_ump_data(tp, data);
1706
1707         tg3_wait_for_event_ack(tp);
1708
1709         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1710         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1711         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
1712         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
1713         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
1714         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);
1715
1716         tg3_generate_fw_event(tp);
1717 }
1718
1719 /* tp->lock is held. */
1720 static void tg3_stop_fw(struct tg3 *tp)
1721 {
1722         if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1723                 /* Wait for RX cpu to ACK the previous event. */
1724                 tg3_wait_for_event_ack(tp);
1725
1726                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1727
1728                 tg3_generate_fw_event(tp);
1729
1730                 /* Wait for RX cpu to ACK this event. */
1731                 tg3_wait_for_event_ack(tp);
1732         }
1733 }
1734
1735 /* tp->lock is held. */
1736 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1737 {
1738         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1739                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1740
1741         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1742                 switch (kind) {
1743                 case RESET_KIND_INIT:
1744                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1745                                       DRV_STATE_START);
1746                         break;
1747
1748                 case RESET_KIND_SHUTDOWN:
1749                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1750                                       DRV_STATE_UNLOAD);
1751                         break;
1752
1753                 case RESET_KIND_SUSPEND:
1754                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1755                                       DRV_STATE_SUSPEND);
1756                         break;
1757
1758                 default:
1759                         break;
1760                 }
1761         }
1762 }
1763
1764 /* tp->lock is held. */
1765 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1766 {
1767         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1768                 switch (kind) {
1769                 case RESET_KIND_INIT:
1770                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1771                                       DRV_STATE_START_DONE);
1772                         break;
1773
1774                 case RESET_KIND_SHUTDOWN:
1775                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1776                                       DRV_STATE_UNLOAD_DONE);
1777                         break;
1778
1779                 default:
1780                         break;
1781                 }
1782         }
1783 }
1784
1785 /* tp->lock is held. */
1786 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1787 {
1788         if (tg3_flag(tp, ENABLE_ASF)) {
1789                 switch (kind) {
1790                 case RESET_KIND_INIT:
1791                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1792                                       DRV_STATE_START);
1793                         break;
1794
1795                 case RESET_KIND_SHUTDOWN:
1796                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1797                                       DRV_STATE_UNLOAD);
1798                         break;
1799
1800                 case RESET_KIND_SUSPEND:
1801                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1802                                       DRV_STATE_SUSPEND);
1803                         break;
1804
1805                 default:
1806                         break;
1807                 }
1808         }
1809 }
1810
1811 static int tg3_poll_fw(struct tg3 *tp)
1812 {
1813         int i;
1814         u32 val;
1815
1816         if (tg3_flag(tp, NO_FWARE_REPORTED))
1817                 return 0;
1818
1819         if (tg3_flag(tp, IS_SSB_CORE)) {
1820                 /* We don't use firmware. */
1821                 return 0;
1822         }
1823
1824         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
1825                 /* Wait up to 20ms for init done. */
1826                 for (i = 0; i < 200; i++) {
1827                         if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1828                                 return 0;
1829                         if (pci_channel_offline(tp->pdev))
1830                                 return -ENODEV;
1831
1832                         udelay(100);
1833                 }
1834                 return -ENODEV;
1835         }
1836
1837         /* Wait up to ~1 second for firmware initialization to complete. */
1838         for (i = 0; i < 100000; i++) {
1839                 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
1840                 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1841                         break;
1842                 if (pci_channel_offline(tp->pdev)) {
1843                         if (!tg3_flag(tp, NO_FWARE_REPORTED)) {
1844                                 tg3_flag_set(tp, NO_FWARE_REPORTED);
1845                                 netdev_info(tp->dev, "No firmware running\n");
1846                         }
1847
1848                         break;
1849                 }
1850
1851                 udelay(10);
1852         }
1853
1854         /* Chip might not be fitted with firmware.  Some Sun onboard
1855          * parts are configured like that.  So don't signal the timeout
1856          * of the above loop as an error, but do report the lack of
1857          * running firmware once.
1858          */
1859         if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
1860                 tg3_flag_set(tp, NO_FWARE_REPORTED);
1861
1862                 netdev_info(tp->dev, "No firmware running\n");
1863         }
1864
1865         if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
1866                 /* The 57765 A0 needs a little more
1867                  * time to do some important work.
1868                  */
1869                 mdelay(10);
1870         }
1871
1872         return 0;
1873 }
1874
1875 static void tg3_link_report(struct tg3 *tp)
1876 {
1877         if (!netif_carrier_ok(tp->dev)) {
1878                 netif_info(tp, link, tp->dev, "Link is down\n");
1879                 tg3_ump_link_report(tp);
1880         } else if (netif_msg_link(tp)) {
1881                 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1882                             (tp->link_config.active_speed == SPEED_1000 ?
1883                              1000 :
1884                              (tp->link_config.active_speed == SPEED_100 ?
1885                               100 : 10)),
1886                             (tp->link_config.active_duplex == DUPLEX_FULL ?
1887                              "full" : "half"));
1888
1889                 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1890                             (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1891                             "on" : "off",
1892                             (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1893                             "on" : "off");
1894
1895                 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1896                         netdev_info(tp->dev, "EEE is %s\n",
1897                                     tp->setlpicnt ? "enabled" : "disabled");
1898
1899                 tg3_ump_link_report(tp);
1900         }
1901
1902         tp->link_up = netif_carrier_ok(tp->dev);
1903 }
1904
1905 static u32 tg3_decode_flowctrl_1000T(u32 adv)
1906 {
1907         u32 flowctrl = 0;
1908
1909         if (adv & ADVERTISE_PAUSE_CAP) {
1910                 flowctrl |= FLOW_CTRL_RX;
1911                 if (!(adv & ADVERTISE_PAUSE_ASYM))
1912                         flowctrl |= FLOW_CTRL_TX;
1913         } else if (adv & ADVERTISE_PAUSE_ASYM)
1914                 flowctrl |= FLOW_CTRL_TX;
1915
1916         return flowctrl;
1917 }
1918
1919 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1920 {
1921         u16 miireg;
1922
1923         if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1924                 miireg = ADVERTISE_1000XPAUSE;
1925         else if (flow_ctrl & FLOW_CTRL_TX)
1926                 miireg = ADVERTISE_1000XPSE_ASYM;
1927         else if (flow_ctrl & FLOW_CTRL_RX)
1928                 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1929         else
1930                 miireg = 0;
1931
1932         return miireg;
1933 }
1934
1935 static u32 tg3_decode_flowctrl_1000X(u32 adv)
1936 {
1937         u32 flowctrl = 0;
1938
1939         if (adv & ADVERTISE_1000XPAUSE) {
1940                 flowctrl |= FLOW_CTRL_RX;
1941                 if (!(adv & ADVERTISE_1000XPSE_ASYM))
1942                         flowctrl |= FLOW_CTRL_TX;
1943         } else if (adv & ADVERTISE_1000XPSE_ASYM)
1944                 flowctrl |= FLOW_CTRL_TX;
1945
1946         return flowctrl;
1947 }
1948
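/* Resolve TX/RX pause from the local and link-partner 1000BASE-X
 * advertisements, following the IEEE 802.3 pause resolution rules:
 * symmetric pause when both sides advertise PAUSE, asymmetric pause
 * when both advertise ASYM and exactly one side also advertises
 * PAUSE.
 */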
1949 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1950 {
1951         u8 cap = 0;
1952
1953         if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1954                 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1955         } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1956                 if (lcladv & ADVERTISE_1000XPAUSE)
1957                         cap = FLOW_CTRL_RX;
1958                 if (rmtadv & ADVERTISE_1000XPAUSE)
1959                         cap = FLOW_CTRL_TX;
1960         }
1961
1962         return cap;
1963 }
1964
1965 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1966 {
1967         u8 autoneg;
1968         u8 flowctrl = 0;
1969         u32 old_rx_mode = tp->rx_mode;
1970         u32 old_tx_mode = tp->tx_mode;
1971
1972         if (tg3_flag(tp, USE_PHYLIB))
1973                 autoneg = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)->autoneg;
1974         else
1975                 autoneg = tp->link_config.autoneg;
1976
1977         if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1978                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1979                         flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1980                 else
1981                         flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1982         } else
1983                 flowctrl = tp->link_config.flowctrl;
1984
1985         tp->link_config.active_flowctrl = flowctrl;
1986
1987         if (flowctrl & FLOW_CTRL_RX)
1988                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1989         else
1990                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1991
1992         if (old_rx_mode != tp->rx_mode)
1993                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1994
1995         if (flowctrl & FLOW_CTRL_TX)
1996                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1997         else
1998                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1999
2000         if (old_tx_mode != tp->tx_mode)
2001                 tw32_f(MAC_TX_MODE, tp->tx_mode);
2002 }
2003
2004 static void tg3_adjust_link(struct net_device *dev)
2005 {
2006         u8 oldflowctrl, linkmesg = 0;
2007         u32 mac_mode, lcl_adv, rmt_adv;
2008         struct tg3 *tp = netdev_priv(dev);
2009         struct phy_device *phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2010
2011         spin_lock_bh(&tp->lock);
2012
2013         mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
2014                                     MAC_MODE_HALF_DUPLEX);
2015
2016         oldflowctrl = tp->link_config.active_flowctrl;
2017
2018         if (phydev->link) {
2019                 lcl_adv = 0;
2020                 rmt_adv = 0;
2021
2022                 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
2023                         mac_mode |= MAC_MODE_PORT_MODE_MII;
2024                 else if (phydev->speed == SPEED_1000 ||
2025                          tg3_asic_rev(tp) != ASIC_REV_5785)
2026                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
2027                 else
2028                         mac_mode |= MAC_MODE_PORT_MODE_MII;
2029
2030                 if (phydev->duplex == DUPLEX_HALF)
2031                         mac_mode |= MAC_MODE_HALF_DUPLEX;
2032                 else {
2033                         lcl_adv = mii_advertise_flowctrl(
2034                                   tp->link_config.flowctrl);
2035
2036                         if (phydev->pause)
2037                                 rmt_adv = LPA_PAUSE_CAP;
2038                         if (phydev->asym_pause)
2039                                 rmt_adv |= LPA_PAUSE_ASYM;
2040                 }
2041
2042                 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
2043         } else
2044                 mac_mode |= MAC_MODE_PORT_MODE_GMII;
2045
2046         if (mac_mode != tp->mac_mode) {
2047                 tp->mac_mode = mac_mode;
2048                 tw32_f(MAC_MODE, tp->mac_mode);
2049                 udelay(40);
2050         }
2051
2052         if (tg3_asic_rev(tp) == ASIC_REV_5785) {
2053                 if (phydev->speed == SPEED_10)
2054                         tw32(MAC_MI_STAT,
2055                              MAC_MI_STAT_10MBPS_MODE |
2056                              MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2057                 else
2058                         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2059         }
2060
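        /* Half-duplex gigabit presumably needs the extended slot time
         * used for carrier extension, hence the much larger SLOT_TIME
         * value than the usual 32 byte-times.
         */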
2061         if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
2062                 tw32(MAC_TX_LENGTHS,
2063                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2064                       (6 << TX_LENGTHS_IPG_SHIFT) |
2065                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2066         else
2067                 tw32(MAC_TX_LENGTHS,
2068                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2069                       (6 << TX_LENGTHS_IPG_SHIFT) |
2070                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2071
2072         if (phydev->link != tp->old_link ||
2073             phydev->speed != tp->link_config.active_speed ||
2074             phydev->duplex != tp->link_config.active_duplex ||
2075             oldflowctrl != tp->link_config.active_flowctrl)
2076                 linkmesg = 1;
2077
2078         tp->old_link = phydev->link;
2079         tp->link_config.active_speed = phydev->speed;
2080         tp->link_config.active_duplex = phydev->duplex;
2081
2082         spin_unlock_bh(&tp->lock);
2083
2084         if (linkmesg)
2085                 tg3_link_report(tp);
2086 }
2087
2088 static int tg3_phy_init(struct tg3 *tp)
2089 {
2090         struct phy_device *phydev;
2091
2092         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
2093                 return 0;
2094
2095         /* Bring the PHY back to a known state. */
2096         tg3_bmcr_reset(tp);
2097
2098         phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2099
2100         /* Attach the MAC to the PHY. */
2101         phydev = phy_connect(tp->dev, phydev_name(phydev),
2102                              tg3_adjust_link, phydev->interface);
2103         if (IS_ERR(phydev)) {
2104                 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
2105                 return PTR_ERR(phydev);
2106         }
2107
2108         /* Mask with MAC supported features. */
2109         switch (phydev->interface) {
2110         case PHY_INTERFACE_MODE_GMII:
2111         case PHY_INTERFACE_MODE_RGMII:
2112                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
2113                         phydev->supported &= (PHY_GBIT_FEATURES |
2114                                               SUPPORTED_Pause |
2115                                               SUPPORTED_Asym_Pause);
2116                         break;
2117                 }
2118                 /* fallthru */
2119         case PHY_INTERFACE_MODE_MII:
2120                 phydev->supported &= (PHY_BASIC_FEATURES |
2121                                       SUPPORTED_Pause |
2122                                       SUPPORTED_Asym_Pause);
2123                 break;
2124         default:
2125                 phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2126                 return -EINVAL;
2127         }
2128
2129         tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
2130
2131         phydev->advertising = phydev->supported;
2132
2133         phy_attached_info(phydev);
2134
2135         return 0;
2136 }
2137
2138 static void tg3_phy_start(struct tg3 *tp)
2139 {
2140         struct phy_device *phydev;
2141
2142         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2143                 return;
2144
2145         phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2146
2147         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2148                 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
2149                 phydev->speed = tp->link_config.speed;
2150                 phydev->duplex = tp->link_config.duplex;
2151                 phydev->autoneg = tp->link_config.autoneg;
2152                 phydev->advertising = tp->link_config.advertising;
2153         }
2154
2155         phy_start(phydev);
2156
2157         phy_start_aneg(phydev);
2158 }
2159
2160 static void tg3_phy_stop(struct tg3 *tp)
2161 {
2162         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2163                 return;
2164
2165         phy_stop(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2166 }
2167
2168 static void tg3_phy_fini(struct tg3 *tp)
2169 {
2170         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
2171                 phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2172                 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
2173         }
2174 }
2175
2176 static int tg3_phy_set_extloopbk(struct tg3 *tp)
2177 {
2178         int err;
2179         u32 val;
2180
2181         if (tp->phy_flags & TG3_PHYFLG_IS_FET)
2182                 return 0;
2183
2184         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2185                 /* Cannot do read-modify-write on 5401 */
2186                 err = tg3_phy_auxctl_write(tp,
2187                                            MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2188                                            MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
2189                                            0x4c20);
2190                 goto done;
2191         }
2192
2193         err = tg3_phy_auxctl_read(tp,
2194                                   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2195         if (err)
2196                 return err;
2197
2198         val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
2199         err = tg3_phy_auxctl_write(tp,
2200                                    MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
2201
2202 done:
2203         return err;
2204 }
2205
2206 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
2207 {
2208         u32 phytest;
2209
2210         if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2211                 u32 phy;
2212
2213                 tg3_writephy(tp, MII_TG3_FET_TEST,
2214                              phytest | MII_TG3_FET_SHADOW_EN);
2215                 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
2216                         if (enable)
2217                                 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
2218                         else
2219                                 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
2220                         tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
2221                 }
2222                 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2223         }
2224 }
2225
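/* Enable or disable the PHY's auto power-down (APD) feature, going
 * through the FET shadow registers on FET-style PHYs.
 */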
2226 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
2227 {
2228         u32 reg;
2229
2230         if (!tg3_flag(tp, 5705_PLUS) ||
2231             (tg3_flag(tp, 5717_PLUS) &&
2232              (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2233                 return;
2234
2235         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2236                 tg3_phy_fet_toggle_apd(tp, enable);
2237                 return;
2238         }
2239
2240         reg = MII_TG3_MISC_SHDW_SCR5_LPED |
2241               MII_TG3_MISC_SHDW_SCR5_DLPTLM |
2242               MII_TG3_MISC_SHDW_SCR5_SDTL |
2243               MII_TG3_MISC_SHDW_SCR5_C125OE;
2244         if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
2245                 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2246
2247         tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_SCR5_SEL, reg);
2248
2250         reg = MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2251         if (enable)
2252                 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2253
2254         tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_APD_SEL, reg);
2255 }
2256
2257 static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable)
2258 {
2259         u32 phy;
2260
2261         if (!tg3_flag(tp, 5705_PLUS) ||
2262             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2263                 return;
2264
2265         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2266                 u32 ephy;
2267
2268                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2269                         u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2270
2271                         tg3_writephy(tp, MII_TG3_FET_TEST,
2272                                      ephy | MII_TG3_FET_SHADOW_EN);
2273                         if (!tg3_readphy(tp, reg, &phy)) {
2274                                 if (enable)
2275                                         phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2276                                 else
2277                                         phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2278                                 tg3_writephy(tp, reg, phy);
2279                         }
2280                         tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2281                 }
2282         } else {
2283                 int ret;
2284
2285                 ret = tg3_phy_auxctl_read(tp,
2286                                           MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2287                 if (!ret) {
2288                         if (enable)
2289                                 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2290                         else
2291                                 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2292                         tg3_phy_auxctl_write(tp,
2293                                              MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2294                 }
2295         }
2296 }
2297
2298 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2299 {
2300         int ret;
2301         u32 val;
2302
2303         if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2304                 return;
2305
2306         ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2307         if (!ret)
2308                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2309                                      val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2310 }
2311
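/* Program PHY DSP tap and gain values from the chip's one-time-
 * programmable (OTP) fuse data, if any was read at probe time.
 */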
2312 static void tg3_phy_apply_otp(struct tg3 *tp)
2313 {
2314         u32 otp, phy;
2315
2316         if (!tp->phy_otp)
2317                 return;
2318
2319         otp = tp->phy_otp;
2320
2321         if (tg3_phy_toggle_auxctl_smdsp(tp, true))
2322                 return;
2323
2324         phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2325         phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2326         tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2327
2328         phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2329               ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2330         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2331
2332         phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2333         phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2334         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2335
2336         phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2337         tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2338
2339         phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2340         tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2341
2342         phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2343               ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2344         tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2345
2346         tg3_phy_toggle_auxctl_smdsp(tp, false);
2347 }
2348
2349 static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_eee *eee)
2350 {
2351         u32 val;
2352         struct ethtool_eee *dest = &tp->eee;
2353
2354         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2355                 return;
2356
2357         if (eee)
2358                 dest = eee;
2359
2360         if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, TG3_CL45_D7_EEERES_STAT, &val))
2361                 return;
2362
2363         /* Pull eee_active */
2364         if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2365             val == TG3_CL45_D7_EEERES_STAT_LP_100TX) {
2366                 dest->eee_active = 1;
2367         } else
2368                 dest->eee_active = 0;
2369
2370         /* Pull lp advertised settings */
2371         if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, &val))
2372                 return;
2373         dest->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2374
2375         /* Pull advertised and eee_enabled settings */
2376         if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val))
2377                 return;
2378         dest->eee_enabled = !!val;
2379         dest->advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2380
2381         /* Pull tx_lpi_enabled */
2382         val = tr32(TG3_CPMU_EEE_MODE);
2383         dest->tx_lpi_enabled = !!(val & TG3_CPMU_EEEMD_LPI_IN_TX);
2384
2385         /* Pull lpi timer value */
2386         dest->tx_lpi_timer = tr32(TG3_CPMU_EEE_DBTMR1) & 0xffff;
2387 }
2388
2389 static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up)
2390 {
2391         u32 val;
2392
2393         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2394                 return;
2395
2396         tp->setlpicnt = 0;
2397
2398         if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2399             current_link_up &&
2400             tp->link_config.active_duplex == DUPLEX_FULL &&
2401             (tp->link_config.active_speed == SPEED_100 ||
2402              tp->link_config.active_speed == SPEED_1000)) {
2403                 u32 eeectl;
2404
2405                 if (tp->link_config.active_speed == SPEED_1000)
2406                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2407                 else
2408                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2409
2410                 tw32(TG3_CPMU_EEE_CTRL, eeectl);
2411
2412                 tg3_eee_pull_config(tp, NULL);
2413                 if (tp->eee.eee_active)
2414                         tp->setlpicnt = 2;
2415         }
2416
2417         if (!tp->setlpicnt) {
2418                 if (current_link_up &&
2419                    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2420                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2421                         tg3_phy_toggle_auxctl_smdsp(tp, false);
2422                 }
2423
2424                 val = tr32(TG3_CPMU_EEE_MODE);
2425                 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2426         }
2427 }
2428
2429 static void tg3_phy_eee_enable(struct tg3 *tp)
2430 {
2431         u32 val;
2432
2433         if (tp->link_config.active_speed == SPEED_1000 &&
2434             (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2435              tg3_asic_rev(tp) == ASIC_REV_5719 ||
2436              tg3_flag(tp, 57765_CLASS)) &&
2437             !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2438                 val = MII_TG3_DSP_TAP26_ALNOKO |
2439                       MII_TG3_DSP_TAP26_RMRXSTO;
2440                 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2441                 tg3_phy_toggle_auxctl_smdsp(tp, false);
2442         }
2443
2444         val = tr32(TG3_CPMU_EEE_MODE);
2445         tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2446 }
2447
2448 static int tg3_wait_macro_done(struct tg3 *tp)
2449 {
2450         int limit = 100;
2451
2452         while (limit--) {
2453                 u32 tmp32;
2454
2455                 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2456                         if ((tmp32 & 0x1000) == 0)
2457                                 break;
2458                 }
2459         }
2460         if (limit < 0)
2461                 return -EBUSY;
2462
2463         return 0;
2464 }
2465
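/* Write a known test pattern to each of the four DSP channels and
 * read it back to verify it.  A macro timeout requests a fresh PHY
 * reset via *resetp; both timeouts and mismatches return -EBUSY.
 */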
2466 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2467 {
2468         static const u32 test_pat[4][6] = {
2469         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2470         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2471         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2472         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2473         };
2474         int chan;
2475
2476         for (chan = 0; chan < 4; chan++) {
2477                 int i;
2478
2479                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2480                              (chan * 0x2000) | 0x0200);
2481                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2482
2483                 for (i = 0; i < 6; i++)
2484                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2485                                      test_pat[chan][i]);
2486
2487                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2488                 if (tg3_wait_macro_done(tp)) {
2489                         *resetp = 1;
2490                         return -EBUSY;
2491                 }
2492
2493                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2494                              (chan * 0x2000) | 0x0200);
2495                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2496                 if (tg3_wait_macro_done(tp)) {
2497                         *resetp = 1;
2498                         return -EBUSY;
2499                 }
2500
2501                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2502                 if (tg3_wait_macro_done(tp)) {
2503                         *resetp = 1;
2504                         return -EBUSY;
2505                 }
2506
2507                 for (i = 0; i < 6; i += 2) {
2508                         u32 low, high;
2509
2510                         if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2511                             tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2512                             tg3_wait_macro_done(tp)) {
2513                                 *resetp = 1;
2514                                 return -EBUSY;
2515                         }
2516                         low &= 0x7fff;
2517                         high &= 0x000f;
2518                         if (low != test_pat[chan][i] ||
2519                             high != test_pat[chan][i+1]) {
2520                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2521                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2522                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2523
2524                                 return -EBUSY;
2525                         }
2526                 }
2527         }
2528
2529         return 0;
2530 }
2531
2532 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2533 {
2534         int chan;
2535
2536         for (chan = 0; chan < 4; chan++) {
2537                 int i;
2538
2539                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2540                              (chan * 0x2000) | 0x0200);
2541                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2542                 for (i = 0; i < 6; i++)
2543                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2544                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2545                 if (tg3_wait_macro_done(tp))
2546                         return -EBUSY;
2547         }
2548
2549         return 0;
2550 }
2551
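/* PHY DSP workaround for 5703/5704/5705: force 1000 Mbps full-duplex
 * master mode and rewrite the DSP test patterns until they verify
 * cleanly (up to 10 attempts), then restore the original transmitter
 * and master/slave settings.
 */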
2552 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2553 {
2554         u32 reg32, phy9_orig;
2555         int retries, do_phy_reset, err;
2556
2557         retries = 10;
2558         do_phy_reset = 1;
2559         do {
2560                 if (do_phy_reset) {
2561                         err = tg3_bmcr_reset(tp);
2562                         if (err)
2563                                 return err;
2564                         do_phy_reset = 0;
2565                 }
2566
2567                 /* Disable transmitter and interrupt.  */
2568                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2569                         continue;
2570
2571                 reg32 |= 0x3000;
2572                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2573
2574                 /* Set full-duplex, 1000 Mbps.  */
2575                 tg3_writephy(tp, MII_BMCR,
2576                              BMCR_FULLDPLX | BMCR_SPEED1000);
2577
2578                 /* Set to master mode.  */
2579                 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2580                         continue;
2581
2582                 tg3_writephy(tp, MII_CTRL1000,
2583                              CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2584
2585                 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
2586                 if (err)
2587                         return err;
2588
2589                 /* Block the PHY control access.  */
2590                 tg3_phydsp_write(tp, 0x8005, 0x0800);
2591
2592                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2593                 if (!err)
2594                         break;
2595         } while (--retries);
2596
2597         err = tg3_phy_reset_chanpat(tp);
2598         if (err)
2599                 return err;
2600
2601         tg3_phydsp_write(tp, 0x8005, 0x0000);
2602
2603         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2604         tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2605
2606         tg3_phy_toggle_auxctl_smdsp(tp, false);
2607
2608         tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2609
2610         err = tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32);
2611         if (err)
2612                 return err;
2613
2614         reg32 &= ~0x3000;
2615         tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2616
2617         return 0;
2618 }
2619
2620 static void tg3_carrier_off(struct tg3 *tp)
2621 {
2622         netif_carrier_off(tp->dev);
2623         tp->link_up = false;
2624 }
2625
2626 static void tg3_warn_mgmt_link_flap(struct tg3 *tp)
2627 {
2628         if (tg3_flag(tp, ENABLE_ASF))
2629                 netdev_warn(tp->dev,
2630                             "Management side-band traffic will be interrupted during phy settings change\n");
2631 }
2632
2633 /* Fully reset the tigon3 PHY and bring it back to an operational
2634  * state, reapplying any chip-specific workarounds.
2635  */
2636 static int tg3_phy_reset(struct tg3 *tp)
2637 {
2638         u32 val, cpmuctrl;
2639         int err;
2640
2641         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2642                 val = tr32(GRC_MISC_CFG);
2643                 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2644                 udelay(40);
2645         }
2646         err  = tg3_readphy(tp, MII_BMSR, &val);
2647         err |= tg3_readphy(tp, MII_BMSR, &val);
2648         if (err != 0)
2649                 return -EBUSY;
2650
2651         if (netif_running(tp->dev) && tp->link_up) {
2652                 netif_carrier_off(tp->dev);
2653                 tg3_link_report(tp);
2654         }
2655
2656         if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
2657             tg3_asic_rev(tp) == ASIC_REV_5704 ||
2658             tg3_asic_rev(tp) == ASIC_REV_5705) {
2659                 err = tg3_phy_reset_5703_4_5(tp);
2660                 if (err)
2661                         return err;
2662                 goto out;
2663         }
2664
2665         cpmuctrl = 0;
2666         if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
2667             tg3_chip_rev(tp) != CHIPREV_5784_AX) {
2668                 cpmuctrl = tr32(TG3_CPMU_CTRL);
2669                 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2670                         tw32(TG3_CPMU_CTRL,
2671                              cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2672         }
2673
2674         err = tg3_bmcr_reset(tp);
2675         if (err)
2676                 return err;
2677
2678         if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2679                 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2680                 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2681
2682                 tw32(TG3_CPMU_CTRL, cpmuctrl);
2683         }
2684
2685         if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
2686             tg3_chip_rev(tp) == CHIPREV_5761_AX) {
2687                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2688                 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2689                     CPMU_LSPD_1000MB_MACCLK_12_5) {
2690                         val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2691                         udelay(40);
2692                         tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2693                 }
2694         }
2695
2696         if (tg3_flag(tp, 5717_PLUS) &&
2697             (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2698                 return 0;
2699
2700         tg3_phy_apply_otp(tp);
2701
2702         if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2703                 tg3_phy_toggle_apd(tp, true);
2704         else
2705                 tg3_phy_toggle_apd(tp, false);
2706
2707 out:
2708         if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2709             !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2710                 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2711                 tg3_phydsp_write(tp, 0x000a, 0x0323);
2712                 tg3_phy_toggle_auxctl_smdsp(tp, false);
2713         }
2714
2715         if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2716                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2717                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2718         }
2719
2720         if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2721                 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2722                         tg3_phydsp_write(tp, 0x000a, 0x310b);
2723                         tg3_phydsp_write(tp, 0x201f, 0x9506);
2724                         tg3_phydsp_write(tp, 0x401f, 0x14e2);
2725                         tg3_phy_toggle_auxctl_smdsp(tp, false);
2726                 }
2727         } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2728                 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2729                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2730                         if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2731                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2732                                 tg3_writephy(tp, MII_TG3_TEST1,
2733                                              MII_TG3_TEST1_TRIM_EN | 0x4);
2734                         } else
2735                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2736
2737                         tg3_phy_toggle_auxctl_smdsp(tp, false);
2738                 }
2739         }
2740
2741         /* Set the Extended packet length bit (bit 14) on all
2742          * chips that support jumbo frames.  */
2743         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2744                 /* Cannot do read-modify-write on 5401 */
2745                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2746         } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2747                 /* Set bit 14 with read-modify-write to preserve other bits */
2748                 err = tg3_phy_auxctl_read(tp,
2749                                           MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2750                 if (!err)
2751                         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2752                                            val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2753         }
2754
2755         /* Set PHY register 0x10 bit 0 to high FIFO elasticity to
2756          * support transmission of jumbo frames.
2757          */
2758         if (tg3_flag(tp, JUMBO_CAPABLE)) {
2759                 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2760                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
2761                                      val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2762         }
2763
2764         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2765                 /* adjust output voltage */
2766                 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2767         }
2768
2769         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
2770                 tg3_phydsp_write(tp, 0xffb, 0x4000);
2771
2772         tg3_phy_toggle_automdix(tp, true);
2773         tg3_phy_set_wirespeed(tp);
2774         return 0;
2775 }
2776
2777 #define TG3_GPIO_MSG_DRVR_PRES           0x00000001
2778 #define TG3_GPIO_MSG_NEED_VAUX           0x00000002
2779 #define TG3_GPIO_MSG_MASK                (TG3_GPIO_MSG_DRVR_PRES | \
2780                                           TG3_GPIO_MSG_NEED_VAUX)
2781 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2782         ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2783          (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2784          (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2785          (TG3_GPIO_MSG_DRVR_PRES << 12))
2786
2787 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2788         ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2789          (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2790          (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2791          (TG3_GPIO_MSG_NEED_VAUX << 12))
2792
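/* Update this function's 4-bit power-state message in the shared
 * status word (APE scratchpad on 5717/5719, CPMU elsewhere) and
 * return the updated word so the caller can inspect the other
 * functions' messages.
 */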
2793 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2794 {
2795         u32 status, shift;
2796
2797         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2798             tg3_asic_rev(tp) == ASIC_REV_5719)
2799                 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2800         else
2801                 status = tr32(TG3_CPMU_DRV_STATUS);
2802
2803         shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2804         status &= ~(TG3_GPIO_MSG_MASK << shift);
2805         status |= (newstat << shift);
2806
2807         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2808             tg3_asic_rev(tp) == ASIC_REV_5719)
2809                 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2810         else
2811                 tw32(TG3_CPMU_DRV_STATUS, status);
2812
2813         return status >> TG3_APE_GPIO_MSG_SHIFT;
2814 }
2815
2816 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2817 {
2818         if (!tg3_flag(tp, IS_NIC))
2819                 return 0;
2820
2821         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2822             tg3_asic_rev(tp) == ASIC_REV_5719 ||
2823             tg3_asic_rev(tp) == ASIC_REV_5720) {
2824                 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2825                         return -EIO;
2826
2827                 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2828
2829                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2830                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2831
2832                 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2833         } else {
2834                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2835                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2836         }
2837
2838         return 0;
2839 }
2840
2841 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2842 {
2843         u32 grc_local_ctrl;
2844
2845         if (!tg3_flag(tp, IS_NIC) ||
2846             tg3_asic_rev(tp) == ASIC_REV_5700 ||
2847             tg3_asic_rev(tp) == ASIC_REV_5701)
2848                 return;
2849
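        /* Toggle GPIO1 high-low-high, presumably sequencing the
         * external power switch so the device remains on Vmain.
         */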
2850         grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2851
2852         tw32_wait_f(GRC_LOCAL_CTRL,
2853                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2854                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2855
2856         tw32_wait_f(GRC_LOCAL_CTRL,
2857                     grc_local_ctrl,
2858                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2859
2860         tw32_wait_f(GRC_LOCAL_CTRL,
2861                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2862                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2863 }
2864
2865 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2866 {
2867         if (!tg3_flag(tp, IS_NIC))
2868                 return;
2869
2870         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
2871             tg3_asic_rev(tp) == ASIC_REV_5701) {
2872                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2873                             (GRC_LCLCTRL_GPIO_OE0 |
2874                              GRC_LCLCTRL_GPIO_OE1 |
2875                              GRC_LCLCTRL_GPIO_OE2 |
2876                              GRC_LCLCTRL_GPIO_OUTPUT0 |
2877                              GRC_LCLCTRL_GPIO_OUTPUT1),
2878                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2879         } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2880                    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2881                 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2882                 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2883                                      GRC_LCLCTRL_GPIO_OE1 |
2884                                      GRC_LCLCTRL_GPIO_OE2 |
2885                                      GRC_LCLCTRL_GPIO_OUTPUT0 |
2886                                      GRC_LCLCTRL_GPIO_OUTPUT1 |
2887                                      tp->grc_local_ctrl;
2888                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2889                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2890
2891                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2892                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2893                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2894
2895                 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2896                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2897                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2898         } else {
2899                 u32 no_gpio2;
2900                 u32 grc_local_ctrl = 0;
2901
2902                 /* Workaround to prevent excessive current draw. */
2903                 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
2904                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2905                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2906                                     grc_local_ctrl,
2907                                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2908                 }
2909
2910                 /* On 5753 and variants, GPIO2 cannot be used. */
2911                 no_gpio2 = tp->nic_sram_data_cfg &
2912                            NIC_SRAM_DATA_CFG_NO_GPIO2;
2913
2914                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2915                                   GRC_LCLCTRL_GPIO_OE1 |
2916                                   GRC_LCLCTRL_GPIO_OE2 |
2917                                   GRC_LCLCTRL_GPIO_OUTPUT1 |
2918                                   GRC_LCLCTRL_GPIO_OUTPUT2;
2919                 if (no_gpio2) {
2920                         grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2921                                             GRC_LCLCTRL_GPIO_OUTPUT2);
2922                 }
2923                 tw32_wait_f(GRC_LOCAL_CTRL,
2924                             tp->grc_local_ctrl | grc_local_ctrl,
2925                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2926
2927                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2928
2929                 tw32_wait_f(GRC_LOCAL_CTRL,
2930                             tp->grc_local_ctrl | grc_local_ctrl,
2931                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2932
2933                 if (!no_gpio2) {
2934                         grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2935                         tw32_wait_f(GRC_LOCAL_CTRL,
2936                                     tp->grc_local_ctrl | grc_local_ctrl,
2937                                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2938                 }
2939         }
2940 }
2941
2942 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2943 {
2944         u32 msg = 0;
2945
2946         /* Serialize power state transitions */
2947         if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2948                 return;
2949
2950         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2951                 msg = TG3_GPIO_MSG_NEED_VAUX;
2952
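        /* tg3_set_function_status() records this function's vaux
         * requirement in the shared status word and returns the
         * combined flags for all PCI functions, so the checks below
         * see every port on the device.
         */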
2953         msg = tg3_set_function_status(tp, msg);
2954
2955         if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2956                 goto done;
2957
2958         if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2959                 tg3_pwrsrc_switch_to_vaux(tp);
2960         else
2961                 tg3_pwrsrc_die_with_vmain(tp);
2962
2963 done:
2964         tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2965 }
2966
2967 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2968 {
2969         bool need_vaux = false;
2970
2971         /* The GPIOs do something completely different on 57765. */
2972         if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2973                 return;
2974
2975         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2976             tg3_asic_rev(tp) == ASIC_REV_5719 ||
2977             tg3_asic_rev(tp) == ASIC_REV_5720) {
2978                 tg3_frob_aux_power_5717(tp, include_wol ?
2979                                         tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2980                 return;
2981         }
2982
2983         if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2984                 struct net_device *dev_peer;
2985
2986                 dev_peer = pci_get_drvdata(tp->pdev_peer);
2987
2988                 /* remove_one() may have been run on the peer. */
2989                 if (dev_peer) {
2990                         struct tg3 *tp_peer = netdev_priv(dev_peer);
2991
2992                         if (tg3_flag(tp_peer, INIT_COMPLETE))
2993                                 return;
2994
2995                         if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2996                             tg3_flag(tp_peer, ENABLE_ASF))
2997                                 need_vaux = true;
2998                 }
2999         }
3000
3001         if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
3002             tg3_flag(tp, ENABLE_ASF))
3003                 need_vaux = true;
3004
3005         if (need_vaux)
3006                 tg3_pwrsrc_switch_to_vaux(tp);
3007         else
3008                 tg3_pwrsrc_die_with_vmain(tp);
3009 }
3010
3011 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
3012 {
3013         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
3014                 return 1;
3015         else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
3016                 if (speed != SPEED_10)
3017                         return 1;
3018         } else if (speed == SPEED_10)
3019                 return 1;
3020
3021         return 0;
3022 }
3023
3024 static bool tg3_phy_power_bug(struct tg3 *tp)
3025 {
3026         switch (tg3_asic_rev(tp)) {
3027         case ASIC_REV_5700:
3028         case ASIC_REV_5704:
3029                 return true;
3030         case ASIC_REV_5780:
3031                 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3032                         return true;
3033                 return false;
3034         case ASIC_REV_5717:
3035                 if (!tp->pci_fn)
3036                         return true;
3037                 return false;
3038         case ASIC_REV_5719:
3039         case ASIC_REV_5720:
3040                 if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
3041                     !tp->pci_fn)
3042                         return true;
3043                 return false;
3044         }
3045
3046         return false;
3047 }
3048
3049 static bool tg3_phy_led_bug(struct tg3 *tp)
3050 {
3051         switch (tg3_asic_rev(tp)) {
3052         case ASIC_REV_5719:
3053         case ASIC_REV_5720:
3054                 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
3055                     !tp->pci_fn)
3056                         return true;
3057                 return false;
3058         }
3059
3060         return false;
3061 }
3062
3063 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
3064 {
3065         u32 val;
3066
3067         if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)
3068                 return;
3069
3070         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
3071                 if (tg3_asic_rev(tp) == ASIC_REV_5704) {
3072                         u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3073                         u32 serdes_cfg = tr32(MAC_SERDES_CFG);
3074
3075                         sg_dig_ctrl |=
3076                                 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
3077                         tw32(SG_DIG_CTRL, sg_dig_ctrl);
3078                         tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
3079                 }
3080                 return;
3081         }
3082
3083         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3084                 tg3_bmcr_reset(tp);
3085                 val = tr32(GRC_MISC_CFG);
3086                 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
3087                 udelay(40);
3088                 return;
3089         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3090                 u32 phytest;
3091                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
3092                         u32 phy;
3093
3094                         tg3_writephy(tp, MII_ADVERTISE, 0);
3095                         tg3_writephy(tp, MII_BMCR,
3096                                      BMCR_ANENABLE | BMCR_ANRESTART);
3097
3098                         tg3_writephy(tp, MII_TG3_FET_TEST,
3099                                      phytest | MII_TG3_FET_SHADOW_EN);
3100                         if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
3101                                 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
3102                                 tg3_writephy(tp,
3103                                              MII_TG3_FET_SHDW_AUXMODE4,
3104                                              phy);
3105                         }
3106                         tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
3107                 }
3108                 return;
3109         } else if (do_low_power) {
3110                 if (!tg3_phy_led_bug(tp))
3111                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
3112                                      MII_TG3_EXT_CTRL_FORCE_LED_OFF);
3113
3114                 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3115                       MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
3116                       MII_TG3_AUXCTL_PCTL_VREG_11V;
3117                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
3118         }
3119
3120         /* On some chips the PHY should not be powered down because
3121          * of hardware bugs.
3122          */
3123         if (tg3_phy_power_bug(tp))
3124                 return;
3125
3126         if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
3127             tg3_chip_rev(tp) == CHIPREV_5761_AX) {
3128                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
3129                 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
3130                 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
3131                 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
3132         }
3133
3134         tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
3135 }
3136
3137 /* tp->lock is held. */
3138 static int tg3_nvram_lock(struct tg3 *tp)
3139 {
3140         if (tg3_flag(tp, NVRAM)) {
3141                 int i;
3142
3143                 if (tp->nvram_lock_cnt == 0) {
3144                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
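                        /* Poll for the SWARB grant: up to 8000 reads
                         * spaced 20 usec apart, i.e. roughly 160 msec
                         * worst case before we give up with -ENODEV.
                         */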
3145                         for (i = 0; i < 8000; i++) {
3146                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3147                                         break;
3148                                 udelay(20);
3149                         }
3150                         if (i == 8000) {
3151                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
3152                                 return -ENODEV;
3153                         }
3154                 }
3155                 tp->nvram_lock_cnt++;
3156         }
3157         return 0;
3158 }
3159
3160 /* tp->lock is held. */
3161 static void tg3_nvram_unlock(struct tg3 *tp)
3162 {
3163         if (tg3_flag(tp, NVRAM)) {
3164                 if (tp->nvram_lock_cnt > 0)
3165                         tp->nvram_lock_cnt--;
3166                 if (tp->nvram_lock_cnt == 0)
3167                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3168         }
3169 }
3170
3171 /* tp->lock is held. */
3172 static void tg3_enable_nvram_access(struct tg3 *tp)
3173 {
3174         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3175                 u32 nvaccess = tr32(NVRAM_ACCESS);
3176
3177                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3178         }
3179 }
3180
3181 /* tp->lock is held. */
3182 static void tg3_disable_nvram_access(struct tg3 *tp)
3183 {
3184         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3185                 u32 nvaccess = tr32(NVRAM_ACCESS);
3186
3187                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3188         }
3189 }
3190
3191 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
3192                                         u32 offset, u32 *val)
3193 {
3194         u32 tmp;
3195         int i;
3196
3197         if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
3198                 return -EINVAL;
3199
3200         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
3201                                         EEPROM_ADDR_DEVID_MASK |
3202                                         EEPROM_ADDR_READ);
3203         tw32(GRC_EEPROM_ADDR,
3204              tmp |
3205              (0 << EEPROM_ADDR_DEVID_SHIFT) |
3206              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
3207               EEPROM_ADDR_ADDR_MASK) |
3208              EEPROM_ADDR_READ | EEPROM_ADDR_START);
3209
3210         for (i = 0; i < 1000; i++) {
3211                 tmp = tr32(GRC_EEPROM_ADDR);
3212
3213                 if (tmp & EEPROM_ADDR_COMPLETE)
3214                         break;
3215                 msleep(1);
3216         }
3217         if (!(tmp & EEPROM_ADDR_COMPLETE))
3218                 return -EBUSY;
3219
3220         tmp = tr32(GRC_EEPROM_DATA);
3221
3222         /*
3223          * The data will always be opposite the native endian
3224          * format.  Perform a blind byteswap to compensate.
3225          */
3226         *val = swab32(tmp);
3227
3228         return 0;
3229 }
3230
3231 #define NVRAM_CMD_TIMEOUT 10000
3232
3233 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3234 {
3235         int i;
3236
3237         tw32(NVRAM_CMD, nvram_cmd);
3238         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3239                 usleep_range(10, 40);
3240                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3241                         udelay(10);
3242                         break;
3243                 }
3244         }
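        /* With NVRAM_CMD_TIMEOUT at 10000 iterations and a 10-40 usec
         * sleep in each, the DONE poll above allows the controller
         * roughly 100-400 msec before we report -EBUSY below.
         */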
3245
3246         if (i == NVRAM_CMD_TIMEOUT)
3247                 return -EBUSY;
3248
3249         return 0;
3250 }
3251
3252 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3253 {
3254         if (tg3_flag(tp, NVRAM) &&
3255             tg3_flag(tp, NVRAM_BUFFERED) &&
3256             tg3_flag(tp, FLASH) &&
3257             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3258             (tp->nvram_jedecnum == JEDEC_ATMEL))
3259
3260                 addr = ((addr / tp->nvram_pagesize) <<
3261                         ATMEL_AT45DB0X1B_PAGE_POS) +
3262                        (addr % tp->nvram_pagesize);
3263
3264         return addr;
3265 }
3266
3267 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3268 {
3269         if (tg3_flag(tp, NVRAM) &&
3270             tg3_flag(tp, NVRAM_BUFFERED) &&
3271             tg3_flag(tp, FLASH) &&
3272             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3273             (tp->nvram_jedecnum == JEDEC_ATMEL))
3274
3275                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3276                         tp->nvram_pagesize) +
3277                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3278
3279         return addr;
3280 }
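/* Worked example for the translation pair above, assuming the 264-byte
 * page size and 9-bit page shift (ATMEL_AT45DB0X1B_PAGE_POS) of these
 * Atmel parts: logical addr 1000 falls in page 3 at offset 208, so
 * tg3_nvram_phys_addr() returns (3 << 9) + 208 = 0x6d0, and
 * tg3_nvram_logical_addr() maps 0x6d0 back to (3 * 264) + 208 = 1000.
 */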
3281
3282 /* NOTE: Data read in from NVRAM is byteswapped according to
3283  * the byteswapping settings for all other register accesses.
3284  * tg3 devices are BE devices, so on a BE machine, the data
3285  * returned will be exactly as it is seen in NVRAM.  On a LE
3286  * machine, the 32-bit value will be byteswapped.
3287  */
3288 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3289 {
3290         int ret;
3291
3292         if (!tg3_flag(tp, NVRAM))
3293                 return tg3_nvram_read_using_eeprom(tp, offset, val);
3294
3295         offset = tg3_nvram_phys_addr(tp, offset);
3296
3297         if (offset > NVRAM_ADDR_MSK)
3298                 return -EINVAL;
3299
3300         ret = tg3_nvram_lock(tp);
3301         if (ret)
3302                 return ret;
3303
3304         tg3_enable_nvram_access(tp);
3305
3306         tw32(NVRAM_ADDR, offset);
3307         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3308                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3309
3310         if (ret == 0)
3311                 *val = tr32(NVRAM_RDDATA);
3312
3313         tg3_disable_nvram_access(tp);
3314
3315         tg3_nvram_unlock(tp);
3316
3317         return ret;
3318 }
3319
3320 /* Ensures NVRAM data is in bytestream format. */
3321 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3322 {
3323         u32 v;
3324         int res = tg3_nvram_read(tp, offset, &v);
3325         if (!res)
3326                 *val = cpu_to_be32(v);
3327         return res;
3328 }
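/* For example, if a word in NVRAM holds the bytes aa bb cc dd, then
 * tg3_nvram_read() yields the u32 0xaabbccdd on either host
 * endianness, and the cpu_to_be32() above stores it back as the byte
 * stream aa bb cc dd, matching the NVRAM contents.
 */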
3329
3330 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3331                                     u32 offset, u32 len, u8 *buf)
3332 {
3333         int i, j, rc = 0;
3334         u32 val;
3335
3336         for (i = 0; i < len; i += 4) {
3337                 u32 addr;
3338                 __be32 data;
3339
3340                 addr = offset + i;
3341
3342                 memcpy(&data, buf + i, 4);
3343
3344                 /*
3345                  * The SEEPROM interface expects the data to always be opposite
3346                  * the native endian format.  We accomplish this by reversing
3347                  * all the operations that would have been performed on the
3348                  * data from a call to tg3_nvram_read_be32().
3349                  */
3350                 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3351
3352                 val = tr32(GRC_EEPROM_ADDR);
3353                 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3354
3355                 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3356                         EEPROM_ADDR_READ);
3357                 tw32(GRC_EEPROM_ADDR, val |
3358                         (0 << EEPROM_ADDR_DEVID_SHIFT) |
3359                         (addr & EEPROM_ADDR_ADDR_MASK) |
3360                         EEPROM_ADDR_START |
3361                         EEPROM_ADDR_WRITE);
3362
3363                 for (j = 0; j < 1000; j++) {
3364                         val = tr32(GRC_EEPROM_ADDR);
3365
3366                         if (val & EEPROM_ADDR_COMPLETE)
3367                                 break;
3368                         msleep(1);
3369                 }
3370                 if (!(val & EEPROM_ADDR_COMPLETE)) {
3371                         rc = -EBUSY;
3372                         break;
3373                 }
3374         }
3375
3376         return rc;
3377 }
3378
3379 /* offset and length are dword aligned */
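/* Unbuffered flash parts take a read-modify-write cycle: the enclosing
 * page is read back with tg3_nvram_read_be32(), the new bytes are
 * merged in, the page is erased after a write-enable command, and the
 * merged page is then programmed word by word (after another write
 * enable) with NVRAM_CMD_FIRST on the first word and NVRAM_CMD_LAST on
 * the last word of the page.
 */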
3380 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3381                 u8 *buf)
3382 {
3383         int ret = 0;
3384         u32 pagesize = tp->nvram_pagesize;
3385         u32 pagemask = pagesize - 1;
3386         u32 nvram_cmd;
3387         u8 *tmp;
3388
3389         tmp = kmalloc(pagesize, GFP_KERNEL);
3390         if (tmp == NULL)
3391                 return -ENOMEM;
3392
3393         while (len) {
3394                 int j;
3395                 u32 phy_addr, page_off, size;
3396
3397                 phy_addr = offset & ~pagemask;
3398
3399                 for (j = 0; j < pagesize; j += 4) {
3400                         ret = tg3_nvram_read_be32(tp, phy_addr + j,
3401                                                   (__be32 *) (tmp + j));
3402                         if (ret)
3403                                 break;
3404                 }
3405                 if (ret)
3406                         break;
3407
3408                 page_off = offset & pagemask;
3409                 size = pagesize;
3410                 if (len < size)
3411                         size = len;
3412
3413                 len -= size;
3414
3415                 memcpy(tmp + page_off, buf, size);
3416
3417                 offset = offset + (pagesize - page_off);
3418
3419                 tg3_enable_nvram_access(tp);
3420
3421                 /*
3422                  * Before we can erase the flash page, we need
3423                  * to issue a special "write enable" command.
3424                  */
3425                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3426
3427                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3428                         break;
3429
3430                 /* Erase the target page */
3431                 tw32(NVRAM_ADDR, phy_addr);
3432
3433                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3434                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3435
3436                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3437                         break;
3438
3439                 /* Issue another write enable to start the write. */
3440                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3441
3442                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3443                         break;
3444
3445                 for (j = 0; j < pagesize; j += 4) {
3446                         __be32 data;
3447
3448                         data = *((__be32 *) (tmp + j));
3449
3450                         tw32(NVRAM_WRDATA, be32_to_cpu(data));
3451
3452                         tw32(NVRAM_ADDR, phy_addr + j);
3453
3454                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3455                                 NVRAM_CMD_WR;
3456
3457                         if (j == 0)
3458                                 nvram_cmd |= NVRAM_CMD_FIRST;
3459                         else if (j == (pagesize - 4))
3460                                 nvram_cmd |= NVRAM_CMD_LAST;
3461
3462                         ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3463                         if (ret)
3464                                 break;
3465                 }
3466                 if (ret)
3467                         break;
3468         }
3469
3470         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3471         tg3_nvram_exec_cmd(tp, nvram_cmd);
3472
3473         kfree(tmp);
3474
3475         return ret;
3476 }
3477
3478 /* offset and length are dword aligned */
3479 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3480                 u8 *buf)
3481 {
3482         int i, ret = 0;
3483
3484         for (i = 0; i < len; i += 4, offset += 4) {
3485                 u32 page_off, phy_addr, nvram_cmd;
3486                 __be32 data;
3487
3488                 memcpy(&data, buf + i, 4);
3489                 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3490
3491                 page_off = offset % tp->nvram_pagesize;
3492
3493                 phy_addr = tg3_nvram_phys_addr(tp, offset);
3494
3495                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3496
3497                 if (page_off == 0 || i == 0)
3498                         nvram_cmd |= NVRAM_CMD_FIRST;
3499                 if (page_off == (tp->nvram_pagesize - 4))
3500                         nvram_cmd |= NVRAM_CMD_LAST;
3501
3502                 if (i == (len - 4))
3503                         nvram_cmd |= NVRAM_CMD_LAST;
3504
3505                 if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3506                     !tg3_flag(tp, FLASH) ||
3507                     !tg3_flag(tp, 57765_PLUS))
3508                         tw32(NVRAM_ADDR, phy_addr);
3509
3510                 if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
3511                     !tg3_flag(tp, 5755_PLUS) &&
3512                     (tp->nvram_jedecnum == JEDEC_ST) &&
3513                     (nvram_cmd & NVRAM_CMD_FIRST)) {
3514                         u32 cmd;
3515
3516                         cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3517                         ret = tg3_nvram_exec_cmd(tp, cmd);
3518                         if (ret)
3519                                 break;
3520                 }
3521                 if (!tg3_flag(tp, FLASH)) {
3522                         /* We always do complete word writes to the EEPROM. */
3523                         nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3524                 }
3525
3526                 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3527                 if (ret)
3528                         break;
3529         }
3530         return ret;
3531 }
3532
3533 /* offset and length are dword aligned */
3534 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3535 {
3536         int ret;
3537
3538         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3539                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3540                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
3541                 udelay(40);
3542         }
3543
3544         if (!tg3_flag(tp, NVRAM)) {
3545                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3546         } else {
3547                 u32 grc_mode;
3548
3549                 ret = tg3_nvram_lock(tp);
3550                 if (ret)
3551                         return ret;
3552
3553                 tg3_enable_nvram_access(tp);
3554                 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3555                         tw32(NVRAM_WRITE1, 0x406);
3556
3557                 grc_mode = tr32(GRC_MODE);
3558                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3559
3560                 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3561                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
3562                                 buf);
3563                 } else {
3564                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3565                                 buf);
3566                 }
3567
3568                 grc_mode = tr32(GRC_MODE);
3569                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3570
3571                 tg3_disable_nvram_access(tp);
3572                 tg3_nvram_unlock(tp);
3573         }
3574
3575         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3576                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3577                 udelay(40);
3578         }
3579
3580         return ret;
3581 }
3582
3583 #define RX_CPU_SCRATCH_BASE     0x30000
3584 #define RX_CPU_SCRATCH_SIZE     0x04000
3585 #define TX_CPU_SCRATCH_BASE     0x34000
3586 #define TX_CPU_SCRATCH_SIZE     0x04000
3587
3588 /* tp->lock is held. */
3589 static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
3590 {
3591         int i;
3592         const int iters = 10000;
3593
3594         for (i = 0; i < iters; i++) {
3595                 tw32(cpu_base + CPU_STATE, 0xffffffff);
3596                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3597                 if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
3598                         break;
3599                 if (pci_channel_offline(tp->pdev))
3600                         return -EBUSY;
3601         }
3602
3603         return (i == iters) ? -EBUSY : 0;
3604 }
3605
3606 /* tp->lock is held. */
3607 static int tg3_rxcpu_pause(struct tg3 *tp)
3608 {
3609         int rc = tg3_pause_cpu(tp, RX_CPU_BASE);
3610
3611         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3612         tw32_f(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
3613         udelay(10);
3614
3615         return rc;
3616 }
3617
3618 /* tp->lock is held. */
3619 static int tg3_txcpu_pause(struct tg3 *tp)
3620 {
3621         return tg3_pause_cpu(tp, TX_CPU_BASE);
3622 }
3623
3624 /* tp->lock is held. */
3625 static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
3626 {
3627         tw32(cpu_base + CPU_STATE, 0xffffffff);
3628         tw32_f(cpu_base + CPU_MODE,  0x00000000);
3629 }
3630
3631 /* tp->lock is held. */
3632 static void tg3_rxcpu_resume(struct tg3 *tp)
3633 {
3634         tg3_resume_cpu(tp, RX_CPU_BASE);
3635 }
3636
3637 /* tp->lock is held. */
3638 static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
3639 {
3640         int rc;
3641
3642         BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3643
3644         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3645                 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3646
3647                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3648                 return 0;
3649         }
3650         if (cpu_base == RX_CPU_BASE) {
3651                 rc = tg3_rxcpu_pause(tp);
3652         } else {
3653                 /*
3654                  * There is only an Rx CPU for the 5750 derivative in the
3655                  * BCM4785.
3656                  */
3657                 if (tg3_flag(tp, IS_SSB_CORE))
3658                         return 0;
3659
3660                 rc = tg3_txcpu_pause(tp);
3661         }
3662
3663         if (rc) {
3664                 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3665                            __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
3666                 return -ENODEV;
3667         }
3668
3669         /* Clear firmware's nvram arbitration. */
3670         if (tg3_flag(tp, NVRAM))
3671                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3672         return 0;
3673 }
3674
3675 static int tg3_fw_data_len(struct tg3 *tp,
3676                            const struct tg3_firmware_hdr *fw_hdr)
3677 {
3678         int fw_len;
3679
3680         /* Non-fragmented firmware has one firmware header followed by a
3681          * contiguous chunk of data to be written. The length field in that
3682          * header is not the length of the data to be written but the
3683          * complete length of the bss. The data length is therefore derived
3684          * from tp->fw->size minus the header.
3685          *
3686          * Fragmented firmware has a main header followed by multiple
3687          * fragments. Each fragment is identical to non-fragmented firmware:
3688          * a firmware header followed by a contiguous chunk of data. In
3689          * the main header, the length field is unused and set to 0xffffffff.
3690          * In each fragment header the length is the entire size of that
3691          * fragment, i.e. fragment data + header length. The data length is
3692          * therefore the length field in the header minus TG3_FW_HDR_LEN.
3693          */
3694         if (tp->fw_len == 0xffffffff)
3695                 fw_len = be32_to_cpu(fw_hdr->len);
3696         else
3697                 fw_len = tp->fw->size;
3698
3699         return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
3700 }
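/* For illustration: TG3_FW_HDR_LEN is the size of struct
 * tg3_firmware_hdr (three __be32 fields, 12 bytes), so a fragment
 * whose header reports len = 0x40c carries (0x40c - 12) / 4 = 0x100
 * 32-bit words of data.
 */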
3701
3702 /* tp->lock is held. */
3703 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3704                                  u32 cpu_scratch_base, int cpu_scratch_size,
3705                                  const struct tg3_firmware_hdr *fw_hdr)
3706 {
3707         int err, i;
3708         void (*write_op)(struct tg3 *, u32, u32);
3709         int total_len = tp->fw->size;
3710
3711         if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3712                 netdev_err(tp->dev,
3713                            "%s: trying to load TX cpu firmware on a 5705 or later chip\n",
3714                            __func__);
3715                 return -EINVAL;
3716         }
3717
3718         if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
3719                 write_op = tg3_write_mem;
3720         else
3721                 write_op = tg3_write_indirect_reg32;
3722
3723         if (tg3_asic_rev(tp) != ASIC_REV_57766) {
3724                 /* It is possible that bootcode is still loading at this point.
3725                  * Acquire the nvram lock before halting the cpu.
3726                  */
3727                 int lock_err = tg3_nvram_lock(tp);
3728                 err = tg3_halt_cpu(tp, cpu_base);
3729                 if (!lock_err)
3730                         tg3_nvram_unlock(tp);
3731                 if (err)
3732                         goto out;
3733
3734                 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3735                         write_op(tp, cpu_scratch_base + i, 0);
3736                 tw32(cpu_base + CPU_STATE, 0xffffffff);
3737                 tw32(cpu_base + CPU_MODE,
3738                      tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
3739         } else {
3740                 /* Subtract the additional main header of fragmented firmware
3741                  * and advance to the first fragment.
3742                  */
3743                 total_len -= TG3_FW_HDR_LEN;
3744                 fw_hdr++;
3745         }
3746
3747         do {
3748                 u32 *fw_data = (u32 *)(fw_hdr + 1);
3749                 for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
3750                         write_op(tp, cpu_scratch_base +
3751                                      (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
3752                                      (i * sizeof(u32)),
3753                                  be32_to_cpu(fw_data[i]));
3754
3755                 total_len -= be32_to_cpu(fw_hdr->len);
3756
3757                 /* Advance to next fragment */
3758                 fw_hdr = (struct tg3_firmware_hdr *)
3759                          ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
3760         } while (total_len > 0);
3761
3762         err = 0;
3763
3764 out:
3765         return err;
3766 }
3767
3768 /* tp->lock is held. */
3769 static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
3770 {
3771         int i;
3772         const int iters = 5;
3773
3774         tw32(cpu_base + CPU_STATE, 0xffffffff);
3775         tw32_f(cpu_base + CPU_PC, pc);
3776
3777         for (i = 0; i < iters; i++) {
3778                 if (tr32(cpu_base + CPU_PC) == pc)
3779                         break;
3780                 tw32(cpu_base + CPU_STATE, 0xffffffff);
3781                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3782                 tw32_f(cpu_base + CPU_PC, pc);
3783                 udelay(1000);
3784         }
3785
3786         return (i == iters) ? -EBUSY : 0;
3787 }
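/* The handshake above retries up to five times, 1 msec apart, so a CPU
 * that never latches the new program counter costs at most about
 * 5 msec before -EBUSY is returned.
 */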
3788
3789 /* tp->lock is held. */
3790 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3791 {
3792         const struct tg3_firmware_hdr *fw_hdr;
3793         int err;
3794
3795         fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3796
3797         /* The firmware blob starts with version numbers, followed by a
3798            start address and the complete image length, where
3799            length = end_address_of_bss - start_address_of_text.
3800            The remainder is the blob to be loaded contiguously
3801            from the start address. */
3802
3803         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3804                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3805                                     fw_hdr);
3806         if (err)
3807                 return err;
3808
3809         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3810                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3811                                     fw_hdr);
3812         if (err)
3813                 return err;
3814
3815         /* Now startup only the RX cpu. */
3816         err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
3817                                        be32_to_cpu(fw_hdr->base_addr));
3818         if (err) {
3819                 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
3820                            "should be %08x\n", __func__,
3821                            tr32(RX_CPU_BASE + CPU_PC),
3822                            be32_to_cpu(fw_hdr->base_addr));
3823                 return -ENODEV;
3824         }
3825
3826         tg3_rxcpu_resume(tp);
3827
3828         return 0;
3829 }
3830
3831 static int tg3_validate_rxcpu_state(struct tg3 *tp)
3832 {
3833         const int iters = 1000;
3834         int i;
3835         u32 val;
3836
3837         /* Wait for the boot code to complete initialization and enter the
3838          * service loop. It is then safe to download service patches.
3839          */
3840         for (i = 0; i < iters; i++) {
3841                 if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
3842                         break;
3843
3844                 udelay(10);
3845         }
3846
3847         if (i == iters) {
3848                 netdev_err(tp->dev, "Boot code not ready for service patches\n");
3849                 return -EBUSY;
3850         }
3851
3852         val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
3853         if (val & 0xff) {
3854                 netdev_warn(tp->dev,
3855                             "Other patches exist. Not downloading EEE patch\n");
3856                 return -EEXIST;
3857         }
3858
3859         return 0;
3860 }
3861
3862 /* tp->lock is held. */
3863 static void tg3_load_57766_firmware(struct tg3 *tp)
3864 {
3865         struct tg3_firmware_hdr *fw_hdr;
3866
3867         if (!tg3_flag(tp, NO_NVRAM))
3868                 return;
3869
3870         if (tg3_validate_rxcpu_state(tp))
3871                 return;
3872
3873         if (!tp->fw)
3874                 return;
3875
3876         /* This firmware blob has a different format from older firmware
3877          * releases, as described below. The main difference is that the
3878          * data is fragmented and written to non-contiguous locations.
3879          *
3880          * In the beginning there is a firmware header identical to other
3881          * firmware, consisting of version, base addr and length. The length
3882          * here is unused and set to 0xffffffff.
3883          *
3884          * This is followed by a series of firmware fragments, each of which
3885          * is individually identical to previous firmware, i.e. a firmware
3886          * header followed by the data for that fragment. The version
3887          * field of the individual fragment headers is unused.
3888          */
3889
3890         fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3891         if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
3892                 return;
3893
3894         if (tg3_rxcpu_pause(tp))
3895                 return;
3896
3897         /* tg3_load_firmware_cpu() will always succeed for the 57766 */
3898         tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);
3899
3900         tg3_rxcpu_resume(tp);
3901 }
3902
3903 /* tp->lock is held. */
3904 static int tg3_load_tso_firmware(struct tg3 *tp)
3905 {
3906         const struct tg3_firmware_hdr *fw_hdr;
3907         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3908         int err;
3909
3910         if (!tg3_flag(tp, FW_TSO))
3911                 return 0;
3912
3913         fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3914
3915         /* The firmware blob starts with version numbers, followed by a
3916            start address and the complete image length, where
3917            length = end_address_of_bss - start_address_of_text.
3918            The remainder is the blob to be loaded contiguously
3919            from the start address. */
3920
3921         cpu_scratch_size = tp->fw_len;
3922
3923         if (tg3_asic_rev(tp) == ASIC_REV_5705) {
3924                 cpu_base = RX_CPU_BASE;
3925                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3926         } else {
3927                 cpu_base = TX_CPU_BASE;
3928                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3929                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3930         }
3931
3932         err = tg3_load_firmware_cpu(tp, cpu_base,
3933                                     cpu_scratch_base, cpu_scratch_size,
3934                                     fw_hdr);
3935         if (err)
3936                 return err;
3937
3938         /* Now startup the cpu. */
3939         err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
3940                                        be32_to_cpu(fw_hdr->base_addr));
3941         if (err) {
3942                 netdev_err(tp->dev,
3943                            "%s fails to set CPU PC, is %08x should be %08x\n",
3944                            __func__, tr32(cpu_base + CPU_PC),
3945                            be32_to_cpu(fw_hdr->base_addr));
3946                 return -ENODEV;
3947         }
3948
3949         tg3_resume_cpu(tp, cpu_base);
3950         return 0;
3951 }
3952
3953 /* tp->lock is held. */
3954 static void __tg3_set_one_mac_addr(struct tg3 *tp, u8 *mac_addr, int index)
3955 {
3956         u32 addr_high, addr_low;
3957
3958         addr_high = ((mac_addr[0] << 8) | mac_addr[1]);
3959         addr_low = ((mac_addr[2] << 24) | (mac_addr[3] << 16) |
3960                     (mac_addr[4] <<  8) | mac_addr[5]);
3961
3962         if (index < 4) {
3963                 tw32(MAC_ADDR_0_HIGH + (index * 8), addr_high);
3964                 tw32(MAC_ADDR_0_LOW + (index * 8), addr_low);
3965         } else {
3966                 index -= 4;
3967                 tw32(MAC_EXTADDR_0_HIGH + (index * 8), addr_high);
3968                 tw32(MAC_EXTADDR_0_LOW + (index * 8), addr_low);
3969         }
3970 }
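/* Example: for MAC address 00:11:22:33:44:55 the register pair becomes
 * addr_high = 0x0011 and addr_low = 0x22334455, i.e. the high register
 * holds the first two octets and the low register the remaining four.
 */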
3971
3972 /* tp->lock is held. */
3973 static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1)
3974 {
3975         u32 addr_high;
3976         int i;
3977
3978         for (i = 0; i < 4; i++) {
3979                 if (i == 1 && skip_mac_1)
3980                         continue;
3981                 __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3982         }
3983
3984         if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
3985             tg3_asic_rev(tp) == ASIC_REV_5704) {
3986                 for (i = 4; i < 16; i++)
3987                         __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3988         }
3989
3990         addr_high = (tp->dev->dev_addr[0] +
3991                      tp->dev->dev_addr[1] +
3992                      tp->dev->dev_addr[2] +
3993                      tp->dev->dev_addr[3] +
3994                      tp->dev->dev_addr[4] +
3995                      tp->dev->dev_addr[5]) &
3996                 TX_BACKOFF_SEED_MASK;
3997         tw32(MAC_TX_BACKOFF_SEED, addr_high);
3998 }
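/* The backoff seed is the low bits (TX_BACKOFF_SEED_MASK) of the
 * byte-wise sum of the station address; for 00:11:22:33:44:55 that sum
 * is 0x00 + 0x11 + 0x22 + 0x33 + 0x44 + 0x55 = 0xff. Seeding from the
 * MAC address decorrelates the transmit backoff of different NICs.
 */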
3999
4000 static void tg3_enable_register_access(struct tg3 *tp)
4001 {
4002         /*
4003          * Make sure register accesses (indirect or otherwise) will function
4004          * correctly.
4005          */
4006         pci_write_config_dword(tp->pdev,
4007                                TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
4008 }
4009
4010 static int tg3_power_up(struct tg3 *tp)
4011 {
4012         int err;
4013
4014         tg3_enable_register_access(tp);
4015
4016         err = pci_set_power_state(tp->pdev, PCI_D0);
4017         if (!err) {
4018                 /* Switch out of Vaux if it is a NIC */
4019                 tg3_pwrsrc_switch_to_vmain(tp);
4020         } else {
4021                 netdev_err(tp->dev, "Transition to D0 failed\n");
4022         }
4023
4024         return err;
4025 }
4026
4027 static int tg3_setup_phy(struct tg3 *, bool);
4028
4029 static int tg3_power_down_prepare(struct tg3 *tp)
4030 {
4031         u32 misc_host_ctrl;
4032         bool device_should_wake, do_low_power;
4033
4034         tg3_enable_register_access(tp);
4035
4036         /* Restore the CLKREQ setting. */
4037         if (tg3_flag(tp, CLKREQ_BUG))
4038                 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
4039                                          PCI_EXP_LNKCTL_CLKREQ_EN);
4040
4041         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
4042         tw32(TG3PCI_MISC_HOST_CTRL,
4043              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
4044
4045         device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
4046                              tg3_flag(tp, WOL_ENABLE);
4047
4048         if (tg3_flag(tp, USE_PHYLIB)) {
4049                 do_low_power = false;
4050                 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
4051                     !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4052                         struct phy_device *phydev;
4053                         u32 phyid, advertising;
4054
4055                         phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
4056
4057                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4058
4059                         tp->link_config.speed = phydev->speed;
4060                         tp->link_config.duplex = phydev->duplex;
4061                         tp->link_config.autoneg = phydev->autoneg;
4062                         tp->link_config.advertising = phydev->advertising;
4063
4064                         advertising = ADVERTISED_TP |
4065                                       ADVERTISED_Pause |
4066                                       ADVERTISED_Autoneg |
4067                                       ADVERTISED_10baseT_Half;
4068
4069                         if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
4070                                 if (tg3_flag(tp, WOL_SPEED_100MB))
4071                                         advertising |=
4072                                                 ADVERTISED_100baseT_Half |
4073                                                 ADVERTISED_100baseT_Full |
4074                                                 ADVERTISED_10baseT_Full;
4075                                 else
4076                                         advertising |= ADVERTISED_10baseT_Full;
4077                         }
4078
4079                         phydev->advertising = advertising;
4080
4081                         phy_start_aneg(phydev);
4082
4083                         phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
4084                         if (phyid != PHY_ID_BCMAC131) {
4085                                 phyid &= PHY_BCM_OUI_MASK;
4086                                 if (phyid == PHY_BCM_OUI_1 ||
4087                                     phyid == PHY_BCM_OUI_2 ||
4088                                     phyid == PHY_BCM_OUI_3)
4089                                         do_low_power = true;
4090                         }
4091                 }
4092         } else {
4093                 do_low_power = true;
4094
4095                 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
4096                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4097
4098                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
4099                         tg3_setup_phy(tp, false);
4100         }
4101
4102         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
4103                 u32 val;
4104
4105                 val = tr32(GRC_VCPU_EXT_CTRL);
4106                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
4107         } else if (!tg3_flag(tp, ENABLE_ASF)) {
4108                 int i;
4109                 u32 val;
4110
4111                 for (i = 0; i < 200; i++) {
4112                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
4113                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4114                                 break;
4115                         msleep(1);
4116                 }
4117         }
4118         if (tg3_flag(tp, WOL_CAP))
4119                 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
4120                                                      WOL_DRV_STATE_SHUTDOWN |
4121                                                      WOL_DRV_WOL |
4122                                                      WOL_SET_MAGIC_PKT);
4123
4124         if (device_should_wake) {
4125                 u32 mac_mode;
4126
4127                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
4128                         if (do_low_power &&
4129                             !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
4130                                 tg3_phy_auxctl_write(tp,
4131                                                MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
4132                                                MII_TG3_AUXCTL_PCTL_WOL_EN |
4133                                                MII_TG3_AUXCTL_PCTL_100TX_LPWR |
4134                                                MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
4135                                 udelay(40);
4136                         }
4137
4138                         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4139                                 mac_mode = MAC_MODE_PORT_MODE_GMII;
4140                         else if (tp->phy_flags &
4141                                  TG3_PHYFLG_KEEP_LINK_ON_PWRDN) {
4142                                 if (tp->link_config.active_speed == SPEED_1000)
4143                                         mac_mode = MAC_MODE_PORT_MODE_GMII;
4144                                 else
4145                                         mac_mode = MAC_MODE_PORT_MODE_MII;
4146                         } else
4147                                 mac_mode = MAC_MODE_PORT_MODE_MII;
4148
4149                         mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
4150                         if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4151                                 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
4152                                              SPEED_100 : SPEED_10;
4153                                 if (tg3_5700_link_polarity(tp, speed))
4154                                         mac_mode |= MAC_MODE_LINK_POLARITY;
4155                                 else
4156                                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
4157                         }
4158                 } else {
4159                         mac_mode = MAC_MODE_PORT_MODE_TBI;
4160                 }
4161
4162                 if (!tg3_flag(tp, 5750_PLUS))
4163                         tw32(MAC_LED_CTRL, tp->led_ctrl);
4164
4165                 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
4166                 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
4167                     (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
4168                         mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
4169
4170                 if (tg3_flag(tp, ENABLE_APE))
4171                         mac_mode |= MAC_MODE_APE_TX_EN |
4172                                     MAC_MODE_APE_RX_EN |
4173                                     MAC_MODE_TDE_ENABLE;
4174
4175                 tw32_f(MAC_MODE, mac_mode);
4176                 udelay(100);
4177
4178                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
4179                 udelay(10);
4180         }
4181
4182         if (!tg3_flag(tp, WOL_SPEED_100MB) &&
4183             (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4184              tg3_asic_rev(tp) == ASIC_REV_5701)) {
4185                 u32 base_val;
4186
4187                 base_val = tp->pci_clock_ctrl;
4188                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
4189                              CLOCK_CTRL_TXCLK_DISABLE);
4190
4191                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
4192                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
4193         } else if (tg3_flag(tp, 5780_CLASS) ||
4194                    tg3_flag(tp, CPMU_PRESENT) ||
4195                    tg3_asic_rev(tp) == ASIC_REV_5906) {
4196                 /* do nothing */
4197         } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
4198                 u32 newbits1, newbits2;
4199
4200                 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4201                     tg3_asic_rev(tp) == ASIC_REV_5701) {
4202                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
4203                                     CLOCK_CTRL_TXCLK_DISABLE |
4204                                     CLOCK_CTRL_ALTCLK);
4205                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4206                 } else if (tg3_flag(tp, 5705_PLUS)) {
4207                         newbits1 = CLOCK_CTRL_625_CORE;
4208                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
4209                 } else {
4210                         newbits1 = CLOCK_CTRL_ALTCLK;
4211                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4212                 }
4213
4214                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
4215                             40);
4216
4217                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
4218                             40);
4219
4220                 if (!tg3_flag(tp, 5705_PLUS)) {
4221                         u32 newbits3;
4222
4223                         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4224                             tg3_asic_rev(tp) == ASIC_REV_5701) {
4225                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
4226                                             CLOCK_CTRL_TXCLK_DISABLE |
4227                                             CLOCK_CTRL_44MHZ_CORE);
4228                         } else {
4229                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
4230                         }
4231
4232                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
4233                                     tp->pci_clock_ctrl | newbits3, 40);
4234                 }
4235         }
4236
4237         if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
4238                 tg3_power_down_phy(tp, do_low_power);
4239
4240         tg3_frob_aux_power(tp, true);
4241
4242         /* Workaround for unstable PLL clock */
4243         if ((!tg3_flag(tp, IS_SSB_CORE)) &&
4244             ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
4245              (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
4246                 u32 val = tr32(0x7d00);
4247
4248                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
4249                 tw32(0x7d00, val);
4250                 if (!tg3_flag(tp, ENABLE_ASF)) {
4251                         int err;
4252
4253                         err = tg3_nvram_lock(tp);
4254                         tg3_halt_cpu(tp, RX_CPU_BASE);
4255                         if (!err)
4256                                 tg3_nvram_unlock(tp);
4257                 }
4258         }
4259
4260         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
4261
4262         tg3_ape_driver_state_change(tp, RESET_KIND_SHUTDOWN);
4263
4264         return 0;
4265 }
4266
4267 static void tg3_power_down(struct tg3 *tp)
4268 {
4269         pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
4270         pci_set_power_state(tp->pdev, PCI_D3hot);
4271 }
4272
4273 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
4274 {
4275         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
4276         case MII_TG3_AUX_STAT_10HALF:
4277                 *speed = SPEED_10;
4278                 *duplex = DUPLEX_HALF;
4279                 break;
4280
4281         case MII_TG3_AUX_STAT_10FULL:
4282                 *speed = SPEED_10;
4283                 *duplex = DUPLEX_FULL;
4284                 break;
4285
4286         case MII_TG3_AUX_STAT_100HALF:
4287                 *speed = SPEED_100;
4288                 *duplex = DUPLEX_HALF;
4289                 break;
4290
4291         case MII_TG3_AUX_STAT_100FULL:
4292                 *speed = SPEED_100;
4293                 *duplex = DUPLEX_FULL;
4294                 break;
4295
4296         case MII_TG3_AUX_STAT_1000HALF:
4297                 *speed = SPEED_1000;
4298                 *duplex = DUPLEX_HALF;
4299                 break;
4300
4301         case MII_TG3_AUX_STAT_1000FULL:
4302                 *speed = SPEED_1000;
4303                 *duplex = DUPLEX_FULL;
4304                 break;
4305
4306         default:
4307                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4308                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
4309                                  SPEED_10;
4310                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
4311                                   DUPLEX_HALF;
4312                         break;
4313                 }
4314                 *speed = SPEED_UNKNOWN;
4315                 *duplex = DUPLEX_UNKNOWN;
4316                 break;
4317         }
4318 }
4319
4320 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
4321 {
4322         int err = 0;
4323         u32 val, new_adv;
4324
4325         new_adv = ADVERTISE_CSMA;
4326         new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
4327         new_adv |= mii_advertise_flowctrl(flowctrl);
4328
4329         err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
4330         if (err)
4331                 goto done;
4332
4333         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4334                 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
4335
4336                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4337                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
4338                         new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4339
4340                 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
4341                 if (err)
4342                         goto done;
4343         }
4344
4345         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4346                 goto done;
4347
4348         tw32(TG3_CPMU_EEE_MODE,
4349              tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
4350
4351         err = tg3_phy_toggle_auxctl_smdsp(tp, true);
4352         if (!err) {
4353                 u32 err2;
4354
4355                 val = 0;
4356                 /* Advertise 100-BaseTX EEE ability */
4357                 if (advertise & ADVERTISED_100baseT_Full)
4358                         val |= MDIO_AN_EEE_ADV_100TX;
4359                 /* Advertise 1000-BaseT EEE ability */
4360                 if (advertise & ADVERTISED_1000baseT_Full)
4361                         val |= MDIO_AN_EEE_ADV_1000T;
4362
4363                 if (!tp->eee.eee_enabled) {
4364                         val = 0;
4365                         tp->eee.advertised = 0;
4366                 } else {
4367                         tp->eee.advertised = advertise &
4368                                              (ADVERTISED_100baseT_Full |
4369                                               ADVERTISED_1000baseT_Full);
4370                 }
4371
4372                 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
4373                 if (err)
4374                         val = 0;
4375
4376                 switch (tg3_asic_rev(tp)) {
4377                 case ASIC_REV_5717:
4378                 case ASIC_REV_57765:
4379                 case ASIC_REV_57766:
4380                 case ASIC_REV_5719:
4381                         /* If we advertised any EEE modes above... */
4382                         if (val)
4383                                 val = MII_TG3_DSP_TAP26_ALNOKO |
4384                                       MII_TG3_DSP_TAP26_RMRXSTO |
4385                                       MII_TG3_DSP_TAP26_OPCSINPT;
4386                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
4387                         /* Fall through */
4388                 case ASIC_REV_5720:
4389                 case ASIC_REV_5762:
4390                         if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
4391                                 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
4392                                                  MII_TG3_DSP_CH34TP2_HIBW01);
4393                 }
4394
4395                 err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
4396                 if (!err)
4397                         err = err2;
4398         }
4399
4400 done:
4401         return err;
4402 }
4403
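/* Begin link bring-up on a copper PHY.  With autoneg enabled (or in
 * low-power mode) this programs the advertisement, trimmed to
 * WOL-safe speeds when powering down, and restarts negotiation;
 * otherwise it forces the configured speed/duplex through MII_BMCR.
 */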
4404 static void tg3_phy_copper_begin(struct tg3 *tp)
4405 {
4406         if (tp->link_config.autoneg == AUTONEG_ENABLE ||
4407             (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4408                 u32 adv, fc;
4409
4410                 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4411                     !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4412                         adv = ADVERTISED_10baseT_Half |
4413                               ADVERTISED_10baseT_Full;
4414                         if (tg3_flag(tp, WOL_SPEED_100MB))
4415                                 adv |= ADVERTISED_100baseT_Half |
4416                                        ADVERTISED_100baseT_Full;
4417                         if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK) {
4418                                 if (!(tp->phy_flags &
4419                                       TG3_PHYFLG_DISABLE_1G_HD_ADV))
4420                                         adv |= ADVERTISED_1000baseT_Half;
4421                                 adv |= ADVERTISED_1000baseT_Full;
4422                         }
4423
4424                         fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
4425                 } else {
4426                         adv = tp->link_config.advertising;
4427                         if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
4428                                 adv &= ~(ADVERTISED_1000baseT_Half |
4429                                          ADVERTISED_1000baseT_Full);
4430
4431                         fc = tp->link_config.flowctrl;
4432                 }
4433
4434                 tg3_phy_autoneg_cfg(tp, adv, fc);
4435
4436                 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4437                     (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4438                         /* Normally during power down we want to autonegotiate
4439                          * the lowest possible speed for WOL. However, to avoid
4440                          * link flap, we leave it untouched.
4441                          */
4442                         return;
4443                 }
4444
4445                 tg3_writephy(tp, MII_BMCR,
4446                              BMCR_ANENABLE | BMCR_ANRESTART);
4447         } else {
4448                 int i;
4449                 u32 bmcr, orig_bmcr;
4450
4451                 tp->link_config.active_speed = tp->link_config.speed;
4452                 tp->link_config.active_duplex = tp->link_config.duplex;
4453
4454                 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
4455                         /* With autoneg disabled, the 5714/5715 only link
4456                          * up when the advertisement register has the
4457                          * configured speed enabled.
4458                          */
4459                         tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
4460                 }
4461
4462                 bmcr = 0;
4463                 switch (tp->link_config.speed) {
4464                 default:
4465                 case SPEED_10:
4466                         break;
4467
4468                 case SPEED_100:
4469                         bmcr |= BMCR_SPEED100;
4470                         break;
4471
4472                 case SPEED_1000:
4473                         bmcr |= BMCR_SPEED1000;
4474                         break;
4475                 }
4476
4477                 if (tp->link_config.duplex == DUPLEX_FULL)
4478                         bmcr |= BMCR_FULLDPLX;
4479
4480                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
4481                     (bmcr != orig_bmcr)) {
4482                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
4483                         for (i = 0; i < 1500; i++) {
4484                                 u32 tmp;
4485
4486                                 udelay(10);
4487                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4488                                     tg3_readphy(tp, MII_BMSR, &tmp))
4489                                         continue;
4490                                 if (!(tmp & BMSR_LSTATUS)) {
4491                                         udelay(40);
4492                                         break;
4493                                 }
4494                         }
4495                         tg3_writephy(tp, MII_BMCR, bmcr);
4496                         udelay(40);
4497                 }
4498         }
4499 }
4500
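/* Rebuild tp->link_config (autoneg, speed, duplex, advertisements,
 * flow control) from the PHY's current register contents, so that an
 * already-established link configuration can be adopted rather than
 * renegotiated.
 */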
4501 static int tg3_phy_pull_config(struct tg3 *tp)
4502 {
4503         int err;
4504         u32 val;
4505
4506         err = tg3_readphy(tp, MII_BMCR, &val);
4507         if (err)
4508                 goto done;
4509
4510         if (!(val & BMCR_ANENABLE)) {
4511                 tp->link_config.autoneg = AUTONEG_DISABLE;
4512                 tp->link_config.advertising = 0;
4513                 tg3_flag_clear(tp, PAUSE_AUTONEG);
4514
4515                 err = -EIO;
4516
4517                 switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) {
4518                 case 0:
4519                         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4520                                 goto done;
4521
4522                         tp->link_config.speed = SPEED_10;
4523                         break;
4524                 case BMCR_SPEED100:
4525                         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4526                                 goto done;
4527
4528                         tp->link_config.speed = SPEED_100;
4529                         break;
4530                 case BMCR_SPEED1000:
4531                         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4532                                 tp->link_config.speed = SPEED_1000;
4533                                 break;
4534                         }
4535                         /* Fall through */
4536                 default:
4537                         goto done;
4538                 }
4539
4540                 if (val & BMCR_FULLDPLX)
4541                         tp->link_config.duplex = DUPLEX_FULL;
4542                 else
4543                         tp->link_config.duplex = DUPLEX_HALF;
4544
4545                 tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
4546
4547                 err = 0;
4548                 goto done;
4549         }
4550
4551         tp->link_config.autoneg = AUTONEG_ENABLE;
4552         tp->link_config.advertising = ADVERTISED_Autoneg;
4553         tg3_flag_set(tp, PAUSE_AUTONEG);
4554
4555         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4556                 u32 adv;
4557
4558                 err = tg3_readphy(tp, MII_ADVERTISE, &val);
4559                 if (err)
4560                         goto done;
4561
4562                 adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL);
4563                 tp->link_config.advertising |= adv | ADVERTISED_TP;
4564
4565                 tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val);
4566         } else {
4567                 tp->link_config.advertising |= ADVERTISED_FIBRE;
4568         }
4569
4570         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4571                 u32 adv;
4572
4573                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4574                         err = tg3_readphy(tp, MII_CTRL1000, &val);
4575                         if (err)
4576                                 goto done;
4577
4578                         adv = mii_ctrl1000_to_ethtool_adv_t(val);
4579                 } else {
4580                         err = tg3_readphy(tp, MII_ADVERTISE, &val);
4581                         if (err)
4582                                 goto done;
4583
4584                         adv = tg3_decode_flowctrl_1000X(val);
4585                         tp->link_config.flowctrl = adv;
4586
4587                         val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL);
4588                         adv = mii_adv_to_ethtool_adv_x(val);
4589                 }
4590
4591                 tp->link_config.advertising |= adv;
4592         }
4593
4594 done:
4595         return err;
4596 }
4597
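/* BCM5401-specific DSP initialization.  The register/value pairs
 * below appear to be vendor-provided magic numbers.
 */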
4598 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4599 {
4600         int err;
4601
4602         /* Turn off tap power management and set the
4603          * extended packet length bit. */
4604         err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4605
4606         err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4607         err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4608         err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4609         err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4610         err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4611
4612         udelay(40);
4613
4614         return err;
4615 }
4616
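/* Return true if the EEE configuration currently in the PHY matches
 * the settings requested in tp->eee (or if the PHY is not
 * EEE-capable); a false return means a PHY reset is needed to apply
 * the new settings.
 */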
4617 static bool tg3_phy_eee_config_ok(struct tg3 *tp)
4618 {
4619         struct ethtool_eee eee;
4620
4621         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4622                 return true;
4623
4624         tg3_eee_pull_config(tp, &eee);
4625
4626         if (tp->eee.eee_enabled) {
4627                 if (tp->eee.advertised != eee.advertised ||
4628                     tp->eee.tx_lpi_timer != eee.tx_lpi_timer ||
4629                     tp->eee.tx_lpi_enabled != eee.tx_lpi_enabled)
4630                         return false;
4631         } else {
4632                 /* EEE is disabled but the PHY is still advertising it */
4633                 if (eee.advertised)
4634                         return false;
4635         }
4636
4637         return true;
4638 }
4639
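/* Check that the PHY's advertisement registers (MII_ADVERTISE and,
 * on gigabit-capable PHYs, MII_CTRL1000) still match what we intend
 * to advertise; the local advertisement word is returned through
 * *lcladv for flow-control resolution.
 */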
4640 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4641 {
4642         u32 advmsk, tgtadv, advertising;
4643
4644         advertising = tp->link_config.advertising;
4645         tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4646
4647         advmsk = ADVERTISE_ALL;
4648         if (tp->link_config.active_duplex == DUPLEX_FULL) {
4649                 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4650                 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4651         }
4652
4653         if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4654                 return false;
4655
4656         if ((*lcladv & advmsk) != tgtadv)
4657                 return false;
4658
4659         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4660                 u32 tg3_ctrl;
4661
4662                 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4663
4664                 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4665                         return false;
4666
4667                 if (tgtadv &&
4668                     (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4669                      tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
4670                         tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4671                         tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4672                                      CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4673                 } else {
4674                         tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4675                 }
4676
4677                 if (tg3_ctrl != tgtadv)
4678                         return false;
4679         }
4680
4681         return true;
4682 }
4683
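/* Fetch the link partner's advertisement (MII_LPA, plus MII_STAT1000
 * on gigabit PHYs), store it in ethtool form in
 * tp->link_config.rmt_adv, and return the raw LPA word via *rmtadv.
 */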
4684 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4685 {
4686         u32 lpeth = 0;
4687
4688         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4689                 u32 val;
4690
4691                 if (tg3_readphy(tp, MII_STAT1000, &val))
4692                         return false;
4693
4694                 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4695         }
4696
4697         if (tg3_readphy(tp, MII_LPA, rmtadv))
4698                 return false;
4699
4700         lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4701         tp->link_config.rmt_adv = lpeth;
4702
4703         return true;
4704 }
4705
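/* Propagate a link state transition to the networking core (carrier
 * on/off) and log it; returns true only if the state actually
 * changed.
 */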
4706 static bool tg3_test_and_report_link_chg(struct tg3 *tp, bool curr_link_up)
4707 {
4708         if (curr_link_up != tp->link_up) {
4709                 if (curr_link_up) {
4710                         netif_carrier_on(tp->dev);
4711                 } else {
4712                         netif_carrier_off(tp->dev);
4713                         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4714                                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4715                 }
4716
4717                 tg3_link_report(tp);
4718                 return true;
4719         }
4720
4721         return false;
4722 }
4723
4724 static void tg3_clear_mac_status(struct tg3 *tp)
4725 {
4726         tw32(MAC_EVENT, 0);
4727
4728         tw32_f(MAC_STATUS,
4729                MAC_STATUS_SYNC_CHANGED |
4730                MAC_STATUS_CFG_CHANGED |
4731                MAC_STATUS_MI_COMPLETION |
4732                MAC_STATUS_LNKSTATE_CHANGED);
4733         udelay(40);
4734 }
4735
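/* Program the CPMU EEE control, link-idle and debounce-timer
 * registers from tp->eee.  Writing 0 to TG3_CPMU_EEE_MODE (the
 * !eee_enabled case) disables EEE entirely.
 */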
4736 static void tg3_setup_eee(struct tg3 *tp)
4737 {
4738         u32 val;
4739
4740         val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
4741               TG3_CPMU_EEE_LNKIDL_UART_IDL;
4742         if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
4743                 val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
4744
4745         tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
4746
4747         tw32_f(TG3_CPMU_EEE_CTRL,
4748                TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
4749
4750         val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
4751               (tp->eee.tx_lpi_enabled ? TG3_CPMU_EEEMD_LPI_IN_TX : 0) |
4752               TG3_CPMU_EEEMD_LPI_IN_RX |
4753               TG3_CPMU_EEEMD_EEE_ENABLE;
4754
4755         if (tg3_asic_rev(tp) != ASIC_REV_5717)
4756                 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
4757
4758         if (tg3_flag(tp, ENABLE_APE))
4759                 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
4760
4761         tw32_f(TG3_CPMU_EEE_MODE, tp->eee.eee_enabled ? val : 0);
4762
4763         tw32_f(TG3_CPMU_EEE_DBTMR1,
4764                TG3_CPMU_DBTMR1_PCIEXIT_2047US |
4765                (tp->eee.tx_lpi_timer & 0xffff));
4766
4767         tw32_f(TG3_CPMU_EEE_DBTMR2,
4768                TG3_CPMU_DBTMR2_APE_TX_2047US |
4769                TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
4770 }
4771
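/* Main link setup path for copper PHYs: reset the PHY if needed,
 * wait for link, derive speed/duplex from the aux status register,
 * validate the autoneg results, and program MAC_MODE, LEDs and flow
 * control to match.  Always returns 0; the link change itself is
 * reported via tg3_test_and_report_link_chg().
 */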
4772 static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
4773 {
4774         bool current_link_up;
4775         u32 bmsr, val;
4776         u32 lcl_adv, rmt_adv;
4777         u16 current_speed;
4778         u8 current_duplex;
4779         int i, err;
4780
4781         tg3_clear_mac_status(tp);
4782
4783         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4784                 tw32_f(MAC_MI_MODE,
4785                        (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4786                 udelay(80);
4787         }
4788
4789         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4790
4791         /* Some third-party PHYs need to be reset on link going
4792          * down.
4793          */
4794         if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
4795              tg3_asic_rev(tp) == ASIC_REV_5704 ||
4796              tg3_asic_rev(tp) == ASIC_REV_5705) &&
4797             tp->link_up) {
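                /* BMSR link status is latched-low per standard MII
                 * semantics, so it is read twice here (and in similar
                 * spots below) to obtain the current state.
                 */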
4798                 tg3_readphy(tp, MII_BMSR, &bmsr);
4799                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4800                     !(bmsr & BMSR_LSTATUS))
4801                         force_reset = true;
4802         }
4803         if (force_reset)
4804                 tg3_phy_reset(tp);
4805
4806         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4807                 tg3_readphy(tp, MII_BMSR, &bmsr);
4808                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4809                     !tg3_flag(tp, INIT_COMPLETE))
4810                         bmsr = 0;
4811
4812                 if (!(bmsr & BMSR_LSTATUS)) {
4813                         err = tg3_init_5401phy_dsp(tp);
4814                         if (err)
4815                                 return err;
4816
4817                         tg3_readphy(tp, MII_BMSR, &bmsr);
4818                         for (i = 0; i < 1000; i++) {
4819                                 udelay(10);
4820                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4821                                     (bmsr & BMSR_LSTATUS)) {
4822                                         udelay(40);
4823                                         break;
4824                                 }
4825                         }
4826
4827                         if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4828                             TG3_PHY_REV_BCM5401_B0 &&
4829                             !(bmsr & BMSR_LSTATUS) &&
4830                             tp->link_config.active_speed == SPEED_1000) {
4831                                 err = tg3_phy_reset(tp);
4832                                 if (!err)
4833                                         err = tg3_init_5401phy_dsp(tp);
4834                                 if (err)
4835                                         return err;
4836                         }
4837                 }
4838         } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4839                    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
4840                 /* 5701 {A0,B0} CRC bug workaround */
4841                 tg3_writephy(tp, 0x15, 0x0a75);
4842                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4843                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4844                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4845         }
4846
4847         /* Clear pending interrupts... */
4848         tg3_readphy(tp, MII_TG3_ISTAT, &val);
4849         tg3_readphy(tp, MII_TG3_ISTAT, &val);
4850
4851         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4852                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4853         else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4854                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
4855
4856         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4857             tg3_asic_rev(tp) == ASIC_REV_5701) {
4858                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4859                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
4860                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4861                 else
4862                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4863         }
4864
4865         current_link_up = false;
4866         current_speed = SPEED_UNKNOWN;
4867         current_duplex = DUPLEX_UNKNOWN;
4868         tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4869         tp->link_config.rmt_adv = 0;
4870
4871         if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4872                 err = tg3_phy_auxctl_read(tp,
4873                                           MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4874                                           &val);
4875                 if (!err && !(val & (1 << 10))) {
4876                         tg3_phy_auxctl_write(tp,
4877                                              MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4878                                              val | (1 << 10));
4879                         goto relink;
4880                 }
4881         }
4882
4883         bmsr = 0;
4884         for (i = 0; i < 100; i++) {
4885                 tg3_readphy(tp, MII_BMSR, &bmsr);
4886                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4887                     (bmsr & BMSR_LSTATUS))
4888                         break;
4889                 udelay(40);
4890         }
4891
4892         if (bmsr & BMSR_LSTATUS) {
4893                 u32 aux_stat, bmcr;
4894
4895                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4896                 for (i = 0; i < 2000; i++) {
4897                         udelay(10);
4898                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4899                             aux_stat)
4900                                 break;
4901                 }
4902
4903                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4904                                              &current_speed,
4905                                              &current_duplex);
4906
4907                 bmcr = 0;
4908                 for (i = 0; i < 200; i++) {
4909                         tg3_readphy(tp, MII_BMCR, &bmcr);
4910                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
4911                                 continue;
4912                         if (bmcr && bmcr != 0x7fff)
4913                                 break;
4914                         udelay(10);
4915                 }
4916
4917                 lcl_adv = 0;
4918                 rmt_adv = 0;
4919
4920                 tp->link_config.active_speed = current_speed;
4921                 tp->link_config.active_duplex = current_duplex;
4922
4923                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4924                         bool eee_config_ok = tg3_phy_eee_config_ok(tp);
4925
4926                         if ((bmcr & BMCR_ANENABLE) &&
4927                             eee_config_ok &&
4928                             tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4929                             tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4930                                 current_link_up = true;
4931
4932                         /* EEE setting changes take effect only after a PHY
4933                          * reset.  If we have skipped a reset due to Link Flap
4934                          * Avoidance being enabled, do it now.
4935                          */
4936                         if (!eee_config_ok &&
4937                             (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
4938                             !force_reset) {
4939                                 tg3_setup_eee(tp);
4940                                 tg3_phy_reset(tp);
4941                         }
4942                 } else {
4943                         if (!(bmcr & BMCR_ANENABLE) &&
4944                             tp->link_config.speed == current_speed &&
4945                             tp->link_config.duplex == current_duplex) {
4946                                 current_link_up = true;
4947                         }
4948                 }
4949
4950                 if (current_link_up &&
4951                     tp->link_config.active_duplex == DUPLEX_FULL) {
4952                         u32 reg, bit;
4953
4954                         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4955                                 reg = MII_TG3_FET_GEN_STAT;
4956                                 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4957                         } else {
4958                                 reg = MII_TG3_EXT_STAT;
4959                                 bit = MII_TG3_EXT_STAT_MDIX;
4960                         }
4961
4962                         if (!tg3_readphy(tp, reg, &val) && (val & bit))
4963                                 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4964
4965                         tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4966                 }
4967         }
4968
4969 relink:
4970         if (!current_link_up || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4971                 tg3_phy_copper_begin(tp);
4972
4973                 if (tg3_flag(tp, ROBOSWITCH)) {
4974                         current_link_up = true;
4975                         /* FIXME: use 100 Mbit/s when a BCM5325 switch is used */
4976                         current_speed = SPEED_1000;
4977                         current_duplex = DUPLEX_FULL;
4978                         tp->link_config.active_speed = current_speed;
4979                         tp->link_config.active_duplex = current_duplex;
4980                 }
4981
4982                 tg3_readphy(tp, MII_BMSR, &bmsr);
4983                 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4984                     (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4985                         current_link_up = true;
4986         }
4987
4988         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4989         if (current_link_up) {
4990                 if (tp->link_config.active_speed == SPEED_100 ||
4991                     tp->link_config.active_speed == SPEED_10)
4992                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4993                 else
4994                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4995         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4996                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4997         else
4998                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4999
5000         /* For the 5750 core in the BCM4785 chip to work properly
5001          * in RGMII mode, the LED Control Register must be set up.
5002          */
5003         if (tg3_flag(tp, RGMII_MODE)) {
5004                 u32 led_ctrl = tr32(MAC_LED_CTRL);
5005                 led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);
5006
5007                 if (tp->link_config.active_speed == SPEED_10)
5008                         led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
5009                 else if (tp->link_config.active_speed == SPEED_100)
5010                         led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5011                                      LED_CTRL_100MBPS_ON);
5012                 else if (tp->link_config.active_speed == SPEED_1000)
5013                         led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5014                                      LED_CTRL_1000MBPS_ON);
5015
5016                 tw32(MAC_LED_CTRL, led_ctrl);
5017                 udelay(40);
5018         }
5019
5020         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5021         if (tp->link_config.active_duplex == DUPLEX_HALF)
5022                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5023
5024         if (tg3_asic_rev(tp) == ASIC_REV_5700) {
5025                 if (current_link_up &&
5026                     tg3_5700_link_polarity(tp, tp->link_config.active_speed))
5027                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
5028                 else
5029                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
5030         }
5031
5032         /* Without this setting the Netgear GA302T PHY does not
5033          * send or receive packets; the reason is unknown.
5034          */
5035         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
5036             tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
5037                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
5038                 tw32_f(MAC_MI_MODE, tp->mi_mode);
5039                 udelay(80);
5040         }
5041
5042         tw32_f(MAC_MODE, tp->mac_mode);
5043         udelay(40);
5044
5045         tg3_phy_eee_adjust(tp, current_link_up);
5046
5047         if (tg3_flag(tp, USE_LINKCHG_REG)) {
5048                 /* Polled via timer. */
5049                 tw32_f(MAC_EVENT, 0);
5050         } else {
5051                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5052         }
5053         udelay(40);
5054
5055         if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
5056             current_link_up &&
5057             tp->link_config.active_speed == SPEED_1000 &&
5058             (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
5059                 udelay(120);
5060                 tw32_f(MAC_STATUS,
5061                        (MAC_STATUS_SYNC_CHANGED |
5062                         MAC_STATUS_CFG_CHANGED));
5063                 udelay(40);
5064                 tg3_write_mem(tp,
5065                               NIC_SRAM_FIRMWARE_MBOX,
5066                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
5067         }
5068
5069         /* Prevent send BD corruption. */
5070         if (tg3_flag(tp, CLKREQ_BUG)) {
5071                 if (tp->link_config.active_speed == SPEED_100 ||
5072                     tp->link_config.active_speed == SPEED_10)
5073                         pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
5074                                                    PCI_EXP_LNKCTL_CLKREQ_EN);
5075                 else
5076                         pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
5077                                                  PCI_EXP_LNKCTL_CLKREQ_EN);
5078         }
5079
5080         tg3_test_and_report_link_chg(tp, current_link_up);
5081
5082         return 0;
5083 }
5084
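/* Bookkeeping for the software fiber autonegotiation state machine
 * below.  txconfig/rxconfig hold the 16-bit configuration words
 * exchanged on the wire (ANEG_CFG_* bits).
 */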
5085 struct tg3_fiber_aneginfo {
5086         int state;
5087 #define ANEG_STATE_UNKNOWN              0
5088 #define ANEG_STATE_AN_ENABLE            1
5089 #define ANEG_STATE_RESTART_INIT         2
5090 #define ANEG_STATE_RESTART              3
5091 #define ANEG_STATE_DISABLE_LINK_OK      4
5092 #define ANEG_STATE_ABILITY_DETECT_INIT  5
5093 #define ANEG_STATE_ABILITY_DETECT       6
5094 #define ANEG_STATE_ACK_DETECT_INIT      7
5095 #define ANEG_STATE_ACK_DETECT           8
5096 #define ANEG_STATE_COMPLETE_ACK_INIT    9
5097 #define ANEG_STATE_COMPLETE_ACK         10
5098 #define ANEG_STATE_IDLE_DETECT_INIT     11
5099 #define ANEG_STATE_IDLE_DETECT          12
5100 #define ANEG_STATE_LINK_OK              13
5101 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
5102 #define ANEG_STATE_NEXT_PAGE_WAIT       15
5103
5104         u32 flags;
5105 #define MR_AN_ENABLE            0x00000001
5106 #define MR_RESTART_AN           0x00000002
5107 #define MR_AN_COMPLETE          0x00000004
5108 #define MR_PAGE_RX              0x00000008
5109 #define MR_NP_LOADED            0x00000010
5110 #define MR_TOGGLE_TX            0x00000020
5111 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
5112 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
5113 #define MR_LP_ADV_SYM_PAUSE     0x00000100
5114 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
5115 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
5116 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
5117 #define MR_LP_ADV_NEXT_PAGE     0x00001000
5118 #define MR_TOGGLE_RX            0x00002000
5119 #define MR_NP_RX                0x00004000
5120
5121 #define MR_LINK_OK              0x80000000
5122
5123         unsigned long link_time, cur_time;
5124
5125         u32 ability_match_cfg;
5126         int ability_match_count;
5127
5128         char ability_match, idle_match, ack_match;
5129
5130         u32 txconfig, rxconfig;
5131 #define ANEG_CFG_NP             0x00000080
5132 #define ANEG_CFG_ACK            0x00000040
5133 #define ANEG_CFG_RF2            0x00000020
5134 #define ANEG_CFG_RF1            0x00000010
5135 #define ANEG_CFG_PS2            0x00000001
5136 #define ANEG_CFG_PS1            0x00008000
5137 #define ANEG_CFG_HD             0x00004000
5138 #define ANEG_CFG_FD             0x00002000
5139 #define ANEG_CFG_INVAL          0x00001f06
5140
5141 };
5142 #define ANEG_OK         0
5143 #define ANEG_DONE       1
5144 #define ANEG_TIMER_ENAB 2
5145 #define ANEG_FAILED     -1
5146
5147 #define ANEG_STATE_SETTLE_TIME  10000
5148
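/* Advance the software 1000BASE-X autoneg state machine by one step;
 * called repeatedly from fiber_autoneg().  Receives the partner's
 * config word via MAC_RX_AUTO_NEG, transmits ours via
 * MAC_TX_AUTO_NEG, and returns ANEG_OK, ANEG_DONE, ANEG_TIMER_ENAB
 * or ANEG_FAILED.  The flow loosely follows IEEE 802.3 clause 37
 * autonegotiation.
 */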
5149 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
5150                                    struct tg3_fiber_aneginfo *ap)
5151 {
5152         u16 flowctrl;
5153         unsigned long delta;
5154         u32 rx_cfg_reg;
5155         int ret;
5156
5157         if (ap->state == ANEG_STATE_UNKNOWN) {
5158                 ap->rxconfig = 0;
5159                 ap->link_time = 0;
5160                 ap->cur_time = 0;
5161                 ap->ability_match_cfg = 0;
5162                 ap->ability_match_count = 0;
5163                 ap->ability_match = 0;
5164                 ap->idle_match = 0;
5165                 ap->ack_match = 0;
5166         }
5167         ap->cur_time++;
5168
5169         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
5170                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
5171
5172                 if (rx_cfg_reg != ap->ability_match_cfg) {
5173                         ap->ability_match_cfg = rx_cfg_reg;
5174                         ap->ability_match = 0;
5175                         ap->ability_match_count = 0;
5176                 } else {
5177                         if (++ap->ability_match_count > 1) {
5178                                 ap->ability_match = 1;
5179                                 ap->ability_match_cfg = rx_cfg_reg;
5180                         }
5181                 }
5182                 if (rx_cfg_reg & ANEG_CFG_ACK)
5183                         ap->ack_match = 1;
5184                 else
5185                         ap->ack_match = 0;
5186
5187                 ap->idle_match = 0;
5188         } else {
5189                 ap->idle_match = 1;
5190                 ap->ability_match_cfg = 0;
5191                 ap->ability_match_count = 0;
5192                 ap->ability_match = 0;
5193                 ap->ack_match = 0;
5194
5195                 rx_cfg_reg = 0;
5196         }
5197
5198         ap->rxconfig = rx_cfg_reg;
5199         ret = ANEG_OK;
5200
5201         switch (ap->state) {
5202         case ANEG_STATE_UNKNOWN:
5203                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
5204                         ap->state = ANEG_STATE_AN_ENABLE;
5205
5206                 /* Fall through */
5207         case ANEG_STATE_AN_ENABLE:
5208                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
5209                 if (ap->flags & MR_AN_ENABLE) {
5210                         ap->link_time = 0;
5211                         ap->cur_time = 0;
5212                         ap->ability_match_cfg = 0;
5213                         ap->ability_match_count = 0;
5214                         ap->ability_match = 0;
5215                         ap->idle_match = 0;
5216                         ap->ack_match = 0;
5217
5218                         ap->state = ANEG_STATE_RESTART_INIT;
5219                 } else {
5220                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
5221                 }
5222                 break;
5223
5224         case ANEG_STATE_RESTART_INIT:
5225                 ap->link_time = ap->cur_time;
5226                 ap->flags &= ~(MR_NP_LOADED);
5227                 ap->txconfig = 0;
5228                 tw32(MAC_TX_AUTO_NEG, 0);
5229                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5230                 tw32_f(MAC_MODE, tp->mac_mode);
5231                 udelay(40);
5232
5233                 ret = ANEG_TIMER_ENAB;
5234                 ap->state = ANEG_STATE_RESTART;
5235
5236                 /* Fall through */
5237         case ANEG_STATE_RESTART:
5238                 delta = ap->cur_time - ap->link_time;
5239                 if (delta > ANEG_STATE_SETTLE_TIME)
5240                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
5241                 else
5242                         ret = ANEG_TIMER_ENAB;
5243                 break;
5244
5245         case ANEG_STATE_DISABLE_LINK_OK:
5246                 ret = ANEG_DONE;
5247                 break;
5248
5249         case ANEG_STATE_ABILITY_DETECT_INIT:
5250                 ap->flags &= ~(MR_TOGGLE_TX);
5251                 ap->txconfig = ANEG_CFG_FD;
5252                 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5253                 if (flowctrl & ADVERTISE_1000XPAUSE)
5254                         ap->txconfig |= ANEG_CFG_PS1;
5255                 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5256                         ap->txconfig |= ANEG_CFG_PS2;
5257                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5258                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5259                 tw32_f(MAC_MODE, tp->mac_mode);
5260                 udelay(40);
5261
5262                 ap->state = ANEG_STATE_ABILITY_DETECT;
5263                 break;
5264
5265         case ANEG_STATE_ABILITY_DETECT:
5266                 if (ap->ability_match != 0 && ap->rxconfig != 0)
5267                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
5268                 break;
5269
5270         case ANEG_STATE_ACK_DETECT_INIT:
5271                 ap->txconfig |= ANEG_CFG_ACK;
5272                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5273                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5274                 tw32_f(MAC_MODE, tp->mac_mode);
5275                 udelay(40);
5276
5277                 ap->state = ANEG_STATE_ACK_DETECT;
5278
5279                 /* Fall through */
5280         case ANEG_STATE_ACK_DETECT:
5281                 if (ap->ack_match != 0) {
5282                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
5283                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
5284                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
5285                         } else {
5286                                 ap->state = ANEG_STATE_AN_ENABLE;
5287                         }
5288                 } else if (ap->ability_match != 0 &&
5289                            ap->rxconfig == 0) {
5290                         ap->state = ANEG_STATE_AN_ENABLE;
5291                 }
5292                 break;
5293
5294         case ANEG_STATE_COMPLETE_ACK_INIT:
5295                 if (ap->rxconfig & ANEG_CFG_INVAL) {
5296                         ret = ANEG_FAILED;
5297                         break;
5298                 }
5299                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
5300                                MR_LP_ADV_HALF_DUPLEX |
5301                                MR_LP_ADV_SYM_PAUSE |
5302                                MR_LP_ADV_ASYM_PAUSE |
5303                                MR_LP_ADV_REMOTE_FAULT1 |
5304                                MR_LP_ADV_REMOTE_FAULT2 |
5305                                MR_LP_ADV_NEXT_PAGE |
5306                                MR_TOGGLE_RX |
5307                                MR_NP_RX);
5308                 if (ap->rxconfig & ANEG_CFG_FD)
5309                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
5310                 if (ap->rxconfig & ANEG_CFG_HD)
5311                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
5312                 if (ap->rxconfig & ANEG_CFG_PS1)
5313                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
5314                 if (ap->rxconfig & ANEG_CFG_PS2)
5315                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
5316                 if (ap->rxconfig & ANEG_CFG_RF1)
5317                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
5318                 if (ap->rxconfig & ANEG_CFG_RF2)
5319                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
5320                 if (ap->rxconfig & ANEG_CFG_NP)
5321                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
5322
5323                 ap->link_time = ap->cur_time;
5324
5325                 ap->flags ^= (MR_TOGGLE_TX);
5326                 if (ap->rxconfig & 0x0008)
5327                         ap->flags |= MR_TOGGLE_RX;
5328                 if (ap->rxconfig & ANEG_CFG_NP)
5329                         ap->flags |= MR_NP_RX;
5330                 ap->flags |= MR_PAGE_RX;
5331
5332                 ap->state = ANEG_STATE_COMPLETE_ACK;
5333                 ret = ANEG_TIMER_ENAB;
5334                 break;
5335
5336         case ANEG_STATE_COMPLETE_ACK:
5337                 if (ap->ability_match != 0 &&
5338                     ap->rxconfig == 0) {
5339                         ap->state = ANEG_STATE_AN_ENABLE;
5340                         break;
5341                 }
5342                 delta = ap->cur_time - ap->link_time;
5343                 if (delta > ANEG_STATE_SETTLE_TIME) {
5344                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
5345                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5346                         } else {
5347                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
5348                                     !(ap->flags & MR_NP_RX)) {
5349                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5350                                 } else {
5351                                         ret = ANEG_FAILED;
5352                                 }
5353                         }
5354                 }
5355                 break;
5356
5357         case ANEG_STATE_IDLE_DETECT_INIT:
5358                 ap->link_time = ap->cur_time;
5359                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5360                 tw32_f(MAC_MODE, tp->mac_mode);
5361                 udelay(40);
5362
5363                 ap->state = ANEG_STATE_IDLE_DETECT;
5364                 ret = ANEG_TIMER_ENAB;
5365                 break;
5366
5367         case ANEG_STATE_IDLE_DETECT:
5368                 if (ap->ability_match != 0 &&
5369                     ap->rxconfig == 0) {
5370                         ap->state = ANEG_STATE_AN_ENABLE;
5371                         break;
5372                 }
5373                 delta = ap->cur_time - ap->link_time;
5374                 if (delta > ANEG_STATE_SETTLE_TIME) {
5375                         /* XXX As in the Broadcom vendor driver: just declare the link OK. */
5376                         ap->state = ANEG_STATE_LINK_OK;
5377                 }
5378                 break;
5379
5380         case ANEG_STATE_LINK_OK:
5381                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
5382                 ret = ANEG_DONE;
5383                 break;
5384
5385         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
5386                 /* Next-page exchange is not implemented. */
5387                 break;
5388
5389         case ANEG_STATE_NEXT_PAGE_WAIT:
5390                 /* Next-page exchange is not implemented. */
5391                 break;
5392
5393         default:
5394                 ret = ANEG_FAILED;
5395                 break;
5396         }
5397
5398         return ret;
5399 }
5400
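/* Run the software fiber autoneg state machine to completion,
 * bounded at roughly 195 ms of 1 us steps.  Returns nonzero on
 * success and reports the transmitted config word and result flags
 * through *txflags and *rxflags.
 */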
5401 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
5402 {
5403         int res = 0;
5404         struct tg3_fiber_aneginfo aninfo;
5405         int status = ANEG_FAILED;
5406         unsigned int tick;
5407         u32 tmp;
5408
5409         tw32_f(MAC_TX_AUTO_NEG, 0);
5410
5411         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
5412         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
5413         udelay(40);
5414
5415         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
5416         udelay(40);
5417
5418         memset(&aninfo, 0, sizeof(aninfo));
5419         aninfo.flags |= MR_AN_ENABLE;
5420         aninfo.state = ANEG_STATE_UNKNOWN;
5421         aninfo.cur_time = 0;
5422         tick = 0;
5423         while (++tick < 195000) {
5424                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
5425                 if (status == ANEG_DONE || status == ANEG_FAILED)
5426                         break;
5427
5428                 udelay(1);
5429         }
5430
5431         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5432         tw32_f(MAC_MODE, tp->mac_mode);
5433         udelay(40);
5434
5435         *txflags = aninfo.txconfig;
5436         *rxflags = aninfo.flags;
5437
5438         if (status == ANEG_DONE &&
5439             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
5440                              MR_LP_ADV_FULL_DUPLEX)))
5441                 res = 1;
5442
5443         return res;
5444 }
5445
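/* Bring the BCM8002 SerDes PHY out of reset with a fixed register
 * sequence (PLL lock range, soft reset, comdet/auto-lock, POR
 * toggle).  The register numbers are undocumented and presumably
 * vendor-provided.
 */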
5446 static void tg3_init_bcm8002(struct tg3 *tp)
5447 {
5448         u32 mac_status = tr32(MAC_STATUS);
5449         int i;
5450
5451         /* Reset when initializing for the first time or when we have a link. */
5452         if (tg3_flag(tp, INIT_COMPLETE) &&
5453             !(mac_status & MAC_STATUS_PCS_SYNCED))
5454                 return;
5455
5456         /* Set PLL lock range. */
5457         tg3_writephy(tp, 0x16, 0x8007);
5458
5459         /* SW reset */
5460         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
5461
5462         /* Wait for reset to complete. */
5463         /* XXX should use schedule_timeout() instead of busy-waiting */
5464         for (i = 0; i < 500; i++)
5465                 udelay(10);
5466
5467         /* Config mode; select PMA/Ch 1 regs. */
5468         tg3_writephy(tp, 0x10, 0x8411);
5469
5470         /* Enable auto-lock and comdet, select txclk for tx. */
5471         tg3_writephy(tp, 0x11, 0x0a10);
5472
5473         tg3_writephy(tp, 0x18, 0x00a0);
5474         tg3_writephy(tp, 0x16, 0x41ff);
5475
5476         /* Assert and deassert POR. */
5477         tg3_writephy(tp, 0x13, 0x0400);
5478         udelay(40);
5479         tg3_writephy(tp, 0x13, 0x0000);
5480
5481         tg3_writephy(tp, 0x11, 0x0a50);
5482         udelay(40);
5483         tg3_writephy(tp, 0x11, 0x0a10);
5484
5485         /* Wait for signal to stabilize */
5486         /* XXX should use schedule_timeout() instead of busy-waiting */
5487         for (i = 0; i < 15000; i++)
5488                 udelay(10);
5489
5490         /* Deselect the channel register so we can read the PHYID
5491          * later.
5492          */
5493         tg3_writephy(tp, 0x10, 0x8011);
5494 }
5495
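/* Fiber link setup using the hardware autoneg engine (SG_DIG
 * registers).  Includes a MAC_SERDES_CFG programming workaround for
 * chips other than 5704 A0/A1, and falls back to parallel detection
 * when the partner does not autonegotiate.  Returns whether the link
 * came up.
 */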
5496 static bool tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
5497 {
5498         u16 flowctrl;
5499         bool current_link_up;
5500         u32 sg_dig_ctrl, sg_dig_status;
5501         u32 serdes_cfg, expected_sg_dig_ctrl;
5502         int workaround, port_a;
5503
5504         serdes_cfg = 0;
5505         expected_sg_dig_ctrl = 0;
5506         workaround = 0;
5507         port_a = 1;
5508         current_link_up = false;
5509
5510         if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
5511             tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
5512                 workaround = 1;
5513                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
5514                         port_a = 0;
5515
5516                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
5517                 /* preserve bits 20-23 for voltage regulator */
5518                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
5519         }
5520
5521         sg_dig_ctrl = tr32(SG_DIG_CTRL);
5522
5523         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
5524                 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
5525                         if (workaround) {
5526                                 u32 val = serdes_cfg;
5527
5528                                 if (port_a)
5529                                         val |= 0xc010000;
5530                                 else
5531                                         val |= 0x4010000;
5532                                 tw32_f(MAC_SERDES_CFG, val);
5533                         }
5534
5535                         tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5536                 }
5537                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
5538                         tg3_setup_flow_control(tp, 0, 0);
5539                         current_link_up = true;
5540                 }
5541                 goto out;
5542         }
5543
5544         /* Want auto-negotiation. */
5545         expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
5546
5547         flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5548         if (flowctrl & ADVERTISE_1000XPAUSE)
5549                 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
5550         if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5551                 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
5552
5553         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
5554                 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
5555                     tp->serdes_counter &&
5556                     ((mac_status & (MAC_STATUS_PCS_SYNCED |
5557                                     MAC_STATUS_RCVD_CFG)) ==
5558                      MAC_STATUS_PCS_SYNCED)) {
5559                         tp->serdes_counter--;
5560                         current_link_up = true;
5561                         goto out;
5562                 }
5563 restart_autoneg:
5564                 if (workaround)
5565                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
5566                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
5567                 udelay(5);
5568                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
5569
5570                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5571                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5572         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
5573                                  MAC_STATUS_SIGNAL_DET)) {
5574                 sg_dig_status = tr32(SG_DIG_STATUS);
5575                 mac_status = tr32(MAC_STATUS);
5576
5577                 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
5578                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
5579                         u32 local_adv = 0, remote_adv = 0;
5580
5581                         if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
5582                                 local_adv |= ADVERTISE_1000XPAUSE;
5583                         if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
5584                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
5585
5586                         if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
5587                                 remote_adv |= LPA_1000XPAUSE;
5588                         if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
5589                                 remote_adv |= LPA_1000XPAUSE_ASYM;
5590
5591                         tp->link_config.rmt_adv =
5592                                            mii_adv_to_ethtool_adv_x(remote_adv);
5593
5594                         tg3_setup_flow_control(tp, local_adv, remote_adv);
5595                         current_link_up = true;
5596                         tp->serdes_counter = 0;
5597                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5598                 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
5599                         if (tp->serdes_counter)
5600                                 tp->serdes_counter--;
5601                         else {
5602                                 if (workaround) {
5603                                         u32 val = serdes_cfg;
5604
5605                                         if (port_a)
5606                                                 val |= 0xc010000;
5607                                         else
5608                                                 val |= 0x4010000;
5609
5610                                         tw32_f(MAC_SERDES_CFG, val);
5611                                 }
5612
5613                                 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5614                                 udelay(40);
5615
5616                                 /* Link parallel detection: the link is up only
5617                                  * if we have PCS_SYNC and are not receiving
5618                                  * config code words. */
5619                                 mac_status = tr32(MAC_STATUS);
5620                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
5621                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
5622                                         tg3_setup_flow_control(tp, 0, 0);
5623                                         current_link_up = true;
5624                                         tp->phy_flags |=
5625                                                 TG3_PHYFLG_PARALLEL_DETECT;
5626                                         tp->serdes_counter =
5627                                                 SERDES_PARALLEL_DET_TIMEOUT;
5628                                 } else
5629                                         goto restart_autoneg;
5630                         }
5631                 }
5632         } else {
5633                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5634                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5635         }
5636
5637 out:
5638         return current_link_up;
5639 }
5640
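/* Fiber link setup without the hardware autoneg engine: run the
 * software state machine when autoneg is enabled, otherwise force a
 * 1000 Mb/s full-duplex link.
 */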
5641 static bool tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5642 {
5643         bool current_link_up = false;
5644
5645         if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5646                 goto out;
5647
5648         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5649                 u32 txflags, rxflags;
5650                 int i;
5651
5652                 if (fiber_autoneg(tp, &txflags, &rxflags)) {
5653                         u32 local_adv = 0, remote_adv = 0;
5654
5655                         if (txflags & ANEG_CFG_PS1)
5656                                 local_adv |= ADVERTISE_1000XPAUSE;
5657                         if (txflags & ANEG_CFG_PS2)
5658                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
5659
5660                         if (rxflags & MR_LP_ADV_SYM_PAUSE)
5661                                 remote_adv |= LPA_1000XPAUSE;
5662                         if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5663                                 remote_adv |= LPA_1000XPAUSE_ASYM;
5664
5665                         tp->link_config.rmt_adv =
5666                                            mii_adv_to_ethtool_adv_x(remote_adv);
5667
5668                         tg3_setup_flow_control(tp, local_adv, remote_adv);
5669
5670                         current_link_up = true;
5671                 }
5672                 for (i = 0; i < 30; i++) {
5673                         udelay(20);
5674                         tw32_f(MAC_STATUS,
5675                                (MAC_STATUS_SYNC_CHANGED |
5676                                 MAC_STATUS_CFG_CHANGED));
5677                         udelay(40);
5678                         if ((tr32(MAC_STATUS) &
5679                              (MAC_STATUS_SYNC_CHANGED |
5680                               MAC_STATUS_CFG_CHANGED)) == 0)
5681                                 break;
5682                 }
5683
5684                 mac_status = tr32(MAC_STATUS);
5685                 if (!current_link_up &&
5686                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
5687                     !(mac_status & MAC_STATUS_RCVD_CFG))
5688                         current_link_up = true;
5689         } else {
5690                 tg3_setup_flow_control(tp, 0, 0);
5691
5692                 /* Forcing 1000FD link up. */
5693                 current_link_up = true;
5694
5695                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5696                 udelay(40);
5697
5698                 tw32_f(MAC_MODE, tp->mac_mode);
5699                 udelay(40);
5700         }
5701
5702 out:
5703         return current_link_up;
5704 }
5705
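/* Top-level link setup for TBI (fiber) ports.  Bails out early when
 * the link is already stable, otherwise reinitializes the MAC for
 * TBI mode, runs hardware or software autoneg, and reconciles MAC
 * status, LEDs and link reporting.
 */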
5706 static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset)
5707 {
5708         u32 orig_pause_cfg;
5709         u16 orig_active_speed;
5710         u8 orig_active_duplex;
5711         u32 mac_status;
5712         bool current_link_up;
5713         int i;
5714
5715         orig_pause_cfg = tp->link_config.active_flowctrl;
5716         orig_active_speed = tp->link_config.active_speed;
5717         orig_active_duplex = tp->link_config.active_duplex;
5718
5719         if (!tg3_flag(tp, HW_AUTONEG) &&
5720             tp->link_up &&
5721             tg3_flag(tp, INIT_COMPLETE)) {
5722                 mac_status = tr32(MAC_STATUS);
5723                 mac_status &= (MAC_STATUS_PCS_SYNCED |
5724                                MAC_STATUS_SIGNAL_DET |
5725                                MAC_STATUS_CFG_CHANGED |
5726                                MAC_STATUS_RCVD_CFG);
5727                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
5728                                    MAC_STATUS_SIGNAL_DET)) {
5729                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5730                                             MAC_STATUS_CFG_CHANGED));
5731                         return 0;
5732                 }
5733         }
5734
5735         tw32_f(MAC_TX_AUTO_NEG, 0);
5736
5737         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5738         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5739         tw32_f(MAC_MODE, tp->mac_mode);
5740         udelay(40);
5741
5742         if (tp->phy_id == TG3_PHY_ID_BCM8002)
5743                 tg3_init_bcm8002(tp);
5744
5745         /* Enable link change events even when polling the serdes. */
5746         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5747         udelay(40);
5748
5749         current_link_up = false;
5750         tp->link_config.rmt_adv = 0;
5751         mac_status = tr32(MAC_STATUS);
5752
5753         if (tg3_flag(tp, HW_AUTONEG))
5754                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5755         else
5756                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5757
5758         tp->napi[0].hw_status->status =
5759                 (SD_STATUS_UPDATED |
5760                  (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5761
5762         for (i = 0; i < 100; i++) {
5763                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5764                                     MAC_STATUS_CFG_CHANGED));
5765                 udelay(5);
5766                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5767                                          MAC_STATUS_CFG_CHANGED |
5768                                          MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5769                         break;
5770         }
5771
5772         mac_status = tr32(MAC_STATUS);
5773         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5774                 current_link_up = false;
5775                 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5776                     tp->serdes_counter == 0) {
5777                         tw32_f(MAC_MODE, (tp->mac_mode |
5778                                           MAC_MODE_SEND_CONFIGS));
5779                         udelay(1);
5780                         tw32_f(MAC_MODE, tp->mac_mode);
5781                 }
5782         }
5783
5784         if (current_link_up) {
5785                 tp->link_config.active_speed = SPEED_1000;
5786                 tp->link_config.active_duplex = DUPLEX_FULL;
5787                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5788                                     LED_CTRL_LNKLED_OVERRIDE |
5789                                     LED_CTRL_1000MBPS_ON));
5790         } else {
5791                 tp->link_config.active_speed = SPEED_UNKNOWN;
5792                 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5793                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5794                                     LED_CTRL_LNKLED_OVERRIDE |
5795                                     LED_CTRL_TRAFFIC_OVERRIDE));
5796         }
5797
5798         if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
5799                 u32 now_pause_cfg = tp->link_config.active_flowctrl;
5800                 if (orig_pause_cfg != now_pause_cfg ||
5801                     orig_active_speed != tp->link_config.active_speed ||
5802                     orig_active_duplex != tp->link_config.active_duplex)
5803                         tg3_link_report(tp);
5804         }
5805
5806         return 0;
5807 }
5808
5809 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
5810 {
5811         int err = 0;
5812         u32 bmsr, bmcr;
5813         u16 current_speed = SPEED_UNKNOWN;
5814         u8 current_duplex = DUPLEX_UNKNOWN;
5815         bool current_link_up = false;
5816         u32 local_adv, remote_adv, sgsr;
5817
5818         if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
5819              tg3_asic_rev(tp) == ASIC_REV_5720) &&
5820              !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) &&
5821              (sgsr & SERDES_TG3_SGMII_MODE)) {
5822
5823                 if (force_reset)
5824                         tg3_phy_reset(tp);
5825
5826                 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
5827
5828                 if (!(sgsr & SERDES_TG3_LINK_UP)) {
5829                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5830                 } else {
5831                         current_link_up = true;
5832                         if (sgsr & SERDES_TG3_SPEED_1000) {
5833                                 current_speed = SPEED_1000;
5834                                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5835                         } else if (sgsr & SERDES_TG3_SPEED_100) {
5836                                 current_speed = SPEED_100;
5837                                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5838                         } else {
5839                                 current_speed = SPEED_10;
5840                                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5841                         }
5842
5843                         if (sgsr & SERDES_TG3_FULL_DUPLEX)
5844                                 current_duplex = DUPLEX_FULL;
5845                         else
5846                                 current_duplex = DUPLEX_HALF;
5847                 }
5848
5849                 tw32_f(MAC_MODE, tp->mac_mode);
5850                 udelay(40);
5851
5852                 tg3_clear_mac_status(tp);
5853
5854                 goto fiber_setup_done;
5855         }
5856
5857         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5858         tw32_f(MAC_MODE, tp->mac_mode);
5859         udelay(40);
5860
5861         tg3_clear_mac_status(tp);
5862
5863         if (force_reset)
5864                 tg3_phy_reset(tp);
5865
5866         tp->link_config.rmt_adv = 0;
5867
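        /* BMSR link status is latched-low per the MII spec, so read the
         * register twice: the first read returns the latched value, the
         * second the current link state.
         */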
5868         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5869         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5870         if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5871                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5872                         bmsr |= BMSR_LSTATUS;
5873                 else
5874                         bmsr &= ~BMSR_LSTATUS;
5875         }
5876
5877         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5878
5879         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5880             (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5881                 /* do nothing, just check for link up at the end */
5882         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5883                 u32 adv, newadv;
5884
5885                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5886                 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5887                                  ADVERTISE_1000XPAUSE |
5888                                  ADVERTISE_1000XPSE_ASYM |
5889                                  ADVERTISE_SLCT);
5890
5891                 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5892                 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5893
5894                 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5895                         tg3_writephy(tp, MII_ADVERTISE, newadv);
5896                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5897                         tg3_writephy(tp, MII_BMCR, bmcr);
5898
5899                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5900                         tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5901                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5902
5903                         return err;
5904                 }
5905         } else {
5906                 u32 new_bmcr;
5907
5908                 bmcr &= ~BMCR_SPEED1000;
5909                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5910
5911                 if (tp->link_config.duplex == DUPLEX_FULL)
5912                         new_bmcr |= BMCR_FULLDPLX;
5913
5914                 if (new_bmcr != bmcr) {
5915                         /* BMCR_SPEED1000 is a reserved bit that needs
5916                          * to be set on write.
5917                          */
5918                         new_bmcr |= BMCR_SPEED1000;
5919
5920                         /* Force a linkdown */
5921                         if (tp->link_up) {
5922                                 u32 adv;
5923
5924                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5925                                 adv &= ~(ADVERTISE_1000XFULL |
5926                                          ADVERTISE_1000XHALF |
5927                                          ADVERTISE_SLCT);
5928                                 tg3_writephy(tp, MII_ADVERTISE, adv);
5929                                 tg3_writephy(tp, MII_BMCR, bmcr |
5930                                                            BMCR_ANRESTART |
5931                                                            BMCR_ANENABLE);
5932                                 udelay(10);
5933                                 tg3_carrier_off(tp);
5934                         }
5935                         tg3_writephy(tp, MII_BMCR, new_bmcr);
5936                         bmcr = new_bmcr;
5937                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5938                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5939                         if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5940                                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5941                                         bmsr |= BMSR_LSTATUS;
5942                                 else
5943                                         bmsr &= ~BMSR_LSTATUS;
5944                         }
5945                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5946                 }
5947         }
5948
5949         if (bmsr & BMSR_LSTATUS) {
5950                 current_speed = SPEED_1000;
5951                 current_link_up = true;
5952                 if (bmcr & BMCR_FULLDPLX)
5953                         current_duplex = DUPLEX_FULL;
5954                 else
5955                         current_duplex = DUPLEX_HALF;
5956
5957                 local_adv = 0;
5958                 remote_adv = 0;
5959
5960                 if (bmcr & BMCR_ANENABLE) {
5961                         u32 common;
5962
5963                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5964                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5965                         common = local_adv & remote_adv;
5966                         if (common & (ADVERTISE_1000XHALF |
5967                                       ADVERTISE_1000XFULL)) {
5968                                 if (common & ADVERTISE_1000XFULL)
5969                                         current_duplex = DUPLEX_FULL;
5970                                 else
5971                                         current_duplex = DUPLEX_HALF;
5972
5973                                 tp->link_config.rmt_adv =
5974                                            mii_adv_to_ethtool_adv_x(remote_adv);
5975                         } else if (!tg3_flag(tp, 5780_CLASS)) {
5976                                 /* Link is up via parallel detect */
5977                         } else {
5978                                 current_link_up = false;
5979                         }
5980                 }
5981         }
5982
5983 fiber_setup_done:
5984         if (current_link_up && current_duplex == DUPLEX_FULL)
5985                 tg3_setup_flow_control(tp, local_adv, remote_adv);
5986
5987         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5988         if (tp->link_config.active_duplex == DUPLEX_HALF)
5989                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5990
5991         tw32_f(MAC_MODE, tp->mac_mode);
5992         udelay(40);
5993
5994         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5995
5996         tp->link_config.active_speed = current_speed;
5997         tp->link_config.active_duplex = current_duplex;
5998
5999         tg3_test_and_report_link_chg(tp, current_link_up);
6000         return err;
6001 }
6002
6003 static void tg3_serdes_parallel_detect(struct tg3 *tp)
6004 {
6005         if (tp->serdes_counter) {
6006                 /* Give autoneg time to complete. */
6007                 tp->serdes_counter--;
6008                 return;
6009         }
6010
6011         if (!tp->link_up &&
6012             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
6013                 u32 bmcr;
6014
6015                 tg3_readphy(tp, MII_BMCR, &bmcr);
6016                 if (bmcr & BMCR_ANENABLE) {
6017                         u32 phy1, phy2;
6018
6019                         /* Select shadow register 0x1f */
6020                         tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
6021                         tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
6022
6023                         /* Select expansion interrupt status register */
6024                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6025                                          MII_TG3_DSP_EXP1_INT_STAT);
6026                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6027                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6028
6029                         if ((phy1 & 0x10) && !(phy2 & 0x20)) {
6030                                 /* We have signal detect and are not
6031                                  * receiving config code words; the link
6032                                  * is up via parallel detection.
6033                                  */
6034
6035                                 bmcr &= ~BMCR_ANENABLE;
6036                                 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
6037                                 tg3_writephy(tp, MII_BMCR, bmcr);
6038                                 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
6039                         }
6040                 }
6041         } else if (tp->link_up &&
6042                    (tp->link_config.autoneg == AUTONEG_ENABLE) &&
6043                    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
6044                 u32 phy2;
6045
6046                 /* Select expansion interrupt status register */
6047                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6048                                  MII_TG3_DSP_EXP1_INT_STAT);
6049                 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6050                 if (phy2 & 0x20) {
6051                         u32 bmcr;
6052
6053                         /* Config code words received; turn on autoneg. */
6054                         tg3_readphy(tp, MII_BMCR, &bmcr);
6055                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
6056
6057                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
6058
6059                 }
6060         }
6061 }
6062
6063 static int tg3_setup_phy(struct tg3 *tp, bool force_reset)
6064 {
6065         u32 val;
6066         int err;
6067
6068         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
6069                 err = tg3_setup_fiber_phy(tp, force_reset);
6070         else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
6071                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
6072         else
6073                 err = tg3_setup_copper_phy(tp, force_reset);
6074
6075         if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
6076                 u32 scale;
6077
6078                 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
6079                 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
6080                         scale = 65;
6081                 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
6082                         scale = 6;
6083                 else
6084                         scale = 12;
6085
6086                 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
6087                 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
6088                 tw32(GRC_MISC_CFG, val);
6089         }
6090
6091         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6092               (6 << TX_LENGTHS_IPG_SHIFT);
6093         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
6094             tg3_asic_rev(tp) == ASIC_REV_5762)
6095                 val |= tr32(MAC_TX_LENGTHS) &
6096                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
6097                         TX_LENGTHS_CNT_DWN_VAL_MSK);
6098
6099         if (tp->link_config.active_speed == SPEED_1000 &&
6100             tp->link_config.active_duplex == DUPLEX_HALF)
6101                 tw32(MAC_TX_LENGTHS, val |
6102                      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
6103         else
6104                 tw32(MAC_TX_LENGTHS, val |
6105                      (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6106
6107         if (!tg3_flag(tp, 5705_PLUS)) {
6108                 if (tp->link_up) {
6109                         tw32(HOSTCC_STAT_COAL_TICKS,
6110                              tp->coal.stats_block_coalesce_usecs);
6111                 } else {
6112                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
6113                 }
6114         }
6115
6116         if (tg3_flag(tp, ASPM_WORKAROUND)) {
6117                 val = tr32(PCIE_PWR_MGMT_THRESH);
6118                 if (!tp->link_up)
6119                         val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
6120                               tp->pwrmgmt_thresh;
6121                 else
6122                         val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
6123                 tw32(PCIE_PWR_MGMT_THRESH, val);
6124         }
6125
6126         return err;
6127 }
6128
6129 /* tp->lock must be held */
6130 static u64 tg3_refclk_read(struct tg3 *tp)
6131 {
6132         u64 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
6133         return stamp | (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
6134 }
6135
6136 /* tp->lock must be held */
6137 static void tg3_refclk_write(struct tg3 *tp, u64 newval)
6138 {
6139         u32 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6140
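        /* Stop the reference clock while the two 32-bit halves are
         * written so the counter cannot tick between the LSB and MSB
         * updates; presumably this is why the STOP/RESUME sequence
         * exists.
         */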
6141         tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_STOP);
6142         tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
6143         tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
6144         tw32_f(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_RESUME);
6145 }
6146
6147 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
6148 static inline void tg3_full_unlock(struct tg3 *tp);
6149 static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
6150 {
6151         struct tg3 *tp = netdev_priv(dev);
6152
6153         info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
6154                                 SOF_TIMESTAMPING_RX_SOFTWARE |
6155                                 SOF_TIMESTAMPING_SOFTWARE;
6156
6157         if (tg3_flag(tp, PTP_CAPABLE)) {
6158                 info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
6159                                         SOF_TIMESTAMPING_RX_HARDWARE |
6160                                         SOF_TIMESTAMPING_RAW_HARDWARE;
6161         }
6162
6163         if (tp->ptp_clock)
6164                 info->phc_index = ptp_clock_index(tp->ptp_clock);
6165         else
6166                 info->phc_index = -1;
6167
6168         info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
6169
6170         info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
6171                            (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
6172                            (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
6173                            (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
6174         return 0;
6175 }
6176
6177 static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
6178 {
6179         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6180         bool neg_adj = false;
6181         u32 correction = 0;
6182
6183         if (ppb < 0) {
6184                 neg_adj = true;
6185                 ppb = -ppb;
6186         }
6187
6188         /* Frequency adjustment is performed using hardware with a 24 bit
6189          * accumulator and a programmable correction value. On each clk, the
6190          * correction value gets added to the accumulator and when it
6191          * overflows, the time counter is incremented/decremented.
6192          *
6193          * So conversion from ppb to correction value is
6194          *              ppb * (1 << 24) / 1000000000
6195          */
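        /* Worked example (illustrative): ppb = 1000 (i.e. 1 ppm) yields
         * correction = 1000 * 16777216 / 1000000000 ~= 16, so roughly 16
         * is added to the 24-bit accumulator on every clock and it
         * overflows about once every 2^24 / 16 = 1048576 clocks.
         */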
6196         correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
6197                      TG3_EAV_REF_CLK_CORRECT_MASK;
6198
6199         tg3_full_lock(tp, 0);
6200
6201         if (correction)
6202                 tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
6203                      TG3_EAV_REF_CLK_CORRECT_EN |
6204                      (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
6205         else
6206                 tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
6207
6208         tg3_full_unlock(tp);
6209
6210         return 0;
6211 }
6212
6213 static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
6214 {
6215         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6216
6217         tg3_full_lock(tp, 0);
6218         tp->ptp_adjust += delta;
6219         tg3_full_unlock(tp);
6220
6221         return 0;
6222 }
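/* Note that tg3_ptp_adjtime() never touches the hardware counter: the
 * delta simply accumulates in tp->ptp_adjust and is folded back in by
 * tg3_ptp_gettime() and tg3_hwclock_to_timestamp().
 */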
6223
6224 static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
6225 {
6226         u64 ns;
6227         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6228
6229         tg3_full_lock(tp, 0);
6230         ns = tg3_refclk_read(tp);
6231         ns += tp->ptp_adjust;
6232         tg3_full_unlock(tp);
6233
6234         *ts = ns_to_timespec64(ns);
6235
6236         return 0;
6237 }
6238
6239 static int tg3_ptp_settime(struct ptp_clock_info *ptp,
6240                            const struct timespec64 *ts)
6241 {
6242         u64 ns;
6243         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6244
6245         ns = timespec64_to_ns(ts);
6246
6247         tg3_full_lock(tp, 0);
6248         tg3_refclk_write(tp, ns);
6249         tp->ptp_adjust = 0;
6250         tg3_full_unlock(tp);
6251
6252         return 0;
6253 }
6254
6255 static int tg3_ptp_enable(struct ptp_clock_info *ptp,
6256                           struct ptp_clock_request *rq, int on)
6257 {
6258         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6259         u32 clock_ctl;
6260         int rval = 0;
6261
6262         switch (rq->type) {
6263         case PTP_CLK_REQ_PEROUT:
6264                 if (rq->perout.index != 0)
6265                         return -EINVAL;
6266
6267                 tg3_full_lock(tp, 0);
6268                 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6269                 clock_ctl &= ~TG3_EAV_CTL_TSYNC_GPIO_MASK;
6270
6271                 if (on) {
6272                         u64 nsec;
6273
6274                         nsec = rq->perout.start.sec * 1000000000ULL +
6275                                rq->perout.start.nsec;
6276
6277                         if (rq->perout.period.sec || rq->perout.period.nsec) {
6278                                 netdev_warn(tp->dev,
6279                                             "Device supports only a one-shot timesync output, period must be 0\n");
6280                                 rval = -EINVAL;
6281                                 goto err_out;
6282                         }
6283
6284                         if (nsec & (1ULL << 63)) {
6285                                 netdev_warn(tp->dev,
6286                                             "Start value (nsec) is over limit. Maximum size of start is only 63 bits\n");
6287                                 rval = -EINVAL;
6288                                 goto err_out;
6289                         }
6290
6291                         tw32(TG3_EAV_WATCHDOG0_LSB, (nsec & 0xffffffff));
6292                         tw32(TG3_EAV_WATCHDOG0_MSB,
6293                              TG3_EAV_WATCHDOG0_EN |
6294                              ((nsec >> 32) & TG3_EAV_WATCHDOG_MSB_MASK));
6295
6296                         tw32(TG3_EAV_REF_CLCK_CTL,
6297                              clock_ctl | TG3_EAV_CTL_TSYNC_WDOG0);
6298                 } else {
6299                         tw32(TG3_EAV_WATCHDOG0_MSB, 0);
6300                         tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl);
6301                 }
6302
6303 err_out:
6304                 tg3_full_unlock(tp);
6305                 return rval;
6306
6307         default:
6308                 break;
6309         }
6310
6311         return -EOPNOTSUPP;
6312 }
6313
6314 static const struct ptp_clock_info tg3_ptp_caps = {
6315         .owner          = THIS_MODULE,
6316         .name           = "tg3 clock",
6317         .max_adj        = 250000000,
6318         .n_alarm        = 0,
6319         .n_ext_ts       = 0,
6320         .n_per_out      = 1,
6321         .n_pins         = 0,
6322         .pps            = 0,
6323         .adjfreq        = tg3_ptp_adjfreq,
6324         .adjtime        = tg3_ptp_adjtime,
6325         .gettime64      = tg3_ptp_gettime,
6326         .settime64      = tg3_ptp_settime,
6327         .enable         = tg3_ptp_enable,
6328 };
6329
6330 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
6331                                      struct skb_shared_hwtstamps *timestamp)
6332 {
6333         memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
6334         timestamp->hwtstamp  = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
6335                                            tp->ptp_adjust);
6336 }
6337
6338 /* tp->lock must be held */
6339 static void tg3_ptp_init(struct tg3 *tp)
6340 {
6341         if (!tg3_flag(tp, PTP_CAPABLE))
6342                 return;
6343
6344         /* Initialize the hardware clock to the system time. */
6345         tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
6346         tp->ptp_adjust = 0;
6347         tp->ptp_info = tg3_ptp_caps;
6348 }
6349
6350 /* tp->lock must be held */
6351 static void tg3_ptp_resume(struct tg3 *tp)
6352 {
6353         if (!tg3_flag(tp, PTP_CAPABLE))
6354                 return;
6355
6356         tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
6357         tp->ptp_adjust = 0;
6358 }
6359
6360 static void tg3_ptp_fini(struct tg3 *tp)
6361 {
6362         if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
6363                 return;
6364
6365         ptp_clock_unregister(tp->ptp_clock);
6366         tp->ptp_clock = NULL;
6367         tp->ptp_adjust = 0;
6368 }
6369
6370 static inline int tg3_irq_sync(struct tg3 *tp)
6371 {
6372         return tp->irq_sync;
6373 }
6374
6375 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
6376 {
6377         int i;
6378
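        /* Bias dst by the register offset so that each value lands at
         * its own register offset within the caller's dump buffer; every
         * caller in tg3_dump_legacy_regs() passes the same buffer base.
         */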
6379         dst = (u32 *)((u8 *)dst + off);
6380         for (i = 0; i < len; i += sizeof(u32))
6381                 *dst++ = tr32(off + i);
6382 }
6383
6384 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
6385 {
6386         tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
6387         tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
6388         tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
6389         tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
6390         tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
6391         tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
6392         tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
6393         tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
6394         tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
6395         tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
6396         tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
6397         tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
6398         tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
6399         tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
6400         tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
6401         tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
6402         tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
6403         tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
6404         tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
6405
6406         if (tg3_flag(tp, SUPPORT_MSIX))
6407                 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
6408
6409         tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
6410         tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
6411         tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
6412         tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
6413         tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
6414         tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
6415         tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
6416         tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
6417
6418         if (!tg3_flag(tp, 5705_PLUS)) {
6419                 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
6420                 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
6421                 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
6422         }
6423
6424         tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
6425         tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
6426         tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
6427         tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
6428         tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
6429
6430         if (tg3_flag(tp, NVRAM))
6431                 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
6432 }
6433
6434 static void tg3_dump_state(struct tg3 *tp)
6435 {
6436         int i;
6437         u32 *regs;
6438
6439         regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
6440         if (!regs)
6441                 return;
6442
6443         if (tg3_flag(tp, PCI_EXPRESS)) {
6444                 /* Read up to but not including private PCI registers */
6445                 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
6446                         regs[i / sizeof(u32)] = tr32(i);
6447         } else
6448                 tg3_dump_legacy_regs(tp, regs);
6449
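        /* Print the snapshot four words per line, skipping lines that
         * are entirely zero to keep the log compact.
         */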
6450         for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
6451                 if (!regs[i + 0] && !regs[i + 1] &&
6452                     !regs[i + 2] && !regs[i + 3])
6453                         continue;
6454
6455                 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
6456                            i * 4,
6457                            regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
6458         }
6459
6460         kfree(regs);
6461
6462         for (i = 0; i < tp->irq_cnt; i++) {
6463                 struct tg3_napi *tnapi = &tp->napi[i];
6464
6465                 /* SW status block */
6466                 netdev_err(tp->dev,
6467                          "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6468                            i,
6469                            tnapi->hw_status->status,
6470                            tnapi->hw_status->status_tag,
6471                            tnapi->hw_status->rx_jumbo_consumer,
6472                            tnapi->hw_status->rx_consumer,
6473                            tnapi->hw_status->rx_mini_consumer,
6474                            tnapi->hw_status->idx[0].rx_producer,
6475                            tnapi->hw_status->idx[0].tx_consumer);
6476
6477                 netdev_err(tp->dev,
6478                 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
6479                            i,
6480                            tnapi->last_tag, tnapi->last_irq_tag,
6481                            tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
6482                            tnapi->rx_rcb_ptr,
6483                            tnapi->prodring.rx_std_prod_idx,
6484                            tnapi->prodring.rx_std_cons_idx,
6485                            tnapi->prodring.rx_jmb_prod_idx,
6486                            tnapi->prodring.rx_jmb_cons_idx);
6487         }
6488 }
6489
6490 /* This is called whenever we suspect that the system chipset is re-
6491  * ordering the sequence of MMIO to the tx send mailbox. The symptom
6492  * is bogus tx completions. We try to recover by setting the
6493  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
6494  * in the workqueue.
6495  */
6496 static void tg3_tx_recover(struct tg3 *tp)
6497 {
6498         BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
6499                tp->write32_tx_mbox == tg3_write_indirect_mbox);
6500
6501         netdev_warn(tp->dev,
6502                     "The system may be re-ordering memory-mapped I/O "
6503                     "cycles to the network device, attempting to recover. "
6504                     "Please report the problem to the driver maintainer "
6505                     "and include system chipset information.\n");
6506
6507         tg3_flag_set(tp, TX_RECOVERY_PENDING);
6508 }
6509
6510 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
6511 {
6512         /* Tell compiler to fetch tx indices from memory. */
6513         barrier();
6514         return tnapi->tx_pending -
6515                ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
6516 }
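/* Worked example of the arithmetic above (illustrative): with
 * TG3_TX_RING_SIZE = 512, tx_prod = 2 and tx_cons = 510, the in-flight
 * count is (2 - 510) & 511 = 4, so tg3_tx_avail() returns
 * tx_pending - 4.  The mask makes the unsigned subtraction wrap
 * correctly across the ring boundary.
 */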
6517
6518 /* Tigon3 never reports partial packet sends.  So we do not
6519  * need special logic to handle SKBs that have not had all
6520  * of their frags sent yet, like SunGEM does.
6521  */
6522 static void tg3_tx(struct tg3_napi *tnapi)
6523 {
6524         struct tg3 *tp = tnapi->tp;
6525         u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
6526         u32 sw_idx = tnapi->tx_cons;
6527         struct netdev_queue *txq;
6528         int index = tnapi - tp->napi;
6529         unsigned int pkts_compl = 0, bytes_compl = 0;
6530
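        /* With TSS, vector 0 presumably carries no tx ring, so the tx
         * queue number is the napi index shifted down by one.
         */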
6531         if (tg3_flag(tp, ENABLE_TSS))
6532                 index--;
6533
6534         txq = netdev_get_tx_queue(tp->dev, index);
6535
6536         while (sw_idx != hw_idx) {
6537                 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
6538                 struct sk_buff *skb = ri->skb;
6539                 int i, tx_bug = 0;
6540
6541                 if (unlikely(skb == NULL)) {
6542                         tg3_tx_recover(tp);
6543                         return;
6544                 }
6545
6546                 if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
6547                         struct skb_shared_hwtstamps timestamp;
6548                         u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
6549                         hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
6550
6551                         tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
6552
6553                         skb_tstamp_tx(skb, &timestamp);
6554                 }
6555
6556                 pci_unmap_single(tp->pdev,
6557                                  dma_unmap_addr(ri, mapping),
6558                                  skb_headlen(skb),
6559                                  PCI_DMA_TODEVICE);
6560
6561                 ri->skb = NULL;
6562
6563                 while (ri->fragmented) {
6564                         ri->fragmented = false;
6565                         sw_idx = NEXT_TX(sw_idx);
6566                         ri = &tnapi->tx_buffers[sw_idx];
6567                 }
6568
6569                 sw_idx = NEXT_TX(sw_idx);
6570
6571                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
6572                         ri = &tnapi->tx_buffers[sw_idx];
6573                         if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
6574                                 tx_bug = 1;
6575
6576                         pci_unmap_page(tp->pdev,
6577                                        dma_unmap_addr(ri, mapping),
6578                                        skb_frag_size(&skb_shinfo(skb)->frags[i]),
6579                                        PCI_DMA_TODEVICE);
6580
6581                         while (ri->fragmented) {
6582                                 ri->fragmented = false;
6583                                 sw_idx = NEXT_TX(sw_idx);
6584                                 ri = &tnapi->tx_buffers[sw_idx];
6585                         }
6586
6587                         sw_idx = NEXT_TX(sw_idx);
6588                 }
6589
6590                 pkts_compl++;
6591                 bytes_compl += skb->len;
6592
6593                 dev_consume_skb_any(skb);
6594
6595                 if (unlikely(tx_bug)) {
6596                         tg3_tx_recover(tp);
6597                         return;
6598                 }
6599         }
6600
6601         netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
6602
6603         tnapi->tx_cons = sw_idx;
6604
6605         /* Need to make the tx_cons update visible to tg3_start_xmit()
6606          * before checking for netif_queue_stopped().  Without the
6607          * memory barrier, there is a small possibility that tg3_start_xmit()
6608          * will miss it and cause the queue to be stopped forever.
6609          */
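        /* This presumably pairs with a matching smp_mb() on the
         * transmit side after the queue is stopped; without the pair
         * the wakeup check below could race with the queue being
         * stopped.
         */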
6610         smp_mb();
6611
6612         if (unlikely(netif_tx_queue_stopped(txq) &&
6613                      (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
6614                 __netif_tx_lock(txq, smp_processor_id());
6615                 if (netif_tx_queue_stopped(txq) &&
6616                     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
6617                         netif_tx_wake_queue(txq);
6618                 __netif_tx_unlock(txq);
6619         }
6620 }
6621
6622 static void tg3_frag_free(bool is_frag, void *data)
6623 {
6624         if (is_frag)
6625                 skb_free_frag(data);
6626         else
6627                 kfree(data);
6628 }
6629
6630 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
6631 {
6632         unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
6633                    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6634
6635         if (!ri->data)
6636                 return;
6637
6638         pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
6639                          map_sz, PCI_DMA_FROMDEVICE);
6640         tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
6641         ri->data = NULL;
6642 }
6643
6644
6645 /* Returns size of skb allocated or < 0 on error.
6646  *
6647  * We only need to fill in the address because the other members
6648  * of the RX descriptor are invariant; see tg3_init_rings.
6649  *
6650  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
6651  * posting buffers we only dirty the first cache line of the RX
6652  * descriptor (containing the address).  Whereas for the RX status
6653  * buffers the cpu only reads the last cacheline of the RX descriptor
6654  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
6655  */
6656 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
6657                              u32 opaque_key, u32 dest_idx_unmasked,
6658                              unsigned int *frag_size)
6659 {
6660         struct tg3_rx_buffer_desc *desc;
6661         struct ring_info *map;
6662         u8 *data;
6663         dma_addr_t mapping;
6664         int skb_size, data_size, dest_idx;
6665
6666         switch (opaque_key) {
6667         case RXD_OPAQUE_RING_STD:
6668                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6669                 desc = &tpr->rx_std[dest_idx];
6670                 map = &tpr->rx_std_buffers[dest_idx];
6671                 data_size = tp->rx_pkt_map_sz;
6672                 break;
6673
6674         case RXD_OPAQUE_RING_JUMBO:
6675                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6676                 desc = &tpr->rx_jmb[dest_idx].std;
6677                 map = &tpr->rx_jmb_buffers[dest_idx];
6678                 data_size = TG3_RX_JMB_MAP_SZ;
6679                 break;
6680
6681         default:
6682                 return -EINVAL;
6683         }
6684
6685         /* Do not overwrite any of the map or rp information
6686          * until we are sure we can commit to a new buffer.
6687          *
6688          * Callers depend upon this behavior and assume that
6689          * we leave everything unchanged if we fail.
6690          */
6691         skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
6692                    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
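        /* Buffers that fit in a page come from the page-fragment
         * allocator; larger (jumbo) buffers fall back to kmalloc().
         * The same size test in tg3_frag_free() selects the matching
         * free routine.
         */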
6693         if (skb_size <= PAGE_SIZE) {
6694                 data = netdev_alloc_frag(skb_size);
6695                 *frag_size = skb_size;
6696         } else {
6697                 data = kmalloc(skb_size, GFP_ATOMIC);
6698                 *frag_size = 0;
6699         }
6700         if (!data)
6701                 return -ENOMEM;
6702
6703         mapping = pci_map_single(tp->pdev,
6704                                  data + TG3_RX_OFFSET(tp),
6705                                  data_size,
6706                                  PCI_DMA_FROMDEVICE);
6707         if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
6708                 tg3_frag_free(skb_size <= PAGE_SIZE, data);
6709                 return -EIO;
6710         }
6711
6712         map->data = data;
6713         dma_unmap_addr_set(map, mapping, mapping);
6714
6715         desc->addr_hi = ((u64)mapping >> 32);
6716         desc->addr_lo = ((u64)mapping & 0xffffffff);
6717
6718         return data_size;
6719 }
6720
6721 /* We only need to copy over the address because the other
6722  * members of the RX descriptor are invariant.  See notes above
6723  * tg3_alloc_rx_data for full details.
6724  */
6725 static void tg3_recycle_rx(struct tg3_napi *tnapi,
6726                            struct tg3_rx_prodring_set *dpr,
6727                            u32 opaque_key, int src_idx,
6728                            u32 dest_idx_unmasked)
6729 {
6730         struct tg3 *tp = tnapi->tp;
6731         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
6732         struct ring_info *src_map, *dest_map;
6733         struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
6734         int dest_idx;
6735
6736         switch (opaque_key) {
6737         case RXD_OPAQUE_RING_STD:
6738                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6739                 dest_desc = &dpr->rx_std[dest_idx];
6740                 dest_map = &dpr->rx_std_buffers[dest_idx];
6741                 src_desc = &spr->rx_std[src_idx];
6742                 src_map = &spr->rx_std_buffers[src_idx];
6743                 break;
6744
6745         case RXD_OPAQUE_RING_JUMBO:
6746                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6747                 dest_desc = &dpr->rx_jmb[dest_idx].std;
6748                 dest_map = &dpr->rx_jmb_buffers[dest_idx];
6749                 src_desc = &spr->rx_jmb[src_idx].std;
6750                 src_map = &spr->rx_jmb_buffers[src_idx];
6751                 break;
6752
6753         default:
6754                 return;
6755         }
6756
6757         dest_map->data = src_map->data;
6758         dma_unmap_addr_set(dest_map, mapping,
6759                            dma_unmap_addr(src_map, mapping));
6760         dest_desc->addr_hi = src_desc->addr_hi;
6761         dest_desc->addr_lo = src_desc->addr_lo;
6762
6763         /* Ensure that the update to the skb happens after the physical
6764          * addresses have been transferred to the new BD location.
6765          */
6766         smp_wmb();
6767
6768         src_map->data = NULL;
6769 }
6770
6771 /* The RX ring scheme is composed of multiple rings which post fresh
6772  * buffers to the chip, and one special ring the chip uses to report
6773  * status back to the host.
6774  *
6775  * The special ring reports the status of received packets to the
6776  * host.  The chip does not write into the original descriptor the
6777  * RX buffer was obtained from.  The chip simply takes the original
6778  * descriptor as provided by the host, updates the status and length
6779  * field, then writes this into the next status ring entry.
6780  *
6781  * Each ring the host uses to post buffers to the chip is described
6782  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
6783  * it is first placed into the on-chip RAM.  When the packet's length
6784  * is known, it walks down the TG3_BDINFO entries to select the ring.
6785  * Each TG3_BDINFO specifies a MAXLEN field, and the first TG3_BDINFO
6786  * whose MAXLEN covers the new packet's length is chosen.
6787  *
6788  * The "separate ring for rx status" scheme may sound queer, but it makes
6789  * sense from a cache coherency perspective.  If only the host writes
6790  * to the buffer post rings, and only the chip writes to the rx status
6791  * rings, then cache lines never move beyond shared-modified state.
6792  * If both the host and chip were to write into the same ring, cache line
6793  * eviction could occur since both entities want it in an exclusive state.
6794  */
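/* To make the scheme concrete (illustrative numbers only): a 300-byte
 * frame is reported against a buffer posted on the standard ring, while
 * a frame exceeding the standard ring's MAXLEN is reported against a
 * jumbo ring buffer.  In both cases the status ring entry, not the
 * original descriptor, carries the length and error flags back to the
 * host.
 */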
6795 static int tg3_rx(struct tg3_napi *tnapi, int budget)
6796 {
6797         struct tg3 *tp = tnapi->tp;
6798         u32 work_mask, rx_std_posted = 0;
6799         u32 std_prod_idx, jmb_prod_idx;
6800         u32 sw_idx = tnapi->rx_rcb_ptr;
6801         u16 hw_idx;
6802         int received;
6803         struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
6804
6805         hw_idx = *(tnapi->rx_rcb_prod_idx);
6806         /*
6807          * We need to order the read of hw_idx and the read of
6808          * the opaque cookie.
6809          */
6810         rmb();
6811         work_mask = 0;
6812         received = 0;
6813         std_prod_idx = tpr->rx_std_prod_idx;
6814         jmb_prod_idx = tpr->rx_jmb_prod_idx;
6815         while (sw_idx != hw_idx && budget > 0) {
6816                 struct ring_info *ri;
6817                 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
6818                 unsigned int len;
6819                 struct sk_buff *skb;
6820                 dma_addr_t dma_addr;
6821                 u32 opaque_key, desc_idx, *post_ptr;
6822                 u8 *data;
6823                 u64 tstamp = 0;
6824
6825                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
6826                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
6827                 if (opaque_key == RXD_OPAQUE_RING_STD) {
6828                         ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
6829                         dma_addr = dma_unmap_addr(ri, mapping);
6830                         data = ri->data;
6831                         post_ptr = &std_prod_idx;
6832                         rx_std_posted++;
6833                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
6834                         ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
6835                         dma_addr = dma_unmap_addr(ri, mapping);
6836                         data = ri->data;
6837                         post_ptr = &jmb_prod_idx;
6838                 } else
6839                         goto next_pkt_nopost;
6840
6841                 work_mask |= opaque_key;
6842
6843                 if (desc->err_vlan & RXD_ERR_MASK) {
6844                 drop_it:
6845                         tg3_recycle_rx(tnapi, tpr, opaque_key,
6846                                        desc_idx, *post_ptr);
6847                 drop_it_no_recycle:
6848                         /* The card itself keeps track of the other statistics. */
6849                         tp->rx_dropped++;
6850                         goto next_pkt;
6851                 }
6852
6853                 prefetch(data + TG3_RX_OFFSET(tp));
6854                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
6855                       ETH_FCS_LEN;
6856
6857                 if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6858                      RXD_FLAG_PTPSTAT_PTPV1 ||
6859                     (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6860                      RXD_FLAG_PTPSTAT_PTPV2) {
6861                         tstamp = tr32(TG3_RX_TSTAMP_LSB);
6862                         tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
6863                 }
6864
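                /* Packets above the copy threshold take a zero-copy
                 * path: the DMA buffer is handed to build_skb() and a
                 * fresh buffer is posted in its place.  Smaller packets
                 * are copied into a new skb so the existing buffer can
                 * be recycled.
                 */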
6865                 if (len > TG3_RX_COPY_THRESH(tp)) {
6866                         int skb_size;
6867                         unsigned int frag_size;
6868
6869                         skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
6870                                                     *post_ptr, &frag_size);
6871                         if (skb_size < 0)
6872                                 goto drop_it;
6873
6874                         pci_unmap_single(tp->pdev, dma_addr, skb_size,
6875                                          PCI_DMA_FROMDEVICE);
6876
6877                         /* Ensure that the update to the data happens
6878                          * after the usage of the old DMA mapping.
6879                          */
6880                         smp_wmb();
6881
6882                         ri->data = NULL;
6883
6884                         skb = build_skb(data, frag_size);
6885                         if (!skb) {
6886                                 tg3_frag_free(frag_size != 0, data);
6887                                 goto drop_it_no_recycle;
6888                         }
6889                         skb_reserve(skb, TG3_RX_OFFSET(tp));
6890                 } else {
6891                         tg3_recycle_rx(tnapi, tpr, opaque_key,
6892                                        desc_idx, *post_ptr);
6893
6894                         skb = netdev_alloc_skb(tp->dev,
6895                                                len + TG3_RAW_IP_ALIGN);
6896                         if (skb == NULL)
6897                                 goto drop_it_no_recycle;
6898
6899                         skb_reserve(skb, TG3_RAW_IP_ALIGN);
6900                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6901                         memcpy(skb->data,
6902                                data + TG3_RX_OFFSET(tp),
6903                                len);
6904                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6905                 }
6906
6907                 skb_put(skb, len);
6908                 if (tstamp)
6909                         tg3_hwclock_to_timestamp(tp, tstamp,
6910                                                  skb_hwtstamps(skb));
6911
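                /* The hardware folds the TCP/UDP checksum into
                 * ip_tcp_csum; a value of 0xffff means the packet
                 * checksummed clean, hence CHECKSUM_UNNECESSARY below.
                 */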
6912                 if ((tp->dev->features & NETIF_F_RXCSUM) &&
6913                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6914                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
6915                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
6916                         skb->ip_summed = CHECKSUM_UNNECESSARY;
6917                 else
6918                         skb_checksum_none_assert(skb);
6919
6920                 skb->protocol = eth_type_trans(skb, tp->dev);
6921
6922                 if (len > (tp->dev->mtu + ETH_HLEN) &&
6923                     skb->protocol != htons(ETH_P_8021Q) &&
6924                     skb->protocol != htons(ETH_P_8021AD)) {
6925                         dev_kfree_skb_any(skb);
6926                         goto drop_it_no_recycle;
6927                 }
6928
6929                 if (desc->type_flags & RXD_FLAG_VLAN &&
6930                     !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6931                         __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
6932                                                desc->err_vlan & RXD_VLAN_MASK);
6933
6934                 napi_gro_receive(&tnapi->napi, skb);
6935
6936                 received++;
6937                 budget--;
6938
6939 next_pkt:
6940                 (*post_ptr)++;
6941
6942                 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6943                         tpr->rx_std_prod_idx = std_prod_idx &
6944                                                tp->rx_std_ring_mask;
6945                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6946                                      tpr->rx_std_prod_idx);
6947                         work_mask &= ~RXD_OPAQUE_RING_STD;
6948                         rx_std_posted = 0;
6949                 }
6950 next_pkt_nopost:
6951                 sw_idx++;
6952                 sw_idx &= tp->rx_ret_ring_mask;
6953
6954                 /* Refresh hw_idx to see if there is new work */
6955                 if (sw_idx == hw_idx) {
6956                         hw_idx = *(tnapi->rx_rcb_prod_idx);
6957                         rmb();
6958                 }
6959         }
6960
6961         /* ACK the status ring. */
6962         tnapi->rx_rcb_ptr = sw_idx;
6963         tw32_rx_mbox(tnapi->consmbox, sw_idx);
6964
6965         /* Refill RX ring(s). */
6966         if (!tg3_flag(tp, ENABLE_RSS)) {
6967                 /* Sync BD data before updating mailbox */
6968                 wmb();
6969
6970                 if (work_mask & RXD_OPAQUE_RING_STD) {
6971                         tpr->rx_std_prod_idx = std_prod_idx &
6972                                                tp->rx_std_ring_mask;
6973                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6974                                      tpr->rx_std_prod_idx);
6975                 }
6976                 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
6977                         tpr->rx_jmb_prod_idx = jmb_prod_idx &
6978                                                tp->rx_jmb_ring_mask;
6979                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6980                                      tpr->rx_jmb_prod_idx);
6981                 }
6982                 mmiowb();
6983         } else if (work_mask) {
6984                 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
6985                  * updated before the producer indices can be updated.
6986                  */
6987                 smp_wmb();
6988
6989                 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
6990                 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
6991
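                /* With RSS, the hardware rings are refilled centrally
                 * from tp->napi[1] (see tg3_poll_work()), so kick that
                 * vector if we are running elsewhere.
                 */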
6992                 if (tnapi != &tp->napi[1]) {
6993                         tp->rx_refill = true;
6994                         napi_schedule(&tp->napi[1].napi);
6995                 }
6996         }
6997
6998         return received;
6999 }
7000
7001 static void tg3_poll_link(struct tg3 *tp)
7002 {
7003         /* handle link change and other phy events */
7004         if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
7005                 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
7006
7007                 if (sblk->status & SD_STATUS_LINK_CHG) {
7008                         sblk->status = SD_STATUS_UPDATED |
7009                                        (sblk->status & ~SD_STATUS_LINK_CHG);
7010                         spin_lock(&tp->lock);
7011                         if (tg3_flag(tp, USE_PHYLIB)) {
7012                                 tw32_f(MAC_STATUS,
7013                                      (MAC_STATUS_SYNC_CHANGED |
7014                                       MAC_STATUS_CFG_CHANGED |
7015                                       MAC_STATUS_MI_COMPLETION |
7016                                       MAC_STATUS_LNKSTATE_CHANGED));
7017                                 udelay(40);
7018                         } else
7019                                 tg3_setup_phy(tp, false);
7020                         spin_unlock(&tp->lock);
7021                 }
7022         }
7023 }
7024
7025 static int tg3_rx_prodring_xfer(struct tg3 *tp,
7026                                 struct tg3_rx_prodring_set *dpr,
7027                                 struct tg3_rx_prodring_set *spr)
7028 {
7029         u32 si, di, cpycnt, src_prod_idx;
7030         int i, err = 0;
7031
7032         while (1) {
7033                 src_prod_idx = spr->rx_std_prod_idx;
7034
7035                 /* Make sure updates to the rx_std_buffers[] entries and the
7036                  * standard producer index are seen in the correct order.
7037                  */
7038                 smp_rmb();
7039
7040                 if (spr->rx_std_cons_idx == src_prod_idx)
7041                         break;
7042
7043                 if (spr->rx_std_cons_idx < src_prod_idx)
7044                         cpycnt = src_prod_idx - spr->rx_std_cons_idx;
7045                 else
7046                         cpycnt = tp->rx_std_ring_mask + 1 -
7047                                  spr->rx_std_cons_idx;
7048
7049                 cpycnt = min(cpycnt,
7050                              tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
7051
7052                 si = spr->rx_std_cons_idx;
7053                 di = dpr->rx_std_prod_idx;
7054
7055                 for (i = di; i < di + cpycnt; i++) {
7056                         if (dpr->rx_std_buffers[i].data) {
7057                                 cpycnt = i - di;
7058                                 err = -ENOSPC;
7059                                 break;
7060                         }
7061                 }
7062
7063                 if (!cpycnt)
7064                         break;
7065
7066                 /* Ensure that updates to the rx_std_buffers ring and the
7067                  * shadowed hardware producer ring from tg3_recycle_skb() are
7068                  * ordered correctly WRT the skb check above.
7069                  */
7070                 smp_rmb();
7071
7072                 memcpy(&dpr->rx_std_buffers[di],
7073                        &spr->rx_std_buffers[si],
7074                        cpycnt * sizeof(struct ring_info));
7075
7076                 for (i = 0; i < cpycnt; i++, di++, si++) {
7077                         struct tg3_rx_buffer_desc *sbd, *dbd;
7078                         sbd = &spr->rx_std[si];
7079                         dbd = &dpr->rx_std[di];
7080                         dbd->addr_hi = sbd->addr_hi;
7081                         dbd->addr_lo = sbd->addr_lo;
7082                 }
7083
7084                 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
7085                                        tp->rx_std_ring_mask;
7086                 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
7087                                        tp->rx_std_ring_mask;
7088         }
7089
7090         while (1) {
7091                 src_prod_idx = spr->rx_jmb_prod_idx;
7092
7093                 /* Make sure updates to the rx_jmb_buffers[] entries and
7094                  * the jumbo producer index are seen in the correct order.
7095                  */
7096                 smp_rmb();
7097
7098                 if (spr->rx_jmb_cons_idx == src_prod_idx)
7099                         break;
7100
7101                 if (spr->rx_jmb_cons_idx < src_prod_idx)
7102                         cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
7103                 else
7104                         cpycnt = tp->rx_jmb_ring_mask + 1 -
7105                                  spr->rx_jmb_cons_idx;
7106
7107                 cpycnt = min(cpycnt,
7108                              tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
7109
7110                 si = spr->rx_jmb_cons_idx;
7111                 di = dpr->rx_jmb_prod_idx;
7112
7113                 for (i = di; i < di + cpycnt; i++) {
7114                         if (dpr->rx_jmb_buffers[i].data) {
7115                                 cpycnt = i - di;
7116                                 err = -ENOSPC;
7117                                 break;
7118                         }
7119                 }
7120
7121                 if (!cpycnt)
7122                         break;
7123
7124                 /* Ensure that updates to the rx_jmb_buffers ring and the
7125                  * shadowed hardware producer ring from tg3_recycle_skb() are
7126                  * ordered correctly WRT the skb check above.
7127                  */
7128                 smp_rmb();
7129
7130                 memcpy(&dpr->rx_jmb_buffers[di],
7131                        &spr->rx_jmb_buffers[si],
7132                        cpycnt * sizeof(struct ring_info));
7133
7134                 for (i = 0; i < cpycnt; i++, di++, si++) {
7135                         struct tg3_rx_buffer_desc *sbd, *dbd;
7136                         sbd = &spr->rx_jmb[si].std;
7137                         dbd = &dpr->rx_jmb[di].std;
7138                         dbd->addr_hi = sbd->addr_hi;
7139                         dbd->addr_lo = sbd->addr_lo;
7140                 }
7141
7142                 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
7143                                        tp->rx_jmb_ring_mask;
7144                 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
7145                                        tp->rx_jmb_ring_mask;
7146         }
7147
7148         return err;
7149 }
7150
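/* Core NAPI work loop shared by tg3_poll() and tg3_poll_msix(): reclaim
 * completed tx descriptors first, then process rx within the remaining
 * budget.  With RSS enabled, vector 1 also transfers the per-vector
 * producer rings into the vector-0 rings and updates the hardware
 * producer mailboxes.
 */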
7151 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
7152 {
7153         struct tg3 *tp = tnapi->tp;
7154
7155         /* run TX completion thread */
7156         if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
7157                 tg3_tx(tnapi);
7158                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7159                         return work_done;
7160         }
7161
7162         if (!tnapi->rx_rcb_prod_idx)
7163                 return work_done;
7164
7165         /* run RX thread, within the bounds set by NAPI.
7166          * All RX "locking" is done by ensuring outside
7167          * code synchronizes with tg3->napi.poll()
7168          */
7169         if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
7170                 work_done += tg3_rx(tnapi, budget - work_done);
7171
7172         if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
7173                 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
7174                 int i, err = 0;
7175                 u32 std_prod_idx = dpr->rx_std_prod_idx;
7176                 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
7177
7178                 tp->rx_refill = false;
7179                 for (i = 1; i <= tp->rxq_cnt; i++)
7180                         err |= tg3_rx_prodring_xfer(tp, dpr,
7181                                                     &tp->napi[i].prodring);
7182
7183                 wmb();
7184
7185                 if (std_prod_idx != dpr->rx_std_prod_idx)
7186                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
7187                                      dpr->rx_std_prod_idx);
7188
7189                 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
7190                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
7191                                      dpr->rx_jmb_prod_idx);
7192
7193                 mmiowb();
7194
7195                 if (err)
7196                         tw32_f(HOSTCC_MODE, tp->coal_now);
7197         }
7198
7199         return work_done;
7200 }
7201
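/* Schedule the reset task at most once; the RESET_TASK_PENDING flag
 * guards against double-scheduling.
 */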
7202 static inline void tg3_reset_task_schedule(struct tg3 *tp)
7203 {
7204         if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
7205                 schedule_work(&tp->reset_task);
7206 }
7207
7208 static inline void tg3_reset_task_cancel(struct tg3 *tp)
7209 {
7210         if (test_and_clear_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
7211                 cancel_work_sync(&tp->reset_task);
7212         tg3_flag_clear(tp, TX_RECOVERY_PENDING);
7213 }
7214
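/* NAPI poll handler for the extra MSI-X vectors.  These always use
 * tagged status blocks; last_tag is echoed back through the interrupt
 * mailbox when the vector is re-enabled.
 */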
7215 static int tg3_poll_msix(struct napi_struct *napi, int budget)
7216 {
7217         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7218         struct tg3 *tp = tnapi->tp;
7219         int work_done = 0;
7220         struct tg3_hw_status *sblk = tnapi->hw_status;
7221
7222         while (1) {
7223                 work_done = tg3_poll_work(tnapi, work_done, budget);
7224
7225                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7226                         goto tx_recovery;
7227
7228                 if (unlikely(work_done >= budget))
7229                         break;
7230
7231                 /* tnapi->last_tag is used in the mailbox write below
7232                  * to tell the hw how much work has been processed,
7233                  * so we must read it before checking for more work.
7234                  */
7235                 tnapi->last_tag = sblk->status_tag;
7236                 tnapi->last_irq_tag = tnapi->last_tag;
7237                 rmb();
7238
7239                 /* check for RX/TX work to do */
7240                 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
7241                            *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
7242
7243                         /* This test is not race free, but looping again
7244                          * reduces the number of interrupts.
7245                          */
7246                         if (tnapi == &tp->napi[1] && tp->rx_refill)
7247                                 continue;
7248
7249                         napi_complete_done(napi, work_done);
7250                         /* Reenable interrupts. */
7251                         tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
7252
7253                         /* This test is synchronized by napi_schedule()
7254                          * and napi_complete() to close the race condition.
7255                          */
7256                         if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
7257                                 tw32(HOSTCC_MODE, tp->coalesce_mode |
7258                                                   HOSTCC_MODE_ENABLE |
7259                                                   tnapi->coal_now);
7260                         }
7261                         mmiowb();
7262                         break;
7263                 }
7264         }
7265
7266         tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1);
7267         return work_done;
7268
7269 tx_recovery:
7270         /* work_done is guaranteed to be less than budget. */
7271         napi_complete(napi);
7272         tg3_reset_task_schedule(tp);
7273         return work_done;
7274 }
7275
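/* Examine the flow attention, MSI and DMA status registers when the
 * status block reports an error, and schedule a chip reset if any of
 * them shows a real problem.  ERROR_PROCESSED keeps the reset from
 * being scheduled more than once.
 */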
7276 static void tg3_process_error(struct tg3 *tp)
7277 {
7278         u32 val;
7279         bool real_error = false;
7280
7281         if (tg3_flag(tp, ERROR_PROCESSED))
7282                 return;
7283
7284         /* Check Flow Attention register */
7285         val = tr32(HOSTCC_FLOW_ATTN);
7286         if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
7287                 netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
7288                 real_error = true;
7289         }
7290
7291         if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
7292                 netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
7293                 real_error = true;
7294         }
7295
7296         if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
7297                 netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
7298                 real_error = true;
7299         }
7300
7301         if (!real_error)
7302                 return;
7303
7304         tg3_dump_state(tp);
7305
7306         tg3_flag_set(tp, ERROR_PROCESSED);
7307         tg3_reset_task_schedule(tp);
7308 }
7309
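/* NAPI poll handler for the INTx/MSI (single vector) case.  Also checks
 * for chip errors, polls link state, and handles both tagged and
 * non-tagged status block formats.
 */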
7310 static int tg3_poll(struct napi_struct *napi, int budget)
7311 {
7312         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7313         struct tg3 *tp = tnapi->tp;
7314         int work_done = 0;
7315         struct tg3_hw_status *sblk = tnapi->hw_status;
7316
7317         while (1) {
7318                 if (sblk->status & SD_STATUS_ERROR)
7319                         tg3_process_error(tp);
7320
7321                 tg3_poll_link(tp);
7322
7323                 work_done = tg3_poll_work(tnapi, work_done, budget);
7324
7325                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7326                         goto tx_recovery;
7327
7328                 if (unlikely(work_done >= budget))
7329                         break;
7330
7331                 if (tg3_flag(tp, TAGGED_STATUS)) {
7332                         /* tnapi->last_tag is used in tg3_int_reenable() below
7333                          * to tell the hw how much work has been processed,
7334                          * so we must read it before checking for more work.
7335                          */
7336                         tnapi->last_tag = sblk->status_tag;
7337                         tnapi->last_irq_tag = tnapi->last_tag;
7338                         rmb();
7339                 } else
7340                         sblk->status &= ~SD_STATUS_UPDATED;
7341
7342                 if (likely(!tg3_has_work(tnapi))) {
7343                         napi_complete_done(napi, work_done);
7344                         tg3_int_reenable(tnapi);
7345                         break;
7346                 }
7347         }
7348
7349         tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1);
7350         return work_done;
7351
7352 tx_recovery:
7353         /* work_done is guaranteed to be less than budget. */
7354         napi_complete(napi);
7355         tg3_reset_task_schedule(tp);
7356         return work_done;
7357 }
7358
7359 static void tg3_napi_disable(struct tg3 *tp)
7360 {
7361         int i;
7362
7363         for (i = tp->irq_cnt - 1; i >= 0; i--)
7364                 napi_disable(&tp->napi[i].napi);
7365 }
7366
7367 static void tg3_napi_enable(struct tg3 *tp)
7368 {
7369         int i;
7370
7371         for (i = 0; i < tp->irq_cnt; i++)
7372                 napi_enable(&tp->napi[i].napi);
7373 }
7374
7375 static void tg3_napi_init(struct tg3 *tp)
7376 {
7377         int i;
7378
7379         netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
7380         for (i = 1; i < tp->irq_cnt; i++)
7381                 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
7382 }
7383
7384 static void tg3_napi_fini(struct tg3 *tp)
7385 {
7386         int i;
7387
7388         for (i = 0; i < tp->irq_cnt; i++)
7389                 netif_napi_del(&tp->napi[i].napi);
7390 }
7391
7392 static inline void tg3_netif_stop(struct tg3 *tp)
7393 {
7394         netif_trans_update(tp->dev);    /* prevent tx timeout */
7395         tg3_napi_disable(tp);
7396         netif_carrier_off(tp->dev);
7397         netif_tx_disable(tp->dev);
7398 }
7399
7400 /* tp->lock must be held */
7401 static inline void tg3_netif_start(struct tg3 *tp)
7402 {
7403         tg3_ptp_resume(tp);
7404
7405         /* NOTE: unconditional netif_tx_wake_all_queues is only
7406          * appropriate so long as all callers are assured to
7407          * have free tx slots (such as after tg3_init_hw)
7408          */
7409         netif_tx_wake_all_queues(tp->dev);
7410
7411         if (tp->link_up)
7412                 netif_carrier_on(tp->dev);
7413
7414         tg3_napi_enable(tp);
7415         tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
7416         tg3_enable_ints(tp);
7417 }
7418
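/* Wait for all in-flight interrupt handlers to finish.  tp->lock is
 * dropped across synchronize_irq() so that the handlers can complete.
 */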
7419 static void tg3_irq_quiesce(struct tg3 *tp)
7420         __releases(tp->lock)
7421         __acquires(tp->lock)
7422 {
7423         int i;
7424
7425         BUG_ON(tp->irq_sync);
7426
7427         tp->irq_sync = 1;
7428         smp_mb();
7429
7430         spin_unlock_bh(&tp->lock);
7431
7432         for (i = 0; i < tp->irq_cnt; i++)
7433                 synchronize_irq(tp->napi[i].irq_vec);
7434
7435         spin_lock_bh(&tp->lock);
7436 }
7437
7438 /* Fully shut down all tg3 driver activity elsewhere in the system.
7439  * If irq_sync is non-zero, the IRQ handlers are synchronized as well.
7440  * Most of the time this is only necessary when shutting down
7441  * the device.
7442  */
7443 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
7444 {
7445         spin_lock_bh(&tp->lock);
7446         if (irq_sync)
7447                 tg3_irq_quiesce(tp);
7448 }
7449
7450 static inline void tg3_full_unlock(struct tg3 *tp)
7451 {
7452         spin_unlock_bh(&tp->lock);
7453 }
7454
7455 /* One-shot MSI handler - the chip automatically disables the interrupt
7456  * after sending the MSI, so the driver doesn't have to do it.
7457  */
7458 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
7459 {
7460         struct tg3_napi *tnapi = dev_id;
7461         struct tg3 *tp = tnapi->tp;
7462
7463         prefetch(tnapi->hw_status);
7464         if (tnapi->rx_rcb)
7465                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7466
7467         if (likely(!tg3_irq_sync(tp)))
7468                 napi_schedule(&tnapi->napi);
7469
7470         return IRQ_HANDLED;
7471 }
7472
7473 /* MSI ISR - No need to check for interrupt sharing and no need to
7474  * flush status block and interrupt mailbox. PCI ordering rules
7475  * guarantee that MSI will arrive after the status block.
7476  */
7477 static irqreturn_t tg3_msi(int irq, void *dev_id)
7478 {
7479         struct tg3_napi *tnapi = dev_id;
7480         struct tg3 *tp = tnapi->tp;
7481
7482         prefetch(tnapi->hw_status);
7483         if (tnapi->rx_rcb)
7484                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7485         /*
7486          * Writing any value to intr-mbox-0 clears PCI INTA# and
7487          * chip-internal interrupt pending events.
7488          * Writing non-zero to intr-mbox-0 additionally tells the
7489          * NIC to stop sending us irqs, engaging "in-intr-handler"
7490          * event coalescing.
7491          */
7492         tw32_mailbox(tnapi->int_mbox, 0x00000001);
7493         if (likely(!tg3_irq_sync(tp)))
7494                 napi_schedule(&tnapi->napi);
7495
7496         return IRQ_RETVAL(1);
7497 }
7498
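/* INTx ISR for chips using the non-tagged status block format. */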
7499 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
7500 {
7501         struct tg3_napi *tnapi = dev_id;
7502         struct tg3 *tp = tnapi->tp;
7503         struct tg3_hw_status *sblk = tnapi->hw_status;
7504         unsigned int handled = 1;
7505
7506         /* In INTx mode, it is possible for the interrupt to arrive at
7507          * the CPU before the status block, which was posted just prior to it.
7508          * Reading the PCI State register will confirm whether the
7509          * interrupt is ours and will flush the status block.
7510          */
7511         if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
7512                 if (tg3_flag(tp, CHIP_RESETTING) ||
7513                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7514                         handled = 0;
7515                         goto out;
7516                 }
7517         }
7518
7519         /*
7520          * Writing any value to intr-mbox-0 clears PCI INTA# and
7521          * chip-internal interrupt pending events.
7522          * Writing non-zero to intr-mbox-0 additionally tells the
7523          * NIC to stop sending us irqs, engaging "in-intr-handler"
7524          * event coalescing.
7525          *
7526          * Flush the mailbox to de-assert the IRQ immediately to prevent
7527          * spurious interrupts.  The flush impacts performance but
7528          * excessive spurious interrupts can be worse in some cases.
7529          */
7530         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7531         if (tg3_irq_sync(tp))
7532                 goto out;
7533         sblk->status &= ~SD_STATUS_UPDATED;
7534         if (likely(tg3_has_work(tnapi))) {
7535                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7536                 napi_schedule(&tnapi->napi);
7537         } else {
7538                 /* No work, shared interrupt perhaps?  Re-enable
7539                  * interrupts, and flush that PCI write.
7540                  */
7541                 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
7542                                0x00000000);
7543         }
7544 out:
7545         return IRQ_RETVAL(handled);
7546 }
7547
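/* INTx ISR for chips using the tagged status block format. */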
7548 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
7549 {
7550         struct tg3_napi *tnapi = dev_id;
7551         struct tg3 *tp = tnapi->tp;
7552         struct tg3_hw_status *sblk = tnapi->hw_status;
7553         unsigned int handled = 1;
7554
7555         /* In INTx mode, it is possible for the interrupt to arrive at
7556          * the CPU before the status block, which was posted just prior to it.
7557          * Reading the PCI State register will confirm whether the
7558          * interrupt is ours and will flush the status block.
7559          */
7560         if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
7561                 if (tg3_flag(tp, CHIP_RESETTING) ||
7562                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7563                         handled = 0;
7564                         goto out;
7565                 }
7566         }
7567
7568         /*
7569          * Writing any value to intr-mbox-0 clears PCI INTA# and
7570          * chip-internal interrupt pending events.
7571          * Writing non-zero to intr-mbox-0 additionally tells the
7572          * NIC to stop sending us irqs, engaging "in-intr-handler"
7573          * event coalescing.
7574          *
7575          * Flush the mailbox to de-assert the IRQ immediately to prevent
7576          * spurious interrupts.  The flush impacts performance but
7577          * excessive spurious interrupts can be worse in some cases.
7578          */
7579         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7580
7581         /*
7582          * In a shared interrupt configuration, sometimes other devices'
7583          * interrupts will scream.  We record the current status tag here
7584          * so that the above check can report that the screaming interrupts
7585          * are unhandled.  Eventually they will be silenced.
7586          */
7587         tnapi->last_irq_tag = sblk->status_tag;
7588
7589         if (tg3_irq_sync(tp))
7590                 goto out;
7591
7592         prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7593
7594         napi_schedule(&tnapi->napi);
7595
7596 out:
7597         return IRQ_RETVAL(handled);
7598 }
7599
7600 /* ISR for interrupt test */
7601 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
7602 {
7603         struct tg3_napi *tnapi = dev_id;
7604         struct tg3 *tp = tnapi->tp;
7605         struct tg3_hw_status *sblk = tnapi->hw_status;
7606
7607         if ((sblk->status & SD_STATUS_UPDATED) ||
7608             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7609                 tg3_disable_ints(tp);
7610                 return IRQ_RETVAL(1);
7611         }
7612         return IRQ_RETVAL(0);
7613 }
7614
7615 #ifdef CONFIG_NET_POLL_CONTROLLER
7616 static void tg3_poll_controller(struct net_device *dev)
7617 {
7618         int i;
7619         struct tg3 *tp = netdev_priv(dev);
7620
7621         if (tg3_irq_sync(tp))
7622                 return;
7623
7624         for (i = 0; i < tp->irq_cnt; i++)
7625                 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
7626 }
7627 #endif
7628
7629 static void tg3_tx_timeout(struct net_device *dev)
7630 {
7631         struct tg3 *tp = netdev_priv(dev);
7632
7633         if (netif_msg_tx_err(tp)) {
7634                 netdev_err(dev, "transmit timed out, resetting\n");
7635                 tg3_dump_state(tp);
7636         }
7637
7638         tg3_reset_task_schedule(tp);
7639 }
7640
7641 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc. */
7642 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
7643 {
7644         u32 base = (u32) mapping & 0xffffffff;
7645
7646         return base + len + 8 < base;
7647 }
7648
7649 /* Test for TSO DMA buffers that cross into regions which are within MSS bytes
7650  * of any 4GB boundaries: 4G, 8G, etc.
7651  */
7652 static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7653                                            u32 len, u32 mss)
7654 {
7655         if (tg3_asic_rev(tp) == ASIC_REV_5762 && mss) {
7656                 u32 base = (u32) mapping & 0xffffffff;
7657
7658                 return ((base + len + (mss & 0x3fff)) < base);
7659         }
7660         return 0;
7661 }
7662
7663 /* Test for DMA addresses > 40-bit */
7664 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7665                                           int len)
7666 {
7667 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
7668         if (tg3_flag(tp, 40BIT_DMA_BUG))
7669                 return ((u64) mapping + len) > DMA_BIT_MASK(40);
7670         return 0;
7671 #else
7672         return 0;
7673 #endif
7674 }
7675
7676 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
7677                                  dma_addr_t mapping, u32 len, u32 flags,
7678                                  u32 mss, u32 vlan)
7679 {
7680         txbd->addr_hi = ((u64) mapping >> 32);
7681         txbd->addr_lo = ((u64) mapping & 0xffffffff);
7682         txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
7683         txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
7684 }
7685
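/* Queue one tx buffer, splitting it into multiple descriptors when it
 * exceeds tp->dma_limit.  Returns true if the mapping trips one of the
 * hardware DMA bugs, in which case the caller must fall back to a
 * workaround path.
 */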
7686 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
7687                             dma_addr_t map, u32 len, u32 flags,
7688                             u32 mss, u32 vlan)
7689 {
7690         struct tg3 *tp = tnapi->tp;
7691         bool hwbug = false;
7692
7693         if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
7694                 hwbug = true;
7695
7696         if (tg3_4g_overflow_test(map, len))
7697                 hwbug = true;
7698
7699         if (tg3_4g_tso_overflow_test(tp, map, len, mss))
7700                 hwbug = true;
7701
7702         if (tg3_40bit_overflow_test(tp, map, len))
7703                 hwbug = true;
7704
7705         if (tp->dma_limit) {
7706                 u32 prvidx = *entry;
7707                 u32 tmp_flag = flags & ~TXD_FLAG_END;
7708                 while (len > tp->dma_limit && *budget) {
7709                         u32 frag_len = tp->dma_limit;
7710                         len -= tp->dma_limit;
7711
7712                         /* Avoid the 8-byte DMA problem */
7713                         if (len <= 8) {
7714                                 len += tp->dma_limit / 2;
7715                                 frag_len = tp->dma_limit / 2;
7716                         }
7717
7718                         tnapi->tx_buffers[*entry].fragmented = true;
7719
7720                         tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7721                                       frag_len, tmp_flag, mss, vlan);
7722                         *budget -= 1;
7723                         prvidx = *entry;
7724                         *entry = NEXT_TX(*entry);
7725
7726                         map += frag_len;
7727                 }
7728
7729                 if (len) {
7730                         if (*budget) {
7731                                 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7732                                               len, flags, mss, vlan);
7733                                 *budget -= 1;
7734                                 *entry = NEXT_TX(*entry);
7735                         } else {
7736                                 hwbug = true;
7737                                 tnapi->tx_buffers[prvidx].fragmented = false;
7738                         }
7739                 }
7740         } else {
7741                 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7742                               len, flags, mss, vlan);
7743                 *entry = NEXT_TX(*entry);
7744         }
7745
7746         return hwbug;
7747 }
7748
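/* Unmap the skb stored at @entry: first the linear head, then fragments
 * 0..@last, skipping over any extra descriptors that tg3_tx_frag_set()
 * marked as fragmented.
 */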
7749 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
7750 {
7751         int i;
7752         struct sk_buff *skb;
7753         struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
7754
7755         skb = txb->skb;
7756         txb->skb = NULL;
7757
7758         pci_unmap_single(tnapi->tp->pdev,
7759                          dma_unmap_addr(txb, mapping),
7760                          skb_headlen(skb),
7761                          PCI_DMA_TODEVICE);
7762
7763         while (txb->fragmented) {
7764                 txb->fragmented = false;
7765                 entry = NEXT_TX(entry);
7766                 txb = &tnapi->tx_buffers[entry];
7767         }
7768
7769         for (i = 0; i <= last; i++) {
7770                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7771
7772                 entry = NEXT_TX(entry);
7773                 txb = &tnapi->tx_buffers[entry];
7774
7775                 pci_unmap_page(tnapi->tp->pdev,
7776                                dma_unmap_addr(txb, mapping),
7777                                skb_frag_size(frag), PCI_DMA_TODEVICE);
7778
7779                 while (txb->fragmented) {
7780                         txb->fragmented = false;
7781                         entry = NEXT_TX(entry);
7782                         txb = &tnapi->tx_buffers[entry];
7783                 }
7784         }
7785 }
7786
7787 /* Work around the 4GB and 40-bit hardware DMA bugs. */
7788 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
7789                                        struct sk_buff **pskb,
7790                                        u32 *entry, u32 *budget,
7791                                        u32 base_flags, u32 mss, u32 vlan)
7792 {
7793         struct tg3 *tp = tnapi->tp;
7794         struct sk_buff *new_skb, *skb = *pskb;
7795         dma_addr_t new_addr = 0;
7796         int ret = 0;
7797
7798         if (tg3_asic_rev(tp) != ASIC_REV_5701)
7799                 new_skb = skb_copy(skb, GFP_ATOMIC);
7800         else {
7801                 int more_headroom = 4 - ((unsigned long)skb->data & 3);
7802
7803                 new_skb = skb_copy_expand(skb,
7804                                           skb_headroom(skb) + more_headroom,
7805                                           skb_tailroom(skb), GFP_ATOMIC);
7806         }
7807
7808         if (!new_skb) {
7809                 ret = -1;
7810         } else {
7811                 /* New SKB is guaranteed to be linear. */
7812                 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
7813                                           PCI_DMA_TODEVICE);
7814                 /* Make sure the mapping succeeded */
7815                 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
7816                         dev_kfree_skb_any(new_skb);
7817                         ret = -1;
7818                 } else {
7819                         u32 save_entry = *entry;
7820
7821                         base_flags |= TXD_FLAG_END;
7822
7823                         tnapi->tx_buffers[*entry].skb = new_skb;
7824                         dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
7825                                            mapping, new_addr);
7826
7827                         if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
7828                                             new_skb->len, base_flags,
7829                                             mss, vlan)) {
7830                                 tg3_tx_skb_unmap(tnapi, save_entry, -1);
7831                                 dev_kfree_skb_any(new_skb);
7832                                 ret = -1;
7833                         }
7834                 }
7835         }
7836
7837         dev_consume_skb_any(skb);
7838         *pskb = new_skb;
7839         return ret;
7840 }
7841
7842 static bool tg3_tso_bug_gso_check(struct tg3_napi *tnapi, struct sk_buff *skb)
7843 {
7844         /* Check that we will ever have enough descriptors for the GSO
7845          * fallback, as gso_segs can exceed the current ring size.
7846          */
7847         return skb_shinfo(skb)->gso_segs < tnapi->tx_pending / 3;
7848 }
7849
7850 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
7851
7852 /* Use GSO to work around all TSO packets that meet the HW bug conditions
7853  * indicated in tg3_tx_frag_set()
7854  */
7855 static int tg3_tso_bug(struct tg3 *tp, struct tg3_napi *tnapi,
7856                        struct netdev_queue *txq, struct sk_buff *skb)
7857 {
7858         struct sk_buff *segs, *nskb;
7859         u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
7860
7861         /* Estimate the number of fragments in the worst case */
7862         if (unlikely(tg3_tx_avail(tnapi) <= frag_cnt_est)) {
7863                 netif_tx_stop_queue(txq);
7864
7865                 /* netif_tx_stop_queue() must be done before checking
7866                  * the tx index in tg3_tx_avail() below, because in
7867                  * tg3_tx(), we update tx index before checking for
7868                  * netif_tx_queue_stopped().
7869                  */
7870                 smp_mb();
7871                 if (tg3_tx_avail(tnapi) <= frag_cnt_est)
7872                         return NETDEV_TX_BUSY;
7873
7874                 netif_tx_wake_queue(txq);
7875         }
7876
7877         segs = skb_gso_segment(skb, tp->dev->features &
7878                                     ~(NETIF_F_TSO | NETIF_F_TSO6));
7879         if (IS_ERR(segs) || !segs)
7880                 goto tg3_tso_bug_end;
7881
7882         do {
7883                 nskb = segs;
7884                 segs = segs->next;
7885                 nskb->next = NULL;
7886                 tg3_start_xmit(nskb, tp->dev);
7887         } while (segs);
7888
7889 tg3_tso_bug_end:
7890         dev_consume_skb_any(skb);
7891
7892         return NETDEV_TX_OK;
7893 }
7894
7895 /* hard_start_xmit for all devices */
7896 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7897 {
7898         struct tg3 *tp = netdev_priv(dev);
7899         u32 len, entry, base_flags, mss, vlan = 0;
7900         u32 budget;
7901         int i = -1, would_hit_hwbug;
7902         dma_addr_t mapping;
7903         struct tg3_napi *tnapi;
7904         struct netdev_queue *txq;
7905         unsigned int last;
7906         struct iphdr *iph = NULL;
7907         struct tcphdr *tcph = NULL;
7908         __sum16 tcp_csum = 0, ip_csum = 0;
7909         __be16 ip_tot_len = 0;
7910
7911         txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
7912         tnapi = &tp->napi[skb_get_queue_mapping(skb)];
7913         if (tg3_flag(tp, ENABLE_TSS))
7914                 tnapi++;
7915
7916         budget = tg3_tx_avail(tnapi);
7917
7918         /* We are running in BH disabled context with netif_tx_lock
7919          * and TX reclaim runs via tp->napi.poll inside of a software
7920          * interrupt.  Furthermore, IRQ processing runs lockless so we have
7921          * no IRQ context deadlocks to worry about either.  Rejoice!
7922          */
7923         if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
7924                 if (!netif_tx_queue_stopped(txq)) {
7925                         netif_tx_stop_queue(txq);
7926
7927                         /* This is a hard error, log it. */
7928                         netdev_err(dev,
7929                                    "BUG! Tx Ring full when queue awake!\n");
7930                 }
7931                 return NETDEV_TX_BUSY;
7932         }
7933
7934         entry = tnapi->tx_prod;
7935         base_flags = 0;
7936
7937         mss = skb_shinfo(skb)->gso_size;
7938         if (mss) {
7939                 u32 tcp_opt_len, hdr_len;
7940
7941                 if (skb_cow_head(skb, 0))
7942                         goto drop;
7943
7944                 iph = ip_hdr(skb);
7945                 tcp_opt_len = tcp_optlen(skb);
7946
7947                 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
7948
7949                 /* HW/FW cannot correctly segment packets that have been
7950                  * vlan encapsulated.
7951                  */
7952                 if (skb->protocol == htons(ETH_P_8021Q) ||
7953                     skb->protocol == htons(ETH_P_8021AD)) {
7954                         if (tg3_tso_bug_gso_check(tnapi, skb))
7955                                 return tg3_tso_bug(tp, tnapi, txq, skb);
7956                         goto drop;
7957                 }
7958
7959                 if (!skb_is_gso_v6(skb)) {
7960                         if (unlikely((ETH_HLEN + hdr_len) > 80) &&
7961                             tg3_flag(tp, TSO_BUG)) {
7962                                 if (tg3_tso_bug_gso_check(tnapi, skb))
7963                                         return tg3_tso_bug(tp, tnapi, txq, skb);
7964                                 goto drop;
7965                         }
7966                         ip_csum = iph->check;
7967                         ip_tot_len = iph->tot_len;
7968                         iph->check = 0;
7969                         iph->tot_len = htons(mss + hdr_len);
7970                 }
7971
7972                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
7973                                TXD_FLAG_CPU_POST_DMA);
7974
7975                 tcph = tcp_hdr(skb);
7976                 tcp_csum = tcph->check;
7977
7978                 if (tg3_flag(tp, HW_TSO_1) ||
7979                     tg3_flag(tp, HW_TSO_2) ||
7980                     tg3_flag(tp, HW_TSO_3)) {
7981                         tcph->check = 0;
7982                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
7983                 } else {
7984                         tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
7985                                                          0, IPPROTO_TCP, 0);
7986                 }
7987
7988                 if (tg3_flag(tp, HW_TSO_3)) {
7989                         mss |= (hdr_len & 0xc) << 12;
7990                         if (hdr_len & 0x10)
7991                                 base_flags |= 0x00000010;
7992                         base_flags |= (hdr_len & 0x3e0) << 5;
7993                 } else if (tg3_flag(tp, HW_TSO_2))
7994                         mss |= hdr_len << 9;
7995                 else if (tg3_flag(tp, HW_TSO_1) ||
7996                          tg3_asic_rev(tp) == ASIC_REV_5705) {
7997                         if (tcp_opt_len || iph->ihl > 5) {
7998                                 int tsflags;
7999
8000                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
8001                                 mss |= (tsflags << 11);
8002                         }
8003                 } else {
8004                         if (tcp_opt_len || iph->ihl > 5) {
8005                                 int tsflags;
8006
8007                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
8008                                 base_flags |= tsflags << 12;
8009                         }
8010                 }
8011         } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
8012                 /* HW/FW cannot correctly checksum packets that have been
8013                  * vlan encapsulated.
8014                  */
8015                 if (skb->protocol == htons(ETH_P_8021Q) ||
8016                     skb->protocol == htons(ETH_P_8021AD)) {
8017                         if (skb_checksum_help(skb))
8018                                 goto drop;
8019                 } else {
8020                         base_flags |= TXD_FLAG_TCPUDP_CSUM;
8021                 }
8022         }
8023
8024         if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
8025             !mss && skb->len > VLAN_ETH_FRAME_LEN)
8026                 base_flags |= TXD_FLAG_JMB_PKT;
8027
8028         if (skb_vlan_tag_present(skb)) {
8029                 base_flags |= TXD_FLAG_VLAN;
8030                 vlan = skb_vlan_tag_get(skb);
8031         }
8032
8033         if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
8034             tg3_flag(tp, TX_TSTAMP_EN)) {
8035                 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
8036                 base_flags |= TXD_FLAG_HWTSTAMP;
8037         }
8038
8039         len = skb_headlen(skb);
8040
8041         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
8042         if (pci_dma_mapping_error(tp->pdev, mapping))
8043                 goto drop;
8044
8045
8046         tnapi->tx_buffers[entry].skb = skb;
8047         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
8048
8049         would_hit_hwbug = 0;
8050
8051         if (tg3_flag(tp, 5701_DMA_BUG))
8052                 would_hit_hwbug = 1;
8053
8054         if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
8055                           ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
8056                             mss, vlan)) {
8057                 would_hit_hwbug = 1;
8058         } else if (skb_shinfo(skb)->nr_frags > 0) {
8059                 u32 tmp_mss = mss;
8060
8061                 if (!tg3_flag(tp, HW_TSO_1) &&
8062                     !tg3_flag(tp, HW_TSO_2) &&
8063                     !tg3_flag(tp, HW_TSO_3))
8064                         tmp_mss = 0;
8065
8066                 /* Now loop through additional data
8067                  * fragments, and queue them.
8068                  */
8069                 last = skb_shinfo(skb)->nr_frags - 1;
8070                 for (i = 0; i <= last; i++) {
8071                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
8072
8073                         len = skb_frag_size(frag);
8074                         mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
8075                                                    len, DMA_TO_DEVICE);
8076
8077                         tnapi->tx_buffers[entry].skb = NULL;
8078                         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
8079                                            mapping);
8080                         if (dma_mapping_error(&tp->pdev->dev, mapping))
8081                                 goto dma_error;
8082
8083                         if (!budget ||
8084                             tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
8085                                             len, base_flags |
8086                                             ((i == last) ? TXD_FLAG_END : 0),
8087                                             tmp_mss, vlan)) {
8088                                 would_hit_hwbug = 1;
8089                                 break;
8090                         }
8091                 }
8092         }
8093
8094         if (would_hit_hwbug) {
8095                 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
8096
8097                 if (mss && tg3_tso_bug_gso_check(tnapi, skb)) {
8098                         /* If it's a TSO packet, do GSO instead of
8099                          * allocating and copying to a large linear SKB
8100                          */
8101                         if (ip_tot_len) {
8102                                 iph->check = ip_csum;
8103                                 iph->tot_len = ip_tot_len;
8104                         }
8105                         tcph->check = tcp_csum;
8106                         return tg3_tso_bug(tp, tnapi, txq, skb);
8107                 }
8108
8109                 /* If the workaround fails due to memory/mapping
8110                  * failure, silently drop this packet.
8111                  */
8112                 entry = tnapi->tx_prod;
8113                 budget = tg3_tx_avail(tnapi);
8114                 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
8115                                                 base_flags, mss, vlan))
8116                         goto drop_nofree;
8117         }
8118
8119         skb_tx_timestamp(skb);
8120         netdev_tx_sent_queue(txq, skb->len);
8121
8122         /* Sync BD data before updating mailbox */
8123         wmb();
8124
8125         tnapi->tx_prod = entry;
8126         if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
8127                 netif_tx_stop_queue(txq);
8128
8129                 /* netif_tx_stop_queue() must be done before checking
8130                  * the tx index in tg3_tx_avail() below, because in
8131                  * tg3_tx(), we update tx index before checking for
8132                  * netif_tx_queue_stopped().
8133                  */
8134                 smp_mb();
8135                 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
8136                         netif_tx_wake_queue(txq);
8137         }
8138
8139         if (!skb->xmit_more || netif_xmit_stopped(txq)) {
8140                 /* Packets are ready, update Tx producer idx on card. */
8141                 tw32_tx_mbox(tnapi->prodmbox, entry);
8142                 mmiowb();
8143         }
8144
8145         return NETDEV_TX_OK;
8146
8147 dma_error:
8148         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
8149         tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
8150 drop:
8151         dev_kfree_skb_any(skb);
8152 drop_nofree:
8153         tp->tx_dropped++;
8154         return NETDEV_TX_OK;
8155 }
8156
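/* Enable or disable internal MAC loopback and latch the new MAC mode. */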
8157 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
8158 {
8159         if (enable) {
8160                 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
8161                                   MAC_MODE_PORT_MODE_MASK);
8162
8163                 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
8164
8165                 if (!tg3_flag(tp, 5705_PLUS))
8166                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8167
8168                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
8169                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
8170                 else
8171                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
8172         } else {
8173                 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
8174
8175                 if (tg3_flag(tp, 5705_PLUS) ||
8176                     (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
8177                     tg3_asic_rev(tp) == ASIC_REV_5700)
8178                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
8179         }
8180
8181         tw32(MAC_MODE, tp->mac_mode);
8182         udelay(40);
8183 }
8184
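/* Put the PHY into internal or external loopback at the given speed and
 * configure the MAC port mode to match.
 */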
8185 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
8186 {
8187         u32 val, bmcr, mac_mode, ptest = 0;
8188
8189         tg3_phy_toggle_apd(tp, false);
8190         tg3_phy_toggle_automdix(tp, false);
8191
8192         if (extlpbk && tg3_phy_set_extloopbk(tp))
8193                 return -EIO;
8194
8195         bmcr = BMCR_FULLDPLX;
8196         switch (speed) {
8197         case SPEED_10:
8198                 break;
8199         case SPEED_100:
8200                 bmcr |= BMCR_SPEED100;
8201                 break;
8202         case SPEED_1000:
8203         default:
8204                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
8205                         speed = SPEED_100;
8206                         bmcr |= BMCR_SPEED100;
8207                 } else {
8208                         speed = SPEED_1000;
8209                         bmcr |= BMCR_SPEED1000;
8210                 }
8211         }
8212
8213         if (extlpbk) {
8214                 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
8215                         tg3_readphy(tp, MII_CTRL1000, &val);
8216                         val |= CTL1000_AS_MASTER |
8217                                CTL1000_ENABLE_MASTER;
8218                         tg3_writephy(tp, MII_CTRL1000, val);
8219                 } else {
8220                         ptest = MII_TG3_FET_PTEST_TRIM_SEL |
8221                                 MII_TG3_FET_PTEST_TRIM_2;
8222                         tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
8223                 }
8224         } else
8225                 bmcr |= BMCR_LOOPBACK;
8226
8227         tg3_writephy(tp, MII_BMCR, bmcr);
8228
8229         /* The write needs to be flushed for the FETs */
8230         if (tp->phy_flags & TG3_PHYFLG_IS_FET)
8231                 tg3_readphy(tp, MII_BMCR, &bmcr);
8232
8233         udelay(40);
8234
8235         if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
8236             tg3_asic_rev(tp) == ASIC_REV_5785) {
8237                 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
8238                              MII_TG3_FET_PTEST_FRC_TX_LINK |
8239                              MII_TG3_FET_PTEST_FRC_TX_LOCK);
8240
8241                 /* The write needs to be flushed for the AC131 */
8242                 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
8243         }
8244
8245         /* Reset to prevent losing 1st rx packet intermittently */
8246         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8247             tg3_flag(tp, 5780_CLASS)) {
8248                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8249                 udelay(10);
8250                 tw32_f(MAC_RX_MODE, tp->rx_mode);
8251         }
8252
8253         mac_mode = tp->mac_mode &
8254                    ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
8255         if (speed == SPEED_1000)
8256                 mac_mode |= MAC_MODE_PORT_MODE_GMII;
8257         else
8258                 mac_mode |= MAC_MODE_PORT_MODE_MII;
8259
8260         if (tg3_asic_rev(tp) == ASIC_REV_5700) {
8261                 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
8262
8263                 if (masked_phy_id == TG3_PHY_ID_BCM5401)
8264                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
8265                 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
8266                         mac_mode |= MAC_MODE_LINK_POLARITY;
8267
8268                 tg3_writephy(tp, MII_TG3_EXT_CTRL,
8269                              MII_TG3_EXT_CTRL_LNK3_LED_MODE);
8270         }
8271
8272         tw32(MAC_MODE, mac_mode);
8273         udelay(40);
8274
8275         return 0;
8276 }
8277
8278 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
8279 {
8280         struct tg3 *tp = netdev_priv(dev);
8281
8282         if (features & NETIF_F_LOOPBACK) {
8283                 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
8284                         return;
8285
8286                 spin_lock_bh(&tp->lock);
8287                 tg3_mac_loopback(tp, true);
8288                 netif_carrier_on(tp->dev);
8289                 spin_unlock_bh(&tp->lock);
8290                 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
8291         } else {
8292                 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
8293                         return;
8294
8295                 spin_lock_bh(&tp->lock);
8296                 tg3_mac_loopback(tp, false);
8297                 /* Force link status check */
8298                 tg3_setup_phy(tp, true);
8299                 spin_unlock_bh(&tp->lock);
8300                 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
8301         }
8302 }
8303
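/* TSO is not usable with jumbo MTUs on 5780-class chips, so mask all
 * TSO features in that case.
 */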
8304 static netdev_features_t tg3_fix_features(struct net_device *dev,
8305         netdev_features_t features)
8306 {
8307         struct tg3 *tp = netdev_priv(dev);
8308
8309         if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
8310                 features &= ~NETIF_F_ALL_TSO;
8311
8312         return features;
8313 }
8314
8315 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
8316 {
8317         netdev_features_t changed = dev->features ^ features;
8318
8319         if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
8320                 tg3_set_loopback(dev, features);
8321
8322         return 0;
8323 }
8324
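/* Release all rx buffers in a producer ring set.  Per-vector rings only
 * own the slots between their consumer and producer indices; the
 * vector-0 ring owns every slot.
 */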
8325 static void tg3_rx_prodring_free(struct tg3 *tp,
8326                                  struct tg3_rx_prodring_set *tpr)
8327 {
8328         int i;
8329
8330         if (tpr != &tp->napi[0].prodring) {
8331                 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
8332                      i = (i + 1) & tp->rx_std_ring_mask)
8333                         tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8334                                         tp->rx_pkt_map_sz);
8335
8336                 if (tg3_flag(tp, JUMBO_CAPABLE)) {
8337                         for (i = tpr->rx_jmb_cons_idx;
8338                              i != tpr->rx_jmb_prod_idx;
8339                              i = (i + 1) & tp->rx_jmb_ring_mask) {
8340                                 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8341                                                 TG3_RX_JMB_MAP_SZ);
8342                         }
8343                 }
8344
8345                 return;
8346         }
8347
8348         for (i = 0; i <= tp->rx_std_ring_mask; i++)
8349                 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8350                                 tp->rx_pkt_map_sz);
8351
8352         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8353                 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
8354                         tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8355                                         TG3_RX_JMB_MAP_SZ);
8356         }
8357 }
8358
8359 /* Initialize rx rings for packet processing.
8360  *
8361  * The chip has been shut down and the driver detached from
8362  * the network stack, so no interrupts or new tx packets will
8363  * end up in the driver.  tp->{tx,}lock are held and thus
8364  * we may not sleep.
8365  */
8366 static int tg3_rx_prodring_alloc(struct tg3 *tp,
8367                                  struct tg3_rx_prodring_set *tpr)
8368 {
8369         u32 i, rx_pkt_dma_sz;
8370
8371         tpr->rx_std_cons_idx = 0;
8372         tpr->rx_std_prod_idx = 0;
8373         tpr->rx_jmb_cons_idx = 0;
8374         tpr->rx_jmb_prod_idx = 0;
8375
8376         if (tpr != &tp->napi[0].prodring) {
8377                 memset(&tpr->rx_std_buffers[0], 0,
8378                        TG3_RX_STD_BUFF_RING_SIZE(tp));
8379                 if (tpr->rx_jmb_buffers)
8380                         memset(&tpr->rx_jmb_buffers[0], 0,
8381                                TG3_RX_JMB_BUFF_RING_SIZE(tp));
8382                 goto done;
8383         }
8384
8385         /* Zero out all descriptors. */
8386         memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
8387
8388         rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
8389         if (tg3_flag(tp, 5780_CLASS) &&
8390             tp->dev->mtu > ETH_DATA_LEN)
8391                 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
8392         tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
8393
8394         /* Initialize invariants of the rings; we only set this
8395          * stuff once.  This works because the card does not
8396          * write into the rx buffer posting rings.
8397          */
8398         for (i = 0; i <= tp->rx_std_ring_mask; i++) {
8399                 struct tg3_rx_buffer_desc *rxd;
8400
8401                 rxd = &tpr->rx_std[i];
8402                 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
8403                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
8404                 rxd->opaque = (RXD_OPAQUE_RING_STD |
8405                                (i << RXD_OPAQUE_INDEX_SHIFT));
8406         }
8407
8408         /* Now allocate fresh SKBs for each rx ring. */
8409         for (i = 0; i < tp->rx_pending; i++) {
8410                 unsigned int frag_size;
8411
8412                 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
8413                                       &frag_size) < 0) {
8414                         netdev_warn(tp->dev,
8415                                     "Using a smaller RX standard ring. Only "
8416                                     "%d out of %d buffers were allocated "
8417                                     "successfully\n", i, tp->rx_pending);
8418                         if (i == 0)
8419                                 goto initfail;
8420                         tp->rx_pending = i;
8421                         break;
8422                 }
8423         }
8424
8425         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8426                 goto done;
8427
8428         memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
8429
8430         if (!tg3_flag(tp, JUMBO_RING_ENABLE))
8431                 goto done;
8432
8433         for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
8434                 struct tg3_rx_buffer_desc *rxd;
8435
8436                 rxd = &tpr->rx_jmb[i].std;
8437                 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
8438                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
8439                                   RXD_FLAG_JUMBO;
8440                 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
8441                        (i << RXD_OPAQUE_INDEX_SHIFT));
8442         }
8443
8444         for (i = 0; i < tp->rx_jumbo_pending; i++) {
8445                 unsigned int frag_size;
8446
8447                 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
8448                                       &frag_size) < 0) {
8449                         netdev_warn(tp->dev,
8450                                     "Using a smaller RX jumbo ring. Only %d "
8451                                     "out of %d buffers were allocated "
8452                                     "successfully\n", i, tp->rx_jumbo_pending);
8453                         if (i == 0)
8454                                 goto initfail;
8455                         tp->rx_jumbo_pending = i;
8456                         break;
8457                 }
8458         }
8459
8460 done:
8461         return 0;
8462
8463 initfail:
8464         tg3_rx_prodring_free(tp, tpr);
8465         return -ENOMEM;
8466 }
8467
8468 static void tg3_rx_prodring_fini(struct tg3 *tp,
8469                                  struct tg3_rx_prodring_set *tpr)
8470 {
8471         kfree(tpr->rx_std_buffers);
8472         tpr->rx_std_buffers = NULL;
8473         kfree(tpr->rx_jmb_buffers);
8474         tpr->rx_jmb_buffers = NULL;
8475         if (tpr->rx_std) {
8476                 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
8477                                   tpr->rx_std, tpr->rx_std_mapping);
8478                 tpr->rx_std = NULL;
8479         }
8480         if (tpr->rx_jmb) {
8481                 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
8482                                   tpr->rx_jmb, tpr->rx_jmb_mapping);
8483                 tpr->rx_jmb = NULL;
8484         }
8485 }
8486
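/* Allocate the buffer-info arrays and the coherent DMA descriptor rings
 * for one rx producer ring set.
 */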
8487 static int tg3_rx_prodring_init(struct tg3 *tp,
8488                                 struct tg3_rx_prodring_set *tpr)
8489 {
8490         tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
8491                                       GFP_KERNEL);
8492         if (!tpr->rx_std_buffers)
8493                 return -ENOMEM;
8494
8495         tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
8496                                          TG3_RX_STD_RING_BYTES(tp),
8497                                          &tpr->rx_std_mapping,
8498                                          GFP_KERNEL);
8499         if (!tpr->rx_std)
8500                 goto err_out;
8501
8502         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8503                 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
8504                                               GFP_KERNEL);
8505                 if (!tpr->rx_jmb_buffers)
8506                         goto err_out;
8507
8508                 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
8509                                                  TG3_RX_JMB_RING_BYTES(tp),
8510                                                  &tpr->rx_jmb_mapping,
8511                                                  GFP_KERNEL);
8512                 if (!tpr->rx_jmb)
8513                         goto err_out;
8514         }
8515
8516         return 0;
8517
8518 err_out:
8519         tg3_rx_prodring_fini(tp, tpr);
8520         return -ENOMEM;
8521 }
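/* Note the split allocation above: the rx_*_buffers arrays are
 * plain host memory (kzalloc) used only to remember each slot's
 * data pointer and DMA mapping, while rx_std/rx_jmb are the
 * hardware-visible descriptor rings and therefore come from
 * dma_alloc_coherent().  tg3_rx_prodring_fini() tolerates a
 * partially built set (it checks each pointer before freeing),
 * which is why err_out can call it unconditionally.
 */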
8522
8523 /* Free up pending packets in all rx/tx rings.
8524  *
8525  * The chip has been shut down and the driver detached from
8526  * the networking stack, so no interrupts or new tx packets will
8527  * end up in the driver.  tp->{tx,}lock is not held and we are not
8528  * in an interrupt context and thus may sleep.
8529  */
8530 static void tg3_free_rings(struct tg3 *tp)
8531 {
8532         int i, j;
8533
8534         for (j = 0; j < tp->irq_cnt; j++) {
8535                 struct tg3_napi *tnapi = &tp->napi[j];
8536
8537                 tg3_rx_prodring_free(tp, &tnapi->prodring);
8538
8539                 if (!tnapi->tx_buffers)
8540                         continue;
8541
8542                 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
8543                         struct sk_buff *skb = tnapi->tx_buffers[i].skb;
8544
8545                         if (!skb)
8546                                 continue;
8547
8548                         tg3_tx_skb_unmap(tnapi, i,
8549                                          skb_shinfo(skb)->nr_frags - 1);
8550
8551                         dev_consume_skb_any(skb);
8552                 }
8553                 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
8554         }
8555 }
8556
8557 /* Initialize tx/rx rings for packet processing.
8558  *
8559  * The chip has been shut down and the driver detached from
8560  * the networking stack, so no interrupts or new tx packets will
8561  * end up in the driver.  tp->{tx,}lock are held and thus
8562  * we may not sleep.
8563  */
8564 static int tg3_init_rings(struct tg3 *tp)
8565 {
8566         int i;
8567
8568         /* Free up all the SKBs. */
8569         tg3_free_rings(tp);
8570
8571         for (i = 0; i < tp->irq_cnt; i++) {
8572                 struct tg3_napi *tnapi = &tp->napi[i];
8573
8574                 tnapi->last_tag = 0;
8575                 tnapi->last_irq_tag = 0;
8576                 tnapi->hw_status->status = 0;
8577                 tnapi->hw_status->status_tag = 0;
8578                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8579
8580                 tnapi->tx_prod = 0;
8581                 tnapi->tx_cons = 0;
8582                 if (tnapi->tx_ring)
8583                         memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
8584
8585                 tnapi->rx_rcb_ptr = 0;
8586                 if (tnapi->rx_rcb)
8587                         memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
8588
8589                 if (tnapi->prodring.rx_std &&
8590                     tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
8591                         tg3_free_rings(tp);
8592                         return -ENOMEM;
8593                 }
8594         }
8595
8596         return 0;
8597 }
8598
8599 static void tg3_mem_tx_release(struct tg3 *tp)
8600 {
8601         int i;
8602
8603         for (i = 0; i < tp->irq_max; i++) {
8604                 struct tg3_napi *tnapi = &tp->napi[i];
8605
8606                 if (tnapi->tx_ring) {
8607                         dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
8608                                 tnapi->tx_ring, tnapi->tx_desc_mapping);
8609                         tnapi->tx_ring = NULL;
8610                 }
8611
8612                 kfree(tnapi->tx_buffers);
8613                 tnapi->tx_buffers = NULL;
8614         }
8615 }
8616
8617 static int tg3_mem_tx_acquire(struct tg3 *tp)
8618 {
8619         int i;
8620         struct tg3_napi *tnapi = &tp->napi[0];
8621
8622         /* If multivector TSS is enabled, vector 0 does not handle
8623          * tx interrupts.  Don't allocate any resources for it.
8624          */
8625         if (tg3_flag(tp, ENABLE_TSS))
8626                 tnapi++;
8627
8628         for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
8629                 tnapi->tx_buffers = kcalloc(TG3_TX_RING_SIZE,
8630                                             sizeof(struct tg3_tx_ring_info),
8631                                             GFP_KERNEL);
8632                 if (!tnapi->tx_buffers)
8633                         goto err_out;
8634
8635                 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
8636                                                     TG3_TX_RING_BYTES,
8637                                                     &tnapi->tx_desc_mapping,
8638                                                     GFP_KERNEL);
8639                 if (!tnapi->tx_ring)
8640                         goto err_out;
8641         }
8642
8643         return 0;
8644
8645 err_out:
8646         tg3_mem_tx_release(tp);
8647         return -ENOMEM;
8648 }
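/* Illustrative vector layout, assuming ENABLE_TSS with
 * txq_cnt == 4: the loop above starts at &tp->napi[1], so the tx
 * rings and buffer arrays land on vectors 1-4 and vector 0
 * carries no tx state at all.
 */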
8649
8650 static void tg3_mem_rx_release(struct tg3 *tp)
8651 {
8652         int i;
8653
8654         for (i = 0; i < tp->irq_max; i++) {
8655                 struct tg3_napi *tnapi = &tp->napi[i];
8656
8657                 tg3_rx_prodring_fini(tp, &tnapi->prodring);
8658
8659                 if (!tnapi->rx_rcb)
8660                         continue;
8661
8662                 dma_free_coherent(&tp->pdev->dev,
8663                                   TG3_RX_RCB_RING_BYTES(tp),
8664                                   tnapi->rx_rcb,
8665                                   tnapi->rx_rcb_mapping);
8666                 tnapi->rx_rcb = NULL;
8667         }
8668 }
8669
8670 static int tg3_mem_rx_acquire(struct tg3 *tp)
8671 {
8672         unsigned int i, limit;
8673
8674         limit = tp->rxq_cnt;
8675
8676         /* If RSS is enabled, we need a (dummy) producer ring
8677          * set on vector zero.  This is the true hw prodring.
8678          */
8679         if (tg3_flag(tp, ENABLE_RSS))
8680                 limit++;
8681
8682         for (i = 0; i < limit; i++) {
8683                 struct tg3_napi *tnapi = &tp->napi[i];
8684
8685                 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
8686                         goto err_out;
8687
8688                 /* If multivector RSS is enabled, vector 0
8689                  * does not handle rx or tx interrupts.
8690                  * Don't allocate any resources for it.
8691                  */
8692                 if (!i && tg3_flag(tp, ENABLE_RSS))
8693                         continue;
8694
8695                 tnapi->rx_rcb = dma_zalloc_coherent(&tp->pdev->dev,
8696                                                     TG3_RX_RCB_RING_BYTES(tp),
8697                                                     &tnapi->rx_rcb_mapping,
8698                                                     GFP_KERNEL);
8699                 if (!tnapi->rx_rcb)
8700                         goto err_out;
8701         }
8702
8703         return 0;
8704
8705 err_out:
8706         tg3_mem_rx_release(tp);
8707         return -ENOMEM;
8708 }
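/* Illustrative vector layout, assuming ENABLE_RSS with
 * rxq_cnt == 4: limit becomes 5, so vectors 0-4 each get a
 * producer ring set, but only vectors 1-4 get an rx return ring
 * (rx_rcb).  Vector 0's set is the true hw prodring described
 * in the comment above.
 */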
8709
8710 /*
8711  * Must not be invoked with interrupt sources disabled and
8712  * the hardware shut down.
8713  */
8714 static void tg3_free_consistent(struct tg3 *tp)
8715 {
8716         int i;
8717
8718         for (i = 0; i < tp->irq_cnt; i++) {
8719                 struct tg3_napi *tnapi = &tp->napi[i];
8720
8721                 if (tnapi->hw_status) {
8722                         dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
8723                                           tnapi->hw_status,
8724                                           tnapi->status_mapping);
8725                         tnapi->hw_status = NULL;
8726                 }
8727         }
8728
8729         tg3_mem_rx_release(tp);
8730         tg3_mem_tx_release(tp);
8731
8732         /* tp->hw_stats can be referenced safely:
8733          *     1. under rtnl_lock
8734          *     2. or under tp->lock if TG3_FLAG_INIT_COMPLETE is set.
8735          */
8736         if (tp->hw_stats) {
8737                 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
8738                                   tp->hw_stats, tp->stats_mapping);
8739                 tp->hw_stats = NULL;
8740         }
8741 }
8742
8743 /*
8744  * Must not be invoked with interrupt sources disabled and
8745  * the hardware shut down.  Can sleep.
8746  */
8747 static int tg3_alloc_consistent(struct tg3 *tp)
8748 {
8749         int i;
8750
8751         tp->hw_stats = dma_zalloc_coherent(&tp->pdev->dev,
8752                                            sizeof(struct tg3_hw_stats),
8753                                            &tp->stats_mapping, GFP_KERNEL);
8754         if (!tp->hw_stats)
8755                 goto err_out;
8756
8757         for (i = 0; i < tp->irq_cnt; i++) {
8758                 struct tg3_napi *tnapi = &tp->napi[i];
8759                 struct tg3_hw_status *sblk;
8760
8761                 tnapi->hw_status = dma_zalloc_coherent(&tp->pdev->dev,
8762                                                        TG3_HW_STATUS_SIZE,
8763                                                        &tnapi->status_mapping,
8764                                                        GFP_KERNEL);
8765                 if (!tnapi->hw_status)
8766                         goto err_out;
8767
8768                 sblk = tnapi->hw_status;
8769
8770                 if (tg3_flag(tp, ENABLE_RSS)) {
8771                         u16 *prodptr = NULL;
8772
8773                         /*
8774                          * When RSS is enabled, the status block format changes
8775                          * slightly.  The "rx_jumbo_consumer", "reserved",
8776                          * and "rx_mini_consumer" members get mapped to the
8777                          * other three rx return ring producer indexes.
8778                          */
8779                         switch (i) {
8780                         case 1:
8781                                 prodptr = &sblk->idx[0].rx_producer;
8782                                 break;
8783                         case 2:
8784                                 prodptr = &sblk->rx_jumbo_consumer;
8785                                 break;
8786                         case 3:
8787                                 prodptr = &sblk->reserved;
8788                                 break;
8789                         case 4:
8790                                 prodptr = &sblk->rx_mini_consumer;
8791                                 break;
8792                         }
8793                         tnapi->rx_rcb_prod_idx = prodptr;
8794                 } else {
8795                         tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
8796                 }
8797         }
8798
8799         if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
8800                 goto err_out;
8801
8802         return 0;
8803
8804 err_out:
8805         tg3_free_consistent(tp);
8806         return -ENOMEM;
8807 }
8808
8809 #define MAX_WAIT_CNT 1000
8810
8811 /* To stop a block, clear the enable bit and poll till it
8812  * clears.  tp->lock is held.
8813  */
8814 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, bool silent)
8815 {
8816         unsigned int i;
8817         u32 val;
8818
8819         if (tg3_flag(tp, 5705_PLUS)) {
8820                 switch (ofs) {
8821                 case RCVLSC_MODE:
8822                 case DMAC_MODE:
8823                 case MBFREE_MODE:
8824                 case BUFMGR_MODE:
8825                 case MEMARB_MODE:
8826                         /* We can't enable/disable these bits on the
8827                          * 5705/5750, so just report success.
8828                          */
8829                         return 0;
8830
8831                 default:
8832                         break;
8833                 }
8834         }
8835
8836         val = tr32(ofs);
8837         val &= ~enable_bit;
8838         tw32_f(ofs, val);
8839
8840         for (i = 0; i < MAX_WAIT_CNT; i++) {
8841                 if (pci_channel_offline(tp->pdev)) {
8842                         dev_err(&tp->pdev->dev,
8843                                 "tg3_stop_block device offline, "
8844                                 "ofs=%lx enable_bit=%x\n",
8845                                 ofs, enable_bit);
8846                         return -ENODEV;
8847                 }
8848
8849                 udelay(100);
8850                 val = tr32(ofs);
8851                 if ((val & enable_bit) == 0)
8852                         break;
8853         }
8854
8855         if (i == MAX_WAIT_CNT && !silent) {
8856                 dev_err(&tp->pdev->dev,
8857                         "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
8858                         ofs, enable_bit);
8859                 return -ENODEV;
8860         }
8861
8862         return 0;
8863 }
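/* tg3_stop_block() polls in 100 usec steps, so MAX_WAIT_CNT of
 * 1000 bounds the wait at roughly 100 ms per block.  Typical
 * usage accumulates the results, as in tg3_abort_hw() below:
 *
 *	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE,
 *			      silent);
 */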
8864
8865 /* tp->lock is held. */
8866 static int tg3_abort_hw(struct tg3 *tp, bool silent)
8867 {
8868         int i, err;
8869
8870         tg3_disable_ints(tp);
8871
8872         if (pci_channel_offline(tp->pdev)) {
8873                 tp->rx_mode &= ~(RX_MODE_ENABLE | TX_MODE_ENABLE);
8874                 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8875                 err = -ENODEV;
8876                 goto err_no_dev;
8877         }
8878
8879         tp->rx_mode &= ~RX_MODE_ENABLE;
8880         tw32_f(MAC_RX_MODE, tp->rx_mode);
8881         udelay(10);
8882
8883         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
8884         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
8885         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
8886         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
8887         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
8888         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
8889
8890         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
8891         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
8892         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
8893         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
8894         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
8895         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
8896         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
8897
8898         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8899         tw32_f(MAC_MODE, tp->mac_mode);
8900         udelay(40);
8901
8902         tp->tx_mode &= ~TX_MODE_ENABLE;
8903         tw32_f(MAC_TX_MODE, tp->tx_mode);
8904
8905         for (i = 0; i < MAX_WAIT_CNT; i++) {
8906                 udelay(100);
8907                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
8908                         break;
8909         }
8910         if (i >= MAX_WAIT_CNT) {
8911                 dev_err(&tp->pdev->dev,
8912                         "%s timed out, TX_MODE_ENABLE will not clear "
8913                         "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
8914                 err |= -ENODEV;
8915         }
8916
8917         err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
8918         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
8919         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
8920
8921         tw32(FTQ_RESET, 0xffffffff);
8922         tw32(FTQ_RESET, 0x00000000);
8923
8924         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
8925         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
8926
8927 err_no_dev:
8928         for (i = 0; i < tp->irq_cnt; i++) {
8929                 struct tg3_napi *tnapi = &tp->napi[i];
8930                 if (tnapi->hw_status)
8931                         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8932         }
8933
8934         return err;
8935 }
8936
8937 /* Save PCI command register before chip reset */
8938 static void tg3_save_pci_state(struct tg3 *tp)
8939 {
8940         pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
8941 }
8942
8943 /* Restore PCI state after chip reset */
8944 static void tg3_restore_pci_state(struct tg3 *tp)
8945 {
8946         u32 val;
8947
8948         /* Re-enable indirect register accesses. */
8949         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8950                                tp->misc_host_ctrl);
8951
8952         /* Set MAX PCI retry to zero. */
8953         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
8954         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
8955             tg3_flag(tp, PCIX_MODE))
8956                 val |= PCISTATE_RETRY_SAME_DMA;
8957         /* Allow reads and writes to the APE register and memory space. */
8958         if (tg3_flag(tp, ENABLE_APE))
8959                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8960                        PCISTATE_ALLOW_APE_SHMEM_WR |
8961                        PCISTATE_ALLOW_APE_PSPACE_WR;
8962         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
8963
8964         pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
8965
8966         if (!tg3_flag(tp, PCI_EXPRESS)) {
8967                 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
8968                                       tp->pci_cacheline_sz);
8969                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
8970                                       tp->pci_lat_timer);
8971         }
8972
8973         /* Make sure PCI-X relaxed ordering bit is clear. */
8974         if (tg3_flag(tp, PCIX_MODE)) {
8975                 u16 pcix_cmd;
8976
8977                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8978                                      &pcix_cmd);
8979                 pcix_cmd &= ~PCI_X_CMD_ERO;
8980                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8981                                       pcix_cmd);
8982         }
8983
8984         if (tg3_flag(tp, 5780_CLASS)) {
8985
8986                 /* Chip reset on 5780 will reset MSI enable bit,
8987                  * so need to restore it.
8988                  */
8989                 if (tg3_flag(tp, USING_MSI)) {
8990                         u16 ctrl;
8991
8992                         pci_read_config_word(tp->pdev,
8993                                              tp->msi_cap + PCI_MSI_FLAGS,
8994                                              &ctrl);
8995                         pci_write_config_word(tp->pdev,
8996                                               tp->msi_cap + PCI_MSI_FLAGS,
8997                                               ctrl | PCI_MSI_FLAGS_ENABLE);
8998                         val = tr32(MSGINT_MODE);
8999                         tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
9000                 }
9001         }
9002 }
9003
9004 static void tg3_override_clk(struct tg3 *tp)
9005 {
9006         u32 val;
9007
9008         switch (tg3_asic_rev(tp)) {
9009         case ASIC_REV_5717:
9010                 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9011                 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
9012                      TG3_CPMU_MAC_ORIDE_ENABLE);
9013                 break;
9014
9015         case ASIC_REV_5719:
9016         case ASIC_REV_5720:
9017                 tw32(TG3_CPMU_CLCK_ORIDE, CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9018                 break;
9019
9020         default:
9021                 return;
9022         }
9023 }
9024
9025 static void tg3_restore_clk(struct tg3 *tp)
9026 {
9027         u32 val;
9028
9029         switch (tg3_asic_rev(tp)) {
9030         case ASIC_REV_5717:
9031                 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9032                 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE,
9033                      val & ~TG3_CPMU_MAC_ORIDE_ENABLE);
9034                 break;
9035
9036         case ASIC_REV_5719:
9037         case ASIC_REV_5720:
9038                 val = tr32(TG3_CPMU_CLCK_ORIDE);
9039                 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9040                 break;
9041
9042         default:
9043                 return;
9044         }
9045 }
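/* tg3_override_clk() and tg3_restore_clk() are used as a pair:
 * tg3_chip_reset() below raises the MAC clock override before
 * issuing the core clock reset so the bootcode finishes quickly,
 * then drops it again once the reset has completed.
 */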
9046
9047 /* tp->lock is held. */
9048 static int tg3_chip_reset(struct tg3 *tp)
9049         __releases(tp->lock)
9050         __acquires(tp->lock)
9051 {
9052         u32 val;
9053         void (*write_op)(struct tg3 *, u32, u32);
9054         int i, err;
9055
9056         if (!pci_device_is_present(tp->pdev))
9057                 return -ENODEV;
9058
9059         tg3_nvram_lock(tp);
9060
9061         tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
9062
9063         /* No matching tg3_nvram_unlock() after this because
9064          * chip reset below will undo the nvram lock.
9065          */
9066         tp->nvram_lock_cnt = 0;
9067
9068         /* GRC_MISC_CFG core clock reset will clear the memory
9069          * enable bit in PCI register 4 and the MSI enable bit
9070          * on some chips, so we save relevant registers here.
9071          */
9072         tg3_save_pci_state(tp);
9073
9074         if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
9075             tg3_flag(tp, 5755_PLUS))
9076                 tw32(GRC_FASTBOOT_PC, 0);
9077
9078         /*
9079          * We must avoid the readl() that normally takes place.
9080          * It locks machines, causes machine checks, and other
9081          * fun things.  So, temporarily disable the 5701
9082          * hardware workaround, while we do the reset.
9083          */
9084         write_op = tp->write32;
9085         if (write_op == tg3_write_flush_reg32)
9086                 tp->write32 = tg3_write32;
9087
9088         /* Prevent the irq handler from reading or writing PCI registers
9089          * during chip reset when the memory enable bit in the PCI command
9090          * register may be cleared.  The chip does not generate interrupt
9091          * at this time, but the irq handler may still be called due to irq
9092          * sharing or irqpoll.
9093          */
9094         tg3_flag_set(tp, CHIP_RESETTING);
9095         for (i = 0; i < tp->irq_cnt; i++) {
9096                 struct tg3_napi *tnapi = &tp->napi[i];
9097                 if (tnapi->hw_status) {
9098                         tnapi->hw_status->status = 0;
9099                         tnapi->hw_status->status_tag = 0;
9100                 }
9101                 tnapi->last_tag = 0;
9102                 tnapi->last_irq_tag = 0;
9103         }
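        /* Make the CHIP_RESETTING flag and the cleared status block
         * tags visible to all CPUs before waiting out any handlers
         * that may already be running.
         */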
9104         smp_mb();
9105
9106         tg3_full_unlock(tp);
9107
9108         for (i = 0; i < tp->irq_cnt; i++)
9109                 synchronize_irq(tp->napi[i].irq_vec);
9110
9111         tg3_full_lock(tp, 0);
9112
9113         if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9114                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9115                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9116         }
9117
9118         /* do the reset */
9119         val = GRC_MISC_CFG_CORECLK_RESET;
9120
9121         if (tg3_flag(tp, PCI_EXPRESS)) {
9122                 /* Force PCIe 1.0a mode */
9123                 if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
9124                     !tg3_flag(tp, 57765_PLUS) &&
9125                     tr32(TG3_PCIE_PHY_TSTCTL) ==
9126                     (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
9127                         tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
9128
9129                 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
9130                         tw32(GRC_MISC_CFG, (1 << 29));
9131                         val |= (1 << 29);
9132                 }
9133         }
9134
9135         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
9136                 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
9137                 tw32(GRC_VCPU_EXT_CTRL,
9138                      tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
9139         }
9140
9141         /* Set the clock to the highest frequency to avoid timeouts. With link
9142          * aware mode, the clock speed could be slow and bootcode does not
9143          * complete within the expected time. Override the clock to allow the
9144          * bootcode to finish sooner and then restore it.
9145          */
9146         tg3_override_clk(tp);
9147
9148         /* Manage gphy power for all CPMU-absent PCIe devices. */
9149         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
9150                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
9151
9152         tw32(GRC_MISC_CFG, val);
9153
9154         /* restore 5701 hardware bug workaround write method */
9155         tp->write32 = write_op;
9156
9157         /* Unfortunately, we have to delay before the PCI read back.
9158          * Some 575X chips will not even respond to a PCI cfg access
9159          * when the reset command is given to the chip.
9160          *
9161          * How do these hardware designers expect things to work
9162          * properly if the PCI write is posted for a long period
9163          * of time?  It is always necessary to have some method by
9164          * which a register read back can occur to push the write
9165          * out which does the reset.
9166          *
9167          * For most tg3 variants the trick below was working.
9168          * Ho hum...
9169          */
9170         udelay(120);
9171
9172         /* Flush PCI posted writes.  The normal MMIO registers
9173          * are inaccessible at this time so this is the only
9174          * way to do this reliably (actually, this is no longer
9175          * the case, see above).  I tried to use indirect
9176          * register read/write but this upset some 5701 variants.
9177          */
9178         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
9179
9180         udelay(120);
9181
9182         if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
9183                 u16 val16;
9184
9185                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
9186                         int j;
9187                         u32 cfg_val;
9188
9189                         /* Wait for link training to complete.  */
9190                         for (j = 0; j < 5000; j++)
9191                                 udelay(100);
9192
9193                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
9194                         pci_write_config_dword(tp->pdev, 0xc4,
9195                                                cfg_val | (1 << 15));
9196                 }
9197
9198                 /* Clear the "no snoop" and "relaxed ordering" bits. */
9199                 val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
9200                 /*
9201                  * Older PCIe devices only support the 128 byte
9202                  * MPS setting.  Enforce the restriction.
9203                  */
9204                 if (!tg3_flag(tp, CPMU_PRESENT))
9205                         val16 |= PCI_EXP_DEVCTL_PAYLOAD;
9206                 pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);
9207
9208                 /* Clear error status */
9209                 pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
9210                                       PCI_EXP_DEVSTA_CED |
9211                                       PCI_EXP_DEVSTA_NFED |
9212                                       PCI_EXP_DEVSTA_FED |
9213                                       PCI_EXP_DEVSTA_URD);
9214         }
9215
9216         tg3_restore_pci_state(tp);
9217
9218         tg3_flag_clear(tp, CHIP_RESETTING);
9219         tg3_flag_clear(tp, ERROR_PROCESSED);
9220
9221         val = 0;
9222         if (tg3_flag(tp, 5780_CLASS))
9223                 val = tr32(MEMARB_MODE);
9224         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
9225
9226         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
9227                 tg3_stop_fw(tp);
9228                 tw32(0x5000, 0x400);
9229         }
9230
9231         if (tg3_flag(tp, IS_SSB_CORE)) {
9232                 /*
9233                  * BCM4785: In order to avoid repercussions from using
9234                  * potentially defective internal ROM, stop the Rx RISC CPU,
9235                  * which is not required for normal operation.
9236                  */
9237                 tg3_stop_fw(tp);
9238                 tg3_halt_cpu(tp, RX_CPU_BASE);
9239         }
9240
9241         err = tg3_poll_fw(tp);
9242         if (err)
9243                 return err;
9244
9245         tw32(GRC_MODE, tp->grc_mode);
9246
9247         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
9248                 val = tr32(0xc4);
9249
9250                 tw32(0xc4, val | (1 << 15));
9251         }
9252
9253         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
9254             tg3_asic_rev(tp) == ASIC_REV_5705) {
9255                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
9256                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
9257                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
9258                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9259         }
9260
9261         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9262                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
9263                 val = tp->mac_mode;
9264         } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9265                 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
9266                 val = tp->mac_mode;
9267         } else
9268                 val = 0;
9269
9270         tw32_f(MAC_MODE, val);
9271         udelay(40);
9272
9273         tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
9274
9275         tg3_mdio_start(tp);
9276
9277         if (tg3_flag(tp, PCI_EXPRESS) &&
9278             tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
9279             tg3_asic_rev(tp) != ASIC_REV_5785 &&
9280             !tg3_flag(tp, 57765_PLUS)) {
9281                 val = tr32(0x7c00);
9282
9283                 tw32(0x7c00, val | (1 << 25));
9284         }
9285
9286         tg3_restore_clk(tp);
9287
9288         /* Increase the core clock speed to fix tx timeout issue for 5762
9289          * with 100Mbps link speed.
9290          */
9291         if (tg3_asic_rev(tp) == ASIC_REV_5762) {
9292                 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9293                 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
9294                      TG3_CPMU_MAC_ORIDE_ENABLE);
9295         }
9296
9297         /* Reprobe ASF enable state.  */
9298         tg3_flag_clear(tp, ENABLE_ASF);
9299         tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
9300                            TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
9301
9302         tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
9303         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
9304         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
9305                 u32 nic_cfg;
9306
9307                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
9308                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
9309                         tg3_flag_set(tp, ENABLE_ASF);
9310                         tp->last_event_jiffies = jiffies;
9311                         if (tg3_flag(tp, 5750_PLUS))
9312                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
9313
9314                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg);
9315                         if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK)
9316                                 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
9317                         if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID)
9318                                 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
9319                 }
9320         }
9321
9322         return 0;
9323 }
9324
9325 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
9326 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
9327 static void __tg3_set_rx_mode(struct net_device *);
9328
9329 /* tp->lock is held. */
9330 static int tg3_halt(struct tg3 *tp, int kind, bool silent)
9331 {
9332         int err;
9333
9334         tg3_stop_fw(tp);
9335
9336         tg3_write_sig_pre_reset(tp, kind);
9337
9338         tg3_abort_hw(tp, silent);
9339         err = tg3_chip_reset(tp);
9340
9341         __tg3_set_mac_addr(tp, false);
9342
9343         tg3_write_sig_legacy(tp, kind);
9344         tg3_write_sig_post_reset(tp, kind);
9345
9346         if (tp->hw_stats) {
9347                 /* Save the stats across chip resets... */
9348                 tg3_get_nstats(tp, &tp->net_stats_prev);
9349                 tg3_get_estats(tp, &tp->estats_prev);
9350
9351                 /* And make sure the next sample is new data */
9352                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
9353         }
9354
9355         return err;
9356 }
9357
9358 static int tg3_set_mac_addr(struct net_device *dev, void *p)
9359 {
9360         struct tg3 *tp = netdev_priv(dev);
9361         struct sockaddr *addr = p;
9362         int err = 0;
9363         bool skip_mac_1 = false;
9364
9365         if (!is_valid_ether_addr(addr->sa_data))
9366                 return -EADDRNOTAVAIL;
9367
9368         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9369
9370         if (!netif_running(dev))
9371                 return 0;
9372
9373         if (tg3_flag(tp, ENABLE_ASF)) {
9374                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
9375
9376                 addr0_high = tr32(MAC_ADDR_0_HIGH);
9377                 addr0_low = tr32(MAC_ADDR_0_LOW);
9378                 addr1_high = tr32(MAC_ADDR_1_HIGH);
9379                 addr1_low = tr32(MAC_ADDR_1_LOW);
9380
9381                 /* Skip MAC addr 1 if ASF is using it. */
9382                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
9383                     !(addr1_high == 0 && addr1_low == 0))
9384                         skip_mac_1 = true;
9385         }
9386         spin_lock_bh(&tp->lock);
9387         __tg3_set_mac_addr(tp, skip_mac_1);
9388         __tg3_set_rx_mode(dev);
9389         spin_unlock_bh(&tp->lock);
9390
9391         return err;
9392 }
9393
9394 /* tp->lock is held. */
9395 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
9396                            dma_addr_t mapping, u32 maxlen_flags,
9397                            u32 nic_addr)
9398 {
9399         tg3_write_mem(tp,
9400                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
9401                       ((u64) mapping >> 32));
9402         tg3_write_mem(tp,
9403                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
9404                       ((u64) mapping & 0xffffffff));
9405         tg3_write_mem(tp,
9406                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
9407                        maxlen_flags);
9408
9409         if (!tg3_flag(tp, 5705_PLUS))
9410                 tg3_write_mem(tp,
9411                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
9412                               nic_addr);
9413 }
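/* A bdinfo block in NIC SRAM holds four 32-bit words: the host
 * ring DMA address (high/low halves at TG3_BDINFO_HOST_ADDR), the
 * maxlen/flags word (TG3_BDINFO_MAXLEN_FLAGS) and, on chips before
 * the 5705, a NIC-memory ring address (TG3_BDINFO_NIC_ADDR).  The
 * RCB helpers below walk these blocks in TG3_BDINFO_SIZE strides.
 */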
9414
9415
9416 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9417 {
9418         int i = 0;
9419
9420         if (!tg3_flag(tp, ENABLE_TSS)) {
9421                 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
9422                 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
9423                 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
9424         } else {
9425                 tw32(HOSTCC_TXCOL_TICKS, 0);
9426                 tw32(HOSTCC_TXMAX_FRAMES, 0);
9427                 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
9428
9429                 for (; i < tp->txq_cnt; i++) {
9430                         u32 reg;
9431
9432                         reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
9433                         tw32(reg, ec->tx_coalesce_usecs);
9434                         reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
9435                         tw32(reg, ec->tx_max_coalesced_frames);
9436                         reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
9437                         tw32(reg, ec->tx_max_coalesced_frames_irq);
9438                 }
9439         }
9440
9441         for (; i < tp->irq_max - 1; i++) {
9442                 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
9443                 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
9444                 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9445         }
9446 }
9447
9448 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9449 {
9450         int i = 0;
9451         u32 limit = tp->rxq_cnt;
9452
9453         if (!tg3_flag(tp, ENABLE_RSS)) {
9454                 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
9455                 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
9456                 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
9457                 limit--;
9458         } else {
9459                 tw32(HOSTCC_RXCOL_TICKS, 0);
9460                 tw32(HOSTCC_RXMAX_FRAMES, 0);
9461                 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
9462         }
9463
9464         for (; i < limit; i++) {
9465                 u32 reg;
9466
9467                 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
9468                 tw32(reg, ec->rx_coalesce_usecs);
9469                 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
9470                 tw32(reg, ec->rx_max_coalesced_frames);
9471                 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
9472                 tw32(reg, ec->rx_max_coalesced_frames_irq);
9473         }
9474
9475         for (; i < tp->irq_max - 1; i++) {
9476                 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
9477                 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
9478                 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9479         }
9480 }
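/* Both helpers above rely on the per-vector host coalescing
 * registers being spaced 0x18 bytes apart, e.g. for vector n
 * (n >= 1):
 *
 *	reg = HOSTCC_RXCOL_TICKS_VEC1 + (n - 1) * 0x18;
 *
 * Registers for vectors beyond the configured queue count are
 * explicitly zeroed so stale settings cannot affect coalescing.
 */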
9481
9482 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
9483 {
9484         tg3_coal_tx_init(tp, ec);
9485         tg3_coal_rx_init(tp, ec);
9486
9487         if (!tg3_flag(tp, 5705_PLUS)) {
9488                 u32 val = ec->stats_block_coalesce_usecs;
9489
9490                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
9491                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
9492
9493                 if (!tp->link_up)
9494                         val = 0;
9495
9496                 tw32(HOSTCC_STAT_COAL_TICKS, val);
9497         }
9498 }
9499
9500 /* tp->lock is held. */
9501 static void tg3_tx_rcbs_disable(struct tg3 *tp)
9502 {
9503         u32 txrcb, limit;
9504
9505         /* Disable all transmit rings but the first. */
9506         if (!tg3_flag(tp, 5705_PLUS))
9507                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
9508         else if (tg3_flag(tp, 5717_PLUS))
9509                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
9510         else if (tg3_flag(tp, 57765_CLASS) ||
9511                  tg3_asic_rev(tp) == ASIC_REV_5762)
9512                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
9513         else
9514                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9515
9516         for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9517              txrcb < limit; txrcb += TG3_BDINFO_SIZE)
9518                 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
9519                               BDINFO_FLAGS_DISABLED);
9520 }
9521
9522 /* tp->lock is held. */
9523 static void tg3_tx_rcbs_init(struct tg3 *tp)
9524 {
9525         int i = 0;
9526         u32 txrcb = NIC_SRAM_SEND_RCB;
9527
9528         if (tg3_flag(tp, ENABLE_TSS))
9529                 i++;
9530
9531         for (; i < tp->irq_max; i++, txrcb += TG3_BDINFO_SIZE) {
9532                 struct tg3_napi *tnapi = &tp->napi[i];
9533
9534                 if (!tnapi->tx_ring)
9535                         continue;
9536
9537                 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
9538                                (TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT),
9539                                NIC_SRAM_TX_BUFFER_DESC);
9540         }
9541 }
9542
9543 /* tp->lock is held. */
9544 static void tg3_rx_ret_rcbs_disable(struct tg3 *tp)
9545 {
9546         u32 rxrcb, limit;
9547
9548         /* Disable all receive return rings but the first. */
9549         if (tg3_flag(tp, 5717_PLUS))
9550                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
9551         else if (!tg3_flag(tp, 5705_PLUS))
9552                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
9553         else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9554                  tg3_asic_rev(tp) == ASIC_REV_5762 ||
9555                  tg3_flag(tp, 57765_CLASS))
9556                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
9557         else
9558                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9559
9560         for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9561              rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
9562                 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
9563                               BDINFO_FLAGS_DISABLED);
9564 }
9565
9566 /* tp->lock is held. */
9567 static void tg3_rx_ret_rcbs_init(struct tg3 *tp)
9568 {
9569         int i = 0;
9570         u32 rxrcb = NIC_SRAM_RCV_RET_RCB;
9571
9572         if (tg3_flag(tp, ENABLE_RSS))
9573                 i++;
9574
9575         for (; i < tp->irq_max; i++, rxrcb += TG3_BDINFO_SIZE) {
9576                 struct tg3_napi *tnapi = &tp->napi[i];
9577
9578                 if (!tnapi->rx_rcb)
9579                         continue;
9580
9581                 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
9582                                (tp->rx_ret_ring_mask + 1) <<
9583                                 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
9584         }
9585 }
9586
9587 /* tp->lock is held. */
9588 static void tg3_rings_reset(struct tg3 *tp)
9589 {
9590         int i;
9591         u32 stblk;
9592         struct tg3_napi *tnapi = &tp->napi[0];
9593
9594         tg3_tx_rcbs_disable(tp);
9595
9596         tg3_rx_ret_rcbs_disable(tp);
9597
9598         /* Disable interrupts */
9599         tw32_mailbox_f(tp->napi[0].int_mbox, 1);
9600         tp->napi[0].chk_msi_cnt = 0;
9601         tp->napi[0].last_rx_cons = 0;
9602         tp->napi[0].last_tx_cons = 0;
9603
9604         /* Zero mailbox registers. */
9605         if (tg3_flag(tp, SUPPORT_MSIX)) {
9606                 for (i = 1; i < tp->irq_max; i++) {
9607                         tp->napi[i].tx_prod = 0;
9608                         tp->napi[i].tx_cons = 0;
9609                         if (tg3_flag(tp, ENABLE_TSS))
9610                                 tw32_mailbox(tp->napi[i].prodmbox, 0);
9611                         tw32_rx_mbox(tp->napi[i].consmbox, 0);
9612                         tw32_mailbox_f(tp->napi[i].int_mbox, 1);
9613                         tp->napi[i].chk_msi_cnt = 0;
9614                         tp->napi[i].last_rx_cons = 0;
9615                         tp->napi[i].last_tx_cons = 0;
9616                 }
9617                 if (!tg3_flag(tp, ENABLE_TSS))
9618                         tw32_mailbox(tp->napi[0].prodmbox, 0);
9619         } else {
9620                 tp->napi[0].tx_prod = 0;
9621                 tp->napi[0].tx_cons = 0;
9622                 tw32_mailbox(tp->napi[0].prodmbox, 0);
9623                 tw32_rx_mbox(tp->napi[0].consmbox, 0);
9624         }
9625
9626         /* Make sure the NIC-based send BD rings are disabled. */
9627         if (!tg3_flag(tp, 5705_PLUS)) {
9628                 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
9629                 for (i = 0; i < 16; i++)
9630                         tw32_tx_mbox(mbox + i * 8, 0);
9631         }
9632
9633         /* Clear status block in ram. */
9634         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9635
9636         /* Set status block DMA address */
9637         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9638              ((u64) tnapi->status_mapping >> 32));
9639         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9640              ((u64) tnapi->status_mapping & 0xffffffff));
9641
9642         stblk = HOSTCC_STATBLCK_RING1;
9643
9644         for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
9645                 u64 mapping = (u64)tnapi->status_mapping;
9646                 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
9647                 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
9648                 stblk += 8;
9649
9650                 /* Clear status block in ram. */
9651                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9652         }
9653
9654         tg3_tx_rcbs_init(tp);
9655         tg3_rx_ret_rcbs_init(tp);
9656 }
9657
9658 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
9659 {
9660         u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
9661
9662         if (!tg3_flag(tp, 5750_PLUS) ||
9663             tg3_flag(tp, 5780_CLASS) ||
9664             tg3_asic_rev(tp) == ASIC_REV_5750 ||
9665             tg3_asic_rev(tp) == ASIC_REV_5752 ||
9666             tg3_flag(tp, 57765_PLUS))
9667                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
9668         else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9669                  tg3_asic_rev(tp) == ASIC_REV_5787)
9670                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
9671         else
9672                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
9673
9674         nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
9675         host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
9676
9677         val = min(nic_rep_thresh, host_rep_thresh);
9678         tw32(RCVBDI_STD_THRESH, val);
9679
9680         if (tg3_flag(tp, 57765_PLUS))
9681                 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
9682
9683         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
9684                 return;
9685
9686         bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
9687
9688         host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
9689
9690         val = min(bdcache_maxcnt / 2, host_rep_thresh);
9691         tw32(RCVBDI_JUMBO_THRESH, val);
9692
9693         if (tg3_flag(tp, 57765_PLUS))
9694                 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
9695 }
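/* Worked example for the standard ring: host_rep_thresh is
 * rx_pending / 8 with a floor of 1, so with the usual default of
 * 200 pending rx buffers (assumed here) that is 25.  The value
 * programmed into RCVBDI_STD_THRESH is the smaller of that and
 * nic_rep_thresh (half the BD cache, capped at rx_std_max_post).
 */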
9696
9697 static inline u32 calc_crc(unsigned char *buf, int len)
9698 {
9699         u32 reg;
9700         u32 tmp;
9701         int j, k;
9702
9703         reg = 0xffffffff;
9704
9705         for (j = 0; j < len; j++) {
9706                 reg ^= buf[j];
9707
9708                 for (k = 0; k < 8; k++) {
9709                         tmp = reg & 0x01;
9710
9711                         reg >>= 1;
9712
9713                         if (tmp)
9714                                 reg ^= CRC32_POLY_LE;
9715                 }
9716         }
9717
9718         return ~reg;
9719 }
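/* This is the standard little-endian (bit-reversed) Ethernet
 * CRC-32, open-coded one bit at a time.  It should be equivalent
 * to ~crc32_le(~0, buf, len) from <linux/crc32.h>; only the low
 * 7 bits of the result are consumed by the multicast hash below.
 */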
9720
9721 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9722 {
9723         /* accept or reject all multicast frames */
9724         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9725         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9726         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9727         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9728 }
9729
9730 static void __tg3_set_rx_mode(struct net_device *dev)
9731 {
9732         struct tg3 *tp = netdev_priv(dev);
9733         u32 rx_mode;
9734
9735         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9736                                   RX_MODE_KEEP_VLAN_TAG);
9737
9738 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9739         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9740          * flag clear.
9741          */
9742         if (!tg3_flag(tp, ENABLE_ASF))
9743                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9744 #endif
9745
9746         if (dev->flags & IFF_PROMISC) {
9747                 /* Promiscuous mode. */
9748                 rx_mode |= RX_MODE_PROMISC;
9749         } else if (dev->flags & IFF_ALLMULTI) {
9750                 /* Accept all multicast. */
9751                 tg3_set_multi(tp, 1);
9752         } else if (netdev_mc_empty(dev)) {
9753                 /* Reject all multicast. */
9754                 tg3_set_multi(tp, 0);
9755         } else {
9756                 /* Accept one or more multicast(s). */
9757                 struct netdev_hw_addr *ha;
9758                 u32 mc_filter[4] = { 0, };
9759                 u32 regidx;
9760                 u32 bit;
9761                 u32 crc;
9762
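                /* The MAC hashes each multicast address into a 128-bit
                 * filter: the low 7 bits of the inverted CRC select the
                 * bit, and bits 6:5 of that value select one of the four
                 * hash registers.  e.g. a CRC whose low 7 bits are zero
                 * yields bit 0x7f, i.e. MAC_HASH_REG_3 bit 31.
                 */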
9763                 netdev_for_each_mc_addr(ha, dev) {
9764                         crc = calc_crc(ha->addr, ETH_ALEN);
9765                         bit = ~crc & 0x7f;
9766                         regidx = (bit & 0x60) >> 5;
9767                         bit &= 0x1f;
9768                         mc_filter[regidx] |= (1 << bit);
9769                 }
9770
9771                 tw32(MAC_HASH_REG_0, mc_filter[0]);
9772                 tw32(MAC_HASH_REG_1, mc_filter[1]);
9773                 tw32(MAC_HASH_REG_2, mc_filter[2]);
9774                 tw32(MAC_HASH_REG_3, mc_filter[3]);
9775         }
9776
9777         if (netdev_uc_count(dev) > TG3_MAX_UCAST_ADDR(tp)) {
9778                 rx_mode |= RX_MODE_PROMISC;
9779         } else if (!(dev->flags & IFF_PROMISC)) {
9780                 /* Add all entries to the MAC addr filter list */
9781                 int i = 0;
9782                 struct netdev_hw_addr *ha;
9783
9784                 netdev_for_each_uc_addr(ha, dev) {
9785                         __tg3_set_one_mac_addr(tp, ha->addr,
9786                                                i + TG3_UCAST_ADDR_IDX(tp));
9787                         i++;
9788                 }
9789         }
9790
9791         if (rx_mode != tp->rx_mode) {
9792                 tp->rx_mode = rx_mode;
9793                 tw32_f(MAC_RX_MODE, rx_mode);
9794                 udelay(10);
9795         }
9796 }
9797
9798 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
9799 {
9800         int i;
9801
9802         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
9803                 tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
9804 }
9805
9806 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
9807 {
9808         int i;
9809
9810         if (!tg3_flag(tp, SUPPORT_MSIX))
9811                 return;
9812
9813         if (tp->rxq_cnt == 1) {
9814                 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
9815                 return;
9816         }
9817
9818         /* Validate table against current IRQ count */
9819         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
9820                 if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
9821                         break;
9822         }
9823
9824         if (i != TG3_RSS_INDIR_TBL_SIZE)
9825                 tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
9826 }
9827
9828 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
9829 {
9830         int i = 0;
9831         u32 reg = MAC_RSS_INDIR_TBL_0;
9832
9833         while (i < TG3_RSS_INDIR_TBL_SIZE) {
9834                 u32 val = tp->rss_ind_tbl[i];
9835                 i++;
9836                 for (; i % 8; i++) {
9837                         val <<= 4;
9838                         val |= tp->rss_ind_tbl[i];
9839                 }
9840                 tw32(reg, val);
9841                 reg += 4;
9842         }
9843 }
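/* The indirection table packs eight 4-bit queue indices into each
 * 32-bit register (16 registers for the 128 entries), with the
 * first entry of each group in the most significant nibble.  The
 * first iteration of the loop above effectively computes:
 *
 *	val = tbl[0] << 28 | tbl[1] << 24 | ... | tbl[7];
 *	tw32(MAC_RSS_INDIR_TBL_0, val);
 */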
9844
9845 static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp)
9846 {
9847         if (tg3_asic_rev(tp) == ASIC_REV_5719)
9848                 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719;
9849         else
9850                 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720;
9851 }
9852
9853 /* tp->lock is held. */
9854 static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
9855 {
9856         u32 val, rdmac_mode;
9857         int i, err, limit;
9858         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
9859
9860         tg3_disable_ints(tp);
9861
9862         tg3_stop_fw(tp);
9863
9864         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
9865
9866         if (tg3_flag(tp, INIT_COMPLETE))
9867                 tg3_abort_hw(tp, 1);
9868
9869         if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
9870             !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) {
9871                 tg3_phy_pull_config(tp);
9872                 tg3_eee_pull_config(tp, NULL);
9873                 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
9874         }
9875
9876         /* Enable MAC control of LPI */
9877         if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
9878                 tg3_setup_eee(tp);
9879
9880         if (reset_phy)
9881                 tg3_phy_reset(tp);
9882
9883         err = tg3_chip_reset(tp);
9884         if (err)
9885                 return err;
9886
9887         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
9888
9889         if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
9890                 val = tr32(TG3_CPMU_CTRL);
9891                 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
9892                 tw32(TG3_CPMU_CTRL, val);
9893
9894                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9895                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9896                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9897                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9898
9899                 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
9900                 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
9901                 val |= CPMU_LNK_AWARE_MACCLK_6_25;
9902                 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
9903
9904                 val = tr32(TG3_CPMU_HST_ACC);
9905                 val &= ~CPMU_HST_ACC_MACCLK_MASK;
9906                 val |= CPMU_HST_ACC_MACCLK_6_25;
9907                 tw32(TG3_CPMU_HST_ACC, val);
9908         }
9909
9910         if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9911                 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
9912                 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
9913                        PCIE_PWR_MGMT_L1_THRESH_4MS;
9914                 tw32(PCIE_PWR_MGMT_THRESH, val);
9915
9916                 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
9917                 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
9918
9919                 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
9920
9921                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9922                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9923         }
9924
9925         if (tg3_flag(tp, L1PLLPD_EN)) {
9926                 u32 grc_mode = tr32(GRC_MODE);
9927
9928                 /* Access the lower 1K of PL PCIE block registers. */
9929                 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9930                 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9931
9932                 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
9933                 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
9934                      val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
9935
9936                 tw32(GRC_MODE, grc_mode);
9937         }
9938
9939         if (tg3_flag(tp, 57765_CLASS)) {
9940                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
9941                         u32 grc_mode = tr32(GRC_MODE);
9942
9943                         /* Access the lower 1K of PL PCIE block registers. */
9944                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9945                         tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9946
9947                         val = tr32(TG3_PCIE_TLDLPL_PORT +
9948                                    TG3_PCIE_PL_LO_PHYCTL5);
9949                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
9950                              val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
9951
9952                         tw32(GRC_MODE, grc_mode);
9953                 }
9954
9955                 if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
9956                         u32 grc_mode;
9957
9958                         /* Fix transmit hangs */
9959                         val = tr32(TG3_CPMU_PADRNG_CTL);
9960                         val |= TG3_CPMU_PADRNG_CTL_RDIV2;
9961                         tw32(TG3_CPMU_PADRNG_CTL, val);
9962
9963                         grc_mode = tr32(GRC_MODE);
9964
9965                         /* Access the lower 1K of DL PCIE block registers. */
9966                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9967                         tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
9968
9969                         val = tr32(TG3_PCIE_TLDLPL_PORT +
9970                                    TG3_PCIE_DL_LO_FTSMAX);
9971                         val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
9972                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
9973                              val | TG3_PCIE_DL_LO_FTSMAX_VAL);
9974
9975                         tw32(GRC_MODE, grc_mode);
9976                 }
9977
9978                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9979                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9980                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9981                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9982         }
9983
9984         /* This works around an issue with Athlon chipsets on
9985          * B3 tigon3 silicon.  This bit has no effect on any
9986          * other revision.  But do not set this on PCI Express
9987          * chips and don't even touch the clocks if the CPMU is present.
9988          */
9989         if (!tg3_flag(tp, CPMU_PRESENT)) {
9990                 if (!tg3_flag(tp, PCI_EXPRESS))
9991                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
9992                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9993         }
9994
9995         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
9996             tg3_flag(tp, PCIX_MODE)) {
9997                 val = tr32(TG3PCI_PCISTATE);
9998                 val |= PCISTATE_RETRY_SAME_DMA;
9999                 tw32(TG3PCI_PCISTATE, val);
10000         }
10001
10002         if (tg3_flag(tp, ENABLE_APE)) {
10003                 /* Allow reads and writes to the
10004                  * APE register and memory space.
10005                  */
10006                 val = tr32(TG3PCI_PCISTATE);
10007                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
10008                        PCISTATE_ALLOW_APE_SHMEM_WR |
10009                        PCISTATE_ALLOW_APE_PSPACE_WR;
10010                 tw32(TG3PCI_PCISTATE, val);
10011         }
10012
10013         if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
10014                 /* Enable some hw fixes.  */
10015                 val = tr32(TG3PCI_MSI_DATA);
10016                 val |= (1 << 26) | (1 << 28) | (1 << 29);
10017                 tw32(TG3PCI_MSI_DATA, val);
10018         }
10019
10020         /* Descriptor ring init may access the NIC SRAM
10021          * area to set up the TX descriptors, so we can
10022          * only do this after the hardware has been
10023          * successfully reset.
10024          */
10025         err = tg3_init_rings(tp);
10026         if (err)
10027                 return err;
10028
10029         if (tg3_flag(tp, 57765_PLUS)) {
10030                 val = tr32(TG3PCI_DMA_RW_CTRL) &
10031                       ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
10032                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
10033                         val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
10034                 if (!tg3_flag(tp, 57765_CLASS) &&
10035                     tg3_asic_rev(tp) != ASIC_REV_5717 &&
10036                     tg3_asic_rev(tp) != ASIC_REV_5762)
10037                         val |= DMA_RWCTRL_TAGGED_STAT_WA;
10038                 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
10039         } else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
10040                    tg3_asic_rev(tp) != ASIC_REV_5761) {
10041                 /* This value is determined during the probe-time DMA
10042                  * engine test, tg3_test_dma.
10043                  */
10044                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10045         }
10046
10047         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
10048                           GRC_MODE_4X_NIC_SEND_RINGS |
10049                           GRC_MODE_NO_TX_PHDR_CSUM |
10050                           GRC_MODE_NO_RX_PHDR_CSUM);
10051         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
10052
10053         /* Pseudo-header checksum is done by hardware logic and not
10054          * the offload processors, so make the chip do the pseudo-
10055          * header checksums on receive.  For transmit it is more
10056          * convenient to do the pseudo-header checksum in software
10057          * as Linux does that on transmit for us in all cases.
10058          */
10059         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
10060
10061         val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
10062         if (tp->rxptpctl)
10063                 tw32(TG3_RX_PTP_CTL,
10064                      tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
10065
10066         if (tg3_flag(tp, PTP_CAPABLE))
10067                 val |= GRC_MODE_TIME_SYNC_ENABLE;
10068
10069         tw32(GRC_MODE, tp->grc_mode | val);
10070
10071         /* On one AMD platform, the MRRS is restricted to 4000 because of
10072          * a south bridge limitation.  As a workaround, the driver sets
10073          * the MRRS to 2048 instead of the default 4096.
10074          */
10075         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
10076             tp->pdev->subsystem_device == TG3PCI_SUBDEVICE_ID_DELL_5762) {
10077                 val = tr32(TG3PCI_DEV_STATUS_CTRL) & ~MAX_READ_REQ_MASK;
10078                 tw32(TG3PCI_DEV_STATUS_CTRL, val | MAX_READ_REQ_SIZE_2048);
10079         }
10080
10081         /* Set up the timer prescaler register.  The clock is always 66 MHz. */
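              /* A prescaler value of 65 divides the 66 MHz clock by 65 + 1,
               * which appears intended to yield a 1 MHz timer tick.
               */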
10082         val = tr32(GRC_MISC_CFG);
10083         val &= ~0xff;
10084         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
10085         tw32(GRC_MISC_CFG, val);
10086
10087         /* Initialize MBUF/DESC pool. */
10088         if (tg3_flag(tp, 5750_PLUS)) {
10089                 /* Do nothing.  */
10090         } else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
10091                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
10092                 if (tg3_asic_rev(tp) == ASIC_REV_5704)
10093                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
10094                 else
10095                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
10096                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
10097                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
10098         } else if (tg3_flag(tp, TSO_CAPABLE)) {
10099                 int fw_len;
10100
10101                 fw_len = tp->fw_len;
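                      /* Round fw_len up to a 128-byte (0x80) multiple so the
                       * MBUF pool is placed past the firmware image on an
                       * aligned boundary.
                       */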
10102                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
10103                 tw32(BUFMGR_MB_POOL_ADDR,
10104                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
10105                 tw32(BUFMGR_MB_POOL_SIZE,
10106                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
10107         }
10108
10109         if (tp->dev->mtu <= ETH_DATA_LEN) {
10110                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
10111                      tp->bufmgr_config.mbuf_read_dma_low_water);
10112                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
10113                      tp->bufmgr_config.mbuf_mac_rx_low_water);
10114                 tw32(BUFMGR_MB_HIGH_WATER,
10115                      tp->bufmgr_config.mbuf_high_water);
10116         } else {
10117                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
10118                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
10119                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
10120                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
10121                 tw32(BUFMGR_MB_HIGH_WATER,
10122                      tp->bufmgr_config.mbuf_high_water_jumbo);
10123         }
10124         tw32(BUFMGR_DMA_LOW_WATER,
10125              tp->bufmgr_config.dma_low_water);
10126         tw32(BUFMGR_DMA_HIGH_WATER,
10127              tp->bufmgr_config.dma_high_water);
10128
10129         val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
10130         if (tg3_asic_rev(tp) == ASIC_REV_5719)
10131                 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
10132         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10133             tg3_asic_rev(tp) == ASIC_REV_5762 ||
10134             tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10135             tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
10136                 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
10137         tw32(BUFMGR_MODE, val);
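              /* Poll up to 2000 * 10 us = 20 ms for the buffer manager to
               * report itself enabled.
               */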
10138         for (i = 0; i < 2000; i++) {
10139                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
10140                         break;
10141                 udelay(10);
10142         }
10143         if (i >= 2000) {
10144                 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
10145                 return -ENODEV;
10146         }
10147
10148         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
10149                 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
10150
10151         tg3_setup_rxbd_thresholds(tp);
10152
10153         /* Initialize TG3_BDINFO's at:
10154          *  RCVDBDI_STD_BD:     standard eth size rx ring
10155          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
10156          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
10157          *
10158          * like so:
10159          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
10160          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
10161          *                              ring attribute flags
10162          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
10163          *
10164          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
10165          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
10166          *
10167          * The size of each ring is fixed in the firmware, but the location is
10168          * configurable.
10169          */
10170         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10171              ((u64) tpr->rx_std_mapping >> 32));
10172         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10173              ((u64) tpr->rx_std_mapping & 0xffffffff));
10174         if (!tg3_flag(tp, 5717_PLUS))
10175                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
10176                      NIC_SRAM_RX_BUFFER_DESC);
10177
10178         /* Disable the mini ring */
10179         if (!tg3_flag(tp, 5705_PLUS))
10180                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
10181                      BDINFO_FLAGS_DISABLED);
10182
10183         /* Program the jumbo buffer descriptor ring control
10184          * blocks on those devices that have them.
10185          */
10186         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10187             (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
10188
10189                 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
10190                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10191                              ((u64) tpr->rx_jmb_mapping >> 32));
10192                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10193                              ((u64) tpr->rx_jmb_mapping & 0xffffffff));
10194                         val = TG3_RX_JMB_RING_SIZE(tp) <<
10195                               BDINFO_FLAGS_MAXLEN_SHIFT;
10196                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10197                              val | BDINFO_FLAGS_USE_EXT_RECV);
10198                         if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
10199                             tg3_flag(tp, 57765_CLASS) ||
10200                             tg3_asic_rev(tp) == ASIC_REV_5762)
10201                                 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
10202                                      NIC_SRAM_RX_JUMBO_BUFFER_DESC);
10203                 } else {
10204                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10205                              BDINFO_FLAGS_DISABLED);
10206                 }
10207
10208                 if (tg3_flag(tp, 57765_PLUS)) {
10209                         val = TG3_RX_STD_RING_SIZE(tp);
10210                         val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
10211                         val |= (TG3_RX_STD_DMA_SZ << 2);
10212                 } else
10213                         val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
10214         } else
10215                 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
10216
10217         tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
10218
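              /* Prime the rx producer indices: tell the chip how many
               * standard (and, if enabled, jumbo) buffers the host has
               * already posted.
               */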
10219         tpr->rx_std_prod_idx = tp->rx_pending;
10220         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
10221
10222         tpr->rx_jmb_prod_idx =
10223                 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
10224         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
10225
10226         tg3_rings_reset(tp);
10227
10228         /* Initialize MAC address and backoff seed. */
10229         __tg3_set_mac_addr(tp, false);
10230
10231         /* MTU + ethernet header + FCS + optional VLAN tag */
10232         tw32(MAC_RX_MTU_SIZE,
10233              tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
10234
10235         /* The slot time is changed by tg3_setup_phy if we
10236          * run at gigabit with half duplex.
10237          */
10238         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
10239               (6 << TX_LENGTHS_IPG_SHIFT) |
10240               (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
10241
10242         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10243             tg3_asic_rev(tp) == ASIC_REV_5762)
10244                 val |= tr32(MAC_TX_LENGTHS) &
10245                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
10246                         TX_LENGTHS_CNT_DWN_VAL_MSK);
10247
10248         tw32(MAC_TX_LENGTHS, val);
10249
10250         /* Receive rules. */
10251         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
10252         tw32(RCVLPC_CONFIG, 0x0181);
10253
10254         /* Calculate the RDMAC_MODE setting early; we need it to determine
10255          * the RCVLPC_STATE_ENABLE mask.
10256          */
10257         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
10258                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
10259                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
10260                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
10261                       RDMAC_MODE_LNGREAD_ENAB);
10262
10263         if (tg3_asic_rev(tp) == ASIC_REV_5717)
10264                 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
10265
10266         if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
10267             tg3_asic_rev(tp) == ASIC_REV_5785 ||
10268             tg3_asic_rev(tp) == ASIC_REV_57780)
10269                 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
10270                               RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
10271                               RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
10272
10273         if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10274             tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10275                 if (tg3_flag(tp, TSO_CAPABLE) &&
10276                     tg3_asic_rev(tp) == ASIC_REV_5705) {
10277                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
10278                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10279                            !tg3_flag(tp, IS_5788)) {
10280                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10281                 }
10282         }
10283
10284         if (tg3_flag(tp, PCI_EXPRESS))
10285                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10286
10287         if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10288                 tp->dma_limit = 0;
10289                 if (tp->dev->mtu <= ETH_DATA_LEN) {
10290                         rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
10291                         tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
10292                 }
10293         }
10294
10295         if (tg3_flag(tp, HW_TSO_1) ||
10296             tg3_flag(tp, HW_TSO_2) ||
10297             tg3_flag(tp, HW_TSO_3))
10298                 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
10299
10300         if (tg3_flag(tp, 57765_PLUS) ||
10301             tg3_asic_rev(tp) == ASIC_REV_5785 ||
10302             tg3_asic_rev(tp) == ASIC_REV_57780)
10303                 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
10304
10305         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10306             tg3_asic_rev(tp) == ASIC_REV_5762)
10307                 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
10308
10309         if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
10310             tg3_asic_rev(tp) == ASIC_REV_5784 ||
10311             tg3_asic_rev(tp) == ASIC_REV_5785 ||
10312             tg3_asic_rev(tp) == ASIC_REV_57780 ||
10313             tg3_flag(tp, 57765_PLUS)) {
10314                 u32 tgtreg;
10315
10316                 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10317                         tgtreg = TG3_RDMA_RSRVCTRL_REG2;
10318                 else
10319                         tgtreg = TG3_RDMA_RSRVCTRL_REG;
10320
10321                 val = tr32(tgtreg);
10322                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10323                     tg3_asic_rev(tp) == ASIC_REV_5762) {
10324                         val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
10325                                  TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
10326                                  TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
10327                         val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
10328                                TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
10329                                TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
10330                 }
10331                 tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
10332         }
10333
10334         if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10335             tg3_asic_rev(tp) == ASIC_REV_5720 ||
10336             tg3_asic_rev(tp) == ASIC_REV_5762) {
10337                 u32 tgtreg;
10338
10339                 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10340                         tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
10341                 else
10342                         tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
10343
10344                 val = tr32(tgtreg);
10345                 tw32(tgtreg, val |
10346                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
10347                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
10348         }
10349
10350         /* Receive/send statistics. */
10351         if (tg3_flag(tp, 5750_PLUS)) {
10352                 val = tr32(RCVLPC_STATS_ENABLE);
10353                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
10354                 tw32(RCVLPC_STATS_ENABLE, val);
10355         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
10356                    tg3_flag(tp, TSO_CAPABLE)) {
10357                 val = tr32(RCVLPC_STATS_ENABLE);
10358                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
10359                 tw32(RCVLPC_STATS_ENABLE, val);
10360         } else {
10361                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
10362         }
10363         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
10364         tw32(SNDDATAI_STATSENAB, 0xffffff);
10365         tw32(SNDDATAI_STATSCTRL,
10366              (SNDDATAI_SCTRL_ENABLE |
10367               SNDDATAI_SCTRL_FASTUPD));
10368
10369         /* Setup host coalescing engine. */
10370         tw32(HOSTCC_MODE, 0);
10371         for (i = 0; i < 2000; i++) {
10372                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
10373                         break;
10374                 udelay(10);
10375         }
10376
10377         __tg3_set_coalesce(tp, &tp->coal);
10378
10379         if (!tg3_flag(tp, 5705_PLUS)) {
10380                 /* Status/statistics block address.  See tg3_timer,
10381                  * the tg3_periodic_fetch_stats call there, and
10382                  * tg3_get_stats to see how this works for 5705/5750 chips.
10383                  */
10384                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
10385                      ((u64) tp->stats_mapping >> 32));
10386                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
10387                      ((u64) tp->stats_mapping & 0xffffffff));
10388                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
10389
10390                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
10391
10392                 /* Clear statistics and status block memory areas */
10393                 for (i = NIC_SRAM_STATS_BLK;
10394                      i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
10395                      i += sizeof(u32)) {
10396                         tg3_write_mem(tp, i, 0);
10397                         udelay(40);
10398                 }
10399         }
10400
10401         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
10402
10403         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
10404         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
10405         if (!tg3_flag(tp, 5705_PLUS))
10406                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
10407
10408         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
10409                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
10410                 /* Reset to avoid intermittently losing the first rx packet */
10411                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10412                 udelay(10);
10413         }
10414
10415         tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
10416                         MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
10417                         MAC_MODE_FHDE_ENABLE;
10418         if (tg3_flag(tp, ENABLE_APE))
10419                 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
10420         if (!tg3_flag(tp, 5705_PLUS) &&
10421             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10422             tg3_asic_rev(tp) != ASIC_REV_5700)
10423                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
10424         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
10425         udelay(40);
10426
10427         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
10428          * If TG3_FLAG_IS_NIC is zero, we should read the
10429          * register to preserve the GPIO settings for LOMs. The GPIOs,
10430          * whether used as inputs or outputs, are set by boot code after
10431          * reset.
10432          */
10433         if (!tg3_flag(tp, IS_NIC)) {
10434                 u32 gpio_mask;
10435
10436                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
10437                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
10438                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
10439
10440                 if (tg3_asic_rev(tp) == ASIC_REV_5752)
10441                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
10442                                      GRC_LCLCTRL_GPIO_OUTPUT3;
10443
10444                 if (tg3_asic_rev(tp) == ASIC_REV_5755)
10445                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
10446
10447                 tp->grc_local_ctrl &= ~gpio_mask;
10448                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
10449
10450                 /* GPIO1 must be driven high for eeprom write protect */
10451                 if (tg3_flag(tp, EEPROM_WRITE_PROT))
10452                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10453                                                GRC_LCLCTRL_GPIO_OUTPUT1);
10454         }
10455         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10456         udelay(100);
10457
10458         if (tg3_flag(tp, USING_MSIX)) {
10459                 val = tr32(MSGINT_MODE);
10460                 val |= MSGINT_MODE_ENABLE;
10461                 if (tp->irq_cnt > 1)
10462                         val |= MSGINT_MODE_MULTIVEC_EN;
10463                 if (!tg3_flag(tp, 1SHOT_MSI))
10464                         val |= MSGINT_MODE_ONE_SHOT_DISABLE;
10465                 tw32(MSGINT_MODE, val);
10466         }
10467
10468         if (!tg3_flag(tp, 5705_PLUS)) {
10469                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
10470                 udelay(40);
10471         }
10472
10473         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
10474                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
10475                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
10476                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
10477                WDMAC_MODE_LNGREAD_ENAB);
10478
10479         if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10480             tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10481                 if (tg3_flag(tp, TSO_CAPABLE) &&
10482                     (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
10483                      tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
10484                         /* nothing */
10485                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10486                            !tg3_flag(tp, IS_5788)) {
10487                         val |= WDMAC_MODE_RX_ACCEL;
10488                 }
10489         }
10490
10491         /* Enable host coalescing bug fix */
10492         if (tg3_flag(tp, 5755_PLUS))
10493                 val |= WDMAC_MODE_STATUS_TAG_FIX;
10494
10495         if (tg3_asic_rev(tp) == ASIC_REV_5785)
10496                 val |= WDMAC_MODE_BURST_ALL_DATA;
10497
10498         tw32_f(WDMAC_MODE, val);
10499         udelay(40);
10500
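              /* In PCI-X mode, cap the maximum memory read byte count at 2K
               * (and, on 5704, also clear the maximum split transactions
               * field) in the PCI-X command register.
               */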
10501         if (tg3_flag(tp, PCIX_MODE)) {
10502                 u16 pcix_cmd;
10503
10504                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10505                                      &pcix_cmd);
10506                 if (tg3_asic_rev(tp) == ASIC_REV_5703) {
10507                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
10508                         pcix_cmd |= PCI_X_CMD_READ_2K;
10509                 } else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
10510                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
10511                         pcix_cmd |= PCI_X_CMD_READ_2K;
10512                 }
10513                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10514                                       pcix_cmd);
10515         }
10516
10517         tw32_f(RDMAC_MODE, rdmac_mode);
10518         udelay(40);
10519
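              /* 5719/5720 workaround: if any RDMA length register exceeds
               * the MTU, set the LSO read-DMA workaround bit and flag it so
               * tg3_periodic_fetch_stats() can undo it later.
               */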
10520         if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10521             tg3_asic_rev(tp) == ASIC_REV_5720) {
10522                 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
10523                         if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
10524                                 break;
10525                 }
10526                 if (i < TG3_NUM_RDMA_CHANNELS) {
10527                         val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10528                         val |= tg3_lso_rd_dma_workaround_bit(tp);
10529                         tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10530                         tg3_flag_set(tp, 5719_5720_RDMA_BUG);
10531                 }
10532         }
10533
10534         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
10535         if (!tg3_flag(tp, 5705_PLUS))
10536                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
10537
10538         if (tg3_asic_rev(tp) == ASIC_REV_5761)
10539                 tw32(SNDDATAC_MODE,
10540                      SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
10541         else
10542                 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
10543
10544         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
10545         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
10546         val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
10547         if (tg3_flag(tp, LRG_PROD_RING_CAP))
10548                 val |= RCVDBDI_MODE_LRG_RING_SZ;
10549         tw32(RCVDBDI_MODE, val);
10550         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
10551         if (tg3_flag(tp, HW_TSO_1) ||
10552             tg3_flag(tp, HW_TSO_2) ||
10553             tg3_flag(tp, HW_TSO_3))
10554                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
10555         val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
10556         if (tg3_flag(tp, ENABLE_TSS))
10557                 val |= SNDBDI_MODE_MULTI_TXQ_EN;
10558         tw32(SNDBDI_MODE, val);
10559         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
10560
10561         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
10562                 err = tg3_load_5701_a0_firmware_fix(tp);
10563                 if (err)
10564                         return err;
10565         }
10566
10567         if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10568                 /* Ignore any errors from the firmware download.  If the
10569                  * download fails, the device will operate with EEE disabled.
10570                  */
10571                 tg3_load_57766_firmware(tp);
10572         }
10573
10574         if (tg3_flag(tp, TSO_CAPABLE)) {
10575                 err = tg3_load_tso_firmware(tp);
10576                 if (err)
10577                         return err;
10578         }
10579
10580         tp->tx_mode = TX_MODE_ENABLE;
10581
10582         if (tg3_flag(tp, 5755_PLUS) ||
10583             tg3_asic_rev(tp) == ASIC_REV_5906)
10584                 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
10585
10586         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10587             tg3_asic_rev(tp) == ASIC_REV_5762) {
10588                 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
10589                 tp->tx_mode &= ~val;
10590                 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
10591         }
10592
10593         tw32_f(MAC_TX_MODE, tp->tx_mode);
10594         udelay(100);
10595
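              /* Program the RSS indirection table and load the 40-byte
               * (10 x u32) hash key into the MAC's hash-key registers.
               */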
10596         if (tg3_flag(tp, ENABLE_RSS)) {
10597                 u32 rss_key[10];
10598
10599                 tg3_rss_write_indir_tbl(tp);
10600
10601                 netdev_rss_key_fill(rss_key, 10 * sizeof(u32));
10602
10603                 for (i = 0; i < 10; i++)
10604                         tw32(MAC_RSS_HASH_KEY_0 + i * 4, rss_key[i]);
10605         }
10606
10607         tp->rx_mode = RX_MODE_ENABLE;
10608         if (tg3_flag(tp, 5755_PLUS))
10609                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
10610
10611         if (tg3_asic_rev(tp) == ASIC_REV_5762)
10612                 tp->rx_mode |= RX_MODE_IPV4_FRAG_FIX;
10613
10614         if (tg3_flag(tp, ENABLE_RSS))
10615                 tp->rx_mode |= RX_MODE_RSS_ENABLE |
10616                                RX_MODE_RSS_ITBL_HASH_BITS_7 |
10617                                RX_MODE_RSS_IPV6_HASH_EN |
10618                                RX_MODE_RSS_TCP_IPV6_HASH_EN |
10619                                RX_MODE_RSS_IPV4_HASH_EN |
10620                                RX_MODE_RSS_TCP_IPV4_HASH_EN;
10621
10622         tw32_f(MAC_RX_MODE, tp->rx_mode);
10623         udelay(10);
10624
10625         tw32(MAC_LED_CTRL, tp->led_ctrl);
10626
10627         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
10628         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10629                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10630                 udelay(10);
10631         }
10632         tw32_f(MAC_RX_MODE, tp->rx_mode);
10633         udelay(10);
10634
10635         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10636                 if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
10637                     !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
10638                         /* Set the drive transmission level to 1.2V, but
10639                          * only if the signal pre-emphasis bit is not set.  */
10640                         val = tr32(MAC_SERDES_CFG);
10641                         val &= 0xfffff000;
10642                         val |= 0x880;
10643                         tw32(MAC_SERDES_CFG, val);
10644                 }
10645                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
10646                         tw32(MAC_SERDES_CFG, 0x616000);
10647         }
10648
10649         /* Prevent the chip from dropping frames when flow control
10650          * is enabled.
10651          */
10652         if (tg3_flag(tp, 57765_CLASS))
10653                 val = 1;
10654         else
10655                 val = 2;
10656         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
10657
10658         if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
10659             (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
10660                 /* Use hardware link auto-negotiation */
10661                 tg3_flag_set(tp, HW_AUTONEG);
10662         }
10663
10664         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10665             tg3_asic_rev(tp) == ASIC_REV_5714) {
10666                 u32 tmp;
10667
10668                 tmp = tr32(SERDES_RX_CTRL);
10669                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
10670                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
10671                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
10672                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10673         }
10674
10675         if (!tg3_flag(tp, USE_PHYLIB)) {
10676                 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10677                         tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
10678
10679                 err = tg3_setup_phy(tp, false);
10680                 if (err)
10681                         return err;
10682
10683                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10684                     !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
10685                         u32 tmp;
10686
10687                         /* Clear CRC stats. */
10688                         if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
10689                                 tg3_writephy(tp, MII_TG3_TEST1,
10690                                              tmp | MII_TG3_TEST1_CRC_EN);
10691                                 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
10692                         }
10693                 }
10694         }
10695
10696         __tg3_set_rx_mode(tp->dev);
10697
10698         /* Initialize receive rules. */
10699         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
10700         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
10701         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
10702         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
10703
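              /* Zero out the unused receive-rule slots.  When ASF is enabled,
               * the last four slots are skipped, presumably reserved for the
               * ASF firmware's own filters, hence "limit -= 4" above.
               */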
10704         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
10705                 limit = 8;
10706         else
10707                 limit = 16;
10708         if (tg3_flag(tp, ENABLE_ASF))
10709                 limit -= 4;
10710         switch (limit) {
10711         case 16:
10712                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
10713                 /* fall through */
10714         case 15:
10715                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
10716                 /* fall through */
10717         case 14:
10718                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
10719                 /* fall through */
10720         case 13:
10721                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
10722                 /* fall through */
10723         case 12:
10724                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
10725                 /* fall through */
10726         case 11:
10727                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
10728                 /* fall through */
10729         case 10:
10730                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
10731                 /* fall through */
10732         case 9:
10733                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
10734                 /* fall through */
10735         case 8:
10736                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
10737                 /* fall through */
10738         case 7:
10739                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
10740                 /* fall through */
10741         case 6:
10742                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
10743                 /* fall through */
10744         case 5:
10745                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
10746                 /* fall through */
10747         case 4:
10748                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
10749         case 3:
10750                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
10751         case 2:
10752         case 1:
10753
10754         default:
10755                 break;
10756         }
10757
10758         if (tg3_flag(tp, ENABLE_APE))
10759                 /* Write our heartbeat update interval to APE. */
10760                 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
10761                                 APE_HOST_HEARTBEAT_INT_5SEC);
10762
10763         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
10764
10765         return 0;
10766 }
10767
10768 /* Called at device open time to get the chip ready for
10769  * packet processing.  Invoked with tp->lock held.
10770  */
10771 static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
10772 {
10773         /* Chip may have been just powered on. If so, the boot code may still
10774          * be running initialization. Wait for it to finish to avoid races in
10775          * accessing the hardware.
10776          */
10777         tg3_enable_register_access(tp);
10778         tg3_poll_fw(tp);
10779
10780         tg3_switch_clocks(tp);
10781
10782         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10783
10784         return tg3_reset_hw(tp, reset_phy);
10785 }
10786
10787 #ifdef CONFIG_TIGON3_HWMON
10788 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
10789 {
10790         int i;
10791
10792         for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
10793                 u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;
10794
10795                 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
10796                 off += len;
10797
10798                 if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
10799                     !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
10800                         memset(ocir, 0, TG3_OCIR_LEN);
10801         }
10802 }
10803
10804 /* sysfs attributes for hwmon */
10805 static ssize_t tg3_show_temp(struct device *dev,
10806                              struct device_attribute *devattr, char *buf)
10807 {
10808         struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
10809         struct tg3 *tp = dev_get_drvdata(dev);
10810         u32 temperature;
10811
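              /* hwmon expects temperatures in millidegrees Celsius; the APE
               * apparently reports whole degrees, hence the multiply below.
               */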
10812         spin_lock_bh(&tp->lock);
10813         tg3_ape_scratchpad_read(tp, &temperature, attr->index,
10814                                 sizeof(temperature));
10815         spin_unlock_bh(&tp->lock);
10816         return sprintf(buf, "%u\n", temperature * 1000);
10817 }
10818
10819
10820 static SENSOR_DEVICE_ATTR(temp1_input, 0444, tg3_show_temp, NULL,
10821                           TG3_TEMP_SENSOR_OFFSET);
10822 static SENSOR_DEVICE_ATTR(temp1_crit, 0444, tg3_show_temp, NULL,
10823                           TG3_TEMP_CAUTION_OFFSET);
10824 static SENSOR_DEVICE_ATTR(temp1_max, 0444, tg3_show_temp, NULL,
10825                           TG3_TEMP_MAX_OFFSET);
10826
10827 static struct attribute *tg3_attrs[] = {
10828         &sensor_dev_attr_temp1_input.dev_attr.attr,
10829         &sensor_dev_attr_temp1_crit.dev_attr.attr,
10830         &sensor_dev_attr_temp1_max.dev_attr.attr,
10831         NULL
10832 };
10833 ATTRIBUTE_GROUPS(tg3);
10834
10835 static void tg3_hwmon_close(struct tg3 *tp)
10836 {
10837         if (tp->hwmon_dev) {
10838                 hwmon_device_unregister(tp->hwmon_dev);
10839                 tp->hwmon_dev = NULL;
10840         }
10841 }
10842
10843 static void tg3_hwmon_open(struct tg3 *tp)
10844 {
10845         int i;
10846         u32 size = 0;
10847         struct pci_dev *pdev = tp->pdev;
10848         struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
10849
10850         tg3_sd_scan_scratchpad(tp, ocirs);
10851
10852         for (i = 0; i < TG3_SD_NUM_RECS; i++) {
10853                 if (!ocirs[i].src_data_length)
10854                         continue;
10855
10856                 size += ocirs[i].src_hdr_length;
10857                 size += ocirs[i].src_data_length;
10858         }
10859
10860         if (!size)
10861                 return;
10862
10863         tp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev, "tg3",
10864                                                           tp, tg3_groups);
10865         if (IS_ERR(tp->hwmon_dev)) {
10866                 tp->hwmon_dev = NULL;
10867                 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
10868         }
10869 }
10870 #else
10871 static inline void tg3_hwmon_close(struct tg3 *tp) { }
10872 static inline void tg3_hwmon_open(struct tg3 *tp) { }
10873 #endif /* CONFIG_TIGON3_HWMON */
10874
10875
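/* Accumulate a 32-bit statistics register into a 64-bit {high, low}
 * counter.  If the addition wrapped the low word (low < __val after
 * the add), propagate a carry into the high word.
 */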
10876 #define TG3_STAT_ADD32(PSTAT, REG) \
10877 do {    u32 __val = tr32(REG); \
10878         (PSTAT)->low += __val; \
10879         if ((PSTAT)->low < __val) \
10880                 (PSTAT)->high += 1; \
10881 } while (0)
10882
10883 static void tg3_periodic_fetch_stats(struct tg3 *tp)
10884 {
10885         struct tg3_hw_stats *sp = tp->hw_stats;
10886
10887         if (!tp->link_up)
10888                 return;
10889
10890         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
10891         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
10892         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
10893         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
10894         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
10895         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
10896         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
10897         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
10898         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
10899         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
10900         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
10901         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
10902         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
10903         if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) &&
10904                      (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
10905                       sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
10906                 u32 val;
10907
10908                 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10909                 val &= ~tg3_lso_rd_dma_workaround_bit(tp);
10910                 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10911                 tg3_flag_clear(tp, 5719_5720_RDMA_BUG);
10912         }
10913
10914         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
10915         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
10916         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
10917         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
10918         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
10919         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
10920         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
10921         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
10922         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
10923         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
10924         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
10925         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
10926         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
10927         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
10928
10929         TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
10930         if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
10931             tg3_asic_rev(tp) != ASIC_REV_5762 &&
10932             tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
10933             tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
10934                 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
10935         } else {
10936                 u32 val = tr32(HOSTCC_FLOW_ATTN);
10937                 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
10938                 if (val) {
10939                         tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
10940                         sp->rx_discards.low += val;
10941                         if (sp->rx_discards.low < val)
10942                                 sp->rx_discards.high += 1;
10943                 }
10944                 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
10945         }
10946         TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
10947 }
10948
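/* Work around occasionally dropped MSIs: if a vector has work pending
 * but its consumer indices have not advanced since the previous check,
 * invoke the MSI handler by hand to restart processing.
 */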
10949 static void tg3_chk_missed_msi(struct tg3 *tp)
10950 {
10951         u32 i;
10952
10953         for (i = 0; i < tp->irq_cnt; i++) {
10954                 struct tg3_napi *tnapi = &tp->napi[i];
10955
10956                 if (tg3_has_work(tnapi)) {
10957                         if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
10958                             tnapi->last_tx_cons == tnapi->tx_cons) {
10959                                 if (tnapi->chk_msi_cnt < 1) {
10960                                         tnapi->chk_msi_cnt++;
10961                                         return;
10962                                 }
10963                                 tg3_msi(0, tnapi);
10964                         }
10965                 }
10966                 tnapi->chk_msi_cnt = 0;
10967                 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
10968                 tnapi->last_tx_cons = tnapi->tx_cons;
10969         }
10970 }
10971
10972 static void tg3_timer(struct timer_list *t)
10973 {
10974         struct tg3 *tp = from_timer(tp, t, timer);
10975
10976         spin_lock(&tp->lock);
10977
10978         if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING)) {
10979                 spin_unlock(&tp->lock);
10980                 goto restart_timer;
10981         }
10982
10983         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10984             tg3_flag(tp, 57765_CLASS))
10985                 tg3_chk_missed_msi(tp);
10986
10987         if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
10988                 /* BCM4785: Flush posted writes from GbE to host memory. */
10989                 tr32(HOSTCC_MODE);
10990         }
10991
10992         if (!tg3_flag(tp, TAGGED_STATUS)) {
10993                 /* All of this garbage is because, when using non-tagged
10994                  * IRQ status, the mailbox/status_block protocol the chip
10995                  * uses with the CPU is race prone.
10996                  */
10997                 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
10998                         tw32(GRC_LOCAL_CTRL,
10999                              tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
11000                 } else {
11001                         tw32(HOSTCC_MODE, tp->coalesce_mode |
11002                              HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
11003                 }
11004
11005                 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
11006                         spin_unlock(&tp->lock);
11007                         tg3_reset_task_schedule(tp);
11008                         goto restart_timer;
11009                 }
11010         }
11011
11012         /* This part only runs once per second. */
11013         if (!--tp->timer_counter) {
11014                 if (tg3_flag(tp, 5705_PLUS))
11015                         tg3_periodic_fetch_stats(tp);
11016
11017                 if (tp->setlpicnt && !--tp->setlpicnt)
11018                         tg3_phy_eee_enable(tp);
11019
11020                 if (tg3_flag(tp, USE_LINKCHG_REG)) {
11021                         u32 mac_stat;
11022                         int phy_event;
11023
11024                         mac_stat = tr32(MAC_STATUS);
11025
11026                         phy_event = 0;
11027                         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
11028                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
11029                                         phy_event = 1;
11030                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
11031                                 phy_event = 1;
11032
11033                         if (phy_event)
11034                                 tg3_setup_phy(tp, false);
11035                 } else if (tg3_flag(tp, POLL_SERDES)) {
11036                         u32 mac_stat = tr32(MAC_STATUS);
11037                         int need_setup = 0;
11038
11039                         if (tp->link_up &&
11040                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
11041                                 need_setup = 1;
11042                         }
11043                         if (!tp->link_up &&
11044                             (mac_stat & (MAC_STATUS_PCS_SYNCED |
11045                                          MAC_STATUS_SIGNAL_DET))) {
11046                                 need_setup = 1;
11047                         }
11048                         if (need_setup) {
11049                                 if (!tp->serdes_counter) {
11050                                         tw32_f(MAC_MODE,
11051                                              (tp->mac_mode &
11052                                               ~MAC_MODE_PORT_MODE_MASK));
11053                                         udelay(40);
11054                                         tw32_f(MAC_MODE, tp->mac_mode);
11055                                         udelay(40);
11056                                 }
11057                                 tg3_setup_phy(tp, false);
11058                         }
11059                 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
11060                            tg3_flag(tp, 5780_CLASS)) {
11061                         tg3_serdes_parallel_detect(tp);
11062                 } else if (tg3_flag(tp, POLL_CPMU_LINK)) {
11063                         u32 cpmu = tr32(TG3_CPMU_STATUS);
11064                         bool link_up = !((cpmu & TG3_CPMU_STATUS_LINK_MASK) ==
11065                                          TG3_CPMU_STATUS_LINK_MASK);
11066
11067                         if (link_up != tp->link_up)
11068                                 tg3_setup_phy(tp, false);
11069                 }
11070
11071                 tp->timer_counter = tp->timer_multiplier;
11072         }
11073
11074         /* Heartbeat is only sent once every 2 seconds.
11075          *
11076          * The heartbeat is to tell the ASF firmware that the host
11077          * driver is still alive.  In the event that the OS crashes,
11078          * ASF needs to reset the hardware to free up the FIFO space
11079          * that may be filled with rx packets destined for the host.
11080          * If the FIFO is full, ASF will no longer function properly.
11081          *
11082          * Unintended resets have been reported on real-time kernels
11083          * where the timer doesn't run on time.  Netpoll will also have
11084          * the same problem.
11085          *
11086          * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
11087          * to check the ring condition when the heartbeat is expiring
11088          * before doing the reset.  This will prevent most unintended
11089          * resets.
11090          */
11091         if (!--tp->asf_counter) {
11092                 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
11093                         tg3_wait_for_event_ack(tp);
11094
11095                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
11096                                       FWCMD_NICDRV_ALIVE3);
11097                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
11098                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
11099                                       TG3_FW_UPDATE_TIMEOUT_SEC);
11100
11101                         tg3_generate_fw_event(tp);
11102                 }
11103                 tp->asf_counter = tp->asf_multiplier;
11104         }
11105
11106         /* Update the APE heartbeat every 5 seconds. */
11107         tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL);
11108
11109         spin_unlock(&tp->lock);
11110
11111 restart_timer:
11112         tp->timer.expires = jiffies + tp->timer_offset;
11113         add_timer(&tp->timer);
11114 }
11115
11116 static void tg3_timer_init(struct tg3 *tp)
11117 {
11118         if (tg3_flag(tp, TAGGED_STATUS) &&
11119             tg3_asic_rev(tp) != ASIC_REV_5717 &&
11120             !tg3_flag(tp, 57765_CLASS))
11121                 tp->timer_offset = HZ;
11122         else
11123                 tp->timer_offset = HZ / 10;
11124
11125         BUG_ON(tp->timer_offset > HZ);
11126
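              /* timer_multiplier converts the 100 ms (or 1 s) timer ticks
               * into the once-per-second work in tg3_timer(); asf_multiplier
               * stretches that to the TG3_FW_UPDATE_FREQ_SEC heartbeat
               * period.
               */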
11127         tp->timer_multiplier = (HZ / tp->timer_offset);
11128         tp->asf_multiplier = (HZ / tp->timer_offset) *
11129                              TG3_FW_UPDATE_FREQ_SEC;
11130
11131         timer_setup(&tp->timer, tg3_timer, 0);
11132 }
11133
11134 static void tg3_timer_start(struct tg3 *tp)
11135 {
11136         tp->asf_counter   = tp->asf_multiplier;
11137         tp->timer_counter = tp->timer_multiplier;
11138
11139         tp->timer.expires = jiffies + tp->timer_offset;
11140         add_timer(&tp->timer);
11141 }
11142
11143 static void tg3_timer_stop(struct tg3 *tp)
11144 {
11145         del_timer_sync(&tp->timer);
11146 }
11147
11148 /* Restart hardware after configuration changes, self-test, etc.
11149  * Invoked with tp->lock held.
11150  */
11151 static int tg3_restart_hw(struct tg3 *tp, bool reset_phy)
11152         __releases(tp->lock)
11153         __acquires(tp->lock)
11154 {
11155         int err;
11156
11157         err = tg3_init_hw(tp, reset_phy);
11158         if (err) {
11159                 netdev_err(tp->dev,
11160                            "Failed to re-initialize device, aborting\n");
11161                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11162                 tg3_full_unlock(tp);
11163                 tg3_timer_stop(tp);
11164                 tp->irq_sync = 0;
11165                 tg3_napi_enable(tp);
11166                 dev_close(tp->dev);
11167                 tg3_full_lock(tp, 0);
11168         }
11169         return err;
11170 }
11171
11172 static void tg3_reset_task(struct work_struct *work)
11173 {
11174         struct tg3 *tp = container_of(work, struct tg3, reset_task);
11175         int err;
11176
11177         rtnl_lock();
11178         tg3_full_lock(tp, 0);
11179
11180         if (tp->pcierr_recovery || !netif_running(tp->dev)) {
11181                 tg3_flag_clear(tp, RESET_TASK_PENDING);
11182                 tg3_full_unlock(tp);
11183                 rtnl_unlock();
11184                 return;
11185         }
11186
11187         tg3_full_unlock(tp);
11188
11189         tg3_phy_stop(tp);
11190
11191         tg3_netif_stop(tp);
11192
11193         tg3_full_lock(tp, 1);
11194
11195         if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
11196                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
11197                 tp->write32_rx_mbox = tg3_write_flush_reg32;
11198                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
11199                 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
11200         }
11201
11202         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
11203         err = tg3_init_hw(tp, true);
11204         if (err) {
11205                 tg3_full_unlock(tp);
11206                 tp->irq_sync = 0;
11207                 tg3_napi_enable(tp);
11208                 /* Clear this flag so that tg3_reset_task_cancel() will not
11209                  * call cancel_work_sync() and wait forever.
11210                  */
11211                 tg3_flag_clear(tp, RESET_TASK_PENDING);
11212                 dev_close(tp->dev);
11213                 goto out;
11214         }
11215
11216         tg3_netif_start(tp);
11217
11218         tg3_full_unlock(tp);
11219
11220         if (!err)
11221                 tg3_phy_start(tp);
11222
11223         tg3_flag_clear(tp, RESET_TASK_PENDING);
11224 out:
11225         rtnl_unlock();
11226 }
11227
11228 static int tg3_request_irq(struct tg3 *tp, int irq_num)
11229 {
11230         irq_handler_t fn;
11231         unsigned long flags;
11232         char *name;
11233         struct tg3_napi *tnapi = &tp->napi[irq_num];
11234
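              /* For multi-vector setups, compose a per-vector IRQ name such
               * as "eth0-txrx-1" that reflects whether the vector services
               * tx, rx, or both.
               */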
11235         if (tp->irq_cnt == 1)
11236                 name = tp->dev->name;
11237         else {
11238                 name = &tnapi->irq_lbl[0];
11239                 if (tnapi->tx_buffers && tnapi->rx_rcb)
11240                         snprintf(name, IFNAMSIZ,
11241                                  "%s-txrx-%d", tp->dev->name, irq_num);
11242                 else if (tnapi->tx_buffers)
11243                         snprintf(name, IFNAMSIZ,
11244                                  "%s-tx-%d", tp->dev->name, irq_num);
11245                 else if (tnapi->rx_rcb)
11246                         snprintf(name, IFNAMSIZ,
11247                                  "%s-rx-%d", tp->dev->name, irq_num);
11248                 else
11249                         snprintf(name, IFNAMSIZ,
11250                                  "%s-%d", tp->dev->name, irq_num);
11251                 name[IFNAMSIZ-1] = 0;
11252         }
11253
11254         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11255                 fn = tg3_msi;
11256                 if (tg3_flag(tp, 1SHOT_MSI))
11257                         fn = tg3_msi_1shot;
11258                 flags = 0;
11259         } else {
11260                 fn = tg3_interrupt;
11261                 if (tg3_flag(tp, TAGGED_STATUS))
11262                         fn = tg3_interrupt_tagged;
11263                 flags = IRQF_SHARED;
11264         }
11265
11266         return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
11267 }
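
/* The labels built above are what show up in /proc/interrupts: a vector
 * servicing both ring types gets "<ifname>-txrx-<n>", a TX-only or
 * RX-only vector gets "-tx-" or "-rx-", and a vector with no rings (for
 * instance, a link-only vector in multiqueue MSI-X mode) gets a plain
 * "<ifname>-<n>".  With a single vector the bare device name is used.
 */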
11268
11269 static int tg3_test_interrupt(struct tg3 *tp)
11270 {
11271         struct tg3_napi *tnapi = &tp->napi[0];
11272         struct net_device *dev = tp->dev;
11273         int err, i, intr_ok = 0;
11274         u32 val;
11275
11276         if (!netif_running(dev))
11277                 return -ENODEV;
11278
11279         tg3_disable_ints(tp);
11280
11281         free_irq(tnapi->irq_vec, tnapi);
11282
11283         /*
11284          * Turn off MSI one-shot mode.  Otherwise this test has no
11285          * way of observing whether the interrupt was delivered.
11286          */
11287         if (tg3_flag(tp, 57765_PLUS)) {
11288                 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
11289                 tw32(MSGINT_MODE, val);
11290         }
11291
11292         err = request_irq(tnapi->irq_vec, tg3_test_isr,
11293                           IRQF_SHARED, dev->name, tnapi);
11294         if (err)
11295                 return err;
11296
11297         tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
11298         tg3_enable_ints(tp);
11299
11300         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11301                tnapi->coal_now);
11302
11303         for (i = 0; i < 5; i++) {
11304                 u32 int_mbox, misc_host_ctrl;
11305
11306                 int_mbox = tr32_mailbox(tnapi->int_mbox);
11307                 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
11308
11309                 if ((int_mbox != 0) ||
11310                     (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
11311                         intr_ok = 1;
11312                         break;
11313                 }
11314
11315                 if (tg3_flag(tp, 57765_PLUS) &&
11316                     tnapi->hw_status->status_tag != tnapi->last_tag)
11317                         tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
11318
11319                 msleep(10);
11320         }
11321
11322         tg3_disable_ints(tp);
11323
11324         free_irq(tnapi->irq_vec, tnapi);
11325
11326         err = tg3_request_irq(tp, 0);
11327
11328         if (err)
11329                 return err;
11330
11331         if (intr_ok) {
11332                 /* Re-enable MSI one-shot mode. */
11333                 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
11334                         val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
11335                         tw32(MSGINT_MODE, val);
11336                 }
11337                 return 0;
11338         }
11339
11340         return -EIO;
11341 }
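
/* Summary of the test above: the production handler is replaced with
 * tg3_test_isr, an interrupt is forced by writing coal_now into
 * HOSTCC_MODE, and the loop then polls for up to roughly 50 ms.  A
 * non-zero interrupt mailbox or a masked PCI interrupt is taken as
 * proof that the ISR ran, since the test ISR reacts to a delivered
 * interrupt by disabling interrupts, which produces exactly those two
 * conditions.
 */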
11342
11343 /* Returns 0 if the MSI test succeeds, or if the MSI test fails but
11344  * INTx mode is successfully restored.
11345  */
11346 static int tg3_test_msi(struct tg3 *tp)
11347 {
11348         int err;
11349         u16 pci_cmd;
11350
11351         if (!tg3_flag(tp, USING_MSI))
11352                 return 0;
11353
11354         /* Turn off SERR reporting in case MSI terminates with Master
11355          * Abort.
11356          */
11357         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11358         pci_write_config_word(tp->pdev, PCI_COMMAND,
11359                               pci_cmd & ~PCI_COMMAND_SERR);
11360
11361         err = tg3_test_interrupt(tp);
11362
11363         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11364
11365         if (!err)
11366                 return 0;
11367
11368         /* other failures */
11369         if (err != -EIO)
11370                 return err;
11371
11372         /* MSI test failed, go back to INTx mode */
11373         netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
11374                     "to INTx mode. Please report this failure to the PCI "
11375                     "maintainer and include system chipset information\n");
11376
11377         free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11378
11379         pci_disable_msi(tp->pdev);
11380
11381         tg3_flag_clear(tp, USING_MSI);
11382         tp->napi[0].irq_vec = tp->pdev->irq;
11383
11384         err = tg3_request_irq(tp, 0);
11385         if (err)
11386                 return err;
11387
11388         /* Need to reset the chip because the MSI cycle may have terminated
11389          * with Master Abort.
11390          */
11391         tg3_full_lock(tp, 1);
11392
11393         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11394         err = tg3_init_hw(tp, true);
11395
11396         tg3_full_unlock(tp);
11397
11398         if (err)
11399                 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11400
11401         return err;
11402 }
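
/* If MSI delivery cannot be verified, the code above reverts to INTx
 * and also resets the chip: an MSI cycle that terminated with Master
 * Abort can leave the device in a state where a full tg3_halt() plus
 * tg3_init_hw() sequence is the only safe recovery.
 */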
11403
11404 static int tg3_request_firmware(struct tg3 *tp)
11405 {
11406         const struct tg3_firmware_hdr *fw_hdr;
11407
11408         if (reject_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
11409                 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
11410                            tp->fw_needed);
11411                 return -ENOENT;
11412         }
11413
11414         fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
11415
11416         /* Firmware blob starts with version numbers, followed by
11417          * start address and _full_ length including BSS sections
11418          * (which must be longer than the actual data, of course).
11419          */
11420
11421         tp->fw_len = be32_to_cpu(fw_hdr->len);  /* includes bss */
11422         if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
11423                 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
11424                            tp->fw_len, tp->fw_needed);
11425                 release_firmware(tp->fw);
11426                 tp->fw = NULL;
11427                 return -EINVAL;
11428         }
11429
11430         /* We no longer need firmware; we have it. */
11431         tp->fw_needed = NULL;
11432         return 0;
11433 }
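
/* For reference, the header parsed above consists of three big-endian
 * words (struct tg3_firmware_hdr in tg3.h): the firmware version, the
 * load address, and the full image length including BSS.  Note that
 * reject_firmware() is Linux-libre's deblobbed stand-in for the usual
 * request_firmware(), so loading non-free blobs is refused here.
 */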
11434
11435 static u32 tg3_irq_count(struct tg3 *tp)
11436 {
11437         u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
11438
11439         if (irq_cnt > 1) {
11440                 /* We want as many rx rings enabled as there are cpus.
11441                  * In multiqueue MSI-X mode, the first MSI-X vector
11442          * only deals with link interrupts, etc., so we add
11443                  * one to the number of vectors we are requesting.
11444                  */
11445                 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
11446         }
11447
11448         return irq_cnt;
11449 }
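
/* Worked example: on a 4-CPU box with the defaults (one RSS queue per
 * CPU, capped by rxq_max) rxq_cnt = 4 and txq_cnt = 1, so the function
 * requests max(4, 1) + 1 = 5 vectors, the extra one being the link
 * vector described in the comment above, subject to the tp->irq_max
 * cap.
 */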
11450
11451 static bool tg3_enable_msix(struct tg3 *tp)
11452 {
11453         int i, rc;
11454         struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];
11455
11456         tp->txq_cnt = tp->txq_req;
11457         tp->rxq_cnt = tp->rxq_req;
11458         if (!tp->rxq_cnt)
11459                 tp->rxq_cnt = netif_get_num_default_rss_queues();
11460         if (tp->rxq_cnt > tp->rxq_max)
11461                 tp->rxq_cnt = tp->rxq_max;
11462
11463         /* Disable multiple TX rings by default.  Simple round-robin hardware
11464          * scheduling of the TX rings can cause starvation of rings with
11465          * small packets when other rings have TSO or jumbo packets.
11466          */
11467         if (!tp->txq_req)
11468                 tp->txq_cnt = 1;
11469
11470         tp->irq_cnt = tg3_irq_count(tp);
11471
11472         for (i = 0; i < tp->irq_max; i++) {
11473                 msix_ent[i].entry  = i;
11474                 msix_ent[i].vector = 0;
11475         }
11476
11477         rc = pci_enable_msix_range(tp->pdev, msix_ent, 1, tp->irq_cnt);
11478         if (rc < 0) {
11479                 return false;
11480         } else if (rc < tp->irq_cnt) {
11481                 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
11482                               tp->irq_cnt, rc);
11483                 tp->irq_cnt = rc;
11484                 tp->rxq_cnt = max(rc - 1, 1);
11485                 if (tp->txq_cnt)
11486                         tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
11487         }
11488
11489         for (i = 0; i < tp->irq_max; i++)
11490                 tp->napi[i].irq_vec = msix_ent[i].vector;
11491
11492         if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
11493                 pci_disable_msix(tp->pdev);
11494                 return false;
11495         }
11496
11497         if (tp->irq_cnt == 1)
11498                 return true;
11499
11500         tg3_flag_set(tp, ENABLE_RSS);
11501
11502         if (tp->txq_cnt > 1)
11503                 tg3_flag_set(tp, ENABLE_TSS);
11504
11505         netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);
11506
11507         return true;
11508 }
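
/* pci_enable_msix_range() may grant fewer vectors than requested (any
 * count from 1 up to tp->irq_cnt).  On a partial grant the code above
 * shrinks the RX queue count to the granted vectors minus the link
 * vector, and trims the TX queue count to match.
 */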
11509
11510 static void tg3_ints_init(struct tg3 *tp)
11511 {
11512         if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
11513             !tg3_flag(tp, TAGGED_STATUS)) {
11514                 /* All MSI-supporting chips should support tagged
11515                  * status; warn and fall back to INTx if not.
11516                  */
11517                 netdev_warn(tp->dev,
11518                             "MSI without TAGGED_STATUS? Not using MSI\n");
11519                 goto defcfg;
11520         }
11521
11522         if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
11523                 tg3_flag_set(tp, USING_MSIX);
11524         else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
11525                 tg3_flag_set(tp, USING_MSI);
11526
11527         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11528                 u32 msi_mode = tr32(MSGINT_MODE);
11529                 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
11530                         msi_mode |= MSGINT_MODE_MULTIVEC_EN;
11531                 if (!tg3_flag(tp, 1SHOT_MSI))
11532                         msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
11533                 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
11534         }
11535 defcfg:
11536         if (!tg3_flag(tp, USING_MSIX)) {
11537                 tp->irq_cnt = 1;
11538                 tp->napi[0].irq_vec = tp->pdev->irq;
11539         }
11540
11541         if (tp->irq_cnt == 1) {
11542                 tp->txq_cnt = 1;
11543                 tp->rxq_cnt = 1;
11544                 netif_set_real_num_tx_queues(tp->dev, 1);
11545                 netif_set_real_num_rx_queues(tp->dev, 1);
11546         }
11547 }
11548
11549 static void tg3_ints_fini(struct tg3 *tp)
11550 {
11551         if (tg3_flag(tp, USING_MSIX))
11552                 pci_disable_msix(tp->pdev);
11553         else if (tg3_flag(tp, USING_MSI))
11554                 pci_disable_msi(tp->pdev);
11555         tg3_flag_clear(tp, USING_MSI);
11556         tg3_flag_clear(tp, USING_MSIX);
11557         tg3_flag_clear(tp, ENABLE_RSS);
11558         tg3_flag_clear(tp, ENABLE_TSS);
11559 }
11560
11561 static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
11562                      bool init)
11563 {
11564         struct net_device *dev = tp->dev;
11565         int i, err;
11566
11567         /*
11568          * Set up interrupts first so we know how
11569          * many NAPI resources to allocate.
11570          */
11571         tg3_ints_init(tp);
11572
11573         tg3_rss_check_indir_tbl(tp);
11574
11575         /* The placement of this call is tied
11576          * to the setup and use of Host TX descriptors.
11577          */
11578         err = tg3_alloc_consistent(tp);
11579         if (err)
11580                 goto out_ints_fini;
11581
11582         tg3_napi_init(tp);
11583
11584         tg3_napi_enable(tp);
11585
11586         for (i = 0; i < tp->irq_cnt; i++) {
11587                 err = tg3_request_irq(tp, i);
11588                 if (err) {
11589                         for (i--; i >= 0; i--) {
11590                                 struct tg3_napi *tnapi = &tp->napi[i];
11591
11592                                 free_irq(tnapi->irq_vec, tnapi);
11593                         }
11594                         goto out_napi_fini;
11595                 }
11596         }
11597
11598         tg3_full_lock(tp, 0);
11599
11600         if (init)
11601                 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
11602
11603         err = tg3_init_hw(tp, reset_phy);
11604         if (err) {
11605                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11606                 tg3_free_rings(tp);
11607         }
11608
11609         tg3_full_unlock(tp);
11610
11611         if (err)
11612                 goto out_free_irq;
11613
11614         if (test_irq && tg3_flag(tp, USING_MSI)) {
11615                 err = tg3_test_msi(tp);
11616
11617                 if (err) {
11618                         tg3_full_lock(tp, 0);
11619                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11620                         tg3_free_rings(tp);
11621                         tg3_full_unlock(tp);
11622
11623                         goto out_napi_fini;
11624                 }
11625
11626                 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
11627                         u32 val = tr32(PCIE_TRANSACTION_CFG);
11628
11629                         tw32(PCIE_TRANSACTION_CFG,
11630                              val | PCIE_TRANS_CFG_1SHOT_MSI);
11631                 }
11632         }
11633
11634         tg3_phy_start(tp);
11635
11636         tg3_hwmon_open(tp);
11637
11638         tg3_full_lock(tp, 0);
11639
11640         tg3_timer_start(tp);
11641         tg3_flag_set(tp, INIT_COMPLETE);
11642         tg3_enable_ints(tp);
11643
11644         tg3_ptp_resume(tp);
11645
11646         tg3_full_unlock(tp);
11647
11648         netif_tx_start_all_queues(dev);
11649
11650         /*
11651          * Reset the loopback feature if it was turned on while the device
11652          * was down; make sure it is set up properly now.
11653          */
11654         if (dev->features & NETIF_F_LOOPBACK)
11655                 tg3_set_loopback(dev, dev->features);
11656
11657         return 0;
11658
11659 out_free_irq:
11660         for (i = tp->irq_cnt - 1; i >= 0; i--) {
11661                 struct tg3_napi *tnapi = &tp->napi[i];
11662                 free_irq(tnapi->irq_vec, tnapi);
11663         }
11664
11665 out_napi_fini:
11666         tg3_napi_disable(tp);
11667         tg3_napi_fini(tp);
11668         tg3_free_consistent(tp);
11669
11670 out_ints_fini:
11671         tg3_ints_fini(tp);
11672
11673         return err;
11674 }
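
/* The error labels above unwind strictly in reverse order of setup:
 * free any IRQs already requested, then tear down the NAPI contexts
 * and DMA-consistent memory, and finally release the MSI/MSI-X
 * vectors.
 */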
11675
11676 static void tg3_stop(struct tg3 *tp)
11677 {
11678         int i;
11679
11680         tg3_reset_task_cancel(tp);
11681         tg3_netif_stop(tp);
11682
11683         tg3_timer_stop(tp);
11684
11685         tg3_hwmon_close(tp);
11686
11687         tg3_phy_stop(tp);
11688
11689         tg3_full_lock(tp, 1);
11690
11691         tg3_disable_ints(tp);
11692
11693         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11694         tg3_free_rings(tp);
11695         tg3_flag_clear(tp, INIT_COMPLETE);
11696
11697         tg3_full_unlock(tp);
11698
11699         for (i = tp->irq_cnt - 1; i >= 0; i--) {
11700                 struct tg3_napi *tnapi = &tp->napi[i];
11701                 free_irq(tnapi->irq_vec, tnapi);
11702         }
11703
11704         tg3_ints_fini(tp);
11705
11706         tg3_napi_fini(tp);
11707
11708         tg3_free_consistent(tp);
11709 }
11710
11711 static int tg3_open(struct net_device *dev)
11712 {
11713         struct tg3 *tp = netdev_priv(dev);
11714         int err;
11715
11716         if (tp->pcierr_recovery) {
11717                 netdev_err(dev, "Failed to open device. PCI error recovery "
11718                            "in progress\n");
11719                 return -EAGAIN;
11720         }
11721
11722         if (tp->fw_needed) {
11723                 err = tg3_request_firmware(tp);
11724                 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
11725                         if (err) {
11726                                 netdev_warn(tp->dev, "EEE capability disabled\n");
11727                                 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11728                         } else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
11729                                 netdev_warn(tp->dev, "EEE capability restored\n");
11730                                 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
11731                         }
11732                 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
11733                         if (err)
11734                                 return err;
11735                 } else if (err) {
11736                         netdev_warn(tp->dev, "TSO capability disabled\n");
11737                         tg3_flag_clear(tp, TSO_CAPABLE);
11738                 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
11739                         netdev_notice(tp->dev, "TSO capability restored\n");
11740                         tg3_flag_set(tp, TSO_CAPABLE);
11741                 }
11742         }
11743
11744         tg3_carrier_off(tp);
11745
11746         err = tg3_power_up(tp);
11747         if (err)
11748                 return err;
11749
11750         tg3_full_lock(tp, 0);
11751
11752         tg3_disable_ints(tp);
11753         tg3_flag_clear(tp, INIT_COMPLETE);
11754
11755         tg3_full_unlock(tp);
11756
11757         err = tg3_start(tp,
11758                         !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN),
11759                         true, true);
11760         if (err) {
11761                 tg3_frob_aux_power(tp, false);
11762                 pci_set_power_state(tp->pdev, PCI_D3hot);
11763         }
11764
11765         return err;
11766 }
11767
11768 static int tg3_close(struct net_device *dev)
11769 {
11770         struct tg3 *tp = netdev_priv(dev);
11771
11772         if (tp->pcierr_recovery) {
11773                 netdev_err(dev, "Failed to close device. PCI error recovery "
11774                            "in progress\n");
11775                 return -EAGAIN;
11776         }
11777
11778         tg3_stop(tp);
11779
11780         if (pci_device_is_present(tp->pdev)) {
11781                 tg3_power_down_prepare(tp);
11782
11783                 tg3_carrier_off(tp);
11784         }
11785         return 0;
11786 }
11787
11788 static inline u64 get_stat64(tg3_stat64_t *val)
11789 {
11790         return ((u64)val->high << 32) | ((u64)val->low);
11791 }
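
/* The hardware keeps each statistic as a {high, low} pair of 32-bit
 * words; e.g. high = 0x1, low = 0x2 combines to 0x100000002.
 */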
11792
11793 static u64 tg3_calc_crc_errors(struct tg3 *tp)
11794 {
11795         struct tg3_hw_stats *hw_stats = tp->hw_stats;
11796
11797         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11798             (tg3_asic_rev(tp) == ASIC_REV_5700 ||
11799              tg3_asic_rev(tp) == ASIC_REV_5701)) {
11800                 u32 val;
11801
11802                 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
11803                         tg3_writephy(tp, MII_TG3_TEST1,
11804                                      val | MII_TG3_TEST1_CRC_EN);
11805                         tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
11806                 } else
11807                         val = 0;
11808
11809                 tp->phy_crc_errors += val;
11810
11811                 return tp->phy_crc_errors;
11812         }
11813
11814         return get_stat64(&hw_stats->rx_fcs_errors);
11815 }
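
/* On 5700/5701 copper devices the MAC's FCS counter is bypassed: the
 * PHY's own CRC error counter is read through MII_TG3_TEST1 and
 * MII_TG3_RXR_COUNTERS and accumulated in tp->phy_crc_errors,
 * presumably because the PHY counter clears when it is read.
 */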
11816
11817 #define ESTAT_ADD(member) \
11818         estats->member =        old_estats->member + \
11819                                 get_stat64(&hw_stats->member)
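
/* ESTAT_ADD(rx_octets), for example, expands to
 *
 *	estats->rx_octets = old_estats->rx_octets +
 *				get_stat64(&hw_stats->rx_octets);
 *
 * i.e. each reported counter is the pre-reset total saved in
 * tp->estats_prev plus the current hardware counter.
 */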
11820
11821 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
11822 {
11823         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
11824         struct tg3_hw_stats *hw_stats = tp->hw_stats;
11825
11826         ESTAT_ADD(rx_octets);
11827         ESTAT_ADD(rx_fragments);
11828         ESTAT_ADD(rx_ucast_packets);
11829         ESTAT_ADD(rx_mcast_packets);
11830         ESTAT_ADD(rx_bcast_packets);
11831         ESTAT_ADD(rx_fcs_errors);
11832         ESTAT_ADD(rx_align_errors);
11833         ESTAT_ADD(rx_xon_pause_rcvd);
11834         ESTAT_ADD(rx_xoff_pause_rcvd);
11835         ESTAT_ADD(rx_mac_ctrl_rcvd);
11836         ESTAT_ADD(rx_xoff_entered);
11837         ESTAT_ADD(rx_frame_too_long_errors);
11838         ESTAT_ADD(rx_jabbers);
11839         ESTAT_ADD(rx_undersize_packets);
11840         ESTAT_ADD(rx_in_length_errors);
11841         ESTAT_ADD(rx_out_length_errors);
11842         ESTAT_ADD(rx_64_or_less_octet_packets);
11843         ESTAT_ADD(rx_65_to_127_octet_packets);
11844         ESTAT_ADD(rx_128_to_255_octet_packets);
11845         ESTAT_ADD(rx_256_to_511_octet_packets);
11846         ESTAT_ADD(rx_512_to_1023_octet_packets);
11847         ESTAT_ADD(rx_1024_to_1522_octet_packets);
11848         ESTAT_ADD(rx_1523_to_2047_octet_packets);
11849         ESTAT_ADD(rx_2048_to_4095_octet_packets);
11850         ESTAT_ADD(rx_4096_to_8191_octet_packets);
11851         ESTAT_ADD(rx_8192_to_9022_octet_packets);
11852
11853         ESTAT_ADD(tx_octets);
11854         ESTAT_ADD(tx_collisions);
11855         ESTAT_ADD(tx_xon_sent);
11856         ESTAT_ADD(tx_xoff_sent);
11857         ESTAT_ADD(tx_flow_control);
11858         ESTAT_ADD(tx_mac_errors);
11859         ESTAT_ADD(tx_single_collisions);
11860         ESTAT_ADD(tx_mult_collisions);
11861         ESTAT_ADD(tx_deferred);
11862         ESTAT_ADD(tx_excessive_collisions);
11863         ESTAT_ADD(tx_late_collisions);
11864         ESTAT_ADD(tx_collide_2times);
11865         ESTAT_ADD(tx_collide_3times);
11866         ESTAT_ADD(tx_collide_4times);
11867         ESTAT_ADD(tx_collide_5times);
11868         ESTAT_ADD(tx_collide_6times);
11869         ESTAT_ADD(tx_collide_7times);
11870         ESTAT_ADD(tx_collide_8times);
11871         ESTAT_ADD(tx_collide_9times);
11872         ESTAT_ADD(tx_collide_10times);
11873         ESTAT_ADD(tx_collide_11times);
11874         ESTAT_ADD(tx_collide_12times);
11875         ESTAT_ADD(tx_collide_13times);
11876         ESTAT_ADD(tx_collide_14times);
11877         ESTAT_ADD(tx_collide_15times);
11878         ESTAT_ADD(tx_ucast_packets);
11879         ESTAT_ADD(tx_mcast_packets);
11880         ESTAT_ADD(tx_bcast_packets);
11881         ESTAT_ADD(tx_carrier_sense_errors);
11882         ESTAT_ADD(tx_discards);
11883         ESTAT_ADD(tx_errors);
11884
11885         ESTAT_ADD(dma_writeq_full);
11886         ESTAT_ADD(dma_write_prioq_full);
11887         ESTAT_ADD(rxbds_empty);
11888         ESTAT_ADD(rx_discards);
11889         ESTAT_ADD(rx_errors);
11890         ESTAT_ADD(rx_threshold_hit);
11891
11892         ESTAT_ADD(dma_readq_full);
11893         ESTAT_ADD(dma_read_prioq_full);
11894         ESTAT_ADD(tx_comp_queue_full);
11895
11896         ESTAT_ADD(ring_set_send_prod_index);
11897         ESTAT_ADD(ring_status_update);
11898         ESTAT_ADD(nic_irqs);
11899         ESTAT_ADD(nic_avoided_irqs);
11900         ESTAT_ADD(nic_tx_threshold_hit);
11901
11902         ESTAT_ADD(mbuf_lwm_thresh_hit);
11903 }
11904
11905 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
11906 {
11907         struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
11908         struct tg3_hw_stats *hw_stats = tp->hw_stats;
11909
11910         stats->rx_packets = old_stats->rx_packets +
11911                 get_stat64(&hw_stats->rx_ucast_packets) +
11912                 get_stat64(&hw_stats->rx_mcast_packets) +
11913                 get_stat64(&hw_stats->rx_bcast_packets);
11914
11915         stats->tx_packets = old_stats->tx_packets +
11916                 get_stat64(&hw_stats->tx_ucast_packets) +
11917                 get_stat64(&hw_stats->tx_mcast_packets) +
11918                 get_stat64(&hw_stats->tx_bcast_packets);
11919
11920         stats->rx_bytes = old_stats->rx_bytes +
11921                 get_stat64(&hw_stats->rx_octets);
11922         stats->tx_bytes = old_stats->tx_bytes +
11923                 get_stat64(&hw_stats->tx_octets);
11924
11925         stats->rx_errors = old_stats->rx_errors +
11926                 get_stat64(&hw_stats->rx_errors);
11927         stats->tx_errors = old_stats->tx_errors +
11928                 get_stat64(&hw_stats->tx_errors) +
11929                 get_stat64(&hw_stats->tx_mac_errors) +
11930                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
11931                 get_stat64(&hw_stats->tx_discards);
11932
11933         stats->multicast = old_stats->multicast +
11934                 get_stat64(&hw_stats->rx_mcast_packets);
11935         stats->collisions = old_stats->collisions +
11936                 get_stat64(&hw_stats->tx_collisions);
11937
11938         stats->rx_length_errors = old_stats->rx_length_errors +
11939                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
11940                 get_stat64(&hw_stats->rx_undersize_packets);
11941
11942         stats->rx_frame_errors = old_stats->rx_frame_errors +
11943                 get_stat64(&hw_stats->rx_align_errors);
11944         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
11945                 get_stat64(&hw_stats->tx_discards);
11946         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
11947                 get_stat64(&hw_stats->tx_carrier_sense_errors);
11948
11949         stats->rx_crc_errors = old_stats->rx_crc_errors +
11950                 tg3_calc_crc_errors(tp);
11951
11952         stats->rx_missed_errors = old_stats->rx_missed_errors +
11953                 get_stat64(&hw_stats->rx_discards);
11954
11955         stats->rx_dropped = tp->rx_dropped;
11956         stats->tx_dropped = tp->tx_dropped;
11957 }
11958
11959 static int tg3_get_regs_len(struct net_device *dev)
11960 {
11961         return TG3_REG_BLK_SIZE;
11962 }
11963
11964 static void tg3_get_regs(struct net_device *dev,
11965                 struct ethtool_regs *regs, void *_p)
11966 {
11967         struct tg3 *tp = netdev_priv(dev);
11968
11969         regs->version = 0;
11970
11971         memset(_p, 0, TG3_REG_BLK_SIZE);
11972
11973         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11974                 return;
11975
11976         tg3_full_lock(tp, 0);
11977
11978         tg3_dump_legacy_regs(tp, (u32 *)_p);
11979
11980         tg3_full_unlock(tp);
11981 }
11982
11983 static int tg3_get_eeprom_len(struct net_device *dev)
11984 {
11985         struct tg3 *tp = netdev_priv(dev);
11986
11987         return tp->nvram_size;
11988 }
11989
11990 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11991 {
11992         struct tg3 *tp = netdev_priv(dev);
11993         int ret, cpmu_restore = 0;
11994         u8  *pd;
11995         u32 i, offset, len, b_offset, b_count, cpmu_val = 0;
11996         __be32 val;
11997
11998         if (tg3_flag(tp, NO_NVRAM))
11999                 return -EINVAL;
12000
12001         offset = eeprom->offset;
12002         len = eeprom->len;
12003         eeprom->len = 0;
12004
12005         eeprom->magic = TG3_EEPROM_MAGIC;
12006
12007         /* Override clock, link aware and link idle modes */
12008         if (tg3_flag(tp, CPMU_PRESENT)) {
12009                 cpmu_val = tr32(TG3_CPMU_CTRL);
12010                 if (cpmu_val & (CPMU_CTRL_LINK_AWARE_MODE |
12011                                 CPMU_CTRL_LINK_IDLE_MODE)) {
12012                         tw32(TG3_CPMU_CTRL, cpmu_val &
12013                                             ~(CPMU_CTRL_LINK_AWARE_MODE |
12014                                              CPMU_CTRL_LINK_IDLE_MODE));
12015                         cpmu_restore = 1;
12016                 }
12017         }
12018         tg3_override_clk(tp);
12019
12020         if (offset & 3) {
12021                 /* adjustments to start on required 4 byte boundary */
12022                 b_offset = offset & 3;
12023                 b_count = 4 - b_offset;
12024                 if (b_count > len) {
12025                         /* i.e. offset=1 len=2 */
12026                         b_count = len;
12027                 }
12028                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
12029                 if (ret)
12030                         goto eeprom_done;
12031                 memcpy(data, ((char *)&val) + b_offset, b_count);
12032                 len -= b_count;
12033                 offset += b_count;
12034                 eeprom->len += b_count;
12035         }
12036
12037         /* read bytes up to the last 4 byte boundary */
12038         pd = &data[eeprom->len];
12039         for (i = 0; i < (len - (len & 3)); i += 4) {
12040                 ret = tg3_nvram_read_be32(tp, offset + i, &val);
12041                 if (ret) {
12042                         if (i)
12043                                 i -= 4;
12044                         eeprom->len += i;
12045                         goto eeprom_done;
12046                 }
12047                 memcpy(pd + i, &val, 4);
12048                 if (need_resched()) {
12049                         if (signal_pending(current)) {
12050                                 eeprom->len += i;
12051                                 ret = -EINTR;
12052                                 goto eeprom_done;
12053                         }
12054                         cond_resched();
12055                 }
12056         }
12057         eeprom->len += i;
12058
12059         if (len & 3) {
12060                 /* read last bytes not ending on 4 byte boundary */
12061                 pd = &data[eeprom->len];
12062                 b_count = len & 3;
12063                 b_offset = offset + len - b_count;
12064                 ret = tg3_nvram_read_be32(tp, b_offset, &val);
12065                 if (ret)
12066                         goto eeprom_done;
12067                 memcpy(pd, &val, b_count);
12068                 eeprom->len += b_count;
12069         }
12070         ret = 0;
12071
12072 eeprom_done:
12073         /* Restore clock, link aware and link idle modes */
12074         tg3_restore_clk(tp);
12075         if (cpmu_restore)
12076                 tw32(TG3_CPMU_CTRL, cpmu_val);
12077
12078         return ret;
12079 }
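
/* Worked example for the unaligned handling above: a request with
 * offset = 5 and len = 9 is served as a 3-byte head taken from the
 * word at offset 4, one aligned word read at offset 8, and a 2-byte
 * tail taken from the word at offset 12.
 */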
12080
12081 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
12082 {
12083         struct tg3 *tp = netdev_priv(dev);
12084         int ret;
12085         u32 offset, len, b_offset, odd_len;
12086         u8 *buf;
12087         __be32 start = 0, end;
12088
12089         if (tg3_flag(tp, NO_NVRAM) ||
12090             eeprom->magic != TG3_EEPROM_MAGIC)
12091                 return -EINVAL;
12092
12093         offset = eeprom->offset;
12094         len = eeprom->len;
12095
12096         if ((b_offset = (offset & 3))) {
12097                 /* adjustments to start on required 4 byte boundary */
12098                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
12099                 if (ret)
12100                         return ret;
12101                 len += b_offset;
12102                 offset &= ~3;
12103                 if (len < 4)
12104                         len = 4;
12105         }
12106
12107         odd_len = 0;
12108         if (len & 3) {
12109                 /* adjustments to end on required 4 byte boundary */
12110                 odd_len = 1;
12111                 len = (len + 3) & ~3;
12112                 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
12113                 if (ret)
12114                         return ret;
12115         }
12116
12117         buf = data;
12118         if (b_offset || odd_len) {
12119                 buf = kmalloc(len, GFP_KERNEL);
12120                 if (!buf)
12121                         return -ENOMEM;
12122                 if (b_offset)
12123                         memcpy(buf, &start, 4);
12124                 if (odd_len)
12125                         memcpy(buf+len-4, &end, 4);
12126                 memcpy(buf + b_offset, data, eeprom->len);
12127         }
12128
12129         ret = tg3_nvram_write_block(tp, offset, len, buf);
12130
12131         if (buf != data)
12132                 kfree(buf);
12133
12134         return ret;
12135 }
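
/* tg3_set_eeprom() is a read-modify-write: when the request is not
 * word-aligned, the partial words at the start and end are read first
 * and merged into a bounce buffer so that tg3_nvram_write_block()
 * always sees a 4-byte-aligned, whole-word buffer.
 */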
12136
12137 static int tg3_get_link_ksettings(struct net_device *dev,
12138                                   struct ethtool_link_ksettings *cmd)
12139 {
12140         struct tg3 *tp = netdev_priv(dev);
12141         u32 supported, advertising;
12142
12143         if (tg3_flag(tp, USE_PHYLIB)) {
12144                 struct phy_device *phydev;
12145                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12146                         return -EAGAIN;
12147                 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12148                 phy_ethtool_ksettings_get(phydev, cmd);
12149
12150                 return 0;
12151         }
12152
12153         supported = (SUPPORTED_Autoneg);
12154
12155         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12156                 supported |= (SUPPORTED_1000baseT_Half |
12157                               SUPPORTED_1000baseT_Full);
12158
12159         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
12160                 supported |= (SUPPORTED_100baseT_Half |
12161                               SUPPORTED_100baseT_Full |
12162                               SUPPORTED_10baseT_Half |
12163                               SUPPORTED_10baseT_Full |
12164                               SUPPORTED_TP);
12165                 cmd->base.port = PORT_TP;
12166         } else {
12167                 supported |= SUPPORTED_FIBRE;
12168                 cmd->base.port = PORT_FIBRE;
12169         }
12170         ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
12171                                                 supported);
12172
12173         advertising = tp->link_config.advertising;
12174         if (tg3_flag(tp, PAUSE_AUTONEG)) {
12175                 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
12176                         if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
12177                                 advertising |= ADVERTISED_Pause;
12178                         } else {
12179                                 advertising |= ADVERTISED_Pause |
12180                                         ADVERTISED_Asym_Pause;
12181                         }
12182                 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
12183                         advertising |= ADVERTISED_Asym_Pause;
12184                 }
12185         }
12186         ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
12187                                                 advertising);
12188
12189         if (netif_running(dev) && tp->link_up) {
12190                 cmd->base.speed = tp->link_config.active_speed;
12191                 cmd->base.duplex = tp->link_config.active_duplex;
12192                 ethtool_convert_legacy_u32_to_link_mode(
12193                         cmd->link_modes.lp_advertising,
12194                         tp->link_config.rmt_adv);
12195
12196                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
12197                         if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
12198                                 cmd->base.eth_tp_mdix = ETH_TP_MDI_X;
12199                         else
12200                                 cmd->base.eth_tp_mdix = ETH_TP_MDI;
12201                 }
12202         } else {
12203                 cmd->base.speed = SPEED_UNKNOWN;
12204                 cmd->base.duplex = DUPLEX_UNKNOWN;
12205                 cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID;
12206         }
12207         cmd->base.phy_address = tp->phy_addr;
12208         cmd->base.autoneg = tp->link_config.autoneg;
12209         return 0;
12210 }
12211
12212 static int tg3_set_link_ksettings(struct net_device *dev,
12213                                   const struct ethtool_link_ksettings *cmd)
12214 {
12215         struct tg3 *tp = netdev_priv(dev);
12216         u32 speed = cmd->base.speed;
12217         u32 advertising;
12218
12219         if (tg3_flag(tp, USE_PHYLIB)) {
12220                 struct phy_device *phydev;
12221                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12222                         return -EAGAIN;
12223                 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12224                 return phy_ethtool_ksettings_set(phydev, cmd);
12225         }
12226
12227         if (cmd->base.autoneg != AUTONEG_ENABLE &&
12228             cmd->base.autoneg != AUTONEG_DISABLE)
12229                 return -EINVAL;
12230
12231         if (cmd->base.autoneg == AUTONEG_DISABLE &&
12232             cmd->base.duplex != DUPLEX_FULL &&
12233             cmd->base.duplex != DUPLEX_HALF)
12234                 return -EINVAL;
12235
12236         ethtool_convert_link_mode_to_legacy_u32(&advertising,
12237                                                 cmd->link_modes.advertising);
12238
12239         if (cmd->base.autoneg == AUTONEG_ENABLE) {
12240                 u32 mask = ADVERTISED_Autoneg |
12241                            ADVERTISED_Pause |
12242                            ADVERTISED_Asym_Pause;
12243
12244                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12245                         mask |= ADVERTISED_1000baseT_Half |
12246                                 ADVERTISED_1000baseT_Full;
12247
12248                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
12249                         mask |= ADVERTISED_100baseT_Half |
12250                                 ADVERTISED_100baseT_Full |
12251                                 ADVERTISED_10baseT_Half |
12252                                 ADVERTISED_10baseT_Full |
12253                                 ADVERTISED_TP;
12254                 else
12255                         mask |= ADVERTISED_FIBRE;
12256
12257                 if (advertising & ~mask)
12258                         return -EINVAL;
12259
12260                 mask &= (ADVERTISED_1000baseT_Half |
12261                          ADVERTISED_1000baseT_Full |
12262                          ADVERTISED_100baseT_Half |
12263                          ADVERTISED_100baseT_Full |
12264                          ADVERTISED_10baseT_Half |
12265                          ADVERTISED_10baseT_Full);
12266
12267                 advertising &= mask;
12268         } else {
12269                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
12270                         if (speed != SPEED_1000)
12271                                 return -EINVAL;
12272
12273                         if (cmd->base.duplex != DUPLEX_FULL)
12274                                 return -EINVAL;
12275                 } else {
12276                         if (speed != SPEED_100 &&
12277                             speed != SPEED_10)
12278                                 return -EINVAL;
12279                 }
12280         }
12281
12282         tg3_full_lock(tp, 0);
12283
12284         tp->link_config.autoneg = cmd->base.autoneg;
12285         if (cmd->base.autoneg == AUTONEG_ENABLE) {
12286                 tp->link_config.advertising = (advertising |
12287                                               ADVERTISED_Autoneg);
12288                 tp->link_config.speed = SPEED_UNKNOWN;
12289                 tp->link_config.duplex = DUPLEX_UNKNOWN;
12290         } else {
12291                 tp->link_config.advertising = 0;
12292                 tp->link_config.speed = speed;
12293                 tp->link_config.duplex = cmd->base.duplex;
12294         }
12295
12296         tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12297
12298         tg3_warn_mgmt_link_flap(tp);
12299
12300         if (netif_running(dev))
12301                 tg3_setup_phy(tp, true);
12302
12303         tg3_full_unlock(tp);
12304
12305         return 0;
12306 }
12307
12308 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
12309 {
12310         struct tg3 *tp = netdev_priv(dev);
12311
12312         strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
12313         strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
12314         strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
12315         strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
12316 }
12317
12318 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12319 {
12320         struct tg3 *tp = netdev_priv(dev);
12321
12322         if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
12323                 wol->supported = WAKE_MAGIC;
12324         else
12325                 wol->supported = 0;
12326         wol->wolopts = 0;
12327         if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
12328                 wol->wolopts = WAKE_MAGIC;
12329         memset(&wol->sopass, 0, sizeof(wol->sopass));
12330 }
12331
12332 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12333 {
12334         struct tg3 *tp = netdev_priv(dev);
12335         struct device *dp = &tp->pdev->dev;
12336
12337         if (wol->wolopts & ~WAKE_MAGIC)
12338                 return -EINVAL;
12339         if ((wol->wolopts & WAKE_MAGIC) &&
12340             !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
12341                 return -EINVAL;
12342
12343         device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
12344
12345         if (device_may_wakeup(dp))
12346                 tg3_flag_set(tp, WOL_ENABLE);
12347         else
12348                 tg3_flag_clear(tp, WOL_ENABLE);
12349
12350         return 0;
12351 }
12352
12353 static u32 tg3_get_msglevel(struct net_device *dev)
12354 {
12355         struct tg3 *tp = netdev_priv(dev);
12356         return tp->msg_enable;
12357 }
12358
12359 static void tg3_set_msglevel(struct net_device *dev, u32 value)
12360 {
12361         struct tg3 *tp = netdev_priv(dev);
12362         tp->msg_enable = value;
12363 }
12364
12365 static int tg3_nway_reset(struct net_device *dev)
12366 {
12367         struct tg3 *tp = netdev_priv(dev);
12368         int r;
12369
12370         if (!netif_running(dev))
12371                 return -EAGAIN;
12372
12373         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12374                 return -EINVAL;
12375
12376         tg3_warn_mgmt_link_flap(tp);
12377
12378         if (tg3_flag(tp, USE_PHYLIB)) {
12379                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12380                         return -EAGAIN;
12381                 r = phy_start_aneg(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
12382         } else {
12383                 u32 bmcr;
12384
12385                 spin_lock_bh(&tp->lock);
12386                 r = -EINVAL;
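                /* BMCR is read twice on purpose; the first result is
                 * discarded and appears to serve only as a dummy cycle
                 * so the second read returns a settled value (behaviour
                 * inherited from older kernels).
                 */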
12387                 tg3_readphy(tp, MII_BMCR, &bmcr);
12388                 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
12389                     ((bmcr & BMCR_ANENABLE) ||
12390                      (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
12391                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
12392                                                    BMCR_ANENABLE);
12393                         r = 0;
12394                 }
12395                 spin_unlock_bh(&tp->lock);
12396         }
12397
12398         return r;
12399 }
12400
12401 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
12402 {
12403         struct tg3 *tp = netdev_priv(dev);
12404
12405         ering->rx_max_pending = tp->rx_std_ring_mask;
12406         if (tg3_flag(tp, JUMBO_RING_ENABLE))
12407                 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
12408         else
12409                 ering->rx_jumbo_max_pending = 0;
12410
12411         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
12412
12413         ering->rx_pending = tp->rx_pending;
12414         if (tg3_flag(tp, JUMBO_RING_ENABLE))
12415                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
12416         else
12417                 ering->rx_jumbo_pending = 0;
12418
12419         ering->tx_pending = tp->napi[0].tx_pending;
12420 }
12421
12422 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
12423 {
12424         struct tg3 *tp = netdev_priv(dev);
12425         int i, irq_sync = 0, err = 0;
12426         bool reset_phy = false;
12427
12428         if ((ering->rx_pending > tp->rx_std_ring_mask) ||
12429             (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
12430             (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
12431             (ering->tx_pending <= MAX_SKB_FRAGS) ||
12432             (tg3_flag(tp, TSO_BUG) &&
12433              (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
12434                 return -EINVAL;
12435
12436         if (netif_running(dev)) {
12437                 tg3_phy_stop(tp);
12438                 tg3_netif_stop(tp);
12439                 irq_sync = 1;
12440         }
12441
12442         tg3_full_lock(tp, irq_sync);
12443
12444         tp->rx_pending = ering->rx_pending;
12445
12446         if (tg3_flag(tp, MAX_RXPEND_64) &&
12447             tp->rx_pending > 63)
12448                 tp->rx_pending = 63;
12449
12450         if (tg3_flag(tp, JUMBO_RING_ENABLE))
12451                 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
12452
12453         for (i = 0; i < tp->irq_max; i++)
12454                 tp->napi[i].tx_pending = ering->tx_pending;
12455
12456         if (netif_running(dev)) {
12457                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12458                 /* Reset PHY to avoid PHY lock up */
12459                 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
12460                     tg3_asic_rev(tp) == ASIC_REV_5719 ||
12461                     tg3_asic_rev(tp) == ASIC_REV_5720)
12462                         reset_phy = true;
12463
12464                 err = tg3_restart_hw(tp, reset_phy);
12465                 if (!err)
12466                         tg3_netif_start(tp);
12467         }
12468
12469         tg3_full_unlock(tp);
12470
12471         if (irq_sync && !err)
12472                 tg3_phy_start(tp);
12473
12474         return err;
12475 }
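
/* Note the TX ring floor enforced above: tx_pending must exceed
 * MAX_SKB_FRAGS, and 3 * MAX_SKB_FRAGS on TSO_BUG chips, which may
 * segment TSO packets in the driver and so need extra descriptors.
 * With 4 KiB pages MAX_SKB_FRAGS is typically 17, giving minimums of
 * 18 and 52 respectively.
 */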
12476
12477 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12478 {
12479         struct tg3 *tp = netdev_priv(dev);
12480
12481         epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
12482
12483         if (tp->link_config.flowctrl & FLOW_CTRL_RX)
12484                 epause->rx_pause = 1;
12485         else
12486                 epause->rx_pause = 0;
12487
12488         if (tp->link_config.flowctrl & FLOW_CTRL_TX)
12489                 epause->tx_pause = 1;
12490         else
12491                 epause->tx_pause = 0;
12492 }
12493
12494 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12495 {
12496         struct tg3 *tp = netdev_priv(dev);
12497         int err = 0;
12498         bool reset_phy = false;
12499
12500         if (tp->link_config.autoneg == AUTONEG_ENABLE)
12501                 tg3_warn_mgmt_link_flap(tp);
12502
12503         if (tg3_flag(tp, USE_PHYLIB)) {
12504                 u32 newadv;
12505                 struct phy_device *phydev;
12506
12507                 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12508
12509                 if (!(phydev->supported & SUPPORTED_Pause) ||
12510                     (!(phydev->supported & SUPPORTED_Asym_Pause) &&
12511                      (epause->rx_pause != epause->tx_pause)))
12512                         return -EINVAL;
12513
12514                 tp->link_config.flowctrl = 0;
12515                 if (epause->rx_pause) {
12516                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
12517
12518                         if (epause->tx_pause) {
12519                                 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12520                                 newadv = ADVERTISED_Pause;
12521                         } else
12522                                 newadv = ADVERTISED_Pause |
12523                                          ADVERTISED_Asym_Pause;
12524                 } else if (epause->tx_pause) {
12525                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
12526                         newadv = ADVERTISED_Asym_Pause;
12527                 } else
12528                         newadv = 0;
12529
12530                 if (epause->autoneg)
12531                         tg3_flag_set(tp, PAUSE_AUTONEG);
12532                 else
12533                         tg3_flag_clear(tp, PAUSE_AUTONEG);
12534
12535                 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
12536                         u32 oldadv = phydev->advertising &
12537                                      (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
12538                         if (oldadv != newadv) {
12539                                 phydev->advertising &=
12540                                         ~(ADVERTISED_Pause |
12541                                           ADVERTISED_Asym_Pause);
12542                                 phydev->advertising |= newadv;
12543                                 if (phydev->autoneg) {
12544                                         /*
12545                                          * Always renegotiate the link to
12546                                          * inform our link partner of our
12547                                          * flow control settings, even if the
12548                                          * flow control is forced.  Let
12549                                          * tg3_adjust_link() do the final
12550                                          * flow control setup.
12551                                          */
12552                                         return phy_start_aneg(phydev);
12553                                 }
12554                         }
12555
12556                         if (!epause->autoneg)
12557                                 tg3_setup_flow_control(tp, 0, 0);
12558                 } else {
12559                         tp->link_config.advertising &=
12560                                         ~(ADVERTISED_Pause |
12561                                           ADVERTISED_Asym_Pause);
12562                         tp->link_config.advertising |= newadv;
12563                 }
12564         } else {
12565                 int irq_sync = 0;
12566
12567                 if (netif_running(dev)) {
12568                         tg3_netif_stop(tp);
12569                         irq_sync = 1;
12570                 }
12571
12572                 tg3_full_lock(tp, irq_sync);
12573
12574                 if (epause->autoneg)
12575                         tg3_flag_set(tp, PAUSE_AUTONEG);
12576                 else
12577                         tg3_flag_clear(tp, PAUSE_AUTONEG);
12578                 if (epause->rx_pause)
12579                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
12580                 else
12581                         tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
12582                 if (epause->tx_pause)
12583                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
12584                 else
12585                         tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
12586
12587                 if (netif_running(dev)) {
12588                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12589                         /* Reset PHY to avoid PHY lock up */
12590                         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
12591                             tg3_asic_rev(tp) == ASIC_REV_5719 ||
12592                             tg3_asic_rev(tp) == ASIC_REV_5720)
12593                                 reset_phy = true;
12594
12595                         err = tg3_restart_hw(tp, reset_phy);
12596                         if (!err)
12597                                 tg3_netif_start(tp);
12598                 }
12599
12600                 tg3_full_unlock(tp);
12601         }
12602
12603         tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12604
12605         return err;
12606 }
12607
12608 static int tg3_get_sset_count(struct net_device *dev, int sset)
12609 {
12610         switch (sset) {
12611         case ETH_SS_TEST:
12612                 return TG3_NUM_TEST;
12613         case ETH_SS_STATS:
12614                 return TG3_NUM_STATS;
12615         default:
12616                 return -EOPNOTSUPP;
12617         }
12618 }
12619
12620 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
12621                          u32 *rules __always_unused)
12622 {
12623         struct tg3 *tp = netdev_priv(dev);
12624
12625         if (!tg3_flag(tp, SUPPORT_MSIX))
12626                 return -EOPNOTSUPP;
12627
12628         switch (info->cmd) {
12629         case ETHTOOL_GRXRINGS:
12630                 if (netif_running(tp->dev))
12631                         info->data = tp->rxq_cnt;
12632                 else {
12633                         info->data = num_online_cpus();
12634                         if (info->data > TG3_RSS_MAX_NUM_QS)
12635                                 info->data = TG3_RSS_MAX_NUM_QS;
12636                 }
12637
12638                 return 0;
12639
12640         default:
12641                 return -EOPNOTSUPP;
12642         }
12643 }
12644
12645 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
12646 {
12647         u32 size = 0;
12648         struct tg3 *tp = netdev_priv(dev);
12649
12650         if (tg3_flag(tp, SUPPORT_MSIX))
12651                 size = TG3_RSS_INDIR_TBL_SIZE;
12652
12653         return size;
12654 }
12655
12656 static int tg3_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc)
12657 {
12658         struct tg3 *tp = netdev_priv(dev);
12659         int i;
12660
12661         if (hfunc)
12662                 *hfunc = ETH_RSS_HASH_TOP;
12663         if (!indir)
12664                 return 0;
12665
12666         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12667                 indir[i] = tp->rss_ind_tbl[i];
12668
12669         return 0;
12670 }
12671
12672 static int tg3_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key,
12673                         const u8 hfunc)
12674 {
12675         struct tg3 *tp = netdev_priv(dev);
12676         size_t i;
12677
12678         /* Changing the hash key, or selecting any hash function other than
12679          * Toeplitz, is unsupported; only the indirection table may change.
12680          */
12681         if (key ||
12682             (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
12683                 return -EOPNOTSUPP;
12684
12685         if (!indir)
12686                 return 0;
12687
12688         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12689                 tp->rss_ind_tbl[i] = indir[i];
12690
12691         if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
12692                 return 0;
12693
12694         /* It is legal to write the indirection
12695          * table while the device is running.
12696          */
12697         tg3_full_lock(tp, 0);
12698         tg3_rss_write_indir_tbl(tp);
12699         tg3_full_unlock(tp);
12700
12701         return 0;
12702 }
12703
12704 static void tg3_get_channels(struct net_device *dev,
12705                              struct ethtool_channels *channel)
12706 {
12707         struct tg3 *tp = netdev_priv(dev);
12708         u32 deflt_qs = netif_get_num_default_rss_queues();
12709
12710         channel->max_rx = tp->rxq_max;
12711         channel->max_tx = tp->txq_max;
12712
12713         if (netif_running(dev)) {
12714                 channel->rx_count = tp->rxq_cnt;
12715                 channel->tx_count = tp->txq_cnt;
12716         } else {
12717                 if (tp->rxq_req)
12718                         channel->rx_count = tp->rxq_req;
12719                 else
12720                         channel->rx_count = min(deflt_qs, tp->rxq_max);
12721
12722                 if (tp->txq_req)
12723                         channel->tx_count = tp->txq_req;
12724                 else
12725                         channel->tx_count = min(deflt_qs, tp->txq_max);
12726         }
12727 }
12728
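/* Change the requested rx/tx queue counts (e.g. via "ethtool -L").  If
 * the interface is up, the device is stopped and restarted so the new
 * queue configuration takes effect immediately; otherwise the counts are
 * recorded and applied on the next open.
 */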
12729 static int tg3_set_channels(struct net_device *dev,
12730                             struct ethtool_channels *channel)
12731 {
12732         struct tg3 *tp = netdev_priv(dev);
12733
12734         if (!tg3_flag(tp, SUPPORT_MSIX))
12735                 return -EOPNOTSUPP;
12736
12737         if (channel->rx_count > tp->rxq_max ||
12738             channel->tx_count > tp->txq_max)
12739                 return -EINVAL;
12740
12741         tp->rxq_req = channel->rx_count;
12742         tp->txq_req = channel->tx_count;
12743
12744         if (!netif_running(dev))
12745                 return 0;
12746
12747         tg3_stop(tp);
12748
12749         tg3_carrier_off(tp);
12750
12751         tg3_start(tp, true, false, false);
12752
12753         return 0;
12754 }
12755
12756 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
12757 {
12758         switch (stringset) {
12759         case ETH_SS_STATS:
12760                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
12761                 break;
12762         case ETH_SS_TEST:
12763                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
12764                 break;
12765         default:
12766                 WARN_ON(1);     /* unknown stringset; should never happen */
12767                 break;
12768         }
12769 }
12770
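/* ethtool "identify" support (e.g. "ethtool -p"): blink the port LEDs so
 * a physical adapter can be located.  Returning 1 for ETHTOOL_ID_ACTIVE
 * asks the ethtool core to alternate the ON and OFF states once per
 * second, and ETHTOOL_ID_INACTIVE restores the saved LED configuration.
 */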
12771 static int tg3_set_phys_id(struct net_device *dev,
12772                             enum ethtool_phys_id_state state)
12773 {
12774         struct tg3 *tp = netdev_priv(dev);
12775
12776         if (!netif_running(tp->dev))
12777                 return -EAGAIN;
12778
12779         switch (state) {
12780         case ETHTOOL_ID_ACTIVE:
12781                 return 1;       /* cycle on/off once per second */
12782
12783         case ETHTOOL_ID_ON:
12784                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12785                      LED_CTRL_1000MBPS_ON |
12786                      LED_CTRL_100MBPS_ON |
12787                      LED_CTRL_10MBPS_ON |
12788                      LED_CTRL_TRAFFIC_OVERRIDE |
12789                      LED_CTRL_TRAFFIC_BLINK |
12790                      LED_CTRL_TRAFFIC_LED);
12791                 break;
12792
12793         case ETHTOOL_ID_OFF:
12794                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12795                      LED_CTRL_TRAFFIC_OVERRIDE);
12796                 break;
12797
12798         case ETHTOOL_ID_INACTIVE:
12799                 tw32(MAC_LED_CTRL, tp->led_ctrl);
12800                 break;
12801         }
12802
12803         return 0;
12804 }
12805
12806 static void tg3_get_ethtool_stats(struct net_device *dev,
12807                                    struct ethtool_stats *estats, u64 *tmp_stats)
12808 {
12809         struct tg3 *tp = netdev_priv(dev);
12810
12811         if (tp->hw_stats)
12812                 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
12813         else
12814                 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
12815 }
12816
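/* Read the VPD block, either out of NVRAM (the extended-VPD region when
 * a directory entry for it exists, otherwise the fixed legacy offset) or,
 * for devices without the NVRAM magic, through the PCI VPD capability.
 * Returns a kmalloc'd buffer the caller must kfree, or NULL on failure;
 * *vpdlen is set to the number of bytes read.
 */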
12817 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
12818 {
12819         int i;
12820         __be32 *buf;
12821         u32 offset = 0, len = 0;
12822         u32 magic, val;
12823
12824         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
12825                 return NULL;
12826
12827         if (magic == TG3_EEPROM_MAGIC) {
12828                 for (offset = TG3_NVM_DIR_START;
12829                      offset < TG3_NVM_DIR_END;
12830                      offset += TG3_NVM_DIRENT_SIZE) {
12831                         if (tg3_nvram_read(tp, offset, &val))
12832                                 return NULL;
12833
12834                         if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
12835                             TG3_NVM_DIRTYPE_EXTVPD)
12836                                 break;
12837                 }
12838
12839                 if (offset != TG3_NVM_DIR_END) {
12840                         len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
12841                         if (tg3_nvram_read(tp, offset + 4, &offset))
12842                                 return NULL;
12843
12844                         offset = tg3_nvram_logical_addr(tp, offset);
12845                 }
12846         }
12847
12848         if (!offset || !len) {
12849                 offset = TG3_NVM_VPD_OFF;
12850                 len = TG3_NVM_VPD_LEN;
12851         }
12852
12853         buf = kmalloc(len, GFP_KERNEL);
12854         if (buf == NULL)
12855                 return NULL;
12856
12857         if (magic == TG3_EEPROM_MAGIC) {
12858                 for (i = 0; i < len; i += 4) {
12859                         /* The data is in little-endian format in NVRAM.
12860                          * Use the big-endian read routines to preserve
12861                          * the byte order as it exists in NVRAM.
12862                          */
12863                         if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
12864                                 goto error;
12865                 }
12866         } else {
12867                 u8 *ptr;
12868                 ssize_t cnt;
12869                 unsigned int pos = 0;
12870
12871                 ptr = (u8 *)&buf[0];
12872                 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
12873                         cnt = pci_read_vpd(tp->pdev, pos,
12874                                            len - pos, ptr);
12875                         if (cnt == -ETIMEDOUT || cnt == -EINTR)
12876                                 cnt = 0;
12877                         else if (cnt < 0)
12878                                 goto error;
12879                 }
12880                 if (pos != len)
12881                         goto error;
12882         }
12883
12884         *vpdlen = len;
12885
12886         return buf;
12887
12888 error:
12889         kfree(buf);
12890         return NULL;
12891 }
12892
12893 #define NVRAM_TEST_SIZE 0x100
12894 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE   0x14
12895 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE   0x18
12896 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE   0x1c
12897 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE   0x20
12898 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE   0x24
12899 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE   0x50
12900 #define NVRAM_SELFBOOT_HW_SIZE 0x20
12901 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
12902
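/* 'ethtool -t' NVRAM test.  The image is validated according to its
 * format: selfboot firmware images carry a simple 8-bit checksum,
 * hardware selfboot images use per-byte parity bits, and legacy images
 * have CRC checksums over the bootstrap and manufacturing sections plus
 * an 8-bit checksum keyword in the VPD read-only section.
 */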
12903 static int tg3_test_nvram(struct tg3 *tp)
12904 {
12905         u32 csum, magic, len;
12906         __be32 *buf;
12907         int i, j, k, err = 0, size;
12908
12909         if (tg3_flag(tp, NO_NVRAM))
12910                 return 0;
12911
12912         if (tg3_nvram_read(tp, 0, &magic) != 0)
12913                 return -EIO;
12914
12915         if (magic == TG3_EEPROM_MAGIC)
12916                 size = NVRAM_TEST_SIZE;
12917         else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
12918                 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
12919                     TG3_EEPROM_SB_FORMAT_1) {
12920                         switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
12921                         case TG3_EEPROM_SB_REVISION_0:
12922                                 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
12923                                 break;
12924                         case TG3_EEPROM_SB_REVISION_2:
12925                                 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
12926                                 break;
12927                         case TG3_EEPROM_SB_REVISION_3:
12928                                 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
12929                                 break;
12930                         case TG3_EEPROM_SB_REVISION_4:
12931                                 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
12932                                 break;
12933                         case TG3_EEPROM_SB_REVISION_5:
12934                                 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
12935                                 break;
12936                         case TG3_EEPROM_SB_REVISION_6:
12937                                 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
12938                                 break;
12939                         default:
12940                                 return -EIO;
12941                         }
12942                 } else
12943                         return 0;
12944         } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
12945                 size = NVRAM_SELFBOOT_HW_SIZE;
12946         else
12947                 return -EIO;
12948
12949         buf = kmalloc(size, GFP_KERNEL);
12950         if (buf == NULL)
12951                 return -ENOMEM;
12952
12953         err = -EIO;
12954         for (i = 0, j = 0; i < size; i += 4, j++) {
12955                 err = tg3_nvram_read_be32(tp, i, &buf[j]);
12956                 if (err)
12957                         break;
12958         }
12959         if (i < size)
12960                 goto out;
12961
12962         /* Selfboot format */
12963         magic = be32_to_cpu(buf[0]);
12964         if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
12965             TG3_EEPROM_MAGIC_FW) {
12966                 u8 *buf8 = (u8 *) buf, csum8 = 0;
12967
12968                 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
12969                     TG3_EEPROM_SB_REVISION_2) {
12970                         /* For rev 2, the csum doesn't include the MBA. */
12971                         for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
12972                                 csum8 += buf8[i];
12973                         for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
12974                                 csum8 += buf8[i];
12975                 } else {
12976                         for (i = 0; i < size; i++)
12977                                 csum8 += buf8[i];
12978                 }
12979
12980                 if (csum8 == 0) {
12981                         err = 0;
12982                         goto out;
12983                 }
12984
12985                 err = -EIO;
12986                 goto out;
12987         }
12988
12989         if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
12990             TG3_EEPROM_MAGIC_HW) {
12991                 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
12992                 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
12993                 u8 *buf8 = (u8 *) buf;
12994
12995                 /* Separate the parity bits and the data bytes.  */
12996                 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
12997                         if ((i == 0) || (i == 8)) {
12998                                 int l;
12999                                 u8 msk;
13000
13001                                 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
13002                                         parity[k++] = buf8[i] & msk;
13003                                 i++;
13004                         } else if (i == 16) {
13005                                 int l;
13006                                 u8 msk;
13007
13008                                 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
13009                                         parity[k++] = buf8[i] & msk;
13010                                 i++;
13011
13012                                 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
13013                                         parity[k++] = buf8[i] & msk;
13014                                 i++;
13015                         }
13016                         data[j++] = buf8[i];
13017                 }
13018
13019                 err = -EIO;
13020                 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
13021                         u8 hw8 = hweight8(data[i]);
13022
13023                         if ((hw8 & 0x1) && parity[i])
13024                                 goto out;
13025                         else if (!(hw8 & 0x1) && !parity[i])
13026                                 goto out;
13027                 }
13028                 err = 0;
13029                 goto out;
13030         }
13031
13032         err = -EIO;
13033
13034         /* Bootstrap checksum at offset 0x10 */
13035         csum = calc_crc((unsigned char *) buf, 0x10);
13036         if (csum != le32_to_cpu(buf[0x10/4]))
13037                 goto out;
13038
13039         /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
13040         csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
13041         if (csum != le32_to_cpu(buf[0xfc/4]))
13042                 goto out;
13043
13044         kfree(buf);
13045
13046         buf = tg3_vpd_readblock(tp, &len);
13047         if (!buf)
13048                 return -ENOMEM;
13049
13050         i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
13051         if (i > 0) {
13052                 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
13053                 if (j < 0)
13054                         goto out;
13055
13056                 if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
13057                         goto out;
13058
13059                 i += PCI_VPD_LRDT_TAG_SIZE;
13060                 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
13061                                               PCI_VPD_RO_KEYWORD_CHKSUM);
13062                 if (j > 0) {
13063                         u8 csum8 = 0;
13064
13065                         j += PCI_VPD_INFO_FLD_HDR_SIZE;
13066
13067                         for (i = 0; i <= j; i++)
13068                                 csum8 += ((u8 *)buf)[i];
13069
13070                         if (csum8)
13071                                 goto out;
13072                 }
13073         }
13074
13075         err = 0;
13076
13077 out:
13078         kfree(buf);
13079         return err;
13080 }
13081
13082 #define TG3_SERDES_TIMEOUT_SEC  2
13083 #define TG3_COPPER_TIMEOUT_SEC  6
13084
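/* Link test: poll tp->link_up once per second until the link comes up or
 * the timeout expires.  SerDes links are given 2 seconds and copper
 * links 6, presumably because copper autonegotiation takes longer.
 */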
13085 static int tg3_test_link(struct tg3 *tp)
13086 {
13087         int i, max;
13088
13089         if (!netif_running(tp->dev))
13090                 return -ENODEV;
13091
13092         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
13093                 max = TG3_SERDES_TIMEOUT_SEC;
13094         else
13095                 max = TG3_COPPER_TIMEOUT_SEC;
13096
13097         for (i = 0; i < max; i++) {
13098                 if (tp->link_up)
13099                         return 0;
13100
13101                 if (msleep_interruptible(1000))
13102                         break;
13103         }
13104
13105         return -EIO;
13106 }
13107
13108 /* Only test the commonly used registers */
13109 static int tg3_test_registers(struct tg3 *tp)
13110 {
13111         int i, is_5705, is_5750;
13112         u32 offset, read_mask, write_mask, val, save_val, read_val;
13113         static struct {
13114                 u16 offset;
13115                 u16 flags;
13116 #define TG3_FL_5705     0x1
13117 #define TG3_FL_NOT_5705 0x2
13118 #define TG3_FL_NOT_5788 0x4
13119 #define TG3_FL_NOT_5750 0x8
13120                 u32 read_mask;
13121                 u32 write_mask;
13122         } reg_tbl[] = {
13123                 /* MAC Control Registers */
13124                 { MAC_MODE, TG3_FL_NOT_5705,
13125                         0x00000000, 0x00ef6f8c },
13126                 { MAC_MODE, TG3_FL_5705,
13127                         0x00000000, 0x01ef6b8c },
13128                 { MAC_STATUS, TG3_FL_NOT_5705,
13129                         0x03800107, 0x00000000 },
13130                 { MAC_STATUS, TG3_FL_5705,
13131                         0x03800100, 0x00000000 },
13132                 { MAC_ADDR_0_HIGH, 0x0000,
13133                         0x00000000, 0x0000ffff },
13134                 { MAC_ADDR_0_LOW, 0x0000,
13135                         0x00000000, 0xffffffff },
13136                 { MAC_RX_MTU_SIZE, 0x0000,
13137                         0x00000000, 0x0000ffff },
13138                 { MAC_TX_MODE, 0x0000,
13139                         0x00000000, 0x00000070 },
13140                 { MAC_TX_LENGTHS, 0x0000,
13141                         0x00000000, 0x00003fff },
13142                 { MAC_RX_MODE, TG3_FL_NOT_5705,
13143                         0x00000000, 0x000007fc },
13144                 { MAC_RX_MODE, TG3_FL_5705,
13145                         0x00000000, 0x000007dc },
13146                 { MAC_HASH_REG_0, 0x0000,
13147                         0x00000000, 0xffffffff },
13148                 { MAC_HASH_REG_1, 0x0000,
13149                         0x00000000, 0xffffffff },
13150                 { MAC_HASH_REG_2, 0x0000,
13151                         0x00000000, 0xffffffff },
13152                 { MAC_HASH_REG_3, 0x0000,
13153                         0x00000000, 0xffffffff },
13154
13155                 /* Receive Data and Receive BD Initiator Control Registers. */
13156                 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
13157                         0x00000000, 0xffffffff },
13158                 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
13159                         0x00000000, 0xffffffff },
13160                 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
13161                         0x00000000, 0x00000003 },
13162                 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
13163                         0x00000000, 0xffffffff },
13164                 { RCVDBDI_STD_BD+0, 0x0000,
13165                         0x00000000, 0xffffffff },
13166                 { RCVDBDI_STD_BD+4, 0x0000,
13167                         0x00000000, 0xffffffff },
13168                 { RCVDBDI_STD_BD+8, 0x0000,
13169                         0x00000000, 0xffff0002 },
13170                 { RCVDBDI_STD_BD+0xc, 0x0000,
13171                         0x00000000, 0xffffffff },
13172
13173                 /* Receive BD Initiator Control Registers. */
13174                 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
13175                         0x00000000, 0xffffffff },
13176                 { RCVBDI_STD_THRESH, TG3_FL_5705,
13177                         0x00000000, 0x000003ff },
13178                 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
13179                         0x00000000, 0xffffffff },
13180
13181                 /* Host Coalescing Control Registers. */
13182                 { HOSTCC_MODE, TG3_FL_NOT_5705,
13183                         0x00000000, 0x00000004 },
13184                 { HOSTCC_MODE, TG3_FL_5705,
13185                         0x00000000, 0x000000f6 },
13186                 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
13187                         0x00000000, 0xffffffff },
13188                 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
13189                         0x00000000, 0x000003ff },
13190                 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
13191                         0x00000000, 0xffffffff },
13192                 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
13193                         0x00000000, 0x000003ff },
13194                 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
13195                         0x00000000, 0xffffffff },
13196                 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
13197                         0x00000000, 0x000000ff },
13198                 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
13199                         0x00000000, 0xffffffff },
13200                 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
13201                         0x00000000, 0x000000ff },
13202                 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
13203                         0x00000000, 0xffffffff },
13204                 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
13205                         0x00000000, 0xffffffff },
13206                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
13207                         0x00000000, 0xffffffff },
13208                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
13209                         0x00000000, 0x000000ff },
13210                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
13211                         0x00000000, 0xffffffff },
13212                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
13213                         0x00000000, 0x000000ff },
13214                 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
13215                         0x00000000, 0xffffffff },
13216                 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
13217                         0x00000000, 0xffffffff },
13218                 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
13219                         0x00000000, 0xffffffff },
13220                 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
13221                         0x00000000, 0xffffffff },
13222                 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
13223                         0x00000000, 0xffffffff },
13224                 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
13225                         0xffffffff, 0x00000000 },
13226                 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
13227                         0xffffffff, 0x00000000 },
13228
13229                 /* Buffer Manager Control Registers. */
13230                 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
13231                         0x00000000, 0x007fff80 },
13232                 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
13233                         0x00000000, 0x007fffff },
13234                 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
13235                         0x00000000, 0x0000003f },
13236                 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
13237                         0x00000000, 0x000001ff },
13238                 { BUFMGR_MB_HIGH_WATER, 0x0000,
13239                         0x00000000, 0x000001ff },
13240                 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
13241                         0xffffffff, 0x00000000 },
13242                 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
13243                         0xffffffff, 0x00000000 },
13244
13245                 /* Mailbox Registers */
13246                 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
13247                         0x00000000, 0x000001ff },
13248                 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
13249                         0x00000000, 0x000001ff },
13250                 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
13251                         0x00000000, 0x000007ff },
13252                 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
13253                         0x00000000, 0x000001ff },
13254
13255                 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
13256         };
13257
13258         is_5705 = is_5750 = 0;
13259         if (tg3_flag(tp, 5705_PLUS)) {
13260                 is_5705 = 1;
13261                 if (tg3_flag(tp, 5750_PLUS))
13262                         is_5750 = 1;
13263         }
13264
13265         for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
13266                 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
13267                         continue;
13268
13269                 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
13270                         continue;
13271
13272                 if (tg3_flag(tp, IS_5788) &&
13273                     (reg_tbl[i].flags & TG3_FL_NOT_5788))
13274                         continue;
13275
13276                 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
13277                         continue;
13278
13279                 offset = (u32) reg_tbl[i].offset;
13280                 read_mask = reg_tbl[i].read_mask;
13281                 write_mask = reg_tbl[i].write_mask;
13282
13283                 /* Save the original register content */
13284                 save_val = tr32(offset);
13285
13286                 /* Determine the read-only value. */
13287                 read_val = save_val & read_mask;
13288
13289                 /* Write zero to the register, then make sure the read-only bits
13290                  * are not changed and the read/write bits are all zeros.
13291                  */
13292                 tw32(offset, 0);
13293
13294                 val = tr32(offset);
13295
13296                 /* Test the read-only and read/write bits. */
13297                 if (((val & read_mask) != read_val) || (val & write_mask))
13298                         goto out;
13299
13300                 /* Write ones to all the bits defined by RdMask and WrMask, then
13301                  * make sure the read-only bits are not changed and the
13302                  * read/write bits are all ones.
13303                  */
13304                 tw32(offset, read_mask | write_mask);
13305
13306                 val = tr32(offset);
13307
13308                 /* Test the read-only bits. */
13309                 if ((val & read_mask) != read_val)
13310                         goto out;
13311
13312                 /* Test the read/write bits. */
13313                 if ((val & write_mask) != write_mask)
13314                         goto out;
13315
13316                 tw32(offset, save_val);
13317         }
13318
13319         return 0;
13320
13321 out:
13322         if (netif_msg_hw(tp))
13323                 netdev_err(tp->dev,
13324                            "Register test failed at offset %x\n", offset);
13325         tw32(offset, save_val);
13326         return -EIO;
13327 }
13328
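/* Walk a window of on-chip memory, writing each test pattern to every
 * 32-bit word and reading it back; any mismatch fails the test with
 * -EIO.
 */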
13329 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
13330 {
13331         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
13332         int i;
13333         u32 j;
13334
13335         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
13336                 for (j = 0; j < len; j += 4) {
13337                         u32 val;
13338
13339                         tg3_write_mem(tp, offset + j, test_pattern[i]);
13340                         tg3_read_mem(tp, offset + j, &val);
13341                         if (val != test_pattern[i])
13342                                 return -EIO;
13343                 }
13344         }
13345         return 0;
13346 }
13347
13348 static int tg3_test_memory(struct tg3 *tp)
13349 {
13350         static struct mem_entry {
13351                 u32 offset;
13352                 u32 len;
13353         } mem_tbl_570x[] = {
13354                 { 0x00000000, 0x00b50},
13355                 { 0x00002000, 0x1c000},
13356                 { 0xffffffff, 0x00000}
13357         }, mem_tbl_5705[] = {
13358                 { 0x00000100, 0x0000c},
13359                 { 0x00000200, 0x00008},
13360                 { 0x00004000, 0x00800},
13361                 { 0x00006000, 0x01000},
13362                 { 0x00008000, 0x02000},
13363                 { 0x00010000, 0x0e000},
13364                 { 0xffffffff, 0x00000}
13365         }, mem_tbl_5755[] = {
13366                 { 0x00000200, 0x00008},
13367                 { 0x00004000, 0x00800},
13368                 { 0x00006000, 0x00800},
13369                 { 0x00008000, 0x02000},
13370                 { 0x00010000, 0x0c000},
13371                 { 0xffffffff, 0x00000}
13372         }, mem_tbl_5906[] = {
13373                 { 0x00000200, 0x00008},
13374                 { 0x00004000, 0x00400},
13375                 { 0x00006000, 0x00400},
13376                 { 0x00008000, 0x01000},
13377                 { 0x00010000, 0x01000},
13378                 { 0xffffffff, 0x00000}
13379         }, mem_tbl_5717[] = {
13380                 { 0x00000200, 0x00008},
13381                 { 0x00010000, 0x0a000},
13382                 { 0x00020000, 0x13c00},
13383                 { 0xffffffff, 0x00000}
13384         }, mem_tbl_57765[] = {
13385                 { 0x00000200, 0x00008},
13386                 { 0x00004000, 0x00800},
13387                 { 0x00006000, 0x09800},
13388                 { 0x00010000, 0x0a000},
13389                 { 0xffffffff, 0x00000}
13390         };
13391         struct mem_entry *mem_tbl;
13392         int err = 0;
13393         int i;
13394
13395         if (tg3_flag(tp, 5717_PLUS))
13396                 mem_tbl = mem_tbl_5717;
13397         else if (tg3_flag(tp, 57765_CLASS) ||
13398                  tg3_asic_rev(tp) == ASIC_REV_5762)
13399                 mem_tbl = mem_tbl_57765;
13400         else if (tg3_flag(tp, 5755_PLUS))
13401                 mem_tbl = mem_tbl_5755;
13402         else if (tg3_asic_rev(tp) == ASIC_REV_5906)
13403                 mem_tbl = mem_tbl_5906;
13404         else if (tg3_flag(tp, 5705_PLUS))
13405                 mem_tbl = mem_tbl_5705;
13406         else
13407                 mem_tbl = mem_tbl_570x;
13408
13409         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
13410                 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
13411                 if (err)
13412                         break;
13413         }
13414
13415         return err;
13416 }
13417
13418 #define TG3_TSO_MSS             500
13419
13420 #define TG3_TSO_IP_HDR_LEN      20
13421 #define TG3_TSO_TCP_HDR_LEN     20
13422 #define TG3_TSO_TCP_OPT_LEN     12
13423
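/* Canned frame body used by the TSO loopback test: an ethertype of
 * 0x0800 followed by a 20-byte IPv4 header (10.0.0.1 -> 10.0.0.2, proto
 * TCP) and a 20-byte TCP header with 12 bytes of options (two NOPs plus
 * a timestamp option), matching the TG3_TSO_*_LEN constants above.  The
 * IP total length is filled in at run time, and the TCP checksum is
 * cleared when the hardware computes it.
 */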
13424 static const u8 tg3_tso_header[] = {
13425 0x08, 0x00,
13426 0x45, 0x00, 0x00, 0x00,
13427 0x00, 0x00, 0x40, 0x00,
13428 0x40, 0x06, 0x00, 0x00,
13429 0x0a, 0x00, 0x00, 0x01,
13430 0x0a, 0x00, 0x00, 0x02,
13431 0x0d, 0x00, 0xe0, 0x00,
13432 0x00, 0x00, 0x01, 0x00,
13433 0x00, 0x00, 0x02, 0x00,
13434 0x80, 0x10, 0x10, 0x00,
13435 0x14, 0x09, 0x00, 0x00,
13436 0x01, 0x01, 0x08, 0x0a,
13437 0x11, 0x11, 0x11, 0x11,
13438 0x11, 0x11, 0x11, 0x11,
13439 };
13440
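/* Core of the loopback tests.  A test frame (optionally a multi-segment
 * TSO pseudo-frame) is built, DMA-mapped and posted to the tx ring, then
 * the rx return ring is polled for the looped-back packet(s) and the
 * payload is compared byte for byte against what was sent.
 */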
13441 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
13442 {
13443         u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
13444         u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
13445         u32 budget;
13446         struct sk_buff *skb;
13447         u8 *tx_data, *rx_data;
13448         dma_addr_t map;
13449         int num_pkts, tx_len, rx_len, i, err;
13450         struct tg3_rx_buffer_desc *desc;
13451         struct tg3_napi *tnapi, *rnapi;
13452         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
13453
13454         tnapi = &tp->napi[0];
13455         rnapi = &tp->napi[0];
13456         if (tp->irq_cnt > 1) {
13457                 if (tg3_flag(tp, ENABLE_RSS))
13458                         rnapi = &tp->napi[1];
13459                 if (tg3_flag(tp, ENABLE_TSS))
13460                         tnapi = &tp->napi[1];
13461         }
13462         coal_now = tnapi->coal_now | rnapi->coal_now;
13463
13464         err = -EIO;
13465
13466         tx_len = pktsz;
13467         skb = netdev_alloc_skb(tp->dev, tx_len);
13468         if (!skb)
13469                 return -ENOMEM;
13470
13471         tx_data = skb_put(skb, tx_len);
13472         memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN);
13473         memset(tx_data + ETH_ALEN, 0x0, 8);
13474
13475         tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
13476
13477         if (tso_loopback) {
13478                 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
13479
13480                 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
13481                               TG3_TSO_TCP_OPT_LEN;
13482
13483                 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
13484                        sizeof(tg3_tso_header));
13485                 mss = TG3_TSO_MSS;
13486
13487                 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
13488                 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
13489
13490                 /* Set the total length field in the IP header */
13491                 iph->tot_len = htons((u16)(mss + hdr_len));
13492
13493                 base_flags = (TXD_FLAG_CPU_PRE_DMA |
13494                               TXD_FLAG_CPU_POST_DMA);
13495
13496                 if (tg3_flag(tp, HW_TSO_1) ||
13497                     tg3_flag(tp, HW_TSO_2) ||
13498                     tg3_flag(tp, HW_TSO_3)) {
13499                         struct tcphdr *th;
13500                         val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
13501                         th = (struct tcphdr *)&tx_data[val];
13502                         th->check = 0;
13503                 } else
13504                         base_flags |= TXD_FLAG_TCPUDP_CSUM;
13505
13506                 if (tg3_flag(tp, HW_TSO_3)) {
13507                         mss |= (hdr_len & 0xc) << 12;
13508                         if (hdr_len & 0x10)
13509                                 base_flags |= 0x00000010;
13510                         base_flags |= (hdr_len & 0x3e0) << 5;
13511                 } else if (tg3_flag(tp, HW_TSO_2))
13512                         mss |= hdr_len << 9;
13513                 else if (tg3_flag(tp, HW_TSO_1) ||
13514                          tg3_asic_rev(tp) == ASIC_REV_5705) {
13515                         mss |= (TG3_TSO_TCP_OPT_LEN << 9);
13516                 } else {
13517                         base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
13518                 }
13519
13520                 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
13521         } else {
13522                 num_pkts = 1;
13523                 data_off = ETH_HLEN;
13524
13525                 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
13526                     tx_len > VLAN_ETH_FRAME_LEN)
13527                         base_flags |= TXD_FLAG_JMB_PKT;
13528         }
13529
13530         for (i = data_off; i < tx_len; i++)
13531                 tx_data[i] = (u8) (i & 0xff);
13532
13533         map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
13534         if (pci_dma_mapping_error(tp->pdev, map)) {
13535                 dev_kfree_skb(skb);
13536                 return -EIO;
13537         }
13538
13539         val = tnapi->tx_prod;
13540         tnapi->tx_buffers[val].skb = skb;
13541         dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
13542
13543         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13544                rnapi->coal_now);
13545
13546         udelay(10);
13547
13548         rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
13549
13550         budget = tg3_tx_avail(tnapi);
13551         if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
13552                             base_flags | TXD_FLAG_END, mss, 0)) {
13553                 tnapi->tx_buffers[val].skb = NULL;
13554                 dev_kfree_skb(skb);
13555                 return -EIO;
13556         }
13557
13558         tnapi->tx_prod++;
13559
13560         /* Sync BD data before updating mailbox */
13561         wmb();
13562
13563         tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
13564         tr32_mailbox(tnapi->prodmbox);
13565
13566         udelay(10);
13567
13568         /* 350 usec to allow enough time on some 10/100 Mbps devices.  */
13569         for (i = 0; i < 35; i++) {
13570                 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13571                        coal_now);
13572
13573                 udelay(10);
13574
13575                 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
13576                 rx_idx = rnapi->hw_status->idx[0].rx_producer;
13577                 if ((tx_idx == tnapi->tx_prod) &&
13578                     (rx_idx == (rx_start_idx + num_pkts)))
13579                         break;
13580         }
13581
13582         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
13583         dev_kfree_skb(skb);
13584
13585         if (tx_idx != tnapi->tx_prod)
13586                 goto out;
13587
13588         if (rx_idx != rx_start_idx + num_pkts)
13589                 goto out;
13590
13591         val = data_off;
13592         while (rx_idx != rx_start_idx) {
13593                 desc = &rnapi->rx_rcb[rx_start_idx++];
13594                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
13595                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
13596
13597                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
13598                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
13599                         goto out;
13600
13601                 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
13602                          - ETH_FCS_LEN;
13603
13604                 if (!tso_loopback) {
13605                         if (rx_len != tx_len)
13606                                 goto out;
13607
13608                         if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
13609                                 if (opaque_key != RXD_OPAQUE_RING_STD)
13610                                         goto out;
13611                         } else {
13612                                 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
13613                                         goto out;
13614                         }
13615                 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
13616                            (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
13617                             >> RXD_TCPCSUM_SHIFT != 0xffff) {
13618                         goto out;
13619                 }
13620
13621                 if (opaque_key == RXD_OPAQUE_RING_STD) {
13622                         rx_data = tpr->rx_std_buffers[desc_idx].data;
13623                         map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
13624                                              mapping);
13625                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
13626                         rx_data = tpr->rx_jmb_buffers[desc_idx].data;
13627                         map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
13628                                              mapping);
13629                 } else
13630                         goto out;
13631
13632                 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
13633                                             PCI_DMA_FROMDEVICE);
13634
13635                 rx_data += TG3_RX_OFFSET(tp);
13636                 for (i = data_off; i < rx_len; i++, val++) {
13637                         if (*(rx_data + i) != (u8) (val & 0xff))
13638                                 goto out;
13639                 }
13640         }
13641
13642         err = 0;
13643
13644         /* tg3_free_rings will unmap and free the rx_data */
13645 out:
13646         return err;
13647 }
13648
13649 #define TG3_STD_LOOPBACK_FAILED         1
13650 #define TG3_JMB_LOOPBACK_FAILED         2
13651 #define TG3_TSO_LOOPBACK_FAILED         4
13652 #define TG3_LOOPBACK_FAILED \
13653         (TG3_STD_LOOPBACK_FAILED | \
13654          TG3_JMB_LOOPBACK_FAILED | \
13655          TG3_TSO_LOOPBACK_FAILED)
13656
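/* Run the MAC, PHY and (optionally) external loopback tests, each at
 * standard frame size plus TSO and jumbo variants where the hardware
 * supports them.  EEE capability is masked off for the duration,
 * presumably because low-power idle entry would interfere with the test
 * traffic; it is restored before returning.
 */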
13657 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
13658 {
13659         int err = -EIO;
13660         u32 eee_cap;
13661         u32 jmb_pkt_sz = 9000;
13662
13663         if (tp->dma_limit)
13664                 jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
13665
13666         eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
13667         tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
13668
13669         if (!netif_running(tp->dev)) {
13670                 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13671                 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13672                 if (do_extlpbk)
13673                         data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13674                 goto done;
13675         }
13676
13677         err = tg3_reset_hw(tp, true);
13678         if (err) {
13679                 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13680                 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13681                 if (do_extlpbk)
13682                         data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13683                 goto done;
13684         }
13685
13686         if (tg3_flag(tp, ENABLE_RSS)) {
13687                 int i;
13688
13689                 /* Reroute all rx packets to the 1st queue */
13690                 for (i = MAC_RSS_INDIR_TBL_0;
13691                      i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
13692                         tw32(i, 0x0);
13693         }
13694
13695         /* HW errata - mac loopback fails in some cases on 5780.
13696          * Normal traffic and PHY loopback are not affected by
13697          * errata.  Also, the MAC loopback test is deprecated for
13698          * all newer ASIC revisions.
13699          */
13700         if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
13701             !tg3_flag(tp, CPMU_PRESENT)) {
13702                 tg3_mac_loopback(tp, true);
13703
13704                 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13705                         data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13706
13707                 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13708                     tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13709                         data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13710
13711                 tg3_mac_loopback(tp, false);
13712         }
13713
13714         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
13715             !tg3_flag(tp, USE_PHYLIB)) {
13716                 int i;
13717
13718                 tg3_phy_lpbk_set(tp, 0, false);
13719
13720                 /* Wait for link */
13721                 for (i = 0; i < 100; i++) {
13722                         if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
13723                                 break;
13724                         mdelay(1);
13725                 }
13726
13727                 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13728                         data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13729                 if (tg3_flag(tp, TSO_CAPABLE) &&
13730                     tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13731                         data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
13732                 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13733                     tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13734                         data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13735
13736                 if (do_extlpbk) {
13737                         tg3_phy_lpbk_set(tp, 0, true);
13738
13739                         /* All link indications report up, but the hardware
13740                          * isn't really ready for about 20 msec.  Double it
13741                          * to be sure.
13742                          */
13743                         mdelay(40);
13744
13745                         if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13746                                 data[TG3_EXT_LOOPB_TEST] |=
13747                                                         TG3_STD_LOOPBACK_FAILED;
13748                         if (tg3_flag(tp, TSO_CAPABLE) &&
13749                             tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13750                                 data[TG3_EXT_LOOPB_TEST] |=
13751                                                         TG3_TSO_LOOPBACK_FAILED;
13752                         if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13753                             tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13754                                 data[TG3_EXT_LOOPB_TEST] |=
13755                                                         TG3_JMB_LOOPBACK_FAILED;
13756                 }
13757
13758                 /* Re-enable gphy autopowerdown. */
13759                 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
13760                         tg3_phy_toggle_apd(tp, true);
13761         }
13762
13763         err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
13764                data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;
13765
13766 done:
13767         tp->phy_flags |= eee_cap;
13768
13769         return err;
13770 }
13771
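/* Top-level 'ethtool -t' entry point.  Online tests (NVRAM, link) run
 * against the live device; offline tests additionally halt the chip to
 * run the register, memory, loopback and interrupt tests, then restart
 * the hardware and restore the previous state.
 */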
13772 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
13773                           u64 *data)
13774 {
13775         struct tg3 *tp = netdev_priv(dev);
13776         bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
13777
13778         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
13779                 if (tg3_power_up(tp)) {
13780                         etest->flags |= ETH_TEST_FL_FAILED;
13781                         memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
13782                         return;
13783                 }
13784                 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
13785         }
13786
13787         memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
13788
13789         if (tg3_test_nvram(tp) != 0) {
13790                 etest->flags |= ETH_TEST_FL_FAILED;
13791                 data[TG3_NVRAM_TEST] = 1;
13792         }
13793         if (!doextlpbk && tg3_test_link(tp)) {
13794                 etest->flags |= ETH_TEST_FL_FAILED;
13795                 data[TG3_LINK_TEST] = 1;
13796         }
13797         if (etest->flags & ETH_TEST_FL_OFFLINE) {
13798                 int err, err2 = 0, irq_sync = 0;
13799
13800                 if (netif_running(dev)) {
13801                         tg3_phy_stop(tp);
13802                         tg3_netif_stop(tp);
13803                         irq_sync = 1;
13804                 }
13805
13806                 tg3_full_lock(tp, irq_sync);
13807                 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
13808                 err = tg3_nvram_lock(tp);
13809                 tg3_halt_cpu(tp, RX_CPU_BASE);
13810                 if (!tg3_flag(tp, 5705_PLUS))
13811                         tg3_halt_cpu(tp, TX_CPU_BASE);
13812                 if (!err)
13813                         tg3_nvram_unlock(tp);
13814
13815                 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
13816                         tg3_phy_reset(tp);
13817
13818                 if (tg3_test_registers(tp) != 0) {
13819                         etest->flags |= ETH_TEST_FL_FAILED;
13820                         data[TG3_REGISTER_TEST] = 1;
13821                 }
13822
13823                 if (tg3_test_memory(tp) != 0) {
13824                         etest->flags |= ETH_TEST_FL_FAILED;
13825                         data[TG3_MEMORY_TEST] = 1;
13826                 }
13827
13828                 if (doextlpbk)
13829                         etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
13830
13831                 if (tg3_test_loopback(tp, data, doextlpbk))
13832                         etest->flags |= ETH_TEST_FL_FAILED;
13833
13834                 tg3_full_unlock(tp);
13835
13836                 if (tg3_test_interrupt(tp) != 0) {
13837                         etest->flags |= ETH_TEST_FL_FAILED;
13838                         data[TG3_INTERRUPT_TEST] = 1;
13839                 }
13840
13841                 tg3_full_lock(tp, 0);
13842
13843                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13844                 if (netif_running(dev)) {
13845                         tg3_flag_set(tp, INIT_COMPLETE);
13846                         err2 = tg3_restart_hw(tp, true);
13847                         if (!err2)
13848                                 tg3_netif_start(tp);
13849                 }
13850
13851                 tg3_full_unlock(tp);
13852
13853                 if (irq_sync && !err2)
13854                         tg3_phy_start(tp);
13855         }
13856         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
13857                 tg3_power_down_prepare(tp);
13858
13859 }
13860
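/* SIOCSHWTSTAMP handler (driven by e.g. linuxptp's hwstamp_ctl):
 * translate the generic hwtstamp_config rx filter into the chip's
 * TG3_RX_PTP_CTL bits and enable or disable tx timestamping.  Filters
 * the hardware cannot express are rejected with -ERANGE rather than
 * silently widened.
 */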
13861 static int tg3_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
13862 {
13863         struct tg3 *tp = netdev_priv(dev);
13864         struct hwtstamp_config stmpconf;
13865
13866         if (!tg3_flag(tp, PTP_CAPABLE))
13867                 return -EOPNOTSUPP;
13868
13869         if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
13870                 return -EFAULT;
13871
13872         if (stmpconf.flags)
13873                 return -EINVAL;
13874
13875         if (stmpconf.tx_type != HWTSTAMP_TX_ON &&
13876             stmpconf.tx_type != HWTSTAMP_TX_OFF)
13877                 return -ERANGE;
13878
13879         switch (stmpconf.rx_filter) {
13880         case HWTSTAMP_FILTER_NONE:
13881                 tp->rxptpctl = 0;
13882                 break;
13883         case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
13884                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13885                                TG3_RX_PTP_CTL_ALL_V1_EVENTS;
13886                 break;
13887         case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
13888                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13889                                TG3_RX_PTP_CTL_SYNC_EVNT;
13890                 break;
13891         case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
13892                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13893                                TG3_RX_PTP_CTL_DELAY_REQ;
13894                 break;
13895         case HWTSTAMP_FILTER_PTP_V2_EVENT:
13896                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13897                                TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13898                 break;
13899         case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
13900                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13901                                TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13902                 break;
13903         case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
13904                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13905                                TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13906                 break;
13907         case HWTSTAMP_FILTER_PTP_V2_SYNC:
13908                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13909                                TG3_RX_PTP_CTL_SYNC_EVNT;
13910                 break;
13911         case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
13912                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13913                                TG3_RX_PTP_CTL_SYNC_EVNT;
13914                 break;
13915         case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
13916                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13917                                TG3_RX_PTP_CTL_SYNC_EVNT;
13918                 break;
13919         case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
13920                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13921                                TG3_RX_PTP_CTL_DELAY_REQ;
13922                 break;
13923         case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
13924                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13925                                TG3_RX_PTP_CTL_DELAY_REQ;
13926                 break;
13927         case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
13928                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13929                                TG3_RX_PTP_CTL_DELAY_REQ;
13930                 break;
13931         default:
13932                 return -ERANGE;
13933         }
13934
13935         if (netif_running(dev) && tp->rxptpctl)
13936                 tw32(TG3_RX_PTP_CTL,
13937                      tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
13938
13939         if (stmpconf.tx_type == HWTSTAMP_TX_ON)
13940                 tg3_flag_set(tp, TX_TSTAMP_EN);
13941         else
13942                 tg3_flag_clear(tp, TX_TSTAMP_EN);
13943
13944         return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13945                 -EFAULT : 0;
13946 }
13947
13948 static int tg3_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
13949 {
13950         struct tg3 *tp = netdev_priv(dev);
13951         struct hwtstamp_config stmpconf;
13952
13953         if (!tg3_flag(tp, PTP_CAPABLE))
13954                 return -EOPNOTSUPP;
13955
13956         stmpconf.flags = 0;
13957         stmpconf.tx_type = (tg3_flag(tp, TX_TSTAMP_EN) ?
13958                             HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF);
13959
13960         switch (tp->rxptpctl) {
13961         case 0:
13962                 stmpconf.rx_filter = HWTSTAMP_FILTER_NONE;
13963                 break;
13964         case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_ALL_V1_EVENTS:
13965                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
13966                 break;
13967         case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13968                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
13969                 break;
13970         case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13971                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
13972                 break;
13973         case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13974                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
13975                 break;
13976         case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13977                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
13978                 break;
13979         case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13980                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
13981                 break;
13982         case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13983                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
13984                 break;
13985         case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13986                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_SYNC;
13987                 break;
13988         case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13989                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
13990                 break;
13991         case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13992                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
13993                 break;
13994         case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13995                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ;
13996                 break;
13997         case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13998                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
13999                 break;
14000         default:
14001                 WARN_ON_ONCE(1);
14002                 return -ERANGE;
14003         }
14004
14005         return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
14006                 -EFAULT : 0;
14007 }
14008
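/* ndo_do_ioctl handler.  When phylib manages the PHY, the MII ioctls are
 * delegated to it wholesale; otherwise SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG
 * are serviced with the driver's own PHY accessors under tp->lock, and
 * the hardware-timestamp ioctls are dispatched to the helpers above.
 */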
14009 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
14010 {
14011         struct mii_ioctl_data *data = if_mii(ifr);
14012         struct tg3 *tp = netdev_priv(dev);
14013         int err;
14014
14015         if (tg3_flag(tp, USE_PHYLIB)) {
14016                 struct phy_device *phydev;
14017                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
14018                         return -EAGAIN;
14019                 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
14020                 return phy_mii_ioctl(phydev, ifr, cmd);
14021         }
14022
14023         switch (cmd) {
14024         case SIOCGMIIPHY:
14025                 data->phy_id = tp->phy_addr;
14026
14027                 /* fallthru */
14028         case SIOCGMIIREG: {
14029                 u32 mii_regval;
14030
14031                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14032                         break;                  /* We have no PHY */
14033
14034                 if (!netif_running(dev))
14035                         return -EAGAIN;
14036
14037                 spin_lock_bh(&tp->lock);
14038                 err = __tg3_readphy(tp, data->phy_id & 0x1f,
14039                                     data->reg_num & 0x1f, &mii_regval);
14040                 spin_unlock_bh(&tp->lock);
14041
14042                 data->val_out = mii_regval;
14043
14044                 return err;
14045         }
14046
14047         case SIOCSMIIREG:
14048                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14049                         break;                  /* We have no PHY */
14050
14051                 if (!netif_running(dev))
14052                         return -EAGAIN;
14053
14054                 spin_lock_bh(&tp->lock);
14055                 err = __tg3_writephy(tp, data->phy_id & 0x1f,
14056                                      data->reg_num & 0x1f, data->val_in);
14057                 spin_unlock_bh(&tp->lock);
14058
14059                 return err;
14060
14061         case SIOCSHWTSTAMP:
14062                 return tg3_hwtstamp_set(dev, ifr);
14063
14064         case SIOCGHWTSTAMP:
14065                 return tg3_hwtstamp_get(dev, ifr);
14066
14067         default:
14068                 /* do nothing */
14069                 break;
14070         }
14071         return -EOPNOTSUPP;
14072 }
14073
14074 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
14075 {
14076         struct tg3 *tp = netdev_priv(dev);
14077
14078         memcpy(ec, &tp->coal, sizeof(*ec));
14079         return 0;
14080 }
14081
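/* Validate and apply interrupt coalescing parameters (e.g. from
 * "ethtool -C").  Pre-5705 devices support the full set of irq and
 * statistics tick limits; on 5705 and later those limits stay zero, so
 * requests that use them are rejected by the range checks below.
 */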
14082 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
14083 {
14084         struct tg3 *tp = netdev_priv(dev);
14085         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
14086         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
14087
14088         if (!tg3_flag(tp, 5705_PLUS)) {
14089                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
14090                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
14091                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
14092                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
14093         }
14094
14095         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
14096             (!ec->rx_coalesce_usecs) ||
14097             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
14098             (!ec->tx_coalesce_usecs) ||
14099             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
14100             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
14101             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
14102             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
14103             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
14104             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
14105             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
14106             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
14107                 return -EINVAL;
14108
14109         /* Only copy relevant parameters, ignore all others. */
14110         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
14111         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
14112         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
14113         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
14114         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
14115         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
14116         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
14117         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
14118         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
14119
14120         if (netif_running(dev)) {
14121                 tg3_full_lock(tp, 0);
14122                 __tg3_set_coalesce(tp, &tp->coal);
14123                 tg3_full_unlock(tp);
14124         }
14125         return 0;
14126 }
14127
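/* Apply Energy Efficient Ethernet settings, e.g. from
 * "ethtool --set-eee".  Direct changes to the advertised link-mode
 * mask are refused, the Tx LPI timer is bounded by
 * TG3_CPMU_DBTMR1_LNKIDLE_MAX, and if the interface is running the
 * EEE configuration is reprogrammed and the PHY is reset.
 */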
14128 static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata)
14129 {
14130         struct tg3 *tp = netdev_priv(dev);
14131
14132         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
14133                 netdev_warn(tp->dev, "Board does not support EEE!\n");
14134                 return -EOPNOTSUPP;
14135         }
14136
14137         if (edata->advertised != tp->eee.advertised) {
14138                 netdev_warn(tp->dev,
14139                             "Direct manipulation of EEE advertisement is not supported\n");
14140                 return -EINVAL;
14141         }
14142
14143         if (edata->tx_lpi_timer > TG3_CPMU_DBTMR1_LNKIDLE_MAX) {
14144                 netdev_warn(tp->dev,
14145                             "Maximum supported Tx LPI timer is %#x\n",
14146                             TG3_CPMU_DBTMR1_LNKIDLE_MAX);
14147                 return -EINVAL;
14148         }
14149
14150         tp->eee = *edata;
14151
14152         tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
14153         tg3_warn_mgmt_link_flap(tp);
14154
14155         if (netif_running(tp->dev)) {
14156                 tg3_full_lock(tp, 0);
14157                 tg3_setup_eee(tp);
14158                 tg3_phy_reset(tp);
14159                 tg3_full_unlock(tp);
14160         }
14161
14162         return 0;
14163 }
14164
14165 static int tg3_get_eee(struct net_device *dev, struct ethtool_eee *edata)
14166 {
14167         struct tg3 *tp = netdev_priv(dev);
14168
14169         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
14170                 netdev_warn(tp->dev,
14171                             "Board does not support EEE!\n");
14172                 return -EOPNOTSUPP;
14173         }
14174
14175         *edata = tp->eee;
14176         return 0;
14177 }
14178
14179 static const struct ethtool_ops tg3_ethtool_ops = {
14180         .get_drvinfo            = tg3_get_drvinfo,
14181         .get_regs_len           = tg3_get_regs_len,
14182         .get_regs               = tg3_get_regs,
14183         .get_wol                = tg3_get_wol,
14184         .set_wol                = tg3_set_wol,
14185         .get_msglevel           = tg3_get_msglevel,
14186         .set_msglevel           = tg3_set_msglevel,
14187         .nway_reset             = tg3_nway_reset,
14188         .get_link               = ethtool_op_get_link,
14189         .get_eeprom_len         = tg3_get_eeprom_len,
14190         .get_eeprom             = tg3_get_eeprom,
14191         .set_eeprom             = tg3_set_eeprom,
14192         .get_ringparam          = tg3_get_ringparam,
14193         .set_ringparam          = tg3_set_ringparam,
14194         .get_pauseparam         = tg3_get_pauseparam,
14195         .set_pauseparam         = tg3_set_pauseparam,
14196         .self_test              = tg3_self_test,
14197         .get_strings            = tg3_get_strings,
14198         .set_phys_id            = tg3_set_phys_id,
14199         .get_ethtool_stats      = tg3_get_ethtool_stats,
14200         .get_coalesce           = tg3_get_coalesce,
14201         .set_coalesce           = tg3_set_coalesce,
14202         .get_sset_count         = tg3_get_sset_count,
14203         .get_rxnfc              = tg3_get_rxnfc,
14204         .get_rxfh_indir_size    = tg3_get_rxfh_indir_size,
14205         .get_rxfh               = tg3_get_rxfh,
14206         .set_rxfh               = tg3_set_rxfh,
14207         .get_channels           = tg3_get_channels,
14208         .set_channels           = tg3_set_channels,
14209         .get_ts_info            = tg3_get_ts_info,
14210         .get_eee                = tg3_get_eee,
14211         .set_eee                = tg3_set_eee,
14212         .get_link_ksettings     = tg3_get_link_ksettings,
14213         .set_link_ksettings     = tg3_set_link_ksettings,
14214 };
14215
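/* Fill in 64-bit interface statistics.  If the hardware statistics
 * block is unavailable (device closed or a reset is in progress),
 * fall back to the last snapshot saved in tp->net_stats_prev.
 */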
14216 static void tg3_get_stats64(struct net_device *dev,
14217                             struct rtnl_link_stats64 *stats)
14218 {
14219         struct tg3 *tp = netdev_priv(dev);
14220
14221         spin_lock_bh(&tp->lock);
14222         if (!tp->hw_stats || !tg3_flag(tp, INIT_COMPLETE)) {
14223                 *stats = tp->net_stats_prev;
14224                 spin_unlock_bh(&tp->lock);
14225                 return;
14226         }
14227
14228         tg3_get_nstats(tp, stats);
14229         spin_unlock_bh(&tp->lock);
14230 }
14231
14232 static void tg3_set_rx_mode(struct net_device *dev)
14233 {
14234         struct tg3 *tp = netdev_priv(dev);
14235
14236         if (!netif_running(dev))
14237                 return;
14238
14239         tg3_full_lock(tp, 0);
14240         __tg3_set_rx_mode(dev);
14241         tg3_full_unlock(tp);
14242 }
14243
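/* Record a new MTU and choose the matching receive strategy:
 * 5780-class chips re-evaluate TSO via netdev_update_features(),
 * while all other chips toggle the jumbo receive ring around
 * ETH_DATA_LEN.
 */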
14244 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
14245                                int new_mtu)
14246 {
14247         dev->mtu = new_mtu;
14248
14249         if (new_mtu > ETH_DATA_LEN) {
14250                 if (tg3_flag(tp, 5780_CLASS)) {
14251                         netdev_update_features(dev);
14252                         tg3_flag_clear(tp, TSO_CAPABLE);
14253                 } else {
14254                         tg3_flag_set(tp, JUMBO_RING_ENABLE);
14255                 }
14256         } else {
14257                 if (tg3_flag(tp, 5780_CLASS)) {
14258                         tg3_flag_set(tp, TSO_CAPABLE);
14259                         netdev_update_features(dev);
14260                 }
14261                 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
14262         }
14263 }
14264
14265 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
14266 {
14267         struct tg3 *tp = netdev_priv(dev);
14268         int err;
14269         bool reset_phy = false;
14270
14271         if (!netif_running(dev)) {
14272                 /* We'll just catch it later when the
14273                  * device is brought up.
14274                  */
14275                 tg3_set_mtu(dev, tp, new_mtu);
14276                 return 0;
14277         }
14278
14279         tg3_phy_stop(tp);
14280
14281         tg3_netif_stop(tp);
14282
14283         tg3_set_mtu(dev, tp, new_mtu);
14284
14285         tg3_full_lock(tp, 1);
14286
14287         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
14288
14289         /* Reset the PHY, otherwise the read DMA engine will be left in a
14290          * mode that limits all requests to 256 bytes.
14291          */
14292         if (tg3_asic_rev(tp) == ASIC_REV_57766 ||
14293             tg3_asic_rev(tp) == ASIC_REV_5717 ||
14294             tg3_asic_rev(tp) == ASIC_REV_5719 ||
14295             tg3_asic_rev(tp) == ASIC_REV_5720)
14296                 reset_phy = true;
14297
14298         err = tg3_restart_hw(tp, reset_phy);
14299
14300         if (!err)
14301                 tg3_netif_start(tp);
14302
14303         tg3_full_unlock(tp);
14304
14305         if (!err)
14306                 tg3_phy_start(tp);
14307
14308         return err;
14309 }
14310
14311 static const struct net_device_ops tg3_netdev_ops = {
14312         .ndo_open               = tg3_open,
14313         .ndo_stop               = tg3_close,
14314         .ndo_start_xmit         = tg3_start_xmit,
14315         .ndo_get_stats64        = tg3_get_stats64,
14316         .ndo_validate_addr      = eth_validate_addr,
14317         .ndo_set_rx_mode        = tg3_set_rx_mode,
14318         .ndo_set_mac_address    = tg3_set_mac_addr,
14319         .ndo_do_ioctl           = tg3_ioctl,
14320         .ndo_tx_timeout         = tg3_tx_timeout,
14321         .ndo_change_mtu         = tg3_change_mtu,
14322         .ndo_fix_features       = tg3_fix_features,
14323         .ndo_set_features       = tg3_set_features,
14324 #ifdef CONFIG_NET_POLL_CONTROLLER
14325         .ndo_poll_controller    = tg3_poll_controller,
14326 #endif
14327 };
14328
14329 static void tg3_get_eeprom_size(struct tg3 *tp)
14330 {
14331         u32 cursize, val, magic;
14332
14333         tp->nvram_size = EEPROM_CHIP_SIZE;
14334
14335         if (tg3_nvram_read(tp, 0, &magic) != 0)
14336                 return;
14337
14338         if ((magic != TG3_EEPROM_MAGIC) &&
14339             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
14340             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
14341                 return;
14342
14343         /*
14344          * Size the chip by reading offsets at increasing powers of two.
14345          * When we encounter our validation signature, we know the addressing
14346          * has wrapped around, and thus have our chip size.
14347          */
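        /* Example: a 32 KB part aliases offset 0x8000 back to offset 0,
         * so the magic value is read back there and cursize stops at
         * 0x8000.
         */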
14348         cursize = 0x10;
14349
14350         while (cursize < tp->nvram_size) {
14351                 if (tg3_nvram_read(tp, cursize, &val) != 0)
14352                         return;
14353
14354                 if (val == magic)
14355                         break;
14356
14357                 cursize <<= 1;
14358         }
14359
14360         tp->nvram_size = cursize;
14361 }
14362
14363 static void tg3_get_nvram_size(struct tg3 *tp)
14364 {
14365         u32 val;
14366
14367         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
14368                 return;
14369
14370         /* Selfboot format */
14371         if (val != TG3_EEPROM_MAGIC) {
14372                 tg3_get_eeprom_size(tp);
14373                 return;
14374         }
14375
14376         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
14377                 if (val != 0) {
14378                         /* This is confusing.  We want to operate on the
14379                          * 16-bit value at offset 0xf2.  The tg3_nvram_read()
14380                          * call will read from NVRAM and byteswap the data
14381                          * according to the byteswapping settings for all
14382                          * other register accesses.  This ensures the data we
14383                          * want will always reside in the lower 16-bits.
14384                          * However, the data in NVRAM is in LE format, which
14385                          * means the data from the NVRAM read will always be
14386                          * opposite the endianness of the CPU.  The 16-bit
14387                          * byteswap then brings the data to CPU endianness.
14388                          */
14389                         tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
14390                         return;
14391                 }
14392         }
14393         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14394 }
14395
14396 static void tg3_get_nvram_info(struct tg3 *tp)
14397 {
14398         u32 nvcfg1;
14399
14400         nvcfg1 = tr32(NVRAM_CFG1);
14401         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
14402                 tg3_flag_set(tp, FLASH);
14403         } else {
14404                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14405                 tw32(NVRAM_CFG1, nvcfg1);
14406         }
14407
14408         if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
14409             tg3_flag(tp, 5780_CLASS)) {
14410                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
14411                 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
14412                         tp->nvram_jedecnum = JEDEC_ATMEL;
14413                         tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14414                         tg3_flag_set(tp, NVRAM_BUFFERED);
14415                         break;
14416                 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
14417                         tp->nvram_jedecnum = JEDEC_ATMEL;
14418                         tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
14419                         break;
14420                 case FLASH_VENDOR_ATMEL_EEPROM:
14421                         tp->nvram_jedecnum = JEDEC_ATMEL;
14422                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14423                         tg3_flag_set(tp, NVRAM_BUFFERED);
14424                         break;
14425                 case FLASH_VENDOR_ST:
14426                         tp->nvram_jedecnum = JEDEC_ST;
14427                         tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
14428                         tg3_flag_set(tp, NVRAM_BUFFERED);
14429                         break;
14430                 case FLASH_VENDOR_SAIFUN:
14431                         tp->nvram_jedecnum = JEDEC_SAIFUN;
14432                         tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
14433                         break;
14434                 case FLASH_VENDOR_SST_SMALL:
14435                 case FLASH_VENDOR_SST_LARGE:
14436                         tp->nvram_jedecnum = JEDEC_SST;
14437                         tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
14438                         break;
14439                 }
14440         } else {
14441                 tp->nvram_jedecnum = JEDEC_ATMEL;
14442                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14443                 tg3_flag_set(tp, NVRAM_BUFFERED);
14444         }
14445 }
14446
14447 static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
14448 {
14449         switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
14450         case FLASH_5752PAGE_SIZE_256:
14451                 tp->nvram_pagesize = 256;
14452                 break;
14453         case FLASH_5752PAGE_SIZE_512:
14454                 tp->nvram_pagesize = 512;
14455                 break;
14456         case FLASH_5752PAGE_SIZE_1K:
14457                 tp->nvram_pagesize = 1024;
14458                 break;
14459         case FLASH_5752PAGE_SIZE_2K:
14460                 tp->nvram_pagesize = 2048;
14461                 break;
14462         case FLASH_5752PAGE_SIZE_4K:
14463                 tp->nvram_pagesize = 4096;
14464                 break;
14465         case FLASH_5752PAGE_SIZE_264:
14466                 tp->nvram_pagesize = 264;
14467                 break;
14468         case FLASH_5752PAGE_SIZE_528:
14469                 tp->nvram_pagesize = 528;
14470                 break;
14471         }
14472 }
14473
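/* Decode the NVRAM_CFG1 vendor straps for the 5752.  Bit 27 marks
 * NVRAM that is write-protected for TPM use.  Flash parts get their
 * page size from tg3_nvram_get_pagesize(); EEPROMs are treated as a
 * single large page with compat-bypass mode disabled.
 */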
14474 static void tg3_get_5752_nvram_info(struct tg3 *tp)
14475 {
14476         u32 nvcfg1;
14477
14478         nvcfg1 = tr32(NVRAM_CFG1);
14479
14480         /* NVRAM protection for TPM */
14481         if (nvcfg1 & (1 << 27))
14482                 tg3_flag_set(tp, PROTECTED_NVRAM);
14483
14484         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14485         case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
14486         case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
14487                 tp->nvram_jedecnum = JEDEC_ATMEL;
14488                 tg3_flag_set(tp, NVRAM_BUFFERED);
14489                 break;
14490         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14491                 tp->nvram_jedecnum = JEDEC_ATMEL;
14492                 tg3_flag_set(tp, NVRAM_BUFFERED);
14493                 tg3_flag_set(tp, FLASH);
14494                 break;
14495         case FLASH_5752VENDOR_ST_M45PE10:
14496         case FLASH_5752VENDOR_ST_M45PE20:
14497         case FLASH_5752VENDOR_ST_M45PE40:
14498                 tp->nvram_jedecnum = JEDEC_ST;
14499                 tg3_flag_set(tp, NVRAM_BUFFERED);
14500                 tg3_flag_set(tp, FLASH);
14501                 break;
14502         }
14503
14504         if (tg3_flag(tp, FLASH)) {
14505                 tg3_nvram_get_pagesize(tp, nvcfg1);
14506         } else {
14507                 /* For eeprom, set pagesize to maximum eeprom size */
14508                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14509
14510                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14511                 tw32(NVRAM_CFG1, nvcfg1);
14512         }
14513 }
14514
14515 static void tg3_get_5755_nvram_info(struct tg3 *tp)
14516 {
14517         u32 nvcfg1, protect = 0;
14518
14519         nvcfg1 = tr32(NVRAM_CFG1);
14520
14521         /* NVRAM protection for TPM */
14522         if (nvcfg1 & (1 << 27)) {
14523                 tg3_flag_set(tp, PROTECTED_NVRAM);
14524                 protect = 1;
14525         }
14526
14527         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14528         switch (nvcfg1) {
14529         case FLASH_5755VENDOR_ATMEL_FLASH_1:
14530         case FLASH_5755VENDOR_ATMEL_FLASH_2:
14531         case FLASH_5755VENDOR_ATMEL_FLASH_3:
14532         case FLASH_5755VENDOR_ATMEL_FLASH_5:
14533                 tp->nvram_jedecnum = JEDEC_ATMEL;
14534                 tg3_flag_set(tp, NVRAM_BUFFERED);
14535                 tg3_flag_set(tp, FLASH);
14536                 tp->nvram_pagesize = 264;
14537                 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
14538                     nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
14539                         tp->nvram_size = (protect ? 0x3e200 :
14540                                           TG3_NVRAM_SIZE_512KB);
14541                 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
14542                         tp->nvram_size = (protect ? 0x1f200 :
14543                                           TG3_NVRAM_SIZE_256KB);
14544                 else
14545                         tp->nvram_size = (protect ? 0x1f200 :
14546                                           TG3_NVRAM_SIZE_128KB);
14547                 break;
14548         case FLASH_5752VENDOR_ST_M45PE10:
14549         case FLASH_5752VENDOR_ST_M45PE20:
14550         case FLASH_5752VENDOR_ST_M45PE40:
14551                 tp->nvram_jedecnum = JEDEC_ST;
14552                 tg3_flag_set(tp, NVRAM_BUFFERED);
14553                 tg3_flag_set(tp, FLASH);
14554                 tp->nvram_pagesize = 256;
14555                 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
14556                         tp->nvram_size = (protect ?
14557                                           TG3_NVRAM_SIZE_64KB :
14558                                           TG3_NVRAM_SIZE_128KB);
14559                 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
14560                         tp->nvram_size = (protect ?
14561                                           TG3_NVRAM_SIZE_64KB :
14562                                           TG3_NVRAM_SIZE_256KB);
14563                 else
14564                         tp->nvram_size = (protect ?
14565                                           TG3_NVRAM_SIZE_128KB :
14566                                           TG3_NVRAM_SIZE_512KB);
14567                 break;
14568         }
14569 }
14570
14571 static void tg3_get_5787_nvram_info(struct tg3 *tp)
14572 {
14573         u32 nvcfg1;
14574
14575         nvcfg1 = tr32(NVRAM_CFG1);
14576
14577         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14578         case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
14579         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14580         case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
14581         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14582                 tp->nvram_jedecnum = JEDEC_ATMEL;
14583                 tg3_flag_set(tp, NVRAM_BUFFERED);
14584                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14585
14586                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14587                 tw32(NVRAM_CFG1, nvcfg1);
14588                 break;
14589         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14590         case FLASH_5755VENDOR_ATMEL_FLASH_1:
14591         case FLASH_5755VENDOR_ATMEL_FLASH_2:
14592         case FLASH_5755VENDOR_ATMEL_FLASH_3:
14593                 tp->nvram_jedecnum = JEDEC_ATMEL;
14594                 tg3_flag_set(tp, NVRAM_BUFFERED);
14595                 tg3_flag_set(tp, FLASH);
14596                 tp->nvram_pagesize = 264;
14597                 break;
14598         case FLASH_5752VENDOR_ST_M45PE10:
14599         case FLASH_5752VENDOR_ST_M45PE20:
14600         case FLASH_5752VENDOR_ST_M45PE40:
14601                 tp->nvram_jedecnum = JEDEC_ST;
14602                 tg3_flag_set(tp, NVRAM_BUFFERED);
14603                 tg3_flag_set(tp, FLASH);
14604                 tp->nvram_pagesize = 256;
14605                 break;
14606         }
14607 }
14608
14609 static void tg3_get_5761_nvram_info(struct tg3 *tp)
14610 {
14611         u32 nvcfg1, protect = 0;
14612
14613         nvcfg1 = tr32(NVRAM_CFG1);
14614
14615         /* NVRAM protection for TPM */
14616         if (nvcfg1 & (1 << 27)) {
14617                 tg3_flag_set(tp, PROTECTED_NVRAM);
14618                 protect = 1;
14619         }
14620
14621         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14622         switch (nvcfg1) {
14623         case FLASH_5761VENDOR_ATMEL_ADB021D:
14624         case FLASH_5761VENDOR_ATMEL_ADB041D:
14625         case FLASH_5761VENDOR_ATMEL_ADB081D:
14626         case FLASH_5761VENDOR_ATMEL_ADB161D:
14627         case FLASH_5761VENDOR_ATMEL_MDB021D:
14628         case FLASH_5761VENDOR_ATMEL_MDB041D:
14629         case FLASH_5761VENDOR_ATMEL_MDB081D:
14630         case FLASH_5761VENDOR_ATMEL_MDB161D:
14631                 tp->nvram_jedecnum = JEDEC_ATMEL;
14632                 tg3_flag_set(tp, NVRAM_BUFFERED);
14633                 tg3_flag_set(tp, FLASH);
14634                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14635                 tp->nvram_pagesize = 256;
14636                 break;
14637         case FLASH_5761VENDOR_ST_A_M45PE20:
14638         case FLASH_5761VENDOR_ST_A_M45PE40:
14639         case FLASH_5761VENDOR_ST_A_M45PE80:
14640         case FLASH_5761VENDOR_ST_A_M45PE16:
14641         case FLASH_5761VENDOR_ST_M_M45PE20:
14642         case FLASH_5761VENDOR_ST_M_M45PE40:
14643         case FLASH_5761VENDOR_ST_M_M45PE80:
14644         case FLASH_5761VENDOR_ST_M_M45PE16:
14645                 tp->nvram_jedecnum = JEDEC_ST;
14646                 tg3_flag_set(tp, NVRAM_BUFFERED);
14647                 tg3_flag_set(tp, FLASH);
14648                 tp->nvram_pagesize = 256;
14649                 break;
14650         }
14651
14652         if (protect) {
14653                 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
14654         } else {
14655                 switch (nvcfg1) {
14656                 case FLASH_5761VENDOR_ATMEL_ADB161D:
14657                 case FLASH_5761VENDOR_ATMEL_MDB161D:
14658                 case FLASH_5761VENDOR_ST_A_M45PE16:
14659                 case FLASH_5761VENDOR_ST_M_M45PE16:
14660                         tp->nvram_size = TG3_NVRAM_SIZE_2MB;
14661                         break;
14662                 case FLASH_5761VENDOR_ATMEL_ADB081D:
14663                 case FLASH_5761VENDOR_ATMEL_MDB081D:
14664                 case FLASH_5761VENDOR_ST_A_M45PE80:
14665                 case FLASH_5761VENDOR_ST_M_M45PE80:
14666                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14667                         break;
14668                 case FLASH_5761VENDOR_ATMEL_ADB041D:
14669                 case FLASH_5761VENDOR_ATMEL_MDB041D:
14670                 case FLASH_5761VENDOR_ST_A_M45PE40:
14671                 case FLASH_5761VENDOR_ST_M_M45PE40:
14672                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14673                         break;
14674                 case FLASH_5761VENDOR_ATMEL_ADB021D:
14675                 case FLASH_5761VENDOR_ATMEL_MDB021D:
14676                 case FLASH_5761VENDOR_ST_A_M45PE20:
14677                 case FLASH_5761VENDOR_ST_M_M45PE20:
14678                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14679                         break;
14680                 }
14681         }
14682 }
14683
14684 static void tg3_get_5906_nvram_info(struct tg3 *tp)
14685 {
14686         tp->nvram_jedecnum = JEDEC_ATMEL;
14687         tg3_flag_set(tp, NVRAM_BUFFERED);
14688         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14689 }
14690
14691 static void tg3_get_57780_nvram_info(struct tg3 *tp)
14692 {
14693         u32 nvcfg1;
14694
14695         nvcfg1 = tr32(NVRAM_CFG1);
14696
14697         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14698         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14699         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14700                 tp->nvram_jedecnum = JEDEC_ATMEL;
14701                 tg3_flag_set(tp, NVRAM_BUFFERED);
14702                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14703
14704                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14705                 tw32(NVRAM_CFG1, nvcfg1);
14706                 return;
14707         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14708         case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14709         case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14710         case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14711         case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14712         case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14713         case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14714                 tp->nvram_jedecnum = JEDEC_ATMEL;
14715                 tg3_flag_set(tp, NVRAM_BUFFERED);
14716                 tg3_flag_set(tp, FLASH);
14717
14718                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14719                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14720                 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14721                 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14722                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14723                         break;
14724                 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14725                 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14726                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14727                         break;
14728                 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14729                 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14730                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14731                         break;
14732                 }
14733                 break;
14734         case FLASH_5752VENDOR_ST_M45PE10:
14735         case FLASH_5752VENDOR_ST_M45PE20:
14736         case FLASH_5752VENDOR_ST_M45PE40:
14737                 tp->nvram_jedecnum = JEDEC_ST;
14738                 tg3_flag_set(tp, NVRAM_BUFFERED);
14739                 tg3_flag_set(tp, FLASH);
14740
14741                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14742                 case FLASH_5752VENDOR_ST_M45PE10:
14743                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14744                         break;
14745                 case FLASH_5752VENDOR_ST_M45PE20:
14746                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14747                         break;
14748                 case FLASH_5752VENDOR_ST_M45PE40:
14749                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14750                         break;
14751                 }
14752                 break;
14753         default:
14754                 tg3_flag_set(tp, NO_NVRAM);
14755                 return;
14756         }
14757
14758         tg3_nvram_get_pagesize(tp, nvcfg1);
14759         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14760                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14761 }
14762
14764 static void tg3_get_5717_nvram_info(struct tg3 *tp)
14765 {
14766         u32 nvcfg1;
14767
14768         nvcfg1 = tr32(NVRAM_CFG1);
14769
14770         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14771         case FLASH_5717VENDOR_ATMEL_EEPROM:
14772         case FLASH_5717VENDOR_MICRO_EEPROM:
14773                 tp->nvram_jedecnum = JEDEC_ATMEL;
14774                 tg3_flag_set(tp, NVRAM_BUFFERED);
14775                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14776
14777                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14778                 tw32(NVRAM_CFG1, nvcfg1);
14779                 return;
14780         case FLASH_5717VENDOR_ATMEL_MDB011D:
14781         case FLASH_5717VENDOR_ATMEL_ADB011B:
14782         case FLASH_5717VENDOR_ATMEL_ADB011D:
14783         case FLASH_5717VENDOR_ATMEL_MDB021D:
14784         case FLASH_5717VENDOR_ATMEL_ADB021B:
14785         case FLASH_5717VENDOR_ATMEL_ADB021D:
14786         case FLASH_5717VENDOR_ATMEL_45USPT:
14787                 tp->nvram_jedecnum = JEDEC_ATMEL;
14788                 tg3_flag_set(tp, NVRAM_BUFFERED);
14789                 tg3_flag_set(tp, FLASH);
14790
14791                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14792                 case FLASH_5717VENDOR_ATMEL_MDB021D:
14793                         /* Detect size with tg3_get_nvram_size() */
14794                         break;
14795                 case FLASH_5717VENDOR_ATMEL_ADB021B:
14796                 case FLASH_5717VENDOR_ATMEL_ADB021D:
14797                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14798                         break;
14799                 default:
14800                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14801                         break;
14802                 }
14803                 break;
14804         case FLASH_5717VENDOR_ST_M_M25PE10:
14805         case FLASH_5717VENDOR_ST_A_M25PE10:
14806         case FLASH_5717VENDOR_ST_M_M45PE10:
14807         case FLASH_5717VENDOR_ST_A_M45PE10:
14808         case FLASH_5717VENDOR_ST_M_M25PE20:
14809         case FLASH_5717VENDOR_ST_A_M25PE20:
14810         case FLASH_5717VENDOR_ST_M_M45PE20:
14811         case FLASH_5717VENDOR_ST_A_M45PE20:
14812         case FLASH_5717VENDOR_ST_25USPT:
14813         case FLASH_5717VENDOR_ST_45USPT:
14814                 tp->nvram_jedecnum = JEDEC_ST;
14815                 tg3_flag_set(tp, NVRAM_BUFFERED);
14816                 tg3_flag_set(tp, FLASH);
14817
14818                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14819                 case FLASH_5717VENDOR_ST_M_M25PE20:
14820                 case FLASH_5717VENDOR_ST_M_M45PE20:
14821                         /* Detect size with tg3_get_nvram_size() */
14822                         break;
14823                 case FLASH_5717VENDOR_ST_A_M25PE20:
14824                 case FLASH_5717VENDOR_ST_A_M45PE20:
14825                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14826                         break;
14827                 default:
14828                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14829                         break;
14830                 }
14831                 break;
14832         default:
14833                 tg3_flag_set(tp, NO_NVRAM);
14834                 return;
14835         }
14836
14837         tg3_nvram_get_pagesize(tp, nvcfg1);
14838         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14839                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14840 }
14841
14842 static void tg3_get_5720_nvram_info(struct tg3 *tp)
14843 {
14844         u32 nvcfg1, nvmpinstrp, nv_status;
14845
14846         nvcfg1 = tr32(NVRAM_CFG1);
14847         nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
14848
14849         if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14850                 if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
14851                         tg3_flag_set(tp, NO_NVRAM);
14852                         return;
14853                 }
14854
14855                 switch (nvmpinstrp) {
14856                 case FLASH_5762_MX25L_100:
14857                 case FLASH_5762_MX25L_200:
14858                 case FLASH_5762_MX25L_400:
14859                 case FLASH_5762_MX25L_800:
14860                 case FLASH_5762_MX25L_160_320:
14861                         tp->nvram_pagesize = 4096;
14862                         tp->nvram_jedecnum = JEDEC_MACRONIX;
14863                         tg3_flag_set(tp, NVRAM_BUFFERED);
14864                         tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14865                         tg3_flag_set(tp, FLASH);
14866                         nv_status = tr32(NVRAM_AUTOSENSE_STATUS);
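                        /* The autosensed device ID encodes the flash
                         * size as a power of two; the shift by
                         * AUTOSENSE_SIZE_IN_MB scales that count up to
                         * a byte total.
                         */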
14867                         tp->nvram_size =
14868                                 ((1 << ((nv_status >> AUTOSENSE_DEVID) &
14869                                         AUTOSENSE_DEVID_MASK))
14870                                  << AUTOSENSE_SIZE_IN_MB);
14871                         return;
14872
14873                 case FLASH_5762_EEPROM_HD:
14874                         nvmpinstrp = FLASH_5720_EEPROM_HD;
14875                         break;
14876                 case FLASH_5762_EEPROM_LD:
14877                         nvmpinstrp = FLASH_5720_EEPROM_LD;
14878                         break;
14879                 case FLASH_5720VENDOR_M_ST_M45PE20:
14880                         /* This pinstrap supports multiple sizes, so force it
14881                          * to read the actual size from location 0xf0.
14882                          */
14883                         nvmpinstrp = FLASH_5720VENDOR_ST_45USPT;
14884                         break;
14885                 }
14886         }
14887
14888         switch (nvmpinstrp) {
14889         case FLASH_5720_EEPROM_HD:
14890         case FLASH_5720_EEPROM_LD:
14891                 tp->nvram_jedecnum = JEDEC_ATMEL;
14892                 tg3_flag_set(tp, NVRAM_BUFFERED);
14893
14894                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14895                 tw32(NVRAM_CFG1, nvcfg1);
14896                 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
14897                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14898                 else
14899                         tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
14900                 return;
14901         case FLASH_5720VENDOR_M_ATMEL_DB011D:
14902         case FLASH_5720VENDOR_A_ATMEL_DB011B:
14903         case FLASH_5720VENDOR_A_ATMEL_DB011D:
14904         case FLASH_5720VENDOR_M_ATMEL_DB021D:
14905         case FLASH_5720VENDOR_A_ATMEL_DB021B:
14906         case FLASH_5720VENDOR_A_ATMEL_DB021D:
14907         case FLASH_5720VENDOR_M_ATMEL_DB041D:
14908         case FLASH_5720VENDOR_A_ATMEL_DB041B:
14909         case FLASH_5720VENDOR_A_ATMEL_DB041D:
14910         case FLASH_5720VENDOR_M_ATMEL_DB081D:
14911         case FLASH_5720VENDOR_A_ATMEL_DB081D:
14912         case FLASH_5720VENDOR_ATMEL_45USPT:
14913                 tp->nvram_jedecnum = JEDEC_ATMEL;
14914                 tg3_flag_set(tp, NVRAM_BUFFERED);
14915                 tg3_flag_set(tp, FLASH);
14916
14917                 switch (nvmpinstrp) {
14918                 case FLASH_5720VENDOR_M_ATMEL_DB021D:
14919                 case FLASH_5720VENDOR_A_ATMEL_DB021B:
14920                 case FLASH_5720VENDOR_A_ATMEL_DB021D:
14921                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14922                         break;
14923                 case FLASH_5720VENDOR_M_ATMEL_DB041D:
14924                 case FLASH_5720VENDOR_A_ATMEL_DB041B:
14925                 case FLASH_5720VENDOR_A_ATMEL_DB041D:
14926                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14927                         break;
14928                 case FLASH_5720VENDOR_M_ATMEL_DB081D:
14929                 case FLASH_5720VENDOR_A_ATMEL_DB081D:
14930                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14931                         break;
14932                 default:
14933                         if (tg3_asic_rev(tp) != ASIC_REV_5762)
14934                                 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14935                         break;
14936                 }
14937                 break;
14938         case FLASH_5720VENDOR_M_ST_M25PE10:
14939         case FLASH_5720VENDOR_M_ST_M45PE10:
14940         case FLASH_5720VENDOR_A_ST_M25PE10:
14941         case FLASH_5720VENDOR_A_ST_M45PE10:
14942         case FLASH_5720VENDOR_M_ST_M25PE20:
14943         case FLASH_5720VENDOR_M_ST_M45PE20:
14944         case FLASH_5720VENDOR_A_ST_M25PE20:
14945         case FLASH_5720VENDOR_A_ST_M45PE20:
14946         case FLASH_5720VENDOR_M_ST_M25PE40:
14947         case FLASH_5720VENDOR_M_ST_M45PE40:
14948         case FLASH_5720VENDOR_A_ST_M25PE40:
14949         case FLASH_5720VENDOR_A_ST_M45PE40:
14950         case FLASH_5720VENDOR_M_ST_M25PE80:
14951         case FLASH_5720VENDOR_M_ST_M45PE80:
14952         case FLASH_5720VENDOR_A_ST_M25PE80:
14953         case FLASH_5720VENDOR_A_ST_M45PE80:
14954         case FLASH_5720VENDOR_ST_25USPT:
14955         case FLASH_5720VENDOR_ST_45USPT:
14956                 tp->nvram_jedecnum = JEDEC_ST;
14957                 tg3_flag_set(tp, NVRAM_BUFFERED);
14958                 tg3_flag_set(tp, FLASH);
14959
14960                 switch (nvmpinstrp) {
14961                 case FLASH_5720VENDOR_M_ST_M25PE20:
14962                 case FLASH_5720VENDOR_M_ST_M45PE20:
14963                 case FLASH_5720VENDOR_A_ST_M25PE20:
14964                 case FLASH_5720VENDOR_A_ST_M45PE20:
14965                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14966                         break;
14967                 case FLASH_5720VENDOR_M_ST_M25PE40:
14968                 case FLASH_5720VENDOR_M_ST_M45PE40:
14969                 case FLASH_5720VENDOR_A_ST_M25PE40:
14970                 case FLASH_5720VENDOR_A_ST_M45PE40:
14971                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14972                         break;
14973                 case FLASH_5720VENDOR_M_ST_M25PE80:
14974                 case FLASH_5720VENDOR_M_ST_M45PE80:
14975                 case FLASH_5720VENDOR_A_ST_M25PE80:
14976                 case FLASH_5720VENDOR_A_ST_M45PE80:
14977                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14978                         break;
14979                 default:
14980                         if (tg3_asic_rev(tp) != ASIC_REV_5762)
14981                                 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14982                         break;
14983                 }
14984                 break;
14985         default:
14986                 tg3_flag_set(tp, NO_NVRAM);
14987                 return;
14988         }
14989
14990         tg3_nvram_get_pagesize(tp, nvcfg1);
14991         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14992                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14993
14994         if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14995                 u32 val;
14996
14997                 if (tg3_nvram_read(tp, 0, &val))
14998                         return;
14999
15000                 if (val != TG3_EEPROM_MAGIC &&
15001                     (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
15002                         tg3_flag_set(tp, NO_NVRAM);
15003         }
15004 }
15005
15006 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
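/* Reset the EEPROM FSM and enable auto-SEEPROM access, then hand off
 * to the ASIC-specific helper above to identify the part.  SSB GigE
 * cores carry no NVRAM at all and are flagged NO_NVRAM up front.  If
 * none of the helpers filled in tp->nvram_size, fall back to probing
 * the size directly.
 */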
15007 static void tg3_nvram_init(struct tg3 *tp)
15008 {
15009         if (tg3_flag(tp, IS_SSB_CORE)) {
15010                 /* No NVRAM and EEPROM on the SSB Broadcom GigE core. */
15011                 tg3_flag_clear(tp, NVRAM);
15012                 tg3_flag_clear(tp, NVRAM_BUFFERED);
15013                 tg3_flag_set(tp, NO_NVRAM);
15014                 return;
15015         }
15016
15017         tw32_f(GRC_EEPROM_ADDR,
15018              (EEPROM_ADDR_FSM_RESET |
15019               (EEPROM_DEFAULT_CLOCK_PERIOD <<
15020                EEPROM_ADDR_CLKPERD_SHIFT)));
15021
15022         msleep(1);
15023
15024         /* Enable serial-EEPROM (SEEPROM) accesses. */
15025         tw32_f(GRC_LOCAL_CTRL,
15026              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
15027         udelay(100);
15028
15029         if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
15030             tg3_asic_rev(tp) != ASIC_REV_5701) {
15031                 tg3_flag_set(tp, NVRAM);
15032
15033                 if (tg3_nvram_lock(tp)) {
15034                         netdev_warn(tp->dev,
15035                                     "Cannot get nvram lock, %s failed\n",
15036                                     __func__);
15037                         return;
15038                 }
15039                 tg3_enable_nvram_access(tp);
15040
15041                 tp->nvram_size = 0;
15042
15043                 if (tg3_asic_rev(tp) == ASIC_REV_5752)
15044                         tg3_get_5752_nvram_info(tp);
15045                 else if (tg3_asic_rev(tp) == ASIC_REV_5755)
15046                         tg3_get_5755_nvram_info(tp);
15047                 else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
15048                          tg3_asic_rev(tp) == ASIC_REV_5784 ||
15049                          tg3_asic_rev(tp) == ASIC_REV_5785)
15050                         tg3_get_5787_nvram_info(tp);
15051                 else if (tg3_asic_rev(tp) == ASIC_REV_5761)
15052                         tg3_get_5761_nvram_info(tp);
15053                 else if (tg3_asic_rev(tp) == ASIC_REV_5906)
15054                         tg3_get_5906_nvram_info(tp);
15055                 else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
15056                          tg3_flag(tp, 57765_CLASS))
15057                         tg3_get_57780_nvram_info(tp);
15058                 else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15059                          tg3_asic_rev(tp) == ASIC_REV_5719)
15060                         tg3_get_5717_nvram_info(tp);
15061                 else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
15062                          tg3_asic_rev(tp) == ASIC_REV_5762)
15063                         tg3_get_5720_nvram_info(tp);
15064                 else
15065                         tg3_get_nvram_info(tp);
15066
15067                 if (tp->nvram_size == 0)
15068                         tg3_get_nvram_size(tp);
15069
15070                 tg3_disable_nvram_access(tp);
15071                 tg3_nvram_unlock(tp);
15072
15073         } else {
15074                 tg3_flag_clear(tp, NVRAM);
15075                 tg3_flag_clear(tp, NVRAM_BUFFERED);
15076
15077                 tg3_get_eeprom_size(tp);
15078         }
15079 }
15080
15081 struct subsys_tbl_ent {
15082         u16 subsys_vendor, subsys_devid;
15083         u32 phy_id;
15084 };
15085
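/* Map PCI subsystem IDs to the PHY fitted on known OEM boards.
 * tg3_lookup_by_subsys() below consults this table as a fallback when
 * the PHY ID cannot be determined from the hardware; a phy_id of 0
 * denotes a fiber/SerDes board with no copper PHY.
 */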
15086 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
15087         /* Broadcom boards. */
15088         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15089           TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
15090         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15091           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
15092         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15093           TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
15094         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15095           TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
15096         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15097           TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
15098         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15099           TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
15100         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15101           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
15102         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15103           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
15104         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15105           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
15106         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15107           TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
15108         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15109           TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
15110
15111         /* 3com boards. */
15112         { TG3PCI_SUBVENDOR_ID_3COM,
15113           TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
15114         { TG3PCI_SUBVENDOR_ID_3COM,
15115           TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
15116         { TG3PCI_SUBVENDOR_ID_3COM,
15117           TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
15118         { TG3PCI_SUBVENDOR_ID_3COM,
15119           TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
15120         { TG3PCI_SUBVENDOR_ID_3COM,
15121           TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
15122
15123         /* DELL boards. */
15124         { TG3PCI_SUBVENDOR_ID_DELL,
15125           TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
15126         { TG3PCI_SUBVENDOR_ID_DELL,
15127           TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
15128         { TG3PCI_SUBVENDOR_ID_DELL,
15129           TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
15130         { TG3PCI_SUBVENDOR_ID_DELL,
15131           TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
15132
15133         /* Compaq boards. */
15134         { TG3PCI_SUBVENDOR_ID_COMPAQ,
15135           TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
15136         { TG3PCI_SUBVENDOR_ID_COMPAQ,
15137           TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
15138         { TG3PCI_SUBVENDOR_ID_COMPAQ,
15139           TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
15140         { TG3PCI_SUBVENDOR_ID_COMPAQ,
15141           TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
15142         { TG3PCI_SUBVENDOR_ID_COMPAQ,
15143           TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
15144
15145         /* IBM boards. */
15146         { TG3PCI_SUBVENDOR_ID_IBM,
15147           TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
15148 };
15149
15150 static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
15151 {
15152         int i;
15153
15154         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
15155                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
15156                      tp->pdev->subsystem_vendor) &&
15157                     (subsys_id_to_phy_id[i].subsys_devid ==
15158                      tp->pdev->subsystem_device))
15159                         return &subsys_id_to_phy_id[i];
15160         }
15161         return NULL;
15162 }
15163
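/* Pull the manufacturing configuration out of NIC SRAM (or, on the
 * 5906, the VCPU shadow registers) and translate it into phy_flags,
 * LED mode, WOL and ASF/APE feature flags.  The SRAM data is trusted
 * only if the signature matches NIC_SRAM_DATA_SIG_MAGIC.
 */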
15164 static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
15165 {
15166         u32 val;
15167
15168         tp->phy_id = TG3_PHY_ID_INVALID;
15169         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15170
15171         /* Assume an onboard, WOL-capable device by default. */
15172         tg3_flag_set(tp, EEPROM_WRITE_PROT);
15173         tg3_flag_set(tp, WOL_CAP);
15174
15175         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15176                 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
15177                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15178                         tg3_flag_set(tp, IS_NIC);
15179                 }
15180                 val = tr32(VCPU_CFGSHDW);
15181                 if (val & VCPU_CFGSHDW_ASPM_DBNC)
15182                         tg3_flag_set(tp, ASPM_WORKAROUND);
15183                 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
15184                     (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
15185                         tg3_flag_set(tp, WOL_ENABLE);
15186                         device_set_wakeup_enable(&tp->pdev->dev, true);
15187                 }
15188                 goto done;
15189         }
15190
15191         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
15192         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
15193                 u32 nic_cfg, led_cfg;
15194                 u32 cfg2 = 0, cfg4 = 0, cfg5 = 0;
15195                 u32 nic_phy_id, ver, eeprom_phy_id;
15196                 int eeprom_phy_serdes = 0;
15197
15198                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
15199                 tp->nic_sram_data_cfg = nic_cfg;
15200
15201                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
15202                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
15203                 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
15204                     tg3_asic_rev(tp) != ASIC_REV_5701 &&
15205                     tg3_asic_rev(tp) != ASIC_REV_5703 &&
15206                     (ver > 0) && (ver < 0x100))
15207                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
15208
15209                 if (tg3_asic_rev(tp) == ASIC_REV_5785)
15210                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
15211
15212                 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15213                     tg3_asic_rev(tp) == ASIC_REV_5719 ||
15214                     tg3_asic_rev(tp) == ASIC_REV_5720)
15215                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_5, &cfg5);
15216
15217                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
15218                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
15219                         eeprom_phy_serdes = 1;
15220
15221                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
15222                 if (nic_phy_id != 0) {
15223                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
15224                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
15225
15226                         eeprom_phy_id  = (id1 >> 16) << 10;
15227                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
15228                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
15229                 } else
15230                         eeprom_phy_id = 0;
15231
15232                 tp->phy_id = eeprom_phy_id;
15233                 if (eeprom_phy_serdes) {
15234                         if (!tg3_flag(tp, 5705_PLUS))
15235                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15236                         else
15237                                 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
15238                 }
15239
15240                 if (tg3_flag(tp, 5750_PLUS))
15241                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
15242                                     SHASTA_EXT_LED_MODE_MASK);
15243                 else
15244                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
15245
15246                 switch (led_cfg) {
15247                 default:
15248                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
15249                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15250                         break;
15251
15252                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
15253                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
15254                         break;
15255
15256                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
15257                         tp->led_ctrl = LED_CTRL_MODE_MAC;
15258
15259                         /* Some older 5700/5701 bootcode reports 0
15260                          * (MAC_MODE); default to PHY_1_MODE there.
15261                          */
15262                         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15263                             tg3_asic_rev(tp) == ASIC_REV_5701)
15264                                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15265
15266                         break;
15267
15268                 case SHASTA_EXT_LED_SHARED:
15269                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
15270                         if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
15271                             tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
15272                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
15273                                                  LED_CTRL_MODE_PHY_2);
15274
15275                         if (tg3_flag(tp, 5717_PLUS) ||
15276                             tg3_asic_rev(tp) == ASIC_REV_5762)
15277                                 tp->led_ctrl |= LED_CTRL_BLINK_RATE_OVERRIDE |
15278                                                 LED_CTRL_BLINK_RATE_MASK;
15279
15280                         break;
15281
15282                 case SHASTA_EXT_LED_MAC:
15283                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
15284                         break;
15285
15286                 case SHASTA_EXT_LED_COMBO:
15287                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
15288                         if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
15289                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
15290                                                  LED_CTRL_MODE_PHY_2);
15291                         break;
15292
15293                 }
15294
15295                 if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
15296                      tg3_asic_rev(tp) == ASIC_REV_5701) &&
15297                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
15298                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
15299
15300                 if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
15301                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15302
15303                 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
15304                         tg3_flag_set(tp, EEPROM_WRITE_PROT);
15305                         if ((tp->pdev->subsystem_vendor ==
15306                              PCI_VENDOR_ID_ARIMA) &&
15307                             (tp->pdev->subsystem_device == 0x205a ||
15308                              tp->pdev->subsystem_device == 0x2063))
15309                                 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15310                 } else {
15311                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15312                         tg3_flag_set(tp, IS_NIC);
15313                 }
15314
15315                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
15316                         tg3_flag_set(tp, ENABLE_ASF);
15317                         if (tg3_flag(tp, 5750_PLUS))
15318                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
15319                 }
15320
15321                 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
15322                     tg3_flag(tp, 5750_PLUS))
15323                         tg3_flag_set(tp, ENABLE_APE);
15324
15325                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
15326                     !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
15327                         tg3_flag_clear(tp, WOL_CAP);
15328
15329                 if (tg3_flag(tp, WOL_CAP) &&
15330                     (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
15331                         tg3_flag_set(tp, WOL_ENABLE);
15332                         device_set_wakeup_enable(&tp->pdev->dev, true);
15333                 }
15334
15335                 if (cfg2 & (1 << 17))
15336                         tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
15337
15338                 /* SerDes signal pre-emphasis in register 0x590 is set by
15339                  * the bootcode if bit 18 is set. */
15340                 if (cfg2 & (1 << 18))
15341                         tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
15342
15343                 if ((tg3_flag(tp, 57765_PLUS) ||
15344                      (tg3_asic_rev(tp) == ASIC_REV_5784 &&
15345                       tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
15346                     (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
15347                         tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
15348
15349                 if (tg3_flag(tp, PCI_EXPRESS)) {
15350                         u32 cfg3;
15351
15352                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
15353                         if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
15354                             !tg3_flag(tp, 57765_PLUS) &&
15355                             (cfg3 & NIC_SRAM_ASPM_DEBOUNCE))
15356                                 tg3_flag_set(tp, ASPM_WORKAROUND);
15357                         if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID)
15358                                 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
15359                         if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK)
15360                                 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
15361                 }
15362
15363                 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
15364                         tg3_flag_set(tp, RGMII_INBAND_DISABLE);
15365                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
15366                         tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
15367                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
15368                         tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
15369
15370                 if (cfg5 & NIC_SRAM_DISABLE_1G_HALF_ADV)
15371                         tp->phy_flags |= TG3_PHYFLG_DISABLE_1G_HD_ADV;
15372         }
15373 done:
15374         if (tg3_flag(tp, WOL_CAP))
15375                 device_set_wakeup_enable(&tp->pdev->dev,
15376                                          tg3_flag(tp, WOL_ENABLE));
15377         else
15378                 device_set_wakeup_capable(&tp->pdev->dev, false);
15379 }
15380
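      /* Read one 32-bit word from the APE OTP region.  The NVRAM lock is
       * held across the transaction, and completion is polled for up to
       * ~1 ms.  Returns 0 on success, -EBUSY if the command never
       * completes, or the tg3_nvram_lock() error.
       */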
15381 static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
15382 {
15383         int i, err;
15384         u32 val2, off = offset * 8;
15385
15386         err = tg3_nvram_lock(tp);
15387         if (err)
15388                 return err;
15389
15390         tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
15391         tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
15392                         APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
15393         tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
15394         udelay(10);
15395
15396         for (i = 0; i < 100; i++) {
15397                 val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
15398                 if (val2 & APE_OTP_STATUS_CMD_DONE) {
15399                         *val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
15400                         break;
15401                 }
15402                 udelay(10);
15403         }
15404
15405         tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);
15406
15407         tg3_nvram_unlock(tp);
15408         if (val2 & APE_OTP_STATUS_CMD_DONE)
15409                 return 0;
15410
15411         return -EBUSY;
15412 }
15413
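      /* Start @cmd in the on-chip OTP controller and poll up to 1 ms for
       * completion.  Returns 0 on success or -EBUSY on timeout.
       */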
15414 static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
15415 {
15416         int i;
15417         u32 val;
15418
15419         tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
15420         tw32(OTP_CTRL, cmd);
15421
15422         /* Wait for up to 1 ms for command to execute. */
15423         for (i = 0; i < 100; i++) {
15424                 val = tr32(OTP_STATUS);
15425                 if (val & OTP_STATUS_CMD_DONE)
15426                         break;
15427                 udelay(10);
15428         }
15429
15430         return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
15431 }
15432
15433 /* Read the gphy configuration from the OTP region of the chip.  The gphy
15434  * configuration is a 32-bit value that straddles the alignment boundary.
15435  * We do two 32-bit reads and then shift and merge the results.
15436  */
15437 static u32 tg3_read_otp_phycfg(struct tg3 *tp)
15438 {
15439         u32 bhalf_otp, thalf_otp;
15440
15441         tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
15442
15443         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
15444                 return 0;
15445
15446         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
15447
15448         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15449                 return 0;
15450
15451         thalf_otp = tr32(OTP_READ_DATA);
15452
15453         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
15454
15455         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15456                 return 0;
15457
15458         bhalf_otp = tr32(OTP_READ_DATA);
15459
15460         return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
15461 }
15462
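      /* Establish the default link configuration: advertise all modes the
       * PHY supports (trimmed for 10/100-only and serdes devices) and
       * start autonegotiating with speed and duplex unknown.
       */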
15463 static void tg3_phy_init_link_config(struct tg3 *tp)
15464 {
15465         u32 adv = ADVERTISED_Autoneg;
15466
15467         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
15468                 if (!(tp->phy_flags & TG3_PHYFLG_DISABLE_1G_HD_ADV))
15469                         adv |= ADVERTISED_1000baseT_Half;
15470                 adv |= ADVERTISED_1000baseT_Full;
15471         }
15472
15473         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
15474                 adv |= ADVERTISED_100baseT_Half |
15475                        ADVERTISED_100baseT_Full |
15476                        ADVERTISED_10baseT_Half |
15477                        ADVERTISED_10baseT_Full |
15478                        ADVERTISED_TP;
15479         else
15480                 adv |= ADVERTISED_FIBRE;
15481
15482         tp->link_config.advertising = adv;
15483         tp->link_config.speed = SPEED_UNKNOWN;
15484         tp->link_config.duplex = DUPLEX_UNKNOWN;
15485         tp->link_config.autoneg = AUTONEG_ENABLE;
15486         tp->link_config.active_speed = SPEED_UNKNOWN;
15487         tp->link_config.active_duplex = DUPLEX_UNKNOWN;
15488
15489         tp->old_link = -1;
15490 }
15491
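      /* Identify the attached PHY.  The ID is normally read from the MII
       * PHYSID registers; when ASF/APE firmware owns the PHY, or the
       * registers return garbage, fall back to the ID cached from the
       * EEPROM or the hard-coded subsystem-ID table.  Also sets up EEE
       * capability and the default link configuration.
       */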
15492 static int tg3_phy_probe(struct tg3 *tp)
15493 {
15494         u32 hw_phy_id_1, hw_phy_id_2;
15495         u32 hw_phy_id, hw_phy_id_masked;
15496         int err;
15497
15498         /* flow control autonegotiation is the default behavior */
15499         tg3_flag_set(tp, PAUSE_AUTONEG);
15500         tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
15501
15502         if (tg3_flag(tp, ENABLE_APE)) {
15503                 switch (tp->pci_fn) {
15504                 case 0:
15505                         tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
15506                         break;
15507                 case 1:
15508                         tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
15509                         break;
15510                 case 2:
15511                         tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
15512                         break;
15513                 case 3:
15514                         tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
15515                         break;
15516                 }
15517         }
15518
15519         if (!tg3_flag(tp, ENABLE_ASF) &&
15520             !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15521             !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
15522                 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
15523                                    TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
15524
15525         if (tg3_flag(tp, USE_PHYLIB))
15526                 return tg3_phy_init(tp);
15527
15528         /* Reading the PHY ID register can conflict with ASF
15529          * firmware access to the PHY hardware.
15530          */
15531         err = 0;
15532         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
15533                 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
15534         } else {
15535                 /* Now read the physical PHY_ID from the chip and verify
15536                  * that it is sane.  If it doesn't look good, we fall back
15537                  * to the hard-coded table-based PHY_ID or, failing that,
15538                  * to the value found in the eeprom area.
15539                  */
15540                 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
15541                 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
15542
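                      /* Pack the two MII PHYSID words into tg3's internal
                       * PHY ID layout: OUI bits in bits 31:10, model and
                       * revision from PHYSID2 in bits 9:0.
                       */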
15543                 hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
15544                 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
15545                 hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
15546
15547                 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
15548         }
15549
15550         if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
15551                 tp->phy_id = hw_phy_id;
15552                 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
15553                         tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15554                 else
15555                         tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
15556         } else {
15557                 if (tp->phy_id != TG3_PHY_ID_INVALID) {
15558                         /* Do nothing, phy ID already set up in
15559                          * tg3_get_eeprom_hw_cfg().
15560                          */
15561                 } else {
15562                         struct subsys_tbl_ent *p;
15563
15564                         /* No eeprom signature?  Try the hardcoded
15565                          * subsys device table.
15566                          */
15567                         p = tg3_lookup_by_subsys(tp);
15568                         if (p) {
15569                                 tp->phy_id = p->phy_id;
15570                         } else if (!tg3_flag(tp, IS_SSB_CORE)) {
15571                                 /* So far we have seen the IDs 0xbc050cd0,
15572                                  * 0xbc050f80 and 0xbc050c30 on devices
15573                                  * connected to a BCM4785, and there are
15574                                  * probably more.  For now, just assume
15575                                  * that the PHY is supported when it is
15576                                  * connected to an SSB core.
15577                                  */
15578                                 return -ENODEV;
15579                         }
15580
15581                         if (!tp->phy_id ||
15582                             tp->phy_id == TG3_PHY_ID_BCM8002)
15583                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15584                 }
15585         }
15586
15587         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15588             (tg3_asic_rev(tp) == ASIC_REV_5719 ||
15589              tg3_asic_rev(tp) == ASIC_REV_5720 ||
15590              tg3_asic_rev(tp) == ASIC_REV_57766 ||
15591              tg3_asic_rev(tp) == ASIC_REV_5762 ||
15592              (tg3_asic_rev(tp) == ASIC_REV_5717 &&
15593               tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
15594              (tg3_asic_rev(tp) == ASIC_REV_57765 &&
15595               tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) {
15596                 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
15597
15598                 tp->eee.supported = SUPPORTED_100baseT_Full |
15599                                     SUPPORTED_1000baseT_Full;
15600                 tp->eee.advertised = ADVERTISED_100baseT_Full |
15601                                      ADVERTISED_1000baseT_Full;
15602                 tp->eee.eee_enabled = 1;
15603                 tp->eee.tx_lpi_enabled = 1;
15604                 tp->eee.tx_lpi_timer = TG3_CPMU_DBTMR1_LNKIDLE_2047US;
15605         }
15606
15607         tg3_phy_init_link_config(tp);
15608
15609         if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
15610             !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15611             !tg3_flag(tp, ENABLE_APE) &&
15612             !tg3_flag(tp, ENABLE_ASF)) {
15613                 u32 bmsr, dummy;
15614
15615                 tg3_readphy(tp, MII_BMSR, &bmsr);
15616                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
15617                     (bmsr & BMSR_LSTATUS))
15618                         goto skip_phy_reset;
15619
15620                 err = tg3_phy_reset(tp);
15621                 if (err)
15622                         return err;
15623
15624                 tg3_phy_set_wirespeed(tp);
15625
15626                 if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
15627                         tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
15628                                             tp->link_config.flowctrl);
15629
15630                         tg3_writephy(tp, MII_BMCR,
15631                                      BMCR_ANENABLE | BMCR_ANRESTART);
15632                 }
15633         }
15634
15635 skip_phy_reset:
15636         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
15637                 err = tg3_init_5401phy_dsp(tp);
15638                 if (err)
15639                         return err;
15640
15641                 err = tg3_init_5401phy_dsp(tp);
15642         }
15643
15644         return err;
15645 }
15646
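      /* Extract the board part number (and, on boards whose VPD
       * manufacturer ID is "1028", i.e. Dell, the firmware version) from
       * the PCI VPD read-only section.  Without usable VPD, fall back to
       * a name derived from the PCI device ID.
       */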
15647 static void tg3_read_vpd(struct tg3 *tp)
15648 {
15649         u8 *vpd_data;
15650         unsigned int block_end, rosize, len;
15651         u32 vpdlen;
15652         int j, i = 0;
15653
15654         vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
15655         if (!vpd_data)
15656                 goto out_no_vpd;
15657
15658         i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
15659         if (i < 0)
15660                 goto out_not_found;
15661
15662         rosize = pci_vpd_lrdt_size(&vpd_data[i]);
15663         block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
15664         i += PCI_VPD_LRDT_TAG_SIZE;
15665
15666         if (block_end > vpdlen)
15667                 goto out_not_found;
15668
15669         j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15670                                       PCI_VPD_RO_KEYWORD_MFR_ID);
15671         if (j > 0) {
15672                 len = pci_vpd_info_field_size(&vpd_data[j]);
15673
15674                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
15675                 if (j + len > block_end || len != 4 ||
15676                     memcmp(&vpd_data[j], "1028", 4))
15677                         goto partno;
15678
15679                 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15680                                               PCI_VPD_RO_KEYWORD_VENDOR0);
15681                 if (j < 0)
15682                         goto partno;
15683
15684                 len = pci_vpd_info_field_size(&vpd_data[j]);
15685
15686                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
15687                 if (j + len > block_end)
15688                         goto partno;
15689
15690                 if (len >= sizeof(tp->fw_ver))
15691                         len = sizeof(tp->fw_ver) - 1;
15692                 memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
15693                 snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len,
15694                          &vpd_data[j]);
15695         }
15696
15697 partno:
15698         i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15699                                       PCI_VPD_RO_KEYWORD_PARTNO);
15700         if (i < 0)
15701                 goto out_not_found;
15702
15703         len = pci_vpd_info_field_size(&vpd_data[i]);
15704
15705         i += PCI_VPD_INFO_FLD_HDR_SIZE;
15706         if (len > TG3_BPN_SIZE ||
15707             (len + i) > vpdlen)
15708                 goto out_not_found;
15709
15710         memcpy(tp->board_part_number, &vpd_data[i], len);
15711
15712 out_not_found:
15713         kfree(vpd_data);
15714         if (tp->board_part_number[0])
15715                 return;
15716
15717 out_no_vpd:
15718         if (tg3_asic_rev(tp) == ASIC_REV_5717) {
15719                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15720                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
15721                         strcpy(tp->board_part_number, "BCM5717");
15722                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
15723                         strcpy(tp->board_part_number, "BCM5718");
15724                 else
15725                         goto nomatch;
15726         } else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
15727                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
15728                         strcpy(tp->board_part_number, "BCM57780");
15729                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
15730                         strcpy(tp->board_part_number, "BCM57760");
15731                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
15732                         strcpy(tp->board_part_number, "BCM57790");
15733                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
15734                         strcpy(tp->board_part_number, "BCM57788");
15735                 else
15736                         goto nomatch;
15737         } else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
15738                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
15739                         strcpy(tp->board_part_number, "BCM57761");
15740                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
15741                         strcpy(tp->board_part_number, "BCM57765");
15742                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
15743                         strcpy(tp->board_part_number, "BCM57781");
15744                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
15745                         strcpy(tp->board_part_number, "BCM57785");
15746                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
15747                         strcpy(tp->board_part_number, "BCM57791");
15748                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
15749                         strcpy(tp->board_part_number, "BCM57795");
15750                 else
15751                         goto nomatch;
15752         } else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
15753                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
15754                         strcpy(tp->board_part_number, "BCM57762");
15755                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
15756                         strcpy(tp->board_part_number, "BCM57766");
15757                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
15758                         strcpy(tp->board_part_number, "BCM57782");
15759                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
15760                         strcpy(tp->board_part_number, "BCM57786");
15761                 else
15762                         goto nomatch;
15763         } else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15764                 strcpy(tp->board_part_number, "BCM95906");
15765         } else {
15766 nomatch:
15767                 strcpy(tp->board_part_number, "none");
15768         }
15769 }
15770
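      /* An NVRAM firmware image is considered valid if the word at
       * @offset carries the 0x0c000000 signature and the word after it
       * is zero.
       */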
15771 static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
15772 {
15773         u32 val;
15774
15775         if (tg3_nvram_read(tp, offset, &val) ||
15776             (val & 0xfc000000) != 0x0c000000 ||
15777             tg3_nvram_read(tp, offset + 4, &val) ||
15778             val != 0)
15779                 return 0;
15780
15781         return 1;
15782 }
15783
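      /* Append the bootcode version to tp->fw_ver.  Newer images embed a
       * 16-byte version string; older ones only store major/minor
       * numbers at TG3_NVM_PTREV_BCVER.
       */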
15784 static void tg3_read_bc_ver(struct tg3 *tp)
15785 {
15786         u32 val, offset, start, ver_offset;
15787         int i, dst_off;
15788         bool newver = false;
15789
15790         if (tg3_nvram_read(tp, 0xc, &offset) ||
15791             tg3_nvram_read(tp, 0x4, &start))
15792                 return;
15793
15794         offset = tg3_nvram_logical_addr(tp, offset);
15795
15796         if (tg3_nvram_read(tp, offset, &val))
15797                 return;
15798
15799         if ((val & 0xfc000000) == 0x0c000000) {
15800                 if (tg3_nvram_read(tp, offset + 4, &val))
15801                         return;
15802
15803                 if (val == 0)
15804                         newver = true;
15805         }
15806
15807         dst_off = strlen(tp->fw_ver);
15808
15809         if (newver) {
15810                 if (TG3_VER_SIZE - dst_off < 16 ||
15811                     tg3_nvram_read(tp, offset + 8, &ver_offset))
15812                         return;
15813
15814                 offset = offset + ver_offset - start;
15815                 for (i = 0; i < 16; i += 4) {
15816                         __be32 v;
15817                         if (tg3_nvram_read_be32(tp, offset + i, &v))
15818                                 return;
15819
15820                         memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
15821                 }
15822         } else {
15823                 u32 major, minor;
15824
15825                 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
15826                         return;
15827
15828                 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
15829                         TG3_NVM_BCVER_MAJSFT;
15830                 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
15831                 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
15832                          "v%d.%02d", major, minor);
15833         }
15834 }
15835
15836 static void tg3_read_hwsb_ver(struct tg3 *tp)
15837 {
15838         u32 val, major, minor;
15839
15840         /* Use native endian representation */
15841         if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
15842                 return;
15843
15844         major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
15845                 TG3_NVM_HWSB_CFG1_MAJSFT;
15846         minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
15847                 TG3_NVM_HWSB_CFG1_MINSFT;
15848
15849         snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
15850 }
15851
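      /* Decode the version of a format-1 selfboot NVRAM image.  The
       * offset of the word holding major/minor/build varies with the
       * image revision.
       */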
15852 static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
15853 {
15854         u32 offset, major, minor, build;
15855
15856         strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
15857
15858         if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
15859                 return;
15860
15861         switch (val & TG3_EEPROM_SB_REVISION_MASK) {
15862         case TG3_EEPROM_SB_REVISION_0:
15863                 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
15864                 break;
15865         case TG3_EEPROM_SB_REVISION_2:
15866                 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
15867                 break;
15868         case TG3_EEPROM_SB_REVISION_3:
15869                 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
15870                 break;
15871         case TG3_EEPROM_SB_REVISION_4:
15872                 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
15873                 break;
15874         case TG3_EEPROM_SB_REVISION_5:
15875                 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
15876                 break;
15877         case TG3_EEPROM_SB_REVISION_6:
15878                 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
15879                 break;
15880         default:
15881                 return;
15882         }
15883
15884         if (tg3_nvram_read(tp, offset, &val))
15885                 return;
15886
15887         build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
15888                 TG3_EEPROM_SB_EDH_BLD_SHFT;
15889         major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
15890                 TG3_EEPROM_SB_EDH_MAJ_SHFT;
15891         minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;
15892
15893         if (minor > 99 || build > 26)
15894                 return;
15895
15896         offset = strlen(tp->fw_ver);
15897         snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
15898                  " v%d.%02d", major, minor);
15899
15900         if (build > 0) {
15901                 offset = strlen(tp->fw_ver);
15902                 if (offset < TG3_VER_SIZE - 1)
15903                         tp->fw_ver[offset] = 'a' + build - 1;
15904         }
15905 }
15906
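      /* Walk the NVRAM directory looking for an ASF management firmware
       * entry and, if the image validates, append its version string to
       * tp->fw_ver.
       */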
15907 static void tg3_read_mgmtfw_ver(struct tg3 *tp)
15908 {
15909         u32 val, offset, start;
15910         int i, vlen;
15911
15912         for (offset = TG3_NVM_DIR_START;
15913              offset < TG3_NVM_DIR_END;
15914              offset += TG3_NVM_DIRENT_SIZE) {
15915                 if (tg3_nvram_read(tp, offset, &val))
15916                         return;
15917
15918                 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
15919                         break;
15920         }
15921
15922         if (offset == TG3_NVM_DIR_END)
15923                 return;
15924
15925         if (!tg3_flag(tp, 5705_PLUS))
15926                 start = 0x08000000;
15927         else if (tg3_nvram_read(tp, offset - 4, &start))
15928                 return;
15929
15930         if (tg3_nvram_read(tp, offset + 4, &offset) ||
15931             !tg3_fw_img_is_valid(tp, offset) ||
15932             tg3_nvram_read(tp, offset + 8, &val))
15933                 return;
15934
15935         offset += val - start;
15936
15937         vlen = strlen(tp->fw_ver);
15938
15939         tp->fw_ver[vlen++] = ',';
15940         tp->fw_ver[vlen++] = ' ';
15941
15942         for (i = 0; i < 4; i++) {
15943                 __be32 v;
15944                 if (tg3_nvram_read_be32(tp, offset, &v))
15945                         return;
15946
15947                 offset += sizeof(v);
15948
15949                 if (vlen > TG3_VER_SIZE - sizeof(v)) {
15950                         memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
15951                         break;
15952                 }
15953
15954                 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
15955                 vlen += sizeof(v);
15956         }
15957 }
15958
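      /* Set APE_HAS_NCSI if the APE firmware is up and advertises the
       * NC-SI feature bit.
       */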
15959 static void tg3_probe_ncsi(struct tg3 *tp)
15960 {
15961         u32 apedata;
15962
15963         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
15964         if (apedata != APE_SEG_SIG_MAGIC)
15965                 return;
15966
15967         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
15968         if (!(apedata & APE_FW_STATUS_READY))
15969                 return;
15970
15971         if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
15972                 tg3_flag_set(tp, APE_HAS_NCSI);
15973 }
15974
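      /* Append the out-of-band management firmware version (NCSI, SMASH
       * or DASH, depending on the device) read from the APE.
       */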
15975 static void tg3_read_dash_ver(struct tg3 *tp)
15976 {
15977         int vlen;
15978         u32 apedata;
15979         char *fwtype;
15980
15981         apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
15982
15983         if (tg3_flag(tp, APE_HAS_NCSI))
15984                 fwtype = "NCSI";
15985         else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
15986                 fwtype = "SMASH";
15987         else
15988                 fwtype = "DASH";
15989
15990         vlen = strlen(tp->fw_ver);
15991
15992         snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
15993                  fwtype,
15994                  (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
15995                  (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
15996                  (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
15997                  (apedata & APE_FW_VERSION_BLDMSK));
15998 }
15999
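      /* On the 5762, a version byte may be stored in OTP; if the OTP
       * magic validates, append that version to tp->fw_ver.
       */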
16000 static void tg3_read_otp_ver(struct tg3 *tp)
16001 {
16002         u32 val, val2;
16003
16004         if (tg3_asic_rev(tp) != ASIC_REV_5762)
16005                 return;
16006
16007         if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
16008             !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
16009             TG3_OTP_MAGIC0_VALID(val)) {
16010                 u64 val64 = (u64) val << 32 | val2;
16011                 u32 ver = 0;
16012                 int i, vlen;
16013
16014                 for (i = 0; i < 7; i++) {
16015                         if ((val64 & 0xff) == 0)
16016                                 break;
16017                         ver = val64 & 0xff;
16018                         val64 >>= 8;
16019                 }
16020                 vlen = strlen(tp->fw_ver);
16021                 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
16022         }
16023 }
16024
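      /* Assemble the composite firmware version string: any version
       * already read from VPD, the bootcode or selfboot version from
       * NVRAM, and the ASF/APE management firmware version.
       */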
16025 static void tg3_read_fw_ver(struct tg3 *tp)
16026 {
16027         u32 val;
16028         bool vpd_vers = false;
16029
16030         if (tp->fw_ver[0] != 0)
16031                 vpd_vers = true;
16032
16033         if (tg3_flag(tp, NO_NVRAM)) {
16034                 strcat(tp->fw_ver, "sb");
16035                 tg3_read_otp_ver(tp);
16036                 return;
16037         }
16038
16039         if (tg3_nvram_read(tp, 0, &val))
16040                 return;
16041
16042         if (val == TG3_EEPROM_MAGIC)
16043                 tg3_read_bc_ver(tp);
16044         else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
16045                 tg3_read_sb_ver(tp, val);
16046         else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
16047                 tg3_read_hwsb_ver(tp);
16048
16049         if (tg3_flag(tp, ENABLE_ASF)) {
16050                 if (tg3_flag(tp, ENABLE_APE)) {
16051                         tg3_probe_ncsi(tp);
16052                         if (!vpd_vers)
16053                                 tg3_read_dash_ver(tp);
16054                 } else if (!vpd_vers) {
16055                         tg3_read_mgmtfw_ver(tp);
16056                 }
16057         }
16058
16059         tp->fw_ver[TG3_VER_SIZE - 1] = 0;
16060 }
16061
16062 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
16063 {
16064         if (tg3_flag(tp, LRG_PROD_RING_CAP))
16065                 return TG3_RX_RET_MAX_SIZE_5717;
16066         else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
16067                 return TG3_RX_RET_MAX_SIZE_5700;
16068         else
16069                 return TG3_RX_RET_MAX_SIZE_5705;
16070 }
16071
16072 static const struct pci_device_id tg3_write_reorder_chipsets[] = {
16073         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
16074         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
16075         { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
16076         { },
16077 };
16078
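      /* Locate the other PCI function of a two-port (5704/5714) device
       * by scanning the remaining functions in this slot.  Returns
       * tp->pdev itself for single-port configurations.
       */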
16079 static struct pci_dev *tg3_find_peer(struct tg3 *tp)
16080 {
16081         struct pci_dev *peer;
16082         unsigned int func, devnr = tp->pdev->devfn & ~7;
16083
16084         for (func = 0; func < 8; func++) {
16085                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
16086                 if (peer && peer != tp->pdev)
16087                         break;
16088                 pci_dev_put(peer);
16089         }
16090         /* 5704 can be configured in single-port mode; set peer to
16091          * tp->pdev in that case.
16092          */
16093         if (!peer) {
16094                 peer = tp->pdev;
16095                 return peer;
16096         }
16097
16098         /*
16099          * We don't need to keep the refcount elevated; there's no way
16100          * to remove one half of this device without removing the other.
16101          */
16102         pci_dev_put(peer);
16103
16104         return peer;
16105 }
16106
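      /* Derive the chip revision ID.  Older chips encode it in the misc
       * host control register; newer ones use a product ID register
       * whose location depends on the PCI device ID.  Also sets the
       * ASIC-generation flags (5705_PLUS, 5750_PLUS, ...) used
       * throughout the driver.
       */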
16107 static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
16108 {
16109         tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
16110         if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
16111                 u32 reg;
16112
16113                 /* All devices that use the alternate
16114                  * ASIC REV location have a CPMU.
16115                  */
16116                 tg3_flag_set(tp, CPMU_PRESENT);
16117
16118                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
16119                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
16120                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
16121                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
16122                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
16123                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
16124                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
16125                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
16126                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
16127                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
16128                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787)
16129                         reg = TG3PCI_GEN2_PRODID_ASICREV;
16130                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
16131                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
16132                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
16133                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
16134                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
16135                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
16136                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
16137                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
16138                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
16139                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
16140                         reg = TG3PCI_GEN15_PRODID_ASICREV;
16141                 else
16142                         reg = TG3PCI_PRODID_ASICREV;
16143
16144                 pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
16145         }
16146
16147         /* Wrong chip ID in 5752 A0. This code can be removed later
16148          * as A0 is not in production.
16149          */
16150         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
16151                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
16152
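              /* The 5717 C0 apparently shares its feature set with the
               * 5720 A0; report it as such to the rest of the driver.
               */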
16153         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
16154                 tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;
16155
16156         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16157             tg3_asic_rev(tp) == ASIC_REV_5719 ||
16158             tg3_asic_rev(tp) == ASIC_REV_5720)
16159                 tg3_flag_set(tp, 5717_PLUS);
16160
16161         if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
16162             tg3_asic_rev(tp) == ASIC_REV_57766)
16163                 tg3_flag_set(tp, 57765_CLASS);
16164
16165         if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
16166              tg3_asic_rev(tp) == ASIC_REV_5762)
16167                 tg3_flag_set(tp, 57765_PLUS);
16168
16169         /* Intentionally exclude ASIC_REV_5906 */
16170         if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16171             tg3_asic_rev(tp) == ASIC_REV_5787 ||
16172             tg3_asic_rev(tp) == ASIC_REV_5784 ||
16173             tg3_asic_rev(tp) == ASIC_REV_5761 ||
16174             tg3_asic_rev(tp) == ASIC_REV_5785 ||
16175             tg3_asic_rev(tp) == ASIC_REV_57780 ||
16176             tg3_flag(tp, 57765_PLUS))
16177                 tg3_flag_set(tp, 5755_PLUS);
16178
16179         if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
16180             tg3_asic_rev(tp) == ASIC_REV_5714)
16181                 tg3_flag_set(tp, 5780_CLASS);
16182
16183         if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16184             tg3_asic_rev(tp) == ASIC_REV_5752 ||
16185             tg3_asic_rev(tp) == ASIC_REV_5906 ||
16186             tg3_flag(tp, 5755_PLUS) ||
16187             tg3_flag(tp, 5780_CLASS))
16188                 tg3_flag_set(tp, 5750_PLUS);
16189
16190         if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
16191             tg3_flag(tp, 5750_PLUS))
16192                 tg3_flag_set(tp, 5705_PLUS);
16193 }
16194
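      /* A device is 10/100-only if it uses a FET PHY, if the 5703 board
       * ID straps say so, or if its PCI match entry carries the
       * 10_100_ONLY driver-data flag (with a 5705-specific sub-flag).
       */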
16195 static bool tg3_10_100_only_device(struct tg3 *tp,
16196                                    const struct pci_device_id *ent)
16197 {
16198         u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
16199
16200         if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
16201              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
16202             (tp->phy_flags & TG3_PHYFLG_IS_FET))
16203                 return true;
16204
16205         if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
16206                 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
16207                         if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
16208                                 return true;
16209                 } else {
16210                         return true;
16211                 }
16212         }
16213
16214         return false;
16215 }
16216
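      /* Probe-time discovery of chip type, bus configuration and the
       * register access quirks that the rest of the driver relies on.
       * This must run before the first MMIO register access.
       */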
16217 static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
16218 {
16219         u32 misc_ctrl_reg;
16220         u32 pci_state_reg, grc_misc_cfg;
16221         u32 val;
16222         u16 pci_cmd;
16223         int err;
16224
16225         /* Force memory write invalidate off.  If we leave it on,
16226          * then on 5700_BX chips we have to enable a workaround.
16227          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
16228          * to match the cacheline size.  The Broadcom driver has this
16229          * workaround but turns MWI off all the time, so it never uses
16230          * it.  This seems to suggest that the workaround is insufficient.
16231          */
16232         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16233         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
16234         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16235
16236         /* Important! -- Make sure register accesses are byteswapped
16237          * correctly.  Also, for those chips that require it, make
16238          * sure that indirect register accesses are enabled before
16239          * the first operation.
16240          */
16241         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16242                               &misc_ctrl_reg);
16243         tp->misc_host_ctrl |= (misc_ctrl_reg &
16244                                MISC_HOST_CTRL_CHIPREV);
16245         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16246                                tp->misc_host_ctrl);
16247
16248         tg3_detect_asic_rev(tp, misc_ctrl_reg);
16249
16250         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
16251          * we need to disable memory-mapped access and use configuration
16252          * cycles only to access all registers. The 5702/03 chips
16253          * can mistakenly decode the special cycles from the
16254          * ICH chipsets as memory write cycles, causing corruption
16255          * of register and memory space. Only certain ICH bridges
16256          * will drive special cycles with non-zero data during the
16257          * address phase which can fall within the 5703's address
16258          * range. This is not an ICH bug as the PCI spec allows
16259          * non-zero address during special cycles. However, only
16260          * these ICH bridges are known to drive non-zero addresses
16261          * during special cycles.
16262          *
16263          * Since special cycles do not cross PCI bridges, we only
16264          * enable this workaround if the 5703 is on the secondary
16265          * bus of these ICH bridges.
16266          */
16267         if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
16268             (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
16269                 static struct tg3_dev_id {
16270                         u32     vendor;
16271                         u32     device;
16272                         u32     rev;
16273                 } ich_chipsets[] = {
16274                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
16275                           PCI_ANY_ID },
16276                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
16277                           PCI_ANY_ID },
16278                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
16279                           0xa },
16280                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
16281                           PCI_ANY_ID },
16282                         { },
16283                 };
16284                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
16285                 struct pci_dev *bridge = NULL;
16286
16287                 while (pci_id->vendor != 0) {
16288                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
16289                                                 bridge);
16290                         if (!bridge) {
16291                                 pci_id++;
16292                                 continue;
16293                         }
16294                         if (pci_id->rev != PCI_ANY_ID) {
16295                                 if (bridge->revision > pci_id->rev)
16296                                         continue;
16297                         }
16298                         if (bridge->subordinate &&
16299                             (bridge->subordinate->number ==
16300                              tp->pdev->bus->number)) {
16301                                 tg3_flag_set(tp, ICH_WORKAROUND);
16302                                 pci_dev_put(bridge);
16303                                 break;
16304                         }
16305                 }
16306         }
16307
16308         if (tg3_asic_rev(tp) == ASIC_REV_5701) {
16309                 static struct tg3_dev_id {
16310                         u32     vendor;
16311                         u32     device;
16312                 } bridge_chipsets[] = {
16313                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
16314                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
16315                         { },
16316                 };
16317                 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
16318                 struct pci_dev *bridge = NULL;
16319
16320                 while (pci_id->vendor != 0) {
16321                         bridge = pci_get_device(pci_id->vendor,
16322                                                 pci_id->device,
16323                                                 bridge);
16324                         if (!bridge) {
16325                                 pci_id++;
16326                                 continue;
16327                         }
16328                         if (bridge->subordinate &&
16329                             (bridge->subordinate->number <=
16330                              tp->pdev->bus->number) &&
16331                             (bridge->subordinate->busn_res.end >=
16332                              tp->pdev->bus->number)) {
16333                                 tg3_flag_set(tp, 5701_DMA_BUG);
16334                                 pci_dev_put(bridge);
16335                                 break;
16336                         }
16337                 }
16338         }
16339
16340         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
16341          * DMA addresses > 40-bit. This bridge may have additional 57xx
16342          * devices behind it in some 4-port NIC designs, for example.
16343          * Any tg3 device found behind the bridge will also need the 40-bit
16344          * DMA workaround.
16345          */
16346         if (tg3_flag(tp, 5780_CLASS)) {
16347                 tg3_flag_set(tp, 40BIT_DMA_BUG);
16348                 tp->msi_cap = tp->pdev->msi_cap;
16349         } else {
16350                 struct pci_dev *bridge = NULL;
16351
16352                 do {
16353                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
16354                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
16355                                                 bridge);
16356                         if (bridge && bridge->subordinate &&
16357                             (bridge->subordinate->number <=
16358                              tp->pdev->bus->number) &&
16359                             (bridge->subordinate->busn_res.end >=
16360                              tp->pdev->bus->number)) {
16361                                 tg3_flag_set(tp, 40BIT_DMA_BUG);
16362                                 pci_dev_put(bridge);
16363                                 break;
16364                         }
16365                 } while (bridge);
16366         }
16367
16368         if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16369             tg3_asic_rev(tp) == ASIC_REV_5714)
16370                 tp->pdev_peer = tg3_find_peer(tp);
16371
16372         /* Determine TSO capabilities */
16373         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
16374                 ; /* Do nothing. HW bug. */
16375         else if (tg3_flag(tp, 57765_PLUS))
16376                 tg3_flag_set(tp, HW_TSO_3);
16377         else if (tg3_flag(tp, 5755_PLUS) ||
16378                  tg3_asic_rev(tp) == ASIC_REV_5906)
16379                 tg3_flag_set(tp, HW_TSO_2);
16380         else if (tg3_flag(tp, 5750_PLUS)) {
16381                 tg3_flag_set(tp, HW_TSO_1);
16382                 tg3_flag_set(tp, TSO_BUG);
16383                 if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
16384                     tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
16385                         tg3_flag_clear(tp, TSO_BUG);
16386         } else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16387                    tg3_asic_rev(tp) != ASIC_REV_5701 &&
16388                    tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
16389                 tg3_flag_set(tp, FW_TSO);
16390                 tg3_flag_set(tp, TSO_BUG);
16391                 if (tg3_asic_rev(tp) == ASIC_REV_5705)
16392                         tp->fw_needed = FIRMWARE_TG3TSO5;
16393                 else
16394                         tp->fw_needed = FIRMWARE_TG3TSO;
16395         }
16396
16397         /* Selectively allow TSO based on operating conditions */
16398         if (tg3_flag(tp, HW_TSO_1) ||
16399             tg3_flag(tp, HW_TSO_2) ||
16400             tg3_flag(tp, HW_TSO_3) ||
16401             tg3_flag(tp, FW_TSO)) {
16402                 /* For firmware TSO, assume ASF is disabled.
16403                  * We'll disable TSO later if we discover ASF
16404                  * is enabled in tg3_get_eeprom_hw_cfg().
16405                  */
16406                 tg3_flag_set(tp, TSO_CAPABLE);
16407         } else {
16408                 tg3_flag_clear(tp, TSO_CAPABLE);
16409                 tg3_flag_clear(tp, TSO_BUG);
16410                 tp->fw_needed = NULL;
16411         }
16412
16413         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
16414                 tp->fw_needed = FIRMWARE_TG3;
16415
16416         if (tg3_asic_rev(tp) == ASIC_REV_57766)
16417                 tp->fw_needed = FIRMWARE_TG357766;
16418
16419         tp->irq_max = 1;
16420
16421         if (tg3_flag(tp, 5750_PLUS)) {
16422                 tg3_flag_set(tp, SUPPORT_MSI);
16423                 if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
16424                     tg3_chip_rev(tp) == CHIPREV_5750_BX ||
16425                     (tg3_asic_rev(tp) == ASIC_REV_5714 &&
16426                      tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
16427                      tp->pdev_peer == tp->pdev))
16428                         tg3_flag_clear(tp, SUPPORT_MSI);
16429
16430                 if (tg3_flag(tp, 5755_PLUS) ||
16431                     tg3_asic_rev(tp) == ASIC_REV_5906) {
16432                         tg3_flag_set(tp, 1SHOT_MSI);
16433                 }
16434
16435                 if (tg3_flag(tp, 57765_PLUS)) {
16436                         tg3_flag_set(tp, SUPPORT_MSIX);
16437                         tp->irq_max = TG3_IRQ_MAX_VECS;
16438                 }
16439         }
16440
16441         tp->txq_max = 1;
16442         tp->rxq_max = 1;
16443         if (tp->irq_max > 1) {
16444                 tp->rxq_max = TG3_RSS_MAX_NUM_QS;
16445                 tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
16446
16447                 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
16448                     tg3_asic_rev(tp) == ASIC_REV_5720)
16449                         tp->txq_max = tp->irq_max - 1;
16450         }
16451
16452         if (tg3_flag(tp, 5755_PLUS) ||
16453             tg3_asic_rev(tp) == ASIC_REV_5906)
16454                 tg3_flag_set(tp, SHORT_DMA_BUG);
16455
16456         if (tg3_asic_rev(tp) == ASIC_REV_5719)
16457                 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
16458
16459         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16460             tg3_asic_rev(tp) == ASIC_REV_5719 ||
16461             tg3_asic_rev(tp) == ASIC_REV_5720 ||
16462             tg3_asic_rev(tp) == ASIC_REV_5762)
16463                 tg3_flag_set(tp, LRG_PROD_RING_CAP);
16464
16465         if (tg3_flag(tp, 57765_PLUS) &&
16466             tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
16467                 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
16468
16469         if (!tg3_flag(tp, 5705_PLUS) ||
16470             tg3_flag(tp, 5780_CLASS) ||
16471             tg3_flag(tp, USE_JUMBO_BDFLAG))
16472                 tg3_flag_set(tp, JUMBO_CAPABLE);
16473
16474         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16475                               &pci_state_reg);
16476
16477         if (pci_is_pcie(tp->pdev)) {
16478                 u16 lnkctl;
16479
16480                 tg3_flag_set(tp, PCI_EXPRESS);
16481
16482                 pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
16483                 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
16484                         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16485                                 tg3_flag_clear(tp, HW_TSO_2);
16486                                 tg3_flag_clear(tp, TSO_CAPABLE);
16487                         }
16488                         if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
16489                             tg3_asic_rev(tp) == ASIC_REV_5761 ||
16490                             tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
16491                             tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
16492                                 tg3_flag_set(tp, CLKREQ_BUG);
16493                 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
16494                         tg3_flag_set(tp, L1PLLPD_EN);
16495                 }
16496         } else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
16497                 /* BCM5785 devices are effectively PCIe devices, and should
16498                  * follow PCIe codepaths, but do not have a PCIe capabilities
16499                  * section.
16500                  */
16501                 tg3_flag_set(tp, PCI_EXPRESS);
16502         } else if (!tg3_flag(tp, 5705_PLUS) ||
16503                    tg3_flag(tp, 5780_CLASS)) {
16504                 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
16505                 if (!tp->pcix_cap) {
16506                         dev_err(&tp->pdev->dev,
16507                                 "Cannot find PCI-X capability, aborting\n");
16508                         return -EIO;
16509                 }
16510
16511                 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
16512                         tg3_flag_set(tp, PCIX_MODE);
16513         }
16514
16515         /* If we have an AMD 762 or VIA K8T800 chipset, reordering of
16516          * mailbox register writes by the host controller can cause
16517          * major trouble.  We read back from every mailbox register
16518          * write to force the writes to be posted to the chip in
16519          * order.
16520          */
16521         if (pci_dev_present(tg3_write_reorder_chipsets) &&
16522             !tg3_flag(tp, PCI_EXPRESS))
16523                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
16524
16525         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
16526                              &tp->pci_cacheline_sz);
16527         pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16528                              &tp->pci_lat_timer);
16529         if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
16530             tp->pci_lat_timer < 64) {
16531                 tp->pci_lat_timer = 64;
16532                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16533                                       tp->pci_lat_timer);
16534         }
16535
16536         /* Important! -- It is critical that the PCI-X hw workaround
16537          * situation is decided before the first MMIO register access.
16538          */
16539         if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
16540                 /* 5700 BX chips need to have their TX producer index
16541                  * mailboxes written twice to work around a bug.
16542                  */
16543                 tg3_flag_set(tp, TXD_MBOX_HWBUG);
16544
16545                 /* If we are in PCI-X mode, enable the register write workaround.
16546                  *
16547                  * The workaround is to use indirect register accesses
16548                  * for all chip writes except those to mailbox registers.
16549                  */
16550                 if (tg3_flag(tp, PCIX_MODE)) {
16551                         u32 pm_reg;
16552
16553                         tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16554
16555                         /* The chip can have its power management PCI config
16556                          * space registers clobbered due to this bug.
16557                          * So explicitly force the chip into D0 here.
16558                          */
16559                         pci_read_config_dword(tp->pdev,
16560                                               tp->pdev->pm_cap + PCI_PM_CTRL,
16561                                               &pm_reg);
16562                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
16563                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
16564                         pci_write_config_dword(tp->pdev,
16565                                                tp->pdev->pm_cap + PCI_PM_CTRL,
16566                                                pm_reg);
16567
16568                         /* Also, force SERR#/PERR# in PCI command. */
16569                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16570                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
16571                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16572                 }
16573         }
16574
16575         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
16576                 tg3_flag_set(tp, PCI_HIGH_SPEED);
16577         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
16578                 tg3_flag_set(tp, PCI_32BIT);
16579
16580         /* Chip-specific fixup from Broadcom driver */
16581         if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
16582             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
16583                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
16584                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
16585         }
16586
16587         /* Default fast path register access methods */
16588         tp->read32 = tg3_read32;
16589         tp->write32 = tg3_write32;
16590         tp->read32_mbox = tg3_read32;
16591         tp->write32_mbox = tg3_write32;
16592         tp->write32_tx_mbox = tg3_write32;
16593         tp->write32_rx_mbox = tg3_write32;
16594
16595         /* Various workaround register access methods */
16596         if (tg3_flag(tp, PCIX_TARGET_HWBUG))
16597                 tp->write32 = tg3_write_indirect_reg32;
16598         else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
16599                  (tg3_flag(tp, PCI_EXPRESS) &&
16600                   tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
16601                 /*
16602                  * Back-to-back register writes can cause problems on these
16603                  * chips; the workaround is to read back all reg writes
16604                  * except those to mailbox regs.
16605                  *
16606                  * See tg3_write_indirect_reg32().
16607                  */
16608                 tp->write32 = tg3_write_flush_reg32;
16609         }
16610
16611         if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
16612                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
16613                 if (tg3_flag(tp, MBOX_WRITE_REORDER))
16614                         tp->write32_rx_mbox = tg3_write_flush_reg32;
16615         }
16616
16617         if (tg3_flag(tp, ICH_WORKAROUND)) {
16618                 tp->read32 = tg3_read_indirect_reg32;
16619                 tp->write32 = tg3_write_indirect_reg32;
16620                 tp->read32_mbox = tg3_read_indirect_mbox;
16621                 tp->write32_mbox = tg3_write_indirect_mbox;
16622                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
16623                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
16624
16625                 iounmap(tp->regs);
16626                 tp->regs = NULL;
16627
16628                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16629                 pci_cmd &= ~PCI_COMMAND_MEMORY;
16630                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16631         }
16632         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16633                 tp->read32_mbox = tg3_read32_mbox_5906;
16634                 tp->write32_mbox = tg3_write32_mbox_5906;
16635                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
16636                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
16637         }
16638
16639         if (tp->write32 == tg3_write_indirect_reg32 ||
16640             (tg3_flag(tp, PCIX_MODE) &&
16641              (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16642               tg3_asic_rev(tp) == ASIC_REV_5701)))
16643                 tg3_flag_set(tp, SRAM_USE_CONFIG);
16644
16645         /* The memory arbiter has to be enabled in order for SRAM accesses
16646          * to succeed.  Normally on powerup the tg3 chip firmware will make
16647          * sure it is enabled, but other entities such as system netboot
16648          * code might disable it.
16649          */
16650         val = tr32(MEMARB_MODE);
16651         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
16652
16653         tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
16654         if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16655             tg3_flag(tp, 5780_CLASS)) {
16656                 if (tg3_flag(tp, PCIX_MODE)) {
16657                         pci_read_config_dword(tp->pdev,
16658                                               tp->pcix_cap + PCI_X_STATUS,
16659                                               &val);
16660                         tp->pci_fn = val & 0x7;
16661                 }
16662         } else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16663                    tg3_asic_rev(tp) == ASIC_REV_5719 ||
16664                    tg3_asic_rev(tp) == ASIC_REV_5720) {
16665                 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
16666                 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
16667                         val = tr32(TG3_CPMU_STATUS);
16668
16669                 if (tg3_asic_rev(tp) == ASIC_REV_5717)
16670                         tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
16671                 else
16672                         tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
16673                                      TG3_CPMU_STATUS_FSHFT_5719;
16674         }
16675
16676         if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
16677                 tp->write32_tx_mbox = tg3_write_flush_reg32;
16678                 tp->write32_rx_mbox = tg3_write_flush_reg32;
16679         }
16680
16681         /* Get eeprom hw config before calling tg3_set_power_state().
16682          * In particular, the TG3_FLAG_IS_NIC flag must be
16683          * determined before calling tg3_set_power_state() so that
16684          * we know whether or not to switch out of Vaux power.
16685          * When the flag is set, it means that GPIO1 is used for eeprom
16686          * write protect and also implies that it is a LOM where GPIOs
16687          * are not used to switch power.
16688          */
16689         tg3_get_eeprom_hw_cfg(tp);
16690
16691         if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
16692                 tg3_flag_clear(tp, TSO_CAPABLE);
16693                 tg3_flag_clear(tp, TSO_BUG);
16694                 tp->fw_needed = NULL;
16695         }
16696
16697         if (tg3_flag(tp, ENABLE_APE)) {
16698                 /* Allow reads and writes to the
16699                  * APE register and memory space.
16700                  */
16701                 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
16702                                  PCISTATE_ALLOW_APE_SHMEM_WR |
16703                                  PCISTATE_ALLOW_APE_PSPACE_WR;
16704                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
16705                                        pci_state_reg);
16706
16707                 tg3_ape_lock_init(tp);
16708                 tp->ape_hb_interval =
16709                         msecs_to_jiffies(APE_HOST_HEARTBEAT_INT_5SEC);
16710         }
16711
16712         /* Set up tp->grc_local_ctrl before calling
16713          * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
16714          * will bring 5700's external PHY out of reset.
16715          * It is also used as eeprom write protect on LOMs.
16716          */
16717         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
16718         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16719             tg3_flag(tp, EEPROM_WRITE_PROT))
16720                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
16721                                        GRC_LCLCTRL_GPIO_OUTPUT1);
16722         /* Unused GPIO3 must be driven as output on 5752 because there
16723          * are no pull-up resistors on unused GPIO pins.
16724          */
16725         else if (tg3_asic_rev(tp) == ASIC_REV_5752)
16726                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
16727
16728         if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16729             tg3_asic_rev(tp) == ASIC_REV_57780 ||
16730             tg3_flag(tp, 57765_CLASS))
16731                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16732
16733         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16734             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
16735                 /* Turn off the debug UART. */
16736                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16737                 if (tg3_flag(tp, IS_NIC))
16738                         /* Keep VMain power. */
16739                         tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
16740                                               GRC_LCLCTRL_GPIO_OUTPUT0;
16741         }
16742
16743         if (tg3_asic_rev(tp) == ASIC_REV_5762)
16744                 tp->grc_local_ctrl |=
16745                         tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
16746
16747         /* Switch out of Vaux if it is a NIC */
16748         tg3_pwrsrc_switch_to_vmain(tp);
16749
16750         /* Derive initial jumbo mode from MTU assigned in
16751          * ether_setup() via the alloc_etherdev() call
16752          */
16753         if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
16754                 tg3_flag_set(tp, JUMBO_RING_ENABLE);
16755
16756         /* Determine WakeOnLan speed to use. */
16757         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16758             tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16759             tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16760             tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
16761                 tg3_flag_clear(tp, WOL_SPEED_100MB);
16762         } else {
16763                 tg3_flag_set(tp, WOL_SPEED_100MB);
16764         }
16765
16766         if (tg3_asic_rev(tp) == ASIC_REV_5906)
16767                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
16768
16769         /* A few boards don't want the Ethernet@WireSpeed PHY feature */
16770         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16771             (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16772              (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
16773              (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
16774             (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
16775             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
16776                 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
16777
16778         if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
16779             tg3_chip_rev(tp) == CHIPREV_5704_AX)
16780                 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
16781         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
16782                 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
16783
16784         if (tg3_flag(tp, 5705_PLUS) &&
16785             !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
16786             tg3_asic_rev(tp) != ASIC_REV_5785 &&
16787             tg3_asic_rev(tp) != ASIC_REV_57780 &&
16788             !tg3_flag(tp, 57765_PLUS)) {
16789                 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16790                     tg3_asic_rev(tp) == ASIC_REV_5787 ||
16791                     tg3_asic_rev(tp) == ASIC_REV_5784 ||
16792                     tg3_asic_rev(tp) == ASIC_REV_5761) {
16793                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
16794                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
16795                                 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
16796                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
16797                                 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
16798                 } else
16799                         tp->phy_flags |= TG3_PHYFLG_BER_BUG;
16800         }
16801
16802         if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
16803             tg3_chip_rev(tp) != CHIPREV_5784_AX) {
16804                 tp->phy_otp = tg3_read_otp_phycfg(tp);
16805                 if (tp->phy_otp == 0)
16806                         tp->phy_otp = TG3_OTP_DEFAULT;
16807         }
16808
16809         if (tg3_flag(tp, CPMU_PRESENT))
16810                 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
16811         else
16812                 tp->mi_mode = MAC_MI_MODE_BASE;
16813
16814         tp->coalesce_mode = 0;
16815         if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
16816             tg3_chip_rev(tp) != CHIPREV_5700_BX)
16817                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
16818
16819         /* Set these bits to enable the statistics workaround. */
16820         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16821             tg3_asic_rev(tp) == ASIC_REV_5762 ||
16822             tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
16823             tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
16824                 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
16825                 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
16826         }
16827
16828         if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
16829             tg3_asic_rev(tp) == ASIC_REV_57780)
16830                 tg3_flag_set(tp, USE_PHYLIB);
16831
16832         err = tg3_mdio_init(tp);
16833         if (err)
16834                 return err;
16835
16836         /* Initialize data/descriptor byte/word swapping. */
16837         val = tr32(GRC_MODE);
16838         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
16839             tg3_asic_rev(tp) == ASIC_REV_5762)
16840                 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
16841                         GRC_MODE_WORD_SWAP_B2HRX_DATA |
16842                         GRC_MODE_B2HRX_ENABLE |
16843                         GRC_MODE_HTX2B_ENABLE |
16844                         GRC_MODE_HOST_STACKUP);
16845         else
16846                 val &= GRC_MODE_HOST_STACKUP;
16847
16848         tw32(GRC_MODE, val | tp->grc_mode);
16849
16850         tg3_switch_clocks(tp);
16851
16852         /* Clear this out for sanity. */
16853         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
16854
16855         /* Clear TG3PCI_REG_BASE_ADDR to prevent hangs. */
16856         tw32(TG3PCI_REG_BASE_ADDR, 0);
16857
16858         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16859                               &pci_state_reg);
16860         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
16861             !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
16862                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16863                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16864                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
16865                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
16866                         void __iomem *sram_base;
16867
16868                         /* Write some dummy words into the SRAM status block
16869                          * area and see if they read back correctly.  If the
16870                          * readback is bad, force-enable the PCIX workaround.
16871                          */
16872                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
16873
16874                         writel(0x00000000, sram_base);
16875                         writel(0x00000000, sram_base + 4);
16876                         writel(0xffffffff, sram_base + 4);
16877                         if (readl(sram_base) != 0x00000000)
16878                                 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16879                 }
16880         }
16881
16882         udelay(50);
16883         tg3_nvram_init(tp);
16884
16885         /* If the device has an NVRAM, no need to load patch firmware */
16886         if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
16887             !tg3_flag(tp, NO_NVRAM))
16888                 tp->fw_needed = NULL;
16889
16890         grc_misc_cfg = tr32(GRC_MISC_CFG);
16891         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
16892
16893         if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16894             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
16895              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
16896                 tg3_flag_set(tp, IS_5788);
16897
16898         if (!tg3_flag(tp, IS_5788) &&
16899             tg3_asic_rev(tp) != ASIC_REV_5700)
16900                 tg3_flag_set(tp, TAGGED_STATUS);
16901         if (tg3_flag(tp, TAGGED_STATUS)) {
16902                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
16903                                       HOSTCC_MODE_CLRTICK_TXBD);
16904
16905                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
16906                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16907                                        tp->misc_host_ctrl);
16908         }
16909
16910         /* Preserve the APE MAC_MODE bits */
16911         if (tg3_flag(tp, ENABLE_APE))
16912                 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
16913         else
16914                 tp->mac_mode = 0;
16915
16916         if (tg3_10_100_only_device(tp, ent))
16917                 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
16918
16919         err = tg3_phy_probe(tp);
16920         if (err) {
16921                 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
16922                 /* ... but do not return immediately ... */
16923                 tg3_mdio_fini(tp);
16924         }
16925
16926         tg3_read_vpd(tp);
16927         tg3_read_fw_ver(tp);
16928
16929         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
16930                 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16931         } else {
16932                 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16933                         tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16934                 else
16935                         tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16936         }
16937
16938         /* 5700 {AX,BX} chips have a broken status block link
16939          * change bit implementation, so we must use the
16940          * status register in those cases.
16941          */
16942         if (tg3_asic_rev(tp) == ASIC_REV_5700)
16943                 tg3_flag_set(tp, USE_LINKCHG_REG);
16944         else
16945                 tg3_flag_clear(tp, USE_LINKCHG_REG);
16946
16947         /* The led_ctrl is set during tg3_phy_probe; here we might
16948          * have to force the link status polling mechanism based
16949          * upon subsystem IDs.
16950          */
16951         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
16952             tg3_asic_rev(tp) == ASIC_REV_5701 &&
16953             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
16954                 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16955                 tg3_flag_set(tp, USE_LINKCHG_REG);
16956         }
16957
16958         /* For all SERDES we poll the MAC status register. */
16959         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
16960                 tg3_flag_set(tp, POLL_SERDES);
16961         else
16962                 tg3_flag_clear(tp, POLL_SERDES);
16963
16964         if (tg3_flag(tp, ENABLE_APE) && tg3_flag(tp, ENABLE_ASF))
16965                 tg3_flag_set(tp, POLL_CPMU_LINK);
16966
16967         tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
16968         tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
16969         if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
16970             tg3_flag(tp, PCIX_MODE)) {
16971                 tp->rx_offset = NET_SKB_PAD;
16972 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
16973                 tp->rx_copy_thresh = ~(u16)0;
16974 #endif
16975         }
16976
16977         tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
16978         tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
16979         tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
16980
16981         tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
16982
16983         /* Increment the rx prod index on the rx std ring by at most
16984          * 8 for these chips to work around hw errata.
16985          */
16986         if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16987             tg3_asic_rev(tp) == ASIC_REV_5752 ||
16988             tg3_asic_rev(tp) == ASIC_REV_5755)
16989                 tp->rx_std_max_post = 8;
16990
16991         if (tg3_flag(tp, ASPM_WORKAROUND))
16992                 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
16993                                      PCIE_PWR_MGMT_L1_THRESH_MSK;
16994
16995         return err;
16996 }
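
/* Illustration only -- a minimal sketch of the indirect access scheme
 * selected above for chips with PCIX_TARGET_HWBUG or the ICH workaround,
 * where MMIO is avoided and registers are reached through the
 * TG3PCI_REG_BASE_ADDR/TG3PCI_REG_DATA config-space window.  The driver's
 * real helper is tg3_write_indirect_reg32(); this hypothetical copy just
 * shows the shape of it.
 */
static void __maybe_unused tg3_sketch_indirect_write(struct tg3 *tp,
						     u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}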
16997
16998 #ifdef CONFIG_SPARC
16999 static int tg3_get_macaddr_sparc(struct tg3 *tp)
17000 {
17001         struct net_device *dev = tp->dev;
17002         struct pci_dev *pdev = tp->pdev;
17003         struct device_node *dp = pci_device_to_OF_node(pdev);
17004         const unsigned char *addr;
17005         int len;
17006
17007         addr = of_get_property(dp, "local-mac-address", &len);
17008         if (addr && len == ETH_ALEN) {
17009                 memcpy(dev->dev_addr, addr, ETH_ALEN);
17010                 return 0;
17011         }
17012         return -ENODEV;
17013 }
17014
17015 static int tg3_get_default_macaddr_sparc(struct tg3 *tp)
17016 {
17017         struct net_device *dev = tp->dev;
17018
17019         memcpy(dev->dev_addr, idprom->id_ethaddr, ETH_ALEN);
17020         return 0;
17021 }
17022 #endif
17023
17024 static int tg3_get_device_address(struct tg3 *tp)
17025 {
17026         struct net_device *dev = tp->dev;
17027         u32 hi, lo, mac_offset;
17028         int addr_ok = 0;
17029         int err;
17030
17031 #ifdef CONFIG_SPARC
17032         if (!tg3_get_macaddr_sparc(tp))
17033                 return 0;
17034 #endif
17035
17036         if (tg3_flag(tp, IS_SSB_CORE)) {
17037                 err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]);
17038                 if (!err && is_valid_ether_addr(&dev->dev_addr[0]))
17039                         return 0;
17040         }
17041
17042         mac_offset = 0x7c;
17043         if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
17044             tg3_flag(tp, 5780_CLASS)) {
17045                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
17046                         mac_offset = 0xcc;
17047                 if (tg3_nvram_lock(tp))
17048                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
17049                 else
17050                         tg3_nvram_unlock(tp);
17051         } else if (tg3_flag(tp, 5717_PLUS)) {
17052                 if (tp->pci_fn & 1)
17053                         mac_offset = 0xcc;
17054                 if (tp->pci_fn > 1)
17055                         mac_offset += 0x18c;
17056         } else if (tg3_asic_rev(tp) == ASIC_REV_5906)
17057                 mac_offset = 0x10;
17058
17059         /* First try to get it from MAC address mailbox. */
17060         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
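	/* A valid mailbox entry carries the signature 0x484b (ASCII "HK")
	 * in the upper 16 bits of the high word; anything else and we fall
	 * through to the NVRAM and MAC-register fallbacks below.
	 */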
17061         if ((hi >> 16) == 0x484b) {
17062                 dev->dev_addr[0] = (hi >>  8) & 0xff;
17063                 dev->dev_addr[1] = (hi >>  0) & 0xff;
17064
17065                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
17066                 dev->dev_addr[2] = (lo >> 24) & 0xff;
17067                 dev->dev_addr[3] = (lo >> 16) & 0xff;
17068                 dev->dev_addr[4] = (lo >>  8) & 0xff;
17069                 dev->dev_addr[5] = (lo >>  0) & 0xff;
17070
17071                 /* Some old bootcode may report a 0 MAC address in SRAM */
17072                 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
17073         }
17074         if (!addr_ok) {
17075                 /* Next, try NVRAM. */
17076                 if (!tg3_flag(tp, NO_NVRAM) &&
17077                     !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
17078                     !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
17079                         memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
17080                         memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
17081                 }
17082                 /* Finally just fetch it out of the MAC control regs. */
17083                 else {
17084                         hi = tr32(MAC_ADDR_0_HIGH);
17085                         lo = tr32(MAC_ADDR_0_LOW);
17086
17087                         dev->dev_addr[5] = lo & 0xff;
17088                         dev->dev_addr[4] = (lo >> 8) & 0xff;
17089                         dev->dev_addr[3] = (lo >> 16) & 0xff;
17090                         dev->dev_addr[2] = (lo >> 24) & 0xff;
17091                         dev->dev_addr[1] = hi & 0xff;
17092                         dev->dev_addr[0] = (hi >> 8) & 0xff;
17093                 }
17094         }
17095
17096         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
17097 #ifdef CONFIG_SPARC
17098                 if (!tg3_get_default_macaddr_sparc(tp))
17099                         return 0;
17100 #endif
17101                 return -EINVAL;
17102         }
17103         return 0;
17104 }
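
/* The mailbox and MAC-register paths above unpack a (hi, lo) word pair
 * into dev_addr[] with the same byte order.  A consolidated sketch of
 * that unpacking (hypothetical helper, not used by the driver):
 */
static void __maybe_unused tg3_sketch_mac_from_words(u8 *addr, u32 hi, u32 lo)
{
	addr[0] = (hi >>  8) & 0xff;
	addr[1] = (hi >>  0) & 0xff;
	addr[2] = (lo >> 24) & 0xff;
	addr[3] = (lo >> 16) & 0xff;
	addr[4] = (lo >>  8) & 0xff;
	addr[5] = (lo >>  0) & 0xff;
}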
17105
17106 #define BOUNDARY_SINGLE_CACHELINE       1
17107 #define BOUNDARY_MULTI_CACHELINE        2
17108
17109 static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
17110 {
17111         int cacheline_size;
17112         u8 byte;
17113         int goal;
17114
17115         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
17116         if (byte == 0)
17117                 cacheline_size = 1024;
17118         else
17119                 cacheline_size = (int) byte * 4;
17120
17121         /* On 5703 and later chips, the boundary bits have no
17122          * effect.
17123          */
17124         if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
17125             tg3_asic_rev(tp) != ASIC_REV_5701 &&
17126             !tg3_flag(tp, PCI_EXPRESS))
17127                 goto out;
17128
17129 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
17130         goal = BOUNDARY_MULTI_CACHELINE;
17131 #else
17132 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
17133         goal = BOUNDARY_SINGLE_CACHELINE;
17134 #else
17135         goal = 0;
17136 #endif
17137 #endif
17138
17139         if (tg3_flag(tp, 57765_PLUS)) {
17140                 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
17141                 goto out;
17142         }
17143
17144         if (!goal)
17145                 goto out;
17146
17147         /* PCI controllers on most RISC systems tend to disconnect
17148          * when a device tries to burst across a cache-line boundary.
17149          * Therefore, letting tg3 do so just wastes PCI bandwidth.
17150          *
17151          * Unfortunately, for PCI-E there are only limited
17152          * write-side controls for this, and thus for reads
17153          * we will still get the disconnects.  We'll also waste
17154          * these PCI cycles for both read and write for chips
17155          * other than 5700 and 5701 which do not implement the
17156          * boundary bits.
17157          */
17158         if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
17159                 switch (cacheline_size) {
17160                 case 16:
17161                 case 32:
17162                 case 64:
17163                 case 128:
17164                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17165                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
17166                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
17167                         } else {
17168                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
17169                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
17170                         }
17171                         break;
17172
17173                 case 256:
17174                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
17175                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
17176                         break;
17177
17178                 default:
17179                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
17180                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
17181                         break;
17182                 }
17183         } else if (tg3_flag(tp, PCI_EXPRESS)) {
17184                 switch (cacheline_size) {
17185                 case 16:
17186                 case 32:
17187                 case 64:
17188                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17189                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17190                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
17191                                 break;
17192                         }
17193                         /* fallthrough */
17194                 case 128:
17195                 default:
17196                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17197                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
17198                         break;
17199                 }
17200         } else {
17201                 switch (cacheline_size) {
17202                 case 16:
17203                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17204                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
17205                                         DMA_RWCTRL_WRITE_BNDRY_16);
17206                                 break;
17207                         }
17208                         /* fallthrough */
17209                 case 32:
17210                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17211                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
17212                                         DMA_RWCTRL_WRITE_BNDRY_32);
17213                                 break;
17214                         }
17215                         /* fallthrough */
17216                 case 64:
17217                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17218                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
17219                                         DMA_RWCTRL_WRITE_BNDRY_64);
17220                                 break;
17221                         }
17222                         /* fallthrough */
17223                 case 128:
17224                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17225                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
17226                                         DMA_RWCTRL_WRITE_BNDRY_128);
17227                                 break;
17228                         }
17229                         /* fallthrough */
17230                 case 256:
17231                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
17232                                 DMA_RWCTRL_WRITE_BNDRY_256);
17233                         break;
17234                 case 512:
17235                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
17236                                 DMA_RWCTRL_WRITE_BNDRY_512);
17237                         break;
17238                 case 1024:
17239                 default:
17240                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
17241                                 DMA_RWCTRL_WRITE_BNDRY_1024);
17242                         break;
17243                 }
17244         }
17245
17246 out:
17247         return val;
17248 }
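
/* For reference: PCI_CACHE_LINE_SIZE is encoded in 32-bit words, hence
 * the "* 4" above, and a value of 0 is treated as the 1024-byte worst
 * case.  A standalone sketch of the same conversion:
 */
static int __maybe_unused tg3_sketch_cacheline_bytes(struct pci_dev *pdev)
{
	u8 byte;

	pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &byte);
	return byte ? byte * 4 : 1024;
}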
17249
17250 static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
17251                            int size, bool to_device)
17252 {
17253         struct tg3_internal_buffer_desc test_desc;
17254         u32 sram_dma_descs;
17255         int i, ret;
17256
17257         sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
17258
17259         tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
17260         tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
17261         tw32(RDMAC_STATUS, 0);
17262         tw32(WDMAC_STATUS, 0);
17263
17264         tw32(BUFMGR_MODE, 0);
17265         tw32(FTQ_RESET, 0);
17266
17267         test_desc.addr_hi = ((u64) buf_dma) >> 32;
17268         test_desc.addr_lo = buf_dma & 0xffffffff;
17269         test_desc.nic_mbuf = 0x00002100;
17270         test_desc.len = size;
17271
17272         /*
17273          * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
17274          * the *second* time the tg3 driver was getting loaded after an
17275          * initial scan.
17276          *
17277          * Broadcom tells me:
17278          *   ...the DMA engine is connected to the GRC block and a DMA
17279          *   reset may affect the GRC block in some unpredictable way...
17280          *   The behavior of resets to individual blocks has not been tested.
17281          *
17282          * Broadcom noted the GRC reset will also reset all sub-components.
17283          */
17284         if (to_device) {
17285                 test_desc.cqid_sqid = (13 << 8) | 2;
17286
17287                 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
17288                 udelay(40);
17289         } else {
17290                 test_desc.cqid_sqid = (16 << 8) | 7;
17291
17292                 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
17293                 udelay(40);
17294         }
17295         test_desc.flags = 0x00000005;
17296
17297         for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
17298                 u32 val;
17299
17300                 val = *(((u32 *)&test_desc) + i);
17301                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
17302                                        sram_dma_descs + (i * sizeof(u32)));
17303                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
17304         }
17305         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
17306
17307         if (to_device)
17308                 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
17309         else
17310                 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
17311
17312         ret = -ENODEV;
17313         for (i = 0; i < 40; i++) {
17314                 u32 val;
17315
17316                 if (to_device)
17317                         val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
17318                 else
17319                         val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
17320                 if ((val & 0xffff) == sram_dma_descs) {
17321                         ret = 0;
17322                         break;
17323                 }
17324
17325                 udelay(100);
17326         }
17327
17328         return ret;
17329 }
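
/* The completion loop above is a bounded poll: re-read the completion
 * FIFO up to 40 times with udelay(100) between reads, i.e. roughly a
 * 4 ms budget.  The same idiom, as a generic sketch:
 */
static int __maybe_unused tg3_sketch_poll_reg(struct tg3 *tp, u32 reg,
					      u32 mask, u32 want, int tries)
{
	while (tries--) {
		if ((tr32(reg) & mask) == want)
			return 0;
		udelay(100);
	}
	return -ETIMEDOUT;
}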
17330
17331 #define TEST_BUFFER_SIZE        0x2000
17332
17333 static const struct pci_device_id tg3_dma_wait_state_chipsets[] = {
17334         { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
17335         { },
17336 };
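
/* pci_dev_present() in tg3_test_dma() below consults this table: hosts
 * built around this Apple UniNorth bridge are known to expose the DMA
 * bug even when the test itself passes, so the 16-byte write boundary
 * is forced for them regardless.
 */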
17337
17338 static int tg3_test_dma(struct tg3 *tp)
17339 {
17340         dma_addr_t buf_dma;
17341         u32 *buf, saved_dma_rwctrl;
17342         int ret = 0;
17343
17344         buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
17345                                  &buf_dma, GFP_KERNEL);
17346         if (!buf) {
17347                 ret = -ENOMEM;
17348                 goto out_nofree;
17349         }
17350
17351         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
17352                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
17353
17354         tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
17355
17356         if (tg3_flag(tp, 57765_PLUS))
17357                 goto out;
17358
17359         if (tg3_flag(tp, PCI_EXPRESS)) {
17360                 /* DMA read watermark not used on PCIE */
17361                 tp->dma_rwctrl |= 0x00180000;
17362         } else if (!tg3_flag(tp, PCIX_MODE)) {
17363                 if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
17364                     tg3_asic_rev(tp) == ASIC_REV_5750)
17365                         tp->dma_rwctrl |= 0x003f0000;
17366                 else
17367                         tp->dma_rwctrl |= 0x003f000f;
17368         } else {
17369                 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17370                     tg3_asic_rev(tp) == ASIC_REV_5704) {
17371                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
17372                         u32 read_water = 0x7;
17373
17374                         /* If the 5704 is behind the EPB bridge, we can
17375                          * do the less restrictive ONE_DMA workaround for
17376                          * better performance.
17377                          */
17378                         if (tg3_flag(tp, 40BIT_DMA_BUG) &&
17379                             tg3_asic_rev(tp) == ASIC_REV_5704)
17380                                 tp->dma_rwctrl |= 0x8000;
17381                         else if (ccval == 0x6 || ccval == 0x7)
17382                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17383
17384                         if (tg3_asic_rev(tp) == ASIC_REV_5703)
17385                                 read_water = 4;
17386                         /* Set bit 23 to enable PCIX hw bug fix */
17387                         tp->dma_rwctrl |=
17388                                 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
17389                                 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
17390                                 (1 << 23);
17391                 } else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
17392                         /* 5780 always in PCIX mode */
17393                         tp->dma_rwctrl |= 0x00144000;
17394                 } else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
17395                         /* 5714 always in PCIX mode */
17396                         tp->dma_rwctrl |= 0x00148000;
17397                 } else {
17398                         tp->dma_rwctrl |= 0x001b000f;
17399                 }
17400         }
17401         if (tg3_flag(tp, ONE_DMA_AT_ONCE))
17402                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17403
17404         if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17405             tg3_asic_rev(tp) == ASIC_REV_5704)
17406                 tp->dma_rwctrl &= 0xfffffff0;
17407
17408         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
17409             tg3_asic_rev(tp) == ASIC_REV_5701) {
17410                 /* Remove this if it causes problems for some boards. */
17411                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
17412
17413                 /* On 5700/5701 chips, we need to set this bit.
17414                  * Otherwise the chip will issue cacheline transactions
17415                  * to streamable DMA memory without all of the byte
17416                  * enables turned on.  This is an error on several
17417                  * RISC PCI controllers, in particular sparc64.
17418                  *
17419                  * On 5703/5704 chips, this bit has been reassigned
17420                  * a different meaning.  In particular, it is used
17421                  * on those chips to enable a PCI-X workaround.
17422                  */
17423                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
17424         }
17425
17426         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17427
17428
17429         if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
17430             tg3_asic_rev(tp) != ASIC_REV_5701)
17431                 goto out;
17432
17433         /* It is best to perform the DMA test with the maximum write burst
17434          * size to expose the 5700/5701 write DMA bug.
17435          */
17436         saved_dma_rwctrl = tp->dma_rwctrl;
17437         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17438         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17439
17440         while (1) {
17441                 u32 *p = buf, i;
17442
17443                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
17444                         p[i] = i;
17445
17446                 /* Send the buffer to the chip. */
17447                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true);
17448                 if (ret) {
17449                         dev_err(&tp->pdev->dev,
17450                                 "%s: Buffer write failed. err = %d\n",
17451                                 __func__, ret);
17452                         break;
17453                 }
17454
17455                 /* Now read it back. */
17456                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false);
17457                 if (ret) {
17458                         dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
17459                                 "err = %d\n", __func__, ret);
17460                         break;
17461                 }
17462
17463                 /* Verify it. */
17464                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
17465                         if (p[i] == i)
17466                                 continue;
17467
17468                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17469                             DMA_RWCTRL_WRITE_BNDRY_16) {
17470                                 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17471                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17472                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17473                                 break;
17474                         } else {
17475                                 dev_err(&tp->pdev->dev,
17476                                         "%s: Buffer corrupted on read back! "
17477                                         "(%d != %d)\n", __func__, p[i], i);
17478                                 ret = -ENODEV;
17479                                 goto out;
17480                         }
17481                 }
17482
17483                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
17484                         /* Success. */
17485                         ret = 0;
17486                         break;
17487                 }
17488         }
17489         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17490             DMA_RWCTRL_WRITE_BNDRY_16) {
17491                 /* DMA test passed without adjusting the DMA boundary;
17492                  * now look for chipsets that are known to expose the
17493                  * DMA bug without failing the test.
17494                  */
17495                 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
17496                         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17497                         tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17498                 } else {
17499                         /* Safe to use the calculated DMA boundary. */
17500                         tp->dma_rwctrl = saved_dma_rwctrl;
17501                 }
17502
17503                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17504         }
17505
17506 out:
17507         dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
17508 out_nofree:
17509         return ret;
17510 }
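
/* tg3_test_dma() is called once from the probe path (tg3_init_one()
 * below); a nonzero return aborts the probe, since a NIC that cannot
 * DMA reliably is unusable.
 */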
17511
17512 static void tg3_init_bufmgr_config(struct tg3 *tp)
17513 {
17514         if (tg3_flag(tp, 57765_PLUS)) {
17515                 tp->bufmgr_config.mbuf_read_dma_low_water =
17516                         DEFAULT_MB_RDMA_LOW_WATER_5705;
17517                 tp->bufmgr_config.mbuf_mac_rx_low_water =
17518                         DEFAULT_MB_MACRX_LOW_WATER_57765;
17519                 tp->bufmgr_config.mbuf_high_water =
17520                         DEFAULT_MB_HIGH_WATER_57765;
17521
17522                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17523                         DEFAULT_MB_RDMA_LOW_WATER_5705;
17524                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17525                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
17526                 tp->bufmgr_config.mbuf_high_water_jumbo =
17527                         DEFAULT_MB_HIGH_WATER_JUMBO_57765;
17528         } else if (tg3_flag(tp, 5705_PLUS)) {
17529                 tp->bufmgr_config.mbuf_read_dma_low_water =
17530                         DEFAULT_MB_RDMA_LOW_WATER_5705;
17531                 tp->bufmgr_config.mbuf_mac_rx_low_water =
17532                         DEFAULT_MB_MACRX_LOW_WATER_5705;
17533                 tp->bufmgr_config.mbuf_high_water =
17534                         DEFAULT_MB_HIGH_WATER_5705;
17535                 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
17536                         tp->bufmgr_config.mbuf_mac_rx_low_water =
17537                                 DEFAULT_MB_MACRX_LOW_WATER_5906;
17538                         tp->bufmgr_config.mbuf_high_water =
17539                                 DEFAULT_MB_HIGH_WATER_5906;
17540                 }
17541
17542                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17543                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
17544                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17545                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
17546                 tp->bufmgr_config.mbuf_high_water_jumbo =
17547                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
17548         } else {
17549                 tp->bufmgr_config.mbuf_read_dma_low_water =
17550                         DEFAULT_MB_RDMA_LOW_WATER;
17551                 tp->bufmgr_config.mbuf_mac_rx_low_water =
17552                         DEFAULT_MB_MACRX_LOW_WATER;
17553                 tp->bufmgr_config.mbuf_high_water =
17554                         DEFAULT_MB_HIGH_WATER;
17555
17556                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17557                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
17558                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17559                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
17560                 tp->bufmgr_config.mbuf_high_water_jumbo =
17561                         DEFAULT_MB_HIGH_WATER_JUMBO;
17562         }
17563
17564         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
17565         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
17566 }
17567
17568 static char *tg3_phy_string(struct tg3 *tp)
17569 {
17570         switch (tp->phy_id & TG3_PHY_ID_MASK) {
17571         case TG3_PHY_ID_BCM5400:        return "5400";
17572         case TG3_PHY_ID_BCM5401:        return "5401";
17573         case TG3_PHY_ID_BCM5411:        return "5411";
17574         case TG3_PHY_ID_BCM5701:        return "5701";
17575         case TG3_PHY_ID_BCM5703:        return "5703";
17576         case TG3_PHY_ID_BCM5704:        return "5704";
17577         case TG3_PHY_ID_BCM5705:        return "5705";
17578         case TG3_PHY_ID_BCM5750:        return "5750";
17579         case TG3_PHY_ID_BCM5752:        return "5752";
17580         case TG3_PHY_ID_BCM5714:        return "5714";
17581         case TG3_PHY_ID_BCM5780:        return "5780";
17582         case TG3_PHY_ID_BCM5755:        return "5755";
17583         case TG3_PHY_ID_BCM5787:        return "5787";
17584         case TG3_PHY_ID_BCM5784:        return "5784";
17585         case TG3_PHY_ID_BCM5756:        return "5722/5756";
17586         case TG3_PHY_ID_BCM5906:        return "5906";
17587         case TG3_PHY_ID_BCM5761:        return "5761";
17588         case TG3_PHY_ID_BCM5718C:       return "5718C";
17589         case TG3_PHY_ID_BCM5718S:       return "5718S";
17590         case TG3_PHY_ID_BCM57765:       return "57765";
17591         case TG3_PHY_ID_BCM5719C:       return "5719C";
17592         case TG3_PHY_ID_BCM5720C:       return "5720C";
17593         case TG3_PHY_ID_BCM5762:        return "5762C";
17594         case TG3_PHY_ID_BCM8002:        return "8002/serdes";
17595         case 0:                 return "serdes";
17596         default:                return "unknown";
17597         }
17598 }
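
/* Typical use is informational logging at probe time, e.g. (a sketch):
 *
 *	netdev_info(tp->dev, "attached PHY is %s\n", tg3_phy_string(tp));
 */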
17599
17600 static char *tg3_bus_string(struct tg3 *tp, char *str)
17601 {
17602         if (tg3_flag(tp, PCI_EXPRESS)) {
17603                 strcpy(str, "PCI Express");
17604                 return str;
17605         } else if (tg3_flag(tp, PCIX_MODE)) {
17606                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
17607
17608                 strcpy(str, "PCIX:");
17609
17610                 if ((clock_ctrl == 7) ||
17611                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
17612                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
17613                         strcat(str, "133MHz");
17614                 else if (clock_ctrl == 0)
17615                         strcat(str, "33MHz");
17616                 else if (clock_ctrl == 2)
17617                         strcat(str, "50MHz");
17618                 else if (clock_ctrl == 4)
17619                         strcat(str, "66MHz");
17620                 else if (clock_ctrl == 6)
17621                         strcat(str, "100MHz");
17622         } else {
17623                 strcpy(str, "PCI:");
17624                 if (tg3_flag(tp, PCI_HIGH_SPEED))
17625                         strcat(str, "66MHz");
17626                 else
17627                         strcat(str, "33MHz");
17628         }
17629         if (tg3_flag(tp, PCI_32BIT))
17630                 strcat(str, ":32-bit");
17631         else
17632                 strcat(str, ":64-bit");
17633         return str;
17634 }
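
/* Note: tg3_bus_string() writes into a caller-supplied buffer with no
 * length argument; the probe path below passes a 40-byte stack array
 * (char str[40]), which comfortably holds the longest possible result
 * such as "PCIX:133MHz:64-bit".
 */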
17635
17636 static void tg3_init_coal(struct tg3 *tp)
17637 {
17638         struct ethtool_coalesce *ec = &tp->coal;
17639
17640         memset(ec, 0, sizeof(*ec));
17641         ec->cmd = ETHTOOL_GCOALESCE;
17642         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
17643         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
17644         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
17645         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
17646         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
17647         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
17648         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
17649         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
17650         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
17651
17652         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
17653                                  HOSTCC_MODE_CLRTICK_TXBD)) {
17654                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
17655                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
17656                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
17657                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
17658         }
17659
17660         if (tg3_flag(tp, 5705_PLUS)) {
17661                 ec->rx_coalesce_usecs_irq = 0;
17662                 ec->tx_coalesce_usecs_irq = 0;
17663                 ec->stats_block_coalesce_usecs = 0;
17664         }
17665 }
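
/* These defaults are what userspace sees via "ethtool -c <iface>"
 * before any tuning, assuming the driver's get_coalesce hook simply
 * reports tp->coal back (as the tg3 ethtool ops do elsewhere in this
 * file).
 */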
17666
17667 static int tg3_init_one(struct pci_dev *pdev,
17668                                   const struct pci_device_id *ent)
17669 {
17670         struct net_device *dev;
17671         struct tg3 *tp;
17672         int i, err;
17673         u32 sndmbx, rcvmbx, intmbx;
17674         char str[40];
17675         u64 dma_mask, persist_dma_mask;
17676         netdev_features_t features = 0;
17677
17678         printk_once(KERN_INFO "%s\n", version);
17679
17680         err = pci_enable_device(pdev);
17681         if (err) {
17682                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
17683                 return err;
17684         }
17685
17686         err = pci_request_regions(pdev, DRV_MODULE_NAME);
17687         if (err) {
17688                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
17689                 goto err_out_disable_pdev;
17690         }
17691
17692         pci_set_master(pdev);
17693
17694         dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
17695         if (!dev) {
17696                 err = -ENOMEM;
17697                 goto err_out_free_res;
17698         }
17699
17700         SET_NETDEV_DEV(dev, &pdev->dev);
17701
17702         tp = netdev_priv(dev);
17703         tp->pdev = pdev;
17704         tp->dev = dev;
17705         tp->rx_mode = TG3_DEF_RX_MODE;
17706         tp->tx_mode = TG3_DEF_TX_MODE;
17707         tp->irq_sync = 1;
17708         tp->pcierr_recovery = false;
17709
17710         if (tg3_debug > 0)
17711                 tp->msg_enable = tg3_debug;
17712         else
17713                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
17714
17715         if (pdev_is_ssb_gige_core(pdev)) {
17716                 tg3_flag_set(tp, IS_SSB_CORE);
17717                 if (ssb_gige_must_flush_posted_writes(pdev))
17718                         tg3_flag_set(tp, FLUSH_POSTED_WRITES);
17719                 if (ssb_gige_one_dma_at_once(pdev))
17720                         tg3_flag_set(tp, ONE_DMA_AT_ONCE);
17721                 if (ssb_gige_have_roboswitch(pdev)) {
17722                         tg3_flag_set(tp, USE_PHYLIB);
17723                         tg3_flag_set(tp, ROBOSWITCH);
17724                 }
17725                 if (ssb_gige_is_rgmii(pdev))
17726                         tg3_flag_set(tp, RGMII_MODE);
17727         }
17728
17729         /* The word/byte swap controls here control register access byte
17730          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
17731          * setting below.
17732          */
17733         tp->misc_host_ctrl =
17734                 MISC_HOST_CTRL_MASK_PCI_INT |
17735                 MISC_HOST_CTRL_WORD_SWAP |
17736                 MISC_HOST_CTRL_INDIR_ACCESS |
17737                 MISC_HOST_CTRL_PCISTATE_RW;
17738
17739         /* The NONFRM (non-frame) byte/word swap controls take effect
17740          * on descriptor entries, i.e. anything that isn't packet data.
17741          *
17742          * The StrongARM chips on the board (one for tx, one for rx)
17743          * are running in big-endian mode.
17744          */
17745         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
17746                         GRC_MODE_WSWAP_NONFRM_DATA);
17747 #ifdef __BIG_ENDIAN
17748         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
17749 #endif
17750         spin_lock_init(&tp->lock);
17751         spin_lock_init(&tp->indirect_lock);
17752         INIT_WORK(&tp->reset_task, tg3_reset_task);
17753
17754         tp->regs = pci_ioremap_bar(pdev, BAR_0);
17755         if (!tp->regs) {
17756                 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
17757                 err = -ENOMEM;
17758                 goto err_out_free_dev;
17759         }
17760
17761         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
17762             tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
17763             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
17764             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
17765             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
17766             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
17767             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
17768             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
17769             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
17770             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
17771             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
17772             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
17773             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
17774             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
17775             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787) {
17776                 tg3_flag_set(tp, ENABLE_APE);
17777                 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
17778                 if (!tp->aperegs) {
17779                         dev_err(&pdev->dev,
17780                                 "Cannot map APE registers, aborting\n");
17781                         err = -ENOMEM;
17782                         goto err_out_iounmap;
17783                 }
17784         }
17785
17786         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
17787         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
17788
17789         dev->ethtool_ops = &tg3_ethtool_ops;
17790         dev->watchdog_timeo = TG3_TX_TIMEOUT;
17791         dev->netdev_ops = &tg3_netdev_ops;
17792         dev->irq = pdev->irq;
17793
17794         err = tg3_get_invariants(tp, ent);
17795         if (err) {
17796                 dev_err(&pdev->dev,
17797                         "Problem fetching invariants of chip, aborting\n");
17798                 goto err_out_apeunmap;
17799         }
17800
17801         /* The EPB bridge inside the 5714, 5715, and 5780, and any
17802          * device behind the EPB, cannot support DMA addresses > 40 bits.
17803          * On 64-bit systems with IOMMU, use 40-bit dma_mask.
17804          * On 64-bit systems without IOMMU, use 64-bit dma_mask and
17805          * do DMA address check in tg3_start_xmit().
17806          */
17807         if (tg3_flag(tp, IS_5788))
17808                 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
17809         else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
17810                 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
17811 #ifdef CONFIG_HIGHMEM
17812                 dma_mask = DMA_BIT_MASK(64);
17813 #endif
17814         } else
17815                 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
17816
17817         /* Configure DMA attributes. */
17818         if (dma_mask > DMA_BIT_MASK(32)) {
17819                 err = pci_set_dma_mask(pdev, dma_mask);
17820                 if (!err) {
17821                         features |= NETIF_F_HIGHDMA;
17822                         err = pci_set_consistent_dma_mask(pdev,
17823                                                           persist_dma_mask);
17824                         if (err < 0) {
17825                                 dev_err(&pdev->dev, "Unable to obtain 64 bit "
17826                                         "DMA for consistent allocations\n");
17827                                 goto err_out_apeunmap;
17828                         }
17829                 }
17830         }
17831         if (err || dma_mask == DMA_BIT_MASK(32)) {
17832                 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
17833                 if (err) {
17834                         dev_err(&pdev->dev,
17835                                 "No usable DMA configuration, aborting\n");
17836                         goto err_out_apeunmap;
17837                 }
17838         }
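
	/* Summary of the mask selection above (see the comment before it):
	 *   IS_5788        -> 32-bit streaming and coherent masks
	 *   40BIT_DMA_BUG  -> 40-bit masks, with the streaming mask raised
	 *                     to 64-bit under CONFIG_HIGHMEM and addresses
	 *                     checked in tg3_start_xmit()
	 *   otherwise      -> full 64-bit masks
	 */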
17839
17840         tg3_init_bufmgr_config(tp);
17841
17842         /* 5700 B0 chips do not support checksumming correctly due
17843          * to hardware bugs.
17844          */
17845         if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
17846                 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
17847
17848                 if (tg3_flag(tp, 5755_PLUS))
17849                         features |= NETIF_F_IPV6_CSUM;
17850         }
17851
17852         /* TSO is on by default on chips that support hardware TSO.
17853          * Firmware TSO on older chips gives lower performance, so it
17854          * is off by default, but can be enabled using ethtool.
17855          */
17856         if ((tg3_flag(tp, HW_TSO_1) ||
17857              tg3_flag(tp, HW_TSO_2) ||
17858              tg3_flag(tp, HW_TSO_3)) &&
17859             (features & NETIF_F_IP_CSUM))
17860                 features |= NETIF_F_TSO;
17861         if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
17862                 if (features & NETIF_F_IPV6_CSUM)
17863                         features |= NETIF_F_TSO6;
17864                 if (tg3_flag(tp, HW_TSO_3) ||
17865                     tg3_asic_rev(tp) == ASIC_REV_5761 ||
17866                     (tg3_asic_rev(tp) == ASIC_REV_5784 &&
17867                      tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
17868                     tg3_asic_rev(tp) == ASIC_REV_5785 ||
17869                     tg3_asic_rev(tp) == ASIC_REV_57780)
17870                         features |= NETIF_F_TSO_ECN;
17871         }
17872
17873         dev->features |= features | NETIF_F_HW_VLAN_CTAG_TX |
17874                          NETIF_F_HW_VLAN_CTAG_RX;
17875         dev->vlan_features |= features;
17876
17877         /*
17878          * Add loopback capability only for a subset of devices that support
17879          * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
17880          * loopback for the remaining devices.
17881          */
17882         if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
17883             !tg3_flag(tp, CPMU_PRESENT))
17884                 /* Add the loopback capability */
17885                 features |= NETIF_F_LOOPBACK;
17886
17887         dev->hw_features |= features;
17888         dev->priv_flags |= IFF_UNICAST_FLT;
17889
17890         /* MTU range: 60 - 9000 or 1500, depending on hardware */
17891         dev->min_mtu = TG3_MIN_MTU;
17892         dev->max_mtu = TG3_MAX_MTU(tp);
17893
17894         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
17895             !tg3_flag(tp, TSO_CAPABLE) &&
17896             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
17897                 tg3_flag_set(tp, MAX_RXPEND_64);
17898                 tp->rx_pending = 63;
17899         }
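        /* Apparently a workaround for slower 5705 A1 configurations: with no
         * TSO support and a slow PCI bus, the RX ring is capped at 64
         * pending descriptors.
         */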
17900
17901         err = tg3_get_device_address(tp);
17902         if (err) {
17903                 dev_err(&pdev->dev,
17904                         "Could not obtain valid ethernet address, aborting\n");
17905                 goto err_out_apeunmap;
17906         }
17907
17908         intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
17909         rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
17910         sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
17911         for (i = 0; i < tp->irq_max; i++) {
17912                 struct tg3_napi *tnapi = &tp->napi[i];
17913
17914                 tnapi->tp = tp;
17915                 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
17916
17917                 tnapi->int_mbox = intmbx;
17918                 if (i <= 4)
17919                         intmbx += 0x8;
17920                 else
17921                         intmbx += 0x4;
17922
17923                 tnapi->consmbox = rcvmbx;
17924                 tnapi->prodmbox = sndmbx;
17925
17926                 if (i)
17927                         tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
17928                 else
17929                         tnapi->coal_now = HOSTCC_MODE_NOW;
17930
17931                 if (!tg3_flag(tp, SUPPORT_MSIX))
17932                         break;
17933
17934                 /*
17935                  * If we support MSIX, we'll be using RSS.  If we're using
17936                  * RSS, the first vector only handles link interrupts and the
17937                  * remaining vectors handle rx and tx interrupts.  Reuse the
17938                  * mailbox values for the next iteration.  The values we set up
17939                  * above are still useful for the single-vector mode.
17940                  */
17941                 if (!i)
17942                         continue;
17943
17944                 rcvmbx += 0x8;
17945
17946                 if (sndmbx & 0x4)
17947                         sndmbx -= 0x4;
17948                 else
17949                         sndmbx += 0xc;
17950         }
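        /* The uneven mailbox strides above are dictated by the hardware
         * register layout rather than a simple linear array, hence the
         * conditional increments for intmbx and sndmbx.
         */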
17951
17952         /*
17953          * Reset the chip in case the UNDI or EFI driver did not shut it
17954          * down cleanly; otherwise the DMA self test below will enable
17955          * WDMAC and we'll see (spurious) pending DMA on the PCI bus.
17956          */
17957         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
17958             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
17959                 tg3_full_lock(tp, 0);
17960                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
17961                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17962                 tg3_full_unlock(tp);
17963         }
17964
17965         err = tg3_test_dma(tp);
17966         if (err) {
17967                 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
17968                 goto err_out_apeunmap;
17969         }
17970
17971         tg3_init_coal(tp);
17972
17973         pci_set_drvdata(pdev, dev);
17974
17975         if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
17976             tg3_asic_rev(tp) == ASIC_REV_5720 ||
17977             tg3_asic_rev(tp) == ASIC_REV_5762)
17978                 tg3_flag_set(tp, PTP_CAPABLE);
17979
17980         tg3_timer_init(tp);
17981
17982         tg3_carrier_off(tp);
17983
17984         err = register_netdev(dev);
17985         if (err) {
17986                 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
17987                 goto err_out_apeunmap;
17988         }
17989
17990         if (tg3_flag(tp, PTP_CAPABLE)) {
17991                 tg3_ptp_init(tp);
17992                 tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
17993                                                    &tp->pdev->dev);
17994                 if (IS_ERR(tp->ptp_clock))
17995                         tp->ptp_clock = NULL;
17996         }
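        /* ptp_clock_register() returns an ERR_PTR() on failure; by clearing
         * tp->ptp_clock the driver simply runs without exposing a PTP clock
         * instead of failing the probe.
         */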
17997
17998         netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
17999                     tp->board_part_number,
18000                     tg3_chip_rev_id(tp),
18001                     tg3_bus_string(tp, str),
18002                     dev->dev_addr);
18003
18004         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) {
18005                 char *ethtype;
18006
18007                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
18008                         ethtype = "10/100Base-TX";
18009                 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
18010                         ethtype = "1000Base-SX";
18011                 else
18012                         ethtype = "10/100/1000Base-T";
18013
18014                 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
18015                             "(WireSpeed[%d], EEE[%d])\n",
18016                             tg3_phy_string(tp), ethtype,
18017                             (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
18018                             (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
18019         }
18020
18021         netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
18022                     (dev->features & NETIF_F_RXCSUM) != 0,
18023                     tg3_flag(tp, USE_LINKCHG_REG) != 0,
18024                     (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
18025                     tg3_flag(tp, ENABLE_ASF) != 0,
18026                     tg3_flag(tp, TSO_CAPABLE) != 0);
18027         netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
18028                     tp->dma_rwctrl,
18029                     pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
18030                     ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
18031
18032         pci_save_state(pdev);
18033
18034         return 0;
18035
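/* Error unwind: each label below releases the resources acquired after the
 * corresponding failure point, in reverse order of acquisition.
 */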
18036 err_out_apeunmap:
18037         if (tp->aperegs) {
18038                 iounmap(tp->aperegs);
18039                 tp->aperegs = NULL;
18040         }
18041
18042 err_out_iounmap:
18043         if (tp->regs) {
18044                 iounmap(tp->regs);
18045                 tp->regs = NULL;
18046         }
18047
18048 err_out_free_dev:
18049         free_netdev(dev);
18050
18051 err_out_free_res:
18052         pci_release_regions(pdev);
18053
18054 err_out_disable_pdev:
18055         if (pci_is_enabled(pdev))
18056                 pci_disable_device(pdev);
18057         return err;
18058 }
18059
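/* Device removal: tear down in roughly the reverse order of tg3_init_one().
 * unregister_netdev() brings the interface down before the register
 * mappings are released.
 */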
18060 static void tg3_remove_one(struct pci_dev *pdev)
18061 {
18062         struct net_device *dev = pci_get_drvdata(pdev);
18063
18064         if (dev) {
18065                 struct tg3 *tp = netdev_priv(dev);
18066
18067                 tg3_ptp_fini(tp);
18068
18069                 release_firmware(tp->fw);
18070
18071                 tg3_reset_task_cancel(tp);
18072
18073                 if (tg3_flag(tp, USE_PHYLIB)) {
18074                         tg3_phy_fini(tp);
18075                         tg3_mdio_fini(tp);
18076                 }
18077
18078                 unregister_netdev(dev);
18079                 if (tp->aperegs) {
18080                         iounmap(tp->aperegs);
18081                         tp->aperegs = NULL;
18082                 }
18083                 if (tp->regs) {
18084                         iounmap(tp->regs);
18085                         tp->regs = NULL;
18086                 }
18087                 free_netdev(dev);
18088                 pci_release_regions(pdev);
18089                 pci_disable_device(pdev);
18090         }
18091 }
18092
18093 #ifdef CONFIG_PM_SLEEP
18094 static int tg3_suspend(struct device *device)
18095 {
18096         struct pci_dev *pdev = to_pci_dev(device);
18097         struct net_device *dev = pci_get_drvdata(pdev);
18098         struct tg3 *tp = netdev_priv(dev);
18099         int err = 0;
18100
18101         rtnl_lock();
18102
18103         if (!netif_running(dev))
18104                 goto unlock;
18105
18106         tg3_reset_task_cancel(tp);
18107         tg3_phy_stop(tp);
18108         tg3_netif_stop(tp);
18109
18110         tg3_timer_stop(tp);
18111
18112         tg3_full_lock(tp, 1);
18113         tg3_disable_ints(tp);
18114         tg3_full_unlock(tp);
18115
18116         netif_device_detach(dev);
18117
18118         tg3_full_lock(tp, 0);
18119         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
18120         tg3_flag_clear(tp, INIT_COMPLETE);
18121         tg3_full_unlock(tp);
18122
18123         err = tg3_power_down_prepare(tp);
18124         if (err) {
18125                 int err2;
18126
18127                 tg3_full_lock(tp, 0);
18128
18129                 tg3_flag_set(tp, INIT_COMPLETE);
18130                 err2 = tg3_restart_hw(tp, true);
18131                 if (err2)
18132                         goto out;
18133
18134                 tg3_timer_start(tp);
18135
18136                 netif_device_attach(dev);
18137                 tg3_netif_start(tp);
18138
18139 out:
18140                 tg3_full_unlock(tp);
18141
18142                 if (!err2)
18143                         tg3_phy_start(tp);
18144         }
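        /* If power-down preparation failed, the hardware was restarted
         * above so the interface stays usable; the original error is
         * still reported to the PM core.
         */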
18145
18146 unlock:
18147         rtnl_unlock();
18148         return err;
18149 }
18150
18151 static int tg3_resume(struct device *device)
18152 {
18153         struct pci_dev *pdev = to_pci_dev(device);
18154         struct net_device *dev = pci_get_drvdata(pdev);
18155         struct tg3 *tp = netdev_priv(dev);
18156         int err = 0;
18157
18158         rtnl_lock();
18159
18160         if (!netif_running(dev))
18161                 goto unlock;
18162
18163         netif_device_attach(dev);
18164
18165         tg3_full_lock(tp, 0);
18166
18167         tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
18168
18169         tg3_flag_set(tp, INIT_COMPLETE);
18170         err = tg3_restart_hw(tp,
18171                              !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
18172         if (err)
18173                 goto out;
18174
18175         tg3_timer_start(tp);
18176
18177         tg3_netif_start(tp);
18178
18179 out:
18180         tg3_full_unlock(tp);
18181
18182         if (!err)
18183                 tg3_phy_start(tp);
18184
18185 unlock:
18186         rtnl_unlock();
18187         return err;
18188 }
18189 #endif /* CONFIG_PM_SLEEP */
18190
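/* SIMPLE_DEV_PM_OPS only references tg3_suspend/tg3_resume when
 * CONFIG_PM_SLEEP is enabled, matching the #ifdef guard around the
 * functions above.
 */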
18191 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
18192
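/* Called on system shutdown or reboot: detach and close the interface,
 * then power the NIC down so no DMA remains in flight once the system
 * goes down.
 */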
18193 static void tg3_shutdown(struct pci_dev *pdev)
18194 {
18195         struct net_device *dev = pci_get_drvdata(pdev);
18196         struct tg3 *tp = netdev_priv(dev);
18197
18198         tg3_reset_task_cancel(tp);
18199
18200         rtnl_lock();
18201
18202         netif_device_detach(dev);
18203
18204         if (netif_running(dev))
18205                 dev_close(dev);
18206
18207         tg3_power_down(tp);
18208
18209         rtnl_unlock();
18210
18211         pci_disable_device(pdev);
18212 }
18213
18214 /**
18215  * tg3_io_error_detected - called when PCI error is detected
18216  * @pdev: Pointer to PCI device
18217  * @state: The current pci connection state
18218  *
18219  * This function is called after a PCI bus error affecting
18220  * this device has been detected.
18221  */
18222 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
18223                                               pci_channel_state_t state)
18224 {
18225         struct net_device *netdev = pci_get_drvdata(pdev);
18226         struct tg3 *tp = netdev_priv(netdev);
18227         pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
18228
18229         netdev_info(netdev, "PCI I/O error detected\n");
18230
18231         /* Want to make sure that the reset task doesn't run */
18232         tg3_reset_task_cancel(tp);
18233
18234         rtnl_lock();
18235
18236         /* Could be a second call, or maybe we don't have a netdev yet */
18237         if (!netdev || tp->pcierr_recovery || !netif_running(netdev))
18238                 goto done;
18239
18240         /* We needn't attempt recovery from a permanent error */
18241         if (state == pci_channel_io_frozen)
18242                 tp->pcierr_recovery = true;
18243
18244         tg3_phy_stop(tp);
18245
18246         tg3_netif_stop(tp);
18247
18248         tg3_timer_stop(tp);
18249
18250         netif_device_detach(netdev);
18251
18252         /* Clean up software state, even if MMIO is blocked */
18253         tg3_full_lock(tp, 0);
18254         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
18255         tg3_full_unlock(tp);
18256
18257 done:
18258         if (state == pci_channel_io_perm_failure) {
18259                 if (netdev) {
18260                         tg3_napi_enable(tp);
18261                         dev_close(netdev);
18262                 }
18263                 err = PCI_ERS_RESULT_DISCONNECT;
18264         } else {
18265                 pci_disable_device(pdev);
18266         }
18267
18268         rtnl_unlock();
18269
18270         return err;
18271 }
18272
18273 /**
18274  * tg3_io_slot_reset - called after the PCI bus has been reset.
18275  * @pdev: Pointer to PCI device
18276  *
18277  * Restart the card from scratch, as if from a cold-boot.
18278  * At this point, the card has experienced a hard reset,
18279  * followed by fixups by the BIOS, and has its config space
18280  * set up identically to what it was at cold boot.
18281  */
18282 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
18283 {
18284         struct net_device *netdev = pci_get_drvdata(pdev);
18285         struct tg3 *tp = netdev_priv(netdev);
18286         pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
18287         int err;
18288
18289         rtnl_lock();
18290
18291         if (pci_enable_device(pdev)) {
18292                 dev_err(&pdev->dev,
18293                         "Cannot re-enable PCI device after reset.\n");
18294                 goto done;
18295         }
18296
18297         pci_set_master(pdev);
18298         pci_restore_state(pdev);
18299         pci_save_state(pdev);
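        /* Restore the config space saved at probe time, then re-save it so
         * a pristine copy is available for any subsequent reset.
         */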
18300
18301         if (!netdev || !netif_running(netdev)) {
18302                 rc = PCI_ERS_RESULT_RECOVERED;
18303                 goto done;
18304         }
18305
18306         err = tg3_power_up(tp);
18307         if (err)
18308                 goto done;
18309
18310         rc = PCI_ERS_RESULT_RECOVERED;
18311
18312 done:
18313         if (rc != PCI_ERS_RESULT_RECOVERED && netdev && netif_running(netdev)) {
18314                 tg3_napi_enable(tp);
18315                 dev_close(netdev);
18316         }
18317         rtnl_unlock();
18318
18319         return rc;
18320 }
18321
18322 /**
18323  * tg3_io_resume - called when traffic can start flowing again.
18324  * @pdev: Pointer to PCI device
18325  *
18326  * This callback is called when the error recovery driver tells
18327  * us that it's OK to resume normal operation.
18328  */
18329 static void tg3_io_resume(struct pci_dev *pdev)
18330 {
18331         struct net_device *netdev = pci_get_drvdata(pdev);
18332         struct tg3 *tp = netdev_priv(netdev);
18333         int err;
18334
18335         rtnl_lock();
18336
18337         if (!netdev || !netif_running(netdev))
18338                 goto done;
18339
18340         tg3_full_lock(tp, 0);
18341         tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
18342         tg3_flag_set(tp, INIT_COMPLETE);
18343         err = tg3_restart_hw(tp, true);
18344         if (err) {
18345                 tg3_full_unlock(tp);
18346                 netdev_err(netdev, "Cannot restart hardware after reset.\n");
18347                 goto done;
18348         }
18349
18350         netif_device_attach(netdev);
18351
18352         tg3_timer_start(tp);
18353
18354         tg3_netif_start(tp);
18355
18356         tg3_full_unlock(tp);
18357
18358         tg3_phy_start(tp);
18359
18360 done:
18361         tp->pcierr_recovery = false;
18362         rtnl_unlock();
18363 }
18364
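/* PCI AER recovery sequence: error_detected() quiesces the device,
 * slot_reset() re-initializes it after the bus/slot reset, and resume()
 * restarts traffic.
 */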
18365 static const struct pci_error_handlers tg3_err_handler = {
18366         .error_detected = tg3_io_error_detected,
18367         .slot_reset     = tg3_io_slot_reset,
18368         .resume         = tg3_io_resume
18369 };
18370
18371 static struct pci_driver tg3_driver = {
18372         .name           = DRV_MODULE_NAME,
18373         .id_table       = tg3_pci_tbl,
18374         .probe          = tg3_init_one,
18375         .remove         = tg3_remove_one,
18376         .err_handler    = &tg3_err_handler,
18377         .driver.pm      = &tg3_pm_ops,
18378         .shutdown       = tg3_shutdown,
18379 };
18380
18381 module_pci_driver(tg3_driver);