GNU Linux-libre 4.9.309-gnu1
[releases.git] / drivers / net / ethernet / ibm / emac / core.c
1 /*
2  * drivers/net/ethernet/ibm/emac/core.c
3  *
4  * Driver for PowerPC 4xx on-chip ethernet controller.
5  *
6  * Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
7  *                <benh@kernel.crashing.org>
8  *
9  * Based on the arch/ppc version of the driver:
10  *
11  * Copyright (c) 2004, 2005 Zultys Technologies.
12  * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
13  *
14  * Based on original work by
15  *      Matt Porter <mporter@kernel.crashing.org>
16  *      (c) 2003 Benjamin Herrenschmidt <benh@kernel.crashing.org>
17  *      Armin Kuster <akuster@mvista.com>
18  *      Johnnie Peters <jpeters@mvista.com>
19  *
20  * This program is free software; you can redistribute  it and/or modify it
21  * under  the terms of  the GNU General  Public License as published by the
22  * Free Software Foundation;  either version 2 of the  License, or (at your
23  * option) any later version.
24  *
25  */
26
27 #include <linux/module.h>
28 #include <linux/sched.h>
29 #include <linux/string.h>
30 #include <linux/errno.h>
31 #include <linux/delay.h>
32 #include <linux/types.h>
33 #include <linux/pci.h>
34 #include <linux/etherdevice.h>
35 #include <linux/skbuff.h>
36 #include <linux/crc32.h>
37 #include <linux/ethtool.h>
38 #include <linux/mii.h>
39 #include <linux/bitops.h>
40 #include <linux/workqueue.h>
41 #include <linux/of.h>
42 #include <linux/of_address.h>
43 #include <linux/of_irq.h>
44 #include <linux/of_net.h>
45 #include <linux/slab.h>
46
47 #include <asm/processor.h>
48 #include <asm/io.h>
49 #include <asm/dma.h>
50 #include <asm/uaccess.h>
51 #include <asm/dcr.h>
52 #include <asm/dcr-regs.h>
53
54 #include "core.h"
55
56 /*
57  * Lack of dma_unmap_???? calls is intentional.
58  *
59  * API-correct usage requires additional support state information to be
60  * maintained for every RX and TX buffer descriptor (BD). Unfortunately, due to
61  * EMAC design (e.g. TX buffer passed from network stack can be split into
62  * several BDs, dma_map_single/dma_map_page can be used to map particular BD),
63  * maintaining such information will add additional overhead.
64  * Current DMA API implementation for 4xx processors only ensures cache coherency
65  * and dma_unmap_???? routines are empty and are likely to stay this way.
66  * I decided to omit dma_unmap_??? calls because I don't want to add additional
67  * complexity just for the sake of following some abstract API, when it doesn't
68  * add any real benefit to the driver. I understand that this decision maybe
69  * controversial, but I really tried to make code API-correct and efficient
70  * at the same time and didn't come up with code I liked :(.                --ebs
71  */
72
73 #define DRV_NAME        "emac"
74 #define DRV_VERSION     "3.54"
75 #define DRV_DESC        "PPC 4xx OCP EMAC driver"
76
77 MODULE_DESCRIPTION(DRV_DESC);
78 MODULE_AUTHOR
79     ("Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>");
80 MODULE_LICENSE("GPL");
81
82 /* minimum number of free TX descriptors required to wake up TX process */
83 #define EMAC_TX_WAKEUP_THRESH           (NUM_TX_BUFF / 4)
84
85 /* If packet size is less than this number, we allocate small skb and copy packet
86  * contents into it instead of just sending original big skb up
87  */
88 #define EMAC_RX_COPY_THRESH             CONFIG_IBM_EMAC_RX_COPY_THRESHOLD
89
90 /* Since multiple EMACs share MDIO lines in various ways, we need
91  * to avoid re-using the same PHY ID in cases where the arch didn't
92  * setup precise phy_map entries
93  *
94  * XXX This is something that needs to be reworked as we can have multiple
95  * EMAC "sets" (multiple ASICs containing several EMACs) though we can
96  * probably require in that case to have explicit PHY IDs in the device-tree
97  */
98 static u32 busy_phy_map;
99 static DEFINE_MUTEX(emac_phy_map_lock);
100
101 /* This is the wait queue used to wait on any event related to probe, that
102  * is discovery of MALs, other EMACs, ZMII/RGMIIs, etc...
103  */
104 static DECLARE_WAIT_QUEUE_HEAD(emac_probe_wait);
105
106 /* Having stable interface names is a doomed idea. However, it would be nice
107  * if we didn't have completely random interface names at boot too :-) It's
108  * just a matter of making everybody's life easier. Since we are doing
109  * threaded probing, it's a bit harder though. The base idea here is that
110  * we make up a list of all emacs in the device-tree before we register the
111  * driver. Every emac will then wait for the previous one in the list to
112  * initialize before itself. We should also keep that list ordered by
113  * cell_index.
114  * That list is only 4 entries long, meaning that additional EMACs don't
115  * get ordering guarantees unless EMAC_BOOT_LIST_SIZE is increased.
116  */
117
118 #define EMAC_BOOT_LIST_SIZE     4
119 static struct device_node *emac_boot_list[EMAC_BOOT_LIST_SIZE];
120
121 /* How long should I wait for dependent devices ? */
122 #define EMAC_PROBE_DEP_TIMEOUT  (HZ * 5)
123
124 /* I don't want to litter system log with timeout errors
125  * when we have brain-damaged PHY.
126  */
127 static inline void emac_report_timeout_error(struct emac_instance *dev,
128                                              const char *error)
129 {
130         if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX |
131                                   EMAC_FTR_460EX_PHY_CLK_FIX |
132                                   EMAC_FTR_440EP_PHY_CLK_FIX))
133                 DBG(dev, "%s" NL, error);
134         else if (net_ratelimit())
135                 printk(KERN_ERR "%s: %s\n", dev->ofdev->dev.of_node->full_name,
136                         error);
137 }
138
139 /* EMAC PHY clock workaround:
140  * 440EP/440GR has more sane SDR0_MFR register implementation than 440GX,
141  * which allows controlling each EMAC clock
142  */
/* Part of the EMAC PHY clock workaround (see comment above): on
 * 440EP-class chips, set this EMAC's SDR0_MFR_ECS bit — presumably
 * selecting the alternate (TX-derived) clock source while the PHY
 * clock is unavailable; TODO confirm exact clock routing against the
 * 440EP manual.  No-op without native DCR support or the feature bit.
 */
static inline void emac_rx_clk_tx(struct emac_instance *dev)
{
#ifdef CONFIG_PPC_DCR_NATIVE
	if (emac_has_feature(dev, EMAC_FTR_440EP_PHY_CLK_FIX))
		dcri_clrset(SDR0, SDR0_MFR,
			    0, SDR0_MFR_ECS >> dev->cell_index);
#endif
}
151
/* Undo emac_rx_clk_tx(): clear this EMAC's SDR0_MFR_ECS bit,
 * restoring the default clock selection.  No-op without native DCR
 * support or the 440EP feature bit.
 */
static inline void emac_rx_clk_default(struct emac_instance *dev)
{
#ifdef CONFIG_PPC_DCR_NATIVE
	if (emac_has_feature(dev, EMAC_FTR_440EP_PHY_CLK_FIX))
		dcri_clrset(SDR0, SDR0_MFR,
			    SDR0_MFR_ECS >> dev->cell_index, 0);
#endif
}
160
161 /* PHY polling intervals */
162 #define PHY_POLL_LINK_ON        HZ
163 #define PHY_POLL_LINK_OFF       (HZ / 5)
164
165 /* Graceful stop timeouts in us.
166  * We should allow up to 1 frame time (full-duplex, ignoring collisions)
167  */
168 #define STOP_TIMEOUT_10         1230
169 #define STOP_TIMEOUT_100        124
170 #define STOP_TIMEOUT_1000       13
171 #define STOP_TIMEOUT_1000_JUMBO 73
172
/* 01:80:C2:00:00:01 — the IEEE 802.3x flow-control (PAUSE) multicast
 * destination address.
 */
static unsigned char default_mcast_addr[] = {
	0x01, 0x80, 0xC2, 0x00, 0x00, 0x01
};
176
/* ethtool statistics names, reported in this exact order.
 * Please, keep in sync with struct ibm_emac_stats/ibm_emac_error_stats
 */
static const char emac_stats_keys[EMAC_ETHTOOL_STATS_COUNT][ETH_GSTRING_LEN] = {
	"rx_packets", "rx_bytes", "tx_packets", "tx_bytes", "rx_packets_csum",
	"tx_packets_csum", "tx_undo", "rx_dropped_stack", "rx_dropped_oom",
	"rx_dropped_error", "rx_dropped_resize", "rx_dropped_mtu",
	"rx_stopped", "rx_bd_errors", "rx_bd_overrun", "rx_bd_bad_packet",
	"rx_bd_runt_packet", "rx_bd_short_event", "rx_bd_alignment_error",
	"rx_bd_bad_fcs", "rx_bd_packet_too_long", "rx_bd_out_of_range",
	"rx_bd_in_range", "rx_parity", "rx_fifo_overrun", "rx_overrun",
	"rx_bad_packet", "rx_runt_packet", "rx_short_event",
	"rx_alignment_error", "rx_bad_fcs", "rx_packet_too_long",
	"rx_out_of_range", "rx_in_range", "tx_dropped", "tx_bd_errors",
	"tx_bd_bad_fcs", "tx_bd_carrier_loss", "tx_bd_excessive_deferral",
	"tx_bd_excessive_collisions", "tx_bd_late_collision",
	"tx_bd_multple_collisions", "tx_bd_single_collision",
	"tx_bd_underrun", "tx_bd_sqe", "tx_parity", "tx_underrun", "tx_sqe",
	"tx_errors"
};
195
196 static irqreturn_t emac_irq(int irq, void *dev_instance);
197 static void emac_clean_tx_ring(struct emac_instance *dev);
198 static void __emac_set_multicast_list(struct emac_instance *dev);
199
200 static inline int emac_phy_supports_gige(int phy_mode)
201 {
202         return  phy_mode == PHY_MODE_GMII ||
203                 phy_mode == PHY_MODE_RGMII ||
204                 phy_mode == PHY_MODE_SGMII ||
205                 phy_mode == PHY_MODE_TBI ||
206                 phy_mode == PHY_MODE_RTBI;
207 }
208
209 static inline int emac_phy_gpcs(int phy_mode)
210 {
211         return  phy_mode == PHY_MODE_SGMII ||
212                 phy_mode == PHY_MODE_TBI ||
213                 phy_mode == PHY_MODE_RTBI;
214 }
215
216 static inline void emac_tx_enable(struct emac_instance *dev)
217 {
218         struct emac_regs __iomem *p = dev->emacp;
219         u32 r;
220
221         DBG(dev, "tx_enable" NL);
222
223         r = in_be32(&p->mr0);
224         if (!(r & EMAC_MR0_TXE))
225                 out_be32(&p->mr0, r | EMAC_MR0_TXE);
226 }
227
228 static void emac_tx_disable(struct emac_instance *dev)
229 {
230         struct emac_regs __iomem *p = dev->emacp;
231         u32 r;
232
233         DBG(dev, "tx_disable" NL);
234
235         r = in_be32(&p->mr0);
236         if (r & EMAC_MR0_TXE) {
237                 int n = dev->stop_timeout;
238                 out_be32(&p->mr0, r & ~EMAC_MR0_TXE);
239                 while (!(in_be32(&p->mr0) & EMAC_MR0_TXI) && n) {
240                         udelay(1);
241                         --n;
242                 }
243                 if (unlikely(!n))
244                         emac_report_timeout_error(dev, "TX disable timeout");
245         }
246 }
247
248 static void emac_rx_enable(struct emac_instance *dev)
249 {
250         struct emac_regs __iomem *p = dev->emacp;
251         u32 r;
252
253         if (unlikely(test_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags)))
254                 goto out;
255
256         DBG(dev, "rx_enable" NL);
257
258         r = in_be32(&p->mr0);
259         if (!(r & EMAC_MR0_RXE)) {
260                 if (unlikely(!(r & EMAC_MR0_RXI))) {
261                         /* Wait if previous async disable is still in progress */
262                         int n = dev->stop_timeout;
263                         while (!(r = in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
264                                 udelay(1);
265                                 --n;
266                         }
267                         if (unlikely(!n))
268                                 emac_report_timeout_error(dev,
269                                                           "RX disable timeout");
270                 }
271                 out_be32(&p->mr0, r | EMAC_MR0_RXE);
272         }
273  out:
274         ;
275 }
276
277 static void emac_rx_disable(struct emac_instance *dev)
278 {
279         struct emac_regs __iomem *p = dev->emacp;
280         u32 r;
281
282         DBG(dev, "rx_disable" NL);
283
284         r = in_be32(&p->mr0);
285         if (r & EMAC_MR0_RXE) {
286                 int n = dev->stop_timeout;
287                 out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
288                 while (!(in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
289                         udelay(1);
290                         --n;
291                 }
292                 if (unlikely(!n))
293                         emac_report_timeout_error(dev, "RX disable timeout");
294         }
295 }
296
/* Quiesce the interface before reconfiguration: block multicast list
 * updates (they are deferred and replayed by emac_netif_start()),
 * reset the TX watchdog, stop NAPI polling and disable the TX queue.
 * Paired with emac_netif_start().
 */
static inline void emac_netif_stop(struct emac_instance *dev)
{
	netif_tx_lock_bh(dev->ndev);
	netif_addr_lock(dev->ndev);
	dev->no_mcast = 1;
	netif_addr_unlock(dev->ndev);
	netif_tx_unlock_bh(dev->ndev);
	netif_trans_update(dev->ndev);	/* prevent tx timeout */
	mal_poll_disable(dev->mal, &dev->commac);
	netif_tx_disable(dev->ndev);
}
308
/* Resume the interface after emac_netif_stop(): re-allow multicast
 * updates (applying any that were deferred while stopped), wake the
 * TX queue and re-enable NAPI polling.
 */
static inline void emac_netif_start(struct emac_instance *dev)
{
	netif_tx_lock_bh(dev->ndev);
	netif_addr_lock(dev->ndev);
	dev->no_mcast = 0;
	/* Replay a multicast list change that arrived while stopped */
	if (dev->mcast_pending && netif_running(dev->ndev))
		__emac_set_multicast_list(dev);
	netif_addr_unlock(dev->ndev);
	netif_tx_unlock_bh(dev->ndev);

	netif_wake_queue(dev->ndev);

	/* NOTE: unconditional netif_wake_queue is only appropriate
	 * so long as all callers are assured to have free tx slots
	 * (taken from tg3... though the case where that is wrong is
	 *  not terribly harmful)
	 */
	mal_poll_enable(dev->mal, &dev->commac);
}
328
329 static inline void emac_rx_disable_async(struct emac_instance *dev)
330 {
331         struct emac_regs __iomem *p = dev->emacp;
332         u32 r;
333
334         DBG(dev, "rx_disable_async" NL);
335
336         r = in_be32(&p->mr0);
337         if (r & EMAC_MR0_RXE)
338                 out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
339 }
340
/* Soft-reset the EMAC core by setting the self-clearing MR0[SRST] bit
 * and polling for it to clear.
 *
 * Returns 0 on success, -ETIMEDOUT if the bit never clears (typically
 * because the PHY supplies no TX clock).  The outcome is latched in
 * dev->reset_failed so that subsequent resets skip the RX/TX stop.
 */
static int emac_reset(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	int n = 20;
	bool __maybe_unused try_internal_clock = false;

	DBG(dev, "reset" NL);

	if (!dev->reset_failed) {
		/* 40x erratum suggests stopping RX channel before reset,
		 * we stop TX as well
		 */
		emac_rx_disable(dev);
		emac_tx_disable(dev);
	}

#ifdef CONFIG_PPC_DCR_NATIVE
do_retry:
	/*
	 * PPC460EX/GT Embedded Processor Advanced User's Manual
	 * section 28.10.1 Mode Register 0 (EMACx_MR0) states:
	 * Note: The PHY must provide a TX Clk in order to perform a soft reset
	 * of the EMAC. If none is present, select the internal clock
	 * (SDR0_ETH_CFG[EMACx_PHY_CLK] = 1).
	 * After a soft reset, select the external clock.
	 *
	 * The AR8035-A PHY Meraki MR24 does not provide a TX Clk if the
	 * ethernet cable is not attached. This causes the reset to timeout
	 * and the PHY detection code in emac_init_phy() is unable to
	 * communicate and detect the AR8035-A PHY. As a result, the emac
	 * driver bails out early and the user has no ethernet.
	 * In order to stay compatible with existing configurations, the
	 * driver will temporarily switch to the internal clock, after
	 * the first reset fails.
	 */
	if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX)) {
		if (try_internal_clock || (dev->phy_address == 0xffffffff &&
					   dev->phy_map == 0xffffffff)) {
			/* No PHY: select internal loop clock before reset */
			dcri_clrset(SDR0, SDR0_ETH_CFG,
				    0, SDR0_ETH_CFG_ECS << dev->cell_index);
		} else {
			/* PHY present: select external clock before reset */
			dcri_clrset(SDR0, SDR0_ETH_CFG,
				    SDR0_ETH_CFG_ECS << dev->cell_index, 0);
		}
	}
#endif

	/* SRST is self-clearing; poll up to 20 back-to-back reads */
	out_be32(&p->mr0, EMAC_MR0_SRST);
	while ((in_be32(&p->mr0) & EMAC_MR0_SRST) && n)
		--n;

#ifdef CONFIG_PPC_DCR_NATIVE
	if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX)) {
		if (!n && !try_internal_clock) {
			/* first attempt has timed out. */
			n = 20;
			try_internal_clock = true;
			goto do_retry;
		}

		if (try_internal_clock || (dev->phy_address == 0xffffffff &&
					   dev->phy_map == 0xffffffff)) {
			/* No PHY: restore external clock source after reset */
			dcri_clrset(SDR0, SDR0_ETH_CFG,
				    SDR0_ETH_CFG_ECS << dev->cell_index, 0);
		}
	}
#endif

	/* n > 0 means SRST cleared in time */
	if (n) {
		dev->reset_failed = 0;
		return 0;
	} else {
		emac_report_timeout_error(dev, "reset timeout");
		dev->reset_failed = 1;
		return -ETIMEDOUT;
	}
}
421
/* Program the EMAC group-address (multicast) hash table from the
 * device's current multicast list: CRC-hash each address to a slot,
 * accumulate the slot bits in a temporary copy, then write the whole
 * table to the hardware in one pass.
 */
static void emac_hash_mc(struct emac_instance *dev)
{
	const int regs = EMAC_XAHT_REGS(dev);
	u32 *gaht_base = emac_gaht_base(dev);
	/* NOTE(review): variable-length array — size is hardware-bounded
	 * and small, but later kernels replaced this with a fixed-size
	 * array to avoid VLAs on the kernel stack.
	 */
	u32 gaht_temp[regs];
	struct netdev_hw_addr *ha;
	int i;

	DBG(dev, "hash_mc %d" NL, netdev_mc_count(dev->ndev));

	memset(gaht_temp, 0, sizeof (gaht_temp));

	netdev_for_each_mc_addr(ha, dev->ndev) {
		int slot, reg, mask;
		DBG2(dev, "mc %pM" NL, ha->addr);

		slot = EMAC_XAHT_CRC_TO_SLOT(dev,
					     ether_crc(ETH_ALEN, ha->addr));
		reg = EMAC_XAHT_SLOT_TO_REG(dev, slot);
		mask = EMAC_XAHT_SLOT_TO_MASK(dev, slot);

		gaht_temp[reg] |= mask;
	}

	for (i = 0; i < regs; i++)
		out_be32(gaht_base + i, gaht_temp[i]);
}
449
450 static inline u32 emac_iff2rmr(struct net_device *ndev)
451 {
452         struct emac_instance *dev = netdev_priv(ndev);
453         u32 r;
454
455         r = EMAC_RMR_SP | EMAC_RMR_SFCS | EMAC_RMR_IAE | EMAC_RMR_BAE;
456
457         if (emac_has_feature(dev, EMAC_FTR_EMAC4))
458             r |= EMAC4_RMR_BASE;
459         else
460             r |= EMAC_RMR_BASE;
461
462         if (ndev->flags & IFF_PROMISC)
463                 r |= EMAC_RMR_PME;
464         else if (ndev->flags & IFF_ALLMULTI ||
465                          (netdev_mc_count(ndev) > EMAC_XAHT_SLOTS(dev)))
466                 r |= EMAC_RMR_PMME;
467         else if (!netdev_mc_empty(ndev))
468                 r |= EMAC_RMR_MAE;
469
470         if (emac_has_feature(dev, EMAC_APM821XX_REQ_JUMBO_FRAME_SIZE)) {
471                 r &= ~EMAC4_RMR_MJS_MASK;
472                 r |= EMAC4_RMR_MJS(ndev->mtu);
473         }
474
475         return r;
476 }
477
478 static u32 __emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
479 {
480         u32 ret = EMAC_MR1_VLE | EMAC_MR1_IST | EMAC_MR1_TR0_MULT;
481
482         DBG2(dev, "__emac_calc_base_mr1" NL);
483
484         switch(tx_size) {
485         case 2048:
486                 ret |= EMAC_MR1_TFS_2K;
487                 break;
488         default:
489                 printk(KERN_WARNING "%s: Unknown Tx FIFO size %d\n",
490                        dev->ndev->name, tx_size);
491         }
492
493         switch(rx_size) {
494         case 16384:
495                 ret |= EMAC_MR1_RFS_16K;
496                 break;
497         case 4096:
498                 ret |= EMAC_MR1_RFS_4K;
499                 break;
500         default:
501                 printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
502                        dev->ndev->name, rx_size);
503         }
504
505         return ret;
506 }
507
508 static u32 __emac4_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
509 {
510         u32 ret = EMAC_MR1_VLE | EMAC_MR1_IST | EMAC4_MR1_TR |
511                 EMAC4_MR1_OBCI(dev->opb_bus_freq / 1000000);
512
513         DBG2(dev, "__emac4_calc_base_mr1" NL);
514
515         switch(tx_size) {
516         case 16384:
517                 ret |= EMAC4_MR1_TFS_16K;
518                 break;
519         case 4096:
520                 ret |= EMAC4_MR1_TFS_4K;
521                 break;
522         case 2048:
523                 ret |= EMAC4_MR1_TFS_2K;
524                 break;
525         default:
526                 printk(KERN_WARNING "%s: Unknown Tx FIFO size %d\n",
527                        dev->ndev->name, tx_size);
528         }
529
530         switch(rx_size) {
531         case 16384:
532                 ret |= EMAC4_MR1_RFS_16K;
533                 break;
534         case 4096:
535                 ret |= EMAC4_MR1_RFS_4K;
536                 break;
537         case 2048:
538                 ret |= EMAC4_MR1_RFS_2K;
539                 break;
540         default:
541                 printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
542                        dev->ndev->name, rx_size);
543         }
544
545         return ret;
546 }
547
548 static u32 emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
549 {
550         return emac_has_feature(dev, EMAC_FTR_EMAC4) ?
551                 __emac4_calc_base_mr1(dev, tx_size, rx_size) :
552                 __emac_calc_base_mr1(dev, tx_size, rx_size);
553 }
554
555 static inline u32 emac_calc_trtr(struct emac_instance *dev, unsigned int size)
556 {
557         if (emac_has_feature(dev, EMAC_FTR_EMAC4))
558                 return ((size >> 6) - 1) << EMAC_TRTR_SHIFT_EMAC4;
559         else
560                 return ((size >> 6) - 1) << EMAC_TRTR_SHIFT;
561 }
562
563 static inline u32 emac_calc_rwmr(struct emac_instance *dev,
564                                  unsigned int low, unsigned int high)
565 {
566         if (emac_has_feature(dev, EMAC_FTR_EMAC4))
567                 return (low << 22) | ( (high & 0x3ff) << 6);
568         else
569                 return (low << 23) | ( (high & 0x1ff) << 7);
570 }
571
/* (Re)program the whole EMAC for the current link state: reset the
 * core (or force internal loopback when there is no link), then set up
 * MR1, the MAC address, VLAN TPID, receive mode, FIFO thresholds,
 * flow-control watermarks, the PAUSE timer and interrupt sources.
 *
 * Returns 0 on success or -ETIMEDOUT if the chip reset timed out.
 * Callers in this file invoke it with the interface quiesced
 * (emac_netif_stop()) or not yet opened.
 */
static int emac_configure(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	struct net_device *ndev = dev->ndev;
	int tx_size, rx_size, link = netif_carrier_ok(dev->ndev);
	u32 r, mr1 = 0;

	DBG(dev, "configure" NL);

	/* No carrier: put the MAC in full-duplex internal loopback
	 * instead of resetting it.
	 */
	if (!link) {
		out_be32(&p->mr1, in_be32(&p->mr1)
			 | EMAC_MR1_FDE | EMAC_MR1_ILE);
		udelay(100);
	} else if (emac_reset(dev) < 0)
		return -ETIMEDOUT;

	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
		tah_reset(dev->tah_dev);

	DBG(dev, " link = %d duplex = %d, pause = %d, asym_pause = %d\n",
	    link, dev->phy.duplex, dev->phy.pause, dev->phy.asym_pause);

	/* Default fifo sizes */
	tx_size = dev->tx_fifo_size;
	rx_size = dev->rx_fifo_size;

	/* No link, force loopback */
	if (!link)
		mr1 = EMAC_MR1_FDE | EMAC_MR1_ILE;

	/* Check for full duplex */
	else if (dev->phy.duplex == DUPLEX_FULL)
		mr1 |= EMAC_MR1_FDE | EMAC_MR1_MWSW_001;

	/* Adjust fifo sizes, mr1 and timeouts based on link speed */
	dev->stop_timeout = STOP_TIMEOUT_10;
	switch (dev->phy.speed) {
	case SPEED_1000:
		if (emac_phy_gpcs(dev->phy.mode)) {
			mr1 |= EMAC_MR1_MF_1000GPCS | EMAC_MR1_MF_IPPA(
				(dev->phy.gpcs_address != 0xffffffff) ?
				 dev->phy.gpcs_address : dev->phy.address);

			/* Put some arbitrary OUI, Manuf & Rev IDs so we can
			 * identify this GPCS PHY later.
			 */
			out_be32(&p->u1.emac4.ipcr, 0xdeadbeef);
		} else
			mr1 |= EMAC_MR1_MF_1000;

		/* Extended fifo sizes */
		tx_size = dev->tx_fifo_size_gige;
		rx_size = dev->rx_fifo_size_gige;

		if (dev->ndev->mtu > ETH_DATA_LEN) {
			if (emac_has_feature(dev, EMAC_FTR_EMAC4))
				mr1 |= EMAC4_MR1_JPSM;
			else
				mr1 |= EMAC_MR1_JPSM;
			dev->stop_timeout = STOP_TIMEOUT_1000_JUMBO;
		} else
			dev->stop_timeout = STOP_TIMEOUT_1000;
		break;
	case SPEED_100:
		mr1 |= EMAC_MR1_MF_100;
		dev->stop_timeout = STOP_TIMEOUT_100;
		break;
	default: /* make gcc happy */
		break;
	}

	/* Propagate the negotiated speed to the attached bridge block */
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_set_speed(dev->rgmii_dev, dev->rgmii_port,
				dev->phy.speed);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_set_speed(dev->zmii_dev, dev->zmii_port, dev->phy.speed);

	/* on 40x erratum forces us to NOT use integrated flow control,
	 * let's hope it works on 44x ;)
	 */
	if (!emac_has_feature(dev, EMAC_FTR_NO_FLOW_CONTROL_40x) &&
	    dev->phy.duplex == DUPLEX_FULL) {
		if (dev->phy.pause)
			mr1 |= EMAC_MR1_EIFC | EMAC_MR1_APP;
		else if (dev->phy.asym_pause)
			mr1 |= EMAC_MR1_APP;
	}

	/* Add base settings & fifo sizes & program MR1 */
	mr1 |= emac_calc_base_mr1(dev, tx_size, rx_size);
	out_be32(&p->mr1, mr1);

	/* Set individual MAC address */
	out_be32(&p->iahr, (ndev->dev_addr[0] << 8) | ndev->dev_addr[1]);
	out_be32(&p->ialr, (ndev->dev_addr[2] << 24) |
		 (ndev->dev_addr[3] << 16) | (ndev->dev_addr[4] << 8) |
		 ndev->dev_addr[5]);

	/* VLAN Tag Protocol ID */
	out_be32(&p->vtpid, 0x8100);

	/* Receive mode register; the hash table must be programmed
	 * before enabling multicast address matching.
	 */
	r = emac_iff2rmr(ndev);
	if (r & EMAC_RMR_MAE)
		emac_hash_mc(dev);
	out_be32(&p->rmr, r);

	/* FIFOs thresholds */
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		r = EMAC4_TMR1((dev->mal_burst_size / dev->fifo_entry_size) + 1,
			       tx_size / 2 / dev->fifo_entry_size);
	else
		r = EMAC_TMR1((dev->mal_burst_size / dev->fifo_entry_size) + 1,
			      tx_size / 2 / dev->fifo_entry_size);
	out_be32(&p->tmr1, r);
	out_be32(&p->trtr, emac_calc_trtr(dev, tx_size / 2));

	/* PAUSE frame is sent when RX FIFO reaches its high-water mark,
	   there should be still enough space in FIFO to allow the our link
	   partner time to process this frame and also time to send PAUSE
	   frame itself.

	   Here is the worst case scenario for the RX FIFO "headroom"
	   (from "The Switch Book") (100Mbps, without preamble, inter-frame gap):

	   1) One maximum-length frame on TX                    1522 bytes
	   2) One PAUSE frame time                                64 bytes
	   3) PAUSE frame decode time allowance                   64 bytes
	   4) One maximum-length frame on RX                    1522 bytes
	   5) Round-trip propagation delay of the link (100Mb)    15 bytes
	   ----------
	   3187 bytes

	   I chose to set high-water mark to RX_FIFO_SIZE / 4 (1024 bytes)
	   low-water mark  to RX_FIFO_SIZE / 8 (512 bytes)
	 */
	r = emac_calc_rwmr(dev, rx_size / 8 / dev->fifo_entry_size,
			   rx_size / 4 / dev->fifo_entry_size);
	out_be32(&p->rwmr, r);

	/* Set PAUSE timer to the maximum */
	out_be32(&p->ptr, 0xffff);

	/* IRQ sources */
	r = EMAC_ISR_OVR | EMAC_ISR_BP | EMAC_ISR_SE |
		EMAC_ISR_ALE | EMAC_ISR_BFCS | EMAC_ISR_PTLE | EMAC_ISR_ORE |
		EMAC_ISR_IRE | EMAC_ISR_TE;
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
	    r |= EMAC4_ISR_TXPE | EMAC4_ISR_RXPE /* | EMAC4_ISR_TXUE |
						  EMAC4_ISR_RXOE | */;
	out_be32(&p->iser,  r);

	/* We need to take GPCS PHY out of isolate mode after EMAC reset */
	if (emac_phy_gpcs(dev->phy.mode)) {
		if (dev->phy.gpcs_address != 0xffffffff)
			emac_mii_reset_gpcs(&dev->phy);
		else
			emac_mii_reset_phy(&dev->phy);
	}

	return 0;
}
734
/* Stop traffic, reapply the full chip configuration and restart the
 * interface.  RX/TX are only re-enabled if emac_configure() succeeded.
 */
static void emac_reinitialize(struct emac_instance *dev)
{
	DBG(dev, "reinitialize" NL);

	emac_netif_stop(dev);
	if (!emac_configure(dev)) {
		emac_tx_enable(dev);
		emac_rx_enable(dev);
	}
	emac_netif_start(dev);
}
746
/* Full TX-path recovery: stop the MAC transmitter and the MAL TX
 * channel, drop everything queued in the TX ring and reset the ring
 * indices, then reconfigure the chip and re-enable both directions.
 */
static void emac_full_tx_reset(struct emac_instance *dev)
{
	DBG(dev, "full_tx_reset" NL);

	emac_tx_disable(dev);
	mal_disable_tx_channel(dev->mal, dev->mal_tx_chan);
	emac_clean_tx_ring(dev);
	/* Ring is empty now; restart producer/consumer from slot 0 */
	dev->tx_cnt = dev->tx_slot = dev->ack_slot = 0;

	emac_configure(dev);

	mal_enable_tx_channel(dev->mal, dev->mal_tx_chan);
	emac_tx_enable(dev);
	emac_rx_enable(dev);
}
762
/* Workqueue handler scheduled by emac_tx_timeout(): performs the full
 * TX reset under link_lock, but only while the device is open.
 */
static void emac_reset_work(struct work_struct *work)
{
	struct emac_instance *dev = container_of(work, struct emac_instance, reset_work);

	DBG(dev, "reset_work" NL);

	mutex_lock(&dev->link_lock);
	if (dev->opened) {
		emac_netif_stop(dev);
		emac_full_tx_reset(dev);
		emac_netif_start(dev);
	}
	mutex_unlock(&dev->link_lock);
}
777
/* TX watchdog hook: the heavyweight recovery cannot run here, so defer
 * it to process context via the reset workqueue item.
 */
static void emac_tx_timeout(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);

	DBG(dev, "tx_timeout" NL);

	schedule_work(&dev->reset_work);
}
786
787
788 static inline int emac_phy_done(struct emac_instance *dev, u32 stacr)
789 {
790         int done = !!(stacr & EMAC_STACR_OC);
791
792         if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
793                 done = !done;
794
795         return done;
796 };
797
/* Low-level MDIO read of PHY @id, register @reg through this EMAC's
 * STA interface.  Serialized by dev->mdio_lock.
 *
 * Returns the 16-bit register value on success, -ETIMEDOUT if the
 * management interface stays busy or the read never completes, or
 * -EREMOTEIO if the PHY reports an error (EMAC_STACR_PHYE).
 */
static int __emac_mdio_read(struct emac_instance *dev, u8 id, u8 reg)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r = 0;
	int n, err = -ETIMEDOUT;

	mutex_lock(&dev->mdio_lock);

	DBG2(dev, "mdio_read(%02x,%02x)" NL, id, reg);

	/* Enable proper MDIO port */
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_get_mdio(dev->zmii_dev, dev->zmii_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_get_mdio(dev->rgmii_dev, dev->rgmii_port);

	/* Wait for management interface to become idle */
	n = 20;
	while (!emac_phy_done(dev, in_be32(&p->stacr))) {
		udelay(1);
		if (!--n) {
			DBG2(dev, " -> timeout wait idle\n");
			goto bail;
		}
	}

	/* Issue read command: base value (with OPB clock encoding),
	 * optional OC-invert quirk, read opcode, then PHY register and
	 * device address fields.
	 */
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		r = EMAC4_STACR_BASE(dev->opb_bus_freq);
	else
		r = EMAC_STACR_BASE(dev->opb_bus_freq);
	if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
		r |= EMAC_STACR_OC;
	if (emac_has_feature(dev, EMAC_FTR_HAS_NEW_STACR))
		r |= EMACX_STACR_STAC_READ;
	else
		r |= EMAC_STACR_STAC_READ;
	r |= (reg & EMAC_STACR_PRA_MASK)
		| ((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT);
	out_be32(&p->stacr, r);

	/* Wait for read to complete */
	n = 200;
	while (!emac_phy_done(dev, (r = in_be32(&p->stacr)))) {
		udelay(1);
		if (!--n) {
			DBG2(dev, " -> timeout wait complete\n");
			goto bail;
		}
	}

	if (unlikely(r & EMAC_STACR_PHYE)) {
		DBG(dev, "mdio_read(%02x, %02x) failed" NL, id, reg);
		err = -EREMOTEIO;
		goto bail;
	}

	/* Extract the 16-bit data field from STACR */
	r = ((r >> EMAC_STACR_PHYD_SHIFT) & EMAC_STACR_PHYD_MASK);

	DBG2(dev, "mdio_read -> %04x" NL, r);
	err = 0;
 bail:
	/* Release the shared MDIO port in reverse acquisition order */
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_put_mdio(dev->rgmii_dev, dev->rgmii_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_put_mdio(dev->zmii_dev, dev->zmii_port);
	mutex_unlock(&dev->mdio_lock);

	return err == 0 ? r : err;
}
868
869 static void __emac_mdio_write(struct emac_instance *dev, u8 id, u8 reg,
870                               u16 val)
871 {
872         struct emac_regs __iomem *p = dev->emacp;
873         u32 r = 0;
874         int n, err = -ETIMEDOUT;
875
876         mutex_lock(&dev->mdio_lock);
877
878         DBG2(dev, "mdio_write(%02x,%02x,%04x)" NL, id, reg, val);
879
880         /* Enable proper MDIO port */
881         if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
882                 zmii_get_mdio(dev->zmii_dev, dev->zmii_port);
883         if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
884                 rgmii_get_mdio(dev->rgmii_dev, dev->rgmii_port);
885
886         /* Wait for management interface to be idle */
887         n = 20;
888         while (!emac_phy_done(dev, in_be32(&p->stacr))) {
889                 udelay(1);
890                 if (!--n) {
891                         DBG2(dev, " -> timeout wait idle\n");
892                         goto bail;
893                 }
894         }
895
896         /* Issue write command */
897         if (emac_has_feature(dev, EMAC_FTR_EMAC4))
898                 r = EMAC4_STACR_BASE(dev->opb_bus_freq);
899         else
900                 r = EMAC_STACR_BASE(dev->opb_bus_freq);
901         if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
902                 r |= EMAC_STACR_OC;
903         if (emac_has_feature(dev, EMAC_FTR_HAS_NEW_STACR))
904                 r |= EMACX_STACR_STAC_WRITE;
905         else
906                 r |= EMAC_STACR_STAC_WRITE;
907         r |= (reg & EMAC_STACR_PRA_MASK) |
908                 ((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT) |
909                 (val << EMAC_STACR_PHYD_SHIFT);
910         out_be32(&p->stacr, r);
911
912         /* Wait for write to complete */
913         n = 200;
914         while (!emac_phy_done(dev, in_be32(&p->stacr))) {
915                 udelay(1);
916                 if (!--n) {
917                         DBG2(dev, " -> timeout wait complete\n");
918                         goto bail;
919                 }
920         }
921         err = 0;
922  bail:
923         if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
924                 rgmii_put_mdio(dev->rgmii_dev, dev->rgmii_port);
925         if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
926                 zmii_put_mdio(dev->zmii_dev, dev->zmii_port);
927         mutex_unlock(&dev->mdio_lock);
928 }
929
930 static int emac_mdio_read(struct net_device *ndev, int id, int reg)
931 {
932         struct emac_instance *dev = netdev_priv(ndev);
933         int res;
934
935         res = __emac_mdio_read((dev->mdio_instance &&
936                                 dev->phy.gpcs_address != id) ?
937                                 dev->mdio_instance : dev,
938                                (u8) id, (u8) reg);
939         return res;
940 }
941
942 static void emac_mdio_write(struct net_device *ndev, int id, int reg, int val)
943 {
944         struct emac_instance *dev = netdev_priv(ndev);
945
946         __emac_mdio_write((dev->mdio_instance &&
947                            dev->phy.gpcs_address != id) ?
948                            dev->mdio_instance : dev,
949                           (u8) id, (u8) reg, (u16) val);
950 }
951
/* Tx lock BH */
/* Program the RX mode register (RMR) and, when multicast address
 * matching is enabled, the multicast hash — without a full EMAC reset
 * (see the rationale below).
 */
static void __emac_set_multicast_list(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 rmr = emac_iff2rmr(dev->ndev);

	DBG(dev, "__multicast %08x" NL, rmr);

	/* I decided to relax register access rules here to avoid
	 * full EMAC reset.
	 *
	 * There is a real problem with EMAC4 core if we use MWSW_001 bit
	 * in MR1 register and do a full EMAC reset.
	 * One TX BD status update is delayed and, after EMAC reset, it
	 * never happens, resulting in TX hung (it'll be recovered by TX
	 * timeout handler eventually, but this is just gross).
	 * So we either have to do full TX reset or try to cheat here :)
	 *
	 * The only required change is to RX mode register, so I *think* all
	 * we need is just to stop RX channel. This seems to work on all
	 * tested SoCs.                                                --ebs
	 *
	 * If we need the full reset, we might just trigger the workqueue
	 * and do it async... a bit nasty but should work --BenH
	 */
	dev->mcast_pending = 0;
	emac_rx_disable(dev);
	if (rmr & EMAC_RMR_MAE)
		emac_hash_mc(dev);
	out_be32(&p->rmr, rmr);
	emac_rx_enable(dev);
}
984
/* Tx lock BH */
/* ndo_set_rx_mode hook.  When filter updates are currently deferred
 * (dev->no_mcast), only record that one is pending; otherwise apply it
 * immediately under link_lock.
 */
static void emac_set_multicast_list(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);

	DBG(dev, "multicast" NL);

	BUG_ON(!netif_running(dev->ndev));

	if (dev->no_mcast) {
		dev->mcast_pending = 1;
		return;
	}

	mutex_lock(&dev->link_lock);
	__emac_set_multicast_list(dev);
	mutex_unlock(&dev->link_lock);
}
1003
/* ndo_set_mac_address hook: program a new station address into the
 * IAHR/IALR registers (IAHR takes the top two address bytes, IALR the
 * remaining four).  RX/TX are briefly disabled around the register
 * update; link_lock keeps this from racing with link/reset work.
 * Returns 0 or -EADDRNOTAVAIL for an invalid Ethernet address.
 */
static int emac_set_mac_address(struct net_device *ndev, void *sa)
{
	struct emac_instance *dev = netdev_priv(ndev);
	struct sockaddr *addr = sa;
	struct emac_regs __iomem *p = dev->emacp;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	mutex_lock(&dev->link_lock);

	memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);

	emac_rx_disable(dev);
	emac_tx_disable(dev);
	out_be32(&p->iahr, (ndev->dev_addr[0] << 8) | ndev->dev_addr[1]);
	out_be32(&p->ialr, (ndev->dev_addr[2] << 24) |
		(ndev->dev_addr[3] << 16) | (ndev->dev_addr[4] << 8) |
		ndev->dev_addr[5]);
	emac_tx_enable(dev);
	emac_rx_enable(dev);

	mutex_unlock(&dev->link_lock);

	return 0;
}
1030
/* Rebuild the RX ring for @new_mtu.  Any packets still pending in the
 * ring (including a partially assembled scatter-gather packet) are
 * dropped.  New, larger skbs are only allocated when the new MTU needs
 * them.  On -ENOMEM the "oom" path restarts RX with whatever buffers
 * are currently in place.  Called under rtnl from emac_change_mtu().
 */
static int emac_resize_rx_ring(struct emac_instance *dev, int new_mtu)
{
	int rx_sync_size = emac_rx_sync_size(new_mtu);
	int rx_skb_size = emac_rx_skb_size(new_mtu);
	int i, ret = 0;
	int mr1_jumbo_bit_change = 0;

	/* Quiesce the interface and the RX channel before touching BDs */
	mutex_lock(&dev->link_lock);
	emac_netif_stop(dev);
	emac_rx_disable(dev);
	mal_disable_rx_channel(dev->mal, dev->mal_rx_chan);

	if (dev->rx_sg_skb) {
		++dev->estats.rx_dropped_resize;
		dev_kfree_skb(dev->rx_sg_skb);
		dev->rx_sg_skb = NULL;
	}

	/* Make a first pass over RX ring and mark BDs ready, dropping
	 * non-processed packets on the way. We need this as a separate pass
	 * to simplify error recovery in the case of allocation failure later.
	 */
	for (i = 0; i < NUM_RX_BUFF; ++i) {
		if (dev->rx_desc[i].ctrl & MAL_RX_CTRL_FIRST)
			++dev->estats.rx_dropped_resize;

		dev->rx_desc[i].data_len = 0;
		dev->rx_desc[i].ctrl = MAL_RX_CTRL_EMPTY |
		    (i == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
	}

	/* Reallocate RX ring only if bigger skb buffers are required */
	if (rx_skb_size <= dev->rx_skb_size)
		goto skip;

	/* Second pass, allocate new skbs */
	for (i = 0; i < NUM_RX_BUFF; ++i) {
		struct sk_buff *skb = alloc_skb(rx_skb_size, GFP_ATOMIC);
		if (!skb) {
			ret = -ENOMEM;
			goto oom;
		}

		BUG_ON(!dev->rx_skb[i]);
		dev_kfree_skb(dev->rx_skb[i]);

		/* DMA mapping is offset by 2 to match the reserved
		 * headroom (same scheme as emac_alloc_rx_skb()) */
		skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
		dev->rx_desc[i].data_ptr =
		    dma_map_single(&dev->ofdev->dev, skb->data - 2, rx_sync_size,
				   DMA_FROM_DEVICE) + 2;
		dev->rx_skb[i] = skb;
	}
 skip:
	/* Check if we need to change "Jumbo" bit in MR1 */
	if (emac_has_feature(dev, EMAC_APM821XX_REQ_JUMBO_FRAME_SIZE)) {
		mr1_jumbo_bit_change = (new_mtu > ETH_DATA_LEN) ||
				(dev->ndev->mtu > ETH_DATA_LEN);
	} else {
		mr1_jumbo_bit_change = (new_mtu > ETH_DATA_LEN) ^
				(dev->ndev->mtu > ETH_DATA_LEN);
	}

	if (mr1_jumbo_bit_change) {
		/* This is to prevent starting RX channel in emac_rx_enable() */
		set_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);

		dev->ndev->mtu = new_mtu;
		emac_full_tx_reset(dev);
	}

	mal_set_rcbs(dev->mal, dev->mal_rx_chan, emac_rx_size(new_mtu));
 oom:
	/* Restart RX */
	clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
	dev->rx_slot = 0;
	mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
	emac_rx_enable(dev);
	emac_netif_start(dev);
	mutex_unlock(&dev->link_lock);

	return ret;
}
1113
1114 /* Process ctx, rtnl_lock semaphore */
1115 static int emac_change_mtu(struct net_device *ndev, int new_mtu)
1116 {
1117         struct emac_instance *dev = netdev_priv(ndev);
1118         int ret = 0;
1119
1120         if (new_mtu < EMAC_MIN_MTU || new_mtu > dev->max_mtu)
1121                 return -EINVAL;
1122
1123         DBG(dev, "change_mtu(%d)" NL, new_mtu);
1124
1125         if (netif_running(ndev)) {
1126                 /* Check if we really need to reinitialize RX ring */
1127                 if (emac_rx_skb_size(ndev->mtu) != emac_rx_skb_size(new_mtu))
1128                         ret = emac_resize_rx_ring(dev, new_mtu);
1129         }
1130
1131         if (!ret) {
1132                 ndev->mtu = new_mtu;
1133                 dev->rx_skb_size = emac_rx_skb_size(new_mtu);
1134                 dev->rx_sync_size = emac_rx_sync_size(new_mtu);
1135         }
1136
1137         return ret;
1138 }
1139
1140 static void emac_clean_tx_ring(struct emac_instance *dev)
1141 {
1142         int i;
1143
1144         for (i = 0; i < NUM_TX_BUFF; ++i) {
1145                 if (dev->tx_skb[i]) {
1146                         dev_kfree_skb(dev->tx_skb[i]);
1147                         dev->tx_skb[i] = NULL;
1148                         if (dev->tx_desc[i].ctrl & MAL_TX_CTRL_READY)
1149                                 ++dev->estats.tx_dropped;
1150                 }
1151                 dev->tx_desc[i].ctrl = 0;
1152                 dev->tx_desc[i].data_ptr = 0;
1153         }
1154 }
1155
1156 static void emac_clean_rx_ring(struct emac_instance *dev)
1157 {
1158         int i;
1159
1160         for (i = 0; i < NUM_RX_BUFF; ++i)
1161                 if (dev->rx_skb[i]) {
1162                         dev->rx_desc[i].ctrl = 0;
1163                         dev_kfree_skb(dev->rx_skb[i]);
1164                         dev->rx_skb[i] = NULL;
1165                         dev->rx_desc[i].data_ptr = 0;
1166                 }
1167
1168         if (dev->rx_sg_skb) {
1169                 dev_kfree_skb(dev->rx_sg_skb);
1170                 dev->rx_sg_skb = NULL;
1171         }
1172 }
1173
/* Allocate and DMA-map a fresh RX skb for ring slot @slot and hand the
 * descriptor back to the MAL.  The 2-byte offset applied to both the
 * reserve and the mapping is presumably to keep the IP header
 * word-aligned — TODO confirm against the RX completion path.
 * Returns 0 or -ENOMEM.
 */
static inline int emac_alloc_rx_skb(struct emac_instance *dev, int slot,
				    gfp_t flags)
{
	struct sk_buff *skb = alloc_skb(dev->rx_skb_size, flags);
	if (unlikely(!skb))
		return -ENOMEM;

	dev->rx_skb[slot] = skb;
	dev->rx_desc[slot].data_len = 0;

	skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
	dev->rx_desc[slot].data_ptr =
	    dma_map_single(&dev->ofdev->dev, skb->data - 2, dev->rx_sync_size,
			   DMA_FROM_DEVICE) + 2;
	/* Descriptor fields must be visible before it is marked EMPTY */
	wmb();
	dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
	    (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);

	return 0;
}
1194
1195 static void emac_print_link_status(struct emac_instance *dev)
1196 {
1197         if (netif_carrier_ok(dev->ndev))
1198                 printk(KERN_INFO "%s: link is up, %d %s%s\n",
1199                        dev->ndev->name, dev->phy.speed,
1200                        dev->phy.duplex == DUPLEX_FULL ? "FDX" : "HDX",
1201                        dev->phy.pause ? ", pause enabled" :
1202                        dev->phy.asym_pause ? ", asymmetric pause enabled" : "");
1203         else
1204                 printk(KERN_INFO "%s: link is down\n", dev->ndev->name);
1205 }
1206
/* Process ctx, rtnl_lock semaphore */
/* ndo_open hook: request the error IRQ, allocate the RX ring, start
 * PHY link polling (or force carrier on when there is no PHY address),
 * then configure the EMAC and enable its MAL channels.  dev->opened is
 * set under link_lock so link_work/reset_work observe it consistently.
 * Returns 0, the request_irq() error, or -ENOMEM.
 */
static int emac_open(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	int err, i;

	DBG(dev, "open" NL);

	/* Setup error IRQ handler */
	err = request_irq(dev->emac_irq, emac_irq, 0, "EMAC", dev);
	if (err) {
		printk(KERN_ERR "%s: failed to request IRQ %d\n",
		       ndev->name, dev->emac_irq);
		return err;
	}

	/* Allocate RX ring */
	for (i = 0; i < NUM_RX_BUFF; ++i)
		if (emac_alloc_rx_skb(dev, i, GFP_KERNEL)) {
			printk(KERN_ERR "%s: failed to allocate RX ring\n",
			       ndev->name);
			goto oom;
		}

	dev->tx_cnt = dev->tx_slot = dev->ack_slot = dev->rx_slot = 0;
	clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
	dev->rx_sg_skb = NULL;

	mutex_lock(&dev->link_lock);
	dev->opened = 1;

	/* Start PHY polling now.
	 */
	if (dev->phy.address >= 0) {
		int link_poll_interval;
		if (dev->phy.def->ops->poll_link(&dev->phy)) {
			dev->phy.def->ops->read_link(&dev->phy);
			emac_rx_clk_default(dev);
			netif_carrier_on(dev->ndev);
			link_poll_interval = PHY_POLL_LINK_ON;
		} else {
			emac_rx_clk_tx(dev);
			netif_carrier_off(dev->ndev);
			link_poll_interval = PHY_POLL_LINK_OFF;
		}
		dev->link_polling = 1;
		/* publish link_polling before the work item can run */
		wmb();
		schedule_delayed_work(&dev->link_work, link_poll_interval);
		emac_print_link_status(dev);
	} else
		netif_carrier_on(dev->ndev);

	/* Required for Pause packet support in EMAC */
	dev_mc_add_global(ndev, default_mcast_addr);

	emac_configure(dev);
	mal_poll_add(dev->mal, &dev->commac);
	mal_enable_tx_channel(dev->mal, dev->mal_tx_chan);
	mal_set_rcbs(dev->mal, dev->mal_rx_chan, emac_rx_size(ndev->mtu));
	mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
	emac_tx_enable(dev);
	emac_rx_enable(dev);
	emac_netif_start(dev);

	mutex_unlock(&dev->link_lock);

	return 0;
 oom:
	emac_clean_rx_ring(dev);
	free_irq(dev->emac_irq, dev);

	return -ENOMEM;
}
1280
/* BHs disabled */
#if 0
/* Dead code (compiled out): compares the link parameters currently
 * programmed in MR1 against the cached PHY state.  Kept for reference.
 */
static int emac_link_differs(struct emac_instance *dev)
{
	u32 r = in_be32(&dev->emacp->mr1);

	int duplex = r & EMAC_MR1_FDE ? DUPLEX_FULL : DUPLEX_HALF;
	int speed, pause, asym_pause;

	if (r & EMAC_MR1_MF_1000)
		speed = SPEED_1000;
	else if (r & EMAC_MR1_MF_100)
		speed = SPEED_100;
	else
		speed = SPEED_10;

	switch (r & (EMAC_MR1_EIFC | EMAC_MR1_APP)) {
	case (EMAC_MR1_EIFC | EMAC_MR1_APP):
		pause = 1;
		asym_pause = 0;
		break;
	case EMAC_MR1_APP:
		pause = 0;
		asym_pause = 1;
		break;
	default:
		pause = asym_pause = 0;
	}
	return speed != dev->phy.speed || duplex != dev->phy.duplex ||
	    pause != dev->phy.pause || asym_pause != dev->phy.asym_pause;
}
#endif
1313
/* Delayed-work PHY link poller.  While the interface is open it polls
 * link state and, on a transition: link up — reads the new parameters
 * and does a full TX reset to apply them; link down — drops carrier
 * and reinitializes the EMAC.  Reschedules itself with an interval
 * that depends on the current link state.
 */
static void emac_link_timer(struct work_struct *work)
{
	struct emac_instance *dev =
		container_of(to_delayed_work(work),
			     struct emac_instance, link_work);
	int link_poll_interval;

	mutex_lock(&dev->link_lock);
	DBG2(dev, "link timer" NL);

	/* Interface closed: do not reschedule */
	if (!dev->opened)
		goto bail;

	if (dev->phy.def->ops->poll_link(&dev->phy)) {
		if (!netif_carrier_ok(dev->ndev)) {
			emac_rx_clk_default(dev);
			/* Get new link parameters */
			dev->phy.def->ops->read_link(&dev->phy);

			netif_carrier_on(dev->ndev);
			emac_netif_stop(dev);
			emac_full_tx_reset(dev);
			emac_netif_start(dev);
			emac_print_link_status(dev);
		}
		link_poll_interval = PHY_POLL_LINK_ON;
	} else {
		if (netif_carrier_ok(dev->ndev)) {
			emac_rx_clk_tx(dev);
			netif_carrier_off(dev->ndev);
			netif_tx_disable(dev->ndev);
			emac_reinitialize(dev);
			emac_print_link_status(dev);
		}
		link_poll_interval = PHY_POLL_LINK_OFF;
	}
	schedule_delayed_work(&dev->link_work, link_poll_interval);
 bail:
	mutex_unlock(&dev->link_lock);
}
1354
/* Force the link poller to re-evaluate the link promptly (used after
 * configuration changes).  Carrier is dropped first so the next poll
 * is treated as a transition.
 * NOTE(review): the smp_rmb() appears to pair with the wmb() in
 * emac_open() that publishes link_polling — confirm.
 */
static void emac_force_link_update(struct emac_instance *dev)
{
	netif_carrier_off(dev->ndev);
	smp_rmb();
	if (dev->link_polling) {
		cancel_delayed_work_sync(&dev->link_work);
		if (dev->link_polling)
			schedule_delayed_work(&dev->link_work,  PHY_POLL_LINK_OFF);
	}
}
1365
/* Process ctx, rtnl_lock semaphore */
/* ndo_stop hook: stop the link poller, mark the device closed under
 * link_lock, then quiesce the EMAC and its MAL channels, free both
 * rings and release the error IRQ.
 */
static int emac_close(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);

	DBG(dev, "close" NL);

	if (dev->phy.address >= 0) {
		dev->link_polling = 0;
		cancel_delayed_work_sync(&dev->link_work);
	}
	mutex_lock(&dev->link_lock);
	emac_netif_stop(dev);
	dev->opened = 0;
	mutex_unlock(&dev->link_lock);

	emac_rx_disable(dev);
	emac_tx_disable(dev);
	mal_disable_rx_channel(dev->mal, dev->mal_rx_chan);
	mal_disable_tx_channel(dev->mal, dev->mal_tx_chan);
	mal_poll_del(dev->mal, &dev->commac);

	emac_clean_tx_ring(dev);
	emac_clean_rx_ring(dev);

	free_irq(dev->emac_irq, dev);

	netif_carrier_off(ndev);

	return 0;
}
1397
1398 static inline u16 emac_tx_csum(struct emac_instance *dev,
1399                                struct sk_buff *skb)
1400 {
1401         if (emac_has_feature(dev, EMAC_FTR_HAS_TAH) &&
1402                 (skb->ip_summed == CHECKSUM_PARTIAL)) {
1403                 ++dev->stats.tx_packets_csum;
1404                 return EMAC_TX_CTRL_TAH_CSUM;
1405         }
1406         return 0;
1407 }
1408
/* Complete a transmit: kick the EMAC via TMR0, stop the queue when the
 * ring just became full, and update TX statistics.
 * Always returns NETDEV_TX_OK.
 */
static inline int emac_xmit_finish(struct emac_instance *dev, int len)
{
	struct emac_regs __iomem *p = dev->emacp;
	struct net_device *ndev = dev->ndev;

	/* Send the packet out. If the if makes a significant perf
	 * difference, then we can store the TMR0 value in "dev"
	 * instead
	 */
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		out_be32(&p->tmr0, EMAC4_TMR0_XMIT);
	else
		out_be32(&p->tmr0, EMAC_TMR0_XMIT);

	if (unlikely(++dev->tx_cnt == NUM_TX_BUFF)) {
		netif_stop_queue(ndev);
		DBG2(dev, "stopped TX queue" NL);
	}

	netif_trans_update(ndev);
	++dev->stats.tx_packets;
	dev->stats.tx_bytes += len;

	return NETDEV_TX_OK;
}
1434
/* Tx lock BH */
/* ndo_start_xmit: single-descriptor transmit of a linear skb.  The
 * descriptor fields are made visible (wmb) before the ctrl word with
 * MAL_TX_CTRL_READY hands the slot to the MAL.
 */
static int emac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	unsigned int len = skb->len;
	int slot;

	u16 ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
	    MAL_TX_CTRL_LAST | emac_tx_csum(dev, skb);

	slot = dev->tx_slot++;
	if (dev->tx_slot == NUM_TX_BUFF) {
		dev->tx_slot = 0;
		ctrl |= MAL_TX_CTRL_WRAP;
	}

	DBG2(dev, "xmit(%u) %d" NL, len, slot);

	dev->tx_skb[slot] = skb;
	dev->tx_desc[slot].data_ptr = dma_map_single(&dev->ofdev->dev,
						     skb->data, len,
						     DMA_TO_DEVICE);
	dev->tx_desc[slot].data_len = (u16) len;
	wmb();
	dev->tx_desc[slot].ctrl = ctrl;

	return emac_xmit_finish(dev, len);
}
1463
/* Fill consecutive TX descriptors (starting after @slot) for a DMA
 * buffer @pd of @len bytes, carving it into MAL_MAX_TX_SIZE chunks.
 * @last marks the final buffer of the frame; its last chunk gets
 * MAL_TX_CTRL_LAST.  Returns the last slot used.  The first descriptor
 * of the frame (which carries the READY bit that starts the chain) is
 * written by the caller only after the whole chain is built — see
 * emac_start_xmit_sg().
 */
static inline int emac_xmit_split(struct emac_instance *dev, int slot,
				  u32 pd, int len, int last, u16 base_ctrl)
{
	while (1) {
		u16 ctrl = base_ctrl;
		int chunk = min(len, MAL_MAX_TX_SIZE);
		len -= chunk;

		slot = (slot + 1) % NUM_TX_BUFF;

		if (last && !len)
			ctrl |= MAL_TX_CTRL_LAST;
		if (slot == NUM_TX_BUFF - 1)
			ctrl |= MAL_TX_CTRL_WRAP;

		dev->tx_skb[slot] = NULL;
		dev->tx_desc[slot].data_ptr = pd;
		dev->tx_desc[slot].data_len = (u16) chunk;
		dev->tx_desc[slot].ctrl = ctrl;
		++dev->tx_cnt;

		if (!len)
			break;

		pd += chunk;
	}
	return slot;
}
1492
/* Tx lock BH disabled (SG version for TAH equipped EMACs) */
/* Scatter-gather ndo_start_xmit: spreads the skb head and its page
 * fragments over multiple descriptors.  The first descriptor's ctrl
 * word (with READY) is written last, after a wmb(), so the MAL never
 * sees a half-built chain.  If the initial slot estimate proves too
 * optimistic mid-frame, the partially built chain is rolled back and
 * NETDEV_TX_BUSY returned with the queue stopped.
 */
static int emac_start_xmit_sg(struct sk_buff *skb, struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int len = skb->len, chunk;
	int slot, i;
	u16 ctrl;
	u32 pd;

	/* This is common "fast" path */
	if (likely(!nr_frags && len <= MAL_MAX_TX_SIZE))
		return emac_start_xmit(skb, ndev);

	len -= skb->data_len;

	/* Note, this is only an *estimation*, we can still run out of empty
	 * slots because of the additional fragmentation into
	 * MAL_MAX_TX_SIZE-sized chunks
	 */
	if (unlikely(dev->tx_cnt + nr_frags + mal_tx_chunks(len) > NUM_TX_BUFF))
		goto stop_queue;

	ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
	    emac_tx_csum(dev, skb);
	slot = dev->tx_slot;

	/* skb data */
	dev->tx_skb[slot] = NULL;
	chunk = min(len, MAL_MAX_TX_SIZE);
	dev->tx_desc[slot].data_ptr = pd =
	    dma_map_single(&dev->ofdev->dev, skb->data, len, DMA_TO_DEVICE);
	dev->tx_desc[slot].data_len = (u16) chunk;
	len -= chunk;
	if (unlikely(len))
		slot = emac_xmit_split(dev, slot, pd + chunk, len, !nr_frags,
				       ctrl);
	/* skb fragments */
	for (i = 0; i < nr_frags; ++i) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		len = skb_frag_size(frag);

		if (unlikely(dev->tx_cnt + mal_tx_chunks(len) >= NUM_TX_BUFF))
			goto undo_frame;

		pd = skb_frag_dma_map(&dev->ofdev->dev, frag, 0, len,
				      DMA_TO_DEVICE);

		slot = emac_xmit_split(dev, slot, pd, len, i == nr_frags - 1,
				       ctrl);
	}

	DBG2(dev, "xmit_sg(%u) %d - %d" NL, skb->len, dev->tx_slot, slot);

	/* Attach skb to the last slot so we don't release it too early */
	dev->tx_skb[slot] = skb;

	/* Send the packet out */
	if (dev->tx_slot == NUM_TX_BUFF - 1)
		ctrl |= MAL_TX_CTRL_WRAP;
	wmb();
	dev->tx_desc[dev->tx_slot].ctrl = ctrl;
	dev->tx_slot = (slot + 1) % NUM_TX_BUFF;

	return emac_xmit_finish(dev, skb->len);

 undo_frame:
	/* Well, too bad. Our previous estimation was overly optimistic.
	 * Undo everything.
	 */
	while (slot != dev->tx_slot) {
		dev->tx_desc[slot].ctrl = 0;
		--dev->tx_cnt;
		if (--slot < 0)
			slot = NUM_TX_BUFF - 1;
	}
	++dev->estats.tx_undo;

 stop_queue:
	netif_stop_queue(ndev);
	DBG2(dev, "stopped TX queue" NL);
	return NETDEV_TX_BUSY;
}
1576
1577 /* Tx lock BHs */
1578 static void emac_parse_tx_error(struct emac_instance *dev, u16 ctrl)
1579 {
1580         struct emac_error_stats *st = &dev->estats;
1581
1582         DBG(dev, "BD TX error %04x" NL, ctrl);
1583
1584         ++st->tx_bd_errors;
1585         if (ctrl & EMAC_TX_ST_BFCS)
1586                 ++st->tx_bd_bad_fcs;
1587         if (ctrl & EMAC_TX_ST_LCS)
1588                 ++st->tx_bd_carrier_loss;
1589         if (ctrl & EMAC_TX_ST_ED)
1590                 ++st->tx_bd_excessive_deferral;
1591         if (ctrl & EMAC_TX_ST_EC)
1592                 ++st->tx_bd_excessive_collisions;
1593         if (ctrl & EMAC_TX_ST_LC)
1594                 ++st->tx_bd_late_collision;
1595         if (ctrl & EMAC_TX_ST_MC)
1596                 ++st->tx_bd_multple_collisions;
1597         if (ctrl & EMAC_TX_ST_SC)
1598                 ++st->tx_bd_single_collision;
1599         if (ctrl & EMAC_TX_ST_UR)
1600                 ++st->tx_bd_underrun;
1601         if (ctrl & EMAC_TX_ST_SQE)
1602                 ++st->tx_bd_sqe;
1603 }
1604
/* MAL TX poll callback: reap descriptors the MAL has completed (READY
 * bit cleared), free their skbs, record any error status, and wake the
 * queue once enough slots are free.  Runs under netif_tx_lock_bh so
 * tx_cnt/ack_slot stay coherent with the xmit path.
 */
static void emac_poll_tx(void *param)
{
	struct emac_instance *dev = param;
	u32 bad_mask;

	DBG2(dev, "poll_tx, %d %d" NL, dev->tx_cnt, dev->ack_slot);

	/* TAH-equipped EMACs report a different set of error bits */
	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
		bad_mask = EMAC_IS_BAD_TX_TAH;
	else
		bad_mask = EMAC_IS_BAD_TX;

	netif_tx_lock_bh(dev->ndev);
	if (dev->tx_cnt) {
		u16 ctrl;
		int slot = dev->ack_slot, n = 0;
	again:
		ctrl = dev->tx_desc[slot].ctrl;
		if (!(ctrl & MAL_TX_CTRL_READY)) {
			struct sk_buff *skb = dev->tx_skb[slot];
			++n;

			/* Split frames keep their skb only on the last
			 * slot, so intermediate slots hold NULL */
			if (skb) {
				dev_kfree_skb(skb);
				dev->tx_skb[slot] = NULL;
			}
			slot = (slot + 1) % NUM_TX_BUFF;

			if (unlikely(ctrl & bad_mask))
				emac_parse_tx_error(dev, ctrl);

			if (--dev->tx_cnt)
				goto again;
		}
		if (n) {
			dev->ack_slot = slot;
			if (netif_queue_stopped(dev->ndev) &&
			    dev->tx_cnt < EMAC_TX_WAKEUP_THRESH)
				netif_wake_queue(dev->ndev);

			DBG2(dev, "tx %d pkts" NL, n);
		}
	}
	netif_tx_unlock_bh(dev->ndev);
}
1650
/* Give RX ring slot @slot back to the MAL, reusing its current skb.
 * When the device wrote data (@len != 0) the buffer is mapped again
 * rather than synced — see the comment at the top of this file about
 * the intentional absence of dma_unmap calls.
 */
static inline void emac_recycle_rx_skb(struct emac_instance *dev, int slot,
				       int len)
{
	struct sk_buff *skb = dev->rx_skb[slot];

	DBG2(dev, "recycle %d %d" NL, slot, len);

	if (len)
		dma_map_single(&dev->ofdev->dev, skb->data - 2,
			       EMAC_DMA_ALIGN(len + 2), DMA_FROM_DEVICE);

	dev->rx_desc[slot].data_len = 0;
	/* Descriptor fields must be visible before it is marked EMPTY */
	wmb();
	dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
	    (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
}
1667
1668 static void emac_parse_rx_error(struct emac_instance *dev, u16 ctrl)
1669 {
1670         struct emac_error_stats *st = &dev->estats;
1671
1672         DBG(dev, "BD RX error %04x" NL, ctrl);
1673
1674         ++st->rx_bd_errors;
1675         if (ctrl & EMAC_RX_ST_OE)
1676                 ++st->rx_bd_overrun;
1677         if (ctrl & EMAC_RX_ST_BP)
1678                 ++st->rx_bd_bad_packet;
1679         if (ctrl & EMAC_RX_ST_RP)
1680                 ++st->rx_bd_runt_packet;
1681         if (ctrl & EMAC_RX_ST_SE)
1682                 ++st->rx_bd_short_event;
1683         if (ctrl & EMAC_RX_ST_AE)
1684                 ++st->rx_bd_alignment_error;
1685         if (ctrl & EMAC_RX_ST_BFCS)
1686                 ++st->rx_bd_bad_fcs;
1687         if (ctrl & EMAC_RX_ST_PTL)
1688                 ++st->rx_bd_packet_too_long;
1689         if (ctrl & EMAC_RX_ST_ORE)
1690                 ++st->rx_bd_out_of_range;
1691         if (ctrl & EMAC_RX_ST_IRE)
1692                 ++st->rx_bd_in_range;
1693 }
1694
1695 static inline void emac_rx_csum(struct emac_instance *dev,
1696                                 struct sk_buff *skb, u16 ctrl)
1697 {
1698 #ifdef CONFIG_IBM_EMAC_TAH
1699         if (!ctrl && dev->tah_dev) {
1700                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1701                 ++dev->stats.rx_packets_csum;
1702         }
1703 #endif
1704 }
1705
1706 static inline int emac_rx_sg_append(struct emac_instance *dev, int slot)
1707 {
1708         if (likely(dev->rx_sg_skb != NULL)) {
1709                 int len = dev->rx_desc[slot].data_len;
1710                 int tot_len = dev->rx_sg_skb->len + len;
1711
1712                 if (unlikely(tot_len + 2 > dev->rx_skb_size)) {
1713                         ++dev->estats.rx_dropped_mtu;
1714                         dev_kfree_skb(dev->rx_sg_skb);
1715                         dev->rx_sg_skb = NULL;
1716                 } else {
1717                         memcpy(skb_tail_pointer(dev->rx_sg_skb),
1718                                          dev->rx_skb[slot]->data, len);
1719                         skb_put(dev->rx_sg_skb, len);
1720                         emac_recycle_rx_skb(dev, slot, len);
1721                         return 0;
1722                 }
1723         }
1724         emac_recycle_rx_skb(dev, slot, 0);
1725         return -1;
1726 }
1727
/* NAPI poll context */
/*
 * Main RX processing loop, run from the MAL NAPI poll handler.
 * Walks the RX ring from dev->rx_slot, handing up to @budget completed
 * descriptors to the stack.  Handles three descriptor flavours:
 *   - single-BD packets (the common path),
 *   - scatter/gather chains (FIRST/.../LAST), assembled via rx_sg_skb,
 *   - error descriptors, which are counted and recycled.
 * Returns the number of descriptors consumed.
 */
static int emac_poll_rx(void *param, int budget)
{
	struct emac_instance *dev = param;
	int slot = dev->rx_slot, received = 0;

	DBG2(dev, "poll_rx(%d)" NL, budget);

 again:
	while (budget > 0) {
		int len;
		struct sk_buff *skb;
		u16 ctrl = dev->rx_desc[slot].ctrl;

		/* Descriptor still owned by the MAL: ring is drained */
		if (ctrl & MAL_RX_CTRL_EMPTY)
			break;

		skb = dev->rx_skb[slot];
		/* Read data_len only after seeing ctrl non-EMPTY */
		mb();
		len = dev->rx_desc[slot].data_len;

		if (unlikely(!MAL_IS_SINGLE_RX(ctrl)))
			goto sg;

		/* Single-descriptor packet: check error bits first.
		 * A TAH bad-checksum indication alone is not fatal; the
		 * packet is still delivered (without CHECKSUM_UNNECESSARY). */
		ctrl &= EMAC_BAD_RX_MASK;
		if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
			emac_parse_rx_error(dev, ctrl);
			++dev->estats.rx_dropped_error;
			emac_recycle_rx_skb(dev, slot, 0);
			len = 0;
			goto next;
		}

		if (len < ETH_HLEN) {
			++dev->estats.rx_dropped_stack;
			emac_recycle_rx_skb(dev, slot, len);
			goto next;
		}

		/* Small packets are copied into a fresh skb so the big RX
		 * buffer can be recycled immediately; larger ones are passed
		 * up as-is and a replacement buffer is allocated. */
		if (len && len < EMAC_RX_COPY_THRESH) {
			struct sk_buff *copy_skb =
			    alloc_skb(len + EMAC_RX_SKB_HEADROOM + 2, GFP_ATOMIC);
			if (unlikely(!copy_skb))
				goto oom;

			skb_reserve(copy_skb, EMAC_RX_SKB_HEADROOM + 2);
			/* -2/+2: copy including the IP alignment pad */
			memcpy(copy_skb->data - 2, skb->data - 2, len + 2);
			emac_recycle_rx_skb(dev, slot, len);
			skb = copy_skb;
		} else if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC)))
			goto oom;

		skb_put(skb, len);
	push_packet:
		skb->protocol = eth_type_trans(skb, dev->ndev);
		emac_rx_csum(dev, skb, ctrl);

		if (unlikely(netif_receive_skb(skb) == NET_RX_DROP))
			++dev->estats.rx_dropped_stack;
	next:
		++dev->stats.rx_packets;
	skip:
		dev->stats.rx_bytes += len;
		slot = (slot + 1) % NUM_RX_BUFF;
		--budget;
		++received;
		continue;
	sg:
		/* Scatter/gather handling: FIRST starts a new rx_sg_skb,
		 * middle/LAST descriptors are appended to it. */
		if (ctrl & MAL_RX_CTRL_FIRST) {
			BUG_ON(dev->rx_sg_skb);
			if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC))) {
				DBG(dev, "rx OOM %d" NL, slot);
				++dev->estats.rx_dropped_oom;
				emac_recycle_rx_skb(dev, slot, 0);
			} else {
				dev->rx_sg_skb = skb;
				skb_put(skb, len);
			}
		} else if (!emac_rx_sg_append(dev, slot) &&
			   (ctrl & MAL_RX_CTRL_LAST)) {

			/* Chain complete: validate and push the whole skb */
			skb = dev->rx_sg_skb;
			dev->rx_sg_skb = NULL;

			ctrl &= EMAC_BAD_RX_MASK;
			if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
				emac_parse_rx_error(dev, ctrl);
				++dev->estats.rx_dropped_error;
				dev_kfree_skb(skb);
				len = 0;
			} else
				goto push_packet;
		}
		goto skip;
	oom:
		DBG(dev, "rx OOM %d" NL, slot);
		/* Drop the packet and recycle skb */
		++dev->estats.rx_dropped_oom;
		emac_recycle_rx_skb(dev, slot, 0);
		goto next;
	}

	if (received) {
		DBG2(dev, "rx %d BDs" NL, received);
		dev->rx_slot = slot;
	}

	/* If the channel was stopped (RXDE) and we still have budget left,
	 * either keep draining (new work appeared) or restart the channel
	 * from slot 0, dropping any half-assembled SG packet. */
	if (unlikely(budget && test_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags))) {
		mb();
		if (!(dev->rx_desc[slot].ctrl & MAL_RX_CTRL_EMPTY)) {
			DBG2(dev, "rx restart" NL);
			received = 0;
			goto again;
		}

		if (dev->rx_sg_skb) {
			DBG2(dev, "dropping partial rx packet" NL);
			++dev->estats.rx_dropped_error;
			dev_kfree_skb(dev->rx_sg_skb);
			dev->rx_sg_skb = NULL;
		}

		clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
		mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
		emac_rx_enable(dev);
		dev->rx_slot = 0;
	}
	return received;
}
1857
1858 /* NAPI poll context */
1859 static int emac_peek_rx(void *param)
1860 {
1861         struct emac_instance *dev = param;
1862
1863         return !(dev->rx_desc[dev->rx_slot].ctrl & MAL_RX_CTRL_EMPTY);
1864 }
1865
1866 /* NAPI poll context */
1867 static int emac_peek_rx_sg(void *param)
1868 {
1869         struct emac_instance *dev = param;
1870
1871         int slot = dev->rx_slot;
1872         while (1) {
1873                 u16 ctrl = dev->rx_desc[slot].ctrl;
1874                 if (ctrl & MAL_RX_CTRL_EMPTY)
1875                         return 0;
1876                 else if (ctrl & MAL_RX_CTRL_LAST)
1877                         return 1;
1878
1879                 slot = (slot + 1) % NUM_RX_BUFF;
1880
1881                 /* I'm just being paranoid here :) */
1882                 if (unlikely(slot == dev->rx_slot))
1883                         return 0;
1884         }
1885 }
1886
/* Hard IRQ */
/*
 * RX descriptor-error callback from the MAL: count the stall and kick off
 * an asynchronous RX disable; emac_poll_rx() restarts the channel later.
 */
static void emac_rxde(void *param)
{
	struct emac_instance *dev = param;

	++dev->estats.rx_stopped;
	emac_rx_disable_async(dev);
}
1895
/* Hard IRQ */
/*
 * EMAC interrupt handler: read and acknowledge the interrupt status
 * register, then translate each status bit into its error counter.
 * All real packet work happens in NAPI; this handler only does accounting.
 */
static irqreturn_t emac_irq(int irq, void *dev_instance)
{
	struct emac_instance *dev = dev_instance;
	struct emac_regs __iomem *p = dev->emacp;
	struct emac_error_stats *st = &dev->estats;
	u32 isr;

	spin_lock(&dev->lock);

	/* Write-one-to-clear: ack exactly the bits we just read */
	isr = in_be32(&p->isr);
	out_be32(&p->isr, isr);

	DBG(dev, "isr = %08x" NL, isr);

	if (isr & EMAC4_ISR_TXPE)
		++st->tx_parity;
	if (isr & EMAC4_ISR_RXPE)
		++st->rx_parity;
	if (isr & EMAC4_ISR_TXUE)
		++st->tx_underrun;
	if (isr & EMAC4_ISR_RXOE)
		++st->rx_fifo_overrun;
	if (isr & EMAC_ISR_OVR)
		++st->rx_overrun;
	if (isr & EMAC_ISR_BP)
		++st->rx_bad_packet;
	if (isr & EMAC_ISR_RP)
		++st->rx_runt_packet;
	if (isr & EMAC_ISR_SE)
		++st->rx_short_event;
	if (isr & EMAC_ISR_ALE)
		++st->rx_alignment_error;
	if (isr & EMAC_ISR_BFCS)
		++st->rx_bad_fcs;
	if (isr & EMAC_ISR_PTLE)
		++st->rx_packet_too_long;
	if (isr & EMAC_ISR_ORE)
		++st->rx_out_of_range;
	if (isr & EMAC_ISR_IRE)
		++st->rx_in_range;
	if (isr & EMAC_ISR_SQE)
		++st->tx_sqe;
	if (isr & EMAC_ISR_TE)
		++st->tx_errors;

	spin_unlock(&dev->lock);

	return IRQ_HANDLED;
}
1946
/*
 * ndo_get_stats: fold the driver's detailed emac_stats/emac_error_stats
 * counters into the "legacy" struct net_device_stats expected by the core.
 * The fold is done under dev->lock so the snapshot is consistent with the
 * IRQ-path counter updates.
 */
static struct net_device_stats *emac_stats(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	struct emac_stats *st = &dev->stats;
	struct emac_error_stats *est = &dev->estats;
	struct net_device_stats *nst = &dev->nstats;
	unsigned long flags;

	DBG2(dev, "stats" NL);

	/* Compute "legacy" statistics */
	spin_lock_irqsave(&dev->lock, flags);
	nst->rx_packets = (unsigned long)st->rx_packets;
	nst->rx_bytes = (unsigned long)st->rx_bytes;
	nst->tx_packets = (unsigned long)st->tx_packets;
	nst->tx_bytes = (unsigned long)st->tx_bytes;
	/* Each legacy field aggregates every detailed counter that maps
	 * onto it, from both BD-level (rx_bd_*) and ISR-level sources */
	nst->rx_dropped = (unsigned long)(est->rx_dropped_oom +
					  est->rx_dropped_error +
					  est->rx_dropped_resize +
					  est->rx_dropped_mtu);
	nst->tx_dropped = (unsigned long)est->tx_dropped;

	nst->rx_errors = (unsigned long)est->rx_bd_errors;
	nst->rx_fifo_errors = (unsigned long)(est->rx_bd_overrun +
					      est->rx_fifo_overrun +
					      est->rx_overrun);
	nst->rx_frame_errors = (unsigned long)(est->rx_bd_alignment_error +
					       est->rx_alignment_error);
	nst->rx_crc_errors = (unsigned long)(est->rx_bd_bad_fcs +
					     est->rx_bad_fcs);
	nst->rx_length_errors = (unsigned long)(est->rx_bd_runt_packet +
						est->rx_bd_short_event +
						est->rx_bd_packet_too_long +
						est->rx_bd_out_of_range +
						est->rx_bd_in_range +
						est->rx_runt_packet +
						est->rx_short_event +
						est->rx_packet_too_long +
						est->rx_out_of_range +
						est->rx_in_range);

	nst->tx_errors = (unsigned long)(est->tx_bd_errors + est->tx_errors);
	nst->tx_fifo_errors = (unsigned long)(est->tx_bd_underrun +
					      est->tx_underrun);
	nst->tx_carrier_errors = (unsigned long)est->tx_bd_carrier_loss;
	nst->collisions = (unsigned long)(est->tx_bd_excessive_deferral +
					  est->tx_bd_excessive_collisions +
					  est->tx_bd_late_collision +
					  est->tx_bd_multple_collisions);
	spin_unlock_irqrestore(&dev->lock, flags);
	return nst;
}
1999
2000 static struct mal_commac_ops emac_commac_ops = {
2001         .poll_tx = &emac_poll_tx,
2002         .poll_rx = &emac_poll_rx,
2003         .peek_rx = &emac_peek_rx,
2004         .rxde = &emac_rxde,
2005 };
2006
2007 static struct mal_commac_ops emac_commac_sg_ops = {
2008         .poll_tx = &emac_poll_tx,
2009         .poll_rx = &emac_poll_rx,
2010         .peek_rx = &emac_peek_rx_sg,
2011         .rxde = &emac_rxde,
2012 };
2013
2014 /* Ethtool support */
2015 static int emac_ethtool_get_settings(struct net_device *ndev,
2016                                      struct ethtool_cmd *cmd)
2017 {
2018         struct emac_instance *dev = netdev_priv(ndev);
2019
2020         cmd->supported = dev->phy.features;
2021         cmd->port = PORT_MII;
2022         cmd->phy_address = dev->phy.address;
2023         cmd->transceiver =
2024             dev->phy.address >= 0 ? XCVR_EXTERNAL : XCVR_INTERNAL;
2025
2026         mutex_lock(&dev->link_lock);
2027         cmd->advertising = dev->phy.advertising;
2028         cmd->autoneg = dev->phy.autoneg;
2029         cmd->speed = dev->phy.speed;
2030         cmd->duplex = dev->phy.duplex;
2031         mutex_unlock(&dev->link_lock);
2032
2033         return 0;
2034 }
2035
2036 static int emac_ethtool_set_settings(struct net_device *ndev,
2037                                      struct ethtool_cmd *cmd)
2038 {
2039         struct emac_instance *dev = netdev_priv(ndev);
2040         u32 f = dev->phy.features;
2041
2042         DBG(dev, "set_settings(%d, %d, %d, 0x%08x)" NL,
2043             cmd->autoneg, cmd->speed, cmd->duplex, cmd->advertising);
2044
2045         /* Basic sanity checks */
2046         if (dev->phy.address < 0)
2047                 return -EOPNOTSUPP;
2048         if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE)
2049                 return -EINVAL;
2050         if (cmd->autoneg == AUTONEG_ENABLE && cmd->advertising == 0)
2051                 return -EINVAL;
2052         if (cmd->duplex != DUPLEX_HALF && cmd->duplex != DUPLEX_FULL)
2053                 return -EINVAL;
2054
2055         if (cmd->autoneg == AUTONEG_DISABLE) {
2056                 switch (cmd->speed) {
2057                 case SPEED_10:
2058                         if (cmd->duplex == DUPLEX_HALF &&
2059                             !(f & SUPPORTED_10baseT_Half))
2060                                 return -EINVAL;
2061                         if (cmd->duplex == DUPLEX_FULL &&
2062                             !(f & SUPPORTED_10baseT_Full))
2063                                 return -EINVAL;
2064                         break;
2065                 case SPEED_100:
2066                         if (cmd->duplex == DUPLEX_HALF &&
2067                             !(f & SUPPORTED_100baseT_Half))
2068                                 return -EINVAL;
2069                         if (cmd->duplex == DUPLEX_FULL &&
2070                             !(f & SUPPORTED_100baseT_Full))
2071                                 return -EINVAL;
2072                         break;
2073                 case SPEED_1000:
2074                         if (cmd->duplex == DUPLEX_HALF &&
2075                             !(f & SUPPORTED_1000baseT_Half))
2076                                 return -EINVAL;
2077                         if (cmd->duplex == DUPLEX_FULL &&
2078                             !(f & SUPPORTED_1000baseT_Full))
2079                                 return -EINVAL;
2080                         break;
2081                 default:
2082                         return -EINVAL;
2083                 }
2084
2085                 mutex_lock(&dev->link_lock);
2086                 dev->phy.def->ops->setup_forced(&dev->phy, cmd->speed,
2087                                                 cmd->duplex);
2088                 mutex_unlock(&dev->link_lock);
2089
2090         } else {
2091                 if (!(f & SUPPORTED_Autoneg))
2092                         return -EINVAL;
2093
2094                 mutex_lock(&dev->link_lock);
2095                 dev->phy.def->ops->setup_aneg(&dev->phy,
2096                                               (cmd->advertising & f) |
2097                                               (dev->phy.advertising &
2098                                                (ADVERTISED_Pause |
2099                                                 ADVERTISED_Asym_Pause)));
2100                 mutex_unlock(&dev->link_lock);
2101         }
2102         emac_force_link_update(dev);
2103
2104         return 0;
2105 }
2106
2107 static void emac_ethtool_get_ringparam(struct net_device *ndev,
2108                                        struct ethtool_ringparam *rp)
2109 {
2110         rp->rx_max_pending = rp->rx_pending = NUM_RX_BUFF;
2111         rp->tx_max_pending = rp->tx_pending = NUM_TX_BUFF;
2112 }
2113
2114 static void emac_ethtool_get_pauseparam(struct net_device *ndev,
2115                                         struct ethtool_pauseparam *pp)
2116 {
2117         struct emac_instance *dev = netdev_priv(ndev);
2118
2119         mutex_lock(&dev->link_lock);
2120         if ((dev->phy.features & SUPPORTED_Autoneg) &&
2121             (dev->phy.advertising & (ADVERTISED_Pause | ADVERTISED_Asym_Pause)))
2122                 pp->autoneg = 1;
2123
2124         if (dev->phy.duplex == DUPLEX_FULL) {
2125                 if (dev->phy.pause)
2126                         pp->rx_pause = pp->tx_pause = 1;
2127                 else if (dev->phy.asym_pause)
2128                         pp->tx_pause = 1;
2129         }
2130         mutex_unlock(&dev->link_lock);
2131 }
2132
2133 static int emac_get_regs_len(struct emac_instance *dev)
2134 {
2135                 return sizeof(struct emac_ethtool_regs_subhdr) +
2136                         sizeof(struct emac_regs);
2137 }
2138
2139 static int emac_ethtool_get_regs_len(struct net_device *ndev)
2140 {
2141         struct emac_instance *dev = netdev_priv(ndev);
2142         int size;
2143
2144         size = sizeof(struct emac_ethtool_regs_hdr) +
2145                 emac_get_regs_len(dev) + mal_get_regs_len(dev->mal);
2146         if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
2147                 size += zmii_get_regs_len(dev->zmii_dev);
2148         if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
2149                 size += rgmii_get_regs_len(dev->rgmii_dev);
2150         if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
2151                 size += tah_get_regs_len(dev->tah_dev);
2152
2153         return size;
2154 }
2155
2156 static void *emac_dump_regs(struct emac_instance *dev, void *buf)
2157 {
2158         struct emac_ethtool_regs_subhdr *hdr = buf;
2159
2160         hdr->index = dev->cell_index;
2161         if (emac_has_feature(dev, EMAC_FTR_EMAC4SYNC)) {
2162                 hdr->version = EMAC4SYNC_ETHTOOL_REGS_VER;
2163         } else if (emac_has_feature(dev, EMAC_FTR_EMAC4)) {
2164                 hdr->version = EMAC4_ETHTOOL_REGS_VER;
2165         } else {
2166                 hdr->version = EMAC_ETHTOOL_REGS_VER;
2167         }
2168         memcpy_fromio(hdr + 1, dev->emacp, sizeof(struct emac_regs));
2169         return (void *)(hdr + 1) + sizeof(struct emac_regs);
2170 }
2171
2172 static void emac_ethtool_get_regs(struct net_device *ndev,
2173                                   struct ethtool_regs *regs, void *buf)
2174 {
2175         struct emac_instance *dev = netdev_priv(ndev);
2176         struct emac_ethtool_regs_hdr *hdr = buf;
2177
2178         hdr->components = 0;
2179         buf = hdr + 1;
2180
2181         buf = mal_dump_regs(dev->mal, buf);
2182         buf = emac_dump_regs(dev, buf);
2183         if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII)) {
2184                 hdr->components |= EMAC_ETHTOOL_REGS_ZMII;
2185                 buf = zmii_dump_regs(dev->zmii_dev, buf);
2186         }
2187         if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII)) {
2188                 hdr->components |= EMAC_ETHTOOL_REGS_RGMII;
2189                 buf = rgmii_dump_regs(dev->rgmii_dev, buf);
2190         }
2191         if (emac_has_feature(dev, EMAC_FTR_HAS_TAH)) {
2192                 hdr->components |= EMAC_ETHTOOL_REGS_TAH;
2193                 buf = tah_dump_regs(dev->tah_dev, buf);
2194         }
2195 }
2196
2197 static int emac_ethtool_nway_reset(struct net_device *ndev)
2198 {
2199         struct emac_instance *dev = netdev_priv(ndev);
2200         int res = 0;
2201
2202         DBG(dev, "nway_reset" NL);
2203
2204         if (dev->phy.address < 0)
2205                 return -EOPNOTSUPP;
2206
2207         mutex_lock(&dev->link_lock);
2208         if (!dev->phy.autoneg) {
2209                 res = -EINVAL;
2210                 goto out;
2211         }
2212
2213         dev->phy.def->ops->setup_aneg(&dev->phy, dev->phy.advertising);
2214  out:
2215         mutex_unlock(&dev->link_lock);
2216         emac_force_link_update(dev);
2217         return res;
2218 }
2219
2220 static int emac_ethtool_get_sset_count(struct net_device *ndev, int stringset)
2221 {
2222         if (stringset == ETH_SS_STATS)
2223                 return EMAC_ETHTOOL_STATS_COUNT;
2224         else
2225                 return -EINVAL;
2226 }
2227
2228 static void emac_ethtool_get_strings(struct net_device *ndev, u32 stringset,
2229                                      u8 * buf)
2230 {
2231         if (stringset == ETH_SS_STATS)
2232                 memcpy(buf, &emac_stats_keys, sizeof(emac_stats_keys));
2233 }
2234
2235 static void emac_ethtool_get_ethtool_stats(struct net_device *ndev,
2236                                            struct ethtool_stats *estats,
2237                                            u64 * tmp_stats)
2238 {
2239         struct emac_instance *dev = netdev_priv(ndev);
2240
2241         memcpy(tmp_stats, &dev->stats, sizeof(dev->stats));
2242         tmp_stats += sizeof(dev->stats) / sizeof(u64);
2243         memcpy(tmp_stats, &dev->estats, sizeof(dev->estats));
2244 }
2245
/*
 * ethtool get_drvinfo: report driver name/version and a bus_info string
 * built from the EMAC cell index and its device-tree node path.
 */
static void emac_ethtool_get_drvinfo(struct net_device *ndev,
				     struct ethtool_drvinfo *info)
{
	struct emac_instance *dev = netdev_priv(ndev);

	strlcpy(info->driver, "ibm_emac", sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	snprintf(info->bus_info, sizeof(info->bus_info), "PPC 4xx EMAC-%d %s",
		 dev->cell_index, dev->ofdev->dev.of_node->full_name);
}
2256
/* ethtool entry points exposed by this driver */
static const struct ethtool_ops emac_ethtool_ops = {
	.get_settings = emac_ethtool_get_settings,
	.set_settings = emac_ethtool_set_settings,
	.get_drvinfo = emac_ethtool_get_drvinfo,

	.get_regs_len = emac_ethtool_get_regs_len,
	.get_regs = emac_ethtool_get_regs,

	.nway_reset = emac_ethtool_nway_reset,

	.get_ringparam = emac_ethtool_get_ringparam,
	.get_pauseparam = emac_ethtool_get_pauseparam,

	.get_strings = emac_ethtool_get_strings,
	.get_sset_count = emac_ethtool_get_sset_count,
	.get_ethtool_stats = emac_ethtool_get_ethtool_stats,

	.get_link = ethtool_op_get_link,
};
2276
/*
 * ndo_do_ioctl: handle the standard MII ioctls against the attached PHY.
 * Returns -EOPNOTSUPP in a PHY-less configuration or for unknown commands.
 */
static int emac_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
{
	struct emac_instance *dev = netdev_priv(ndev);
	struct mii_ioctl_data *data = if_mii(rq);

	DBG(dev, "ioctl %08x" NL, cmd);

	if (dev->phy.address < 0)
		return -EOPNOTSUPP;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = dev->phy.address;
		/* Fall through */
	case SIOCGMIIREG:
		/* Note: reads are always issued against our own PHY address,
		 * not data->phy_id */
		data->val_out = emac_mdio_read(ndev, dev->phy.address,
					       data->reg_num);
		return 0;

	case SIOCSMIIREG:
		emac_mdio_write(ndev, dev->phy.address, data->reg_num,
				data->val_in);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
2304
/* Tracks one device this EMAC depends on (MAL, ZMII, ...) while waiting
 * for its driver to bind during probe. */
struct emac_depentry {
	u32                     phandle;	/* DT phandle; 0 = no dependency */
	struct device_node      *node;		/* resolved DT node (refcounted) */
	struct platform_device  *ofdev;		/* bound platform device */
	void                    *drvdata;	/* non-NULL once its driver probed */
};

/* Indices into the dependency array used by emac_wait_deps() */
#define EMAC_DEP_MAL_IDX        0
#define EMAC_DEP_ZMII_IDX       1
#define EMAC_DEP_RGMII_IDX      2
#define EMAC_DEP_TAH_IDX        3
#define EMAC_DEP_MDIO_IDX       4
#define EMAC_DEP_PREV_IDX       5	/* previous EMAC in the boot list */
#define EMAC_DEP_COUNT          6
2319
/*
 * Check whether every dependency in @deps is satisfied (its driver has
 * bound and set drvdata).  Called repeatedly from the wait loop in
 * emac_wait_deps(); each call resolves as much as it can and caches the
 * result in the entry, so the function is safe to re-run.
 * Returns non-zero once all EMAC_DEP_COUNT entries are satisfied.
 */
static int emac_check_deps(struct emac_instance *dev,
			   struct emac_depentry *deps)
{
	int i, there = 0;
	struct device_node *np;

	for (i = 0; i < EMAC_DEP_COUNT; i++) {
		/* no dependency on that item, allright */
		if (deps[i].phandle == 0) {
			there++;
			continue;
		}
		/* special case for blist as the dependency might go away */
		if (i == EMAC_DEP_PREV_IDX) {
			/* *(blist - 1) is the previous EMAC's boot-list
			 * slot; NULL means it was probed (or dropped) */
			np = *(dev->blist - 1);
			if (np == NULL) {
				deps[i].phandle = 0;
				there++;
				continue;
			}
			if (deps[i].node == NULL)
				deps[i].node = of_node_get(np);
		}
		if (deps[i].node == NULL)
			deps[i].node = of_find_node_by_phandle(deps[i].phandle);
		if (deps[i].node == NULL)
			continue;
		if (deps[i].ofdev == NULL)
			deps[i].ofdev = of_find_device_by_node(deps[i].node);
		if (deps[i].ofdev == NULL)
			continue;
		if (deps[i].drvdata == NULL)
			deps[i].drvdata = platform_get_drvdata(deps[i].ofdev);
		if (deps[i].drvdata != NULL)
			there++;
	}
	return there == EMAC_DEP_COUNT;
}
2358
/*
 * Drop the device references taken on our dependencies by emac_wait_deps().
 * of_dev_put() tolerates NULL, so absent optional deps are fine.
 */
static void emac_put_deps(struct emac_instance *dev)
{
	of_dev_put(dev->mal_dev);
	of_dev_put(dev->zmii_dev);
	of_dev_put(dev->rgmii_dev);
	of_dev_put(dev->mdio_dev);
	of_dev_put(dev->tah_dev);
}
2367
2368 static int emac_of_bus_notify(struct notifier_block *nb, unsigned long action,
2369                               void *data)
2370 {
2371         /* We are only intereted in device addition */
2372         if (action == BUS_NOTIFY_BOUND_DRIVER)
2373                 wake_up_all(&emac_probe_wait);
2374         return 0;
2375 }
2376
/* Registered on the platform bus only for the duration of emac_wait_deps() */
static struct notifier_block emac_of_bus_notifier = {
	.notifier_call = emac_of_bus_notify
};
2380
/*
 * Wait (with timeout) until all of this EMAC's companion devices have been
 * probed, then transfer their platform_device references into @dev.
 * Returns 0 on success; -ENODEV if the dependencies did not all appear in
 * time, in which case any references taken are dropped again.
 */
static int emac_wait_deps(struct emac_instance *dev)
{
	struct emac_depentry deps[EMAC_DEP_COUNT];
	int i, err;

	memset(&deps, 0, sizeof(deps));

	deps[EMAC_DEP_MAL_IDX].phandle = dev->mal_ph;
	deps[EMAC_DEP_ZMII_IDX].phandle = dev->zmii_ph;
	deps[EMAC_DEP_RGMII_IDX].phandle = dev->rgmii_ph;
	if (dev->tah_ph)
		deps[EMAC_DEP_TAH_IDX].phandle = dev->tah_ph;
	if (dev->mdio_ph)
		deps[EMAC_DEP_MDIO_IDX].phandle = dev->mdio_ph;
	/* Sentinel phandle: emac_check_deps() resolves PREV via the boot
	 * list, not by phandle lookup */
	if (dev->blist && dev->blist > emac_boot_list)
		deps[EMAC_DEP_PREV_IDX].phandle = 0xffffffffu;
	bus_register_notifier(&platform_bus_type, &emac_of_bus_notifier);
	wait_event_timeout(emac_probe_wait,
			   emac_check_deps(dev, deps),
			   EMAC_PROBE_DEP_TIMEOUT);
	bus_unregister_notifier(&platform_bus_type, &emac_of_bus_notifier);
	err = emac_check_deps(dev, deps) ? 0 : -ENODEV;
	for (i = 0; i < EMAC_DEP_COUNT; i++) {
		of_node_put(deps[i].node);
		/* On failure, release the ofdev refs too; on success they
		 * are handed over to dev->*_dev below */
		if (err)
			of_dev_put(deps[i].ofdev);
	}
	if (err == 0) {
		dev->mal_dev = deps[EMAC_DEP_MAL_IDX].ofdev;
		dev->zmii_dev = deps[EMAC_DEP_ZMII_IDX].ofdev;
		dev->rgmii_dev = deps[EMAC_DEP_RGMII_IDX].ofdev;
		dev->tah_dev = deps[EMAC_DEP_TAH_IDX].ofdev;
		dev->mdio_dev = deps[EMAC_DEP_MDIO_IDX].ofdev;
	}
	/* The PREV entry is never stored in dev, so its ref is dropped here */
	of_dev_put(deps[EMAC_DEP_PREV_IDX].ofdev);
	return err;
}
2418
2419 static int emac_read_uint_prop(struct device_node *np, const char *name,
2420                                u32 *val, int fatal)
2421 {
2422         int len;
2423         const u32 *prop = of_get_property(np, name, &len);
2424         if (prop == NULL || len < sizeof(u32)) {
2425                 if (fatal)
2426                         printk(KERN_ERR "%s: missing %s property\n",
2427                                np->full_name, name);
2428                 return -ENODEV;
2429         }
2430         *val = *prop;
2431         return 0;
2432 }
2433
/*
 * Probe and initialize the PHY attached to this EMAC.
 *
 * Supports three configurations visible in the code below:
 *  - PHY-less (both phy-address and phy-map absent in the device tree,
 *    i.e. 0xffffffff): synthesize fixed link features and return;
 *  - GPCS (internal SERDES-style PHY): address derived from
 *    gpcs-address or the EMAC cell index;
 *  - external MDIO PHY: scan the 32 MDIO addresses not masked out by
 *    phy_map/busy_phy_map and probe the first responding address.
 *
 * Returns 0 on success, -ENXIO if no PHY answered the scan.
 * busy_phy_map updates are serialized by emac_phy_map_lock.
 */
static int emac_init_phy(struct emac_instance *dev)
{
	struct device_node *np = dev->ofdev->dev.of_node;
	struct net_device *ndev = dev->ndev;
	u32 phy_map, adv;
	int i;

	dev->phy.dev = ndev;
	dev->phy.mode = dev->phy_mode;

	/* PHY-less configuration.
	 * XXX I probably should move these settings to the dev tree
	 */
	if (dev->phy_address == 0xffffffff && dev->phy_map == 0xffffffff) {
		emac_reset(dev);

		/* PHY-less configuration.
		 * XXX I probably should move these settings to the dev tree
		 */
		/* address -1 marks "no PHY"; see the phy.address >= 0
		 * checks in emac_probe/emac_remove */
		dev->phy.address = -1;
		dev->phy.features = SUPPORTED_MII;
		if (emac_phy_supports_gige(dev->phy_mode))
			dev->phy.features |= SUPPORTED_1000baseT_Full;
		else
			dev->phy.features |= SUPPORTED_100baseT_Full;
		dev->phy.pause = 1;

		return 0;
	}

	/* Serialize access to the (global) busy_phy_map and the MDIO scan */
	mutex_lock(&emac_phy_map_lock);
	phy_map = dev->phy_map | busy_phy_map;

	DBG(dev, "PHY maps %08x %08x" NL, dev->phy_map, busy_phy_map);

	dev->phy.mdio_read = emac_mdio_read;
	dev->phy.mdio_write = emac_mdio_write;

	/* Enable internal clock source */
#ifdef CONFIG_PPC_DCR_NATIVE
	if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX))
		dcri_clrset(SDR0, SDR0_MFR, 0, SDR0_MFR_ECS);
#endif
	/* PHY clock workaround */
	emac_rx_clk_tx(dev);

	/* Enable internal clock source on 440GX*/
	/* NOTE(review): this repeats the identical SDR0_MFR_ECS set done
	 * just above; presumably re-asserted because emac_rx_clk_tx()
	 * also touches clock configuration -- confirm before removing. */
#ifdef CONFIG_PPC_DCR_NATIVE
	if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX))
		dcri_clrset(SDR0, SDR0_MFR, 0, SDR0_MFR_ECS);
#endif
	/* Configure EMAC with defaults so we can at least use MDIO
	 * This is needed mostly for 440GX
	 */
	if (emac_phy_gpcs(dev->phy.mode)) {
		/* XXX
		 * Make GPCS PHY address equal to EMAC index.
		 * We probably should take into account busy_phy_map
		 * and/or phy_map here.
		 *
		 * Note that the busy_phy_map is currently global
		 * while it should probably be per-ASIC...
		 */
		dev->phy.gpcs_address = dev->gpcs_address;
		if (dev->phy.gpcs_address == 0xffffffff)
			dev->phy.address = dev->cell_index;
	}

	emac_configure(dev);

	/* An explicit phy-address overrides the map: scan only that bit */
	if (dev->phy_address != 0xffffffff)
		phy_map = ~(1 << dev->phy_address);

	/* Scan MDIO addresses 0..0x1f whose phy_map bit is clear */
	for (i = 0; i < 0x20; phy_map >>= 1, ++i)
		if (!(phy_map & 1)) {
			int r;
			busy_phy_map |= 1 << i;

			/* Quick check if there is a PHY at the address */
			r = emac_mdio_read(dev->ndev, i, MII_BMCR);
			if (r == 0xffff || r < 0)
				continue;
			if (!emac_mii_phy_probe(&dev->phy, i))
				break;
		}

	/* Enable external clock source */
#ifdef CONFIG_PPC_DCR_NATIVE
	if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX))
		dcri_clrset(SDR0, SDR0_MFR, SDR0_MFR_ECS, 0);
#endif
	mutex_unlock(&emac_phy_map_lock);
	/* i == 0x20 means the scan loop above never hit "break" */
	if (i == 0x20) {
		printk(KERN_WARNING "%s: can't find PHY!\n", np->full_name);
		return -ENXIO;
	}

	/* Init PHY */
	if (dev->phy.def->ops->init)
		dev->phy.def->ops->init(&dev->phy);

	/* Disable any PHY features not supported by the platform */
	dev->phy.def->features &= ~dev->phy_feat_exc;
	dev->phy.features &= ~dev->phy_feat_exc;

	/* Setup initial link parameters */
	if (dev->phy.features & SUPPORTED_Autoneg) {
		adv = dev->phy.features;
		if (!emac_has_feature(dev, EMAC_FTR_NO_FLOW_CONTROL_40x))
			adv |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
		/* Restart autonegotiation */
		dev->phy.def->ops->setup_aneg(&dev->phy, adv);
	} else {
		u32 f = dev->phy.def->features;
		int speed = SPEED_10, fd = DUPLEX_HALF;

		/* Select highest supported speed/duplex */
		if (f & SUPPORTED_1000baseT_Full) {
			speed = SPEED_1000;
			fd = DUPLEX_FULL;
		} else if (f & SUPPORTED_1000baseT_Half)
			speed = SPEED_1000;
		else if (f & SUPPORTED_100baseT_Full) {
			speed = SPEED_100;
			fd = DUPLEX_FULL;
		} else if (f & SUPPORTED_100baseT_Half)
			speed = SPEED_100;
		else if (f & SUPPORTED_10baseT_Full)
			fd = DUPLEX_FULL;

		/* Force link parameters */
		dev->phy.def->ops->setup_forced(&dev->phy, speed, fd);
	}
	return 0;
}
2569
/*
 * Parse this EMAC's device-tree node into the emac_instance.
 *
 * Mandatory properties (mal-device, mal-tx/rx-channel, cell-index and
 * the parent bus clock-frequency) abort the probe with -ENXIO when
 * missing; optional properties fall back to the defaults assigned in
 * each "if" body below (0xffffffff is used as the "not specified"
 * sentinel for PHY/ZMII/RGMII addressing).  Feature bits are then
 * derived from the compatible strings, and the MAC address is copied
 * from local-mac-address.  Returns 0 on success, -ENXIO on any
 * missing/unsupported configuration.
 */
static int emac_init_config(struct emac_instance *dev)
{
	struct device_node *np = dev->ofdev->dev.of_node;
	const void *p;

	/* Read config from device-tree */
	if (emac_read_uint_prop(np, "mal-device", &dev->mal_ph, 1))
		return -ENXIO;
	if (emac_read_uint_prop(np, "mal-tx-channel", &dev->mal_tx_chan, 1))
		return -ENXIO;
	if (emac_read_uint_prop(np, "mal-rx-channel", &dev->mal_rx_chan, 1))
		return -ENXIO;
	if (emac_read_uint_prop(np, "cell-index", &dev->cell_index, 1))
		return -ENXIO;
	if (emac_read_uint_prop(np, "max-frame-size", &dev->max_mtu, 0))
		dev->max_mtu = 1500;
	if (emac_read_uint_prop(np, "rx-fifo-size", &dev->rx_fifo_size, 0))
		dev->rx_fifo_size = 2048;
	if (emac_read_uint_prop(np, "tx-fifo-size", &dev->tx_fifo_size, 0))
		dev->tx_fifo_size = 2048;
	if (emac_read_uint_prop(np, "rx-fifo-size-gige", &dev->rx_fifo_size_gige, 0))
		dev->rx_fifo_size_gige = dev->rx_fifo_size;
	if (emac_read_uint_prop(np, "tx-fifo-size-gige", &dev->tx_fifo_size_gige, 0))
		dev->tx_fifo_size_gige = dev->tx_fifo_size;
	if (emac_read_uint_prop(np, "phy-address", &dev->phy_address, 0))
		dev->phy_address = 0xffffffff;
	if (emac_read_uint_prop(np, "phy-map", &dev->phy_map, 0))
		dev->phy_map = 0xffffffff;
	if (emac_read_uint_prop(np, "gpcs-address", &dev->gpcs_address, 0))
		dev->gpcs_address = 0xffffffff;
	/* OPB bus frequency lives on the parent bus node */
	if (emac_read_uint_prop(np->parent, "clock-frequency", &dev->opb_bus_freq, 1))
		return -ENXIO;
	if (emac_read_uint_prop(np, "tah-device", &dev->tah_ph, 0))
		dev->tah_ph = 0;
	if (emac_read_uint_prop(np, "tah-channel", &dev->tah_port, 0))
		dev->tah_port = 0;
	if (emac_read_uint_prop(np, "mdio-device", &dev->mdio_ph, 0))
		dev->mdio_ph = 0;
	if (emac_read_uint_prop(np, "zmii-device", &dev->zmii_ph, 0))
		dev->zmii_ph = 0;
	if (emac_read_uint_prop(np, "zmii-channel", &dev->zmii_port, 0))
		dev->zmii_port = 0xffffffff;
	if (emac_read_uint_prop(np, "rgmii-device", &dev->rgmii_ph, 0))
		dev->rgmii_ph = 0;
	if (emac_read_uint_prop(np, "rgmii-channel", &dev->rgmii_port, 0))
		dev->rgmii_port = 0xffffffff;
	if (emac_read_uint_prop(np, "fifo-entry-size", &dev->fifo_entry_size, 0))
		dev->fifo_entry_size = 16;
	if (emac_read_uint_prop(np, "mal-burst-size", &dev->mal_burst_size, 0))
		dev->mal_burst_size = 256;

	/* PHY mode needs some decoding */
	dev->phy_mode = of_get_phy_mode(np);
	if (dev->phy_mode < 0)
		dev->phy_mode = PHY_MODE_NA;

	/* Check EMAC version */
	if (of_device_is_compatible(np, "ibm,emac4sync")) {
		dev->features |= (EMAC_FTR_EMAC4 | EMAC_FTR_EMAC4SYNC);
		if (of_device_is_compatible(np, "ibm,emac-460ex") ||
		    of_device_is_compatible(np, "ibm,emac-460gt"))
			dev->features |= EMAC_FTR_460EX_PHY_CLK_FIX;
		if (of_device_is_compatible(np, "ibm,emac-405ex") ||
		    of_device_is_compatible(np, "ibm,emac-405exr"))
			dev->features |= EMAC_FTR_440EP_PHY_CLK_FIX;
		if (of_device_is_compatible(np, "ibm,emac-apm821xx")) {
			dev->features |= (EMAC_APM821XX_REQ_JUMBO_FRAME_SIZE |
					  EMAC_FTR_APM821XX_NO_HALF_DUPLEX |
					  EMAC_FTR_460EX_PHY_CLK_FIX);
		}
	} else if (of_device_is_compatible(np, "ibm,emac4")) {
		dev->features |= EMAC_FTR_EMAC4;
		if (of_device_is_compatible(np, "ibm,emac-440gx"))
			dev->features |= EMAC_FTR_440GX_PHY_CLK_FIX;
	} else {
		if (of_device_is_compatible(np, "ibm,emac-440ep") ||
		    of_device_is_compatible(np, "ibm,emac-440gr"))
			dev->features |= EMAC_FTR_440EP_PHY_CLK_FIX;
		if (of_device_is_compatible(np, "ibm,emac-405ez")) {
#ifdef CONFIG_IBM_EMAC_NO_FLOW_CTRL
			dev->features |= EMAC_FTR_NO_FLOW_CONTROL_40x;
#else
			/* 405EZ requires flow control disabled at build time */
			printk(KERN_ERR "%s: Flow control not disabled!\n",
					np->full_name);
			return -ENXIO;
#endif
		}

	}

	/* Fixup some feature bits based on the device tree */
	if (of_get_property(np, "has-inverted-stacr-oc", NULL))
		dev->features |= EMAC_FTR_STACR_OC_INVERT;
	if (of_get_property(np, "has-new-stacr-staopc", NULL))
		dev->features |= EMAC_FTR_HAS_NEW_STACR;

	/* CAB lacks the appropriate properties */
	if (of_device_is_compatible(np, "ibm,emac-axon"))
		dev->features |= EMAC_FTR_HAS_NEW_STACR |
			EMAC_FTR_STACR_OC_INVERT;

	/* Enable TAH/ZMII/RGMII features as found; a device-tree link to
	 * a bridge the kernel was built without support for is fatal */
	if (dev->tah_ph != 0) {
#ifdef CONFIG_IBM_EMAC_TAH
		dev->features |= EMAC_FTR_HAS_TAH;
#else
		printk(KERN_ERR "%s: TAH support not enabled !\n",
		       np->full_name);
		return -ENXIO;
#endif
	}

	if (dev->zmii_ph != 0) {
#ifdef CONFIG_IBM_EMAC_ZMII
		dev->features |= EMAC_FTR_HAS_ZMII;
#else
		printk(KERN_ERR "%s: ZMII support not enabled !\n",
		       np->full_name);
		return -ENXIO;
#endif
	}

	if (dev->rgmii_ph != 0) {
#ifdef CONFIG_IBM_EMAC_RGMII
		dev->features |= EMAC_FTR_HAS_RGMII;
#else
		printk(KERN_ERR "%s: RGMII support not enabled !\n",
		       np->full_name);
		return -ENXIO;
#endif
	}

	/* Read MAC-address */
	p = of_get_property(np, "local-mac-address", NULL);
	if (p == NULL) {
		printk(KERN_ERR "%s: Can't find local-mac-address property\n",
		       np->full_name);
		return -ENXIO;
	}
	memcpy(dev->ndev->dev_addr, p, ETH_ALEN);

	/* IAHT and GAHT filter parameterization */
	if (emac_has_feature(dev, EMAC_FTR_EMAC4SYNC)) {
		dev->xaht_slots_shift = EMAC4SYNC_XAHT_SLOTS_SHIFT;
		dev->xaht_width_shift = EMAC4SYNC_XAHT_WIDTH_SHIFT;
	} else {
		dev->xaht_slots_shift = EMAC4_XAHT_SLOTS_SHIFT;
		dev->xaht_width_shift = EMAC4_XAHT_WIDTH_SHIFT;
	}

	DBG(dev, "features     : 0x%08x / 0x%08x\n", dev->features, EMAC_FTRS_POSSIBLE);
	DBG(dev, "tx_fifo_size : %d (%d gige)\n", dev->tx_fifo_size, dev->tx_fifo_size_gige);
	DBG(dev, "rx_fifo_size : %d (%d gige)\n", dev->rx_fifo_size, dev->rx_fifo_size_gige);
	DBG(dev, "max_mtu      : %d\n", dev->max_mtu);
	DBG(dev, "OPB freq     : %d\n", dev->opb_bus_freq);

	return 0;
}
2728
2729 static const struct net_device_ops emac_netdev_ops = {
2730         .ndo_open               = emac_open,
2731         .ndo_stop               = emac_close,
2732         .ndo_get_stats          = emac_stats,
2733         .ndo_set_rx_mode        = emac_set_multicast_list,
2734         .ndo_do_ioctl           = emac_ioctl,
2735         .ndo_tx_timeout         = emac_tx_timeout,
2736         .ndo_validate_addr      = eth_validate_addr,
2737         .ndo_set_mac_address    = emac_set_mac_address,
2738         .ndo_start_xmit         = emac_start_xmit,
2739         .ndo_change_mtu         = eth_change_mtu,
2740 };
2741
2742 static const struct net_device_ops emac_gige_netdev_ops = {
2743         .ndo_open               = emac_open,
2744         .ndo_stop               = emac_close,
2745         .ndo_get_stats          = emac_stats,
2746         .ndo_set_rx_mode        = emac_set_multicast_list,
2747         .ndo_do_ioctl           = emac_ioctl,
2748         .ndo_tx_timeout         = emac_tx_timeout,
2749         .ndo_validate_addr      = eth_validate_addr,
2750         .ndo_set_mac_address    = emac_set_mac_address,
2751         .ndo_start_xmit         = emac_start_xmit_sg,
2752         .ndo_change_mtu         = emac_change_mtu,
2753 };
2754
2755 static int emac_probe(struct platform_device *ofdev)
2756 {
2757         struct net_device *ndev;
2758         struct emac_instance *dev;
2759         struct device_node *np = ofdev->dev.of_node;
2760         struct device_node **blist = NULL;
2761         int err, i;
2762
2763         /* Skip unused/unwired EMACS.  We leave the check for an unused
2764          * property here for now, but new flat device trees should set a
2765          * status property to "disabled" instead.
2766          */
2767         if (of_get_property(np, "unused", NULL) || !of_device_is_available(np))
2768                 return -ENODEV;
2769
2770         /* Find ourselves in the bootlist if we are there */
2771         for (i = 0; i < EMAC_BOOT_LIST_SIZE; i++)
2772                 if (emac_boot_list[i] == np)
2773                         blist = &emac_boot_list[i];
2774
2775         /* Allocate our net_device structure */
2776         err = -ENOMEM;
2777         ndev = alloc_etherdev(sizeof(struct emac_instance));
2778         if (!ndev)
2779                 goto err_gone;
2780
2781         dev = netdev_priv(ndev);
2782         dev->ndev = ndev;
2783         dev->ofdev = ofdev;
2784         dev->blist = blist;
2785         SET_NETDEV_DEV(ndev, &ofdev->dev);
2786
2787         /* Initialize some embedded data structures */
2788         mutex_init(&dev->mdio_lock);
2789         mutex_init(&dev->link_lock);
2790         spin_lock_init(&dev->lock);
2791         INIT_WORK(&dev->reset_work, emac_reset_work);
2792
2793         /* Init various config data based on device-tree */
2794         err = emac_init_config(dev);
2795         if (err != 0)
2796                 goto err_free;
2797
2798         /* Get interrupts. EMAC irq is mandatory, WOL irq is optional */
2799         dev->emac_irq = irq_of_parse_and_map(np, 0);
2800         dev->wol_irq = irq_of_parse_and_map(np, 1);
2801         if (!dev->emac_irq) {
2802                 printk(KERN_ERR "%s: Can't map main interrupt\n", np->full_name);
2803                 goto err_free;
2804         }
2805         ndev->irq = dev->emac_irq;
2806
2807         /* Map EMAC regs */
2808         if (of_address_to_resource(np, 0, &dev->rsrc_regs)) {
2809                 printk(KERN_ERR "%s: Can't get registers address\n",
2810                        np->full_name);
2811                 goto err_irq_unmap;
2812         }
2813         // TODO : request_mem_region
2814         dev->emacp = ioremap(dev->rsrc_regs.start,
2815                              resource_size(&dev->rsrc_regs));
2816         if (dev->emacp == NULL) {
2817                 printk(KERN_ERR "%s: Can't map device registers!\n",
2818                        np->full_name);
2819                 err = -ENOMEM;
2820                 goto err_irq_unmap;
2821         }
2822
2823         /* Wait for dependent devices */
2824         err = emac_wait_deps(dev);
2825         if (err) {
2826                 printk(KERN_ERR
2827                        "%s: Timeout waiting for dependent devices\n",
2828                        np->full_name);
2829                 /*  display more info about what's missing ? */
2830                 goto err_reg_unmap;
2831         }
2832         dev->mal = platform_get_drvdata(dev->mal_dev);
2833         if (dev->mdio_dev != NULL)
2834                 dev->mdio_instance = platform_get_drvdata(dev->mdio_dev);
2835
2836         /* Register with MAL */
2837         dev->commac.ops = &emac_commac_ops;
2838         dev->commac.dev = dev;
2839         dev->commac.tx_chan_mask = MAL_CHAN_MASK(dev->mal_tx_chan);
2840         dev->commac.rx_chan_mask = MAL_CHAN_MASK(dev->mal_rx_chan);
2841         err = mal_register_commac(dev->mal, &dev->commac);
2842         if (err) {
2843                 printk(KERN_ERR "%s: failed to register with mal %s!\n",
2844                        np->full_name, dev->mal_dev->dev.of_node->full_name);
2845                 goto err_rel_deps;
2846         }
2847         dev->rx_skb_size = emac_rx_skb_size(ndev->mtu);
2848         dev->rx_sync_size = emac_rx_sync_size(ndev->mtu);
2849
2850         /* Get pointers to BD rings */
2851         dev->tx_desc =
2852             dev->mal->bd_virt + mal_tx_bd_offset(dev->mal, dev->mal_tx_chan);
2853         dev->rx_desc =
2854             dev->mal->bd_virt + mal_rx_bd_offset(dev->mal, dev->mal_rx_chan);
2855
2856         DBG(dev, "tx_desc %p" NL, dev->tx_desc);
2857         DBG(dev, "rx_desc %p" NL, dev->rx_desc);
2858
2859         /* Clean rings */
2860         memset(dev->tx_desc, 0, NUM_TX_BUFF * sizeof(struct mal_descriptor));
2861         memset(dev->rx_desc, 0, NUM_RX_BUFF * sizeof(struct mal_descriptor));
2862         memset(dev->tx_skb, 0, NUM_TX_BUFF * sizeof(struct sk_buff *));
2863         memset(dev->rx_skb, 0, NUM_RX_BUFF * sizeof(struct sk_buff *));
2864
2865         /* Attach to ZMII, if needed */
2866         if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII) &&
2867             (err = zmii_attach(dev->zmii_dev, dev->zmii_port, &dev->phy_mode)) != 0)
2868                 goto err_unreg_commac;
2869
2870         /* Attach to RGMII, if needed */
2871         if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII) &&
2872             (err = rgmii_attach(dev->rgmii_dev, dev->rgmii_port, dev->phy_mode)) != 0)
2873                 goto err_detach_zmii;
2874
2875         /* Attach to TAH, if needed */
2876         if (emac_has_feature(dev, EMAC_FTR_HAS_TAH) &&
2877             (err = tah_attach(dev->tah_dev, dev->tah_port)) != 0)
2878                 goto err_detach_rgmii;
2879
2880         /* Set some link defaults before we can find out real parameters */
2881         dev->phy.speed = SPEED_100;
2882         dev->phy.duplex = DUPLEX_FULL;
2883         dev->phy.autoneg = AUTONEG_DISABLE;
2884         dev->phy.pause = dev->phy.asym_pause = 0;
2885         dev->stop_timeout = STOP_TIMEOUT_100;
2886         INIT_DELAYED_WORK(&dev->link_work, emac_link_timer);
2887
2888         /* Some SoCs like APM821xx does not support Half Duplex mode. */
2889         if (emac_has_feature(dev, EMAC_FTR_APM821XX_NO_HALF_DUPLEX)) {
2890                 dev->phy_feat_exc = (SUPPORTED_1000baseT_Half |
2891                                      SUPPORTED_100baseT_Half |
2892                                      SUPPORTED_10baseT_Half);
2893         }
2894
2895         /* Find PHY if any */
2896         err = emac_init_phy(dev);
2897         if (err != 0)
2898                 goto err_detach_tah;
2899
2900         if (dev->tah_dev) {
2901                 ndev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG;
2902                 ndev->features |= ndev->hw_features | NETIF_F_RXCSUM;
2903         }
2904         ndev->watchdog_timeo = 5 * HZ;
2905         if (emac_phy_supports_gige(dev->phy_mode)) {
2906                 ndev->netdev_ops = &emac_gige_netdev_ops;
2907                 dev->commac.ops = &emac_commac_sg_ops;
2908         } else
2909                 ndev->netdev_ops = &emac_netdev_ops;
2910         ndev->ethtool_ops = &emac_ethtool_ops;
2911
2912         netif_carrier_off(ndev);
2913
2914         err = register_netdev(ndev);
2915         if (err) {
2916                 printk(KERN_ERR "%s: failed to register net device (%d)!\n",
2917                        np->full_name, err);
2918                 goto err_detach_tah;
2919         }
2920
2921         /* Set our drvdata last as we don't want them visible until we are
2922          * fully initialized
2923          */
2924         wmb();
2925         platform_set_drvdata(ofdev, dev);
2926
2927         /* There's a new kid in town ! Let's tell everybody */
2928         wake_up_all(&emac_probe_wait);
2929
2930
2931         printk(KERN_INFO "%s: EMAC-%d %s, MAC %pM\n",
2932                ndev->name, dev->cell_index, np->full_name, ndev->dev_addr);
2933
2934         if (dev->phy_mode == PHY_MODE_SGMII)
2935                 printk(KERN_NOTICE "%s: in SGMII mode\n", ndev->name);
2936
2937         if (dev->phy.address >= 0)
2938                 printk("%s: found %s PHY (0x%02x)\n", ndev->name,
2939                        dev->phy.def->name, dev->phy.address);
2940
2941         emac_dbg_register(dev);
2942
2943         /* Life is good */
2944         return 0;
2945
2946         /* I have a bad feeling about this ... */
2947
2948  err_detach_tah:
2949         if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
2950                 tah_detach(dev->tah_dev, dev->tah_port);
2951  err_detach_rgmii:
2952         if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
2953                 rgmii_detach(dev->rgmii_dev, dev->rgmii_port);
2954  err_detach_zmii:
2955         if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
2956                 zmii_detach(dev->zmii_dev, dev->zmii_port);
2957  err_unreg_commac:
2958         mal_unregister_commac(dev->mal, &dev->commac);
2959  err_rel_deps:
2960         emac_put_deps(dev);
2961  err_reg_unmap:
2962         iounmap(dev->emacp);
2963  err_irq_unmap:
2964         if (dev->wol_irq)
2965                 irq_dispose_mapping(dev->wol_irq);
2966         if (dev->emac_irq)
2967                 irq_dispose_mapping(dev->emac_irq);
2968  err_free:
2969         free_netdev(ndev);
2970  err_gone:
2971         /* if we were on the bootlist, remove us as we won't show up and
2972          * wake up all waiters to notify them in case they were waiting
2973          * on us
2974          */
2975         if (blist) {
2976                 *blist = NULL;
2977                 wake_up_all(&emac_probe_wait);
2978         }
2979         return err;
2980 }
2981
2982 static int emac_remove(struct platform_device *ofdev)
2983 {
2984         struct emac_instance *dev = platform_get_drvdata(ofdev);
2985
2986         DBG(dev, "remove" NL);
2987
2988         unregister_netdev(dev->ndev);
2989
2990         cancel_work_sync(&dev->reset_work);
2991
2992         if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
2993                 tah_detach(dev->tah_dev, dev->tah_port);
2994         if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
2995                 rgmii_detach(dev->rgmii_dev, dev->rgmii_port);
2996         if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
2997                 zmii_detach(dev->zmii_dev, dev->zmii_port);
2998
2999         busy_phy_map &= ~(1 << dev->phy.address);
3000         DBG(dev, "busy_phy_map now %#x" NL, busy_phy_map);
3001
3002         mal_unregister_commac(dev->mal, &dev->commac);
3003         emac_put_deps(dev);
3004
3005         emac_dbg_unregister(dev);
3006         iounmap(dev->emacp);
3007
3008         if (dev->wol_irq)
3009                 irq_dispose_mapping(dev->wol_irq);
3010         if (dev->emac_irq)
3011                 irq_dispose_mapping(dev->emac_irq);
3012
3013         free_netdev(dev->ndev);
3014
3015         return 0;
3016 }
3017
/* XXX Features in here should be replaced by properties... */
/* Device-tree match table: one entry per supported EMAC generation
 * (classic, emac4, emac4sync).  Also used by emac_make_bootlist() to
 * enumerate EMAC nodes at init time. */
static const struct of_device_id emac_match[] =
{
	{
		.type		= "network",
		.compatible	= "ibm,emac",
	},
	{
		.type		= "network",
		.compatible	= "ibm,emac4",
	},
	{
		.type		= "network",
		.compatible	= "ibm,emac4sync",
	},
	{},
};
MODULE_DEVICE_TABLE(of, emac_match);
3036
/* Platform driver glue; registered/unregistered in emac_init/emac_exit */
static struct platform_driver emac_driver = {
	.driver = {
		.name = "emac",
		.of_match_table = emac_match,
	},
	.probe = emac_probe,
	.remove = emac_remove,
};
3045
3046 static void __init emac_make_bootlist(void)
3047 {
3048         struct device_node *np = NULL;
3049         int j, max, i = 0;
3050         int cell_indices[EMAC_BOOT_LIST_SIZE];
3051
3052         /* Collect EMACs */
3053         while((np = of_find_all_nodes(np)) != NULL) {
3054                 const u32 *idx;
3055
3056                 if (of_match_node(emac_match, np) == NULL)
3057                         continue;
3058                 if (of_get_property(np, "unused", NULL))
3059                         continue;
3060                 idx = of_get_property(np, "cell-index", NULL);
3061                 if (idx == NULL)
3062                         continue;
3063                 cell_indices[i] = *idx;
3064                 emac_boot_list[i++] = of_node_get(np);
3065                 if (i >= EMAC_BOOT_LIST_SIZE) {
3066                         of_node_put(np);
3067                         break;
3068                 }
3069         }
3070         max = i;
3071
3072         /* Bubble sort them (doh, what a creative algorithm :-) */
3073         for (i = 0; max > 1 && (i < (max - 1)); i++)
3074                 for (j = i; j < max; j++) {
3075                         if (cell_indices[i] > cell_indices[j]) {
3076                                 swap(emac_boot_list[i], emac_boot_list[j]);
3077                                 swap(cell_indices[i], cell_indices[j]);
3078                         }
3079                 }
3080 }
3081
3082 static int __init emac_init(void)
3083 {
3084         int rc;
3085
3086         printk(KERN_INFO DRV_DESC ", version " DRV_VERSION "\n");
3087
3088         /* Init debug stuff */
3089         emac_init_debug();
3090
3091         /* Build EMAC boot list */
3092         emac_make_bootlist();
3093
3094         /* Init submodules */
3095         rc = mal_init();
3096         if (rc)
3097                 goto err;
3098         rc = zmii_init();
3099         if (rc)
3100                 goto err_mal;
3101         rc = rgmii_init();
3102         if (rc)
3103                 goto err_zmii;
3104         rc = tah_init();
3105         if (rc)
3106                 goto err_rgmii;
3107         rc = platform_driver_register(&emac_driver);
3108         if (rc)
3109                 goto err_tah;
3110
3111         return 0;
3112
3113  err_tah:
3114         tah_exit();
3115  err_rgmii:
3116         rgmii_exit();
3117  err_zmii:
3118         zmii_exit();
3119  err_mal:
3120         mal_exit();
3121  err:
3122         return rc;
3123 }
3124
3125 static void __exit emac_exit(void)
3126 {
3127         int i;
3128
3129         platform_driver_unregister(&emac_driver);
3130
3131         tah_exit();
3132         rgmii_exit();
3133         zmii_exit();
3134         mal_exit();
3135         emac_fini_debug();
3136
3137         /* Destroy EMAC boot list */
3138         for (i = 0; i < EMAC_BOOT_LIST_SIZE; i++)
3139                 of_node_put(emac_boot_list[i]);
3140 }
3141
/* Register module entry/exit points with the kernel */
module_init(emac_init);
module_exit(emac_exit);