1 /* This program is free software; you can redistribute it and/or modify
2 * it under the terms of the GNU General Public License as published by
3 * the Free Software Foundation; version 2 of the License
5 * This program is distributed in the hope that it will be useful,
6 * but WITHOUT ANY WARRANTY; without even the implied warranty of
7 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
8 * GNU General Public License for more details.
10 * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
11 * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
12 * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
15 #include <linux/of_device.h>
16 #include <linux/of_mdio.h>
17 #include <linux/of_net.h>
18 #include <linux/mfd/syscon.h>
19 #include <linux/regmap.h>
20 #include <linux/clk.h>
21 #include <linux/pm_runtime.h>
22 #include <linux/if_vlan.h>
23 #include <linux/reset.h>
24 #include <linux/tcp.h>
25 #include <linux/interrupt.h>
26 #include <linux/pinctrl/devinfo.h>
28 #include "mtk_eth_soc.h"
30 static int mtk_msg_level = -1;
31 module_param_named(msg_level, mtk_msg_level, int, 0);
32 MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)");
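/* A note on usage: with the default of -1, netif_msg_init() in mtk_probe()
 * falls back to MTK_DEFAULT_MSG_ENABLE; passing msg_level=N (0 < N < 32) at
 * module load time instead enables the lowest N NETIF_MSG_* categories,
 * i.e. the mask (1 << N) - 1.
 */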
34 #define MTK_ETHTOOL_STAT(x) { #x, \
35 offsetof(struct mtk_hw_stats, x) / sizeof(u64) }
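/* For illustration, MTK_ETHTOOL_STAT(tx_bytes) expands to
 * { "tx_bytes", offsetof(struct mtk_hw_stats, tx_bytes) / sizeof(u64) },
 * i.e. the string reported to ethtool plus the counter's index when the
 * stats block is viewed as an array of u64 (this assumes every counter in
 * struct mtk_hw_stats is a u64; see mtk_get_ethtool_stats()).
 */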
37 /* strings used by ethtool */
38 static const struct mtk_ethtool_stats {
39 char str[ETH_GSTRING_LEN];
41 } mtk_ethtool_stats[] = {
42 MTK_ETHTOOL_STAT(tx_bytes),
43 MTK_ETHTOOL_STAT(tx_packets),
44 MTK_ETHTOOL_STAT(tx_skip),
45 MTK_ETHTOOL_STAT(tx_collisions),
46 MTK_ETHTOOL_STAT(rx_bytes),
47 MTK_ETHTOOL_STAT(rx_packets),
48 MTK_ETHTOOL_STAT(rx_overflow),
49 MTK_ETHTOOL_STAT(rx_fcs_errors),
50 MTK_ETHTOOL_STAT(rx_short_errors),
51 MTK_ETHTOOL_STAT(rx_long_errors),
52 MTK_ETHTOOL_STAT(rx_checksum_errors),
53 MTK_ETHTOOL_STAT(rx_flow_control_packets),
56 static const char * const mtk_clks_source_name[] = {
57 "ethif", "esw", "gp0", "gp1", "gp2", "trgpll", "sgmii_tx250m",
58 "sgmii_rx250m", "sgmii_cdr_ref", "sgmii_cdr_fb", "sgmii_ck", "eth2pll"
61 void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg)
63 __raw_writel(val, eth->base + reg);
66 u32 mtk_r32(struct mtk_eth *eth, unsigned reg)
68 return __raw_readl(eth->base + reg);
71 static int mtk_mdio_busy_wait(struct mtk_eth *eth)
73 unsigned long t_start = jiffies;
76 if (!(mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_ACCESS))
78 if (time_after(jiffies, t_start + PHY_IAC_TIMEOUT))
83 dev_err(eth->dev, "mdio: MDIO timeout\n");
87 static u32 _mtk_mdio_write(struct mtk_eth *eth, u32 phy_addr,
88 u32 phy_register, u32 write_data)
90 if (mtk_mdio_busy_wait(eth))
95 mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START | PHY_IAC_WRITE |
96 (phy_register << PHY_IAC_REG_SHIFT) |
97 (phy_addr << PHY_IAC_ADDR_SHIFT) | write_data,
100 if (mtk_mdio_busy_wait(eth))
106 static u32 _mtk_mdio_read(struct mtk_eth *eth, int phy_addr, int phy_reg)
110 if (mtk_mdio_busy_wait(eth))
113 mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START | PHY_IAC_READ |
114 (phy_reg << PHY_IAC_REG_SHIFT) |
115 (phy_addr << PHY_IAC_ADDR_SHIFT),
118 if (mtk_mdio_busy_wait(eth))
121 d = mtk_r32(eth, MTK_PHY_IAC) & 0xffff;
126 static int mtk_mdio_write(struct mii_bus *bus, int phy_addr,
127 int phy_reg, u16 val)
129 struct mtk_eth *eth = bus->priv;
131 return _mtk_mdio_write(eth, phy_addr, phy_reg, val);
134 static int mtk_mdio_read(struct mii_bus *bus, int phy_addr, int phy_reg)
136 struct mtk_eth *eth = bus->priv;
138 return _mtk_mdio_read(eth, phy_addr, phy_reg);
141 static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth, int speed)
146 val = (speed == SPEED_1000) ?
147 INTF_MODE_RGMII_1000 : INTF_MODE_RGMII_10_100;
148 mtk_w32(eth, val, INTF_MODE);
150 regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0,
151 ETHSYS_TRGMII_CLK_SEL362_5,
152 ETHSYS_TRGMII_CLK_SEL362_5);
154 val = (speed == SPEED_1000) ? 250000000 : 500000000;
155 ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], val);
157 dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret);
159 val = (speed == SPEED_1000) ?
160 RCK_CTRL_RGMII_1000 : RCK_CTRL_RGMII_10_100;
161 mtk_w32(eth, val, TRGMII_RCK_CTRL);
163 val = (speed == SPEED_1000) ?
164 TCK_CTRL_RGMII_1000 : TCK_CTRL_RGMII_10_100;
165 mtk_w32(eth, val, TRGMII_TCK_CTRL);
168 static void mtk_gmac_sgmii_hw_setup(struct mtk_eth *eth, int mac_id)
172 /* Set up the link timer and power up the QPHY inside SGMIISYS */
173 regmap_write(eth->sgmiisys, SGMSYS_PCS_LINK_TIMER,
174 SGMII_LINK_TIMER_DEFAULT);
176 regmap_read(eth->sgmiisys, SGMSYS_SGMII_MODE, &val);
177 val |= SGMII_REMOTE_FAULT_DIS;
178 regmap_write(eth->sgmiisys, SGMSYS_SGMII_MODE, val);
180 regmap_read(eth->sgmiisys, SGMSYS_PCS_CONTROL_1, &val);
181 val |= SGMII_AN_RESTART;
182 regmap_write(eth->sgmiisys, SGMSYS_PCS_CONTROL_1, val);
184 regmap_read(eth->sgmiisys, SGMSYS_QPHY_PWR_STATE_CTRL, &val);
185 val &= ~SGMII_PHYA_PWD;
186 regmap_write(eth->sgmiisys, SGMSYS_QPHY_PWR_STATE_CTRL, val);
188 /* Set up the MUX that determines which GMAC uses the SGMII interface */
189 if (MTK_HAS_CAPS(eth->soc->caps, MTK_DUAL_GMAC_SHARED_SGMII)) {
190 regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
191 val &= ~SYSCFG0_SGMII_MASK;
192 val |= !mac_id ? SYSCFG0_SGMII_GMAC1 : SYSCFG0_SGMII_GMAC2;
193 regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val);
195 dev_info(eth->dev, "setup shared sgmii for gmac=%d\n",
199 /* Set up GMAC1 to go through the SGMII path when the SoC also supports
202 if (MTK_HAS_CAPS(eth->soc->caps, MTK_GMAC1_ESW | MTK_GMAC1_SGMII) &&
204 mtk_w32(eth, 0, MTK_MAC_MISC);
205 dev_info(eth->dev, "setup gmac1 going through sgmii");
209 static void mtk_phy_link_adjust(struct net_device *dev)
211 struct mtk_mac *mac = netdev_priv(dev);
212 u16 lcl_adv = 0, rmt_adv = 0;
214 u32 mcr = MAC_MCR_MAX_RX_1536 | MAC_MCR_IPG_CFG |
215 MAC_MCR_FORCE_MODE | MAC_MCR_TX_EN |
216 MAC_MCR_RX_EN | MAC_MCR_BACKOFF_EN |
219 if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
222 switch (dev->phydev->speed) {
224 mcr |= MAC_MCR_SPEED_1000;
227 mcr |= MAC_MCR_SPEED_100;
231 if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GMAC1_TRGMII) &&
232 !mac->id && !mac->trgmii)
233 mtk_gmac0_rgmii_adjust(mac->hw, dev->phydev->speed);
235 if (dev->phydev->link)
236 mcr |= MAC_MCR_FORCE_LINK;
238 if (dev->phydev->duplex) {
239 mcr |= MAC_MCR_FORCE_DPX;
241 if (dev->phydev->pause)
242 rmt_adv = LPA_PAUSE_CAP;
243 if (dev->phydev->asym_pause)
244 rmt_adv |= LPA_PAUSE_ASYM;
246 if (dev->phydev->advertising & ADVERTISED_Pause)
247 lcl_adv |= ADVERTISE_PAUSE_CAP;
248 if (dev->phydev->advertising & ADVERTISED_Asym_Pause)
249 lcl_adv |= ADVERTISE_PAUSE_ASYM;
251 flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
253 if (flowctrl & FLOW_CTRL_TX)
254 mcr |= MAC_MCR_FORCE_TX_FC;
255 if (flowctrl & FLOW_CTRL_RX)
256 mcr |= MAC_MCR_FORCE_RX_FC;
258 netif_dbg(mac->hw, link, dev, "rx pause %s, tx pause %s\n",
259 flowctrl & FLOW_CTRL_RX ? "enabled" : "disabled",
260 flowctrl & FLOW_CTRL_TX ? "enabled" : "disabled");
263 mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
265 if (dev->phydev->link)
266 netif_carrier_on(dev);
268 netif_carrier_off(dev);
270 if (!of_phy_is_fixed_link(mac->of_node))
271 phy_print_status(dev->phydev);
274 static int mtk_phy_connect_node(struct mtk_eth *eth, struct mtk_mac *mac,
275 struct device_node *phy_node)
277 struct phy_device *phydev;
280 phy_mode = of_get_phy_mode(phy_node);
282 dev_err(eth->dev, "incorrect phy-mode %d\n", phy_mode);
286 phydev = of_phy_connect(eth->netdev[mac->id], phy_node,
287 mtk_phy_link_adjust, 0, phy_mode);
289 dev_err(eth->dev, "could not connect to PHY\n");
294 "connected mac %d to PHY at %s [uid=%08x, driver=%s]\n",
295 mac->id, phydev_name(phydev), phydev->phy_id,
301 static int mtk_phy_connect(struct net_device *dev)
303 struct mtk_mac *mac = netdev_priv(dev);
305 struct device_node *np;
309 np = of_parse_phandle(mac->of_node, "phy-handle", 0);
310 if (!np && of_phy_is_fixed_link(mac->of_node))
311 if (!of_phy_register_fixed_link(mac->of_node))
312 np = of_node_get(mac->of_node);
317 switch (of_get_phy_mode(np)) {
318 case PHY_INTERFACE_MODE_TRGMII:
320 case PHY_INTERFACE_MODE_RGMII_TXID:
321 case PHY_INTERFACE_MODE_RGMII_RXID:
322 case PHY_INTERFACE_MODE_RGMII_ID:
323 case PHY_INTERFACE_MODE_RGMII:
325 case PHY_INTERFACE_MODE_SGMII:
326 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII))
327 mtk_gmac_sgmii_hw_setup(eth, mac->id);
329 case PHY_INTERFACE_MODE_MII:
332 case PHY_INTERFACE_MODE_REVMII:
335 case PHY_INTERFACE_MODE_RMII:
344 /* put the gmac into the right mode */
345 regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
346 val &= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK, mac->id);
347 val |= SYSCFG0_GE_MODE(mac->ge_mode, mac->id);
348 regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val);
350 /* couple phydev to net_device */
351 if (mtk_phy_connect_node(eth, mac, np))
354 dev->phydev->autoneg = AUTONEG_ENABLE;
355 dev->phydev->speed = 0;
356 dev->phydev->duplex = 0;
358 if (of_phy_is_fixed_link(mac->of_node))
359 dev->phydev->supported |=
360 SUPPORTED_Pause | SUPPORTED_Asym_Pause;
362 dev->phydev->supported &= PHY_GBIT_FEATURES | SUPPORTED_Pause |
363 SUPPORTED_Asym_Pause;
364 dev->phydev->advertising = dev->phydev->supported |
366 phy_start_aneg(dev->phydev);
373 if (of_phy_is_fixed_link(mac->of_node))
374 of_phy_deregister_fixed_link(mac->of_node);
376 dev_err(eth->dev, "%s: invalid phy\n", __func__);
380 static int mtk_mdio_init(struct mtk_eth *eth)
382 struct device_node *mii_np;
385 mii_np = of_get_child_by_name(eth->dev->of_node, "mdio-bus");
387 dev_err(eth->dev, "no %s child node found", "mdio-bus");
391 if (!of_device_is_available(mii_np)) {
396 eth->mii_bus = devm_mdiobus_alloc(eth->dev);
402 eth->mii_bus->name = "mdio";
403 eth->mii_bus->read = mtk_mdio_read;
404 eth->mii_bus->write = mtk_mdio_write;
405 eth->mii_bus->priv = eth;
406 eth->mii_bus->parent = eth->dev;
408 snprintf(eth->mii_bus->id, MII_BUS_ID_SIZE, "%s", mii_np->name);
409 ret = of_mdiobus_register(eth->mii_bus, mii_np);
416 static void mtk_mdio_cleanup(struct mtk_eth *eth)
421 mdiobus_unregister(eth->mii_bus);
424 static inline void mtk_tx_irq_disable(struct mtk_eth *eth, u32 mask)
429 spin_lock_irqsave(&eth->tx_irq_lock, flags);
430 val = mtk_r32(eth, MTK_QDMA_INT_MASK);
431 mtk_w32(eth, val & ~mask, MTK_QDMA_INT_MASK);
432 spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
435 static inline void mtk_tx_irq_enable(struct mtk_eth *eth, u32 mask)
440 spin_lock_irqsave(&eth->tx_irq_lock, flags);
441 val = mtk_r32(eth, MTK_QDMA_INT_MASK);
442 mtk_w32(eth, val | mask, MTK_QDMA_INT_MASK);
443 spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
446 static inline void mtk_rx_irq_disable(struct mtk_eth *eth, u32 mask)
451 spin_lock_irqsave(&eth->rx_irq_lock, flags);
452 val = mtk_r32(eth, MTK_PDMA_INT_MASK);
453 mtk_w32(eth, val & ~mask, MTK_PDMA_INT_MASK);
454 spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
457 static inline void mtk_rx_irq_enable(struct mtk_eth *eth, u32 mask)
462 spin_lock_irqsave(&eth->rx_irq_lock, flags);
463 val = mtk_r32(eth, MTK_PDMA_INT_MASK);
464 mtk_w32(eth, val | mask, MTK_PDMA_INT_MASK);
465 spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
468 static int mtk_set_mac_address(struct net_device *dev, void *p)
470 int ret = eth_mac_addr(dev, p);
471 struct mtk_mac *mac = netdev_priv(dev);
472 const char *macaddr = dev->dev_addr;
477 if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
480 spin_lock_bh(&mac->hw->page_lock);
481 mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
482 MTK_GDMA_MAC_ADRH(mac->id));
483 mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
484 (macaddr[4] << 8) | macaddr[5],
485 MTK_GDMA_MAC_ADRL(mac->id));
486 spin_unlock_bh(&mac->hw->page_lock);
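/* Register layout implied by the writes above: the two most significant
 * address bytes go into the low 16 bits of GDMA_MAC_ADRH and the remaining
 * four bytes into GDMA_MAC_ADRL, so 00:11:22:33:44:55 is programmed as
 * ADRH = 0x0011 and ADRL = 0x22334455.
 */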
491 void mtk_stats_update_mac(struct mtk_mac *mac)
493 struct mtk_hw_stats *hw_stats = mac->hw_stats;
494 unsigned int base = MTK_GDM1_TX_GBCNT;
497 base += hw_stats->reg_offset;
499 u64_stats_update_begin(&hw_stats->syncp);
501 hw_stats->rx_bytes += mtk_r32(mac->hw, base);
502 stats = mtk_r32(mac->hw, base + 0x04);
504 hw_stats->rx_bytes += (stats << 32);
505 hw_stats->rx_packets += mtk_r32(mac->hw, base + 0x08);
506 hw_stats->rx_overflow += mtk_r32(mac->hw, base + 0x10);
507 hw_stats->rx_fcs_errors += mtk_r32(mac->hw, base + 0x14);
508 hw_stats->rx_short_errors += mtk_r32(mac->hw, base + 0x18);
509 hw_stats->rx_long_errors += mtk_r32(mac->hw, base + 0x1c);
510 hw_stats->rx_checksum_errors += mtk_r32(mac->hw, base + 0x20);
511 hw_stats->rx_flow_control_packets +=
512 mtk_r32(mac->hw, base + 0x24);
513 hw_stats->tx_skip += mtk_r32(mac->hw, base + 0x28);
514 hw_stats->tx_collisions += mtk_r32(mac->hw, base + 0x2c);
515 hw_stats->tx_bytes += mtk_r32(mac->hw, base + 0x30);
516 stats = mtk_r32(mac->hw, base + 0x34);
518 hw_stats->tx_bytes += (stats << 32);
519 hw_stats->tx_packets += mtk_r32(mac->hw, base + 0x38);
520 u64_stats_update_end(&hw_stats->syncp);
523 static void mtk_stats_update(struct mtk_eth *eth)
527 for (i = 0; i < MTK_MAC_COUNT; i++) {
528 if (!eth->mac[i] || !eth->mac[i]->hw_stats)
530 if (spin_trylock(&eth->mac[i]->hw_stats->stats_lock)) {
531 mtk_stats_update_mac(eth->mac[i]);
532 spin_unlock(&eth->mac[i]->hw_stats->stats_lock);
537 static void mtk_get_stats64(struct net_device *dev,
538 struct rtnl_link_stats64 *storage)
540 struct mtk_mac *mac = netdev_priv(dev);
541 struct mtk_hw_stats *hw_stats = mac->hw_stats;
544 if (netif_running(dev) && netif_device_present(dev)) {
545 if (spin_trylock_bh(&hw_stats->stats_lock)) {
546 mtk_stats_update_mac(mac);
547 spin_unlock_bh(&hw_stats->stats_lock);
552 start = u64_stats_fetch_begin_irq(&hw_stats->syncp);
553 storage->rx_packets = hw_stats->rx_packets;
554 storage->tx_packets = hw_stats->tx_packets;
555 storage->rx_bytes = hw_stats->rx_bytes;
556 storage->tx_bytes = hw_stats->tx_bytes;
557 storage->collisions = hw_stats->tx_collisions;
558 storage->rx_length_errors = hw_stats->rx_short_errors +
559 hw_stats->rx_long_errors;
560 storage->rx_over_errors = hw_stats->rx_overflow;
561 storage->rx_crc_errors = hw_stats->rx_fcs_errors;
562 storage->rx_errors = hw_stats->rx_checksum_errors;
563 storage->tx_aborted_errors = hw_stats->tx_skip;
564 } while (u64_stats_fetch_retry_irq(&hw_stats->syncp, start));
566 storage->tx_errors = dev->stats.tx_errors;
567 storage->rx_dropped = dev->stats.rx_dropped;
568 storage->tx_dropped = dev->stats.tx_dropped;
571 static inline int mtk_max_frag_size(int mtu)
573 /* make sure buf_size will be at least MTK_MAX_RX_LENGTH */
574 if (mtu + MTK_RX_ETH_HLEN < MTK_MAX_RX_LENGTH)
575 mtu = MTK_MAX_RX_LENGTH - MTK_RX_ETH_HLEN;
577 return SKB_DATA_ALIGN(MTK_RX_HLEN + mtu) +
578 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
581 static inline int mtk_max_buf_size(int frag_size)
583 int buf_size = frag_size - NET_SKB_PAD - NET_IP_ALIGN -
584 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
586 WARN_ON(buf_size < MTK_MAX_RX_LENGTH);
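/* Sizing sketch (assuming MTK_RX_HLEN in mtk_eth_soc.h covers NET_SKB_PAD,
 * NET_IP_ALIGN and the Ethernet/VLAN/FCS overhead): mtk_max_frag_size()
 * computes the allocation needed for one RX fragment, including the trailing
 * struct skb_shared_info, and mtk_max_buf_size() recovers how much packet
 * data such a fragment can hold; the WARN_ON above catches any combination
 * that could no longer fit an MTK_MAX_RX_LENGTH frame.
 */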
591 static inline void mtk_rx_get_desc(struct mtk_rx_dma *rxd,
592 struct mtk_rx_dma *dma_rxd)
594 rxd->rxd1 = READ_ONCE(dma_rxd->rxd1);
595 rxd->rxd2 = READ_ONCE(dma_rxd->rxd2);
596 rxd->rxd3 = READ_ONCE(dma_rxd->rxd3);
597 rxd->rxd4 = READ_ONCE(dma_rxd->rxd4);
600 static void *mtk_max_lro_buf_alloc(gfp_t gfp_mask)
602 unsigned int size = mtk_max_frag_size(MTK_MAX_LRO_RX_LENGTH);
605 data = __get_free_pages(gfp_mask | __GFP_COMP | __GFP_NOWARN,
611 /* the QDMA core needs scratch memory to be set up */
612 static int mtk_init_fq_dma(struct mtk_eth *eth)
614 dma_addr_t phy_ring_tail;
615 int cnt = MTK_DMA_SIZE;
619 eth->scratch_ring = dma_zalloc_coherent(eth->dev,
620 cnt * sizeof(struct mtk_tx_dma),
621 &eth->phy_scratch_ring,
623 if (unlikely(!eth->scratch_ring))
626 eth->scratch_head = kcalloc(cnt, MTK_QDMA_PAGE_SIZE,
628 if (unlikely(!eth->scratch_head))
631 dma_addr = dma_map_single(eth->dev,
632 eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE,
634 if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
637 phy_ring_tail = eth->phy_scratch_ring +
638 (sizeof(struct mtk_tx_dma) * (cnt - 1));
640 for (i = 0; i < cnt; i++) {
641 eth->scratch_ring[i].txd1 =
642 (dma_addr + (i * MTK_QDMA_PAGE_SIZE));
644 eth->scratch_ring[i].txd2 = (eth->phy_scratch_ring +
645 ((i + 1) * sizeof(struct mtk_tx_dma)));
646 eth->scratch_ring[i].txd3 = TX_DMA_SDL(MTK_QDMA_PAGE_SIZE);
649 mtk_w32(eth, eth->phy_scratch_ring, MTK_QDMA_FQ_HEAD);
650 mtk_w32(eth, phy_ring_tail, MTK_QDMA_FQ_TAIL);
651 mtk_w32(eth, (cnt << 16) | cnt, MTK_QDMA_FQ_CNT);
652 mtk_w32(eth, MTK_QDMA_PAGE_SIZE << 16, MTK_QDMA_FQ_BLEN);
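/* The loop above chains the scratch descriptors together: txd1 points at the
 * i-th MTK_QDMA_PAGE_SIZE chunk of scratch memory and txd2 at the physical
 * address of the next descriptor. The FQ_HEAD/FQ_TAIL/FQ_CNT/FQ_BLEN writes
 * then hand this chain to the QDMA engine as its free queue (the "fq" in
 * mtk_init_fq_dma).
 */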
657 static inline void *mtk_qdma_phys_to_virt(struct mtk_tx_ring *ring, u32 desc)
659 void *ret = ring->dma;
661 return ret + (desc - ring->phys);
664 static inline struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
665 struct mtk_tx_dma *txd)
667 int idx = txd - ring->dma;
669 return &ring->buf[idx];
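/* Both helpers rely on the TX ring being one contiguous allocation:
 * mtk_qdma_phys_to_virt() turns a descriptor's DMA address (as found in
 * txd2 or in the QDMA pointer registers) back into its CPU-visible address,
 * and mtk_desc_to_tx_buf() maps a descriptor to the mtk_tx_buf entry at the
 * same index in ring->buf.
 */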
672 static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf)
674 if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
675 dma_unmap_single(eth->dev,
676 dma_unmap_addr(tx_buf, dma_addr0),
677 dma_unmap_len(tx_buf, dma_len0),
679 } else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) {
680 dma_unmap_page(eth->dev,
681 dma_unmap_addr(tx_buf, dma_addr0),
682 dma_unmap_len(tx_buf, dma_len0),
687 (tx_buf->skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC))
688 dev_kfree_skb_any(tx_buf->skb);
692 static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
693 int tx_num, struct mtk_tx_ring *ring, bool gso)
695 struct mtk_mac *mac = netdev_priv(dev);
696 struct mtk_eth *eth = mac->hw;
697 struct mtk_tx_dma *itxd, *txd;
698 struct mtk_tx_buf *itx_buf, *tx_buf;
699 dma_addr_t mapped_addr;
700 unsigned int nr_frags;
704 itxd = ring->next_free;
705 if (itxd == ring->last_free)
708 /* set the forward port */
709 fport = (mac->id + 1) << TX_DMA_FPORT_SHIFT;
712 itx_buf = mtk_desc_to_tx_buf(ring, itxd);
713 memset(itx_buf, 0, sizeof(*itx_buf));
718 /* TX Checksum offload */
719 if (skb->ip_summed == CHECKSUM_PARTIAL)
720 txd4 |= TX_DMA_CHKSUM;
722 /* VLAN header offload */
723 if (skb_vlan_tag_present(skb))
724 txd4 |= TX_DMA_INS_VLAN | skb_vlan_tag_get(skb);
726 mapped_addr = dma_map_single(eth->dev, skb->data,
727 skb_headlen(skb), DMA_TO_DEVICE);
728 if (unlikely(dma_mapping_error(eth->dev, mapped_addr)))
731 WRITE_ONCE(itxd->txd1, mapped_addr);
732 itx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
733 itx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 :
735 dma_unmap_addr_set(itx_buf, dma_addr0, mapped_addr);
736 dma_unmap_len_set(itx_buf, dma_len0, skb_headlen(skb));
740 nr_frags = skb_shinfo(skb)->nr_frags;
741 for (i = 0; i < nr_frags; i++) {
742 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
743 unsigned int offset = 0;
744 int frag_size = skb_frag_size(frag);
747 bool last_frag = false;
748 unsigned int frag_map_size;
750 txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
751 if (txd == ring->last_free)
755 frag_map_size = min(frag_size, MTK_TX_DMA_BUF_LEN);
756 mapped_addr = skb_frag_dma_map(eth->dev, frag, offset,
759 if (unlikely(dma_mapping_error(eth->dev, mapped_addr)))
762 if (i == nr_frags - 1 &&
763 (frag_size - frag_map_size) == 0)
766 WRITE_ONCE(txd->txd1, mapped_addr);
767 WRITE_ONCE(txd->txd3, (TX_DMA_SWC |
768 TX_DMA_PLEN0(frag_map_size) |
769 last_frag * TX_DMA_LS0));
770 WRITE_ONCE(txd->txd4, fport);
772 tx_buf = mtk_desc_to_tx_buf(ring, txd);
773 memset(tx_buf, 0, sizeof(*tx_buf));
774 tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
775 tx_buf->flags |= MTK_TX_FLAGS_PAGE0;
776 tx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 :
779 dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
780 dma_unmap_len_set(tx_buf, dma_len0, frag_map_size);
781 frag_size -= frag_map_size;
782 offset += frag_map_size;
786 /* store the skb so it can be cleaned up later */
789 WRITE_ONCE(itxd->txd4, txd4);
790 WRITE_ONCE(itxd->txd3, (TX_DMA_SWC | TX_DMA_PLEN0(skb_headlen(skb)) |
791 (!nr_frags * TX_DMA_LS0)));
793 netdev_sent_queue(dev, skb->len);
794 skb_tx_timestamp(skb);
796 ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2);
797 atomic_sub(n_desc, &ring->free_count);
799 /* make sure that all changes to the dma ring are flushed before we
804 if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) || !skb->xmit_more)
805 mtk_w32(eth, txd->txd2, MTK_QTX_CTX_PTR);
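/* The QTX_CTX_PTR write above acts as the doorbell telling the QDMA engine
 * where the CPU's prepared descriptors end; skipping it while skb->xmit_more
 * is set lets several queued skbs share a single register write.
 */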
811 tx_buf = mtk_desc_to_tx_buf(ring, itxd);
814 mtk_tx_unmap(eth, tx_buf);
816 itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
817 itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2);
818 } while (itxd != txd);
823 static inline int mtk_cal_txd_req(struct sk_buff *skb)
826 struct skb_frag_struct *frag;
829 if (skb_is_gso(skb)) {
830 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
831 frag = &skb_shinfo(skb)->frags[i];
832 nfrags += DIV_ROUND_UP(frag->size, MTK_TX_DMA_BUF_LEN);
835 nfrags += skb_shinfo(skb)->nr_frags;
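/* Worked example (assuming nfrags starts at 1 for the linear head and
 * MTK_TX_DMA_BUF_LEN is the 0x3fff-byte per-descriptor limit defined in
 * mtk_eth_soc.h): a GSO skb with two 48 KiB fragments needs
 * DIV_ROUND_UP(49152, 0x3fff) = 4 descriptors per fragment, so
 * mtk_start_xmit() reserves 1 + 4 + 4 = 9 ring entries before calling
 * mtk_tx_map().
 */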
841 static int mtk_queue_stopped(struct mtk_eth *eth)
845 for (i = 0; i < MTK_MAC_COUNT; i++) {
848 if (netif_queue_stopped(eth->netdev[i]))
855 static void mtk_wake_queue(struct mtk_eth *eth)
859 for (i = 0; i < MTK_MAC_COUNT; i++) {
862 netif_wake_queue(eth->netdev[i]);
866 static void mtk_stop_queue(struct mtk_eth *eth)
870 for (i = 0; i < MTK_MAC_COUNT; i++) {
873 netif_stop_queue(eth->netdev[i]);
877 static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
879 struct mtk_mac *mac = netdev_priv(dev);
880 struct mtk_eth *eth = mac->hw;
881 struct mtk_tx_ring *ring = &eth->tx_ring;
882 struct net_device_stats *stats = &dev->stats;
886 /* normally we can rely on the stack not calling this more than once,
887 * however we have 2 queues running on the same ring so we need to lock
890 spin_lock(&eth->page_lock);
892 if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
895 tx_num = mtk_cal_txd_req(skb);
896 if (unlikely(atomic_read(&ring->free_count) <= tx_num)) {
898 netif_err(eth, tx_queued, dev,
899 "Tx Ring full when queue awake!\n");
900 spin_unlock(&eth->page_lock);
901 return NETDEV_TX_BUSY;
904 /* TSO: fill MSS info in tcp checksum field */
905 if (skb_is_gso(skb)) {
906 if (skb_cow_head(skb, 0)) {
907 netif_warn(eth, tx_err, dev,
908 "GSO expand head fail.\n");
912 if (skb_shinfo(skb)->gso_type &
913 (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
915 tcp_hdr(skb)->check = htons(skb_shinfo(skb)->gso_size);
919 if (mtk_tx_map(skb, dev, tx_num, ring, gso) < 0)
922 if (unlikely(atomic_read(&ring->free_count) <= ring->thresh))
925 spin_unlock(&eth->page_lock);
930 spin_unlock(&eth->page_lock);
932 dev_kfree_skb_any(skb);
936 static struct mtk_rx_ring *mtk_get_rx_ring(struct mtk_eth *eth)
939 struct mtk_rx_ring *ring;
943 return &eth->rx_ring[0];
945 for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
946 ring = &eth->rx_ring[i];
947 idx = NEXT_RX_DESP_IDX(ring->calc_idx, ring->dma_size);
948 if (ring->dma[idx].rxd2 & RX_DMA_DONE) {
949 ring->calc_idx_update = true;
957 static void mtk_update_rx_cpu_idx(struct mtk_eth *eth)
959 struct mtk_rx_ring *ring;
963 ring = &eth->rx_ring[0];
964 mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
966 for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
967 ring = &eth->rx_ring[i];
968 if (ring->calc_idx_update) {
969 ring->calc_idx_update = false;
970 mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
976 static int mtk_poll_rx(struct napi_struct *napi, int budget,
979 struct mtk_rx_ring *ring;
983 struct mtk_rx_dma *rxd, trxd;
986 while (done < budget) {
987 struct net_device *netdev;
992 ring = mtk_get_rx_ring(eth);
996 idx = NEXT_RX_DESP_IDX(ring->calc_idx, ring->dma_size);
997 rxd = &ring->dma[idx];
998 data = ring->data[idx];
1000 mtk_rx_get_desc(&trxd, rxd);
1001 if (!(trxd.rxd2 & RX_DMA_DONE))
1004 /* find out which MAC the packet comes from; port values start at 1 */
1005 mac = (trxd.rxd4 >> RX_DMA_FPORT_SHIFT) &
1009 if (unlikely(mac < 0 || mac >= MTK_MAC_COUNT ||
1013 netdev = eth->netdev[mac];
1015 if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
1018 /* alloc new buffer */
1019 if (ring->frag_size <= PAGE_SIZE)
1020 new_data = napi_alloc_frag(ring->frag_size);
1022 new_data = mtk_max_lro_buf_alloc(GFP_ATOMIC);
1023 if (unlikely(!new_data)) {
1024 netdev->stats.rx_dropped++;
1027 dma_addr = dma_map_single(eth->dev,
1028 new_data + NET_SKB_PAD,
1031 if (unlikely(dma_mapping_error(eth->dev, dma_addr))) {
1032 skb_free_frag(new_data);
1033 netdev->stats.rx_dropped++;
1038 skb = build_skb(data, ring->frag_size);
1039 if (unlikely(!skb)) {
1040 skb_free_frag(new_data);
1041 netdev->stats.rx_dropped++;
1044 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
1046 dma_unmap_single(eth->dev, trxd.rxd1,
1047 ring->buf_size, DMA_FROM_DEVICE);
1048 pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
1050 skb_put(skb, pktlen);
1051 if (trxd.rxd4 & RX_DMA_L4_VALID)
1052 skb->ip_summed = CHECKSUM_UNNECESSARY;
1054 skb_checksum_none_assert(skb);
1055 skb->protocol = eth_type_trans(skb, netdev);
1057 if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX &&
1058 (trxd.rxd2 & RX_DMA_VTAG))
1059 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
1060 RX_DMA_VID(trxd.rxd3));
1061 skb_record_rx_queue(skb, 0);
1062 napi_gro_receive(napi, skb);
1064 ring->data[idx] = new_data;
1065 rxd->rxd1 = (unsigned int)dma_addr;
1068 rxd->rxd2 = RX_DMA_PLEN0(ring->buf_size);
1070 ring->calc_idx = idx;
1077 /* make sure that all changes to the dma ring are flushed before
1081 mtk_update_rx_cpu_idx(eth);
1087 static int mtk_poll_tx(struct mtk_eth *eth, int budget)
1089 struct mtk_tx_ring *ring = &eth->tx_ring;
1090 struct mtk_tx_dma *desc;
1091 struct sk_buff *skb;
1092 struct mtk_tx_buf *tx_buf;
1093 unsigned int done[MTK_MAX_DEVS];
1094 unsigned int bytes[MTK_MAX_DEVS];
1098 memset(done, 0, sizeof(done));
1099 memset(bytes, 0, sizeof(bytes));
1101 cpu = mtk_r32(eth, MTK_QTX_CRX_PTR);
1102 dma = mtk_r32(eth, MTK_QTX_DRX_PTR);
1104 desc = mtk_qdma_phys_to_virt(ring, cpu);
1106 while ((cpu != dma) && budget) {
1107 u32 next_cpu = desc->txd2;
1110 desc = mtk_qdma_phys_to_virt(ring, desc->txd2);
1111 if ((desc->txd3 & TX_DMA_OWNER_CPU) == 0)
1114 tx_buf = mtk_desc_to_tx_buf(ring, desc);
1115 if (tx_buf->flags & MTK_TX_FLAGS_FPORT1)
1122 if (skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC) {
1123 bytes[mac] += skb->len;
1127 mtk_tx_unmap(eth, tx_buf);
1129 ring->last_free = desc;
1130 atomic_inc(&ring->free_count);
1135 mtk_w32(eth, cpu, MTK_QTX_CRX_PTR);
1137 for (i = 0; i < MTK_MAC_COUNT; i++) {
1138 if (!eth->netdev[i] || !done[i])
1140 netdev_completed_queue(eth->netdev[i], done[i], bytes[i]);
1144 if (mtk_queue_stopped(eth) &&
1145 (atomic_read(&ring->free_count) > ring->thresh))
1146 mtk_wake_queue(eth);
1151 static void mtk_handle_status_irq(struct mtk_eth *eth)
1153 u32 status2 = mtk_r32(eth, MTK_INT_STATUS2);
1155 if (unlikely(status2 & (MTK_GDM1_AF | MTK_GDM2_AF))) {
1156 mtk_stats_update(eth);
1157 mtk_w32(eth, (MTK_GDM1_AF | MTK_GDM2_AF),
1162 static int mtk_napi_tx(struct napi_struct *napi, int budget)
1164 struct mtk_eth *eth = container_of(napi, struct mtk_eth, tx_napi);
1168 mtk_handle_status_irq(eth);
1169 mtk_w32(eth, MTK_TX_DONE_INT, MTK_QMTK_INT_STATUS);
1170 tx_done = mtk_poll_tx(eth, budget);
1172 if (unlikely(netif_msg_intr(eth))) {
1173 status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
1174 mask = mtk_r32(eth, MTK_QDMA_INT_MASK);
1176 "done tx %d, intr 0x%08x/0x%x\n",
1177 tx_done, status, mask);
1180 if (tx_done == budget)
1183 status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
1184 if (status & MTK_TX_DONE_INT)
1187 napi_complete(napi);
1188 mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
1193 static int mtk_napi_rx(struct napi_struct *napi, int budget)
1195 struct mtk_eth *eth = container_of(napi, struct mtk_eth, rx_napi);
1198 int remain_budget = budget;
1200 mtk_handle_status_irq(eth);
1203 mtk_w32(eth, MTK_RX_DONE_INT, MTK_PDMA_INT_STATUS);
1204 rx_done = mtk_poll_rx(napi, remain_budget, eth);
1206 if (unlikely(netif_msg_intr(eth))) {
1207 status = mtk_r32(eth, MTK_PDMA_INT_STATUS);
1208 mask = mtk_r32(eth, MTK_PDMA_INT_MASK);
1210 "done rx %d, intr 0x%08x/0x%x\n",
1211 rx_done, status, mask);
1213 if (rx_done == remain_budget)
1216 status = mtk_r32(eth, MTK_PDMA_INT_STATUS);
1217 if (status & MTK_RX_DONE_INT) {
1218 remain_budget -= rx_done;
1221 napi_complete(napi);
1222 mtk_rx_irq_enable(eth, MTK_RX_DONE_INT);
1224 return rx_done + budget - remain_budget;
1227 static int mtk_tx_alloc(struct mtk_eth *eth)
1229 struct mtk_tx_ring *ring = &eth->tx_ring;
1230 int i, sz = sizeof(*ring->dma);
1232 ring->buf = kcalloc(MTK_DMA_SIZE, sizeof(*ring->buf),
1237 ring->dma = dma_zalloc_coherent(eth->dev, MTK_DMA_SIZE * sz,
1238 &ring->phys, GFP_ATOMIC);
1242 for (i = 0; i < MTK_DMA_SIZE; i++) {
1243 int next = (i + 1) % MTK_DMA_SIZE;
1244 u32 next_ptr = ring->phys + next * sz;
1246 ring->dma[i].txd2 = next_ptr;
1247 ring->dma[i].txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
1250 atomic_set(&ring->free_count, MTK_DMA_SIZE - 2);
1251 ring->next_free = &ring->dma[0];
1252 ring->last_free = &ring->dma[MTK_DMA_SIZE - 1];
1253 ring->thresh = MAX_SKB_FRAGS;
1255 /* make sure that all changes to the dma ring are flushed before we
1260 mtk_w32(eth, ring->phys, MTK_QTX_CTX_PTR);
1261 mtk_w32(eth, ring->phys, MTK_QTX_DTX_PTR);
1263 ring->phys + ((MTK_DMA_SIZE - 1) * sz),
1266 ring->phys + ((MTK_DMA_SIZE - 1) * sz),
1268 mtk_w32(eth, (QDMA_RES_THRES << 8) | QDMA_RES_THRES, MTK_QTX_CFG(0));
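/* The writes above seed the QDMA TX ring pointers: CTX/DTX (the transmit-side
 * pointers advanced by mtk_tx_map()) start at the ring base, and the address
 * of the last descriptor is programmed for the completion side that
 * mtk_poll_tx() later reads back through QTX_CRX_PTR/QTX_DRX_PTR.
 */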
1276 static void mtk_tx_clean(struct mtk_eth *eth)
1278 struct mtk_tx_ring *ring = &eth->tx_ring;
1282 for (i = 0; i < MTK_DMA_SIZE; i++)
1283 mtk_tx_unmap(eth, &ring->buf[i]);
1289 dma_free_coherent(eth->dev,
1290 MTK_DMA_SIZE * sizeof(*ring->dma),
1297 static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
1299 struct mtk_rx_ring *ring;
1300 int rx_data_len, rx_dma_size;
1304 if (rx_flag == MTK_RX_FLAGS_QDMA) {
1307 ring = &eth->rx_ring_qdma;
1310 ring = &eth->rx_ring[ring_no];
1313 if (rx_flag == MTK_RX_FLAGS_HWLRO) {
1314 rx_data_len = MTK_MAX_LRO_RX_LENGTH;
1315 rx_dma_size = MTK_HW_LRO_DMA_SIZE;
1317 rx_data_len = ETH_DATA_LEN;
1318 rx_dma_size = MTK_DMA_SIZE;
1321 ring->frag_size = mtk_max_frag_size(rx_data_len);
1322 ring->buf_size = mtk_max_buf_size(ring->frag_size);
1323 ring->data = kcalloc(rx_dma_size, sizeof(*ring->data),
1328 for (i = 0; i < rx_dma_size; i++) {
1329 if (ring->frag_size <= PAGE_SIZE)
1330 ring->data[i] = netdev_alloc_frag(ring->frag_size);
1332 ring->data[i] = mtk_max_lro_buf_alloc(GFP_KERNEL);
1337 ring->dma = dma_zalloc_coherent(eth->dev,
1338 rx_dma_size * sizeof(*ring->dma),
1339 &ring->phys, GFP_ATOMIC);
1343 for (i = 0; i < rx_dma_size; i++) {
1344 dma_addr_t dma_addr = dma_map_single(eth->dev,
1345 ring->data[i] + NET_SKB_PAD,
1348 if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
1350 ring->dma[i].rxd1 = (unsigned int)dma_addr;
1352 ring->dma[i].rxd2 = RX_DMA_PLEN0(ring->buf_size);
1354 ring->dma_size = rx_dma_size;
1355 ring->calc_idx_update = false;
1356 ring->calc_idx = rx_dma_size - 1;
1357 ring->crx_idx_reg = MTK_PRX_CRX_IDX_CFG(ring_no);
1358 /* make sure that all changes to the dma ring are flushed before we
1363 mtk_w32(eth, ring->phys, MTK_PRX_BASE_PTR_CFG(ring_no) + offset);
1364 mtk_w32(eth, rx_dma_size, MTK_PRX_MAX_CNT_CFG(ring_no) + offset);
1365 mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg + offset);
1366 mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no), MTK_PDMA_RST_IDX + offset);
1371 static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring)
1375 if (ring->data && ring->dma) {
1376 for (i = 0; i < ring->dma_size; i++) {
1379 if (!ring->dma[i].rxd1)
1381 dma_unmap_single(eth->dev,
1385 skb_free_frag(ring->data[i]);
1392 dma_free_coherent(eth->dev,
1393 ring->dma_size * sizeof(*ring->dma),
1400 static int mtk_hwlro_rx_init(struct mtk_eth *eth)
1403 u32 ring_ctrl_dw1 = 0, ring_ctrl_dw2 = 0, ring_ctrl_dw3 = 0;
1404 u32 lro_ctrl_dw0 = 0, lro_ctrl_dw3 = 0;
1406 /* set LRO rings to auto-learn modes */
1407 ring_ctrl_dw2 |= MTK_RING_AUTO_LERAN_MODE;
1409 /* validate LRO ring */
1410 ring_ctrl_dw2 |= MTK_RING_VLD;
1412 /* set AGE timer (unit: 20us) */
1413 ring_ctrl_dw2 |= MTK_RING_AGE_TIME_H;
1414 ring_ctrl_dw1 |= MTK_RING_AGE_TIME_L;
1416 /* set max AGG timer (unit: 20us) */
1417 ring_ctrl_dw2 |= MTK_RING_MAX_AGG_TIME;
1419 /* set max LRO AGG count */
1420 ring_ctrl_dw2 |= MTK_RING_MAX_AGG_CNT_L;
1421 ring_ctrl_dw3 |= MTK_RING_MAX_AGG_CNT_H;
1423 for (i = 1; i < MTK_MAX_RX_RING_NUM; i++) {
1424 mtk_w32(eth, ring_ctrl_dw1, MTK_LRO_CTRL_DW1_CFG(i));
1425 mtk_w32(eth, ring_ctrl_dw2, MTK_LRO_CTRL_DW2_CFG(i));
1426 mtk_w32(eth, ring_ctrl_dw3, MTK_LRO_CTRL_DW3_CFG(i));
1429 /* IPv4 checksum update enable */
1430 lro_ctrl_dw0 |= MTK_L3_CKS_UPD_EN;
1432 /* switch priority comparison to packet count mode */
1433 lro_ctrl_dw0 |= MTK_LRO_ALT_PKT_CNT_MODE;
1435 /* bandwidth threshold setting */
1436 mtk_w32(eth, MTK_HW_LRO_BW_THRE, MTK_PDMA_LRO_CTRL_DW2);
1438 /* auto-learn score delta setting */
1439 mtk_w32(eth, MTK_HW_LRO_REPLACE_DELTA, MTK_PDMA_LRO_ALT_SCORE_DELTA);
1441 /* set refresh timer for altering flows to 1 sec. (unit: 20us) */
1442 mtk_w32(eth, (MTK_HW_LRO_TIMER_UNIT << 16) | MTK_HW_LRO_REFRESH_TIME,
1443 MTK_PDMA_LRO_ALT_REFRESH_TIMER);
1445 /* set HW LRO mode & the max aggregation count for rx packets */
1446 lro_ctrl_dw3 |= MTK_ADMA_MODE | (MTK_HW_LRO_MAX_AGG_CNT & 0xff);
1448 /* the minimum room that must remain in SDL0 of the RXD for LRO aggregation */
1449 lro_ctrl_dw3 |= MTK_LRO_MIN_RXD_SDL;
1452 lro_ctrl_dw0 |= MTK_LRO_EN;
1454 mtk_w32(eth, lro_ctrl_dw3, MTK_PDMA_LRO_CTRL_DW3);
1455 mtk_w32(eth, lro_ctrl_dw0, MTK_PDMA_LRO_CTRL_DW0);
1460 static void mtk_hwlro_rx_uninit(struct mtk_eth *eth)
1465 /* relinquish lro rings, flush aggregated packets */
1466 mtk_w32(eth, MTK_LRO_RING_RELINQUISH_REQ, MTK_PDMA_LRO_CTRL_DW0);
1468 /* wait for relinquishments done */
1469 for (i = 0; i < 10; i++) {
1470 val = mtk_r32(eth, MTK_PDMA_LRO_CTRL_DW0);
1471 if (val & MTK_LRO_RING_RELINQUISH_DONE) {
1478 /* invalidate lro rings */
1479 for (i = 1; i < MTK_MAX_RX_RING_NUM; i++)
1480 mtk_w32(eth, 0, MTK_LRO_CTRL_DW2_CFG(i));
1482 /* disable HW LRO */
1483 mtk_w32(eth, 0, MTK_PDMA_LRO_CTRL_DW0);
1486 static void mtk_hwlro_val_ipaddr(struct mtk_eth *eth, int idx, __be32 ip)
1490 reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));
1492 /* invalidate the IP setting */
1493 mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
1495 mtk_w32(eth, ip, MTK_LRO_DIP_DW0_CFG(idx));
1497 /* validate the IP setting */
1498 mtk_w32(eth, (reg_val | MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
1501 static void mtk_hwlro_inval_ipaddr(struct mtk_eth *eth, int idx)
1505 reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));
1507 /* invalidate the IP setting */
1508 mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
1510 mtk_w32(eth, 0, MTK_LRO_DIP_DW0_CFG(idx));
1513 static int mtk_hwlro_get_ip_cnt(struct mtk_mac *mac)
1518 for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
1519 if (mac->hwlro_ip[i])
1526 static int mtk_hwlro_add_ipaddr(struct net_device *dev,
1527 struct ethtool_rxnfc *cmd)
1529 struct ethtool_rx_flow_spec *fsp =
1530 (struct ethtool_rx_flow_spec *)&cmd->fs;
1531 struct mtk_mac *mac = netdev_priv(dev);
1532 struct mtk_eth *eth = mac->hw;
1535 if ((fsp->flow_type != TCP_V4_FLOW) ||
1536 (!fsp->h_u.tcp_ip4_spec.ip4dst) ||
1537 (fsp->location > 1))
1540 mac->hwlro_ip[fsp->location] = htonl(fsp->h_u.tcp_ip4_spec.ip4dst);
1541 hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location;
1543 mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);
1545 mtk_hwlro_val_ipaddr(eth, hwlro_idx, mac->hwlro_ip[fsp->location]);
1550 static int mtk_hwlro_del_ipaddr(struct net_device *dev,
1551 struct ethtool_rxnfc *cmd)
1553 struct ethtool_rx_flow_spec *fsp =
1554 (struct ethtool_rx_flow_spec *)&cmd->fs;
1555 struct mtk_mac *mac = netdev_priv(dev);
1556 struct mtk_eth *eth = mac->hw;
1559 if (fsp->location > 1)
1562 mac->hwlro_ip[fsp->location] = 0;
1563 hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location;
1565 mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);
1567 mtk_hwlro_inval_ipaddr(eth, hwlro_idx);
1572 static void mtk_hwlro_netdev_disable(struct net_device *dev)
1574 struct mtk_mac *mac = netdev_priv(dev);
1575 struct mtk_eth *eth = mac->hw;
1578 for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
1579 mac->hwlro_ip[i] = 0;
1580 hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + i;
1582 mtk_hwlro_inval_ipaddr(eth, hwlro_idx);
1585 mac->hwlro_ip_cnt = 0;
1588 static int mtk_hwlro_get_fdir_entry(struct net_device *dev,
1589 struct ethtool_rxnfc *cmd)
1591 struct mtk_mac *mac = netdev_priv(dev);
1592 struct ethtool_rx_flow_spec *fsp =
1593 (struct ethtool_rx_flow_spec *)&cmd->fs;
1595 if (fsp->location >= ARRAY_SIZE(mac->hwlro_ip))
1598 /* only the TCP destination IPv4 address is meaningful; all other fields are ignored */
1599 fsp->flow_type = TCP_V4_FLOW;
1600 fsp->h_u.tcp_ip4_spec.ip4dst = ntohl(mac->hwlro_ip[fsp->location]);
1601 fsp->m_u.tcp_ip4_spec.ip4dst = 0;
1603 fsp->h_u.tcp_ip4_spec.ip4src = 0;
1604 fsp->m_u.tcp_ip4_spec.ip4src = 0xffffffff;
1605 fsp->h_u.tcp_ip4_spec.psrc = 0;
1606 fsp->m_u.tcp_ip4_spec.psrc = 0xffff;
1607 fsp->h_u.tcp_ip4_spec.pdst = 0;
1608 fsp->m_u.tcp_ip4_spec.pdst = 0xffff;
1609 fsp->h_u.tcp_ip4_spec.tos = 0;
1610 fsp->m_u.tcp_ip4_spec.tos = 0xff;
1615 static int mtk_hwlro_get_fdir_all(struct net_device *dev,
1616 struct ethtool_rxnfc *cmd,
1619 struct mtk_mac *mac = netdev_priv(dev);
1623 for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
1624 if (mac->hwlro_ip[i]) {
1630 cmd->rule_cnt = cnt;
1635 static netdev_features_t mtk_fix_features(struct net_device *dev,
1636 netdev_features_t features)
1638 if (!(features & NETIF_F_LRO)) {
1639 struct mtk_mac *mac = netdev_priv(dev);
1640 int ip_cnt = mtk_hwlro_get_ip_cnt(mac);
1643 netdev_info(dev, "RX flow is programmed, LRO should keep on\n");
1645 features |= NETIF_F_LRO;
1652 static int mtk_set_features(struct net_device *dev, netdev_features_t features)
1656 if (!((dev->features ^ features) & NETIF_F_LRO))
1659 if (!(features & NETIF_F_LRO))
1660 mtk_hwlro_netdev_disable(dev);
1665 /* wait for DMA to finish whatever it is doing before we start using it again */
1666 static int mtk_dma_busy_wait(struct mtk_eth *eth)
1668 unsigned long t_start = jiffies;
1671 if (!(mtk_r32(eth, MTK_QDMA_GLO_CFG) &
1672 (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)))
1674 if (time_after(jiffies, t_start + MTK_DMA_BUSY_TIMEOUT))
1678 dev_err(eth->dev, "DMA init timeout\n");
1682 static int mtk_dma_init(struct mtk_eth *eth)
1687 if (mtk_dma_busy_wait(eth))
1690 /* QDMA needs scratch memory for internal reordering of the
1693 err = mtk_init_fq_dma(eth);
1697 err = mtk_tx_alloc(eth);
1701 err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_QDMA);
1705 err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_NORMAL);
1710 for (i = 1; i < MTK_MAX_RX_RING_NUM; i++) {
1711 err = mtk_rx_alloc(eth, i, MTK_RX_FLAGS_HWLRO);
1715 err = mtk_hwlro_rx_init(eth);
1720 /* Enable random early drop and set drop threshold automatically */
1721 mtk_w32(eth, FC_THRES_DROP_MODE | FC_THRES_DROP_EN | FC_THRES_MIN,
1723 mtk_w32(eth, 0x0, MTK_QDMA_HRED2);
1728 static void mtk_dma_free(struct mtk_eth *eth)
1732 for (i = 0; i < MTK_MAC_COUNT; i++)
1734 netdev_reset_queue(eth->netdev[i]);
1735 if (eth->scratch_ring) {
1736 dma_free_coherent(eth->dev,
1737 MTK_DMA_SIZE * sizeof(struct mtk_tx_dma),
1739 eth->phy_scratch_ring);
1740 eth->scratch_ring = NULL;
1741 eth->phy_scratch_ring = 0;
1744 mtk_rx_clean(eth, &eth->rx_ring[0]);
1745 mtk_rx_clean(eth, &eth->rx_ring_qdma);
1748 mtk_hwlro_rx_uninit(eth);
1749 for (i = 1; i < MTK_MAX_RX_RING_NUM; i++)
1750 mtk_rx_clean(eth, &eth->rx_ring[i]);
1753 kfree(eth->scratch_head);
1756 static void mtk_tx_timeout(struct net_device *dev)
1758 struct mtk_mac *mac = netdev_priv(dev);
1759 struct mtk_eth *eth = mac->hw;
1761 eth->netdev[mac->id]->stats.tx_errors++;
1762 netif_err(eth, tx_err, dev,
1763 "transmit timed out\n");
1764 schedule_work(&eth->pending_work);
1767 static irqreturn_t mtk_handle_irq_rx(int irq, void *_eth)
1769 struct mtk_eth *eth = _eth;
1771 if (likely(napi_schedule_prep(&eth->rx_napi))) {
1772 __napi_schedule(&eth->rx_napi);
1773 mtk_rx_irq_disable(eth, MTK_RX_DONE_INT);
1779 static irqreturn_t mtk_handle_irq_tx(int irq, void *_eth)
1781 struct mtk_eth *eth = _eth;
1783 if (likely(napi_schedule_prep(&eth->tx_napi))) {
1784 __napi_schedule(&eth->tx_napi);
1785 mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
1791 #ifdef CONFIG_NET_POLL_CONTROLLER
1792 static void mtk_poll_controller(struct net_device *dev)
1794 struct mtk_mac *mac = netdev_priv(dev);
1795 struct mtk_eth *eth = mac->hw;
1797 mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
1798 mtk_rx_irq_disable(eth, MTK_RX_DONE_INT);
1799 mtk_handle_irq_rx(eth->irq[2], dev);
1800 mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
1801 mtk_rx_irq_enable(eth, MTK_RX_DONE_INT);
1805 static int mtk_start_dma(struct mtk_eth *eth)
1807 u32 rx_2b_offset = (NET_IP_ALIGN == 2) ? MTK_RX_2B_OFFSET : 0;
1810 err = mtk_dma_init(eth);
1817 MTK_TX_WB_DDONE | MTK_TX_DMA_EN |
1818 MTK_DMA_SIZE_16DWORDS | MTK_NDP_CO_PRO |
1819 MTK_RX_DMA_EN | MTK_RX_2B_OFFSET |
1824 MTK_RX_DMA_EN | rx_2b_offset |
1825 MTK_RX_BT_32DWORDS | MTK_MULTI_EN,
1831 static int mtk_open(struct net_device *dev)
1833 struct mtk_mac *mac = netdev_priv(dev);
1834 struct mtk_eth *eth = mac->hw;
1836 /* we run 2 netdevs on the same dma ring so we only bring it up once */
1837 if (!refcount_read(&eth->dma_refcnt)) {
1838 int err = mtk_start_dma(eth);
1843 napi_enable(&eth->tx_napi);
1844 napi_enable(&eth->rx_napi);
1845 mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
1846 mtk_rx_irq_enable(eth, MTK_RX_DONE_INT);
1847 refcount_set(&eth->dma_refcnt, 1);
1850 refcount_inc(&eth->dma_refcnt);
1852 phy_start(dev->phydev);
1853 netif_start_queue(dev);
1858 static void mtk_stop_dma(struct mtk_eth *eth, u32 glo_cfg)
1863 /* stop the dma engine */
1864 spin_lock_bh(&eth->page_lock);
1865 val = mtk_r32(eth, glo_cfg);
1866 mtk_w32(eth, val & ~(MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN),
1868 spin_unlock_bh(&eth->page_lock);
1870 /* wait for dma stop */
1871 for (i = 0; i < 10; i++) {
1872 val = mtk_r32(eth, glo_cfg);
1873 if (val & (MTK_TX_DMA_BUSY | MTK_RX_DMA_BUSY)) {
1881 static int mtk_stop(struct net_device *dev)
1883 struct mtk_mac *mac = netdev_priv(dev);
1884 struct mtk_eth *eth = mac->hw;
1886 netif_tx_disable(dev);
1887 phy_stop(dev->phydev);
1889 /* only shut down the DMA if this is the last user */
1890 if (!refcount_dec_and_test(&eth->dma_refcnt))
1893 mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
1894 mtk_rx_irq_disable(eth, MTK_RX_DONE_INT);
1895 napi_disable(&eth->tx_napi);
1896 napi_disable(&eth->rx_napi);
1898 mtk_stop_dma(eth, MTK_QDMA_GLO_CFG);
1899 mtk_stop_dma(eth, MTK_PDMA_GLO_CFG);
1906 static void ethsys_reset(struct mtk_eth *eth, u32 reset_bits)
1908 regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
1912 usleep_range(1000, 1100);
1913 regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
1919 static void mtk_clk_disable(struct mtk_eth *eth)
1923 for (clk = MTK_CLK_MAX - 1; clk >= 0; clk--)
1924 clk_disable_unprepare(eth->clks[clk]);
1927 static int mtk_clk_enable(struct mtk_eth *eth)
1931 for (clk = 0; clk < MTK_CLK_MAX ; clk++) {
1932 ret = clk_prepare_enable(eth->clks[clk]);
1934 goto err_disable_clks;
1941 clk_disable_unprepare(eth->clks[clk]);
1946 static int mtk_hw_init(struct mtk_eth *eth)
1950 if (test_and_set_bit(MTK_HW_INIT, &eth->state))
1953 pm_runtime_enable(eth->dev);
1954 pm_runtime_get_sync(eth->dev);
1956 ret = mtk_clk_enable(eth);
1958 goto err_disable_pm;
1960 ethsys_reset(eth, RSTCTRL_FE);
1961 ethsys_reset(eth, RSTCTRL_PPE);
1963 regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
1964 for (i = 0; i < MTK_MAC_COUNT; i++) {
1967 val &= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK, eth->mac[i]->id);
1968 val |= SYSCFG0_GE_MODE(eth->mac[i]->ge_mode, eth->mac[i]->id);
1970 regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val);
1973 /* Set GE2 driving and slew rate */
1974 regmap_write(eth->pctl, GPIO_DRV_SEL10, 0xa00);
1977 regmap_write(eth->pctl, GPIO_OD33_CTRL8, 0x5);
1980 regmap_write(eth->pctl, GPIO_BIAS_CTRL, 0x0);
1983 /* Set linkdown as the default for each GMAC. Its own MCR would be set
1984 * up with the more appropriate value when mtk_phy_link_adjust call is
1987 for (i = 0; i < MTK_MAC_COUNT; i++)
1988 mtk_w32(eth, 0, MTK_MAC_MCR(i));
1990 /* Tell the CDM to parse the MTK special tag coming from the CPU;
1991 * this also works for untagged packets.
1993 val = mtk_r32(eth, MTK_CDMQ_IG_CTRL);
1994 mtk_w32(eth, val | MTK_CDMQ_STAG_EN, MTK_CDMQ_IG_CTRL);
1996 /* Enable RX VLAN offloading */
1997 mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);
1999 /* enable interrupt delay for RX */
2000 mtk_w32(eth, MTK_PDMA_DELAY_RX_DELAY, MTK_PDMA_DELAY_INT);
2002 /* disable delay and normal interrupt */
2003 mtk_w32(eth, 0, MTK_QDMA_DELAY_INT);
2004 mtk_tx_irq_disable(eth, ~0);
2005 mtk_rx_irq_disable(eth, ~0);
2006 mtk_w32(eth, RST_GL_PSE, MTK_RST_GL);
2007 mtk_w32(eth, 0, MTK_RST_GL);
2009 /* FE int grouping */
2010 mtk_w32(eth, MTK_TX_DONE_INT, MTK_PDMA_INT_GRP1);
2011 mtk_w32(eth, MTK_RX_DONE_INT, MTK_PDMA_INT_GRP2);
2012 mtk_w32(eth, MTK_TX_DONE_INT, MTK_QDMA_INT_GRP1);
2013 mtk_w32(eth, MTK_RX_DONE_INT, MTK_QDMA_INT_GRP2);
2014 mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP);
2016 for (i = 0; i < 2; i++) {
2017 u32 val = mtk_r32(eth, MTK_GDMA_FWD_CFG(i));
2019 /* set up the forward port to send frames to the PDMA */
2022 /* Enable RX checksum */
2023 val |= MTK_GDMA_ICS_EN | MTK_GDMA_TCS_EN | MTK_GDMA_UCS_EN;
2025 /* set up the MAC DMA */
2026 mtk_w32(eth, val, MTK_GDMA_FWD_CFG(i));
2032 pm_runtime_put_sync(eth->dev);
2033 pm_runtime_disable(eth->dev);
2038 static int mtk_hw_deinit(struct mtk_eth *eth)
2040 if (!test_and_clear_bit(MTK_HW_INIT, &eth->state))
2043 mtk_clk_disable(eth);
2045 pm_runtime_put_sync(eth->dev);
2046 pm_runtime_disable(eth->dev);
2051 static int __init mtk_init(struct net_device *dev)
2053 struct mtk_mac *mac = netdev_priv(dev);
2054 struct mtk_eth *eth = mac->hw;
2055 const char *mac_addr;
2057 mac_addr = of_get_mac_address(mac->of_node);
2059 ether_addr_copy(dev->dev_addr, mac_addr);
2061 /* If the MAC address is invalid, use a random MAC address instead */
2062 if (!is_valid_ether_addr(dev->dev_addr)) {
2063 eth_hw_addr_random(dev);
2064 dev_err(eth->dev, "generated random MAC address %pM\n",
2068 return mtk_phy_connect(dev);
2071 static void mtk_uninit(struct net_device *dev)
2073 struct mtk_mac *mac = netdev_priv(dev);
2074 struct mtk_eth *eth = mac->hw;
2076 phy_disconnect(dev->phydev);
2077 if (of_phy_is_fixed_link(mac->of_node))
2078 of_phy_deregister_fixed_link(mac->of_node);
2079 mtk_tx_irq_disable(eth, ~0);
2080 mtk_rx_irq_disable(eth, ~0);
2083 static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2089 return phy_mii_ioctl(dev->phydev, ifr, cmd);
2097 static void mtk_pending_work(struct work_struct *work)
2099 struct mtk_eth *eth = container_of(work, struct mtk_eth, pending_work);
2101 unsigned long restart = 0;
2105 dev_dbg(eth->dev, "[%s][%d] reset\n", __func__, __LINE__);
2107 while (test_and_set_bit_lock(MTK_RESETTING, &eth->state))
2110 dev_dbg(eth->dev, "[%s][%d] mtk_stop starts\n", __func__, __LINE__);
2111 /* stop all devices to make sure that dma is properly shut down */
2112 for (i = 0; i < MTK_MAC_COUNT; i++) {
2113 if (!eth->netdev[i])
2115 mtk_stop(eth->netdev[i]);
2116 __set_bit(i, &restart);
2118 dev_dbg(eth->dev, "[%s][%d] mtk_stop ends\n", __func__, __LINE__);
2120 /* restart underlying hardware such as power, clock, pin mux
2121 * and the connected phy
2126 pinctrl_select_state(eth->dev->pins->p,
2127 eth->dev->pins->default_state);
2130 for (i = 0; i < MTK_MAC_COUNT; i++) {
2132 of_phy_is_fixed_link(eth->mac[i]->of_node))
2134 err = phy_init_hw(eth->netdev[i]->phydev);
2136 dev_err(eth->dev, "%s: PHY init failed.\n",
2137 eth->netdev[i]->name);
2140 /* restart DMA and enable IRQs */
2141 for (i = 0; i < MTK_MAC_COUNT; i++) {
2142 if (!test_bit(i, &restart))
2144 err = mtk_open(eth->netdev[i]);
2146 netif_alert(eth, ifup, eth->netdev[i],
2147 "Driver up/down cycle failed, closing device.\n");
2148 dev_close(eth->netdev[i]);
2152 dev_dbg(eth->dev, "[%s][%d] reset done\n", __func__, __LINE__);
2154 clear_bit_unlock(MTK_RESETTING, &eth->state);
2159 static int mtk_free_dev(struct mtk_eth *eth)
2163 for (i = 0; i < MTK_MAC_COUNT; i++) {
2164 if (!eth->netdev[i])
2166 free_netdev(eth->netdev[i]);
2172 static int mtk_unreg_dev(struct mtk_eth *eth)
2176 for (i = 0; i < MTK_MAC_COUNT; i++) {
2177 if (!eth->netdev[i])
2179 unregister_netdev(eth->netdev[i]);
2185 static int mtk_cleanup(struct mtk_eth *eth)
2189 cancel_work_sync(&eth->pending_work);
2194 static int mtk_get_link_ksettings(struct net_device *ndev,
2195 struct ethtool_link_ksettings *cmd)
2197 struct mtk_mac *mac = netdev_priv(ndev);
2199 if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
2202 phy_ethtool_ksettings_get(ndev->phydev, cmd);
2207 static int mtk_set_link_ksettings(struct net_device *ndev,
2208 const struct ethtool_link_ksettings *cmd)
2210 struct mtk_mac *mac = netdev_priv(ndev);
2212 if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
2215 return phy_ethtool_ksettings_set(ndev->phydev, cmd);
2218 static void mtk_get_drvinfo(struct net_device *dev,
2219 struct ethtool_drvinfo *info)
2221 struct mtk_mac *mac = netdev_priv(dev);
2223 strlcpy(info->driver, mac->hw->dev->driver->name, sizeof(info->driver));
2224 strlcpy(info->bus_info, dev_name(mac->hw->dev), sizeof(info->bus_info));
2225 info->n_stats = ARRAY_SIZE(mtk_ethtool_stats);
2228 static u32 mtk_get_msglevel(struct net_device *dev)
2230 struct mtk_mac *mac = netdev_priv(dev);
2232 return mac->hw->msg_enable;
2235 static void mtk_set_msglevel(struct net_device *dev, u32 value)
2237 struct mtk_mac *mac = netdev_priv(dev);
2239 mac->hw->msg_enable = value;
2242 static int mtk_nway_reset(struct net_device *dev)
2244 struct mtk_mac *mac = netdev_priv(dev);
2246 if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
2249 return genphy_restart_aneg(dev->phydev);
2252 static u32 mtk_get_link(struct net_device *dev)
2254 struct mtk_mac *mac = netdev_priv(dev);
2257 if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
2260 err = genphy_update_link(dev->phydev);
2262 return ethtool_op_get_link(dev);
2264 return dev->phydev->link;
2267 static void mtk_get_strings(struct net_device *dev, u32 stringset, u8 *data)
2271 switch (stringset) {
2273 for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++) {
2274 memcpy(data, mtk_ethtool_stats[i].str, ETH_GSTRING_LEN);
2275 data += ETH_GSTRING_LEN;
2281 static int mtk_get_sset_count(struct net_device *dev, int sset)
2285 return ARRAY_SIZE(mtk_ethtool_stats);
2291 static void mtk_get_ethtool_stats(struct net_device *dev,
2292 struct ethtool_stats *stats, u64 *data)
2294 struct mtk_mac *mac = netdev_priv(dev);
2295 struct mtk_hw_stats *hwstats = mac->hw_stats;
2296 u64 *data_src, *data_dst;
2300 if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
2303 if (netif_running(dev) && netif_device_present(dev)) {
2304 if (spin_trylock_bh(&hwstats->stats_lock)) {
2305 mtk_stats_update_mac(mac);
2306 spin_unlock_bh(&hwstats->stats_lock);
2310 data_src = (u64 *)hwstats;
2314 start = u64_stats_fetch_begin_irq(&hwstats->syncp);
2316 for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++)
2317 *data_dst++ = *(data_src + mtk_ethtool_stats[i].offset);
2318 } while (u64_stats_fetch_retry_irq(&hwstats->syncp, start));
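/* The copy above treats struct mtk_hw_stats as a flat array of u64, indexed
 * by the per-entry offset recorded by MTK_ETHTOOL_STAT, so the values are
 * reported in mtk_ethtool_stats[] order; the u64_stats_fetch loop retries
 * the whole copy if mtk_stats_update_mac() updated the counters mid-read.
 */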
2321 static int mtk_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
2324 int ret = -EOPNOTSUPP;
2327 case ETHTOOL_GRXRINGS:
2328 if (dev->hw_features & NETIF_F_LRO) {
2329 cmd->data = MTK_MAX_RX_RING_NUM;
2333 case ETHTOOL_GRXCLSRLCNT:
2334 if (dev->hw_features & NETIF_F_LRO) {
2335 struct mtk_mac *mac = netdev_priv(dev);
2337 cmd->rule_cnt = mac->hwlro_ip_cnt;
2341 case ETHTOOL_GRXCLSRULE:
2342 if (dev->hw_features & NETIF_F_LRO)
2343 ret = mtk_hwlro_get_fdir_entry(dev, cmd);
2345 case ETHTOOL_GRXCLSRLALL:
2346 if (dev->hw_features & NETIF_F_LRO)
2347 ret = mtk_hwlro_get_fdir_all(dev, cmd,
2357 static int mtk_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
2359 int ret = -EOPNOTSUPP;
2362 case ETHTOOL_SRXCLSRLINS:
2363 if (dev->hw_features & NETIF_F_LRO)
2364 ret = mtk_hwlro_add_ipaddr(dev, cmd);
2366 case ETHTOOL_SRXCLSRLDEL:
2367 if (dev->hw_features & NETIF_F_LRO)
2368 ret = mtk_hwlro_del_ipaddr(dev, cmd);
2377 static const struct ethtool_ops mtk_ethtool_ops = {
2378 .get_link_ksettings = mtk_get_link_ksettings,
2379 .set_link_ksettings = mtk_set_link_ksettings,
2380 .get_drvinfo = mtk_get_drvinfo,
2381 .get_msglevel = mtk_get_msglevel,
2382 .set_msglevel = mtk_set_msglevel,
2383 .nway_reset = mtk_nway_reset,
2384 .get_link = mtk_get_link,
2385 .get_strings = mtk_get_strings,
2386 .get_sset_count = mtk_get_sset_count,
2387 .get_ethtool_stats = mtk_get_ethtool_stats,
2388 .get_rxnfc = mtk_get_rxnfc,
2389 .set_rxnfc = mtk_set_rxnfc,
2392 static const struct net_device_ops mtk_netdev_ops = {
2393 .ndo_init = mtk_init,
2394 .ndo_uninit = mtk_uninit,
2395 .ndo_open = mtk_open,
2396 .ndo_stop = mtk_stop,
2397 .ndo_start_xmit = mtk_start_xmit,
2398 .ndo_set_mac_address = mtk_set_mac_address,
2399 .ndo_validate_addr = eth_validate_addr,
2400 .ndo_do_ioctl = mtk_do_ioctl,
2401 .ndo_tx_timeout = mtk_tx_timeout,
2402 .ndo_get_stats64 = mtk_get_stats64,
2403 .ndo_fix_features = mtk_fix_features,
2404 .ndo_set_features = mtk_set_features,
2405 #ifdef CONFIG_NET_POLL_CONTROLLER
2406 .ndo_poll_controller = mtk_poll_controller,
2410 static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
2412 struct mtk_mac *mac;
2413 const __be32 *_id = of_get_property(np, "reg", NULL);
2417 dev_err(eth->dev, "missing mac id\n");
2421 id = be32_to_cpup(_id);
2422 if (id >= MTK_MAC_COUNT) {
2423 dev_err(eth->dev, "%d is not a valid mac id\n", id);
2427 if (eth->netdev[id]) {
2428 dev_err(eth->dev, "duplicate mac id found: %d\n", id);
2432 eth->netdev[id] = alloc_etherdev(sizeof(*mac));
2433 if (!eth->netdev[id]) {
2434 dev_err(eth->dev, "alloc_etherdev failed\n");
2437 mac = netdev_priv(eth->netdev[id]);
2443 memset(mac->hwlro_ip, 0, sizeof(mac->hwlro_ip));
2444 mac->hwlro_ip_cnt = 0;
2446 mac->hw_stats = devm_kzalloc(eth->dev,
2447 sizeof(*mac->hw_stats),
2449 if (!mac->hw_stats) {
2450 dev_err(eth->dev, "failed to allocate counter memory\n");
2454 spin_lock_init(&mac->hw_stats->stats_lock);
2455 u64_stats_init(&mac->hw_stats->syncp);
2456 mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET;
2458 SET_NETDEV_DEV(eth->netdev[id], eth->dev);
2459 eth->netdev[id]->watchdog_timeo = 5 * HZ;
2460 eth->netdev[id]->netdev_ops = &mtk_netdev_ops;
2461 eth->netdev[id]->base_addr = (unsigned long)eth->base;
2463 eth->netdev[id]->hw_features = MTK_HW_FEATURES;
2465 eth->netdev[id]->hw_features |= NETIF_F_LRO;
2467 eth->netdev[id]->vlan_features = MTK_HW_FEATURES &
2468 ~(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX);
2469 eth->netdev[id]->features |= MTK_HW_FEATURES;
2470 eth->netdev[id]->ethtool_ops = &mtk_ethtool_ops;
2472 eth->netdev[id]->irq = eth->irq[0];
2473 eth->netdev[id]->dev.of_node = np;
2475 eth->netdev[id]->max_mtu = MTK_MAX_RX_LENGTH - MTK_RX_ETH_HLEN;
2480 free_netdev(eth->netdev[id]);
2484 static int mtk_probe(struct platform_device *pdev)
2486 struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2487 struct device_node *mac_np;
2488 struct mtk_eth *eth;
2492 eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL);
2496 eth->soc = of_device_get_match_data(&pdev->dev);
2498 eth->dev = &pdev->dev;
2499 eth->base = devm_ioremap_resource(&pdev->dev, res);
2500 if (IS_ERR(eth->base))
2501 return PTR_ERR(eth->base);
2503 spin_lock_init(&eth->page_lock);
2504 spin_lock_init(&eth->tx_irq_lock);
2505 spin_lock_init(&eth->rx_irq_lock);
2507 eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
2509 if (IS_ERR(eth->ethsys)) {
2510 dev_err(&pdev->dev, "no ethsys regmap found\n");
2511 return PTR_ERR(eth->ethsys);
2514 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
2516 syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
2517 "mediatek,sgmiisys");
2518 if (IS_ERR(eth->sgmiisys)) {
2519 dev_err(&pdev->dev, "no sgmiisys regmap found\n");
2520 return PTR_ERR(eth->sgmiisys);
2524 if (eth->soc->required_pctl) {
2525 eth->pctl = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
2527 if (IS_ERR(eth->pctl)) {
2528 dev_err(&pdev->dev, "no pctl regmap found\n");
2529 return PTR_ERR(eth->pctl);
2533 for (i = 0; i < 3; i++) {
2534 eth->irq[i] = platform_get_irq(pdev, i);
2535 if (eth->irq[i] < 0) {
2536 dev_err(&pdev->dev, "no IRQ%d resource found\n", i);
2540 for (i = 0; i < ARRAY_SIZE(eth->clks); i++) {
2541 eth->clks[i] = devm_clk_get(eth->dev,
2542 mtk_clks_source_name[i]);
2543 if (IS_ERR(eth->clks[i])) {
2544 if (PTR_ERR(eth->clks[i]) == -EPROBE_DEFER)
2545 return -EPROBE_DEFER;
2546 if (eth->soc->required_clks & BIT(i)) {
2547 dev_err(&pdev->dev, "clock %s not found\n",
2548 mtk_clks_source_name[i]);
2551 eth->clks[i] = NULL;
2555 eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE);
2556 INIT_WORK(&eth->pending_work, mtk_pending_work);
2558 err = mtk_hw_init(eth);
2562 eth->hwlro = MTK_HAS_CAPS(eth->soc->caps, MTK_HWLRO);
2564 for_each_child_of_node(pdev->dev.of_node, mac_np) {
2565 if (!of_device_is_compatible(mac_np,
2566 "mediatek,eth-mac"))
2569 if (!of_device_is_available(mac_np))
2572 err = mtk_add_mac(eth, mac_np);
2577 err = devm_request_irq(eth->dev, eth->irq[1], mtk_handle_irq_tx, 0,
2578 dev_name(eth->dev), eth);
2582 err = devm_request_irq(eth->dev, eth->irq[2], mtk_handle_irq_rx, 0,
2583 dev_name(eth->dev), eth);
2587 err = mtk_mdio_init(eth);
2591 for (i = 0; i < MTK_MAX_DEVS; i++) {
2592 if (!eth->netdev[i])
2595 err = register_netdev(eth->netdev[i]);
2597 dev_err(eth->dev, "error bringing up device\n");
2598 goto err_deinit_mdio;
2600 netif_info(eth, probe, eth->netdev[i],
2601 "mediatek frame engine at 0x%08lx, irq %d\n",
2602 eth->netdev[i]->base_addr, eth->irq[0]);
2605 /* we run 2 devices on the same DMA ring so we need a dummy device
2608 init_dummy_netdev(&eth->dummy_dev);
2609 netif_napi_add(&eth->dummy_dev, &eth->tx_napi, mtk_napi_tx,
2611 netif_napi_add(&eth->dummy_dev, &eth->rx_napi, mtk_napi_rx,
2614 platform_set_drvdata(pdev, eth);
2619 mtk_mdio_cleanup(eth);
2628 static int mtk_remove(struct platform_device *pdev)
2630 struct mtk_eth *eth = platform_get_drvdata(pdev);
2633 /* stop all devices to make sure that dma is properly shut down */
2634 for (i = 0; i < MTK_MAC_COUNT; i++) {
2635 if (!eth->netdev[i])
2637 mtk_stop(eth->netdev[i]);
2642 netif_napi_del(&eth->tx_napi);
2643 netif_napi_del(&eth->rx_napi);
2645 mtk_mdio_cleanup(eth);
2650 static const struct mtk_soc_data mt2701_data = {
2651 .caps = MTK_GMAC1_TRGMII | MTK_HWLRO,
2652 .required_clks = MT7623_CLKS_BITMAP,
2653 .required_pctl = true,
2656 static const struct mtk_soc_data mt7622_data = {
2657 .caps = MTK_DUAL_GMAC_SHARED_SGMII | MTK_GMAC1_ESW | MTK_HWLRO,
2658 .required_clks = MT7622_CLKS_BITMAP,
2659 .required_pctl = false,
2662 static const struct mtk_soc_data mt7623_data = {
2663 .caps = MTK_GMAC1_TRGMII | MTK_HWLRO,
2664 .required_clks = MT7623_CLKS_BITMAP,
2665 .required_pctl = true,
2668 const struct of_device_id of_mtk_match[] = {
2669 { .compatible = "mediatek,mt2701-eth", .data = &mt2701_data},
2670 { .compatible = "mediatek,mt7622-eth", .data = &mt7622_data},
2671 { .compatible = "mediatek,mt7623-eth", .data = &mt7623_data},
2674 MODULE_DEVICE_TABLE(of, of_mtk_match);
2676 static struct platform_driver mtk_driver = {
2678 .remove = mtk_remove,
2680 .name = "mtk_soc_eth",
2681 .of_match_table = of_mtk_match,
2685 module_platform_driver(mtk_driver);
2687 MODULE_LICENSE("GPL");
2688 MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
2689 MODULE_DESCRIPTION("Ethernet driver for MediaTek SoC");