// SPDX-License-Identifier: GPL-2.0
/*
 * Driver for Marvell PPv2 network controller for Armada 375 SoC.
 *
 * Copyright (C) 2014 Marvell
 *
 * Marcin Wojtas <mw@semihalf.com>
 */
#include <linux/acpi.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <linux/inetdevice.h>
#include <linux/mbus.h>
#include <linux/module.h>
#include <linux/mfd/syscon.h>
#include <linux/interrupt.h>
#include <linux/cpumask.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/phy.h>
#include <linux/phylink.h>
#include <linux/phy/phy.h>
#include <linux/clk.h>
#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <linux/regmap.h>
#include <uapi/linux/ppp_defs.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/tso.h>

#include "mvpp2.h"
#include "mvpp2_prs.h"
#include "mvpp2_cls.h"
enum mvpp2_bm_pool_log_num {
	MVPP2_BM_SHORT,
	MVPP2_BM_LONG,
	MVPP2_BM_JUMBO,
	MVPP2_BM_POOLS_NUM
};

static struct {
	int pkt_size;
	int buf_num;
} mvpp2_pools[MVPP2_BM_POOLS_NUM];
/* The prototype is added here to be used in start_dev when using ACPI. This
 * will be removed once phylink is used for all modes (dt+ACPI).
 */
static void mvpp2_mac_config(struct net_device *dev, unsigned int mode,
			     const struct phylink_link_state *state);
static void mvpp2_mac_link_up(struct net_device *dev, unsigned int mode,
			      phy_interface_t interface, struct phy_device *phy);
#define MVPP2_QDIST_SINGLE_MODE	0
#define MVPP2_QDIST_MULTI_MODE	1

static int queue_mode = MVPP2_QDIST_MULTI_MODE;

module_param(queue_mode, int, 0444);
MODULE_PARM_DESC(queue_mode, "Set queue_mode (single=0, multi=1)");
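/* Illustrative usage (not part of this file): the distribution mode can
 * be chosen at load time, e.g. "modprobe mvpp2 queue_mode=0" for single
 * mode; the 0444 permissions make the parameter read-only at runtime via
 * /sys/module/mvpp2/parameters/queue_mode.
 */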
/* Utility/helper methods */

void mvpp2_write(struct mvpp2 *priv, u32 offset, u32 data)
{
	writel(data, priv->swth_base[0] + offset);
}

u32 mvpp2_read(struct mvpp2 *priv, u32 offset)
{
	return readl(priv->swth_base[0] + offset);
}

u32 mvpp2_read_relaxed(struct mvpp2 *priv, u32 offset)
{
	return readl_relaxed(priv->swth_base[0] + offset);
}
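/* A minimal usage sketch of the accessors above, mirroring the
 * read-modify-write pattern used throughout this file (see e.g.
 * mvpp2_bm_pool_create() below):
 *
 *	u32 val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(id));
 *
 *	val |= MVPP2_BM_START_MASK;
 *	mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(id), val);
 */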
/* These accessors should be used to access:
 *
 * - per-CPU registers, where each CPU has its own copy of the
 *   register.
 *
 *   MVPP2_BM_VIRT_ALLOC_REG
 *   MVPP2_BM_ADDR_HIGH_ALLOC
 *   MVPP22_BM_ADDR_HIGH_RLS_REG
 *   MVPP2_BM_VIRT_RLS_REG
 *   MVPP2_ISR_RX_TX_CAUSE_REG
 *   MVPP2_ISR_RX_TX_MASK_REG
 *   MVPP2_AGGR_TXQ_UPDATE_REG
 *   MVPP2_TXQ_RSVD_REQ_REG
 *   MVPP2_TXQ_RSVD_RSLT_REG
 *
 * - global registers that must be accessed through a specific CPU
 *   window, because they are related to an access to a per-CPU
 *   register
 *
 *   MVPP2_BM_PHY_ALLOC_REG (related to MVPP2_BM_VIRT_ALLOC_REG)
 *   MVPP2_BM_PHY_RLS_REG (related to MVPP2_BM_VIRT_RLS_REG)
 *   MVPP2_RXQ_THRESH_REG (related to MVPP2_RXQ_NUM_REG)
 *   MVPP2_RXQ_DESC_ADDR_REG (related to MVPP2_RXQ_NUM_REG)
 *   MVPP2_RXQ_DESC_SIZE_REG (related to MVPP2_RXQ_NUM_REG)
 *   MVPP2_RXQ_INDEX_REG (related to MVPP2_RXQ_NUM_REG)
 *   MVPP2_TXQ_PENDING_REG (related to MVPP2_TXQ_NUM_REG)
 *   MVPP2_TXQ_DESC_ADDR_REG (related to MVPP2_TXQ_NUM_REG)
 *   MVPP2_TXQ_DESC_SIZE_REG (related to MVPP2_TXQ_NUM_REG)
 *   MVPP2_TXQ_INDEX_REG (related to MVPP2_TXQ_NUM_REG)
 *   MVPP2_TXQ_PREF_BUF_REG (related to MVPP2_TXQ_NUM_REG)
 */
void mvpp2_percpu_write(struct mvpp2 *priv, int cpu,
			u32 offset, u32 data)
{
	writel(data, priv->swth_base[cpu] + offset);
}

u32 mvpp2_percpu_read(struct mvpp2 *priv, int cpu,
		      u32 offset)
{
	return readl(priv->swth_base[cpu] + offset);
}

void mvpp2_percpu_write_relaxed(struct mvpp2 *priv, int cpu,
				u32 offset, u32 data)
{
	writel_relaxed(data, priv->swth_base[cpu] + offset);
}

static u32 mvpp2_percpu_read_relaxed(struct mvpp2 *priv, int cpu,
				     u32 offset)
{
	return readl_relaxed(priv->swth_base[cpu] + offset);
}
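/* Sketch of how the per-CPU window accessors are combined for the
 * "indirect" registers listed above (modelled on
 * mvpp2_rx_pkts_coal_set() further down): pin the CPU, select the queue
 * through this CPU's window, then touch the related register through
 * the same window.
 *
 *	int cpu = get_cpu();
 *
 *	mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id);
 *	mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_THRESH_REG, val);
 *	put_cpu();
 */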
static dma_addr_t mvpp2_txdesc_dma_addr_get(struct mvpp2_port *port,
					    struct mvpp2_tx_desc *tx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return le32_to_cpu(tx_desc->pp21.buf_dma_addr);
	else
		return le64_to_cpu(tx_desc->pp22.buf_dma_addr_ptp) &
		       MVPP2_DESC_DMA_MASK;
}
static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port,
				      struct mvpp2_tx_desc *tx_desc,
				      dma_addr_t dma_addr)
{
	dma_addr_t addr, offset;

	addr = dma_addr & ~MVPP2_TX_DESC_ALIGN;
	offset = dma_addr & MVPP2_TX_DESC_ALIGN;

	if (port->priv->hw_version == MVPP21) {
		tx_desc->pp21.buf_dma_addr = cpu_to_le32(addr);
		tx_desc->pp21.packet_offset = offset;
	} else {
		__le64 val = cpu_to_le64(addr);

		tx_desc->pp22.buf_dma_addr_ptp &= ~cpu_to_le64(MVPP2_DESC_DMA_MASK);
		tx_desc->pp22.buf_dma_addr_ptp |= val;
		tx_desc->pp22.packet_offset = offset;
	}
}
static size_t mvpp2_txdesc_size_get(struct mvpp2_port *port,
				    struct mvpp2_tx_desc *tx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return le16_to_cpu(tx_desc->pp21.data_size);
	else
		return le16_to_cpu(tx_desc->pp22.data_size);
}

static void mvpp2_txdesc_size_set(struct mvpp2_port *port,
				  struct mvpp2_tx_desc *tx_desc,
				  size_t size)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.data_size = cpu_to_le16(size);
	else
		tx_desc->pp22.data_size = cpu_to_le16(size);
}

static void mvpp2_txdesc_txq_set(struct mvpp2_port *port,
				 struct mvpp2_tx_desc *tx_desc,
				 unsigned int txq)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.phys_txq = txq;
	else
		tx_desc->pp22.phys_txq = txq;
}

static void mvpp2_txdesc_cmd_set(struct mvpp2_port *port,
				 struct mvpp2_tx_desc *tx_desc,
				 unsigned int command)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.command = cpu_to_le32(command);
	else
		tx_desc->pp22.command = cpu_to_le32(command);
}

static unsigned int mvpp2_txdesc_offset_get(struct mvpp2_port *port,
					    struct mvpp2_tx_desc *tx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return tx_desc->pp21.packet_offset;
	else
		return tx_desc->pp22.packet_offset;
}
static dma_addr_t mvpp2_rxdesc_dma_addr_get(struct mvpp2_port *port,
					    struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return le32_to_cpu(rx_desc->pp21.buf_dma_addr);
	else
		return le64_to_cpu(rx_desc->pp22.buf_dma_addr_key_hash) &
		       MVPP2_DESC_DMA_MASK;
}

static unsigned long mvpp2_rxdesc_cookie_get(struct mvpp2_port *port,
					     struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return le32_to_cpu(rx_desc->pp21.buf_cookie);
	else
		return le64_to_cpu(rx_desc->pp22.buf_cookie_misc) &
		       MVPP2_DESC_DMA_MASK;
}

static size_t mvpp2_rxdesc_size_get(struct mvpp2_port *port,
				    struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return le16_to_cpu(rx_desc->pp21.data_size);
	else
		return le16_to_cpu(rx_desc->pp22.data_size);
}

static u32 mvpp2_rxdesc_status_get(struct mvpp2_port *port,
				   struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return le32_to_cpu(rx_desc->pp21.status);
	else
		return le32_to_cpu(rx_desc->pp22.status);
}
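/* Ring index helpers: the per-CPU Tx buffer array is used as a circular
 * buffer, so the get/put indices below wrap back to 0 at txq_pcpu->size.
 */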
static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu)
{
	txq_pcpu->txq_get_index++;
	if (txq_pcpu->txq_get_index == txq_pcpu->size)
		txq_pcpu->txq_get_index = 0;
}

static void mvpp2_txq_inc_put(struct mvpp2_port *port,
			      struct mvpp2_txq_pcpu *txq_pcpu,
			      struct sk_buff *skb,
			      struct mvpp2_tx_desc *tx_desc)
{
	struct mvpp2_txq_pcpu_buf *tx_buf =
		txq_pcpu->buffs + txq_pcpu->txq_put_index;
	tx_buf->skb = skb;
	tx_buf->size = mvpp2_txdesc_size_get(port, tx_desc);
	tx_buf->dma = mvpp2_txdesc_dma_addr_get(port, tx_desc) +
		mvpp2_txdesc_offset_get(port, tx_desc);
	txq_pcpu->txq_put_index++;
	if (txq_pcpu->txq_put_index == txq_pcpu->size)
		txq_pcpu->txq_put_index = 0;
}
/* Get number of physical egress port */
static inline int mvpp2_egress_port(struct mvpp2_port *port)
{
	return MVPP2_MAX_TCONT + port->id;
}

/* Get number of physical TXQ */
static inline int mvpp2_txq_phys(int port, int txq)
{
	return (MVPP2_MAX_TCONT + port) * MVPP2_MAX_TXQ + txq;
}
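/* For illustration: physical TXQ numbering is flat across all egress
 * ports, the first MVPP2_MAX_TCONT "ports" being reserved for PON
 * TCONTs (see the prefetch-buffer comment in mvpp2_txq_init()).
 * Assuming the usual mvpp2.h values MVPP2_MAX_TCONT = 16 and
 * MVPP2_MAX_TXQ = 8, port 1 / logical txq 2 would map to physical txq
 * (16 + 1) * 8 + 2 = 138.
 */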
static void *mvpp2_frag_alloc(const struct mvpp2_bm_pool *pool)
{
	if (likely(pool->frag_size <= PAGE_SIZE))
		return netdev_alloc_frag(pool->frag_size);
	else
		return kmalloc(pool->frag_size, GFP_ATOMIC);
}

static void mvpp2_frag_free(const struct mvpp2_bm_pool *pool, void *data)
{
	if (likely(pool->frag_size <= PAGE_SIZE))
		skb_free_frag(data);
	else
		kfree(data);
}
/* Buffer Manager configuration routines */

/* Create pool */
static int mvpp2_bm_pool_create(struct platform_device *pdev,
				struct mvpp2 *priv,
				struct mvpp2_bm_pool *bm_pool, int size)
{
	u32 val;

	/* Number of buffer pointers must be a multiple of 16, as per
	 * hardware constraints
	 */
	if (!IS_ALIGNED(size, 16))
		return -EINVAL;

	/* PPv2.1 needs 8 bytes per buffer pointer, PPv2.2 needs 16
	 * bytes per buffer pointer
	 */
	if (priv->hw_version == MVPP21)
		bm_pool->size_bytes = 2 * sizeof(u32) * size;
	else
		bm_pool->size_bytes = 2 * sizeof(u64) * size;

	bm_pool->virt_addr = dma_alloc_coherent(&pdev->dev, bm_pool->size_bytes,
						&bm_pool->dma_addr,
						GFP_KERNEL);
	if (!bm_pool->virt_addr)
		return -ENOMEM;

	if (!IS_ALIGNED((unsigned long)bm_pool->virt_addr,
			MVPP2_BM_POOL_PTR_ALIGN)) {
		dma_free_coherent(&pdev->dev, bm_pool->size_bytes,
				  bm_pool->virt_addr, bm_pool->dma_addr);
		dev_err(&pdev->dev, "BM pool %d is not %d bytes aligned\n",
			bm_pool->id, MVPP2_BM_POOL_PTR_ALIGN);
		return -ENOMEM;
	}

	mvpp2_write(priv, MVPP2_BM_POOL_BASE_REG(bm_pool->id),
		    lower_32_bits(bm_pool->dma_addr));
	mvpp2_write(priv, MVPP2_BM_POOL_SIZE_REG(bm_pool->id), size);

	val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
	val |= MVPP2_BM_START_MASK;
	mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);

	bm_pool->size = size;
	bm_pool->pkt_size = 0;
	bm_pool->buf_num = 0;

	return 0;
}
/* Set pool buffer size */
static void mvpp2_bm_pool_bufsize_set(struct mvpp2 *priv,
				      struct mvpp2_bm_pool *bm_pool,
				      int buf_size)
{
	u32 val;

	bm_pool->buf_size = buf_size;

	val = ALIGN(buf_size, 1 << MVPP2_POOL_BUF_SIZE_OFFSET);
	mvpp2_write(priv, MVPP2_POOL_BUF_SIZE_REG(bm_pool->id), val);
}
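/* Pop one buffer from the given pool and fetch its DMA and physical
 * addresses from the per-CPU BM allocation registers.
 */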
static void mvpp2_bm_bufs_get_addrs(struct device *dev, struct mvpp2 *priv,
				    struct mvpp2_bm_pool *bm_pool,
				    dma_addr_t *dma_addr,
				    phys_addr_t *phys_addr)
{
	int cpu = get_cpu();

	*dma_addr = mvpp2_percpu_read(priv, cpu,
				      MVPP2_BM_PHY_ALLOC_REG(bm_pool->id));
	*phys_addr = mvpp2_percpu_read(priv, cpu, MVPP2_BM_VIRT_ALLOC_REG);

	if (priv->hw_version == MVPP22) {
		u32 val;
		u32 dma_addr_highbits, phys_addr_highbits;

		val = mvpp2_percpu_read(priv, cpu, MVPP22_BM_ADDR_HIGH_ALLOC);
		dma_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_PHYS_MASK);
		phys_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_VIRT_MASK) >>
			MVPP22_BM_ADDR_HIGH_VIRT_SHIFT;

		if (sizeof(dma_addr_t) == 8)
			*dma_addr |= (u64)dma_addr_highbits << 32;

		if (sizeof(phys_addr_t) == 8)
			*phys_addr |= (u64)phys_addr_highbits << 32;
	}

	put_cpu();
}
/* Free all buffers from the pool */
static void mvpp2_bm_bufs_free(struct device *dev, struct mvpp2 *priv,
			       struct mvpp2_bm_pool *bm_pool, int buf_num)
{
	int i;

	if (buf_num > bm_pool->buf_num) {
		WARN(1, "Pool does not have so many bufs pool(%d) bufs(%d)\n",
		     bm_pool->id, buf_num);
		buf_num = bm_pool->buf_num;
	}

	for (i = 0; i < buf_num; i++) {
		dma_addr_t buf_dma_addr;
		phys_addr_t buf_phys_addr;
		void *data;

		mvpp2_bm_bufs_get_addrs(dev, priv, bm_pool,
					&buf_dma_addr, &buf_phys_addr);

		dma_unmap_single(dev, buf_dma_addr,
				 bm_pool->buf_size, DMA_FROM_DEVICE);

		data = (void *)phys_to_virt(buf_phys_addr);
		if (!data)
			break;

		mvpp2_frag_free(bm_pool, data);
	}

	/* Update BM driver with number of buffers removed from pool */
	bm_pool->buf_num -= i;
}
/* Check number of buffers in BM pool */
static int mvpp2_check_hw_buf_num(struct mvpp2 *priv, struct mvpp2_bm_pool *bm_pool)
{
	int buf_num = 0;

	buf_num += mvpp2_read(priv, MVPP2_BM_POOL_PTRS_NUM_REG(bm_pool->id)) &
		   MVPP22_BM_POOL_PTRS_NUM_MASK;
	buf_num += mvpp2_read(priv, MVPP2_BM_BPPI_PTRS_NUM_REG(bm_pool->id)) &
		   MVPP2_BM_BPPI_PTR_NUM_MASK;

	/* HW has one buffer ready which is not reflected in the counters */
	if (buf_num)
		buf_num += 1;

	return buf_num;
}
/* Cleanup pool */
static int mvpp2_bm_pool_destroy(struct platform_device *pdev,
				 struct mvpp2 *priv,
				 struct mvpp2_bm_pool *bm_pool)
{
	int buf_num;
	u32 val;

	buf_num = mvpp2_check_hw_buf_num(priv, bm_pool);
	mvpp2_bm_bufs_free(&pdev->dev, priv, bm_pool, buf_num);

	/* Check buffer counters after free */
	buf_num = mvpp2_check_hw_buf_num(priv, bm_pool);
	if (buf_num) {
		WARN(1, "cannot free all buffers in pool %d, buf_num left %d\n",
		     bm_pool->id, bm_pool->buf_num);
		return 0;
	}

	val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
	val |= MVPP2_BM_STOP_MASK;
	mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);

	dma_free_coherent(&pdev->dev, bm_pool->size_bytes,
			  bm_pool->virt_addr,
			  bm_pool->dma_addr);
	return 0;
}
static int mvpp2_bm_pools_init(struct platform_device *pdev,
			       struct mvpp2 *priv)
{
	int i, err, size;
	struct mvpp2_bm_pool *bm_pool;

	/* Create all pools with maximum size */
	size = MVPP2_BM_POOL_SIZE_MAX;
	for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
		bm_pool = &priv->bm_pools[i];
		bm_pool->id = i;
		err = mvpp2_bm_pool_create(pdev, priv, bm_pool, size);
		if (err)
			goto err_unroll_pools;
		mvpp2_bm_pool_bufsize_set(priv, bm_pool, 0);
	}
	return 0;

err_unroll_pools:
	dev_err(&pdev->dev, "failed to create BM pool %d, size %d\n", i, size);
	for (i = i - 1; i >= 0; i--)
		mvpp2_bm_pool_destroy(pdev, priv, &priv->bm_pools[i]);
	return err;
}
static int mvpp2_bm_init(struct platform_device *pdev, struct mvpp2 *priv)
{
	int i, err;

	for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
		/* Mask BM all interrupts */
		mvpp2_write(priv, MVPP2_BM_INTR_MASK_REG(i), 0);
		/* Clear BM cause register */
		mvpp2_write(priv, MVPP2_BM_INTR_CAUSE_REG(i), 0);
	}

	/* Allocate and initialize BM pools */
	priv->bm_pools = devm_kcalloc(&pdev->dev, MVPP2_BM_POOLS_NUM,
				      sizeof(*priv->bm_pools), GFP_KERNEL);
	if (!priv->bm_pools)
		return -ENOMEM;

	err = mvpp2_bm_pools_init(pdev, priv);
	if (err < 0)
		return err;
	return 0;
}
static void mvpp2_setup_bm_pool(void)
{
	/* Short pool */
	mvpp2_pools[MVPP2_BM_SHORT].buf_num = MVPP2_BM_SHORT_BUF_NUM;
	mvpp2_pools[MVPP2_BM_SHORT].pkt_size = MVPP2_BM_SHORT_PKT_SIZE;

	/* Long pool */
	mvpp2_pools[MVPP2_BM_LONG].buf_num = MVPP2_BM_LONG_BUF_NUM;
	mvpp2_pools[MVPP2_BM_LONG].pkt_size = MVPP2_BM_LONG_PKT_SIZE;

	/* Jumbo pool */
	mvpp2_pools[MVPP2_BM_JUMBO].buf_num = MVPP2_BM_JUMBO_BUF_NUM;
	mvpp2_pools[MVPP2_BM_JUMBO].pkt_size = MVPP2_BM_JUMBO_PKT_SIZE;
}
/* Attach long pool to rxq */
static void mvpp2_rxq_long_pool_set(struct mvpp2_port *port,
				    int lrxq, int long_pool)
{
	u32 val, mask;
	int prxq;

	/* Get queue physical ID */
	prxq = port->rxqs[lrxq]->id;

	if (port->priv->hw_version == MVPP21)
		mask = MVPP21_RXQ_POOL_LONG_MASK;
	else
		mask = MVPP22_RXQ_POOL_LONG_MASK;

	val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
	val &= ~mask;
	val |= (long_pool << MVPP2_RXQ_POOL_LONG_OFFS) & mask;
	mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
}
/* Attach short pool to rxq */
static void mvpp2_rxq_short_pool_set(struct mvpp2_port *port,
				     int lrxq, int short_pool)
{
	u32 val, mask;
	int prxq;

	/* Get queue physical ID */
	prxq = port->rxqs[lrxq]->id;

	if (port->priv->hw_version == MVPP21)
		mask = MVPP21_RXQ_POOL_SHORT_MASK;
	else
		mask = MVPP22_RXQ_POOL_SHORT_MASK;

	val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
	val &= ~mask;
	val |= (short_pool << MVPP2_RXQ_POOL_SHORT_OFFS) & mask;
	mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
}
static void *mvpp2_buf_alloc(struct mvpp2_port *port,
			     struct mvpp2_bm_pool *bm_pool,
			     dma_addr_t *buf_dma_addr,
			     phys_addr_t *buf_phys_addr,
			     gfp_t gfp_mask)
{
	dma_addr_t dma_addr;
	void *data;

	data = mvpp2_frag_alloc(bm_pool);
	if (!data)
		return NULL;

	dma_addr = dma_map_single(port->dev->dev.parent, data,
				  MVPP2_RX_BUF_SIZE(bm_pool->pkt_size),
				  DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(port->dev->dev.parent, dma_addr))) {
		mvpp2_frag_free(bm_pool, data);
		return NULL;
	}
	*buf_dma_addr = dma_addr;
	*buf_phys_addr = virt_to_phys(data);

	return data;
}
/* Release buffer to BM */
static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
				     dma_addr_t buf_dma_addr,
				     phys_addr_t buf_phys_addr)
{
	int cpu = get_cpu();

	if (port->priv->hw_version == MVPP22) {
		u32 val = 0;

		if (sizeof(dma_addr_t) == 8)
			val |= upper_32_bits(buf_dma_addr) &
				MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK;

		if (sizeof(phys_addr_t) == 8)
			val |= (upper_32_bits(buf_phys_addr)
				<< MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT) &
				MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK;

		mvpp2_percpu_write_relaxed(port->priv, cpu,
					   MVPP22_BM_ADDR_HIGH_RLS_REG, val);
	}

	/* MVPP2_BM_VIRT_RLS_REG is not interpreted by HW, and simply
	 * returned in the "cookie" field of the RX
	 * descriptor. Instead of storing the virtual address, we
	 * store the physical address
	 */
	mvpp2_percpu_write_relaxed(port->priv, cpu,
				   MVPP2_BM_VIRT_RLS_REG, buf_phys_addr);
	mvpp2_percpu_write_relaxed(port->priv, cpu,
				   MVPP2_BM_PHY_RLS_REG(pool), buf_dma_addr);

	put_cpu();
}
/* Allocate buffers for the pool */
static int mvpp2_bm_bufs_add(struct mvpp2_port *port,
			     struct mvpp2_bm_pool *bm_pool, int buf_num)
{
	int i, buf_size, total_size;
	dma_addr_t dma_addr;
	phys_addr_t phys_addr;
	void *buf;

	buf_size = MVPP2_RX_BUF_SIZE(bm_pool->pkt_size);
	total_size = MVPP2_RX_TOTAL_SIZE(buf_size);

	if (buf_num < 0 ||
	    (buf_num + bm_pool->buf_num > bm_pool->size)) {
		netdev_err(port->dev,
			   "cannot allocate %d buffers for pool %d\n",
			   buf_num, bm_pool->id);
		return 0;
	}

	for (i = 0; i < buf_num; i++) {
		buf = mvpp2_buf_alloc(port, bm_pool, &dma_addr,
				      &phys_addr, GFP_KERNEL);
		if (!buf)
			break;

		mvpp2_bm_pool_put(port, bm_pool->id, dma_addr,
				  phys_addr);
	}

	/* Update BM driver with number of buffers added to pool */
	bm_pool->buf_num += i;

	netdev_dbg(port->dev,
		   "pool %d: pkt_size=%4d, buf_size=%4d, total_size=%4d\n",
		   bm_pool->id, bm_pool->pkt_size, buf_size, total_size);

	netdev_dbg(port->dev,
		   "pool %d: %d of %d buffers added\n",
		   bm_pool->id, i, buf_num);
	return i;
}
/* Notify the driver that BM pool is being used as specific type and return the
 * pool pointer on success
 */
static struct mvpp2_bm_pool *
mvpp2_bm_pool_use(struct mvpp2_port *port, unsigned pool, int pkt_size)
{
	struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
	int num;

	if (pool >= MVPP2_BM_POOLS_NUM) {
		netdev_err(port->dev, "Invalid pool %d\n", pool);
		return NULL;
	}

	/* Allocate buffers in case BM pool is used as long pool, but packet
	 * size doesn't match MTU or BM pool hasn't been used yet
	 */
	if (new_pool->pkt_size == 0) {
		int pkts_num;

		/* Set default buffer number or free all the buffers in case
		 * the pool is not empty
		 */
		pkts_num = new_pool->buf_num;
		if (pkts_num == 0)
			pkts_num = mvpp2_pools[pool].buf_num;
		else
			mvpp2_bm_bufs_free(port->dev->dev.parent,
					   port->priv, new_pool, pkts_num);

		new_pool->pkt_size = pkt_size;
		new_pool->frag_size =
			SKB_DATA_ALIGN(MVPP2_RX_BUF_SIZE(pkt_size)) +
			MVPP2_SKB_SHINFO_SIZE;

		/* Allocate buffers for this pool */
		num = mvpp2_bm_bufs_add(port, new_pool, pkts_num);
		if (num != pkts_num) {
			WARN(1, "pool %d: %d of %d allocated\n",
			     new_pool->id, num, pkts_num);
			return NULL;
		}
	}

	mvpp2_bm_pool_bufsize_set(port->priv, new_pool,
				  MVPP2_RX_BUF_SIZE(new_pool->pkt_size));

	return new_pool;
}
/* Initialize pools for swf */
static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
{
	int rxq;
	enum mvpp2_bm_pool_log_num long_log_pool, short_log_pool;

	/* If port pkt_size is higher than 1518B:
	 * HW Long pool - SW Jumbo pool, HW Short pool - SW Long pool
	 * else: HW Long pool - SW Long pool, HW Short pool - SW Short pool
	 */
	if (port->pkt_size > MVPP2_BM_LONG_PKT_SIZE) {
		long_log_pool = MVPP2_BM_JUMBO;
		short_log_pool = MVPP2_BM_LONG;
	} else {
		long_log_pool = MVPP2_BM_LONG;
		short_log_pool = MVPP2_BM_SHORT;
	}

	if (!port->pool_long) {
		port->pool_long =
			mvpp2_bm_pool_use(port, long_log_pool,
					  mvpp2_pools[long_log_pool].pkt_size);
		if (!port->pool_long)
			return -ENOMEM;

		port->pool_long->port_map |= BIT(port->id);

		for (rxq = 0; rxq < port->nrxqs; rxq++)
			mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id);
	}

	if (!port->pool_short) {
		port->pool_short =
			mvpp2_bm_pool_use(port, short_log_pool,
					  mvpp2_pools[short_log_pool].pkt_size);
		if (!port->pool_short)
			return -ENOMEM;

		port->pool_short->port_map |= BIT(port->id);

		for (rxq = 0; rxq < port->nrxqs; rxq++)
			mvpp2_rxq_short_pool_set(port, rxq,
						 port->pool_short->id);
	}

	return 0;
}
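/* Re-parent the port's BM pools when the packet size crosses the
 * long/jumbo boundary; called from the driver's MTU-change path.
 */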
static int mvpp2_bm_update_mtu(struct net_device *dev, int mtu)
{
	struct mvpp2_port *port = netdev_priv(dev);
	enum mvpp2_bm_pool_log_num new_long_pool;
	int pkt_size = MVPP2_RX_PKT_SIZE(mtu);

	/* If port MTU is higher than 1518B:
	 * HW Long pool - SW Jumbo pool, HW Short pool - SW Long pool
	 * else: HW Long pool - SW Long pool, HW Short pool - SW Short pool
	 */
	if (pkt_size > MVPP2_BM_LONG_PKT_SIZE)
		new_long_pool = MVPP2_BM_JUMBO;
	else
		new_long_pool = MVPP2_BM_LONG;

	if (new_long_pool != port->pool_long->id) {
		/* Remove port from old short & long pool */
		port->pool_long = mvpp2_bm_pool_use(port, port->pool_long->id,
						    port->pool_long->pkt_size);
		port->pool_long->port_map &= ~BIT(port->id);
		port->pool_long = NULL;

		port->pool_short = mvpp2_bm_pool_use(port, port->pool_short->id,
						     port->pool_short->pkt_size);
		port->pool_short->port_map &= ~BIT(port->id);
		port->pool_short = NULL;

		port->pkt_size = pkt_size;

		/* Add port to new short & long pool */
		mvpp2_swf_bm_pool_init(port);

		/* Update L4 checksum offloads when jumbo is enabled/disabled
		 * on the port
		 */
		if (new_long_pool == MVPP2_BM_JUMBO && port->id != 0) {
			dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
			dev->hw_features &= ~(NETIF_F_IP_CSUM |
					      NETIF_F_IPV6_CSUM);
		} else {
			dev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
			dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
		}
	}

	dev->mtu = mtu;
	dev->wanted_features = dev->features;

	netdev_update_features(dev);
	return 0;
}
static inline void mvpp2_interrupts_enable(struct mvpp2_port *port)
{
	int i, sw_thread_mask = 0;

	for (i = 0; i < port->nqvecs; i++)
		sw_thread_mask |= port->qvecs[i].sw_thread_mask;

	mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
		    MVPP2_ISR_ENABLE_INTERRUPT(sw_thread_mask));
}

static inline void mvpp2_interrupts_disable(struct mvpp2_port *port)
{
	int i, sw_thread_mask = 0;

	for (i = 0; i < port->nqvecs; i++)
		sw_thread_mask |= port->qvecs[i].sw_thread_mask;

	mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
		    MVPP2_ISR_DISABLE_INTERRUPT(sw_thread_mask));
}

static inline void mvpp2_qvec_interrupt_enable(struct mvpp2_queue_vector *qvec)
{
	struct mvpp2_port *port = qvec->port;

	mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
		    MVPP2_ISR_ENABLE_INTERRUPT(qvec->sw_thread_mask));
}

static inline void mvpp2_qvec_interrupt_disable(struct mvpp2_queue_vector *qvec)
{
	struct mvpp2_port *port = qvec->port;

	mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
		    MVPP2_ISR_DISABLE_INTERRUPT(qvec->sw_thread_mask));
}
/* Mask the current CPU's Rx/Tx interrupts
 * Called by on_each_cpu(), guaranteed to run with migration disabled,
 * using smp_processor_id() is OK.
 */
static void mvpp2_interrupts_mask(void *arg)
{
	struct mvpp2_port *port = arg;

	mvpp2_percpu_write(port->priv, smp_processor_id(),
			   MVPP2_ISR_RX_TX_MASK_REG(port->id), 0);
}

/* Unmask the current CPU's Rx/Tx interrupts.
 * Called by on_each_cpu(), guaranteed to run with migration disabled,
 * using smp_processor_id() is OK.
 */
static void mvpp2_interrupts_unmask(void *arg)
{
	struct mvpp2_port *port = arg;
	u32 val;

	val = MVPP2_CAUSE_MISC_SUM_MASK |
		MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(port->priv->hw_version);
	if (port->has_tx_irqs)
		val |= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;

	mvpp2_percpu_write(port->priv, smp_processor_id(),
			   MVPP2_ISR_RX_TX_MASK_REG(port->id), val);
}
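/* Illustrative call sites (a sketch of how the two callbacks above are
 * meant to be invoked, per their comments): run them on every online
 * CPU and wait for completion.
 *
 *	on_each_cpu(mvpp2_interrupts_mask, port, 1);
 *	on_each_cpu(mvpp2_interrupts_unmask, port, 1);
 */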
static void
mvpp2_shared_interrupt_mask_unmask(struct mvpp2_port *port, bool mask)
{
	u32 val;
	int i;

	if (port->priv->hw_version != MVPP22)
		return;

	if (mask)
		val = 0;
	else
		val = MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(MVPP22);

	for (i = 0; i < port->nqvecs; i++) {
		struct mvpp2_queue_vector *v = port->qvecs + i;

		if (v->type != MVPP2_QUEUE_VECTOR_SHARED)
			continue;

		mvpp2_percpu_write(port->priv, v->sw_thread_id,
				   MVPP2_ISR_RX_TX_MASK_REG(port->id), val);
	}
}
/* Port configuration routines */

static void mvpp22_gop_init_rgmii(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	u32 val;

	regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val);
	val |= GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT;
	regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val);

	regmap_read(priv->sysctrl_base, GENCONF_CTRL0, &val);
	if (port->gop_id == 2)
		val |= GENCONF_CTRL0_PORT0_RGMII;
	else if (port->gop_id == 3)
		val |= GENCONF_CTRL0_PORT1_RGMII_MII;
	regmap_write(priv->sysctrl_base, GENCONF_CTRL0, val);
}
static void mvpp22_gop_init_sgmii(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	u32 val;

	regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val);
	val |= GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT |
	       GENCONF_PORT_CTRL0_RX_DATA_SAMPLE;
	regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val);

	if (port->gop_id > 1) {
		regmap_read(priv->sysctrl_base, GENCONF_CTRL0, &val);
		if (port->gop_id == 2)
			val &= ~GENCONF_CTRL0_PORT0_RGMII;
		else if (port->gop_id == 3)
			val &= ~GENCONF_CTRL0_PORT1_RGMII_MII;
		regmap_write(priv->sysctrl_base, GENCONF_CTRL0, val);
	}
}
static void mvpp22_gop_init_10gkr(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	void __iomem *mpcs = priv->iface_base + MVPP22_MPCS_BASE(port->gop_id);
	void __iomem *xpcs = priv->iface_base + MVPP22_XPCS_BASE(port->gop_id);
	u32 val;

	val = readl(xpcs + MVPP22_XPCS_CFG0);
	val &= ~(MVPP22_XPCS_CFG0_PCS_MODE(0x3) |
		 MVPP22_XPCS_CFG0_ACTIVE_LANE(0x3));
	val |= MVPP22_XPCS_CFG0_ACTIVE_LANE(2);
	writel(val, xpcs + MVPP22_XPCS_CFG0);

	val = readl(mpcs + MVPP22_MPCS_CTRL);
	val &= ~MVPP22_MPCS_CTRL_FWD_ERR_CONN;
	writel(val, mpcs + MVPP22_MPCS_CTRL);

	val = readl(mpcs + MVPP22_MPCS_CLK_RESET);
	val &= ~(MVPP22_MPCS_CLK_RESET_DIV_RATIO(0x7) | MAC_CLK_RESET_MAC |
		 MAC_CLK_RESET_SD_RX | MAC_CLK_RESET_SD_TX);
	val |= MVPP22_MPCS_CLK_RESET_DIV_RATIO(1);
	writel(val, mpcs + MVPP22_MPCS_CLK_RESET);

	val &= ~MVPP22_MPCS_CLK_RESET_DIV_SET;
	val |= MAC_CLK_RESET_MAC | MAC_CLK_RESET_SD_RX | MAC_CLK_RESET_SD_TX;
	writel(val, mpcs + MVPP22_MPCS_CLK_RESET);
}
static int mvpp22_gop_init(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	u32 val;

	if (!priv->sysctrl_base)
		return 0;

	switch (port->phy_interface) {
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII_RXID:
	case PHY_INTERFACE_MODE_RGMII_TXID:
		if (port->gop_id == 0)
			goto invalid_conf;
		mvpp22_gop_init_rgmii(port);
		break;
	case PHY_INTERFACE_MODE_SGMII:
	case PHY_INTERFACE_MODE_1000BASEX:
	case PHY_INTERFACE_MODE_2500BASEX:
		mvpp22_gop_init_sgmii(port);
		break;
	case PHY_INTERFACE_MODE_10GKR:
		if (port->gop_id != 0)
			goto invalid_conf;
		mvpp22_gop_init_10gkr(port);
		break;
	default:
		goto unsupported_conf;
	}

	regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL1, &val);
	val |= GENCONF_PORT_CTRL1_RESET(port->gop_id) |
	       GENCONF_PORT_CTRL1_EN(port->gop_id);
	regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL1, val);

	regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val);
	val |= GENCONF_PORT_CTRL0_CLK_DIV_PHASE_CLR;
	regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val);

	regmap_read(priv->sysctrl_base, GENCONF_SOFT_RESET1, &val);
	val |= GENCONF_SOFT_RESET1_GOP;
	regmap_write(priv->sysctrl_base, GENCONF_SOFT_RESET1, val);

unsupported_conf:
	return 0;

invalid_conf:
	netdev_err(port->dev, "Invalid port configuration\n");
	return -EINVAL;
}
static void mvpp22_gop_unmask_irq(struct mvpp2_port *port)
{
	u32 val;

	if (phy_interface_mode_is_rgmii(port->phy_interface) ||
	    port->phy_interface == PHY_INTERFACE_MODE_SGMII ||
	    port->phy_interface == PHY_INTERFACE_MODE_1000BASEX ||
	    port->phy_interface == PHY_INTERFACE_MODE_2500BASEX) {
		/* Enable the GMAC link status irq for this port */
		val = readl(port->base + MVPP22_GMAC_INT_SUM_MASK);
		val |= MVPP22_GMAC_INT_SUM_MASK_LINK_STAT;
		writel(val, port->base + MVPP22_GMAC_INT_SUM_MASK);
	}

	if (port->gop_id == 0) {
		/* Enable the XLG/GIG irqs for this port */
		val = readl(port->base + MVPP22_XLG_EXT_INT_MASK);
		if (port->phy_interface == PHY_INTERFACE_MODE_10GKR)
			val |= MVPP22_XLG_EXT_INT_MASK_XLG;
		else
			val |= MVPP22_XLG_EXT_INT_MASK_GIG;
		writel(val, port->base + MVPP22_XLG_EXT_INT_MASK);
	}
}
static void mvpp22_gop_mask_irq(struct mvpp2_port *port)
{
	u32 val;

	if (port->gop_id == 0) {
		val = readl(port->base + MVPP22_XLG_EXT_INT_MASK);
		val &= ~(MVPP22_XLG_EXT_INT_MASK_XLG |
			 MVPP22_XLG_EXT_INT_MASK_GIG);
		writel(val, port->base + MVPP22_XLG_EXT_INT_MASK);
	}

	if (phy_interface_mode_is_rgmii(port->phy_interface) ||
	    port->phy_interface == PHY_INTERFACE_MODE_SGMII ||
	    port->phy_interface == PHY_INTERFACE_MODE_1000BASEX ||
	    port->phy_interface == PHY_INTERFACE_MODE_2500BASEX) {
		val = readl(port->base + MVPP22_GMAC_INT_SUM_MASK);
		val &= ~MVPP22_GMAC_INT_SUM_MASK_LINK_STAT;
		writel(val, port->base + MVPP22_GMAC_INT_SUM_MASK);
	}
}
static void mvpp22_gop_setup_irq(struct mvpp2_port *port)
{
	u32 val;

	if (phy_interface_mode_is_rgmii(port->phy_interface) ||
	    port->phy_interface == PHY_INTERFACE_MODE_SGMII ||
	    port->phy_interface == PHY_INTERFACE_MODE_1000BASEX ||
	    port->phy_interface == PHY_INTERFACE_MODE_2500BASEX) {
		val = readl(port->base + MVPP22_GMAC_INT_MASK);
		val |= MVPP22_GMAC_INT_MASK_LINK_STAT;
		writel(val, port->base + MVPP22_GMAC_INT_MASK);
	}

	if (port->gop_id == 0) {
		val = readl(port->base + MVPP22_XLG_INT_MASK);
		val |= MVPP22_XLG_INT_MASK_LINK;
		writel(val, port->base + MVPP22_XLG_INT_MASK);
	}

	mvpp22_gop_unmask_irq(port);
}
/* Sets the PHY mode of the COMPHY (which configures the serdes lanes).
 *
 * The PHY mode used by the PPv2 driver comes from the network subsystem, while
 * the one given to the COMPHY comes from the generic PHY subsystem. Hence they
 * differ and must be translated.
 *
 * The COMPHY configures the serdes lanes regardless of the actual use of the
 * lanes by the physical layer. This is why configurations like
 * "PPv2 (2500BaseX) - COMPHY (2500SGMII)" are valid.
 */
static int mvpp22_comphy_init(struct mvpp2_port *port)
{
	enum phy_mode mode;
	int ret;

	if (!port->comphy)
		return 0;

	switch (port->phy_interface) {
	case PHY_INTERFACE_MODE_SGMII:
	case PHY_INTERFACE_MODE_1000BASEX:
		mode = PHY_MODE_SGMII;
		break;
	case PHY_INTERFACE_MODE_2500BASEX:
		mode = PHY_MODE_2500SGMII;
		break;
	case PHY_INTERFACE_MODE_10GKR:
		mode = PHY_MODE_10GKR;
		break;
	default:
		return -EINVAL;
	}

	ret = phy_set_mode(port->comphy, mode);
	if (ret)
		return ret;

	return phy_power_on(port->comphy);
}
static void mvpp2_port_enable(struct mvpp2_port *port)
{
	u32 val;

	/* Only GOP port 0 has an XLG MAC */
	if (port->gop_id == 0 &&
	    (port->phy_interface == PHY_INTERFACE_MODE_XAUI ||
	     port->phy_interface == PHY_INTERFACE_MODE_10GKR)) {
		val = readl(port->base + MVPP22_XLG_CTRL0_REG);
		val |= MVPP22_XLG_CTRL0_PORT_EN |
		       MVPP22_XLG_CTRL0_MAC_RESET_DIS;
		val &= ~MVPP22_XLG_CTRL0_MIB_CNT_DIS;
		writel(val, port->base + MVPP22_XLG_CTRL0_REG);
	} else {
		val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
		val |= MVPP2_GMAC_PORT_EN_MASK;
		val |= MVPP2_GMAC_MIB_CNTR_EN_MASK;
		writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
	}
}
static void mvpp2_port_disable(struct mvpp2_port *port)
{
	u32 val;

	/* Only GOP port 0 has an XLG MAC */
	if (port->gop_id == 0 &&
	    (port->phy_interface == PHY_INTERFACE_MODE_XAUI ||
	     port->phy_interface == PHY_INTERFACE_MODE_10GKR)) {
		val = readl(port->base + MVPP22_XLG_CTRL0_REG);
		val &= ~MVPP22_XLG_CTRL0_PORT_EN;
		writel(val, port->base + MVPP22_XLG_CTRL0_REG);

		/* Disable & reset should be done separately */
		val &= ~MVPP22_XLG_CTRL0_MAC_RESET_DIS;
		writel(val, port->base + MVPP22_XLG_CTRL0_REG);
	} else {
		val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
		val &= ~(MVPP2_GMAC_PORT_EN_MASK);
		writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
	}
}
/* Set IEEE 802.3x Flow Control Xon Packet Transmission Mode */
static void mvpp2_port_periodic_xon_disable(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_1_REG) &
		    ~MVPP2_GMAC_PERIODIC_XON_EN_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
}
/* Configure loopback port */
static void mvpp2_port_loopback_set(struct mvpp2_port *port,
				    const struct phylink_link_state *state)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_1_REG);

	if (state->speed == 1000)
		val |= MVPP2_GMAC_GMII_LB_EN_MASK;
	else
		val &= ~MVPP2_GMAC_GMII_LB_EN_MASK;

	if (port->phy_interface == PHY_INTERFACE_MODE_SGMII ||
	    port->phy_interface == PHY_INTERFACE_MODE_1000BASEX ||
	    port->phy_interface == PHY_INTERFACE_MODE_2500BASEX)
		val |= MVPP2_GMAC_PCS_LB_EN_MASK;
	else
		val &= ~MVPP2_GMAC_PCS_LB_EN_MASK;

	writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
}
struct mvpp2_ethtool_counter {
	unsigned int offset;
	const char string[ETH_GSTRING_LEN];
	bool reg_is_64b;
};

static u64 mvpp2_read_count(struct mvpp2_port *port,
			    const struct mvpp2_ethtool_counter *counter)
{
	u64 val;

	val = readl(port->stats_base + counter->offset);
	if (counter->reg_is_64b)
		val += (u64)readl(port->stats_base + counter->offset + 4) << 32;

	return val;
}
/* Due to the fact that software statistics and hardware statistics are, by
 * design, incremented at different moments in the chain of packet processing,
 * it is very likely that incoming packets could have been dropped after being
 * counted by hardware but before reaching software statistics (most probably
 * multicast packets), and in the opposite way, during transmission, FCS bytes
 * are added in between as well as TSO skb will be split and header bytes added.
 * Hence, statistics gathered from userspace with ifconfig (software) and
 * ethtool (hardware) cannot be compared.
 */
static const struct mvpp2_ethtool_counter mvpp2_ethtool_regs[] = {
	{ MVPP2_MIB_GOOD_OCTETS_RCVD, "good_octets_received", true },
	{ MVPP2_MIB_BAD_OCTETS_RCVD, "bad_octets_received" },
	{ MVPP2_MIB_CRC_ERRORS_SENT, "crc_errors_sent" },
	{ MVPP2_MIB_UNICAST_FRAMES_RCVD, "unicast_frames_received" },
	{ MVPP2_MIB_BROADCAST_FRAMES_RCVD, "broadcast_frames_received" },
	{ MVPP2_MIB_MULTICAST_FRAMES_RCVD, "multicast_frames_received" },
	{ MVPP2_MIB_FRAMES_64_OCTETS, "frames_64_octets" },
	{ MVPP2_MIB_FRAMES_65_TO_127_OCTETS, "frames_65_to_127_octet" },
	{ MVPP2_MIB_FRAMES_128_TO_255_OCTETS, "frames_128_to_255_octet" },
	{ MVPP2_MIB_FRAMES_256_TO_511_OCTETS, "frames_256_to_511_octet" },
	{ MVPP2_MIB_FRAMES_512_TO_1023_OCTETS, "frames_512_to_1023_octet" },
	{ MVPP2_MIB_FRAMES_1024_TO_MAX_OCTETS, "frames_1024_to_max_octet" },
	{ MVPP2_MIB_GOOD_OCTETS_SENT, "good_octets_sent", true },
	{ MVPP2_MIB_UNICAST_FRAMES_SENT, "unicast_frames_sent" },
	{ MVPP2_MIB_MULTICAST_FRAMES_SENT, "multicast_frames_sent" },
	{ MVPP2_MIB_BROADCAST_FRAMES_SENT, "broadcast_frames_sent" },
	{ MVPP2_MIB_FC_SENT, "fc_sent" },
	{ MVPP2_MIB_FC_RCVD, "fc_received" },
	{ MVPP2_MIB_RX_FIFO_OVERRUN, "rx_fifo_overrun" },
	{ MVPP2_MIB_UNDERSIZE_RCVD, "undersize_received" },
	{ MVPP2_MIB_FRAGMENTS_RCVD, "fragments_received" },
	{ MVPP2_MIB_OVERSIZE_RCVD, "oversize_received" },
	{ MVPP2_MIB_JABBER_RCVD, "jabber_received" },
	{ MVPP2_MIB_MAC_RCV_ERROR, "mac_receive_error" },
	{ MVPP2_MIB_BAD_CRC_EVENT, "bad_crc_event" },
	{ MVPP2_MIB_COLLISION, "collision" },
	{ MVPP2_MIB_LATE_COLLISION, "late_collision" },
};
static void mvpp2_ethtool_get_strings(struct net_device *netdev, u32 sset,
				      u8 *data)
{
	if (sset == ETH_SS_STATS) {
		int i;

		for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_regs); i++)
			strscpy(data + i * ETH_GSTRING_LEN,
				mvpp2_ethtool_regs[i].string, ETH_GSTRING_LEN);
	}
}
static void mvpp2_gather_hw_statistics(struct work_struct *work)
{
	struct delayed_work *del_work = to_delayed_work(work);
	struct mvpp2_port *port = container_of(del_work, struct mvpp2_port,
					       stats_work);
	u64 *pstats;
	int i;

	mutex_lock(&port->gather_stats_lock);

	pstats = port->ethtool_stats;
	for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_regs); i++)
		*pstats++ += mvpp2_read_count(port, &mvpp2_ethtool_regs[i]);

	/* No need to read again the counters right after this function if it
	 * was called asynchronously by the user (ie. use of ethtool).
	 */
	cancel_delayed_work(&port->stats_work);
	queue_delayed_work(port->priv->stats_queue, &port->stats_work,
			   MVPP2_MIB_COUNTERS_STATS_DELAY);

	mutex_unlock(&port->gather_stats_lock);
}
static void mvpp2_ethtool_get_stats(struct net_device *dev,
				    struct ethtool_stats *stats, u64 *data)
{
	struct mvpp2_port *port = netdev_priv(dev);

	/* Update statistics for the given port, then take the lock to avoid
	 * concurrent accesses on the ethtool_stats structure during its copy.
	 */
	mvpp2_gather_hw_statistics(&port->stats_work.work);

	mutex_lock(&port->gather_stats_lock);
	memcpy(data, port->ethtool_stats,
	       sizeof(u64) * ARRAY_SIZE(mvpp2_ethtool_regs));
	mutex_unlock(&port->gather_stats_lock);
}
static int mvpp2_ethtool_get_sset_count(struct net_device *dev, int sset)
{
	if (sset == ETH_SS_STATS)
		return ARRAY_SIZE(mvpp2_ethtool_regs);

	return -EOPNOTSUPP;
}
static void mvpp2_port_reset(struct mvpp2_port *port)
{
	u32 val;
	unsigned int i;

	/* Read the GOP statistics to reset the hardware counters */
	for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_regs); i++)
		mvpp2_read_count(port, &mvpp2_ethtool_regs[i]);

	val = readl(port->base + MVPP2_GMAC_CTRL_2_REG) |
	      MVPP2_GMAC_PORT_RESET_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
}
/* Change maximum receive size of the port */
static inline void mvpp2_gmac_max_rx_size_set(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
	val &= ~MVPP2_GMAC_MAX_RX_SIZE_MASK;
	val |= (((port->pkt_size - MVPP2_MH_SIZE) / 2) <<
		MVPP2_GMAC_MAX_RX_SIZE_OFFS);
	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
}

/* Change maximum receive size of the port */
static inline void mvpp2_xlg_max_rx_size_set(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP22_XLG_CTRL1_REG);
	val &= ~MVPP22_XLG_CTRL1_FRAMESIZELIMIT_MASK;
	val |= ((port->pkt_size - MVPP2_MH_SIZE) / 2) <<
	       MVPP22_XLG_CTRL1_FRAMESIZELIMIT_OFFS;
	writel(val, port->base + MVPP22_XLG_CTRL1_REG);
}
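/* Note on the "/ 2" above: the GMAC and XLG maximum-receive-size fields
 * appear to count 2-byte units, so the byte limit (minus the 2-byte
 * Marvell header) is halved before being written. This reading is
 * inferred from the code, not from a datasheet.
 */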
/* Set defaults to the MVPP2 port */
static void mvpp2_defaults_set(struct mvpp2_port *port)
{
	int tx_port_num, val, queue, lrxq;

	if (port->priv->hw_version == MVPP21) {
		/* Update TX FIFO MIN Threshold */
		val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
		val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
		/* Min. TX threshold must be less than minimal packet length */
		val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(64 - 4 - 2);
		writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
	}

	/* Disable Legacy WRR, Disable EJP, Release from reset */
	tx_port_num = mvpp2_egress_port(port);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG,
		    tx_port_num);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_CMD_1_REG, 0);

	/* Close bandwidth for all queues */
	for (queue = 0; queue < MVPP2_MAX_TXQ; queue++)
		mvpp2_write(port->priv,
			    MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(queue), 0);

	/* Set refill period to 1 usec, refill tokens
	 * and bucket size to maximum
	 */
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PERIOD_REG,
		    port->priv->tclk / USEC_PER_SEC);
	val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_REFILL_REG);
	val &= ~MVPP2_TXP_REFILL_PERIOD_ALL_MASK;
	val |= MVPP2_TXP_REFILL_PERIOD_MASK(1);
	val |= MVPP2_TXP_REFILL_TOKENS_ALL_MASK;
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_REFILL_REG, val);
	val = MVPP2_TXP_TOKEN_SIZE_MAX;
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);

	/* Set MaximumLowLatencyPacketSize value to 256 */
	mvpp2_write(port->priv, MVPP2_RX_CTRL_REG(port->id),
		    MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK |
		    MVPP2_RX_LOW_LATENCY_PKT_SIZE(256));

	/* Enable Rx cache snoop */
	for (lrxq = 0; lrxq < port->nrxqs; lrxq++) {
		queue = port->rxqs[lrxq]->id;
		val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
		val |= MVPP2_SNOOP_PKT_SIZE_MASK |
			MVPP2_SNOOP_BUF_HDR_MASK;
		mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
	}

	/* At default, mask all interrupts to all present cpus */
	mvpp2_interrupts_disable(port);
}
/* Enable/disable receiving packets */
static void mvpp2_ingress_enable(struct mvpp2_port *port)
{
	u32 val;
	int lrxq, queue;

	for (lrxq = 0; lrxq < port->nrxqs; lrxq++) {
		queue = port->rxqs[lrxq]->id;
		val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
		val &= ~MVPP2_RXQ_DISABLE_MASK;
		mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
	}
}

static void mvpp2_ingress_disable(struct mvpp2_port *port)
{
	u32 val;
	int lrxq, queue;

	for (lrxq = 0; lrxq < port->nrxqs; lrxq++) {
		queue = port->rxqs[lrxq]->id;
		val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
		val |= MVPP2_RXQ_DISABLE_MASK;
		mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
	}
}
/* Enable transmit via physical egress queue
 * - HW starts taking descriptors from DRAM
 */
static void mvpp2_egress_enable(struct mvpp2_port *port)
{
	u32 qmap;
	int queue;
	int tx_port_num = mvpp2_egress_port(port);

	/* Enable all initialized TXs. */
	qmap = 0;
	for (queue = 0; queue < port->ntxqs; queue++) {
		struct mvpp2_tx_queue *txq = port->txqs[queue];

		if (txq->descs)
			qmap |= (1 << queue);
	}

	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG, qmap);
}
/* Disable transmit via physical egress queue
 * - HW doesn't take descriptors from DRAM
 */
static void mvpp2_egress_disable(struct mvpp2_port *port)
{
	u32 reg_data;
	int delay;
	int tx_port_num = mvpp2_egress_port(port);

	/* Issue stop command for active channels only */
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
	reg_data = (mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG)) &
		    MVPP2_TXP_SCHED_ENQ_MASK;
	if (reg_data != 0)
		mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG,
			    (reg_data << MVPP2_TXP_SCHED_DISQ_OFFSET));

	/* Wait for all Tx activity to terminate. */
	delay = 0;
	do {
		if (delay >= MVPP2_TX_DISABLE_TIMEOUT_MSEC) {
			netdev_warn(port->dev,
				    "Tx stop timed out, status=0x%08x\n",
				    reg_data);
			break;
		}
		mdelay(1);
		delay++;

		/* Check port TX Command register that all
		 * Tx queues are stopped
		 */
		reg_data = mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG);
	} while (reg_data & MVPP2_TXP_SCHED_ENQ_MASK);
}
/* Rx descriptors helper methods */

/* Get number of Rx descriptors occupied by received packets */
static inline int
mvpp2_rxq_received(struct mvpp2_port *port, int rxq_id)
{
	u32 val = mvpp2_read(port->priv, MVPP2_RXQ_STATUS_REG(rxq_id));

	return val & MVPP2_RXQ_OCCUPIED_MASK;
}

/* Update Rx queue status with the number of occupied and available
 * Rx descriptor slots.
 */
static inline void
mvpp2_rxq_status_update(struct mvpp2_port *port, int rxq_id,
			int used_count, int free_count)
{
	/* Decrement the number of used descriptors and increment the
	 * number of free descriptors.
	 */
	u32 val = used_count | (free_count << MVPP2_RXQ_NUM_NEW_OFFSET);

	mvpp2_write(port->priv, MVPP2_RXQ_STATUS_UPDATE_REG(rxq_id), val);
}
/* Get pointer to next RX descriptor to be processed by SW */
static inline struct mvpp2_rx_desc *
mvpp2_rxq_next_desc_get(struct mvpp2_rx_queue *rxq)
{
	int rx_desc = rxq->next_desc_to_proc;

	rxq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(rxq, rx_desc);
	prefetch(rxq->descs + rxq->next_desc_to_proc);
	return rxq->descs + rx_desc;
}
/* Set rx queue offset */
static void mvpp2_rxq_offset_set(struct mvpp2_port *port,
				 int prxq, int offset)
{
	u32 val;

	/* Convert offset from bytes to units of 32 bytes */
	offset = offset >> 5;

	val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
	val &= ~MVPP2_RXQ_PACKET_OFFSET_MASK;

	val |= ((offset << MVPP2_RXQ_PACKET_OFFSET_OFFS) &
		MVPP2_RXQ_PACKET_OFFSET_MASK);

	mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
}
/* Tx descriptors helper methods */

/* Get pointer to next Tx descriptor to be processed (send) by HW */
static struct mvpp2_tx_desc *
mvpp2_txq_next_desc_get(struct mvpp2_tx_queue *txq)
{
	int tx_desc = txq->next_desc_to_proc;

	txq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(txq, tx_desc);
	return txq->descs + tx_desc;
}

/* Update HW with number of aggregated Tx descriptors to be sent
 *
 * Called only from mvpp2_tx(), so migration is disabled, using
 * smp_processor_id() is OK.
 */
static void mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *port, int pending)
{
	/* aggregated access - relevant TXQ number is written in TX desc */
	mvpp2_percpu_write(port->priv, smp_processor_id(),
			   MVPP2_AGGR_TXQ_UPDATE_REG, pending);
}
/* Check if there are enough free descriptors in aggregated txq.
 * If not, update the number of occupied descriptors and repeat the check.
 *
 * Called only from mvpp2_tx(), so migration is disabled, using
 * smp_processor_id() is OK.
 */
static int mvpp2_aggr_desc_num_check(struct mvpp2 *priv,
				     struct mvpp2_tx_queue *aggr_txq, int num)
{
	if ((aggr_txq->count + num) > MVPP2_AGGR_TXQ_SIZE) {
		/* Update number of occupied aggregated Tx descriptors */
		int cpu = smp_processor_id();
		u32 val = mvpp2_read_relaxed(priv,
					     MVPP2_AGGR_TXQ_STATUS_REG(cpu));

		aggr_txq->count = val & MVPP2_AGGR_TXQ_PENDING_MASK;

		if ((aggr_txq->count + num) > MVPP2_AGGR_TXQ_SIZE)
			return -ENOMEM;
	}
	return 0;
}
/* Reserved Tx descriptors allocation request
 *
 * Called only from mvpp2_txq_reserved_desc_num_proc(), itself called
 * only by mvpp2_tx(), so migration is disabled, using
 * smp_processor_id() is OK.
 */
static int mvpp2_txq_alloc_reserved_desc(struct mvpp2 *priv,
					 struct mvpp2_tx_queue *txq, int num)
{
	u32 val;
	int cpu = smp_processor_id();

	val = (txq->id << MVPP2_TXQ_RSVD_REQ_Q_OFFSET) | num;
	mvpp2_percpu_write_relaxed(priv, cpu, MVPP2_TXQ_RSVD_REQ_REG, val);

	val = mvpp2_percpu_read_relaxed(priv, cpu, MVPP2_TXQ_RSVD_RSLT_REG);

	return val & MVPP2_TXQ_RSVD_RSLT_MASK;
}
/* Check if there are enough reserved descriptors for transmission.
 * If not, request chunk of reserved descriptors and check again.
 */
static int mvpp2_txq_reserved_desc_num_proc(struct mvpp2 *priv,
					    struct mvpp2_tx_queue *txq,
					    struct mvpp2_txq_pcpu *txq_pcpu,
					    int num)
{
	int req, cpu, desc_count;

	if (txq_pcpu->reserved_num >= num)
		return 0;

	/* Not enough descriptors reserved! Update the reserved descriptor
	 * count and check again.
	 */

	desc_count = 0;
	/* Compute total of used descriptors */
	for_each_present_cpu(cpu) {
		struct mvpp2_txq_pcpu *txq_pcpu_aux;

		txq_pcpu_aux = per_cpu_ptr(txq->pcpu, cpu);
		desc_count += txq_pcpu_aux->count;
		desc_count += txq_pcpu_aux->reserved_num;
	}

	req = max(MVPP2_CPU_DESC_CHUNK, num - txq_pcpu->reserved_num);
	desc_count += req;

	if (desc_count >
	    (txq->size - (num_present_cpus() * MVPP2_CPU_DESC_CHUNK)))
		return -ENOMEM;

	txq_pcpu->reserved_num += mvpp2_txq_alloc_reserved_desc(priv, txq, req);

	/* OK, the descriptor could have been updated: check again. */
	if (txq_pcpu->reserved_num < num)
		return -ENOMEM;
	return 0;
}
/* Release the last allocated Tx descriptor. Useful to handle DMA
 * mapping failures in the Tx path.
 */
static void mvpp2_txq_desc_put(struct mvpp2_tx_queue *txq)
{
	if (txq->next_desc_to_proc == 0)
		txq->next_desc_to_proc = txq->last_desc - 1;
	else
		txq->next_desc_to_proc--;
}
/* Set Tx descriptors fields relevant for CSUM calculation */
static u32 mvpp2_txq_desc_csum(int l3_offs, __be16 l3_proto,
			       int ip_hdr_len, int l4_proto)
{
	u32 command;

	/* fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk,
	 * G_L4_chk, L4_type required only for checksum calculation
	 */
	command = (l3_offs << MVPP2_TXD_L3_OFF_SHIFT);
	command |= (ip_hdr_len << MVPP2_TXD_IP_HLEN_SHIFT);
	command |= MVPP2_TXD_IP_CSUM_DISABLE;

	if (l3_proto == htons(ETH_P_IP)) {
		command &= ~MVPP2_TXD_IP_CSUM_DISABLE;	/* enable IPv4 csum */
		command &= ~MVPP2_TXD_L3_IP6;		/* enable IPv4 */
	} else {
		command |= MVPP2_TXD_L3_IP6;		/* enable IPv6 */
	}

	if (l4_proto == IPPROTO_TCP) {
		command &= ~MVPP2_TXD_L4_UDP;		/* enable TCP */
		command &= ~MVPP2_TXD_L4_CSUM_FRAG;	/* generate L4 csum */
	} else if (l4_proto == IPPROTO_UDP) {
		command |= MVPP2_TXD_L4_UDP;		/* enable UDP */
		command &= ~MVPP2_TXD_L4_CSUM_FRAG;	/* generate L4 csum */
	} else {
		command |= MVPP2_TXD_L4_CSUM_NOT;
	}

	return command;
}
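/* Illustrative caller (a sketch only; variable names are hypothetical):
 * derive the checksum command for an IPv4/TCP skb using the standard
 * skb helpers, then program the result into the Tx descriptor command
 * field via mvpp2_txdesc_cmd_set().
 *
 *	int l3_offs = skb_network_offset(skb);
 *	int ip_hdr_len = ip_hdr(skb)->ihl * 4;
 *	u32 cmd = mvpp2_txq_desc_csum(l3_offs, skb->protocol,
 *				      ip_hdr_len, IPPROTO_TCP);
 */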
/* Get number of sent descriptors and decrement counter.
 * The number of sent descriptors is returned.
 * Per-CPU access
 *
 * Called only from mvpp2_txq_done(), called from mvpp2_tx()
 * (migration disabled) and from the TX completion tasklet (migration
 * disabled) so using smp_processor_id() is OK.
 */
static inline int mvpp2_txq_sent_desc_proc(struct mvpp2_port *port,
					   struct mvpp2_tx_queue *txq)
{
	u32 val;

	/* Reading status reg resets transmitted descriptor counter */
	val = mvpp2_percpu_read_relaxed(port->priv, smp_processor_id(),
					MVPP2_TXQ_SENT_REG(txq->id));

	return (val & MVPP2_TRANSMITTED_COUNT_MASK) >>
		MVPP2_TRANSMITTED_COUNT_OFFSET;
}
/* Called through on_each_cpu(), so runs on all CPUs, with migration
 * disabled, therefore using smp_processor_id() is OK.
 */
static void mvpp2_txq_sent_counter_clear(void *arg)
{
	struct mvpp2_port *port = arg;
	int queue;

	for (queue = 0; queue < port->ntxqs; queue++) {
		int id = port->txqs[queue]->id;

		mvpp2_percpu_read(port->priv, smp_processor_id(),
				  MVPP2_TXQ_SENT_REG(id));
	}
}
/* Set max sizes for Tx queues */
static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *port)
{
	u32 val, size, mtu;
	int txq, tx_port_num;

	mtu = port->pkt_size * 8;
	if (mtu > MVPP2_TXP_MTU_MAX)
		mtu = MVPP2_TXP_MTU_MAX;

	/* Workaround for wrong Token bucket update: set MTU value = 3 * real
	 * MTU value
	 */
	mtu = 3 * mtu;

	/* Indirect access to registers */
	tx_port_num = mvpp2_egress_port(port);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);

	/* Set MTU */
	val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_MTU_REG);
	val &= ~MVPP2_TXP_MTU_MAX;
	val |= mtu;
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_MTU_REG, val);

	/* TXP token size and all TXQs token size must be larger than MTU */
	val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG);
	size = val & MVPP2_TXP_TOKEN_SIZE_MAX;
	if (size < mtu) {
		size = mtu;
		val &= ~MVPP2_TXP_TOKEN_SIZE_MAX;
		val |= size;
		mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
	}

	for (txq = 0; txq < port->ntxqs; txq++) {
		val = mvpp2_read(port->priv,
				 MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq));
		size = val & MVPP2_TXQ_TOKEN_SIZE_MAX;

		if (size < mtu) {
			size = mtu;
			val &= ~MVPP2_TXQ_TOKEN_SIZE_MAX;
			val |= size;
			mvpp2_write(port->priv,
				    MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq),
				    val);
		}
	}
}
/* Set the number of packets that will be received before an Rx interrupt
 * is generated by HW.
 */
static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port,
				   struct mvpp2_rx_queue *rxq)
{
	int cpu = get_cpu();

	if (rxq->pkts_coal > MVPP2_OCCUPIED_THRESH_MASK)
		rxq->pkts_coal = MVPP2_OCCUPIED_THRESH_MASK;

	mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id);
	mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_THRESH_REG,
			   rxq->pkts_coal);

	put_cpu();
}
/* For some reason the LSP vendor driver does this on each CPU; the reason
 * is unclear.
 */
static void mvpp2_tx_pkts_coal_set(struct mvpp2_port *port,
				   struct mvpp2_tx_queue *txq)
{
	int cpu = get_cpu();
	u32 val;

	if (txq->done_pkts_coal > MVPP2_TXQ_THRESH_MASK)
		txq->done_pkts_coal = MVPP2_TXQ_THRESH_MASK;

	val = (txq->done_pkts_coal << MVPP2_TXQ_THRESH_OFFSET);
	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_THRESH_REG, val);

	put_cpu();
}
static u32 mvpp2_usec_to_cycles(u32 usec, unsigned long clk_hz)
{
	u64 tmp = (u64)clk_hz * usec;

	do_div(tmp, USEC_PER_SEC);

	return tmp > U32_MAX ? U32_MAX : tmp;
}

static u32 mvpp2_cycles_to_usec(u32 cycles, unsigned long clk_hz)
{
	u64 tmp = (u64)cycles * USEC_PER_SEC;

	do_div(tmp, clk_hz);

	return tmp > U32_MAX ? U32_MAX : tmp;
}
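/* Worked example (values purely illustrative): with clk_hz = 250 MHz,
 * mvpp2_usec_to_cycles(32, 250000000) = 250000000 * 32 / 1000000 = 8000
 * cycles, and mvpp2_cycles_to_usec(8000, 250000000) maps back to 32 usec.
 */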
/* Set the time delay in usec before Rx interrupt */
static void mvpp2_rx_time_coal_set(struct mvpp2_port *port,
				   struct mvpp2_rx_queue *rxq)
{
	unsigned long freq = port->priv->tclk;
	u32 val = mvpp2_usec_to_cycles(rxq->time_coal, freq);

	if (val > MVPP2_MAX_ISR_RX_THRESHOLD) {
		rxq->time_coal =
			mvpp2_cycles_to_usec(MVPP2_MAX_ISR_RX_THRESHOLD, freq);

		/* re-evaluate to get actual register value */
		val = mvpp2_usec_to_cycles(rxq->time_coal, freq);
	}

	mvpp2_write(port->priv, MVPP2_ISR_RX_THRESHOLD_REG(rxq->id), val);
}
static void mvpp2_tx_time_coal_set(struct mvpp2_port *port)
{
	unsigned long freq = port->priv->tclk;
	u32 val = mvpp2_usec_to_cycles(port->tx_time_coal, freq);

	if (val > MVPP2_MAX_ISR_TX_THRESHOLD) {
		port->tx_time_coal =
			mvpp2_cycles_to_usec(MVPP2_MAX_ISR_TX_THRESHOLD, freq);

		/* re-evaluate to get actual register value */
		val = mvpp2_usec_to_cycles(port->tx_time_coal, freq);
	}

	mvpp2_write(port->priv, MVPP2_ISR_TX_THRESHOLD_REG(port->id), val);
}
/* Free Tx queue skbuffs */
static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
				struct mvpp2_tx_queue *txq,
				struct mvpp2_txq_pcpu *txq_pcpu, int num)
{
	int i;

	for (i = 0; i < num; i++) {
		struct mvpp2_txq_pcpu_buf *tx_buf =
			txq_pcpu->buffs + txq_pcpu->txq_get_index;

		if (!IS_TSO_HEADER(txq_pcpu, tx_buf->dma))
			dma_unmap_single(port->dev->dev.parent, tx_buf->dma,
					 tx_buf->size, DMA_TO_DEVICE);
		if (tx_buf->skb)
			dev_kfree_skb_any(tx_buf->skb);

		mvpp2_txq_inc_get(txq_pcpu);
	}
}
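/* Map an Rx/Tx interrupt cause bitmap to the highest-numbered queue it
 * reports: bit n set means queue n has work pending.
 */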
static inline struct mvpp2_rx_queue *mvpp2_get_rx_queue(struct mvpp2_port *port,
							u32 cause)
{
	int queue = fls(cause) - 1;

	return port->rxqs[queue];
}

static inline struct mvpp2_tx_queue *mvpp2_get_tx_queue(struct mvpp2_port *port,
							u32 cause)
{
	int queue = fls(cause) - 1;

	return port->txqs[queue];
}
/* Handle end of transmission */
static void mvpp2_txq_done(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
			   struct mvpp2_txq_pcpu *txq_pcpu)
{
	struct netdev_queue *nq = netdev_get_tx_queue(port->dev, txq->log_id);
	int tx_done;

	if (txq_pcpu->cpu != smp_processor_id())
		netdev_err(port->dev, "wrong cpu on the end of Tx processing\n");

	tx_done = mvpp2_txq_sent_desc_proc(port, txq);
	if (!tx_done)
		return;
	mvpp2_txq_bufs_free(port, txq, txq_pcpu, tx_done);

	txq_pcpu->count -= tx_done;

	if (netif_tx_queue_stopped(nq))
		if (txq_pcpu->count <= txq_pcpu->wake_threshold)
			netif_tx_wake_queue(nq);
}
static unsigned int mvpp2_tx_done(struct mvpp2_port *port, u32 cause,
				  int cpu)
{
	struct mvpp2_tx_queue *txq;
	struct mvpp2_txq_pcpu *txq_pcpu;
	unsigned int tx_todo = 0;

	while (cause) {
		txq = mvpp2_get_tx_queue(port, cause);
		if (!txq)
			break;

		txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);

		if (txq_pcpu->count) {
			mvpp2_txq_done(port, txq, txq_pcpu);
			tx_todo += txq_pcpu->count;
		}

		cause &= ~(1 << txq->log_id);
	}
	return tx_todo;
}
/* Rx/Tx queue initialization/cleanup methods */

/* Allocate and initialize descriptors for aggr TXQ */
static int mvpp2_aggr_txq_init(struct platform_device *pdev,
			       struct mvpp2_tx_queue *aggr_txq, int cpu,
			       struct mvpp2 *priv)
{
	u32 txq_dma;

	/* Allocate memory for TX descriptors */
	aggr_txq->descs = dma_zalloc_coherent(&pdev->dev,
					      MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
					      &aggr_txq->descs_dma, GFP_KERNEL);
	if (!aggr_txq->descs)
		return -ENOMEM;

	aggr_txq->last_desc = MVPP2_AGGR_TXQ_SIZE - 1;

	/* Aggr TXQ no reset WA */
	aggr_txq->next_desc_to_proc = mvpp2_read(priv,
						 MVPP2_AGGR_TXQ_INDEX_REG(cpu));

	/* Set Tx descriptors queue starting address indirect
	 * access
	 */
	if (priv->hw_version == MVPP21)
		txq_dma = aggr_txq->descs_dma;
	else
		txq_dma = aggr_txq->descs_dma >>
			MVPP22_AGGR_TXQ_DESC_ADDR_OFFS;

	mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu), txq_dma);
	mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu),
		    MVPP2_AGGR_TXQ_SIZE);

	return 0;
}
/* Create a specified Rx queue */
static int mvpp2_rxq_init(struct mvpp2_port *port,
			  struct mvpp2_rx_queue *rxq)
{
	u32 rxq_dma;
	int cpu;

	rxq->size = port->rx_ring_size;

	/* Allocate memory for RX descriptors */
	rxq->descs = dma_alloc_coherent(port->dev->dev.parent,
					rxq->size * MVPP2_DESC_ALIGNED_SIZE,
					&rxq->descs_dma, GFP_KERNEL);
	if (!rxq->descs)
		return -ENOMEM;

	rxq->last_desc = rxq->size - 1;

	/* Zero occupied and non-occupied counters - direct access */
	mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);

	/* Set Rx descriptors queue starting address - indirect access */
	cpu = get_cpu();
	mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id);
	if (port->priv->hw_version == MVPP21)
		rxq_dma = rxq->descs_dma;
	else
		rxq_dma = rxq->descs_dma >> MVPP22_DESC_ADDR_OFFS;
	mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_ADDR_REG, rxq_dma);
	mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_SIZE_REG, rxq->size);
	mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_INDEX_REG, 0);
	put_cpu();

	/* Set Offset */
	mvpp2_rxq_offset_set(port, rxq->id, NET_SKB_PAD);

	/* Set coalescing pkts and time */
	mvpp2_rx_pkts_coal_set(port, rxq);
	mvpp2_rx_time_coal_set(port, rxq);

	/* Add number of descriptors ready for receiving packets */
	mvpp2_rxq_status_update(port, rxq->id, 0, rxq->size);

	return 0;
}
/* Push packets received by the RXQ to BM pool */
static void mvpp2_rxq_drop_pkts(struct mvpp2_port *port,
				struct mvpp2_rx_queue *rxq)
{
	int rx_received, i;

	rx_received = mvpp2_rxq_received(port, rxq->id);
	if (!rx_received)
		return;

	for (i = 0; i < rx_received; i++) {
		struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
		u32 status = mvpp2_rxdesc_status_get(port, rx_desc);
		int pool;

		pool = (status & MVPP2_RXD_BM_POOL_ID_MASK) >>
			MVPP2_RXD_BM_POOL_ID_OFFS;

		mvpp2_bm_pool_put(port, pool,
				  mvpp2_rxdesc_dma_addr_get(port, rx_desc),
				  mvpp2_rxdesc_cookie_get(port, rx_desc));
	}
	mvpp2_rxq_status_update(port, rxq->id, rx_received, rx_received);
}
2120 /* Cleanup Rx queue */
2121 static void mvpp2_rxq_deinit(struct mvpp2_port *port,
2122 struct mvpp2_rx_queue *rxq)
2126 mvpp2_rxq_drop_pkts(port, rxq);
2129 dma_free_coherent(port->dev->dev.parent,
2130 rxq->size * MVPP2_DESC_ALIGNED_SIZE,
2136 rxq->next_desc_to_proc = 0;
2139 /* Clear Rx descriptors queue starting address and size;
2140 * free descriptor number
2142 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
2144 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id);
2145 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_ADDR_REG, 0);
2146 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_SIZE_REG, 0);

/* Create and initialize a Tx queue */
static int mvpp2_txq_init(struct mvpp2_port *port,
			  struct mvpp2_tx_queue *txq)
{
	u32 val;
	int cpu, desc, desc_per_txq, tx_port_num;
	struct mvpp2_txq_pcpu *txq_pcpu;

	txq->size = port->tx_ring_size;

	/* Allocate memory for Tx descriptors */
	txq->descs = dma_alloc_coherent(port->dev->dev.parent,
					txq->size * MVPP2_DESC_ALIGNED_SIZE,
					&txq->descs_dma, GFP_KERNEL);
	if (!txq->descs)
		return -ENOMEM;

	txq->last_desc = txq->size - 1;

	/* Set Tx descriptors queue starting address - indirect access */
	cpu = get_cpu();
	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_ADDR_REG,
			   txq->descs_dma);
	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_SIZE_REG,
			   txq->size & MVPP2_TXQ_DESC_SIZE_MASK);
	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_INDEX_REG, 0);
	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_RSVD_CLR_REG,
			   txq->id << MVPP2_TXQ_RSVD_CLR_OFFSET);
	val = mvpp2_percpu_read(port->priv, cpu, MVPP2_TXQ_PENDING_REG);
	val &= ~MVPP2_TXQ_PENDING_MASK;
	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PENDING_REG, val);

	/* Calculate base address in prefetch buffer. We reserve 16 descriptors
	 * for each existing TXQ.
	 * TCONTS for PON port must be continuous from 0 to MVPP2_MAX_TCONT
	 * GBE ports assumed to be continuous from 0 to MVPP2_MAX_PORTS
	 */
	desc_per_txq = 16;
	desc = (port->id * MVPP2_MAX_TXQ * desc_per_txq) +
	       (txq->log_id * desc_per_txq);

	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG,
			   MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 |
			   MVPP2_PREF_BUF_THRESH(desc_per_txq / 2));
	put_cpu();
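
	/* For illustration (assuming MVPP2_MAX_TXQ is 8): port 1, logical
	 * queue 2 gets desc = (1 * 8 * 16) + (2 * 16) = 160, so every
	 * (port, queue) pair owns a disjoint 16-descriptor prefetch window
	 * with the threshold at half a window (8 descriptors).
	 */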

	/* WRR / EJP configuration - indirect access */
	tx_port_num = mvpp2_egress_port(port);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);

	val = mvpp2_read(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id));
	val &= ~MVPP2_TXQ_REFILL_PERIOD_ALL_MASK;
	val |= MVPP2_TXQ_REFILL_PERIOD_MASK(1);
	val |= MVPP2_TXQ_REFILL_TOKENS_ALL_MASK;
	mvpp2_write(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id), val);

	val = MVPP2_TXQ_TOKEN_SIZE_MAX;
	mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq->log_id),
		    val);

	for_each_present_cpu(cpu) {
		txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
		txq_pcpu->size = txq->size;
		txq_pcpu->buffs = kmalloc_array(txq_pcpu->size,
						sizeof(*txq_pcpu->buffs),
						GFP_KERNEL);
		if (!txq_pcpu->buffs)
			return -ENOMEM;

		txq_pcpu->count = 0;
		txq_pcpu->reserved_num = 0;
		txq_pcpu->txq_put_index = 0;
		txq_pcpu->txq_get_index = 0;
		txq_pcpu->tso_headers = NULL;

		txq_pcpu->stop_threshold = txq->size - MVPP2_MAX_SKB_DESCS;
		txq_pcpu->wake_threshold = txq_pcpu->stop_threshold / 2;

		txq_pcpu->tso_headers =
			dma_alloc_coherent(port->dev->dev.parent,
					   txq_pcpu->size * TSO_HEADER_SIZE,
					   &txq_pcpu->tso_headers_dma,
					   GFP_KERNEL);
		if (!txq_pcpu->tso_headers)
			return -ENOMEM;
	}

	return 0;
}
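
/* Note on the thresholds set above: stop_threshold leaves room for one
 * worst-case packet (MVPP2_MAX_SKB_DESCS descriptors), so the xmit path
 * can always finish the frame that crosses the limit; waking only at half
 * the stop threshold keeps the queue from bouncing between the stopped
 * and running states under load.
 */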

/* Free allocated TXQ resources */
static void mvpp2_txq_deinit(struct mvpp2_port *port,
			     struct mvpp2_tx_queue *txq)
{
	struct mvpp2_txq_pcpu *txq_pcpu;
	int cpu;

	for_each_present_cpu(cpu) {
		txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
		kfree(txq_pcpu->buffs);

		if (txq_pcpu->tso_headers)
			dma_free_coherent(port->dev->dev.parent,
					  txq_pcpu->size * TSO_HEADER_SIZE,
					  txq_pcpu->tso_headers,
					  txq_pcpu->tso_headers_dma);

		txq_pcpu->tso_headers = NULL;
	}

	if (txq->descs)
		dma_free_coherent(port->dev->dev.parent,
				  txq->size * MVPP2_DESC_ALIGNED_SIZE,
				  txq->descs, txq->descs_dma);

	txq->descs             = NULL;
	txq->last_desc         = 0;
	txq->next_desc_to_proc = 0;
	txq->descs_dma         = 0;

	/* Set minimum bandwidth for disabled TXQs */
	mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->log_id), 0);

	/* Set Tx descriptors queue starting address and size */
	cpu = get_cpu();
	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_ADDR_REG, 0);
	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_SIZE_REG, 0);
	put_cpu();
}

/* Cleanup Tx ports */
static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq)
{
	struct mvpp2_txq_pcpu *txq_pcpu;
	int delay, pending, cpu;
	u32 val;

	cpu = get_cpu();
	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
	val = mvpp2_percpu_read(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG);
	val |= MVPP2_TXQ_DRAIN_EN_MASK;
	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG, val);

	/* The napi queue has been stopped so wait for all packets
	 * to be transmitted.
	 */
	delay = 0;
	do {
		if (delay >= MVPP2_TX_PENDING_TIMEOUT_MSEC) {
			netdev_warn(port->dev,
				    "port %d: cleaning queue %d timed out\n",
				    port->id, txq->log_id);
			break;
		}
		mdelay(1);
		delay++;

		pending = mvpp2_percpu_read(port->priv, cpu,
					    MVPP2_TXQ_PENDING_REG);
		pending &= MVPP2_TXQ_PENDING_MASK;
	} while (pending);

	val &= ~MVPP2_TXQ_DRAIN_EN_MASK;
	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG, val);
	put_cpu();

	for_each_present_cpu(cpu) {
		txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);

		/* Release all packets */
		mvpp2_txq_bufs_free(port, txq, txq_pcpu, txq_pcpu->count);

		/* Reset queue */
		txq_pcpu->count = 0;
		txq_pcpu->txq_put_index = 0;
		txq_pcpu->txq_get_index = 0;
	}
}

/* Cleanup all Tx queues */
static void mvpp2_cleanup_txqs(struct mvpp2_port *port)
{
	struct mvpp2_tx_queue *txq;
	int queue;
	u32 val;

	val = mvpp2_read(port->priv, MVPP2_TX_PORT_FLUSH_REG);

	/* Reset Tx ports and delete Tx queues */
	val |= MVPP2_TX_PORT_FLUSH_MASK(port->id);
	mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);

	for (queue = 0; queue < port->ntxqs; queue++) {
		txq = port->txqs[queue];
		mvpp2_txq_clean(port, txq);
		mvpp2_txq_deinit(port, txq);
	}

	on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);

	val &= ~MVPP2_TX_PORT_FLUSH_MASK(port->id);
	mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);
}

/* Cleanup all Rx queues */
static void mvpp2_cleanup_rxqs(struct mvpp2_port *port)
{
	int queue;

	for (queue = 0; queue < port->nrxqs; queue++)
		mvpp2_rxq_deinit(port, port->rxqs[queue]);
}

/* Init all Rx queues for port */
static int mvpp2_setup_rxqs(struct mvpp2_port *port)
{
	int queue, err;

	for (queue = 0; queue < port->nrxqs; queue++) {
		err = mvpp2_rxq_init(port, port->rxqs[queue]);
		if (err)
			goto err_cleanup;
	}
	return 0;

err_cleanup:
	mvpp2_cleanup_rxqs(port);
	return err;
}

/* Init all tx queues for port */
static int mvpp2_setup_txqs(struct mvpp2_port *port)
{
	struct mvpp2_tx_queue *txq;
	int queue, err;

	for (queue = 0; queue < port->ntxqs; queue++) {
		txq = port->txqs[queue];
		err = mvpp2_txq_init(port, txq);
		if (err)
			goto err_cleanup;
	}

	if (port->has_tx_irqs) {
		mvpp2_tx_time_coal_set(port);
		for (queue = 0; queue < port->ntxqs; queue++) {
			txq = port->txqs[queue];
			mvpp2_tx_pkts_coal_set(port, txq);
		}
	}

	on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);
	return 0;

err_cleanup:
	mvpp2_cleanup_txqs(port);
	return err;
}

/* The callback for per-port interrupt */
static irqreturn_t mvpp2_isr(int irq, void *dev_id)
{
	struct mvpp2_queue_vector *qv = dev_id;

	mvpp2_qvec_interrupt_disable(qv);

	napi_schedule(&qv->napi);

	return IRQ_HANDLED;
}

/* Per-port interrupt for link status changes */
static irqreturn_t mvpp2_link_status_isr(int irq, void *dev_id)
{
	struct mvpp2_port *port = (struct mvpp2_port *)dev_id;
	struct net_device *dev = port->dev;
	bool event = false, link = false;
	u32 val;

	mvpp22_gop_mask_irq(port);

	if (port->gop_id == 0 &&
	    port->phy_interface == PHY_INTERFACE_MODE_10GKR) {
		val = readl(port->base + MVPP22_XLG_INT_STAT);
		if (val & MVPP22_XLG_INT_STAT_LINK) {
			event = true;
			val = readl(port->base + MVPP22_XLG_STATUS);
			if (val & MVPP22_XLG_STATUS_LINK_UP)
				link = true;
		}
	} else if (phy_interface_mode_is_rgmii(port->phy_interface) ||
		   port->phy_interface == PHY_INTERFACE_MODE_SGMII ||
		   port->phy_interface == PHY_INTERFACE_MODE_1000BASEX ||
		   port->phy_interface == PHY_INTERFACE_MODE_2500BASEX) {
		val = readl(port->base + MVPP22_GMAC_INT_STAT);
		if (val & MVPP22_GMAC_INT_STAT_LINK) {
			event = true;
			val = readl(port->base + MVPP2_GMAC_STATUS0);
			if (val & MVPP2_GMAC_STATUS0_LINK_UP)
				link = true;
		}
	}

	if (port->phylink) {
		phylink_mac_change(port->phylink, link);
		goto handled;
	}

	if (!netif_running(dev) || !event)
		goto handled;

	if (link) {
		mvpp2_interrupts_enable(port);

		mvpp2_egress_enable(port);
		mvpp2_ingress_enable(port);
		netif_carrier_on(dev);
		netif_tx_wake_all_queues(dev);
	} else {
		netif_tx_stop_all_queues(dev);
		netif_carrier_off(dev);
		mvpp2_ingress_disable(port);
		mvpp2_egress_disable(port);

		mvpp2_interrupts_disable(port);
	}

handled:
	mvpp22_gop_unmask_irq(port);
	return IRQ_HANDLED;
}

static void mvpp2_timer_set(struct mvpp2_port_pcpu *port_pcpu)
{
	ktime_t interval;

	if (!port_pcpu->timer_scheduled) {
		port_pcpu->timer_scheduled = true;
		interval = MVPP2_TXDONE_HRTIMER_PERIOD_NS;
		hrtimer_start(&port_pcpu->tx_done_timer, interval,
			      HRTIMER_MODE_REL_PINNED);
	}
}

static void mvpp2_tx_proc_cb(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct mvpp2_port *port = netdev_priv(dev);
	struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu);
	unsigned int tx_todo, cause;

	if (!netif_running(dev))
		return;
	port_pcpu->timer_scheduled = false;

	/* Process all the Tx queues */
	cause = (1 << port->ntxqs) - 1;
	tx_todo = mvpp2_tx_done(port, cause, smp_processor_id());

	/* Set the timer in case not all the packets were processed */
	if (tx_todo)
		mvpp2_timer_set(port_pcpu);
}

static enum hrtimer_restart mvpp2_hr_timer_cb(struct hrtimer *timer)
{
	struct mvpp2_port_pcpu *port_pcpu = container_of(timer,
							 struct mvpp2_port_pcpu,
							 tx_done_timer);

	tasklet_schedule(&port_pcpu->tx_done_tasklet);

	return HRTIMER_NORESTART;
}
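
/* Tx-done deferral chain for ports without Tx IRQs: mvpp2_timer_set()
 * arms the pinned hrtimer, mvpp2_hr_timer_cb() only schedules the
 * tasklet, and mvpp2_tx_proc_cb() finally reaps completed descriptors in
 * softirq context, re-arming the timer if any queue still has packets in
 * flight.
 */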

/* Main RX/TX processing routines */

/* Display more error info */
static void mvpp2_rx_error(struct mvpp2_port *port,
			   struct mvpp2_rx_desc *rx_desc)
{
	u32 status = mvpp2_rxdesc_status_get(port, rx_desc);
	size_t sz = mvpp2_rxdesc_size_get(port, rx_desc);
	char *err_str = NULL;

	switch (status & MVPP2_RXD_ERR_CODE_MASK) {
	case MVPP2_RXD_ERR_CRC:
		err_str = "crc";
		break;
	case MVPP2_RXD_ERR_OVERRUN:
		err_str = "overrun";
		break;
	case MVPP2_RXD_ERR_RESOURCE:
		err_str = "resource";
		break;
	}
	if (err_str && net_ratelimit())
		netdev_err(port->dev,
			   "bad rx status %08x (%s error), size=%zu\n",
			   status, err_str, sz);
}

/* Handle RX checksum offload */
static void mvpp2_rx_csum(struct mvpp2_port *port, u32 status,
			  struct sk_buff *skb)
{
	if (((status & MVPP2_RXD_L3_IP4) &&
	     !(status & MVPP2_RXD_IP4_HEADER_ERR)) ||
	    (status & MVPP2_RXD_L3_IP6))
		if (((status & MVPP2_RXD_L4_UDP) ||
		     (status & MVPP2_RXD_L4_TCP)) &&
		     (status & MVPP2_RXD_L4_CSUM_OK)) {
			skb->csum = 0;
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			return;
		}

	skb->ip_summed = CHECKSUM_NONE;
}

/* Reuse skb if possible, or allocate a new skb and add it to BM pool */
static int mvpp2_rx_refill(struct mvpp2_port *port,
			   struct mvpp2_bm_pool *bm_pool, int pool)
{
	dma_addr_t dma_addr;
	phys_addr_t phys_addr;
	void *buf;

	/* No recycle or too many buffers are in use, so allocate a new skb */
	buf = mvpp2_buf_alloc(port, bm_pool, &dma_addr, &phys_addr,
			      GFP_ATOMIC);
	if (!buf)
		return -ENOMEM;

	mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);

	return 0;
}

/* Handle tx checksum */
static u32 mvpp2_skb_tx_csum(struct mvpp2_port *port, struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		int ip_hdr_len = 0;
		u8 l4_proto;
		__be16 l3_proto = vlan_get_protocol(skb);

		if (l3_proto == htons(ETH_P_IP)) {
			struct iphdr *ip4h = ip_hdr(skb);

			/* Calculate IPv4 checksum and L4 checksum */
			ip_hdr_len = ip4h->ihl;
			l4_proto = ip4h->protocol;
		} else if (l3_proto == htons(ETH_P_IPV6)) {
			struct ipv6hdr *ip6h = ipv6_hdr(skb);

			/* Read l4_protocol from one of IPv6 extra headers */
			if (skb_network_header_len(skb) > 0)
				ip_hdr_len = (skb_network_header_len(skb) >> 2);
			l4_proto = ip6h->nexthdr;
		} else {
			return MVPP2_TXD_L4_CSUM_NOT;
		}

		return mvpp2_txq_desc_csum(skb_network_offset(skb),
					   l3_proto, ip_hdr_len, l4_proto);
	}

	return MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE;
}
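
/* Example: for TCP over a plain IPv4 header (ihl = 5), the helper above
 * hands ip_hdr_len = 5 (in 32-bit words) and l4_proto = IPPROTO_TCP to
 * mvpp2_txq_desc_csum(), which encodes them as descriptor command bits;
 * any other L3 protocol falls back to MVPP2_TXD_L4_CSUM_NOT so the
 * hardware does not touch the checksum.
 */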

/* Main rx processing */
static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
		    int rx_todo, struct mvpp2_rx_queue *rxq)
{
	struct net_device *dev = port->dev;
	int rx_received;
	int rx_done = 0;
	u32 rcvd_pkts = 0;
	u32 rcvd_bytes = 0;

	/* Get number of received packets and clamp the to-do */
	rx_received = mvpp2_rxq_received(port, rxq->id);
	if (rx_todo > rx_received)
		rx_todo = rx_received;

	while (rx_done < rx_todo) {
		struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
		struct mvpp2_bm_pool *bm_pool;
		struct sk_buff *skb;
		unsigned int frag_size;
		dma_addr_t dma_addr;
		phys_addr_t phys_addr;
		u32 rx_status;
		int pool, rx_bytes, err;
		void *data;

		rx_done++;
		rx_status = mvpp2_rxdesc_status_get(port, rx_desc);
		rx_bytes = mvpp2_rxdesc_size_get(port, rx_desc);
		rx_bytes -= MVPP2_MH_SIZE;
		dma_addr = mvpp2_rxdesc_dma_addr_get(port, rx_desc);
		phys_addr = mvpp2_rxdesc_cookie_get(port, rx_desc);
		data = (void *)phys_to_virt(phys_addr);

		pool = (rx_status & MVPP2_RXD_BM_POOL_ID_MASK) >>
			MVPP2_RXD_BM_POOL_ID_OFFS;
		bm_pool = &port->priv->bm_pools[pool];

		/* In case of an error, release the requested buffer pointer
		 * to the Buffer Manager. This request process is controlled
		 * by the hardware, and the information about the buffer is
		 * comprised by the RX descriptor.
		 */
		if (rx_status & MVPP2_RXD_ERR_SUMMARY) {
err_drop_frame:
			dev->stats.rx_errors++;
			mvpp2_rx_error(port, rx_desc);
			/* Return the buffer to the pool */
			mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
			continue;
		}

		if (bm_pool->frag_size > PAGE_SIZE)
			frag_size = 0;
		else
			frag_size = bm_pool->frag_size;

		skb = build_skb(data, frag_size);
		if (!skb) {
			netdev_warn(port->dev, "skb build failed\n");
			goto err_drop_frame;
		}

		err = mvpp2_rx_refill(port, bm_pool, pool);
		if (err) {
			netdev_err(port->dev, "failed to refill BM pools\n");
			goto err_drop_frame;
		}

		dma_unmap_single(dev->dev.parent, dma_addr,
				 bm_pool->buf_size, DMA_FROM_DEVICE);

		rcvd_pkts++;
		rcvd_bytes += rx_bytes;

		skb_reserve(skb, MVPP2_MH_SIZE + NET_SKB_PAD);
		skb_put(skb, rx_bytes);
		skb->protocol = eth_type_trans(skb, dev);
		mvpp2_rx_csum(port, rx_status, skb);

		napi_gro_receive(napi, skb);
	}

	if (rcvd_pkts) {
		struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats);

		u64_stats_update_begin(&stats->syncp);
		stats->rx_packets += rcvd_pkts;
		stats->rx_bytes   += rcvd_bytes;
		u64_stats_update_end(&stats->syncp);
	}

	/* Update Rx queue management counters */
	wmb();
	mvpp2_rxq_status_update(port, rxq->id, rx_done, rx_done);

	return rx_done;
}

static inline void
tx_desc_unmap_put(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
		  struct mvpp2_tx_desc *desc)
{
	struct mvpp2_txq_pcpu *txq_pcpu = this_cpu_ptr(txq->pcpu);

	dma_addr_t buf_dma_addr =
		mvpp2_txdesc_dma_addr_get(port, desc);
	size_t buf_sz =
		mvpp2_txdesc_size_get(port, desc);
	if (!IS_TSO_HEADER(txq_pcpu, buf_dma_addr))
		dma_unmap_single(port->dev->dev.parent, buf_dma_addr,
				 buf_sz, DMA_TO_DEVICE);
	mvpp2_txq_desc_put(txq);
}

/* Handle tx fragmentation processing */
static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb,
				 struct mvpp2_tx_queue *aggr_txq,
				 struct mvpp2_tx_queue *txq)
{
	struct mvpp2_txq_pcpu *txq_pcpu = this_cpu_ptr(txq->pcpu);
	struct mvpp2_tx_desc *tx_desc;
	int i;
	dma_addr_t buf_dma_addr;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		void *addr = page_address(frag->page.p) + frag->page_offset;

		tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
		mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
		mvpp2_txdesc_size_set(port, tx_desc, frag->size);

		buf_dma_addr = dma_map_single(port->dev->dev.parent, addr,
					      frag->size, DMA_TO_DEVICE);
		if (dma_mapping_error(port->dev->dev.parent, buf_dma_addr)) {
			mvpp2_txq_desc_put(txq);
			goto cleanup;
		}

		mvpp2_txdesc_dma_addr_set(port, tx_desc, buf_dma_addr);

		if (i == (skb_shinfo(skb)->nr_frags - 1)) {
			/* Last descriptor */
			mvpp2_txdesc_cmd_set(port, tx_desc,
					     MVPP2_TXD_L_DESC);
			mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc);
		} else {
			/* Descriptor in the middle: Not First, Not Last */
			mvpp2_txdesc_cmd_set(port, tx_desc, 0);
			mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc);
		}
	}

	return 0;
cleanup:
	/* Release all descriptors that were used to map fragments of
	 * this packet, as well as the corresponding DMA mappings
	 */
	for (i = i - 1; i >= 0; i--) {
		tx_desc = txq->descs + i;
		tx_desc_unmap_put(port, txq, tx_desc);
	}

	return -ENOMEM;
}

static inline void mvpp2_tso_put_hdr(struct sk_buff *skb,
				     struct net_device *dev,
				     struct mvpp2_tx_queue *txq,
				     struct mvpp2_tx_queue *aggr_txq,
				     struct mvpp2_txq_pcpu *txq_pcpu,
				     int hdr_sz)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct mvpp2_tx_desc *tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
	dma_addr_t addr;

	mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
	mvpp2_txdesc_size_set(port, tx_desc, hdr_sz);

	addr = txq_pcpu->tso_headers_dma +
	       txq_pcpu->txq_put_index * TSO_HEADER_SIZE;
	mvpp2_txdesc_dma_addr_set(port, tx_desc, addr);

	mvpp2_txdesc_cmd_set(port, tx_desc, mvpp2_skb_tx_csum(port, skb) |
					    MVPP2_TXD_F_DESC |
					    MVPP2_TXD_PADDING_DISABLE);
	mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc);
}

static inline int mvpp2_tso_put_data(struct sk_buff *skb,
				     struct net_device *dev, struct tso_t *tso,
				     struct mvpp2_tx_queue *txq,
				     struct mvpp2_tx_queue *aggr_txq,
				     struct mvpp2_txq_pcpu *txq_pcpu,
				     int sz, bool left, bool last)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct mvpp2_tx_desc *tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
	dma_addr_t buf_dma_addr;

	mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
	mvpp2_txdesc_size_set(port, tx_desc, sz);

	buf_dma_addr = dma_map_single(dev->dev.parent, tso->data, sz,
				      DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev->dev.parent, buf_dma_addr))) {
		mvpp2_txq_desc_put(txq);
		return -ENOMEM;
	}

	mvpp2_txdesc_dma_addr_set(port, tx_desc, buf_dma_addr);

	if (!left) {
		mvpp2_txdesc_cmd_set(port, tx_desc, MVPP2_TXD_L_DESC);
		if (last) {
			/* last descriptor in the TCP packet */
			mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc);
			return 0;
		}
	} else {
		mvpp2_txdesc_cmd_set(port, tx_desc, 0);
	}

	mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc);
	return 0;
}

static int mvpp2_tx_tso(struct sk_buff *skb, struct net_device *dev,
			struct mvpp2_tx_queue *txq,
			struct mvpp2_tx_queue *aggr_txq,
			struct mvpp2_txq_pcpu *txq_pcpu)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct tso_t tso;
	int hdr_sz = skb_transport_offset(skb) + tcp_hdrlen(skb);
	int i, len, descs = 0;

	/* Check number of available descriptors */
	if (mvpp2_aggr_desc_num_check(port->priv, aggr_txq,
				      tso_count_descs(skb)) ||
	    mvpp2_txq_reserved_desc_num_proc(port->priv, txq, txq_pcpu,
					     tso_count_descs(skb)))
		return 0;

	tso_start(skb, &tso);
	len = skb->len - hdr_sz;
	while (len > 0) {
		int left = min_t(int, skb_shinfo(skb)->gso_size, len);
		char *hdr = txq_pcpu->tso_headers +
			    txq_pcpu->txq_put_index * TSO_HEADER_SIZE;

		len -= left;
		descs++;

		tso_build_hdr(skb, hdr, &tso, left, len == 0);
		mvpp2_tso_put_hdr(skb, dev, txq, aggr_txq, txq_pcpu, hdr_sz);

		while (left > 0) {
			int sz = min_t(int, tso.size, left);
			left -= sz;
			descs++;

			if (mvpp2_tso_put_data(skb, dev, &tso, txq, aggr_txq,
					       txq_pcpu, sz, left, len == 0))
				goto release;
			tso_build_data(skb, &tso, sz);
		}
	}

	return descs;

release:
	for (i = descs - 1; i >= 0; i--) {
		struct mvpp2_tx_desc *tx_desc = txq->descs + i;
		tx_desc_unmap_put(port, txq, tx_desc);
	}
	return 0;
}
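
/* TSO sizing example: a 9000-byte TCP payload with gso_size = 1448 is cut
 * into ceil(9000 / 1448) = 7 segments; each segment consumes one header
 * descriptor (backed by the per-CPU tso_headers DMA buffer) plus one or
 * more data descriptors, which is why the budget is checked up front with
 * tso_count_descs().
 */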

/* Main tx processing */
static netdev_tx_t mvpp2_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct mvpp2_tx_queue *txq, *aggr_txq;
	struct mvpp2_txq_pcpu *txq_pcpu;
	struct mvpp2_tx_desc *tx_desc;
	dma_addr_t buf_dma_addr;
	int frags = 0;
	u16 txq_id;
	u32 tx_cmd;

	txq_id = skb_get_queue_mapping(skb);
	txq = port->txqs[txq_id];
	txq_pcpu = this_cpu_ptr(txq->pcpu);
	aggr_txq = &port->priv->aggr_txqs[smp_processor_id()];

	if (skb_is_gso(skb)) {
		frags = mvpp2_tx_tso(skb, dev, txq, aggr_txq, txq_pcpu);
		goto out;
	}
	frags = skb_shinfo(skb)->nr_frags + 1;

	/* Check number of available descriptors */
	if (mvpp2_aggr_desc_num_check(port->priv, aggr_txq, frags) ||
	    mvpp2_txq_reserved_desc_num_proc(port->priv, txq,
					     txq_pcpu, frags)) {
		frags = 0;
		goto out;
	}

	/* Get a descriptor for the first part of the packet */
	tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
	mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
	mvpp2_txdesc_size_set(port, tx_desc, skb_headlen(skb));

	buf_dma_addr = dma_map_single(dev->dev.parent, skb->data,
				      skb_headlen(skb), DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev->dev.parent, buf_dma_addr))) {
		mvpp2_txq_desc_put(txq);
		frags = 0;
		goto out;
	}

	mvpp2_txdesc_dma_addr_set(port, tx_desc, buf_dma_addr);

	tx_cmd = mvpp2_skb_tx_csum(port, skb);

	if (frags == 1) {
		/* First and Last descriptor */
		tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC;
		mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd);
		mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc);
	} else {
		/* First but not Last */
		tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_PADDING_DISABLE;
		mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd);
		mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc);

		/* Continue with other skb fragments */
		if (mvpp2_tx_frag_process(port, skb, aggr_txq, txq)) {
			tx_desc_unmap_put(port, txq, tx_desc);
			frags = 0;
		}
	}

out:
	if (frags > 0) {
		struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats);
		struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);

		txq_pcpu->reserved_num -= frags;
		txq_pcpu->count += frags;
		aggr_txq->count += frags;

		/* Enable transmit */
		wmb();
		mvpp2_aggr_txq_pend_desc_add(port, frags);

		if (txq_pcpu->count >= txq_pcpu->stop_threshold)
			netif_tx_stop_queue(nq);

		u64_stats_update_begin(&stats->syncp);
		stats->tx_packets++;
		stats->tx_bytes += skb->len;
		u64_stats_update_end(&stats->syncp);
	} else {
		dev->stats.tx_dropped++;
		dev_kfree_skb_any(skb);
	}

	/* Finalize TX processing */
	if (!port->has_tx_irqs && txq_pcpu->count >= txq->done_pkts_coal)
		mvpp2_txq_done(port, txq, txq_pcpu);

	/* Set the timer in case not all frags were processed */
	if (!port->has_tx_irqs && txq_pcpu->count <= frags &&
	    txq_pcpu->count > 0) {
		struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu);

		mvpp2_timer_set(port_pcpu);
	}

	return NETDEV_TX_OK;
}

static inline void mvpp2_cause_error(struct net_device *dev, int cause)
{
	if (cause & MVPP2_CAUSE_FCS_ERR_MASK)
		netdev_err(dev, "FCS error\n");
	if (cause & MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK)
		netdev_err(dev, "rx fifo overrun error\n");
	if (cause & MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK)
		netdev_err(dev, "tx fifo underrun error\n");
}

static int mvpp2_poll(struct napi_struct *napi, int budget)
{
	u32 cause_rx_tx, cause_rx, cause_tx, cause_misc;
	int rx_done = 0;
	struct mvpp2_port *port = netdev_priv(napi->dev);
	struct mvpp2_queue_vector *qv;
	int cpu = smp_processor_id();

	qv = container_of(napi, struct mvpp2_queue_vector, napi);

	/* Rx/Tx cause register
	 *
	 * Bits 0-15: each bit indicates received packets on the Rx queue
	 * (bit 0 is for Rx queue 0).
	 *
	 * Bits 16-23: each bit indicates transmitted packets on the Tx queue
	 * (bit 16 is for Tx queue 0).
	 *
	 * Each CPU has its own Rx/Tx cause register
	 */
	cause_rx_tx = mvpp2_percpu_read_relaxed(port->priv, qv->sw_thread_id,
						MVPP2_ISR_RX_TX_CAUSE_REG(port->id));

	cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK;
	if (cause_misc) {
		mvpp2_cause_error(port->dev, cause_misc);

		/* Clear the cause register */
		mvpp2_write(port->priv, MVPP2_ISR_MISC_CAUSE_REG, 0);
		mvpp2_percpu_write(port->priv, cpu,
				   MVPP2_ISR_RX_TX_CAUSE_REG(port->id),
				   cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK);
	}

	if (port->has_tx_irqs) {
		cause_tx = cause_rx_tx & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
		if (cause_tx) {
			cause_tx >>= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET;
			mvpp2_tx_done(port, cause_tx, qv->sw_thread_id);
		}
	}

	/* Process RX packets */
	cause_rx = cause_rx_tx &
		   MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(port->priv->hw_version);
	cause_rx <<= qv->first_rxq;
	cause_rx |= qv->pending_cause_rx;
	while (cause_rx && budget > 0) {
		int count;
		struct mvpp2_rx_queue *rxq;

		rxq = mvpp2_get_rx_queue(port, cause_rx);
		if (!rxq)
			break;

		count = mvpp2_rx(port, napi, budget, rxq);
		rx_done += count;
		budget -= count;
		if (budget > 0) {
			/* Clear the bit associated to this Rx queue
			 * so that next iteration will continue from
			 * the next Rx queue.
			 */
			cause_rx &= ~(1 << rxq->logic_rxq);
		}
	}

	if (budget > 0) {
		cause_rx = 0;
		rx_done = budget;
		napi_complete_done(napi, rx_done);

		mvpp2_qvec_interrupt_enable(qv);
	}
	qv->pending_cause_rx = cause_rx;
	return rx_done;
}
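
/* Note: if the NAPI budget runs out mid-scan, the bits still set in
 * cause_rx are parked in pending_cause_rx and OR-ed back in on the next
 * poll, so Rx queues missed in this pass are serviced before interrupts
 * are re-enabled via mvpp2_qvec_interrupt_enable().
 */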

static void mvpp22_mode_reconfigure(struct mvpp2_port *port)
{
	u32 ctrl3;

	/* comphy reconfiguration */
	mvpp22_comphy_init(port);

	/* gop reconfiguration */
	mvpp22_gop_init(port);

	/* Only GOP port 0 has an XLG MAC */
	if (port->gop_id == 0) {
		ctrl3 = readl(port->base + MVPP22_XLG_CTRL3_REG);
		ctrl3 &= ~MVPP22_XLG_CTRL3_MACMODESELECT_MASK;

		if (port->phy_interface == PHY_INTERFACE_MODE_XAUI ||
		    port->phy_interface == PHY_INTERFACE_MODE_10GKR)
			ctrl3 |= MVPP22_XLG_CTRL3_MACMODESELECT_10G;
		else
			ctrl3 |= MVPP22_XLG_CTRL3_MACMODESELECT_GMAC;

		writel(ctrl3, port->base + MVPP22_XLG_CTRL3_REG);
	}

	if (port->gop_id == 0 &&
	    (port->phy_interface == PHY_INTERFACE_MODE_XAUI ||
	     port->phy_interface == PHY_INTERFACE_MODE_10GKR))
		mvpp2_xlg_max_rx_size_set(port);
	else
		mvpp2_gmac_max_rx_size_set(port);
}

/* Set hw internals when starting port */
static void mvpp2_start_dev(struct mvpp2_port *port)
{
	int i;

	mvpp2_txp_max_tx_size_set(port);

	for (i = 0; i < port->nqvecs; i++)
		napi_enable(&port->qvecs[i].napi);

	/* Enable interrupts on all CPUs */
	mvpp2_interrupts_enable(port);

	if (port->priv->hw_version == MVPP22)
		mvpp22_mode_reconfigure(port);

	if (port->phylink) {
		netif_carrier_off(port->dev);
		phylink_start(port->phylink);
	} else {
		/* Phylink isn't used as of now for ACPI, so the MAC has to be
		 * configured manually when the interface is started. This will
		 * be removed as soon as the phylink ACPI support lands in.
		 */
		struct phylink_link_state state = {
			.interface = port->phy_interface,
		};
		mvpp2_mac_config(port->dev, MLO_AN_INBAND, &state);
		mvpp2_mac_link_up(port->dev, MLO_AN_INBAND, port->phy_interface,
				  NULL);
	}

	netif_tx_start_all_queues(port->dev);
}

/* Set hw internals when stopping port */
static void mvpp2_stop_dev(struct mvpp2_port *port)
{
	int i;

	/* Disable interrupts on all CPUs */
	mvpp2_interrupts_disable(port);

	for (i = 0; i < port->nqvecs; i++)
		napi_disable(&port->qvecs[i].napi);

	if (port->phylink)
		phylink_stop(port->phylink);
	phy_power_off(port->comphy);
}

static int mvpp2_check_ringparam_valid(struct net_device *dev,
				       struct ethtool_ringparam *ring)
{
	u16 new_rx_pending = ring->rx_pending;
	u16 new_tx_pending = ring->tx_pending;

	if (ring->rx_pending == 0 || ring->tx_pending == 0)
		return -EINVAL;

	if (ring->rx_pending > MVPP2_MAX_RXD_MAX)
		new_rx_pending = MVPP2_MAX_RXD_MAX;
	else if (!IS_ALIGNED(ring->rx_pending, 16))
		new_rx_pending = ALIGN(ring->rx_pending, 16);

	if (ring->tx_pending > MVPP2_MAX_TXD_MAX)
		new_tx_pending = MVPP2_MAX_TXD_MAX;
	else if (!IS_ALIGNED(ring->tx_pending, 32))
		new_tx_pending = ALIGN(ring->tx_pending, 32);

	/* The Tx ring size cannot be smaller than the minimum number of
	 * descriptors needed for TSO.
	 */
	if (new_tx_pending < MVPP2_MAX_SKB_DESCS)
		new_tx_pending = ALIGN(MVPP2_MAX_SKB_DESCS, 32);

	if (ring->rx_pending != new_rx_pending) {
		netdev_info(dev, "illegal Rx ring size value %d, round to %d\n",
			    ring->rx_pending, new_rx_pending);
		ring->rx_pending = new_rx_pending;
	}

	if (ring->tx_pending != new_tx_pending) {
		netdev_info(dev, "illegal Tx ring size value %d, round to %d\n",
			    ring->tx_pending, new_tx_pending);
		ring->tx_pending = new_tx_pending;
	}

	return 0;
}
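
/* Rounding examples for the checks above: rx_pending = 1000 is not
 * 16-aligned and becomes 1008; tx_pending = 100 becomes 128; and any Tx
 * ring smaller than MVPP2_MAX_SKB_DESCS is raised so one worst-case TSO
 * packet always fits.
 */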

static void mvpp21_get_mac_address(struct mvpp2_port *port, unsigned char *addr)
{
	u32 mac_addr_l, mac_addr_m, mac_addr_h;

	mac_addr_l = readl(port->base + MVPP2_GMAC_CTRL_1_REG);
	mac_addr_m = readl(port->priv->lms_base + MVPP2_SRC_ADDR_MIDDLE);
	mac_addr_h = readl(port->priv->lms_base + MVPP2_SRC_ADDR_HIGH);
	addr[0] = (mac_addr_h >> 24) & 0xFF;
	addr[1] = (mac_addr_h >> 16) & 0xFF;
	addr[2] = (mac_addr_h >> 8) & 0xFF;
	addr[3] = mac_addr_h & 0xFF;
	addr[4] = mac_addr_m & 0xFF;
	addr[5] = (mac_addr_l >> MVPP2_GMAC_SA_LOW_OFFS) & 0xFF;
}

static int mvpp2_irqs_init(struct mvpp2_port *port)
{
	int err, i;

	for (i = 0; i < port->nqvecs; i++) {
		struct mvpp2_queue_vector *qv = port->qvecs + i;

		if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE)
			irq_set_status_flags(qv->irq, IRQ_NO_BALANCING);

		err = request_irq(qv->irq, mvpp2_isr, 0, port->dev->name, qv);
		if (err)
			goto err;

		if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE)
			irq_set_affinity_hint(qv->irq,
					      cpumask_of(qv->sw_thread_id));
	}

	return 0;
err:
	for (i = 0; i < port->nqvecs; i++) {
		struct mvpp2_queue_vector *qv = port->qvecs + i;

		irq_set_affinity_hint(qv->irq, NULL);
		free_irq(qv->irq, qv);
	}

	return err;
}

static void mvpp2_irqs_deinit(struct mvpp2_port *port)
{
	int i;

	for (i = 0; i < port->nqvecs; i++) {
		struct mvpp2_queue_vector *qv = port->qvecs + i;

		irq_set_affinity_hint(qv->irq, NULL);
		irq_clear_status_flags(qv->irq, IRQ_NO_BALANCING);
		free_irq(qv->irq, qv);
	}
}

static bool mvpp22_rss_is_supported(void)
{
	return queue_mode == MVPP2_QDIST_MULTI_MODE;
}

static int mvpp2_open(struct net_device *dev)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct mvpp2 *priv = port->priv;
	unsigned char mac_bcast[ETH_ALEN] = {
			0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
	bool valid = false;
	int err;

	err = mvpp2_prs_mac_da_accept(port, mac_bcast, true);
	if (err) {
		netdev_err(dev, "mvpp2_prs_mac_da_accept BC failed\n");
		return err;
	}
	err = mvpp2_prs_mac_da_accept(port, dev->dev_addr, true);
	if (err) {
		netdev_err(dev, "mvpp2_prs_mac_da_accept own addr failed\n");
		return err;
	}
	err = mvpp2_prs_tag_mode_set(port->priv, port->id, MVPP2_TAG_TYPE_MH);
	if (err) {
		netdev_err(dev, "mvpp2_prs_tag_mode_set failed\n");
		return err;
	}
	err = mvpp2_prs_def_flow(port);
	if (err) {
		netdev_err(dev, "mvpp2_prs_def_flow failed\n");
		return err;
	}

	/* Allocate the Rx/Tx queues */
	err = mvpp2_setup_rxqs(port);
	if (err) {
		netdev_err(port->dev, "cannot allocate Rx queues\n");
		return err;
	}

	err = mvpp2_setup_txqs(port);
	if (err) {
		netdev_err(port->dev, "cannot allocate Tx queues\n");
		goto err_cleanup_rxqs;
	}

	err = mvpp2_irqs_init(port);
	if (err) {
		netdev_err(port->dev, "cannot init IRQs\n");
		goto err_cleanup_txqs;
	}

	/* Phylink isn't supported yet in ACPI mode */
	if (port->of_node) {
		err = phylink_of_phy_connect(port->phylink, port->of_node, 0);
		if (err) {
			netdev_err(port->dev, "could not attach PHY (%d)\n",
				   err);
			goto err_free_irq;
		}

		valid = true;
	}

	if (priv->hw_version == MVPP22 && port->link_irq) {
		err = request_irq(port->link_irq, mvpp2_link_status_isr, 0,
				  dev->name, port);
		if (err) {
			netdev_err(port->dev, "cannot request link IRQ %d\n",
				   port->link_irq);
			goto err_free_irq;
		}

		mvpp22_gop_setup_irq(port);

		/* In default link is down */
		netif_carrier_off(port->dev);

		valid = true;
	} else {
		port->link_irq = 0;
	}

	if (!valid) {
		netdev_err(port->dev,
			   "invalid configuration: no dt or link IRQ");
		goto err_free_irq;
	}

	/* Unmask interrupts on all CPUs */
	on_each_cpu(mvpp2_interrupts_unmask, port, 1);
	mvpp2_shared_interrupt_mask_unmask(port, false);

	mvpp2_start_dev(port);

	/* Start hardware statistics gathering */
	queue_delayed_work(priv->stats_queue, &port->stats_work,
			   MVPP2_MIB_COUNTERS_STATS_DELAY);

	return 0;

err_free_irq:
	mvpp2_irqs_deinit(port);
err_cleanup_txqs:
	mvpp2_cleanup_txqs(port);
err_cleanup_rxqs:
	mvpp2_cleanup_rxqs(port);
	return err;
}

static int mvpp2_stop(struct net_device *dev)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct mvpp2_port_pcpu *port_pcpu;
	int cpu;

	mvpp2_stop_dev(port);

	/* Mask interrupts on all CPUs */
	on_each_cpu(mvpp2_interrupts_mask, port, 1);
	mvpp2_shared_interrupt_mask_unmask(port, true);

	if (port->phylink)
		phylink_disconnect_phy(port->phylink);
	if (port->link_irq)
		free_irq(port->link_irq, port);

	mvpp2_irqs_deinit(port);
	if (!port->has_tx_irqs) {
		for_each_present_cpu(cpu) {
			port_pcpu = per_cpu_ptr(port->pcpu, cpu);

			hrtimer_cancel(&port_pcpu->tx_done_timer);
			port_pcpu->timer_scheduled = false;
			tasklet_kill(&port_pcpu->tx_done_tasklet);
		}
	}
	mvpp2_cleanup_rxqs(port);
	mvpp2_cleanup_txqs(port);

	cancel_delayed_work_sync(&port->stats_work);

	return 0;
}

static int mvpp2_prs_mac_da_accept_list(struct mvpp2_port *port,
					struct netdev_hw_addr_list *list)
{
	struct netdev_hw_addr *ha;
	int ret;

	netdev_hw_addr_list_for_each(ha, list) {
		ret = mvpp2_prs_mac_da_accept(port, ha->addr, true);
		if (ret)
			return ret;
	}

	return 0;
}

static void mvpp2_set_rx_promisc(struct mvpp2_port *port, bool enable)
{
	if (!enable && (port->dev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
		mvpp2_prs_vid_enable_filtering(port);
	else
		mvpp2_prs_vid_disable_filtering(port);

	mvpp2_prs_mac_promisc_set(port->priv, port->id,
				  MVPP2_PRS_L2_UNI_CAST, enable);

	mvpp2_prs_mac_promisc_set(port->priv, port->id,
				  MVPP2_PRS_L2_MULTI_CAST, enable);
}

static void mvpp2_set_rx_mode(struct net_device *dev)
{
	struct mvpp2_port *port = netdev_priv(dev);

	/* Clear the whole UC and MC list */
	mvpp2_prs_mac_del_all(port);

	if (dev->flags & IFF_PROMISC) {
		mvpp2_set_rx_promisc(port, true);
		return;
	}

	mvpp2_set_rx_promisc(port, false);

	if (netdev_uc_count(dev) > MVPP2_PRS_MAC_UC_FILT_MAX ||
	    mvpp2_prs_mac_da_accept_list(port, &dev->uc))
		mvpp2_prs_mac_promisc_set(port->priv, port->id,
					  MVPP2_PRS_L2_UNI_CAST, true);

	if (dev->flags & IFF_ALLMULTI) {
		mvpp2_prs_mac_promisc_set(port->priv, port->id,
					  MVPP2_PRS_L2_MULTI_CAST, true);
		return;
	}

	if (netdev_mc_count(dev) > MVPP2_PRS_MAC_MC_FILT_MAX ||
	    mvpp2_prs_mac_da_accept_list(port, &dev->mc))
		mvpp2_prs_mac_promisc_set(port->priv, port->id,
					  MVPP2_PRS_L2_MULTI_CAST, true);
}

static int mvpp2_set_mac_address(struct net_device *dev, void *p)
{
	const struct sockaddr *addr = p;
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	err = mvpp2_prs_update_mac_da(dev, addr->sa_data);
	if (err) {
		/* Reconfigure the parser to accept the original MAC address */
		mvpp2_prs_update_mac_da(dev, dev->dev_addr);
		netdev_err(dev, "failed to change MAC address\n");
	}
	return err;
}

static int mvpp2_change_mtu(struct net_device *dev, int mtu)
{
	struct mvpp2_port *port = netdev_priv(dev);
	bool running = netif_running(dev);
	int err;

	if (!IS_ALIGNED(MVPP2_RX_PKT_SIZE(mtu), 8)) {
		netdev_info(dev, "illegal MTU value %d, round to %d\n", mtu,
			    ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8));
		mtu = ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8);
	}

	if (running)
		mvpp2_stop_dev(port);

	err = mvpp2_bm_update_mtu(dev, mtu);
	if (err) {
		netdev_err(dev, "failed to change MTU\n");
		/* Reconfigure BM to the original MTU */
		mvpp2_bm_update_mtu(dev, dev->mtu);
	} else {
		port->pkt_size = MVPP2_RX_PKT_SIZE(mtu);
	}

	if (running) {
		mvpp2_start_dev(port);
		mvpp2_egress_enable(port);
		mvpp2_ingress_enable(port);
	}

	return err;
}

static void
mvpp2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct mvpp2_port *port = netdev_priv(dev);
	unsigned int start;
	int cpu;

	for_each_possible_cpu(cpu) {
		struct mvpp2_pcpu_stats *cpu_stats;
		u64 rx_packets;
		u64 rx_bytes;
		u64 tx_packets;
		u64 tx_bytes;

		cpu_stats = per_cpu_ptr(port->stats, cpu);
		do {
			start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
			rx_packets = cpu_stats->rx_packets;
			rx_bytes   = cpu_stats->rx_bytes;
			tx_packets = cpu_stats->tx_packets;
			tx_bytes   = cpu_stats->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));

		stats->rx_packets += rx_packets;
		stats->rx_bytes   += rx_bytes;
		stats->tx_packets += tx_packets;
		stats->tx_bytes   += tx_bytes;
	}

	stats->rx_errors	= dev->stats.rx_errors;
	stats->rx_dropped	= dev->stats.rx_dropped;
	stats->tx_dropped	= dev->stats.tx_dropped;
}
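
/* The do/while above is the standard u64_stats seqcount pattern: the
 * reader retries its snapshot whenever it raced with a writer (the Rx/Tx
 * hot paths inside u64_stats_update_begin/end), which keeps the 64-bit
 * counters consistent without locking, including on 32-bit CPUs.
 */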

static int mvpp2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mvpp2_port *port = netdev_priv(dev);

	if (!port->phylink)
		return -ENOTSUPP;

	return phylink_mii_ioctl(port->phylink, ifr, cmd);
}

static int mvpp2_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
{
	struct mvpp2_port *port = netdev_priv(dev);
	int ret;

	ret = mvpp2_prs_vid_entry_add(port, vid);
	if (ret)
		netdev_err(dev, "rx-vlan-filter offloading cannot accept more than %d VIDs per port\n",
			   MVPP2_PRS_VLAN_FILT_MAX - 1);
	return ret;
}

static int mvpp2_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
{
	struct mvpp2_port *port = netdev_priv(dev);

	mvpp2_prs_vid_entry_remove(port, vid);
	return 0;
}

static int mvpp2_set_features(struct net_device *dev,
			      netdev_features_t features)
{
	netdev_features_t changed = dev->features ^ features;
	struct mvpp2_port *port = netdev_priv(dev);

	if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) {
		if (features & NETIF_F_HW_VLAN_CTAG_FILTER) {
			mvpp2_prs_vid_enable_filtering(port);
		} else {
			/* Invalidate all registered VID filters for this
			 * port
			 */
			mvpp2_prs_vid_remove_all(port);

			mvpp2_prs_vid_disable_filtering(port);
		}
	}

	if (changed & NETIF_F_RXHASH) {
		if (features & NETIF_F_RXHASH)
			mvpp22_rss_enable(port);
		else
			mvpp22_rss_disable(port);
	}

	return 0;
}

/* Ethtool methods */

static int mvpp2_ethtool_nway_reset(struct net_device *dev)
{
	struct mvpp2_port *port = netdev_priv(dev);

	if (!port->phylink)
		return -ENOTSUPP;

	return phylink_ethtool_nway_reset(port->phylink);
}

/* Set interrupt coalescing for ethtools */
static int mvpp2_ethtool_set_coalesce(struct net_device *dev,
				      struct ethtool_coalesce *c)
{
	struct mvpp2_port *port = netdev_priv(dev);
	int queue;

	for (queue = 0; queue < port->nrxqs; queue++) {
		struct mvpp2_rx_queue *rxq = port->rxqs[queue];

		rxq->time_coal = c->rx_coalesce_usecs;
		rxq->pkts_coal = c->rx_max_coalesced_frames;
		mvpp2_rx_pkts_coal_set(port, rxq);
		mvpp2_rx_time_coal_set(port, rxq);
	}

	if (port->has_tx_irqs) {
		port->tx_time_coal = c->tx_coalesce_usecs;
		mvpp2_tx_time_coal_set(port);
	}

	for (queue = 0; queue < port->ntxqs; queue++) {
		struct mvpp2_tx_queue *txq = port->txqs[queue];

		txq->done_pkts_coal = c->tx_max_coalesced_frames;

		if (port->has_tx_irqs)
			mvpp2_tx_pkts_coal_set(port, txq);
	}

	return 0;
}

/* get coalescing for ethtools */
static int mvpp2_ethtool_get_coalesce(struct net_device *dev,
				      struct ethtool_coalesce *c)
{
	struct mvpp2_port *port = netdev_priv(dev);

	c->rx_coalesce_usecs       = port->rxqs[0]->time_coal;
	c->rx_max_coalesced_frames = port->rxqs[0]->pkts_coal;
	c->tx_max_coalesced_frames = port->txqs[0]->done_pkts_coal;
	c->tx_coalesce_usecs       = port->tx_time_coal;
	return 0;
}

static void mvpp2_ethtool_get_drvinfo(struct net_device *dev,
				      struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->driver, MVPP2_DRIVER_NAME,
		sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, MVPP2_DRIVER_VERSION,
		sizeof(drvinfo->version));
	strlcpy(drvinfo->bus_info, dev_name(&dev->dev),
		sizeof(drvinfo->bus_info));
}

static void mvpp2_ethtool_get_ringparam(struct net_device *dev,
					struct ethtool_ringparam *ring)
{
	struct mvpp2_port *port = netdev_priv(dev);

	ring->rx_max_pending = MVPP2_MAX_RXD_MAX;
	ring->tx_max_pending = MVPP2_MAX_TXD_MAX;
	ring->rx_pending = port->rx_ring_size;
	ring->tx_pending = port->tx_ring_size;
}

static int mvpp2_ethtool_set_ringparam(struct net_device *dev,
				       struct ethtool_ringparam *ring)
{
	struct mvpp2_port *port = netdev_priv(dev);
	u16 prev_rx_ring_size = port->rx_ring_size;
	u16 prev_tx_ring_size = port->tx_ring_size;
	int err;

	err = mvpp2_check_ringparam_valid(dev, ring);
	if (err)
		return err;

	if (!netif_running(dev)) {
		port->rx_ring_size = ring->rx_pending;
		port->tx_ring_size = ring->tx_pending;
		return 0;
	}

	/* The interface is running, so we have to force a
	 * reallocation of the queues
	 */
	mvpp2_stop_dev(port);
	mvpp2_cleanup_rxqs(port);
	mvpp2_cleanup_txqs(port);

	port->rx_ring_size = ring->rx_pending;
	port->tx_ring_size = ring->tx_pending;

	err = mvpp2_setup_rxqs(port);
	if (err) {
		/* Reallocate Rx queues with the original ring size */
		port->rx_ring_size = prev_rx_ring_size;
		ring->rx_pending = prev_rx_ring_size;
		err = mvpp2_setup_rxqs(port);
		if (err)
			goto err_out;
	}
	err = mvpp2_setup_txqs(port);
	if (err) {
		/* Reallocate Tx queues with the original ring size */
		port->tx_ring_size = prev_tx_ring_size;
		ring->tx_pending = prev_tx_ring_size;
		err = mvpp2_setup_txqs(port);
		if (err)
			goto err_clean_rxqs;
	}

	mvpp2_start_dev(port);
	mvpp2_egress_enable(port);
	mvpp2_ingress_enable(port);

	return 0;

err_clean_rxqs:
	mvpp2_cleanup_rxqs(port);
err_out:
	netdev_err(dev, "failed to change ring parameters");
	return err;
}

static void mvpp2_ethtool_get_pause_param(struct net_device *dev,
					  struct ethtool_pauseparam *pause)
{
	struct mvpp2_port *port = netdev_priv(dev);

	if (!port->phylink)
		return;

	phylink_ethtool_get_pauseparam(port->phylink, pause);
}

static int mvpp2_ethtool_set_pause_param(struct net_device *dev,
					 struct ethtool_pauseparam *pause)
{
	struct mvpp2_port *port = netdev_priv(dev);

	if (!port->phylink)
		return -ENOTSUPP;

	return phylink_ethtool_set_pauseparam(port->phylink, pause);
}

static int mvpp2_ethtool_get_link_ksettings(struct net_device *dev,
					    struct ethtool_link_ksettings *cmd)
{
	struct mvpp2_port *port = netdev_priv(dev);

	if (!port->phylink)
		return -ENOTSUPP;

	return phylink_ethtool_ksettings_get(port->phylink, cmd);
}

static int mvpp2_ethtool_set_link_ksettings(struct net_device *dev,
					    const struct ethtool_link_ksettings *cmd)
{
	struct mvpp2_port *port = netdev_priv(dev);

	if (!port->phylink)
		return -ENOTSUPP;

	return phylink_ethtool_ksettings_set(port->phylink, cmd);
}

static int mvpp2_ethtool_get_rxnfc(struct net_device *dev,
				   struct ethtool_rxnfc *info, u32 *rules)
{
	struct mvpp2_port *port = netdev_priv(dev);
	int ret = 0;

	if (!mvpp22_rss_is_supported())
		return -EOPNOTSUPP;

	switch (info->cmd) {
	case ETHTOOL_GRXFH:
		ret = mvpp2_ethtool_rxfh_get(port, info);
		break;
	case ETHTOOL_GRXRINGS:
		info->data = port->nrxqs;
		break;
	default:
		return -ENOTSUPP;
	}

	return ret;
}

static int mvpp2_ethtool_set_rxnfc(struct net_device *dev,
				   struct ethtool_rxnfc *info)
{
	struct mvpp2_port *port = netdev_priv(dev);
	int ret = 0;

	if (!mvpp22_rss_is_supported())
		return -EOPNOTSUPP;

	switch (info->cmd) {
	case ETHTOOL_SRXFH:
		ret = mvpp2_ethtool_rxfh_set(port, info);
		break;
	default:
		return -EOPNOTSUPP;
	}
	return ret;
}

static u32 mvpp2_ethtool_get_rxfh_indir_size(struct net_device *dev)
{
	return mvpp22_rss_is_supported() ? MVPP22_RSS_TABLE_ENTRIES : 0;
}

static int mvpp2_ethtool_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
				  u8 *hfunc)
{
	struct mvpp2_port *port = netdev_priv(dev);

	if (!mvpp22_rss_is_supported())
		return -EOPNOTSUPP;

	if (indir)
		memcpy(indir, port->indir,
		       ARRAY_SIZE(port->indir) * sizeof(port->indir[0]));

	if (hfunc)
		*hfunc = ETH_RSS_HASH_CRC32;

	return 0;
}

static int mvpp2_ethtool_set_rxfh(struct net_device *dev, const u32 *indir,
				  const u8 *key, const u8 hfunc)
{
	struct mvpp2_port *port = netdev_priv(dev);

	if (!mvpp22_rss_is_supported())
		return -EOPNOTSUPP;

	if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_CRC32)
		return -EOPNOTSUPP;

	if (key)
		return -EOPNOTSUPP;

	if (indir) {
		memcpy(port->indir, indir,
		       ARRAY_SIZE(port->indir) * sizeof(port->indir[0]));
		mvpp22_rss_fill_table(port, port->id);
	}

	return 0;
}
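
/* For illustration: port->indir[] is the RSS indirection table, one Rx
 * queue index per hash bucket. A table of { 0, 1, 2, 3, 0, 1, ... } set
 * via "ethtool -X" spreads flows over four queues: the CRC32 hash picks
 * the bucket and mvpp22_rss_fill_table() programs the mapping into the
 * hardware.
 */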

static const struct net_device_ops mvpp2_netdev_ops = {
	.ndo_open		= mvpp2_open,
	.ndo_stop		= mvpp2_stop,
	.ndo_start_xmit		= mvpp2_tx,
	.ndo_set_rx_mode	= mvpp2_set_rx_mode,
	.ndo_set_mac_address	= mvpp2_set_mac_address,
	.ndo_change_mtu		= mvpp2_change_mtu,
	.ndo_get_stats64	= mvpp2_get_stats64,
	.ndo_do_ioctl		= mvpp2_ioctl,
	.ndo_vlan_rx_add_vid	= mvpp2_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= mvpp2_vlan_rx_kill_vid,
	.ndo_set_features	= mvpp2_set_features,
};

static const struct ethtool_ops mvpp2_eth_tool_ops = {
	.nway_reset		= mvpp2_ethtool_nway_reset,
	.get_link		= ethtool_op_get_link,
	.set_coalesce		= mvpp2_ethtool_set_coalesce,
	.get_coalesce		= mvpp2_ethtool_get_coalesce,
	.get_drvinfo		= mvpp2_ethtool_get_drvinfo,
	.get_ringparam		= mvpp2_ethtool_get_ringparam,
	.set_ringparam		= mvpp2_ethtool_set_ringparam,
	.get_strings		= mvpp2_ethtool_get_strings,
	.get_ethtool_stats	= mvpp2_ethtool_get_stats,
	.get_sset_count		= mvpp2_ethtool_get_sset_count,
	.get_pauseparam		= mvpp2_ethtool_get_pause_param,
	.set_pauseparam		= mvpp2_ethtool_set_pause_param,
	.get_link_ksettings	= mvpp2_ethtool_get_link_ksettings,
	.set_link_ksettings	= mvpp2_ethtool_set_link_ksettings,
	.get_rxnfc		= mvpp2_ethtool_get_rxnfc,
	.set_rxnfc		= mvpp2_ethtool_set_rxnfc,
	.get_rxfh_indir_size	= mvpp2_ethtool_get_rxfh_indir_size,
	.get_rxfh		= mvpp2_ethtool_get_rxfh,
	.set_rxfh		= mvpp2_ethtool_set_rxfh,
};

/* Used for PPv2.1, or PPv2.2 with the old Device Tree binding that
 * had a single IRQ defined per-port.
 */
static int mvpp2_simple_queue_vectors_init(struct mvpp2_port *port,
					   struct device_node *port_node)
{
	struct mvpp2_queue_vector *v = &port->qvecs[0];

	v->first_rxq = 0;
	v->nrxqs = port->nrxqs;
	v->type = MVPP2_QUEUE_VECTOR_SHARED;
	v->sw_thread_id = 0;
	v->sw_thread_mask = *cpumask_bits(cpu_online_mask);
	v->port = port;
	v->irq = irq_of_parse_and_map(port_node, 0);
	if (v->irq <= 0)
		return -EINVAL;
	netif_napi_add(port->dev, &v->napi, mvpp2_poll,
		       NAPI_POLL_WEIGHT);

	port->nqvecs = 1;

	return 0;
}

static int mvpp2_multi_queue_vectors_init(struct mvpp2_port *port,
					  struct device_node *port_node)
{
	struct mvpp2_queue_vector *v;
	int i, ret;

	port->nqvecs = num_possible_cpus();
	if (queue_mode == MVPP2_QDIST_SINGLE_MODE)
		port->nqvecs += 1;

	for (i = 0; i < port->nqvecs; i++) {
		char irqname[16];

		v = port->qvecs + i;

		v->port = port;
		v->type = MVPP2_QUEUE_VECTOR_PRIVATE;
		v->sw_thread_id = i;
		v->sw_thread_mask = BIT(i);

		snprintf(irqname, sizeof(irqname), "tx-cpu%d", i);

		if (queue_mode == MVPP2_QDIST_MULTI_MODE) {
			v->first_rxq = i * MVPP2_DEFAULT_RXQ;
			v->nrxqs = MVPP2_DEFAULT_RXQ;
		} else if (queue_mode == MVPP2_QDIST_SINGLE_MODE &&
			   i == (port->nqvecs - 1)) {
			v->first_rxq = 0;
			v->nrxqs = port->nrxqs;
			v->type = MVPP2_QUEUE_VECTOR_SHARED;
			strncpy(irqname, "rx-shared", sizeof(irqname));
		}

		if (port_node)
			v->irq = of_irq_get_byname(port_node, irqname);
		else
			v->irq = fwnode_irq_get(port->fwnode, i);
		if (v->irq <= 0) {
			ret = -EINVAL;
			goto err;
		}

		netif_napi_add(port->dev, &v->napi, mvpp2_poll,
			       NAPI_POLL_WEIGHT);
	}

	return 0;

err:
	for (i = 0; i < port->nqvecs; i++)
		irq_dispose_mapping(port->qvecs[i].irq);
	return ret;
}

static int mvpp2_queue_vectors_init(struct mvpp2_port *port,
				    struct device_node *port_node)
{
	if (port->has_tx_irqs)
		return mvpp2_multi_queue_vectors_init(port, port_node);
	else
		return mvpp2_simple_queue_vectors_init(port, port_node);
}

static void mvpp2_queue_vectors_deinit(struct mvpp2_port *port)
{
	int i;

	for (i = 0; i < port->nqvecs; i++)
		irq_dispose_mapping(port->qvecs[i].irq);
}

/* Configure Rx queue group interrupt for this port */
static void mvpp2_rx_irqs_setup(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	u32 val;
	int i;

	if (priv->hw_version == MVPP21) {
		mvpp2_write(priv, MVPP21_ISR_RXQ_GROUP_REG(port->id),
			    port->nrxqs);
		return;
	}

	/* Handle the more complicated PPv2.2 case */
	for (i = 0; i < port->nqvecs; i++) {
		struct mvpp2_queue_vector *qv = port->qvecs + i;

		if (!qv->nrxqs)
			continue;

		val = qv->sw_thread_id;
		val |= port->id << MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET;
		mvpp2_write(priv, MVPP22_ISR_RXQ_GROUP_INDEX_REG, val);

		val = qv->first_rxq;
		val |= qv->nrxqs << MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET;
		mvpp2_write(priv, MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG, val);
	}
}

/* Initialize port HW */
static int mvpp2_port_init(struct mvpp2_port *port)
{
	struct device *dev = port->dev->dev.parent;
	struct mvpp2 *priv = port->priv;
	struct mvpp2_txq_pcpu *txq_pcpu;
	int queue, cpu, err;

	/* Checks for hardware constraints */
	if (port->first_rxq + port->nrxqs >
	    MVPP2_MAX_PORTS * priv->max_port_rxqs)
		return -EINVAL;

	if (port->nrxqs % MVPP2_DEFAULT_RXQ ||
	    port->nrxqs > priv->max_port_rxqs || port->ntxqs > MVPP2_MAX_TXQ)
		return -EINVAL;

	/* Disable port */
	mvpp2_egress_disable(port);
	mvpp2_port_disable(port);

	port->tx_time_coal = MVPP2_TXDONE_COAL_USEC;

	port->txqs = devm_kcalloc(dev, port->ntxqs, sizeof(*port->txqs),
				  GFP_KERNEL);
	if (!port->txqs)
		return -ENOMEM;

	/* Associate physical Tx queues to this port and initialize.
	 * The mapping is predefined.
	 */
	for (queue = 0; queue < port->ntxqs; queue++) {
		int queue_phy_id = mvpp2_txq_phys(port->id, queue);
		struct mvpp2_tx_queue *txq;

		txq = devm_kzalloc(dev, sizeof(*txq), GFP_KERNEL);
		if (!txq) {
			err = -ENOMEM;
			goto err_free_percpu;
		}

		txq->pcpu = alloc_percpu(struct mvpp2_txq_pcpu);
		if (!txq->pcpu) {
			err = -ENOMEM;
			goto err_free_percpu;
		}

		txq->id = queue_phy_id;
		txq->log_id = queue;
		txq->done_pkts_coal = MVPP2_TXDONE_COAL_PKTS_THRESH;
		for_each_present_cpu(cpu) {
			txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
			txq_pcpu->cpu = cpu;
		}

		port->txqs[queue] = txq;
	}

	port->rxqs = devm_kcalloc(dev, port->nrxqs, sizeof(*port->rxqs),
				  GFP_KERNEL);
	if (!port->rxqs) {
		err = -ENOMEM;
		goto err_free_percpu;
	}

	/* Allocate and initialize Rx queue for this port */
	for (queue = 0; queue < port->nrxqs; queue++) {
		struct mvpp2_rx_queue *rxq;

		/* Map physical Rx queue to port's logical Rx queue */
		rxq = devm_kzalloc(dev, sizeof(*rxq), GFP_KERNEL);
		if (!rxq) {
			err = -ENOMEM;
			goto err_free_percpu;
		}
		/* Map this Rx queue to a physical queue */
		rxq->id = port->first_rxq + queue;
		rxq->port = port->id;
		rxq->logic_rxq = queue;

		port->rxqs[queue] = rxq;
	}

	mvpp2_rx_irqs_setup(port);

	/* Create Rx descriptor rings */
	for (queue = 0; queue < port->nrxqs; queue++) {
		struct mvpp2_rx_queue *rxq = port->rxqs[queue];

		rxq->size = port->rx_ring_size;
		rxq->pkts_coal = MVPP2_RX_COAL_PKTS;
		rxq->time_coal = MVPP2_RX_COAL_USEC;
	}

	mvpp2_ingress_disable(port);

	/* Port default configuration */
	mvpp2_defaults_set(port);

	/* Port's classifier configuration */
	mvpp2_cls_oversize_rxq_set(port);
	mvpp2_cls_port_config(port);

	if (mvpp22_rss_is_supported())
		mvpp22_rss_port_init(port);

	/* Provide an initial Rx packet size */
	port->pkt_size = MVPP2_RX_PKT_SIZE(port->dev->mtu);

	/* Initialize pools for swf */
	err = mvpp2_swf_bm_pool_init(port);
	if (err)
		goto err_free_percpu;

	return 0;

err_free_percpu:
	for (queue = 0; queue < port->ntxqs; queue++) {
		if (!port->txqs[queue])
			continue;
		free_percpu(port->txqs[queue]->pcpu);
	}
	return err;
}
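
/* Mapping note: the Tx side uses a predefined logical-to-physical queue
 * layout via mvpp2_txq_phys(port->id, queue), while Rx queue N of this
 * port is simply physical queue first_rxq + N. The per-CPU txq_pcpu->cpu
 * field initialized here is what later lets mvpp2_txq_done() check that
 * completions run on the owning CPU.
 */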

/* Checks if the port DT description has the TX interrupts
 * described. On PPv2.1, there are no such interrupts. On PPv2.2,
 * they are available, but we need to keep support for old DTs.
 */
static bool mvpp2_port_has_tx_irqs(struct mvpp2 *priv,
				   struct device_node *port_node)
{
	char *irqs[5] = { "rx-shared", "tx-cpu0", "tx-cpu1",
			  "tx-cpu2", "tx-cpu3" };
	int ret, i;

	if (priv->hw_version == MVPP21)
		return false;

	for (i = 0; i < 5; i++) {
		ret = of_property_match_string(port_node, "interrupt-names",
					       irqs[i]);
		if (ret < 0)
			return false;
	}

	return true;
}

static void mvpp2_port_copy_mac_addr(struct net_device *dev, struct mvpp2 *priv,
				     struct fwnode_handle *fwnode,
				     char **mac_from)
{
	struct mvpp2_port *port = netdev_priv(dev);
	char hw_mac_addr[ETH_ALEN] = {0};
	char fw_mac_addr[ETH_ALEN];

	if (fwnode_get_mac_address(fwnode, fw_mac_addr, ETH_ALEN)) {
		*mac_from = "firmware node";
		ether_addr_copy(dev->dev_addr, fw_mac_addr);
		return;
	}

	if (priv->hw_version == MVPP21) {
		mvpp21_get_mac_address(port, hw_mac_addr);
		if (is_valid_ether_addr(hw_mac_addr)) {
			*mac_from = "hardware";
			ether_addr_copy(dev->dev_addr, hw_mac_addr);
			return;
		}
	}

	*mac_from = "random";
	eth_hw_addr_random(dev);
}

static void mvpp2_phylink_validate(struct net_device *dev,
				   unsigned long *supported,
				   struct phylink_link_state *state)
{
	struct mvpp2_port *port = netdev_priv(dev);
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };

	/* Invalid combinations */
	switch (state->interface) {
	case PHY_INTERFACE_MODE_10GKR:
	case PHY_INTERFACE_MODE_XAUI:
		if (port->gop_id != 0)
			goto empty_set;
		break;
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII_RXID:
	case PHY_INTERFACE_MODE_RGMII_TXID:
		if (port->priv->hw_version == MVPP22 && port->gop_id == 0)
			goto empty_set;
		break;
	default:
		break;
	}

	phylink_set(mask, Autoneg);
	phylink_set_port_modes(mask);

	switch (state->interface) {
	case PHY_INTERFACE_MODE_10GKR:
	case PHY_INTERFACE_MODE_XAUI:
	case PHY_INTERFACE_MODE_NA:
		if (port->gop_id == 0) {
			phylink_set(mask, 10000baseT_Full);
			phylink_set(mask, 10000baseCR_Full);
			phylink_set(mask, 10000baseSR_Full);
			phylink_set(mask, 10000baseLR_Full);
			phylink_set(mask, 10000baseLRM_Full);
			phylink_set(mask, 10000baseER_Full);
			phylink_set(mask, 10000baseKR_Full);
		}
		/* Fall-through */
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII_RXID:
	case PHY_INTERFACE_MODE_RGMII_TXID:
	case PHY_INTERFACE_MODE_SGMII:
		phylink_set(mask, 10baseT_Half);
		phylink_set(mask, 10baseT_Full);
		phylink_set(mask, 100baseT_Half);
		phylink_set(mask, 100baseT_Full);
		/* Fall-through */
	case PHY_INTERFACE_MODE_1000BASEX:
	case PHY_INTERFACE_MODE_2500BASEX:
		phylink_set(mask, 1000baseT_Full);
		phylink_set(mask, 1000baseX_Full);
		phylink_set(mask, 2500baseX_Full);
		break;
	default:
		goto empty_set;
	}

	bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS);
	bitmap_and(state->advertising, state->advertising, mask,
		   __ETHTOOL_LINK_MODE_MASK_NBITS);
	return;

empty_set:
	bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
}
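
/* Note on the second switch above: the 10G, RGMII/SGMII and
 * 1000/2500BASE-X groups deliberately fall through, so a faster interface
 * also advertises every slower link mode it can run; only modes that
 * survive the final bitmap_and() against the locally built mask are
 * reported to phylink.
 */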

static void mvpp22_xlg_link_state(struct mvpp2_port *port,
				  struct phylink_link_state *state)
{
	u32 val;

	state->speed = SPEED_10000;
	state->duplex = 1;
	state->an_complete = 1;

	val = readl(port->base + MVPP22_XLG_STATUS);
	state->link = !!(val & MVPP22_XLG_STATUS_LINK_UP);

	state->pause = 0;
	val = readl(port->base + MVPP22_XLG_CTRL0_REG);
	if (val & MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN)
		state->pause |= MLO_PAUSE_TX;
	if (val & MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN)
		state->pause |= MLO_PAUSE_RX;
}

static void mvpp2_gmac_link_state(struct mvpp2_port *port,
				  struct phylink_link_state *state)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_STATUS0);

	state->an_complete = !!(val & MVPP2_GMAC_STATUS0_AN_COMPLETE);
	state->link = !!(val & MVPP2_GMAC_STATUS0_LINK_UP);
	state->duplex = !!(val & MVPP2_GMAC_STATUS0_FULL_DUPLEX);

	switch (port->phy_interface) {
	case PHY_INTERFACE_MODE_1000BASEX:
		state->speed = SPEED_1000;
		break;
	case PHY_INTERFACE_MODE_2500BASEX:
		state->speed = SPEED_2500;
		break;
	default:
		if (val & MVPP2_GMAC_STATUS0_GMII_SPEED)
			state->speed = SPEED_1000;
		else if (val & MVPP2_GMAC_STATUS0_MII_SPEED)
			state->speed = SPEED_100;
		else
			state->speed = SPEED_10;
	}

	state->pause = 0;
	if (val & MVPP2_GMAC_STATUS0_RX_PAUSE)
		state->pause |= MLO_PAUSE_RX;
	if (val & MVPP2_GMAC_STATUS0_TX_PAUSE)
		state->pause |= MLO_PAUSE_TX;
}

static int mvpp2_phylink_mac_link_state(struct net_device *dev,
					struct phylink_link_state *state)
{
	struct mvpp2_port *port = netdev_priv(dev);

	if (port->priv->hw_version == MVPP22 && port->gop_id == 0) {
		u32 mode = readl(port->base + MVPP22_XLG_CTRL3_REG);
		mode &= MVPP22_XLG_CTRL3_MACMODESELECT_MASK;

		if (mode == MVPP22_XLG_CTRL3_MACMODESELECT_10G) {
			mvpp22_xlg_link_state(port, state);
			return 1;
		}
	}

	mvpp2_gmac_link_state(port, state);
	return 1;
}

static void mvpp2_mac_an_restart(struct net_device *dev)
{
	struct mvpp2_port *port = netdev_priv(dev);
	u32 val;

	if (port->phy_interface != PHY_INTERFACE_MODE_SGMII)
		return;

	val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
	/* The RESTART_AN bit is cleared by the h/w after restarting the AN
	 * process.
	 */
	val |= MVPP2_GMAC_IN_BAND_RESTART_AN | MVPP2_GMAC_IN_BAND_AUTONEG;
	writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
}

static void mvpp2_xlg_config(struct mvpp2_port *port, unsigned int mode,
			     const struct phylink_link_state *state)
{
	u32 ctrl0, ctrl4;

	ctrl0 = readl(port->base + MVPP22_XLG_CTRL0_REG);
	ctrl4 = readl(port->base + MVPP22_XLG_CTRL4_REG);

	if (state->pause & MLO_PAUSE_TX)
		ctrl0 |= MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN;
	if (state->pause & MLO_PAUSE_RX)
		ctrl0 |= MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN;

	ctrl4 &= ~(MVPP22_XLG_CTRL4_MACMODSELECT_GMAC |
		   MVPP22_XLG_CTRL4_EN_IDLE_CHECK);
	ctrl4 |= MVPP22_XLG_CTRL4_FWD_FC | MVPP22_XLG_CTRL4_FWD_PFC;

	writel(ctrl0, port->base + MVPP22_XLG_CTRL0_REG);
	writel(ctrl4, port->base + MVPP22_XLG_CTRL4_REG);
}

static void mvpp2_gmac_config(struct mvpp2_port *port, unsigned int mode,
			      const struct phylink_link_state *state)
{
	u32 an, ctrl0, ctrl2, ctrl4;
	u32 old_ctrl2;

	an = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
	ctrl0 = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
	ctrl2 = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
	ctrl4 = readl(port->base + MVPP22_GMAC_CTRL_4_REG);

	old_ctrl2 = ctrl2;

	/* Force link down */
	an &= ~MVPP2_GMAC_FORCE_LINK_PASS;
	an |= MVPP2_GMAC_FORCE_LINK_DOWN;
	writel(an, port->base + MVPP2_GMAC_AUTONEG_CONFIG);

	/* Set the GMAC in a reset state */
	ctrl2 |= MVPP2_GMAC_PORT_RESET_MASK;
	writel(ctrl2, port->base + MVPP2_GMAC_CTRL_2_REG);

	an &= ~(MVPP2_GMAC_CONFIG_MII_SPEED | MVPP2_GMAC_CONFIG_GMII_SPEED |
		MVPP2_GMAC_AN_SPEED_EN | MVPP2_GMAC_FC_ADV_EN |
		MVPP2_GMAC_FC_ADV_ASM_EN | MVPP2_GMAC_FLOW_CTRL_AUTONEG |
		MVPP2_GMAC_CONFIG_FULL_DUPLEX | MVPP2_GMAC_AN_DUPLEX_EN |
		MVPP2_GMAC_FORCE_LINK_DOWN);
	ctrl0 &= ~MVPP2_GMAC_PORT_TYPE_MASK;
	ctrl2 &= ~(MVPP2_GMAC_PORT_RESET_MASK | MVPP2_GMAC_PCS_ENABLE_MASK);

	if (state->interface == PHY_INTERFACE_MODE_1000BASEX ||
	    state->interface == PHY_INTERFACE_MODE_2500BASEX) {
		/* 1000BaseX and 2500BaseX ports cannot negotiate speed nor can
		 * they negotiate duplex: they are always operating with a fixed
		 * speed of 1000/2500Mbps in full duplex, so force 1000/2500
		 * speed and full duplex here.
		 */
		ctrl0 |= MVPP2_GMAC_PORT_TYPE_MASK;
		an |= MVPP2_GMAC_CONFIG_GMII_SPEED |
		      MVPP2_GMAC_CONFIG_FULL_DUPLEX;
	} else if (!phy_interface_mode_is_rgmii(state->interface)) {
		an |= MVPP2_GMAC_AN_SPEED_EN | MVPP2_GMAC_FLOW_CTRL_AUTONEG;
	}

	if (state->duplex)
		an |= MVPP2_GMAC_CONFIG_FULL_DUPLEX;
	if (phylink_test(state->advertising, Pause))
		an |= MVPP2_GMAC_FC_ADV_EN;
	if (phylink_test(state->advertising, Asym_Pause))
		an |= MVPP2_GMAC_FC_ADV_ASM_EN;

	if (state->interface == PHY_INTERFACE_MODE_SGMII ||
	    state->interface == PHY_INTERFACE_MODE_1000BASEX ||
	    state->interface == PHY_INTERFACE_MODE_2500BASEX) {
		an |= MVPP2_GMAC_IN_BAND_AUTONEG;
		ctrl2 |= MVPP2_GMAC_INBAND_AN_MASK | MVPP2_GMAC_PCS_ENABLE_MASK;

		ctrl4 &= ~(MVPP22_CTRL4_EXT_PIN_GMII_SEL |
			   MVPP22_CTRL4_RX_FC_EN | MVPP22_CTRL4_TX_FC_EN);
		ctrl4 |= MVPP22_CTRL4_SYNC_BYPASS_DIS |
			 MVPP22_CTRL4_DP_CLK_SEL |
			 MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;

		if (state->pause & MLO_PAUSE_TX)
			ctrl4 |= MVPP22_CTRL4_TX_FC_EN;
		if (state->pause & MLO_PAUSE_RX)
			ctrl4 |= MVPP22_CTRL4_RX_FC_EN;
	} else if (phy_interface_mode_is_rgmii(state->interface)) {
		an |= MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS;

		if (state->speed == SPEED_1000)
			an |= MVPP2_GMAC_CONFIG_GMII_SPEED;
		else if (state->speed == SPEED_100)
			an |= MVPP2_GMAC_CONFIG_MII_SPEED;

		ctrl4 &= ~MVPP22_CTRL4_DP_CLK_SEL;
		ctrl4 |= MVPP22_CTRL4_EXT_PIN_GMII_SEL |
			 MVPP22_CTRL4_SYNC_BYPASS_DIS |
			 MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
	}

	writel(ctrl0, port->base + MVPP2_GMAC_CTRL_0_REG);
	writel(ctrl2, port->base + MVPP2_GMAC_CTRL_2_REG);
	writel(ctrl4, port->base + MVPP22_GMAC_CTRL_4_REG);
	writel(an, port->base + MVPP2_GMAC_AUTONEG_CONFIG);

	if (old_ctrl2 & MVPP2_GMAC_PORT_RESET_MASK) {
		while (readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
		       MVPP2_GMAC_PORT_RESET_MASK)
			continue;
	}
}
static void mvpp2_mac_config(struct net_device *dev, unsigned int mode,
			     const struct phylink_link_state *state)
{
	struct mvpp2_port *port = netdev_priv(dev);

	/* Check for invalid configuration */
	if (state->interface == PHY_INTERFACE_MODE_10GKR && port->gop_id != 0) {
		netdev_err(dev, "Invalid mode on %s\n", dev->name);
		return;
	}

	/* Make sure the port is disabled when reconfiguring the mode */
	mvpp2_port_disable(port);

	if (port->priv->hw_version == MVPP22 &&
	    port->phy_interface != state->interface) {
		port->phy_interface = state->interface;

		/* Reconfigure the serdes lanes */
		phy_power_off(port->comphy);
		mvpp22_mode_reconfigure(port);
	}

	/* mac (re)configuration */
	if (state->interface == PHY_INTERFACE_MODE_10GKR)
		mvpp2_xlg_config(port, mode, state);
	else if (phy_interface_mode_is_rgmii(state->interface) ||
		 state->interface == PHY_INTERFACE_MODE_SGMII ||
		 state->interface == PHY_INTERFACE_MODE_1000BASEX ||
		 state->interface == PHY_INTERFACE_MODE_2500BASEX)
		mvpp2_gmac_config(port, mode, state);

	if (port->priv->hw_version == MVPP21 && port->flags & MVPP2_F_LOOPBACK)
		mvpp2_port_loopback_set(port, state);

	mvpp2_port_enable(port);
}

static void mvpp2_mac_link_up(struct net_device *dev, unsigned int mode,
			      phy_interface_t interface, struct phy_device *phy)
{
	struct mvpp2_port *port = netdev_priv(dev);
	u32 val;

	if (!phylink_autoneg_inband(mode) &&
	    interface != PHY_INTERFACE_MODE_10GKR) {
		val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
		val &= ~MVPP2_GMAC_FORCE_LINK_DOWN;
		if (phy_interface_mode_is_rgmii(interface))
			val |= MVPP2_GMAC_FORCE_LINK_PASS;
		writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
	}

	mvpp2_port_enable(port);

	mvpp2_egress_enable(port);
	mvpp2_ingress_enable(port);
	netif_tx_wake_all_queues(dev);
}

static void mvpp2_mac_link_down(struct net_device *dev, unsigned int mode,
				phy_interface_t interface)
{
	struct mvpp2_port *port = netdev_priv(dev);
	u32 val;

	if (!phylink_autoneg_inband(mode) &&
	    interface != PHY_INTERFACE_MODE_10GKR) {
		val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
		val &= ~MVPP2_GMAC_FORCE_LINK_PASS;
		val |= MVPP2_GMAC_FORCE_LINK_DOWN;
		writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
	}

	netif_tx_stop_all_queues(dev);
	mvpp2_egress_disable(port);
	mvpp2_ingress_disable(port);

	/* When using link interrupts to notify phylink of a MAC state change,
	 * we do not want the port to be disabled (we want to receive further
	 * interrupts, to be notified when the port will have a link later).
	 */
	if (!port->has_phy)
		return;

	mvpp2_port_disable(port);
}

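/* Callbacks invoked by the phylink core: .validate filters the supported and
 * advertised link modes for a given interface mode, .mac_config is called
 * whenever the MAC has to be reprogrammed, and .mac_link_up/.mac_link_down
 * react to link transitions.
 */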
static const struct phylink_mac_ops mvpp2_phylink_ops = {
	.validate = mvpp2_phylink_validate,
	.mac_link_state = mvpp2_phylink_mac_link_state,
	.mac_an_restart = mvpp2_mac_an_restart,
	.mac_config = mvpp2_mac_config,
	.mac_link_up = mvpp2_mac_link_up,
	.mac_link_down = mvpp2_mac_link_down,
};

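/* These ops are bound to a port by the phylink_create() call in
 * mvpp2_port_probe() below; ports probed through ACPI skip phylink and leave
 * port->phylink NULL.
 */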
/* Ports initialization */
static int mvpp2_port_probe(struct platform_device *pdev,
			    struct fwnode_handle *port_fwnode,
			    struct mvpp2 *priv)
{
	struct phy *comphy = NULL;
	struct mvpp2_port *port;
	struct mvpp2_port_pcpu *port_pcpu;
	struct device_node *port_node = to_of_node(port_fwnode);
	struct net_device *dev;
	struct resource *res;
	struct phylink *phylink;
	char *mac_from = "";
	unsigned int ntxqs, nrxqs;
	bool has_tx_irqs;
	u32 id;
	int features;
	int phy_mode;
	int err, i, cpu;

	if (port_node) {
		has_tx_irqs = mvpp2_port_has_tx_irqs(priv, port_node);
	} else {
		has_tx_irqs = true;
		queue_mode = MVPP2_QDIST_MULTI_MODE;
	}

	if (!has_tx_irqs)
		queue_mode = MVPP2_QDIST_SINGLE_MODE;

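	/* In multi-queue (per-CPU queue distribution) mode, each possible CPU
	 * gets its own group of MVPP2_DEFAULT_RXQ Rx queues; in single mode
	 * all CPUs share one group.
	 */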
	ntxqs = MVPP2_MAX_TXQ;
	if (priv->hw_version == MVPP22 && queue_mode == MVPP2_QDIST_MULTI_MODE)
		nrxqs = MVPP2_DEFAULT_RXQ * num_possible_cpus();
	else
		nrxqs = MVPP2_DEFAULT_RXQ;

	dev = alloc_etherdev_mqs(sizeof(*port), ntxqs, nrxqs);
	if (!dev)
		return -ENOMEM;

	phy_mode = fwnode_get_phy_mode(port_fwnode);
	if (phy_mode < 0) {
		dev_err(&pdev->dev, "incorrect phy mode\n");
		err = phy_mode;
		goto err_free_netdev;
	}

	if (port_node) {
		comphy = devm_of_phy_get(&pdev->dev, port_node, NULL);
		if (IS_ERR(comphy)) {
			if (PTR_ERR(comphy) == -EPROBE_DEFER) {
				err = -EPROBE_DEFER;
				goto err_free_netdev;
			}
			comphy = NULL;
		}
	}

	if (fwnode_property_read_u32(port_fwnode, "port-id", &id)) {
		err = -EINVAL;
		dev_err(&pdev->dev, "missing port-id value\n");
		goto err_free_netdev;
	}

	dev->tx_queue_len = MVPP2_MAX_TXD_MAX;
	dev->watchdog_timeo = 5 * HZ;
	dev->netdev_ops = &mvpp2_netdev_ops;
	dev->ethtool_ops = &mvpp2_eth_tool_ops;

	port = netdev_priv(dev);
	port->dev = dev;
	port->fwnode = port_fwnode;
	port->has_phy = !!of_find_property(port_node, "phy", NULL);
	port->ntxqs = ntxqs;
	port->nrxqs = nrxqs;
	port->priv = priv;
	port->has_tx_irqs = has_tx_irqs;

	err = mvpp2_queue_vectors_init(port, port_node);
	if (err)
		goto err_free_netdev;

	if (port_node)
		port->link_irq = of_irq_get_byname(port_node, "link");
	else
		port->link_irq = fwnode_irq_get(port_fwnode, port->nqvecs + 1);
	if (port->link_irq == -EPROBE_DEFER) {
		err = -EPROBE_DEFER;
		goto err_deinit_qvecs;
	}
	if (port->link_irq <= 0)
		/* the link irq is optional */
		port->link_irq = 0;

	if (fwnode_property_read_bool(port_fwnode, "marvell,loopback"))
		port->flags |= MVPP2_F_LOOPBACK;

	port->id = id;
	if (priv->hw_version == MVPP21)
		port->first_rxq = port->id * port->nrxqs;
	else
		port->first_rxq = port->id * priv->max_port_rxqs;

	port->of_node = port_node;
	port->phy_interface = phy_mode;
	port->comphy = comphy;

	if (priv->hw_version == MVPP21) {
		res = platform_get_resource(pdev, IORESOURCE_MEM, 2 + id);
		port->base = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(port->base)) {
			err = PTR_ERR(port->base);
			goto err_free_irq;
		}

		port->stats_base = port->priv->lms_base +
				   MVPP21_MIB_COUNTERS_OFFSET +
				   port->gop_id * MVPP21_MIB_COUNTERS_PORT_SZ;
	} else {
		if (fwnode_property_read_u32(port_fwnode, "gop-port-id",
					     &port->gop_id)) {
			err = -EINVAL;
			dev_err(&pdev->dev, "missing gop-port-id value\n");
			goto err_deinit_qvecs;
		}

		port->base = priv->iface_base + MVPP22_GMAC_BASE(port->gop_id);
		port->stats_base = port->priv->iface_base +
				   MVPP22_MIB_COUNTERS_OFFSET +
				   port->gop_id * MVPP22_MIB_COUNTERS_PORT_SZ;
	}

	/* Alloc per-cpu and ethtool stats */
	port->stats = netdev_alloc_pcpu_stats(struct mvpp2_pcpu_stats);
	if (!port->stats) {
		err = -ENOMEM;
		goto err_free_irq;
	}

	port->ethtool_stats = devm_kcalloc(&pdev->dev,
					   ARRAY_SIZE(mvpp2_ethtool_regs),
					   sizeof(u64), GFP_KERNEL);
	if (!port->ethtool_stats) {
		err = -ENOMEM;
		goto err_free_stats;
	}

	mutex_init(&port->gather_stats_lock);
	INIT_DELAYED_WORK(&port->stats_work, mvpp2_gather_hw_statistics);

	mvpp2_port_copy_mac_addr(dev, priv, port_fwnode, &mac_from);

	port->tx_ring_size = MVPP2_MAX_TXD_DFLT;
	port->rx_ring_size = MVPP2_MAX_RXD_DFLT;
	SET_NETDEV_DEV(dev, &pdev->dev);

	err = mvpp2_port_init(port);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to init port %d\n", id);
		goto err_free_stats;
	}

	mvpp2_port_periodic_xon_disable(port);

	mvpp2_port_reset(port);

	port->pcpu = alloc_percpu(struct mvpp2_port_pcpu);
	if (!port->pcpu) {
		err = -ENOMEM;
		goto err_free_txq_pcpu;
	}

	if (!port->has_tx_irqs) {
		for_each_present_cpu(cpu) {
			port_pcpu = per_cpu_ptr(port->pcpu, cpu);

			hrtimer_init(&port_pcpu->tx_done_timer, CLOCK_MONOTONIC,
				     HRTIMER_MODE_REL_PINNED);
			port_pcpu->tx_done_timer.function = mvpp2_hr_timer_cb;
			port_pcpu->timer_scheduled = false;

			tasklet_init(&port_pcpu->tx_done_tasklet,
				     mvpp2_tx_proc_cb,
				     (unsigned long)dev);
		}
	}

	features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		   NETIF_F_TSO;
	dev->features = features | NETIF_F_RXCSUM;
	dev->hw_features |= features | NETIF_F_RXCSUM | NETIF_F_GRO |
			    NETIF_F_HW_VLAN_CTAG_FILTER;

	if (mvpp22_rss_is_supported())
		dev->hw_features |= NETIF_F_RXHASH;

	if (port->pool_long->id == MVPP2_BM_JUMBO && port->id != 0) {
		dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
		dev->hw_features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
	}

	dev->vlan_features |= features;
	dev->gso_max_segs = MVPP2_MAX_TSO_SEGS;
	dev->priv_flags |= IFF_UNICAST_FLT;

	/* MTU range: 68 - 9704 */
	dev->min_mtu = ETH_MIN_MTU;
	/* 9704 == 9728 - 20 and rounding to 8 */
	dev->max_mtu = MVPP2_BM_JUMBO_PKT_SIZE;
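	/* (Presumably: a 9728B jumbo buffer minus the 2B Marvell header, 14B
	 * Ethernet header and 4B FCS leaves 9708B, which aligned down to a
	 * multiple of 8 gives the 9704B max MTU above.)
	 */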
	dev->dev.of_node = port_node;

	/* Phylink isn't used w/ ACPI as of now */
	if (port_node) {
		phylink = phylink_create(dev, port_fwnode, phy_mode,
					 &mvpp2_phylink_ops);
		if (IS_ERR(phylink)) {
			err = PTR_ERR(phylink);
			goto err_free_port_pcpu;
		}
		port->phylink = phylink;
	} else {
		port->phylink = NULL;
	}

	err = register_netdev(dev);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to register netdev\n");
		goto err_phylink;
	}
	netdev_info(dev, "Using %s mac address %pM\n", mac_from, dev->dev_addr);

	priv->port_list[priv->port_count++] = port;

	return 0;

err_phylink:
	if (port->phylink)
		phylink_destroy(port->phylink);
err_free_port_pcpu:
	free_percpu(port->pcpu);
err_free_txq_pcpu:
	for (i = 0; i < port->ntxqs; i++)
		free_percpu(port->txqs[i]->pcpu);
err_free_stats:
	free_percpu(port->stats);
err_free_irq:
	if (port->link_irq)
		irq_dispose_mapping(port->link_irq);
err_deinit_qvecs:
	mvpp2_queue_vectors_deinit(port);
err_free_netdev:
	free_netdev(dev);
	return err;
}

/* Ports removal routine */
static void mvpp2_port_remove(struct mvpp2_port *port)
{
	int i;

	unregister_netdev(port->dev);
	if (port->phylink)
		phylink_destroy(port->phylink);
	free_percpu(port->pcpu);
	free_percpu(port->stats);
	for (i = 0; i < port->ntxqs; i++)
		free_percpu(port->txqs[i]->pcpu);
	mvpp2_queue_vectors_deinit(port);
	if (port->link_irq)
		irq_dispose_mapping(port->link_irq);
	free_netdev(port->dev);
}

/* Initialize decoding windows */
static void mvpp2_conf_mbus_windows(const struct mbus_dram_target_info *dram,
				    struct mvpp2 *priv)
{
	u32 win_enable;
	int i;

	for (i = 0; i < 6; i++) {
		mvpp2_write(priv, MVPP2_WIN_BASE(i), 0);
		mvpp2_write(priv, MVPP2_WIN_SIZE(i), 0);

		if (i < 4)
			mvpp2_write(priv, MVPP2_WIN_REMAP(i), 0);
	}

	win_enable = 0;

	for (i = 0; i < dram->num_cs; i++) {
		const struct mbus_dram_window *cs = dram->cs + i;

		mvpp2_write(priv, MVPP2_WIN_BASE(i),
			    (cs->base & 0xffff0000) | (cs->mbus_attr << 8) |
			    dram->mbus_dram_target_id);

		mvpp2_write(priv, MVPP2_WIN_SIZE(i),
			    (cs->size - 1) & 0xffff0000);

		win_enable |= (1 << i);
	}

	mvpp2_write(priv, MVPP2_BASE_ADDR_ENABLE, win_enable);
}

/* Initialize Rx FIFOs */
static void mvpp2_rx_fifo_init(struct mvpp2 *priv)
{
	int port;

	for (port = 0; port < MVPP2_MAX_PORTS; port++) {
		mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port),
			    MVPP2_RX_FIFO_PORT_DATA_SIZE_4KB);
		mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
			    MVPP2_RX_FIFO_PORT_ATTR_SIZE_4KB);
	}

	mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG,
		    MVPP2_RX_FIFO_PORT_MIN_PKT);
	mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1);
}

static void mvpp22_rx_fifo_init(struct mvpp2 *priv)
{
	int port;

	/* The FIFO size parameters are set depending on the maximum speed a
	 * given port can handle:
	 * - Port 0: 10Gbps
	 * - Port 1: 2.5Gbps
	 * - Ports 2 and 3: 1Gbps
	 */

	mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(0),
		    MVPP2_RX_FIFO_PORT_DATA_SIZE_32KB);
	mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(0),
		    MVPP2_RX_FIFO_PORT_ATTR_SIZE_32KB);

	mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(1),
		    MVPP2_RX_FIFO_PORT_DATA_SIZE_8KB);
	mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(1),
		    MVPP2_RX_FIFO_PORT_ATTR_SIZE_8KB);

	for (port = 2; port < MVPP2_MAX_PORTS; port++) {
		mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port),
			    MVPP2_RX_FIFO_PORT_DATA_SIZE_4KB);
		mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
			    MVPP2_RX_FIFO_PORT_ATTR_SIZE_4KB);
	}

	mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG,
		    MVPP2_RX_FIFO_PORT_MIN_PKT);
	mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1);
}

/* Initialize Tx FIFOs: the total FIFO size is 19kB on PPv2.2 and 10G
 * interfaces must have a Tx FIFO size of 10kB. As only port 0 can do 10G,
 * configure its Tx FIFO size to 10kB and the other ports' Tx FIFO size to 3kB.
 */
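/* (10kB for port 0 plus 3kB for each of ports 1-3 accounts for the whole
 * FIFO: 10 + 3 * 3 = 19kB.)
 */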
static void mvpp22_tx_fifo_init(struct mvpp2 *priv)
{
	int port, size, thrs;

	for (port = 0; port < MVPP2_MAX_PORTS; port++) {
		if (port == 0) {
			size = MVPP22_TX_FIFO_DATA_SIZE_10KB;
			thrs = MVPP2_TX_FIFO_THRESHOLD_10KB;
		} else {
			size = MVPP22_TX_FIFO_DATA_SIZE_3KB;
			thrs = MVPP2_TX_FIFO_THRESHOLD_3KB;
		}

		mvpp2_write(priv, MVPP22_TX_FIFO_SIZE_REG(port), size);
		mvpp2_write(priv, MVPP22_TX_FIFO_THRESH_REG(port), thrs);
	}
}

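/* Program the AXI attribute/code registers that govern the controller's DMA
 * masters: cacheable, outer-shareable settings so descriptor and buffer
 * traffic can be snooped by the CPU caches, and non-cacheable system-domain
 * codes for the normal (non-snooped) accesses.
 */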
static void mvpp2_axi_init(struct mvpp2 *priv)
{
	u32 val, rdval, wrval;

	mvpp2_write(priv, MVPP22_BM_ADDR_HIGH_RLS_REG, 0x0);

	/* AXI Bridge Configuration */

	rdval = MVPP22_AXI_CODE_CACHE_RD_CACHE
		<< MVPP22_AXI_ATTR_CACHE_OFFS;
	rdval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
		<< MVPP22_AXI_ATTR_DOMAIN_OFFS;

	wrval = MVPP22_AXI_CODE_CACHE_WR_CACHE
		<< MVPP22_AXI_ATTR_CACHE_OFFS;
	wrval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
		<< MVPP22_AXI_ATTR_DOMAIN_OFFS;

	/* BM */
	mvpp2_write(priv, MVPP22_AXI_BM_WR_ATTR_REG, wrval);
	mvpp2_write(priv, MVPP22_AXI_BM_RD_ATTR_REG, rdval);

	/* Descriptor */
	mvpp2_write(priv, MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG, rdval);
	mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG, wrval);
	mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG, rdval);
	mvpp2_write(priv, MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG, wrval);

	/* Buffer Data */
	mvpp2_write(priv, MVPP22_AXI_TX_DATA_RD_ATTR_REG, rdval);
	mvpp2_write(priv, MVPP22_AXI_RX_DATA_WR_ATTR_REG, wrval);

	val = MVPP22_AXI_CODE_CACHE_NON_CACHE
		<< MVPP22_AXI_CODE_CACHE_OFFS;
	val |= MVPP22_AXI_CODE_DOMAIN_SYSTEM
		<< MVPP22_AXI_CODE_DOMAIN_OFFS;
	mvpp2_write(priv, MVPP22_AXI_RD_NORMAL_CODE_REG, val);
	mvpp2_write(priv, MVPP22_AXI_WR_NORMAL_CODE_REG, val);

	val = MVPP22_AXI_CODE_CACHE_RD_CACHE
		<< MVPP22_AXI_CODE_CACHE_OFFS;
	val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
		<< MVPP22_AXI_CODE_DOMAIN_OFFS;

	mvpp2_write(priv, MVPP22_AXI_RD_SNOOP_CODE_REG, val);

	val = MVPP22_AXI_CODE_CACHE_WR_CACHE
		<< MVPP22_AXI_CODE_CACHE_OFFS;
	val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
		<< MVPP22_AXI_CODE_DOMAIN_OFFS;

	mvpp2_write(priv, MVPP22_AXI_WR_SNOOP_CODE_REG, val);
}

/* Initialize network controller common part HW */
static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv)
{
	const struct mbus_dram_target_info *dram_target_info;
	int err, i;
	u32 val;

	/* MBUS windows configuration */
	dram_target_info = mv_mbus_dram_info();
	if (dram_target_info)
		mvpp2_conf_mbus_windows(dram_target_info, priv);

	if (priv->hw_version == MVPP22)
		mvpp2_axi_init(priv);

	/* Disable HW PHY polling */
	if (priv->hw_version == MVPP21) {
		val = readl(priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
		val |= MVPP2_PHY_AN_STOP_SMI0_MASK;
		writel(val, priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
	} else {
		val = readl(priv->iface_base + MVPP22_SMI_MISC_CFG_REG);
		val &= ~MVPP22_SMI_POLLING_EN;
		writel(val, priv->iface_base + MVPP22_SMI_MISC_CFG_REG);
	}

	/* Allocate and initialize aggregated TXQs */
	priv->aggr_txqs = devm_kcalloc(&pdev->dev, num_present_cpus(),
				       sizeof(*priv->aggr_txqs),
				       GFP_KERNEL);
	if (!priv->aggr_txqs)
		return -ENOMEM;

	for_each_present_cpu(i) {
		priv->aggr_txqs[i].id = i;
		priv->aggr_txqs[i].size = MVPP2_AGGR_TXQ_SIZE;
		err = mvpp2_aggr_txq_init(pdev, &priv->aggr_txqs[i], i, priv);
		if (err < 0)
			return err;
	}

	/* Fifo Init */
	if (priv->hw_version == MVPP21) {
		mvpp2_rx_fifo_init(priv);
	} else {
		mvpp22_rx_fifo_init(priv);
		mvpp22_tx_fifo_init(priv);
	}

	if (priv->hw_version == MVPP21)
		writel(MVPP2_EXT_GLOBAL_CTRL_DEFAULT,
		       priv->lms_base + MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG);

	/* Allow cache snoop when transmitting packets */
	mvpp2_write(priv, MVPP2_TX_SNOOP_REG, 0x1);

	/* Buffer Manager initialization */
	err = mvpp2_bm_init(pdev, priv);
	if (err < 0)
		return err;

	/* Parser default initialization */
	err = mvpp2_prs_default_init(pdev, priv);
	if (err < 0)
		return err;

	/* Classifier default initialization */
	mvpp2_cls_init(priv);

	return 0;
}

static int mvpp2_probe(struct platform_device *pdev)
{
	const struct acpi_device_id *acpi_id;
	struct fwnode_handle *fwnode = pdev->dev.fwnode;
	struct fwnode_handle *port_fwnode;
	struct mvpp2 *priv;
	struct resource *res;
	void __iomem *base;
	int i;
	int err;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	if (has_acpi_companion(&pdev->dev)) {
		acpi_id = acpi_match_device(pdev->dev.driver->acpi_match_table,
					    &pdev->dev);
		if (!acpi_id)
			return -EINVAL;
		priv->hw_version = (unsigned long)acpi_id->driver_data;
	} else {
		priv->hw_version =
			(unsigned long)of_device_get_match_data(&pdev->dev);
	}

	/* Multi-queue mode isn't supported on PPV2.1, fall back to single
	 * mode.
	 */
	if (priv->hw_version == MVPP21)
		queue_mode = MVPP2_QDIST_SINGLE_MODE;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	if (priv->hw_version == MVPP21) {
		res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
		priv->lms_base = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(priv->lms_base))
			return PTR_ERR(priv->lms_base);
	} else {
		res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
		if (!res) {
			dev_err(&pdev->dev, "Invalid resource\n");
			return -EINVAL;
		}
		if (has_acpi_companion(&pdev->dev)) {
			/* In case the MDIO memory region is declared in
			 * the ACPI, it can already appear as 'in-use'
			 * in the OS. Because it is overlapped by the second
			 * region of the network controller, make
			 * sure it is released before requesting it again.
			 * Care is taken by the mvpp2 driver to avoid
			 * concurrent access to this memory region.
			 */
			release_resource(res);
		}
		priv->iface_base = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(priv->iface_base))
			return PTR_ERR(priv->iface_base);
	}

	if (priv->hw_version == MVPP22 && dev_of_node(&pdev->dev)) {
		priv->sysctrl_base =
			syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
							"marvell,system-controller");
		if (IS_ERR(priv->sysctrl_base))
			/* The system controller regmap is optional for dt
			 * compatibility reasons. When not provided, the
			 * configuration of the GoP relies on the
			 * firmware/bootloader.
			 */
			priv->sysctrl_base = NULL;
	}

	mvpp2_setup_bm_pool();

	for (i = 0; i < MVPP2_MAX_THREADS; i++) {
		u32 addr_space_sz;

		addr_space_sz = (priv->hw_version == MVPP21 ?
				 MVPP21_ADDR_SPACE_SZ : MVPP22_ADDR_SPACE_SZ);
		priv->swth_base[i] = base + i * addr_space_sz;
	}

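	/* (MVPP21_ADDR_SPACE_SZ is 0, so on PPv2.1 every thread aliases the
	 * same register window, while PPv2.2 gives each thread its own 64kB
	 * window.)
	 */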
	if (priv->hw_version == MVPP21)
		priv->max_port_rxqs = 8;
	else
		priv->max_port_rxqs = 32;

	if (dev_of_node(&pdev->dev)) {
		priv->pp_clk = devm_clk_get(&pdev->dev, "pp_clk");
		if (IS_ERR(priv->pp_clk))
			return PTR_ERR(priv->pp_clk);
		err = clk_prepare_enable(priv->pp_clk);
		if (err < 0)
			return err;

		priv->gop_clk = devm_clk_get(&pdev->dev, "gop_clk");
		if (IS_ERR(priv->gop_clk)) {
			err = PTR_ERR(priv->gop_clk);
			goto err_pp_clk;
		}
		err = clk_prepare_enable(priv->gop_clk);
		if (err < 0)
			goto err_pp_clk;

		if (priv->hw_version == MVPP22) {
			priv->mg_clk = devm_clk_get(&pdev->dev, "mg_clk");
			if (IS_ERR(priv->mg_clk)) {
				err = PTR_ERR(priv->mg_clk);
				goto err_gop_clk;
			}

			err = clk_prepare_enable(priv->mg_clk);
			if (err < 0)
				goto err_gop_clk;

			priv->mg_core_clk = devm_clk_get(&pdev->dev, "mg_core_clk");
			if (IS_ERR(priv->mg_core_clk)) {
				priv->mg_core_clk = NULL;
			} else {
				err = clk_prepare_enable(priv->mg_core_clk);
				if (err < 0)
					goto err_mg_clk;
			}
		}

		priv->axi_clk = devm_clk_get(&pdev->dev, "axi_clk");
		if (IS_ERR(priv->axi_clk)) {
			err = PTR_ERR(priv->axi_clk);
			if (err == -EPROBE_DEFER)
				goto err_mg_core_clk;
			priv->axi_clk = NULL;
		} else {
			err = clk_prepare_enable(priv->axi_clk);
			if (err < 0)
				goto err_mg_core_clk;
		}

		/* Get system's tclk rate */
		priv->tclk = clk_get_rate(priv->pp_clk);
	} else if (device_property_read_u32(&pdev->dev, "clock-frequency",
					    &priv->tclk)) {
		dev_err(&pdev->dev, "missing clock-frequency value\n");
		return -EINVAL;
	}

	if (priv->hw_version == MVPP22) {
		err = dma_set_mask(&pdev->dev, MVPP2_DESC_DMA_MASK);
		if (err)
			goto err_axi_clk;
		/* Sadly, the BM pools all share the same register to
		 * store the high 32 bits of their address. So they
		 * must all have the same high 32 bits, which forces
		 * us to restrict coherent memory to DMA_BIT_MASK(32).
		 */
		err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err)
			goto err_axi_clk;
	}

	/* Initialize network controller */
	err = mvpp2_init(pdev, priv);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to initialize controller\n");
		goto err_axi_clk;
	}

	/* Initialize ports */
	fwnode_for_each_available_child_node(fwnode, port_fwnode) {
		err = mvpp2_port_probe(pdev, port_fwnode, priv);
		if (err < 0)
			goto err_port_probe;
	}

	if (priv->port_count == 0) {
		dev_err(&pdev->dev, "no ports enabled\n");
		err = -ENODEV;
		goto err_axi_clk;
	}

	/* Statistics must be gathered regularly because some of them (like
	 * the packet and octet counters) are 32-bit registers and could
	 * overflow quite quickly. For instance, a 10Gb link used at full
	 * bandwidth will wrap the 32-bit octet counter in only a few seconds.
	 * Hence, a workqueue is used to fill 64-bit counters.
	 */
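	/* (Worked out: at 10Gbps line rate the octet counter grows by
	 * 1.25GB/s and wraps 2^32 in roughly 3.4s; minimum-size 64B frames
	 * arrive at about 14.88Mpps, wrapping a packet counter in under five
	 * minutes.)
	 */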
	snprintf(priv->queue_name, sizeof(priv->queue_name),
		 "stats-wq-%s%s", netdev_name(priv->port_list[0]->dev),
		 priv->port_count > 1 ? "+" : "");
	priv->stats_queue = create_singlethread_workqueue(priv->queue_name);
	if (!priv->stats_queue) {
		err = -ENOMEM;
		goto err_port_probe;
	}

	mvpp2_dbgfs_init(priv, pdev->name);

	platform_set_drvdata(pdev, priv);
	return 0;

err_port_probe:
	fwnode_handle_put(port_fwnode);

	i = 0;
	fwnode_for_each_available_child_node(fwnode, port_fwnode) {
		if (priv->port_list[i])
			mvpp2_port_remove(priv->port_list[i]);
		i++;
	}
err_axi_clk:
	clk_disable_unprepare(priv->axi_clk);

err_mg_core_clk:
	if (priv->hw_version == MVPP22)
		clk_disable_unprepare(priv->mg_core_clk);
err_mg_clk:
	if (priv->hw_version == MVPP22)
		clk_disable_unprepare(priv->mg_clk);
err_gop_clk:
	clk_disable_unprepare(priv->gop_clk);
err_pp_clk:
	clk_disable_unprepare(priv->pp_clk);
	return err;
}

static int mvpp2_remove(struct platform_device *pdev)
{
	struct mvpp2 *priv = platform_get_drvdata(pdev);
	struct fwnode_handle *fwnode = pdev->dev.fwnode;
	struct fwnode_handle *port_fwnode;
	int i = 0;

	mvpp2_dbgfs_cleanup(priv);

	fwnode_for_each_available_child_node(fwnode, port_fwnode) {
		if (priv->port_list[i]) {
			mutex_destroy(&priv->port_list[i]->gather_stats_lock);
			mvpp2_port_remove(priv->port_list[i]);
		}
		i++;
	}

	destroy_workqueue(priv->stats_queue);

	for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
		struct mvpp2_bm_pool *bm_pool = &priv->bm_pools[i];

		mvpp2_bm_pool_destroy(pdev, priv, bm_pool);
	}

	for_each_present_cpu(i) {
		struct mvpp2_tx_queue *aggr_txq = &priv->aggr_txqs[i];

		dma_free_coherent(&pdev->dev,
				  MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
				  aggr_txq->descs,
				  aggr_txq->descs_dma);
	}

	if (is_acpi_node(port_fwnode))
		return 0;

	clk_disable_unprepare(priv->axi_clk);
	clk_disable_unprepare(priv->mg_core_clk);
	clk_disable_unprepare(priv->mg_clk);
	clk_disable_unprepare(priv->pp_clk);
	clk_disable_unprepare(priv->gop_clk);

	return 0;
}

static const struct of_device_id mvpp2_match[] = {
	{
		.compatible = "marvell,armada-375-pp2",
		.data = (void *)MVPP21,
	},
	{
		.compatible = "marvell,armada-7k-pp22",
		.data = (void *)MVPP22,
	},
	{ },
};
MODULE_DEVICE_TABLE(of, mvpp2_match);

static const struct acpi_device_id mvpp2_acpi_match[] = {
	{ "MRVL0110", MVPP22 },
	{ },
};
MODULE_DEVICE_TABLE(acpi, mvpp2_acpi_match);

static struct platform_driver mvpp2_driver = {
	.probe = mvpp2_probe,
	.remove = mvpp2_remove,
	.driver = {
		.name = MVPP2_DRIVER_NAME,
		.of_match_table = mvpp2_match,
		.acpi_match_table = ACPI_PTR(mvpp2_acpi_match),
	},
};

static int __init mvpp2_driver_init(void)
{
	return platform_driver_register(&mvpp2_driver);
}
module_init(mvpp2_driver_init);

static void __exit mvpp2_driver_exit(void)
{
	platform_driver_unregister(&mvpp2_driver);
}
module_exit(mvpp2_driver_exit);

MODULE_DESCRIPTION("Marvell PPv2 Ethernet Driver - www.marvell.com");
MODULE_AUTHOR("Marcin Wojtas <mw@semihalf.com>");
MODULE_LICENSE("GPL v2");