2 * Driver for Marvell NETA network card for Armada XP and Armada 370 SoCs.
4 * Copyright (C) 2012 Marvell
6 * Rami Rosen <rosenr@marvell.com>
7 * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
9 * This file is licensed under the terms of the GNU General Public
10 * License version 2. This program is licensed "as is" without any
11 * warranty of any kind, whether express or implied.
14 #include <linux/clk.h>
15 #include <linux/cpu.h>
16 #include <linux/etherdevice.h>
17 #include <linux/if_vlan.h>
18 #include <linux/inetdevice.h>
19 #include <linux/interrupt.h>
21 #include <linux/kernel.h>
22 #include <linux/mbus.h>
23 #include <linux/module.h>
24 #include <linux/netdevice.h>
26 #include <linux/of_address.h>
27 #include <linux/of_irq.h>
28 #include <linux/of_mdio.h>
29 #include <linux/of_net.h>
30 #include <linux/phy.h>
31 #include <linux/phylink.h>
32 #include <linux/platform_device.h>
33 #include <linux/skbuff.h>
35 #include "mvneta_bm.h"
41 #define MVNETA_RXQ_CONFIG_REG(q) (0x1400 + ((q) << 2))
42 #define MVNETA_RXQ_HW_BUF_ALLOC BIT(0)
43 #define MVNETA_RXQ_SHORT_POOL_ID_SHIFT 4
44 #define MVNETA_RXQ_SHORT_POOL_ID_MASK 0x30
45 #define MVNETA_RXQ_LONG_POOL_ID_SHIFT 6
46 #define MVNETA_RXQ_LONG_POOL_ID_MASK 0xc0
47 #define MVNETA_RXQ_PKT_OFFSET_ALL_MASK (0xf << 8)
48 #define MVNETA_RXQ_PKT_OFFSET_MASK(offs) ((offs) << 8)
49 #define MVNETA_RXQ_THRESHOLD_REG(q) (0x14c0 + ((q) << 2))
50 #define MVNETA_RXQ_NON_OCCUPIED(v) ((v) << 16)
51 #define MVNETA_RXQ_BASE_ADDR_REG(q) (0x1480 + ((q) << 2))
52 #define MVNETA_RXQ_SIZE_REG(q) (0x14a0 + ((q) << 2))
53 #define MVNETA_RXQ_BUF_SIZE_SHIFT 19
54 #define MVNETA_RXQ_BUF_SIZE_MASK (0x1fff << 19)
55 #define MVNETA_RXQ_STATUS_REG(q) (0x14e0 + ((q) << 2))
56 #define MVNETA_RXQ_OCCUPIED_ALL_MASK 0x3fff
57 #define MVNETA_RXQ_STATUS_UPDATE_REG(q) (0x1500 + ((q) << 2))
58 #define MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT 16
59 #define MVNETA_RXQ_ADD_NON_OCCUPIED_MAX 255
60 #define MVNETA_PORT_POOL_BUFFER_SZ_REG(pool) (0x1700 + ((pool) << 2))
61 #define MVNETA_PORT_POOL_BUFFER_SZ_SHIFT 3
62 #define MVNETA_PORT_POOL_BUFFER_SZ_MASK 0xfff8
63 #define MVNETA_PORT_RX_RESET 0x1cc0
64 #define MVNETA_PORT_RX_DMA_RESET BIT(0)
65 #define MVNETA_PHY_ADDR 0x2000
66 #define MVNETA_PHY_ADDR_MASK 0x1f
67 #define MVNETA_MBUS_RETRY 0x2010
68 #define MVNETA_UNIT_INTR_CAUSE 0x2080
69 #define MVNETA_UNIT_CONTROL 0x20B0
70 #define MVNETA_PHY_POLLING_ENABLE BIT(1)
71 #define MVNETA_WIN_BASE(w) (0x2200 + ((w) << 3))
72 #define MVNETA_WIN_SIZE(w) (0x2204 + ((w) << 3))
73 #define MVNETA_WIN_REMAP(w) (0x2280 + ((w) << 2))
74 #define MVNETA_BASE_ADDR_ENABLE 0x2290
75 #define MVNETA_ACCESS_PROTECT_ENABLE 0x2294
76 #define MVNETA_PORT_CONFIG 0x2400
77 #define MVNETA_UNI_PROMISC_MODE BIT(0)
78 #define MVNETA_DEF_RXQ(q) ((q) << 1)
79 #define MVNETA_DEF_RXQ_ARP(q) ((q) << 4)
80 #define MVNETA_TX_UNSET_ERR_SUM BIT(12)
81 #define MVNETA_DEF_RXQ_TCP(q) ((q) << 16)
82 #define MVNETA_DEF_RXQ_UDP(q) ((q) << 19)
83 #define MVNETA_DEF_RXQ_BPDU(q) ((q) << 22)
84 #define MVNETA_RX_CSUM_WITH_PSEUDO_HDR BIT(25)
85 #define MVNETA_PORT_CONFIG_DEFL_VALUE(q) (MVNETA_DEF_RXQ(q) | \
86 MVNETA_DEF_RXQ_ARP(q) | \
87 MVNETA_DEF_RXQ_TCP(q) | \
88 MVNETA_DEF_RXQ_UDP(q) | \
89 MVNETA_DEF_RXQ_BPDU(q) | \
90 MVNETA_TX_UNSET_ERR_SUM | \
91 MVNETA_RX_CSUM_WITH_PSEUDO_HDR)
92 #define MVNETA_PORT_CONFIG_EXTEND 0x2404
93 #define MVNETA_MAC_ADDR_LOW 0x2414
94 #define MVNETA_MAC_ADDR_HIGH 0x2418
95 #define MVNETA_SDMA_CONFIG 0x241c
96 #define MVNETA_SDMA_BRST_SIZE_16 4
97 #define MVNETA_RX_BRST_SZ_MASK(burst) ((burst) << 1)
98 #define MVNETA_RX_NO_DATA_SWAP BIT(4)
99 #define MVNETA_TX_NO_DATA_SWAP BIT(5)
100 #define MVNETA_DESC_SWAP BIT(6)
101 #define MVNETA_TX_BRST_SZ_MASK(burst) ((burst) << 22)
102 #define MVNETA_PORT_STATUS 0x2444
103 #define MVNETA_TX_IN_PRGRS BIT(0)
104 #define MVNETA_TX_FIFO_EMPTY BIT(8)
105 #define MVNETA_RX_MIN_FRAME_SIZE 0x247c
106 #define MVNETA_SERDES_CFG 0x24A0
107 #define MVNETA_SGMII_SERDES_PROTO 0x0cc7
108 #define MVNETA_QSGMII_SERDES_PROTO 0x0667
109 #define MVNETA_TYPE_PRIO 0x24bc
110 #define MVNETA_FORCE_UNI BIT(21)
111 #define MVNETA_TXQ_CMD_1 0x24e4
112 #define MVNETA_TXQ_CMD 0x2448
113 #define MVNETA_TXQ_DISABLE_SHIFT 8
114 #define MVNETA_TXQ_ENABLE_MASK 0x000000ff
115 #define MVNETA_RX_DISCARD_FRAME_COUNT 0x2484
116 #define MVNETA_OVERRUN_FRAME_COUNT 0x2488
117 #define MVNETA_GMAC_CLOCK_DIVIDER 0x24f4
118 #define MVNETA_GMAC_1MS_CLOCK_ENABLE BIT(31)
119 #define MVNETA_ACC_MODE 0x2500
120 #define MVNETA_BM_ADDRESS 0x2504
121 #define MVNETA_CPU_MAP(cpu) (0x2540 + ((cpu) << 2))
122 #define MVNETA_CPU_RXQ_ACCESS_ALL_MASK 0x000000ff
123 #define MVNETA_CPU_TXQ_ACCESS_ALL_MASK 0x0000ff00
124 #define MVNETA_CPU_RXQ_ACCESS(rxq) BIT(rxq)
125 #define MVNETA_CPU_TXQ_ACCESS(txq) BIT(txq + 8)
126 #define MVNETA_RXQ_TIME_COAL_REG(q) (0x2580 + ((q) << 2))
128 /* Exception Interrupt Port/Queue Cause register
130 * Their behavior depends on the mapping done using the PCPX2Q
131 * registers. For a given CPU, if the bit associated with a queue is not
132 * set, then a read of the register from this CPU will always return
133 * 0 and a write will have no effect
136 #define MVNETA_INTR_NEW_CAUSE 0x25a0
137 #define MVNETA_INTR_NEW_MASK 0x25a4
139 /* bits 0..7 = TXQ SENT, one bit per queue.
140 * bits 8..15 = RXQ OCCUP, one bit per queue.
141 * bits 16..23 = RXQ FREE, one bit per queue.
142 * bit 29 = OLD_REG_SUM, see old reg ?
143 * bit 30 = TX_ERR_SUM, one bit for 4 ports
144 * bit 31 = MISC_SUM, one bit for 4 ports
146 #define MVNETA_TX_INTR_MASK(nr_txqs) (((1 << nr_txqs) - 1) << 0)
147 #define MVNETA_TX_INTR_MASK_ALL (0xff << 0)
148 #define MVNETA_RX_INTR_MASK(nr_rxqs) (((1 << nr_rxqs) - 1) << 8)
149 #define MVNETA_RX_INTR_MASK_ALL (0xff << 8)
150 #define MVNETA_MISCINTR_INTR_MASK BIT(31)
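/* Illustrative expansion (not part of the driver): with the default of
 * eight TX and eight RX queues, MVNETA_TX_INTR_MASK(8) evaluates to
 * ((1 << 8) - 1) << 0 = 0x000000ff and MVNETA_RX_INTR_MASK(8) to
 * ((1 << 8) - 1) << 8 = 0x0000ff00, i.e. the same values as the
 * corresponding *_ALL masks.
 */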
152 #define MVNETA_INTR_OLD_CAUSE 0x25a8
153 #define MVNETA_INTR_OLD_MASK 0x25ac
155 /* Data Path Port/Queue Cause Register */
156 #define MVNETA_INTR_MISC_CAUSE 0x25b0
157 #define MVNETA_INTR_MISC_MASK 0x25b4
159 #define MVNETA_CAUSE_PHY_STATUS_CHANGE BIT(0)
160 #define MVNETA_CAUSE_LINK_CHANGE BIT(1)
161 #define MVNETA_CAUSE_PTP BIT(4)
163 #define MVNETA_CAUSE_INTERNAL_ADDR_ERR BIT(7)
164 #define MVNETA_CAUSE_RX_OVERRUN BIT(8)
165 #define MVNETA_CAUSE_RX_CRC_ERROR BIT(9)
166 #define MVNETA_CAUSE_RX_LARGE_PKT BIT(10)
167 #define MVNETA_CAUSE_TX_UNDERUN BIT(11)
168 #define MVNETA_CAUSE_PRBS_ERR BIT(12)
169 #define MVNETA_CAUSE_PSC_SYNC_CHANGE BIT(13)
170 #define MVNETA_CAUSE_SERDES_SYNC_ERR BIT(14)
172 #define MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT 16
173 #define MVNETA_CAUSE_BMU_ALLOC_ERR_ALL_MASK (0xF << MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT)
174 #define MVNETA_CAUSE_BMU_ALLOC_ERR_MASK(pool) (1 << (MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT + (pool)))
176 #define MVNETA_CAUSE_TXQ_ERROR_SHIFT 24
177 #define MVNETA_CAUSE_TXQ_ERROR_ALL_MASK (0xFF << MVNETA_CAUSE_TXQ_ERROR_SHIFT)
178 #define MVNETA_CAUSE_TXQ_ERROR_MASK(q) (1 << (MVNETA_CAUSE_TXQ_ERROR_SHIFT + (q)))
180 #define MVNETA_INTR_ENABLE 0x25b8
181 #define MVNETA_TXQ_INTR_ENABLE_ALL_MASK 0x0000ff00
182 #define MVNETA_RXQ_INTR_ENABLE_ALL_MASK 0x000000ff
184 #define MVNETA_RXQ_CMD 0x2680
185 #define MVNETA_RXQ_DISABLE_SHIFT 8
186 #define MVNETA_RXQ_ENABLE_MASK 0x000000ff
187 #define MVETH_TXQ_TOKEN_COUNT_REG(q) (0x2700 + ((q) << 4))
188 #define MVETH_TXQ_TOKEN_CFG_REG(q) (0x2704 + ((q) << 4))
189 #define MVNETA_GMAC_CTRL_0 0x2c00
190 #define MVNETA_GMAC_MAX_RX_SIZE_SHIFT 2
191 #define MVNETA_GMAC_MAX_RX_SIZE_MASK 0x7ffc
192 #define MVNETA_GMAC0_PORT_1000BASE_X BIT(1)
193 #define MVNETA_GMAC0_PORT_ENABLE BIT(0)
194 #define MVNETA_GMAC_CTRL_2 0x2c08
195 #define MVNETA_GMAC2_INBAND_AN_ENABLE BIT(0)
196 #define MVNETA_GMAC2_PCS_ENABLE BIT(3)
197 #define MVNETA_GMAC2_PORT_RGMII BIT(4)
198 #define MVNETA_GMAC2_PORT_RESET BIT(6)
199 #define MVNETA_GMAC_STATUS 0x2c10
200 #define MVNETA_GMAC_LINK_UP BIT(0)
201 #define MVNETA_GMAC_SPEED_1000 BIT(1)
202 #define MVNETA_GMAC_SPEED_100 BIT(2)
203 #define MVNETA_GMAC_FULL_DUPLEX BIT(3)
204 #define MVNETA_GMAC_RX_FLOW_CTRL_ENABLE BIT(4)
205 #define MVNETA_GMAC_TX_FLOW_CTRL_ENABLE BIT(5)
206 #define MVNETA_GMAC_RX_FLOW_CTRL_ACTIVE BIT(6)
207 #define MVNETA_GMAC_TX_FLOW_CTRL_ACTIVE BIT(7)
208 #define MVNETA_GMAC_AN_COMPLETE BIT(11)
209 #define MVNETA_GMAC_SYNC_OK BIT(14)
210 #define MVNETA_GMAC_AUTONEG_CONFIG 0x2c0c
211 #define MVNETA_GMAC_FORCE_LINK_DOWN BIT(0)
212 #define MVNETA_GMAC_FORCE_LINK_PASS BIT(1)
213 #define MVNETA_GMAC_INBAND_AN_ENABLE BIT(2)
214 #define MVNETA_GMAC_AN_BYPASS_ENABLE BIT(3)
215 #define MVNETA_GMAC_INBAND_RESTART_AN BIT(4)
216 #define MVNETA_GMAC_CONFIG_MII_SPEED BIT(5)
217 #define MVNETA_GMAC_CONFIG_GMII_SPEED BIT(6)
218 #define MVNETA_GMAC_AN_SPEED_EN BIT(7)
219 #define MVNETA_GMAC_CONFIG_FLOW_CTRL BIT(8)
220 #define MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL BIT(9)
221 #define MVNETA_GMAC_AN_FLOW_CTRL_EN BIT(11)
222 #define MVNETA_GMAC_CONFIG_FULL_DUPLEX BIT(12)
223 #define MVNETA_GMAC_AN_DUPLEX_EN BIT(13)
224 #define MVNETA_MIB_COUNTERS_BASE 0x3000
225 #define MVNETA_MIB_LATE_COLLISION 0x7c
226 #define MVNETA_DA_FILT_SPEC_MCAST 0x3400
227 #define MVNETA_DA_FILT_OTH_MCAST 0x3500
228 #define MVNETA_DA_FILT_UCAST_BASE 0x3600
229 #define MVNETA_TXQ_BASE_ADDR_REG(q) (0x3c00 + ((q) << 2))
230 #define MVNETA_TXQ_SIZE_REG(q) (0x3c20 + ((q) << 2))
231 #define MVNETA_TXQ_SENT_THRESH_ALL_MASK 0x3fff0000
232 #define MVNETA_TXQ_SENT_THRESH_MASK(coal) ((coal) << 16)
233 #define MVNETA_TXQ_UPDATE_REG(q) (0x3c60 + ((q) << 2))
234 #define MVNETA_TXQ_DEC_SENT_SHIFT 16
235 #define MVNETA_TXQ_DEC_SENT_MASK 0xff
236 #define MVNETA_TXQ_STATUS_REG(q) (0x3c40 + ((q) << 2))
237 #define MVNETA_TXQ_SENT_DESC_SHIFT 16
238 #define MVNETA_TXQ_SENT_DESC_MASK 0x3fff0000
239 #define MVNETA_PORT_TX_RESET 0x3cf0
240 #define MVNETA_PORT_TX_DMA_RESET BIT(0)
241 #define MVNETA_TX_MTU 0x3e0c
242 #define MVNETA_TX_TOKEN_SIZE 0x3e14
243 #define MVNETA_TX_TOKEN_SIZE_MAX 0xffffffff
244 #define MVNETA_TXQ_TOKEN_SIZE_REG(q) (0x3e40 + ((q) << 2))
245 #define MVNETA_TXQ_TOKEN_SIZE_MAX 0x7fffffff
247 #define MVNETA_LPI_CTRL_0 0x2cc0
248 #define MVNETA_LPI_CTRL_1 0x2cc4
249 #define MVNETA_LPI_REQUEST_ENABLE BIT(0)
250 #define MVNETA_LPI_CTRL_2 0x2cc8
251 #define MVNETA_LPI_STATUS 0x2ccc
253 #define MVNETA_CAUSE_TXQ_SENT_DESC_ALL_MASK 0xff
255 /* Descriptor ring Macros */
256 #define MVNETA_QUEUE_NEXT_DESC(q, index) \
257 (((index) < (q)->last_desc) ? ((index) + 1) : 0)
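/* Illustrative example: for a ring of MVNETA_MAX_RXD (512) descriptors,
 * last_desc is 511, so MVNETA_QUEUE_NEXT_DESC(q, 510) yields 511 and
 * MVNETA_QUEUE_NEXT_DESC(q, 511) wraps around to 0.
 */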
259 /* Various constants */
262 #define MVNETA_TXDONE_COAL_PKTS 0 /* interrupt per packet */
263 #define MVNETA_RX_COAL_PKTS 32
264 #define MVNETA_RX_COAL_USEC 100
266 /* The two-byte Marvell header. It either contains a special value used
267 * by Marvell switches when a specific hardware mode is enabled (not
268 * supported by this driver) or is automatically filled with zeroes on
269 * the RX side. Since those two bytes sit in front of the Ethernet
270 * header, they automatically align the IP header on a 4-byte
271 * boundary: the hardware skips those two bytes on its own. */
274 #define MVNETA_MH_SIZE 2
276 #define MVNETA_VLAN_TAG_LEN 4
278 #define MVNETA_TX_CSUM_DEF_SIZE 1600
279 #define MVNETA_TX_CSUM_MAX_SIZE 9800
280 #define MVNETA_ACC_MODE_EXT1 1
281 #define MVNETA_ACC_MODE_EXT2 2
283 #define MVNETA_MAX_DECODE_WIN 6
285 /* Timeout constants */
286 #define MVNETA_TX_DISABLE_TIMEOUT_MSEC 1000
287 #define MVNETA_RX_DISABLE_TIMEOUT_MSEC 1000
288 #define MVNETA_TX_FIFO_EMPTY_TIMEOUT 10000
290 #define MVNETA_TX_MTU_MAX 0x3ffff
292 /* The RSS lookup table actually has 256 entries, but we do not use them all */
295 #define MVNETA_RSS_LU_TABLE_SIZE 1
297 /* Max number of Rx descriptors */
298 #define MVNETA_MAX_RXD 512
300 /* Max number of Tx descriptors */
301 #define MVNETA_MAX_TXD 1024
303 /* Max number of allowed TCP segments for software TSO */
304 #define MVNETA_MAX_TSO_SEGS 100
306 #define MVNETA_MAX_SKB_DESCS (MVNETA_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)
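/* Worked example (assuming the common MAX_SKB_FRAGS value of 17):
 * MVNETA_MAX_SKB_DESCS = 100 * 2 + 17 = 217 descriptors reserved in the
 * worst case for a single TSO skb.
 */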
308 /* descriptor aligned size */
309 #define MVNETA_DESC_ALIGNED_SIZE 32
311 /* Number of bytes to be taken into account by HW when putting incoming data
312 * into the buffers. It is needed in case NET_SKB_PAD exceeds the maximum packet
313 * offset supported by the MVNETA_RXQ_CONFIG_REG(q) registers.
315 #define MVNETA_RX_PKT_OFFSET_CORRECTION 64
317 #define MVNETA_RX_PKT_SIZE(mtu) \
318 ALIGN((mtu) + MVNETA_MH_SIZE + MVNETA_VLAN_TAG_LEN + \
319 ETH_HLEN + ETH_FCS_LEN, \
322 #define IS_TSO_HEADER(txq, addr) \
323 ((addr >= txq->tso_hdrs_phys) && \
324 (addr < txq->tso_hdrs_phys + txq->size * TSO_HEADER_SIZE))
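/* Worked example for MVNETA_RX_PKT_SIZE above (illustrative): for the
 * standard MTU of 1500, the macro adds MVNETA_MH_SIZE (2) +
 * MVNETA_VLAN_TAG_LEN (4) + ETH_HLEN (14) + ETH_FCS_LEN (4), giving
 * 1524 bytes before the final ALIGN() rounding. IS_TSO_HEADER() simply
 * checks whether a DMA address falls inside the txq->size *
 * TSO_HEADER_SIZE byte region starting at txq->tso_hdrs_phys, so TSO
 * headers are not DMA-unmapped like regular fragments on completion.
 */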
326 #define MVNETA_RX_GET_BM_POOL_ID(rxd) \
327 (((rxd)->status & MVNETA_RXD_BM_POOL_MASK) >> MVNETA_RXD_BM_POOL_SHIFT)
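/* Illustrative example: a status word with bit 14 set and bit 13 clear
 * gives (status & MVNETA_RXD_BM_POOL_MASK) >> 13 = 2, i.e. BM pool 2.
 */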
330 ETHTOOL_STAT_EEE_WAKEUP,
331 ETHTOOL_STAT_SKB_ALLOC_ERR,
332 ETHTOOL_STAT_REFILL_ERR,
336 struct mvneta_statistic {
337 unsigned short offset;
339 const char name[ETH_GSTRING_LEN];
346 static const struct mvneta_statistic mvneta_statistics[] = {
347 { 0x3000, T_REG_64, "good_octets_received", },
348 { 0x3010, T_REG_32, "good_frames_received", },
349 { 0x3008, T_REG_32, "bad_octets_received", },
350 { 0x3014, T_REG_32, "bad_frames_received", },
351 { 0x3018, T_REG_32, "broadcast_frames_received", },
352 { 0x301c, T_REG_32, "multicast_frames_received", },
353 { 0x3050, T_REG_32, "unrec_mac_control_received", },
354 { 0x3058, T_REG_32, "good_fc_received", },
355 { 0x305c, T_REG_32, "bad_fc_received", },
356 { 0x3060, T_REG_32, "undersize_received", },
357 { 0x3064, T_REG_32, "fragments_received", },
358 { 0x3068, T_REG_32, "oversize_received", },
359 { 0x306c, T_REG_32, "jabber_received", },
360 { 0x3070, T_REG_32, "mac_receive_error", },
361 { 0x3074, T_REG_32, "bad_crc_event", },
362 { 0x3078, T_REG_32, "collision", },
363 { 0x307c, T_REG_32, "late_collision", },
364 { 0x2484, T_REG_32, "rx_discard", },
365 { 0x2488, T_REG_32, "rx_overrun", },
366 { 0x3020, T_REG_32, "frames_64_octets", },
367 { 0x3024, T_REG_32, "frames_65_to_127_octets", },
368 { 0x3028, T_REG_32, "frames_128_to_255_octets", },
369 { 0x302c, T_REG_32, "frames_256_to_511_octets", },
370 { 0x3030, T_REG_32, "frames_512_to_1023_octets", },
371 { 0x3034, T_REG_32, "frames_1024_to_max_octets", },
372 { 0x3038, T_REG_64, "good_octets_sent", },
373 { 0x3040, T_REG_32, "good_frames_sent", },
374 { 0x3044, T_REG_32, "excessive_collision", },
375 { 0x3048, T_REG_32, "multicast_frames_sent", },
376 { 0x304c, T_REG_32, "broadcast_frames_sent", },
377 { 0x3054, T_REG_32, "fc_sent", },
378 { 0x300c, T_REG_32, "internal_mac_transmit_err", },
379 { ETHTOOL_STAT_EEE_WAKEUP, T_SW, "eee_wakeup_errors", },
380 { ETHTOOL_STAT_SKB_ALLOC_ERR, T_SW, "skb_alloc_errors", },
381 { ETHTOOL_STAT_REFILL_ERR, T_SW, "refill_errors", },
384 struct mvneta_pcpu_stats {
385 struct u64_stats_sync syncp;
394 struct mvneta_pcpu_port {
395 /* Pointer to the shared port */
396 struct mvneta_port *pp;
398 /* Pointer to the CPU-local NAPI struct */
399 struct napi_struct napi;
401 /* Cause of the previous interrupt */
407 struct mvneta_pcpu_port __percpu *ports;
408 struct mvneta_pcpu_stats __percpu *stats;
412 struct mvneta_rx_queue *rxqs;
413 struct mvneta_tx_queue *txqs;
414 struct net_device *dev;
415 struct hlist_node node_online;
416 struct hlist_node node_dead;
418 /* Protect the access to the percpu interrupt registers,
419 * ensuring that the configuration remains coherent.
425 struct napi_struct napi;
435 phy_interface_t phy_interface;
436 struct device_node *dn;
437 unsigned int tx_csum_limit;
438 struct phylink *phylink;
440 struct mvneta_bm *bm_priv;
441 struct mvneta_bm_pool *pool_long;
442 struct mvneta_bm_pool *pool_short;
449 u64 ethtool_stats[ARRAY_SIZE(mvneta_statistics)];
451 u32 indir[MVNETA_RSS_LU_TABLE_SIZE];
453 /* Flags for special SoC configurations */
454 bool neta_armada3700;
455 u16 rx_offset_correction;
456 const struct mbus_dram_target_info *dram_target_info;
459 /* The mvneta_tx_desc and mvneta_rx_desc structures describe the
460 * layout of the transmit and reception DMA descriptors, and their
461 * layout is therefore defined by the hardware design
464 #define MVNETA_TX_L3_OFF_SHIFT 0
465 #define MVNETA_TX_IP_HLEN_SHIFT 8
466 #define MVNETA_TX_L4_UDP BIT(16)
467 #define MVNETA_TX_L3_IP6 BIT(17)
468 #define MVNETA_TXD_IP_CSUM BIT(18)
469 #define MVNETA_TXD_Z_PAD BIT(19)
470 #define MVNETA_TXD_L_DESC BIT(20)
471 #define MVNETA_TXD_F_DESC BIT(21)
472 #define MVNETA_TXD_FLZ_DESC (MVNETA_TXD_Z_PAD | \
473 MVNETA_TXD_L_DESC | \
475 #define MVNETA_TX_L4_CSUM_FULL BIT(30)
476 #define MVNETA_TX_L4_CSUM_NOT BIT(31)
478 #define MVNETA_RXD_ERR_CRC 0x0
479 #define MVNETA_RXD_BM_POOL_SHIFT 13
480 #define MVNETA_RXD_BM_POOL_MASK (BIT(13) | BIT(14))
481 #define MVNETA_RXD_ERR_SUMMARY BIT(16)
482 #define MVNETA_RXD_ERR_OVERRUN BIT(17)
483 #define MVNETA_RXD_ERR_LEN BIT(18)
484 #define MVNETA_RXD_ERR_RESOURCE (BIT(17) | BIT(18))
485 #define MVNETA_RXD_ERR_CODE_MASK (BIT(17) | BIT(18))
486 #define MVNETA_RXD_L3_IP4 BIT(25)
487 #define MVNETA_RXD_LAST_DESC BIT(26)
488 #define MVNETA_RXD_FIRST_DESC BIT(27)
489 #define MVNETA_RXD_FIRST_LAST_DESC (MVNETA_RXD_FIRST_DESC | \
490 MVNETA_RXD_LAST_DESC)
491 #define MVNETA_RXD_L4_CSUM_OK BIT(30)
493 #if defined(__LITTLE_ENDIAN)
494 struct mvneta_tx_desc {
495 u32 command; /* Options used by HW for packet transmitting.*/
496 u16 reserved1; /* csum_l4 (for future use) */
497 u16 data_size; /* Data size of transmitted packet in bytes */
498 u32 buf_phys_addr; /* Physical addr of transmitted buffer */
499 u32 reserved2; /* hw_cmd - (for future use, PMT) */
500 u32 reserved3[4]; /* Reserved - (for future use) */
503 struct mvneta_rx_desc {
504 u32 status; /* Info about received packet */
505 u16 reserved1; /* pnc_info - (for future use, PnC) */
506 u16 data_size; /* Size of received packet in bytes */
508 u32 buf_phys_addr; /* Physical address of the buffer */
509 u32 reserved2; /* pnc_flow_id (for future use, PnC) */
511 u32 buf_cookie; /* cookie for access to RX buffer in rx path */
512 u16 reserved3; /* prefetch_cmd, for future use */
513 u16 reserved4; /* csum_l4 - (for future use, PnC) */
515 u32 reserved5; /* pnc_extra PnC (for future use, PnC) */
516 u32 reserved6; /* hw_cmd (for future use, PnC and HWF) */
519 struct mvneta_tx_desc {
520 u16 data_size; /* Data size of transmitted packet in bytes */
521 u16 reserved1; /* csum_l4 (for future use) */
522 u32 command; /* Options used by HW for packet transmitting.*/
523 u32 reserved2; /* hw_cmd - (for future use, PMT) */
524 u32 buf_phys_addr; /* Physical addr of transmitted buffer */
525 u32 reserved3[4]; /* Reserved - (for future use) */
528 struct mvneta_rx_desc {
529 u16 data_size; /* Size of received packet in bytes */
530 u16 reserved1; /* pnc_info - (for future use, PnC) */
531 u32 status; /* Info about received packet */
533 u32 reserved2; /* pnc_flow_id (for future use, PnC) */
534 u32 buf_phys_addr; /* Physical address of the buffer */
536 u16 reserved4; /* csum_l4 - (for future use, PnC) */
537 u16 reserved3; /* prefetch_cmd, for future use */
538 u32 buf_cookie; /* cookie for access to RX buffer in rx path */
540 u32 reserved5; /* pnc_extra PnC (for future use, PnC) */
541 u32 reserved6; /* hw_cmd (for future use, PnC and HWF) */
545 enum mvneta_tx_buf_type {
551 struct mvneta_tx_buf {
552 enum mvneta_tx_buf_type type;
554 struct xdp_frame *xdpf;
559 struct mvneta_tx_queue {
560 /* Number of this TX queue, in the range 0-7 */
563 /* Number of TX DMA descriptors in the descriptor ring */
566 /* Number of currently used TX DMA descriptors in the descriptor ring */
571 int tx_stop_threshold;
572 int tx_wake_threshold;
574 /* Array of transmitted buffers */
575 struct mvneta_tx_buf *buf;
577 /* Index of last TX DMA descriptor that was inserted */
580 /* Index of the TX DMA descriptor to be cleaned up */
585 /* Virtual address of the TX DMA descriptors array */
586 struct mvneta_tx_desc *descs;
588 /* DMA address of the TX DMA descriptors array */
589 dma_addr_t descs_phys;
591 /* Index of the last TX DMA descriptor */
594 /* Index of the next TX DMA descriptor to process */
595 int next_desc_to_proc;
597 /* DMA buffers for TSO headers */
600 /* DMA address of TSO headers */
601 dma_addr_t tso_hdrs_phys;
603 /* Affinity mask for CPUs */
604 cpumask_t affinity_mask;
607 struct mvneta_rx_queue {
608 /* rx queue number, in the range 0-7 */
611 /* num of rx descriptors in the rx descriptor ring */
617 /* Virtual address of the RX buffer */
618 void **buf_virt_addr;
620 /* Virtual address of the RX DMA descriptors array */
621 struct mvneta_rx_desc *descs;
623 /* DMA address of the RX DMA descriptors array */
624 dma_addr_t descs_phys;
626 /* Index of the last RX DMA descriptor */
629 /* Index of the next RX DMA descriptor to process */
630 int next_desc_to_proc;
632 /* Index of first RX DMA descriptor to refill */
636 /* pointer to an incomplete skb buffer */
645 static enum cpuhp_state online_hpstate;
646 /* The hardware supports eight (8) RX queues, so descriptor rings are
647 * allocated for all of them, although by default only the first queue (rxq_def) receives traffic.
649 static int rxq_number = 8;
650 static int txq_number = 8;
654 static int rx_copybreak __read_mostly = 256;
655 static int rx_header_size __read_mostly = 128;
657 /* HW BM requires that each port be identified by a unique ID */
658 static int global_port_id;
660 #define MVNETA_DRIVER_NAME "mvneta"
661 #define MVNETA_DRIVER_VERSION "1.0"
663 /* Utility/helper methods */
665 /* Write helper method */
666 static void mvreg_write(struct mvneta_port *pp, u32 offset, u32 data)
668 writel(data, pp->base + offset);
671 /* Read helper method */
672 static u32 mvreg_read(struct mvneta_port *pp, u32 offset)
674 return readl(pp->base + offset);
677 /* Increment txq get counter */
678 static void mvneta_txq_inc_get(struct mvneta_tx_queue *txq)
680 txq->txq_get_index++;
681 if (txq->txq_get_index == txq->size)
682 txq->txq_get_index = 0;
685 /* Increment txq put counter */
686 static void mvneta_txq_inc_put(struct mvneta_tx_queue *txq)
688 txq->txq_put_index++;
689 if (txq->txq_put_index == txq->size)
690 txq->txq_put_index = 0;
694 /* Clear all MIB counters */
695 static void mvneta_mib_counters_clear(struct mvneta_port *pp)
700 /* Perform dummy reads from MIB counters */
701 for (i = 0; i < MVNETA_MIB_LATE_COLLISION; i += 4)
702 dummy = mvreg_read(pp, (MVNETA_MIB_COUNTERS_BASE + i));
703 dummy = mvreg_read(pp, MVNETA_RX_DISCARD_FRAME_COUNT);
704 dummy = mvreg_read(pp, MVNETA_OVERRUN_FRAME_COUNT);
707 /* Get System Network Statistics */
709 mvneta_get_stats64(struct net_device *dev,
710 struct rtnl_link_stats64 *stats)
712 struct mvneta_port *pp = netdev_priv(dev);
716 for_each_possible_cpu(cpu) {
717 struct mvneta_pcpu_stats *cpu_stats;
725 cpu_stats = per_cpu_ptr(pp->stats, cpu);
727 start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
728 rx_packets = cpu_stats->rx_packets;
729 rx_bytes = cpu_stats->rx_bytes;
730 rx_dropped = cpu_stats->rx_dropped;
731 rx_errors = cpu_stats->rx_errors;
732 tx_packets = cpu_stats->tx_packets;
733 tx_bytes = cpu_stats->tx_bytes;
734 } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
736 stats->rx_packets += rx_packets;
737 stats->rx_bytes += rx_bytes;
738 stats->rx_dropped += rx_dropped;
739 stats->rx_errors += rx_errors;
740 stats->tx_packets += tx_packets;
741 stats->tx_bytes += tx_bytes;
744 stats->tx_dropped = dev->stats.tx_dropped;
747 /* Rx descriptors helper methods */
749 /* Checks whether the RX descriptor having this status is both the first
750 * and the last descriptor for the RX packet. Each RX packet is currently
751 * received through a single RX descriptor, so not having each RX
752 * descriptor with its first and last bits set is an error
754 static int mvneta_rxq_desc_is_first_last(u32 status)
756 return (status & MVNETA_RXD_FIRST_LAST_DESC) ==
757 MVNETA_RXD_FIRST_LAST_DESC;
760 /* Add number of descriptors ready to receive new packets */
761 static void mvneta_rxq_non_occup_desc_add(struct mvneta_port *pp,
762 struct mvneta_rx_queue *rxq,
765 /* Only MVNETA_RXQ_ADD_NON_OCCUPIED_MAX (255) descriptors can be added at once */
768 while (ndescs > MVNETA_RXQ_ADD_NON_OCCUPIED_MAX) {
769 mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
770 (MVNETA_RXQ_ADD_NON_OCCUPIED_MAX <<
771 MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT));
772 ndescs -= MVNETA_RXQ_ADD_NON_OCCUPIED_MAX;
775 mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
776 (ndescs << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT));
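/* Illustrative example: requesting 300 new descriptors results in two
 * register writes, one adding 255 and one adding 45, because the
 * NON_OCCUPIED field only holds values up to
 * MVNETA_RXQ_ADD_NON_OCCUPIED_MAX (255).
 */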
779 /* Get number of RX descriptors occupied by received packets */
780 static int mvneta_rxq_busy_desc_num_get(struct mvneta_port *pp,
781 struct mvneta_rx_queue *rxq)
785 val = mvreg_read(pp, MVNETA_RXQ_STATUS_REG(rxq->id));
786 return val & MVNETA_RXQ_OCCUPIED_ALL_MASK;
789 /* Update num of rx desc called upon return from rx path or
790 * from mvneta_rxq_drop_pkts().
792 static void mvneta_rxq_desc_num_update(struct mvneta_port *pp,
793 struct mvneta_rx_queue *rxq,
794 int rx_done, int rx_filled)
798 if ((rx_done <= 0xff) && (rx_filled <= 0xff)) {
800 (rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT);
801 mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
805 /* Only 255 descriptors can be added at once */
806 while ((rx_done > 0) || (rx_filled > 0)) {
807 if (rx_done <= 0xff) {
814 if (rx_filled <= 0xff) {
815 val |= rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT;
818 val |= 0xff << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT;
821 mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
825 /* Get pointer to next RX descriptor to be processed by SW */
826 static struct mvneta_rx_desc *
827 mvneta_rxq_next_desc_get(struct mvneta_rx_queue *rxq)
829 int rx_desc = rxq->next_desc_to_proc;
831 rxq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(rxq, rx_desc);
832 prefetch(rxq->descs + rxq->next_desc_to_proc);
833 return rxq->descs + rx_desc;
836 /* Change maximum receive size of the port. */
837 static void mvneta_max_rx_size_set(struct mvneta_port *pp, int max_rx_size)
841 val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
842 val &= ~MVNETA_GMAC_MAX_RX_SIZE_MASK;
843 val |= ((max_rx_size - MVNETA_MH_SIZE) / 2) <<
844 MVNETA_GMAC_MAX_RX_SIZE_SHIFT;
845 mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
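/* Illustrative arithmetic: for max_rx_size = 1518 the value written into
 * the MAX_RX_SIZE field is (1518 - 2) / 2 = 758, which suggests the MAC
 * counts the maximum RX size in 2-byte units, excluding the Marvell
 * header.
 */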
849 /* Set rx queue offset */
850 static void mvneta_rxq_offset_set(struct mvneta_port *pp,
851 struct mvneta_rx_queue *rxq,
856 val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
857 val &= ~MVNETA_RXQ_PKT_OFFSET_ALL_MASK;
860 val |= MVNETA_RXQ_PKT_OFFSET_MASK(offset >> 3);
861 mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
865 /* Tx descriptors helper methods */
867 /* Update HW with number of TX descriptors to be sent */
868 static void mvneta_txq_pend_desc_add(struct mvneta_port *pp,
869 struct mvneta_tx_queue *txq,
874 pend_desc += txq->pending;
876 /* Only 255 Tx descriptors can be added at once */
878 val = min(pend_desc, 255);
879 mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
881 } while (pend_desc > 0);
885 /* Get pointer to next TX descriptor to be processed (send) by HW */
886 static struct mvneta_tx_desc *
887 mvneta_txq_next_desc_get(struct mvneta_tx_queue *txq)
889 int tx_desc = txq->next_desc_to_proc;
891 txq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(txq, tx_desc);
892 return txq->descs + tx_desc;
895 /* Release the last allocated TX descriptor. Useful to handle DMA
896 * mapping failures in the TX path.
898 static void mvneta_txq_desc_put(struct mvneta_tx_queue *txq)
900 if (txq->next_desc_to_proc == 0)
901 txq->next_desc_to_proc = txq->last_desc - 1;
903 txq->next_desc_to_proc--;
906 /* Set rxq buf size */
907 static void mvneta_rxq_buf_size_set(struct mvneta_port *pp,
908 struct mvneta_rx_queue *rxq,
913 val = mvreg_read(pp, MVNETA_RXQ_SIZE_REG(rxq->id));
915 val &= ~MVNETA_RXQ_BUF_SIZE_MASK;
916 val |= ((buf_size >> 3) << MVNETA_RXQ_BUF_SIZE_SHIFT);
918 mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), val);
921 /* Disable buffer management (BM) */
922 static void mvneta_rxq_bm_disable(struct mvneta_port *pp,
923 struct mvneta_rx_queue *rxq)
927 val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
928 val &= ~MVNETA_RXQ_HW_BUF_ALLOC;
929 mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
932 /* Enable buffer management (BM) */
933 static void mvneta_rxq_bm_enable(struct mvneta_port *pp,
934 struct mvneta_rx_queue *rxq)
938 val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
939 val |= MVNETA_RXQ_HW_BUF_ALLOC;
940 mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
943 /* Notify HW about port's assignment of pool for bigger packets */
944 static void mvneta_rxq_long_pool_set(struct mvneta_port *pp,
945 struct mvneta_rx_queue *rxq)
949 val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
950 val &= ~MVNETA_RXQ_LONG_POOL_ID_MASK;
951 val |= (pp->pool_long->id << MVNETA_RXQ_LONG_POOL_ID_SHIFT);
953 mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
956 /* Notify HW about port's assignment of pool for smaller packets */
957 static void mvneta_rxq_short_pool_set(struct mvneta_port *pp,
958 struct mvneta_rx_queue *rxq)
962 val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
963 val &= ~MVNETA_RXQ_SHORT_POOL_ID_MASK;
964 val |= (pp->pool_short->id << MVNETA_RXQ_SHORT_POOL_ID_SHIFT);
966 mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
969 /* Set port's receive buffer size for assigned BM pool */
970 static inline void mvneta_bm_pool_bufsize_set(struct mvneta_port *pp,
976 if (!IS_ALIGNED(buf_size, 8)) {
977 dev_warn(pp->dev->dev.parent,
978 "illegal buf_size value %d, round to %d\n",
979 buf_size, ALIGN(buf_size, 8));
980 buf_size = ALIGN(buf_size, 8);
983 val = mvreg_read(pp, MVNETA_PORT_POOL_BUFFER_SZ_REG(pool_id));
984 val |= buf_size & MVNETA_PORT_POOL_BUFFER_SZ_MASK;
985 mvreg_write(pp, MVNETA_PORT_POOL_BUFFER_SZ_REG(pool_id), val);
988 /* Configure an MBUS window in order to enable access to the BM internal SRAM */
989 static int mvneta_mbus_io_win_set(struct mvneta_port *pp, u32 base, u32 wsize,
992 u32 win_enable, win_protect;
995 win_enable = mvreg_read(pp, MVNETA_BASE_ADDR_ENABLE);
997 if (pp->bm_win_id < 0) {
998 /* Find first not occupied window */
999 for (i = 0; i < MVNETA_MAX_DECODE_WIN; i++) {
1000 if (win_enable & (1 << i)) {
1005 if (i == MVNETA_MAX_DECODE_WIN)
1011 mvreg_write(pp, MVNETA_WIN_BASE(i), 0);
1012 mvreg_write(pp, MVNETA_WIN_SIZE(i), 0);
1015 mvreg_write(pp, MVNETA_WIN_REMAP(i), 0);
1017 mvreg_write(pp, MVNETA_WIN_BASE(i), (base & 0xffff0000) |
1018 (attr << 8) | target);
1020 mvreg_write(pp, MVNETA_WIN_SIZE(i), (wsize - 1) & 0xffff0000);
1022 win_protect = mvreg_read(pp, MVNETA_ACCESS_PROTECT_ENABLE);
1023 win_protect |= 3 << (2 * i);
1024 mvreg_write(pp, MVNETA_ACCESS_PROTECT_ENABLE, win_protect);
1026 win_enable &= ~(1 << i);
1027 mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable);
1032 static int mvneta_bm_port_mbus_init(struct mvneta_port *pp)
1038 /* Get BM window information */
1039 err = mvebu_mbus_get_io_win_info(pp->bm_priv->bppi_phys_addr, &wsize,
1046 /* Open NETA -> BM window */
1047 err = mvneta_mbus_io_win_set(pp, pp->bm_priv->bppi_phys_addr, wsize,
1050 netdev_info(pp->dev, "fail to configure mbus window to BM\n");
1056 /* Assign and initialize pools for port. In case of failure
1057 * the buffer manager will remain disabled for the current port.
1059 static int mvneta_bm_port_init(struct platform_device *pdev,
1060 struct mvneta_port *pp)
1062 struct device_node *dn = pdev->dev.of_node;
1063 u32 long_pool_id, short_pool_id;
1065 if (!pp->neta_armada3700) {
1068 ret = mvneta_bm_port_mbus_init(pp);
1073 if (of_property_read_u32(dn, "bm,pool-long", &long_pool_id)) {
1074 netdev_info(pp->dev, "missing long pool id\n");
1078 /* Create port's long pool depending on mtu */
1079 pp->pool_long = mvneta_bm_pool_use(pp->bm_priv, long_pool_id,
1080 MVNETA_BM_LONG, pp->id,
1081 MVNETA_RX_PKT_SIZE(pp->dev->mtu));
1082 if (!pp->pool_long) {
1083 netdev_info(pp->dev, "fail to obtain long pool for port\n");
1087 pp->pool_long->port_map |= 1 << pp->id;
1089 mvneta_bm_pool_bufsize_set(pp, pp->pool_long->buf_size,
1092 /* If short pool id is not defined, assume using single pool */
1093 if (of_property_read_u32(dn, "bm,pool-short", &short_pool_id))
1094 short_pool_id = long_pool_id;
1096 /* Create port's short pool */
1097 pp->pool_short = mvneta_bm_pool_use(pp->bm_priv, short_pool_id,
1098 MVNETA_BM_SHORT, pp->id,
1099 MVNETA_BM_SHORT_PKT_SIZE);
1100 if (!pp->pool_short) {
1101 netdev_info(pp->dev, "fail to obtain short pool for port\n");
1102 mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
1106 if (short_pool_id != long_pool_id) {
1107 pp->pool_short->port_map |= 1 << pp->id;
1108 mvneta_bm_pool_bufsize_set(pp, pp->pool_short->buf_size,
1109 pp->pool_short->id);
1115 /* Update settings of a pool for bigger packets */
1116 static void mvneta_bm_update_mtu(struct mvneta_port *pp, int mtu)
1118 struct mvneta_bm_pool *bm_pool = pp->pool_long;
1119 struct hwbm_pool *hwbm_pool = &bm_pool->hwbm_pool;
1122 /* Release all buffers from long pool */
1123 mvneta_bm_bufs_free(pp->bm_priv, bm_pool, 1 << pp->id);
1124 if (hwbm_pool->buf_num) {
1125 WARN(1, "cannot free all buffers in pool %d\n",
1130 bm_pool->pkt_size = MVNETA_RX_PKT_SIZE(mtu);
1131 bm_pool->buf_size = MVNETA_RX_BUF_SIZE(bm_pool->pkt_size);
1132 hwbm_pool->frag_size = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
1133 SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(bm_pool->pkt_size));
1135 /* Fill entire long pool */
1136 num = hwbm_pool_add(hwbm_pool, hwbm_pool->size, GFP_ATOMIC);
1137 if (num != hwbm_pool->size) {
1138 WARN(1, "pool %d: %d of %d allocated\n",
1139 bm_pool->id, num, hwbm_pool->size);
1142 mvneta_bm_pool_bufsize_set(pp, bm_pool->buf_size, bm_pool->id);
1147 mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
1148 mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short, 1 << pp->id);
1151 mvreg_write(pp, MVNETA_ACC_MODE, MVNETA_ACC_MODE_EXT1);
1152 netdev_info(pp->dev, "fail to update MTU, fall back to software BM\n");
1155 /* Start the Ethernet port RX and TX activity */
1156 static void mvneta_port_up(struct mvneta_port *pp)
1161 /* Enable all initialized TXs. */
1163 for (queue = 0; queue < txq_number; queue++) {
1164 struct mvneta_tx_queue *txq = &pp->txqs[queue];
1166 q_map |= (1 << queue);
1168 mvreg_write(pp, MVNETA_TXQ_CMD, q_map);
1171 /* Enable all initialized RXQs. */
1172 for (queue = 0; queue < rxq_number; queue++) {
1173 struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
1176 q_map |= (1 << queue);
1178 mvreg_write(pp, MVNETA_RXQ_CMD, q_map);
1181 /* Stop the Ethernet port activity */
1182 static void mvneta_port_down(struct mvneta_port *pp)
1187 /* Stop Rx port activity. Check port Rx activity. */
1188 val = mvreg_read(pp, MVNETA_RXQ_CMD) & MVNETA_RXQ_ENABLE_MASK;
1190 /* Issue stop command for active channels only */
1192 mvreg_write(pp, MVNETA_RXQ_CMD,
1193 val << MVNETA_RXQ_DISABLE_SHIFT);
1195 /* Wait for all Rx activity to terminate. */
1198 if (count++ >= MVNETA_RX_DISABLE_TIMEOUT_MSEC) {
1199 netdev_warn(pp->dev,
1200 "TIMEOUT for RX stopped ! rx_queue_cmd: 0x%08x\n",
1206 val = mvreg_read(pp, MVNETA_RXQ_CMD);
1207 } while (val & MVNETA_RXQ_ENABLE_MASK);
1209 /* Stop Tx port activity. Check port Tx activity. Issue stop
1210 * command for active channels only
1212 val = (mvreg_read(pp, MVNETA_TXQ_CMD)) & MVNETA_TXQ_ENABLE_MASK;
1215 mvreg_write(pp, MVNETA_TXQ_CMD,
1216 (val << MVNETA_TXQ_DISABLE_SHIFT));
1218 /* Wait for all Tx activity to terminate. */
1221 if (count++ >= MVNETA_TX_DISABLE_TIMEOUT_MSEC) {
1222 netdev_warn(pp->dev,
1223 "TIMEOUT for TX stopped status=0x%08x\n",
1229 /* Check TX Command reg that all Txqs are stopped */
1230 val = mvreg_read(pp, MVNETA_TXQ_CMD);
1232 } while (val & MVNETA_TXQ_ENABLE_MASK);
1234 /* Double check to verify that TX FIFO is empty */
1237 if (count++ >= MVNETA_TX_FIFO_EMPTY_TIMEOUT) {
1238 netdev_warn(pp->dev,
1239 "TX FIFO empty timeout status=0x%08x\n",
1245 val = mvreg_read(pp, MVNETA_PORT_STATUS);
1246 } while (!(val & MVNETA_TX_FIFO_EMPTY) &&
1247 (val & MVNETA_TX_IN_PRGRS));
1252 /* Enable the port by setting the port enable bit of the MAC control register */
1253 static void mvneta_port_enable(struct mvneta_port *pp)
1258 val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
1259 val |= MVNETA_GMAC0_PORT_ENABLE;
1260 mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
1263 /* Disable the port and wait for about 200 usec before returning */
1264 static void mvneta_port_disable(struct mvneta_port *pp)
1268 /* Reset the Enable bit in the Serial Control Register */
1269 val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
1270 val &= ~MVNETA_GMAC0_PORT_ENABLE;
1271 mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
1276 /* Multicast tables methods */
1278 /* Set all entries in Unicast MAC Table; queue==-1 means reject all */
1279 static void mvneta_set_ucast_table(struct mvneta_port *pp, int queue)
1287 val = 0x1 | (queue << 1);
1288 val |= (val << 24) | (val << 16) | (val << 8);
1291 for (offset = 0; offset <= 0xc; offset += 4)
1292 mvreg_write(pp, MVNETA_DA_FILT_UCAST_BASE + offset, val);
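/* Illustrative example: for queue 0 each table byte becomes
 * 0x1 | (0 << 1) = 0x01, so the value written to every filter register
 * is 0x01010101, i.e. "accept and steer to RXQ 0" for all four entries
 * packed into that register. The same byte-replication pattern is used
 * by the special and other multicast tables below.
 */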
1295 /* Set all entries in Special Multicast MAC Table; queue==-1 means reject all */
1296 static void mvneta_set_special_mcast_table(struct mvneta_port *pp, int queue)
1304 val = 0x1 | (queue << 1);
1305 val |= (val << 24) | (val << 16) | (val << 8);
1308 for (offset = 0; offset <= 0xfc; offset += 4)
1309 mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + offset, val);
1313 /* Set all entries in Other Multicast MAC Table. queue==-1 means reject all */
1314 static void mvneta_set_other_mcast_table(struct mvneta_port *pp, int queue)
1320 memset(pp->mcast_count, 0, sizeof(pp->mcast_count));
1323 memset(pp->mcast_count, 1, sizeof(pp->mcast_count));
1324 val = 0x1 | (queue << 1);
1325 val |= (val << 24) | (val << 16) | (val << 8);
1328 for (offset = 0; offset <= 0xfc; offset += 4)
1329 mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + offset, val);
1332 static void mvneta_percpu_unmask_interrupt(void *arg)
1334 struct mvneta_port *pp = arg;
1336 /* All the queues are unmasked, but in practice only the ones
1337 * mapped to this CPU will be unmasked
1339 mvreg_write(pp, MVNETA_INTR_NEW_MASK,
1340 MVNETA_RX_INTR_MASK_ALL |
1341 MVNETA_TX_INTR_MASK_ALL |
1342 MVNETA_MISCINTR_INTR_MASK);
1345 static void mvneta_percpu_mask_interrupt(void *arg)
1347 struct mvneta_port *pp = arg;
1349 /* All the queues are masked, but in practice only the ones
1350 * mapped to this CPU will be masked
1352 mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
1353 mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
1354 mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
1357 static void mvneta_percpu_clear_intr_cause(void *arg)
1359 struct mvneta_port *pp = arg;
1361 /* All the queues are cleared, but in practice only the ones
1362 * mapped to this CPU will be cleared
1364 mvreg_write(pp, MVNETA_INTR_NEW_CAUSE, 0);
1365 mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
1366 mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0);
1369 /* This method sets defaults for the NETA port:
1370 * Clears interrupt Cause and Mask registers.
1371 * Clears all MAC tables.
1372 * Sets defaults to all registers.
1373 * Resets RX and TX descriptor rings.
1375 * This method can be called after mvneta_port_down() to return the port
1376 * settings to defaults.
1378 static void mvneta_defaults_set(struct mvneta_port *pp)
1383 int max_cpu = num_present_cpus();
1385 /* Clear all Cause registers */
1386 on_each_cpu(mvneta_percpu_clear_intr_cause, pp, true);
1388 /* Mask all interrupts */
1389 on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
1390 mvreg_write(pp, MVNETA_INTR_ENABLE, 0);
1392 /* Enable MBUS Retry bit16 */
1393 mvreg_write(pp, MVNETA_MBUS_RETRY, 0x20);
1395 /* Set CPU queue access map. CPUs are assigned to the RX and
1396 * TX queues modulo their number. If there is only one TX
1397 * queue then it is assigned to the CPU associated with the default RX queue. */
1401 int rxq_map = 0, txq_map = 0;
1403 if (!pp->neta_armada3700) {
1404 for (rxq = 0; rxq < rxq_number; rxq++)
1405 if ((rxq % max_cpu) == cpu)
1406 rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq);
1408 for (txq = 0; txq < txq_number; txq++)
1409 if ((txq % max_cpu) == cpu)
1410 txq_map |= MVNETA_CPU_TXQ_ACCESS(txq);
1412 /* With only one TX queue we configure a special case
1413 * which allows getting all the IRQs on a single CPU */
1416 if (txq_number == 1)
1417 txq_map = (cpu == pp->rxq_def) ?
1418 MVNETA_CPU_TXQ_ACCESS(1) : 0;
1421 txq_map = MVNETA_CPU_TXQ_ACCESS_ALL_MASK;
1422 rxq_map = MVNETA_CPU_RXQ_ACCESS_ALL_MASK;
1425 mvreg_write(pp, MVNETA_CPU_MAP(cpu), rxq_map | txq_map);
1428 /* Reset RX and TX DMAs */
1429 mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET);
1430 mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET);
1432 /* Disable Legacy WRR, Disable EJP, Release from reset */
1433 mvreg_write(pp, MVNETA_TXQ_CMD_1, 0);
1434 for (queue = 0; queue < txq_number; queue++) {
1435 mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(queue), 0);
1436 mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(queue), 0);
1439 mvreg_write(pp, MVNETA_PORT_TX_RESET, 0);
1440 mvreg_write(pp, MVNETA_PORT_RX_RESET, 0);
1442 /* Set Port Acceleration Mode */
1444 /* HW buffer management + legacy parser */
1445 val = MVNETA_ACC_MODE_EXT2;
1447 /* SW buffer management + legacy parser */
1448 val = MVNETA_ACC_MODE_EXT1;
1449 mvreg_write(pp, MVNETA_ACC_MODE, val);
1452 mvreg_write(pp, MVNETA_BM_ADDRESS, pp->bm_priv->bppi_phys_addr);
1454 /* Update the portCfg register value according to all RxQueue types */
1455 val = MVNETA_PORT_CONFIG_DEFL_VALUE(pp->rxq_def);
1456 mvreg_write(pp, MVNETA_PORT_CONFIG, val);
1459 mvreg_write(pp, MVNETA_PORT_CONFIG_EXTEND, val);
1460 mvreg_write(pp, MVNETA_RX_MIN_FRAME_SIZE, 64);
1462 /* Build PORT_SDMA_CONFIG_REG */
1465 /* Default burst size */
1466 val |= MVNETA_TX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
1467 val |= MVNETA_RX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
1468 val |= MVNETA_RX_NO_DATA_SWAP | MVNETA_TX_NO_DATA_SWAP;
1470 #if defined(__BIG_ENDIAN)
1471 val |= MVNETA_DESC_SWAP;
1474 /* Assign port SDMA configuration */
1475 mvreg_write(pp, MVNETA_SDMA_CONFIG, val);
1477 /* Disable PHY polling in hardware, since we're using the
1478 * kernel phylib to do this.
1480 val = mvreg_read(pp, MVNETA_UNIT_CONTROL);
1481 val &= ~MVNETA_PHY_POLLING_ENABLE;
1482 mvreg_write(pp, MVNETA_UNIT_CONTROL, val);
1484 mvneta_set_ucast_table(pp, -1);
1485 mvneta_set_special_mcast_table(pp, -1);
1486 mvneta_set_other_mcast_table(pp, -1);
1488 /* Set port interrupt enable register - default enable all */
1489 mvreg_write(pp, MVNETA_INTR_ENABLE,
1490 (MVNETA_RXQ_INTR_ENABLE_ALL_MASK
1491 | MVNETA_TXQ_INTR_ENABLE_ALL_MASK));
1493 mvneta_mib_counters_clear(pp);
1496 /* Set max sizes for tx queues */
1497 static void mvneta_txq_max_tx_size_set(struct mvneta_port *pp, int max_tx_size)
1503 mtu = max_tx_size * 8;
1504 if (mtu > MVNETA_TX_MTU_MAX)
1505 mtu = MVNETA_TX_MTU_MAX;
1508 val = mvreg_read(pp, MVNETA_TX_MTU);
1509 val &= ~MVNETA_TX_MTU_MAX;
1511 mvreg_write(pp, MVNETA_TX_MTU, val);
1513 /* The TX token size and all TXQ token sizes must be larger than the MTU */
1514 val = mvreg_read(pp, MVNETA_TX_TOKEN_SIZE);
1516 size = val & MVNETA_TX_TOKEN_SIZE_MAX;
1519 val &= ~MVNETA_TX_TOKEN_SIZE_MAX;
1521 mvreg_write(pp, MVNETA_TX_TOKEN_SIZE, val);
1523 for (queue = 0; queue < txq_number; queue++) {
1524 val = mvreg_read(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue));
1526 size = val & MVNETA_TXQ_TOKEN_SIZE_MAX;
1529 val &= ~MVNETA_TXQ_TOKEN_SIZE_MAX;
1531 mvreg_write(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue), val);
1536 /* Set unicast address */
1537 static void mvneta_set_ucast_addr(struct mvneta_port *pp, u8 last_nibble,
1540 unsigned int unicast_reg;
1541 unsigned int tbl_offset;
1542 unsigned int reg_offset;
1544 /* Locate the Unicast table entry */
1545 last_nibble = (0xf & last_nibble);
1547 /* offset from unicast tbl base */
1548 tbl_offset = (last_nibble / 4) * 4;
1550 /* offset within the above reg */
1551 reg_offset = last_nibble % 4;
1553 unicast_reg = mvreg_read(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset));
1556 /* Clear accepts frame bit at specified unicast DA tbl entry */
1557 unicast_reg &= ~(0xff << (8 * reg_offset));
1559 unicast_reg &= ~(0xff << (8 * reg_offset));
1560 unicast_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
1563 mvreg_write(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset), unicast_reg);
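/* Illustrative example: for last_nibble = 0x6, tbl_offset is
 * (6 / 4) * 4 = 4 and reg_offset is 6 % 4 = 2, so the entry occupies
 * byte 2 of the register at MVNETA_DA_FILT_UCAST_BASE + 4.
 */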
1566 /* Set mac address */
1567 static void mvneta_mac_addr_set(struct mvneta_port *pp, unsigned char *addr,
1574 mac_l = (addr[4] << 8) | (addr[5]);
1575 mac_h = (addr[0] << 24) | (addr[1] << 16) |
1576 (addr[2] << 8) | (addr[3] << 0);
1578 mvreg_write(pp, MVNETA_MAC_ADDR_LOW, mac_l);
1579 mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, mac_h);
1582 /* Accept frames of this address */
1583 mvneta_set_ucast_addr(pp, addr[5], queue);
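/* Illustrative packing example: for the (made-up) address
 * 00:50:43:12:34:56, mac_h = 0x00504312, mac_l = 0x3456, and
 * addr[5] = 0x56 selects the unicast filter entry for nibble 0x6.
 */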
1586 /* Set the number of packets that will be received before an RX interrupt
1587 * is generated by the HW.
1589 static void mvneta_rx_pkts_coal_set(struct mvneta_port *pp,
1590 struct mvneta_rx_queue *rxq, u32 value)
1592 mvreg_write(pp, MVNETA_RXQ_THRESHOLD_REG(rxq->id),
1593 value | MVNETA_RXQ_NON_OCCUPIED(0));
1596 /* Set the time delay in usec before an RX interrupt is generated by the HW. */
1599 static void mvneta_rx_time_coal_set(struct mvneta_port *pp,
1600 struct mvneta_rx_queue *rxq, u32 value)
1603 unsigned long clk_rate;
1605 clk_rate = clk_get_rate(pp->clk);
1606 val = (clk_rate / 1000000) * value;
1608 mvreg_write(pp, MVNETA_RXQ_TIME_COAL_REG(rxq->id), val);
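/* Illustrative arithmetic (the clock rate is board specific): with a
 * 250 MHz core clock, clk_rate / 1000000 = 250 cycles per usec, so the
 * default MVNETA_RX_COAL_USEC of 100 programs 25000 clock cycles.
 */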
1611 /* Set threshold for TX_DONE pkts coalescing */
1612 static void mvneta_tx_done_pkts_coal_set(struct mvneta_port *pp,
1613 struct mvneta_tx_queue *txq, u32 value)
1617 val = mvreg_read(pp, MVNETA_TXQ_SIZE_REG(txq->id));
1619 val &= ~MVNETA_TXQ_SENT_THRESH_ALL_MASK;
1620 val |= MVNETA_TXQ_SENT_THRESH_MASK(value);
1622 mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), val);
1625 /* Handle rx descriptor fill by setting buf_cookie and buf_phys_addr */
1626 static void mvneta_rx_desc_fill(struct mvneta_rx_desc *rx_desc,
1627 u32 phys_addr, void *virt_addr,
1628 struct mvneta_rx_queue *rxq)
1632 rx_desc->buf_phys_addr = phys_addr;
1633 i = rx_desc - rxq->descs;
1634 rxq->buf_virt_addr[i] = virt_addr;
1637 /* Decrement sent descriptors counter */
1638 static void mvneta_txq_sent_desc_dec(struct mvneta_port *pp,
1639 struct mvneta_tx_queue *txq,
1644 /* Only 255 TX descriptors can be updated at once */
1645 while (sent_desc > 0xff) {
1646 val = 0xff << MVNETA_TXQ_DEC_SENT_SHIFT;
1647 mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
1648 sent_desc = sent_desc - 0xff;
1651 val = sent_desc << MVNETA_TXQ_DEC_SENT_SHIFT;
1652 mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
1655 /* Get number of TX descriptors already sent by HW */
1656 static int mvneta_txq_sent_desc_num_get(struct mvneta_port *pp,
1657 struct mvneta_tx_queue *txq)
1662 val = mvreg_read(pp, MVNETA_TXQ_STATUS_REG(txq->id));
1663 sent_desc = (val & MVNETA_TXQ_SENT_DESC_MASK) >>
1664 MVNETA_TXQ_SENT_DESC_SHIFT;
1669 /* Get number of sent descriptors and decrement counter.
1670 * The number of sent descriptors is returned.
1672 static int mvneta_txq_sent_desc_proc(struct mvneta_port *pp,
1673 struct mvneta_tx_queue *txq)
1677 /* Get number of sent descriptors */
1678 sent_desc = mvneta_txq_sent_desc_num_get(pp, txq);
1680 /* Decrement sent descriptors counter */
1682 mvneta_txq_sent_desc_dec(pp, txq, sent_desc);
1687 /* Set TXQ descriptors fields relevant for CSUM calculation */
1688 static u32 mvneta_txq_desc_csum(int l3_offs, int l3_proto,
1689 int ip_hdr_len, int l4_proto)
1693 /* Fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk,
1694 * G_L4_chk, L4_type; required only for checksum calculation */
1697 command = l3_offs << MVNETA_TX_L3_OFF_SHIFT;
1698 command |= ip_hdr_len << MVNETA_TX_IP_HLEN_SHIFT;
1700 if (l3_proto == htons(ETH_P_IP))
1701 command |= MVNETA_TXD_IP_CSUM;
1703 command |= MVNETA_TX_L3_IP6;
1705 if (l4_proto == IPPROTO_TCP)
1706 command |= MVNETA_TX_L4_CSUM_FULL;
1707 else if (l4_proto == IPPROTO_UDP)
1708 command |= MVNETA_TX_L4_UDP | MVNETA_TX_L4_CSUM_FULL;
1710 command |= MVNETA_TX_L4_CSUM_NOT;
1716 /* Display more error info */
1717 static void mvneta_rx_error(struct mvneta_port *pp,
1718 struct mvneta_rx_desc *rx_desc)
1720 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
1721 u32 status = rx_desc->status;
1723 /* update per-cpu counter */
1724 u64_stats_update_begin(&stats->syncp);
1726 u64_stats_update_end(&stats->syncp);
1728 switch (status & MVNETA_RXD_ERR_CODE_MASK) {
1729 case MVNETA_RXD_ERR_CRC:
1730 netdev_err(pp->dev, "bad rx status %08x (crc error), size=%d\n",
1731 status, rx_desc->data_size);
1733 case MVNETA_RXD_ERR_OVERRUN:
1734 netdev_err(pp->dev, "bad rx status %08x (overrun error), size=%d\n",
1735 status, rx_desc->data_size);
1737 case MVNETA_RXD_ERR_LEN:
1738 netdev_err(pp->dev, "bad rx status %08x (max frame length error), size=%d\n",
1739 status, rx_desc->data_size);
1741 case MVNETA_RXD_ERR_RESOURCE:
1742 netdev_err(pp->dev, "bad rx status %08x (resource error), size=%d\n",
1743 status, rx_desc->data_size);
1748 /* Handle RX checksum offload based on the descriptor's status */
1749 static void mvneta_rx_csum(struct mvneta_port *pp, u32 status,
1750 struct sk_buff *skb)
1752 if ((pp->dev->features & NETIF_F_RXCSUM) &&
1753 (status & MVNETA_RXD_L3_IP4) &&
1754 (status & MVNETA_RXD_L4_CSUM_OK)) {
1756 skb->ip_summed = CHECKSUM_UNNECESSARY;
1760 skb->ip_summed = CHECKSUM_NONE;
1763 /* Return the TX queue pointer (find last set bit) according to <cause> returned
1764 * from the tx_done reg. <cause> must not be null. The return value is always a
1765 * valid queue matching the first bit found set in <cause>.
1767 static struct mvneta_tx_queue *mvneta_tx_done_policy(struct mvneta_port *pp,
1770 int queue = fls(cause) - 1;
1772 return &pp->txqs[queue];
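/* Illustrative example: for cause = 0x14 (TXQs 2 and 4 pending),
 * fls(0x14) - 1 = 4, so TXQ 4 is the queue returned here.
 */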
1775 /* Free tx queue skbuffs */
1776 static void mvneta_txq_bufs_free(struct mvneta_port *pp,
1777 struct mvneta_tx_queue *txq, int num,
1778 struct netdev_queue *nq)
1780 unsigned int bytes_compl = 0, pkts_compl = 0;
1783 for (i = 0; i < num; i++) {
1784 struct mvneta_tx_buf *buf = &txq->buf[txq->txq_get_index];
1785 struct mvneta_tx_desc *tx_desc = txq->descs +
1788 mvneta_txq_inc_get(txq);
1790 if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr))
1791 dma_unmap_single(pp->dev->dev.parent,
1792 tx_desc->buf_phys_addr,
1793 tx_desc->data_size, DMA_TO_DEVICE);
1797 bytes_compl += buf->skb->len;
1799 dev_kfree_skb_any(buf->skb);
1802 netdev_tx_completed_queue(nq, pkts_compl, bytes_compl);
1805 /* Handle end of transmission */
1806 static void mvneta_txq_done(struct mvneta_port *pp,
1807 struct mvneta_tx_queue *txq)
1809 struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
1812 tx_done = mvneta_txq_sent_desc_proc(pp, txq);
1816 mvneta_txq_bufs_free(pp, txq, tx_done, nq);
1818 txq->count -= tx_done;
1820 if (netif_tx_queue_stopped(nq)) {
1821 if (txq->count <= txq->tx_wake_threshold)
1822 netif_tx_wake_queue(nq);
1826 /* Refill processing for SW buffer management */
1827 /* Allocate page per descriptor */
1828 static int mvneta_rx_refill(struct mvneta_port *pp,
1829 struct mvneta_rx_desc *rx_desc,
1830 struct mvneta_rx_queue *rxq,
1833 dma_addr_t phys_addr;
1836 page = __dev_alloc_page(gfp_mask);
1840 /* map page for use */
1841 phys_addr = dma_map_page(pp->dev->dev.parent, page, 0, PAGE_SIZE,
1843 if (unlikely(dma_mapping_error(pp->dev->dev.parent, phys_addr))) {
1848 phys_addr += pp->rx_offset_correction;
1849 mvneta_rx_desc_fill(rx_desc, phys_addr, page, rxq);
1853 /* Handle tx checksum */
1854 static u32 mvneta_skb_tx_csum(struct mvneta_port *pp, struct sk_buff *skb)
1856 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1858 __be16 l3_proto = vlan_get_protocol(skb);
1861 if (l3_proto == htons(ETH_P_IP)) {
1862 struct iphdr *ip4h = ip_hdr(skb);
1864 /* Calculate IPv4 checksum and L4 checksum */
1865 ip_hdr_len = ip4h->ihl;
1866 l4_proto = ip4h->protocol;
1867 } else if (l3_proto == htons(ETH_P_IPV6)) {
1868 struct ipv6hdr *ip6h = ipv6_hdr(skb);
1870 /* Read l4_protocol from one of the IPv6 extension headers */
1871 if (skb_network_header_len(skb) > 0)
1872 ip_hdr_len = (skb_network_header_len(skb) >> 2);
1873 l4_proto = ip6h->nexthdr;
1875 return MVNETA_TX_L4_CSUM_NOT;
1877 return mvneta_txq_desc_csum(skb_network_offset(skb),
1878 l3_proto, ip_hdr_len, l4_proto);
1881 return MVNETA_TX_L4_CSUM_NOT;
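/* Illustrative usage: for an untagged IPv4/TCP frame,
 * skb_network_offset() is 14 (the Ethernet header length) and
 * ip4h->ihl is typically 5, so the call above becomes
 * mvneta_txq_desc_csum(14, htons(ETH_P_IP), 5, IPPROTO_TCP), which sets
 * MVNETA_TXD_IP_CSUM and MVNETA_TX_L4_CSUM_FULL in the returned
 * command word.
 */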
1884 /* Drop packets received by the RXQ and free buffers */
1885 static void mvneta_rxq_drop_pkts(struct mvneta_port *pp,
1886 struct mvneta_rx_queue *rxq)
1890 rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);
1892 mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);
1895 for (i = 0; i < rx_done; i++) {
1896 struct mvneta_rx_desc *rx_desc =
1897 mvneta_rxq_next_desc_get(rxq);
1898 u8 pool_id = MVNETA_RX_GET_BM_POOL_ID(rx_desc);
1899 struct mvneta_bm_pool *bm_pool;
1901 bm_pool = &pp->bm_priv->bm_pools[pool_id];
1902 /* Return dropped buffer to the pool */
1903 mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool,
1904 rx_desc->buf_phys_addr);
1909 for (i = 0; i < rxq->size; i++) {
1910 struct mvneta_rx_desc *rx_desc = rxq->descs + i;
1911 void *data = rxq->buf_virt_addr[i];
1912 if (!data || !(rx_desc->buf_phys_addr))
1915 dma_unmap_page(pp->dev->dev.parent, rx_desc->buf_phys_addr,
1916 PAGE_SIZE, DMA_FROM_DEVICE);
1922 int mvneta_rx_refill_queue(struct mvneta_port *pp, struct mvneta_rx_queue *rxq)
1924 struct mvneta_rx_desc *rx_desc;
1925 int curr_desc = rxq->first_to_refill;
1928 for (i = 0; (i < rxq->refill_num) && (i < 64); i++) {
1929 rx_desc = rxq->descs + curr_desc;
1930 if (!(rx_desc->buf_phys_addr)) {
1931 if (mvneta_rx_refill(pp, rx_desc, rxq, GFP_ATOMIC)) {
1932 pr_err("Can't refill queue %d. Done %d from %d\n",
1933 rxq->id, i, rxq->refill_num);
1938 curr_desc = MVNETA_QUEUE_NEXT_DESC(rxq, curr_desc);
1940 rxq->refill_num -= i;
1941 rxq->first_to_refill = curr_desc;
1946 /* Main rx processing when using software buffer management */
1947 static int mvneta_rx_swbm(struct napi_struct *napi,
1948 struct mvneta_port *pp, int budget,
1949 struct mvneta_rx_queue *rxq)
1951 struct net_device *dev = pp->dev;
1952 int rx_todo, rx_proc;
1957 /* Get number of received packets */
1958 rx_todo = mvneta_rxq_busy_desc_num_get(pp, rxq);
1961 /* Fairness NAPI loop */
1962 while ((rcvd_pkts < budget) && (rx_proc < rx_todo)) {
1963 struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq);
1964 unsigned char *data;
1966 dma_addr_t phys_addr;
1967 u32 rx_status, index;
1968 int rx_bytes, skb_size, copy_size;
1969 int frag_num, frag_size, frag_offset;
1971 index = rx_desc - rxq->descs;
1972 page = (struct page *)rxq->buf_virt_addr[index];
1973 data = page_address(page);
1974 /* Prefetch header */
1977 phys_addr = rx_desc->buf_phys_addr;
1978 rx_status = rx_desc->status;
1982 if (rx_status & MVNETA_RXD_FIRST_DESC) {
1983 /* Check errors only for FIRST descriptor */
1984 if (rx_status & MVNETA_RXD_ERR_SUMMARY) {
1985 mvneta_rx_error(pp, rx_desc);
1986 /* leave the descriptor untouched */
1989 rx_bytes = rx_desc->data_size -
1990 (ETH_FCS_LEN + MVNETA_MH_SIZE);
1992 /* Allocate small skb for each new packet */
1993 skb_size = max(rx_copybreak, rx_header_size);
1994 rxq->skb = netdev_alloc_skb_ip_align(dev, skb_size);
1995 if (unlikely(!rxq->skb)) {
1996 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
1999 "Can't allocate skb on queue %d\n",
2002 rxq->skb_alloc_err++;
2004 u64_stats_update_begin(&stats->syncp);
2005 stats->rx_dropped++;
2006 u64_stats_update_end(&stats->syncp);
2009 copy_size = min(skb_size, rx_bytes);
2011 /* Copy data from buffer to SKB, skip Marvell header */
2012 memcpy(rxq->skb->data, data + MVNETA_MH_SIZE,
2014 skb_put(rxq->skb, copy_size);
2015 rxq->left_size = rx_bytes - copy_size;
2017 mvneta_rx_csum(pp, rx_status, rxq->skb);
2018 if (rxq->left_size == 0) {
2019 int size = copy_size + MVNETA_MH_SIZE;
2021 dma_sync_single_range_for_cpu(dev->dev.parent,
2026 /* leave the descriptor and buffer untouched */
2028 /* refill descriptor with new buffer later */
2029 rx_desc->buf_phys_addr = 0;
2032 frag_offset = copy_size + MVNETA_MH_SIZE;
2033 frag_size = min(rxq->left_size,
2034 (int)(PAGE_SIZE - frag_offset));
2035 skb_add_rx_frag(rxq->skb, frag_num, page,
2036 frag_offset, frag_size,
2038 dma_unmap_page(dev->dev.parent, phys_addr,
2039 PAGE_SIZE, DMA_FROM_DEVICE);
2040 rxq->left_size -= frag_size;
2043 /* Middle or Last descriptor */
2044 if (unlikely(!rxq->skb)) {
2045 pr_debug("no skb for rx_status 0x%x\n",
2049 if (!rxq->left_size) {
2050 /* last descriptor has only FCS */
2051 /* and can be discarded */
2052 dma_sync_single_range_for_cpu(dev->dev.parent,
2056 /* leave the descriptor and buffer untouched */
2058 /* refill descriptor with new buffer later */
2059 rx_desc->buf_phys_addr = 0;
2061 frag_num = skb_shinfo(rxq->skb)->nr_frags;
2063 frag_size = min(rxq->left_size,
2064 (int)(PAGE_SIZE - frag_offset));
2065 skb_add_rx_frag(rxq->skb, frag_num, page,
2066 frag_offset, frag_size,
2069 dma_unmap_page(dev->dev.parent, phys_addr,
2070 PAGE_SIZE, DMA_FROM_DEVICE);
2072 rxq->left_size -= frag_size;
2074 } /* Middle or Last descriptor */
2076 if (!(rx_status & MVNETA_RXD_LAST_DESC))
2077 /* no last descriptor this time */
2080 if (rxq->left_size) {
2081 pr_err("get last desc, but left_size (%d) != 0\n",
2083 dev_kfree_skb_any(rxq->skb);
2089 rcvd_bytes += rxq->skb->len;
2091 /* Linux processing */
2092 rxq->skb->protocol = eth_type_trans(rxq->skb, dev);
2094 if (dev->features & NETIF_F_GRO)
2095 napi_gro_receive(napi, rxq->skb);
2097 netif_receive_skb(rxq->skb);
2099 /* clean incomplete skb pointer in queue */
2105 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
2107 u64_stats_update_begin(&stats->syncp);
2108 stats->rx_packets += rcvd_pkts;
2109 stats->rx_bytes += rcvd_bytes;
2110 u64_stats_update_end(&stats->syncp);
2113 /* return some buffers to hardware queue, one at a time is too slow */
2114 refill = mvneta_rx_refill_queue(pp, rxq);
2116 /* Update rxq management counters */
2117 mvneta_rxq_desc_num_update(pp, rxq, rx_proc, refill);
2122 /* Main rx processing when using hardware buffer management */
2123 static int mvneta_rx_hwbm(struct napi_struct *napi,
2124 struct mvneta_port *pp, int rx_todo,
2125 struct mvneta_rx_queue *rxq)
2127 struct net_device *dev = pp->dev;
2132 /* Get number of received packets */
2133 rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);
2135 if (rx_todo > rx_done)
2140 /* Fairness NAPI loop */
2141 while (rx_done < rx_todo) {
2142 struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq);
2143 struct mvneta_bm_pool *bm_pool = NULL;
2144 struct sk_buff *skb;
2145 unsigned char *data;
2146 dma_addr_t phys_addr;
2147 u32 rx_status, frag_size;
2152 rx_status = rx_desc->status;
2153 rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE);
2154 data = (u8 *)(uintptr_t)rx_desc->buf_cookie;
2155 phys_addr = rx_desc->buf_phys_addr;
2156 pool_id = MVNETA_RX_GET_BM_POOL_ID(rx_desc);
2157 bm_pool = &pp->bm_priv->bm_pools[pool_id];
2159 if (!mvneta_rxq_desc_is_first_last(rx_status) ||
2160 (rx_status & MVNETA_RXD_ERR_SUMMARY)) {
2161 err_drop_frame_ret_pool:
2162 /* Return the buffer to the pool */
2163 mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool,
2164 rx_desc->buf_phys_addr);
2166 mvneta_rx_error(pp, rx_desc);
2167 /* leave the descriptor untouched */
2171 if (rx_bytes <= rx_copybreak) {
2172 /* better copy a small frame and not unmap the DMA region */
2173 skb = netdev_alloc_skb_ip_align(dev, rx_bytes);
2175 goto err_drop_frame_ret_pool;
2177 dma_sync_single_range_for_cpu(&pp->bm_priv->pdev->dev,
2178 rx_desc->buf_phys_addr,
2179 MVNETA_MH_SIZE + NET_SKB_PAD,
2182 skb_put_data(skb, data + MVNETA_MH_SIZE + NET_SKB_PAD,
2185 skb->protocol = eth_type_trans(skb, dev);
2186 mvneta_rx_csum(pp, rx_status, skb);
2187 napi_gro_receive(napi, skb);
2190 rcvd_bytes += rx_bytes;
2192 /* Return the buffer to the pool */
2193 mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool,
2194 rx_desc->buf_phys_addr);
2196 /* leave the descriptor and buffer untouched */
2200 /* Refill processing */
2201 err = hwbm_pool_refill(&bm_pool->hwbm_pool, GFP_ATOMIC);
2203 netdev_err(dev, "Linux processing - Can't refill\n");
2205 goto err_drop_frame_ret_pool;
2208 frag_size = bm_pool->hwbm_pool.frag_size;
2210 skb = build_skb(data, frag_size > PAGE_SIZE ? 0 : frag_size);
2212 /* After refill, the old buffer has to be unmapped regardless of
2213 * whether the skb was successfully built or not.
2215 dma_unmap_single(&pp->bm_priv->pdev->dev, phys_addr,
2216 bm_pool->buf_size, DMA_FROM_DEVICE);
2218 goto err_drop_frame;
2221 rcvd_bytes += rx_bytes;
2223 /* Linux processing */
2224 skb_reserve(skb, MVNETA_MH_SIZE + NET_SKB_PAD);
2225 skb_put(skb, rx_bytes);
2227 skb->protocol = eth_type_trans(skb, dev);
2229 mvneta_rx_csum(pp, rx_status, skb);
2231 napi_gro_receive(napi, skb);
2235 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
2237 u64_stats_update_begin(&stats->syncp);
2238 stats->rx_packets += rcvd_pkts;
2239 stats->rx_bytes += rcvd_bytes;
2240 u64_stats_update_end(&stats->syncp);
2243 /* Update rxq management counters */
2244 mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);
2250 mvneta_tso_put_hdr(struct sk_buff *skb,
2251 struct mvneta_port *pp, struct mvneta_tx_queue *txq)
2253 int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2254 struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index];
2255 struct mvneta_tx_desc *tx_desc;
2257 tx_desc = mvneta_txq_next_desc_get(txq);
2258 tx_desc->data_size = hdr_len;
2259 tx_desc->command = mvneta_skb_tx_csum(pp, skb);
2260 tx_desc->command |= MVNETA_TXD_F_DESC;
2261 tx_desc->buf_phys_addr = txq->tso_hdrs_phys +
2262 txq->txq_put_index * TSO_HEADER_SIZE;
2263 buf->type = MVNETA_TYPE_SKB;
2266 mvneta_txq_inc_put(txq);
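/* Illustrative note (not from the original source): each TX queue owns a
 * DMA-coherent area of txq->size * TSO_HEADER_SIZE bytes (txq->tso_hdrs),
 * so slot txq_put_index holds the rebuilt MAC/IP/TCP header of the current
 * segment; the descriptor built above points into that slot and carries the
 * F_DESC flag, while the payload follows in separate data descriptors.
 */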
2270 mvneta_tso_put_data(struct net_device *dev, struct mvneta_tx_queue *txq,
2271 struct sk_buff *skb, char *data, int size,
2272 bool last_tcp, bool is_last)
2274 struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index];
2275 struct mvneta_tx_desc *tx_desc;
2277 tx_desc = mvneta_txq_next_desc_get(txq);
2278 tx_desc->data_size = size;
2279 tx_desc->buf_phys_addr = dma_map_single(dev->dev.parent, data,
2280 size, DMA_TO_DEVICE);
2281 if (unlikely(dma_mapping_error(dev->dev.parent,
2282 tx_desc->buf_phys_addr))) {
2283 mvneta_txq_desc_put(txq);
2287 tx_desc->command = 0;
2288 buf->type = MVNETA_TYPE_SKB;
2292 /* last descriptor in the TCP packet */
2293 tx_desc->command = MVNETA_TXD_L_DESC;
2295 /* last descriptor in SKB */
2299 mvneta_txq_inc_put(txq);
2303 static int mvneta_tx_tso(struct sk_buff *skb, struct net_device *dev,
2304 struct mvneta_tx_queue *txq)
2306 int total_len, data_left;
2308 struct mvneta_port *pp = netdev_priv(dev);
2310 int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2313 /* Count needed descriptors */
2314 if ((txq->count + tso_count_descs(skb)) >= txq->size)
2317 if (skb_headlen(skb) < (skb_transport_offset(skb) + tcp_hdrlen(skb))) {
2318 pr_info("*** Is this even possible???!?!?\n");
2322 /* Initialize the TSO handler, and prepare the first payload */
2323 tso_start(skb, &tso);
2325 total_len = skb->len - hdr_len;
2326 while (total_len > 0) {
2329 data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
2330 total_len -= data_left;
2333 /* prepare packet headers: MAC + IP + TCP */
2334 hdr = txq->tso_hdrs + txq->txq_put_index * TSO_HEADER_SIZE;
2335 tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
2337 mvneta_tso_put_hdr(skb, pp, txq);
2339 while (data_left > 0) {
2343 size = min_t(int, tso.size, data_left);
2345 if (mvneta_tso_put_data(dev, txq, skb,
2352 tso_build_data(skb, &tso, size);
2359 /* Release all used data descriptors; header descriptors must not be unmapped */
2362 for (i = desc_count - 1; i >= 0; i--) {
2363 struct mvneta_tx_desc *tx_desc = txq->descs + i;
2364 if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr))
2365 dma_unmap_single(pp->dev->dev.parent,
2366 tx_desc->buf_phys_addr,
2369 mvneta_txq_desc_put(txq);
2374 /* Handle tx fragmentation processing */
2375 static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb,
2376 struct mvneta_tx_queue *txq)
2378 struct mvneta_tx_desc *tx_desc;
2379 int i, nr_frags = skb_shinfo(skb)->nr_frags;
2381 for (i = 0; i < nr_frags; i++) {
2382 struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index];
2383 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2384 void *addr = page_address(frag->page.p) + frag->page_offset;
2386 tx_desc = mvneta_txq_next_desc_get(txq);
2387 tx_desc->data_size = frag->size;
2389 tx_desc->buf_phys_addr =
2390 dma_map_single(pp->dev->dev.parent, addr,
2391 tx_desc->data_size, DMA_TO_DEVICE);
2393 if (dma_mapping_error(pp->dev->dev.parent,
2394 tx_desc->buf_phys_addr)) {
2395 mvneta_txq_desc_put(txq);
2399 if (i == nr_frags - 1) {
2400 /* Last descriptor */
2401 tx_desc->command = MVNETA_TXD_L_DESC | MVNETA_TXD_Z_PAD;
2404 /* Descriptor in the middle: Not First, Not Last */
2405 tx_desc->command = 0;
2408 buf->type = MVNETA_TYPE_SKB;
2409 mvneta_txq_inc_put(txq);
2415 /* Release all descriptors that were used to map fragments of
2416 * this packet, as well as the corresponding DMA mappings */
2418 for (i = i - 1; i >= 0; i--) {
2419 tx_desc = txq->descs + i;
2420 dma_unmap_single(pp->dev->dev.parent,
2421 tx_desc->buf_phys_addr,
2424 mvneta_txq_desc_put(txq);
2430 /* Main tx processing */
2431 static netdev_tx_t mvneta_tx(struct sk_buff *skb, struct net_device *dev)
2433 struct mvneta_port *pp = netdev_priv(dev);
2434 u16 txq_id = skb_get_queue_mapping(skb);
2435 struct mvneta_tx_queue *txq = &pp->txqs[txq_id];
2436 struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index];
2437 struct mvneta_tx_desc *tx_desc;
2442 if (!netif_running(dev))
2445 if (skb_is_gso(skb)) {
2446 frags = mvneta_tx_tso(skb, dev, txq);
2450 frags = skb_shinfo(skb)->nr_frags + 1;
2452 /* Get a descriptor for the first part of the packet */
2453 tx_desc = mvneta_txq_next_desc_get(txq);
2455 tx_cmd = mvneta_skb_tx_csum(pp, skb);
2457 tx_desc->data_size = skb_headlen(skb);
2459 tx_desc->buf_phys_addr = dma_map_single(dev->dev.parent, skb->data,
2462 if (unlikely(dma_mapping_error(dev->dev.parent,
2463 tx_desc->buf_phys_addr))) {
2464 mvneta_txq_desc_put(txq);
2469 buf->type = MVNETA_TYPE_SKB;
2471 /* First and Last descriptor */
2472 tx_cmd |= MVNETA_TXD_FLZ_DESC;
2473 tx_desc->command = tx_cmd;
2475 mvneta_txq_inc_put(txq);
2477 /* First but not Last */
2478 tx_cmd |= MVNETA_TXD_F_DESC;
2480 mvneta_txq_inc_put(txq);
2481 tx_desc->command = tx_cmd;
2482 /* Continue with other skb fragments */
2483 if (mvneta_tx_frag_process(pp, skb, txq)) {
2484 dma_unmap_single(dev->dev.parent,
2485 tx_desc->buf_phys_addr,
2488 mvneta_txq_desc_put(txq);
2496 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
2497 struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);
2499 netdev_tx_sent_queue(nq, len);
2501 txq->count += frags;
2502 if (txq->count >= txq->tx_stop_threshold)
2503 netif_tx_stop_queue(nq);
2505 if (!skb->xmit_more || netif_xmit_stopped(nq) ||
2506 txq->pending + frags > MVNETA_TXQ_DEC_SENT_MASK)
2507 mvneta_txq_pend_desc_add(pp, txq, frags);
2509 txq->pending += frags;
2511 u64_stats_update_begin(&stats->syncp);
2512 stats->tx_packets++;
2513 stats->tx_bytes += len;
2514 u64_stats_update_end(&stats->syncp);
2516 dev->stats.tx_dropped++;
2517 dev_kfree_skb_any(skb);
2520 return NETDEV_TX_OK;
2524 /* Free tx resources, when resetting a port */
2525 static void mvneta_txq_done_force(struct mvneta_port *pp,
2526 struct mvneta_tx_queue *txq)
2529 struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
2530 int tx_done = txq->count;
2532 mvneta_txq_bufs_free(pp, txq, tx_done, nq);
2536 txq->txq_put_index = 0;
2537 txq->txq_get_index = 0;
2540 /* Handle tx done - called in softirq context. The <cause_tx_done> argument
2541 * must be a valid cause according to MVNETA_TXQ_INTR_MASK_ALL.
2543 static void mvneta_tx_done_gbe(struct mvneta_port *pp, u32 cause_tx_done)
2545 struct mvneta_tx_queue *txq;
2546 struct netdev_queue *nq;
2548 while (cause_tx_done) {
2549 txq = mvneta_tx_done_policy(pp, cause_tx_done);
2551 nq = netdev_get_tx_queue(pp->dev, txq->id);
2552 __netif_tx_lock(nq, smp_processor_id());
2555 mvneta_txq_done(pp, txq);
2557 __netif_tx_unlock(nq);
2558 cause_tx_done &= ~((1 << txq->id));
2562 /* Compute the CRC-8 of the specified address, using a dedicated algorithm
2563 * defined by the hardware spec, which differs from the generic crc8 algorithm. */
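/* Illustrative sketch (not part of the driver) of the per-byte reduction this
 * helper performs, assuming the generator polynomial implied by the 0x100
 * test below is x^8 + x^2 + x + 1 (0x107 with the implicit top bit):
 *
 *	crc = (crc ^ addr[i]) << 8;
 *	for (j = 7; j >= 0; j--)
 *		if (crc & (0x100 << j))
 *			crc ^= 0x107 << j;
 *
 * After all six address bytes are folded in, the low 8 bits of crc index
 * the Other Multicast table.
 */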
2565 static int mvneta_addr_crc(unsigned char *addr)
2570 for (i = 0; i < ETH_ALEN; i++) {
2573 crc = (crc ^ addr[i]) << 8;
2574 for (j = 7; j >= 0; j--) {
2575 if (crc & (0x100 << j))
2583 /* This method controls the net device special MAC multicast support.
2584 * The Special Multicast Table for MAC addresses supports MAC of the form
2585 * 0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF).
2586 * The MAC DA[7:0] bits are used as a pointer to the Special Multicast
2587 * Table entries in the DA-Filter table. This method sets the
2588 * appropriate Special Multicast Table entry. */
2590 static void mvneta_set_special_mcast_addr(struct mvneta_port *pp,
2591 unsigned char last_byte,
2594 unsigned int smc_table_reg;
2595 unsigned int tbl_offset;
2596 unsigned int reg_offset;
2598 /* Register offset from SMC table base */
2599 tbl_offset = (last_byte / 4);
2600 /* Entry offset within the above reg */
2601 reg_offset = last_byte % 4;
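/* Worked example (illustrative): for a DA of 01:00:5e:00:00:25, last_byte is
 * 0x25 (37), so tbl_offset = 9 (the register at byte offset 9 * 4 from the
 * SMC table base) and reg_offset = 1, i.e. byte lane 1 (bits 15:8) of that
 * register holds this entry.
 */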
2603 smc_table_reg = mvreg_read(pp, (MVNETA_DA_FILT_SPEC_MCAST
2607 smc_table_reg &= ~(0xff << (8 * reg_offset));
2609 smc_table_reg &= ~(0xff << (8 * reg_offset));
2610 smc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
2613 mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + tbl_offset * 4,
2617 /* This method controls the network device Other MAC multicast support.
2618 * The Other Multicast Table is used for multicast of another type.
2619 * A CRC-8 is used as an index to the Other Multicast Table entries
2620 * in the DA-Filter table.
2621 * The method gets the CRC-8 value from the calling routine and
2622 * sets the appropriate Other Multicast Table entry according to the specified CRC-8 value. */
2625 static void mvneta_set_other_mcast_addr(struct mvneta_port *pp,
2629 unsigned int omc_table_reg;
2630 unsigned int tbl_offset;
2631 unsigned int reg_offset;
2633 tbl_offset = (crc8 / 4) * 4; /* Register offset from OMC table base */
2634 reg_offset = crc8 % 4; /* Entry offset within the above reg */
2636 omc_table_reg = mvreg_read(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset);
2639 /* Clear accepts frame bit at specified Other DA table entry */
2640 omc_table_reg &= ~(0xff << (8 * reg_offset));
2642 omc_table_reg &= ~(0xff << (8 * reg_offset));
2643 omc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
2646 mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset, omc_table_reg);
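/* Illustrative note (not from the original source): each table entry is a
 * single byte, with bit 0 acting as the "accept frame" bit (the 0x01 above)
 * and the destination RX queue encoded from bit 1 (queue << 1); e.g. queue 2
 * yields the entry byte 0x05.
 */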
2649 /* The network device supports multicast using two tables:
2650 * 1) Special Multicast Table for MAC addresses of the form
2651 * 0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF).
2652 * The MAC DA[7:0] bits are used as a pointer to the Special Multicast
2653 * Table entries in the DA-Filter table.
2654 * 2) Other Multicast Table for multicast of another type. A CRC-8 value
2655 * is used as an index to the Other Multicast Table entries in the DA-Filter table. */
2658 static int mvneta_mcast_addr_set(struct mvneta_port *pp, unsigned char *p_addr,
2661 unsigned char crc_result = 0;
2663 if (memcmp(p_addr, "\x01\x00\x5e\x00\x00", 5) == 0) {
2664 mvneta_set_special_mcast_addr(pp, p_addr[5], queue);
2668 crc_result = mvneta_addr_crc(p_addr);
2670 if (pp->mcast_count[crc_result] == 0) {
2671 netdev_info(pp->dev, "No valid Mcast for crc8=0x%02x\n",
2676 pp->mcast_count[crc_result]--;
2677 if (pp->mcast_count[crc_result] != 0) {
2678 netdev_info(pp->dev,
2679 "After delete there are %d valid Mcast for crc8=0x%02x\n",
2680 pp->mcast_count[crc_result], crc_result);
2684 pp->mcast_count[crc_result]++;
2686 mvneta_set_other_mcast_addr(pp, crc_result, queue);
2691 /* Configure filtering mode of the Ethernet port */
2692 static void mvneta_rx_unicast_promisc_set(struct mvneta_port *pp,
2695 u32 port_cfg_reg, val;
2697 port_cfg_reg = mvreg_read(pp, MVNETA_PORT_CONFIG);
2699 val = mvreg_read(pp, MVNETA_TYPE_PRIO);
2701 /* Set / Clear UPM bit in port configuration register */
2703 /* Accept all Unicast addresses */
2704 port_cfg_reg |= MVNETA_UNI_PROMISC_MODE;
2705 val |= MVNETA_FORCE_UNI;
2706 mvreg_write(pp, MVNETA_MAC_ADDR_LOW, 0xffff);
2707 mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, 0xffffffff);
2709 /* Reject all Unicast addresses */
2710 port_cfg_reg &= ~MVNETA_UNI_PROMISC_MODE;
2711 val &= ~MVNETA_FORCE_UNI;
2714 mvreg_write(pp, MVNETA_PORT_CONFIG, port_cfg_reg);
2715 mvreg_write(pp, MVNETA_TYPE_PRIO, val);
2718 /* register unicast and multicast addresses */
2719 static void mvneta_set_rx_mode(struct net_device *dev)
2721 struct mvneta_port *pp = netdev_priv(dev);
2722 struct netdev_hw_addr *ha;
2724 if (dev->flags & IFF_PROMISC) {
2725 /* Accept all: Multicast + Unicast */
2726 mvneta_rx_unicast_promisc_set(pp, 1);
2727 mvneta_set_ucast_table(pp, pp->rxq_def);
2728 mvneta_set_special_mcast_table(pp, pp->rxq_def);
2729 mvneta_set_other_mcast_table(pp, pp->rxq_def);
2731 /* Accept single Unicast */
2732 mvneta_rx_unicast_promisc_set(pp, 0);
2733 mvneta_set_ucast_table(pp, -1);
2734 mvneta_mac_addr_set(pp, dev->dev_addr, pp->rxq_def);
2736 if (dev->flags & IFF_ALLMULTI) {
2737 /* Accept all multicast */
2738 mvneta_set_special_mcast_table(pp, pp->rxq_def);
2739 mvneta_set_other_mcast_table(pp, pp->rxq_def);
2741 /* Accept only initialized multicast */
2742 mvneta_set_special_mcast_table(pp, -1);
2743 mvneta_set_other_mcast_table(pp, -1);
2745 if (!netdev_mc_empty(dev)) {
2746 netdev_for_each_mc_addr(ha, dev) {
2747 mvneta_mcast_addr_set(pp, ha->addr,
2755 /* Interrupt handling - the callback for request_irq() */
2756 static irqreturn_t mvneta_isr(int irq, void *dev_id)
2758 struct mvneta_port *pp = (struct mvneta_port *)dev_id;
2760 mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
2761 napi_schedule(&pp->napi);
2766 /* Interrupt handling - the callback for request_percpu_irq() */
2767 static irqreturn_t mvneta_percpu_isr(int irq, void *dev_id)
2769 struct mvneta_pcpu_port *port = (struct mvneta_pcpu_port *)dev_id;
2771 disable_percpu_irq(port->pp->dev->irq);
2772 napi_schedule(&port->napi);
2777 static void mvneta_link_change(struct mvneta_port *pp)
2779 u32 gmac_stat = mvreg_read(pp, MVNETA_GMAC_STATUS);
2781 phylink_mac_change(pp->phylink, !!(gmac_stat & MVNETA_GMAC_LINK_UP));
2785 * Bits 0 - 7 of the causeRxTx register indicate that packets were
2786 * transmitted on the corresponding TXQ (Bit 0 is for TX queue 1).
2787 * Bits 8 - 15 of the causeRxTx register indicate that packets were
2788 * received on the corresponding RXQ (Bit 8 is for RX queue 0).
2789 * Each CPU has its own causeRxTx register. */
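/* Worked example (illustrative): if only RX queue 2 has work pending, bit 10
 * of the cause register is set, so ((cause_rx_tx >> 8) & 0xff) == 0x04,
 * fls() returns 3 and rx_queue = 3 - 1 = 2 in mvneta_poll() below.
 */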
2791 static int mvneta_poll(struct napi_struct *napi, int budget)
2796 struct mvneta_port *pp = netdev_priv(napi->dev);
2797 struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports);
2799 if (!netif_running(pp->dev)) {
2800 napi_complete(napi);
2804 /* Read cause register */
2805 cause_rx_tx = mvreg_read(pp, MVNETA_INTR_NEW_CAUSE);
2806 if (cause_rx_tx & MVNETA_MISCINTR_INTR_MASK) {
2807 u32 cause_misc = mvreg_read(pp, MVNETA_INTR_MISC_CAUSE);
2809 mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
2811 if (cause_misc & (MVNETA_CAUSE_PHY_STATUS_CHANGE |
2812 MVNETA_CAUSE_LINK_CHANGE))
2813 mvneta_link_change(pp);
2816 /* Release Tx descriptors */
2817 if (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL) {
2818 mvneta_tx_done_gbe(pp, (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL));
2819 cause_rx_tx &= ~MVNETA_TX_INTR_MASK_ALL;
2822 /* For the case where the last mvneta_poll did not process all
2825 cause_rx_tx |= pp->neta_armada3700 ? pp->cause_rx_tx :
2828 rx_queue = fls(((cause_rx_tx >> 8) & 0xff));
2830 rx_queue = rx_queue - 1;
2832 rx_done = mvneta_rx_hwbm(napi, pp, budget,
2833 &pp->rxqs[rx_queue]);
2835 rx_done = mvneta_rx_swbm(napi, pp, budget,
2836 &pp->rxqs[rx_queue]);
2839 if (rx_done < budget) {
2841 napi_complete_done(napi, rx_done);
2843 if (pp->neta_armada3700) {
2844 unsigned long flags;
2846 local_irq_save(flags);
2847 mvreg_write(pp, MVNETA_INTR_NEW_MASK,
2848 MVNETA_RX_INTR_MASK(rxq_number) |
2849 MVNETA_TX_INTR_MASK(txq_number) |
2850 MVNETA_MISCINTR_INTR_MASK);
2851 local_irq_restore(flags);
2853 enable_percpu_irq(pp->dev->irq, 0);
2857 if (pp->neta_armada3700)
2858 pp->cause_rx_tx = cause_rx_tx;
2860 port->cause_rx_tx = cause_rx_tx;
2865 /* Handle rxq fill: allocates rxq skbs; called when initializing a port */
2866 static int mvneta_rxq_fill(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
2871 for (i = 0; i < num; i++) {
2872 memset(rxq->descs + i, 0, sizeof(struct mvneta_rx_desc));
2873 if (mvneta_rx_refill(pp, rxq->descs + i, rxq,
2876 "%s:rxq %d, %d of %d buffs filled\n",
2877 __func__, rxq->id, i, num);
2882 /* Add this number of RX descriptors as non-occupied (ready to receive packets) */
2885 mvneta_rxq_non_occup_desc_add(pp, rxq, i);
2890 /* Free all packets pending transmit from all TXQs and reset TX port */
2891 static void mvneta_tx_reset(struct mvneta_port *pp)
2895 /* free the skb's in the tx ring */
2896 for (queue = 0; queue < txq_number; queue++)
2897 mvneta_txq_done_force(pp, &pp->txqs[queue]);
2899 mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET);
2900 mvreg_write(pp, MVNETA_PORT_TX_RESET, 0);
2903 static void mvneta_rx_reset(struct mvneta_port *pp)
2905 mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET);
2906 mvreg_write(pp, MVNETA_PORT_RX_RESET, 0);
2909 /* Rx/Tx queue initialization/cleanup methods */
2911 static int mvneta_rxq_sw_init(struct mvneta_port *pp,
2912 struct mvneta_rx_queue *rxq)
2914 rxq->size = pp->rx_ring_size;
2916 /* Allocate memory for RX descriptors */
2917 rxq->descs = dma_alloc_coherent(pp->dev->dev.parent,
2918 rxq->size * MVNETA_DESC_ALIGNED_SIZE,
2919 &rxq->descs_phys, GFP_KERNEL);
2923 rxq->last_desc = rxq->size - 1;
2928 static void mvneta_rxq_hw_init(struct mvneta_port *pp,
2929 struct mvneta_rx_queue *rxq)
2931 /* Set Rx descriptors queue starting address */
2932 mvreg_write(pp, MVNETA_RXQ_BASE_ADDR_REG(rxq->id), rxq->descs_phys);
2933 mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), rxq->size);
2935 /* Set coalescing pkts and time */
2936 mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal);
2937 mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal);
2941 mvneta_rxq_offset_set(pp, rxq, 0);
2942 mvneta_rxq_buf_size_set(pp, rxq, PAGE_SIZE < SZ_64K ?
2944 MVNETA_RX_BUF_SIZE(pp->pkt_size));
2945 mvneta_rxq_bm_disable(pp, rxq);
2946 mvneta_rxq_fill(pp, rxq, rxq->size);
2949 mvneta_rxq_offset_set(pp, rxq,
2950 NET_SKB_PAD - pp->rx_offset_correction);
2952 mvneta_rxq_bm_enable(pp, rxq);
2953 /* Fill RXQ with buffers from RX pool */
2954 mvneta_rxq_long_pool_set(pp, rxq);
2955 mvneta_rxq_short_pool_set(pp, rxq);
2956 mvneta_rxq_non_occup_desc_add(pp, rxq, rxq->size);
2960 /* Create a specified RX queue */
2961 static int mvneta_rxq_init(struct mvneta_port *pp,
2962 struct mvneta_rx_queue *rxq)
2967 ret = mvneta_rxq_sw_init(pp, rxq);
2971 mvneta_rxq_hw_init(pp, rxq);
2976 /* Cleanup Rx queue */
2977 static void mvneta_rxq_deinit(struct mvneta_port *pp,
2978 struct mvneta_rx_queue *rxq)
2980 mvneta_rxq_drop_pkts(pp, rxq);
2983 dev_kfree_skb_any(rxq->skb);
2986 dma_free_coherent(pp->dev->dev.parent,
2987 rxq->size * MVNETA_DESC_ALIGNED_SIZE,
2993 rxq->next_desc_to_proc = 0;
2994 rxq->descs_phys = 0;
2995 rxq->first_to_refill = 0;
2996 rxq->refill_num = 0;
3001 static int mvneta_txq_sw_init(struct mvneta_port *pp,
3002 struct mvneta_tx_queue *txq)
3006 txq->size = pp->tx_ring_size;
3008 /* A queue must always have room for at least one skb.
3009 * Therefore, stop the queue when the number of free entries reaches
3010 * the maximum number of descriptors per skb.
3012 txq->tx_stop_threshold = txq->size - MVNETA_MAX_SKB_DESCS;
3013 txq->tx_wake_threshold = txq->tx_stop_threshold / 2;
3015 /* Allocate memory for TX descriptors */
3016 txq->descs = dma_alloc_coherent(pp->dev->dev.parent,
3017 txq->size * MVNETA_DESC_ALIGNED_SIZE,
3018 &txq->descs_phys, GFP_KERNEL);
3022 txq->last_desc = txq->size - 1;
3024 txq->buf = kmalloc_array(txq->size, sizeof(*txq->buf), GFP_KERNEL);
3026 dma_free_coherent(pp->dev->dev.parent,
3027 txq->size * MVNETA_DESC_ALIGNED_SIZE,
3028 txq->descs, txq->descs_phys);
3032 /* Allocate DMA buffers for TSO MAC/IP/TCP headers */
3033 txq->tso_hdrs = dma_alloc_coherent(pp->dev->dev.parent,
3034 txq->size * TSO_HEADER_SIZE,
3035 &txq->tso_hdrs_phys, GFP_KERNEL);
3036 if (!txq->tso_hdrs) {
3038 dma_free_coherent(pp->dev->dev.parent,
3039 txq->size * MVNETA_DESC_ALIGNED_SIZE,
3040 txq->descs, txq->descs_phys);
3044 /* Setup XPS mapping */
3045 if (pp->neta_armada3700)
3047 else if (txq_number > 1)
3048 cpu = txq->id % num_present_cpus();
3050 cpu = pp->rxq_def % num_present_cpus();
3051 cpumask_set_cpu(cpu, &txq->affinity_mask);
3052 netif_set_xps_queue(pp->dev, &txq->affinity_mask, txq->id);
3057 static void mvneta_txq_hw_init(struct mvneta_port *pp,
3058 struct mvneta_tx_queue *txq)
3060 /* Set maximum bandwidth for enabled TXQs */
3061 mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0x03ffffff);
3062 mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0x3fffffff);
3064 /* Set Tx descriptors queue starting address */
3065 mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), txq->descs_phys);
3066 mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), txq->size);
3068 mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);
3071 /* Create and initialize a tx queue */
3072 static int mvneta_txq_init(struct mvneta_port *pp,
3073 struct mvneta_tx_queue *txq)
3077 ret = mvneta_txq_sw_init(pp, txq);
3081 mvneta_txq_hw_init(pp, txq);
3086 /* Free allocated resources when mvneta_txq_init() fails to allocate memory */
3087 static void mvneta_txq_sw_deinit(struct mvneta_port *pp,
3088 struct mvneta_tx_queue *txq)
3090 struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
3095 dma_free_coherent(pp->dev->dev.parent,
3096 txq->size * TSO_HEADER_SIZE,
3097 txq->tso_hdrs, txq->tso_hdrs_phys);
3099 dma_free_coherent(pp->dev->dev.parent,
3100 txq->size * MVNETA_DESC_ALIGNED_SIZE,
3101 txq->descs, txq->descs_phys);
3103 netdev_tx_reset_queue(nq);
3107 txq->next_desc_to_proc = 0;
3108 txq->descs_phys = 0;
3111 static void mvneta_txq_hw_deinit(struct mvneta_port *pp,
3112 struct mvneta_tx_queue *txq)
3114 /* Set minimum bandwidth for disabled TXQs */
3115 mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0);
3116 mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0);
3118 /* Set Tx descriptors queue starting address and size */
3119 mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), 0);
3120 mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), 0);
3123 static void mvneta_txq_deinit(struct mvneta_port *pp,
3124 struct mvneta_tx_queue *txq)
3126 mvneta_txq_sw_deinit(pp, txq);
3127 mvneta_txq_hw_deinit(pp, txq);
3130 /* Cleanup all Tx queues */
3131 static void mvneta_cleanup_txqs(struct mvneta_port *pp)
3135 for (queue = 0; queue < txq_number; queue++)
3136 mvneta_txq_deinit(pp, &pp->txqs[queue]);
3139 /* Cleanup all Rx queues */
3140 static void mvneta_cleanup_rxqs(struct mvneta_port *pp)
3144 for (queue = 0; queue < rxq_number; queue++)
3145 mvneta_rxq_deinit(pp, &pp->rxqs[queue]);
3149 /* Init all Rx queues */
3150 static int mvneta_setup_rxqs(struct mvneta_port *pp)
3154 for (queue = 0; queue < rxq_number; queue++) {
3155 int err = mvneta_rxq_init(pp, &pp->rxqs[queue]);
3158 netdev_err(pp->dev, "%s: can't create rxq=%d\n",
3160 mvneta_cleanup_rxqs(pp);
3168 /* Init all tx queues */
3169 static int mvneta_setup_txqs(struct mvneta_port *pp)
3173 for (queue = 0; queue < txq_number; queue++) {
3174 int err = mvneta_txq_init(pp, &pp->txqs[queue]);
3176 netdev_err(pp->dev, "%s: can't create txq=%d\n",
3178 mvneta_cleanup_txqs(pp);
3186 static void mvneta_start_dev(struct mvneta_port *pp)
3190 mvneta_max_rx_size_set(pp, pp->pkt_size);
3191 mvneta_txq_max_tx_size_set(pp, pp->pkt_size);
3193 /* start the Rx/Tx activity */
3194 mvneta_port_enable(pp);
3196 if (!pp->neta_armada3700) {
3197 /* Enable polling on the port */
3198 for_each_online_cpu(cpu) {
3199 struct mvneta_pcpu_port *port =
3200 per_cpu_ptr(pp->ports, cpu);
3202 napi_enable(&port->napi);
3205 napi_enable(&pp->napi);
3208 /* Unmask interrupts. It has to be done from each CPU */
3209 on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
3211 mvreg_write(pp, MVNETA_INTR_MISC_MASK,
3212 MVNETA_CAUSE_PHY_STATUS_CHANGE |
3213 MVNETA_CAUSE_LINK_CHANGE);
3215 phylink_start(pp->phylink);
3216 netif_tx_start_all_queues(pp->dev);
3219 static void mvneta_stop_dev(struct mvneta_port *pp)
3223 phylink_stop(pp->phylink);
3225 if (!pp->neta_armada3700) {
3226 for_each_online_cpu(cpu) {
3227 struct mvneta_pcpu_port *port =
3228 per_cpu_ptr(pp->ports, cpu);
3230 napi_disable(&port->napi);
3233 napi_disable(&pp->napi);
3236 netif_carrier_off(pp->dev);
3238 mvneta_port_down(pp);
3239 netif_tx_stop_all_queues(pp->dev);
3241 /* Stop the port activity */
3242 mvneta_port_disable(pp);
3244 /* Clear all ethernet port interrupts */
3245 on_each_cpu(mvneta_percpu_clear_intr_cause, pp, true);
3247 /* Mask all ethernet port interrupts */
3248 on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
3250 mvneta_tx_reset(pp);
3251 mvneta_rx_reset(pp);
3254 static void mvneta_percpu_enable(void *arg)
3256 struct mvneta_port *pp = arg;
3258 enable_percpu_irq(pp->dev->irq, IRQ_TYPE_NONE);
3261 static void mvneta_percpu_disable(void *arg)
3263 struct mvneta_port *pp = arg;
3265 disable_percpu_irq(pp->dev->irq);
3268 /* Change the device mtu */
3269 static int mvneta_change_mtu(struct net_device *dev, int mtu)
3271 struct mvneta_port *pp = netdev_priv(dev);
3274 if (!IS_ALIGNED(MVNETA_RX_PKT_SIZE(mtu), 8)) {
3275 netdev_info(dev, "Illegal MTU value %d, rounding to %d\n",
3276 mtu, ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8));
3277 mtu = ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8);
3282 if (!netif_running(dev)) {
3284 mvneta_bm_update_mtu(pp, mtu);
3286 netdev_update_features(dev);
3290 /* The interface is running, so we have to force a
3291 * reallocation of the queues
3293 mvneta_stop_dev(pp);
3294 on_each_cpu(mvneta_percpu_disable, pp, true);
3296 mvneta_cleanup_txqs(pp);
3297 mvneta_cleanup_rxqs(pp);
3300 mvneta_bm_update_mtu(pp, mtu);
3302 pp->pkt_size = MVNETA_RX_PKT_SIZE(dev->mtu);
3304 ret = mvneta_setup_rxqs(pp);
3306 netdev_err(dev, "unable to setup rxqs after MTU change\n");
3310 ret = mvneta_setup_txqs(pp);
3312 netdev_err(dev, "unable to setup txqs after MTU change\n");
3316 on_each_cpu(mvneta_percpu_enable, pp, true);
3317 mvneta_start_dev(pp);
3319 netdev_update_features(dev);
3324 static netdev_features_t mvneta_fix_features(struct net_device *dev,
3325 netdev_features_t features)
3327 struct mvneta_port *pp = netdev_priv(dev);
3329 if (pp->tx_csum_limit && dev->mtu > pp->tx_csum_limit) {
3330 features &= ~(NETIF_F_IP_CSUM | NETIF_F_TSO);
3332 "Disable IP checksum for MTU greater than %dB\n",
3339 /* Get mac address */
3340 static void mvneta_get_mac_addr(struct mvneta_port *pp, unsigned char *addr)
3342 u32 mac_addr_l, mac_addr_h;
3344 mac_addr_l = mvreg_read(pp, MVNETA_MAC_ADDR_LOW);
3345 mac_addr_h = mvreg_read(pp, MVNETA_MAC_ADDR_HIGH);
3346 addr[0] = (mac_addr_h >> 24) & 0xFF;
3347 addr[1] = (mac_addr_h >> 16) & 0xFF;
3348 addr[2] = (mac_addr_h >> 8) & 0xFF;
3349 addr[3] = mac_addr_h & 0xFF;
3350 addr[4] = (mac_addr_l >> 8) & 0xFF;
3351 addr[5] = mac_addr_l & 0xFF;
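/* Worked example (illustrative): for the address 00:11:22:33:44:55, the
 * registers read back as MVNETA_MAC_ADDR_HIGH = 0x00112233 and the low
 * 16 bits of MVNETA_MAC_ADDR_LOW = 0x4455 (bytes 4 and 5).
 */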
3354 /* Handle setting mac address */
3355 static int mvneta_set_mac_addr(struct net_device *dev, void *addr)
3357 struct mvneta_port *pp = netdev_priv(dev);
3358 struct sockaddr *sockaddr = addr;
3361 ret = eth_prepare_mac_addr_change(dev, addr);
3364 /* Remove previous address table entry */
3365 mvneta_mac_addr_set(pp, dev->dev_addr, -1);
3367 /* Set new addr in hw */
3368 mvneta_mac_addr_set(pp, sockaddr->sa_data, pp->rxq_def);
3370 eth_commit_mac_addr_change(dev, addr);
3374 static void mvneta_validate(struct net_device *ndev, unsigned long *supported,
3375 struct phylink_link_state *state)
3377 __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
3379 /* We only support QSGMII, SGMII, 802.3z and RGMII modes */
3380 if (state->interface != PHY_INTERFACE_MODE_NA &&
3381 state->interface != PHY_INTERFACE_MODE_QSGMII &&
3382 state->interface != PHY_INTERFACE_MODE_SGMII &&
3383 !phy_interface_mode_is_8023z(state->interface) &&
3384 !phy_interface_mode_is_rgmii(state->interface)) {
3385 bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
3389 /* Allow all the expected bits */
3390 phylink_set(mask, Autoneg);
3391 phylink_set_port_modes(mask);
3393 /* Asymmetric pause is unsupported */
3394 phylink_set(mask, Pause);
3395 /* Half-duplex at speeds higher than 100Mbit is unsupported */
3396 phylink_set(mask, 1000baseT_Full);
3397 phylink_set(mask, 1000baseX_Full);
3399 if (!phy_interface_mode_is_8023z(state->interface)) {
3400 /* 10M and 100M are only supported in non-802.3z mode */
3401 phylink_set(mask, 10baseT_Half);
3402 phylink_set(mask, 10baseT_Full);
3403 phylink_set(mask, 100baseT_Half);
3404 phylink_set(mask, 100baseT_Full);
3407 bitmap_and(supported, supported, mask,
3408 __ETHTOOL_LINK_MODE_MASK_NBITS);
3409 bitmap_and(state->advertising, state->advertising, mask,
3410 __ETHTOOL_LINK_MODE_MASK_NBITS);
3413 static int mvneta_mac_link_state(struct net_device *ndev,
3414 struct phylink_link_state *state)
3416 struct mvneta_port *pp = netdev_priv(ndev);
3419 gmac_stat = mvreg_read(pp, MVNETA_GMAC_STATUS);
3421 if (gmac_stat & MVNETA_GMAC_SPEED_1000)
3422 state->speed = SPEED_1000;
3423 else if (gmac_stat & MVNETA_GMAC_SPEED_100)
3424 state->speed = SPEED_100;
3426 state->speed = SPEED_10;
3428 state->an_complete = !!(gmac_stat & MVNETA_GMAC_AN_COMPLETE);
3429 state->link = !!(gmac_stat & MVNETA_GMAC_LINK_UP);
3430 state->duplex = !!(gmac_stat & MVNETA_GMAC_FULL_DUPLEX);
3433 if (gmac_stat & MVNETA_GMAC_RX_FLOW_CTRL_ENABLE)
3434 state->pause |= MLO_PAUSE_RX;
3435 if (gmac_stat & MVNETA_GMAC_TX_FLOW_CTRL_ENABLE)
3436 state->pause |= MLO_PAUSE_TX;
3441 static void mvneta_mac_an_restart(struct net_device *ndev)
3443 struct mvneta_port *pp = netdev_priv(ndev);
3444 u32 gmac_an = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
3446 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
3447 gmac_an | MVNETA_GMAC_INBAND_RESTART_AN);
3448 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
3449 gmac_an & ~MVNETA_GMAC_INBAND_RESTART_AN);
3452 static void mvneta_mac_config(struct net_device *ndev, unsigned int mode,
3453 const struct phylink_link_state *state)
3455 struct mvneta_port *pp = netdev_priv(ndev);
3456 u32 new_ctrl0, gmac_ctrl0 = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
3457 u32 new_ctrl2, gmac_ctrl2 = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
3458 u32 new_clk, gmac_clk = mvreg_read(pp, MVNETA_GMAC_CLOCK_DIVIDER);
3459 u32 new_an, gmac_an = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
3461 new_ctrl0 = gmac_ctrl0 & ~MVNETA_GMAC0_PORT_1000BASE_X;
3462 new_ctrl2 = gmac_ctrl2 & ~(MVNETA_GMAC2_INBAND_AN_ENABLE |
3463 MVNETA_GMAC2_PORT_RESET);
3464 new_clk = gmac_clk & ~MVNETA_GMAC_1MS_CLOCK_ENABLE;
3465 new_an = gmac_an & ~(MVNETA_GMAC_INBAND_AN_ENABLE |
3466 MVNETA_GMAC_INBAND_RESTART_AN |
3467 MVNETA_GMAC_CONFIG_MII_SPEED |
3468 MVNETA_GMAC_CONFIG_GMII_SPEED |
3469 MVNETA_GMAC_AN_SPEED_EN |
3470 MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL |
3471 MVNETA_GMAC_CONFIG_FLOW_CTRL |
3472 MVNETA_GMAC_AN_FLOW_CTRL_EN |
3473 MVNETA_GMAC_CONFIG_FULL_DUPLEX |
3474 MVNETA_GMAC_AN_DUPLEX_EN);
3476 /* Even though it might look weird, when we're configured in
3477 * SGMII or QSGMII mode, the RGMII bit needs to be set.
3479 new_ctrl2 |= MVNETA_GMAC2_PORT_RGMII;
3481 if (state->interface == PHY_INTERFACE_MODE_QSGMII ||
3482 state->interface == PHY_INTERFACE_MODE_SGMII ||
3483 phy_interface_mode_is_8023z(state->interface))
3484 new_ctrl2 |= MVNETA_GMAC2_PCS_ENABLE;
3486 if (phylink_test(state->advertising, Pause))
3487 new_an |= MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL;
3488 if (state->pause & MLO_PAUSE_TXRX_MASK)
3489 new_an |= MVNETA_GMAC_CONFIG_FLOW_CTRL;
3491 if (!phylink_autoneg_inband(mode)) {
3492 /* Phy or fixed speed */
3494 new_an |= MVNETA_GMAC_CONFIG_FULL_DUPLEX;
3496 if (state->speed == SPEED_1000)
3497 new_an |= MVNETA_GMAC_CONFIG_GMII_SPEED;
3498 else if (state->speed == SPEED_100)
3499 new_an |= MVNETA_GMAC_CONFIG_MII_SPEED;
3500 } else if (state->interface == PHY_INTERFACE_MODE_SGMII) {
3501 /* SGMII mode receives the state from the PHY */
3502 new_ctrl2 |= MVNETA_GMAC2_INBAND_AN_ENABLE;
3503 new_clk |= MVNETA_GMAC_1MS_CLOCK_ENABLE;
3504 new_an = (new_an & ~(MVNETA_GMAC_FORCE_LINK_DOWN |
3505 MVNETA_GMAC_FORCE_LINK_PASS)) |
3506 MVNETA_GMAC_INBAND_AN_ENABLE |
3507 MVNETA_GMAC_AN_SPEED_EN |
3508 MVNETA_GMAC_AN_DUPLEX_EN;
3510 /* 802.3z negotiation - only 1000base-X */
3511 new_ctrl0 |= MVNETA_GMAC0_PORT_1000BASE_X;
3512 new_clk |= MVNETA_GMAC_1MS_CLOCK_ENABLE;
3513 new_an = (new_an & ~(MVNETA_GMAC_FORCE_LINK_DOWN |
3514 MVNETA_GMAC_FORCE_LINK_PASS)) |
3515 MVNETA_GMAC_INBAND_AN_ENABLE |
3516 MVNETA_GMAC_CONFIG_GMII_SPEED |
3517 /* The MAC only supports FD mode */
3518 MVNETA_GMAC_CONFIG_FULL_DUPLEX;
3520 if (state->pause & MLO_PAUSE_AN && state->an_enabled)
3521 new_an |= MVNETA_GMAC_AN_FLOW_CTRL_EN;
3524 /* Armada 370 documentation says we can only change the port mode
3525 * and in-band enable when the link is down, so force it down
3526 * while making these changes. We also do this for GMAC_CTRL2 */
3527 if ((new_ctrl0 ^ gmac_ctrl0) & MVNETA_GMAC0_PORT_1000BASE_X ||
3528 (new_ctrl2 ^ gmac_ctrl2) & MVNETA_GMAC2_INBAND_AN_ENABLE ||
3529 (new_an ^ gmac_an) & MVNETA_GMAC_INBAND_AN_ENABLE) {
3530 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
3531 (gmac_an & ~MVNETA_GMAC_FORCE_LINK_PASS) |
3532 MVNETA_GMAC_FORCE_LINK_DOWN);
3535 if (new_ctrl0 != gmac_ctrl0)
3536 mvreg_write(pp, MVNETA_GMAC_CTRL_0, new_ctrl0);
3537 if (new_ctrl2 != gmac_ctrl2)
3538 mvreg_write(pp, MVNETA_GMAC_CTRL_2, new_ctrl2);
3539 if (new_clk != gmac_clk)
3540 mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, new_clk);
3541 if (new_an != gmac_an)
3542 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, new_an);
3544 if (gmac_ctrl2 & MVNETA_GMAC2_PORT_RESET) {
3545 while ((mvreg_read(pp, MVNETA_GMAC_CTRL_2) &
3546 MVNETA_GMAC2_PORT_RESET) != 0)
3551 static void mvneta_set_eee(struct mvneta_port *pp, bool enable)
3555 lpi_ctl1 = mvreg_read(pp, MVNETA_LPI_CTRL_1);
3557 lpi_ctl1 |= MVNETA_LPI_REQUEST_ENABLE;
3559 lpi_ctl1 &= ~MVNETA_LPI_REQUEST_ENABLE;
3560 mvreg_write(pp, MVNETA_LPI_CTRL_1, lpi_ctl1);
3563 static void mvneta_mac_link_down(struct net_device *ndev, unsigned int mode,
3564 phy_interface_t interface)
3566 struct mvneta_port *pp = netdev_priv(ndev);
3569 mvneta_port_down(pp);
3571 if (!phylink_autoneg_inband(mode)) {
3572 val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
3573 val &= ~MVNETA_GMAC_FORCE_LINK_PASS;
3574 val |= MVNETA_GMAC_FORCE_LINK_DOWN;
3575 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
3578 pp->eee_active = false;
3579 mvneta_set_eee(pp, false);
3582 static void mvneta_mac_link_up(struct net_device *ndev, unsigned int mode,
3583 phy_interface_t interface,
3584 struct phy_device *phy)
3586 struct mvneta_port *pp = netdev_priv(ndev);
3589 if (!phylink_autoneg_inband(mode)) {
3590 val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
3591 val &= ~MVNETA_GMAC_FORCE_LINK_DOWN;
3592 val |= MVNETA_GMAC_FORCE_LINK_PASS;
3593 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
3598 if (phy && pp->eee_enabled) {
3599 pp->eee_active = phy_init_eee(phy, 0) >= 0;
3600 mvneta_set_eee(pp, pp->eee_active && pp->tx_lpi_enabled);
3604 static const struct phylink_mac_ops mvneta_phylink_ops = {
3605 .validate = mvneta_validate,
3606 .mac_link_state = mvneta_mac_link_state,
3607 .mac_an_restart = mvneta_mac_an_restart,
3608 .mac_config = mvneta_mac_config,
3609 .mac_link_down = mvneta_mac_link_down,
3610 .mac_link_up = mvneta_mac_link_up,
3613 static int mvneta_mdio_probe(struct mvneta_port *pp)
3615 struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
3616 int err = phylink_of_phy_connect(pp->phylink, pp->dn, 0);
3619 netdev_err(pp->dev, "could not attach PHY: %d\n", err);
3621 phylink_ethtool_get_wol(pp->phylink, &wol);
3622 device_set_wakeup_capable(&pp->dev->dev, !!wol.supported);
3627 static void mvneta_mdio_remove(struct mvneta_port *pp)
3629 phylink_disconnect_phy(pp->phylink);
3632 /* Electing a CPU must be done in an atomic way: it should be done
3633 * after or before the removal/insertion of a CPU, and this function is not reentrant. */
3636 static void mvneta_percpu_elect(struct mvneta_port *pp)
3638 int elected_cpu = 0, max_cpu, cpu, i = 0;
3640 /* Use the CPU associated with the rxq when it is online; in all
3641 * other cases, use CPU 0, which can't be offline. */
3643 if (pp->rxq_def < nr_cpu_ids && cpu_online(pp->rxq_def))
3644 elected_cpu = pp->rxq_def;
3646 max_cpu = num_present_cpus();
3648 for_each_online_cpu(cpu) {
3649 int rxq_map = 0, txq_map = 0;
3652 for (rxq = 0; rxq < rxq_number; rxq++)
3653 if ((rxq % max_cpu) == cpu)
3654 rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq);
3656 if (cpu == elected_cpu)
3657 /* Map the default receive queue to the elected CPU */
3660 rxq_map |= MVNETA_CPU_RXQ_ACCESS(pp->rxq_def);
3662 /* We update the TX queue map only if we have one
3663 * queue. In this case we associate the TX queue to
3664 * the CPU bound to the default RX queue
3666 if (txq_number == 1)
3667 txq_map = (cpu == elected_cpu) ?
3668 MVNETA_CPU_TXQ_ACCESS(1) : 0;
3670 txq_map = mvreg_read(pp, MVNETA_CPU_MAP(cpu)) &
3671 MVNETA_CPU_TXQ_ACCESS_ALL_MASK;
3673 mvreg_write(pp, MVNETA_CPU_MAP(cpu), rxq_map | txq_map);
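/* Worked example (illustrative): with 4 present CPUs and 8 RX queues, the
 * (rxq % max_cpu) == cpu rule above gives CPU 1 access to RXQs 1 and 5,
 * and the elected CPU additionally gets rxq_def mapped to it.
 */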
3675 /* Update the interrupt mask on each CPU according to the new mapping */
3678 smp_call_function_single(cpu, mvneta_percpu_unmask_interrupt,
3685 static int mvneta_cpu_online(unsigned int cpu, struct hlist_node *node)
3688 struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port,
3690 struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
3692 /* Armada 3700's per-cpu interrupt for mvneta is broken, all interrupts
3693 * are routed to CPU 0, so we don't need all the cpu-hotplug support
3695 if (pp->neta_armada3700)
3698 spin_lock(&pp->lock);
3700 * Configuring the driver for a new CPU while the driver is
3701 * stopping is racy, so just avoid it.
3703 if (pp->is_stopped) {
3704 spin_unlock(&pp->lock);
3707 netif_tx_stop_all_queues(pp->dev);
3710 * We have to synchronise on the napi of each CPU except the one
3711 * just being woken up. */
3713 for_each_online_cpu(other_cpu) {
3714 if (other_cpu != cpu) {
3715 struct mvneta_pcpu_port *other_port =
3716 per_cpu_ptr(pp->ports, other_cpu);
3718 napi_synchronize(&other_port->napi);
3722 /* Mask all ethernet port interrupts */
3723 on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
3724 napi_enable(&port->napi);
3727 * Enable per-CPU interrupts on the CPU that is being brought up. */
3730 mvneta_percpu_enable(pp);
3733 * Enable per-CPU interrupt on the one CPU we care about. */
3736 mvneta_percpu_elect(pp);
3738 /* Unmask all ethernet port interrupts */
3739 on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
3740 mvreg_write(pp, MVNETA_INTR_MISC_MASK,
3741 MVNETA_CAUSE_PHY_STATUS_CHANGE |
3742 MVNETA_CAUSE_LINK_CHANGE);
3743 netif_tx_start_all_queues(pp->dev);
3744 spin_unlock(&pp->lock);
3748 static int mvneta_cpu_down_prepare(unsigned int cpu, struct hlist_node *node)
3750 struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port,
3752 struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
3755 * Thanks to this lock we are sure that any pending cpu election is done. */
3758 spin_lock(&pp->lock);
3759 /* Mask all ethernet port interrupts */
3760 on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
3761 spin_unlock(&pp->lock);
3763 napi_synchronize(&port->napi);
3764 napi_disable(&port->napi);
3765 /* Disable per-CPU interrupts on the CPU that is brought down. */
3766 mvneta_percpu_disable(pp);
3770 static int mvneta_cpu_dead(unsigned int cpu, struct hlist_node *node)
3772 struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port,
3775 /* Check if a new CPU must be elected now that this one is down */
3776 spin_lock(&pp->lock);
3777 mvneta_percpu_elect(pp);
3778 spin_unlock(&pp->lock);
3779 /* Unmask all ethernet port interrupts */
3780 on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
3781 mvreg_write(pp, MVNETA_INTR_MISC_MASK,
3782 MVNETA_CAUSE_PHY_STATUS_CHANGE |
3783 MVNETA_CAUSE_LINK_CHANGE);
3784 netif_tx_start_all_queues(pp->dev);
3788 static int mvneta_open(struct net_device *dev)
3790 struct mvneta_port *pp = netdev_priv(dev);
3793 pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu);
3795 ret = mvneta_setup_rxqs(pp);
3799 ret = mvneta_setup_txqs(pp);
3801 goto err_cleanup_rxqs;
3803 /* Connect to port interrupt line */
3804 if (pp->neta_armada3700)
3805 ret = request_irq(pp->dev->irq, mvneta_isr, 0,
3808 ret = request_percpu_irq(pp->dev->irq, mvneta_percpu_isr,
3809 dev->name, pp->ports);
3811 netdev_err(pp->dev, "cannot request irq %d\n", pp->dev->irq);
3812 goto err_cleanup_txqs;
3815 if (!pp->neta_armada3700) {
3816 /* Enable per-CPU interrupts on all the CPUs to handle our RX queue interrupts */
3819 on_each_cpu(mvneta_percpu_enable, pp, true);
3821 pp->is_stopped = false;
3822 /* Register a CPU notifier to handle the case where our CPU
3823 * might be taken offline.
3825 ret = cpuhp_state_add_instance_nocalls(online_hpstate,
3830 ret = cpuhp_state_add_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
3833 goto err_free_online_hp;
3836 /* By default the link is down */
3837 netif_carrier_off(pp->dev);
3839 ret = mvneta_mdio_probe(pp);
3841 netdev_err(dev, "cannot probe MDIO bus\n");
3842 goto err_free_dead_hp;
3845 mvneta_start_dev(pp);
3850 if (!pp->neta_armada3700)
3851 cpuhp_state_remove_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
3854 if (!pp->neta_armada3700)
3855 cpuhp_state_remove_instance_nocalls(online_hpstate,
3858 if (pp->neta_armada3700) {
3859 free_irq(pp->dev->irq, pp);
3861 on_each_cpu(mvneta_percpu_disable, pp, true);
3862 free_percpu_irq(pp->dev->irq, pp->ports);
3865 mvneta_cleanup_txqs(pp);
3867 mvneta_cleanup_rxqs(pp);
3871 /* Stop the port, free port interrupt line */
3872 static int mvneta_stop(struct net_device *dev)
3874 struct mvneta_port *pp = netdev_priv(dev);
3876 if (!pp->neta_armada3700) {
3877 /* Inform that we are stopping so we don't want to setup the
3878 * driver for new CPUs in the notifiers. The code of the
3879 * notifier for CPU online is protected by the same spinlock,
3880 * so when we get the lock, the notifier work is done. */
3882 spin_lock(&pp->lock);
3883 pp->is_stopped = true;
3884 spin_unlock(&pp->lock);
3886 mvneta_stop_dev(pp);
3887 mvneta_mdio_remove(pp);
3889 cpuhp_state_remove_instance_nocalls(online_hpstate,
3891 cpuhp_state_remove_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
3893 on_each_cpu(mvneta_percpu_disable, pp, true);
3894 free_percpu_irq(dev->irq, pp->ports);
3896 mvneta_stop_dev(pp);
3897 mvneta_mdio_remove(pp);
3898 free_irq(dev->irq, pp);
3901 mvneta_cleanup_rxqs(pp);
3902 mvneta_cleanup_txqs(pp);
3907 static int mvneta_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
3909 struct mvneta_port *pp = netdev_priv(dev);
3911 return phylink_mii_ioctl(pp->phylink, ifr, cmd);
3914 /* Ethtool methods */
3916 /* Set link ksettings (phy address, speed) for ethtool */
3918 mvneta_ethtool_set_link_ksettings(struct net_device *ndev,
3919 const struct ethtool_link_ksettings *cmd)
3921 struct mvneta_port *pp = netdev_priv(ndev);
3923 return phylink_ethtool_ksettings_set(pp->phylink, cmd);
3926 /* Get link ksettings for ethtool */
3928 mvneta_ethtool_get_link_ksettings(struct net_device *ndev,
3929 struct ethtool_link_ksettings *cmd)
3931 struct mvneta_port *pp = netdev_priv(ndev);
3933 return phylink_ethtool_ksettings_get(pp->phylink, cmd);
3936 static int mvneta_ethtool_nway_reset(struct net_device *dev)
3938 struct mvneta_port *pp = netdev_priv(dev);
3940 return phylink_ethtool_nway_reset(pp->phylink);
3943 /* Set interrupt coalescing for ethtool */
3944 static int mvneta_ethtool_set_coalesce(struct net_device *dev,
3945 struct ethtool_coalesce *c)
3947 struct mvneta_port *pp = netdev_priv(dev);
3950 for (queue = 0; queue < rxq_number; queue++) {
3951 struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
3952 rxq->time_coal = c->rx_coalesce_usecs;
3953 rxq->pkts_coal = c->rx_max_coalesced_frames;
3954 mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal);
3955 mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal);
3958 for (queue = 0; queue < txq_number; queue++) {
3959 struct mvneta_tx_queue *txq = &pp->txqs[queue];
3960 txq->done_pkts_coal = c->tx_max_coalesced_frames;
3961 mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);
3967 /* Get interrupt coalescing for ethtool */
3968 static int mvneta_ethtool_get_coalesce(struct net_device *dev,
3969 struct ethtool_coalesce *c)
3971 struct mvneta_port *pp = netdev_priv(dev);
3973 c->rx_coalesce_usecs = pp->rxqs[0].time_coal;
3974 c->rx_max_coalesced_frames = pp->rxqs[0].pkts_coal;
3976 c->tx_max_coalesced_frames = pp->txqs[0].done_pkts_coal;
3981 static void mvneta_ethtool_get_drvinfo(struct net_device *dev,
3982 struct ethtool_drvinfo *drvinfo)
3984 strlcpy(drvinfo->driver, MVNETA_DRIVER_NAME,
3985 sizeof(drvinfo->driver));
3986 strlcpy(drvinfo->version, MVNETA_DRIVER_VERSION,
3987 sizeof(drvinfo->version));
3988 strlcpy(drvinfo->bus_info, dev_name(&dev->dev),
3989 sizeof(drvinfo->bus_info));
3993 static void mvneta_ethtool_get_ringparam(struct net_device *netdev,
3994 struct ethtool_ringparam *ring)
3996 struct mvneta_port *pp = netdev_priv(netdev);
3998 ring->rx_max_pending = MVNETA_MAX_RXD;
3999 ring->tx_max_pending = MVNETA_MAX_TXD;
4000 ring->rx_pending = pp->rx_ring_size;
4001 ring->tx_pending = pp->tx_ring_size;
4004 static int mvneta_ethtool_set_ringparam(struct net_device *dev,
4005 struct ethtool_ringparam *ring)
4007 struct mvneta_port *pp = netdev_priv(dev);
4009 if ((ring->rx_pending == 0) || (ring->tx_pending == 0))
4011 pp->rx_ring_size = ring->rx_pending < MVNETA_MAX_RXD ?
4012 ring->rx_pending : MVNETA_MAX_RXD;
4014 pp->tx_ring_size = clamp_t(u16, ring->tx_pending,
4015 MVNETA_MAX_SKB_DESCS * 2, MVNETA_MAX_TXD);
4016 if (pp->tx_ring_size != ring->tx_pending)
4017 netdev_warn(dev, "TX queue size set to %u (requested %u)\n",
4018 pp->tx_ring_size, ring->tx_pending);
4020 if (netif_running(dev)) {
4022 if (mvneta_open(dev)) {
4024 "error on opening device after ring param change\n");
4032 static void mvneta_ethtool_get_pauseparam(struct net_device *dev,
4033 struct ethtool_pauseparam *pause)
4035 struct mvneta_port *pp = netdev_priv(dev);
4037 phylink_ethtool_get_pauseparam(pp->phylink, pause);
4040 static int mvneta_ethtool_set_pauseparam(struct net_device *dev,
4041 struct ethtool_pauseparam *pause)
4043 struct mvneta_port *pp = netdev_priv(dev);
4045 return phylink_ethtool_set_pauseparam(pp->phylink, pause);
4048 static void mvneta_ethtool_get_strings(struct net_device *netdev, u32 sset,
4051 if (sset == ETH_SS_STATS) {
4054 for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++)
4055 memcpy(data + i * ETH_GSTRING_LEN,
4056 mvneta_statistics[i].name, ETH_GSTRING_LEN);
4060 static void mvneta_ethtool_update_stats(struct mvneta_port *pp)
4062 const struct mvneta_statistic *s;
4063 void __iomem *base = pp->base;
4068 for (i = 0, s = mvneta_statistics;
4069 s < mvneta_statistics + ARRAY_SIZE(mvneta_statistics);
4075 val = readl_relaxed(base + s->offset);
4078 /* Docs say to read low 32-bit then high */
4079 low = readl_relaxed(base + s->offset);
4080 high = readl_relaxed(base + s->offset + 4);
4081 val = (u64)high << 32 | low;
4084 switch (s->offset) {
4085 case ETHTOOL_STAT_EEE_WAKEUP:
4086 val = phylink_get_eee_err(pp->phylink);
4088 case ETHTOOL_STAT_SKB_ALLOC_ERR:
4089 val = pp->rxqs[0].skb_alloc_err;
4091 case ETHTOOL_STAT_REFILL_ERR:
4092 val = pp->rxqs[0].refill_err;
4098 pp->ethtool_stats[i] += val;
4102 static void mvneta_ethtool_get_stats(struct net_device *dev,
4103 struct ethtool_stats *stats, u64 *data)
4105 struct mvneta_port *pp = netdev_priv(dev);
4108 mvneta_ethtool_update_stats(pp);
4110 for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++)
4111 *data++ = pp->ethtool_stats[i];
4114 static int mvneta_ethtool_get_sset_count(struct net_device *dev, int sset)
4116 if (sset == ETH_SS_STATS)
4117 return ARRAY_SIZE(mvneta_statistics);
4121 static u32 mvneta_ethtool_get_rxfh_indir_size(struct net_device *dev)
4123 return MVNETA_RSS_LU_TABLE_SIZE;
4126 static int mvneta_ethtool_get_rxnfc(struct net_device *dev,
4127 struct ethtool_rxnfc *info,
4128 u32 *rules __always_unused)
4130 switch (info->cmd) {
4131 case ETHTOOL_GRXRINGS:
4132 info->data = rxq_number;
4141 static int mvneta_config_rss(struct mvneta_port *pp)
4146 netif_tx_stop_all_queues(pp->dev);
4148 on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
4150 if (!pp->neta_armada3700) {
4151 /* We have to synchronise on the napi of each CPU */
4152 for_each_online_cpu(cpu) {
4153 struct mvneta_pcpu_port *pcpu_port =
4154 per_cpu_ptr(pp->ports, cpu);
4156 napi_synchronize(&pcpu_port->napi);
4157 napi_disable(&pcpu_port->napi);
4160 napi_synchronize(&pp->napi);
4161 napi_disable(&pp->napi);
4164 pp->rxq_def = pp->indir[0];
4166 /* Update unicast mapping */
4167 mvneta_set_rx_mode(pp->dev);
4169 /* Update the portCfg register value according to all RxQueue types */
4170 val = MVNETA_PORT_CONFIG_DEFL_VALUE(pp->rxq_def);
4171 mvreg_write(pp, MVNETA_PORT_CONFIG, val);
4173 /* Update the elected CPU matching the new rxq_def */
4174 spin_lock(&pp->lock);
4175 mvneta_percpu_elect(pp);
4176 spin_unlock(&pp->lock);
4178 if (!pp->neta_armada3700) {
4179 /* We have to synchronise on the napi of each CPU */
4180 for_each_online_cpu(cpu) {
4181 struct mvneta_pcpu_port *pcpu_port =
4182 per_cpu_ptr(pp->ports, cpu);
4184 napi_enable(&pcpu_port->napi);
4187 napi_enable(&pp->napi);
4190 netif_tx_start_all_queues(pp->dev);
4195 static int mvneta_ethtool_set_rxfh(struct net_device *dev, const u32 *indir,
4196 const u8 *key, const u8 hfunc)
4198 struct mvneta_port *pp = netdev_priv(dev);
4200 /* Current code for Armada 3700 doesn't support RSS features yet */
4201 if (pp->neta_armada3700)
4204 /* We require at least one supported parameter to be changed
4205 * and no change in any of the unsupported parameters
4208 (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
4214 memcpy(pp->indir, indir, MVNETA_RSS_LU_TABLE_SIZE);
4216 return mvneta_config_rss(pp);
4219 static int mvneta_ethtool_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
4222 struct mvneta_port *pp = netdev_priv(dev);
4224 /* Current code for Armada 3700 doesn't support RSS features yet */
4225 if (pp->neta_armada3700)
4229 *hfunc = ETH_RSS_HASH_TOP;
4234 memcpy(indir, pp->indir, MVNETA_RSS_LU_TABLE_SIZE);
4239 static void mvneta_ethtool_get_wol(struct net_device *dev,
4240 struct ethtool_wolinfo *wol)
4242 struct mvneta_port *pp = netdev_priv(dev);
4244 phylink_ethtool_get_wol(pp->phylink, wol);
4247 static int mvneta_ethtool_set_wol(struct net_device *dev,
4248 struct ethtool_wolinfo *wol)
4250 struct mvneta_port *pp = netdev_priv(dev);
4253 ret = phylink_ethtool_set_wol(pp->phylink, wol);
4255 device_set_wakeup_enable(&dev->dev, !!wol->wolopts);
4260 static int mvneta_ethtool_get_eee(struct net_device *dev,
4261 struct ethtool_eee *eee)
4263 struct mvneta_port *pp = netdev_priv(dev);
4266 lpi_ctl0 = mvreg_read(pp, MVNETA_LPI_CTRL_0);
4268 eee->eee_enabled = pp->eee_enabled;
4269 eee->eee_active = pp->eee_active;
4270 eee->tx_lpi_enabled = pp->tx_lpi_enabled;
4271 eee->tx_lpi_timer = (lpi_ctl0) >> 8; // * scale;
4273 return phylink_ethtool_get_eee(pp->phylink, eee);
4276 static int mvneta_ethtool_set_eee(struct net_device *dev,
4277 struct ethtool_eee *eee)
4279 struct mvneta_port *pp = netdev_priv(dev);
4282 /* The Armada 37x documents do not give limits for this other than
4283 * it being an 8-bit register. */
4284 if (eee->tx_lpi_enabled &&
4285 (eee->tx_lpi_timer < 0 || eee->tx_lpi_timer > 255))
4288 lpi_ctl0 = mvreg_read(pp, MVNETA_LPI_CTRL_0);
4289 lpi_ctl0 &= ~(0xff << 8);
4290 lpi_ctl0 |= eee->tx_lpi_timer << 8;
4291 mvreg_write(pp, MVNETA_LPI_CTRL_0, lpi_ctl0);
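/* Worked example (illustrative): tx_lpi_timer = 0x20 clears bits 15:8 of
 * MVNETA_LPI_CTRL_0 and rewrites them with 0x20 (field value 0x2000),
 * leaving the remaining bits of the register untouched.
 */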
4293 pp->eee_enabled = eee->eee_enabled;
4294 pp->tx_lpi_enabled = eee->tx_lpi_enabled;
4296 mvneta_set_eee(pp, eee->tx_lpi_enabled && eee->eee_enabled);
4298 return phylink_ethtool_set_eee(pp->phylink, eee);
4301 static const struct net_device_ops mvneta_netdev_ops = {
4302 .ndo_open = mvneta_open,
4303 .ndo_stop = mvneta_stop,
4304 .ndo_start_xmit = mvneta_tx,
4305 .ndo_set_rx_mode = mvneta_set_rx_mode,
4306 .ndo_set_mac_address = mvneta_set_mac_addr,
4307 .ndo_change_mtu = mvneta_change_mtu,
4308 .ndo_fix_features = mvneta_fix_features,
4309 .ndo_get_stats64 = mvneta_get_stats64,
4310 .ndo_do_ioctl = mvneta_ioctl,
4313 static const struct ethtool_ops mvneta_eth_tool_ops = {
4314 .nway_reset = mvneta_ethtool_nway_reset,
4315 .get_link = ethtool_op_get_link,
4316 .set_coalesce = mvneta_ethtool_set_coalesce,
4317 .get_coalesce = mvneta_ethtool_get_coalesce,
4318 .get_drvinfo = mvneta_ethtool_get_drvinfo,
4319 .get_ringparam = mvneta_ethtool_get_ringparam,
4320 .set_ringparam = mvneta_ethtool_set_ringparam,
4321 .get_pauseparam = mvneta_ethtool_get_pauseparam,
4322 .set_pauseparam = mvneta_ethtool_set_pauseparam,
4323 .get_strings = mvneta_ethtool_get_strings,
4324 .get_ethtool_stats = mvneta_ethtool_get_stats,
4325 .get_sset_count = mvneta_ethtool_get_sset_count,
4326 .get_rxfh_indir_size = mvneta_ethtool_get_rxfh_indir_size,
4327 .get_rxnfc = mvneta_ethtool_get_rxnfc,
4328 .get_rxfh = mvneta_ethtool_get_rxfh,
4329 .set_rxfh = mvneta_ethtool_set_rxfh,
4330 .get_link_ksettings = mvneta_ethtool_get_link_ksettings,
4331 .set_link_ksettings = mvneta_ethtool_set_link_ksettings,
4332 .get_wol = mvneta_ethtool_get_wol,
4333 .set_wol = mvneta_ethtool_set_wol,
4334 .get_eee = mvneta_ethtool_get_eee,
4335 .set_eee = mvneta_ethtool_set_eee,
static int mvneta_init(struct device *dev, struct mvneta_port *pp)
{
	int queue;

	/* Disable port */
	mvneta_port_disable(pp);

	/* Set port default values */
	mvneta_defaults_set(pp);

	pp->txqs = devm_kcalloc(dev, txq_number, sizeof(*pp->txqs), GFP_KERNEL);
	if (!pp->txqs)
		return -ENOMEM;

	/* Initialize TX descriptor rings */
	for (queue = 0; queue < txq_number; queue++) {
		struct mvneta_tx_queue *txq = &pp->txqs[queue];
		txq->id = queue;
		txq->size = pp->tx_ring_size;
		txq->done_pkts_coal = MVNETA_TXDONE_COAL_PKTS;
	}

	pp->rxqs = devm_kcalloc(dev, rxq_number, sizeof(*pp->rxqs), GFP_KERNEL);
	if (!pp->rxqs)
		return -ENOMEM;

	/* Create Rx descriptor rings */
	for (queue = 0; queue < rxq_number; queue++) {
		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
		rxq->id = queue;
		rxq->size = pp->rx_ring_size;
		rxq->pkts_coal = MVNETA_RX_COAL_PKTS;
		rxq->time_coal = MVNETA_RX_COAL_USEC;
		rxq->buf_virt_addr
			= devm_kmalloc_array(pp->dev->dev.parent,
					     rxq->size,
					     sizeof(*rxq->buf_virt_addr),
					     GFP_KERNEL);
		if (!rxq->buf_virt_addr)
			return -ENOMEM;
	}

	return 0;
}

/* platform glue : initialize decoding windows */
static void mvneta_conf_mbus_windows(struct mvneta_port *pp,
				     const struct mbus_dram_target_info *dram)
{
	u32 win_enable;
	u32 win_protect;
	int i;

	for (i = 0; i < 6; i++) {
		mvreg_write(pp, MVNETA_WIN_BASE(i), 0);
		mvreg_write(pp, MVNETA_WIN_SIZE(i), 0);
		if (i < 4)
			mvreg_write(pp, MVNETA_WIN_REMAP(i), 0);
	}

	win_enable = 0x3f;
	win_protect = 0;

	if (dram) {
		for (i = 0; i < dram->num_cs; i++) {
			const struct mbus_dram_window *cs = dram->cs + i;

			mvreg_write(pp, MVNETA_WIN_BASE(i),
				    (cs->base & 0xffff0000) |
				    (cs->mbus_attr << 8) |
				    dram->mbus_dram_target_id);
			mvreg_write(pp, MVNETA_WIN_SIZE(i),
				    (cs->size - 1) & 0xffff0000);

			win_enable &= ~(1 << i);
			win_protect |= 3 << (2 * i);
		}
	} else {
		/* For Armada3700 open default 4GB Mbus window, leaving
		 * arbitration of target/attribute to a different layer
		 * of configuration.
		 */
		mvreg_write(pp, MVNETA_WIN_SIZE(0), 0xffff0000);
		win_enable &= ~BIT(0);
		win_protect = 3;
	}

	mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable);
	mvreg_write(pp, MVNETA_ACCESS_PROTECT_ENABLE, win_protect);
}

/* Power up the port */
static int mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
{
	/* MAC Cause register should be cleared */
	mvreg_write(pp, MVNETA_UNIT_INTR_CAUSE, 0);

	if (phy_mode == PHY_INTERFACE_MODE_QSGMII)
		mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_QSGMII_SERDES_PROTO);
	else if (phy_mode == PHY_INTERFACE_MODE_SGMII ||
		 phy_mode == PHY_INTERFACE_MODE_1000BASEX)
		mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_SGMII_SERDES_PROTO);
	else if (!phy_interface_mode_is_rgmii(phy_mode))
		return -EINVAL;

	return 0;
}

/* Device initialization routine */
static int mvneta_probe(struct platform_device *pdev)
{
	struct resource *res;
	struct device_node *dn = pdev->dev.of_node;
	struct device_node *bm_node;
	struct mvneta_port *pp;
	struct net_device *dev;
	struct phylink *phylink;
	const char *dt_mac_addr;
	char hw_mac_addr[ETH_ALEN];
	const char *mac_from;
	int tx_csum_limit;
	int phy_mode;
	int err;
	int cpu;

	dev = alloc_etherdev_mqs(sizeof(struct mvneta_port), txq_number, rxq_number);
	if (!dev)
		return -ENOMEM;

	dev->irq = irq_of_parse_and_map(dn, 0);
	if (dev->irq == 0) {
		err = -EINVAL;
		goto err_free_netdev;
	}

	phy_mode = of_get_phy_mode(dn);
	if (phy_mode < 0) {
		dev_err(&pdev->dev, "incorrect phy-mode\n");
		err = -EINVAL;
		goto err_free_irq;
	}

	phylink = phylink_create(dev, pdev->dev.fwnode, phy_mode,
				 &mvneta_phylink_ops);
	if (IS_ERR(phylink)) {
		err = PTR_ERR(phylink);
		goto err_free_irq;
	}

	dev->tx_queue_len = MVNETA_MAX_TXD;
	dev->watchdog_timeo = 5 * HZ;
	dev->netdev_ops = &mvneta_netdev_ops;

	dev->ethtool_ops = &mvneta_eth_tool_ops;

	pp = netdev_priv(dev);
	spin_lock_init(&pp->lock);
	pp->phylink = phylink;
	pp->phy_interface = phy_mode;
	pp->dn = dn;

	pp->rxq_def = rxq_def;
	pp->indir[0] = rxq_def;

	/* Get special SoC configurations */
	if (of_device_is_compatible(dn, "marvell,armada-3700-neta"))
		pp->neta_armada3700 = true;

	pp->clk = devm_clk_get(&pdev->dev, "core");
	if (IS_ERR(pp->clk))
		pp->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(pp->clk)) {
		err = PTR_ERR(pp->clk);
		goto err_free_phylink;
	}

	clk_prepare_enable(pp->clk);

	pp->clk_bus = devm_clk_get(&pdev->dev, "bus");
	if (!IS_ERR(pp->clk_bus))
		clk_prepare_enable(pp->clk_bus);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	pp->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(pp->base)) {
		err = PTR_ERR(pp->base);
		goto err_clk;
	}

	/* Alloc per-cpu port structure */
	pp->ports = alloc_percpu(struct mvneta_pcpu_port);
	if (!pp->ports) {
		err = -ENOMEM;
		goto err_clk;
	}

	/* Alloc per-cpu stats */
	pp->stats = netdev_alloc_pcpu_stats(struct mvneta_pcpu_stats);
	if (!pp->stats) {
		err = -ENOMEM;
		goto err_free_ports;
	}

	dt_mac_addr = of_get_mac_address(dn);
	if (dt_mac_addr) {
		mac_from = "device tree";
		memcpy(dev->dev_addr, dt_mac_addr, ETH_ALEN);
	} else {
		mvneta_get_mac_addr(pp, hw_mac_addr);
		if (is_valid_ether_addr(hw_mac_addr)) {
			mac_from = "hardware";
			memcpy(dev->dev_addr, hw_mac_addr, ETH_ALEN);
		} else {
			mac_from = "random";
			eth_hw_addr_random(dev);
		}
	}

	if (!of_property_read_u32(dn, "tx-csum-limit", &tx_csum_limit)) {
		if (tx_csum_limit < 0 ||
		    tx_csum_limit > MVNETA_TX_CSUM_MAX_SIZE) {
			tx_csum_limit = MVNETA_TX_CSUM_DEF_SIZE;
			dev_info(&pdev->dev,
				 "Wrong TX csum limit in DT, set to %dB\n",
				 MVNETA_TX_CSUM_DEF_SIZE);
		}
	} else if (of_device_is_compatible(dn, "marvell,armada-370-neta")) {
		tx_csum_limit = MVNETA_TX_CSUM_DEF_SIZE;
	} else {
		tx_csum_limit = MVNETA_TX_CSUM_MAX_SIZE;
	}

	pp->tx_csum_limit = tx_csum_limit;

	pp->dram_target_info = mv_mbus_dram_info();
	/* Armada3700 requires setting default configuration of Mbus
	 * windows, however without using filled mbus_dram_target_info
	 * structure.
	 */
	if (pp->dram_target_info || pp->neta_armada3700)
		mvneta_conf_mbus_windows(pp, pp->dram_target_info);

	pp->tx_ring_size = MVNETA_MAX_TXD;
	pp->rx_ring_size = MVNETA_MAX_RXD;

	pp->dev = dev;
	SET_NETDEV_DEV(dev, &pdev->dev);

	pp->id = global_port_id++;
	pp->rx_offset_correction = 0; /* not relevant for SW BM */

	/* Obtain access to BM resources if enabled and already initialized */
	bm_node = of_parse_phandle(dn, "buffer-manager", 0);
	if (bm_node) {
		pp->bm_priv = mvneta_bm_get(bm_node);
		if (pp->bm_priv) {
			err = mvneta_bm_port_init(pdev, pp);
			if (err < 0) {
				dev_info(&pdev->dev,
					 "use SW buffer management\n");
				mvneta_bm_put(pp->bm_priv);
				pp->bm_priv = NULL;
			}
		}
		/* Set RX packet offset correction for platforms, whose
		 * NET_SKB_PAD, exceeds 64B. It should be 64B for 64-bit
		 * platforms and 0B for 32-bit ones.
		 */
		pp->rx_offset_correction = max(0,
					       NET_SKB_PAD -
					       MVNETA_RX_PKT_OFFSET_CORRECTION);
	}
	of_node_put(bm_node);

	err = mvneta_init(&pdev->dev, pp);
	if (err < 0)
		goto err_netdev;

	err = mvneta_port_power_up(pp, phy_mode);
	if (err < 0) {
		dev_err(&pdev->dev, "can't power up port\n");
		goto err_netdev;
	}

	/* Armada3700 network controller does not support per-cpu
	 * operation, so only single NAPI should be initialized.
	 */
	if (pp->neta_armada3700) {
		netif_napi_add(dev, &pp->napi, mvneta_poll, NAPI_POLL_WEIGHT);
	} else {
		for_each_present_cpu(cpu) {
			struct mvneta_pcpu_port *port =
				per_cpu_ptr(pp->ports, cpu);

			netif_napi_add(dev, &port->napi, mvneta_poll,
				       NAPI_POLL_WEIGHT);
			port->pp = pp;
		}
	}

	dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_TSO;
	dev->hw_features |= dev->features;
	dev->vlan_features |= dev->features;
	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
	dev->gso_max_segs = MVNETA_MAX_TSO_SEGS;

	/* MTU range: 68 - 9676 */
	dev->min_mtu = ETH_MIN_MTU;
	/* 9676 == 9700 - 20 and rounding to 8 */
	dev->max_mtu = 9676;

	err = register_netdev(dev);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to register\n");
		goto err_netdev;
	}

	netdev_info(dev, "Using %s mac address %pM\n", mac_from,
		    dev->dev_addr);

	platform_set_drvdata(pdev, pp->dev);

	return 0;

err_netdev:
	if (pp->bm_priv) {
		mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
		mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short,
				       1 << pp->id);
		mvneta_bm_put(pp->bm_priv);
	}
	free_percpu(pp->stats);
err_free_ports:
	free_percpu(pp->ports);
err_clk:
	clk_disable_unprepare(pp->clk_bus);
	clk_disable_unprepare(pp->clk);
err_free_phylink:
	if (pp->phylink)
		phylink_destroy(pp->phylink);
err_free_irq:
	irq_dispose_mapping(dev->irq);
err_free_netdev:
	free_netdev(dev);
	return err;
}

/* Device removal routine */
static int mvneta_remove(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct mvneta_port *pp = netdev_priv(dev);

	unregister_netdev(dev);
	clk_disable_unprepare(pp->clk_bus);
	clk_disable_unprepare(pp->clk);
	free_percpu(pp->ports);
	free_percpu(pp->stats);
	irq_dispose_mapping(dev->irq);
	phylink_destroy(pp->phylink);
	free_netdev(dev);

	if (pp->bm_priv) {
		mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
		mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short,
				       1 << pp->id);
		mvneta_bm_put(pp->bm_priv);
	}

	return 0;
}

#ifdef CONFIG_PM_SLEEP
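/* System suspend: detach the interface, drain the queues and gate the
 * clocks. The resume handler below re-programs the controller (Mbus
 * windows, port defaults, queue hardware) before re-attaching the
 * interface.
 */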
static int mvneta_suspend(struct device *device)
{
	int queue;
	struct net_device *dev = dev_get_drvdata(device);
	struct mvneta_port *pp = netdev_priv(dev);

	if (!netif_running(dev))
		goto clean_exit;

	if (!pp->neta_armada3700) {
		spin_lock(&pp->lock);
		pp->is_stopped = true;
		spin_unlock(&pp->lock);

		cpuhp_state_remove_instance_nocalls(online_hpstate,
						    &pp->node);
		cpuhp_state_remove_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
						    &pp->node_dead);
	}

	rtnl_lock();
	mvneta_stop_dev(pp);
	rtnl_unlock();

	for (queue = 0; queue < rxq_number; queue++) {
		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];

		mvneta_rxq_drop_pkts(pp, rxq);
	}

	for (queue = 0; queue < txq_number; queue++) {
		struct mvneta_tx_queue *txq = &pp->txqs[queue];

		mvneta_txq_hw_deinit(pp, txq);
	}

clean_exit:
	netif_device_detach(dev);
	clk_disable_unprepare(pp->clk_bus);
	clk_disable_unprepare(pp->clk);

	return 0;
}

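/* System resume: the inverse of mvneta_suspend() - re-enable the clocks,
 * re-program the hardware and restart the queues if the interface was
 * running at suspend time.
 */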
static int mvneta_resume(struct device *device)
{
	struct platform_device *pdev = to_platform_device(device);
	struct net_device *dev = dev_get_drvdata(device);
	struct mvneta_port *pp = netdev_priv(dev);
	int err, queue;

	clk_prepare_enable(pp->clk);
	if (!IS_ERR(pp->clk_bus))
		clk_prepare_enable(pp->clk_bus);
	if (pp->dram_target_info || pp->neta_armada3700)
		mvneta_conf_mbus_windows(pp, pp->dram_target_info);
	if (pp->bm_priv) {
		err = mvneta_bm_port_init(pdev, pp);
		if (err < 0) {
			dev_info(&pdev->dev, "use SW buffer management\n");
			pp->bm_priv = NULL;
		}
	}
	mvneta_defaults_set(pp);
	err = mvneta_port_power_up(pp, pp->phy_interface);
	if (err < 0) {
		dev_err(device, "can't power up port\n");
		return err;
	}

	netif_device_attach(dev);

	if (!netif_running(dev))
		return 0;

	for (queue = 0; queue < rxq_number; queue++) {
		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];

		rxq->next_desc_to_proc = 0;
		mvneta_rxq_hw_init(pp, rxq);
	}

	for (queue = 0; queue < txq_number; queue++) {
		struct mvneta_tx_queue *txq = &pp->txqs[queue];

		txq->next_desc_to_proc = 0;
		mvneta_txq_hw_init(pp, txq);
	}

	if (!pp->neta_armada3700) {
		spin_lock(&pp->lock);
		pp->is_stopped = false;
		spin_unlock(&pp->lock);
		cpuhp_state_add_instance_nocalls(online_hpstate,
						 &pp->node);
		cpuhp_state_add_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
						 &pp->node_dead);
	}

	rtnl_lock();
	mvneta_start_dev(pp);
	rtnl_unlock();
	mvneta_set_rx_mode(dev);

	return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(mvneta_pm_ops, mvneta_suspend, mvneta_resume);

static const struct of_device_id mvneta_match[] = {
	{ .compatible = "marvell,armada-370-neta" },
	{ .compatible = "marvell,armada-xp-neta" },
	{ .compatible = "marvell,armada-3700-neta" },
	{ }
};
MODULE_DEVICE_TABLE(of, mvneta_match);

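/*
 * Illustrative device-tree fragment only (node name, unit address, register
 * range and clock phandle below are made-up examples, not taken from this
 * file); the compatible strings and the "phy-mode", "buffer-manager" and
 * "tx-csum-limit" properties are the ones this driver actually parses:
 *
 *	ethernet@70000 {
 *		compatible = "marvell,armada-370-neta";
 *		reg = <0x70000 0x4000>;
 *		interrupts = <8>;
 *		clocks = <&gateclk 4>;
 *		phy-mode = "rgmii-id";
 *		buffer-manager = <&bm>;
 *		tx-csum-limit = <9800>;
 *	};
 */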
static struct platform_driver mvneta_driver = {
	.probe = mvneta_probe,
	.remove = mvneta_remove,
	.driver = {
		.name = MVNETA_DRIVER_NAME,
		.of_match_table = mvneta_match,
		.pm = &mvneta_pm_ops,
	},
};

static int __init mvneta_driver_init(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "net/mvmeta:online",
				      mvneta_cpu_online,
				      mvneta_cpu_down_prepare);
	if (ret < 0)
		goto out;
	online_hpstate = ret;
	ret = cpuhp_setup_state_multi(CPUHP_NET_MVNETA_DEAD, "net/mvneta:dead",
				      NULL, mvneta_cpu_dead);
	if (ret)
		goto err_dead;

	ret = platform_driver_register(&mvneta_driver);
	if (ret)
		goto err;
	return 0;

err:
	cpuhp_remove_multi_state(CPUHP_NET_MVNETA_DEAD);
err_dead:
	cpuhp_remove_multi_state(online_hpstate);
out:
	return ret;
}

module_init(mvneta_driver_init);

static void __exit mvneta_driver_exit(void)
{
	platform_driver_unregister(&mvneta_driver);
	cpuhp_remove_multi_state(CPUHP_NET_MVNETA_DEAD);
	cpuhp_remove_multi_state(online_hpstate);
}
module_exit(mvneta_driver_exit);

MODULE_DESCRIPTION("Marvell NETA Ethernet Driver - www.marvell.com");
MODULE_AUTHOR("Rami Rosen <rosenr@marvell.com>, Thomas Petazzoni <thomas.petazzoni@free-electrons.com>");
MODULE_LICENSE("GPL");

module_param(rxq_number, int, 0444);
module_param(txq_number, int, 0444);

module_param(rxq_def, int, 0444);
module_param(rx_copybreak, int, 0644);
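
/*
 * Usage sketch (illustrative, not part of the original sources): when the
 * driver is built as a module named mvneta, the parameters above can be
 * set at load time, e.g.
 *
 *	modprobe mvneta rxq_def=1 rx_copybreak=256
 *
 * The 0444 parameters are read-only at runtime; rx_copybreak (0644) can
 * also be changed later through /sys/module/mvneta/parameters/rx_copybreak.
 */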