/*
 * Texas Instruments Ethernet Switch Driver
 *
 * Copyright (C) 2012 Texas Instruments
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/kernel.h>
#include <linux/clk.h>
#include <linux/timer.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/irqreturn.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/net_tstamp.h>
#include <linux/phy.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/pm_runtime.h>
#include <linux/gpio/consumer.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_device.h>
#include <linux/if_vlan.h>
#include <linux/kmemleak.h>
#include <linux/sys_soc.h>
#include <linux/pinctrl/consumer.h>
#include <net/pkt_cls.h>

#include "davinci_cpdma.h"

#include <net/pkt_sched.h>
#define CPSW_DEBUG	(NETIF_MSG_HW		| NETIF_MSG_WOL		| \
			 NETIF_MSG_DRV		| NETIF_MSG_LINK	| \
			 NETIF_MSG_IFUP		| NETIF_MSG_INTR	| \
			 NETIF_MSG_PROBE	| NETIF_MSG_TIMER	| \
			 NETIF_MSG_IFDOWN	| NETIF_MSG_RX_ERR	| \
			 NETIF_MSG_TX_ERR	| NETIF_MSG_TX_DONE	| \
			 NETIF_MSG_PKTDATA	| NETIF_MSG_TX_QUEUED	| \
			 NETIF_MSG_RX_STATUS)
#define cpsw_info(priv, type, format, ...)		\
do {								\
	if (netif_msg_##type(priv) && net_ratelimit())		\
		dev_info(priv->dev, format, ## __VA_ARGS__);	\
} while (0)

#define cpsw_err(priv, type, format, ...)		\
do {								\
	if (netif_msg_##type(priv) && net_ratelimit())		\
		dev_err(priv->dev, format, ## __VA_ARGS__);	\
} while (0)

#define cpsw_dbg(priv, type, format, ...)		\
do {								\
	if (netif_msg_##type(priv) && net_ratelimit())		\
		dev_dbg(priv->dev, format, ## __VA_ARGS__);	\
} while (0)

#define cpsw_notice(priv, type, format, ...)		\
do {								\
	if (netif_msg_##type(priv) && net_ratelimit())		\
		dev_notice(priv->dev, format, ## __VA_ARGS__);	\
} while (0)
#define ALE_ALL_PORTS		0x7

#define CPSW_MAJOR_VERSION(reg)		(reg >> 8 & 0x7)
#define CPSW_MINOR_VERSION(reg)		(reg & 0xff)
#define CPSW_RTL_VERSION(reg)		((reg >> 11) & 0x1f)

#define CPSW_VERSION_1		0x19010a
#define CPSW_VERSION_2		0x19010c
#define CPSW_VERSION_3		0x19010f
#define CPSW_VERSION_4		0x190112
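/* Worked example of decoding the hardware ID register with the macros
 * above: reg = 0x19010c (CPSW_VERSION_2) gives
 *   CPSW_MAJOR_VERSION(reg) = (0x19010c >> 8) & 0x7   = 1
 *   CPSW_MINOR_VERSION(reg) = 0x19010c & 0xff         = 0x0c (12)
 *   CPSW_RTL_VERSION(reg)   = (0x19010c >> 11) & 0x1f = 0
 * which cpsw_ndo_open() below reports as "cpsw version 1.12 (0)".
 */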
#define HOST_PORT_NUM		0
#define CPSW_ALE_PORTS_NUM	3
#define SLIVER_SIZE		0x40
#define CPSW1_HOST_PORT_OFFSET	0x028
#define CPSW1_SLAVE_OFFSET	0x050
#define CPSW1_SLAVE_SIZE	0x040
#define CPSW1_CPDMA_OFFSET	0x100
#define CPSW1_STATERAM_OFFSET	0x200
#define CPSW1_HW_STATS		0x400
#define CPSW1_CPTS_OFFSET	0x500
#define CPSW1_ALE_OFFSET	0x600
#define CPSW1_SLIVER_OFFSET	0x700

#define CPSW2_HOST_PORT_OFFSET	0x108
#define CPSW2_SLAVE_OFFSET	0x200
#define CPSW2_SLAVE_SIZE	0x100
#define CPSW2_CPDMA_OFFSET	0x800
#define CPSW2_HW_STATS		0x900
#define CPSW2_STATERAM_OFFSET	0xa00
#define CPSW2_CPTS_OFFSET	0xc00
#define CPSW2_ALE_OFFSET	0xd00
#define CPSW2_SLIVER_OFFSET	0xd80
#define CPSW2_BD_OFFSET		0x2000

#define CPDMA_RXTHRESH		0x0c0
#define CPDMA_RXFREE		0x0e0
#define CPDMA_TXHDP		0x00
#define CPDMA_RXHDP		0x20
#define CPDMA_TXCP		0x40
#define CPDMA_RXCP		0x60
#define CPSW_POLL_WEIGHT	64
#define CPSW_RX_VLAN_ENCAP_HDR_SIZE		4
#define CPSW_MIN_PACKET_SIZE	(VLAN_ETH_ZLEN)
#define CPSW_MAX_PACKET_SIZE	(VLAN_ETH_FRAME_LEN +\
				 ETH_FCS_LEN +\
				 CPSW_RX_VLAN_ENCAP_HDR_SIZE)

#define RX_PRIORITY_MAPPING	0x76543210
#define TX_PRIORITY_MAPPING	0x33221100
#define CPDMA_TX_PRIORITY_MAP	0x76543210

#define CPSW_VLAN_AWARE		BIT(1)
#define CPSW_RX_VLAN_ENCAP	BIT(2)
#define CPSW_ALE_VLAN_AWARE	1

#define CPSW_FIFO_NORMAL_MODE		(0 << 16)
#define CPSW_FIFO_DUAL_MAC_MODE		(1 << 16)
#define CPSW_FIFO_RATE_LIMIT_MODE	(2 << 16)

#define CPSW_INTPACEEN		(0x3f << 16)
#define CPSW_INTPRESCALE_MASK	(0x7FF << 0)
#define CPSW_CMINTMAX_CNT	63
#define CPSW_CMINTMIN_CNT	2
#define CPSW_CMINTMAX_INTVL	(1000 / CPSW_CMINTMIN_CNT)
#define CPSW_CMINTMIN_INTVL	((1000 / CPSW_CMINTMAX_CNT) + 1)
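/* The pacing bounds above are expressed as interrupts per millisecond:
 * at most CPSW_CMINTMAX_CNT (63) and at least CPSW_CMINTMIN_CNT (2)
 * interrupts/ms, so the usable coalescing interval works out to
 *   CPSW_CMINTMAX_INTVL = 1000 / 2      = 500 us
 *   CPSW_CMINTMIN_INTVL = 1000 / 63 + 1 =  16 us
 * cpsw_set_coalesce() below clamps the requested rx_coalesce_usecs into
 * this [16, 500] us window (or dilates the pacer pulse to go beyond it).
 */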
#define cpsw_slave_index(cpsw, priv)				\
		((cpsw->data.dual_emac) ? priv->emac_port :	\
		cpsw->data.active_slave)

#define CPSW_MAX_QUEUES		8
#define CPSW_CPDMA_DESCS_POOL_SIZE_DEFAULT 256
#define CPSW_FIFO_QUEUE_TYPE_SHIFT	16
#define CPSW_FIFO_SHAPE_EN_SHIFT	16
#define CPSW_FIFO_RATE_EN_SHIFT		20
#define CPSW_TC_NUM			4
#define CPSW_FIFO_SHAPERS_NUM		(CPSW_TC_NUM - 1)
#define CPSW_PCT_MASK			0x7f

#define CPSW_RX_VLAN_ENCAP_HDR_PRIO_SHIFT	29
#define CPSW_RX_VLAN_ENCAP_HDR_PRIO_MSK		GENMASK(2, 0)
#define CPSW_RX_VLAN_ENCAP_HDR_VID_SHIFT	16
#define CPSW_RX_VLAN_ENCAP_HDR_PKT_TYPE_SHIFT	8
#define CPSW_RX_VLAN_ENCAP_HDR_PKT_TYPE_MSK	GENMASK(1, 0)
enum {
	CPSW_RX_VLAN_ENCAP_HDR_PKT_VLAN_TAG = 0,
	CPSW_RX_VLAN_ENCAP_HDR_PKT_RESERV,
	CPSW_RX_VLAN_ENCAP_HDR_PKT_PRIO_TAG,
	CPSW_RX_VLAN_ENCAP_HDR_PKT_UNTAG,
};
static int debug_level;
module_param(debug_level, int, 0);
MODULE_PARM_DESC(debug_level, "cpsw debug level (NETIF_MSG bits)");

static int ale_ageout = 10;
module_param(ale_ageout, int, 0);
MODULE_PARM_DESC(ale_ageout, "cpsw ale ageout interval (seconds)");

static int rx_packet_max = CPSW_MAX_PACKET_SIZE;
module_param(rx_packet_max, int, 0);
MODULE_PARM_DESC(rx_packet_max, "maximum receive packet size (bytes)");

static int descs_pool_size = CPSW_CPDMA_DESCS_POOL_SIZE_DEFAULT;
module_param(descs_pool_size, int, 0444);
MODULE_PARM_DESC(descs_pool_size, "Number of CPDMA CPPI descriptors in pool");
struct cpsw_wr_regs {
	u32	id_ver;
	u32	soft_reset;
	u32	control;
	u32	int_control;
	u32	rx_thresh_en;
	u32	rx_en;
	u32	tx_en;
	u32	misc_en;
	u32	mem_allign1[8];
	u32	rx_thresh_stat;
	u32	rx_stat;
	u32	tx_stat;
	u32	misc_stat;
	u32	mem_allign2[8];
	u32	rx_imax;
	u32	tx_imax;
};

struct cpsw_ss_regs {
	u32	id_ver;
	u32	control;
	u32	soft_reset;
	u32	stat_port_en;
	u32	ptype;
	u32	soft_idle;
	u32	thru_rate;
	u32	gap_thresh;
	u32	tx_start_wds;
	u32	flow_control;
	u32	vlan_ltype;
	u32	ts_ltype;
	u32	dlr_ltype;
};
/* CPSW_PORT_V1 */
#define CPSW1_MAX_BLKS      0x00 /* Maximum FIFO Blocks */
#define CPSW1_BLK_CNT       0x04 /* FIFO Block Usage Count (Read Only) */
#define CPSW1_TX_IN_CTL     0x08 /* Transmit FIFO Control */
#define CPSW1_PORT_VLAN     0x0c /* VLAN Register */
#define CPSW1_TX_PRI_MAP    0x10 /* Tx Header Priority to Switch Pri Mapping */
#define CPSW1_TS_CTL        0x14 /* Time Sync Control */
#define CPSW1_TS_SEQ_LTYPE  0x18 /* Time Sync Sequence ID Offset and Msg Type */
#define CPSW1_TS_VLAN       0x1c /* Time Sync VLAN1 and VLAN2 */
/* CPSW_PORT_V2 */
#define CPSW2_CONTROL       0x00 /* Control Register */
#define CPSW2_MAX_BLKS      0x08 /* Maximum FIFO Blocks */
#define CPSW2_BLK_CNT       0x0c /* FIFO Block Usage Count (Read Only) */
#define CPSW2_TX_IN_CTL     0x10 /* Transmit FIFO Control */
#define CPSW2_PORT_VLAN     0x14 /* VLAN Register */
#define CPSW2_TX_PRI_MAP    0x18 /* Tx Header Priority to Switch Pri Mapping */
#define CPSW2_TS_SEQ_MTYPE  0x1c /* Time Sync Sequence ID Offset and Msg Type */
/* CPSW_PORT_V1 and V2 */
#define SA_LO               0x20 /* CPGMAC_SL Source Address Low */
#define SA_HI               0x24 /* CPGMAC_SL Source Address High */
#define SEND_PERCENT        0x28 /* Transmit Queue Send Percentages */

/* CPSW_PORT_V2 only */
#define RX_DSCP_PRI_MAP0    0x30 /* Rx DSCP Priority to Rx Packet Mapping */
#define RX_DSCP_PRI_MAP1    0x34 /* Rx DSCP Priority to Rx Packet Mapping */
#define RX_DSCP_PRI_MAP2    0x38 /* Rx DSCP Priority to Rx Packet Mapping */
#define RX_DSCP_PRI_MAP3    0x3c /* Rx DSCP Priority to Rx Packet Mapping */
#define RX_DSCP_PRI_MAP4    0x40 /* Rx DSCP Priority to Rx Packet Mapping */
#define RX_DSCP_PRI_MAP5    0x44 /* Rx DSCP Priority to Rx Packet Mapping */
#define RX_DSCP_PRI_MAP6    0x48 /* Rx DSCP Priority to Rx Packet Mapping */
#define RX_DSCP_PRI_MAP7    0x4c /* Rx DSCP Priority to Rx Packet Mapping */
/* Bit definitions for the CPSW2_CONTROL register */
#define PASS_PRI_TAGGED     BIT(24) /* Pass Priority Tagged */
#define VLAN_LTYPE2_EN      BIT(21) /* VLAN LTYPE 2 enable */
#define VLAN_LTYPE1_EN      BIT(20) /* VLAN LTYPE 1 enable */
#define DSCP_PRI_EN         BIT(16) /* DSCP Priority Enable */
#define TS_107              BIT(15) /* Time Sync Dest IP Address 107 */
#define TS_320              BIT(14) /* Time Sync Dest Port 320 enable */
#define TS_319              BIT(13) /* Time Sync Dest Port 319 enable */
#define TS_132              BIT(12) /* Time Sync Dest IP Addr 132 enable */
#define TS_131              BIT(11) /* Time Sync Dest IP Addr 131 enable */
#define TS_130              BIT(10) /* Time Sync Dest IP Addr 130 enable */
#define TS_129              BIT(9)  /* Time Sync Dest IP Addr 129 enable */
#define TS_TTL_NONZERO      BIT(8)  /* Time Sync Time To Live Non-zero enable */
#define TS_ANNEX_F_EN       BIT(6)  /* Time Sync Annex F enable */
#define TS_ANNEX_D_EN       BIT(4)  /* Time Sync Annex D enable */
#define TS_LTYPE2_EN        BIT(3)  /* Time Sync LTYPE 2 enable */
#define TS_LTYPE1_EN        BIT(2)  /* Time Sync LTYPE 1 enable */
#define TS_TX_EN            BIT(1)  /* Time Sync Transmit Enable */
#define TS_RX_EN            BIT(0)  /* Time Sync Receive Enable */
#define CTRL_V2_TS_BITS \
	(TS_320 | TS_319 | TS_132 | TS_131 | TS_130 | TS_129 |\
	 TS_TTL_NONZERO | TS_ANNEX_D_EN | TS_LTYPE1_EN)

#define CTRL_V2_ALL_TS_MASK (CTRL_V2_TS_BITS | TS_TX_EN | TS_RX_EN)
#define CTRL_V2_TX_TS_BITS  (CTRL_V2_TS_BITS | TS_TX_EN)
#define CTRL_V2_RX_TS_BITS  (CTRL_V2_TS_BITS | TS_RX_EN)
#define CTRL_V3_TS_BITS \
	(TS_107 | TS_320 | TS_319 | TS_132 | TS_131 | TS_130 | TS_129 |\
	 TS_TTL_NONZERO | TS_ANNEX_F_EN | TS_ANNEX_D_EN |\
	 TS_LTYPE1_EN)

#define CTRL_V3_ALL_TS_MASK (CTRL_V3_TS_BITS | TS_TX_EN | TS_RX_EN)
#define CTRL_V3_TX_TS_BITS  (CTRL_V3_TS_BITS | TS_TX_EN)
#define CTRL_V3_RX_TS_BITS  (CTRL_V3_TS_BITS | TS_RX_EN)
/* Bit definitions for the CPSW2_TS_SEQ_MTYPE register */
#define TS_SEQ_ID_OFFSET_SHIFT   (16)    /* Time Sync Sequence ID Offset */
#define TS_SEQ_ID_OFFSET_MASK    (0x3f)
#define TS_MSG_TYPE_EN_SHIFT     (0)     /* Time Sync Message Type Enable */
#define TS_MSG_TYPE_EN_MASK      (0xffff)

/* The PTP event messages - Sync, Delay_Req, Pdelay_Req, and Pdelay_Resp. */
#define EVENT_MSG_BITS ((1<<0) | (1<<1) | (1<<2) | (1<<3))

/* Bit definitions for the CPSW1_TS_CTL register */
#define CPSW_V1_TS_RX_EN		BIT(0)
#define CPSW_V1_TS_TX_EN		BIT(4)
#define CPSW_V1_MSG_TYPE_OFS		16

/* Bit definitions for the CPSW1_TS_SEQ_LTYPE register */
#define CPSW_V1_SEQ_ID_OFS_SHIFT	16

#define CPSW_MAX_BLKS_TX		15
#define CPSW_MAX_BLKS_TX_SHIFT		4
#define CPSW_MAX_BLKS_RX		5
struct cpsw_host_regs {
	u32	max_blks;
	u32	blk_cnt;
	u32	tx_in_ctl;
	u32	port_vlan;
	u32	tx_pri_map;
	u32	cpdma_tx_pri_map;
	u32	cpdma_rx_chan_map;
};
struct cpsw_sliver_regs {
	u32	id_ver;
	u32	mac_control;
	u32	mac_status;
	u32	soft_reset;
	u32	rx_maxlen;
	u32	__reserved_0;
	u32	rx_pause;
	u32	tx_pause;
	u32	__reserved_1;
	u32	rx_pri_map;
};

struct cpsw_hw_stats {
	u32	rxgoodframes;
	u32	rxbroadcastframes;
	u32	rxmulticastframes;
	u32	rxpauseframes;
	u32	rxcrcerrors;
	u32	rxaligncodeerrors;
	u32	rxoversizedframes;
	u32	rxjabberframes;
	u32	rxundersizedframes;
	u32	rxfragments;
	u32	__pad_0[2];
	u32	rxoctets;
	u32	txgoodframes;
	u32	txbroadcastframes;
	u32	txmulticastframes;
	u32	txpauseframes;
	u32	txdeferredframes;
	u32	txcollisionframes;
	u32	txsinglecollframes;
	u32	txmultcollframes;
	u32	txexcessivecollisions;
	u32	txlatecollisions;
	u32	txunderrun;
	u32	txcarriersenseerrors;
	u32	txoctets;
	u32	octetframes64;
	u32	octetframes65t127;
	u32	octetframes128t255;
	u32	octetframes256t511;
	u32	octetframes512t1023;
	u32	octetframes1024tup;
	u32	netoctets;
	u32	rxsofoverruns;
	u32	rxmofoverruns;
	u32	rxdmaoverruns;
};
struct cpsw_slave_data {
	struct device_node *phy_node;
	char		phy_id[MII_BUS_ID_SIZE];
	int		phy_if;
	u8		mac_addr[ETH_ALEN];
	u16		dual_emac_res_vlan;	/* Reserved VLAN for DualEMAC */
};
struct cpsw_platform_data {
	struct cpsw_slave_data	*slave_data;
	u32	ss_reg_ofs;	/* Subsystem control register offset */
	u32	channels;	/* number of cpdma channels (symmetric) */
	u32	slaves;		/* number of slave cpgmac ports */
	u32	active_slave;	/* time stamping, ethtool and SIOCGMIIPHY slave */
	u32	ale_entries;	/* ale table size */
	u32	bd_ram_size;	/* buffer descriptor ram size */
	u32	mac_control;	/* Mac control register */
	u16	default_vlan;	/* Def VLAN for ALE lookup in VLAN aware mode */
	bool	dual_emac;	/* Enable Dual EMAC mode */
};
struct cpsw_slave {
	void __iomem			*regs;
	struct cpsw_sliver_regs __iomem	*sliver;
	int				slave_num;
	u32				mac_control;
	struct cpsw_slave_data		*data;
	struct phy_device		*phy;
	struct net_device		*ndev;
	u32				port_vlan;
};
static inline u32 slave_read(struct cpsw_slave *slave, u32 offset)
{
	return readl_relaxed(slave->regs + offset);
}

static inline void slave_write(struct cpsw_slave *slave, u32 val, u32 offset)
{
	writel_relaxed(val, slave->regs + offset);
}
struct cpsw_vector {
	struct cpdma_chan *ch;
	int budget;
};
struct cpsw_common {
	struct device			*dev;
	struct clk			*clk;
	struct cpsw_platform_data	data;
	struct napi_struct		napi_rx;
	struct napi_struct		napi_tx;
	struct cpsw_ss_regs __iomem	*regs;
	struct cpsw_wr_regs __iomem	*wr_regs;
	u8 __iomem			*hw_stats;
	struct cpsw_host_regs __iomem	*host_port_regs;
	u32				version;
	u32				coal_intvl;
	u32				bus_freq_mhz;
	int				rx_packet_max;
	struct cpsw_slave		*slaves;
	struct cpdma_ctlr		*dma;
	struct cpsw_vector		txv[CPSW_MAX_QUEUES];
	struct cpsw_vector		rxv[CPSW_MAX_QUEUES];
	struct cpsw_ale			*ale;
	bool				quirk_irq;
	bool				rx_irq_disabled;
	bool				tx_irq_disabled;
	u32 irqs_table[IRQ_NUM];
	struct cpts			*cpts;
	int				rx_ch_num, tx_ch_num;
	int				speed;
	int				usage_count;
};
struct cpsw_priv {
	struct net_device		*ndev;
	struct device			*dev;
	u32				msg_enable;
	u8				mac_addr[ETH_ALEN];
	bool				rx_pause;
	bool				tx_pause;
	bool				mqprio_hw;
	int				fifo_bw[CPSW_TC_NUM];
	int				shp_cfg_speed;
	u32 emac_port;
	struct cpsw_common *cpsw;
};
struct cpsw_stats {
	char stat_string[ETH_GSTRING_LEN];
	int type;
	int sizeof_stat;
	int stat_offset;
};

enum {
	CPSW_STATS,
	CPDMA_RX_STATS,
	CPDMA_TX_STATS,
};
#define CPSW_STAT(m)		CPSW_STATS,				\
				sizeof(((struct cpsw_hw_stats *)0)->m), \
				offsetof(struct cpsw_hw_stats, m)
#define CPDMA_RX_STAT(m)	CPDMA_RX_STATS,				   \
				sizeof(((struct cpdma_chan_stats *)0)->m), \
				offsetof(struct cpdma_chan_stats, m)
#define CPDMA_TX_STAT(m)	CPDMA_TX_STATS,				   \
				sizeof(((struct cpdma_chan_stats *)0)->m), \
				offsetof(struct cpdma_chan_stats, m)
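/* Each table entry below thus expands to the four cpsw_stats fields.
 * Illustrative expansion (not extra code):
 *
 *   { "Good Rx Frames", CPSW_STAT(rxgoodframes) }
 * becomes
 *   { "Good Rx Frames", CPSW_STATS, sizeof(u32),
 *     offsetof(struct cpsw_hw_stats, rxgoodframes) }
 *
 * cpsw_get_ethtool_stats() uses stat_offset to read the matching
 * hardware counter at cpsw->hw_stats + stat_offset.
 */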
static const struct cpsw_stats cpsw_gstrings_stats[] = {
	{ "Good Rx Frames", CPSW_STAT(rxgoodframes) },
	{ "Broadcast Rx Frames", CPSW_STAT(rxbroadcastframes) },
	{ "Multicast Rx Frames", CPSW_STAT(rxmulticastframes) },
	{ "Pause Rx Frames", CPSW_STAT(rxpauseframes) },
	{ "Rx CRC Errors", CPSW_STAT(rxcrcerrors) },
	{ "Rx Align/Code Errors", CPSW_STAT(rxaligncodeerrors) },
	{ "Oversize Rx Frames", CPSW_STAT(rxoversizedframes) },
	{ "Rx Jabbers", CPSW_STAT(rxjabberframes) },
	{ "Undersize (Short) Rx Frames", CPSW_STAT(rxundersizedframes) },
	{ "Rx Fragments", CPSW_STAT(rxfragments) },
	{ "Rx Octets", CPSW_STAT(rxoctets) },
	{ "Good Tx Frames", CPSW_STAT(txgoodframes) },
	{ "Broadcast Tx Frames", CPSW_STAT(txbroadcastframes) },
	{ "Multicast Tx Frames", CPSW_STAT(txmulticastframes) },
	{ "Pause Tx Frames", CPSW_STAT(txpauseframes) },
	{ "Deferred Tx Frames", CPSW_STAT(txdeferredframes) },
	{ "Collisions", CPSW_STAT(txcollisionframes) },
	{ "Single Collision Tx Frames", CPSW_STAT(txsinglecollframes) },
	{ "Multiple Collision Tx Frames", CPSW_STAT(txmultcollframes) },
	{ "Excessive Collisions", CPSW_STAT(txexcessivecollisions) },
	{ "Late Collisions", CPSW_STAT(txlatecollisions) },
	{ "Tx Underrun", CPSW_STAT(txunderrun) },
	{ "Carrier Sense Errors", CPSW_STAT(txcarriersenseerrors) },
	{ "Tx Octets", CPSW_STAT(txoctets) },
	{ "Rx + Tx 64 Octet Frames", CPSW_STAT(octetframes64) },
	{ "Rx + Tx 65-127 Octet Frames", CPSW_STAT(octetframes65t127) },
	{ "Rx + Tx 128-255 Octet Frames", CPSW_STAT(octetframes128t255) },
	{ "Rx + Tx 256-511 Octet Frames", CPSW_STAT(octetframes256t511) },
	{ "Rx + Tx 512-1023 Octet Frames", CPSW_STAT(octetframes512t1023) },
	{ "Rx + Tx 1024-Up Octet Frames", CPSW_STAT(octetframes1024tup) },
	{ "Net Octets", CPSW_STAT(netoctets) },
	{ "Rx Start of Frame Overruns", CPSW_STAT(rxsofoverruns) },
	{ "Rx Middle of Frame Overruns", CPSW_STAT(rxmofoverruns) },
	{ "Rx DMA Overruns", CPSW_STAT(rxdmaoverruns) },
};
static const struct cpsw_stats cpsw_gstrings_ch_stats[] = {
	{ "head_enqueue", CPDMA_RX_STAT(head_enqueue) },
	{ "tail_enqueue", CPDMA_RX_STAT(tail_enqueue) },
	{ "pad_enqueue", CPDMA_RX_STAT(pad_enqueue) },
	{ "misqueued", CPDMA_RX_STAT(misqueued) },
	{ "desc_alloc_fail", CPDMA_RX_STAT(desc_alloc_fail) },
	{ "pad_alloc_fail", CPDMA_RX_STAT(pad_alloc_fail) },
	{ "runt_receive_buf", CPDMA_RX_STAT(runt_receive_buff) },
	{ "runt_transmit_buf", CPDMA_RX_STAT(runt_transmit_buff) },
	{ "empty_dequeue", CPDMA_RX_STAT(empty_dequeue) },
	{ "busy_dequeue", CPDMA_RX_STAT(busy_dequeue) },
	{ "good_dequeue", CPDMA_RX_STAT(good_dequeue) },
	{ "requeue", CPDMA_RX_STAT(requeue) },
	{ "teardown_dequeue", CPDMA_RX_STAT(teardown_dequeue) },
};
#define CPSW_STATS_COMMON_LEN	ARRAY_SIZE(cpsw_gstrings_stats)
#define CPSW_STATS_CH_LEN	ARRAY_SIZE(cpsw_gstrings_ch_stats)

#define ndev_to_cpsw(ndev) (((struct cpsw_priv *)netdev_priv(ndev))->cpsw)
/* the macro parameter name below doubles as the member name, so
 * napi_to_cpsw(napi_rx) resolves against the napi_rx member and
 * napi_to_cpsw(napi_tx) against napi_tx
 */
#define napi_to_cpsw(napi)	container_of(napi, struct cpsw_common, napi)
#define for_each_slave(priv, func, arg...)				\
	do {								\
		struct cpsw_slave *slave;				\
		struct cpsw_common *cpsw = (priv)->cpsw;		\
		int n;							\
		if (cpsw->data.dual_emac)				\
			(func)((cpsw)->slaves + priv->emac_port, ##arg);\
		else							\
			for (n = cpsw->data.slaves,			\
					slave = cpsw->slaves;		\
					n; n--)				\
				(func)(slave++, ##arg);			\
	} while (0)
static inline int cpsw_get_slave_port(u32 slave_num)
{
	/* switch port 0 is the host (CPU) port; slave N is port N + 1 */
	return slave_num + 1;
}
static void cpsw_add_mcast(struct cpsw_priv *priv, u8 *addr)
{
	struct cpsw_common *cpsw = priv->cpsw;

	if (cpsw->data.dual_emac) {
		struct cpsw_slave *slave = cpsw->slaves + priv->emac_port;
		int slave_port = cpsw_get_slave_port(slave->slave_num);

		cpsw_ale_add_mcast(cpsw->ale, addr,
				   1 << slave_port | ALE_PORT_HOST,
				   ALE_VLAN, slave->port_vlan, 0);
		return;
	}

	cpsw_ale_add_mcast(cpsw->ale, addr, ALE_ALL_PORTS, 0, 0, 0);
}
static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
{
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
	struct cpsw_ale *ale = cpsw->ale;
	int i;

	if (cpsw->data.dual_emac) {
		bool flag = false;

		/* Enabling promiscuous mode for one interface will be
		 * common for both interfaces as they share the same
		 * hardware resource.
		 */
		for (i = 0; i < cpsw->data.slaves; i++)
			if (cpsw->slaves[i].ndev->flags & IFF_PROMISC)
				flag = true;

		if (!enable && flag) {
			enable = true;
			dev_err(&ndev->dev, "promiscuity not disabled as the other interface is still in promiscuity mode\n");
		}

		if (enable) {
			/* Enable Bypass */
			cpsw_ale_control_set(ale, 0, ALE_BYPASS, 1);

			dev_dbg(&ndev->dev, "promiscuity enabled\n");
		} else {
			/* Disable Bypass */
			cpsw_ale_control_set(ale, 0, ALE_BYPASS, 0);
			dev_dbg(&ndev->dev, "promiscuity disabled\n");
		}
	} else {
		if (enable) {
			unsigned long timeout = jiffies + HZ;

			/* Disable Learn for all ports (host is port 0 and slaves are port 1 and up) */
			for (i = 0; i <= cpsw->data.slaves; i++) {
				cpsw_ale_control_set(ale, i,
						     ALE_PORT_NOLEARN, 1);
				cpsw_ale_control_set(ale, i,
						     ALE_PORT_NO_SA_UPDATE, 1);
			}

			/* Clear All Untouched entries */
			cpsw_ale_control_set(ale, 0, ALE_AGEOUT, 1);
			do {
				cpu_relax();
				if (cpsw_ale_control_get(ale, 0, ALE_AGEOUT))
					break;
			} while (time_after(timeout, jiffies));
			cpsw_ale_control_set(ale, 0, ALE_AGEOUT, 1);

			/* Clear all mcast from ALE */
			cpsw_ale_flush_multicast(ale, ALE_ALL_PORTS, -1);
			__dev_mc_unsync(ndev, NULL);

			/* Flood All Unicast Packets to Host port */
			cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 1);
			dev_dbg(&ndev->dev, "promiscuity enabled\n");
		} else {
			/* Don't Flood All Unicast Packets to Host port */
			cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 0);

			/* Enable Learn for all ports (host is port 0 and slaves are port 1 and up) */
			for (i = 0; i <= cpsw->data.slaves; i++) {
				cpsw_ale_control_set(ale, i,
						     ALE_PORT_NOLEARN, 0);
				cpsw_ale_control_set(ale, i,
						     ALE_PORT_NO_SA_UPDATE, 0);
			}
			dev_dbg(&ndev->dev, "promiscuity disabled\n");
		}
	}
}
static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int vid;

	if (cpsw->data.dual_emac)
		vid = cpsw->slaves[priv->emac_port].port_vlan;
	else
		vid = cpsw->data.default_vlan;

	if (ndev->flags & IFF_PROMISC) {
		/* Enable promiscuous mode */
		cpsw_set_promiscious(ndev, true);
		cpsw_ale_set_allmulti(cpsw->ale, IFF_ALLMULTI);
		return;
	} else {
		/* Disable promiscuous mode */
		cpsw_set_promiscious(ndev, false);
	}

	/* Restore allmulti on vlans if necessary */
	cpsw_ale_set_allmulti(cpsw->ale, priv->ndev->flags & IFF_ALLMULTI);

	/* Clear all mcast from ALE */
	cpsw_ale_flush_multicast(cpsw->ale, ALE_ALL_PORTS, vid);

	if (!netdev_mc_empty(ndev)) {
		struct netdev_hw_addr *ha;

		/* program multicast address list into ALE register */
		netdev_for_each_mc_addr(ha, ndev) {
			cpsw_add_mcast(priv, ha->addr);
		}
	}
}
static void cpsw_intr_enable(struct cpsw_common *cpsw)
{
	writel_relaxed(0xFF, &cpsw->wr_regs->tx_en);
	writel_relaxed(0xFF, &cpsw->wr_regs->rx_en);

	cpdma_ctlr_int_ctrl(cpsw->dma, true);
	return;
}

static void cpsw_intr_disable(struct cpsw_common *cpsw)
{
	writel_relaxed(0, &cpsw->wr_regs->tx_en);
	writel_relaxed(0, &cpsw->wr_regs->rx_en);

	cpdma_ctlr_int_ctrl(cpsw->dma, false);
	return;
}
static void cpsw_tx_handler(void *token, int len, int status)
{
	struct netdev_queue	*txq;
	struct sk_buff		*skb = token;
	struct net_device	*ndev = skb->dev;
	struct cpsw_common	*cpsw = ndev_to_cpsw(ndev);

	/* Check whether the queue is stopped due to stalled tx dma, if the
	 * queue is stopped then start the queue as we have free desc for tx
	 */
	txq = netdev_get_tx_queue(ndev, skb_get_queue_mapping(skb));
	if (unlikely(netif_tx_queue_stopped(txq)))
		netif_tx_wake_queue(txq);

	cpts_tx_timestamp(cpsw->cpts, skb);
	ndev->stats.tx_packets++;
	ndev->stats.tx_bytes += len;
	dev_kfree_skb_any(skb);
}
static void cpsw_rx_vlan_encap(struct sk_buff *skb)
{
	struct cpsw_priv *priv = netdev_priv(skb->dev);
	struct cpsw_common *cpsw = priv->cpsw;
	u32 rx_vlan_encap_hdr = *((u32 *)skb->data);
	u16 vtag, vid, prio, pkt_type;

	/* Remove VLAN header encapsulation word */
	skb_pull(skb, CPSW_RX_VLAN_ENCAP_HDR_SIZE);

	pkt_type = (rx_vlan_encap_hdr >>
		    CPSW_RX_VLAN_ENCAP_HDR_PKT_TYPE_SHIFT) &
		    CPSW_RX_VLAN_ENCAP_HDR_PKT_TYPE_MSK;
	/* Ignore unknown & priority-tagged packets */
	if (pkt_type == CPSW_RX_VLAN_ENCAP_HDR_PKT_RESERV ||
	    pkt_type == CPSW_RX_VLAN_ENCAP_HDR_PKT_PRIO_TAG)
		return;

	vid = (rx_vlan_encap_hdr >>
	       CPSW_RX_VLAN_ENCAP_HDR_VID_SHIFT) &
	       VLAN_VID_MASK;
	/* Ignore vid 0 and pass packet as is */
	if (!vid)
		return;
	/* Ignore default vlans in dual mac mode */
	if (cpsw->data.dual_emac &&
	    vid == cpsw->slaves[priv->emac_port].port_vlan)
		return;

	prio = (rx_vlan_encap_hdr >>
		CPSW_RX_VLAN_ENCAP_HDR_PRIO_SHIFT) &
		CPSW_RX_VLAN_ENCAP_HDR_PRIO_MSK;

	vtag = (prio << VLAN_PRIO_SHIFT) | vid;
	__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vtag);

	/* strip vlan tag for VLAN-tagged packet */
	if (pkt_type == CPSW_RX_VLAN_ENCAP_HDR_PKT_VLAN_TAG) {
		memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN);
		skb_pull(skb, VLAN_HLEN);
	}
}
static void cpsw_rx_handler(void *token, int len, int status)
{
	struct cpdma_chan	*ch;
	struct sk_buff		*skb = token;
	struct sk_buff		*new_skb;
	struct net_device	*ndev = skb->dev;
	int			ret = 0, port;
	struct cpsw_common	*cpsw = ndev_to_cpsw(ndev);

	if (cpsw->data.dual_emac) {
		port = CPDMA_RX_SOURCE_PORT(status);
		if (port) {
			ndev = cpsw->slaves[--port].ndev;
			skb->dev = ndev;
		}
	}

	if (unlikely(status < 0) || unlikely(!netif_running(ndev))) {
		/* In dual emac mode check for all interfaces */
		if (cpsw->data.dual_emac && cpsw->usage_count &&
		    (status >= 0)) {
			/* The packet received is for the interface which
			 * is already down and the other interface is up
			 * and running, instead of freeing which results
			 * in reducing of the number of rx descriptor in
			 * DMA engine, requeue skb back to cpdma.
			 */
			new_skb = skb;
			goto requeue;
		}

		/* the interface is going down, skbs are purged */
		dev_kfree_skb_any(skb);
		return;
	}

	new_skb = netdev_alloc_skb_ip_align(ndev, cpsw->rx_packet_max);
	if (new_skb) {
		skb_copy_queue_mapping(new_skb, skb);
		skb_put(skb, len);
		if (status & CPDMA_RX_VLAN_ENCAP)
			cpsw_rx_vlan_encap(skb);
		cpts_rx_timestamp(cpsw->cpts, skb);
		skb->protocol = eth_type_trans(skb, ndev);
		netif_receive_skb(skb);
		ndev->stats.rx_bytes += len;
		ndev->stats.rx_packets++;
		kmemleak_not_leak(new_skb);
	} else {
		ndev->stats.rx_dropped++;
		new_skb = skb;
	}

requeue:
	if (netif_dormant(ndev)) {
		dev_kfree_skb_any(new_skb);
		return;
	}

	ch = cpsw->rxv[skb_get_queue_mapping(new_skb)].ch;
	ret = cpdma_chan_submit(ch, new_skb, new_skb->data,
				skb_tailroom(new_skb), 0);
	if (WARN_ON(ret < 0))
		dev_kfree_skb_any(new_skb);
}
static void cpsw_split_res(struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	u32 consumed_rate = 0, bigest_rate = 0;
	struct cpsw_common *cpsw = priv->cpsw;
	struct cpsw_vector *txv = cpsw->txv;
	int i, ch_weight, rlim_ch_num = 0;
	int budget, bigest_rate_ch = 0;
	u32 ch_rate, max_rate;
	int ch_budget = 0;

	for (i = 0; i < cpsw->tx_ch_num; i++) {
		ch_rate = cpdma_chan_get_rate(txv[i].ch);
		if (!ch_rate)
			continue;

		rlim_ch_num++;
		consumed_rate += ch_rate;
	}

	if (cpsw->tx_ch_num == rlim_ch_num) {
		max_rate = consumed_rate;
	} else if (!rlim_ch_num) {
		ch_budget = CPSW_POLL_WEIGHT / cpsw->tx_ch_num;
		bigest_rate = 0;
		max_rate = consumed_rate;
	} else {
		max_rate = cpsw->speed * 1000;

		/* if max_rate is less than expected due to reduced link speed,
		 * split proportionally according next potential max speed
		 */
		if (max_rate < consumed_rate)
			max_rate *= 10;

		if (max_rate < consumed_rate)
			max_rate *= 10;

		ch_budget = (consumed_rate * CPSW_POLL_WEIGHT) / max_rate;
		ch_budget = (CPSW_POLL_WEIGHT - ch_budget) /
			    (cpsw->tx_ch_num - rlim_ch_num);
		bigest_rate = (max_rate - consumed_rate) /
			      (cpsw->tx_ch_num - rlim_ch_num);
	}

	/* split tx weight/budget */
	budget = CPSW_POLL_WEIGHT;
	for (i = 0; i < cpsw->tx_ch_num; i++) {
		ch_rate = cpdma_chan_get_rate(txv[i].ch);
		if (ch_rate) {
			txv[i].budget = (ch_rate * CPSW_POLL_WEIGHT) / max_rate;
			if (!txv[i].budget)
				txv[i].budget++;
			if (ch_rate > bigest_rate) {
				bigest_rate_ch = i;
				bigest_rate = ch_rate;
			}

			ch_weight = (ch_rate * 100) / max_rate;
			if (!ch_weight)
				ch_weight++;
			cpdma_chan_set_weight(cpsw->txv[i].ch, ch_weight);
		} else {
			txv[i].budget = ch_budget;
			if (!bigest_rate_ch)
				bigest_rate_ch = i;
			cpdma_chan_set_weight(cpsw->txv[i].ch, 0);
		}

		budget -= txv[i].budget;
	}

	if (budget)
		txv[bigest_rate_ch].budget += budget;

	/* split rx budget */
	budget = CPSW_POLL_WEIGHT;
	ch_budget = budget / cpsw->rx_ch_num;
	for (i = 0; i < cpsw->rx_ch_num; i++) {
		cpsw->rxv[i].budget = ch_budget;
		budget -= ch_budget;
	}

	if (budget)
		cpsw->rxv[0].budget += budget;
}
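/* Illustrative split, assuming CPSW_POLL_WEIGHT = 64: with two tx
 * channels on a 1000 Mbit/s link where channel 0 is rate-limited to
 * 100 Mbit/s (100000 kbit/s) and channel 1 is unlimited, the code
 * above yields
 *   txv[0].budget = 100000 * 64 / 1000000 = 6
 *   txv[1].budget = (64 - 6) / 1          = 58
 * so NAPI polling time is divided roughly in proportion to the
 * bandwidth each channel may consume.
 */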
static irqreturn_t cpsw_tx_interrupt(int irq, void *dev_id)
{
	struct cpsw_common *cpsw = dev_id;

	writel(0, &cpsw->wr_regs->tx_en);
	cpdma_ctlr_eoi(cpsw->dma, CPDMA_EOI_TX);

	if (cpsw->quirk_irq) {
		disable_irq_nosync(cpsw->irqs_table[1]);
		cpsw->tx_irq_disabled = true;
	}

	napi_schedule(&cpsw->napi_tx);
	return IRQ_HANDLED;
}

static irqreturn_t cpsw_rx_interrupt(int irq, void *dev_id)
{
	struct cpsw_common *cpsw = dev_id;

	writel(0, &cpsw->wr_regs->rx_en);
	cpdma_ctlr_eoi(cpsw->dma, CPDMA_EOI_RX);

	if (cpsw->quirk_irq) {
		disable_irq_nosync(cpsw->irqs_table[0]);
		cpsw->rx_irq_disabled = true;
	}

	napi_schedule(&cpsw->napi_rx);
	return IRQ_HANDLED;
}
static int cpsw_tx_mq_poll(struct napi_struct *napi_tx, int budget)
{
	u32			ch_map;
	int			num_tx, cur_budget, ch;
	struct cpsw_common	*cpsw = napi_to_cpsw(napi_tx);
	struct cpsw_vector	*txv;

	/* process every unprocessed channel */
	ch_map = cpdma_ctrl_txchs_state(cpsw->dma);
	for (ch = 0, num_tx = 0; ch_map & 0xff; ch_map <<= 1, ch++) {
		if (!(ch_map & 0x80))
			continue;

		txv = &cpsw->txv[ch];
		if (unlikely(txv->budget > budget - num_tx))
			cur_budget = budget - num_tx;
		else
			cur_budget = txv->budget;

		num_tx += cpdma_chan_process(txv->ch, cur_budget);
		if (num_tx >= budget)
			break;
	}

	if (num_tx < budget) {
		napi_complete(napi_tx);
		writel(0xff, &cpsw->wr_regs->tx_en);
	}

	return num_tx;
}
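/* Note the interrupt scheme shared by the poll routines above and
 * below: the hard IRQ handler masks its source by writing 0 to
 * tx_en/rx_en and schedules NAPI; once a poll consumes less than its
 * budget it calls napi_complete() and re-arms the source by writing
 * 0xff back to the same register.
 */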
static int cpsw_tx_poll(struct napi_struct *napi_tx, int budget)
{
	struct cpsw_common *cpsw = napi_to_cpsw(napi_tx);
	int num_tx;

	num_tx = cpdma_chan_process(cpsw->txv[0].ch, budget);
	if (num_tx < budget) {
		napi_complete(napi_tx);
		writel(0xff, &cpsw->wr_regs->tx_en);
		if (cpsw->tx_irq_disabled) {
			cpsw->tx_irq_disabled = false;
			enable_irq(cpsw->irqs_table[1]);
		}
	}

	return num_tx;
}
static int cpsw_rx_mq_poll(struct napi_struct *napi_rx, int budget)
{
	u32			ch_map;
	int			num_rx, cur_budget, ch;
	struct cpsw_common	*cpsw = napi_to_cpsw(napi_rx);
	struct cpsw_vector	*rxv;

	/* process every unprocessed channel */
	ch_map = cpdma_ctrl_rxchs_state(cpsw->dma);
	for (ch = 0, num_rx = 0; ch_map; ch_map >>= 1, ch++) {
		if (!(ch_map & 0x01))
			continue;

		rxv = &cpsw->rxv[ch];
		if (unlikely(rxv->budget > budget - num_rx))
			cur_budget = budget - num_rx;
		else
			cur_budget = rxv->budget;

		num_rx += cpdma_chan_process(rxv->ch, cur_budget);
		if (num_rx >= budget)
			break;
	}

	if (num_rx < budget) {
		napi_complete_done(napi_rx, num_rx);
		writel(0xff, &cpsw->wr_regs->rx_en);
	}

	return num_rx;
}
static int cpsw_rx_poll(struct napi_struct *napi_rx, int budget)
{
	struct cpsw_common *cpsw = napi_to_cpsw(napi_rx);
	int num_rx;

	num_rx = cpdma_chan_process(cpsw->rxv[0].ch, budget);
	if (num_rx < budget) {
		napi_complete_done(napi_rx, num_rx);
		writel(0xff, &cpsw->wr_regs->rx_en);
		if (cpsw->rx_irq_disabled) {
			cpsw->rx_irq_disabled = false;
			enable_irq(cpsw->irqs_table[0]);
		}
	}

	return num_rx;
}
static inline void soft_reset(const char *module, void __iomem *reg)
{
	unsigned long timeout = jiffies + HZ;

	writel_relaxed(1, reg);
	do {
		cpu_relax();
	} while ((readl_relaxed(reg) & 1) && time_after(timeout, jiffies));

	WARN(readl_relaxed(reg) & 1, "failed to soft-reset %s\n", module);
}
static void cpsw_set_slave_mac(struct cpsw_slave *slave,
			       struct cpsw_priv *priv)
{
	slave_write(slave, mac_hi(priv->mac_addr), SA_HI);
	slave_write(slave, mac_lo(priv->mac_addr), SA_LO);
}
static bool cpsw_shp_is_off(struct cpsw_priv *priv)
{
	struct cpsw_common *cpsw = priv->cpsw;
	struct cpsw_slave *slave;
	u32 shift, mask, val;

	val = readl_relaxed(&cpsw->regs->ptype);

	slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
	shift = CPSW_FIFO_SHAPE_EN_SHIFT + 3 * slave->slave_num;
	mask = 7 << shift;
	val = val & mask;

	return !val;
}
static void cpsw_fifo_shp_on(struct cpsw_priv *priv, int fifo, int on)
{
	struct cpsw_common *cpsw = priv->cpsw;
	struct cpsw_slave *slave;
	u32 shift, mask, val;

	val = readl_relaxed(&cpsw->regs->ptype);

	slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
	shift = CPSW_FIFO_SHAPE_EN_SHIFT + 3 * slave->slave_num;
	mask = (1 << --fifo) << shift;
	val = on ? val | mask : val & ~mask;

	writel_relaxed(val, &cpsw->regs->ptype);
}
static void _cpsw_adjust_link(struct cpsw_slave *slave,
			      struct cpsw_priv *priv, bool *link)
{
	struct phy_device	*phy = slave->phy;
	u32			mac_control = 0;
	u32			slave_port;
	struct cpsw_common *cpsw = priv->cpsw;

	if (!phy)
		return;

	slave_port = cpsw_get_slave_port(slave->slave_num);

	if (phy->link) {
		mac_control = cpsw->data.mac_control;

		/* enable forwarding */
		cpsw_ale_control_set(cpsw->ale, slave_port,
				     ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);

		if (phy->speed == 1000)
			mac_control |= BIT(7);	/* GIGABITEN	*/
		if (phy->duplex)
			mac_control |= BIT(0);	/* FULLDUPLEXEN	*/

		/* set speed_in input in case RMII mode is used in 100Mbps */
		if (phy->speed == 100)
			mac_control |= BIT(15);
		/* in band mode only works in 10Mbps RGMII mode */
		else if ((phy->speed == 10) && phy_interface_is_rgmii(phy))
			mac_control |= BIT(18); /* In Band mode */

		if (priv->rx_pause)
			mac_control |= BIT(3);

		if (priv->tx_pause)
			mac_control |= BIT(4);

		*link = true;

		if (priv->shp_cfg_speed &&
		    priv->shp_cfg_speed != slave->phy->speed &&
		    !cpsw_shp_is_off(priv))
			dev_warn(priv->dev,
				 "Speed was changed, CBS shaper speeds are changed!");
	} else {
		mac_control = 0;
		/* disable forwarding */
		cpsw_ale_control_set(cpsw->ale, slave_port,
				     ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
	}

	if (mac_control != slave->mac_control) {
		phy_print_status(phy);
		writel_relaxed(mac_control, &slave->sliver->mac_control);
	}

	slave->mac_control = mac_control;
}
static int cpsw_get_common_speed(struct cpsw_common *cpsw)
{
	int i, speed;

	for (i = 0, speed = 0; i < cpsw->data.slaves; i++)
		if (cpsw->slaves[i].phy && cpsw->slaves[i].phy->link)
			speed += cpsw->slaves[i].phy->speed;

	return speed;
}
static int cpsw_need_resplit(struct cpsw_common *cpsw)
{
	int i, rlim_ch_num;
	int speed, ch_rate;

	/* re-split resources only in case speed was changed */
	speed = cpsw_get_common_speed(cpsw);
	if (speed == cpsw->speed || !speed)
		return 0;

	cpsw->speed = speed;

	for (i = 0, rlim_ch_num = 0; i < cpsw->tx_ch_num; i++) {
		ch_rate = cpdma_chan_get_rate(cpsw->txv[i].ch);
		if (!ch_rate)
			break;

		rlim_ch_num++;
	}

	/* cases not dependent on speed */
	if (!rlim_ch_num || rlim_ch_num == cpsw->tx_ch_num)
		return 0;

	return 1;
}
static void cpsw_adjust_link(struct net_device *ndev)
{
	struct cpsw_priv	*priv = netdev_priv(ndev);
	struct cpsw_common	*cpsw = priv->cpsw;
	bool			link = false;

	for_each_slave(priv, _cpsw_adjust_link, priv, &link);

	if (link) {
		if (cpsw_need_resplit(cpsw))
			cpsw_split_res(ndev);

		netif_carrier_on(ndev);
		if (netif_running(ndev))
			netif_tx_wake_all_queues(ndev);
	} else {
		netif_carrier_off(ndev);
		netif_tx_stop_all_queues(ndev);
	}
}
static int cpsw_get_coalesce(struct net_device *ndev,
			     struct ethtool_coalesce *coal)
{
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);

	coal->rx_coalesce_usecs = cpsw->coal_intvl;
	return 0;
}
static int cpsw_set_coalesce(struct net_device *ndev,
			     struct ethtool_coalesce *coal)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	u32 int_ctrl;
	u32 num_interrupts = 0;
	u32 prescale = 0;
	u32 addnl_dvdr = 1;
	u32 coal_intvl = 0;
	struct cpsw_common *cpsw = priv->cpsw;

	coal_intvl = coal->rx_coalesce_usecs;

	int_ctrl = readl(&cpsw->wr_regs->int_control);
	prescale = cpsw->bus_freq_mhz * 4;

	if (!coal->rx_coalesce_usecs) {
		int_ctrl &= ~(CPSW_INTPRESCALE_MASK | CPSW_INTPACEEN);
		goto update_return;
	}

	if (coal_intvl < CPSW_CMINTMIN_INTVL)
		coal_intvl = CPSW_CMINTMIN_INTVL;

	if (coal_intvl > CPSW_CMINTMAX_INTVL) {
		/* Interrupt pacer works with 4us Pulse, we can
		 * throttle further by dilating the 4us pulse.
		 */
		addnl_dvdr = CPSW_INTPRESCALE_MASK / prescale;

		if (addnl_dvdr > 1) {
			prescale *= addnl_dvdr;
			if (coal_intvl > (CPSW_CMINTMAX_INTVL * addnl_dvdr))
				coal_intvl = (CPSW_CMINTMAX_INTVL
						* addnl_dvdr);
		} else {
			addnl_dvdr = 1;
			coal_intvl = CPSW_CMINTMAX_INTVL;
		}
	}

	num_interrupts = (1000 * addnl_dvdr) / coal_intvl;
	writel(num_interrupts, &cpsw->wr_regs->rx_imax);
	writel(num_interrupts, &cpsw->wr_regs->tx_imax);

	int_ctrl |= CPSW_INTPACEEN;
	int_ctrl &= (~CPSW_INTPRESCALE_MASK);
	int_ctrl |= (prescale & CPSW_INTPRESCALE_MASK);

update_return:
	writel(int_ctrl, &cpsw->wr_regs->int_control);

	cpsw_notice(priv, timer, "Set coalesce to %d usecs.\n", coal_intvl);
	cpsw->coal_intvl = coal_intvl;

	return 0;
}
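/* Worked example of the pacing math above, assuming a 250 MHz bus
 * (cpsw->bus_freq_mhz = 250) and rx_coalesce_usecs = 250:
 *   prescale       = 250 * 4        = 1000 ticks per 4 us pulse
 *   addnl_dvdr     = 1 (250 us fits within CPSW_CMINTMAX_INTVL = 500 us)
 *   num_interrupts = 1000 * 1 / 250 = 4
 * so rx_imax/tx_imax are programmed for at most 4 paced interrupts
 * per millisecond, i.e. one every ~250 us.
 */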
static int cpsw_get_sset_count(struct net_device *ndev, int sset)
{
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);

	switch (sset) {
	case ETH_SS_STATS:
		return (CPSW_STATS_COMMON_LEN +
		       (cpsw->rx_ch_num + cpsw->tx_ch_num) *
		       CPSW_STATS_CH_LEN);
	default:
		return -EOPNOTSUPP;
	}
}
static void cpsw_add_ch_strings(u8 **p, int ch_num, int rx_dir)
{
	int ch_stats_len;
	int line;
	int i;

	ch_stats_len = CPSW_STATS_CH_LEN * ch_num;
	for (i = 0; i < ch_stats_len; i++) {
		line = i % CPSW_STATS_CH_LEN;
		snprintf(*p, ETH_GSTRING_LEN,
			 "%s DMA chan %ld: %s", rx_dir ? "Rx" : "Tx",
			 (long)(i / CPSW_STATS_CH_LEN),
			 cpsw_gstrings_ch_stats[line].stat_string);
		*p += ETH_GSTRING_LEN;
	}
}
static void cpsw_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
{
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
	u8 *p = data;
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < CPSW_STATS_COMMON_LEN; i++) {
			memcpy(p, cpsw_gstrings_stats[i].stat_string,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}

		cpsw_add_ch_strings(&p, cpsw->rx_ch_num, 1);
		cpsw_add_ch_strings(&p, cpsw->tx_ch_num, 0);
		break;
	}
}
static void cpsw_get_ethtool_stats(struct net_device *ndev,
				   struct ethtool_stats *stats, u64 *data)
{
	u8 *p;
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
	struct cpdma_chan_stats ch_stats;
	int i, l, ch;

	/* Collect Davinci CPDMA stats for Rx and Tx Channel */
	for (l = 0; l < CPSW_STATS_COMMON_LEN; l++)
		data[l] = readl(cpsw->hw_stats +
				cpsw_gstrings_stats[l].stat_offset);

	for (ch = 0; ch < cpsw->rx_ch_num; ch++) {
		cpdma_chan_get_stats(cpsw->rxv[ch].ch, &ch_stats);
		for (i = 0; i < CPSW_STATS_CH_LEN; i++, l++) {
			p = (u8 *)&ch_stats +
				cpsw_gstrings_ch_stats[i].stat_offset;
			data[l] = *(u32 *)p;
		}
	}

	for (ch = 0; ch < cpsw->tx_ch_num; ch++) {
		cpdma_chan_get_stats(cpsw->txv[ch].ch, &ch_stats);
		for (i = 0; i < CPSW_STATS_CH_LEN; i++, l++) {
			p = (u8 *)&ch_stats +
				cpsw_gstrings_ch_stats[i].stat_offset;
			data[l] = *(u32 *)p;
		}
	}
}
static inline int cpsw_tx_packet_submit(struct cpsw_priv *priv,
					struct sk_buff *skb,
					struct cpdma_chan *txch)
{
	struct cpsw_common *cpsw = priv->cpsw;

	skb_tx_timestamp(skb);
	return cpdma_chan_submit(txch, skb, skb->data, skb->len,
				 priv->emac_port + cpsw->data.dual_emac);
}
static inline void cpsw_add_dual_emac_def_ale_entries(
		struct cpsw_priv *priv, struct cpsw_slave *slave,
		u32 slave_port)
{
	struct cpsw_common *cpsw = priv->cpsw;
	u32 port_mask = 1 << slave_port | ALE_PORT_HOST;

	if (cpsw->version == CPSW_VERSION_1)
		slave_write(slave, slave->port_vlan, CPSW1_PORT_VLAN);
	else
		slave_write(slave, slave->port_vlan, CPSW2_PORT_VLAN);
	cpsw_ale_add_vlan(cpsw->ale, slave->port_vlan, port_mask,
			  port_mask, port_mask, 0);
	cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast,
			   port_mask, ALE_VLAN, slave->port_vlan, 0);
	cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr,
			   HOST_PORT_NUM, ALE_VLAN |
			   ALE_SECURE, slave->port_vlan);
	cpsw_ale_control_set(cpsw->ale, slave_port,
			     ALE_PORT_DROP_UNKNOWN_VLAN, 1);
}
static void soft_reset_slave(struct cpsw_slave *slave)
{
	char name[32];

	snprintf(name, sizeof(name), "slave-%d", slave->slave_num);
	soft_reset(name, &slave->sliver->soft_reset);
}
static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
{
	u32 slave_port;
	struct phy_device *phy;
	struct cpsw_common *cpsw = priv->cpsw;

	soft_reset_slave(slave);

	/* setup priority mapping */
	writel_relaxed(RX_PRIORITY_MAPPING, &slave->sliver->rx_pri_map);

	switch (cpsw->version) {
	case CPSW_VERSION_1:
		slave_write(slave, TX_PRIORITY_MAPPING, CPSW1_TX_PRI_MAP);
		/* Increase RX FIFO size to 5 for supporting fullduplex
		 * flow control mode
		 */
		slave_write(slave,
			    (CPSW_MAX_BLKS_TX << CPSW_MAX_BLKS_TX_SHIFT) |
			    CPSW_MAX_BLKS_RX, CPSW1_MAX_BLKS);
		break;
	case CPSW_VERSION_2:
	case CPSW_VERSION_3:
	case CPSW_VERSION_4:
		slave_write(slave, TX_PRIORITY_MAPPING, CPSW2_TX_PRI_MAP);
		/* Increase RX FIFO size to 5 for supporting fullduplex
		 * flow control mode
		 */
		slave_write(slave,
			    (CPSW_MAX_BLKS_TX << CPSW_MAX_BLKS_TX_SHIFT) |
			    CPSW_MAX_BLKS_RX, CPSW2_MAX_BLKS);
		break;
	}

	/* setup max packet size, and mac address */
	writel_relaxed(cpsw->rx_packet_max, &slave->sliver->rx_maxlen);
	cpsw_set_slave_mac(slave, priv);

	slave->mac_control = 0;	/* no link yet */

	slave_port = cpsw_get_slave_port(slave->slave_num);

	if (cpsw->data.dual_emac)
		cpsw_add_dual_emac_def_ale_entries(priv, slave, slave_port);
	else
		cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast,
				   1 << slave_port, 0, 0, ALE_MCAST_FWD_2);

	if (slave->data->phy_node) {
		phy = of_phy_connect(priv->ndev, slave->data->phy_node,
				 &cpsw_adjust_link, 0, slave->data->phy_if);
		if (!phy) {
			dev_err(priv->dev, "phy \"%pOF\" not found on slave %d\n",
				slave->data->phy_node,
				slave->slave_num);
			return;
		}
	} else {
		phy = phy_connect(priv->ndev, slave->data->phy_id,
				 &cpsw_adjust_link, slave->data->phy_if);
		if (IS_ERR(phy)) {
			dev_err(priv->dev,
				"phy \"%s\" not found on slave %d, err %ld\n",
				slave->data->phy_id, slave->slave_num,
				PTR_ERR(phy));
			return;
		}
	}

	slave->phy = phy;

	phy_attached_info(slave->phy);

	phy_start(slave->phy);

	/* Configure GMII_SEL register */
	cpsw_phy_sel(cpsw->dev, slave->phy->interface, slave->slave_num);
}
static inline void cpsw_add_default_vlan(struct cpsw_priv *priv)
{
	struct cpsw_common *cpsw = priv->cpsw;
	const int vlan = cpsw->data.default_vlan;
	u32 reg;
	int i;
	int unreg_mcast_mask;

	reg = (cpsw->version == CPSW_VERSION_1) ? CPSW1_PORT_VLAN :
	       CPSW2_PORT_VLAN;

	writel(vlan, &cpsw->host_port_regs->port_vlan);

	for (i = 0; i < cpsw->data.slaves; i++)
		slave_write(cpsw->slaves + i, vlan, reg);

	if (priv->ndev->flags & IFF_ALLMULTI)
		unreg_mcast_mask = ALE_ALL_PORTS;
	else
		unreg_mcast_mask = ALE_PORT_1 | ALE_PORT_2;

	cpsw_ale_add_vlan(cpsw->ale, vlan, ALE_ALL_PORTS,
			  ALE_ALL_PORTS, ALE_ALL_PORTS,
			  unreg_mcast_mask);
}
static void cpsw_init_host_port(struct cpsw_priv *priv)
{
	u32 fifo_mode;
	u32 control_reg;
	struct cpsw_common *cpsw = priv->cpsw;

	/* soft reset the controller and initialize ale */
	soft_reset("cpsw", &cpsw->regs->soft_reset);
	cpsw_ale_start(cpsw->ale);

	/* switch to vlan aware mode */
	cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_VLAN_AWARE,
			     CPSW_ALE_VLAN_AWARE);
	control_reg = readl(&cpsw->regs->control);
	control_reg |= CPSW_VLAN_AWARE | CPSW_RX_VLAN_ENCAP;
	writel(control_reg, &cpsw->regs->control);
	fifo_mode = (cpsw->data.dual_emac) ? CPSW_FIFO_DUAL_MAC_MODE :
		     CPSW_FIFO_NORMAL_MODE;
	writel(fifo_mode, &cpsw->host_port_regs->tx_in_ctl);

	/* setup host port priority mapping */
	writel_relaxed(CPDMA_TX_PRIORITY_MAP,
		       &cpsw->host_port_regs->cpdma_tx_pri_map);
	writel_relaxed(0, &cpsw->host_port_regs->cpdma_rx_chan_map);

	cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM,
			     ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);

	if (!cpsw->data.dual_emac) {
		cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr, HOST_PORT_NUM,
				   0, 0);
		cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast,
				   ALE_PORT_HOST, 0, 0, ALE_MCAST_FWD_2);
	}
}
static int cpsw_fill_rx_channels(struct cpsw_priv *priv)
{
	struct cpsw_common *cpsw = priv->cpsw;
	struct sk_buff *skb;
	int ch_buf_num;
	int ch, i, ret;

	for (ch = 0; ch < cpsw->rx_ch_num; ch++) {
		ch_buf_num = cpdma_chan_get_rx_buf_num(cpsw->rxv[ch].ch);
		for (i = 0; i < ch_buf_num; i++) {
			skb = __netdev_alloc_skb_ip_align(priv->ndev,
							  cpsw->rx_packet_max,
							  GFP_KERNEL);
			if (!skb) {
				cpsw_err(priv, ifup, "cannot allocate skb\n");
				return -ENOMEM;
			}

			skb_set_queue_mapping(skb, ch);
			ret = cpdma_chan_submit(cpsw->rxv[ch].ch, skb,
						skb->data, skb_tailroom(skb),
						0);
			if (ret < 0) {
				cpsw_err(priv, ifup,
					 "cannot submit skb to channel %d rx, error %d\n",
					 ch, ret);
				kfree_skb(skb);
				return ret;
			}
			kmemleak_not_leak(skb);
		}

		cpsw_info(priv, ifup, "ch %d rx, submitted %d descriptors\n",
			  ch, ch_buf_num);
	}

	return 0;
}
static void cpsw_slave_stop(struct cpsw_slave *slave, struct cpsw_common *cpsw)
{
	u32 slave_port;

	slave_port = cpsw_get_slave_port(slave->slave_num);

	if (!slave->phy)
		return;
	phy_stop(slave->phy);
	phy_disconnect(slave->phy);
	slave->phy = NULL;
	cpsw_ale_control_set(cpsw->ale, slave_port,
			     ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
	soft_reset_slave(slave);
}
static int cpsw_tc_to_fifo(int tc, int num_tc)
{
	if (tc == num_tc - 1)
		return 0;

	return CPSW_FIFO_SHAPERS_NUM - tc;
}
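/* Example mapping with CPSW_FIFO_SHAPERS_NUM = 3 and num_tc = 4:
 *   tc0 -> fifo 3, tc1 -> fifo 2, tc2 -> fifo 1, tc3 -> fifo 0
 * i.e. the last (lowest-priority) traffic class always lands in
 * FIFO 0, which cannot be shaped, while higher classes occupy the
 * shapeable FIFOs from the top down.
 */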
static int cpsw_set_fifo_bw(struct cpsw_priv *priv, int fifo, int bw)
{
	struct cpsw_common *cpsw = priv->cpsw;
	u32 val = 0, send_pct, shift;
	struct cpsw_slave *slave;
	int pct = 0, i;

	if (bw > priv->shp_cfg_speed * 1000)
		goto err;

	/* shaping has to stay enabled for highest fifos linearly
	 * and fifo bw no more than interface can allow
	 */
	slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
	send_pct = slave_read(slave, SEND_PERCENT);
	for (i = CPSW_FIFO_SHAPERS_NUM; i > 0; i--) {
		if (!bw) {
			if (i >= fifo || !priv->fifo_bw[i])
				continue;

			dev_warn(priv->dev, "Prev FIFO%d is shaped", i);
			continue;
		}

		if (!priv->fifo_bw[i] && i > fifo) {
			dev_err(priv->dev, "Upper FIFO%d is not shaped", i);
			return -EINVAL;
		}

		shift = (i - 1) * 8;
		if (i == fifo) {
			send_pct &= ~(CPSW_PCT_MASK << shift);
			val = DIV_ROUND_UP(bw, priv->shp_cfg_speed * 10);
			if (val > CPSW_PCT_MASK)
				val = CPSW_PCT_MASK;

			send_pct |= val << shift;
			pct += val;
			continue;
		}

		if (priv->fifo_bw[i])
			pct += (send_pct >> shift) & CPSW_PCT_MASK;
	}

	if (pct >= 100)
		goto err;

	slave_write(slave, send_pct, SEND_PERCENT);
	priv->fifo_bw[fifo] = bw;

	dev_warn(priv->dev, "set FIFO%d bw = %d\n", fifo,
		 DIV_ROUND_CLOSEST(val * priv->shp_cfg_speed, 100));

	return 0;
err:
	dev_err(priv->dev, "Bandwidth doesn't fit in tc configuration");
	return -EINVAL;
}
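/* The percentage math above: SEND_PERCENT holds one 7-bit percentage
 * field per shaped FIFO (CPSW_PCT_MASK at shift (fifo - 1) * 8), and
 * val = DIV_ROUND_UP(bw, shp_cfg_speed * 10) converts an idleslope in
 * kbit/s into a percentage of link rate. For instance on a 100 Mbit/s
 * link (shp_cfg_speed = 100), bw = 25000 kbit/s gives
 * val = 25000 / 1000 = 25, i.e. 25 percent of the wire rate.
 */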
static int cpsw_set_fifo_rlimit(struct cpsw_priv *priv, int fifo, int bw)
{
	struct cpsw_common *cpsw = priv->cpsw;
	struct cpsw_slave *slave;
	u32 tx_in_ctl_rg, val;
	int ret;

	ret = cpsw_set_fifo_bw(priv, fifo, bw);
	if (ret)
		return ret;

	slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
	tx_in_ctl_rg = cpsw->version == CPSW_VERSION_1 ?
		       CPSW1_TX_IN_CTL : CPSW2_TX_IN_CTL;

	if (!bw)
		cpsw_fifo_shp_on(priv, fifo, bw);

	val = slave_read(slave, tx_in_ctl_rg);
	if (cpsw_shp_is_off(priv)) {
		/* disable FIFOs rate limited queues */
		val &= ~(0xf << CPSW_FIFO_RATE_EN_SHIFT);

		/* set type of FIFO queues to normal priority mode */
		val &= ~(3 << CPSW_FIFO_QUEUE_TYPE_SHIFT);

		/* set type of FIFO queues to be rate limited */
		if (bw)
			val |= 2 << CPSW_FIFO_QUEUE_TYPE_SHIFT;
		else
			priv->shp_cfg_speed = 0;
	}

	/* toggle a FIFO rate limited queue */
	if (bw)
		val |= BIT(fifo + CPSW_FIFO_RATE_EN_SHIFT);
	else
		val &= ~BIT(fifo + CPSW_FIFO_RATE_EN_SHIFT);
	slave_write(slave, val, tx_in_ctl_rg);

	/* FIFO transmit shape enable */
	cpsw_fifo_shp_on(priv, fifo, bw);
	return 0;
}
/* Defaults:
 * class A - prio 3
 * class B - prio 2
 * shaping for class A should be set first
 */
static int cpsw_set_cbs(struct net_device *ndev,
			struct tc_cbs_qopt_offload *qopt)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	struct cpsw_slave *slave;
	int prev_speed = 0;
	int tc, ret, fifo;
	u32 bw = 0;

	tc = netdev_txq_to_tc(priv->ndev, qopt->queue);

	/* enable channels in backward order, as highest FIFOs must be rate
	 * limited first and for compliance with CPDMA rate limited channels
	 * that also used in backward order. FIFO0 cannot be rate limited.
	 */
	fifo = cpsw_tc_to_fifo(tc, ndev->num_tc);
	if (!fifo) {
		dev_err(priv->dev, "Last tc%d can't be rate limited", tc);
		return -EINVAL;
	}

	/* do nothing, it's disabled anyway */
	if (!qopt->enable && !priv->fifo_bw[fifo])
		return 0;

	/* shapers can be set if link speed is known */
	slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
	if (slave->phy && slave->phy->link) {
		if (priv->shp_cfg_speed &&
		    priv->shp_cfg_speed != slave->phy->speed)
			prev_speed = priv->shp_cfg_speed;

		priv->shp_cfg_speed = slave->phy->speed;
	}

	if (!priv->shp_cfg_speed) {
		dev_err(priv->dev, "Link speed is not known");
		return -1;
	}

	ret = pm_runtime_get_sync(cpsw->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(cpsw->dev);
		return ret;
	}

	bw = qopt->enable ? qopt->idleslope : 0;
	ret = cpsw_set_fifo_rlimit(priv, fifo, bw);
	if (ret) {
		priv->shp_cfg_speed = prev_speed;
		prev_speed = 0;
	}

	if (bw && prev_speed)
		dev_warn(priv->dev,
			 "Speed was changed, CBS shaper speeds are changed!");

	pm_runtime_put_sync(cpsw->dev);
	return ret;
}
static void cpsw_cbs_resume(struct cpsw_slave *slave, struct cpsw_priv *priv)
{
	int fifo, bw;

	for (fifo = CPSW_FIFO_SHAPERS_NUM; fifo > 0; fifo--) {
		bw = priv->fifo_bw[fifo];
		if (!bw)
			continue;

		cpsw_set_fifo_rlimit(priv, fifo, bw);
	}
}
static void cpsw_mqprio_resume(struct cpsw_slave *slave, struct cpsw_priv *priv)
{
	struct cpsw_common *cpsw = priv->cpsw;
	u32 tx_prio_map = 0;
	int i, tc, fifo;
	u32 tx_prio_rg;

	if (!priv->mqprio_hw)
		return;

	for (i = 0; i < 8; i++) {
		tc = netdev_get_prio_tc_map(priv->ndev, i);
		fifo = CPSW_FIFO_SHAPERS_NUM - tc;
		tx_prio_map |= fifo << (4 * i);
	}

	tx_prio_rg = cpsw->version == CPSW_VERSION_1 ?
		     CPSW1_TX_PRI_MAP : CPSW2_TX_PRI_MAP;

	slave_write(slave, tx_prio_map, tx_prio_rg);
}
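/* tx_prio_map packs one 4-bit FIFO number per 802.1p priority (eight
 * nibbles for priorities 0..7). For example, if every priority maps
 * to tc0, each nibble holds fifo = 3 - 0 = 3 and the register is
 * written as 0x33333333; the reset default TX_PRIORITY_MAPPING
 * (0x33221100) written in cpsw_slave_open() pairs the eight
 * priorities two-by-two across four FIFOs.
 */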
/* restore resources after port reset */
static void cpsw_restore(struct cpsw_priv *priv)
{
	/* restore MQPRIO offload */
	for_each_slave(priv, cpsw_mqprio_resume, priv);

	/* restore CBS offload */
	for_each_slave(priv, cpsw_cbs_resume, priv);
}
static int cpsw_ndo_open(struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int ret;
	u32 reg;

	ret = pm_runtime_get_sync(cpsw->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(cpsw->dev);
		return ret;
	}

	netif_carrier_off(ndev);

	/* Notify the stack of the actual queue counts. */
	ret = netif_set_real_num_tx_queues(ndev, cpsw->tx_ch_num);
	if (ret) {
		dev_err(priv->dev, "cannot set real number of tx queues\n");
		goto err_cleanup;
	}

	ret = netif_set_real_num_rx_queues(ndev, cpsw->rx_ch_num);
	if (ret) {
		dev_err(priv->dev, "cannot set real number of rx queues\n");
		goto err_cleanup;
	}

	reg = cpsw->version;

	dev_info(priv->dev, "initializing cpsw version %d.%d (%d)\n",
		 CPSW_MAJOR_VERSION(reg), CPSW_MINOR_VERSION(reg),
		 CPSW_RTL_VERSION(reg));

	/* Initialize host and slave ports */
	if (!cpsw->usage_count)
		cpsw_init_host_port(priv);
	for_each_slave(priv, cpsw_slave_open, priv);

	/* Add default VLAN */
	if (!cpsw->data.dual_emac)
		cpsw_add_default_vlan(priv);
	else
		cpsw_ale_add_vlan(cpsw->ale, cpsw->data.default_vlan,
				  ALE_ALL_PORTS, ALE_ALL_PORTS, 0, 0);

	/* initialize shared resources for every ndev */
	if (!cpsw->usage_count) {
		/* disable priority elevation */
		writel_relaxed(0, &cpsw->regs->ptype);

		/* enable statistics collection on all ports */
		writel_relaxed(0x7, &cpsw->regs->stat_port_en);

		/* Enable internal fifo flow control */
		writel(0x7, &cpsw->regs->flow_control);

		napi_enable(&cpsw->napi_rx);
		napi_enable(&cpsw->napi_tx);

		if (cpsw->tx_irq_disabled) {
			cpsw->tx_irq_disabled = false;
			enable_irq(cpsw->irqs_table[1]);
		}

		if (cpsw->rx_irq_disabled) {
			cpsw->rx_irq_disabled = false;
			enable_irq(cpsw->irqs_table[0]);
		}

		ret = cpsw_fill_rx_channels(priv);
		if (ret < 0)
			goto err_cleanup;

		if (cpts_register(cpsw->cpts))
			dev_err(priv->dev, "error registering cpts device\n");
	}

	cpsw_restore(priv);

	/* Enable Interrupt pacing if configured */
	if (cpsw->coal_intvl != 0) {
		struct ethtool_coalesce coal;

		coal.rx_coalesce_usecs = cpsw->coal_intvl;
		cpsw_set_coalesce(ndev, &coal);
	}

	cpdma_ctlr_start(cpsw->dma);
	cpsw_intr_enable(cpsw);
	cpsw->usage_count++;

	return 0;

err_cleanup:
	cpdma_ctlr_stop(cpsw->dma);
	for_each_slave(priv, cpsw_slave_stop, cpsw);
	pm_runtime_put_sync(cpsw->dev);
	netif_carrier_off(priv->ndev);
	return ret;
}
static int cpsw_ndo_stop(struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;

	cpsw_info(priv, ifdown, "shutting down cpsw device\n");
	netif_tx_stop_all_queues(priv->ndev);
	netif_carrier_off(priv->ndev);

	if (cpsw->usage_count <= 1) {
		napi_disable(&cpsw->napi_rx);
		napi_disable(&cpsw->napi_tx);
		cpts_unregister(cpsw->cpts);
		cpsw_intr_disable(cpsw);
		cpdma_ctlr_stop(cpsw->dma);
		cpsw_ale_stop(cpsw->ale);
	}
	for_each_slave(priv, cpsw_slave_stop, cpsw);

	if (cpsw_need_resplit(cpsw))
		cpsw_split_res(ndev);

	cpsw->usage_count--;
	pm_runtime_put_sync(cpsw->dev);
	return 0;
}
static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb,
				       struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	struct cpts *cpts = cpsw->cpts;
	struct netdev_queue *txq;
	struct cpdma_chan *txch;
	int ret, q_idx;

	if (skb_padto(skb, CPSW_MIN_PACKET_SIZE)) {
		cpsw_err(priv, tx_err, "packet pad failed\n");
		ndev->stats.tx_dropped++;
		return NET_XMIT_DROP;
	}

	if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
	    cpts_is_tx_enabled(cpts) && cpts_can_timestamp(cpts, skb))
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;

	q_idx = skb_get_queue_mapping(skb);
	if (q_idx >= cpsw->tx_ch_num)
		q_idx = q_idx % cpsw->tx_ch_num;

	txch = cpsw->txv[q_idx].ch;
	txq = netdev_get_tx_queue(ndev, q_idx);
	ret = cpsw_tx_packet_submit(priv, skb, txch);
	if (unlikely(ret != 0)) {
		cpsw_err(priv, tx_err, "desc submit failed\n");
		goto fail;
	}

	/* If there is no more tx desc left free then we need to
	 * tell the kernel to stop sending us tx frames.
	 */
	if (unlikely(!cpdma_check_free_tx_desc(txch))) {
		netif_tx_stop_queue(txq);

		/* Barrier, so that stop_queue visible to other cpus */
		smp_mb__after_atomic();

		if (cpdma_check_free_tx_desc(txch))
			netif_tx_wake_queue(txq);
	}

	return NETDEV_TX_OK;
fail:
	ndev->stats.tx_dropped++;
	netif_tx_stop_queue(txq);

	/* Barrier, so that stop_queue visible to other cpus */
	smp_mb__after_atomic();

	if (cpdma_check_free_tx_desc(txch))
		netif_tx_wake_queue(txq);

	return NETDEV_TX_BUSY;
}
#if IS_ENABLED(CONFIG_TI_CPTS)

static void cpsw_hwtstamp_v1(struct cpsw_common *cpsw)
{
	struct cpsw_slave *slave = &cpsw->slaves[cpsw->data.active_slave];
	u32 ts_en, seq_id;

	if (!cpts_is_tx_enabled(cpsw->cpts) &&
	    !cpts_is_rx_enabled(cpsw->cpts)) {
		slave_write(slave, 0, CPSW1_TS_CTL);
		return;
	}

	seq_id = (30 << CPSW_V1_SEQ_ID_OFS_SHIFT) | ETH_P_1588;
	ts_en = EVENT_MSG_BITS << CPSW_V1_MSG_TYPE_OFS;

	if (cpts_is_tx_enabled(cpsw->cpts))
		ts_en |= CPSW_V1_TS_TX_EN;

	if (cpts_is_rx_enabled(cpsw->cpts))
		ts_en |= CPSW_V1_TS_RX_EN;

	slave_write(slave, ts_en, CPSW1_TS_CTL);
	slave_write(slave, seq_id, CPSW1_TS_SEQ_LTYPE);
}
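/* The seq_id word above programs two match fields at once: the low 16
 * bits hold the LTYPE used to classify PTP frames (ETH_P_1588, 0x88f7)
 * and the upper bits hold 30, the byte offset of the sequenceId field
 * in a PTPv2 header. ts_en shifts EVENT_MSG_BITS (Sync, Delay_Req,
 * Pdelay_Req, Pdelay_Resp) into the message-type enable field.
 */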
static void cpsw_hwtstamp_v2(struct cpsw_priv *priv)
{
	struct cpsw_slave *slave;
	struct cpsw_common *cpsw = priv->cpsw;
	u32 ctrl, mtype;

	slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];

	ctrl = slave_read(slave, CPSW2_CONTROL);
	switch (cpsw->version) {
	case CPSW_VERSION_2:
		ctrl &= ~CTRL_V2_ALL_TS_MASK;

		if (cpts_is_tx_enabled(cpsw->cpts))
			ctrl |= CTRL_V2_TX_TS_BITS;

		if (cpts_is_rx_enabled(cpsw->cpts))
			ctrl |= CTRL_V2_RX_TS_BITS;
		break;
	case CPSW_VERSION_3:
	default:
		ctrl &= ~CTRL_V3_ALL_TS_MASK;

		if (cpts_is_tx_enabled(cpsw->cpts))
			ctrl |= CTRL_V3_TX_TS_BITS;

		if (cpts_is_rx_enabled(cpsw->cpts))
			ctrl |= CTRL_V3_RX_TS_BITS;
		break;
	}

	mtype = (30 << TS_SEQ_ID_OFFSET_SHIFT) | EVENT_MSG_BITS;

	slave_write(slave, mtype, CPSW2_TS_SEQ_MTYPE);
	slave_write(slave, ctrl, CPSW2_CONTROL);
	writel_relaxed(ETH_P_1588, &cpsw->regs->ts_ltype);
}
static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
{
	struct cpsw_priv *priv = netdev_priv(dev);
	struct hwtstamp_config cfg;
	struct cpsw_common *cpsw = priv->cpsw;
	struct cpts *cpts = cpsw->cpts;

	if (cpsw->version != CPSW_VERSION_1 &&
	    cpsw->version != CPSW_VERSION_2 &&
	    cpsw->version != CPSW_VERSION_3)
		return -EOPNOTSUPP;

	if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
		return -EFAULT;

	/* reserved for future extensions */
	if (cfg.flags)
		return -EINVAL;

	if (cfg.tx_type != HWTSTAMP_TX_OFF && cfg.tx_type != HWTSTAMP_TX_ON)
		return -ERANGE;

	switch (cfg.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		cpts_rx_enable(cpts, 0);
		break;
	case HWTSTAMP_FILTER_ALL:
	case HWTSTAMP_FILTER_NTP_ALL:
		return -ERANGE;
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		cpts_rx_enable(cpts, HWTSTAMP_FILTER_PTP_V1_L4_EVENT);
		cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		cpts_rx_enable(cpts, HWTSTAMP_FILTER_PTP_V2_EVENT);
		cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
		break;
	default:
		return -ERANGE;
	}

	cpts_tx_enable(cpts, cfg.tx_type == HWTSTAMP_TX_ON);

	switch (cpsw->version) {
	case CPSW_VERSION_1:
		cpsw_hwtstamp_v1(cpsw);
		break;
	case CPSW_VERSION_2:
	case CPSW_VERSION_3:
		cpsw_hwtstamp_v2(priv);
		break;
	default:
		WARN_ON(1);
	}

	return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}
static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
{
	struct cpsw_common *cpsw = ndev_to_cpsw(dev);
	struct cpts *cpts = cpsw->cpts;
	struct hwtstamp_config cfg;

	if (cpsw->version != CPSW_VERSION_1 &&
	    cpsw->version != CPSW_VERSION_2 &&
	    cpsw->version != CPSW_VERSION_3)
		return -EOPNOTSUPP;

	cfg.flags = 0;
	cfg.tx_type = cpts_is_tx_enabled(cpts) ?
		      HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
	cfg.rx_filter = (cpts_is_rx_enabled(cpts) ?
			 cpts->rx_enable : HWTSTAMP_FILTER_NONE);

	return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}
#else
static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
{
	return -EOPNOTSUPP;
}

static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
{
	return -EOPNOTSUPP;
}
#endif /*CONFIG_TI_CPTS*/
static int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
	struct cpsw_priv *priv = netdev_priv(dev);
	struct cpsw_common *cpsw = priv->cpsw;
	int slave_no = cpsw_slave_index(cpsw, priv);

	if (!netif_running(dev))
		return -EINVAL;

	switch (cmd) {
	case SIOCSHWTSTAMP:
		return cpsw_hwtstamp_set(dev, req);
	case SIOCGHWTSTAMP:
		return cpsw_hwtstamp_get(dev, req);
	}

	if (!cpsw->slaves[slave_no].phy)
		return -EOPNOTSUPP;
	return phy_mii_ioctl(cpsw->slaves[slave_no].phy, req, cmd);
}
static void cpsw_ndo_tx_timeout(struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int ch;

	cpsw_err(priv, tx_err, "transmit timeout, restarting dma\n");
	ndev->stats.tx_errors++;
	cpsw_intr_disable(cpsw);
	for (ch = 0; ch < cpsw->tx_ch_num; ch++) {
		cpdma_chan_stop(cpsw->txv[ch].ch);
		cpdma_chan_start(cpsw->txv[ch].ch);
	}

	cpsw_intr_enable(cpsw);
	netif_trans_update(ndev);
	netif_tx_wake_all_queues(ndev);
}
static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct sockaddr *addr = (struct sockaddr *)p;
	struct cpsw_common *cpsw = priv->cpsw;
	int flags = 0;
	u16 vid = 0;
	int ret;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	ret = pm_runtime_get_sync(cpsw->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(cpsw->dev);
		return ret;
	}

	if (cpsw->data.dual_emac) {
		vid = cpsw->slaves[priv->emac_port].port_vlan;
		flags = ALE_VLAN;
	}

	cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr, HOST_PORT_NUM,
			   flags, vid);
	cpsw_ale_add_ucast(cpsw->ale, addr->sa_data, HOST_PORT_NUM,
			   flags, vid);

	memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN);
	memcpy(ndev->dev_addr, priv->mac_addr, ETH_ALEN);
	for_each_slave(priv, cpsw_set_slave_mac, priv);

	pm_runtime_put(cpsw->dev);

	return 0;
}

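/* Example (illustrative sketch, not part of the driver): the ndo above is
 * reached through the classic SIOCSIFHWADDR ioctl (or netlink). A minimal
 * user-space fragment, assuming an open AF_INET socket fd and "eth0":
 *
 *	struct ifreq ifr;
 *	unsigned char mac[6] = { 0x02, 0x11, 0x22, 0x33, 0x44, 0x55 };
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ifr.ifr_hwaddr.sa_family = ARPHRD_ETHER;
 *	memcpy(ifr.ifr_hwaddr.sa_data, mac, 6);
 *	ioctl(fd, SIOCSIFHWADDR, &ifr);
 *
 * In dual EMAC mode the old and new unicast ALE entries are qualified by
 * the port VLAN, so the other port's address table is left untouched.
 */
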
#ifdef CONFIG_NET_POLL_CONTROLLER
static void cpsw_ndo_poll_controller(struct net_device *ndev)
{
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);

	cpsw_intr_disable(cpsw);
	cpsw_rx_interrupt(cpsw->irqs_table[0], cpsw);
	cpsw_tx_interrupt(cpsw->irqs_table[1], cpsw);
	cpsw_intr_enable(cpsw);
}
#endif

static inline int cpsw_add_vlan_ale_entry(struct cpsw_priv *priv,
					  unsigned short vid)
{
	int ret;
	int unreg_mcast_mask = 0;
	u32 port_mask;
	struct cpsw_common *cpsw = priv->cpsw;

	if (cpsw->data.dual_emac) {
		port_mask = (1 << (priv->emac_port + 1)) | ALE_PORT_HOST;

		if (priv->ndev->flags & IFF_ALLMULTI)
			unreg_mcast_mask = port_mask;
	} else {
		port_mask = ALE_ALL_PORTS;

		if (priv->ndev->flags & IFF_ALLMULTI)
			unreg_mcast_mask = ALE_ALL_PORTS;
		else
			unreg_mcast_mask = ALE_PORT_1 | ALE_PORT_2;
	}

	ret = cpsw_ale_add_vlan(cpsw->ale, vid, port_mask, 0, port_mask,
				unreg_mcast_mask);
	if (ret != 0)
		return ret;

	ret = cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr,
				 HOST_PORT_NUM, ALE_VLAN, vid);
	if (ret != 0)
		goto clean_vid;

	ret = cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast,
				 port_mask, ALE_VLAN, vid, 0);
	if (ret != 0)
		goto clean_vlan_ucast;
	return 0;

clean_vlan_ucast:
	cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr,
			   HOST_PORT_NUM, ALE_VLAN, vid);
clean_vid:
	cpsw_ale_del_vlan(cpsw->ale, vid, 0);
	return ret;
}

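/* Worked example (descriptive, derived from the code above): in dual EMAC
 * mode on slave 0 (priv->emac_port == 0), port_mask becomes
 * BIT(0 + 1) | ALE_PORT_HOST, i.e. that slave's external port plus the
 * host port only, so a user-created VLAN can never bridge the two
 * external ports. Three ALE entries result, all scoped by the vid: the
 * VLAN member/untag entry itself, a unicast entry for the port MAC, and
 * a broadcast multicast entry.
 */
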
static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev,
				    __be16 proto, u16 vid)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int ret;

	if (vid == cpsw->data.default_vlan)
		return 0;

	ret = pm_runtime_get_sync(cpsw->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(cpsw->dev);
		return ret;
	}

	if (cpsw->data.dual_emac) {
		/* In dual EMAC, reserved VLAN id should not be used for
		 * creating VLAN interfaces as this can break the dual
		 * EMAC port separation
		 */
		int i;

		for (i = 0; i < cpsw->data.slaves; i++) {
			if (vid == cpsw->slaves[i].port_vlan) {
				ret = -EINVAL;
				goto err;
			}
		}
	}

	dev_info(priv->dev, "Adding vlanid %d to vlan filter\n", vid);
	ret = cpsw_add_vlan_ale_entry(priv, vid);
err:
	pm_runtime_put(cpsw->dev);
	return ret;
}

static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev,
				     __be16 proto, u16 vid)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int ret;

	if (vid == cpsw->data.default_vlan)
		return 0;

	ret = pm_runtime_get_sync(cpsw->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(cpsw->dev);
		return ret;
	}

	if (cpsw->data.dual_emac) {
		int i;

		for (i = 0; i < cpsw->data.slaves; i++) {
			if (vid == cpsw->slaves[i].port_vlan)
				goto err;
		}
	}

	dev_info(priv->dev, "removing vlanid %d from vlan filter\n", vid);
	ret = cpsw_ale_del_vlan(cpsw->ale, vid, 0);
	ret |= cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr,
				  HOST_PORT_NUM, ALE_VLAN, vid);
	ret |= cpsw_ale_del_mcast(cpsw->ale, priv->ndev->broadcast,
				  0, ALE_VLAN, vid);
err:
	pm_runtime_put(cpsw->dev);
	return ret;
}

static int cpsw_ndo_set_tx_maxrate(struct net_device *ndev, int queue, u32 rate)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	struct cpsw_slave *slave;
	u32 min_rate;
	u32 ch_rate;
	int i, ret;

	ch_rate = netdev_get_tx_queue(ndev, queue)->tx_maxrate;
	if (ch_rate == rate)
		return 0;

	ch_rate = rate * 1000;
	min_rate = cpdma_chan_get_min_rate(cpsw->dma);
	if (ch_rate < min_rate && ch_rate) {
		dev_err(priv->dev, "The channel rate cannot be less than %dMbps",
			min_rate);
		return -EINVAL;
	}

	if (rate > cpsw->speed) {
		dev_err(priv->dev, "The channel rate cannot be more than 2Gbps");
		return -EINVAL;
	}

	ret = pm_runtime_get_sync(cpsw->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(cpsw->dev);
		return ret;
	}

	ret = cpdma_chan_set_rate(cpsw->txv[queue].ch, ch_rate);
	pm_runtime_put(cpsw->dev);

	if (ret)
		return ret;

	/* update rates for slaves tx queues */
	for (i = 0; i < cpsw->data.slaves; i++) {
		slave = &cpsw->slaves[i];
		if (!slave->ndev)
			continue;

		netdev_get_tx_queue(slave->ndev, queue)->tx_maxrate = rate;
	}

	cpsw_split_res(ndev);
	return ret;
}

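/* Worked example (illustrative): the stack hands "rate" to this ndo in
 * Mbps, typically from the per-queue sysfs knob, e.g.
 *
 *	echo 100 > /sys/class/net/eth0/queues/tx-0/tx_maxrate
 *
 * arrives here as rate = 100, and ch_rate = 100 * 1000 = 100000 is what
 * the cpdma layer consumes (Kbit/s). A rate of 0 removes the shaper. The
 * interface name and queue index are assumptions for the sketch.
 */
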
static int cpsw_set_mqprio(struct net_device *ndev, void *type_data)
{
	struct tc_mqprio_qopt_offload *mqprio = type_data;
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int fifo, num_tc, count, offset;
	struct cpsw_slave *slave;
	u32 tx_prio_map = 0;
	int i, tc, ret;

	num_tc = mqprio->qopt.num_tc;
	if (num_tc > CPSW_TC_NUM)
		return -EINVAL;

	if (mqprio->mode != TC_MQPRIO_MODE_DCB)
		return -EINVAL;

	ret = pm_runtime_get_sync(cpsw->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(cpsw->dev);
		return ret;
	}

	if (num_tc) {
		for (i = 0; i < 8; i++) {
			tc = mqprio->qopt.prio_tc_map[i];
			fifo = cpsw_tc_to_fifo(tc, num_tc);
			tx_prio_map |= fifo << (4 * i);
		}

		netdev_set_num_tc(ndev, num_tc);
		for (i = 0; i < num_tc; i++) {
			count = mqprio->qopt.count[i];
			offset = mqprio->qopt.offset[i];
			netdev_set_tc_queue(ndev, i, count, offset);
		}
	}

	if (!mqprio->qopt.hw) {
		/* restore default configuration */
		netdev_reset_tc(ndev);
		tx_prio_map = TX_PRIORITY_MAPPING;
	}

	priv->mqprio_hw = mqprio->qopt.hw;

	offset = cpsw->version == CPSW_VERSION_1 ?
		 CPSW1_TX_PRI_MAP : CPSW2_TX_PRI_MAP;

	slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
	slave_write(slave, tx_prio_map, offset);

	pm_runtime_put_sync(cpsw->dev);

	return 0;
}

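/* Example (illustrative): the offload is normally driven from user space
 * with the mqprio qdisc; a sketch for two traffic classes might look like
 *
 *	tc qdisc replace dev eth0 root handle 100: mqprio num_tc 2 \
 *		map 1 1 0 0 0 0 0 0 queues 1@0 1@1 hw 1
 *
 * Each of the 8 skb priorities then contributes one 4-bit fifo nibble to
 * tx_prio_map, which is written to the per-slave TX_PRI_MAP register.
 * The exact tc invocation is an assumption, not taken from this driver.
 */
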
static int cpsw_ndo_setup_tc(struct net_device *ndev, enum tc_setup_type type,
			     void *type_data)
{
	switch (type) {
	case TC_SETUP_QDISC_CBS:
		return cpsw_set_cbs(ndev, type_data);

	case TC_SETUP_QDISC_MQPRIO:
		return cpsw_set_mqprio(ndev, type_data);

	default:
		return -EOPNOTSUPP;
	}
}

static const struct net_device_ops cpsw_netdev_ops = {
	.ndo_open		= cpsw_ndo_open,
	.ndo_stop		= cpsw_ndo_stop,
	.ndo_start_xmit		= cpsw_ndo_start_xmit,
	.ndo_set_mac_address	= cpsw_ndo_set_mac_address,
	.ndo_do_ioctl		= cpsw_ndo_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_tx_timeout		= cpsw_ndo_tx_timeout,
	.ndo_set_rx_mode	= cpsw_ndo_set_rx_mode,
	.ndo_set_tx_maxrate	= cpsw_ndo_set_tx_maxrate,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= cpsw_ndo_poll_controller,
#endif
	.ndo_vlan_rx_add_vid	= cpsw_ndo_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= cpsw_ndo_vlan_rx_kill_vid,
	.ndo_setup_tc		= cpsw_ndo_setup_tc,
};

static int cpsw_get_regs_len(struct net_device *ndev)
{
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);

	return cpsw->data.ale_entries * ALE_ENTRY_WORDS * sizeof(u32);
}

static void cpsw_get_regs(struct net_device *ndev,
			  struct ethtool_regs *regs, void *p)
{
	u32 *reg = p;
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);

	/* update CPSW IP version */
	regs->version = cpsw->version;

	cpsw_ale_dump(cpsw->ale, reg);
}

static void cpsw_get_drvinfo(struct net_device *ndev,
			     struct ethtool_drvinfo *info)
{
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
	struct platform_device *pdev = to_platform_device(cpsw->dev);

	strlcpy(info->driver, "cpsw", sizeof(info->driver));
	strlcpy(info->version, "1.0", sizeof(info->version));
	strlcpy(info->bus_info, pdev->name, sizeof(info->bus_info));
}

static u32 cpsw_get_msglevel(struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);

	return priv->msg_enable;
}

static void cpsw_set_msglevel(struct net_device *ndev, u32 value)
{
	struct cpsw_priv *priv = netdev_priv(ndev);

	priv->msg_enable = value;
}

#if IS_ENABLED(CONFIG_TI_CPTS)
static int cpsw_get_ts_info(struct net_device *ndev,
			    struct ethtool_ts_info *info)
{
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);

	info->so_timestamping =
		SOF_TIMESTAMPING_TX_HARDWARE |
		SOF_TIMESTAMPING_TX_SOFTWARE |
		SOF_TIMESTAMPING_RX_HARDWARE |
		SOF_TIMESTAMPING_RX_SOFTWARE |
		SOF_TIMESTAMPING_SOFTWARE |
		SOF_TIMESTAMPING_RAW_HARDWARE;
	info->phc_index = cpsw->cpts->phc_index;
	info->tx_types =
		(1 << HWTSTAMP_TX_OFF) |
		(1 << HWTSTAMP_TX_ON);
	info->rx_filters =
		(1 << HWTSTAMP_FILTER_NONE) |
		(1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
		(1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
	return 0;
}
#else
static int cpsw_get_ts_info(struct net_device *ndev,
			    struct ethtool_ts_info *info)
{
	info->so_timestamping =
		SOF_TIMESTAMPING_TX_SOFTWARE |
		SOF_TIMESTAMPING_RX_SOFTWARE |
		SOF_TIMESTAMPING_SOFTWARE;
	info->phc_index = -1;
	info->tx_types = 0;
	info->rx_filters = 0;
	return 0;
}
#endif

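/* Usage note (illustrative): this pair backs "ethtool -T eth0". With
 * CONFIG_TI_CPTS enabled the reported phc_index can be handed to a PTP
 * daemon, e.g. "ptp4l -i eth0"; without it only software timestamping is
 * advertised. The command lines are examples, not driver contents.
 */
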
static int cpsw_get_link_ksettings(struct net_device *ndev,
				   struct ethtool_link_ksettings *ecmd)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int slave_no = cpsw_slave_index(cpsw, priv);

	if (!cpsw->slaves[slave_no].phy)
		return -EOPNOTSUPP;

	phy_ethtool_ksettings_get(cpsw->slaves[slave_no].phy, ecmd);
	return 0;
}

static int cpsw_set_link_ksettings(struct net_device *ndev,
				   const struct ethtool_link_ksettings *ecmd)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int slave_no = cpsw_slave_index(cpsw, priv);

	if (cpsw->slaves[slave_no].phy)
		return phy_ethtool_ksettings_set(cpsw->slaves[slave_no].phy,
						 ecmd);
	else
		return -EOPNOTSUPP;
}

static void cpsw_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int slave_no = cpsw_slave_index(cpsw, priv);

	wol->supported = 0;
	wol->wolopts = 0;

	if (cpsw->slaves[slave_no].phy)
		phy_ethtool_get_wol(cpsw->slaves[slave_no].phy, wol);
}

static int cpsw_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int slave_no = cpsw_slave_index(cpsw, priv);

	if (cpsw->slaves[slave_no].phy)
		return phy_ethtool_set_wol(cpsw->slaves[slave_no].phy, wol);
	else
		return -EOPNOTSUPP;
}

static void cpsw_get_pauseparam(struct net_device *ndev,
				struct ethtool_pauseparam *pause)
{
	struct cpsw_priv *priv = netdev_priv(ndev);

	pause->autoneg = AUTONEG_DISABLE;
	pause->rx_pause = priv->rx_pause ? true : false;
	pause->tx_pause = priv->tx_pause ? true : false;
}

static int cpsw_set_pauseparam(struct net_device *ndev,
			       struct ethtool_pauseparam *pause)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	bool link;

	priv->rx_pause = pause->rx_pause ? true : false;
	priv->tx_pause = pause->tx_pause ? true : false;

	for_each_slave(priv, _cpsw_adjust_link, priv, &link);
	return 0;
}

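/* Usage note (illustrative): pause settings are driven by
 * "ethtool -A eth0 rx on tx on". Pause autonegotiation is not supported
 * here (get reports AUTONEG_DISABLE); the requested state is pushed
 * straight to the MAC via _cpsw_adjust_link() for every slave port.
 */
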
static int cpsw_ethtool_op_begin(struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int ret;

	ret = pm_runtime_get_sync(cpsw->dev);
	if (ret < 0) {
		cpsw_err(priv, drv, "ethtool begin failed %d\n", ret);
		pm_runtime_put_noidle(cpsw->dev);
	}

	return ret;
}

static void cpsw_ethtool_op_complete(struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	int ret;

	ret = pm_runtime_put(priv->cpsw->dev);
	if (ret < 0)
		cpsw_err(priv, drv, "ethtool complete failed %d\n", ret);
}

static void cpsw_get_channels(struct net_device *ndev,
			      struct ethtool_channels *ch)
{
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);

	ch->max_rx = cpsw->quirk_irq ? 1 : CPSW_MAX_QUEUES;
	ch->max_tx = cpsw->quirk_irq ? 1 : CPSW_MAX_QUEUES;
	ch->max_combined = 0;
	ch->max_other = 0;
	ch->other_count = 0;
	ch->rx_count = cpsw->rx_ch_num;
	ch->tx_count = cpsw->tx_ch_num;
	ch->combined_count = 0;
}

static int cpsw_check_ch_settings(struct cpsw_common *cpsw,
				  struct ethtool_channels *ch)
{
	if (cpsw->quirk_irq) {
		dev_err(cpsw->dev, "Maximum one tx/rx queue is allowed");
		return -EOPNOTSUPP;
	}

	if (ch->combined_count)
		return -EINVAL;

	/* verify we have at least one channel in each direction */
	if (!ch->rx_count || !ch->tx_count)
		return -EINVAL;

	if (ch->rx_count > cpsw->data.channels ||
	    ch->tx_count > cpsw->data.channels)
		return -EINVAL;

	return 0;
}

static int cpsw_update_channels_res(struct cpsw_priv *priv, int ch_num, int rx)
{
	struct cpsw_common *cpsw = priv->cpsw;
	void (*handler)(void *, int, int);
	struct netdev_queue *queue;
	struct cpsw_vector *vec;
	int ret, *ch, vch;

	if (rx) {
		ch = &cpsw->rx_ch_num;
		vec = cpsw->rxv;
		handler = cpsw_rx_handler;
	} else {
		ch = &cpsw->tx_ch_num;
		vec = cpsw->txv;
		handler = cpsw_tx_handler;
	}

	while (*ch < ch_num) {
		vch = rx ? *ch : 7 - *ch;
		vec[*ch].ch = cpdma_chan_create(cpsw->dma, vch, handler, rx);
		queue = netdev_get_tx_queue(priv->ndev, *ch);
		queue->tx_maxrate = 0;

		if (IS_ERR(vec[*ch].ch))
			return PTR_ERR(vec[*ch].ch);

		if (!vec[*ch].ch)
			return -EINVAL;

		cpsw_info(priv, ifup, "created new %d %s channel\n", *ch,
			  (rx ? "rx" : "tx"));
		(*ch)++;
	}

	while (*ch > ch_num) {
		(*ch)--;

		ret = cpdma_chan_destroy(vec[*ch].ch);
		if (ret)
			return ret;

		cpsw_info(priv, ifup, "destroyed %d %s channel\n", *ch,
			  (rx ? "rx" : "tx"));
	}

	return 0;
}

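/* Note (descriptive): rx channels take ascending cpdma channel numbers
 * (vch = *ch) while tx channels are allocated from the top of the 0..7
 * range downwards (vch = 7 - *ch). This matches the fixed tx channel
 * layout the driver assumes elsewhere, e.g. the default tx channel
 * created at probe time with cpdma channel number 7.
 */
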
static int cpsw_update_channels(struct cpsw_priv *priv,
				struct ethtool_channels *ch)
{
	int ret;

	ret = cpsw_update_channels_res(priv, ch->rx_count, 1);
	if (ret)
		return ret;

	ret = cpsw_update_channels_res(priv, ch->tx_count, 0);
	if (ret)
		return ret;

	return 0;
}

static void cpsw_suspend_data_pass(struct net_device *ndev)
{
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
	struct cpsw_slave *slave;
	int i;

	/* Disable NAPI scheduling */
	cpsw_intr_disable(cpsw);

	/* Stop all transmit queues for every network device.
	 * Disable re-using rx descriptors with dormant_on.
	 */
	for (i = cpsw->data.slaves, slave = cpsw->slaves; i; i--, slave++) {
		if (!(slave->ndev && netif_running(slave->ndev)))
			continue;

		netif_tx_stop_all_queues(slave->ndev);
		netif_dormant_on(slave->ndev);
	}

	/* Handle rest of tx packets and stop cpdma channels */
	cpdma_ctlr_stop(cpsw->dma);
}

static int cpsw_resume_data_pass(struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	struct cpsw_slave *slave;
	int i, ret;

	/* Allow rx packets handling */
	for (i = cpsw->data.slaves, slave = cpsw->slaves; i; i--, slave++)
		if (slave->ndev && netif_running(slave->ndev))
			netif_dormant_off(slave->ndev);

	/* After this receive is started */
	if (cpsw->usage_count) {
		ret = cpsw_fill_rx_channels(priv);
		if (ret)
			return ret;

		cpdma_ctlr_start(cpsw->dma);
		cpsw_intr_enable(cpsw);
	}

	/* Resume transmit for every affected interface */
	for (i = cpsw->data.slaves, slave = cpsw->slaves; i; i--, slave++)
		if (slave->ndev && netif_running(slave->ndev))
			netif_tx_start_all_queues(slave->ndev);

	return 0;
}

static int cpsw_set_channels(struct net_device *ndev,
			     struct ethtool_channels *chs)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	struct cpsw_slave *slave;
	int i, ret;

	ret = cpsw_check_ch_settings(cpsw, chs);
	if (ret < 0)
		return ret;

	cpsw_suspend_data_pass(ndev);
	ret = cpsw_update_channels(priv, chs);
	if (ret)
		goto err;

	for (i = cpsw->data.slaves, slave = cpsw->slaves; i; i--, slave++) {
		if (!(slave->ndev && netif_running(slave->ndev)))
			continue;

		/* Inform stack about new count of queues */
		ret = netif_set_real_num_tx_queues(slave->ndev,
						   cpsw->tx_ch_num);
		if (ret) {
			dev_err(priv->dev, "cannot set real number of tx queues\n");
			goto err;
		}

		ret = netif_set_real_num_rx_queues(slave->ndev,
						   cpsw->rx_ch_num);
		if (ret) {
			dev_err(priv->dev, "cannot set real number of rx queues\n");
			goto err;
		}
	}

	if (cpsw->usage_count)
		cpsw_split_res(ndev);

	ret = cpsw_resume_data_pass(ndev);
	if (!ret)
		return 0;
err:
	dev_err(priv->dev, "cannot update channels number, closing device\n");
	dev_close(ndev);
	return ret;
}

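/* Usage note (illustrative): channel counts are changed at runtime with
 * e.g. "ethtool -L eth0 rx 2 tx 4". The sequence above suspends traffic,
 * creates or destroys cpdma channels to match, re-publishes the real
 * queue counts to the stack, and resumes; on any failure the device is
 * closed rather than left half-configured.
 */
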
static int cpsw_get_eee(struct net_device *ndev, struct ethtool_eee *edata)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int slave_no = cpsw_slave_index(cpsw, priv);

	if (cpsw->slaves[slave_no].phy)
		return phy_ethtool_get_eee(cpsw->slaves[slave_no].phy, edata);
	else
		return -EOPNOTSUPP;
}

static int cpsw_set_eee(struct net_device *ndev, struct ethtool_eee *edata)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int slave_no = cpsw_slave_index(cpsw, priv);

	if (cpsw->slaves[slave_no].phy)
		return phy_ethtool_set_eee(cpsw->slaves[slave_no].phy, edata);
	else
		return -EOPNOTSUPP;
}

static int cpsw_nway_reset(struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int slave_no = cpsw_slave_index(cpsw, priv);

	if (cpsw->slaves[slave_no].phy)
		return genphy_restart_aneg(cpsw->slaves[slave_no].phy);
	else
		return -EOPNOTSUPP;
}

static void cpsw_get_ringparam(struct net_device *ndev,
			       struct ethtool_ringparam *ering)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;

	ering->tx_max_pending = descs_pool_size - CPSW_MAX_QUEUES;
	ering->tx_pending = cpdma_get_num_tx_descs(cpsw->dma);
	ering->rx_max_pending = descs_pool_size - CPSW_MAX_QUEUES;
	ering->rx_pending = cpdma_get_num_rx_descs(cpsw->dma);
}

static int cpsw_set_ringparam(struct net_device *ndev,
			      struct ethtool_ringparam *ering)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int ret;

	/* ignore ering->tx_pending - only rx_pending adjustment is supported */
	if (ering->rx_mini_pending || ering->rx_jumbo_pending ||
	    ering->rx_pending < CPSW_MAX_QUEUES ||
	    ering->rx_pending > (descs_pool_size - CPSW_MAX_QUEUES))
		return -EINVAL;

	if (ering->rx_pending == cpdma_get_num_rx_descs(cpsw->dma))
		return 0;

	cpsw_suspend_data_pass(ndev);

	cpdma_set_num_rx_descs(cpsw->dma, ering->rx_pending);

	if (cpsw->usage_count)
		cpdma_chan_split_pool(cpsw->dma);

	ret = cpsw_resume_data_pass(ndev);
	if (!ret)
		return 0;

	dev_err(&ndev->dev, "cannot set ring params, closing device\n");
	dev_close(ndev);
	return ret;
}

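/* Usage note (illustrative): ring sizing maps to "ethtool -G eth0 rx <n>".
 * Only the rx descriptor count is adjustable, and it must leave at least
 * CPSW_MAX_QUEUES descriptors of headroom in the shared pool, as checked
 * above.
 */
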
static const struct ethtool_ops cpsw_ethtool_ops = {
	.get_drvinfo		= cpsw_get_drvinfo,
	.get_msglevel		= cpsw_get_msglevel,
	.set_msglevel		= cpsw_set_msglevel,
	.get_link		= ethtool_op_get_link,
	.get_ts_info		= cpsw_get_ts_info,
	.get_coalesce		= cpsw_get_coalesce,
	.set_coalesce		= cpsw_set_coalesce,
	.get_sset_count		= cpsw_get_sset_count,
	.get_strings		= cpsw_get_strings,
	.get_ethtool_stats	= cpsw_get_ethtool_stats,
	.get_pauseparam		= cpsw_get_pauseparam,
	.set_pauseparam		= cpsw_set_pauseparam,
	.get_wol		= cpsw_get_wol,
	.set_wol		= cpsw_set_wol,
	.get_regs_len		= cpsw_get_regs_len,
	.get_regs		= cpsw_get_regs,
	.begin			= cpsw_ethtool_op_begin,
	.complete		= cpsw_ethtool_op_complete,
	.get_channels		= cpsw_get_channels,
	.set_channels		= cpsw_set_channels,
	.get_link_ksettings	= cpsw_get_link_ksettings,
	.set_link_ksettings	= cpsw_set_link_ksettings,
	.get_eee		= cpsw_get_eee,
	.set_eee		= cpsw_set_eee,
	.nway_reset		= cpsw_nway_reset,
	.get_ringparam		= cpsw_get_ringparam,
	.set_ringparam		= cpsw_set_ringparam,
};

static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_common *cpsw,
			    u32 slave_reg_ofs, u32 sliver_reg_ofs)
{
	void __iomem		*regs = cpsw->regs;
	int			slave_num = slave->slave_num;
	struct cpsw_slave_data	*data = cpsw->data.slave_data + slave_num;

	slave->data	= data;
	slave->regs	= regs + slave_reg_ofs;
	slave->sliver	= regs + sliver_reg_ofs;
	slave->port_vlan = data->dual_emac_res_vlan;
}

static int cpsw_probe_dt(struct cpsw_platform_data *data,
			 struct platform_device *pdev)
{
	struct device_node *node = pdev->dev.of_node;
	struct device_node *slave_node;
	int i = 0, ret;
	u32 prop;

	if (!node)
		return -EINVAL;

	if (of_property_read_u32(node, "slaves", &prop)) {
		dev_err(&pdev->dev, "Missing slaves property in the DT.\n");
		return -EINVAL;
	}
	data->slaves = prop;

	if (of_property_read_u32(node, "active_slave", &prop)) {
		dev_err(&pdev->dev, "Missing active_slave property in the DT.\n");
		return -EINVAL;
	}
	data->active_slave = prop;

	data->slave_data = devm_kcalloc(&pdev->dev,
					data->slaves,
					sizeof(struct cpsw_slave_data),
					GFP_KERNEL);
	if (!data->slave_data)
		return -ENOMEM;

	if (of_property_read_u32(node, "cpdma_channels", &prop)) {
		dev_err(&pdev->dev, "Missing cpdma_channels property in the DT.\n");
		return -EINVAL;
	}
	data->channels = prop;

	if (of_property_read_u32(node, "ale_entries", &prop)) {
		dev_err(&pdev->dev, "Missing ale_entries property in the DT.\n");
		return -EINVAL;
	}
	data->ale_entries = prop;

	if (of_property_read_u32(node, "bd_ram_size", &prop)) {
		dev_err(&pdev->dev, "Missing bd_ram_size property in the DT.\n");
		return -EINVAL;
	}
	data->bd_ram_size = prop;

	if (of_property_read_u32(node, "mac_control", &prop)) {
		dev_err(&pdev->dev, "Missing mac_control property in the DT.\n");
		return -EINVAL;
	}
	data->mac_control = prop;

	if (of_property_read_bool(node, "dual_emac"))
		data->dual_emac = 1;

	/*
	 * Populate all the child nodes here...
	 */
	ret = of_platform_populate(node, NULL, NULL, &pdev->dev);
	/* We do not want to force this, as in some cases may not have child */
	if (ret)
		dev_warn(&pdev->dev, "Doesn't have any child node\n");

	for_each_available_child_of_node(node, slave_node) {
		struct cpsw_slave_data *slave_data = data->slave_data + i;
		const void *mac_addr = NULL;
		int lenp;
		const __be32 *parp;

		/* This is not a slave child node, continue */
		if (strcmp(slave_node->name, "slave"))
			continue;

		slave_data->phy_node = of_parse_phandle(slave_node,
							"phy-handle", 0);
		parp = of_get_property(slave_node, "phy_id", &lenp);
		if (slave_data->phy_node) {
			dev_dbg(&pdev->dev,
				"slave[%d] using phy-handle=\"%pOF\"\n",
				i, slave_data->phy_node);
		} else if (of_phy_is_fixed_link(slave_node)) {
			/* In the case of a fixed PHY, the DT node associated
			 * to the PHY is the Ethernet MAC DT node.
			 */
			ret = of_phy_register_fixed_link(slave_node);
			if (ret) {
				if (ret != -EPROBE_DEFER)
					dev_err(&pdev->dev, "failed to register fixed-link phy: %d\n", ret);
				return ret;
			}
			slave_data->phy_node = of_node_get(slave_node);
		} else if (parp) {
			u32 phyid;
			struct device_node *mdio_node;
			struct platform_device *mdio;

			if (lenp != (sizeof(__be32) * 2)) {
				dev_err(&pdev->dev, "Invalid slave[%d] phy_id property\n", i);
				goto no_phy_slave;
			}
			mdio_node = of_find_node_by_phandle(be32_to_cpup(parp));
			phyid = be32_to_cpup(parp+1);
			mdio = of_find_device_by_node(mdio_node);
			of_node_put(mdio_node);
			if (!mdio) {
				dev_err(&pdev->dev, "Missing mdio platform device\n");
				return -EINVAL;
			}
			snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
				 PHY_ID_FMT, mdio->name, phyid);
			put_device(&mdio->dev);
		} else {
			dev_err(&pdev->dev,
				"No slave[%d] phy_id, phy-handle, or fixed-link property\n",
				i);
			goto no_phy_slave;
		}
		slave_data->phy_if = of_get_phy_mode(slave_node);
		if (slave_data->phy_if < 0) {
			dev_err(&pdev->dev, "Missing or malformed slave[%d] phy-mode property\n",
				i);
			return slave_data->phy_if;
		}

no_phy_slave:
		mac_addr = of_get_mac_address(slave_node);
		if (mac_addr) {
			memcpy(slave_data->mac_addr, mac_addr, ETH_ALEN);
		} else {
			ret = ti_cm_get_macid(&pdev->dev, i,
					      slave_data->mac_addr);
			if (ret)
				return ret;
		}
		if (data->dual_emac) {
			if (of_property_read_u32(slave_node, "dual_emac_res_vlan",
						 &prop)) {
				dev_err(&pdev->dev, "Missing dual_emac_res_vlan in DT.\n");
				slave_data->dual_emac_res_vlan = i+1;
				dev_err(&pdev->dev, "Using %d as Reserved VLAN for %d slave\n",
					slave_data->dual_emac_res_vlan, i);
			} else {
				slave_data->dual_emac_res_vlan = prop;
			}
		}

		i++;
		if (i == data->slaves)
			break;
	}

	return 0;
}

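/* Example (illustrative): a device-tree fragment with the properties this
 * parser expects. The values are assumptions modelled on am335x boards,
 * not requirements of this driver:
 *
 *	mac: ethernet@4a100000 {
 *		compatible = "ti,am335x-cpsw", "ti,cpsw";
 *		slaves = <2>;
 *		active_slave = <0>;
 *		cpdma_channels = <8>;
 *		ale_entries = <1024>;
 *		bd_ram_size = <0x2000>;
 *		mac_control = <0x20>;
 *
 *		cpsw_emac0: slave@200 {
 *			phy-handle = <&ethphy0>;
 *			phy-mode = "rgmii-txid";
 *		};
 *	};
 */
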
static void cpsw_remove_dt(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
	struct cpsw_platform_data *data = &cpsw->data;
	struct device_node *node = pdev->dev.of_node;
	struct device_node *slave_node;
	int i = 0;

	for_each_available_child_of_node(node, slave_node) {
		struct cpsw_slave_data *slave_data = &data->slave_data[i];

		if (strcmp(slave_node->name, "slave"))
			continue;

		if (of_phy_is_fixed_link(slave_node))
			of_phy_deregister_fixed_link(slave_node);

		of_node_put(slave_data->phy_node);

		i++;
		if (i == data->slaves)
			break;
	}

	of_platform_depopulate(&pdev->dev);
}

static int cpsw_probe_dual_emac(struct cpsw_priv *priv)
{
	struct cpsw_common		*cpsw = priv->cpsw;
	struct cpsw_platform_data	*data = &cpsw->data;
	struct net_device		*ndev;
	struct cpsw_priv		*priv_sl2;
	int ret = 0;

	ndev = alloc_etherdev_mq(sizeof(struct cpsw_priv), CPSW_MAX_QUEUES);
	if (!ndev) {
		dev_err(cpsw->dev, "cpsw: error allocating net_device\n");
		return -ENOMEM;
	}

	priv_sl2 = netdev_priv(ndev);
	priv_sl2->cpsw = cpsw;
	priv_sl2->ndev = ndev;
	priv_sl2->dev  = &ndev->dev;
	priv_sl2->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG);

	if (is_valid_ether_addr(data->slave_data[1].mac_addr)) {
		memcpy(priv_sl2->mac_addr, data->slave_data[1].mac_addr,
		       ETH_ALEN);
		dev_info(cpsw->dev, "cpsw: Detected MACID = %pM\n",
			 priv_sl2->mac_addr);
	} else {
		eth_random_addr(priv_sl2->mac_addr);
		dev_info(cpsw->dev, "cpsw: Random MACID = %pM\n",
			 priv_sl2->mac_addr);
	}
	memcpy(ndev->dev_addr, priv_sl2->mac_addr, ETH_ALEN);

	priv_sl2->emac_port = 1;
	cpsw->slaves[1].ndev = ndev;
	ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_RX;

	ndev->netdev_ops = &cpsw_netdev_ops;
	ndev->ethtool_ops = &cpsw_ethtool_ops;

	/* register the network device */
	SET_NETDEV_DEV(ndev, cpsw->dev);
	ret = register_netdev(ndev);
	if (ret) {
		dev_err(cpsw->dev, "cpsw: error registering net device\n");
		free_netdev(ndev);
		ret = -ENODEV;
	}

	return ret;
}

static const struct of_device_id cpsw_of_mtable[] = {
	{ .compatible = "ti,cpsw"},
	{ .compatible = "ti,am335x-cpsw"},
	{ .compatible = "ti,am4372-cpsw"},
	{ .compatible = "ti,dra7-cpsw"},
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, cpsw_of_mtable);

static const struct soc_device_attribute cpsw_soc_devices[] = {
	{ .family = "AM33xx", .revision = "ES1.0"},
	{ .family = "AM33xx", .revision = "ES1.1"},
	{ /* sentinel */ }
};

static int cpsw_probe(struct platform_device *pdev)
{
	struct clk			*clk;
	struct cpsw_platform_data	*data;
	struct net_device		*ndev;
	struct cpsw_priv		*priv;
	struct cpdma_params		dma_params;
	struct cpsw_ale_params		ale_params;
	void __iomem			*ss_regs;
	void __iomem			*cpts_regs;
	struct resource			*res, *ss_res;
	struct gpio_descs		*mode;
	u32 slave_offset, sliver_offset, slave_size;
	const struct soc_device_attribute *soc;
	struct cpsw_common		*cpsw;
	int ret = 0, i, ch;
	int irq;

	cpsw = devm_kzalloc(&pdev->dev, sizeof(struct cpsw_common), GFP_KERNEL);
	if (!cpsw)
		return -ENOMEM;

	cpsw->dev = &pdev->dev;

	ndev = alloc_etherdev_mq(sizeof(struct cpsw_priv), CPSW_MAX_QUEUES);
	if (!ndev) {
		dev_err(&pdev->dev, "error allocating net_device\n");
		return -ENOMEM;
	}

	platform_set_drvdata(pdev, ndev);
	priv = netdev_priv(ndev);
	priv->cpsw = cpsw;
	priv->ndev = ndev;
	priv->dev  = &ndev->dev;
	priv->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG);
	cpsw->rx_packet_max = max(rx_packet_max, 128);

	mode = devm_gpiod_get_array_optional(&pdev->dev, "mode", GPIOD_OUT_LOW);
	if (IS_ERR(mode)) {
		ret = PTR_ERR(mode);
		dev_err(&pdev->dev, "gpio request failed, ret %d\n", ret);
		goto clean_ndev_ret;
	}

	/*
	 * This may be required here for child devices.
	 */
	pm_runtime_enable(&pdev->dev);

	/* Select default pin state */
	pinctrl_pm_select_default_state(&pdev->dev);

	/* Need to enable clocks with runtime PM api to access module
	 * registers
	 */
	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(&pdev->dev);
		goto clean_runtime_disable_ret;
	}

	ret = cpsw_probe_dt(&cpsw->data, pdev);
	if (ret)
		goto clean_dt_ret;

	data = &cpsw->data;
	cpsw->rx_ch_num = 1;
	cpsw->tx_ch_num = 1;

	if (is_valid_ether_addr(data->slave_data[0].mac_addr)) {
		memcpy(priv->mac_addr, data->slave_data[0].mac_addr, ETH_ALEN);
		dev_info(&pdev->dev, "Detected MACID = %pM\n", priv->mac_addr);
	} else {
		eth_random_addr(priv->mac_addr);
		dev_info(&pdev->dev, "Random MACID = %pM\n", priv->mac_addr);
	}

	memcpy(ndev->dev_addr, priv->mac_addr, ETH_ALEN);

	cpsw->slaves = devm_kcalloc(&pdev->dev,
				    data->slaves, sizeof(struct cpsw_slave),
				    GFP_KERNEL);
	if (!cpsw->slaves) {
		ret = -ENOMEM;
		goto clean_dt_ret;
	}
	for (i = 0; i < data->slaves; i++)
		cpsw->slaves[i].slave_num = i;

	cpsw->slaves[0].ndev = ndev;
	priv->emac_port = 0;

	clk = devm_clk_get(&pdev->dev, "fck");
	if (IS_ERR(clk)) {
		dev_err(priv->dev, "fck is not found\n");
		ret = -ENODEV;
		goto clean_dt_ret;
	}
	cpsw->bus_freq_mhz = clk_get_rate(clk) / 1000000;

	ss_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	ss_regs = devm_ioremap_resource(&pdev->dev, ss_res);
	if (IS_ERR(ss_regs)) {
		ret = PTR_ERR(ss_regs);
		goto clean_dt_ret;
	}
	cpsw->regs = ss_regs;

	cpsw->version = readl(&cpsw->regs->id_ver);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	cpsw->wr_regs = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(cpsw->wr_regs)) {
		ret = PTR_ERR(cpsw->wr_regs);
		goto clean_dt_ret;
	}

	memset(&dma_params, 0, sizeof(dma_params));
	memset(&ale_params, 0, sizeof(ale_params));

	switch (cpsw->version) {
	case CPSW_VERSION_1:
		cpsw->host_port_regs = ss_regs + CPSW1_HOST_PORT_OFFSET;
		cpts_regs	     = ss_regs + CPSW1_CPTS_OFFSET;
		cpsw->hw_stats	     = ss_regs + CPSW1_HW_STATS;
		dma_params.dmaregs   = ss_regs + CPSW1_CPDMA_OFFSET;
		dma_params.txhdp     = ss_regs + CPSW1_STATERAM_OFFSET;
		ale_params.ale_regs  = ss_regs + CPSW1_ALE_OFFSET;
		slave_offset         = CPSW1_SLAVE_OFFSET;
		slave_size           = CPSW1_SLAVE_SIZE;
		sliver_offset        = CPSW1_SLIVER_OFFSET;
		dma_params.desc_mem_phys = 0;
		break;
	case CPSW_VERSION_2:
	case CPSW_VERSION_3:
	case CPSW_VERSION_4:
		cpsw->host_port_regs = ss_regs + CPSW2_HOST_PORT_OFFSET;
		cpts_regs	     = ss_regs + CPSW2_CPTS_OFFSET;
		cpsw->hw_stats	     = ss_regs + CPSW2_HW_STATS;
		dma_params.dmaregs   = ss_regs + CPSW2_CPDMA_OFFSET;
		dma_params.txhdp     = ss_regs + CPSW2_STATERAM_OFFSET;
		ale_params.ale_regs  = ss_regs + CPSW2_ALE_OFFSET;
		slave_offset         = CPSW2_SLAVE_OFFSET;
		slave_size           = CPSW2_SLAVE_SIZE;
		sliver_offset        = CPSW2_SLIVER_OFFSET;
		dma_params.desc_mem_phys =
			(u32 __force) ss_res->start + CPSW2_BD_OFFSET;
		break;
	default:
		dev_err(priv->dev, "unknown version 0x%08x\n", cpsw->version);
		ret = -ENODEV;
		goto clean_dt_ret;
	}
	for (i = 0; i < cpsw->data.slaves; i++) {
		struct cpsw_slave *slave = &cpsw->slaves[i];

		cpsw_slave_init(slave, cpsw, slave_offset, sliver_offset);
		slave_offset  += slave_size;
		sliver_offset += SLIVER_SIZE;
	}

	dma_params.dev		= &pdev->dev;
	dma_params.rxthresh	= dma_params.dmaregs + CPDMA_RXTHRESH;
	dma_params.rxfree	= dma_params.dmaregs + CPDMA_RXFREE;
	dma_params.rxhdp	= dma_params.txhdp + CPDMA_RXHDP;
	dma_params.txcp		= dma_params.txhdp + CPDMA_TXCP;
	dma_params.rxcp		= dma_params.txhdp + CPDMA_RXCP;

	dma_params.num_chan		= data->channels;
	dma_params.has_soft_reset	= true;
	dma_params.min_packet_size	= CPSW_MIN_PACKET_SIZE;
	dma_params.desc_mem_size	= data->bd_ram_size;
	dma_params.desc_align		= 16;
	dma_params.has_ext_regs		= true;
	dma_params.desc_hw_addr		= dma_params.desc_mem_phys;
	dma_params.bus_freq_mhz		= cpsw->bus_freq_mhz;
	dma_params.descs_pool_size	= descs_pool_size;

	cpsw->dma = cpdma_ctlr_create(&dma_params);
	if (!cpsw->dma) {
		dev_err(priv->dev, "error initializing dma\n");
		ret = -ENOMEM;
		goto clean_dt_ret;
	}

	soc = soc_device_match(cpsw_soc_devices);
	if (soc)
		cpsw->quirk_irq = 1;

	ch = cpsw->quirk_irq ? 0 : 7;
	cpsw->txv[0].ch = cpdma_chan_create(cpsw->dma, ch, cpsw_tx_handler, 0);
	if (IS_ERR(cpsw->txv[0].ch)) {
		dev_err(priv->dev, "error initializing tx dma channel\n");
		ret = PTR_ERR(cpsw->txv[0].ch);
		goto clean_dma_ret;
	}

	cpsw->rxv[0].ch = cpdma_chan_create(cpsw->dma, 0, cpsw_rx_handler, 1);
	if (IS_ERR(cpsw->rxv[0].ch)) {
		dev_err(priv->dev, "error initializing rx dma channel\n");
		ret = PTR_ERR(cpsw->rxv[0].ch);
		goto clean_dma_ret;
	}

	ale_params.dev			= &pdev->dev;
	ale_params.ale_ageout		= ale_ageout;
	ale_params.ale_entries		= data->ale_entries;
	ale_params.ale_ports		= CPSW_ALE_PORTS_NUM;

	cpsw->ale = cpsw_ale_create(&ale_params);
	if (!cpsw->ale) {
		dev_err(priv->dev, "error initializing ale engine\n");
		ret = -ENODEV;
		goto clean_dma_ret;
	}

	cpsw->cpts = cpts_create(cpsw->dev, cpts_regs, cpsw->dev->of_node);
	if (IS_ERR(cpsw->cpts)) {
		ret = PTR_ERR(cpsw->cpts);
		goto clean_dma_ret;
	}

	ndev->irq = platform_get_irq(pdev, 1);
	if (ndev->irq < 0) {
		dev_err(priv->dev, "error getting irq resource\n");
		ret = ndev->irq;
		goto clean_dma_ret;
	}

	ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_RX;

	ndev->netdev_ops = &cpsw_netdev_ops;
	ndev->ethtool_ops = &cpsw_ethtool_ops;
	netif_napi_add(ndev, &cpsw->napi_rx,
		       cpsw->quirk_irq ? cpsw_rx_poll : cpsw_rx_mq_poll,
		       CPSW_POLL_WEIGHT);
	netif_tx_napi_add(ndev, &cpsw->napi_tx,
			  cpsw->quirk_irq ? cpsw_tx_poll : cpsw_tx_mq_poll,
			  CPSW_POLL_WEIGHT);
	cpsw_split_res(ndev);

	/* register the network device */
	SET_NETDEV_DEV(ndev, &pdev->dev);
	ret = register_netdev(ndev);
	if (ret) {
		dev_err(priv->dev, "error registering net device\n");
		ret = -ENODEV;
		goto clean_dma_ret;
	}

	if (cpsw->data.dual_emac) {
		ret = cpsw_probe_dual_emac(priv);
		if (ret) {
			cpsw_err(priv, probe, "error probe slave 2 emac interface\n");
			goto clean_unregister_netdev_ret;
		}
	}

	/* Grab RX and TX IRQs. Note that we also have RX_THRESHOLD and
	 * MISC IRQs which are always kept disabled with this driver so
	 * we will not request them.
	 *
	 * If anyone wants to implement support for those, make sure to
	 * first request and append them to irqs_table array.
	 */

	/* RX IRQ */
	irq = platform_get_irq(pdev, 1);
	if (irq < 0) {
		ret = irq;
		goto clean_dma_ret;
	}

	cpsw->irqs_table[0] = irq;
	ret = devm_request_irq(&pdev->dev, irq, cpsw_rx_interrupt,
			       0, dev_name(&pdev->dev), cpsw);
	if (ret < 0) {
		dev_err(priv->dev, "error attaching irq (%d)\n", ret);
		goto clean_dma_ret;
	}

	/* TX IRQ */
	irq = platform_get_irq(pdev, 2);
	if (irq < 0) {
		ret = irq;
		goto clean_dma_ret;
	}

	cpsw->irqs_table[1] = irq;
	ret = devm_request_irq(&pdev->dev, irq, cpsw_tx_interrupt,
			       0, dev_name(&pdev->dev), cpsw);
	if (ret < 0) {
		dev_err(priv->dev, "error attaching irq (%d)\n", ret);
		goto clean_dma_ret;
	}

	cpsw_notice(priv, probe,
		    "initialized device (regs %pa, irq %d, pool size %d)\n",
		    &ss_res->start, ndev->irq, dma_params.descs_pool_size);

	pm_runtime_put(&pdev->dev);

	return 0;

clean_unregister_netdev_ret:
	unregister_netdev(ndev);
clean_dma_ret:
	cpdma_ctlr_destroy(cpsw->dma);
clean_dt_ret:
	cpsw_remove_dt(pdev);
	pm_runtime_put_sync(&pdev->dev);
clean_runtime_disable_ret:
	pm_runtime_disable(&pdev->dev);
clean_ndev_ret:
	free_netdev(priv->ndev);
	return ret;
}

static int cpsw_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
	int ret;

	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(&pdev->dev);
		return ret;
	}

	if (cpsw->data.dual_emac)
		unregister_netdev(cpsw->slaves[1].ndev);
	unregister_netdev(ndev);

	cpts_release(cpsw->cpts);
	cpdma_ctlr_destroy(cpsw->dma);
	cpsw_remove_dt(pdev);
	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	if (cpsw->data.dual_emac)
		free_netdev(cpsw->slaves[1].ndev);
	free_netdev(ndev);
	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int cpsw_suspend(struct device *dev)
{
	struct platform_device	*pdev = to_platform_device(dev);
	struct net_device	*ndev = platform_get_drvdata(pdev);
	struct cpsw_common	*cpsw = ndev_to_cpsw(ndev);

	if (cpsw->data.dual_emac) {
		int i;

		for (i = 0; i < cpsw->data.slaves; i++) {
			if (netif_running(cpsw->slaves[i].ndev))
				cpsw_ndo_stop(cpsw->slaves[i].ndev);
		}
	} else {
		if (netif_running(ndev))
			cpsw_ndo_stop(ndev);
	}

	/* Select sleep pin state */
	pinctrl_pm_select_sleep_state(dev);

	return 0;
}

static int cpsw_resume(struct device *dev)
{
	struct platform_device	*pdev = to_platform_device(dev);
	struct net_device	*ndev = platform_get_drvdata(pdev);
	struct cpsw_common	*cpsw = ndev_to_cpsw(ndev);

	/* Select default pin state */
	pinctrl_pm_select_default_state(dev);

	/* shut up ASSERT_RTNL() warning in netif_set_real_num_tx/rx_queues */
	rtnl_lock();
	if (cpsw->data.dual_emac) {
		int i;

		for (i = 0; i < cpsw->data.slaves; i++) {
			if (netif_running(cpsw->slaves[i].ndev))
				cpsw_ndo_open(cpsw->slaves[i].ndev);
		}
	} else {
		if (netif_running(ndev))
			cpsw_ndo_open(ndev);
	}
	rtnl_unlock();

	return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(cpsw_pm_ops, cpsw_suspend, cpsw_resume);

static struct platform_driver cpsw_driver = {
	.driver = {
		.name	 = "cpsw",
		.pm	 = &cpsw_pm_ops,
		.of_match_table = cpsw_of_mtable,
	},
	.probe = cpsw_probe,
	.remove = cpsw_remove,
};

module_platform_driver(cpsw_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Cyril Chemparathy <cyril@ti.com>");
MODULE_AUTHOR("Mugunthan V N <mugunthanvnm@ti.com>");
MODULE_DESCRIPTION("TI CPSW Ethernet driver");