drivers/net/ethernet/stmicro/stmmac/stmmac_main.c (GNU Linux-libre 4.19.286-gnu1)
1 /*******************************************************************************
2   This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
3   ST Ethernet IPs are built around a Synopsys IP Core.
4
5         Copyright(C) 2007-2011 STMicroelectronics Ltd
6
7   This program is free software; you can redistribute it and/or modify it
8   under the terms and conditions of the GNU General Public License,
9   version 2, as published by the Free Software Foundation.
10
11   This program is distributed in the hope it will be useful, but WITHOUT
12   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
14   more details.
15
16   The full GNU General Public License is included in this distribution in
17   the file called "COPYING".
18
19   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
20
21   Documentation available at:
22         http://www.stlinux.com
23   Support available at:
24         https://bugzilla.stlinux.com/
25 *******************************************************************************/
26
27 #include <linux/clk.h>
28 #include <linux/kernel.h>
29 #include <linux/interrupt.h>
30 #include <linux/ip.h>
31 #include <linux/tcp.h>
32 #include <linux/skbuff.h>
33 #include <linux/ethtool.h>
34 #include <linux/if_ether.h>
35 #include <linux/crc32.h>
36 #include <linux/mii.h>
37 #include <linux/if.h>
38 #include <linux/if_vlan.h>
39 #include <linux/dma-mapping.h>
40 #include <linux/slab.h>
41 #include <linux/prefetch.h>
42 #include <linux/pinctrl/consumer.h>
43 #ifdef CONFIG_DEBUG_FS
44 #include <linux/debugfs.h>
45 #include <linux/seq_file.h>
46 #endif /* CONFIG_DEBUG_FS */
47 #include <linux/net_tstamp.h>
48 #include <net/pkt_cls.h>
49 #include "stmmac_ptp.h"
50 #include "stmmac.h"
51 #include <linux/reset.h>
52 #include <linux/of_mdio.h>
53 #include "dwmac1000.h"
54 #include "dwxgmac2.h"
55 #include "hwif.h"
56
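/* STMMAC_ALIGN() rounds a length up to the L1 cache line size and then to a
 * 16-byte boundary, a DMA-friendly alignment for descriptors and buffers;
 * TSO_MAX_BUFF_SIZE is the largest payload one TSO descriptor can carry.
 */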
57 #define STMMAC_ALIGN(x)         ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
58 #define TSO_MAX_BUFF_SIZE       (SZ_16K - 1)
59
60 /* Module parameters */
61 #define TX_TIMEO        5000
62 static int watchdog = TX_TIMEO;
63 module_param(watchdog, int, 0644);
64 MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
65
66 static int debug = -1;
67 module_param(debug, int, 0644);
68 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
69
70 static int phyaddr = -1;
71 module_param(phyaddr, int, 0444);
72 MODULE_PARM_DESC(phyaddr, "Physical device address");
73
74 #define STMMAC_TX_THRESH        (DMA_TX_SIZE / 4)
75 #define STMMAC_RX_THRESH        (DMA_RX_SIZE / 4)
76
77 static int flow_ctrl = FLOW_OFF;
78 module_param(flow_ctrl, int, 0644);
79 MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
80
81 static int pause = PAUSE_TIME;
82 module_param(pause, int, 0644);
83 MODULE_PARM_DESC(pause, "Flow Control Pause Time");
84
85 #define TC_DEFAULT 64
86 static int tc = TC_DEFAULT;
87 module_param(tc, int, 0644);
88 MODULE_PARM_DESC(tc, "DMA threshold control value");
89
90 #define DEFAULT_BUFSIZE 1536
91 static int buf_sz = DEFAULT_BUFSIZE;
92 module_param(buf_sz, int, 0644);
93 MODULE_PARM_DESC(buf_sz, "DMA buffer size");
94
95 #define STMMAC_RX_COPYBREAK     256
96
97 static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
98                                       NETIF_MSG_LINK | NETIF_MSG_IFUP |
99                                       NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
100
101 #define STMMAC_DEFAULT_LPI_TIMER        1000
102 static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
103 module_param(eee_timer, int, 0644);
104 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
105 #define STMMAC_LPI_T(x) (jiffies + msecs_to_jiffies(x))
106
107 /* By default the driver will use the ring mode to manage tx and rx descriptors,
108  * but the user can force the use of the chain mode instead of the ring.
109  */
110 static unsigned int chain_mode;
111 module_param(chain_mode, int, 0444);
112 MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
113
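/* Usage sketch (illustrative, not part of the driver): when stmmac is built
 * as a module these knobs are normally given at load time, e.g.
 *
 *   modprobe stmmac buf_sz=4096 tc=256 chain_mode=1
 *
 * and the parameters registered with mode 0644 can also be changed later
 * through /sys/module/stmmac/parameters/<name>.
 */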
114 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
115
116 #ifdef CONFIG_DEBUG_FS
117 static const struct net_device_ops stmmac_netdev_ops;
118 static int stmmac_init_fs(struct net_device *dev);
119 static void stmmac_exit_fs(struct net_device *dev);
120 #endif
121
122 #define STMMAC_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x))
123
124 /**
125  * stmmac_verify_args - verify the driver parameters.
126  * Description: it checks the driver parameters and sets a default in case of
127  * errors.
128  */
129 static void stmmac_verify_args(void)
130 {
131         if (unlikely(watchdog < 0))
132                 watchdog = TX_TIMEO;
133         if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
134                 buf_sz = DEFAULT_BUFSIZE;
135         if (unlikely(flow_ctrl > 1))
136                 flow_ctrl = FLOW_AUTO;
137         else if (likely(flow_ctrl < 0))
138                 flow_ctrl = FLOW_OFF;
139         if (unlikely((pause < 0) || (pause > 0xffff)))
140                 pause = PAUSE_TIME;
141         if (eee_timer < 0)
142                 eee_timer = STMMAC_DEFAULT_LPI_TIMER;
143 }
144
145 /**
146  * stmmac_disable_all_queues - Disable all queues
147  * @priv: driver private structure
148  */
149 static void stmmac_disable_all_queues(struct stmmac_priv *priv)
150 {
151         u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
152         u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
153         u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
154         u32 queue;
155
156         for (queue = 0; queue < maxq; queue++) {
157                 struct stmmac_channel *ch = &priv->channel[queue];
158
159                 napi_disable(&ch->napi);
160         }
161 }
162
163 /**
164  * stmmac_enable_all_queues - Enable all queues
165  * @priv: driver private structure
166  */
167 static void stmmac_enable_all_queues(struct stmmac_priv *priv)
168 {
169         u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
170         u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
171         u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
172         u32 queue;
173
174         for (queue = 0; queue < maxq; queue++) {
175                 struct stmmac_channel *ch = &priv->channel[queue];
176
177                 napi_enable(&ch->napi);
178         }
179 }
180
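/* Schedule the service task unless the interface is going down or the task
 * is already pending (STMMAC_SERVICE_SCHED acts as a one-shot latch).
 */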
181 static void stmmac_service_event_schedule(struct stmmac_priv *priv)
182 {
183         if (!test_bit(STMMAC_DOWN, &priv->state) &&
184             !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
185                 queue_work(priv->wq, &priv->service_task);
186 }
187
188 static void stmmac_global_err(struct stmmac_priv *priv)
189 {
190         netif_carrier_off(priv->dev);
191         set_bit(STMMAC_RESET_REQUESTED, &priv->state);
192         stmmac_service_event_schedule(priv);
193 }
194
195 /**
196  * stmmac_clk_csr_set - dynamically set the MDC clock
197  * @priv: driver private structure
198  * Description: this is to dynamically set the MDC clock according to the csr
199  * clock input.
200  * Note:
201  *      If a specific clk_csr value is passed from the platform
202  *      this means that the CSR Clock Range selection cannot be
203  *      changed at run-time and it is fixed (as reported in the driver
204  *      documentation). Otherwise, the driver will try to set the MDC
205  *      clock dynamically according to the actual clock input.
206  */
207 static void stmmac_clk_csr_set(struct stmmac_priv *priv)
208 {
209         u32 clk_rate;
210
211         clk_rate = clk_get_rate(priv->plat->stmmac_clk);
212
213         /* The platform-provided default clk_csr is assumed valid for all
214          * cases except the ones handled below.
215          * For values higher than the IEEE 802.3 specified frequency
216          * we cannot estimate the proper divider because the frequency
217          * of clk_csr_i is not known. So we do not change the default
218          * divider.
219          */
220         if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
221                 if (clk_rate < CSR_F_35M)
222                         priv->clk_csr = STMMAC_CSR_20_35M;
223                 else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
224                         priv->clk_csr = STMMAC_CSR_35_60M;
225                 else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
226                         priv->clk_csr = STMMAC_CSR_60_100M;
227                 else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
228                         priv->clk_csr = STMMAC_CSR_100_150M;
229                 else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
230                         priv->clk_csr = STMMAC_CSR_150_250M;
231                 else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M))
232                         priv->clk_csr = STMMAC_CSR_250_300M;
233         }
234
235         if (priv->plat->has_sun8i) {
236                 if (clk_rate > 160000000)
237                         priv->clk_csr = 0x03;
238                 else if (clk_rate > 80000000)
239                         priv->clk_csr = 0x02;
240                 else if (clk_rate > 40000000)
241                         priv->clk_csr = 0x01;
242                 else
243                         priv->clk_csr = 0;
244         }
245
246         if (priv->plat->has_xgmac) {
247                 if (clk_rate > 400000000)
248                         priv->clk_csr = 0x5;
249                 else if (clk_rate > 350000000)
250                         priv->clk_csr = 0x4;
251                 else if (clk_rate > 300000000)
252                         priv->clk_csr = 0x3;
253                 else if (clk_rate > 250000000)
254                         priv->clk_csr = 0x2;
255                 else if (clk_rate > 150000000)
256                         priv->clk_csr = 0x1;
257                 else
258                         priv->clk_csr = 0x0;
259         }
260 }
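/* Worked example (illustrative): with a 125 MHz csr clock and no fixed
 * clk_csr provided by the platform, the ladder above selects
 * STMMAC_CSR_100_150M; the MAC then divides the CSR clock so that MDC stays
 * within the 2.5 MHz limit IEEE 802.3 sets for the MDIO bus.
 */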
261
262 static void print_pkt(unsigned char *buf, int len)
263 {
264         pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
265         print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
266 }
267
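/**
 * stmmac_tx_avail - compute the number of free TX descriptors
 * @priv: driver private structure
 * @queue: TX queue index
 * Description: returns how many descriptors are still usable between cur_tx
 * and dirty_tx, keeping one slot unused so a full ring can be distinguished
 * from an empty one.
 */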
268 static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
269 {
270         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
271         u32 avail;
272
273         if (tx_q->dirty_tx > tx_q->cur_tx)
274                 avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
275         else
276                 avail = DMA_TX_SIZE - tx_q->cur_tx + tx_q->dirty_tx - 1;
277
278         return avail;
279 }
280
281 /**
282  * stmmac_rx_dirty - Get RX queue dirty
283  * @priv: driver private structure
284  * @queue: RX queue index
285  */
286 static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
287 {
288         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
289         u32 dirty;
290
291         if (rx_q->dirty_rx <= rx_q->cur_rx)
292                 dirty = rx_q->cur_rx - rx_q->dirty_rx;
293         else
294                 dirty = DMA_RX_SIZE - rx_q->dirty_rx + rx_q->cur_rx;
295
296         return dirty;
297 }
298
299 /**
300  * stmmac_hw_fix_mac_speed - callback for speed selection
301  * @priv: driver private structure
302  * Description: on some platforms (e.g. ST), some HW system configuration
303  * registers have to be set according to the link speed negotiated.
304  */
305 static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv)
306 {
307         struct net_device *ndev = priv->dev;
308         struct phy_device *phydev = ndev->phydev;
309
310         if (likely(priv->plat->fix_mac_speed))
311                 priv->plat->fix_mac_speed(priv->plat->bsp_priv, phydev->speed);
312 }
313
314 /**
315  * stmmac_enable_eee_mode - check and enter in LPI mode
316  * @priv: driver private structure
317  * Description: this function verifies that all TX work is done and, if so,
318  * enters LPI mode when EEE is enabled.
319  */
320 static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
321 {
322         u32 tx_cnt = priv->plat->tx_queues_to_use;
323         u32 queue;
324
325         /* check if all TX queues have the work finished */
326         for (queue = 0; queue < tx_cnt; queue++) {
327                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
328
329                 if (tx_q->dirty_tx != tx_q->cur_tx)
330                         return; /* still unfinished work */
331         }
332
333         /* Check and enter in LPI mode */
334         if (!priv->tx_path_in_lpi_mode)
335                 stmmac_set_eee_mode(priv, priv->hw,
336                                 priv->plat->en_tx_lpi_clockgating);
337 }
338
339 /**
340  * stmmac_disable_eee_mode - disable and exit from LPI mode
341  * @priv: driver private structure
342  * Description: this function exits and disables EEE when the LPI state
343  * is true. It is called from the xmit path.
344  */
345 void stmmac_disable_eee_mode(struct stmmac_priv *priv)
346 {
347         stmmac_reset_eee_mode(priv, priv->hw);
348         del_timer_sync(&priv->eee_ctrl_timer);
349         priv->tx_path_in_lpi_mode = false;
350 }
351
352 /**
353  * stmmac_eee_ctrl_timer - EEE TX SW timer.
354  * @t: timer_list hook
355  * Description:
356  *  if there is no data transfer and we are not already in the LPI state,
357  *  then the MAC transmitter can be moved to the LPI state.
358  */
359 static void stmmac_eee_ctrl_timer(struct timer_list *t)
360 {
361         struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
362
363         stmmac_enable_eee_mode(priv);
364         mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
365 }
366
367 /**
368  * stmmac_eee_init - init EEE
369  * @priv: driver private structure
370  * Description:
371  *  if the GMAC supports the EEE (from the HW cap reg) and the phy device
372  *  can also manage EEE, this function enable the LPI state and start related
373  *  timer.
374  */
375 bool stmmac_eee_init(struct stmmac_priv *priv)
376 {
377         struct net_device *ndev = priv->dev;
378         int interface = priv->plat->interface;
379         bool ret = false;
380
381         if ((interface != PHY_INTERFACE_MODE_MII) &&
382             (interface != PHY_INTERFACE_MODE_GMII) &&
383             !phy_interface_mode_is_rgmii(interface))
384                 goto out;
385
386         /* When using the PCS we cannot deal with the PHY registers at this
387          * stage, so we do not support extra features like EEE.
388          */
389         if ((priv->hw->pcs == STMMAC_PCS_RGMII) ||
390             (priv->hw->pcs == STMMAC_PCS_TBI) ||
391             (priv->hw->pcs == STMMAC_PCS_RTBI))
392                 goto out;
393
394         /* MAC core supports the EEE feature. */
395         if (priv->dma_cap.eee) {
396                 int tx_lpi_timer = priv->tx_lpi_timer;
397
398                 /* Check if the PHY supports EEE */
399                 if (phy_init_eee(ndev->phydev, 1)) {
400                         /* Handle the case where EEE can no longer be supported
401                          * at run time (for example because the link partner
402                          * caps have changed).
403                          * In that case the driver disables its own timers.
404                          */
405                         mutex_lock(&priv->lock);
406                         if (priv->eee_active) {
407                                 netdev_dbg(priv->dev, "disable EEE\n");
408                                 del_timer_sync(&priv->eee_ctrl_timer);
409                                 stmmac_set_eee_timer(priv, priv->hw, 0,
410                                                 tx_lpi_timer);
411                         }
412                         priv->eee_active = 0;
413                         mutex_unlock(&priv->lock);
414                         goto out;
415                 }
416                 /* Activate the EEE and start timers */
417                 mutex_lock(&priv->lock);
418                 if (!priv->eee_active) {
419                         priv->eee_active = 1;
420                         timer_setup(&priv->eee_ctrl_timer,
421                                     stmmac_eee_ctrl_timer, 0);
422                         mod_timer(&priv->eee_ctrl_timer,
423                                   STMMAC_LPI_T(eee_timer));
424
425                         stmmac_set_eee_timer(priv, priv->hw,
426                                         STMMAC_DEFAULT_LIT_LS, tx_lpi_timer);
427                 }
428                 /* Set HW EEE according to the speed */
429                 stmmac_set_eee_pls(priv, priv->hw, ndev->phydev->link);
430
431                 ret = true;
432                 mutex_unlock(&priv->lock);
433
434                 netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
435         }
436 out:
437         return ret;
438 }
439
440 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
441  * @priv: driver private structure
442  * @p : descriptor pointer
443  * @skb : the socket buffer
444  * Description :
445  * This function reads the timestamp from the descriptor, passes it to the
446  * stack and also performs some sanity checks.
447  */
448 static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
449                                    struct dma_desc *p, struct sk_buff *skb)
450 {
451         struct skb_shared_hwtstamps shhwtstamp;
452         u64 ns = 0;
453
454         if (!priv->hwts_tx_en)
455                 return;
456
457         /* exit if skb doesn't support hw tstamp */
458         if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
459                 return;
460
461         /* check tx tstamp status */
462         if (stmmac_get_tx_timestamp_status(priv, p)) {
463                 /* get the valid tstamp */
464                 stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
465
466                 memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
467                 shhwtstamp.hwtstamp = ns_to_ktime(ns);
468
469                 netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
470                 /* pass tstamp to stack */
471                 skb_tstamp_tx(skb, &shhwtstamp);
472         }
473
474         return;
475 }
476
477 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
478  * @priv: driver private structure
479  * @p : descriptor pointer
480  * @np : next descriptor pointer
481  * @skb : the socket buffer
482  * Description :
483  * This function will read received packet's timestamp from the descriptor
484  * and pass it to the stack. It also performs some sanity checks.
485  */
486 static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
487                                    struct dma_desc *np, struct sk_buff *skb)
488 {
489         struct skb_shared_hwtstamps *shhwtstamp = NULL;
490         struct dma_desc *desc = p;
491         u64 ns = 0;
492
493         if (!priv->hwts_rx_en)
494                 return;
495         /* For GMAC4, the valid timestamp is from CTX next desc. */
496         if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
497                 desc = np;
498
499         /* Check if timestamp is available */
500         if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
501                 stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
502                 netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
503                 shhwtstamp = skb_hwtstamps(skb);
504                 memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
505                 shhwtstamp->hwtstamp = ns_to_ktime(ns);
506         } else  {
507                 netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
508         }
509 }
510
511 /**
512  *  stmmac_hwtstamp_ioctl - control hardware timestamping.
513  *  @dev: device pointer.
514  *  @ifr: An IOCTL specific structure, that can contain a pointer to
515  *  a proprietary structure used to pass information to the driver.
516  *  Description:
517  *  This function configures the MAC to enable/disable both outgoing(TX)
518  *  and incoming(RX) packets time stamping based on user input.
519  *  Return Value:
520  *  0 on success and an appropriate -ve integer on failure.
521  */
522 static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
523 {
524         struct stmmac_priv *priv = netdev_priv(dev);
525         struct hwtstamp_config config;
526         struct timespec64 now;
527         u64 temp = 0;
528         u32 ptp_v2 = 0;
529         u32 tstamp_all = 0;
530         u32 ptp_over_ipv4_udp = 0;
531         u32 ptp_over_ipv6_udp = 0;
532         u32 ptp_over_ethernet = 0;
533         u32 snap_type_sel = 0;
534         u32 ts_master_en = 0;
535         u32 ts_event_en = 0;
536         u32 sec_inc = 0;
537         u32 value = 0;
538         bool xmac;
539
540         xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
541
542         if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
543                 netdev_alert(priv->dev, "No support for HW time stamping\n");
544                 priv->hwts_tx_en = 0;
545                 priv->hwts_rx_en = 0;
546
547                 return -EOPNOTSUPP;
548         }
549
550         if (copy_from_user(&config, ifr->ifr_data,
551                            sizeof(struct hwtstamp_config)))
552                 return -EFAULT;
553
554         netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
555                    __func__, config.flags, config.tx_type, config.rx_filter);
556
557         /* reserved for future extensions */
558         if (config.flags)
559                 return -EINVAL;
560
561         if (config.tx_type != HWTSTAMP_TX_OFF &&
562             config.tx_type != HWTSTAMP_TX_ON)
563                 return -ERANGE;
564
565         if (priv->adv_ts) {
566                 switch (config.rx_filter) {
567                 case HWTSTAMP_FILTER_NONE:
568                         /* time stamp no incoming packet at all */
569                         config.rx_filter = HWTSTAMP_FILTER_NONE;
570                         break;
571
572                 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
573                         /* PTP v1, UDP, any kind of event packet */
574                         config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
575                         /* take time stamp for all event messages */
576                         if (xmac)
577                                 snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
578                         else
579                                 snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
580
581                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
582                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
583                         break;
584
585                 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
586                         /* PTP v1, UDP, Sync packet */
587                         config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
588                         /* take time stamp for SYNC messages only */
589                         ts_event_en = PTP_TCR_TSEVNTENA;
590
591                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
592                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
593                         break;
594
595                 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
596                         /* PTP v1, UDP, Delay_req packet */
597                         config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
598                         /* take time stamp for Delay_Req messages only */
599                         ts_master_en = PTP_TCR_TSMSTRENA;
600                         ts_event_en = PTP_TCR_TSEVNTENA;
601
602                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
603                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
604                         break;
605
606                 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
607                         /* PTP v2, UDP, any kind of event packet */
608                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
609                         ptp_v2 = PTP_TCR_TSVER2ENA;
610                         /* take time stamp for all event messages */
611                         if (xmac)
612                                 snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
613                         else
614                                 snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
615
616                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
617                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
618                         break;
619
620                 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
621                         /* PTP v2, UDP, Sync packet */
622                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
623                         ptp_v2 = PTP_TCR_TSVER2ENA;
624                         /* take time stamp for SYNC messages only */
625                         ts_event_en = PTP_TCR_TSEVNTENA;
626
627                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
628                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
629                         break;
630
631                 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
632                         /* PTP v2, UDP, Delay_req packet */
633                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
634                         ptp_v2 = PTP_TCR_TSVER2ENA;
635                         /* take time stamp for Delay_Req messages only */
636                         ts_master_en = PTP_TCR_TSMSTRENA;
637                         ts_event_en = PTP_TCR_TSEVNTENA;
638
639                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
640                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
641                         break;
642
643                 case HWTSTAMP_FILTER_PTP_V2_EVENT:
644                         /* PTP v2/802.1AS, any layer, any kind of event packet */
645                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
646                         ptp_v2 = PTP_TCR_TSVER2ENA;
647                         /* take time stamp for all event messages */
648                         if (xmac)
649                                 snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
650                         else
651                                 snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
652
653                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
654                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
655                         ptp_over_ethernet = PTP_TCR_TSIPENA;
656                         break;
657
658                 case HWTSTAMP_FILTER_PTP_V2_SYNC:
659                         /* PTP v2/802.1AS, any layer, Sync packet */
660                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
661                         ptp_v2 = PTP_TCR_TSVER2ENA;
662                         /* take time stamp for SYNC messages only */
663                         ts_event_en = PTP_TCR_TSEVNTENA;
664
665                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
666                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
667                         ptp_over_ethernet = PTP_TCR_TSIPENA;
668                         break;
669
670                 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
671                         /* PTP v2/802.1AS, any layer, Delay_req packet */
672                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
673                         ptp_v2 = PTP_TCR_TSVER2ENA;
674                         /* take time stamp for Delay_Req messages only */
675                         ts_master_en = PTP_TCR_TSMSTRENA;
676                         ts_event_en = PTP_TCR_TSEVNTENA;
677
678                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
679                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
680                         ptp_over_ethernet = PTP_TCR_TSIPENA;
681                         break;
682
683                 case HWTSTAMP_FILTER_NTP_ALL:
684                 case HWTSTAMP_FILTER_ALL:
685                         /* time stamp any incoming packet */
686                         config.rx_filter = HWTSTAMP_FILTER_ALL;
687                         tstamp_all = PTP_TCR_TSENALL;
688                         break;
689
690                 default:
691                         return -ERANGE;
692                 }
693         } else {
694                 switch (config.rx_filter) {
695                 case HWTSTAMP_FILTER_NONE:
696                         config.rx_filter = HWTSTAMP_FILTER_NONE;
697                         break;
698                 default:
699                         /* PTP v1, UDP, any kind of event packet */
700                         config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
701                         break;
702                 }
703         }
704         priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
705         priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
706
707         if (!priv->hwts_tx_en && !priv->hwts_rx_en)
708                 stmmac_config_hw_tstamping(priv, priv->ptpaddr, 0);
709         else {
710                 value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR |
711                          tstamp_all | ptp_v2 | ptp_over_ethernet |
712                          ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en |
713                          ts_master_en | snap_type_sel);
714                 stmmac_config_hw_tstamping(priv, priv->ptpaddr, value);
715
716                 /* program Sub Second Increment reg */
717                 stmmac_config_sub_second_increment(priv,
718                                 priv->ptpaddr, priv->plat->clk_ptp_rate,
719                                 xmac, &sec_inc);
720                 temp = div_u64(1000000000ULL, sec_inc);
721
722                 /* Store sub second increment and flags for later use */
723                 priv->sub_second_inc = sec_inc;
724                 priv->systime_flags = value;
725
726                 /* calculate the default addend value:
727                  * formula is :
728                  * addend = (2^32)/freq_div_ratio;
729                  * where, freq_div_ratio = 1e9ns/sec_inc
730                  */
731                 temp = (u64)(temp << 32);
732                 priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
733                 stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
734
735                 /* initialize system time */
736                 ktime_get_real_ts64(&now);
737
738                 /* lower 32 bits of tv_sec are safe until y2106 */
739                 stmmac_init_systime(priv, priv->ptpaddr,
740                                 (u32)now.tv_sec, now.tv_nsec);
741         }
742
743         return copy_to_user(ifr->ifr_data, &config,
744                             sizeof(struct hwtstamp_config)) ? -EFAULT : 0;
745 }
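/* Illustrative userspace counterpart (not part of the driver): a PTP daemon
 * such as ptp4l, or a small test program, reaches this handler through the
 * SIOCSHWTSTAMP ioctl, roughly as follows:
 *
 *   struct hwtstamp_config cfg = {
 *           .tx_type   = HWTSTAMP_TX_ON,
 *           .rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
 *   };
 *   struct ifreq ifr = { 0 };
 *
 *   strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *   ifr.ifr_data = (void *)&cfg;
 *   ioctl(sock_fd, SIOCSHWTSTAMP, &ifr);
 *
 * The request is routed to the driver's ndo_do_ioctl hook, which calls
 * stmmac_hwtstamp_ioctl() for SIOCSHWTSTAMP.
 */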
746
747 /**
748  * stmmac_init_ptp - init PTP
749  * @priv: driver private structure
750  * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
751  * This is done by looking at the HW cap. register.
752  * This function also registers the ptp driver.
753  */
754 static int stmmac_init_ptp(struct stmmac_priv *priv)
755 {
756         bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
757
758         if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
759                 return -EOPNOTSUPP;
760
761         priv->adv_ts = 0;
762         /* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
763         if (xmac && priv->dma_cap.atime_stamp)
764                 priv->adv_ts = 1;
765         /* Dwmac 3.x core with extend_desc can support adv_ts */
766         else if (priv->extend_desc && priv->dma_cap.atime_stamp)
767                 priv->adv_ts = 1;
768
769         if (priv->dma_cap.time_stamp)
770                 netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
771
772         if (priv->adv_ts)
773                 netdev_info(priv->dev,
774                             "IEEE 1588-2008 Advanced Timestamp supported\n");
775
776         priv->hwts_tx_en = 0;
777         priv->hwts_rx_en = 0;
778
779         stmmac_ptp_register(priv);
780
781         return 0;
782 }
783
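/**
 * stmmac_release_ptp - release PTP resources
 * @priv: driver private structure
 * Description: disables the PTP reference clock (if one was provided by the
 * platform) and unregisters the PTP clock driver.
 */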
784 static void stmmac_release_ptp(struct stmmac_priv *priv)
785 {
786         if (priv->plat->clk_ptp_ref)
787                 clk_disable_unprepare(priv->plat->clk_ptp_ref);
788         stmmac_ptp_unregister(priv);
789 }
790
791 /**
792  *  stmmac_mac_flow_ctrl - Configure flow control in all queues
793  *  @priv: driver private structure
794  *  Description: It is used for configuring the flow control in all queues
795  */
796 static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
797 {
798         u32 tx_cnt = priv->plat->tx_queues_to_use;
799
800         stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
801                         priv->pause, tx_cnt);
802 }
803
804 /**
805  * stmmac_adjust_link - adjusts the link parameters
806  * @dev: net device structure
807  * Description: this is the helper called by the physical abstraction layer
808  * drivers to communicate the phy link status. According the speed and duplex
809  * this driver can invoke registered glue-logic as well.
810  * It also invoke the eee initialization because it could happen when switch
811  * on different networks (that are eee capable).
812  */
813 static void stmmac_adjust_link(struct net_device *dev)
814 {
815         struct stmmac_priv *priv = netdev_priv(dev);
816         struct phy_device *phydev = dev->phydev;
817         bool new_state = false;
818
819         if (!phydev)
820                 return;
821
822         mutex_lock(&priv->lock);
823
824         if (phydev->link) {
825                 u32 ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
826
827                 /* Now we make sure that we can be in full duplex mode.
828                  * If not, we operate in half-duplex mode. */
829                 if (phydev->duplex != priv->oldduplex) {
830                         new_state = true;
831                         if (!phydev->duplex)
832                                 ctrl &= ~priv->hw->link.duplex;
833                         else
834                                 ctrl |= priv->hw->link.duplex;
835                         priv->oldduplex = phydev->duplex;
836                 }
837                 /* Flow Control operation */
838                 if (phydev->pause)
839                         stmmac_mac_flow_ctrl(priv, phydev->duplex);
840
841                 if (phydev->speed != priv->speed) {
842                         new_state = true;
843                         ctrl &= ~priv->hw->link.speed_mask;
844                         switch (phydev->speed) {
845                         case SPEED_1000:
846                                 ctrl |= priv->hw->link.speed1000;
847                                 break;
848                         case SPEED_100:
849                                 ctrl |= priv->hw->link.speed100;
850                                 break;
851                         case SPEED_10:
852                                 ctrl |= priv->hw->link.speed10;
853                                 break;
854                         default:
855                                 netif_warn(priv, link, priv->dev,
856                                            "broken speed: %d\n", phydev->speed);
857                                 phydev->speed = SPEED_UNKNOWN;
858                                 break;
859                         }
860                         if (phydev->speed != SPEED_UNKNOWN)
861                                 stmmac_hw_fix_mac_speed(priv);
862                         priv->speed = phydev->speed;
863                 }
864
865                 writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
866
867                 if (!priv->oldlink) {
868                         new_state = true;
869                         priv->oldlink = true;
870                 }
871         } else if (priv->oldlink) {
872                 new_state = true;
873                 priv->oldlink = false;
874                 priv->speed = SPEED_UNKNOWN;
875                 priv->oldduplex = DUPLEX_UNKNOWN;
876         }
877
878         if (new_state && netif_msg_link(priv))
879                 phy_print_status(phydev);
880
881         mutex_unlock(&priv->lock);
882
883         if (phydev->is_pseudo_fixed_link)
884                 /* Stop the PHY layer from calling the adjust_link hook in
885                  * case a switch is attached to the stmmac driver.
886                  */
887                 phydev->irq = PHY_IGNORE_INTERRUPT;
888         else
889                 /* At this stage, init the EEE if supported.
890                  * Never called in case of fixed_link.
891                  */
892                 priv->eee_enabled = stmmac_eee_init(priv);
893 }
894
895 /**
896  * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
897  * @priv: driver private structure
898  * Description: this verifies whether the HW supports the PCS, i.e. the
899  * Physical Coding Sublayer (PCS) interface that can be used when the MAC is
900  * configured for the TBI, RTBI, or SGMII PHY interface.
901  */
902 static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
903 {
904         int interface = priv->plat->interface;
905
906         if (priv->dma_cap.pcs) {
907                 if ((interface == PHY_INTERFACE_MODE_RGMII) ||
908                     (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
909                     (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
910                     (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
911                         netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
912                         priv->hw->pcs = STMMAC_PCS_RGMII;
913                 } else if (interface == PHY_INTERFACE_MODE_SGMII) {
914                         netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
915                         priv->hw->pcs = STMMAC_PCS_SGMII;
916                 }
917         }
918 }
919
920 /**
921  * stmmac_init_phy - PHY initialization
922  * @dev: net device structure
923  * Description: it initializes the driver's PHY state, and attaches the PHY
924  * to the mac driver.
925  *  Return value:
926  *  0 on success
927  */
928 static int stmmac_init_phy(struct net_device *dev)
929 {
930         struct stmmac_priv *priv = netdev_priv(dev);
931         u32 tx_cnt = priv->plat->tx_queues_to_use;
932         struct phy_device *phydev;
933         char phy_id_fmt[MII_BUS_ID_SIZE + 3];
934         char bus_id[MII_BUS_ID_SIZE];
935         int interface = priv->plat->interface;
936         int max_speed = priv->plat->max_speed;
937         priv->oldlink = false;
938         priv->speed = SPEED_UNKNOWN;
939         priv->oldduplex = DUPLEX_UNKNOWN;
940
941         if (priv->plat->phy_node) {
942                 phydev = of_phy_connect(dev, priv->plat->phy_node,
943                                         &stmmac_adjust_link, 0, interface);
944         } else {
945                 snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x",
946                          priv->plat->bus_id);
947
948                 snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
949                          priv->plat->phy_addr);
950                 netdev_dbg(priv->dev, "%s: trying to attach to %s\n", __func__,
951                            phy_id_fmt);
952
953                 phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link,
954                                      interface);
955         }
956
957         if (IS_ERR_OR_NULL(phydev)) {
958                 netdev_err(priv->dev, "Could not attach to PHY\n");
959                 if (!phydev)
960                         return -ENODEV;
961
962                 return PTR_ERR(phydev);
963         }
964
965         /* Stop Advertising 1000BASE Capability if interface is not GMII */
966         if ((interface == PHY_INTERFACE_MODE_MII) ||
967             (interface == PHY_INTERFACE_MODE_RMII) ||
968                 (max_speed < 1000 && max_speed > 0))
969                 phydev->advertising &= ~(SUPPORTED_1000baseT_Half |
970                                          SUPPORTED_1000baseT_Full);
971
972         /*
973          * Half-duplex mode is not supported with multiqueue;
974          * half-duplex can only work with a single queue.
975          */
976         if (tx_cnt > 1)
977                 phydev->supported &= ~(SUPPORTED_1000baseT_Half |
978                                        SUPPORTED_100baseT_Half |
979                                        SUPPORTED_10baseT_Half);
980
981         /*
982          * Broken HW is sometimes missing the pull-up resistor on the
983          * MDIO line, which results in reads to non-existent devices returning
984          * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
985          * device as well.
986          * Note: phydev->phy_id is the result of reading the UID PHY registers.
987          */
988         if (!priv->plat->phy_node && phydev->phy_id == 0) {
989                 phy_disconnect(phydev);
990                 return -ENODEV;
991         }
992
993         /* stmmac_adjust_link will change this to PHY_IGNORE_INTERRUPT to avoid
994          * subsequent PHY polling; make sure we force a link transition if
995          * we have an UP/DOWN/UP transition
996          */
997         if (phydev->is_pseudo_fixed_link)
998                 phydev->irq = PHY_POLL;
999
1000         phy_attached_info(phydev);
1001         return 0;
1002 }
1003
1004 static void stmmac_display_rx_rings(struct stmmac_priv *priv)
1005 {
1006         u32 rx_cnt = priv->plat->rx_queues_to_use;
1007         void *head_rx;
1008         u32 queue;
1009
1010         /* Display RX rings */
1011         for (queue = 0; queue < rx_cnt; queue++) {
1012                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1013
1014                 pr_info("\tRX Queue %u rings\n", queue);
1015
1016                 if (priv->extend_desc)
1017                         head_rx = (void *)rx_q->dma_erx;
1018                 else
1019                         head_rx = (void *)rx_q->dma_rx;
1020
1021                 /* Display RX ring */
1022                 stmmac_display_ring(priv, head_rx, DMA_RX_SIZE, true);
1023         }
1024 }
1025
1026 static void stmmac_display_tx_rings(struct stmmac_priv *priv)
1027 {
1028         u32 tx_cnt = priv->plat->tx_queues_to_use;
1029         void *head_tx;
1030         u32 queue;
1031
1032         /* Display TX rings */
1033         for (queue = 0; queue < tx_cnt; queue++) {
1034                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1035
1036                 pr_info("\tTX Queue %d rings\n", queue);
1037
1038                 if (priv->extend_desc)
1039                         head_tx = (void *)tx_q->dma_etx;
1040                 else
1041                         head_tx = (void *)tx_q->dma_tx;
1042
1043                 stmmac_display_ring(priv, head_tx, DMA_TX_SIZE, false);
1044         }
1045 }
1046
1047 static void stmmac_display_rings(struct stmmac_priv *priv)
1048 {
1049         /* Display RX ring */
1050         stmmac_display_rx_rings(priv);
1051
1052         /* Display TX ring */
1053         stmmac_display_tx_rings(priv);
1054 }
1055
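/**
 * stmmac_set_bfsize - pick a DMA buffer size for a given MTU
 * @mtu: interface MTU
 * @bufsize: currently configured buffer size (used as the starting value)
 * Description: rounds the MTU up to the next supported DMA buffer size
 * (2, 4, 8 or 16 KiB); MTUs that fit in DEFAULT_BUFSIZE keep the default.
 */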
1056 static int stmmac_set_bfsize(int mtu, int bufsize)
1057 {
1058         int ret = bufsize;
1059
1060         if (mtu >= BUF_SIZE_8KiB)
1061                 ret = BUF_SIZE_16KiB;
1062         else if (mtu >= BUF_SIZE_4KiB)
1063                 ret = BUF_SIZE_8KiB;
1064         else if (mtu >= BUF_SIZE_2KiB)
1065                 ret = BUF_SIZE_4KiB;
1066         else if (mtu > DEFAULT_BUFSIZE)
1067                 ret = BUF_SIZE_2KiB;
1068         else
1069                 ret = DEFAULT_BUFSIZE;
1070
1071         return ret;
1072 }
1073
1074 /**
1075  * stmmac_clear_rx_descriptors - clear RX descriptors
1076  * @priv: driver private structure
1077  * @queue: RX queue index
1078  * Description: this function is called to clear the RX descriptors
1079  * whether basic or extended descriptors are used.
1080  */
1081 static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
1082 {
1083         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1084         int i;
1085
1086         /* Clear the RX descriptors */
1087         for (i = 0; i < DMA_RX_SIZE; i++)
1088                 if (priv->extend_desc)
1089                         stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
1090                                         priv->use_riwt, priv->mode,
1091                                         (i == DMA_RX_SIZE - 1),
1092                                         priv->dma_buf_sz);
1093                 else
1094                         stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
1095                                         priv->use_riwt, priv->mode,
1096                                         (i == DMA_RX_SIZE - 1),
1097                                         priv->dma_buf_sz);
1098 }
1099
1100 /**
1101  * stmmac_clear_tx_descriptors - clear tx descriptors
1102  * @priv: driver private structure
1103  * @queue: TX queue index.
1104  * Description: this function is called to clear the TX descriptors
1105  * whether basic or extended descriptors are used.
1106  */
1107 static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
1108 {
1109         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1110         int i;
1111
1112         /* Clear the TX descriptors */
1113         for (i = 0; i < DMA_TX_SIZE; i++)
1114                 if (priv->extend_desc)
1115                         stmmac_init_tx_desc(priv, &tx_q->dma_etx[i].basic,
1116                                         priv->mode, (i == DMA_TX_SIZE - 1));
1117                 else
1118                         stmmac_init_tx_desc(priv, &tx_q->dma_tx[i],
1119                                         priv->mode, (i == DMA_TX_SIZE - 1));
1120 }
1121
1122 /**
1123  * stmmac_clear_descriptors - clear descriptors
1124  * @priv: driver private structure
1125  * Description: this function is called to clear the TX and RX descriptors
1126  * whether basic or extended descriptors are used.
1127  */
1128 static void stmmac_clear_descriptors(struct stmmac_priv *priv)
1129 {
1130         u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1131         u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1132         u32 queue;
1133
1134         /* Clear the RX descriptors */
1135         for (queue = 0; queue < rx_queue_cnt; queue++)
1136                 stmmac_clear_rx_descriptors(priv, queue);
1137
1138         /* Clear the TX descriptors */
1139         for (queue = 0; queue < tx_queue_cnt; queue++)
1140                 stmmac_clear_tx_descriptors(priv, queue);
1141 }
1142
1143 /**
1144  * stmmac_init_rx_buffers - init the RX descriptor buffer.
1145  * @priv: driver private structure
1146  * @p: descriptor pointer
1147  * @i: descriptor index
1148  * @flags: gfp flag
1149  * @queue: RX queue index
1150  * Description: this function is called to allocate a receive buffer, perform
1151  * the DMA mapping and init the descriptor.
1152  */
1153 static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
1154                                   int i, gfp_t flags, u32 queue)
1155 {
1156         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1157         struct sk_buff *skb;
1158
1159         skb = __netdev_alloc_skb_ip_align(priv->dev, priv->dma_buf_sz, flags);
1160         if (!skb) {
1161                 netdev_err(priv->dev,
1162                            "%s: Rx init fails; skb is NULL\n", __func__);
1163                 return -ENOMEM;
1164         }
1165         rx_q->rx_skbuff[i] = skb;
1166         rx_q->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
1167                                                 priv->dma_buf_sz,
1168                                                 DMA_FROM_DEVICE);
1169         if (dma_mapping_error(priv->device, rx_q->rx_skbuff_dma[i])) {
1170                 netdev_err(priv->dev, "%s: DMA mapping error\n", __func__);
1171                 dev_kfree_skb_any(skb);
1172                 return -EINVAL;
1173         }
1174
1175         stmmac_set_desc_addr(priv, p, rx_q->rx_skbuff_dma[i]);
1176
1177         if (priv->dma_buf_sz == BUF_SIZE_16KiB)
1178                 stmmac_init_desc3(priv, p);
1179
1180         return 0;
1181 }
1182
1183 /**
1184  * stmmac_free_rx_buffer - free an RX dma buffer
1185  * @priv: private structure
1186  * @queue: RX queue index
1187  * @i: buffer index.
1188  */
1189 static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
1190 {
1191         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1192
1193         if (rx_q->rx_skbuff[i]) {
1194                 dma_unmap_single(priv->device, rx_q->rx_skbuff_dma[i],
1195                                  priv->dma_buf_sz, DMA_FROM_DEVICE);
1196                 dev_kfree_skb_any(rx_q->rx_skbuff[i]);
1197         }
1198         rx_q->rx_skbuff[i] = NULL;
1199 }
1200
1201 /**
1202  * stmmac_free_tx_buffer - free a TX dma buffer
1203  * @priv: private structure
1204  * @queue: TX queue index
1205  * @i: buffer index.
1206  */
1207 static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
1208 {
1209         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1210
1211         if (tx_q->tx_skbuff_dma[i].buf) {
1212                 if (tx_q->tx_skbuff_dma[i].map_as_page)
1213                         dma_unmap_page(priv->device,
1214                                        tx_q->tx_skbuff_dma[i].buf,
1215                                        tx_q->tx_skbuff_dma[i].len,
1216                                        DMA_TO_DEVICE);
1217                 else
1218                         dma_unmap_single(priv->device,
1219                                          tx_q->tx_skbuff_dma[i].buf,
1220                                          tx_q->tx_skbuff_dma[i].len,
1221                                          DMA_TO_DEVICE);
1222         }
1223
1224         if (tx_q->tx_skbuff[i]) {
1225                 dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1226                 tx_q->tx_skbuff[i] = NULL;
1227                 tx_q->tx_skbuff_dma[i].buf = 0;
1228                 tx_q->tx_skbuff_dma[i].map_as_page = false;
1229         }
1230 }
1231
1232 /**
1233  * init_dma_rx_desc_rings - init the RX descriptor rings
1234  * @dev: net device structure
1235  * @flags: gfp flag.
1236  * Description: this function initializes the DMA RX descriptors
1237  * and allocates the socket buffers. It supports the chained and ring
1238  * modes.
1239  */
1240 static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
1241 {
1242         struct stmmac_priv *priv = netdev_priv(dev);
1243         u32 rx_count = priv->plat->rx_queues_to_use;
1244         int ret = -ENOMEM;
1245         int bfsize = 0;
1246         int queue;
1247         int i;
1248
1249         bfsize = stmmac_set_16kib_bfsize(priv, dev->mtu);
1250         if (bfsize < 0)
1251                 bfsize = 0;
1252
1253         if (bfsize < BUF_SIZE_16KiB)
1254                 bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
1255
1256         priv->dma_buf_sz = bfsize;
1257
1258         /* RX INITIALIZATION */
1259         netif_dbg(priv, probe, priv->dev,
1260                   "SKB addresses:\nskb\t\tskb data\tdma data\n");
1261
1262         for (queue = 0; queue < rx_count; queue++) {
1263                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1264
1265                 netif_dbg(priv, probe, priv->dev,
1266                           "(%s) dma_rx_phy=0x%08x\n", __func__,
1267                           (u32)rx_q->dma_rx_phy);
1268
1269                 for (i = 0; i < DMA_RX_SIZE; i++) {
1270                         struct dma_desc *p;
1271
1272                         if (priv->extend_desc)
1273                                 p = &((rx_q->dma_erx + i)->basic);
1274                         else
1275                                 p = rx_q->dma_rx + i;
1276
1277                         ret = stmmac_init_rx_buffers(priv, p, i, flags,
1278                                                      queue);
1279                         if (ret)
1280                                 goto err_init_rx_buffers;
1281
1282                         netif_dbg(priv, probe, priv->dev, "[%p]\t[%p]\t[%x]\n",
1283                                   rx_q->rx_skbuff[i], rx_q->rx_skbuff[i]->data,
1284                                   (unsigned int)rx_q->rx_skbuff_dma[i]);
1285                 }
1286
1287                 rx_q->cur_rx = 0;
1288                 rx_q->dirty_rx = (unsigned int)(i - DMA_RX_SIZE);
1289
1290                 stmmac_clear_rx_descriptors(priv, queue);
1291
1292                 /* Setup the chained descriptor addresses */
1293                 if (priv->mode == STMMAC_CHAIN_MODE) {
1294                         if (priv->extend_desc)
1295                                 stmmac_mode_init(priv, rx_q->dma_erx,
1296                                                 rx_q->dma_rx_phy, DMA_RX_SIZE, 1);
1297                         else
1298                                 stmmac_mode_init(priv, rx_q->dma_rx,
1299                                                 rx_q->dma_rx_phy, DMA_RX_SIZE, 0);
1300                 }
1301         }
1302
1303         buf_sz = bfsize;
1304
1305         return 0;
1306
1307 err_init_rx_buffers:
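        /* Unwind: free the buffers of the queue that failed part-way through,
         * then every buffer of the fully initialised queues before it.
         */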
1308         while (queue >= 0) {
1309                 while (--i >= 0)
1310                         stmmac_free_rx_buffer(priv, queue, i);
1311
1312                 if (queue == 0)
1313                         break;
1314
1315                 i = DMA_RX_SIZE;
1316                 queue--;
1317         }
1318
1319         return ret;
1320 }
1321
1322 /**
1323  * init_dma_tx_desc_rings - init the TX descriptor rings
1324  * @dev: net device structure.
1325  * Description: this function initializes the DMA TX descriptors
1326  * and allocates the socket buffers. It supports the chained and ring
1327  * modes.
1328  */
1329 static int init_dma_tx_desc_rings(struct net_device *dev)
1330 {
1331         struct stmmac_priv *priv = netdev_priv(dev);
1332         u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1333         u32 queue;
1334         int i;
1335
1336         for (queue = 0; queue < tx_queue_cnt; queue++) {
1337                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1338
1339                 netif_dbg(priv, probe, priv->dev,
1340                           "(%s) dma_tx_phy=0x%08x\n", __func__,
1341                          (u32)tx_q->dma_tx_phy);
1342
1343                 /* Setup the chained descriptor addresses */
1344                 if (priv->mode == STMMAC_CHAIN_MODE) {
1345                         if (priv->extend_desc)
1346                                 stmmac_mode_init(priv, tx_q->dma_etx,
1347                                                 tx_q->dma_tx_phy, DMA_TX_SIZE, 1);
1348                         else
1349                                 stmmac_mode_init(priv, tx_q->dma_tx,
1350                                                 tx_q->dma_tx_phy, DMA_TX_SIZE, 0);
1351                 }
1352
1353                 for (i = 0; i < DMA_TX_SIZE; i++) {
1354                         struct dma_desc *p;
1355                         if (priv->extend_desc)
1356                                 p = &((tx_q->dma_etx + i)->basic);
1357                         else
1358                                 p = tx_q->dma_tx + i;
1359
1360                         stmmac_clear_desc(priv, p);
1361
1362                         tx_q->tx_skbuff_dma[i].buf = 0;
1363                         tx_q->tx_skbuff_dma[i].map_as_page = false;
1364                         tx_q->tx_skbuff_dma[i].len = 0;
1365                         tx_q->tx_skbuff_dma[i].last_segment = false;
1366                         tx_q->tx_skbuff[i] = NULL;
1367                 }
1368
1369                 tx_q->dirty_tx = 0;
1370                 tx_q->cur_tx = 0;
1371                 tx_q->mss = 0;
1372
1373                 netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
1374         }
1375
1376         return 0;
1377 }
1378
1379 /**
1380  * init_dma_desc_rings - init the RX/TX descriptor rings
1381  * @dev: net device structure
1382  * @flags: gfp flag.
1383  * Description: this function initializes the DMA RX/TX descriptors
1384  * and allocates the socket buffers. It supports the chained and ring
1385  * modes.
1386  */
1387 static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
1388 {
1389         struct stmmac_priv *priv = netdev_priv(dev);
1390         int ret;
1391
1392         ret = init_dma_rx_desc_rings(dev, flags);
1393         if (ret)
1394                 return ret;
1395
1396         ret = init_dma_tx_desc_rings(dev);
1397
1398         stmmac_clear_descriptors(priv);
1399
1400         if (netif_msg_hw(priv))
1401                 stmmac_display_rings(priv);
1402
1403         return ret;
1404 }
1405
1406 /**
1407  * dma_free_rx_skbufs - free RX dma buffers
1408  * @priv: private structure
1409  * @queue: RX queue index
1410  */
1411 static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue)
1412 {
1413         int i;
1414
1415         for (i = 0; i < DMA_RX_SIZE; i++)
1416                 stmmac_free_rx_buffer(priv, queue, i);
1417 }
1418
1419 /**
1420  * dma_free_tx_skbufs - free TX dma buffers
1421  * @priv: private structure
1422  * @queue: TX queue index
1423  */
1424 static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
1425 {
1426         int i;
1427
1428         for (i = 0; i < DMA_TX_SIZE; i++)
1429                 stmmac_free_tx_buffer(priv, queue, i);
1430 }
1431
1432 /**
1433  * stmmac_free_tx_skbufs - free TX skb buffers
1434  * @priv: private structure
1435  */
1436 static void stmmac_free_tx_skbufs(struct stmmac_priv *priv)
1437 {
1438         u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1439         u32 queue;
1440
1441         for (queue = 0; queue < tx_queue_cnt; queue++)
1442                 dma_free_tx_skbufs(priv, queue);
1443 }
1444
1445 /**
1446  * free_dma_rx_desc_resources - free RX dma desc resources
1447  * @priv: private structure
1448  */
1449 static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
1450 {
1451         u32 rx_count = priv->plat->rx_queues_to_use;
1452         u32 queue;
1453
1454         /* Free RX queue resources */
1455         for (queue = 0; queue < rx_count; queue++) {
1456                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1457
1458                 /* Release the DMA RX socket buffers */
1459                 dma_free_rx_skbufs(priv, queue);
1460
1461                 /* Free DMA regions of consistent memory previously allocated */
1462                 if (!priv->extend_desc)
1463                         dma_free_coherent(priv->device,
1464                                           DMA_RX_SIZE * sizeof(struct dma_desc),
1465                                           rx_q->dma_rx, rx_q->dma_rx_phy);
1466                 else
1467                         dma_free_coherent(priv->device, DMA_RX_SIZE *
1468                                           sizeof(struct dma_extended_desc),
1469                                           rx_q->dma_erx, rx_q->dma_rx_phy);
1470
1471                 kfree(rx_q->rx_skbuff_dma);
1472                 kfree(rx_q->rx_skbuff);
1473         }
1474 }
1475
1476 /**
1477  * free_dma_tx_desc_resources - free TX dma desc resources
1478  * @priv: private structure
1479  */
1480 static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
1481 {
1482         u32 tx_count = priv->plat->tx_queues_to_use;
1483         u32 queue;
1484
1485         /* Free TX queue resources */
1486         for (queue = 0; queue < tx_count; queue++) {
1487                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1488
1489                 /* Release the DMA TX socket buffers */
1490                 dma_free_tx_skbufs(priv, queue);
1491
1492                 /* Free DMA regions of consistent memory previously allocated */
1493                 if (!priv->extend_desc)
1494                         dma_free_coherent(priv->device,
1495                                           DMA_TX_SIZE * sizeof(struct dma_desc),
1496                                           tx_q->dma_tx, tx_q->dma_tx_phy);
1497                 else
1498                         dma_free_coherent(priv->device, DMA_TX_SIZE *
1499                                           sizeof(struct dma_extended_desc),
1500                                           tx_q->dma_etx, tx_q->dma_tx_phy);
1501
1502                 kfree(tx_q->tx_skbuff_dma);
1503                 kfree(tx_q->tx_skbuff);
1504         }
1505 }
1506
1507 /**
1508  * alloc_dma_rx_desc_resources - alloc RX resources.
1509  * @priv: private structure
1510  * Description: according to which descriptor type is in use (extended or
1511  * basic), this function allocates the RX path resources: the descriptor
1512  * rings and the per-entry bookkeeping that later allows the zero-copy
1513  * mechanism once the buffers are attached.
1514  */
1515 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
1516 {
1517         u32 rx_count = priv->plat->rx_queues_to_use;
1518         int ret = -ENOMEM;
1519         u32 queue;
1520
1521         /* RX queues buffers and DMA */
1522         for (queue = 0; queue < rx_count; queue++) {
1523                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1524
1525                 rx_q->queue_index = queue;
1526                 rx_q->priv_data = priv;
1527
1528                 rx_q->rx_skbuff_dma = kmalloc_array(DMA_RX_SIZE,
1529                                                     sizeof(dma_addr_t),
1530                                                     GFP_KERNEL);
1531                 if (!rx_q->rx_skbuff_dma)
1532                         goto err_dma;
1533
1534                 rx_q->rx_skbuff = kmalloc_array(DMA_RX_SIZE,
1535                                                 sizeof(struct sk_buff *),
1536                                                 GFP_KERNEL);
1537                 if (!rx_q->rx_skbuff)
1538                         goto err_dma;
1539
1540                 if (priv->extend_desc) {
1541                         rx_q->dma_erx = dma_zalloc_coherent(priv->device,
1542                                                             DMA_RX_SIZE *
1543                                                             sizeof(struct
1544                                                             dma_extended_desc),
1545                                                             &rx_q->dma_rx_phy,
1546                                                             GFP_KERNEL);
1547                         if (!rx_q->dma_erx)
1548                                 goto err_dma;
1549
1550                 } else {
1551                         rx_q->dma_rx = dma_zalloc_coherent(priv->device,
1552                                                            DMA_RX_SIZE *
1553                                                            sizeof(struct
1554                                                            dma_desc),
1555                                                            &rx_q->dma_rx_phy,
1556                                                            GFP_KERNEL);
1557                         if (!rx_q->dma_rx)
1558                                 goto err_dma;
1559                 }
1560         }
1561
1562         return 0;
1563
1564 err_dma:
1565         free_dma_rx_desc_resources(priv);
1566
1567         return ret;
1568 }
1569
1570 /**
1571  * alloc_dma_tx_desc_resources - alloc TX resources.
1572  * @priv: private structure
1573  * Description: according to which descriptor type is in use (extended or
1574  * basic), this function allocates the TX path resources: the descriptor
1575  * rings, the per-entry DMA bookkeeping, and the skb pointer array used
1576  * during transmission.
1577  */
1578 static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
1579 {
1580         u32 tx_count = priv->plat->tx_queues_to_use;
1581         int ret = -ENOMEM;
1582         u32 queue;
1583
1584         /* TX queues buffers and DMA */
1585         for (queue = 0; queue < tx_count; queue++) {
1586                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1587
1588                 tx_q->queue_index = queue;
1589                 tx_q->priv_data = priv;
1590
1591                 tx_q->tx_skbuff_dma = kmalloc_array(DMA_TX_SIZE,
1592                                                     sizeof(*tx_q->tx_skbuff_dma),
1593                                                     GFP_KERNEL);
1594                 if (!tx_q->tx_skbuff_dma)
1595                         goto err_dma;
1596
1597                 tx_q->tx_skbuff = kmalloc_array(DMA_TX_SIZE,
1598                                                 sizeof(struct sk_buff *),
1599                                                 GFP_KERNEL);
1600                 if (!tx_q->tx_skbuff)
1601                         goto err_dma;
1602
1603                 if (priv->extend_desc) {
1604                         tx_q->dma_etx = dma_zalloc_coherent(priv->device,
1605                                                             DMA_TX_SIZE *
1606                                                             sizeof(struct
1607                                                             dma_extended_desc),
1608                                                             &tx_q->dma_tx_phy,
1609                                                             GFP_KERNEL);
1610                         if (!tx_q->dma_etx)
1611                                 goto err_dma;
1612                 } else {
1613                         tx_q->dma_tx = dma_zalloc_coherent(priv->device,
1614                                                            DMA_TX_SIZE *
1615                                                            sizeof(struct
1616                                                                   dma_desc),
1617                                                            &tx_q->dma_tx_phy,
1618                                                            GFP_KERNEL);
1619                         if (!tx_q->dma_tx)
1620                                 goto err_dma;
1621                 }
1622         }
1623
1624         return 0;
1625
1626 err_dma:
1627         free_dma_tx_desc_resources(priv);
1628
1629         return ret;
1630 }
1631
1632 /**
1633  * alloc_dma_desc_resources - alloc TX/RX resources.
1634  * @priv: private structure
1635  * Description: according to which descriptor type is in use (extended or
1636  * basic), this function allocates the resources for the TX and RX paths.
1637  * For reception, for example, it pre-allocates the RX bookkeeping needed
1638  * to allow the zero-copy mechanism.
1639  */
1640 static int alloc_dma_desc_resources(struct stmmac_priv *priv)
1641 {
1642         /* RX Allocation */
1643         int ret = alloc_dma_rx_desc_resources(priv);
1644
1645         if (ret)
1646                 return ret;
1647
1648         ret = alloc_dma_tx_desc_resources(priv);
1649
1650         return ret;
1651 }
1652
1653 /**
1654  * free_dma_desc_resources - free dma desc resources
1655  * @priv: private structure
1656  */
1657 static void free_dma_desc_resources(struct stmmac_priv *priv)
1658 {
1659         /* Release the DMA RX socket buffers */
1660         free_dma_rx_desc_resources(priv);
1661
1662         /* Release the DMA TX socket buffers */
1663         free_dma_tx_desc_resources(priv);
1664 }
1665
1666 /**
1667  *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
1668  *  @priv: driver private structure
1669  *  Description: It is used for enabling the rx queues in the MAC
1670  */
1671 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
1672 {
1673         u32 rx_queues_count = priv->plat->rx_queues_to_use;
1674         int queue;
1675         u8 mode;
1676
1677         for (queue = 0; queue < rx_queues_count; queue++) {
1678                 mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
1679                 stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
1680         }
1681 }
1682
1683 /**
1684  * stmmac_start_rx_dma - start RX DMA channel
1685  * @priv: driver private structure
1686  * @chan: RX channel index
1687  * Description:
1688  * This starts a RX DMA channel
1689  */
1690 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
1691 {
1692         netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
1693         stmmac_start_rx(priv, priv->ioaddr, chan);
1694 }
1695
1696 /**
1697  * stmmac_start_tx_dma - start TX DMA channel
1698  * @priv: driver private structure
1699  * @chan: TX channel index
1700  * Description:
1701  * This starts a TX DMA channel
1702  */
1703 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
1704 {
1705         netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
1706         stmmac_start_tx(priv, priv->ioaddr, chan);
1707 }
1708
1709 /**
1710  * stmmac_stop_rx_dma - stop RX DMA channel
1711  * @priv: driver private structure
1712  * @chan: RX channel index
1713  * Description:
1714  * This stops a RX DMA channel
1715  */
1716 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
1717 {
1718         netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
1719         stmmac_stop_rx(priv, priv->ioaddr, chan);
1720 }
1721
1722 /**
1723  * stmmac_stop_tx_dma - stop TX DMA channel
1724  * @priv: driver private structure
1725  * @chan: TX channel index
1726  * Description:
1727  * This stops a TX DMA channel
1728  */
1729 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
1730 {
1731         netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
1732         stmmac_stop_tx(priv, priv->ioaddr, chan);
1733 }
1734
1735 /**
1736  * stmmac_start_all_dma - start all RX and TX DMA channels
1737  * @priv: driver private structure
1738  * Description:
1739  * This starts all the RX and TX DMA channels
1740  */
1741 static void stmmac_start_all_dma(struct stmmac_priv *priv)
1742 {
1743         u32 rx_channels_count = priv->plat->rx_queues_to_use;
1744         u32 tx_channels_count = priv->plat->tx_queues_to_use;
1745         u32 chan = 0;
1746
1747         for (chan = 0; chan < rx_channels_count; chan++)
1748                 stmmac_start_rx_dma(priv, chan);
1749
1750         for (chan = 0; chan < tx_channels_count; chan++)
1751                 stmmac_start_tx_dma(priv, chan);
1752 }
1753
1754 /**
1755  * stmmac_stop_all_dma - stop all RX and TX DMA channels
1756  * @priv: driver private structure
1757  * Description:
1758  * This stops the RX and TX DMA channels
1759  */
1760 static void stmmac_stop_all_dma(struct stmmac_priv *priv)
1761 {
1762         u32 rx_channels_count = priv->plat->rx_queues_to_use;
1763         u32 tx_channels_count = priv->plat->tx_queues_to_use;
1764         u32 chan = 0;
1765
1766         for (chan = 0; chan < rx_channels_count; chan++)
1767                 stmmac_stop_rx_dma(priv, chan);
1768
1769         for (chan = 0; chan < tx_channels_count; chan++)
1770                 stmmac_stop_tx_dma(priv, chan);
1771 }
1772
1773 /**
1774  *  stmmac_dma_operation_mode - HW DMA operation mode
1775  *  @priv: driver private structure
1776  *  Description: it is used for configuring the DMA operation mode register in
1777  *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
1778  */
1779 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
1780 {
1781         u32 rx_channels_count = priv->plat->rx_queues_to_use;
1782         u32 tx_channels_count = priv->plat->tx_queues_to_use;
1783         int rxfifosz = priv->plat->rx_fifo_size;
1784         int txfifosz = priv->plat->tx_fifo_size;
1785         u32 txmode = 0;
1786         u32 rxmode = 0;
1787         u32 chan = 0;
1788         u8 qmode = 0;
1789
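        /* Fall back to the FIFO sizes reported in the HW capability
         * register when the platform does not provide them.
         */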
1790         if (rxfifosz == 0)
1791                 rxfifosz = priv->dma_cap.rx_fifo_size;
1792         if (txfifosz == 0)
1793                 txfifosz = priv->dma_cap.tx_fifo_size;
1794
1795         /* Adjust for real per queue fifo size */
1796         rxfifosz /= rx_channels_count;
1797         txfifosz /= tx_channels_count;
1798
1799         if (priv->plat->force_thresh_dma_mode) {
1800                 txmode = tc;
1801                 rxmode = tc;
1802         } else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
1803                 /*
1804                  * In case of GMAC, SF mode can be enabled
1805                  * to perform the TX COE in HW. This depends on:
1806                  * 1) TX COE if actually supported
1807                  * 2) There is no bugged Jumbo frame support
1808                  *    that needs to not insert csum in the TDES.
1809                  */
1810                 txmode = SF_DMA_MODE;
1811                 rxmode = SF_DMA_MODE;
1812                 priv->xstats.threshold = SF_DMA_MODE;
1813         } else {
1814                 txmode = tc;
1815                 rxmode = SF_DMA_MODE;
1816         }
1817
1818         /* configure all channels */
1819         for (chan = 0; chan < rx_channels_count; chan++) {
1820                 qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
1821
1822                 stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
1823                                 rxfifosz, qmode);
1824                 stmmac_set_dma_bfsize(priv, priv->ioaddr, priv->dma_buf_sz,
1825                                 chan);
1826         }
1827
1828         for (chan = 0; chan < tx_channels_count; chan++) {
1829                 qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
1830
1831                 stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
1832                                 txfifosz, qmode);
1833         }
1834 }
1835
1836 /**
1837  * stmmac_tx_clean - to manage the transmission completion
1838  * @priv: driver private structure
 * @budget: napi budget limiting the number of descriptors reclaimed
1839  * @queue: TX queue index
1840  * Description: it reclaims the transmit resources after transmission completes.
1841  */
1842 static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
1843 {
1844         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1845         unsigned int bytes_compl = 0, pkts_compl = 0;
1846         unsigned int entry, count = 0;
1847
1848         __netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
1849
1850         priv->xstats.tx_clean++;
1851
1852         entry = tx_q->dirty_tx;
1853         while ((entry != tx_q->cur_tx) && (count < budget)) {
1854                 struct sk_buff *skb = tx_q->tx_skbuff[entry];
1855                 struct dma_desc *p;
1856                 int status;
1857
1858                 if (priv->extend_desc)
1859                         p = (struct dma_desc *)(tx_q->dma_etx + entry);
1860                 else
1861                         p = tx_q->dma_tx + entry;
1862
1863                 status = stmmac_tx_status(priv, &priv->dev->stats,
1864                                 &priv->xstats, p, priv->ioaddr);
1865                 /* Check if the descriptor is owned by the DMA */
1866                 if (unlikely(status & tx_dma_own))
1867                         break;
1868
1869                 count++;
1870
1871                 /* Make sure descriptor fields are read after reading
1872                  * the own bit.
1873                  */
1874                 dma_rmb();
1875
1876                 /* Just consider the last segment and ...*/
1877                 if (likely(!(status & tx_not_ls))) {
1878                         /* ... verify the status error condition */
1879                         if (unlikely(status & tx_err)) {
1880                                 priv->dev->stats.tx_errors++;
1881                         } else {
1882                                 priv->dev->stats.tx_packets++;
1883                                 priv->xstats.tx_pkt_n++;
1884                         }
1885                         stmmac_get_tx_hwtstamp(priv, p, skb);
1886                 }
1887
1888                 if (likely(tx_q->tx_skbuff_dma[entry].buf)) {
1889                         if (tx_q->tx_skbuff_dma[entry].map_as_page)
1890                                 dma_unmap_page(priv->device,
1891                                                tx_q->tx_skbuff_dma[entry].buf,
1892                                                tx_q->tx_skbuff_dma[entry].len,
1893                                                DMA_TO_DEVICE);
1894                         else
1895                                 dma_unmap_single(priv->device,
1896                                                  tx_q->tx_skbuff_dma[entry].buf,
1897                                                  tx_q->tx_skbuff_dma[entry].len,
1898                                                  DMA_TO_DEVICE);
1899                         tx_q->tx_skbuff_dma[entry].buf = 0;
1900                         tx_q->tx_skbuff_dma[entry].len = 0;
1901                         tx_q->tx_skbuff_dma[entry].map_as_page = false;
1902                 }
1903
1904                 stmmac_clean_desc3(priv, tx_q, p);
1905
1906                 tx_q->tx_skbuff_dma[entry].last_segment = false;
1907                 tx_q->tx_skbuff_dma[entry].is_jumbo = false;
1908
1909                 if (likely(skb != NULL)) {
1910                         pkts_compl++;
1911                         bytes_compl += skb->len;
1912                         dev_consume_skb_any(skb);
1913                         tx_q->tx_skbuff[entry] = NULL;
1914                 }
1915
1916                 stmmac_release_tx_desc(priv, p, priv->mode);
1917
1918                 entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
1919         }
1920         tx_q->dirty_tx = entry;
1921
1922         netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
1923                                   pkts_compl, bytes_compl);
1924
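        /* Restart a stopped queue once the number of free descriptors is
         * back above the TX threshold.
         */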
1925         if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
1926                                                                 queue))) &&
1927             stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH) {
1928
1929                 netif_dbg(priv, tx_done, priv->dev,
1930                           "%s: restart transmit\n", __func__);
1931                 netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
1932         }
1933
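        /* If EEE is enabled and the TX path has left low power mode, try to
         * re-enter LPI and rearm the LPI entry timer.
         */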
1934         if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
1935                 stmmac_enable_eee_mode(priv);
1936                 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
1937         }
1938
1939         __netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
1940
1941         return count;
1942 }
1943
1944 /**
1945  * stmmac_tx_err - to manage the tx error
1946  * @priv: driver private structure
1947  * @chan: channel index
1948  * Description: it cleans the descriptors and restarts the transmission
1949  * in case of transmission errors.
1950  */
1951 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
1952 {
1953         struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
1954         int i;
1955
1956         netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
1957
1958         stmmac_stop_tx_dma(priv, chan);
1959         dma_free_tx_skbufs(priv, chan);
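        /* Reinitialize every TX descriptor; the last entry is flagged as the
         * end of the ring.
         */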
1960         for (i = 0; i < DMA_TX_SIZE; i++)
1961                 if (priv->extend_desc)
1962                         stmmac_init_tx_desc(priv, &tx_q->dma_etx[i].basic,
1963                                         priv->mode, (i == DMA_TX_SIZE - 1));
1964                 else
1965                         stmmac_init_tx_desc(priv, &tx_q->dma_tx[i],
1966                                         priv->mode, (i == DMA_TX_SIZE - 1));
1967         tx_q->dirty_tx = 0;
1968         tx_q->cur_tx = 0;
1969         tx_q->mss = 0;
1970         netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, chan));
1971         stmmac_start_tx_dma(priv, chan);
1972
1973         priv->dev->stats.tx_errors++;
1974         netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
1975 }
1976
1977 /**
1978  *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
1979  *  @priv: driver private structure
1980  *  @txmode: TX operating mode
1981  *  @rxmode: RX operating mode
1982  *  @chan: channel index
1983  *  Description: it is used for configuring the DMA operation mode at
1984  *  runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
1985  *  mode.
1986  */
1987 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
1988                                           u32 rxmode, u32 chan)
1989 {
1990         u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
1991         u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
1992         u32 rx_channels_count = priv->plat->rx_queues_to_use;
1993         u32 tx_channels_count = priv->plat->tx_queues_to_use;
1994         int rxfifosz = priv->plat->rx_fifo_size;
1995         int txfifosz = priv->plat->tx_fifo_size;
1996
1997         if (rxfifosz == 0)
1998                 rxfifosz = priv->dma_cap.rx_fifo_size;
1999         if (txfifosz == 0)
2000                 txfifosz = priv->dma_cap.tx_fifo_size;
2001
2002         /* Adjust for real per queue fifo size */
2003         rxfifosz /= rx_channels_count;
2004         txfifosz /= tx_channels_count;
2005
2006         stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
2007         stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
2008 }
2009
2010 static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
2011 {
2012         int ret;
2013
2014         ret = stmmac_safety_feat_irq_status(priv, priv->dev,
2015                         priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
2016         if (ret && (ret != -EINVAL)) {
2017                 stmmac_global_err(priv);
2018                 return true;
2019         }
2020
2021         return false;
2022 }
2023
2024 static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan)
2025 {
2026         int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
2027                                                  &priv->xstats, chan);
2028         struct stmmac_channel *ch = &priv->channel[chan];
2029         bool needs_work = false;
2030
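        /* Only consider the directions this channel actually services and
         * clear the status bits that will not be handled here, so the caller
         * does not act on them.
         */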
2031         if ((status & handle_rx) && ch->has_rx) {
2032                 needs_work = true;
2033         } else {
2034                 status &= ~handle_rx;
2035         }
2036
2037         if ((status & handle_tx) && ch->has_tx) {
2038                 needs_work = true;
2039         } else {
2040                 status &= ~handle_tx;
2041         }
2042
2043         if (needs_work && napi_schedule_prep(&ch->napi)) {
2044                 stmmac_disable_dma_irq(priv, priv->ioaddr, chan);
2045                 __napi_schedule(&ch->napi);
2046         }
2047
2048         return status;
2049 }
2050
2051 /**
2052  * stmmac_dma_interrupt - DMA ISR
2053  * @priv: driver private structure
2054  * Description: this is the DMA ISR. It is called by the main ISR.
2055  * It calls the dwmac dma routine and schedules the poll method when there
2056  * is work to be done.
2057  */
2058 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
2059 {
2060         u32 tx_channel_count = priv->plat->tx_queues_to_use;
2061         u32 rx_channel_count = priv->plat->rx_queues_to_use;
2062         u32 channels_to_check = tx_channel_count > rx_channel_count ?
2063                                 tx_channel_count : rx_channel_count;
2064         u32 chan;
2065         int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
2066
2067         /* Make sure we never check beyond our status buffer. */
2068         if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
2069                 channels_to_check = ARRAY_SIZE(status);
2070
2071         for (chan = 0; chan < channels_to_check; chan++)
2072                 status[chan] = stmmac_napi_check(priv, chan);
2073
2074         for (chan = 0; chan < tx_channel_count; chan++) {
2075                 if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
2076                         /* Try to bump up the dma threshold on this failure */
2077                         if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
2078                             (tc <= 256)) {
2079                                 tc += 64;
2080                                 if (priv->plat->force_thresh_dma_mode)
2081                                         stmmac_set_dma_operation_mode(priv,
2082                                                                       tc,
2083                                                                       tc,
2084                                                                       chan);
2085                                 else
2086                                         stmmac_set_dma_operation_mode(priv,
2087                                                                     tc,
2088                                                                     SF_DMA_MODE,
2089                                                                     chan);
2090                                 priv->xstats.threshold = tc;
2091                         }
2092                 } else if (unlikely(status[chan] == tx_hard_error)) {
2093                         stmmac_tx_err(priv, chan);
2094                 }
2095         }
2096 }
2097
2098 /**
2099  * stmmac_mmc_setup - setup the MAC Management Counters (MMC)
2100  * @priv: driver private structure
2101  * Description: this masks the MMC irq; the counters themselves are managed in SW.
2102  */
2103 static void stmmac_mmc_setup(struct stmmac_priv *priv)
2104 {
2105         unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
2106                             MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
2107
2108         dwmac_mmc_intr_all_mask(priv->mmcaddr);
2109
2110         if (priv->dma_cap.rmon) {
2111                 dwmac_mmc_ctrl(priv->mmcaddr, mode);
2112                 memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
2113         } else
2114                 netdev_info(priv->dev, "No MAC Management Counters available\n");
2115 }
2116
2117 /**
2118  * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
2119  * @priv: driver private structure
2120  * Description:
2121  *  new GMAC chip generations have a new register to indicate the
2122  *  presence of optional features/functions.
2123  *  This can also be used to override the value passed through the
2124  *  platform and necessary for old MAC10/100 and GMAC chips.
2125  */
2126 static int stmmac_get_hw_features(struct stmmac_priv *priv)
2127 {
2128         return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
2129 }
2130
2131 /**
2132  * stmmac_check_ether_addr - check if the MAC addr is valid
2133  * @priv: driver private structure
2134  * Description:
2135  * it verifies that the MAC address is valid; if it is not, the address is
2136  * read from the hardware and, failing that, a random MAC address is generated
2137  */
2138 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2139 {
2140         if (!is_valid_ether_addr(priv->dev->dev_addr)) {
2141                 stmmac_get_umac_addr(priv, priv->hw, priv->dev->dev_addr, 0);
2142                 if (!is_valid_ether_addr(priv->dev->dev_addr))
2143                         eth_hw_addr_random(priv->dev);
2144                 netdev_info(priv->dev, "device MAC address %pM\n",
2145                             priv->dev->dev_addr);
2146         }
2147 }
2148
2149 /**
2150  * stmmac_init_dma_engine - DMA init.
2151  * @priv: driver private structure
2152  * Description:
2153  * It inits the DMA invoking the specific MAC/GMAC callback.
2154  * Some DMA parameters can be passed from the platform;
2155  * if they are not passed, a default is kept for the MAC or GMAC.
2156  */
2157 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
2158 {
2159         u32 rx_channels_count = priv->plat->rx_queues_to_use;
2160         u32 tx_channels_count = priv->plat->tx_queues_to_use;
2161         u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2162         struct stmmac_rx_queue *rx_q;
2163         struct stmmac_tx_queue *tx_q;
2164         u32 chan = 0;
2165         int atds = 0;
2166         int ret = 0;
2167
2168         if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
2169                 dev_err(priv->device, "Invalid DMA configuration\n");
2170                 return -EINVAL;
2171         }
2172
2173         if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
2174                 atds = 1;
2175
2176         ret = stmmac_reset(priv, priv->ioaddr);
2177         if (ret) {
2178                 dev_err(priv->device, "Failed to reset the dma\n");
2179                 return ret;
2180         }
2181
2182         /* DMA Configuration */
2183         stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds);
2184
2185         if (priv->plat->axi)
2186                 stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
2187
2188         /* DMA CSR Channel configuration */
2189         for (chan = 0; chan < dma_csr_ch; chan++)
2190                 stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
2191
2192         /* DMA RX Channel Configuration */
2193         for (chan = 0; chan < rx_channels_count; chan++) {
2194                 rx_q = &priv->rx_queue[chan];
2195
2196                 stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2197                                     rx_q->dma_rx_phy, chan);
2198
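                /* Point the RX tail just past the last descriptor so that the
                 * whole ring is initially available to the DMA.
                 */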
2199                 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
2200                             (DMA_RX_SIZE * sizeof(struct dma_desc));
2201                 stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
2202                                        rx_q->rx_tail_addr, chan);
2203         }
2204
2205         /* DMA TX Channel Configuration */
2206         for (chan = 0; chan < tx_channels_count; chan++) {
2207                 tx_q = &priv->tx_queue[chan];
2208
2209                 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2210                                     tx_q->dma_tx_phy, chan);
2211
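                /* The TX tail starts at the ring base and is advanced as
                 * descriptors are handed over to the DMA.
                 */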
2212                 tx_q->tx_tail_addr = tx_q->dma_tx_phy;
2213                 stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
2214                                        tx_q->tx_tail_addr, chan);
2215         }
2216
2217         return ret;
2218 }
2219
2220 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
2221 {
2222         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2223
2224         mod_timer(&tx_q->txtimer, STMMAC_COAL_TIMER(priv->tx_coal_timer));
2225 }
2226
2227 /**
2228  * stmmac_tx_timer - mitigation sw timer for tx.
2229  * @t: timer_list pointer
2230  * Description:
2231  * This is the timer handler; it schedules the channel NAPI, which invokes stmmac_tx_clean.
2232  */
2233 static void stmmac_tx_timer(struct timer_list *t)
2234 {
2235         struct stmmac_tx_queue *tx_q = from_timer(tx_q, t, txtimer);
2236         struct stmmac_priv *priv = tx_q->priv_data;
2237         struct stmmac_channel *ch;
2238
2239         ch = &priv->channel[tx_q->queue_index];
2240
2241         if (likely(napi_schedule_prep(&ch->napi)))
2242                 __napi_schedule(&ch->napi);
2243 }
2244
2245 /**
2246  * stmmac_init_tx_coalesce - init tx mitigation options.
2247  * @priv: driver private structure
2248  * Description:
2249  * This inits the transmit coalesce parameters: i.e. timer rate,
2250  * timer handler and default threshold used for enabling the
2251  * interrupt on completion bit.
2252  */
2253 static void stmmac_init_tx_coalesce(struct stmmac_priv *priv)
2254 {
2255         u32 tx_channel_count = priv->plat->tx_queues_to_use;
2256         u32 chan;
2257
2258         priv->tx_coal_frames = STMMAC_TX_FRAMES;
2259         priv->tx_coal_timer = STMMAC_COAL_TX_TIMER;
2260
2261         for (chan = 0; chan < tx_channel_count; chan++) {
2262                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
2263
2264                 timer_setup(&tx_q->txtimer, stmmac_tx_timer, 0);
2265         }
2266 }
2267
2268 static void stmmac_set_rings_length(struct stmmac_priv *priv)
2269 {
2270         u32 rx_channels_count = priv->plat->rx_queues_to_use;
2271         u32 tx_channels_count = priv->plat->tx_queues_to_use;
2272         u32 chan;
2273
2274         /* set TX ring length */
2275         for (chan = 0; chan < tx_channels_count; chan++)
2276                 stmmac_set_tx_ring_len(priv, priv->ioaddr,
2277                                 (DMA_TX_SIZE - 1), chan);
2278
2279         /* set RX ring length */
2280         for (chan = 0; chan < rx_channels_count; chan++)
2281                 stmmac_set_rx_ring_len(priv, priv->ioaddr,
2282                                 (DMA_RX_SIZE - 1), chan);
2283 }
2284
2285 /**
2286  *  stmmac_set_tx_queue_weight - Set TX queue weight
2287  *  @priv: driver private structure
2288  *  Description: It is used for setting the TX queue weights
2289  */
2290 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
2291 {
2292         u32 tx_queues_count = priv->plat->tx_queues_to_use;
2293         u32 weight;
2294         u32 queue;
2295
2296         for (queue = 0; queue < tx_queues_count; queue++) {
2297                 weight = priv->plat->tx_queues_cfg[queue].weight;
2298                 stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
2299         }
2300 }
2301
2302 /**
2303  *  stmmac_configure_cbs - Configure CBS in TX queue
2304  *  @priv: driver private structure
2305  *  Description: It is used for configuring CBS in AVB TX queues
2306  */
2307 static void stmmac_configure_cbs(struct stmmac_priv *priv)
2308 {
2309         u32 tx_queues_count = priv->plat->tx_queues_to_use;
2310         u32 mode_to_use;
2311         u32 queue;
2312
2313         /* queue 0 is reserved for legacy traffic */
2314         for (queue = 1; queue < tx_queues_count; queue++) {
2315                 mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
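                /* CBS only applies to AVB queues; plain DCB queues are
                 * skipped.
                 */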
2316                 if (mode_to_use == MTL_QUEUE_DCB)
2317                         continue;
2318
2319                 stmmac_config_cbs(priv, priv->hw,
2320                                 priv->plat->tx_queues_cfg[queue].send_slope,
2321                                 priv->plat->tx_queues_cfg[queue].idle_slope,
2322                                 priv->plat->tx_queues_cfg[queue].high_credit,
2323                                 priv->plat->tx_queues_cfg[queue].low_credit,
2324                                 queue);
2325         }
2326 }
2327
2328 /**
2329  *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
2330  *  @priv: driver private structure
2331  *  Description: It is used for mapping RX queues to RX dma channels
2332  */
2333 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
2334 {
2335         u32 rx_queues_count = priv->plat->rx_queues_to_use;
2336         u32 queue;
2337         u32 chan;
2338
2339         for (queue = 0; queue < rx_queues_count; queue++) {
2340                 chan = priv->plat->rx_queues_cfg[queue].chan;
2341                 stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
2342         }
2343 }
2344
2345 /**
2346  *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
2347  *  @priv: driver private structure
2348  *  Description: It is used for configuring the RX Queue Priority
2349  */
2350 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
2351 {
2352         u32 rx_queues_count = priv->plat->rx_queues_to_use;
2353         u32 queue;
2354         u32 prio;
2355
2356         for (queue = 0; queue < rx_queues_count; queue++) {
2357                 if (!priv->plat->rx_queues_cfg[queue].use_prio)
2358                         continue;
2359
2360                 prio = priv->plat->rx_queues_cfg[queue].prio;
2361                 stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
2362         }
2363 }
2364
2365 /**
2366  *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
2367  *  @priv: driver private structure
2368  *  Description: It is used for configuring the TX Queue Priority
2369  */
2370 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
2371 {
2372         u32 tx_queues_count = priv->plat->tx_queues_to_use;
2373         u32 queue;
2374         u32 prio;
2375
2376         for (queue = 0; queue < tx_queues_count; queue++) {
2377                 if (!priv->plat->tx_queues_cfg[queue].use_prio)
2378                         continue;
2379
2380                 prio = priv->plat->tx_queues_cfg[queue].prio;
2381                 stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
2382         }
2383 }
2384
2385 /**
2386  *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
2387  *  @priv: driver private structure
2388  *  Description: It is used for configuring the RX queue routing
2389  */
2390 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
2391 {
2392         u32 rx_queues_count = priv->plat->rx_queues_to_use;
2393         u32 queue;
2394         u8 packet;
2395
2396         for (queue = 0; queue < rx_queues_count; queue++) {
2397                 /* no specific packet type routing specified for the queue */
2398                 if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
2399                         continue;
2400
2401                 packet = priv->plat->rx_queues_cfg[queue].pkt_route;
2402                 stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
2403         }
2404 }
2405
2406 /**
2407  *  stmmac_mtl_configuration - Configure MTL
2408  *  @priv: driver private structure
2409  *  Description: It is used for configuring the MTL
2410  */
2411 static void stmmac_mtl_configuration(struct stmmac_priv *priv)
2412 {
2413         u32 rx_queues_count = priv->plat->rx_queues_to_use;
2414         u32 tx_queues_count = priv->plat->tx_queues_to_use;
2415
2416         if (tx_queues_count > 1)
2417                 stmmac_set_tx_queue_weight(priv);
2418
2419         /* Configure MTL RX algorithms */
2420         if (rx_queues_count > 1)
2421                 stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
2422                                 priv->plat->rx_sched_algorithm);
2423
2424         /* Configure MTL TX algorithms */
2425         if (tx_queues_count > 1)
2426                 stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
2427                                 priv->plat->tx_sched_algorithm);
2428
2429         /* Configure CBS in AVB TX queues */
2430         if (tx_queues_count > 1)
2431                 stmmac_configure_cbs(priv);
2432
2433         /* Map RX MTL to DMA channels */
2434         stmmac_rx_queue_dma_chan_map(priv);
2435
2436         /* Enable MAC RX Queues */
2437         stmmac_mac_enable_rx_queues(priv);
2438
2439         /* Set RX priorities */
2440         if (rx_queues_count > 1)
2441                 stmmac_mac_config_rx_queues_prio(priv);
2442
2443         /* Set TX priorities */
2444         if (tx_queues_count > 1)
2445                 stmmac_mac_config_tx_queues_prio(priv);
2446
2447         /* Set RX routing */
2448         if (rx_queues_count > 1)
2449                 stmmac_mac_config_rx_queues_routing(priv);
2450 }
2451
2452 static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
2453 {
2454         if (priv->dma_cap.asp) {
2455                 netdev_info(priv->dev, "Enabling Safety Features\n");
2456                 stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp);
2457         } else {
2458                 netdev_info(priv->dev, "No Safety Features support found\n");
2459         }
2460 }
2461
2462 /**
2463  * stmmac_hw_setup - setup the MAC in a usable state.
2464  *  @dev : pointer to the device structure.
 *  @init_ptp: initialize the PTP clock and related resources when true
2465  *  Description:
2466  *  this is the main function to setup the HW in a usable state because the
2467  *  dma engine is reset, the core registers are configured (e.g. AXI,
2468  *  Checksum features, timers). The DMA is ready to start receiving and
2469  *  transmitting.
2470  *  Return value:
2471  *  0 on success and an appropriate (-)ve integer as defined in errno.h
2472  *  file on failure.
2473  */
2474 static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
2475 {
2476         struct stmmac_priv *priv = netdev_priv(dev);
2477         u32 rx_cnt = priv->plat->rx_queues_to_use;
2478         u32 tx_cnt = priv->plat->tx_queues_to_use;
2479         u32 chan;
2480         int ret;
2481
2482         /* DMA initialization and SW reset */
2483         ret = stmmac_init_dma_engine(priv);
2484         if (ret < 0) {
2485                 netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
2486                            __func__);
2487                 return ret;
2488         }
2489
2490         /* Copy the MAC addr into the HW  */
2491         stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
2492
2493         /* PS and related bits will be programmed according to the speed */
2494         if (priv->hw->pcs) {
2495                 int speed = priv->plat->mac_port_sel_speed;
2496
2497                 if ((speed == SPEED_10) || (speed == SPEED_100) ||
2498                     (speed == SPEED_1000)) {
2499                         priv->hw->ps = speed;
2500                 } else {
2501                         dev_warn(priv->device, "invalid port speed\n");
2502                         priv->hw->ps = 0;
2503                 }
2504         }
2505
2506         /* Initialize the MAC Core */
2507         stmmac_core_init(priv, priv->hw, dev);
2508
2509         /* Initialize MTL*/
2510         stmmac_mtl_configuration(priv);
2511
2512         /* Initialize Safety Features */
2513         stmmac_safety_feat_configuration(priv);
2514
2515         ret = stmmac_rx_ipc(priv, priv->hw);
2516         if (!ret) {
2517                 netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
2518                 priv->plat->rx_coe = STMMAC_RX_COE_NONE;
2519                 priv->hw->rx_csum = 0;
2520         }
2521
2522         /* Enable the MAC Rx/Tx */
2523         stmmac_mac_set(priv, priv->ioaddr, true);
2524
2525         /* Set the HW DMA mode and the COE */
2526         stmmac_dma_operation_mode(priv);
2527
2528         stmmac_mmc_setup(priv);
2529
2530         if (init_ptp) {
2531                 ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
2532                 if (ret < 0)
2533                         netdev_warn(priv->dev, "failed to enable PTP reference clock: %d\n", ret);
2534
2535                 ret = stmmac_init_ptp(priv);
2536                 if (ret == -EOPNOTSUPP)
2537                         netdev_warn(priv->dev, "PTP not supported by HW\n");
2538                 else if (ret)
2539                         netdev_warn(priv->dev, "PTP init failed\n");
2540         }
2541
2542         priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
2543
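        /* If supported, program the RX interrupt watchdog to coalesce RX
         * interrupts.
         */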
2544         if (priv->use_riwt) {
2545                 ret = stmmac_rx_watchdog(priv, priv->ioaddr, MAX_DMA_RIWT, rx_cnt);
2546                 if (!ret)
2547                         priv->rx_riwt = MAX_DMA_RIWT;
2548         }
2549
2550         if (priv->hw->pcs)
2551                 stmmac_pcs_ctrl_ane(priv, priv->hw, 1, priv->hw->ps, 0);
2552
2553         /* set TX and RX rings length */
2554         stmmac_set_rings_length(priv);
2555
2556         /* Enable TSO */
2557         if (priv->tso) {
2558                 for (chan = 0; chan < tx_cnt; chan++)
2559                         stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
2560         }
2561
2562         /* Start the ball rolling... */
2563         stmmac_start_all_dma(priv);
2564
2565         return 0;
2566 }
2567
2568 static void stmmac_hw_teardown(struct net_device *dev)
2569 {
2570         struct stmmac_priv *priv = netdev_priv(dev);
2571
2572         clk_disable_unprepare(priv->plat->clk_ptp_ref);
2573 }
2574
2575 /**
2576  *  stmmac_open - open entry point of the driver
2577  *  @dev : pointer to the device structure.
2578  *  Description:
2579  *  This function is the open entry point of the driver.
2580  *  Return value:
2581  *  0 on success and an appropriate (-)ve integer as defined in errno.h
2582  *  file on failure.
2583  */
2584 static int stmmac_open(struct net_device *dev)
2585 {
2586         struct stmmac_priv *priv = netdev_priv(dev);
2587         u32 chan;
2588         int ret;
2589
2590         if (priv->hw->pcs != STMMAC_PCS_RGMII &&
2591             priv->hw->pcs != STMMAC_PCS_TBI &&
2592             priv->hw->pcs != STMMAC_PCS_RTBI) {
2593                 ret = stmmac_init_phy(dev);
2594                 if (ret) {
2595                         netdev_err(priv->dev,
2596                                    "%s: Cannot attach to PHY (error: %d)\n",
2597                                    __func__, ret);
2598                         return ret;
2599                 }
2600         }
2601
2602         /* Extra statistics */
2603         memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
2604         priv->xstats.threshold = tc;
2605
2606         priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
2607         priv->rx_copybreak = STMMAC_RX_COPYBREAK;
2608
2609         ret = alloc_dma_desc_resources(priv);
2610         if (ret < 0) {
2611                 netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
2612                            __func__);
2613                 goto dma_desc_error;
2614         }
2615
2616         ret = init_dma_desc_rings(dev, GFP_KERNEL);
2617         if (ret < 0) {
2618                 netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
2619                            __func__);
2620                 goto init_error;
2621         }
2622
2623         ret = stmmac_hw_setup(dev, true);
2624         if (ret < 0) {
2625                 netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
2626                 goto init_error;
2627         }
2628
2629         stmmac_init_tx_coalesce(priv);
2630
2631         if (dev->phydev)
2632                 phy_start(dev->phydev);
2633
2634         /* Request the IRQ lines */
2635         ret = request_irq(dev->irq, stmmac_interrupt,
2636                           IRQF_SHARED, dev->name, dev);
2637         if (unlikely(ret < 0)) {
2638                 netdev_err(priv->dev,
2639                            "%s: ERROR: allocating the IRQ %d (error: %d)\n",
2640                            __func__, dev->irq, ret);
2641                 goto irq_error;
2642         }
2643
2644         /* Request the Wake IRQ in case another line is used for WoL */
2645         if (priv->wol_irq != dev->irq) {
2646                 ret = request_irq(priv->wol_irq, stmmac_interrupt,
2647                                   IRQF_SHARED, dev->name, dev);
2648                 if (unlikely(ret < 0)) {
2649                         netdev_err(priv->dev,
2650                                    "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
2651                                    __func__, priv->wol_irq, ret);
2652                         goto wolirq_error;
2653                 }
2654         }
2655
2656         /* Request the LPI IRQ in case a separate line is used for it */
2657         if (priv->lpi_irq > 0) {
2658                 ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED,
2659                                   dev->name, dev);
2660                 if (unlikely(ret < 0)) {
2661                         netdev_err(priv->dev,
2662                                    "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
2663                                    __func__, priv->lpi_irq, ret);
2664                         goto lpiirq_error;
2665                 }
2666         }
2667
2668         stmmac_enable_all_queues(priv);
2669         netif_tx_start_all_queues(priv->dev);
2670
2671         return 0;
2672
2673 lpiirq_error:
2674         if (priv->wol_irq != dev->irq)
2675                 free_irq(priv->wol_irq, dev);
2676 wolirq_error:
2677         free_irq(dev->irq, dev);
2678 irq_error:
2679         if (dev->phydev)
2680                 phy_stop(dev->phydev);
2681
2682         for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
2683                 del_timer_sync(&priv->tx_queue[chan].txtimer);
2684
2685         stmmac_hw_teardown(dev);
2686 init_error:
2687         free_dma_desc_resources(priv);
2688 dma_desc_error:
2689         if (dev->phydev)
2690                 phy_disconnect(dev->phydev);
2691
2692         return ret;
2693 }
2694
2695 /**
2696  *  stmmac_release - close entry point of the driver
2697  *  @dev : device pointer.
2698  *  Description:
2699  *  This is the stop entry point of the driver.
2700  */
2701 static int stmmac_release(struct net_device *dev)
2702 {
2703         struct stmmac_priv *priv = netdev_priv(dev);
2704         u32 chan;
2705
2706         /* Stop and disconnect the PHY */
2707         if (dev->phydev) {
2708                 phy_stop(dev->phydev);
2709                 phy_disconnect(dev->phydev);
2710         }
2711
2712         stmmac_disable_all_queues(priv);
2713
2714         for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
2715                 del_timer_sync(&priv->tx_queue[chan].txtimer);
2716
2717         /* Free the IRQ lines */
2718         free_irq(dev->irq, dev);
2719         if (priv->wol_irq != dev->irq)
2720                 free_irq(priv->wol_irq, dev);
2721         if (priv->lpi_irq > 0)
2722                 free_irq(priv->lpi_irq, dev);
2723
2724         if (priv->eee_enabled) {
2725                 priv->tx_path_in_lpi_mode = false;
2726                 del_timer_sync(&priv->eee_ctrl_timer);
2727         }
2728
2729         /* Stop TX/RX DMA and clear the descriptors */
2730         stmmac_stop_all_dma(priv);
2731
2732         /* Release and free the Rx/Tx resources */
2733         free_dma_desc_resources(priv);
2734
2735         /* Disable the MAC Rx/Tx */
2736         stmmac_mac_set(priv, priv->ioaddr, false);
2737
2738         netif_carrier_off(dev);
2739
2740         stmmac_release_ptp(priv);
2741
2742         return 0;
2743 }
2744
2745 /**
2746  *  stmmac_tso_allocator - fill TX descriptors with the payload of a TSO frame
2747  *  @priv: driver private structure
2748  *  @des: buffer start address
2749  *  @total_len: total length to fill in descriptors
2750  *  @last_segment: condition for the last descriptor
2751  *  @queue: TX queue index
2752  *  Description:
2753  *  This function fills the descriptors, taking new ones as needed according
2754  *  to the buffer length to fill
2755  */
2756 static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des,
2757                                  int total_len, bool last_segment, u32 queue)
2758 {
2759         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2760         struct dma_desc *desc;
2761         u32 buff_size;
2762         int tmp_len;
2763
2764         tmp_len = total_len;
2765
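        /* Consume the payload in chunks of at most TSO_MAX_BUFF_SIZE bytes,
         * one descriptor per chunk; only the final chunk can be marked as the
         * last segment.
         */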
2766         while (tmp_len > 0) {
2767                 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2768                 WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
2769                 desc = tx_q->dma_tx + tx_q->cur_tx;
2770
2771                 desc->des0 = cpu_to_le32(des + (total_len - tmp_len));
2772                 buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
2773                             TSO_MAX_BUFF_SIZE : tmp_len;
2774
2775                 stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
2776                                 0, 1,
2777                                 (last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
2778                                 0, 0);
2779
2780                 tmp_len -= TSO_MAX_BUFF_SIZE;
2781         }
2782 }
2783
2784 /**
2785  *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
2786  *  @skb : the socket buffer
2787  *  @dev : device pointer
2788  *  Description: this is the transmit function that is called on TSO frames
2789  *  (support available on GMAC4 and newer chips).
2790  *  The diagram below shows the ring programming in the case of TSO frames:
2791  *
2792  *  First Descriptor
2793  *   --------
2794  *   | DES0 |---> buffer1 = L2/L3/L4 header
2795  *   | DES1 |---> TCP Payload (can continue on next descr...)
2796  *   | DES2 |---> buffer 1 and 2 len
2797  *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
2798  *   --------
2799  *      |
2800  *     ...
2801  *      |
2802  *   --------
2803  *   | DES0 | --| Split TCP Payload on Buffers 1 and 2
2804  *   | DES1 | --|
2805  *   | DES2 | --> buffer 1 and 2 len
2806  *   | DES3 |
2807  *   --------
2808  *
2809  * The MSS is fixed while TSO is enabled; the TDES3 context field is only reprogrammed when the MSS changes.
2810  */
2811 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
2812 {
2813         struct dma_desc *desc, *first, *mss_desc = NULL;
2814         struct stmmac_priv *priv = netdev_priv(dev);
2815         int nfrags = skb_shinfo(skb)->nr_frags;
2816         u32 queue = skb_get_queue_mapping(skb);
2817         unsigned int first_entry, des;
2818         struct stmmac_tx_queue *tx_q;
2819         int tmp_pay_len = 0;
2820         u32 pay_len, mss;
2821         u8 proto_hdr_len;
2822         int i;
2823
2824         tx_q = &priv->tx_queue[queue];
2825
2826         /* Compute header lengths */
2827         proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2828
2829         /* Descriptor availability based on the threshold should be safe enough */
2830         if (unlikely(stmmac_tx_avail(priv, queue) <
2831                 (((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
2832                 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
2833                         netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
2834                                                                 queue));
2835                         /* This is a hard error, log it. */
2836                         netdev_err(priv->dev,
2837                                    "%s: Tx Ring full when queue awake\n",
2838                                    __func__);
2839                 }
2840                 return NETDEV_TX_BUSY;
2841         }
2842
2843         pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
2844
2845         mss = skb_shinfo(skb)->gso_size;
2846
2847         /* set new MSS value if needed */
2848         if (mss != tx_q->mss) {
2849                 mss_desc = tx_q->dma_tx + tx_q->cur_tx;
2850                 stmmac_set_mss(priv, mss_desc, mss);
2851                 tx_q->mss = mss;
2852                 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2853                 WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
2854         }
2855
2856         if (netif_msg_tx_queued(priv)) {
2857                 pr_info("%s: tcphdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
2858                         __func__, tcp_hdrlen(skb), proto_hdr_len, pay_len, mss);
2859                 pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
2860                         skb->data_len);
2861         }
2862
2863         first_entry = tx_q->cur_tx;
2864         WARN_ON(tx_q->tx_skbuff[first_entry]);
2865
2866         desc = tx_q->dma_tx + first_entry;
2867         first = desc;
2868
2869         /* first descriptor: fill Headers on Buf1 */
2870         des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
2871                              DMA_TO_DEVICE);
2872         if (dma_mapping_error(priv->device, des))
2873                 goto dma_map_err;
2874
2875         tx_q->tx_skbuff_dma[first_entry].buf = des;
2876         tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
2877
2878         first->des0 = cpu_to_le32(des);
2879
2880         /* Fill start of payload in buff2 of first descriptor */
2881         if (pay_len)
2882                 first->des1 = cpu_to_le32(des + proto_hdr_len);
2883
2884         /* If needed take extra descriptors to fill the remaining payload */
2885         tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
2886
2887         stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
2888
2889         /* Prepare fragments */
2890         for (i = 0; i < nfrags; i++) {
2891                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2892
2893                 des = skb_frag_dma_map(priv->device, frag, 0,
2894                                        skb_frag_size(frag),
2895                                        DMA_TO_DEVICE);
2896                 if (dma_mapping_error(priv->device, des))
2897                         goto dma_map_err;
2898
2899                 stmmac_tso_allocator(priv, des, skb_frag_size(frag),
2900                                      (i == nfrags - 1), queue);
2901
2902                 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
2903                 tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
2904                 tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
2905         }
2906
2907         tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
2908
2909         /* Only the last descriptor gets to point to the skb. */
2910         tx_q->tx_skbuff[tx_q->cur_tx] = skb;
2911
2912         /* We've used all descriptors we need for this skb, however,
2913          * advance cur_tx so that it references a fresh descriptor.
2914          * ndo_start_xmit will fill this descriptor the next time it's
2915          * called and stmmac_tx_clean may clean up to this descriptor.
2916          */
2917         tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2918
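        /* Stop the queue if the remaining descriptors cannot hold another
         * worst-case frame (linear data plus MAX_SKB_FRAGS fragments).
         */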
2919         if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
2920                 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
2921                           __func__);
2922                 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
2923         }
2924
2925         dev->stats.tx_bytes += skb->len;
2926         priv->xstats.tx_tso_frames++;
2927         priv->xstats.tx_tso_nfrags += nfrags;
2928
2929         /* Manage tx mitigation */
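        /* The completion interrupt (IC bit) is deferred until the coalescing
         * threshold is reached, unless a HW TX timestamp is pending on
         * GMAC4+ cores; otherwise the cleanup timer is (re)armed.
         */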
2930         tx_q->tx_count_frames += nfrags + 1;
2931         if (likely(priv->tx_coal_frames > tx_q->tx_count_frames) &&
2932             !(priv->synopsys_id >= DWMAC_CORE_4_00 &&
2933             (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
2934             priv->hwts_tx_en)) {
2935                 stmmac_tx_timer_arm(priv, queue);
2936         } else {
2937                 tx_q->tx_count_frames = 0;
2938                 stmmac_set_tx_ic(priv, desc);
2939                 priv->xstats.tx_set_ic_bit++;
2940         }
2941
2942         skb_tx_timestamp(skb);
2943
2944         if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
2945                      priv->hwts_tx_en)) {
2946                 /* declare that device is doing timestamping */
2947                 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2948                 stmmac_enable_tx_timestamp(priv, first);
2949         }
2950
2951         /* Complete the first descriptor before granting the DMA */
2952         stmmac_prepare_tso_tx_desc(priv, first, 1,
2953                         proto_hdr_len,
2954                         pay_len,
2955                         1, tx_q->tx_skbuff_dma[first_entry].last_segment,
2956                         tcp_hdrlen(skb) / 4, (skb->len - proto_hdr_len));
2957
2958         /* If context desc is used to change MSS */
2959         if (mss_desc) {
2960                 /* Make sure that the first descriptor has been completely
2961                  * written, including its own bit. This is because the MSS
2962                  * descriptor sits before the first descriptor, so we need to
2963                  * make sure that the MSS's own bit is the last thing written.
2964                  */
2965                 dma_wmb();
2966                 stmmac_set_tx_owner(priv, mss_desc);
2967         }
2968
2969         /* The own bit must be the last thing written when preparing the
2970          * descriptor, and then a barrier is needed to make sure that
2971          * all is coherent before granting the DMA engine.
2972          */
2973         wmb();
2974
2975         if (netif_msg_pktdata(priv)) {
2976                 pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
2977                         __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
2978                         tx_q->cur_tx, first, nfrags);
2979
2980                 stmmac_display_ring(priv, (void *)tx_q->dma_tx, DMA_TX_SIZE, 0);
2981
2982                 pr_info(">>> frame to be transmitted: ");
2983                 print_pkt(skb->data, skb_headlen(skb));
2984         }
2985
2986         netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
2987
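        /* Advance the tail pointer past the last prepared descriptor so the
         * DMA engine fetches the new frame.
         */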
2988         tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * sizeof(*desc));
2989         stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
2990         stmmac_tx_timer_arm(priv, queue);
2991
2992         return NETDEV_TX_OK;
2993
2994 dma_map_err:
2995         dev_err(priv->device, "Tx dma map failed\n");
2996         dev_kfree_skb(skb);
2997         priv->dev->stats.tx_dropped++;
2998         return NETDEV_TX_OK;
2999 }
3000
3001 /**
3002  *  stmmac_xmit - Tx entry point of the driver
3003  *  @skb : the socket buffer
3004  *  @dev : device pointer
3005  *  Description : this is the tx entry point of the driver.
3006  *  It programs the chain or the ring and supports oversized frames
3007  *  and SG feature.
3008  */
3009 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
3010 {
3011         struct stmmac_priv *priv = netdev_priv(dev);
3012         unsigned int nopaged_len = skb_headlen(skb);
3013         int i, csum_insertion = 0, is_jumbo = 0;
3014         u32 queue = skb_get_queue_mapping(skb);
3015         int nfrags = skb_shinfo(skb)->nr_frags;
3016         int entry;
3017         unsigned int first_entry;
3018         struct dma_desc *desc, *first;
3019         struct stmmac_tx_queue *tx_q;
3020         unsigned int enh_desc;
3021         unsigned int des;
3022
3023         tx_q = &priv->tx_queue[queue];
3024
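        /* Leave the EEE low-power idle state before queueing a new frame */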
3025         if (priv->tx_path_in_lpi_mode)
3026                 stmmac_disable_eee_mode(priv);
3027
3028         /* Manage oversized TCP frames for GMAC4 device */
3029         if (skb_is_gso(skb) && priv->tso) {
3030                 if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
3031                         return stmmac_tso_xmit(skb, dev);
3032         }
3033
3034         if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
3035                 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
3036                         netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
3037                                                                 queue));
3038                         /* This is a hard error, log it. */
3039                         netdev_err(priv->dev,
3040                                    "%s: Tx Ring full when queue awake\n",
3041                                    __func__);
3042                 }
3043                 return NETDEV_TX_BUSY;
3044         }
3045
3046         entry = tx_q->cur_tx;
3047         first_entry = entry;
3048         WARN_ON(tx_q->tx_skbuff[first_entry]);
3049
3050         csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
3051
3052         if (likely(priv->extend_desc))
3053                 desc = (struct dma_desc *)(tx_q->dma_etx + entry);
3054         else
3055                 desc = tx_q->dma_tx + entry;
3056
3057         first = desc;
3058
3059         enh_desc = priv->plat->enh_desc;
3060         /* To program the descriptors according to the size of the frame */
3061         if (enh_desc)
3062                 is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
3063
3064         if (unlikely(is_jumbo)) {
3065                 entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
3066                 if (unlikely(entry < 0) && (entry != -EINVAL))
3067                         goto dma_map_err;
3068         }
3069
3070         for (i = 0; i < nfrags; i++) {
3071                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3072                 int len = skb_frag_size(frag);
3073                 bool last_segment = (i == (nfrags - 1));
3074
3075                 entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
3076                 WARN_ON(tx_q->tx_skbuff[entry]);
3077
3078                 if (likely(priv->extend_desc))
3079                         desc = (struct dma_desc *)(tx_q->dma_etx + entry);
3080                 else
3081                         desc = tx_q->dma_tx + entry;
3082
3083                 des = skb_frag_dma_map(priv->device, frag, 0, len,
3084                                        DMA_TO_DEVICE);
3085                 if (dma_mapping_error(priv->device, des))
3086                         goto dma_map_err; /* should reuse desc w/o issues */
3087
3088                 tx_q->tx_skbuff_dma[entry].buf = des;
3089
3090                 stmmac_set_desc_addr(priv, desc, des);
3091
3092                 tx_q->tx_skbuff_dma[entry].map_as_page = true;
3093                 tx_q->tx_skbuff_dma[entry].len = len;
3094                 tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
3095
3096                 /* Prepare the descriptor and set the own bit too */
3097                 stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
3098                                 priv->mode, 1, last_segment, skb->len);
3099         }
3100
3101         /* Only the last descriptor gets to point to the skb. */
3102         tx_q->tx_skbuff[entry] = skb;
3103
3104         /* We've used all descriptors we need for this skb, however,
3105          * advance cur_tx so that it references a fresh descriptor.
3106          * ndo_start_xmit will fill this descriptor the next time it's
3107          * called and stmmac_tx_clean may clean up to this descriptor.
3108          */
3109         entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
3110         tx_q->cur_tx = entry;
3111
3112         if (netif_msg_pktdata(priv)) {
3113                 void *tx_head;
3114
3115                 netdev_dbg(priv->dev,
3116                            "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
3117                            __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
3118                            entry, first, nfrags);
3119
3120                 if (priv->extend_desc)
3121                         tx_head = (void *)tx_q->dma_etx;
3122                 else
3123                         tx_head = (void *)tx_q->dma_tx;
3124
3125                 stmmac_display_ring(priv, tx_head, DMA_TX_SIZE, false);
3126
3127                 netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
3128                 print_pkt(skb->data, skb->len);
3129         }
3130
3131         if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
3132                 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
3133                           __func__);
3134                 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
3135         }
3136
3137         dev->stats.tx_bytes += skb->len;
3138
3139         /* According to the coalesce parameter, the IC bit for the latest
3140          * segment is reset and the timer is re-started to clean the tx status.
3141          * This approach takes care of the fragments: desc is the first
3142          * element in case of no SG.
3143          */
3144         tx_q->tx_count_frames += nfrags + 1;
3145         if (likely(priv->tx_coal_frames > tx_q->tx_count_frames) &&
3146             !(priv->synopsys_id >= DWMAC_CORE_4_00 &&
3147             (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
3148             priv->hwts_tx_en)) {
3149                 stmmac_tx_timer_arm(priv, queue);
3150         } else {
3151                 tx_q->tx_count_frames = 0;
3152                 stmmac_set_tx_ic(priv, desc);
3153                 priv->xstats.tx_set_ic_bit++;
3154         }
3155
3156         skb_tx_timestamp(skb);
3157
3158         /* Ready to fill the first descriptor and set the OWN bit w/o any
3159          * problems because all the descriptors are actually ready to be
3160          * passed to the DMA engine.
3161          */
3162         if (likely(!is_jumbo)) {
3163                 bool last_segment = (nfrags == 0);
3164
3165                 des = dma_map_single(priv->device, skb->data,
3166                                      nopaged_len, DMA_TO_DEVICE);
3167                 if (dma_mapping_error(priv->device, des))
3168                         goto dma_map_err;
3169
3170                 tx_q->tx_skbuff_dma[first_entry].buf = des;
3171
3172                 stmmac_set_desc_addr(priv, first, des);
3173
3174                 tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
3175                 tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
3176
3177                 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
3178                              priv->hwts_tx_en)) {
3179                         /* declare that device is doing timestamping */
3180                         skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
3181                         stmmac_enable_tx_timestamp(priv, first);
3182                 }
3183
3184                 /* Prepare the first descriptor setting the OWN bit too */
3185                 stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
3186                                 csum_insertion, priv->mode, 1, last_segment,
3187                                 skb->len);
3188         } else {
3189                 stmmac_set_tx_owner(priv, first);
3190         }
3191
3192         /* The own bit must be the last thing written when preparing the
3193          * descriptor, and then a barrier is needed to make sure that
3194          * all is coherent before granting the DMA engine.
3195          */
3196         wmb();
3197
3198         netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
3199
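        /* Notify the DMA engine that new TX descriptors are ready */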
3200         stmmac_enable_dma_transmission(priv, priv->ioaddr);
3201
3202         tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * sizeof(*desc));
3203         stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
3204         stmmac_tx_timer_arm(priv, queue);
3205
3206         return NETDEV_TX_OK;
3207
3208 dma_map_err:
3209         netdev_err(priv->dev, "Tx DMA map failed\n");
3210         dev_kfree_skb(skb);
3211         priv->dev->stats.tx_dropped++;
3212         return NETDEV_TX_OK;
3213 }
3214
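/* Strip an 802.1Q/802.1AD tag from the packet data in software and record it
 * in the skb when the corresponding RX VLAN feature is enabled.
 */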
3215 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
3216 {
3217         struct vlan_ethhdr *veth;
3218         __be16 vlan_proto;
3219         u16 vlanid;
3220
3221         veth = (struct vlan_ethhdr *)skb->data;
3222         vlan_proto = veth->h_vlan_proto;
3223
3224         if ((vlan_proto == htons(ETH_P_8021Q) &&
3225              dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
3226             (vlan_proto == htons(ETH_P_8021AD) &&
3227              dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
3228                 /* pop the vlan tag */
3229                 vlanid = ntohs(veth->h_vlan_TCI);
3230                 memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
3231                 skb_pull(skb, VLAN_HLEN);
3232                 __vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
3233         }
3234 }
3235
3236
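/* Return true once rx_zeroc_thresh has reached STMMAC_RX_THRESH, i.e. while
 * stmmac_rx() must copy incoming frames because stmmac_rx_refill() failed to
 * allocate replacement skbs for zero-copy.
 */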
3237 static inline int stmmac_rx_threshold_count(struct stmmac_rx_queue *rx_q)
3238 {
3239         if (rx_q->rx_zeroc_thresh < STMMAC_RX_THRESH)
3240                 return 0;
3241
3242         return 1;
3243 }
3244
3245 /**
3246  * stmmac_rx_refill - refill used skb preallocated buffers
3247  * @priv: driver private structure
3248  * @queue: RX queue index
3249  * Description: this reallocates the skbs for the zero-copy based
3250  * reception process.
3251  */
3252 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
3253 {
3254         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3255         int dirty = stmmac_rx_dirty(priv, queue);
3256         unsigned int entry = rx_q->dirty_rx;
3257
3258         int bfsize = priv->dma_buf_sz;
3259
3260         while (dirty-- > 0) {
3261                 struct dma_desc *p;
3262
3263                 if (priv->extend_desc)
3264                         p = (struct dma_desc *)(rx_q->dma_erx + entry);
3265                 else
3266                         p = rx_q->dma_rx + entry;
3267
3268                 if (likely(!rx_q->rx_skbuff[entry])) {
3269                         struct sk_buff *skb;
3270
3271                         skb = netdev_alloc_skb_ip_align(priv->dev, bfsize);
3272                         if (unlikely(!skb)) {
3273                                 /* so for a while no zero-copy! */
3274                                 rx_q->rx_zeroc_thresh = STMMAC_RX_THRESH;
3275                                 if (unlikely(net_ratelimit()))
3276                                         dev_err(priv->device,
3277                                                 "fail to alloc skb entry %d\n",
3278                                                 entry);
3279                                 break;
3280                         }
3281
3282                         rx_q->rx_skbuff[entry] = skb;
3283                         rx_q->rx_skbuff_dma[entry] =
3284                             dma_map_single(priv->device, skb->data, bfsize,
3285                                            DMA_FROM_DEVICE);
3286                         if (dma_mapping_error(priv->device,
3287                                               rx_q->rx_skbuff_dma[entry])) {
3288                                 netdev_err(priv->dev, "Rx DMA map failed\n");
3289                                 dev_kfree_skb(skb);
3290                                 break;
3291                         }
3292
3293                         stmmac_set_desc_addr(priv, p, rx_q->rx_skbuff_dma[entry]);
3294                         stmmac_refill_desc3(priv, rx_q, p);
3295
3296                         if (rx_q->rx_zeroc_thresh > 0)
3297                                 rx_q->rx_zeroc_thresh--;
3298
3299                         netif_dbg(priv, rx_status, priv->dev,
3300                                   "refill entry #%d\n", entry);
3301                 }
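                /* Ensure all descriptor updates are visible before handing
                 * the descriptor back to the device (OWN bit set below).
                 */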
3302                 dma_wmb();
3303
3304                 stmmac_set_rx_owner(priv, p, priv->use_riwt);
3305
3306                 dma_wmb();
3307
3308                 entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE);
3309         }
3310         rx_q->dirty_rx = entry;
3311         stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
3312 }
3313
3314 /**
3315  * stmmac_rx - manage the receive process
3316  * @priv: driver private structure
3317  * @limit: napi budget
3318  * @queue: RX queue index.
3319  * Description: this is the function called by the napi poll method.
3320  * It gets all the frames inside the ring.
3321  */
3322 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
3323 {
3324         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3325         struct stmmac_channel *ch = &priv->channel[queue];
3326         unsigned int next_entry = rx_q->cur_rx;
3327         int coe = priv->hw->rx_csum;
3328         unsigned int count = 0;
3329         bool xmac;
3330
3331         xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
3332
3333         if (netif_msg_rx_status(priv)) {
3334                 void *rx_head;
3335
3336                 netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
3337                 if (priv->extend_desc)
3338                         rx_head = (void *)rx_q->dma_erx;
3339                 else
3340                         rx_head = (void *)rx_q->dma_rx;
3341
3342                 stmmac_display_ring(priv, rx_head, DMA_RX_SIZE, true);
3343         }
3344         while (count < limit) {
3345                 int entry, status;
3346                 struct dma_desc *p;
3347                 struct dma_desc *np;
3348
3349                 entry = next_entry;
3350
3351                 if (priv->extend_desc)
3352                         p = (struct dma_desc *)(rx_q->dma_erx + entry);
3353                 else
3354                         p = rx_q->dma_rx + entry;
3355
3356                 /* read the status of the incoming frame */
3357                 status = stmmac_rx_status(priv, &priv->dev->stats,
3358                                 &priv->xstats, p);
3359                 /* check if managed by the DMA otherwise go ahead */
3360                 if (unlikely(status & dma_own))
3361                         break;
3362
3363                 count++;
3364
3365                 rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, DMA_RX_SIZE);
3366                 next_entry = rx_q->cur_rx;
3367
3368                 if (priv->extend_desc)
3369                         np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
3370                 else
3371                         np = rx_q->dma_rx + next_entry;
3372
3373                 prefetch(np);
3374
3375                 if (priv->extend_desc)
3376                         stmmac_rx_extended_status(priv, &priv->dev->stats,
3377                                         &priv->xstats, rx_q->dma_erx + entry);
3378                 if (unlikely(status == discard_frame)) {
3379                         priv->dev->stats.rx_errors++;
3380                         if (priv->hwts_rx_en && !priv->extend_desc) {
3381                                 /* DESC2 & DESC3 will be overwritten by device
3382                                  * with timestamp value, hence reinitialize
3383                                  * them in stmmac_rx_refill() function so that
3384                                  * device can reuse it.
3385                                  */
3386                                 dev_kfree_skb_any(rx_q->rx_skbuff[entry]);
3387                                 rx_q->rx_skbuff[entry] = NULL;
3388                                 dma_unmap_single(priv->device,
3389                                                  rx_q->rx_skbuff_dma[entry],
3390                                                  priv->dma_buf_sz,
3391                                                  DMA_FROM_DEVICE);
3392                         }
3393                 } else {
3394                         struct sk_buff *skb;
3395                         int frame_len;
3396                         unsigned int des;
3397
3398                         stmmac_get_desc_addr(priv, p, &des);
3399                         frame_len = stmmac_get_rx_frame_len(priv, p, coe);
3400
3401                         /*  If frame length is greater than skb buffer size
3402                          *  (preallocated during init) then the packet is
3403                          *  ignored
3404                          */
3405                         if (frame_len > priv->dma_buf_sz) {
3406                                 if (net_ratelimit())
3407                                         netdev_err(priv->dev,
3408                                                    "len %d larger than size (%d)\n",
3409                                                    frame_len, priv->dma_buf_sz);
3410                                 priv->dev->stats.rx_length_errors++;
3411                                 continue;
3412                         }
3413
3414                         /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
3415                          * Type frames (LLC/LLC-SNAP)
3416                          *
3417                          * llc_snap is never checked in GMAC >= 4, so this ACS
3418                          * feature is always disabled and packets need to be
3419                          * stripped manually.
3420                          */
3421                         if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
3422                             unlikely(status != llc_snap))
3423                                 frame_len -= ETH_FCS_LEN;
3424
3425                         if (netif_msg_rx_status(priv)) {
3426                                 netdev_dbg(priv->dev, "\tdesc: %p [entry %d] buff=0x%x\n",
3427                                            p, entry, des);
3428                                 netdev_dbg(priv->dev, "frame size %d, COE: %d\n",
3429                                            frame_len, status);
3430                         }
3431
3432                         /* Zero-copy is always used, for all sizes, in case
3433                          * of GMAC4 because the used descriptors always need
3434                          * to be refilled.
3435                          */
3436                         if (unlikely(!xmac &&
3437                                      ((frame_len < priv->rx_copybreak) ||
3438                                      stmmac_rx_threshold_count(rx_q)))) {
3439                                 skb = netdev_alloc_skb_ip_align(priv->dev,
3440                                                                 frame_len);
3441                                 if (unlikely(!skb)) {
3442                                         if (net_ratelimit())
3443                                                 dev_warn(priv->device,
3444                                                          "packet dropped\n");
3445                                         priv->dev->stats.rx_dropped++;
3446                                         continue;
3447                                 }
3448
3449                                 dma_sync_single_for_cpu(priv->device,
3450                                                         rx_q->rx_skbuff_dma
3451                                                         [entry], frame_len,
3452                                                         DMA_FROM_DEVICE);
3453                                 skb_copy_to_linear_data(skb,
3454                                                         rx_q->
3455                                                         rx_skbuff[entry]->data,
3456                                                         frame_len);
3457
3458                                 skb_put(skb, frame_len);
3459                                 dma_sync_single_for_device(priv->device,
3460                                                            rx_q->rx_skbuff_dma
3461                                                            [entry], frame_len,
3462                                                            DMA_FROM_DEVICE);
3463                         } else {
3464                                 skb = rx_q->rx_skbuff[entry];
3465                                 if (unlikely(!skb)) {
3466                                         if (net_ratelimit())
3467                                                 netdev_err(priv->dev,
3468                                                            "%s: Inconsistent Rx chain\n",
3469                                                            priv->dev->name);
3470                                         priv->dev->stats.rx_dropped++;
3471                                         continue;
3472                                 }
3473                                 prefetch(skb->data - NET_IP_ALIGN);
3474                                 rx_q->rx_skbuff[entry] = NULL;
3475                                 rx_q->rx_zeroc_thresh++;
3476
3477                                 skb_put(skb, frame_len);
3478                                 dma_unmap_single(priv->device,
3479                                                  rx_q->rx_skbuff_dma[entry],
3480                                                  priv->dma_buf_sz,
3481                                                  DMA_FROM_DEVICE);
3482                         }
3483
3484                         if (netif_msg_pktdata(priv)) {
3485                                 netdev_dbg(priv->dev, "frame received (%dbytes)",
3486                                            frame_len);
3487                                 print_pkt(skb->data, frame_len);
3488                         }
3489
3490                         stmmac_get_rx_hwtstamp(priv, p, np, skb);
3491
3492                         stmmac_rx_vlan(priv->dev, skb);
3493
3494                         skb->protocol = eth_type_trans(skb, priv->dev);
3495
3496                         if (unlikely(!coe))
3497                                 skb_checksum_none_assert(skb);
3498                         else
3499                                 skb->ip_summed = CHECKSUM_UNNECESSARY;
3500
3501                         napi_gro_receive(&ch->napi, skb);
3502
3503                         priv->dev->stats.rx_packets++;
3504                         priv->dev->stats.rx_bytes += frame_len;
3505                 }
3506         }
3507
3508         stmmac_rx_refill(priv, queue);
3509
3510         priv->xstats.rx_pkt_n += count;
3511
3512         return count;
3513 }
3514
3515 /**
3516  *  stmmac_napi_poll - stmmac NAPI poll method
3517  *  @napi : pointer to the napi structure.
3518  *  @budget : maximum number of packets that the current CPU can receive from
3519  *            all interfaces.
3520  *  Description :
3521  *  To look at the incoming frames and clear the tx resources.
3522  */
3523 static int stmmac_napi_poll(struct napi_struct *napi, int budget)
3524 {
3525         struct stmmac_channel *ch =
3526                 container_of(napi, struct stmmac_channel, napi);
3527         struct stmmac_priv *priv = ch->priv_data;
3528         int work_done, rx_done = 0, tx_done = 0;
3529         u32 chan = ch->index;
3530
3531         priv->xstats.napi_poll++;
3532
3533         if (ch->has_tx)
3534                 tx_done = stmmac_tx_clean(priv, budget, chan);
3535         if (ch->has_rx)
3536                 rx_done = stmmac_rx(priv, budget, chan);
3537
3538         work_done = max(rx_done, tx_done);
3539         work_done = min(work_done, budget);
3540
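        /* If the budget was not exhausted, complete NAPI and unmask the DMA
         * interrupts; re-check the status so an event that raced with the
         * unmask is not lost, and reschedule if more work showed up.
         */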
3541         if (work_done < budget && napi_complete_done(napi, work_done)) {
3542                 int stat;
3543
3544                 stmmac_enable_dma_irq(priv, priv->ioaddr, chan);
3545                 stat = stmmac_dma_interrupt_status(priv, priv->ioaddr,
3546                                                    &priv->xstats, chan);
3547                 if (stat && napi_reschedule(napi))
3548                         stmmac_disable_dma_irq(priv, priv->ioaddr, chan);
3549         }
3550
3551         return work_done;
3552 }
3553
3554 /**
3555  *  stmmac_tx_timeout
3556  *  @dev : Pointer to net device structure
3557  *  Description: this function is called when a packet transmission fails to
3558  *   complete within a reasonable time. The driver will mark the error in the
3559  *   netdev structure and arrange for the device to be reset to a sane state
3560  *   in order to transmit a new packet.
3561  */
3562 static void stmmac_tx_timeout(struct net_device *dev)
3563 {
3564         struct stmmac_priv *priv = netdev_priv(dev);
3565
3566         stmmac_global_err(priv);
3567 }
3568
3569 /**
3570  *  stmmac_set_rx_mode - entry point for multicast addressing
3571  *  @dev : pointer to the device structure
3572  *  Description:
3573  *  This function is a driver entry point which gets called by the kernel
3574  *  whenever multicast addresses must be enabled/disabled.
3575  *  Return value:
3576  *  void.
3577  */
3578 static void stmmac_set_rx_mode(struct net_device *dev)
3579 {
3580         struct stmmac_priv *priv = netdev_priv(dev);
3581
3582         stmmac_set_filter(priv, priv->hw, dev);
3583 }
3584
3585 /**
3586  *  stmmac_change_mtu - entry point to change MTU size for the device.
3587  *  @dev : device pointer.
3588  *  @new_mtu : the new MTU size for the device.
3589  *  Description: the Maximum Transmission Unit (MTU) is used by the network layer
3590  *  to drive packet transmission. Ethernet has an MTU of 1500 octets
3591  *  (ETH_DATA_LEN). This value can be changed with ifconfig.
3592  *  Return value:
3593  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3594  *  file on failure.
3595  */
3596 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
3597 {
3598         struct stmmac_priv *priv = netdev_priv(dev);
3599         int txfifosz = priv->plat->tx_fifo_size;
3600         const int mtu = new_mtu;
3601
3602         if (txfifosz == 0)
3603                 txfifosz = priv->dma_cap.tx_fifo_size;
3604
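        /* The TX FIFO is shared between the TX queues, so each queue's share
         * must be large enough to hold a frame of the new MTU.
         */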
3605         txfifosz /= priv->plat->tx_queues_to_use;
3606
3607         if (netif_running(dev)) {
3608                 netdev_err(priv->dev, "must be stopped to change its MTU\n");
3609                 return -EBUSY;
3610         }
3611
3612         new_mtu = STMMAC_ALIGN(new_mtu);
3613
3614         /* If condition true, FIFO is too small or MTU too large */
3615         if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
3616                 return -EINVAL;
3617
3618         dev->mtu = mtu;
3619
3620         netdev_update_features(dev);
3621
3622         return 0;
3623 }
3624
3625 static netdev_features_t stmmac_fix_features(struct net_device *dev,
3626                                              netdev_features_t features)
3627 {
3628         struct stmmac_priv *priv = netdev_priv(dev);
3629
3630         if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
3631                 features &= ~NETIF_F_RXCSUM;
3632
3633         if (!priv->plat->tx_coe)
3634                 features &= ~NETIF_F_CSUM_MASK;
3635
3636         /* Some GMAC devices have buggy Jumbo frame support that requires
3637          * the Tx COE to be disabled for oversized frames (due to limited
3638          * buffer sizes). In this case we disable the TX csum insertion in
3639          * the TDES and do not use SF.
3640          */
3641         if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
3642                 features &= ~NETIF_F_CSUM_MASK;
3643
3644         /* Disable tso if asked by ethtool */
3645         if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
3646                 if (features & NETIF_F_TSO)
3647                         priv->tso = true;
3648                 else
3649                         priv->tso = false;
3650         }
3651
3652         return features;
3653 }
3654
3655 static int stmmac_set_features(struct net_device *netdev,
3656                                netdev_features_t features)
3657 {
3658         struct stmmac_priv *priv = netdev_priv(netdev);
3659
3660         /* Keep the COE Type in case checksum offload is supported */
3661         if (features & NETIF_F_RXCSUM)
3662                 priv->hw->rx_csum = priv->plat->rx_coe;
3663         else
3664                 priv->hw->rx_csum = 0;
3665         /* No check needed because rx_coe has been set before and it will be
3666          * fixed in case of issue.
3667          */
3668         stmmac_rx_ipc(priv, priv->hw);
3669
3670         return 0;
3671 }
3672
3673 /**
3674  *  stmmac_interrupt - main ISR
3675  *  @irq: interrupt number.
3676  *  @dev_id: to pass the net device pointer (must be valid).
3677  *  Description: this is the main driver interrupt service routine.
3678  *  It can call:
3679  *  o DMA service routine (to manage incoming frame reception and transmission
3680  *    status)
3681  *  o Core interrupts to manage: remote wake-up, management counter, LPI
3682  *    interrupts.
3683  */
3684 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
3685 {
3686         struct net_device *dev = (struct net_device *)dev_id;
3687         struct stmmac_priv *priv = netdev_priv(dev);
3688         u32 rx_cnt = priv->plat->rx_queues_to_use;
3689         u32 tx_cnt = priv->plat->tx_queues_to_use;
3690         u32 queues_count;
3691         u32 queue;
3692         bool xmac;
3693
3694         xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
3695         queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
3696
3697         if (priv->irq_wake)
3698                 pm_wakeup_event(priv->device, 0);
3699
3700         /* Check if adapter is up */
3701         if (test_bit(STMMAC_DOWN, &priv->state))
3702                 return IRQ_HANDLED;
3703         /* Check if a fatal error happened */
3704         if (stmmac_safety_feat_interrupt(priv))
3705                 return IRQ_HANDLED;
3706
3707         /* To handle GMAC own interrupts */
3708         if ((priv->plat->has_gmac) || xmac) {
3709                 int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
3710
3711                 if (unlikely(status)) {
3712                         /* For LPI we need to save the tx status */
3713                         if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
3714                                 priv->tx_path_in_lpi_mode = true;
3715                         if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
3716                                 priv->tx_path_in_lpi_mode = false;
3717                 }
3718
3719                 for (queue = 0; queue < queues_count; queue++) {
3720                         status = stmmac_host_mtl_irq_status(priv, priv->hw,
3721                                                             queue);
3722                 }
3723
3724                 /* PCS link status */
3725                 if (priv->hw->pcs) {
3726                         if (priv->xstats.pcs_link)
3727                                 netif_carrier_on(dev);
3728                         else
3729                                 netif_carrier_off(dev);
3730                 }
3731         }
3732
3733         /* To handle DMA interrupts */
3734         stmmac_dma_interrupt(priv);
3735
3736         return IRQ_HANDLED;
3737 }
3738
3739 #ifdef CONFIG_NET_POLL_CONTROLLER
3740 /* Polling receive - used by NETCONSOLE and other diagnostic tools
3741  * to allow network I/O with interrupts disabled.
3742  */
3743 static void stmmac_poll_controller(struct net_device *dev)
3744 {
3745         disable_irq(dev->irq);
3746         stmmac_interrupt(dev->irq, dev);
3747         enable_irq(dev->irq);
3748 }
3749 #endif
3750
3751 /**
3752  *  stmmac_ioctl - Entry point for the Ioctl
3753  *  @dev: Device pointer.
3754  *  @rq: An IOCTL specific structure, that can contain a pointer to
3755  *  a proprietary structure used to pass information to the driver.
3756  *  @cmd: IOCTL command
3757  *  Description:
3758  *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
3759  */
3760 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
3761 {
3762         int ret = -EOPNOTSUPP;
3763
3764         if (!netif_running(dev))
3765                 return -EINVAL;
3766
3767         switch (cmd) {
3768         case SIOCGMIIPHY:
3769         case SIOCGMIIREG:
3770         case SIOCSMIIREG:
3771                 if (!dev->phydev)
3772                         return -EINVAL;
3773                 ret = phy_mii_ioctl(dev->phydev, rq, cmd);
3774                 break;
3775         case SIOCSHWTSTAMP:
3776                 ret = stmmac_hwtstamp_ioctl(dev, rq);
3777                 break;
3778         default:
3779                 break;
3780         }
3781
3782         return ret;
3783 }
3784
3785 static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
3786                                     void *cb_priv)
3787 {
3788         struct stmmac_priv *priv = cb_priv;
3789         int ret = -EOPNOTSUPP;
3790
3791         stmmac_disable_all_queues(priv);
3792
3793         switch (type) {
3794         case TC_SETUP_CLSU32:
3795                 if (tc_cls_can_offload_and_chain0(priv->dev, type_data))
3796                         ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
3797                 break;
3798         default:
3799                 break;
3800         }
3801
3802         stmmac_enable_all_queues(priv);
3803         return ret;
3804 }
3805
3806 static int stmmac_setup_tc_block(struct stmmac_priv *priv,
3807                                  struct tc_block_offload *f)
3808 {
3809         if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
3810                 return -EOPNOTSUPP;
3811
3812         switch (f->command) {
3813         case TC_BLOCK_BIND:
3814                 return tcf_block_cb_register(f->block, stmmac_setup_tc_block_cb,
3815                                 priv, priv, f->extack);
3816         case TC_BLOCK_UNBIND:
3817                 tcf_block_cb_unregister(f->block, stmmac_setup_tc_block_cb, priv);
3818                 return 0;
3819         default:
3820                 return -EOPNOTSUPP;
3821         }
3822 }
3823
3824 static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
3825                            void *type_data)
3826 {
3827         struct stmmac_priv *priv = netdev_priv(ndev);
3828
3829         switch (type) {
3830         case TC_SETUP_BLOCK:
3831                 return stmmac_setup_tc_block(priv, type_data);
3832         case TC_SETUP_QDISC_CBS:
3833                 return stmmac_tc_setup_cbs(priv, priv, type_data);
3834         default:
3835                 return -EOPNOTSUPP;
3836         }
3837 }
3838
3839 static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
3840                                struct net_device *sb_dev,
3841                                select_queue_fallback_t fallback)
3842 {
3843         if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
3844                 /*
3845                  * There is no way to determine the number of TSO
3846                  * capable Queues. Let's always use Queue 0,
3847                  * because if TSO is supported then at least this
3848                  * one will be capable.
3849                  */
3850                 return 0;
3851         }
3852
3853         return fallback(dev, skb, NULL) % dev->real_num_tx_queues;
3854 }
3855
3856 static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
3857 {
3858         struct stmmac_priv *priv = netdev_priv(ndev);
3859         int ret = 0;
3860
3861         ret = eth_mac_addr(ndev, addr);
3862         if (ret)
3863                 return ret;
3864
3865         stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
3866
3867         return ret;
3868 }
3869
3870 #ifdef CONFIG_DEBUG_FS
3871 static struct dentry *stmmac_fs_dir;
3872
3873 static void sysfs_display_ring(void *head, int size, int extend_desc,
3874                                struct seq_file *seq)
3875 {
3876         int i;
3877         struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
3878         struct dma_desc *p = (struct dma_desc *)head;
3879
3880         for (i = 0; i < size; i++) {
3881                 if (extend_desc) {
3882                         seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
3883                                    i, (unsigned int)virt_to_phys(ep),
3884                                    le32_to_cpu(ep->basic.des0),
3885                                    le32_to_cpu(ep->basic.des1),
3886                                    le32_to_cpu(ep->basic.des2),
3887                                    le32_to_cpu(ep->basic.des3));
3888                         ep++;
3889                 } else {
3890                         seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
3891                                    i, (unsigned int)virt_to_phys(p),
3892                                    le32_to_cpu(p->des0), le32_to_cpu(p->des1),
3893                                    le32_to_cpu(p->des2), le32_to_cpu(p->des3));
3894                         p++;
3895                 }
3896                 seq_printf(seq, "\n");
3897         }
3898 }
3899
3900 static int stmmac_sysfs_ring_read(struct seq_file *seq, void *v)
3901 {
3902         struct net_device *dev = seq->private;
3903         struct stmmac_priv *priv = netdev_priv(dev);
3904         u32 rx_count = priv->plat->rx_queues_to_use;
3905         u32 tx_count = priv->plat->tx_queues_to_use;
3906         u32 queue;
3907
3908         if ((dev->flags & IFF_UP) == 0)
3909                 return 0;
3910
3911         for (queue = 0; queue < rx_count; queue++) {
3912                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3913
3914                 seq_printf(seq, "RX Queue %d:\n", queue);
3915
3916                 if (priv->extend_desc) {
3917                         seq_printf(seq, "Extended descriptor ring:\n");
3918                         sysfs_display_ring((void *)rx_q->dma_erx,
3919                                            DMA_RX_SIZE, 1, seq);
3920                 } else {
3921                         seq_printf(seq, "Descriptor ring:\n");
3922                         sysfs_display_ring((void *)rx_q->dma_rx,
3923                                            DMA_RX_SIZE, 0, seq);
3924                 }
3925         }
3926
3927         for (queue = 0; queue < tx_count; queue++) {
3928                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
3929
3930                 seq_printf(seq, "TX Queue %d:\n", queue);
3931
3932                 if (priv->extend_desc) {
3933                         seq_printf(seq, "Extended descriptor ring:\n");
3934                         sysfs_display_ring((void *)tx_q->dma_etx,
3935                                            DMA_TX_SIZE, 1, seq);
3936                 } else {
3937                         seq_printf(seq, "Descriptor ring:\n");
3938                         sysfs_display_ring((void *)tx_q->dma_tx,
3939                                            DMA_TX_SIZE, 0, seq);
3940                 }
3941         }
3942
3943         return 0;
3944 }
3945
3946 static int stmmac_sysfs_ring_open(struct inode *inode, struct file *file)
3947 {
3948         return single_open(file, stmmac_sysfs_ring_read, inode->i_private);
3949 }
3950
3951 /* Debugfs files, should appear in /sys/kernel/debug/stmmaceth/eth0 */
3952
3953 static const struct file_operations stmmac_rings_status_fops = {
3954         .owner = THIS_MODULE,
3955         .open = stmmac_sysfs_ring_open,
3956         .read = seq_read,
3957         .llseek = seq_lseek,
3958         .release = single_release,
3959 };
3960
3961 static int stmmac_sysfs_dma_cap_read(struct seq_file *seq, void *v)
3962 {
3963         struct net_device *dev = seq->private;
3964         struct stmmac_priv *priv = netdev_priv(dev);
3965
3966         if (!priv->hw_cap_support) {
3967                 seq_printf(seq, "DMA HW features not supported\n");
3968                 return 0;
3969         }
3970
3971         seq_printf(seq, "==============================\n");
3972         seq_printf(seq, "\tDMA HW features\n");
3973         seq_printf(seq, "==============================\n");
3974
3975         seq_printf(seq, "\t10/100 Mbps: %s\n",
3976                    (priv->dma_cap.mbps_10_100) ? "Y" : "N");
3977         seq_printf(seq, "\t1000 Mbps: %s\n",
3978                    (priv->dma_cap.mbps_1000) ? "Y" : "N");
3979         seq_printf(seq, "\tHalf duplex: %s\n",
3980                    (priv->dma_cap.half_duplex) ? "Y" : "N");
3981         seq_printf(seq, "\tHash Filter: %s\n",
3982                    (priv->dma_cap.hash_filter) ? "Y" : "N");
3983         seq_printf(seq, "\tMultiple MAC address registers: %s\n",
3984                    (priv->dma_cap.multi_addr) ? "Y" : "N");
3985         seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
3986                    (priv->dma_cap.pcs) ? "Y" : "N");
3987         seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
3988                    (priv->dma_cap.sma_mdio) ? "Y" : "N");
3989         seq_printf(seq, "\tPMT Remote wake up: %s\n",
3990                    (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
3991         seq_printf(seq, "\tPMT Magic Frame: %s\n",
3992                    (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
3993         seq_printf(seq, "\tRMON module: %s\n",
3994                    (priv->dma_cap.rmon) ? "Y" : "N");
3995         seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
3996                    (priv->dma_cap.time_stamp) ? "Y" : "N");
3997         seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
3998                    (priv->dma_cap.atime_stamp) ? "Y" : "N");
3999         seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
4000                    (priv->dma_cap.eee) ? "Y" : "N");
4001         seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
4002         seq_printf(seq, "\tChecksum Offload in TX: %s\n",
4003                    (priv->dma_cap.tx_coe) ? "Y" : "N");
4004         if (priv->synopsys_id >= DWMAC_CORE_4_00) {
4005                 seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
4006                            (priv->dma_cap.rx_coe) ? "Y" : "N");
4007         } else {
4008                 seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
4009                            (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
4010                 seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
4011                            (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
4012         }
4013         seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
4014                    (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
4015         seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
4016                    priv->dma_cap.number_rx_channel);
4017         seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
4018                    priv->dma_cap.number_tx_channel);
4019         seq_printf(seq, "\tEnhanced descriptors: %s\n",
4020                    (priv->dma_cap.enh_desc) ? "Y" : "N");
4021
4022         return 0;
4023 }
4024
4025 static int stmmac_sysfs_dma_cap_open(struct inode *inode, struct file *file)
4026 {
4027         return single_open(file, stmmac_sysfs_dma_cap_read, inode->i_private);
4028 }
4029
4030 static const struct file_operations stmmac_dma_cap_fops = {
4031         .owner = THIS_MODULE,
4032         .open = stmmac_sysfs_dma_cap_open,
4033         .read = seq_read,
4034         .llseek = seq_lseek,
4035         .release = single_release,
4036 };
4037
4038 /* Use network device events to rename debugfs file entries.
4039  */
4040 static int stmmac_device_event(struct notifier_block *unused,
4041                                unsigned long event, void *ptr)
4042 {
4043         struct net_device *dev = netdev_notifier_info_to_dev(ptr);
4044         struct stmmac_priv *priv = netdev_priv(dev);
4045
4046         if (dev->netdev_ops != &stmmac_netdev_ops)
4047                 goto done;
4048
4049         switch (event) {
4050         case NETDEV_CHANGENAME:
4051                 if (priv->dbgfs_dir)
4052                         priv->dbgfs_dir = debugfs_rename(stmmac_fs_dir,
4053                                                          priv->dbgfs_dir,
4054                                                          stmmac_fs_dir,
4055                                                          dev->name);
4056                 break;
4057         }
4058 done:
4059         return NOTIFY_DONE;
4060 }
4061
4062 static struct notifier_block stmmac_notifier = {
4063         .notifier_call = stmmac_device_event,
4064 };
4065
4066 static int stmmac_init_fs(struct net_device *dev)
4067 {
4068         struct stmmac_priv *priv = netdev_priv(dev);
4069
4070         /* Create per netdev entries */
4071         priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
4072
4073         if (!priv->dbgfs_dir || IS_ERR(priv->dbgfs_dir)) {
4074                 netdev_err(priv->dev, "ERROR failed to create debugfs directory\n");
4075
4076                 return -ENOMEM;
4077         }
4078
4079         /* Entry to report DMA RX/TX rings */
4080         priv->dbgfs_rings_status =
4081                 debugfs_create_file("descriptors_status", 0444,
4082                                     priv->dbgfs_dir, dev,
4083                                     &stmmac_rings_status_fops);
4084
4085         if (!priv->dbgfs_rings_status || IS_ERR(priv->dbgfs_rings_status)) {
4086                 netdev_err(priv->dev, "ERROR creating stmmac ring debugfs file\n");
4087                 debugfs_remove_recursive(priv->dbgfs_dir);
4088
4089                 return -ENOMEM;
4090         }
4091
4092         /* Entry to report the DMA HW features */
4093         priv->dbgfs_dma_cap = debugfs_create_file("dma_cap", 0444,
4094                                                   priv->dbgfs_dir,
4095                                                   dev, &stmmac_dma_cap_fops);
4096
4097         if (!priv->dbgfs_dma_cap || IS_ERR(priv->dbgfs_dma_cap)) {
4098                 netdev_err(priv->dev, "ERROR creating stmmac MMC debugfs file\n");
4099                 debugfs_remove_recursive(priv->dbgfs_dir);
4100
4101                 return -ENOMEM;
4102         }
4103
4104         register_netdevice_notifier(&stmmac_notifier);
4105
4106         return 0;
4107 }
4108
4109 static void stmmac_exit_fs(struct net_device *dev)
4110 {
4111         struct stmmac_priv *priv = netdev_priv(dev);
4112
4113         unregister_netdevice_notifier(&stmmac_notifier);
4114         debugfs_remove_recursive(priv->dbgfs_dir);
4115 }
4116 #endif /* CONFIG_DEBUG_FS */
4117
4118 static const struct net_device_ops stmmac_netdev_ops = {
4119         .ndo_open = stmmac_open,
4120         .ndo_start_xmit = stmmac_xmit,
4121         .ndo_stop = stmmac_release,
4122         .ndo_change_mtu = stmmac_change_mtu,
4123         .ndo_fix_features = stmmac_fix_features,
4124         .ndo_set_features = stmmac_set_features,
4125         .ndo_set_rx_mode = stmmac_set_rx_mode,
4126         .ndo_tx_timeout = stmmac_tx_timeout,
4127         .ndo_do_ioctl = stmmac_ioctl,
4128         .ndo_setup_tc = stmmac_setup_tc,
4129         .ndo_select_queue = stmmac_select_queue,
4130 #ifdef CONFIG_NET_POLL_CONTROLLER
4131         .ndo_poll_controller = stmmac_poll_controller,
4132 #endif
4133         .ndo_set_mac_address = stmmac_set_mac_address,
4134 };
4135
4136 static void stmmac_reset_subtask(struct stmmac_priv *priv)
4137 {
4138         if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
4139                 return;
4140         if (test_bit(STMMAC_DOWN, &priv->state))
4141                 return;
4142
4143         netdev_err(priv->dev, "Reset adapter.\n");
4144
4145         rtnl_lock();
4146         netif_trans_update(priv->dev);
4147         while (test_and_set_bit(STMMAC_RESETING, &priv->state))
4148                 usleep_range(1000, 2000);
4149
4150         set_bit(STMMAC_DOWN, &priv->state);
4151         dev_close(priv->dev);
4152         dev_open(priv->dev);
4153         clear_bit(STMMAC_DOWN, &priv->state);
4154         clear_bit(STMMAC_RESETING, &priv->state);
4155         rtnl_unlock();
4156 }
4157
4158 static void stmmac_service_task(struct work_struct *work)
4159 {
4160         struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
4161                         service_task);
4162
4163         stmmac_reset_subtask(priv);
4164         clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
4165 }
4166
4167 /**
4168  *  stmmac_hw_init - Init the MAC device
4169  *  @priv: driver private structure
4170  *  Description: this function configures the MAC device according to
4171  *  some platform parameters or the HW capability register. It prepares the
4172  *  driver to use either ring or chain mode and to set up either enhanced or
4173  *  normal descriptors.
4174  */
4175 static int stmmac_hw_init(struct stmmac_priv *priv)
4176 {
4177         int ret;
4178
4179         /* dwmac-sun8i only works in chain mode */
4180         if (priv->plat->has_sun8i)
4181                 chain_mode = 1;
4182         priv->chain_mode = chain_mode;
4183
4184         /* Initialize HW Interface */
4185         ret = stmmac_hwif_init(priv);
4186         if (ret)
4187                 return ret;
4188
4189         /* Get the HW capability (available on GMAC cores newer than 3.50a) */
4190         priv->hw_cap_support = stmmac_get_hw_features(priv);
4191         if (priv->hw_cap_support) {
4192                 dev_info(priv->device, "DMA HW capability register supported\n");
4193
4194                 /* We can override some gmac/dma configuration fields
4195                  * (e.g. enh_desc, tx_coe) that are passed through the
4196                  * platform with the values from the HW capability
4197                  * register (if supported).
4198                  */
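                /* For example: even if the platform requested TX checksum
                 * offload, tx_coe is forced back to 0 just below when
                 * force_thresh_dma_mode is set, since TX COE cannot be used
                 * in threshold DMA mode.
                 */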
4199                 priv->plat->enh_desc = priv->dma_cap.enh_desc;
4200                 priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;
4201                 priv->hw->pmt = priv->plat->pmt;
4202
4203                 /* TXCOE doesn't work in thresh DMA mode */
4204                 if (priv->plat->force_thresh_dma_mode)
4205                         priv->plat->tx_coe = 0;
4206                 else
4207                         priv->plat->tx_coe = priv->dma_cap.tx_coe;
4208
4209                 /* In case of GMAC4 rx_coe is from HW cap register. */
4210                 priv->plat->rx_coe = priv->dma_cap.rx_coe;
4211
4212                 if (priv->dma_cap.rx_coe_type2)
4213                         priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
4214                 else if (priv->dma_cap.rx_coe_type1)
4215                         priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
4216
4217         } else {
4218                 dev_info(priv->device, "No HW DMA feature register supported\n");
4219         }
4220
4221         if (priv->plat->rx_coe) {
4222                 priv->hw->rx_csum = priv->plat->rx_coe;
4223                 dev_info(priv->device, "RX Checksum Offload Engine supported\n");
4224                 if (priv->synopsys_id < DWMAC_CORE_4_00)
4225                         dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
4226         }
4227         if (priv->plat->tx_coe)
4228                 dev_info(priv->device, "TX Checksum insertion supported\n");
4229
4230         if (priv->plat->pmt) {
4231                 dev_info(priv->device, "Wake-Up On LAN supported\n");
4232                 device_set_wakeup_capable(priv->device, 1);
4233         }
4234
4235         if (priv->dma_cap.tsoen)
4236                 dev_info(priv->device, "TSO supported\n");
4237
4238         /* Run HW quirks, if any */
4239         if (priv->hwif_quirks) {
4240                 ret = priv->hwif_quirks(priv);
4241                 if (ret)
4242                         return ret;
4243         }
4244
4245         /* Rx Watchdog is available in the COREs newer than the 3.40.
4246          * In some cases, for example on buggy HW, this feature
4247          * has to be disabled; this can be done by passing the
4248          * riwt_off field from the platform.
4249          */
4250         if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
4251             (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
4252                 priv->use_riwt = 1;
4253                 dev_info(priv->device,
4254                          "Enable RX Mitigation via HW Watchdog Timer\n");
4255         }
4256
4257         return 0;
4258 }
4259
4260 /**
4261  * stmmac_dvr_probe
4262  * @device: device pointer
4263  * @plat_dat: platform data pointer
4264  * @res: stmmac resource pointer
4265  * Description: this is the main probe function, used to allocate the
4266  * network device (alloc_etherdev) and set up the private structure.
4267  * Return:
4268  * returns 0 on success, otherwise errno.
4269  */
4270 int stmmac_dvr_probe(struct device *device,
4271                      struct plat_stmmacenet_data *plat_dat,
4272                      struct stmmac_resources *res)
4273 {
4274         struct net_device *ndev = NULL;
4275         struct stmmac_priv *priv;
4276         u32 queue, maxq;
4277         int ret = 0;
4278
4279         ndev = alloc_etherdev_mqs(sizeof(struct stmmac_priv),
4280                                   MTL_MAX_TX_QUEUES,
4281                                   MTL_MAX_RX_QUEUES);
4282         if (!ndev)
4283                 return -ENOMEM;
4284
4285         SET_NETDEV_DEV(ndev, device);
4286
4287         priv = netdev_priv(ndev);
4288         priv->device = device;
4289         priv->dev = ndev;
4290
4291         stmmac_set_ethtool_ops(ndev);
4292         priv->pause = pause;
4293         priv->plat = plat_dat;
4294         priv->ioaddr = res->addr;
4295         priv->dev->base_addr = (unsigned long)res->addr;
4296
4297         priv->dev->irq = res->irq;
4298         priv->wol_irq = res->wol_irq;
4299         priv->lpi_irq = res->lpi_irq;
4300
4301         if (res->mac)
4302                 memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN);
4303
4304         dev_set_drvdata(device, priv->dev);
4305
4306         /* Verify driver arguments */
4307         stmmac_verify_args();
4308
4309         /* Allocate workqueue */
4310         priv->wq = create_singlethread_workqueue("stmmac_wq");
4311         if (!priv->wq) {
4312                 dev_err(priv->device, "failed to create workqueue\n");
4313                 ret = -ENOMEM;
4314                 goto error_wq;
4315         }
4316
4317         INIT_WORK(&priv->service_task, stmmac_service_task);
4318
4319         /* Override with the kernel parameter if supplied (XXX CRS XXX:
4320          * this needs to support multiple instances).
4321          */
4322         if ((phyaddr >= 0) && (phyaddr <= 31))
4323                 priv->plat->phy_addr = phyaddr;
4324
4325         if (priv->plat->stmmac_rst) {
4326                 ret = reset_control_assert(priv->plat->stmmac_rst);
4327                 reset_control_deassert(priv->plat->stmmac_rst);
4328                 /* Some reset controllers have only a reset callback instead
4329                  * of an assert + deassert callback pair.
4330                  */
4331                 if (ret == -ENOTSUPP)
4332                         reset_control_reset(priv->plat->stmmac_rst);
4333         }
4334
4335         /* Init MAC and get the capabilities */
4336         ret = stmmac_hw_init(priv);
4337         if (ret)
4338                 goto error_hw_init;
4339
4340         stmmac_check_ether_addr(priv);
4341
4342         /* Configure real RX and TX queues */
4343         netif_set_real_num_rx_queues(ndev, priv->plat->rx_queues_to_use);
4344         netif_set_real_num_tx_queues(ndev, priv->plat->tx_queues_to_use);
4345
4346         ndev->netdev_ops = &stmmac_netdev_ops;
4347
4348         ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
4349                             NETIF_F_RXCSUM;
4350
4351         ret = stmmac_tc_init(priv, priv);
4352         if (!ret) {
4353                 ndev->hw_features |= NETIF_F_HW_TC;
4354         }
4355
4356         if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
4357                 ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
4358                 priv->tso = true;
4359                 dev_info(priv->device, "TSO feature enabled\n");
4360         }
4361         ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
4362         ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
4363 #ifdef STMMAC_VLAN_TAG_USED
4364         /* Both mac100 and gmac support receive VLAN tag detection */
4365         ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
4366 #endif
4367         priv->msg_enable = netif_msg_init(debug, default_msg_level);
4368
4369         /* MTU range: 46 - hw-specific max */
4370         ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
4371         if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
4372                 ndev->max_mtu = JUMBO_LEN;
4373         else if (priv->plat->has_xgmac)
4374                 ndev->max_mtu = XGMAC_JUMBO_LEN;
4375         else
4376                 ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
4377         /* Do not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu,
4378          * nor if plat->maxmtu < ndev->min_mtu, which is an invalid range.
4379          */
4380         if ((priv->plat->maxmtu < ndev->max_mtu) &&
4381             (priv->plat->maxmtu >= ndev->min_mtu))
4382                 ndev->max_mtu = priv->plat->maxmtu;
4383         else if (priv->plat->maxmtu < ndev->min_mtu)
4384                 dev_warn(priv->device,
4385                          "%s: warning: maxmtu having invalid value (%d)\n",
4386                          __func__, priv->plat->maxmtu);
4387
4388         if (flow_ctrl)
4389                 priv->flow_ctrl = FLOW_AUTO;    /* RX/TX pause on */
4390
4391         /* Setup channels NAPI */
4392         maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
4393
4394         for (queue = 0; queue < maxq; queue++) {
4395                 struct stmmac_channel *ch = &priv->channel[queue];
4396
4397                 ch->priv_data = priv;
4398                 ch->index = queue;
4399
4400                 if (queue < priv->plat->rx_queues_to_use)
4401                         ch->has_rx = true;
4402                 if (queue < priv->plat->tx_queues_to_use)
4403                         ch->has_tx = true;
4404
4405                 netif_napi_add(ndev, &ch->napi, stmmac_napi_poll,
4406                                NAPI_POLL_WEIGHT);
4407         }
4408
4409         mutex_init(&priv->lock);
4410
4411         /* If a specific clk_csr value is passed from the platform
4412          * this means that the CSR Clock Range selection cannot be
4413          * changed at run-time and it is fixed. Otherwise the driver
4414          * will try to set the MDC clock dynamically according to the
4415          * actual csr clock input.
4416          */
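        /* Illustrative note (a sketch): when plat->clk_csr is left at zero,
         * the stmmac_clk_csr_set() call below derives the MDC clock range
         * from clk_get_rate(plat->stmmac_clk); a platform that instead passes
         * a non-zero clk_csr pins the divider to that fixed value.
         */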
4417         if (!priv->plat->clk_csr)
4418                 stmmac_clk_csr_set(priv);
4419         else
4420                 priv->clk_csr = priv->plat->clk_csr;
4421
4422         stmmac_check_pcs_mode(priv);
4423
4424         if (priv->hw->pcs != STMMAC_PCS_RGMII  &&
4425             priv->hw->pcs != STMMAC_PCS_TBI &&
4426             priv->hw->pcs != STMMAC_PCS_RTBI) {
4427                 /* MDIO bus Registration */
4428                 ret = stmmac_mdio_register(ndev);
4429                 if (ret < 0) {
4430                         dev_err(priv->device,
4431                                 "%s: MDIO bus (id: %d) registration failed",
4432                                 __func__, priv->plat->bus_id);
4433                         goto error_mdio_register;
4434                 }
4435         }
4436
4437         ret = register_netdev(ndev);
4438         if (ret) {
4439                 dev_err(priv->device, "%s: ERROR %i registering the device\n",
4440                         __func__, ret);
4441                 goto error_netdev_register;
4442         }
4443
4444 #ifdef CONFIG_DEBUG_FS
4445         ret = stmmac_init_fs(ndev);
4446         if (ret < 0)
4447                 netdev_warn(priv->dev, "%s: failed debugFS registration\n",
4448                             __func__);
4449 #endif
4450
4451         return ret;
4452
4453 error_netdev_register:
4454         if (priv->hw->pcs != STMMAC_PCS_RGMII &&
4455             priv->hw->pcs != STMMAC_PCS_TBI &&
4456             priv->hw->pcs != STMMAC_PCS_RTBI)
4457                 stmmac_mdio_unregister(ndev);
4458 error_mdio_register:
4459         for (queue = 0; queue < maxq; queue++) {
4460                 struct stmmac_channel *ch = &priv->channel[queue];
4461
4462                 netif_napi_del(&ch->napi);
4463         }
4464 error_hw_init:
4465         destroy_workqueue(priv->wq);
4466 error_wq:
4467         free_netdev(ndev);
4468
4469         return ret;
4470 }
4471 EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
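
/* A minimal sketch (not part of this driver) of how a bus glue driver might
 * call stmmac_dvr_probe(). The foo_* names are hypothetical; real platform
 * glue usually obtains plat_dat via stmmac_probe_config_dt() and the
 * resources via stmmac_get_platform_resources() instead of open-coding them.
 * Here res.wol_irq simply reuses the main IRQ because this hypothetical board
 * has no dedicated Wake-on-LAN interrupt, and res.lpi_irq is left unwired.
 *
 *	static int foo_glue_probe(struct platform_device *pdev)
 *	{
 *		struct plat_stmmacenet_data *plat_dat = foo_get_plat_data(pdev);
 *		struct stmmac_resources res = { };
 *		struct resource *r;
 *
 *		r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 *		res.addr = devm_ioremap_resource(&pdev->dev, r);
 *		if (IS_ERR(res.addr))
 *			return PTR_ERR(res.addr);
 *
 *		res.irq = platform_get_irq(pdev, 0);
 *		res.wol_irq = res.irq;
 *		res.lpi_irq = -ENXIO;
 *
 *		return stmmac_dvr_probe(&pdev->dev, plat_dat, &res);
 *	}
 *
 * The matching .remove callback would then call stmmac_dvr_remove(&pdev->dev).
 */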
4472
4473 /**
4474  * stmmac_dvr_remove
4475  * @dev: device pointer
4476  * Description: this function resets the TX/RX processes, disables the MAC RX/TX,
4477  * changes the link status and releases the DMA descriptor rings.
4478  */
4479 int stmmac_dvr_remove(struct device *dev)
4480 {
4481         struct net_device *ndev = dev_get_drvdata(dev);
4482         struct stmmac_priv *priv = netdev_priv(ndev);
4483
4484         netdev_info(priv->dev, "%s: removing driver", __func__);
4485
4486 #ifdef CONFIG_DEBUG_FS
4487         stmmac_exit_fs(ndev);
4488 #endif
4489         stmmac_stop_all_dma(priv);
4490
4491         stmmac_mac_set(priv, priv->ioaddr, false);
4492         netif_carrier_off(ndev);
4493         unregister_netdev(ndev);
4494         if (priv->plat->stmmac_rst)
4495                 reset_control_assert(priv->plat->stmmac_rst);
4496         clk_disable_unprepare(priv->plat->pclk);
4497         clk_disable_unprepare(priv->plat->stmmac_clk);
4498         if (priv->hw->pcs != STMMAC_PCS_RGMII &&
4499             priv->hw->pcs != STMMAC_PCS_TBI &&
4500             priv->hw->pcs != STMMAC_PCS_RTBI)
4501                 stmmac_mdio_unregister(ndev);
4502         destroy_workqueue(priv->wq);
4503         mutex_destroy(&priv->lock);
4504         free_netdev(ndev);
4505
4506         return 0;
4507 }
4508 EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
4509
4510 /**
4511  * stmmac_suspend - suspend callback
4512  * @dev: device pointer
4513  * Description: this is the function to suspend the device; it is called
4514  * by the platform driver to stop the network queue, program the PMT
4515  * register (for WoL) and clean up/release the driver resources.
4516  */
4517 int stmmac_suspend(struct device *dev)
4518 {
4519         struct net_device *ndev = dev_get_drvdata(dev);
4520         struct stmmac_priv *priv = netdev_priv(ndev);
4521         u32 chan;
4522
4523         if (!ndev || !netif_running(ndev))
4524                 return 0;
4525
4526         if (ndev->phydev)
4527                 phy_stop(ndev->phydev);
4528
4529         mutex_lock(&priv->lock);
4530
4531         netif_device_detach(ndev);
4532
4533         stmmac_disable_all_queues(priv);
4534
4535         for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
4536                 del_timer_sync(&priv->tx_queue[chan].txtimer);
4537
4538         if (priv->eee_enabled) {
4539                 priv->tx_path_in_lpi_mode = false;
4540                 del_timer_sync(&priv->eee_ctrl_timer);
4541         }
4542
4543         /* Stop TX/RX DMA */
4544         stmmac_stop_all_dma(priv);
4545
4546         /* Enable Power down mode by programming the PMT regs */
4547         if (device_may_wakeup(priv->device)) {
4548                 stmmac_pmt(priv, priv->hw, priv->wolopts);
4549                 priv->irq_wake = 1;
4550         } else {
4551                 stmmac_mac_set(priv, priv->ioaddr, false);
4552                 pinctrl_pm_select_sleep_state(priv->device);
4553                 /* Disable the clocks: Wake-on-LAN is not in use */
4554                 if (priv->plat->clk_ptp_ref)
4555                         clk_disable_unprepare(priv->plat->clk_ptp_ref);
4556                 clk_disable_unprepare(priv->plat->pclk);
4557                 clk_disable_unprepare(priv->plat->stmmac_clk);
4558         }
4559         mutex_unlock(&priv->lock);
4560
4561         priv->oldlink = false;
4562         priv->speed = SPEED_UNKNOWN;
4563         priv->oldduplex = DUPLEX_UNKNOWN;
4564         return 0;
4565 }
4566 EXPORT_SYMBOL_GPL(stmmac_suspend);
4567
4568 /**
4569  * stmmac_reset_queues_param - reset queue parameters
4570  * @priv: driver private structure
4571  */
4572 static void stmmac_reset_queues_param(struct stmmac_priv *priv)
4573 {
4574         u32 rx_cnt = priv->plat->rx_queues_to_use;
4575         u32 tx_cnt = priv->plat->tx_queues_to_use;
4576         u32 queue;
4577
4578         for (queue = 0; queue < rx_cnt; queue++) {
4579                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4580
4581                 rx_q->cur_rx = 0;
4582                 rx_q->dirty_rx = 0;
4583         }
4584
4585         for (queue = 0; queue < tx_cnt; queue++) {
4586                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
4587
4588                 tx_q->cur_tx = 0;
4589                 tx_q->dirty_tx = 0;
4590                 tx_q->mss = 0;
4591
4592                 netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
4593         }
4594 }
4595
4596 /**
4597  * stmmac_resume - resume callback
4598  * @dev: device pointer
4599  * Description: when resuming, this function is invoked to set up the DMA and
4600  * CORE in a usable state.
4601  */
4602 int stmmac_resume(struct device *dev)
4603 {
4604         struct net_device *ndev = dev_get_drvdata(dev);
4605         struct stmmac_priv *priv = netdev_priv(ndev);
4606
4607         if (!netif_running(ndev))
4608                 return 0;
4609
4610         /* The Power Down bit in the PMT register is cleared
4611          * automatically as soon as a magic packet or a Wake-up frame
4612          * is received. Anyway, it's better to clear this bit
4613          * manually because it can cause problems when resuming
4614          * from another device (e.g. serial console).
4615          */
4616         if (device_may_wakeup(priv->device)) {
4617                 mutex_lock(&priv->lock);
4618                 stmmac_pmt(priv, priv->hw, 0);
4619                 mutex_unlock(&priv->lock);
4620                 priv->irq_wake = 0;
4621         } else {
4622                 pinctrl_pm_select_default_state(priv->device);
4623                 /* enable the clk previously disabled */
4624                 clk_prepare_enable(priv->plat->stmmac_clk);
4625                 clk_prepare_enable(priv->plat->pclk);
4626                 if (priv->plat->clk_ptp_ref)
4627                         clk_prepare_enable(priv->plat->clk_ptp_ref);
4628                 /* reset the phy so that it's ready */
4629                 if (priv->mii)
4630                         stmmac_mdio_reset(priv->mii);
4631         }
4632
4633         netif_device_attach(ndev);
4634
4635         mutex_lock(&priv->lock);
4636
4637         stmmac_reset_queues_param(priv);
4638
4639         stmmac_free_tx_skbufs(priv);
4640         stmmac_clear_descriptors(priv);
4641
4642         stmmac_hw_setup(ndev, false);
4643         stmmac_init_tx_coalesce(priv);
4644         stmmac_set_rx_mode(ndev);
4645
4646         stmmac_enable_all_queues(priv);
4647
4648         mutex_unlock(&priv->lock);
4649
4650         if (ndev->phydev)
4651                 phy_start(ndev->phydev);
4652
4653         return 0;
4654 }
4655 EXPORT_SYMBOL_GPL(stmmac_resume);
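
/* A wiring sketch (not part of this file): bus glue drivers typically hook
 * stmmac_suspend()/stmmac_resume() into their dev_pm_ops, e.g.:
 *
 *	static SIMPLE_DEV_PM_OPS(foo_glue_pm_ops, stmmac_suspend, stmmac_resume);
 *
 * and point their driver's .pm field at &foo_glue_pm_ops; "foo_glue" is a
 * hypothetical name.
 */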
4656
4657 #ifndef MODULE
4658 static int __init stmmac_cmdline_opt(char *str)
4659 {
4660         char *opt;
4661
4662         if (!str || !*str)
4663                 return 1;
4664         while ((opt = strsep(&str, ",")) != NULL) {
4665                 if (!strncmp(opt, "debug:", 6)) {
4666                         if (kstrtoint(opt + 6, 0, &debug))
4667                                 goto err;
4668                 } else if (!strncmp(opt, "phyaddr:", 8)) {
4669                         if (kstrtoint(opt + 8, 0, &phyaddr))
4670                                 goto err;
4671                 } else if (!strncmp(opt, "buf_sz:", 7)) {
4672                         if (kstrtoint(opt + 7, 0, &buf_sz))
4673                                 goto err;
4674                 } else if (!strncmp(opt, "tc:", 3)) {
4675                         if (kstrtoint(opt + 3, 0, &tc))
4676                                 goto err;
4677                 } else if (!strncmp(opt, "watchdog:", 9)) {
4678                         if (kstrtoint(opt + 9, 0, &watchdog))
4679                                 goto err;
4680                 } else if (!strncmp(opt, "flow_ctrl:", 10)) {
4681                         if (kstrtoint(opt + 10, 0, &flow_ctrl))
4682                                 goto err;
4683                 } else if (!strncmp(opt, "pause:", 6)) {
4684                         if (kstrtoint(opt + 6, 0, &pause))
4685                                 goto err;
4686                 } else if (!strncmp(opt, "eee_timer:", 10)) {
4687                         if (kstrtoint(opt + 10, 0, &eee_timer))
4688                                 goto err;
4689                 } else if (!strncmp(opt, "chain_mode:", 11)) {
4690                         if (kstrtoint(opt + 11, 0, &chain_mode))
4691                                 goto err;
4692                 }
4693         }
4694         return 1;
4695
4696 err:
4697         pr_err("%s: ERROR broken module parameter conversion", __func__);
4698         return 1;
4699 }
4700
4701 __setup("stmmaceth=", stmmac_cmdline_opt);
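
/* Built-in usage example (a sketch): the options parsed above are
 * comma-separated "name:value" pairs passed via the "stmmaceth=" boot
 * parameter, e.g. on the kernel command line:
 *
 *	stmmaceth=watchdog:4000,debug:16,phyaddr:1,chain_mode:1
 *
 * When the driver is built as a module, the corresponding module parameters
 * are used instead of this command line option.
 */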
4702 #endif /* MODULE */
4703
4704 static int __init stmmac_init(void)
4705 {
4706 #ifdef CONFIG_DEBUG_FS
4707         /* Create debugfs main directory if it doesn't exist yet */
4708         if (!stmmac_fs_dir) {
4709                 stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
4710
4711                 if (!stmmac_fs_dir || IS_ERR(stmmac_fs_dir)) {
4712                         pr_err("ERROR %s, debugfs create directory failed\n",
4713                                STMMAC_RESOURCE_NAME);
4714
4715                         return -ENOMEM;
4716                 }
4717         }
4718 #endif
4719
4720         return 0;
4721 }
4722
4723 static void __exit stmmac_exit(void)
4724 {
4725 #ifdef CONFIG_DEBUG_FS
4726         debugfs_remove_recursive(stmmac_fs_dir);
4727 #endif
4728 }
4729
4730 module_init(stmmac_init)
4731 module_exit(stmmac_exit)
4732
4733 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
4734 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
4735 MODULE_LICENSE("GPL");