// SPDX-License-Identifier: GPL-2.0
/*
 * This file is based on code from OCTEON SDK by Cavium Networks.
 *
 * Copyright (c) 2003-2007 Cavium Networks
 */

#include <linux/platform_device.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/phy.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>

#include <net/dst.h>

#include <asm/octeon/octeon.h>

#include "ethernet-defines.h"
#include "octeon-ethernet.h"
#include "ethernet-mem.h"
#include "ethernet-rx.h"
#include "ethernet-tx.h"
#include "ethernet-mdio.h"
#include "ethernet-util.h"

#include <asm/octeon/cvmx-pip.h>
#include <asm/octeon/cvmx-pko.h>
#include <asm/octeon/cvmx-fau.h>
#include <asm/octeon/cvmx-ipd.h>
#include <asm/octeon/cvmx-helper.h>
#include <asm/octeon/cvmx-asxx-defs.h>
#include <asm/octeon/cvmx-gmxx-defs.h>
#include <asm/octeon/cvmx-smix-defs.h>

#define OCTEON_MAX_MTU 65392

static int num_packet_buffers = 1024;
module_param(num_packet_buffers, int, 0444);
MODULE_PARM_DESC(num_packet_buffers, "\n"
        "\tNumber of packet buffers to allocate and store in the\n"
        "\tFPA. By default, 1024 packet buffers are used.\n");

static int pow_receive_group = 15;
module_param(pow_receive_group, int, 0444);
MODULE_PARM_DESC(pow_receive_group, "\n"
        "\tPOW group to receive packets from. All ethernet hardware\n"
        "\twill be configured to send incoming packets to this POW\n"
        "\tgroup. Also any other software can submit packets to this\n"
        "\tgroup for the kernel to process.");

static int receive_group_order;
module_param(receive_group_order, int, 0444);
MODULE_PARM_DESC(receive_group_order, "\n"
        "\tOrder (0..4) of receive groups to take into use. Ethernet hardware\n"
        "\twill be configured to send incoming packets to multiple POW\n"
        "\tgroups. pow_receive_group parameter is ignored when multiple\n"
        "\tgroups are taken into use and groups are allocated starting\n"
        "\tfrom 0. By default, a single group is used.\n");

int pow_send_group = -1;
module_param(pow_send_group, int, 0644);
MODULE_PARM_DESC(pow_send_group, "\n"
        "\tPOW group to send packets to other software on. This\n"
        "\tcontrols the creation of the virtual device pow0.\n"
        "\talways_use_pow also depends on this value.");

int always_use_pow;
module_param(always_use_pow, int, 0444);
MODULE_PARM_DESC(always_use_pow, "\n"
        "\tWhen set, always send to the pow group. This will cause\n"
        "\tpackets sent to real ethernet devices to be sent to the\n"
        "\tPOW group instead of the hardware. Unless some other\n"
        "\tapplication changes the config, packets will still be\n"
        "\treceived from the low level hardware. Use this option\n"
        "\tto allow a CVMX app to intercept all packets from the\n"
        "\tlinux kernel. You must specify pow_send_group along with\n"
        "\tthis option.");

char pow_send_list[128] = "";
module_param_string(pow_send_list, pow_send_list, sizeof(pow_send_list), 0444);
MODULE_PARM_DESC(pow_send_list, "\n"
        "\tComma separated list of ethernet devices that should use the\n"
        "\tPOW for transmit instead of the actual ethernet hardware. This\n"
        "\tis a per port version of always_use_pow. always_use_pow takes\n"
        "\tprecedence over this list. For example, setting this to\n"
        "\t\"eth2,spi3,spi7\" would cause these three devices to transmit\n"
        "\tusing the pow_send_group.");

int rx_napi_weight = 32;
module_param(rx_napi_weight, int, 0444);
MODULE_PARM_DESC(rx_napi_weight, "The NAPI WEIGHT parameter.");

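/*
 * Example usage (hypothetical values), assuming the driver is built as
 * the octeon-ethernet module:
 *
 *   modprobe octeon-ethernet num_packet_buffers=2048 receive_group_order=2
 *
 * All of the parameters above except pow_send_group (0644) are read-only
 * (0444) once the module is loaded.
 */
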
/* Mask indicating which receive groups are in use. */
int pow_receive_groups;

/*
 * cvm_oct_poll_queue_stopping - flag to indicate polling should stop.
 *
 * Set to one right before cvm_oct_poll_queue is destroyed.
 */
atomic_t cvm_oct_poll_queue_stopping = ATOMIC_INIT(0);

/*
 * Array of every ethernet device owned by this driver indexed by
 * the ipd input port number.
 */
struct net_device *cvm_oct_device[TOTAL_NUMBER_OF_PORTS];

u64 cvm_oct_tx_poll_interval;

static void cvm_oct_rx_refill_worker(struct work_struct *work);
static DECLARE_DELAYED_WORK(cvm_oct_rx_refill_work, cvm_oct_rx_refill_worker);

static void cvm_oct_rx_refill_worker(struct work_struct *work)
{
        /*
         * FPA 0 may have been drained, try to refill it if we need
         * more than num_packet_buffers / 2, otherwise normal receive
         * processing will refill it.  If it were drained, no packets
         * could be received so cvm_oct_napi_poll would never be
         * invoked to do the refill.
         */
        cvm_oct_rx_refill_pool(num_packet_buffers / 2);

        if (!atomic_read(&cvm_oct_poll_queue_stopping))
                schedule_delayed_work(&cvm_oct_rx_refill_work, HZ);
}

static void cvm_oct_periodic_worker(struct work_struct *work)
{
        struct octeon_ethernet *priv = container_of(work,
                                                    struct octeon_ethernet,
                                                    port_periodic_work.work);

        if (priv->poll)
                priv->poll(cvm_oct_device[priv->port]);

        cvm_oct_device[priv->port]->netdev_ops->ndo_get_stats(
                                                cvm_oct_device[priv->port]);

        if (!atomic_read(&cvm_oct_poll_queue_stopping))
                schedule_delayed_work(&priv->port_periodic_work, HZ);
}

static void cvm_oct_configure_common_hw(void)
{
        /* Setup the FPA */
        cvmx_fpa_enable();
        cvm_oct_mem_fill_fpa(CVMX_FPA_PACKET_POOL, CVMX_FPA_PACKET_POOL_SIZE,
                             num_packet_buffers);
        cvm_oct_mem_fill_fpa(CVMX_FPA_WQE_POOL, CVMX_FPA_WQE_POOL_SIZE,
                             num_packet_buffers);
        if (CVMX_FPA_OUTPUT_BUFFER_POOL != CVMX_FPA_PACKET_POOL)
                cvm_oct_mem_fill_fpa(CVMX_FPA_OUTPUT_BUFFER_POOL,
                                     CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE, 1024);

#ifdef __LITTLE_ENDIAN
        {
                union cvmx_ipd_ctl_status ipd_ctl_status;

                ipd_ctl_status.u64 = cvmx_read_csr(CVMX_IPD_CTL_STATUS);
                ipd_ctl_status.s.pkt_lend = 1;
                ipd_ctl_status.s.wqe_lend = 1;
                cvmx_write_csr(CVMX_IPD_CTL_STATUS, ipd_ctl_status.u64);
        }
#endif

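        /*
         * Random Early Discard thresholds (a sketch of the helper's
         * convention, not verified against every SDK version): traffic
         * passes while more than num_packet_buffers / 4 buffers remain
         * free, and is dropped once fewer than num_packet_buffers / 8
         * remain.
         */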
        cvmx_helper_setup_red(num_packet_buffers / 4, num_packet_buffers / 8);
}

/**
 * cvm_oct_free_work - Free a work queue entry
 *
 * @work_queue_entry: Work queue entry to free
 *
 * Returns Zero on success, Negative on failure.
 */
int cvm_oct_free_work(void *work_queue_entry)
{
        cvmx_wqe_t *work = work_queue_entry;

        int segments = work->word2.s.bufs;
        union cvmx_buf_ptr segment_ptr = work->packet_ptr;

        while (segments--) {
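                /*
                 * The next-buffer pointer lives in the 8 bytes just
                 * before each segment's data; segments with the I
                 * ("don't free") bit set are skipped by the check
                 * below and stay with their owner.
                 */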
                union cvmx_buf_ptr next_ptr = *(union cvmx_buf_ptr *)
                        cvmx_phys_to_ptr(segment_ptr.s.addr - 8);
                if (unlikely(!segment_ptr.s.i))
                        cvmx_fpa_free(cvm_oct_get_buffer_ptr(segment_ptr),
                                      segment_ptr.s.pool,
                                      CVMX_FPA_PACKET_POOL_SIZE / 128);
                segment_ptr = next_ptr;
        }
        cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, 1);

        return 0;
}
EXPORT_SYMBOL(cvm_oct_free_work);

/**
 * cvm_oct_common_get_stats - get the low level ethernet statistics
 * @dev:    Device to get the statistics from
 *
 * Returns Pointer to the statistics
 */
static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
{
        cvmx_pip_port_status_t rx_status;
        cvmx_pko_port_status_t tx_status;
        struct octeon_ethernet *priv = netdev_priv(dev);

        if (priv->port < CVMX_PIP_NUM_INPUT_PORTS) {
                if (octeon_is_simulation()) {
                        /* The simulator doesn't support statistics */
                        memset(&rx_status, 0, sizeof(rx_status));
                        memset(&tx_status, 0, sizeof(tx_status));
                } else {
                        cvmx_pip_get_port_status(priv->port, 1, &rx_status);
                        cvmx_pko_get_port_status(priv->port, 1, &tx_status);
                }

                dev->stats.rx_packets += rx_status.inb_packets;
                dev->stats.tx_packets += tx_status.packets;
                dev->stats.rx_bytes += rx_status.inb_octets;
                dev->stats.tx_bytes += tx_status.octets;
                dev->stats.multicast += rx_status.multicast_packets;
                dev->stats.rx_crc_errors += rx_status.inb_errors;
                dev->stats.rx_frame_errors += rx_status.fcs_align_err_packets;
                dev->stats.rx_dropped += rx_status.dropped_packets;
        }

        return &dev->stats;
}

/**
 * cvm_oct_common_change_mtu - change the link MTU
 * @dev:     Device to change
 * @new_mtu: The new MTU
 *
 * Returns Zero on success
 */
static int cvm_oct_common_change_mtu(struct net_device *dev, int new_mtu)
{
        struct octeon_ethernet *priv = netdev_priv(dev);
        int interface = INTERFACE(priv->port);
#if IS_ENABLED(CONFIG_VLAN_8021Q)
        int vlan_bytes = VLAN_HLEN;
#else
        int vlan_bytes = 0;
#endif
        int mtu_overhead = ETH_HLEN + ETH_FCS_LEN + vlan_bytes;

        dev->mtu = new_mtu;

        if ((interface < 2) &&
            (cvmx_helper_interface_get_mode(interface) !=
                CVMX_HELPER_INTERFACE_MODE_SPI)) {
                int index = INDEX(priv->port);
                /* Add ethernet header and FCS, and VLAN if configured. */
                int max_packet = new_mtu + mtu_overhead;

                if (OCTEON_IS_MODEL(OCTEON_CN3XXX) ||
                    OCTEON_IS_MODEL(OCTEON_CN58XX)) {
                        /* Signal errors on packets larger than the MTU */
                        cvmx_write_csr(CVMX_GMXX_RXX_FRM_MAX(index, interface),
                                       max_packet);
                } else {
                        /*
                         * Set the hardware to truncate packets larger
                         * than the MTU and smaller than 64 bytes.
                         */
                        union cvmx_pip_frm_len_chkx frm_len_chk;

                        frm_len_chk.u64 = 0;
                        frm_len_chk.s.minlen = VLAN_ETH_ZLEN;
                        frm_len_chk.s.maxlen = max_packet;
                        cvmx_write_csr(CVMX_PIP_FRM_LEN_CHKX(interface),
                                       frm_len_chk.u64);
                }
                /*
                 * Set the hardware to truncate packets larger than
                 * the MTU. The jabber register must be set to a
                 * multiple of 8 bytes, so round up.
                 */
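                /*
                 * Example: with CONFIG_VLAN_8021Q enabled and
                 * new_mtu = 1500, max_packet = 1500 + 14 + 4 + 4 = 1522
                 * and the jabber register is rounded up to 1528.
                 */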
                cvmx_write_csr(CVMX_GMXX_RXX_JABBER(index, interface),
                               (max_packet + 7) & ~7u);
        }
        return 0;
}

/**
 * cvm_oct_common_set_multicast_list - set the multicast list
 * @dev:    Device to work on
 */
static void cvm_oct_common_set_multicast_list(struct net_device *dev)
{
        union cvmx_gmxx_prtx_cfg gmx_cfg;
        struct octeon_ethernet *priv = netdev_priv(dev);
        int interface = INTERFACE(priv->port);

        if ((interface < 2) &&
            (cvmx_helper_interface_get_mode(interface) !=
                CVMX_HELPER_INTERFACE_MODE_SPI)) {
                union cvmx_gmxx_rxx_adr_ctl control;
                int index = INDEX(priv->port);

                control.u64 = 0;
                control.s.bcst = 1;     /* Allow broadcast MAC addresses */

                if (!netdev_mc_empty(dev) || (dev->flags & IFF_ALLMULTI) ||
                    (dev->flags & IFF_PROMISC))
                        /* Force accept multicast packets */
                        control.s.mcst = 2;
                else
                        /* Force reject multicast packets */
                        control.s.mcst = 1;

                if (dev->flags & IFF_PROMISC)
                        /*
                         * Reject matches if promisc. Since CAM is
                         * shut off, should accept everything.
                         */
                        control.s.cam_mode = 0;
                else
                        /* Filter packets based on the CAM */
                        control.s.cam_mode = 1;

                gmx_cfg.u64 =
                    cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
                cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
                               gmx_cfg.u64 & ~1ull);
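                /*
                 * Bit 0 of GMXX_PRTX_CFG is the port enable (the same
                 * bit set via gmx_cfg.s.en in cvm_oct_common_open): the
                 * port is held disabled while the address filter is
                 * reprogrammed, then restored below.
                 */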

                cvmx_write_csr(CVMX_GMXX_RXX_ADR_CTL(index, interface),
                               control.u64);
                if (dev->flags & IFF_PROMISC)
                        cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM_EN
                                       (index, interface), 0);
                else
                        cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM_EN
                                       (index, interface), 1);

                cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
                               gmx_cfg.u64);
        }
}

static int cvm_oct_set_mac_filter(struct net_device *dev)
{
        struct octeon_ethernet *priv = netdev_priv(dev);
        union cvmx_gmxx_prtx_cfg gmx_cfg;
        int interface = INTERFACE(priv->port);

        if ((interface < 2) &&
            (cvmx_helper_interface_get_mode(interface) !=
                CVMX_HELPER_INTERFACE_MODE_SPI)) {
                int i;
                u8 *ptr = dev->dev_addr;
                u64 mac = 0;
                int index = INDEX(priv->port);

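                /*
                 * Assemble the 48-bit MAC into a u64, most significant
                 * byte first: e.g. 00:11:22:33:44:55 becomes
                 * 0x001122334455.
                 */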
                for (i = 0; i < 6; i++)
                        mac = (mac << 8) | (u64)ptr[i];

                gmx_cfg.u64 =
                    cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
                cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
                               gmx_cfg.u64 & ~1ull);

                cvmx_write_csr(CVMX_GMXX_SMACX(index, interface), mac);
                cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM0(index, interface),
                               ptr[0]);
                cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM1(index, interface),
                               ptr[1]);
                cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM2(index, interface),
                               ptr[2]);
                cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM3(index, interface),
                               ptr[3]);
                cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM4(index, interface),
                               ptr[4]);
                cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM5(index, interface),
                               ptr[5]);
                cvm_oct_common_set_multicast_list(dev);
                cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
                               gmx_cfg.u64);
        }
        return 0;
}

/**
 * cvm_oct_common_set_mac_address - set the hardware MAC address for a device
 * @dev:    The device in question.
 * @addr:   Socket address.
 *
 * Returns Zero on success
 */
static int cvm_oct_common_set_mac_address(struct net_device *dev, void *addr)
{
        int r = eth_mac_addr(dev, addr);

        if (r)
                return r;
        return cvm_oct_set_mac_filter(dev);
}

/**
 * cvm_oct_common_init - per network device initialization
 * @dev:    Device to initialize
 *
 * Returns Zero on success
 */
int cvm_oct_common_init(struct net_device *dev)
{
        struct octeon_ethernet *priv = netdev_priv(dev);
        const u8 *mac = NULL;

        if (priv->of_node)
                mac = of_get_mac_address(priv->of_node);

        if (mac)
                ether_addr_copy(dev->dev_addr, mac);
        else
                eth_hw_addr_random(dev);

        /*
         * Force the interface to use the POW send if always_use_pow
         * was specified or it is in the pow send list.
         */
        if ((pow_send_group != -1) &&
            (always_use_pow || strstr(pow_send_list, dev->name)))
                priv->queue = -1;

        if (priv->queue != -1)
                dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;

        /* We do our own locking, Linux doesn't need to */
        dev->features |= NETIF_F_LLTX;
        dev->ethtool_ops = &cvm_oct_ethtool_ops;

        cvm_oct_set_mac_filter(dev);
        dev_set_mtu(dev, dev->mtu);

        /*
         * Zero out stats for port so we won't mistakenly show
         * counters from the bootloader.
         */
        memset(dev->netdev_ops->ndo_get_stats(dev), 0,
               sizeof(struct net_device_stats));

        if (dev->netdev_ops->ndo_stop)
                dev->netdev_ops->ndo_stop(dev);

        return 0;
}

void cvm_oct_common_uninit(struct net_device *dev)
{
        if (dev->phydev)
                phy_disconnect(dev->phydev);
}

int cvm_oct_common_open(struct net_device *dev,
                        void (*link_poll)(struct net_device *))
{
        union cvmx_gmxx_prtx_cfg gmx_cfg;
        struct octeon_ethernet *priv = netdev_priv(dev);
        int interface = INTERFACE(priv->port);
        int index = INDEX(priv->port);
        cvmx_helper_link_info_t link_info;
        int rv;

        rv = cvm_oct_phy_setup_device(dev);
        if (rv)
                return rv;

        gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
        gmx_cfg.s.en = 1;
        if (octeon_has_feature(OCTEON_FEATURE_PKND))
                gmx_cfg.s.pknd = priv->port;
        cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);

        if (octeon_is_simulation())
                return 0;

        if (dev->phydev) {
                int r = phy_read_status(dev->phydev);

                if (r == 0 && dev->phydev->link == 0)
                        netif_carrier_off(dev);
                cvm_oct_adjust_link(dev);
        } else {
                link_info = cvmx_helper_link_get(priv->port);
                if (!link_info.s.link_up)
                        netif_carrier_off(dev);
                priv->poll = link_poll;
                link_poll(dev);
        }

        return 0;
}

void cvm_oct_link_poll(struct net_device *dev)
{
        struct octeon_ethernet *priv = netdev_priv(dev);
        cvmx_helper_link_info_t link_info;

        link_info = cvmx_helper_link_get(priv->port);
        if (link_info.u64 == priv->link_info)
                return;

        if (cvmx_helper_link_set(priv->port, link_info))
                link_info.u64 = priv->link_info;
        else
                priv->link_info = link_info.u64;

        if (link_info.s.link_up) {
                if (!netif_carrier_ok(dev))
                        netif_carrier_on(dev);
        } else if (netif_carrier_ok(dev)) {
                netif_carrier_off(dev);
        }
        cvm_oct_note_carrier(priv, link_info);
}

static int cvm_oct_xaui_open(struct net_device *dev)
{
        return cvm_oct_common_open(dev, cvm_oct_link_poll);
}

static const struct net_device_ops cvm_oct_npi_netdev_ops = {
        .ndo_init               = cvm_oct_common_init,
        .ndo_uninit             = cvm_oct_common_uninit,
        .ndo_start_xmit         = cvm_oct_xmit,
        .ndo_set_rx_mode        = cvm_oct_common_set_multicast_list,
        .ndo_set_mac_address    = cvm_oct_common_set_mac_address,
        .ndo_do_ioctl           = cvm_oct_ioctl,
        .ndo_change_mtu         = cvm_oct_common_change_mtu,
        .ndo_get_stats          = cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = cvm_oct_poll_controller,
#endif
};

static const struct net_device_ops cvm_oct_xaui_netdev_ops = {
        .ndo_init               = cvm_oct_common_init,
        .ndo_uninit             = cvm_oct_common_uninit,
        .ndo_open               = cvm_oct_xaui_open,
        .ndo_stop               = cvm_oct_common_stop,
        .ndo_start_xmit         = cvm_oct_xmit,
        .ndo_set_rx_mode        = cvm_oct_common_set_multicast_list,
        .ndo_set_mac_address    = cvm_oct_common_set_mac_address,
        .ndo_do_ioctl           = cvm_oct_ioctl,
        .ndo_change_mtu         = cvm_oct_common_change_mtu,
        .ndo_get_stats          = cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = cvm_oct_poll_controller,
#endif
};

static const struct net_device_ops cvm_oct_sgmii_netdev_ops = {
        .ndo_init               = cvm_oct_sgmii_init,
        .ndo_uninit             = cvm_oct_common_uninit,
        .ndo_open               = cvm_oct_sgmii_open,
        .ndo_stop               = cvm_oct_common_stop,
        .ndo_start_xmit         = cvm_oct_xmit,
        .ndo_set_rx_mode        = cvm_oct_common_set_multicast_list,
        .ndo_set_mac_address    = cvm_oct_common_set_mac_address,
        .ndo_do_ioctl           = cvm_oct_ioctl,
        .ndo_change_mtu         = cvm_oct_common_change_mtu,
        .ndo_get_stats          = cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = cvm_oct_poll_controller,
#endif
};

static const struct net_device_ops cvm_oct_spi_netdev_ops = {
        .ndo_init               = cvm_oct_spi_init,
        .ndo_uninit             = cvm_oct_spi_uninit,
        .ndo_start_xmit         = cvm_oct_xmit,
        .ndo_set_rx_mode        = cvm_oct_common_set_multicast_list,
        .ndo_set_mac_address    = cvm_oct_common_set_mac_address,
        .ndo_do_ioctl           = cvm_oct_ioctl,
        .ndo_change_mtu         = cvm_oct_common_change_mtu,
        .ndo_get_stats          = cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = cvm_oct_poll_controller,
#endif
};

static const struct net_device_ops cvm_oct_rgmii_netdev_ops = {
        .ndo_init               = cvm_oct_common_init,
        .ndo_uninit             = cvm_oct_common_uninit,
        .ndo_open               = cvm_oct_rgmii_open,
        .ndo_stop               = cvm_oct_common_stop,
        .ndo_start_xmit         = cvm_oct_xmit,
        .ndo_set_rx_mode        = cvm_oct_common_set_multicast_list,
        .ndo_set_mac_address    = cvm_oct_common_set_mac_address,
        .ndo_do_ioctl           = cvm_oct_ioctl,
        .ndo_change_mtu         = cvm_oct_common_change_mtu,
        .ndo_get_stats          = cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = cvm_oct_poll_controller,
#endif
};

static const struct net_device_ops cvm_oct_pow_netdev_ops = {
        .ndo_init               = cvm_oct_common_init,
        .ndo_start_xmit         = cvm_oct_xmit_pow,
        .ndo_set_rx_mode        = cvm_oct_common_set_multicast_list,
        .ndo_set_mac_address    = cvm_oct_common_set_mac_address,
        .ndo_do_ioctl           = cvm_oct_ioctl,
        .ndo_change_mtu         = cvm_oct_common_change_mtu,
        .ndo_get_stats          = cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = cvm_oct_poll_controller,
#endif
};

static struct device_node *cvm_oct_of_get_child(
                                const struct device_node *parent, int reg_val)
{
        struct device_node *node = NULL;
        int size;
        const __be32 *addr;

        for (;;) {
                node = of_get_next_child(parent, node);
                if (!node)
                        break;
                addr = of_get_property(node, "reg", &size);
                if (addr && (be32_to_cpu(*addr) == reg_val))
                        break;
        }
        return node;
}

static struct device_node *cvm_oct_node_for_port(struct device_node *pip,
                                                 int interface, int port)
{
        struct device_node *ni, *np;

        ni = cvm_oct_of_get_child(pip, interface);
        if (!ni)
                return NULL;

        np = cvm_oct_of_get_child(ni, port);
        of_node_put(ni);

        return np;
}

static void cvm_set_rgmii_delay(struct device_node *np, int iface, int port)
{
        u32 delay_value;

        if (!of_property_read_u32(np, "rx-delay", &delay_value))
                cvmx_write_csr(CVMX_ASXX_RX_CLK_SETX(port, iface), delay_value);
        if (!of_property_read_u32(np, "tx-delay", &delay_value))
                cvmx_write_csr(CVMX_ASXX_TX_CLK_SETX(port, iface), delay_value);
}

static int cvm_oct_probe(struct platform_device *pdev)
{
        int num_interfaces;
        int interface;
        int fau = FAU_NUM_PACKET_BUFFERS_TO_FREE;
        int qos;
        struct device_node *pip;
        int mtu_overhead = ETH_HLEN + ETH_FCS_LEN;

#if IS_ENABLED(CONFIG_VLAN_8021Q)
        mtu_overhead += VLAN_HLEN;
#endif

        octeon_mdiobus_force_mod_depencency();

        pip = pdev->dev.of_node;
        if (!pip) {
                pr_err("Error: No 'pip' in /aliases\n");
                return -EINVAL;
        }

        cvm_oct_configure_common_hw();

        cvmx_helper_initialize_packet_io_global();

        if (receive_group_order) {
                if (receive_group_order > 4)
                        receive_group_order = 4;
                pow_receive_groups = (1 << (1 << receive_group_order)) - 1;
        } else {
                pow_receive_groups = BIT(pow_receive_group);
        }
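        /*
         * Example: receive_group_order = 2 enables 1 << 2 = 4 groups,
         * giving pow_receive_groups = (1 << 4) - 1 = 0xf (groups 0-3).
         */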

        /* Change the input group for all ports before input is enabled */
        num_interfaces = cvmx_helper_get_number_of_interfaces();
        for (interface = 0; interface < num_interfaces; interface++) {
                int num_ports = cvmx_helper_ports_on_interface(interface);
                int port;

                for (port = cvmx_helper_get_ipd_port(interface, 0);
                     port < cvmx_helper_get_ipd_port(interface, num_ports);
                     port++) {
                        union cvmx_pip_prt_tagx pip_prt_tagx;

                        pip_prt_tagx.u64 =
                            cvmx_read_csr(CVMX_PIP_PRT_TAGX(port));

                        if (receive_group_order) {
                                int tag_mask;

                                /* We support only 16 groups at the moment, so
                                 * always disable the two additional "hidden"
                                 * tag_mask bits on CN68XX.
                                 */
                                if (OCTEON_IS_MODEL(OCTEON_CN68XX))
                                        pip_prt_tagx.u64 |= 0x3ull << 44;

                                tag_mask = ~((1 << receive_group_order) - 1);
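                                /*
                                 * e.g. receive_group_order = 2 gives
                                 * tag_mask = ~0x3: with grptagbase 0,
                                 * the low two tag bits then select one
                                 * of groups 0-3.
                                 */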
                                pip_prt_tagx.s.grptagbase       = 0;
                                pip_prt_tagx.s.grptagmask       = tag_mask;
                                pip_prt_tagx.s.grptag           = 1;
                                pip_prt_tagx.s.tag_mode         = 0;
                                pip_prt_tagx.s.inc_prt_flag     = 1;
                                pip_prt_tagx.s.ip6_dprt_flag    = 1;
                                pip_prt_tagx.s.ip4_dprt_flag    = 1;
                                pip_prt_tagx.s.ip6_sprt_flag    = 1;
                                pip_prt_tagx.s.ip4_sprt_flag    = 1;
                                pip_prt_tagx.s.ip6_dst_flag     = 1;
                                pip_prt_tagx.s.ip4_dst_flag     = 1;
                                pip_prt_tagx.s.ip6_src_flag     = 1;
                                pip_prt_tagx.s.ip4_src_flag     = 1;
                                pip_prt_tagx.s.grp              = 0;
                        } else {
                                pip_prt_tagx.s.grptag   = 0;
                                pip_prt_tagx.s.grp      = pow_receive_group;
                        }

                        cvmx_write_csr(CVMX_PIP_PRT_TAGX(port),
                                       pip_prt_tagx.u64);
                }
        }

        cvmx_helper_ipd_and_packet_input_enable();

        memset(cvm_oct_device, 0, sizeof(cvm_oct_device));

        /*
         * Initialize the FAU used for counting packet buffers that
         * need to be freed.
         */
        cvmx_fau_atomic_write32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);

        /* Initialize the FAU used for counting tx SKBs that need to be freed */
        cvmx_fau_atomic_write32(FAU_TOTAL_TX_TO_CLEAN, 0);

        if (pow_send_group != -1) {
                struct net_device *dev;

                dev = alloc_etherdev(sizeof(struct octeon_ethernet));
                if (dev) {
                        /* Initialize the device private structure. */
                        struct octeon_ethernet *priv = netdev_priv(dev);

                        SET_NETDEV_DEV(dev, &pdev->dev);
                        dev->netdev_ops = &cvm_oct_pow_netdev_ops;
                        priv->imode = CVMX_HELPER_INTERFACE_MODE_DISABLED;
                        priv->port = CVMX_PIP_NUM_INPUT_PORTS;
                        priv->queue = -1;
                        strcpy(dev->name, "pow%d");
                        for (qos = 0; qos < 16; qos++)
                                skb_queue_head_init(&priv->tx_free_list[qos]);
                        dev->min_mtu = VLAN_ETH_ZLEN - mtu_overhead;
                        dev->max_mtu = OCTEON_MAX_MTU - mtu_overhead;
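                        /*
                         * e.g. with CONFIG_VLAN_8021Q, mtu_overhead is
                         * 14 + 4 + 4 = 22, so min_mtu = 64 - 22 = 42
                         * and max_mtu = 65392 - 22 = 65370.
                         */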

                        if (register_netdev(dev) < 0) {
                                pr_err("Failed to register ethernet device for POW\n");
                                free_netdev(dev);
                        } else {
                                cvm_oct_device[CVMX_PIP_NUM_INPUT_PORTS] = dev;
                                pr_info("%s: POW send group %d, receive group %d\n",
                                        dev->name, pow_send_group,
                                        pow_receive_group);
                        }
                } else {
                        pr_err("Failed to allocate ethernet device for POW\n");
                }
        }

        num_interfaces = cvmx_helper_get_number_of_interfaces();
        for (interface = 0; interface < num_interfaces; interface++) {
                cvmx_helper_interface_mode_t imode =
                    cvmx_helper_interface_get_mode(interface);
                int num_ports = cvmx_helper_ports_on_interface(interface);
                int port;
                int port_index;

                for (port_index = 0,
                     port = cvmx_helper_get_ipd_port(interface, 0);
                     port < cvmx_helper_get_ipd_port(interface, num_ports);
                     port_index++, port++) {
                        struct octeon_ethernet *priv;
                        struct net_device *dev =
                            alloc_etherdev(sizeof(struct octeon_ethernet));

                        if (!dev) {
                                pr_err("Failed to allocate ethernet device for port %d\n",
                                       port);
                                continue;
                        }

                        /* Initialize the device private structure. */
                        SET_NETDEV_DEV(dev, &pdev->dev);
                        priv = netdev_priv(dev);
                        priv->netdev = dev;
                        priv->of_node = cvm_oct_node_for_port(pip, interface,
                                                              port_index);

                        INIT_DELAYED_WORK(&priv->port_periodic_work,
                                          cvm_oct_periodic_worker);
                        priv->imode = imode;
                        priv->port = port;
                        priv->queue = cvmx_pko_get_base_queue(priv->port);
                        priv->fau = fau - cvmx_pko_get_num_queues(port) * 4;
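                        /*
                         * Each PKO queue owned by this port gets its
                         * own 32-bit FAU counter, hence the 4-byte
                         * stride in the loop below.
                         */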
                        for (qos = 0; qos < 16; qos++)
                                skb_queue_head_init(&priv->tx_free_list[qos]);
                        for (qos = 0; qos < cvmx_pko_get_num_queues(port);
                             qos++)
                                cvmx_fau_atomic_write32(priv->fau + qos * 4, 0);
                        dev->min_mtu = VLAN_ETH_ZLEN - mtu_overhead;
                        dev->max_mtu = OCTEON_MAX_MTU - mtu_overhead;

                        switch (priv->imode) {
                        /* These types don't support ports to IPD/PKO */
                        case CVMX_HELPER_INTERFACE_MODE_DISABLED:
                        case CVMX_HELPER_INTERFACE_MODE_PCIE:
                        case CVMX_HELPER_INTERFACE_MODE_PICMG:
                                break;

                        case CVMX_HELPER_INTERFACE_MODE_NPI:
                                dev->netdev_ops = &cvm_oct_npi_netdev_ops;
                                strcpy(dev->name, "npi%d");
                                break;

                        case CVMX_HELPER_INTERFACE_MODE_XAUI:
                                dev->netdev_ops = &cvm_oct_xaui_netdev_ops;
                                strcpy(dev->name, "xaui%d");
                                break;

                        case CVMX_HELPER_INTERFACE_MODE_LOOP:
                                dev->netdev_ops = &cvm_oct_npi_netdev_ops;
                                strcpy(dev->name, "loop%d");
                                break;

                        case CVMX_HELPER_INTERFACE_MODE_SGMII:
                                dev->netdev_ops = &cvm_oct_sgmii_netdev_ops;
                                strcpy(dev->name, "eth%d");
                                break;

                        case CVMX_HELPER_INTERFACE_MODE_SPI:
                                dev->netdev_ops = &cvm_oct_spi_netdev_ops;
                                strcpy(dev->name, "spi%d");
                                break;

                        case CVMX_HELPER_INTERFACE_MODE_RGMII:
                        case CVMX_HELPER_INTERFACE_MODE_GMII:
                                dev->netdev_ops = &cvm_oct_rgmii_netdev_ops;
                                strcpy(dev->name, "eth%d");
                                cvm_set_rgmii_delay(priv->of_node, interface,
                                                    port_index);
                                break;
                        }

                        if (priv->of_node && of_phy_is_fixed_link(priv->of_node)) {
                                if (of_phy_register_fixed_link(priv->of_node)) {
                                        netdev_err(dev, "Failed to register fixed link for interface %d, port %d\n",
                                                   interface, priv->port);
                                        dev->netdev_ops = NULL;
                                }
                        }

                        if (!dev->netdev_ops) {
                                free_netdev(dev);
                        } else if (register_netdev(dev) < 0) {
                                pr_err("Failed to register ethernet device for interface %d, port %d\n",
                                       interface, priv->port);
                                free_netdev(dev);
                        } else {
                                cvm_oct_device[priv->port] = dev;
                                fau -=
                                    cvmx_pko_get_num_queues(priv->port) *
                                    sizeof(u32);
                                schedule_delayed_work(&priv->port_periodic_work,
                                                      HZ);
                        }
                }
        }

        cvm_oct_tx_initialize();
        cvm_oct_rx_initialize();

        /*
         * 150 us: about 10 1500-byte packets at 1GE.
         */
        cvm_oct_tx_poll_interval = 150 * (octeon_get_clock_rate() / 1000000);
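        /*
         * octeon_get_clock_rate() / 1000000 is the core clock in cycles
         * per microsecond, so the poll interval above is 150 us
         * expressed in core clock cycles.
         */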

        schedule_delayed_work(&cvm_oct_rx_refill_work, HZ);

        return 0;
}

static int cvm_oct_remove(struct platform_device *pdev)
{
        int port;

        cvmx_ipd_disable();

        atomic_inc_return(&cvm_oct_poll_queue_stopping);
        cancel_delayed_work_sync(&cvm_oct_rx_refill_work);

        cvm_oct_rx_shutdown();
        cvm_oct_tx_shutdown();

        cvmx_pko_disable();

        /* Free the ethernet devices */
        for (port = 0; port < TOTAL_NUMBER_OF_PORTS; port++) {
                if (cvm_oct_device[port]) {
                        struct net_device *dev = cvm_oct_device[port];
                        struct octeon_ethernet *priv = netdev_priv(dev);

                        cancel_delayed_work_sync(&priv->port_periodic_work);

                        cvm_oct_tx_shutdown_dev(dev);
                        unregister_netdev(dev);
                        free_netdev(dev);
                        cvm_oct_device[port] = NULL;
                }
        }

        cvmx_pko_shutdown();

        cvmx_ipd_free_ptr();

        /* Free the HW pools */
        cvm_oct_mem_empty_fpa(CVMX_FPA_PACKET_POOL, CVMX_FPA_PACKET_POOL_SIZE,
                              num_packet_buffers);
        cvm_oct_mem_empty_fpa(CVMX_FPA_WQE_POOL, CVMX_FPA_WQE_POOL_SIZE,
                              num_packet_buffers);
        if (CVMX_FPA_OUTPUT_BUFFER_POOL != CVMX_FPA_PACKET_POOL)
                cvm_oct_mem_empty_fpa(CVMX_FPA_OUTPUT_BUFFER_POOL,
                                      CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE, 128);
        return 0;
}

static const struct of_device_id cvm_oct_match[] = {
        {
                .compatible = "cavium,octeon-3860-pip",
        },
        {},
};
MODULE_DEVICE_TABLE(of, cvm_oct_match);

static struct platform_driver cvm_oct_driver = {
        .probe          = cvm_oct_probe,
        .remove         = cvm_oct_remove,
        .driver         = {
                .name   = KBUILD_MODNAME,
                .of_match_table = cvm_oct_match,
        },
};

module_platform_driver(cvm_oct_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Cavium Networks <support@caviumnetworks.com>");
MODULE_DESCRIPTION("Cavium Networks Octeon ethernet driver.");