/*
 * Copyright (C) 2017 Netronome Systems, Inc.
 *
 * This software is dual licensed under the GNU General License Version 2,
 * June 1991 as shown in the file COPYING in the top-level directory of this
 * source tree or the BSD 2-Clause License provided below.  You have the
 * option to license this software under the complete terms of either license.
 *
 * The BSD 2-Clause License:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      1. Redistributions of source code must retain the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer.
 *
 *      2. Redistributions in binary form must reproduce the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer in the documentation and/or other materials
 *         provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
34 #include <linux/etherdevice.h>
35 #include <linux/lockdep.h>
36 #include <linux/pci.h>
37 #include <linux/skbuff.h>
38 #include <linux/vmalloc.h>
39 #include <net/devlink.h>
40 #include <net/dst_metadata.h>
43 #include "../nfpcore/nfp_cpp.h"
44 #include "../nfpcore/nfp_nffw.h"
45 #include "../nfpcore/nfp_nsp.h"
46 #include "../nfp_app.h"
47 #include "../nfp_main.h"
48 #include "../nfp_net.h"
49 #include "../nfp_net_repr.h"
50 #include "../nfp_port.h"
53 #define NFP_FLOWER_ALLOWED_VER 0x0001000000010000UL
/* Extra capability string reported for flower-app vNICs. */
static const char *nfp_flower_extra_cap(struct nfp_app *app, struct nfp_net *nn)
{
	return "FLOWER";
}
60 static enum devlink_eswitch_mode eswitch_mode_get(struct nfp_app *app)
62 return DEVLINK_ESWITCH_MODE_SWITCHDEV;
65 static enum nfp_repr_type
66 nfp_flower_repr_get_type_and_port(struct nfp_app *app, u32 port_id, u8 *port)
68 switch (FIELD_GET(NFP_FLOWER_CMSG_PORT_TYPE, port_id)) {
69 case NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT:
70 *port = FIELD_GET(NFP_FLOWER_CMSG_PORT_PHYS_PORT_NUM,
72 return NFP_REPR_TYPE_PHYS_PORT;
74 case NFP_FLOWER_CMSG_PORT_TYPE_PCIE_PORT:
75 *port = FIELD_GET(NFP_FLOWER_CMSG_PORT_VNIC, port_id);
76 if (FIELD_GET(NFP_FLOWER_CMSG_PORT_VNIC_TYPE, port_id) ==
77 NFP_FLOWER_CMSG_PORT_VNIC_TYPE_PF)
78 return NFP_REPR_TYPE_PF;
80 return NFP_REPR_TYPE_VF;
83 return __NFP_REPR_TYPE_MAX;
86 static struct net_device *
87 nfp_flower_repr_get(struct nfp_app *app, u32 port_id)
89 enum nfp_repr_type repr_type;
90 struct nfp_reprs *reprs;
93 repr_type = nfp_flower_repr_get_type_and_port(app, port_id, &port);
94 if (repr_type > NFP_REPR_TYPE_MAX)
97 reprs = rcu_dereference(app->reprs[repr_type]);
101 if (port >= reprs->num_reprs)
104 return rcu_dereference(reprs->reprs[port]);
108 nfp_flower_reprs_reify(struct nfp_app *app, enum nfp_repr_type type,
111 struct nfp_reprs *reprs;
112 int i, err, count = 0;
114 reprs = rcu_dereference_protected(app->reprs[type],
115 lockdep_is_held(&app->pf->lock));
119 for (i = 0; i < reprs->num_reprs; i++) {
120 struct net_device *netdev;
122 netdev = nfp_repr_get_locked(app, reprs, i);
124 struct nfp_repr *repr = netdev_priv(netdev);
126 err = nfp_flower_cmsg_portreify(repr, exists);
137 nfp_flower_wait_repr_reify(struct nfp_app *app, atomic_t *replies, int tot_repl)
139 struct nfp_flower_priv *priv = app->priv;
145 lockdep_assert_held(&app->pf->lock);
146 err = wait_event_interruptible_timeout(priv->reify_wait_queue,
147 atomic_read(replies) >= tot_repl,
148 msecs_to_jiffies(10));
150 nfp_warn(app->cpp, "Not all reprs responded to reify\n");
158 nfp_flower_repr_netdev_open(struct nfp_app *app, struct nfp_repr *repr)
162 err = nfp_flower_cmsg_portmod(repr, true, repr->netdev->mtu, false);
166 netif_tx_wake_all_queues(repr->netdev);
172 nfp_flower_repr_netdev_stop(struct nfp_app *app, struct nfp_repr *repr)
174 netif_tx_disable(repr->netdev);
176 return nfp_flower_cmsg_portmod(repr, false, repr->netdev->mtu, false);
180 nfp_flower_repr_netdev_init(struct nfp_app *app, struct net_device *netdev)
182 return tc_setup_cb_egdev_register(netdev,
183 nfp_flower_setup_tc_egress_cb,
184 netdev_priv(netdev));
188 nfp_flower_repr_netdev_clean(struct nfp_app *app, struct net_device *netdev)
190 struct nfp_repr *repr = netdev_priv(netdev);
192 kfree(repr->app_priv);
194 tc_setup_cb_egdev_unregister(netdev, nfp_flower_setup_tc_egress_cb,
195 netdev_priv(netdev));
199 nfp_flower_repr_netdev_preclean(struct nfp_app *app, struct net_device *netdev)
201 struct nfp_repr *repr = netdev_priv(netdev);
202 struct nfp_flower_priv *priv = app->priv;
203 atomic_t *replies = &priv->reify_replies;
206 atomic_set(replies, 0);
207 err = nfp_flower_cmsg_portreify(repr, false);
209 nfp_warn(app->cpp, "Failed to notify firmware about repr destruction\n");
213 nfp_flower_wait_repr_reify(app, replies, 1);
216 static void nfp_flower_sriov_disable(struct nfp_app *app)
218 struct nfp_flower_priv *priv = app->priv;
223 nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_VF);
227 nfp_flower_spawn_vnic_reprs(struct nfp_app *app,
228 enum nfp_flower_cmsg_port_vnic_type vnic_type,
229 enum nfp_repr_type repr_type, unsigned int cnt)
231 u8 nfp_pcie = nfp_cppcore_pcie_unit(app->pf->cpp);
232 struct nfp_flower_priv *priv = app->priv;
233 atomic_t *replies = &priv->reify_replies;
234 struct nfp_flower_repr_priv *repr_priv;
235 enum nfp_port_type port_type;
236 struct nfp_repr *nfp_repr;
237 struct nfp_reprs *reprs;
238 int i, err, reify_cnt;
241 port_type = repr_type == NFP_REPR_TYPE_PF ? NFP_PORT_PF_PORT :
244 reprs = nfp_reprs_alloc(cnt);
248 for (i = 0; i < cnt; i++) {
249 struct net_device *repr;
250 struct nfp_port *port;
253 repr = nfp_repr_alloc(app);
256 goto err_reprs_clean;
259 repr_priv = kzalloc(sizeof(*repr_priv), GFP_KERNEL);
263 goto err_reprs_clean;
266 nfp_repr = netdev_priv(repr);
267 nfp_repr->app_priv = repr_priv;
269 /* For now we only support 1 PF */
270 WARN_ON(repr_type == NFP_REPR_TYPE_PF && i);
272 port = nfp_port_alloc(app, port_type, repr);
277 goto err_reprs_clean;
279 if (repr_type == NFP_REPR_TYPE_PF) {
281 port->vnic = priv->nn->dp.ctrl_bar;
286 app->pf->vf_cfg_mem + i * NFP_NET_CFG_BAR_SZ;
289 eth_hw_addr_random(repr);
291 port_id = nfp_flower_cmsg_pcie_port(nfp_pcie, vnic_type,
293 err = nfp_repr_init(app, repr,
294 port_id, port, priv->nn->dp.netdev);
299 goto err_reprs_clean;
302 RCU_INIT_POINTER(reprs->reprs[i], repr);
303 nfp_info(app->cpp, "%s%d Representor(%s) created\n",
304 repr_type == NFP_REPR_TYPE_PF ? "PF" : "VF", i,
308 nfp_app_reprs_set(app, repr_type, reprs);
310 atomic_set(replies, 0);
311 reify_cnt = nfp_flower_reprs_reify(app, repr_type, true);
314 nfp_warn(app->cpp, "Failed to notify firmware about repr creation\n");
315 goto err_reprs_remove;
318 err = nfp_flower_wait_repr_reify(app, replies, reify_cnt);
320 goto err_reprs_remove;
324 reprs = nfp_app_reprs_set(app, repr_type, NULL);
326 nfp_reprs_clean_and_free(app, reprs);
330 static int nfp_flower_sriov_enable(struct nfp_app *app, int num_vfs)
332 struct nfp_flower_priv *priv = app->priv;
337 return nfp_flower_spawn_vnic_reprs(app,
338 NFP_FLOWER_CMSG_PORT_VNIC_TYPE_VF,
339 NFP_REPR_TYPE_VF, num_vfs);
343 nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv)
345 struct nfp_eth_table *eth_tbl = app->pf->eth_tbl;
346 atomic_t *replies = &priv->reify_replies;
347 struct nfp_flower_repr_priv *repr_priv;
348 struct nfp_repr *nfp_repr;
349 struct sk_buff *ctrl_skb;
350 struct nfp_reprs *reprs;
354 ctrl_skb = nfp_flower_cmsg_mac_repr_start(app, eth_tbl->count);
358 reprs = nfp_reprs_alloc(eth_tbl->max_index + 1);
361 goto err_free_ctrl_skb;
364 for (i = 0; i < eth_tbl->count; i++) {
365 unsigned int phys_port = eth_tbl->ports[i].index;
366 struct net_device *repr;
367 struct nfp_port *port;
370 repr = nfp_repr_alloc(app);
373 goto err_reprs_clean;
376 repr_priv = kzalloc(sizeof(*repr_priv), GFP_KERNEL);
380 goto err_reprs_clean;
383 nfp_repr = netdev_priv(repr);
384 nfp_repr->app_priv = repr_priv;
386 port = nfp_port_alloc(app, NFP_PORT_PHYS_PORT, repr);
391 goto err_reprs_clean;
393 err = nfp_port_init_phy_port(app->pf, app, port, i);
398 goto err_reprs_clean;
401 SET_NETDEV_DEV(repr, &priv->nn->pdev->dev);
402 nfp_net_get_mac_addr(app->pf, repr, port);
404 cmsg_port_id = nfp_flower_cmsg_phys_port(phys_port);
405 err = nfp_repr_init(app, repr,
406 cmsg_port_id, port, priv->nn->dp.netdev);
411 goto err_reprs_clean;
414 nfp_flower_cmsg_mac_repr_add(ctrl_skb, i,
415 eth_tbl->ports[i].nbi,
416 eth_tbl->ports[i].base,
419 RCU_INIT_POINTER(reprs->reprs[phys_port], repr);
420 nfp_info(app->cpp, "Phys Port %d Representor(%s) created\n",
421 phys_port, repr->name);
424 nfp_app_reprs_set(app, NFP_REPR_TYPE_PHYS_PORT, reprs);
426 /* The REIFY/MAC_REPR control messages should be sent after the MAC
427 * representors are registered using nfp_app_reprs_set(). This is
428 * because the firmware may respond with control messages for the
429 * MAC representors, f.e. to provide the driver with information
430 * about their state, and without registration the driver will drop
433 atomic_set(replies, 0);
434 reify_cnt = nfp_flower_reprs_reify(app, NFP_REPR_TYPE_PHYS_PORT, true);
437 nfp_warn(app->cpp, "Failed to notify firmware about repr creation\n");
438 goto err_reprs_remove;
441 err = nfp_flower_wait_repr_reify(app, replies, reify_cnt);
443 goto err_reprs_remove;
445 nfp_ctrl_tx(app->ctrl, ctrl_skb);
449 reprs = nfp_app_reprs_set(app, NFP_REPR_TYPE_PHYS_PORT, NULL);
451 nfp_reprs_clean_and_free(app, reprs);
457 static int nfp_flower_vnic_alloc(struct nfp_app *app, struct nfp_net *nn,
461 nfp_warn(app->cpp, "FlowerNIC doesn't support more than one data vNIC\n");
462 goto err_invalid_port;
465 eth_hw_addr_random(nn->dp.netdev);
466 netif_keep_dst(nn->dp.netdev);
467 nn->vnic_no_name = true;
472 nn->port = nfp_port_alloc(app, NFP_PORT_INVALID, nn->dp.netdev);
473 return PTR_ERR_OR_ZERO(nn->port);
476 static void nfp_flower_vnic_clean(struct nfp_app *app, struct nfp_net *nn)
478 struct nfp_flower_priv *priv = app->priv;
480 if (app->pf->num_vfs)
481 nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_VF);
482 nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_PF);
483 nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_PHYS_PORT);
488 static int nfp_flower_vnic_init(struct nfp_app *app, struct nfp_net *nn)
490 struct nfp_flower_priv *priv = app->priv;
495 err = nfp_flower_spawn_phy_reprs(app, app->priv);
499 err = nfp_flower_spawn_vnic_reprs(app,
500 NFP_FLOWER_CMSG_PORT_VNIC_TYPE_PF,
501 NFP_REPR_TYPE_PF, 1);
503 goto err_destroy_reprs_phy;
505 if (app->pf->num_vfs) {
506 err = nfp_flower_spawn_vnic_reprs(app,
507 NFP_FLOWER_CMSG_PORT_VNIC_TYPE_VF,
511 goto err_destroy_reprs_pf;
516 err_destroy_reprs_pf:
517 nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_PF);
518 err_destroy_reprs_phy:
519 nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_PHYS_PORT);
525 static int nfp_flower_init(struct nfp_app *app)
527 const struct nfp_pf *pf = app->pf;
528 struct nfp_flower_priv *app_priv;
529 u64 version, features;
533 nfp_warn(app->cpp, "FlowerNIC requires eth table\n");
537 if (!pf->mac_stats_bar) {
538 nfp_warn(app->cpp, "FlowerNIC requires mac_stats BAR\n");
542 if (!pf->vf_cfg_bar) {
543 nfp_warn(app->cpp, "FlowerNIC requires vf_cfg BAR\n");
547 version = nfp_rtsym_read_le(app->pf->rtbl, "hw_flower_version", &err);
549 nfp_warn(app->cpp, "FlowerNIC requires hw_flower_version memory symbol\n");
553 /* We need to ensure hardware has enough flower capabilities. */
554 if (version != NFP_FLOWER_ALLOWED_VER) {
555 nfp_warn(app->cpp, "FlowerNIC: unsupported firmware version\n");
559 app_priv = vzalloc(sizeof(struct nfp_flower_priv));
563 app->priv = app_priv;
565 skb_queue_head_init(&app_priv->cmsg_skbs_high);
566 skb_queue_head_init(&app_priv->cmsg_skbs_low);
567 INIT_WORK(&app_priv->cmsg_work, nfp_flower_cmsg_process_rx);
568 init_waitqueue_head(&app_priv->reify_wait_queue);
570 init_waitqueue_head(&app_priv->mtu_conf.wait_q);
571 spin_lock_init(&app_priv->mtu_conf.lock);
573 err = nfp_flower_metadata_init(app);
575 goto err_free_app_priv;
577 /* Extract the extra features supported by the firmware. */
578 features = nfp_rtsym_read_le(app->pf->rtbl,
579 "_abi_flower_extra_features", &err);
581 app_priv->flower_ext_feats = 0;
583 app_priv->flower_ext_feats = features;
585 /* Tell the firmware that the driver supports lag. */
586 err = nfp_rtsym_write_le(app->pf->rtbl,
587 "_abi_flower_balance_sync_enable", 1);
589 app_priv->flower_ext_feats |= NFP_FL_FEATS_LAG;
590 nfp_flower_lag_init(&app_priv->nfp_lag);
591 } else if (err == -ENOENT) {
592 nfp_warn(app->cpp, "LAG not supported by FW.\n");
594 goto err_cleanup_metadata;
599 err_cleanup_metadata:
600 nfp_flower_metadata_cleanup(app);
606 static void nfp_flower_clean(struct nfp_app *app)
608 struct nfp_flower_priv *app_priv = app->priv;
610 skb_queue_purge(&app_priv->cmsg_skbs_high);
611 skb_queue_purge(&app_priv->cmsg_skbs_low);
612 flush_work(&app_priv->cmsg_work);
614 if (app_priv->flower_ext_feats & NFP_FL_FEATS_LAG)
615 nfp_flower_lag_cleanup(&app_priv->nfp_lag);
617 nfp_flower_metadata_cleanup(app);
622 static bool nfp_flower_check_ack(struct nfp_flower_priv *app_priv)
626 spin_lock_bh(&app_priv->mtu_conf.lock);
627 ret = app_priv->mtu_conf.ack;
628 spin_unlock_bh(&app_priv->mtu_conf.lock);
634 nfp_flower_repr_change_mtu(struct nfp_app *app, struct net_device *netdev,
637 struct nfp_flower_priv *app_priv = app->priv;
638 struct nfp_repr *repr = netdev_priv(netdev);
641 /* Only need to config FW for physical port MTU change. */
642 if (repr->port->type != NFP_PORT_PHYS_PORT)
645 if (!(app_priv->flower_ext_feats & NFP_FL_NBI_MTU_SETTING)) {
646 nfp_err(app->cpp, "Physical port MTU setting not supported\n");
650 spin_lock_bh(&app_priv->mtu_conf.lock);
651 app_priv->mtu_conf.ack = false;
652 app_priv->mtu_conf.requested_val = new_mtu;
653 app_priv->mtu_conf.portnum = repr->dst->u.port_info.port_id;
654 spin_unlock_bh(&app_priv->mtu_conf.lock);
656 err = nfp_flower_cmsg_portmod(repr, netif_carrier_ok(netdev), new_mtu,
659 spin_lock_bh(&app_priv->mtu_conf.lock);
660 app_priv->mtu_conf.requested_val = 0;
661 spin_unlock_bh(&app_priv->mtu_conf.lock);
665 /* Wait for fw to ack the change. */
666 ack = wait_event_timeout(app_priv->mtu_conf.wait_q,
667 nfp_flower_check_ack(app_priv),
668 msecs_to_jiffies(10));
671 spin_lock_bh(&app_priv->mtu_conf.lock);
672 app_priv->mtu_conf.requested_val = 0;
673 spin_unlock_bh(&app_priv->mtu_conf.lock);
674 nfp_warn(app->cpp, "MTU change not verified with fw\n");
681 static int nfp_flower_start(struct nfp_app *app)
683 struct nfp_flower_priv *app_priv = app->priv;
686 if (app_priv->flower_ext_feats & NFP_FL_FEATS_LAG) {
687 err = nfp_flower_lag_reset(&app_priv->nfp_lag);
691 err = register_netdevice_notifier(&app_priv->nfp_lag.lag_nb);
696 return nfp_tunnel_config_start(app);
699 static void nfp_flower_stop(struct nfp_app *app)
701 struct nfp_flower_priv *app_priv = app->priv;
703 if (app_priv->flower_ext_feats & NFP_FL_FEATS_LAG)
704 unregister_netdevice_notifier(&app_priv->nfp_lag.lag_nb);
706 nfp_tunnel_config_stop(app);
709 const struct nfp_app_type app_flower = {
710 .id = NFP_APP_FLOWER_NIC,
713 .ctrl_cap_mask = ~0U,
714 .ctrl_has_meta = true,
716 .extra_cap = nfp_flower_extra_cap,
718 .init = nfp_flower_init,
719 .clean = nfp_flower_clean,
721 .repr_change_mtu = nfp_flower_repr_change_mtu,
723 .vnic_alloc = nfp_flower_vnic_alloc,
724 .vnic_init = nfp_flower_vnic_init,
725 .vnic_clean = nfp_flower_vnic_clean,
727 .repr_init = nfp_flower_repr_netdev_init,
728 .repr_preclean = nfp_flower_repr_netdev_preclean,
729 .repr_clean = nfp_flower_repr_netdev_clean,
731 .repr_open = nfp_flower_repr_netdev_open,
732 .repr_stop = nfp_flower_repr_netdev_stop,
734 .start = nfp_flower_start,
735 .stop = nfp_flower_stop,
737 .ctrl_msg_rx = nfp_flower_cmsg_rx,
739 .sriov_enable = nfp_flower_sriov_enable,
740 .sriov_disable = nfp_flower_sriov_disable,
742 .eswitch_mode_get = eswitch_mode_get,
743 .repr_get = nfp_flower_repr_get,
745 .setup_tc = nfp_flower_setup_tc,