/*
 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/bpf.h>
#include <linux/etherdevice.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <net/ip.h>
#include <net/busy_poll.h>
#include <net/vxlan.h>
#include <net/devlink.h>

#include <linux/mlx4/driver.h>
#include <linux/mlx4/device.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/cq.h>

#include "mlx4_en.h"
#include "en_port.h"
#define MLX4_EN_MAX_XDP_MTU ((int)(PAGE_SIZE - ETH_HLEN - (2 * VLAN_HLEN) - \
				   XDP_PACKET_HEADROOM))
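/*
 * Worked example (illustrative note, not part of the original source):
 * with a 4 KiB PAGE_SIZE, ETH_HLEN = 14, VLAN_HLEN = 4 and the usual
 * XDP_PACKET_HEADROOM of 256 bytes, the largest MTU accepted while an
 * XDP program is attached is 4096 - 14 - 8 - 256 = 3818 bytes, since
 * every frame must fit in a single page alongside its headroom.
 */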
int mlx4_en_setup_tc(struct net_device *dev, u8 up)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int i;
	unsigned int offset = 0;

	if (up && up != MLX4_EN_NUM_UP_HIGH)
		return -EINVAL;

	netdev_set_num_tc(dev, up);
	netif_set_real_num_tx_queues(dev, priv->tx_ring_num[TX]);

	/* Partition Tx queues evenly among UPs */
	for (i = 0; i < up; i++) {
		netdev_set_tc_queue(dev, i, priv->num_tx_rings_p_up, offset);
		offset += priv->num_tx_rings_p_up;
	}

#ifdef CONFIG_MLX4_EN_DCB
	if (!mlx4_is_slave(priv->mdev->dev)) {
		if (up) {
			if (priv->dcbx_cap)
				priv->flags |= MLX4_EN_FLAG_DCB_ENABLED;
		} else {
			priv->flags &= ~MLX4_EN_FLAG_DCB_ENABLED;
			priv->cee_config.pfc_state = false;
		}
	}
#endif /* CONFIG_MLX4_EN_DCB */

	return 0;
}

int mlx4_en_alloc_tx_queue_per_tc(struct net_device *dev, u8 tc)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_port_profile new_prof;
	struct mlx4_en_priv *tmp;
	int port_up = 0;
	int err = 0;

	tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	mutex_lock(&mdev->state_lock);
	memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile));
	new_prof.num_up = (tc == 0) ? MLX4_EN_NUM_UP_LOW :
				      MLX4_EN_NUM_UP_HIGH;
	new_prof.tx_ring_num[TX] = new_prof.num_tx_rings_p_up *
				   new_prof.num_up;
	err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof, true);
	if (err)
		goto out;

	if (priv->port_up) {
		port_up = 1;
		mlx4_en_stop_port(dev, 1);
	}

	mlx4_en_safe_replace_resources(priv, tmp);
	if (port_up) {
		err = mlx4_en_start_port(dev);
		if (err) {
			en_err(priv, "Failed starting port for setup TC\n");
			goto out;
		}
	}

	err = mlx4_en_setup_tc(dev, tc);
out:
	mutex_unlock(&mdev->state_lock);
	kfree(tmp);
	return err;
}

static int __mlx4_en_setup_tc(struct net_device *dev, enum tc_setup_type type,
			      void *type_data)
{
	struct tc_mqprio_qopt *mqprio = type_data;

	if (type != TC_SETUP_MQPRIO)
		return -EOPNOTSUPP;

	if (mqprio->num_tc && mqprio->num_tc != MLX4_EN_NUM_UP_HIGH)
		return -EINVAL;

	mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;

	return mlx4_en_alloc_tx_queue_per_tc(dev, mqprio->num_tc);
}

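/*
 * Usage sketch (illustrative note, not part of the original source):
 * __mlx4_en_setup_tc() is reached through the ndo_setup_tc hook when an
 * mqprio offload is requested, e.g. with iproute2 (the device name is a
 * placeholder):
 *
 *   tc qdisc add dev eth2 root mqprio num_tc 8 hw 1
 *
 * Only 0 or MLX4_EN_NUM_UP_HIGH traffic classes are accepted, as the
 * num_tc check above enforces.
 */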
#ifdef CONFIG_RFS_ACCEL

struct mlx4_en_filter {
	struct list_head next;
	struct work_struct work;

	u8     ip_proto;
	__be32 src_ip;
	__be32 dst_ip;
	__be16 src_port;
	__be16 dst_port;

	int rxq_index;
	struct mlx4_en_priv *priv;
	u32 flow_id;			/* RFS infrastructure id */
	int id;				/* mlx4_en driver id */
	u64 reg_id;			/* Flow steering API id */
	u8 activated;			/* Used to prevent expiry before filter
					 * is attached
					 */
	struct hlist_node filter_chain;
};

static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv);

static enum mlx4_net_trans_rule_id mlx4_ip_proto_to_trans_rule_id(u8 ip_proto)
{
	switch (ip_proto) {
	case IPPROTO_UDP:
		return MLX4_NET_TRANS_RULE_ID_UDP;
	case IPPROTO_TCP:
		return MLX4_NET_TRANS_RULE_ID_TCP;
	default:
		return MLX4_NET_TRANS_RULE_NUM;
	}
}

/* Must not acquire state_lock, as its corresponding work_sync
 * is done under it.
 */
static void mlx4_en_filter_work(struct work_struct *work)
{
	struct mlx4_en_filter *filter = container_of(work,
						     struct mlx4_en_filter,
						     work);
	struct mlx4_en_priv *priv = filter->priv;
	struct mlx4_spec_list spec_tcp_udp = {
		.id = mlx4_ip_proto_to_trans_rule_id(filter->ip_proto),
		{
			.tcp_udp = {
				.dst_port = filter->dst_port,
				.dst_port_msk = (__force __be16)-1,
				.src_port = filter->src_port,
				.src_port_msk = (__force __be16)-1,
			},
		},
	};
	struct mlx4_spec_list spec_ip = {
		.id = MLX4_NET_TRANS_RULE_ID_IPV4,
		{
			.ipv4 = {
				.dst_ip = filter->dst_ip,
				.dst_ip_msk = (__force __be32)-1,
				.src_ip = filter->src_ip,
				.src_ip_msk = (__force __be32)-1,
			},
		},
	};
	struct mlx4_spec_list spec_eth = {
		.id = MLX4_NET_TRANS_RULE_ID_ETH,
	};
	struct mlx4_net_trans_rule rule = {
		.list = LIST_HEAD_INIT(rule.list),
		.queue_mode = MLX4_NET_TRANS_Q_LIFO,
		.exclusive = 1,
		.allow_loopback = 1,
		.promisc_mode = MLX4_FS_REGULAR,
		.port = priv->port,
		.priority = MLX4_DOMAIN_RFS,
	};
	int rc;
	__be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);

	if (spec_tcp_udp.id >= MLX4_NET_TRANS_RULE_NUM) {
		en_warn(priv, "RFS: ignoring unsupported ip protocol (%d)\n",
			filter->ip_proto);
		goto ignore;
	}
	list_add_tail(&spec_eth.list, &rule.list);
	list_add_tail(&spec_ip.list, &rule.list);
	list_add_tail(&spec_tcp_udp.list, &rule.list);

	rule.qpn = priv->rss_map.qps[filter->rxq_index].qpn;
	memcpy(spec_eth.eth.dst_mac, priv->dev->dev_addr, ETH_ALEN);
	memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN);

	filter->activated = 0;

	if (filter->reg_id) {
		rc = mlx4_flow_detach(priv->mdev->dev, filter->reg_id);
		if (rc && rc != -ENOENT)
			en_err(priv, "Error detaching flow. rc = %d\n", rc);
	}

	rc = mlx4_flow_attach(priv->mdev->dev, &rule, &filter->reg_id);
	if (rc)
		en_err(priv, "Error attaching flow. err = %d\n", rc);

ignore:
	mlx4_en_filter_rfs_expire(priv);

	filter->activated = 1;
}

static inline struct hlist_head *
filter_hash_bucket(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip,
		   __be16 src_port, __be16 dst_port)
{
	unsigned long l;
	int bucket_idx;

	l = (__force unsigned long)src_port |
	    ((__force unsigned long)dst_port << 2);
	l ^= (__force unsigned long)(src_ip ^ dst_ip);

	bucket_idx = hash_long(l, MLX4_EN_FILTER_HASH_SHIFT);

	return &priv->filter_hash[bucket_idx];
}

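/*
 * Illustrative note (not part of the original source): the bucket index
 * mixes the flow tuple cheaply. The two ports are combined with a 2-bit
 * stagger so equal src and dst ports do not cancel out, the IPs are
 * XOR-ed in, and hash_long() folds the result down to one of
 * 2^MLX4_EN_FILTER_HASH_SHIFT buckets.
 */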
static struct mlx4_en_filter *
mlx4_en_filter_alloc(struct mlx4_en_priv *priv, int rxq_index, __be32 src_ip,
		     __be32 dst_ip, u8 ip_proto, __be16 src_port,
		     __be16 dst_port, u32 flow_id)
{
	struct mlx4_en_filter *filter = NULL;

	filter = kzalloc(sizeof(struct mlx4_en_filter), GFP_ATOMIC);
	if (!filter)
		return NULL;

	filter->priv = priv;
	filter->rxq_index = rxq_index;
	INIT_WORK(&filter->work, mlx4_en_filter_work);

	filter->src_ip = src_ip;
	filter->dst_ip = dst_ip;
	filter->ip_proto = ip_proto;
	filter->src_port = src_port;
	filter->dst_port = dst_port;

	filter->flow_id = flow_id;

	filter->id = priv->last_filter_id++ % RPS_NO_FILTER;

	list_add_tail(&filter->next, &priv->filters);
	hlist_add_head(&filter->filter_chain,
		       filter_hash_bucket(priv, src_ip, dst_ip, src_port,
					  dst_port));

	return filter;
}

static void mlx4_en_filter_free(struct mlx4_en_filter *filter)
{
	struct mlx4_en_priv *priv = filter->priv;
	int rc;

	list_del(&filter->next);

	rc = mlx4_flow_detach(priv->mdev->dev, filter->reg_id);
	if (rc && rc != -ENOENT)
		en_err(priv, "Error detaching flow. rc = %d\n", rc);

	kfree(filter);
}

static inline struct mlx4_en_filter *
mlx4_en_filter_find(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip,
		    u8 ip_proto, __be16 src_port, __be16 dst_port)
{
	struct mlx4_en_filter *filter;
	struct mlx4_en_filter *ret = NULL;

	hlist_for_each_entry(filter,
			     filter_hash_bucket(priv, src_ip, dst_ip,
						src_port, dst_port),
			     filter_chain) {
		if (filter->src_ip == src_ip &&
		    filter->dst_ip == dst_ip &&
		    filter->ip_proto == ip_proto &&
		    filter->src_port == src_port &&
		    filter->dst_port == dst_port) {
			ret = filter;
			break;
		}
	}

	return ret;
}

static int
mlx4_en_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
		   u16 rxq_index, u32 flow_id)
{
	struct mlx4_en_priv *priv = netdev_priv(net_dev);
	struct mlx4_en_filter *filter;
	const struct iphdr *ip;
	const __be16 *ports;
	u8 ip_proto;
	__be32 src_ip, dst_ip;
	__be16 src_port, dst_port;
	int nhoff = skb_network_offset(skb);
	int ret = 0;

	if (skb->encapsulation)
		return -EPROTONOSUPPORT;

	if (skb->protocol != htons(ETH_P_IP))
		return -EPROTONOSUPPORT;

	ip = (const struct iphdr *)(skb->data + nhoff);
	if (ip_is_fragment(ip))
		return -EPROTONOSUPPORT;

	if ((ip->protocol != IPPROTO_TCP) && (ip->protocol != IPPROTO_UDP))
		return -EPROTONOSUPPORT;
	ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);

	ip_proto = ip->protocol;
	src_ip = ip->saddr;
	dst_ip = ip->daddr;
	src_port = ports[0];
	dst_port = ports[1];

	spin_lock_bh(&priv->filters_lock);
	filter = mlx4_en_filter_find(priv, src_ip, dst_ip, ip_proto,
				     src_port, dst_port);
	if (filter) {
		if (filter->rxq_index == rxq_index)
			goto out;

		filter->rxq_index = rxq_index;
	} else {
		filter = mlx4_en_filter_alloc(priv, rxq_index,
					      src_ip, dst_ip, ip_proto,
					      src_port, dst_port, flow_id);
		if (!filter) {
			ret = -ENOMEM;
			goto err;
		}
	}

	queue_work(priv->mdev->workqueue, &filter->work);

out:
	ret = filter->id;
err:
	spin_unlock_bh(&priv->filters_lock);

	return ret;
}

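/*
 * Context (editorial note, not part of the original source): this is the
 * driver's ndo_rx_flow_steer implementation for accelerated RFS. The RFS
 * core calls it from the receive path when a flow should follow the CPU
 * that consumes it; the returned filter->id is later passed back through
 * rps_may_expire_flow() so that stale hardware rules can be removed by
 * mlx4_en_filter_rfs_expire().
 */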
void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv)
{
	struct mlx4_en_filter *filter, *tmp;
	LIST_HEAD(del_list);

	spin_lock_bh(&priv->filters_lock);
	list_for_each_entry_safe(filter, tmp, &priv->filters, next) {
		list_move(&filter->next, &del_list);
		hlist_del(&filter->filter_chain);
	}
	spin_unlock_bh(&priv->filters_lock);

	list_for_each_entry_safe(filter, tmp, &del_list, next) {
		cancel_work_sync(&filter->work);
		mlx4_en_filter_free(filter);
	}
}

static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv)
{
	struct mlx4_en_filter *filter = NULL, *tmp, *last_filter = NULL;
	LIST_HEAD(del_list);
	int i = 0;

	spin_lock_bh(&priv->filters_lock);
	list_for_each_entry_safe(filter, tmp, &priv->filters, next) {
		if (i > MLX4_EN_FILTER_EXPIRY_QUOTA)
			break;

		if (filter->activated &&
		    !work_pending(&filter->work) &&
		    rps_may_expire_flow(priv->dev,
					filter->rxq_index, filter->flow_id,
					filter->id)) {
			list_move(&filter->next, &del_list);
			hlist_del(&filter->filter_chain);
		} else
			last_filter = filter;

		i++;
	}

	if (last_filter && (&last_filter->next != priv->filters.next))
		list_move(&priv->filters, &last_filter->next);

	spin_unlock_bh(&priv->filters_lock);

	list_for_each_entry_safe(filter, tmp, &del_list, next)
		mlx4_en_filter_free(filter);
}
#endif

static int mlx4_en_vlan_rx_add_vid(struct net_device *dev,
				   __be16 proto, u16 vid)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;
	int idx;

	en_dbg(HW, priv, "adding VLAN:%d\n", vid);

	set_bit(vid, priv->active_vlans);

	/* Add VID to port VLAN filter */
	mutex_lock(&mdev->state_lock);
	if (mdev->device_up && priv->port_up) {
		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
		if (err) {
			en_err(priv, "Failed configuring VLAN filter\n");
			goto out;
		}
	}
	err = mlx4_register_vlan(mdev->dev, priv->port, vid, &idx);
	if (err)
		en_dbg(HW, priv, "Failed adding vlan %d\n", vid);

out:
	mutex_unlock(&mdev->state_lock);
	return err;
}

static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev,
				    __be16 proto, u16 vid)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err = 0;

	en_dbg(HW, priv, "Killing VID:%d\n", vid);

	clear_bit(vid, priv->active_vlans);

	/* Remove VID from port VLAN filter */
	mutex_lock(&mdev->state_lock);
	mlx4_unregister_vlan(mdev->dev, priv->port, vid);

	if (mdev->device_up && priv->port_up) {
		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
		if (err)
			en_err(priv, "Failed configuring VLAN filter\n");
	}
	mutex_unlock(&mdev->state_lock);

	return err;
}

static void mlx4_en_u64_to_mac(unsigned char dst_mac[ETH_ALEN + 2], u64 src_mac)
{
	int i;

	for (i = ETH_ALEN - 1; i >= 0; --i) {
		dst_mac[i] = src_mac & 0xff;
		src_mac >>= 8;
	}
	memset(&dst_mac[ETH_ALEN], 0, 2);
}

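/*
 * Worked example (illustrative note, not part of the original source):
 * for src_mac = 0x0002c9000102 the loop peels one byte per iteration off
 * the least-significant end, filling dst_mac back to front, so dst_mac
 * becomes 00:02:c9:00:01:02. The two bytes past ETH_ALEN are zeroed
 * padding (the array is ETH_ALEN + 2 wide for alignment).
 */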
static int mlx4_en_tunnel_steer_add(struct mlx4_en_priv *priv, unsigned char *addr,
				    int qpn, u64 *reg_id)
{
	int err;

	if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN ||
	    priv->mdev->dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC)
		return 0; /* do nothing */

	err = mlx4_tunnel_steer_add(priv->mdev->dev, addr, priv->port, qpn,
				    MLX4_DOMAIN_NIC, reg_id);
	if (err) {
		en_err(priv, "failed to add vxlan steering rule, err %d\n", err);
		return err;
	}
	en_dbg(DRV, priv, "added vxlan steering rule, mac %pM reg_id %llx\n", addr, *reg_id);
	return 0;
}

static int mlx4_en_uc_steer_add(struct mlx4_en_priv *priv,
				unsigned char *mac, int *qpn, u64 *reg_id)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;
	int err;

	switch (dev->caps.steering_mode) {
	case MLX4_STEERING_MODE_B0: {
		struct mlx4_qp qp;
		u8 gid[16] = {0};

		qp.qpn = *qpn;
		memcpy(&gid[10], mac, ETH_ALEN);
		gid[5] = priv->port;

		err = mlx4_unicast_attach(dev, &qp, gid, 0, MLX4_PROT_ETH);
		break;
	}
	case MLX4_STEERING_MODE_DEVICE_MANAGED: {
		struct mlx4_spec_list spec_eth = { {NULL} };
		__be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);

		struct mlx4_net_trans_rule rule = {
			.queue_mode = MLX4_NET_TRANS_Q_FIFO,
			.exclusive = 0,
			.allow_loopback = 1,
			.promisc_mode = MLX4_FS_REGULAR,
			.priority = MLX4_DOMAIN_NIC,
		};

		rule.port = priv->port;
		rule.qpn = *qpn;
		INIT_LIST_HEAD(&rule.list);

		spec_eth.id = MLX4_NET_TRANS_RULE_ID_ETH;
		memcpy(spec_eth.eth.dst_mac, mac, ETH_ALEN);
		memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN);
		list_add_tail(&spec_eth.list, &rule.list);

		err = mlx4_flow_attach(dev, &rule, reg_id);
		break;
	}
	default:
		return -EINVAL;
	}
	if (err)
		en_warn(priv, "Failed Attaching Unicast\n");

	return err;
}

static void mlx4_en_uc_steer_release(struct mlx4_en_priv *priv,
				     unsigned char *mac, int qpn, u64 reg_id)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;

	switch (dev->caps.steering_mode) {
	case MLX4_STEERING_MODE_B0: {
		struct mlx4_qp qp;
		u8 gid[16] = {0};

		qp.qpn = qpn;
		memcpy(&gid[10], mac, ETH_ALEN);
		gid[5] = priv->port;

		mlx4_unicast_detach(dev, &qp, gid, MLX4_PROT_ETH);
		break;
	}
	case MLX4_STEERING_MODE_DEVICE_MANAGED: {
		mlx4_flow_detach(dev, reg_id);
		break;
	}
	default:
		en_err(priv, "Invalid steering mode.\n");
	}
}

static int mlx4_en_get_qp(struct mlx4_en_priv *priv)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;
	int index = 0;
	int err = 0;
	int *qpn = &priv->base_qpn;
	u64 mac = mlx4_mac_to_u64(priv->dev->dev_addr);

	en_dbg(DRV, priv, "Registering MAC: %pM for adding\n",
	       priv->dev->dev_addr);
	index = mlx4_register_mac(dev, priv->port, mac);
	if (index < 0) {
		err = index;
		en_err(priv, "Failed adding MAC: %pM\n",
		       priv->dev->dev_addr);
		return err;
	}

	en_info(priv, "Steering Mode %d\n", dev->caps.steering_mode);

	if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) {
		int base_qpn = mlx4_get_base_qpn(dev, priv->port);
		*qpn = base_qpn + index;
		return 0;
	}

	err = mlx4_qp_reserve_range(dev, 1, 1, qpn, MLX4_RESERVE_A0_QP,
				    MLX4_RES_USAGE_DRIVER);
	en_dbg(DRV, priv, "Reserved qp %d\n", *qpn);
	if (err) {
		en_err(priv, "Failed to reserve qp for mac registration\n");
		mlx4_unregister_mac(dev, priv->port, mac);
		return err;
	}

	return 0;
}

static void mlx4_en_put_qp(struct mlx4_en_priv *priv)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;
	int qpn = priv->base_qpn;

	if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) {
		u64 mac = mlx4_mac_to_u64(priv->dev->dev_addr);
		en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n",
		       priv->dev->dev_addr);
		mlx4_unregister_mac(dev, priv->port, mac);
	} else {
		en_dbg(DRV, priv, "Releasing qp: port %d, qpn %d\n",
		       priv->port, qpn);
		mlx4_qp_release_range(dev, qpn, 1);
		priv->flags &= ~MLX4_EN_FLAG_FORCE_PROMISC;
	}
}

static int mlx4_en_replace_mac(struct mlx4_en_priv *priv, int qpn,
			       unsigned char *new_mac, unsigned char *prev_mac)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;
	int err = 0;
	u64 new_mac_u64 = mlx4_mac_to_u64(new_mac);

	if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0) {
		struct hlist_head *bucket;
		unsigned int mac_hash;
		struct mlx4_mac_entry *entry;
		struct hlist_node *tmp;
		u64 prev_mac_u64 = mlx4_mac_to_u64(prev_mac);

		bucket = &priv->mac_hash[prev_mac[MLX4_EN_MAC_HASH_IDX]];
		hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
			if (ether_addr_equal_64bits(entry->mac, prev_mac)) {
				mlx4_en_uc_steer_release(priv, entry->mac,
							 qpn, entry->reg_id);
				mlx4_unregister_mac(dev, priv->port,
						    prev_mac_u64);
				hlist_del_rcu(&entry->hlist);
				synchronize_rcu();
				memcpy(entry->mac, new_mac, ETH_ALEN);
				entry->reg_id = 0;
				mac_hash = new_mac[MLX4_EN_MAC_HASH_IDX];
				hlist_add_head_rcu(&entry->hlist,
						   &priv->mac_hash[mac_hash]);
				mlx4_register_mac(dev, priv->port, new_mac_u64);
				err = mlx4_en_uc_steer_add(priv, new_mac,
							   &qpn,
							   &entry->reg_id);
				if (err)
					return err;
				if (priv->tunnel_reg_id) {
					mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id);
					priv->tunnel_reg_id = 0;
				}
				err = mlx4_en_tunnel_steer_add(priv, new_mac, qpn,
							       &priv->tunnel_reg_id);
				return err;
			}
		}
		return -EINVAL;
	}

	return __mlx4_replace_mac(dev, priv->port, qpn, new_mac_u64);
}

static void mlx4_en_update_user_mac(struct mlx4_en_priv *priv,
				    unsigned char new_mac[ETH_ALEN + 2])
{
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;

	if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_USER_MAC_EN))
		return;

	err = mlx4_SET_PORT_user_mac(mdev->dev, priv->port, new_mac);
	if (err)
		en_err(priv, "Failed to pass user MAC(%pM) to Firmware for port %d, with error %d\n",
		       new_mac, priv->port, err);
}

static int mlx4_en_do_set_mac(struct mlx4_en_priv *priv,
			      unsigned char new_mac[ETH_ALEN + 2])
{
	int err = 0;

	if (priv->port_up) {
		/* Remove old MAC and insert the new one */
		err = mlx4_en_replace_mac(priv, priv->base_qpn,
					  new_mac, priv->current_mac);
		if (err)
			en_err(priv, "Failed changing HW MAC address\n");
	} else
		en_dbg(HW, priv, "Port is down while registering mac, exiting...\n");

	if (!err)
		memcpy(priv->current_mac, new_mac, sizeof(priv->current_mac));

	return err;
}

static int mlx4_en_set_mac(struct net_device *dev, void *addr)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct sockaddr *saddr = addr;
	unsigned char new_mac[ETH_ALEN + 2];
	int err;

	if (!is_valid_ether_addr(saddr->sa_data))
		return -EADDRNOTAVAIL;

	mutex_lock(&mdev->state_lock);
	memcpy(new_mac, saddr->sa_data, ETH_ALEN);
	err = mlx4_en_do_set_mac(priv, new_mac);
	if (err)
		goto out;

	memcpy(dev->dev_addr, saddr->sa_data, ETH_ALEN);
	mlx4_en_update_user_mac(priv, new_mac);
out:
	mutex_unlock(&mdev->state_lock);
	return err;
}

static void mlx4_en_clear_list(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_mc_list *tmp, *mc_to_del;

	list_for_each_entry_safe(mc_to_del, tmp, &priv->mc_list, list) {
		list_del(&mc_to_del->list);
		kfree(mc_to_del);
	}
}

static void mlx4_en_cache_mclist(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	struct mlx4_en_mc_list *tmp;

	mlx4_en_clear_list(dev);
	netdev_for_each_mc_addr(ha, dev) {
		tmp = kzalloc(sizeof(struct mlx4_en_mc_list), GFP_ATOMIC);
		if (!tmp) {
			mlx4_en_clear_list(dev);
			return;
		}
		memcpy(tmp->addr, ha->addr, ETH_ALEN);
		list_add_tail(&tmp->list, &priv->mc_list);
	}
}

static void update_mclist_flags(struct mlx4_en_priv *priv,
				struct list_head *dst,
				struct list_head *src)
{
	struct mlx4_en_mc_list *dst_tmp, *src_tmp, *new_mc;
	bool found;

	/* Find all the entries that should be removed from dst,
	 * these are the entries that are not found in src
	 */
	list_for_each_entry(dst_tmp, dst, list) {
		found = false;
		list_for_each_entry(src_tmp, src, list) {
			if (ether_addr_equal(dst_tmp->addr, src_tmp->addr)) {
				found = true;
				break;
			}
		}
		if (!found)
			dst_tmp->action = MCLIST_REM;
	}

	/* Add entries that exist in src but not in dst
	 * and mark them as needing to be added
	 */
	list_for_each_entry(src_tmp, src, list) {
		found = false;
		list_for_each_entry(dst_tmp, dst, list) {
			if (ether_addr_equal(dst_tmp->addr, src_tmp->addr)) {
				dst_tmp->action = MCLIST_NONE;
				found = true;
				break;
			}
		}
		if (!found) {
			new_mc = kmemdup(src_tmp,
					 sizeof(struct mlx4_en_mc_list),
					 GFP_KERNEL);
			if (!new_mc)
				return;

			new_mc->action = MCLIST_ADD;
			list_add_tail(&new_mc->list, dst);
		}
	}
}

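/*
 * Editorial note (not part of the original source): this is a two-pass
 * set difference over unsorted lists. Pass one marks dst entries absent
 * from src as MCLIST_REM; pass two marks survivors MCLIST_NONE and
 * copies genuinely new src entries into dst as MCLIST_ADD, so the caller
 * can walk dst once and attach or detach exactly what changed. The cost
 * is O(|dst| * |src|), fine for typical multicast list sizes.
 */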
static void mlx4_en_set_rx_mode(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);

	if (!priv->port_up)
		return;

	queue_work(priv->mdev->workqueue, &priv->rx_mode_task);
}

static void mlx4_en_set_promisc_mode(struct mlx4_en_priv *priv,
				     struct mlx4_en_dev *mdev)
{
	int err = 0;

	if (!(priv->flags & MLX4_EN_FLAG_PROMISC)) {
		if (netif_msg_rx_status(priv))
			en_warn(priv, "Entering promiscuous mode\n");
		priv->flags |= MLX4_EN_FLAG_PROMISC;

		/* Enable promiscuous mode */
		switch (mdev->dev->caps.steering_mode) {
		case MLX4_STEERING_MODE_DEVICE_MANAGED:
			err = mlx4_flow_steer_promisc_add(mdev->dev,
							  priv->port,
							  priv->base_qpn,
							  MLX4_FS_ALL_DEFAULT);
			if (err)
				en_err(priv, "Failed enabling promiscuous mode\n");
			priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
			break;

		case MLX4_STEERING_MODE_B0:
			err = mlx4_unicast_promisc_add(mdev->dev,
						       priv->base_qpn,
						       priv->port);
			if (err)
				en_err(priv, "Failed enabling unicast promiscuous mode\n");

			/* Add the default qp number as multicast
			 * promisc
			 */
			if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
				err = mlx4_multicast_promisc_add(mdev->dev,
								 priv->base_qpn,
								 priv->port);
				if (err)
					en_err(priv, "Failed enabling multicast promiscuous mode\n");
				priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
			}
			break;

		case MLX4_STEERING_MODE_A0:
			err = mlx4_SET_PORT_qpn_calc(mdev->dev,
						     priv->port,
						     priv->base_qpn,
						     1);
			if (err)
				en_err(priv, "Failed enabling promiscuous mode\n");
			break;
		}

		/* Disable port multicast filter (unconditionally) */
		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_DISABLE);
		if (err)
			en_err(priv, "Failed disabling multicast filter\n");
	}
}

static void mlx4_en_clear_promisc_mode(struct mlx4_en_priv *priv,
				       struct mlx4_en_dev *mdev)
{
	int err = 0;

	if (netif_msg_rx_status(priv))
		en_warn(priv, "Leaving promiscuous mode\n");
	priv->flags &= ~MLX4_EN_FLAG_PROMISC;

	/* Disable promiscuous mode */
	switch (mdev->dev->caps.steering_mode) {
	case MLX4_STEERING_MODE_DEVICE_MANAGED:
		err = mlx4_flow_steer_promisc_remove(mdev->dev,
						     priv->port,
						     MLX4_FS_ALL_DEFAULT);
		if (err)
			en_err(priv, "Failed disabling promiscuous mode\n");
		priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
		break;

	case MLX4_STEERING_MODE_B0:
		err = mlx4_unicast_promisc_remove(mdev->dev,
						  priv->base_qpn,
						  priv->port);
		if (err)
			en_err(priv, "Failed disabling unicast promiscuous mode\n");
		/* Disable Multicast promisc */
		if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
			err = mlx4_multicast_promisc_remove(mdev->dev,
							    priv->base_qpn,
							    priv->port);
			if (err)
				en_err(priv, "Failed disabling multicast promiscuous mode\n");
			priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
		}
		break;

	case MLX4_STEERING_MODE_A0:
		err = mlx4_SET_PORT_qpn_calc(mdev->dev,
					     priv->port,
					     priv->base_qpn, 0);
		if (err)
			en_err(priv, "Failed disabling promiscuous mode\n");
		break;
	}
}

static void mlx4_en_do_multicast(struct mlx4_en_priv *priv,
				 struct net_device *dev,
				 struct mlx4_en_dev *mdev)
{
	struct mlx4_en_mc_list *mclist, *tmp;
	u64 mcast_addr = 0;
	u8 mc_list[16] = {0};
	int err = 0;

	/* Enable/disable the multicast filter according to IFF_ALLMULTI */
	if (dev->flags & IFF_ALLMULTI) {
		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_DISABLE);
		if (err)
			en_err(priv, "Failed disabling multicast filter\n");

		/* Add the default qp number as multicast promisc */
		if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
			switch (mdev->dev->caps.steering_mode) {
			case MLX4_STEERING_MODE_DEVICE_MANAGED:
				err = mlx4_flow_steer_promisc_add(mdev->dev,
								  priv->port,
								  priv->base_qpn,
								  MLX4_FS_MC_DEFAULT);
				break;

			case MLX4_STEERING_MODE_B0:
				err = mlx4_multicast_promisc_add(mdev->dev,
								 priv->base_qpn,
								 priv->port);
				break;

			case MLX4_STEERING_MODE_A0:
				break;
			}
			if (err)
				en_err(priv, "Failed entering multicast promisc mode\n");
			priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
		}
	} else {
		/* Disable Multicast promisc */
		if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
			switch (mdev->dev->caps.steering_mode) {
			case MLX4_STEERING_MODE_DEVICE_MANAGED:
				err = mlx4_flow_steer_promisc_remove(mdev->dev,
								     priv->port,
								     MLX4_FS_MC_DEFAULT);
				break;

			case MLX4_STEERING_MODE_B0:
				err = mlx4_multicast_promisc_remove(mdev->dev,
								    priv->base_qpn,
								    priv->port);
				break;

			case MLX4_STEERING_MODE_A0:
				break;
			}
			if (err)
				en_err(priv, "Failed disabling multicast promiscuous mode\n");
			priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
		}

		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_DISABLE);
		if (err)
			en_err(priv, "Failed disabling multicast filter\n");

		/* Flush mcast filter and init it with broadcast address */
		mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, ETH_BCAST,
				    1, MLX4_MCAST_CONFIG);

		/* Update multicast list - we cache all addresses so they won't
		 * change while HW is updated holding the command semaphore
		 */
		netif_addr_lock_bh(dev);
		mlx4_en_cache_mclist(dev);
		netif_addr_unlock_bh(dev);
		list_for_each_entry(mclist, &priv->mc_list, list) {
			mcast_addr = mlx4_mac_to_u64(mclist->addr);
			mlx4_SET_MCAST_FLTR(mdev->dev, priv->port,
					    mcast_addr, 0, MLX4_MCAST_CONFIG);
		}
		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_ENABLE);
		if (err)
			en_err(priv, "Failed enabling multicast filter\n");

		update_mclist_flags(priv, &priv->curr_list, &priv->mc_list);
		list_for_each_entry_safe(mclist, tmp, &priv->curr_list, list) {
			if (mclist->action == MCLIST_REM) {
				/* detach this address and delete from list */
				memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
				mc_list[5] = priv->port;
				err = mlx4_multicast_detach(mdev->dev,
							    priv->rss_map.indir_qp,
							    mc_list,
							    MLX4_PROT_ETH,
							    mclist->reg_id);
				if (err)
					en_err(priv, "Fail to detach multicast address\n");

				if (mclist->tunnel_reg_id) {
					err = mlx4_flow_detach(priv->mdev->dev, mclist->tunnel_reg_id);
					if (err)
						en_err(priv, "Failed to detach multicast address\n");
				}

				/* remove from list */
				list_del(&mclist->list);
				kfree(mclist);
			} else if (mclist->action == MCLIST_ADD) {
				/* attach the address */
				memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
				/* needed for B0 steering support */
				mc_list[5] = priv->port;
				err = mlx4_multicast_attach(mdev->dev,
							    priv->rss_map.indir_qp,
							    mc_list,
							    priv->port, 0,
							    MLX4_PROT_ETH,
							    &mclist->reg_id);
				if (err)
					en_err(priv, "Fail to attach multicast address\n");

				err = mlx4_en_tunnel_steer_add(priv, &mc_list[10], priv->base_qpn,
							       &mclist->tunnel_reg_id);
				if (err)
					en_err(priv, "Failed to attach multicast address\n");
			}
		}
	}
}

static void mlx4_en_do_uc_filter(struct mlx4_en_priv *priv,
				 struct net_device *dev,
				 struct mlx4_en_dev *mdev)
{
	struct netdev_hw_addr *ha;
	struct mlx4_mac_entry *entry;
	struct hlist_node *tmp;
	bool found;
	u64 mac;
	int err = 0;
	struct hlist_head *bucket;
	unsigned int i;
	int removed = 0;
	u32 prev_flags;

	/* Note that we do not need to protect our mac_hash traversal with rcu,
	 * since all modification code is protected by mdev->state_lock
	 */

	/* find what to remove */
	for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) {
		bucket = &priv->mac_hash[i];
		hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
			found = false;
			netdev_for_each_uc_addr(ha, dev) {
				if (ether_addr_equal_64bits(entry->mac,
							    ha->addr)) {
					found = true;
					break;
				}
			}

			/* MAC address of the port is not in uc list */
			if (ether_addr_equal_64bits(entry->mac,
						    dev->dev_addr))
				found = true;

			if (!found) {
				mac = mlx4_mac_to_u64(entry->mac);
				mlx4_en_uc_steer_release(priv, entry->mac,
							 priv->base_qpn,
							 entry->reg_id);
				mlx4_unregister_mac(mdev->dev, priv->port, mac);

				hlist_del_rcu(&entry->hlist);
				kfree_rcu(entry, rcu);
				en_dbg(DRV, priv, "Removed MAC %pM on port:%d\n",
				       entry->mac, priv->port);
				++removed;
			}
		}
	}

	/* if we didn't remove anything, there is no use in trying to add
	 * again once we are in a forced promisc mode state
	 */
	if ((priv->flags & MLX4_EN_FLAG_FORCE_PROMISC) && 0 == removed)
		return;

	prev_flags = priv->flags;
	priv->flags &= ~MLX4_EN_FLAG_FORCE_PROMISC;

	/* find what to add */
	netdev_for_each_uc_addr(ha, dev) {
		found = false;
		bucket = &priv->mac_hash[ha->addr[MLX4_EN_MAC_HASH_IDX]];
		hlist_for_each_entry(entry, bucket, hlist) {
			if (ether_addr_equal_64bits(entry->mac, ha->addr)) {
				found = true;
				break;
			}
		}

		if (!found) {
			entry = kmalloc(sizeof(*entry), GFP_KERNEL);
			if (!entry) {
				en_err(priv, "Failed adding MAC %pM on port:%d (out of memory)\n",
				       ha->addr, priv->port);
				priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC;
				break;
			}
			mac = mlx4_mac_to_u64(ha->addr);
			memcpy(entry->mac, ha->addr, ETH_ALEN);
			err = mlx4_register_mac(mdev->dev, priv->port, mac);
			if (err < 0) {
				en_err(priv, "Failed registering MAC %pM on port %d: %d\n",
				       ha->addr, priv->port, err);
				kfree(entry);
				priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC;
				break;
			}
			err = mlx4_en_uc_steer_add(priv, ha->addr,
						   &priv->base_qpn,
						   &entry->reg_id);
			if (err) {
				en_err(priv, "Failed adding MAC %pM on port %d: %d\n",
				       ha->addr, priv->port, err);
				mlx4_unregister_mac(mdev->dev, priv->port, mac);
				kfree(entry);
				priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC;
				break;
			} else {
				unsigned int mac_hash;
				en_dbg(DRV, priv, "Added MAC %pM on port:%d\n",
				       ha->addr, priv->port);
				mac_hash = ha->addr[MLX4_EN_MAC_HASH_IDX];
				bucket = &priv->mac_hash[mac_hash];
				hlist_add_head_rcu(&entry->hlist, bucket);
			}
		}
	}

	if (priv->flags & MLX4_EN_FLAG_FORCE_PROMISC) {
		en_warn(priv, "Forcing promiscuous mode on port:%d\n",
			priv->port);
	} else if (prev_flags & MLX4_EN_FLAG_FORCE_PROMISC) {
		en_warn(priv, "Stop forcing promiscuous mode on port:%d\n",
			priv->port);
	}
}

static void mlx4_en_do_set_rx_mode(struct work_struct *work)
{
	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
						 rx_mode_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct net_device *dev = priv->dev;

	mutex_lock(&mdev->state_lock);
	if (!mdev->device_up) {
		en_dbg(HW, priv, "Card is not up, ignoring rx mode change.\n");
		goto out;
	}
	if (!priv->port_up) {
		en_dbg(HW, priv, "Port is down, ignoring rx mode change.\n");
		goto out;
	}

	if (!netif_carrier_ok(dev)) {
		if (!mlx4_en_QUERY_PORT(mdev, priv->port)) {
			if (priv->port_state.link_state) {
				priv->last_link_state = MLX4_DEV_EVENT_PORT_UP;
				netif_carrier_on(dev);
				en_dbg(LINK, priv, "Link Up\n");
			}
		}
	}

	if (dev->priv_flags & IFF_UNICAST_FLT)
		mlx4_en_do_uc_filter(priv, dev, mdev);

	/* Promiscuous mode: disable all filters */
	if ((dev->flags & IFF_PROMISC) ||
	    (priv->flags & MLX4_EN_FLAG_FORCE_PROMISC)) {
		mlx4_en_set_promisc_mode(priv, mdev);
		goto out;
	}

	/* Not in promiscuous mode */
	if (priv->flags & MLX4_EN_FLAG_PROMISC)
		mlx4_en_clear_promisc_mode(priv, mdev);

	mlx4_en_do_multicast(priv, dev, mdev);
out:
	mutex_unlock(&mdev->state_lock);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void mlx4_en_netpoll(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_cq *cq;
	int i;

	for (i = 0; i < priv->tx_ring_num[TX]; i++) {
		cq = priv->tx_cq[TX][i];
		napi_schedule(&cq->napi);
	}
}
#endif

static int mlx4_en_set_rss_steer_rules(struct mlx4_en_priv *priv)
{
	u64 reg_id;
	int err = 0;
	int *qpn = &priv->base_qpn;
	struct mlx4_mac_entry *entry;

	err = mlx4_en_uc_steer_add(priv, priv->dev->dev_addr, qpn, &reg_id);
	if (err)
		return err;

	err = mlx4_en_tunnel_steer_add(priv, priv->dev->dev_addr, *qpn,
				       &priv->tunnel_reg_id);
	if (err)
		goto tunnel_err;

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry) {
		err = -ENOMEM;
		goto alloc_err;
	}

	memcpy(entry->mac, priv->dev->dev_addr, sizeof(entry->mac));
	memcpy(priv->current_mac, entry->mac, sizeof(priv->current_mac));
	entry->reg_id = reg_id;
	hlist_add_head_rcu(&entry->hlist,
			   &priv->mac_hash[entry->mac[MLX4_EN_MAC_HASH_IDX]]);

	return 0;

alloc_err:
	if (priv->tunnel_reg_id)
		mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id);

tunnel_err:
	mlx4_en_uc_steer_release(priv, priv->dev->dev_addr, *qpn, reg_id);
	return err;
}

static void mlx4_en_delete_rss_steer_rules(struct mlx4_en_priv *priv)
{
	u64 mac;
	unsigned int i;
	int qpn = priv->base_qpn;
	struct hlist_head *bucket;
	struct hlist_node *tmp;
	struct mlx4_mac_entry *entry;

	for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) {
		bucket = &priv->mac_hash[i];
		hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
			mac = mlx4_mac_to_u64(entry->mac);
			en_dbg(DRV, priv, "Registering MAC:%pM for deleting\n",
			       entry->mac);
			mlx4_en_uc_steer_release(priv, entry->mac,
						 qpn, entry->reg_id);

			mlx4_unregister_mac(priv->mdev->dev, priv->port, mac);
			hlist_del_rcu(&entry->hlist);
			kfree_rcu(entry, rcu);
		}
	}

	if (priv->tunnel_reg_id) {
		mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id);
		priv->tunnel_reg_id = 0;
	}
}

static void mlx4_en_tx_timeout(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int i;

	if (netif_msg_timer(priv))
		en_warn(priv, "Tx timeout called on port:%d\n", priv->port);

	for (i = 0; i < priv->tx_ring_num[TX]; i++) {
		struct mlx4_en_tx_ring *tx_ring = priv->tx_ring[TX][i];

		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, i)))
			continue;
		en_warn(priv, "TX timeout on queue: %d, QP: 0x%x, CQ: 0x%x, Cons: 0x%x, Prod: 0x%x\n",
			i, tx_ring->qpn, tx_ring->sp_cqn,
			tx_ring->cons, tx_ring->prod);
	}

	priv->port_stats.tx_timeout++;
	if (!test_and_set_bit(MLX4_EN_STATE_FLAG_RESTARTING, &priv->state)) {
		en_dbg(DRV, priv, "Scheduling port restart\n");
		queue_work(mdev->workqueue, &priv->restart_task);
	}
}

static void
mlx4_en_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);

	spin_lock_bh(&priv->stats_lock);
	mlx4_en_fold_software_stats(dev);
	netdev_stats_to_stats64(stats, &dev->stats);
	spin_unlock_bh(&priv->stats_lock);
}

static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
{
	struct mlx4_en_cq *cq;
	int i, t;

	/* If we haven't received a specific coalescing setting
	 * (module param), we set the moderation parameters as follows:
	 * - moder_cnt is set to the number of mtu sized packets to
	 *   satisfy our coalescing target.
	 * - moder_time is set to a fixed value.
	 */
	priv->rx_frames = MLX4_EN_RX_COAL_TARGET;
	priv->rx_usecs = MLX4_EN_RX_COAL_TIME;
	priv->tx_frames = MLX4_EN_TX_COAL_PKTS;
	priv->tx_usecs = MLX4_EN_TX_COAL_TIME;
	en_dbg(INTR, priv, "Default coalescing params for mtu:%d - rx_frames:%d rx_usecs:%d\n",
	       priv->dev->mtu, priv->rx_frames, priv->rx_usecs);

	/* Setup cq moderation params */
	for (i = 0; i < priv->rx_ring_num; i++) {
		cq = priv->rx_cq[i];
		cq->moder_cnt = priv->rx_frames;
		cq->moder_time = priv->rx_usecs;
		priv->last_moder_time[i] = MLX4_EN_AUTO_CONF;
		priv->last_moder_packets[i] = 0;
		priv->last_moder_bytes[i] = 0;
	}

	for (t = 0 ; t < MLX4_EN_NUM_TX_TYPES; t++) {
		for (i = 0; i < priv->tx_ring_num[t]; i++) {
			cq = priv->tx_cq[t][i];
			cq->moder_cnt = priv->tx_frames;
			cq->moder_time = priv->tx_usecs;
		}
	}

	/* Reset auto-moderation params */
	priv->pkt_rate_low = MLX4_EN_RX_RATE_LOW;
	priv->rx_usecs_low = MLX4_EN_RX_COAL_TIME_LOW;
	priv->pkt_rate_high = MLX4_EN_RX_RATE_HIGH;
	priv->rx_usecs_high = MLX4_EN_RX_COAL_TIME_HIGH;
	priv->sample_interval = MLX4_EN_SAMPLE_INTERVAL;
	priv->adaptive_rx_coal = 1;
	priv->last_moder_jiffies = 0;
	priv->last_moder_tx_packets = 0;
}

static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
{
	unsigned long period = (unsigned long) (jiffies - priv->last_moder_jiffies);
	u32 pkt_rate_high, pkt_rate_low;
	struct mlx4_en_cq *cq;
	unsigned long packets;
	unsigned long rate;
	unsigned long avg_pkt_size;
	unsigned long rx_packets;
	unsigned long rx_bytes;
	unsigned long rx_pkt_diff;
	int moder_time;
	int ring, err;

	if (!priv->adaptive_rx_coal || period < priv->sample_interval * HZ)
		return;

	pkt_rate_low = READ_ONCE(priv->pkt_rate_low);
	pkt_rate_high = READ_ONCE(priv->pkt_rate_high);

	for (ring = 0; ring < priv->rx_ring_num; ring++) {
		rx_packets = READ_ONCE(priv->rx_ring[ring]->packets);
		rx_bytes = READ_ONCE(priv->rx_ring[ring]->bytes);

		rx_pkt_diff = rx_packets - priv->last_moder_packets[ring];
		packets = rx_pkt_diff;
		rate = packets * HZ / period;
		avg_pkt_size = packets ? (rx_bytes -
				priv->last_moder_bytes[ring]) / packets : 0;

		/* Apply auto-moderation only when the packet rate
		 * exceeds a rate at which it matters */
		if (rate > (MLX4_EN_RX_RATE_THRESH / priv->rx_ring_num) &&
		    avg_pkt_size > MLX4_EN_AVG_PKT_SMALL) {
			if (rate <= pkt_rate_low)
				moder_time = priv->rx_usecs_low;
			else if (rate >= pkt_rate_high)
				moder_time = priv->rx_usecs_high;
			else
				moder_time = (rate - pkt_rate_low) *
					(priv->rx_usecs_high - priv->rx_usecs_low) /
					(pkt_rate_high - pkt_rate_low) +
					priv->rx_usecs_low;
		} else {
			moder_time = priv->rx_usecs_low;
		}

		cq = priv->rx_cq[ring];
		if (moder_time != priv->last_moder_time[ring] ||
		    cq->moder_cnt != priv->rx_frames) {
			priv->last_moder_time[ring] = moder_time;
			cq->moder_time = moder_time;
			cq->moder_cnt = priv->rx_frames;
			err = mlx4_en_set_cq_moder(priv, cq);
			if (err)
				en_err(priv, "Failed modifying moderation for cq:%d\n",
				       ring);
		}
		priv->last_moder_packets[ring] = rx_packets;
		priv->last_moder_bytes[ring] = rx_bytes;
	}

	priv->last_moder_jiffies = jiffies;
}

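/*
 * Worked example (illustrative note, not part of the original source):
 * suppose rx_usecs_low = 0, rx_usecs_high = 128, pkt_rate_low = 400k pps
 * and pkt_rate_high = 1000k pps. A measured rate of 700k pps lies halfway
 * between the thresholds, so the interpolation above yields
 * moder_time = (700k - 400k) * (128 - 0) / (1000k - 400k) + 0 = 64 usec.
 */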
static void mlx4_en_do_get_stats(struct work_struct *work)
{
	struct delayed_work *delay = to_delayed_work(work);
	struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv,
						 stats_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;

	mutex_lock(&mdev->state_lock);
	if (mdev->device_up) {
		if (priv->port_up) {
			err = mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 0);
			if (err)
				en_dbg(HW, priv, "Could not update stats\n");

			mlx4_en_auto_moderation(priv);
		}

		queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
	}
	if (mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port]) {
		mlx4_en_do_set_mac(priv, priv->current_mac);
		mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port] = 0;
	}
	mutex_unlock(&mdev->state_lock);
}

/* mlx4_en_service_task - Run service task for tasks that need to be done
 * periodically
 */
static void mlx4_en_service_task(struct work_struct *work)
{
	struct delayed_work *delay = to_delayed_work(work);
	struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv,
						 service_task);
	struct mlx4_en_dev *mdev = priv->mdev;

	mutex_lock(&mdev->state_lock);
	if (mdev->device_up) {
		if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
			mlx4_en_ptp_overflow_check(mdev);

		mlx4_en_recover_from_oom(priv);
		queue_delayed_work(mdev->workqueue, &priv->service_task,
				   SERVICE_TASK_DELAY);
	}
	mutex_unlock(&mdev->state_lock);
}

static void mlx4_en_linkstate(struct work_struct *work)
{
	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
						 linkstate_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	int linkstate = priv->link_state;

	mutex_lock(&mdev->state_lock);
	/* If the observable port state changed, set the carrier state and
	 * report to the system log */
	if (priv->last_link_state != linkstate) {
		if (linkstate == MLX4_DEV_EVENT_PORT_DOWN) {
			en_info(priv, "Link Down\n");
			netif_carrier_off(priv->dev);
		} else {
			en_info(priv, "Link Up\n");
			netif_carrier_on(priv->dev);
		}
	}
	priv->last_link_state = linkstate;
	mutex_unlock(&mdev->state_lock);
}

static int mlx4_en_init_affinity_hint(struct mlx4_en_priv *priv, int ring_idx)
{
	struct mlx4_en_rx_ring *ring = priv->rx_ring[ring_idx];
	int numa_node = priv->mdev->dev->numa_node;

	if (!zalloc_cpumask_var(&ring->affinity_mask, GFP_KERNEL))
		return -ENOMEM;

	cpumask_set_cpu(cpumask_local_spread(ring_idx, numa_node),
			ring->affinity_mask);
	return 0;
}

static void mlx4_en_free_affinity_hint(struct mlx4_en_priv *priv, int ring_idx)
{
	free_cpumask_var(priv->rx_ring[ring_idx]->affinity_mask);
}

static void mlx4_en_init_recycle_ring(struct mlx4_en_priv *priv,
				      int tx_ring_idx)
{
	struct mlx4_en_tx_ring *tx_ring = priv->tx_ring[TX_XDP][tx_ring_idx];
	int rr_index = tx_ring_idx;

	tx_ring->free_tx_desc = mlx4_en_recycle_tx_desc;
	tx_ring->recycle_ring = priv->rx_ring[rr_index];
	en_dbg(DRV, priv, "Set tx_ring[%d][%d]->recycle_ring = rx_ring[%d]\n",
	       TX_XDP, tx_ring_idx, rr_index);
}

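/*
 * Editorial note (not part of the original source): for XDP_TX each XDP
 * TX ring is paired one-to-one with the RX ring of the same index, so
 * completed transmit descriptors can return their pages directly to the
 * RX ring that produced them instead of round-tripping through the page
 * allocator.
 */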
int mlx4_en_start_port(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_cq *cq;
	struct mlx4_en_tx_ring *tx_ring;
	int rx_index = 0;
	int err = 0;
	int i, t;
	int j;
	u8 mc_list[16] = {0};

	if (priv->port_up) {
		en_dbg(DRV, priv, "start port called while port already up\n");
		return 0;
	}

	INIT_LIST_HEAD(&priv->mc_list);
	INIT_LIST_HEAD(&priv->curr_list);
	INIT_LIST_HEAD(&priv->ethtool_list);
	memset(&priv->ethtool_rules[0], 0,
	       sizeof(struct ethtool_flow_id) * MAX_NUM_OF_FS_RULES);

	/* Calculate Rx buf size */
	dev->mtu = min(dev->mtu, priv->max_mtu);
	mlx4_en_calc_rx_buf(dev);
	en_dbg(DRV, priv, "Rx buf size:%d\n", priv->rx_skb_size);

	/* Configure rx cq's and rings */
	err = mlx4_en_activate_rx_rings(priv);
	if (err) {
		en_err(priv, "Failed to activate RX rings\n");
		return err;
	}
	for (i = 0; i < priv->rx_ring_num; i++) {
		cq = priv->rx_cq[i];

		err = mlx4_en_init_affinity_hint(priv, i);
		if (err) {
			en_err(priv, "Failed preparing IRQ affinity hint\n");
			goto cq_err;
		}

		err = mlx4_en_activate_cq(priv, cq, i);
		if (err) {
			en_err(priv, "Failed activating Rx CQ\n");
			mlx4_en_free_affinity_hint(priv, i);
			goto cq_err;
		}

		for (j = 0; j < cq->size; j++) {
			struct mlx4_cqe *cqe = NULL;

			cqe = mlx4_en_get_cqe(cq->buf, j, priv->cqe_size) +
			      priv->cqe_factor;
			cqe->owner_sr_opcode = MLX4_CQE_OWNER_MASK;
		}

		err = mlx4_en_set_cq_moder(priv, cq);
		if (err) {
			en_err(priv, "Failed setting cq moderation parameters\n");
			mlx4_en_deactivate_cq(priv, cq);
			mlx4_en_free_affinity_hint(priv, i);
			goto cq_err;
		}
		mlx4_en_arm_cq(priv, cq);
		priv->rx_ring[i]->cqn = cq->mcq.cqn;
		++rx_index;
	}

	/* Set qp number */
	en_dbg(DRV, priv, "Getting qp number for port %d\n", priv->port);
	err = mlx4_en_get_qp(priv);
	if (err) {
		en_err(priv, "Failed getting eth qp\n");
		goto cq_err;
	}
	mdev->mac_removed[priv->port] = 0;

	priv->counter_index =
			mlx4_get_default_counter_index(mdev->dev, priv->port);

	err = mlx4_en_config_rss_steer(priv);
	if (err) {
		en_err(priv, "Failed configuring rss steering\n");
		goto mac_err;
	}

	err = mlx4_en_create_drop_qp(priv);
	if (err)
		goto rss_err;

	/* Configure tx cq's and rings */
	for (t = 0 ; t < MLX4_EN_NUM_TX_TYPES; t++) {
		u8 num_tx_rings_p_up = t == TX ?
			priv->num_tx_rings_p_up : priv->tx_ring_num[t];

		for (i = 0; i < priv->tx_ring_num[t]; i++) {
			/* Configure cq */
			cq = priv->tx_cq[t][i];
			err = mlx4_en_activate_cq(priv, cq, i);
			if (err) {
				en_err(priv, "Failed allocating Tx CQ\n");
				goto tx_err;
			}
			err = mlx4_en_set_cq_moder(priv, cq);
			if (err) {
				en_err(priv, "Failed setting cq moderation parameters\n");
				mlx4_en_deactivate_cq(priv, cq);
				goto tx_err;
			}
			en_dbg(DRV, priv,
			       "Resetting index of collapsed CQ:%d to -1\n", i);
			cq->buf->wqe_index = cpu_to_be16(0xffff);

			/* Configure ring */
			tx_ring = priv->tx_ring[t][i];
			err = mlx4_en_activate_tx_ring(priv, tx_ring,
						       cq->mcq.cqn,
						       i / num_tx_rings_p_up);
			if (err) {
				en_err(priv, "Failed allocating Tx ring\n");
				mlx4_en_deactivate_cq(priv, cq);
				goto tx_err;
			}
			clear_bit(MLX4_EN_TX_RING_STATE_RECOVERING, &tx_ring->state);
			if (t != TX_XDP) {
				tx_ring->tx_queue = netdev_get_tx_queue(dev, i);
				tx_ring->recycle_ring = NULL;

				/* Arm CQ for TX completions */
				mlx4_en_arm_cq(priv, cq);
			} else {
				mlx4_en_init_recycle_ring(priv, i);
				/* XDP TX CQ should never be armed */
			}

			/* Set initial ownership of all Tx TXBBs to SW (1) */
			for (j = 0; j < tx_ring->buf_size; j += STAMP_STRIDE)
				*((u32 *)(tx_ring->buf + j)) = 0xffffffff;
		}
	}

	/* Configure port */
	err = mlx4_SET_PORT_general(mdev->dev, priv->port,
				    priv->rx_skb_size + ETH_FCS_LEN,
				    priv->prof->tx_pause,
				    priv->prof->tx_ppp,
				    priv->prof->rx_pause,
				    priv->prof->rx_ppp);
	if (err) {
		en_err(priv, "Failed setting port general configurations for port %d, with error %d\n",
		       priv->port, err);
		goto tx_err;
	}

	err = mlx4_SET_PORT_user_mtu(mdev->dev, priv->port, dev->mtu);
	if (err) {
		en_err(priv, "Failed to pass user MTU(%d) to Firmware for port %d, with error %d\n",
		       dev->mtu, priv->port, err);
		goto tx_err;
	}

	/* Set default qp number */
	err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, priv->base_qpn, 0);
	if (err) {
		en_err(priv, "Failed setting default qp numbers\n");
		goto tx_err;
	}

	if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
		err = mlx4_SET_PORT_VXLAN(mdev->dev, priv->port, VXLAN_STEER_BY_OUTER_MAC, 1);
		if (err) {
			en_err(priv, "Failed setting port L2 tunnel configuration, err %d\n",
			       err);
			goto tx_err;
		}
	}

	/* Init port */
	en_dbg(HW, priv, "Initializing port\n");
	err = mlx4_INIT_PORT(mdev->dev, priv->port);
	if (err) {
		en_err(priv, "Failed Initializing port\n");
		goto tx_err;
	}

	/* Set Unicast and VXLAN steering rules */
	if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0 &&
	    mlx4_en_set_rss_steer_rules(priv))
		mlx4_warn(mdev, "Failed setting steering rules\n");

	/* Attach rx QP to broadcast address */
	eth_broadcast_addr(&mc_list[10]);
	mc_list[5] = priv->port; /* needed for B0 steering support */
	if (mlx4_multicast_attach(mdev->dev, priv->rss_map.indir_qp, mc_list,
				  priv->port, 0, MLX4_PROT_ETH,
				  &priv->broadcast_id))
		mlx4_warn(mdev, "Failed Attaching Broadcast\n");

	/* Must redo promiscuous mode setup. */
	priv->flags &= ~(MLX4_EN_FLAG_PROMISC | MLX4_EN_FLAG_MC_PROMISC);

	/* Schedule multicast task to populate multicast list */
	queue_work(mdev->workqueue, &priv->rx_mode_task);

	if (priv->mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
		udp_tunnel_get_rx_info(dev);

	priv->port_up = true;

	/* Process all completions if they exist to prevent
	 * the queues freezing if they are full
	 */
	for (i = 0; i < priv->rx_ring_num; i++) {
		local_bh_disable();
		napi_schedule(&priv->rx_cq[i]->napi);
		local_bh_enable();
	}

	clear_bit(MLX4_EN_STATE_FLAG_RESTARTING, &priv->state);
	netif_tx_start_all_queues(dev);
	netif_device_attach(dev);

	return 0;

tx_err:
	if (t == MLX4_EN_NUM_TX_TYPES) {
		t--;
		i = priv->tx_ring_num[t];
	}
	while (t >= 0) {
		while (i--) {
			mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[t][i]);
			mlx4_en_deactivate_cq(priv, priv->tx_cq[t][i]);
		}
		if (!t--)
			break;
		i = priv->tx_ring_num[t];
	}
	mlx4_en_destroy_drop_qp(priv);
rss_err:
	mlx4_en_release_rss_steer(priv);
mac_err:
	mlx4_en_put_qp(priv);
cq_err:
	while (rx_index--) {
		mlx4_en_deactivate_cq(priv, priv->rx_cq[rx_index]);
		mlx4_en_free_affinity_hint(priv, rx_index);
	}
	for (i = 0; i < priv->rx_ring_num; i++)
		mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]);

	return err; /* need to close devices */
}

void mlx4_en_stop_port(struct net_device *dev, int detach)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_mc_list *mclist, *tmp;
	struct ethtool_flow_id *flow, *tmp_flow;
	int i, t;
	u8 mc_list[16] = {0};

	if (!priv->port_up) {
		en_dbg(DRV, priv, "stop port called while port already down\n");
		return;
	}

	/* close port */
	mlx4_CLOSE_PORT(mdev->dev, priv->port);

	/* Synchronize with tx routine */
	netif_tx_lock_bh(dev);
	if (detach)
		netif_device_detach(dev);
	netif_tx_stop_all_queues(dev);
	netif_tx_unlock_bh(dev);

	netif_tx_disable(dev);

	spin_lock_bh(&priv->stats_lock);
	mlx4_en_fold_software_stats(dev);
	/* Set port as not active */
	priv->port_up = false;
	spin_unlock_bh(&priv->stats_lock);

	priv->counter_index = MLX4_SINK_COUNTER_INDEX(mdev->dev);

	/* Promiscuous mode */
	if (mdev->dev->caps.steering_mode ==
	    MLX4_STEERING_MODE_DEVICE_MANAGED) {
		priv->flags &= ~(MLX4_EN_FLAG_PROMISC |
				 MLX4_EN_FLAG_MC_PROMISC);
		mlx4_flow_steer_promisc_remove(mdev->dev,
					       priv->port,
					       MLX4_FS_ALL_DEFAULT);
		mlx4_flow_steer_promisc_remove(mdev->dev,
					       priv->port,
					       MLX4_FS_MC_DEFAULT);
	} else if (priv->flags & MLX4_EN_FLAG_PROMISC) {
		priv->flags &= ~MLX4_EN_FLAG_PROMISC;

		/* Disable promiscuous mode */
		mlx4_unicast_promisc_remove(mdev->dev, priv->base_qpn,
					    priv->port);

		/* Disable Multicast promisc */
		if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
			mlx4_multicast_promisc_remove(mdev->dev, priv->base_qpn,
						      priv->port);
			priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
		}
	}

	/* Detach All multicasts */
	eth_broadcast_addr(&mc_list[10]);
	mc_list[5] = priv->port; /* needed for B0 steering support */
	mlx4_multicast_detach(mdev->dev, priv->rss_map.indir_qp, mc_list,
			      MLX4_PROT_ETH, priv->broadcast_id);
	list_for_each_entry(mclist, &priv->curr_list, list) {
		memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
		mc_list[5] = priv->port;
		mlx4_multicast_detach(mdev->dev, priv->rss_map.indir_qp,
				      mc_list, MLX4_PROT_ETH, mclist->reg_id);
		if (mclist->tunnel_reg_id)
			mlx4_flow_detach(mdev->dev, mclist->tunnel_reg_id);
	}
	mlx4_en_clear_list(dev);
	list_for_each_entry_safe(mclist, tmp, &priv->curr_list, list) {
		list_del(&mclist->list);
		kfree(mclist);
	}

	/* Flush multicast filter */
	mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 1, MLX4_MCAST_CONFIG);

	/* Remove flow steering rules for the port */
	if (mdev->dev->caps.steering_mode ==
	    MLX4_STEERING_MODE_DEVICE_MANAGED) {
		ASSERT_RTNL();
		list_for_each_entry_safe(flow, tmp_flow,
					 &priv->ethtool_list, list) {
			mlx4_flow_detach(mdev->dev, flow->id);
			list_del(&flow->list);
		}
	}

	mlx4_en_destroy_drop_qp(priv);

	/* Free TX Rings */
	for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
		for (i = 0; i < priv->tx_ring_num[t]; i++) {
			mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[t][i]);
			mlx4_en_deactivate_cq(priv, priv->tx_cq[t][i]);
		}
	}
	msleep(10);

	for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++)
		for (i = 0; i < priv->tx_ring_num[t]; i++)
			mlx4_en_free_tx_buf(dev, priv->tx_ring[t][i]);

	if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0)
		mlx4_en_delete_rss_steer_rules(priv);

	/* Free RSS qps */
	mlx4_en_release_rss_steer(priv);

	/* Unregister Mac address for the port */
	mlx4_en_put_qp(priv);
	if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_REASSIGN_MAC_EN))
		mdev->mac_removed[priv->port] = 1;

	/* Free RX Rings */
	for (i = 0; i < priv->rx_ring_num; i++) {
		struct mlx4_en_cq *cq = priv->rx_cq[i];

		napi_synchronize(&cq->napi);
		mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]);
		mlx4_en_deactivate_cq(priv, cq);

		mlx4_en_free_affinity_hint(priv, i);
	}
}

static void mlx4_en_restart(struct work_struct *work)
{
	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
						 restart_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct net_device *dev = priv->dev;

	en_dbg(DRV, priv, "Watchdog task called for port %d\n", priv->port);

	rtnl_lock();
	mutex_lock(&mdev->state_lock);
	if (priv->port_up) {
		mlx4_en_stop_port(dev, 1);
		if (mlx4_en_start_port(dev))
			en_err(priv, "Failed restarting port %d\n", priv->port);
	}
	mutex_unlock(&mdev->state_lock);
	rtnl_unlock();
}

static void mlx4_en_clear_stats(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_tx_ring **tx_ring;
	int i;

	if (!mlx4_is_slave(mdev->dev))
		if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1))
			en_dbg(HW, priv, "Failed dumping statistics\n");

	memset(&priv->pstats, 0, sizeof(priv->pstats));
	memset(&priv->pkstats, 0, sizeof(priv->pkstats));
	memset(&priv->port_stats, 0, sizeof(priv->port_stats));
	memset(&priv->rx_flowstats, 0, sizeof(priv->rx_flowstats));
	memset(&priv->tx_flowstats, 0, sizeof(priv->tx_flowstats));
	memset(&priv->rx_priority_flowstats, 0,
	       sizeof(priv->rx_priority_flowstats));
	memset(&priv->tx_priority_flowstats, 0,
	       sizeof(priv->tx_priority_flowstats));
	memset(&priv->pf_stats, 0, sizeof(priv->pf_stats));

	tx_ring = priv->tx_ring[TX];
	for (i = 0; i < priv->tx_ring_num[TX]; i++) {
		tx_ring[i]->bytes = 0;
		tx_ring[i]->packets = 0;
		tx_ring[i]->tx_csum = 0;
		tx_ring[i]->tx_dropped = 0;
		tx_ring[i]->queue_stopped = 0;
		tx_ring[i]->wake_queue = 0;
		tx_ring[i]->tso_packets = 0;
		tx_ring[i]->xmit_more = 0;
	}
	for (i = 0; i < priv->rx_ring_num; i++) {
		priv->rx_ring[i]->bytes = 0;
		priv->rx_ring[i]->packets = 0;
		priv->rx_ring[i]->csum_ok = 0;
		priv->rx_ring[i]->csum_none = 0;
		priv->rx_ring[i]->csum_complete = 0;
	}
}

static int mlx4_en_open(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err = 0;

	mutex_lock(&mdev->state_lock);

	if (!mdev->device_up) {
		en_err(priv, "Cannot open - device down/disabled\n");
		err = -EBUSY;
		goto out;
	}

	/* Reset HW statistics and SW counters */
	mlx4_en_clear_stats(dev);

	err = mlx4_en_start_port(dev);
	if (err)
		en_err(priv, "Failed starting port:%d\n", priv->port);

out:
	mutex_unlock(&mdev->state_lock);
	return err;
}

static int mlx4_en_close(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;

	en_dbg(IFDOWN, priv, "Close port called\n");

	mutex_lock(&mdev->state_lock);

	mlx4_en_stop_port(dev, 0);
	netif_carrier_off(dev);

	mutex_unlock(&mdev->state_lock);
	return 0;
}

static void mlx4_en_free_resources(struct mlx4_en_priv *priv)
{
	int i, t;

#ifdef CONFIG_RFS_ACCEL
	priv->dev->rx_cpu_rmap = NULL;
#endif

	for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
		for (i = 0; i < priv->tx_ring_num[t]; i++) {
			if (priv->tx_ring[t] && priv->tx_ring[t][i])
				mlx4_en_destroy_tx_ring(priv,
							&priv->tx_ring[t][i]);
			if (priv->tx_cq[t] && priv->tx_cq[t][i])
				mlx4_en_destroy_cq(priv, &priv->tx_cq[t][i]);
		}
		kfree(priv->tx_ring[t]);
		kfree(priv->tx_cq[t]);
	}

	for (i = 0; i < priv->rx_ring_num; i++) {
		if (priv->rx_ring[i])
			mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i],
						priv->prof->rx_ring_size, priv->stride);
		if (priv->rx_cq[i])
			mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
	}
}

static int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
{
	struct mlx4_en_port_profile *prof = priv->prof;
	int i, t;
	int node;

	/* Create tx Rings */
	for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
		for (i = 0; i < priv->tx_ring_num[t]; i++) {
			node = cpu_to_node(i % num_online_cpus());
			if (mlx4_en_create_cq(priv, &priv->tx_cq[t][i],
					      prof->tx_ring_size, i, t, node))
				goto err;

			if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[t][i],
						   prof->tx_ring_size,
						   TXBB_SIZE, node, i))
				goto err;
		}
	}

	/* Create rx Rings */
	for (i = 0; i < priv->rx_ring_num; i++) {
		node = cpu_to_node(i % num_online_cpus());
		if (mlx4_en_create_cq(priv, &priv->rx_cq[i],
				      prof->rx_ring_size, i, RX, node))
			goto err;

		if (mlx4_en_create_rx_ring(priv, &priv->rx_ring[i],
					   prof->rx_ring_size, priv->stride,
					   node, i))
			goto err;
	}

#ifdef CONFIG_RFS_ACCEL
	priv->dev->rx_cpu_rmap = mlx4_get_cpu_rmap(priv->mdev->dev, priv->port);
#endif

	return 0;

err:
	en_err(priv, "Failed to allocate NIC resources\n");
	for (i = 0; i < priv->rx_ring_num; i++) {
		if (priv->rx_ring[i])
			mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i],
						prof->rx_ring_size,
						priv->stride);
		if (priv->rx_cq[i])
			mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
	}
	for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
		for (i = 0; i < priv->tx_ring_num[t]; i++) {
			if (priv->tx_ring[t][i])
				mlx4_en_destroy_tx_ring(priv,
							&priv->tx_ring[t][i]);
			if (priv->tx_cq[t][i])
				mlx4_en_destroy_cq(priv, &priv->tx_cq[t][i]);
		}
	}
	return -ENOMEM;
}

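/*
 * Editorial note (not part of the original source): rings and CQs above
 * are allocated round-robin across online CPUs via
 * cpu_to_node(i % num_online_cpus()), so each ring's descriptor memory
 * lands on the NUMA node of the CPU most likely to service it.
 */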
static int mlx4_en_copy_priv(struct mlx4_en_priv *dst,
			     struct mlx4_en_priv *src,
			     struct mlx4_en_port_profile *prof)
{
	int t;

	memcpy(&dst->hwtstamp_config, &prof->hwtstamp_config,
	       sizeof(dst->hwtstamp_config));
	dst->num_tx_rings_p_up = prof->num_tx_rings_p_up;
	dst->rx_ring_num = prof->rx_ring_num;
	dst->flags = prof->flags;
	dst->mdev = src->mdev;
	dst->port = src->port;
	dst->dev = src->dev;
	dst->prof = prof;
	dst->stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
					 DS_SIZE * MLX4_EN_MAX_RX_FRAGS);

	for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
		dst->tx_ring_num[t] = prof->tx_ring_num[t];
		if (!dst->tx_ring_num[t])
			continue;

		dst->tx_ring[t] = kzalloc(sizeof(struct mlx4_en_tx_ring *) *
					  MAX_TX_RINGS, GFP_KERNEL);
		if (!dst->tx_ring[t])
			goto err_free_tx;

		dst->tx_cq[t] = kzalloc(sizeof(struct mlx4_en_cq *) *
					MAX_TX_RINGS, GFP_KERNEL);
		if (!dst->tx_cq[t]) {
			kfree(dst->tx_ring[t]);
			goto err_free_tx;
		}
	}

	return 0;

err_free_tx:
	while (t--) {
		kfree(dst->tx_ring[t]);
		kfree(dst->tx_cq[t]);
	}
	return -ENOMEM;
}

static void mlx4_en_update_priv(struct mlx4_en_priv *dst,
				struct mlx4_en_priv *src)
{
	int t;

	memcpy(dst->rx_ring, src->rx_ring,
	       sizeof(struct mlx4_en_rx_ring *) * src->rx_ring_num);
	memcpy(dst->rx_cq, src->rx_cq,
	       sizeof(struct mlx4_en_cq *) * src->rx_ring_num);
	memcpy(&dst->hwtstamp_config, &src->hwtstamp_config,
	       sizeof(dst->hwtstamp_config));
	for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
		dst->tx_ring_num[t] = src->tx_ring_num[t];
		dst->tx_ring[t] = src->tx_ring[t];
		dst->tx_cq[t] = src->tx_cq[t];
	}
	dst->num_tx_rings_p_up = src->num_tx_rings_p_up;
	dst->rx_ring_num = src->rx_ring_num;
	memcpy(dst->prof, src->prof, sizeof(struct mlx4_en_port_profile));
}

int mlx4_en_try_alloc_resources(struct mlx4_en_priv *priv,
				struct mlx4_en_priv *tmp,
				struct mlx4_en_port_profile *prof,
				bool carry_xdp_prog)
{
	struct bpf_prog *xdp_prog;
	int i, t, ret;

	ret = mlx4_en_copy_priv(tmp, priv, prof);
	if (ret) {
		en_warn(priv, "%s: mlx4_en_copy_priv() failed, return\n",
			__func__);
		return ret;
	}

	if (mlx4_en_alloc_resources(tmp)) {
		en_warn(priv,
			"%s: Resource allocation failed, using previous configuration\n",
			__func__);
		for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
			kfree(tmp->tx_ring[t]);
			kfree(tmp->tx_cq[t]);
		}
		return -ENOMEM;
	}

	/* All rx_rings have the same xdp_prog.  Pick the first one. */
	xdp_prog = rcu_dereference_protected(
		priv->rx_ring[0]->xdp_prog,
		lockdep_is_held(&priv->mdev->state_lock));

	if (xdp_prog && carry_xdp_prog) {
		xdp_prog = bpf_prog_add(xdp_prog, tmp->rx_ring_num);
		if (IS_ERR(xdp_prog)) {
			mlx4_en_free_resources(tmp);
			return PTR_ERR(xdp_prog);
		}
		for (i = 0; i < tmp->rx_ring_num; i++)
			rcu_assign_pointer(tmp->rx_ring[i]->xdp_prog,
					   xdp_prog);
	}

	return 0;
}

void mlx4_en_safe_replace_resources(struct mlx4_en_priv *priv,
				    struct mlx4_en_priv *tmp)
{
	mlx4_en_free_resources(priv);
	mlx4_en_update_priv(priv, tmp);
}

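/*
 * Editorial note (not part of the original source): the try-alloc and
 * safe-replace pair forms a prepare-commit reconfiguration. A complete
 * set of new rings and CQs is built in a throwaway mlx4_en_priv first;
 * only once nothing can fail anymore are the live resources freed and
 * the new pointers copied in, so a failed resize leaves the running
 * configuration untouched.
 */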
2332 void mlx4_en_destroy_netdev(struct net_device *dev)
2334 struct mlx4_en_priv *priv = netdev_priv(dev);
2335 struct mlx4_en_dev *mdev = priv->mdev;
2337 en_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port);
2339 /* Unregister device - this will close the port if it was up */
2340 if (priv->registered) {
2341 devlink_port_type_clear(mlx4_get_devlink_port(mdev->dev,
2343 unregister_netdev(dev);
2346 if (priv->allocated)
2347 mlx4_free_hwq_res(mdev->dev, &priv->res, MLX4_EN_PAGE_SIZE);
2349 cancel_delayed_work(&priv->stats_task);
2350 cancel_delayed_work(&priv->service_task);
2351 /* flush any pending task for this netdev */
2352 flush_workqueue(mdev->workqueue);
2354 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
2355 mlx4_en_remove_timestamp(mdev);
2357 /* Detach the netdev so that tasks do not attempt to access it */
2358 mutex_lock(&mdev->state_lock);
2359 mdev->pndev[priv->port] = NULL;
2360 mdev->upper[priv->port] = NULL;
2362 #ifdef CONFIG_RFS_ACCEL
2363 mlx4_en_cleanup_filters(priv);
2366 mlx4_en_free_resources(priv);
2367 mutex_unlock(&mdev->state_lock);
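/* With an XDP program attached, every RX frame must fit into a single page,
 * so the MTU is bounded by MLX4_EN_MAX_XDP_MTU; log and reject anything
 * larger.
 */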
2372 static bool mlx4_en_check_xdp_mtu(struct net_device *dev, int mtu)
2374 struct mlx4_en_priv *priv = netdev_priv(dev);
2376 if (mtu > MLX4_EN_MAX_XDP_MTU) {
2377 en_err(priv, "mtu:%d > max:%d when XDP prog is attached\n",
2378 mtu, MLX4_EN_MAX_XDP_MTU);
2385 static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
2387 struct mlx4_en_priv *priv = netdev_priv(dev);
2388 struct mlx4_en_dev *mdev = priv->mdev;
2391 en_dbg(DRV, priv, "Change MTU called - current:%d new:%d\n",
2394 if (priv->tx_ring_num[TX_XDP] &&
2395 !mlx4_en_check_xdp_mtu(dev, new_mtu))
2400 if (netif_running(dev)) {
2401 mutex_lock(&mdev->state_lock);
2402 if (!mdev->device_up) {
2403 /* NIC is probably restarting - let restart task reset
2404 * the device */
2405 en_dbg(DRV, priv, "Change MTU called with card down!?\n");
2407 mlx4_en_stop_port(dev, 1);
2408 err = mlx4_en_start_port(dev);
2410 en_err(priv, "Failed restarting port:%d\n",
2412 if (!test_and_set_bit(MLX4_EN_STATE_FLAG_RESTARTING,
2414 queue_work(mdev->workqueue, &priv->restart_task);
2417 mutex_unlock(&mdev->state_lock);
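/* SIOCSHWTSTAMP handler: validate the requested TX type and RX filter (any
 * specific PTP filter is widened to HWTSTAMP_FILTER_ALL, as the HW stamps
 * all packets) and apply it through mlx4_en_reset_config(). A hypothetical
 * userspace sketch (device name assumed):
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_ALL,
 *	};
 *	struct ifreq ifr = { .ifr_data = (char *)&cfg };
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ioctl(sock_fd, SIOCSHWTSTAMP, &ifr);
 */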
2422 static int mlx4_en_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
2424 struct mlx4_en_priv *priv = netdev_priv(dev);
2425 struct mlx4_en_dev *mdev = priv->mdev;
2426 struct hwtstamp_config config;
2428 if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
2431 /* reserved for future extensions */
2435 /* device doesn't support time stamping */
2436 if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS))
2439 /* TX HW timestamp */
2440 switch (config.tx_type) {
2441 case HWTSTAMP_TX_OFF:
2442 case HWTSTAMP_TX_ON:
2448 /* RX HW timestamp */
2449 switch (config.rx_filter) {
2450 case HWTSTAMP_FILTER_NONE:
2452 case HWTSTAMP_FILTER_ALL:
2453 case HWTSTAMP_FILTER_SOME:
2454 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
2455 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
2456 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
2457 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
2458 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
2459 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
2460 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
2461 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
2462 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
2463 case HWTSTAMP_FILTER_PTP_V2_EVENT:
2464 case HWTSTAMP_FILTER_PTP_V2_SYNC:
2465 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
2466 case HWTSTAMP_FILTER_NTP_ALL:
2467 config.rx_filter = HWTSTAMP_FILTER_ALL;
2473 if (mlx4_en_reset_config(dev, config, dev->features)) {
2474 config.tx_type = HWTSTAMP_TX_OFF;
2475 config.rx_filter = HWTSTAMP_FILTER_NONE;
2478 return copy_to_user(ifr->ifr_data, &config,
2479 sizeof(config)) ? -EFAULT : 0;
2482 static int mlx4_en_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
2484 struct mlx4_en_priv *priv = netdev_priv(dev);
2486 return copy_to_user(ifr->ifr_data, &priv->hwtstamp_config,
2487 sizeof(priv->hwtstamp_config)) ? -EFAULT : 0;
2490 static int mlx4_en_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2494 return mlx4_en_hwtstamp_set(dev, ifr);
2496 return mlx4_en_hwtstamp_get(dev, ifr);
2502 static netdev_features_t mlx4_en_fix_features(struct net_device *netdev,
2503 netdev_features_t features)
2505 struct mlx4_en_priv *en_priv = netdev_priv(netdev);
2506 struct mlx4_en_dev *mdev = en_priv->mdev;
2508 /* Since there is no support for separate RX C-TAG/S-TAG vlan accel
2509 * enable/disable, make sure the S-TAG flag is always in the same state
2510 * as C-TAG. */
2512 if (features & NETIF_F_HW_VLAN_CTAG_RX &&
2513 !(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN))
2514 features |= NETIF_F_HW_VLAN_STAG_RX;
2516 features &= ~NETIF_F_HW_VLAN_STAG_RX;
2521 static int mlx4_en_set_features(struct net_device *netdev,
2522 netdev_features_t features)
2524 struct mlx4_en_priv *priv = netdev_priv(netdev);
2528 if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_RXFCS)) {
2529 en_info(priv, "Turn %s RX-FCS\n",
2530 (features & NETIF_F_RXFCS) ? "ON" : "OFF");
2534 if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_RXALL)) {
2535 u8 ignore_fcs_value = (features & NETIF_F_RXALL) ? 1 : 0;
2537 en_info(priv, "Turn %s RX-ALL\n",
2538 ignore_fcs_value ? "ON" : "OFF");
2539 ret = mlx4_SET_PORT_fcs_check(priv->mdev->dev,
2540 priv->port, ignore_fcs_value);
2545 if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_HW_VLAN_CTAG_RX)) {
2546 en_info(priv, "Turn %s RX vlan strip offload\n",
2547 (features & NETIF_F_HW_VLAN_CTAG_RX) ? "ON" : "OFF");
2551 if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_HW_VLAN_CTAG_TX))
2552 en_info(priv, "Turn %s TX vlan strip offload\n",
2553 (features & NETIF_F_HW_VLAN_CTAG_TX) ? "ON" : "OFF");
2555 if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_HW_VLAN_STAG_TX))
2556 en_info(priv, "Turn %s TX S-VLAN strip offload\n",
2557 (features & NETIF_F_HW_VLAN_STAG_TX) ? "ON" : "OFF");
2559 if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_LOOPBACK)) {
2560 en_info(priv, "Turn %s loopback\n",
2561 (features & NETIF_F_LOOPBACK) ? "ON" : "OFF");
2562 mlx4_en_update_loopback_state(netdev, features);
2566 ret = mlx4_en_reset_config(netdev, priv->hwtstamp_config,
2575 static int mlx4_en_set_vf_mac(struct net_device *dev, int queue, u8 *mac)
2577 struct mlx4_en_priv *en_priv = netdev_priv(dev);
2578 struct mlx4_en_dev *mdev = en_priv->mdev;
2580 return mlx4_set_vf_mac(mdev->dev, en_priv->port, queue, mac);
2583 static int mlx4_en_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos,
2586 struct mlx4_en_priv *en_priv = netdev_priv(dev);
2587 struct mlx4_en_dev *mdev = en_priv->mdev;
2589 return mlx4_set_vf_vlan(mdev->dev, en_priv->port, vf, vlan, qos,
2593 static int mlx4_en_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate,
2596 struct mlx4_en_priv *en_priv = netdev_priv(dev);
2597 struct mlx4_en_dev *mdev = en_priv->mdev;
2599 return mlx4_set_vf_rate(mdev->dev, en_priv->port, vf, min_tx_rate,
2603 static int mlx4_en_set_vf_spoofchk(struct net_device *dev, int vf, bool setting)
2605 struct mlx4_en_priv *en_priv = netdev_priv(dev);
2606 struct mlx4_en_dev *mdev = en_priv->mdev;
2608 return mlx4_set_vf_spoofchk(mdev->dev, en_priv->port, vf, setting);
2611 static int mlx4_en_get_vf_config(struct net_device *dev, int vf, struct ifla_vf_info *ivf)
2613 struct mlx4_en_priv *en_priv = netdev_priv(dev);
2614 struct mlx4_en_dev *mdev = en_priv->mdev;
2616 return mlx4_get_vf_config(mdev->dev, en_priv->port, vf, ivf);
2619 static int mlx4_en_set_vf_link_state(struct net_device *dev, int vf, int link_state)
2621 struct mlx4_en_priv *en_priv = netdev_priv(dev);
2622 struct mlx4_en_dev *mdev = en_priv->mdev;
2624 return mlx4_set_vf_link_state(mdev->dev, en_priv->port, vf, link_state);
2627 static int mlx4_en_get_vf_stats(struct net_device *dev, int vf,
2628 struct ifla_vf_stats *vf_stats)
2630 struct mlx4_en_priv *en_priv = netdev_priv(dev);
2631 struct mlx4_en_dev *mdev = en_priv->mdev;
2633 return mlx4_get_vf_stats(mdev->dev, en_priv->port, vf, vf_stats);
2636 #define PORT_ID_BYTE_LEN 8
2637 static int mlx4_en_get_phys_port_id(struct net_device *dev,
2638 struct netdev_phys_item_id *ppid)
2640 struct mlx4_en_priv *priv = netdev_priv(dev);
2641 struct mlx4_dev *mdev = priv->mdev->dev;
2643 u64 phys_port_id = mdev->caps.phys_port_id[priv->port];
2648 ppid->id_len = sizeof(phys_port_id);
2649 for (i = PORT_ID_BYTE_LEN - 1; i >= 0; --i) {
2650 ppid->id[i] = phys_port_id & 0xff;
2656 static void mlx4_en_add_vxlan_offloads(struct work_struct *work)
2659 struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
2662 ret = mlx4_config_vxlan_port(priv->mdev->dev, priv->vxlan_port);
2666 ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port,
2667 VXLAN_STEER_BY_OUTER_MAC, 1);
2670 en_err(priv, "failed setting L2 tunnel configuration ret %d\n", ret);
2675 priv->dev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
2677 NETIF_F_TSO | NETIF_F_TSO6 |
2678 NETIF_F_GSO_UDP_TUNNEL |
2679 NETIF_F_GSO_UDP_TUNNEL_CSUM |
2680 NETIF_F_GSO_PARTIAL;
2683 static void mlx4_en_del_vxlan_offloads(struct work_struct *work)
2686 struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
2688 /* unset offloads */
2689 priv->dev->hw_enc_features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
2691 NETIF_F_TSO | NETIF_F_TSO6 |
2692 NETIF_F_GSO_UDP_TUNNEL |
2693 NETIF_F_GSO_UDP_TUNNEL_CSUM |
2694 NETIF_F_GSO_PARTIAL);
2696 ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port,
2697 VXLAN_STEER_BY_OUTER_MAC, 0);
2699 en_err(priv, "failed setting L2 tunnel configuration ret %d\n", ret);
2701 priv->vxlan_port = 0;
2704 static void mlx4_en_add_vxlan_port(struct net_device *dev,
2705 struct udp_tunnel_info *ti)
2707 struct mlx4_en_priv *priv = netdev_priv(dev);
2708 __be16 port = ti->port;
2709 __be16 current_port;
2711 if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
2714 if (ti->sa_family != AF_INET)
2717 if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
2720 current_port = priv->vxlan_port;
2721 if (current_port && current_port != port) {
2722 en_warn(priv, "vxlan port %d configured, can't add port %d\n",
2723 ntohs(current_port), ntohs(port));
2727 priv->vxlan_port = port;
2728 queue_work(priv->mdev->workqueue, &priv->vxlan_add_task);
2731 static void mlx4_en_del_vxlan_port(struct net_device *dev,
2732 struct udp_tunnel_info *ti)
2734 struct mlx4_en_priv *priv = netdev_priv(dev);
2735 __be16 port = ti->port;
2736 __be16 current_port;
2738 if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
2741 if (ti->sa_family != AF_INET)
2744 if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
2747 current_port = priv->vxlan_port;
2748 if (current_port != port) {
2749 en_dbg(DRV, priv, "vxlan port %d isn't configured, ignoring\n", ntohs(port));
2753 queue_work(priv->mdev->workqueue, &priv->vxlan_del_task);
2756 static netdev_features_t mlx4_en_features_check(struct sk_buff *skb,
2757 struct net_device *dev,
2758 netdev_features_t features)
2760 features = vlan_features_check(skb, features);
2761 features = vxlan_features_check(skb, features);
2763 /* The ConnectX-3 doesn't support outer IPv6 checksums but it does
2764 * support inner IPv6 checksums and segmentation so we need to
2765 * strip that feature if this is an IPv6 encapsulated frame. */
2767 if (skb->encapsulation &&
2768 (skb->ip_summed == CHECKSUM_PARTIAL)) {
2769 struct mlx4_en_priv *priv = netdev_priv(dev);
2771 if (!priv->vxlan_port ||
2772 (ip_hdr(skb)->version != 4) ||
2773 (udp_hdr(skb)->dest != priv->vxlan_port))
2774 features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
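/* .ndo_set_tx_maxrate handler. The HW rate limiter takes a 12-bit value,
 * so rates that do not fit are programmed in Gbps rather than Mbps, and a
 * rate of zero revokes the limit. One way to exercise it from userspace
 * (interface and queue names assumed):
 *
 *	echo 5000 > /sys/class/net/eth0/queues/tx-0/tx_maxrate
 */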
2780 static int mlx4_en_set_tx_maxrate(struct net_device *dev, int queue_index, u32 maxrate)
2782 struct mlx4_en_priv *priv = netdev_priv(dev);
2783 struct mlx4_en_tx_ring *tx_ring = priv->tx_ring[TX][queue_index];
2784 struct mlx4_update_qp_params params;
2787 if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QP_RATE_LIMIT))
2790 /* rate is provided in Mbps; if it does not fit in 12 bits, program it in Gbps */
2791 if (maxrate >> 12) {
2792 params.rate_unit = MLX4_QP_RATE_LIMIT_GBS;
2793 params.rate_val = maxrate / 1000;
2794 } else if (maxrate) {
2795 params.rate_unit = MLX4_QP_RATE_LIMIT_MBS;
2796 params.rate_val = maxrate;
2797 } else { /* zero serves to revoke the QP rate-limitation */
2798 params.rate_unit = 0;
2799 params.rate_val = 0;
2802 err = mlx4_update_qp(priv->mdev->dev, tx_ring->qpn, MLX4_UPDATE_QP_RATE_LIMIT,
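/* Attach or detach an XDP program. If the XDP TX ring count already matches
 * (plain program replacement), the new program is swapped into every RX
 * ring under state_lock; otherwise the port profile is rebuilt with one
 * XDP TX ring per RX ring via the try-alloc/replace sequence, restarting
 * the port if it was up. Typically driven from userspace by something like
 * (hypothetical object file and device name):
 *
 *	ip link set dev eth0 xdp obj xdp_prog.o
 */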
2807 static int mlx4_xdp_set(struct net_device *dev, struct bpf_prog *prog)
2809 struct mlx4_en_priv *priv = netdev_priv(dev);
2810 struct mlx4_en_dev *mdev = priv->mdev;
2811 struct mlx4_en_port_profile new_prof;
2812 struct bpf_prog *old_prog;
2813 struct mlx4_en_priv *tmp;
2820 xdp_ring_num = prog ? priv->rx_ring_num : 0;
2822 /* No need to reconfigure buffers when simply swapping the
2823 * program for a new one. */
2825 if (priv->tx_ring_num[TX_XDP] == xdp_ring_num) {
2827 prog = bpf_prog_add(prog, priv->rx_ring_num - 1);
2829 return PTR_ERR(prog);
2831 mutex_lock(&mdev->state_lock);
2832 for (i = 0; i < priv->rx_ring_num; i++) {
2833 old_prog = rcu_dereference_protected(
2834 priv->rx_ring[i]->xdp_prog,
2835 lockdep_is_held(&mdev->state_lock));
2836 rcu_assign_pointer(priv->rx_ring[i]->xdp_prog, prog);
2838 bpf_prog_put(old_prog);
2840 mutex_unlock(&mdev->state_lock);
2844 if (!mlx4_en_check_xdp_mtu(dev, dev->mtu))
2847 tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
2852 prog = bpf_prog_add(prog, priv->rx_ring_num - 1);
2854 err = PTR_ERR(prog);
2859 mutex_lock(&mdev->state_lock);
2860 memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile));
2861 new_prof.tx_ring_num[TX_XDP] = xdp_ring_num;
2863 if (priv->tx_ring_num[TX] + xdp_ring_num > MAX_TX_RINGS) {
2865 new_prof.tx_ring_num[TX] =
2866 MAX_TX_RINGS - ALIGN(xdp_ring_num, priv->prof->num_up);
2867 en_warn(priv, "Reducing the number of TX rings so as not to exceed the maximum total number of rings.\n");
2870 err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof, false);
2873 bpf_prog_sub(prog, priv->rx_ring_num - 1);
2877 if (priv->port_up) {
2879 mlx4_en_stop_port(dev, 1);
2882 mlx4_en_safe_replace_resources(priv, tmp);
2884 netif_set_real_num_tx_queues(dev, priv->tx_ring_num[TX]);
2886 for (i = 0; i < priv->rx_ring_num; i++) {
2887 old_prog = rcu_dereference_protected(
2888 priv->rx_ring[i]->xdp_prog,
2889 lockdep_is_held(&mdev->state_lock));
2890 rcu_assign_pointer(priv->rx_ring[i]->xdp_prog, prog);
2892 bpf_prog_put(old_prog);
2896 err = mlx4_en_start_port(dev);
2898 en_err(priv, "Failed starting port %d for XDP change\n",
2900 if (!test_and_set_bit(MLX4_EN_STATE_FLAG_RESTARTING, &priv->state))
2901 queue_work(mdev->workqueue, &priv->restart_task);
2906 mutex_unlock(&mdev->state_lock);
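/* Return the id of the attached XDP program, or 0 when none is attached.
 * All RX rings share one program, so ring 0 is inspected under state_lock.
 */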
2912 static u32 mlx4_xdp_query(struct net_device *dev)
2914 struct mlx4_en_priv *priv = netdev_priv(dev);
2915 struct mlx4_en_dev *mdev = priv->mdev;
2916 const struct bpf_prog *xdp_prog;
2919 if (!priv->tx_ring_num[TX_XDP])
2922 mutex_lock(&mdev->state_lock);
2923 xdp_prog = rcu_dereference_protected(
2924 priv->rx_ring[0]->xdp_prog,
2925 lockdep_is_held(&mdev->state_lock));
2927 prog_id = xdp_prog->aux->id;
2928 mutex_unlock(&mdev->state_lock);
2933 static int mlx4_xdp(struct net_device *dev, struct netdev_xdp *xdp)
2935 switch (xdp->command) {
2936 case XDP_SETUP_PROG:
2937 return mlx4_xdp_set(dev, xdp->prog);
2938 case XDP_QUERY_PROG:
2939 xdp->prog_id = mlx4_xdp_query(dev);
2940 xdp->prog_attached = !!xdp->prog_id;
2947 static const struct net_device_ops mlx4_netdev_ops = {
2948 .ndo_open = mlx4_en_open,
2949 .ndo_stop = mlx4_en_close,
2950 .ndo_start_xmit = mlx4_en_xmit,
2951 .ndo_select_queue = mlx4_en_select_queue,
2952 .ndo_get_stats64 = mlx4_en_get_stats64,
2953 .ndo_set_rx_mode = mlx4_en_set_rx_mode,
2954 .ndo_set_mac_address = mlx4_en_set_mac,
2955 .ndo_validate_addr = eth_validate_addr,
2956 .ndo_change_mtu = mlx4_en_change_mtu,
2957 .ndo_do_ioctl = mlx4_en_ioctl,
2958 .ndo_tx_timeout = mlx4_en_tx_timeout,
2959 .ndo_vlan_rx_add_vid = mlx4_en_vlan_rx_add_vid,
2960 .ndo_vlan_rx_kill_vid = mlx4_en_vlan_rx_kill_vid,
2961 #ifdef CONFIG_NET_POLL_CONTROLLER
2962 .ndo_poll_controller = mlx4_en_netpoll,
2964 .ndo_set_features = mlx4_en_set_features,
2965 .ndo_fix_features = mlx4_en_fix_features,
2966 .ndo_setup_tc = __mlx4_en_setup_tc,
2967 #ifdef CONFIG_RFS_ACCEL
2968 .ndo_rx_flow_steer = mlx4_en_filter_rfs,
2970 .ndo_get_phys_port_id = mlx4_en_get_phys_port_id,
2971 .ndo_udp_tunnel_add = mlx4_en_add_vxlan_port,
2972 .ndo_udp_tunnel_del = mlx4_en_del_vxlan_port,
2973 .ndo_features_check = mlx4_en_features_check,
2974 .ndo_set_tx_maxrate = mlx4_en_set_tx_maxrate,
2975 .ndo_xdp = mlx4_xdp,
2978 static const struct net_device_ops mlx4_netdev_ops_master = {
2979 .ndo_open = mlx4_en_open,
2980 .ndo_stop = mlx4_en_close,
2981 .ndo_start_xmit = mlx4_en_xmit,
2982 .ndo_select_queue = mlx4_en_select_queue,
2983 .ndo_get_stats64 = mlx4_en_get_stats64,
2984 .ndo_set_rx_mode = mlx4_en_set_rx_mode,
2985 .ndo_set_mac_address = mlx4_en_set_mac,
2986 .ndo_validate_addr = eth_validate_addr,
2987 .ndo_change_mtu = mlx4_en_change_mtu,
2988 .ndo_tx_timeout = mlx4_en_tx_timeout,
2989 .ndo_vlan_rx_add_vid = mlx4_en_vlan_rx_add_vid,
2990 .ndo_vlan_rx_kill_vid = mlx4_en_vlan_rx_kill_vid,
2991 .ndo_set_vf_mac = mlx4_en_set_vf_mac,
2992 .ndo_set_vf_vlan = mlx4_en_set_vf_vlan,
2993 .ndo_set_vf_rate = mlx4_en_set_vf_rate,
2994 .ndo_set_vf_spoofchk = mlx4_en_set_vf_spoofchk,
2995 .ndo_set_vf_link_state = mlx4_en_set_vf_link_state,
2996 .ndo_get_vf_stats = mlx4_en_get_vf_stats,
2997 .ndo_get_vf_config = mlx4_en_get_vf_config,
2998 #ifdef CONFIG_NET_POLL_CONTROLLER
2999 .ndo_poll_controller = mlx4_en_netpoll,
3001 .ndo_set_features = mlx4_en_set_features,
3002 .ndo_fix_features = mlx4_en_fix_features,
3003 .ndo_setup_tc = __mlx4_en_setup_tc,
3004 #ifdef CONFIG_RFS_ACCEL
3005 .ndo_rx_flow_steer = mlx4_en_filter_rfs,
3007 .ndo_get_phys_port_id = mlx4_en_get_phys_port_id,
3008 .ndo_udp_tunnel_add = mlx4_en_add_vxlan_port,
3009 .ndo_udp_tunnel_del = mlx4_en_del_vxlan_port,
3010 .ndo_features_check = mlx4_en_features_check,
3011 .ndo_set_tx_maxrate = mlx4_en_set_tx_maxrate,
3012 .ndo_xdp = mlx4_xdp,
3015 struct mlx4_en_bond {
3016 struct work_struct work;
3017 struct mlx4_en_priv *priv;
3019 struct mlx4_port_map port_map;
3022 static void mlx4_en_bond_work(struct work_struct *work)
3024 struct mlx4_en_bond *bond = container_of(work,
3025 struct mlx4_en_bond,
3028 struct mlx4_dev *dev = bond->priv->mdev->dev;
3030 if (bond->is_bonded) {
3031 if (!mlx4_is_bonded(dev)) {
3032 err = mlx4_bond(dev);
3034 en_err(bond->priv, "Failed to bond device\n");
3037 err = mlx4_port_map_set(dev, &bond->port_map);
3039 en_err(bond->priv, "Failed to set port map [%d][%d]: %d\n",
3040 bond->port_map.port1,
3041 bond->port_map.port2,
3044 } else if (mlx4_is_bonded(dev)) {
3045 err = mlx4_unbond(dev);
3047 en_err(bond->priv, "Failed to unbond device\n");
3049 dev_put(bond->priv->dev);
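/* Defer the bond/unbond programming to the workqueue: the FW commands may
 * sleep, while this is invoked from the netdev notifier path (hence also
 * the GFP_ATOMIC allocation). A reference on the netdev is held until
 * mlx4_en_bond_work() completes.
 */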
3053 static int mlx4_en_queue_bond_work(struct mlx4_en_priv *priv, int is_bonded,
3054 u8 v2p_p1, u8 v2p_p2)
3056 struct mlx4_en_bond *bond = NULL;
3058 bond = kzalloc(sizeof(*bond), GFP_ATOMIC);
3062 INIT_WORK(&bond->work, mlx4_en_bond_work);
3064 bond->is_bonded = is_bonded;
3065 bond->port_map.port1 = v2p_p1;
3066 bond->port_map.port2 = v2p_p2;
3067 dev_hold(priv->dev);
3068 queue_work(priv->mdev->workqueue, &bond->work);
3072 int mlx4_en_netdev_event(struct notifier_block *this,
3073 unsigned long event, void *ptr)
3075 struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
3077 struct mlx4_en_dev *mdev;
3078 struct mlx4_dev *dev;
3079 int i, num_eth_ports = 0;
3080 bool do_bond = true;
3081 struct mlx4_en_priv *priv;
3085 if (!net_eq(dev_net(ndev), &init_net))
3088 mdev = container_of(this, struct mlx4_en_dev, nb);
3091 /* Go into this mode only when two network devices set on two ports
3092 * of the same mlx4 device are slaves of the same bonding master */
3094 mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
3096 if (!port && (mdev->pndev[i] == ndev))
3098 mdev->upper[i] = mdev->pndev[i] ?
3099 netdev_master_upper_dev_get(mdev->pndev[i]) : NULL;
3100 /* condition not met: network device is a slave */
3101 if (!mdev->upper[i])
3103 if (num_eth_ports < 2)
3105 /* condition not met: same master */
3106 if (mdev->upper[i] != mdev->upper[i-1])
3109 /* condition not met: exactly 2 slaves */
3110 do_bond = (num_eth_ports == 2) ? do_bond : false;
3112 /* handle only events that come with enough info */
3113 if ((do_bond && (event != NETDEV_BONDING_INFO)) || !port)
3116 priv = netdev_priv(ndev);
3118 struct netdev_notifier_bonding_info *notifier_info = ptr;
3119 struct netdev_bonding_info *bonding_info =
3120 ¬ifier_info->bonding_info;
3122 /* required bond mode: active-backup (1), XOR (2) or 802.3ad (4) */
3123 if ((bonding_info->master.bond_mode != BOND_MODE_ACTIVEBACKUP) &&
3124 (bonding_info->master.bond_mode != BOND_MODE_XOR) &&
3125 (bonding_info->master.bond_mode != BOND_MODE_8023AD))
3128 /* require exactly 2 slaves */
3129 if (bonding_info->master.num_slaves != 2)
3134 if (bonding_info->master.bond_mode ==
3135 BOND_MODE_ACTIVEBACKUP) {
3136 /* in active-backup mode virtual ports are
3137 * mapped to the physical port of the active
3138 * slave */
3139 if (bonding_info->slave.state ==
3140 BOND_STATE_BACKUP) {
3148 } else { /* BOND_STATE_ACTIVE */
3157 } else { /* Active-Active */
3158 /* in active-active mode a virtual port is
3159 * mapped to the native physical port if and only
3160 * if the physical port is up */
3161 __s8 link = bonding_info->slave.link;
3167 if ((link == BOND_LINK_UP) ||
3168 (link == BOND_LINK_FAIL)) {
3173 } else { /* BOND_LINK_DOWN || BOND_LINK_BACK */
3183 mlx4_en_queue_bond_work(priv, do_bond,
3184 v2p_port1, v2p_port2);
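/* Recompute the flow-control slice of the stats bitmap: per-priority (PPP)
 * counters are exposed when PFC is enabled, global pause counters only when
 * pause is on and PFC is off, independently for RX and TX.
 */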
3189 void mlx4_en_update_pfc_stats_bitmap(struct mlx4_dev *dev,
3190 struct mlx4_en_stats_bitmap *stats_bitmap,
3191 u8 rx_ppp, u8 rx_pause,
3192 u8 tx_ppp, u8 tx_pause)
3194 int last_i = NUM_MAIN_STATS + NUM_PORT_STATS + NUM_PF_STATS;
3196 if (!mlx4_is_slave(dev) &&
3197 (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FLOWSTATS_EN)) {
3198 mutex_lock(&stats_bitmap->mutex);
3199 bitmap_clear(stats_bitmap->bitmap, last_i, NUM_FLOW_STATS);
3202 bitmap_set(stats_bitmap->bitmap, last_i,
3203 NUM_FLOW_PRIORITY_STATS_RX);
3204 last_i += NUM_FLOW_PRIORITY_STATS_RX;
3206 if (rx_pause && !(rx_ppp))
3207 bitmap_set(stats_bitmap->bitmap, last_i,
3209 last_i += NUM_FLOW_STATS_RX;
3212 bitmap_set(stats_bitmap->bitmap, last_i,
3213 NUM_FLOW_PRIORITY_STATS_TX);
3214 last_i += NUM_FLOW_PRIORITY_STATS_TX;
3216 if (tx_pause && !(tx_ppp))
3217 bitmap_set(stats_bitmap->bitmap, last_i,
3219 last_i += NUM_FLOW_STATS_TX;
3221 mutex_unlock(&stats_bitmap->mutex);
3225 void mlx4_en_set_stats_bitmap(struct mlx4_dev *dev,
3226 struct mlx4_en_stats_bitmap *stats_bitmap,
3227 u8 rx_ppp, u8 rx_pause,
3228 u8 tx_ppp, u8 tx_pause)
3232 mutex_init(&stats_bitmap->mutex);
3233 bitmap_zero(stats_bitmap->bitmap, NUM_ALL_STATS);
3235 if (mlx4_is_slave(dev)) {
3236 bitmap_set(stats_bitmap->bitmap, last_i +
3237 MLX4_FIND_NETDEV_STAT(rx_packets), 1);
3238 bitmap_set(stats_bitmap->bitmap, last_i +
3239 MLX4_FIND_NETDEV_STAT(tx_packets), 1);
3240 bitmap_set(stats_bitmap->bitmap, last_i +
3241 MLX4_FIND_NETDEV_STAT(rx_bytes), 1);
3242 bitmap_set(stats_bitmap->bitmap, last_i +
3243 MLX4_FIND_NETDEV_STAT(tx_bytes), 1);
3244 bitmap_set(stats_bitmap->bitmap, last_i +
3245 MLX4_FIND_NETDEV_STAT(rx_dropped), 1);
3246 bitmap_set(stats_bitmap->bitmap, last_i +
3247 MLX4_FIND_NETDEV_STAT(tx_dropped), 1);
3249 bitmap_set(stats_bitmap->bitmap, last_i, NUM_MAIN_STATS);
3251 last_i += NUM_MAIN_STATS;
3253 bitmap_set(stats_bitmap->bitmap, last_i, NUM_PORT_STATS);
3254 last_i += NUM_PORT_STATS;
3256 if (mlx4_is_master(dev))
3257 bitmap_set(stats_bitmap->bitmap, last_i,
3259 last_i += NUM_PF_STATS;
3261 mlx4_en_update_pfc_stats_bitmap(dev, stats_bitmap,
3264 last_i += NUM_FLOW_STATS;
3266 if (!mlx4_is_slave(dev))
3267 bitmap_set(stats_bitmap->bitmap, last_i, NUM_PKT_STATS);
3268 last_i += NUM_PKT_STATS;
3270 bitmap_set(stats_bitmap->bitmap, last_i, NUM_XDP_STATS);
3271 last_i += NUM_XDP_STATS;
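/* Create and initialize the net_device for one physical port: allocate the
 * netdev and private context, set up work items and rings, choose the
 * netdev_ops and feature set, program the default MAC and port settings in
 * FW, and finally register the device. Failures unwind through
 * mlx4_en_destroy_netdev().
 */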
3274 int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
3275 struct mlx4_en_port_profile *prof)
3277 struct net_device *dev;
3278 struct mlx4_en_priv *priv;
3282 dev = alloc_etherdev_mqs(sizeof(struct mlx4_en_priv),
3283 MAX_TX_RINGS, MAX_RX_RINGS);
3287 netif_set_real_num_tx_queues(dev, prof->tx_ring_num[TX]);
3288 netif_set_real_num_rx_queues(dev, prof->rx_ring_num);
3290 SET_NETDEV_DEV(dev, &mdev->dev->persist->pdev->dev);
3291 dev->dev_port = port - 1;
3294 /* Initialize driver private data */
3297 priv = netdev_priv(dev);
3298 memset(priv, 0, sizeof(struct mlx4_en_priv));
3299 priv->counter_index = MLX4_SINK_COUNTER_INDEX(mdev->dev);
3300 spin_lock_init(&priv->stats_lock);
3301 INIT_WORK(&priv->rx_mode_task, mlx4_en_do_set_rx_mode);
3302 INIT_WORK(&priv->restart_task, mlx4_en_restart);
3303 INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate);
3304 INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats);
3305 INIT_DELAYED_WORK(&priv->service_task, mlx4_en_service_task);
3306 INIT_WORK(&priv->vxlan_add_task, mlx4_en_add_vxlan_offloads);
3307 INIT_WORK(&priv->vxlan_del_task, mlx4_en_del_vxlan_offloads);
3308 #ifdef CONFIG_RFS_ACCEL
3309 INIT_LIST_HEAD(&priv->filters);
3310 spin_lock_init(&priv->filters_lock);
3315 priv->ddev = &mdev->pdev->dev;
3318 priv->port_up = false;
3319 priv->flags = prof->flags;
3320 priv->pflags = MLX4_EN_PRIV_FLAGS_BLUEFLAME;
3321 priv->ctrl_flags = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE |
3322 MLX4_WQE_CTRL_SOLICITED);
3323 priv->num_tx_rings_p_up = mdev->profile.num_tx_rings_p_up;
3324 priv->tx_work_limit = MLX4_EN_DEFAULT_TX_WORK;
3325 netdev_rss_key_fill(priv->rss_key, sizeof(priv->rss_key));
3327 for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
3328 priv->tx_ring_num[t] = prof->tx_ring_num[t];
3329 if (!priv->tx_ring_num[t])
3332 priv->tx_ring[t] = kzalloc(sizeof(struct mlx4_en_tx_ring *) *
3333 MAX_TX_RINGS, GFP_KERNEL);
3334 if (!priv->tx_ring[t]) {
3338 priv->tx_cq[t] = kzalloc(sizeof(struct mlx4_en_cq *) *
3339 MAX_TX_RINGS, GFP_KERNEL);
3340 if (!priv->tx_cq[t]) {
3345 priv->rx_ring_num = prof->rx_ring_num;
3346 priv->cqe_factor = (mdev->dev->caps.cqe_size == 64) ? 1 : 0;
3347 priv->cqe_size = mdev->dev->caps.cqe_size;
3348 priv->mac_index = -1;
3349 priv->msg_enable = MLX4_EN_MSG_LEVEL;
3350 #ifdef CONFIG_MLX4_EN_DCB
3351 if (!mlx4_is_slave(priv->mdev->dev)) {
3354 for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; ++prio) {
3355 priv->ets.prio_tc[prio] = prio;
3356 priv->ets.tc_tsa[prio] = IEEE_8021QAZ_TSA_VENDOR;
3359 priv->dcbx_cap = DCB_CAP_DCBX_VER_CEE | DCB_CAP_DCBX_HOST |
3360 DCB_CAP_DCBX_VER_IEEE;
3361 priv->flags |= MLX4_EN_DCB_ENABLED;
3362 priv->cee_config.pfc_state = false;
3364 for (i = 0; i < MLX4_EN_NUM_UP_HIGH; i++)
3365 priv->cee_config.dcb_pfc[i] = pfc_disabled;
3367 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETS_CFG) {
3368 dev->dcbnl_ops = &mlx4_en_dcbnl_ops;
3370 en_info(priv, "enabling only PFC DCB ops\n");
3371 dev->dcbnl_ops = &mlx4_en_dcbnl_pfc_ops;
3376 for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i)
3377 INIT_HLIST_HEAD(&priv->mac_hash[i]);
3379 /* Query for default mac and max mtu */
3380 priv->max_mtu = mdev->dev->caps.eth_mtu_cap[priv->port];
3382 if (mdev->dev->caps.rx_checksum_flags_port[priv->port] &
3383 MLX4_RX_CSUM_MODE_VAL_NON_TCP_UDP)
3384 priv->flags |= MLX4_EN_FLAG_RX_CSUM_NON_TCP_UDP;
3386 /* Set default MAC */
3387 dev->addr_len = ETH_ALEN;
3388 mlx4_en_u64_to_mac(dev->dev_addr, mdev->dev->caps.def_mac[priv->port]);
3389 if (!is_valid_ether_addr(dev->dev_addr)) {
3390 en_err(priv, "Port: %d, invalid MAC burned: %pM, quitting\n",
3391 priv->port, dev->dev_addr);
3394 } else if (mlx4_is_slave(priv->mdev->dev) &&
3395 (priv->mdev->dev->port_random_macs & 1 << priv->port)) {
3396 /* Random MAC was assigned in mlx4_slave_cap
3397 * in mlx4_core module */
3399 dev->addr_assign_type |= NET_ADDR_RANDOM;
3400 en_warn(priv, "Assigned random MAC address %pM\n", dev->dev_addr);
3403 memcpy(priv->current_mac, dev->dev_addr, sizeof(priv->current_mac));
3405 priv->stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
3406 DS_SIZE * MLX4_EN_MAX_RX_FRAGS);
3407 err = mlx4_en_alloc_resources(priv);
3411 /* Initialize time stamping config */
3412 priv->hwtstamp_config.flags = 0;
3413 priv->hwtstamp_config.tx_type = HWTSTAMP_TX_OFF;
3414 priv->hwtstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
3416 /* Allocate page for receive rings */
3417 err = mlx4_alloc_hwq_res(mdev->dev, &priv->res,
3420 en_err(priv, "Failed to allocate page for rx qps\n");
3423 priv->allocated = 1;
3426 /* Initialize netdev entry points */
3428 if (mlx4_is_master(priv->mdev->dev))
3429 dev->netdev_ops = &mlx4_netdev_ops_master;
3431 dev->netdev_ops = &mlx4_netdev_ops;
3432 dev->watchdog_timeo = MLX4_EN_WATCHDOG_TIMEOUT;
3433 netif_set_real_num_tx_queues(dev, priv->tx_ring_num[TX]);
3434 netif_set_real_num_rx_queues(dev, priv->rx_ring_num);
3436 dev->ethtool_ops = &mlx4_en_ethtool_ops;
3439 /* Set driver features */
3441 dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
3442 if (mdev->LSO_support)
3443 dev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
3445 dev->vlan_features = dev->hw_features;
3447 dev->hw_features |= NETIF_F_RXCSUM | NETIF_F_RXHASH;
3448 dev->features = dev->hw_features | NETIF_F_HIGHDMA |
3449 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
3450 NETIF_F_HW_VLAN_CTAG_FILTER;
3451 dev->hw_features |= NETIF_F_LOOPBACK |
3452 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
3454 if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN)) {
3455 dev->features |= NETIF_F_HW_VLAN_STAG_RX |
3456 NETIF_F_HW_VLAN_STAG_FILTER;
3457 dev->hw_features |= NETIF_F_HW_VLAN_STAG_RX;
3460 if (mlx4_is_slave(mdev->dev)) {
3461 bool vlan_offload_disabled;
3464 err = get_phv_bit(mdev->dev, port, &phv);
3466 dev->hw_features |= NETIF_F_HW_VLAN_STAG_TX;
3467 priv->pflags |= MLX4_EN_PRIV_FLAGS_PHV;
3469 err = mlx4_get_is_vlan_offload_disabled(mdev->dev, port,
3470 &vlan_offload_disabled);
3471 if (!err && vlan_offload_disabled) {
3472 dev->hw_features &= ~(NETIF_F_HW_VLAN_CTAG_TX |
3473 NETIF_F_HW_VLAN_CTAG_RX |
3474 NETIF_F_HW_VLAN_STAG_TX |
3475 NETIF_F_HW_VLAN_STAG_RX);
3476 dev->features &= ~(NETIF_F_HW_VLAN_CTAG_TX |
3477 NETIF_F_HW_VLAN_CTAG_RX |
3478 NETIF_F_HW_VLAN_STAG_TX |
3479 NETIF_F_HW_VLAN_STAG_RX);
3482 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PHV_EN &&
3483 !(mdev->dev->caps.flags2 &
3484 MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN))
3485 dev->hw_features |= NETIF_F_HW_VLAN_STAG_TX;
3488 if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP)
3489 dev->hw_features |= NETIF_F_RXFCS;
3491 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_IGNORE_FCS)
3492 dev->hw_features |= NETIF_F_RXALL;
3494 if (mdev->dev->caps.steering_mode ==
3495 MLX4_STEERING_MODE_DEVICE_MANAGED &&
3496 mdev->dev->caps.dmfs_high_steer_mode != MLX4_STEERING_DMFS_A0_STATIC)
3497 dev->hw_features |= NETIF_F_NTUPLE;
3499 if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0)
3500 dev->priv_flags |= IFF_UNICAST_FLT;
3502 /* Set a default RSS hash function */
3503 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_TOP) {
3504 priv->rss_hash_fn = ETH_RSS_HASH_TOP;
3505 } else if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_XOR) {
3506 priv->rss_hash_fn = ETH_RSS_HASH_XOR;
3509 "No RSS hash capabilities exposed, using Toeplitz\n");
3510 priv->rss_hash_fn = ETH_RSS_HASH_TOP;
3513 if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
3514 dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL |
3515 NETIF_F_GSO_UDP_TUNNEL_CSUM |
3516 NETIF_F_GSO_PARTIAL;
3517 dev->features |= NETIF_F_GSO_UDP_TUNNEL |
3518 NETIF_F_GSO_UDP_TUNNEL_CSUM |
3519 NETIF_F_GSO_PARTIAL;
3520 dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM;
3523 /* MTU range: 68 - hw-specific max */
3524 dev->min_mtu = ETH_MIN_MTU;
3525 dev->max_mtu = priv->max_mtu;
3527 mdev->pndev[port] = dev;
3528 mdev->upper[port] = NULL;
3530 netif_carrier_off(dev);
3531 mlx4_en_set_default_moderation(priv);
3533 en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num[TX]);
3534 en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num);
3536 mlx4_en_update_loopback_state(priv->dev, priv->dev->features);
3538 /* Configure port */
3539 mlx4_en_calc_rx_buf(dev);
3540 err = mlx4_SET_PORT_general(mdev->dev, priv->port,
3541 priv->rx_skb_size + ETH_FCS_LEN,
3542 prof->tx_pause, prof->tx_ppp,
3543 prof->rx_pause, prof->rx_ppp);
3545 en_err(priv, "Failed setting port general configurations for port %d, with error %d\n",
3550 if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
3551 err = mlx4_SET_PORT_VXLAN(mdev->dev, priv->port, VXLAN_STEER_BY_OUTER_MAC, 1);
3553 en_err(priv, "Failed setting port L2 tunnel configuration, err %d\n",
3560 en_warn(priv, "Initializing port\n");
3561 err = mlx4_INIT_PORT(mdev->dev, priv->port);
3563 en_err(priv, "Failed initializing port\n");
3566 queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
3568 /* Initialize time stamp mechanism */
3569 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
3570 mlx4_en_init_timestamp(mdev);
3572 queue_delayed_work(mdev->workqueue, &priv->service_task,
3573 SERVICE_TASK_DELAY);
3575 mlx4_en_set_stats_bitmap(mdev->dev, &priv->stats_bitmap,
3576 mdev->profile.prof[priv->port].rx_ppp,
3577 mdev->profile.prof[priv->port].rx_pause,
3578 mdev->profile.prof[priv->port].tx_ppp,
3579 mdev->profile.prof[priv->port].tx_pause);
3581 err = register_netdev(dev);
3583 en_err(priv, "Netdev registration failed for port %d\n", port);
3587 priv->registered = 1;
3588 devlink_port_type_eth_set(mlx4_get_devlink_port(mdev->dev, priv->port),
3594 mlx4_en_destroy_netdev(dev);
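/* Rebuild the port resources for a new timestamping / RX-VLAN / RXFCS
 * configuration using the same try-alloc/stop/replace/start sequence as
 * the XDP path. RX time-stamping and RX VLAN offload cannot co-exist, so
 * VLAN offload is forced off whenever RX stamping is enabled.
 */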
3598 int mlx4_en_reset_config(struct net_device *dev,
3599 struct hwtstamp_config ts_config,
3600 netdev_features_t features)
3602 struct mlx4_en_priv *priv = netdev_priv(dev);
3603 struct mlx4_en_dev *mdev = priv->mdev;
3604 struct mlx4_en_port_profile new_prof;
3605 struct mlx4_en_priv *tmp;
3609 if (priv->hwtstamp_config.tx_type == ts_config.tx_type &&
3610 priv->hwtstamp_config.rx_filter == ts_config.rx_filter &&
3611 !DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX) &&
3612 !DEV_FEATURE_CHANGED(dev, features, NETIF_F_RXFCS))
3613 return 0; /* Nothing to change */
3615 if (DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX) &&
3616 (features & NETIF_F_HW_VLAN_CTAG_RX) &&
3617 (priv->hwtstamp_config.rx_filter != HWTSTAMP_FILTER_NONE)) {
3618 en_warn(priv, "Can't turn ON rx vlan offload while time-stamping rx filter is ON\n");
3622 tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
3626 mutex_lock(&mdev->state_lock);
3628 memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile));
3629 memcpy(&new_prof.hwtstamp_config, &ts_config, sizeof(ts_config));
3631 err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof, true);
3635 if (priv->port_up) {
3637 mlx4_en_stop_port(dev, 1);
3640 en_warn(priv, "Changing device configuration rx filter(%x) rx vlan(%x)\n",
3641 ts_config.rx_filter,
3642 !!(features & NETIF_F_HW_VLAN_CTAG_RX));
3644 mlx4_en_safe_replace_resources(priv, tmp);
3646 if (DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX)) {
3647 if (features & NETIF_F_HW_VLAN_CTAG_RX)
3648 dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
3650 dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
3651 } else if (ts_config.rx_filter == HWTSTAMP_FILTER_NONE) {
3652 /* RX time-stamping is OFF, update the RX vlan offload
3653 * to the latest wanted state */
3655 if (dev->wanted_features & NETIF_F_HW_VLAN_CTAG_RX)
3656 dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
3658 dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
3661 if (DEV_FEATURE_CHANGED(dev, features, NETIF_F_RXFCS)) {
3662 if (features & NETIF_F_RXFCS)
3663 dev->features |= NETIF_F_RXFCS;
3665 dev->features &= ~NETIF_F_RXFCS;
3668 /* RX vlan offload and RX time-stamping can't co-exist!
3669 * Regardless of the caller's choice, turn off RX vlan
3670 * offload when RX time-stamping is ON. */
3672 if (ts_config.rx_filter != HWTSTAMP_FILTER_NONE) {
3673 if (dev->features & NETIF_F_HW_VLAN_CTAG_RX)
3674 en_warn(priv, "Turning off RX vlan offload since RX time-stamping is ON\n");
3675 dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
3679 err = mlx4_en_start_port(dev);
3681 en_err(priv, "Failed starting port\n");
3685 err = mlx4_en_moderation_update(priv);
3687 mutex_unlock(&mdev->state_lock);
3690 netdev_features_change(dev);