/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/netdevice.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/vport.h>
#include "mlx5_core.h"

enum {
	MLX5_LAG_FLAG_BONDED = 1 << 0,
};

struct lag_func {
	struct mlx5_core_dev *dev;
	struct net_device    *netdev;
};

/* Used for collection of netdev event info. */
struct lag_tracker {
	enum   netdev_lag_tx_type           tx_type;
	struct netdev_lag_lower_state_info  netdev_state[MLX5_MAX_PORTS];
	bool is_bonded;
};

/* LAG data of a ConnectX card.
 * It serves both its phys functions.
 */
struct mlx5_lag {
	u8                        flags;
	u8                        v2p_map[MLX5_MAX_PORTS];
	struct lag_func           pf[MLX5_MAX_PORTS];
	struct lag_tracker        tracker;
	struct delayed_work       bond_work;
	struct notifier_block     nb;
};

/* General purpose, use for short periods of time.
 * Beware of lock dependencies (preferably, no locks should be acquired
 * under it).
 */
static DEFINE_MUTEX(lag_mutex);

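/* Create the firmware LAG object, setting the initial virtual to physical
 * port mapping for TX traffic.
 */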
static int mlx5_cmd_create_lag(struct mlx5_core_dev *dev, u8 remap_port1,
			       u8 remap_port2)
{
	u32   in[MLX5_ST_SZ_DW(create_lag_in)]   = {0};
	u32   out[MLX5_ST_SZ_DW(create_lag_out)] = {0};
	void *lag_ctx = MLX5_ADDR_OF(create_lag_in, in, ctx);

	MLX5_SET(create_lag_in, in, opcode, MLX5_CMD_OP_CREATE_LAG);

	MLX5_SET(lagc, lag_ctx, tx_remap_affinity_1, remap_port1);
	MLX5_SET(lagc, lag_ctx, tx_remap_affinity_2, remap_port2);

	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}

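/* Update the TX port affinity of an existing LAG; field_select bit 0
 * marks the tx_remap_affinity fields as the ones being modified.
 */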
static int mlx5_cmd_modify_lag(struct mlx5_core_dev *dev, u8 remap_port1,
			       u8 remap_port2)
{
	u32   in[MLX5_ST_SZ_DW(modify_lag_in)]   = {0};
	u32   out[MLX5_ST_SZ_DW(modify_lag_out)] = {0};
	void *lag_ctx = MLX5_ADDR_OF(modify_lag_in, in, ctx);

	MLX5_SET(modify_lag_in, in, opcode, MLX5_CMD_OP_MODIFY_LAG);
	MLX5_SET(modify_lag_in, in, field_select, 0x1);

	MLX5_SET(lagc, lag_ctx, tx_remap_affinity_1, remap_port1);
	MLX5_SET(lagc, lag_ctx, tx_remap_affinity_2, remap_port2);

	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}

static int mlx5_cmd_destroy_lag(struct mlx5_core_dev *dev)
{
	u32  in[MLX5_ST_SZ_DW(destroy_lag_in)]  = {0};
	u32 out[MLX5_ST_SZ_DW(destroy_lag_out)] = {0};

	MLX5_SET(destroy_lag_in, in, opcode, MLX5_CMD_OP_DESTROY_LAG);

	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}

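/* The vport LAG commands are exported: they are issued by the mlx5_ib
 * driver, which handles the RoCE side of the bond.
 */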
int mlx5_cmd_create_vport_lag(struct mlx5_core_dev *dev)
{
	u32  in[MLX5_ST_SZ_DW(create_vport_lag_in)]  = {0};
	u32 out[MLX5_ST_SZ_DW(create_vport_lag_out)] = {0};

	MLX5_SET(create_vport_lag_in, in, opcode, MLX5_CMD_OP_CREATE_VPORT_LAG);

	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
EXPORT_SYMBOL(mlx5_cmd_create_vport_lag);

int mlx5_cmd_destroy_vport_lag(struct mlx5_core_dev *dev)
{
	u32  in[MLX5_ST_SZ_DW(destroy_vport_lag_in)]  = {0};
	u32 out[MLX5_ST_SZ_DW(destroy_vport_lag_out)] = {0};

	MLX5_SET(destroy_vport_lag_in, in, opcode, MLX5_CMD_OP_DESTROY_VPORT_LAG);

	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
EXPORT_SYMBOL(mlx5_cmd_destroy_vport_lag);

static struct mlx5_lag *mlx5_lag_dev_get(struct mlx5_core_dev *dev)
{
	return dev->priv.lag;
}

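/* Map a netdev back to its slot in the LAG device, or -1 if it is not
 * one of ours.
 */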
static int mlx5_lag_dev_get_netdev_idx(struct mlx5_lag *ldev,
				       struct net_device *ndev)
{
	int i;

	for (i = 0; i < MLX5_MAX_PORTS; i++)
		if (ldev->pf[i].netdev == ndev)
			return i;

	return -1;
}

static bool mlx5_lag_is_bonded(struct mlx5_lag *ldev)
{
	return !!(ldev->flags & MLX5_LAG_FLAG_BONDED);
}

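/* Choose the physical port behind each of the two virtual ports. The
 * default is the identity mapping; if a slave cannot carry traffic
 * (link down or TX disabled), both virtual ports map to its peer.
 */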
static void mlx5_infer_tx_affinity_mapping(struct lag_tracker *tracker,
					   u8 *port1, u8 *port2)
{
	*port1 = 1;
	*port2 = 2;
	if (!tracker->netdev_state[0].tx_enabled ||
	    !tracker->netdev_state[0].link_up) {
		*port1 = 2;
		return;
	}

	if (!tracker->netdev_state[1].tx_enabled ||
	    !tracker->netdev_state[1].link_up)
		*port2 = 1;
}

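/* Mark the device as bonded and create the firmware LAG object with the
 * TX affinity inferred from the current tracker state.
 */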
static void mlx5_activate_lag(struct mlx5_lag *ldev,
			      struct lag_tracker *tracker)
{
	struct mlx5_core_dev *dev0 = ldev->pf[0].dev;
	int err;

	ldev->flags |= MLX5_LAG_FLAG_BONDED;

	mlx5_infer_tx_affinity_mapping(tracker, &ldev->v2p_map[0],
				       &ldev->v2p_map[1]);

	err = mlx5_cmd_create_lag(dev0, ldev->v2p_map[0], ldev->v2p_map[1]);
	if (err)
		mlx5_core_err(dev0,
			      "Failed to create LAG (%d)\n",
			      err);
}

static void mlx5_deactivate_lag(struct mlx5_lag *ldev)
{
	struct mlx5_core_dev *dev0 = ldev->pf[0].dev;
	int err;

	ldev->flags &= ~MLX5_LAG_FLAG_BONDED;

	err = mlx5_cmd_destroy_lag(dev0);
	if (err)
		mlx5_core_err(dev0,
			      "Failed to destroy LAG (%d)\n",
			      err);
}

static void mlx5_do_bond(struct mlx5_lag *ldev)
{
	struct mlx5_core_dev *dev0 = ldev->pf[0].dev;
	struct mlx5_core_dev *dev1 = ldev->pf[1].dev;
	struct lag_tracker tracker;
	u8 v2p_port1, v2p_port2;
	int i, err;

	if (!dev0 || !dev1)
		return;

	mutex_lock(&lag_mutex);
	tracker = ldev->tracker;
	mutex_unlock(&lag_mutex);

	if (tracker.is_bonded && !mlx5_lag_is_bonded(ldev)) {
		if (mlx5_sriov_is_enabled(dev0) ||
		    mlx5_sriov_is_enabled(dev1)) {
			mlx5_core_warn(dev0, "LAG is not supported with SRIOV");
			return;
		}

		for (i = 0; i < MLX5_MAX_PORTS; i++)
			mlx5_remove_dev_by_protocol(ldev->pf[i].dev,
						    MLX5_INTERFACE_PROTOCOL_IB);

		mlx5_activate_lag(ldev, &tracker);

		mlx5_add_dev_by_protocol(dev0, MLX5_INTERFACE_PROTOCOL_IB);
		mlx5_nic_vport_enable_roce(dev1);
	} else if (tracker.is_bonded && mlx5_lag_is_bonded(ldev)) {
		mlx5_infer_tx_affinity_mapping(&tracker, &v2p_port1,
					       &v2p_port2);

		if ((v2p_port1 != ldev->v2p_map[0]) ||
		    (v2p_port2 != ldev->v2p_map[1])) {
			ldev->v2p_map[0] = v2p_port1;
			ldev->v2p_map[1] = v2p_port2;

			err = mlx5_cmd_modify_lag(dev0, v2p_port1, v2p_port2);
			if (err)
				mlx5_core_err(dev0,
					      "Failed to modify LAG (%d)\n",
					      err);
		}
	} else if (!tracker.is_bonded && mlx5_lag_is_bonded(ldev)) {
		mlx5_remove_dev_by_protocol(dev0, MLX5_INTERFACE_PROTOCOL_IB);
		mlx5_nic_vport_disable_roce(dev1);

		mlx5_deactivate_lag(ldev);

		for (i = 0; i < MLX5_MAX_PORTS; i++)
			if (ldev->pf[i].dev)
				mlx5_add_dev_by_protocol(ldev->pf[i].dev,
							 MLX5_INTERFACE_PROTOCOL_IB);
	}
}

static void mlx5_queue_bond_work(struct mlx5_lag *ldev, unsigned long delay)
{
	schedule_delayed_work(&ldev->bond_work, delay);
}

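/* Deferred bond work: mlx5_do_bond() must run under the device list
 * lock, so if the lock is contended, back off and retry in one second.
 */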
static void mlx5_do_bond_work(struct work_struct *work)
{
	struct delayed_work *delayed_work = to_delayed_work(work);
	struct mlx5_lag *ldev = container_of(delayed_work, struct mlx5_lag,
					     bond_work);
	int status;

	status = mlx5_dev_list_trylock();
	if (!status) {
		/* 1 sec delay. */
		mlx5_queue_bond_work(ldev, HZ);
		return;
	}

	mlx5_do_bond(ldev);
	mlx5_dev_list_unlock();
}

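/* Handle a CHANGEUPPER event; returns 1 if the bonding status of our
 * ports changed and the hardware LAG state needs to be re-evaluated.
 */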
static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev,
					 struct lag_tracker *tracker,
					 struct net_device *ndev,
					 struct netdev_notifier_changeupper_info *info)
{
	struct net_device *upper = info->upper_dev, *ndev_tmp;
	struct netdev_lag_upper_info *lag_upper_info = NULL;
	bool is_bonded;
	int bond_status = 0;
	int num_slaves = 0;
	int idx;

	if (!netif_is_lag_master(upper))
		return 0;

	if (info->linking)
		lag_upper_info = info->upper_info;

	/* The event may still be of interest if the slave does not belong to
	 * us, but is enslaved to a master which has one or more of our netdevs
	 * as slaves (e.g., if a new slave is added to a master that bonds two
	 * of our netdevs, we should unbond).
	 */
	rcu_read_lock();
	for_each_netdev_in_bond_rcu(upper, ndev_tmp) {
		idx = mlx5_lag_dev_get_netdev_idx(ldev, ndev_tmp);
		if (idx > -1)
			bond_status |= (1 << idx);

		num_slaves++;
	}
	rcu_read_unlock();

	/* None of this lagdev's netdevs are slaves of this master. */
	if (!(bond_status & 0x3))
		return 0;

	if (lag_upper_info)
		tracker->tx_type = lag_upper_info->tx_type;

	/* Determine bonding status:
	 * A device is considered bonded if both its physical ports are slaves
	 * of the same lag master, and only them.
	 * Lag mode must be activebackup or hash.
	 */
	is_bonded = (num_slaves == MLX5_MAX_PORTS) &&
		    (bond_status == 0x3) &&
		    ((tracker->tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP) ||
		     (tracker->tx_type == NETDEV_LAG_TX_TYPE_HASH));

	if (tracker->is_bonded != is_bonded) {
		tracker->is_bonded = is_bonded;
		return 1;
	}

	return 0;
}

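/* Handle a CHANGELOWERSTATE event: record the link and TX state of one
 * of our slave netdevs; returns 1 if the stored state changed.
 */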
static int mlx5_handle_changelowerstate_event(struct mlx5_lag *ldev,
					      struct lag_tracker *tracker,
					      struct net_device *ndev,
					      struct netdev_notifier_changelowerstate_info *info)
{
	struct netdev_lag_lower_state_info *lag_lower_info;
	int idx;

	if (!netif_is_lag_port(ndev))
		return 0;

	idx = mlx5_lag_dev_get_netdev_idx(ldev, ndev);
	if (idx == -1)
		return 0;

	/* This information is used to determine virtual to physical
	 * port mapping.
	 */
	lag_lower_info = info->lower_state_info;
	if (!lag_lower_info)
		return 0;

	tracker->netdev_state[idx] = *lag_lower_info;

	return 1;
}

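/* Netdev notifier callback: record the event in a local copy of the
 * tracker, publish it under lag_mutex, and kick the bond work if the
 * bonding picture changed.
 */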
static int mlx5_lag_netdev_event(struct notifier_block *this,
				 unsigned long event, void *ptr)
{
	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
	struct lag_tracker tracker;
	struct mlx5_lag *ldev;
	int changed = 0;

	if (!net_eq(dev_net(ndev), &init_net))
		return NOTIFY_DONE;

	if ((event != NETDEV_CHANGEUPPER) && (event != NETDEV_CHANGELOWERSTATE))
		return NOTIFY_DONE;

	ldev    = container_of(this, struct mlx5_lag, nb);
	tracker = ldev->tracker;

	switch (event) {
	case NETDEV_CHANGEUPPER:
		changed = mlx5_handle_changeupper_event(ldev, &tracker, ndev,
							ptr);
		break;
	case NETDEV_CHANGELOWERSTATE:
		changed = mlx5_handle_changelowerstate_event(ldev, &tracker,
							     ndev, ptr);
		break;
	}

	mutex_lock(&lag_mutex);
	ldev->tracker = tracker;
	mutex_unlock(&lag_mutex);

	if (changed)
		mlx5_queue_bond_work(ldev, 0);

	return NOTIFY_DONE;
}

static struct mlx5_lag *mlx5_lag_dev_alloc(void)
{
	struct mlx5_lag *ldev;

	ldev = kzalloc(sizeof(*ldev), GFP_KERNEL);
	if (!ldev)
		return NULL;

	INIT_DELAYED_WORK(&ldev->bond_work, mlx5_do_bond_work);

	return ldev;
}

static void mlx5_lag_dev_free(struct mlx5_lag *ldev)
{
	kfree(ldev);
}

static void mlx5_lag_dev_add_pf(struct mlx5_lag *ldev,
				struct mlx5_core_dev *dev,
				struct net_device *netdev)
{
	unsigned int fn = PCI_FUNC(dev->pdev->devfn);

	if (fn >= MLX5_MAX_PORTS)
		return;

	mutex_lock(&lag_mutex);
	ldev->pf[fn].dev    = dev;
	ldev->pf[fn].netdev = netdev;
	ldev->tracker.netdev_state[fn].link_up = 0;
	ldev->tracker.netdev_state[fn].tx_enabled = 0;

	dev->priv.lag = ldev;
	mutex_unlock(&lag_mutex);
}

static void mlx5_lag_dev_remove_pf(struct mlx5_lag *ldev,
				   struct mlx5_core_dev *dev)
{
	int i;

	for (i = 0; i < MLX5_MAX_PORTS; i++)
		if (ldev->pf[i].dev == dev)
			break;

	if (i == MLX5_MAX_PORTS)
		return;

	mutex_lock(&lag_mutex);
	memset(&ldev->pf[i], 0, sizeof(*ldev->pf));

	dev->priv.lag = NULL;
	mutex_unlock(&lag_mutex);
}

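/* The two PFs of a card share a single struct mlx5_lag: the second PF
 * to come up finds the existing one through its sibling device.
 */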
/* Must be called with intf_mutex held */
void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev)
{
	struct mlx5_lag *ldev = NULL;
	struct mlx5_core_dev *tmp_dev;

	if (!MLX5_CAP_GEN(dev, vport_group_manager) ||
	    !MLX5_CAP_GEN(dev, lag_master) ||
	    (MLX5_CAP_GEN(dev, num_lag_ports) != MLX5_MAX_PORTS))
		return;

	tmp_dev = mlx5_get_next_phys_dev(dev);
	if (tmp_dev)
		ldev = tmp_dev->priv.lag;

	if (!ldev) {
		ldev = mlx5_lag_dev_alloc();
		if (!ldev) {
			mlx5_core_err(dev, "Failed to alloc lag dev\n");
			return;
		}
	}

	mlx5_lag_dev_add_pf(ldev, dev, netdev);

	if (!ldev->nb.notifier_call) {
		ldev->nb.notifier_call = mlx5_lag_netdev_event;
		if (register_netdevice_notifier(&ldev->nb)) {
			ldev->nb.notifier_call = NULL;
			mlx5_core_err(dev, "Failed to register LAG netdev notifier\n");
		}
	}
}

/* Must be called with intf_mutex held */
void mlx5_lag_remove(struct mlx5_core_dev *dev)
{
	struct mlx5_lag *ldev;
	int i;

	ldev = mlx5_lag_dev_get(dev);
	if (!ldev)
		return;

	if (mlx5_lag_is_bonded(ldev))
		mlx5_deactivate_lag(ldev);

	mlx5_lag_dev_remove_pf(ldev, dev);

	for (i = 0; i < MLX5_MAX_PORTS; i++)
		if (ldev->pf[i].dev)
			break;

	if (i == MLX5_MAX_PORTS) {
		if (ldev->nb.notifier_call)
			unregister_netdevice_notifier(&ldev->nb);
		cancel_delayed_work_sync(&ldev->bond_work);
		mlx5_lag_dev_free(ldev);
	}
}

bool mlx5_lag_is_active(struct mlx5_core_dev *dev)
{
	struct mlx5_lag *ldev;
	bool res;

	mutex_lock(&lag_mutex);
	ldev = mlx5_lag_dev_get(dev);
	res  = ldev && mlx5_lag_is_bonded(ldev);
	mutex_unlock(&lag_mutex);

	return res;
}
EXPORT_SYMBOL(mlx5_lag_is_active);

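/* Return the netdev that currently carries RoCE traffic, with a
 * reference held: the active slave in active-backup mode, otherwise
 * port 1's netdev.
 */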
struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev)
{
	struct net_device *ndev = NULL;
	struct mlx5_lag *ldev;

	mutex_lock(&lag_mutex);
	ldev = mlx5_lag_dev_get(dev);

	if (!(ldev && mlx5_lag_is_bonded(ldev)))
		goto unlock;

	if (ldev->tracker.tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP) {
		ndev = ldev->tracker.netdev_state[0].tx_enabled ?
		       ldev->pf[0].netdev : ldev->pf[1].netdev;
	} else {
		ndev = ldev->pf[0].netdev;
	}
	if (ndev)
		dev_hold(ndev);

unlock:
	mutex_unlock(&lag_mutex);

	return ndev;
}
EXPORT_SYMBOL(mlx5_lag_get_roce_netdev);

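/* Decide whether an interface should be attached to this core device:
 * while bonded, only PF0 carries the IB interface.
 */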
bool mlx5_lag_intf_add(struct mlx5_interface *intf, struct mlx5_priv *priv)
{
	struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev,
						 priv);
	struct mlx5_lag *ldev;

	if (intf->protocol != MLX5_INTERFACE_PROTOCOL_IB)
		return true;

	ldev = mlx5_lag_dev_get(dev);
	if (!ldev || !mlx5_lag_is_bonded(ldev) || ldev->pf[0].dev == dev)
		return true;

	/* If bonded, we do not add an IB device for PF1. */
	return false;
}