/*
 * Copyright (c) 2015, Mellanox Technologies inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "core_priv.h"

#include <linux/in.h>
#include <linux/in6.h>

/* For in6_dev_get/in6_dev_put */
#include <net/addrconf.h>
#include <net/bonding.h>

#include <rdma/ib_cache.h>
#include <rdma/ib_addr.h>
static struct workqueue_struct *gid_cache_wq;

enum gid_op_type {
	GID_DEL = 0,
	GID_ADD
};

struct update_gid_event_work {
	struct work_struct work;
	union ib_gid gid;
	struct ib_gid_attr gid_attr;
	enum gid_op_type gid_op;
};
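
/* Each netdevice event is translated into at most
 * ROCE_NETDEV_CALLBACK_SZ filter/callback command pairs, which are
 * then executed from the gid_cache_wq workqueue.
 */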
#define ROCE_NETDEV_CALLBACK_SZ		3
struct netdev_event_work_cmd {
	roce_netdev_callback	cb;
	roce_netdev_filter	filter;
	struct net_device	*ndev;
	struct net_device	*filter_ndev;
};

struct netdev_event_work {
	struct work_struct		work;
	struct netdev_event_work_cmd	cmds[ROCE_NETDEV_CALLBACK_SZ];
};
static const struct {
	bool (*is_supported)(const struct ib_device *device, u8 port_num);
	enum ib_gid_type gid_type;
} PORT_CAP_TO_GID_TYPE[] = {
	{rdma_protocol_roce_eth_encap, IB_GID_TYPE_ROCE},
	{rdma_protocol_roce_udp_encap, IB_GID_TYPE_ROCE_UDP_ENCAP},
};

#define CAP_TO_GID_TABLE_SIZE	ARRAY_SIZE(PORT_CAP_TO_GID_TYPE)
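
/* Returns a bitmask of the GID types supported on @port, indexed by
 * enum ib_gid_type. Non-RoCE (IB/iWARP) ports only support
 * IB_GID_TYPE_IB; RoCE ports support RoCE v1 and/or v2 according to
 * the protocol capabilities in PORT_CAP_TO_GID_TYPE above.
 */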
unsigned long roce_gid_type_mask_support(struct ib_device *ib_dev, u8 port)
{
	int i;
	unsigned int ret_flags = 0;

	if (!rdma_protocol_roce(ib_dev, port))
		return 1UL << IB_GID_TYPE_IB;

	for (i = 0; i < CAP_TO_GID_TABLE_SIZE; i++)
		if (PORT_CAP_TO_GID_TYPE[i].is_supported(ib_dev, port))
			ret_flags |= 1UL << PORT_CAP_TO_GID_TYPE[i].gid_type;

	return ret_flags;
}
EXPORT_SYMBOL(roce_gid_type_mask_support);
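
/* Add or delete the GID in the cache once per GID type supported by
 * the port; gid_attr->gid_type is rewritten on each iteration before
 * calling into the GID cache.
 */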
static void update_gid(enum gid_op_type gid_op, struct ib_device *ib_dev,
		       u8 port, union ib_gid *gid,
		       struct ib_gid_attr *gid_attr)
{
	int i;
	unsigned long gid_type_mask = roce_gid_type_mask_support(ib_dev, port);

	for (i = 0; i < IB_GID_TYPE_SIZE; i++) {
		if ((1UL << i) & gid_type_mask) {
			gid_attr->gid_type = i;
			switch (gid_op) {
			case GID_ADD:
				ib_cache_gid_add(ib_dev, port,
						 gid, gid_attr);
				break;
			case GID_DEL:
				ib_cache_gid_del(ib_dev, port,
						 gid, gid_attr);
				break;
			}
		}
	}
}
enum bonding_slave_state {
	BONDING_SLAVE_STATE_ACTIVE	= 1UL << 0,
	BONDING_SLAVE_STATE_INACTIVE	= 1UL << 1,
	/* No primary slave or the device isn't a slave in bonding */
	BONDING_SLAVE_STATE_NA		= 1UL << 2,
};
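
/* Must be called under rcu_read_lock(), since the bond's active slave
 * is dereferenced via RCU.
 */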
static enum bonding_slave_state is_eth_active_slave_of_bonding_rcu(struct net_device *dev,
								   struct net_device *upper)
{
	if (upper && netif_is_bond_master(upper)) {
		struct net_device *pdev =
			bond_option_active_slave_get_rcu(netdev_priv(upper));

		if (pdev)
			return dev == pdev ? BONDING_SLAVE_STATE_ACTIVE :
				BONDING_SLAVE_STATE_INACTIVE;
	}

	return BONDING_SLAVE_STATE_NA;
}
#define REQUIRED_BOND_STATES	(BONDING_SLAVE_STATE_ACTIVE |	\
				 BONDING_SLAVE_STATE_NA)
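
/* Matches when the event netdevice is served by this RDMA port: either
 * the (VLAN-stripped) real device of the event netdevice is rdma_ndev
 * itself, or the event netdevice is an upper device of rdma_ndev while
 * rdma_ndev is the bond's active slave (or not a bond slave at all).
 */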
static bool
is_eth_port_of_netdev_filter(struct ib_device *ib_dev, u8 port,
			     struct net_device *rdma_ndev, void *cookie)
{
	struct net_device *real_dev;
	bool res;

	if (!rdma_ndev)
		return false;

	rcu_read_lock();
	real_dev = rdma_vlan_dev_real_dev(cookie);
	if (!real_dev)
		real_dev = cookie;

	res = ((rdma_is_upper_dev_rcu(rdma_ndev, cookie) &&
	       (is_eth_active_slave_of_bonding_rcu(rdma_ndev, real_dev) &
		REQUIRED_BOND_STATES)) ||
	       real_dev == rdma_ndev);

	rcu_read_unlock();
	return res;
}
static bool
is_eth_port_inactive_slave_filter(struct ib_device *ib_dev, u8 port,
				  struct net_device *rdma_ndev, void *cookie)
{
	struct net_device *master_dev;
	bool res;

	if (!rdma_ndev)
		return false;

	rcu_read_lock();
	master_dev = netdev_master_upper_dev_get_rcu(rdma_ndev);
	res = is_eth_active_slave_of_bonding_rcu(rdma_ndev, master_dev) ==
	      BONDING_SLAVE_STATE_INACTIVE;
	rcu_read_unlock();

	return res;
}
/**
 * is_ndev_for_default_gid_filter - Check if a given netdevice
 * can be considered for default GIDs or not.
 * @ib_dev:	IB device to check
 * @port:	Port to consider for adding default GID
 * @rdma_ndev:	rdma netdevice pointer
 * @cookie:	Netdevice to consider to form a default GID
 *
 * is_ndev_for_default_gid_filter() returns true if a given netdevice can be
 * considered for deriving default RoCE GID, returns false otherwise.
 */
static bool
is_ndev_for_default_gid_filter(struct ib_device *ib_dev, u8 port,
			       struct net_device *rdma_ndev, void *cookie)
{
	struct net_device *cookie_ndev = cookie;
	bool res;

	if (!rdma_ndev)
		return false;

	rcu_read_lock();

	/*
	 * When rdma netdevice is used in bonding, bonding master netdevice
	 * should be considered for default GIDs. Therefore, ignore slave rdma
	 * netdevices when bonding is considered.
	 * Additionally when event(cookie) netdevice is bond master device,
	 * make sure that it is the upper netdevice of rdma netdevice.
	 */
	res = ((cookie_ndev == rdma_ndev && !netif_is_bond_slave(rdma_ndev)) ||
	       (netif_is_bond_master(cookie_ndev) &&
		rdma_is_upper_dev_rcu(rdma_ndev, cookie_ndev)));

	rcu_read_unlock();
	return res;
}
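
/* Matches unconditionally; used to enumerate every RoCE port
 * regardless of the event netdevice.
 */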
static bool pass_all_filter(struct ib_device *ib_dev, u8 port,
			    struct net_device *rdma_ndev, void *cookie)
{
	return true;
}
static bool upper_device_filter(struct ib_device *ib_dev, u8 port,
				struct net_device *rdma_ndev, void *cookie)
{
	bool res;

	if (!rdma_ndev)
		return false;

	if (rdma_ndev == cookie)
		return true;

	rcu_read_lock();
	res = rdma_is_upper_dev_rcu(rdma_ndev, cookie);
	rcu_read_unlock();

	return res;
}
/**
 * is_upper_ndev_bond_master_filter - Check if a given netdevice
 * is bond master device of netdevice of the RDMA device of port.
 * @ib_dev:	IB device to check
 * @port:	Port to consider for adding default GID
 * @rdma_ndev:	Pointer to rdma netdevice
 * @cookie:	Netdevice to consider to form a default GID
 *
 * is_upper_ndev_bond_master_filter() returns true if a cookie_ndev
 * is bond master device and rdma_ndev is its lower netdevice. It might
 * not have been established as slave device yet.
 */
static bool
is_upper_ndev_bond_master_filter(struct ib_device *ib_dev, u8 port,
				 struct net_device *rdma_ndev,
				 void *cookie)
{
	struct net_device *cookie_ndev = cookie;
	bool match = false;

	if (!rdma_ndev)
		return false;

	rcu_read_lock();
	if (netif_is_bond_master(cookie_ndev) &&
	    rdma_is_upper_dev_rcu(rdma_ndev, cookie_ndev))
		match = true;
	rcu_read_unlock();

	return match;
}
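
/* Translate an IPv4/IPv6 socket address into a GID via rdma_ip2gid()
 * and apply the requested add/delete operation for it.
 */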
static void update_gid_ip(enum gid_op_type gid_op,
			  struct ib_device *ib_dev,
			  u8 port, struct net_device *ndev,
			  struct sockaddr *addr)
{
	union ib_gid gid;
	struct ib_gid_attr gid_attr;

	rdma_ip2gid(addr, &gid);
	memset(&gid_attr, 0, sizeof(gid_attr));
	gid_attr.ndev = ndev;

	update_gid(gid_op, ib_dev, port, &gid, &gid_attr);
}
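
/* Delete the default GIDs of rdma_ndev, but only when the event
 * netdevice is rdma_ndev itself or one of its uppers, and rdma_ndev is
 * not an inactive slave of the bond.
 */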
static void bond_delete_netdev_default_gids(struct ib_device *ib_dev,
					    u8 port,
					    struct net_device *rdma_ndev,
					    struct net_device *event_ndev)
{
	struct net_device *real_dev = rdma_vlan_dev_real_dev(event_ndev);
	unsigned long gid_type_mask;

	if (!rdma_ndev)
		return;

	if (!real_dev)
		real_dev = event_ndev;

	rcu_read_lock();

	if ((rdma_ndev != event_ndev &&
	     !rdma_is_upper_dev_rcu(rdma_ndev, event_ndev)) ||
	    is_eth_active_slave_of_bonding_rcu(rdma_ndev, real_dev) ==
	    BONDING_SLAVE_STATE_INACTIVE) {
		rcu_read_unlock();
		return;
	}

	rcu_read_unlock();

	gid_type_mask = roce_gid_type_mask_support(ib_dev, port);

	ib_cache_gid_set_default_gid(ib_dev, port, rdma_ndev,
				     gid_type_mask,
				     IB_CACHE_GID_DEFAULT_MODE_DELETE);
}
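
/* Addresses are first copied to a private list under RCU using
 * GFP_ATOMIC; the GID table updates, which may sleep, are then done
 * outside the RCU read-side section.
 */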
static void enum_netdev_ipv4_ips(struct ib_device *ib_dev,
				 u8 port, struct net_device *ndev)
{
	struct in_device *in_dev;
	struct sin_list {
		struct list_head	list;
		struct sockaddr_in	ip;
	};
	struct sin_list *sin_iter;
	struct sin_list *sin_temp;

	LIST_HEAD(sin_list);
	if (ndev->reg_state >= NETREG_UNREGISTERING)
		return;

	rcu_read_lock();
	in_dev = __in_dev_get_rcu(ndev);
	if (!in_dev) {
		rcu_read_unlock();
		return;
	}

	for_ifa(in_dev) {
		struct sin_list *entry = kzalloc(sizeof(*entry), GFP_ATOMIC);

		if (!entry)
			continue;

		entry->ip.sin_family = AF_INET;
		entry->ip.sin_addr.s_addr = ifa->ifa_address;
		list_add_tail(&entry->list, &sin_list);
	}
	endfor_ifa(in_dev);
	rcu_read_unlock();

	list_for_each_entry_safe(sin_iter, sin_temp, &sin_list, list) {
		update_gid_ip(GID_ADD, ib_dev, port, ndev,
			      (struct sockaddr *)&sin_iter->ip);
		list_del(&sin_iter->list);
		kfree(sin_iter);
	}
}
static void enum_netdev_ipv6_ips(struct ib_device *ib_dev,
				 u8 port, struct net_device *ndev)
{
	struct inet6_ifaddr *ifp;
	struct inet6_dev *in6_dev;
	struct sin6_list {
		struct list_head	list;
		struct sockaddr_in6	sin6;
	};
	struct sin6_list *sin6_iter;
	struct sin6_list *sin6_temp;
	struct ib_gid_attr gid_attr = {.ndev = ndev};
	LIST_HEAD(sin6_list);

	if (ndev->reg_state >= NETREG_UNREGISTERING)
		return;

	in6_dev = in6_dev_get(ndev);
	if (!in6_dev)
		return;

	read_lock_bh(&in6_dev->lock);
	list_for_each_entry(ifp, &in6_dev->addr_list, if_list) {
		struct sin6_list *entry = kzalloc(sizeof(*entry), GFP_ATOMIC);

		if (!entry)
			continue;

		entry->sin6.sin6_family = AF_INET6;
		entry->sin6.sin6_addr = ifp->addr;
		list_add_tail(&entry->list, &sin6_list);
	}
	read_unlock_bh(&in6_dev->lock);

	in6_dev_put(in6_dev);

	list_for_each_entry_safe(sin6_iter, sin6_temp, &sin6_list, list) {
		union ib_gid gid;

		rdma_ip2gid((struct sockaddr *)&sin6_iter->sin6, &gid);
		update_gid(GID_ADD, ib_dev, port, &gid, &gid_attr);
		list_del(&sin6_iter->list);
		kfree(sin6_iter);
	}
}
static void _add_netdev_ips(struct ib_device *ib_dev, u8 port,
			    struct net_device *ndev)
{
	enum_netdev_ipv4_ips(ib_dev, port, ndev);
	if (IS_ENABLED(CONFIG_IPV6))
		enum_netdev_ipv6_ips(ib_dev, port, ndev);
}

static void add_netdev_ips(struct ib_device *ib_dev, u8 port,
			   struct net_device *rdma_ndev, void *cookie)
{
	_add_netdev_ips(ib_dev, port, cookie);
}

static void del_netdev_ips(struct ib_device *ib_dev, u8 port,
			   struct net_device *rdma_ndev, void *cookie)
{
	ib_cache_gid_del_all_netdev_gids(ib_dev, port, cookie);
}
/**
 * del_default_gids - Delete default GIDs of the event/cookie netdevice
 * @ib_dev:	RDMA device pointer
 * @port:	Port of the RDMA device whose GID table to consider
 * @rdma_ndev:	Unused rdma netdevice
 * @cookie:	Pointer to event netdevice
 *
 * del_default_gids() deletes the default GIDs of the event/cookie netdevice.
 */
static void del_default_gids(struct ib_device *ib_dev, u8 port,
			     struct net_device *rdma_ndev, void *cookie)
{
	struct net_device *cookie_ndev = cookie;
	unsigned long gid_type_mask;

	gid_type_mask = roce_gid_type_mask_support(ib_dev, port);

	ib_cache_gid_set_default_gid(ib_dev, port, cookie_ndev, gid_type_mask,
				     IB_CACHE_GID_DEFAULT_MODE_DELETE);
}
static void add_default_gids(struct ib_device *ib_dev, u8 port,
			     struct net_device *rdma_ndev, void *cookie)
{
	struct net_device *event_ndev = cookie;
	unsigned long gid_type_mask;

	gid_type_mask = roce_gid_type_mask_support(ib_dev, port);
	ib_cache_gid_set_default_gid(ib_dev, port, event_ndev, gid_type_mask,
				     IB_CACHE_GID_DEFAULT_MODE_SET);
}
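
/* Walk every netdevice in every network namespace and (re)add both the
 * default GIDs and the IP based GIDs that map onto this RDMA port.
 */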
static void enum_all_gids_of_dev_cb(struct ib_device *ib_dev,
				    u8 port,
				    struct net_device *rdma_ndev,
				    void *cookie)
{
	struct net *net;
	struct net_device *ndev;

	/* Hold net_rwsem so the netdevs and their net namespaces do not
	 * move under our feet.
	 */
	down_read(&net_rwsem);
	for_each_net(net)
		for_each_netdev(net, ndev) {
			/*
			 * Filter and add default GIDs of the primary netdevice
			 * when not in bonding mode, or add default GIDs
			 * of bond master device, when in bonding mode.
			 */
			if (is_ndev_for_default_gid_filter(ib_dev, port,
							   rdma_ndev, ndev))
				add_default_gids(ib_dev, port, rdma_ndev, ndev);

			/* Add IP based GIDs of the netdevice */
			if (is_eth_port_of_netdev_filter(ib_dev, port,
							 rdma_ndev, ndev))
				_add_netdev_ips(ib_dev, port, ndev);
		}
	up_read(&net_rwsem);
}
/**
 * rdma_roce_rescan_device - Rescan all of the network devices in the system
 * and add their gids, as needed, to the relevant RoCE devices.
 *
 * @ib_dev: the rdma device
 */
void rdma_roce_rescan_device(struct ib_device *ib_dev)
{
	ib_enum_roce_netdev(ib_dev, pass_all_filter, NULL,
			    enum_all_gids_of_dev_cb, NULL);
}
EXPORT_SYMBOL(rdma_roce_rescan_device);
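
/* Apply a queued address-event work item to one matching port. */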
static void callback_for_addr_gid_device_scan(struct ib_device *device,
					      u8 port,
					      struct net_device *rdma_ndev,
					      void *cookie)
{
	struct update_gid_event_work *parsed = cookie;

	return update_gid(parsed->gid_op, device,
			  port, &parsed->gid,
			  &parsed->gid_attr);
}

struct upper_list {
	struct list_head list;
	struct net_device *upper;
};
static int netdev_upper_walk(struct net_device *upper, void *data)
{
	struct upper_list *entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
	struct list_head *upper_list = data;

	if (!entry)
		return 0;

	list_add_tail(&entry->list, upper_list);
	dev_hold(upper);
	entry->upper = upper;

	return 0;
}
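
/* Invoke handle_netdev() on ndev and on each of its upper devices. The
 * uppers are collected under RCU, with a reference held on each, so
 * that the handler itself can run outside the RCU read-side section.
 */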
static void handle_netdev_upper(struct ib_device *ib_dev, u8 port,
				void *cookie,
				void (*handle_netdev)(struct ib_device *ib_dev,
						      u8 port,
						      struct net_device *ndev))
{
	struct net_device *ndev = cookie;
	struct upper_list *upper_iter;
	struct upper_list *upper_temp;
	LIST_HEAD(upper_list);

	rcu_read_lock();
	netdev_walk_all_upper_dev_rcu(ndev, netdev_upper_walk, &upper_list);
	rcu_read_unlock();

	handle_netdev(ib_dev, port, ndev);
	list_for_each_entry_safe(upper_iter, upper_temp, &upper_list,
				 list) {
		handle_netdev(ib_dev, port, upper_iter->upper);
		dev_put(upper_iter->upper);
		list_del(&upper_iter->list);
		kfree(upper_iter);
	}
}
static void _roce_del_all_netdev_gids(struct ib_device *ib_dev, u8 port,
				      struct net_device *event_ndev)
{
	ib_cache_gid_del_all_netdev_gids(ib_dev, port, event_ndev);
}

static void del_netdev_upper_ips(struct ib_device *ib_dev, u8 port,
				 struct net_device *rdma_ndev, void *cookie)
{
	handle_netdev_upper(ib_dev, port, cookie, _roce_del_all_netdev_gids);
}

static void add_netdev_upper_ips(struct ib_device *ib_dev, u8 port,
				 struct net_device *rdma_ndev, void *cookie)
{
	handle_netdev_upper(ib_dev, port, cookie, _add_netdev_ips);
}

static void del_netdev_default_ips_join(struct ib_device *ib_dev, u8 port,
					struct net_device *rdma_ndev,
					void *cookie)
{
	struct net_device *master_ndev;

	rcu_read_lock();
	master_ndev = netdev_master_upper_dev_get_rcu(rdma_ndev);
	if (master_ndev)
		dev_hold(master_ndev);
	rcu_read_unlock();

	if (master_ndev) {
		bond_delete_netdev_default_gids(ib_dev, port, rdma_ndev,
						master_ndev);
		dev_put(master_ndev);
	}
}
/* The following functions operate on all IB devices. netdevice_event and
 * addr_event execute ib_enum_all_roce_netdevs through a work.
 * ib_enum_all_roce_netdevs iterates through all IB devices.
 */

static void netdevice_event_work_handler(struct work_struct *_work)
{
	struct netdev_event_work *work =
		container_of(_work, struct netdev_event_work, work);
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(work->cmds) && work->cmds[i].cb; i++) {
		ib_enum_all_roce_netdevs(work->cmds[i].filter,
					 work->cmds[i].filter_ndev,
					 work->cmds[i].cb,
					 work->cmds[i].ndev);
		dev_put(work->cmds[i].ndev);
		dev_put(work->cmds[i].filter_ndev);
	}

	kfree(work);
}
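
/* Copy the command array into a work item, take a reference on every
 * netdevice it points at (dropped again by the work handler above) and
 * queue it on the ordered gid_cache_wq.
 */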
static int netdevice_queue_work(struct netdev_event_work_cmd *cmds,
				struct net_device *ndev)
{
	unsigned int i;
	struct netdev_event_work *ndev_work =
		kmalloc(sizeof(*ndev_work), GFP_KERNEL);

	if (!ndev_work)
		return NOTIFY_DONE;

	memcpy(ndev_work->cmds, cmds, sizeof(ndev_work->cmds));
	for (i = 0; i < ARRAY_SIZE(ndev_work->cmds) && ndev_work->cmds[i].cb; i++) {
		if (!ndev_work->cmds[i].ndev)
			ndev_work->cmds[i].ndev = ndev;
		if (!ndev_work->cmds[i].filter_ndev)
			ndev_work->cmds[i].filter_ndev = ndev;
		dev_hold(ndev_work->cmds[i].ndev);
		dev_hold(ndev_work->cmds[i].filter_ndev);
	}
	INIT_WORK(&ndev_work->work, netdevice_event_work_handler);

	queue_work(gid_cache_wq, &ndev_work->work);

	return NOTIFY_DONE;
}
static const struct netdev_event_work_cmd add_cmd = {
	.cb	= add_netdev_ips,
	.filter	= is_eth_port_of_netdev_filter
};

static const struct netdev_event_work_cmd add_cmd_upper_ips = {
	.cb	= add_netdev_upper_ips,
	.filter	= is_eth_port_of_netdev_filter
};
static void
ndev_event_unlink(struct netdev_notifier_changeupper_info *changeupper_info,
		  struct netdev_event_work_cmd *cmds)
{
	static const struct netdev_event_work_cmd
			upper_ips_del_cmd = {
		.cb	= del_netdev_upper_ips,
		.filter	= upper_device_filter
	};

	cmds[0] = upper_ips_del_cmd;
	cmds[0].ndev = changeupper_info->upper_dev;
	cmds[1] = add_cmd;
}
static const struct netdev_event_work_cmd bonding_default_add_cmd = {
	.cb	= add_default_gids,
	.filter	= is_upper_ndev_bond_master_filter
};
static void
ndev_event_link(struct net_device *event_ndev,
		struct netdev_notifier_changeupper_info *changeupper_info,
		struct netdev_event_work_cmd *cmds)
{
	static const struct netdev_event_work_cmd
			bonding_default_del_cmd = {
		.cb	= del_default_gids,
		.filter	= is_upper_ndev_bond_master_filter
	};
	/*
	 * When a lower netdev is linked to its upper bonding
	 * netdev, delete lower slave netdev's default GIDs.
	 */
	cmds[0] = bonding_default_del_cmd;
	cmds[0].ndev = event_ndev;
	cmds[0].filter_ndev = changeupper_info->upper_dev;

	/* Now add bonding upper device default GIDs */
	cmds[1] = bonding_default_add_cmd;
	cmds[1].ndev = changeupper_info->upper_dev;
	cmds[1].filter_ndev = changeupper_info->upper_dev;

	/* Now add bonding upper device IP based GIDs */
	cmds[2] = add_cmd_upper_ips;
	cmds[2].ndev = changeupper_info->upper_dev;
	cmds[2].filter_ndev = changeupper_info->upper_dev;
}
static void netdevice_event_changeupper(struct net_device *event_ndev,
		struct netdev_notifier_changeupper_info *changeupper_info,
		struct netdev_event_work_cmd *cmds)
{
	if (changeupper_info->linking)
		ndev_event_link(event_ndev, changeupper_info, cmds);
	else
		ndev_event_unlink(changeupper_info, cmds);
}

static const struct netdev_event_work_cmd add_default_gid_cmd = {
	.cb	= add_default_gids,
	.filter	= is_ndev_for_default_gid_filter,
};
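
/* Netdevice notifier: translate the event into a command array and
 * defer its execution to gid_cache_wq, so that the potentially
 * sleeping GID table updates do not run from the notifier call chain.
 */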
static int netdevice_event(struct notifier_block *this, unsigned long event,
			   void *ptr)
{
	static const struct netdev_event_work_cmd del_cmd = {
		.cb = del_netdev_ips, .filter = pass_all_filter};
	static const struct netdev_event_work_cmd
			bonding_default_del_cmd_join = {
		.cb	= del_netdev_default_ips_join,
		.filter	= is_eth_port_inactive_slave_filter
	};
	static const struct netdev_event_work_cmd
			netdev_del_cmd = {
		.cb	= del_netdev_ips,
		.filter	= is_eth_port_of_netdev_filter
	};
	static const struct netdev_event_work_cmd bonding_event_ips_del_cmd = {
		.cb = del_netdev_upper_ips, .filter = upper_device_filter};
	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
	struct netdev_event_work_cmd cmds[ROCE_NETDEV_CALLBACK_SZ] = { {NULL} };

	if (ndev->type != ARPHRD_ETHER)
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_REGISTER:
	case NETDEV_UP:
		cmds[0] = bonding_default_del_cmd_join;
		cmds[1] = add_default_gid_cmd;
		cmds[2] = add_cmd;
		break;

	case NETDEV_UNREGISTER:
		if (ndev->reg_state < NETREG_UNREGISTERED)
			cmds[0] = del_cmd;
		else
			return NOTIFY_DONE;
		break;

	case NETDEV_CHANGEADDR:
		cmds[0] = netdev_del_cmd;
		if (ndev->reg_state == NETREG_REGISTERED) {
			cmds[1] = add_default_gid_cmd;
			cmds[2] = add_cmd;
		}
		break;

	case NETDEV_CHANGEUPPER:
		netdevice_event_changeupper(ndev,
			container_of(ptr, struct netdev_notifier_changeupper_info, info),
			cmds);
		break;

	case NETDEV_BONDING_FAILOVER:
		cmds[0] = bonding_event_ips_del_cmd;
		/* Add default GIDs of the bond device */
		cmds[1] = bonding_default_add_cmd;
		/* Add IP based GIDs of the bond device */
		cmds[2] = add_cmd_upper_ips;
		break;

	default:
		return NOTIFY_DONE;
	}

	return netdevice_queue_work(cmds, ndev);
}
static void update_gid_event_work_handler(struct work_struct *_work)
{
	struct update_gid_event_work *work =
		container_of(_work, struct update_gid_event_work, work);

	ib_enum_all_roce_netdevs(is_eth_port_of_netdev_filter,
				 work->gid_attr.ndev,
				 callback_for_addr_gid_device_scan, work);

	dev_put(work->gid_attr.ndev);
	kfree(work);
}
static int addr_event(struct notifier_block *this, unsigned long event,
		      struct sockaddr *sa, struct net_device *ndev)
{
	struct update_gid_event_work *work;
	enum gid_op_type gid_op;

	if (ndev->type != ARPHRD_ETHER)
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_UP:
		gid_op = GID_ADD;
		break;

	case NETDEV_DOWN:
		gid_op = GID_DEL;
		break;

	default:
		return NOTIFY_DONE;
	}

	work = kmalloc(sizeof(*work), GFP_ATOMIC);
	if (!work)
		return NOTIFY_DONE;

	INIT_WORK(&work->work, update_gid_event_work_handler);

	rdma_ip2gid(sa, &work->gid);
	work->gid_op = gid_op;

	memset(&work->gid_attr, 0, sizeof(work->gid_attr));
	dev_hold(ndev);
	work->gid_attr.ndev = ndev;

	queue_work(gid_cache_wq, &work->work);

	return NOTIFY_DONE;
}
static int inetaddr_event(struct notifier_block *this, unsigned long event,
			  void *ptr)
{
	struct sockaddr_in	in;
	struct net_device	*ndev;
	struct in_ifaddr	*ifa = ptr;

	in.sin_family = AF_INET;
	in.sin_addr.s_addr = ifa->ifa_address;
	ndev = ifa->ifa_dev->dev;

	return addr_event(this, event, (struct sockaddr *)&in, ndev);
}

static int inet6addr_event(struct notifier_block *this, unsigned long event,
			   void *ptr)
{
	struct sockaddr_in6	in6;
	struct net_device	*ndev;
	struct inet6_ifaddr	*ifa6 = ptr;

	in6.sin6_family = AF_INET6;
	in6.sin6_addr = ifa6->addr;
	ndev = ifa6->idev->dev;

	return addr_event(this, event, (struct sockaddr *)&in6, ndev);
}
static struct notifier_block nb_netdevice = {
	.notifier_call = netdevice_event
};

static struct notifier_block nb_inetaddr = {
	.notifier_call = inetaddr_event
};

static struct notifier_block nb_inet6addr = {
	.notifier_call = inet6addr_event
};
int __init roce_gid_mgmt_init(void)
{
	gid_cache_wq = alloc_ordered_workqueue("gid-cache-wq", 0);
	if (!gid_cache_wq)
		return -ENOMEM;

	register_inetaddr_notifier(&nb_inetaddr);
	if (IS_ENABLED(CONFIG_IPV6))
		register_inet6addr_notifier(&nb_inet6addr);
	/* We rely on the netdevice notifier to enumerate all
	 * existing devices in the system. Register to this notifier
	 * last to make sure we will not miss any IP add/del
	 * callbacks.
	 */
	register_netdevice_notifier(&nb_netdevice);

	return 0;
}
void __exit roce_gid_mgmt_cleanup(void)
{
	if (IS_ENABLED(CONFIG_IPV6))
		unregister_inet6addr_notifier(&nb_inet6addr);
	unregister_inetaddr_notifier(&nb_inetaddr);
	unregister_netdevice_notifier(&nb_netdevice);
	/* Ensure all gid deletion tasks complete before we go down,
	 * to avoid any reference to free'd memory. By the time
	 * ib-core is removed, all physical devices have been removed,
	 * so no issue with remaining hardware contexts.
	 */
	destroy_workqueue(gid_cache_wq);
}