/*
 * Copyright (c) 2015, Mellanox Technologies inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "core_priv.h"

#include <linux/in.h>
#include <linux/in6.h>

/* For in6_dev_get/in6_dev_put */
#include <net/addrconf.h>
#include <net/bonding.h>

#include <rdma/ib_cache.h>
#include <rdma/ib_addr.h>

static struct workqueue_struct *gid_cache_wq;

enum gid_op_type {
        GID_DEL = 0,
        GID_ADD
};

struct update_gid_event_work {
        struct work_struct work;
        union ib_gid       gid;
        struct ib_gid_attr gid_attr;
        enum gid_op_type gid_op;
};

#define ROCE_NETDEV_CALLBACK_SZ         3
struct netdev_event_work_cmd {
        roce_netdev_callback    cb;
        roce_netdev_filter      filter;
        struct net_device       *ndev;
        struct net_device       *filter_ndev;
};

struct netdev_event_work {
        struct work_struct              work;
        struct netdev_event_work_cmd    cmds[ROCE_NETDEV_CALLBACK_SZ];
};

static const struct {
        bool (*is_supported)(const struct ib_device *device, u8 port_num);
        enum ib_gid_type gid_type;
} PORT_CAP_TO_GID_TYPE[] = {
        {rdma_protocol_roce_eth_encap, IB_GID_TYPE_ROCE},
        {rdma_protocol_roce_udp_encap, IB_GID_TYPE_ROCE_UDP_ENCAP},
};

#define CAP_TO_GID_TABLE_SIZE   ARRAY_SIZE(PORT_CAP_TO_GID_TYPE)

unsigned long roce_gid_type_mask_support(struct ib_device *ib_dev, u8 port)
{
        int i;
        unsigned int ret_flags = 0;

        if (!rdma_protocol_roce(ib_dev, port))
                return 1UL << IB_GID_TYPE_IB;

        for (i = 0; i < CAP_TO_GID_TABLE_SIZE; i++)
                if (PORT_CAP_TO_GID_TYPE[i].is_supported(ib_dev, port))
                        ret_flags |= 1UL << PORT_CAP_TO_GID_TYPE[i].gid_type;

        return ret_flags;
}
EXPORT_SYMBOL(roce_gid_type_mask_support);

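/* Add or delete the given GID once for every GID type the port supports. */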
static void update_gid(enum gid_op_type gid_op, struct ib_device *ib_dev,
                       u8 port, union ib_gid *gid,
                       struct ib_gid_attr *gid_attr)
{
        int i;
        unsigned long gid_type_mask = roce_gid_type_mask_support(ib_dev, port);

        for (i = 0; i < IB_GID_TYPE_SIZE; i++) {
                if ((1UL << i) & gid_type_mask) {
                        gid_attr->gid_type = i;
                        switch (gid_op) {
                        case GID_ADD:
                                ib_cache_gid_add(ib_dev, port,
                                                 gid, gid_attr);
                                break;
                        case GID_DEL:
                                ib_cache_gid_del(ib_dev, port,
                                                 gid, gid_attr);
                                break;
                        }
                }
        }
}

enum bonding_slave_state {
        BONDING_SLAVE_STATE_ACTIVE      = 1UL << 0,
        BONDING_SLAVE_STATE_INACTIVE    = 1UL << 1,
        /* No primary slave or the device isn't a slave in bonding */
        BONDING_SLAVE_STATE_NA          = 1UL << 2,
};

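/*
 * Return the bonding state of @dev relative to @upper: active or inactive
 * slave when @upper is a bond master with an active slave configured,
 * otherwise BONDING_SLAVE_STATE_NA.
 */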
static enum bonding_slave_state is_eth_active_slave_of_bonding_rcu(struct net_device *dev,
                                                                   struct net_device *upper)
{
        if (upper && netif_is_bond_master(upper)) {
                struct net_device *pdev =
                        bond_option_active_slave_get_rcu(netdev_priv(upper));

                if (pdev)
                        return dev == pdev ? BONDING_SLAVE_STATE_ACTIVE :
                                BONDING_SLAVE_STATE_INACTIVE;
        }

        return BONDING_SLAVE_STATE_NA;
}

#define REQUIRED_BOND_STATES            (BONDING_SLAVE_STATE_ACTIVE |   \
                                         BONDING_SLAVE_STATE_NA)
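/*
 * Filter: the event netdev (cookie, possibly a VLAN device) belongs to this
 * RoCE port when its real device is rdma_ndev itself, or when it is stacked
 * above rdma_ndev and rdma_ndev is the active slave (or not a slave at all)
 * of the real device underneath it.
 */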
static int is_eth_port_of_netdev(struct ib_device *ib_dev, u8 port,
                                 struct net_device *rdma_ndev, void *cookie)
{
        struct net_device *real_dev;
        int res;

        if (!rdma_ndev)
                return 0;

        rcu_read_lock();
        real_dev = rdma_vlan_dev_real_dev(cookie);
        if (!real_dev)
                real_dev = cookie;

        res = ((rdma_is_upper_dev_rcu(rdma_ndev, cookie) &&
               (is_eth_active_slave_of_bonding_rcu(rdma_ndev, real_dev) &
                REQUIRED_BOND_STATES)) ||
               real_dev == rdma_ndev);

        rcu_read_unlock();
        return res;
}

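/* Filter: rdma_ndev is currently an inactive slave of a bond master. */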
static int is_eth_port_inactive_slave(struct ib_device *ib_dev, u8 port,
                                      struct net_device *rdma_ndev, void *cookie)
{
        struct net_device *master_dev;
        int res;

        if (!rdma_ndev)
                return 0;

        rcu_read_lock();
        master_dev = netdev_master_upper_dev_get_rcu(rdma_ndev);
        res = is_eth_active_slave_of_bonding_rcu(rdma_ndev, master_dev) ==
                BONDING_SLAVE_STATE_INACTIVE;
        rcu_read_unlock();

        return res;
}

static int pass_all_filter(struct ib_device *ib_dev, u8 port,
                           struct net_device *rdma_ndev, void *cookie)
{
        return 1;
}

static int upper_device_filter(struct ib_device *ib_dev, u8 port,
                               struct net_device *rdma_ndev, void *cookie)
{
        int res;

        if (!rdma_ndev)
                return 0;

        if (rdma_ndev == cookie)
                return 1;

        rcu_read_lock();
        res = rdma_is_upper_dev_rcu(rdma_ndev, cookie);
        rcu_read_unlock();

        return res;
}

static void update_gid_ip(enum gid_op_type gid_op,
                          struct ib_device *ib_dev,
                          u8 port, struct net_device *ndev,
                          struct sockaddr *addr)
{
        union ib_gid gid;
        struct ib_gid_attr gid_attr;

        rdma_ip2gid(addr, &gid);
        memset(&gid_attr, 0, sizeof(gid_attr));
        gid_attr.ndev = ndev;

        update_gid(gid_op, ib_dev, port, &gid, &gid_attr);
}

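/*
 * Set the default GIDs for rdma_ndev, but only when the event netdev is
 * rdma_ndev itself or an upper device of it, and rdma_ndev is not an
 * inactive bonding slave.
 */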
static void enum_netdev_default_gids(struct ib_device *ib_dev,
                                     u8 port, struct net_device *event_ndev,
                                     struct net_device *rdma_ndev)
{
        unsigned long gid_type_mask;

        rcu_read_lock();
        if (!rdma_ndev ||
            ((rdma_ndev != event_ndev &&
              !rdma_is_upper_dev_rcu(rdma_ndev, event_ndev)) ||
             is_eth_active_slave_of_bonding_rcu(rdma_ndev,
                                                netdev_master_upper_dev_get_rcu(rdma_ndev)) ==
             BONDING_SLAVE_STATE_INACTIVE)) {
                rcu_read_unlock();
                return;
        }
        rcu_read_unlock();

        gid_type_mask = roce_gid_type_mask_support(ib_dev, port);

        ib_cache_gid_set_default_gid(ib_dev, port, rdma_ndev, gid_type_mask,
                                     IB_CACHE_GID_DEFAULT_MODE_SET);
}

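/*
 * Delete rdma_ndev's default GIDs when it has become an inactive slave of
 * the bond that event_ndev (or its real device) represents.
 */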
static void bond_delete_netdev_default_gids(struct ib_device *ib_dev,
                                            u8 port,
                                            struct net_device *event_ndev,
                                            struct net_device *rdma_ndev)
{
        struct net_device *real_dev = rdma_vlan_dev_real_dev(event_ndev);

        if (!rdma_ndev)
                return;

        if (!real_dev)
                real_dev = event_ndev;

        rcu_read_lock();

        if (rdma_is_upper_dev_rcu(rdma_ndev, event_ndev) &&
            is_eth_active_slave_of_bonding_rcu(rdma_ndev, real_dev) ==
            BONDING_SLAVE_STATE_INACTIVE) {
                unsigned long gid_type_mask;

                rcu_read_unlock();

                gid_type_mask = roce_gid_type_mask_support(ib_dev, port);

                ib_cache_gid_set_default_gid(ib_dev, port, rdma_ndev,
                                             gid_type_mask,
                                             IB_CACHE_GID_DEFAULT_MODE_DELETE);
        } else {
                rcu_read_unlock();
        }
}

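/*
 * Snapshot ndev's IPv4 addresses into a local list under RCU, then add a
 * GID for each address outside the RCU read-side section.
 */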
static void enum_netdev_ipv4_ips(struct ib_device *ib_dev,
                                 u8 port, struct net_device *ndev)
{
        struct in_device *in_dev;
        struct sin_list {
                struct list_head        list;
                struct sockaddr_in      ip;
        };
        struct sin_list *sin_iter;
        struct sin_list *sin_temp;

        LIST_HEAD(sin_list);
        if (ndev->reg_state >= NETREG_UNREGISTERING)
                return;

        rcu_read_lock();
        in_dev = __in_dev_get_rcu(ndev);
        if (!in_dev) {
                rcu_read_unlock();
                return;
        }

        for_ifa(in_dev) {
                struct sin_list *entry = kzalloc(sizeof(*entry), GFP_ATOMIC);

                if (!entry)
                        continue;

                entry->ip.sin_family = AF_INET;
                entry->ip.sin_addr.s_addr = ifa->ifa_address;
                list_add_tail(&entry->list, &sin_list);
        }
        endfor_ifa(in_dev);
        rcu_read_unlock();

        list_for_each_entry_safe(sin_iter, sin_temp, &sin_list, list) {
                update_gid_ip(GID_ADD, ib_dev, port, ndev,
                              (struct sockaddr *)&sin_iter->ip);
                list_del(&sin_iter->list);
                kfree(sin_iter);
        }
}

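/*
 * Same as enum_netdev_ipv4_ips() but for ndev's IPv6 addresses, copied out
 * under the inet6_dev lock before the GIDs are added.
 */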
static void enum_netdev_ipv6_ips(struct ib_device *ib_dev,
                                 u8 port, struct net_device *ndev)
{
        struct inet6_ifaddr *ifp;
        struct inet6_dev *in6_dev;
        struct sin6_list {
                struct list_head        list;
                struct sockaddr_in6     sin6;
        };
        struct sin6_list *sin6_iter;
        struct sin6_list *sin6_temp;
        struct ib_gid_attr gid_attr = {.ndev = ndev};
        LIST_HEAD(sin6_list);

        if (ndev->reg_state >= NETREG_UNREGISTERING)
                return;

        in6_dev = in6_dev_get(ndev);
        if (!in6_dev)
                return;

        read_lock_bh(&in6_dev->lock);
        list_for_each_entry(ifp, &in6_dev->addr_list, if_list) {
                struct sin6_list *entry = kzalloc(sizeof(*entry), GFP_ATOMIC);

                if (!entry)
                        continue;

                entry->sin6.sin6_family = AF_INET6;
                entry->sin6.sin6_addr = ifp->addr;
                list_add_tail(&entry->list, &sin6_list);
        }
        read_unlock_bh(&in6_dev->lock);

        in6_dev_put(in6_dev);

        list_for_each_entry_safe(sin6_iter, sin6_temp, &sin6_list, list) {
                union ib_gid    gid;

                rdma_ip2gid((struct sockaddr *)&sin6_iter->sin6, &gid);
                update_gid(GID_ADD, ib_dev, port, &gid, &gid_attr);
                list_del(&sin6_iter->list);
                kfree(sin6_iter);
        }
}

static void _add_netdev_ips(struct ib_device *ib_dev, u8 port,
                            struct net_device *ndev)
{
        enum_netdev_ipv4_ips(ib_dev, port, ndev);
        if (IS_ENABLED(CONFIG_IPV6))
                enum_netdev_ipv6_ips(ib_dev, port, ndev);
}

static void add_netdev_ips(struct ib_device *ib_dev, u8 port,
                           struct net_device *rdma_ndev, void *cookie)
{
        enum_netdev_default_gids(ib_dev, port, cookie, rdma_ndev);
        _add_netdev_ips(ib_dev, port, cookie);
}

static void del_netdev_ips(struct ib_device *ib_dev, u8 port,
                           struct net_device *rdma_ndev, void *cookie)
{
        ib_cache_gid_del_all_netdev_gids(ib_dev, port, cookie);
}

static void enum_all_gids_of_dev_cb(struct ib_device *ib_dev,
                                    u8 port,
                                    struct net_device *rdma_ndev,
                                    void *cookie)
{
        struct net *net;
        struct net_device *ndev;

        /* Lock the rtnl to make sure the netdevs do not move under
         * our feet
         */
        rtnl_lock();
        for_each_net(net)
                for_each_netdev(net, ndev)
                        if (is_eth_port_of_netdev(ib_dev, port, rdma_ndev, ndev))
                                add_netdev_ips(ib_dev, port, rdma_ndev, ndev);
        rtnl_unlock();
}

/* This function will rescan all of the network devices in the system
 * and add their gids, as needed, to the relevant RoCE devices. */
int roce_rescan_device(struct ib_device *ib_dev)
{
        ib_enum_roce_netdev(ib_dev, pass_all_filter, NULL,
                            enum_all_gids_of_dev_cb, NULL);

        return 0;
}

static void callback_for_addr_gid_device_scan(struct ib_device *device,
                                              u8 port,
                                              struct net_device *rdma_ndev,
                                              void *cookie)
{
        struct update_gid_event_work *parsed = cookie;

        return update_gid(parsed->gid_op, device,
                          port, &parsed->gid,
                          &parsed->gid_attr);
}

struct upper_list {
        struct list_head list;
        struct net_device *upper;
};

static int netdev_upper_walk(struct net_device *upper, void *data)
{
        struct upper_list *entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
        struct list_head *upper_list = data;

        if (!entry)
                return 0;

        list_add_tail(&entry->list, upper_list);
        dev_hold(upper);
        entry->upper = upper;

        return 0;
}

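/*
 * Run handle_netdev() on the event netdev (cookie) and then on every device
 * stacked above it; references on the upper devices are held while the list
 * is built and dropped once each device has been handled.
 */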
static void handle_netdev_upper(struct ib_device *ib_dev, u8 port,
                                void *cookie,
                                void (*handle_netdev)(struct ib_device *ib_dev,
                                                      u8 port,
                                                      struct net_device *ndev))
{
        struct net_device *ndev = cookie;
        struct upper_list *upper_iter;
        struct upper_list *upper_temp;
        LIST_HEAD(upper_list);

        rcu_read_lock();
        netdev_walk_all_upper_dev_rcu(ndev, netdev_upper_walk, &upper_list);
        rcu_read_unlock();

        handle_netdev(ib_dev, port, ndev);
        list_for_each_entry_safe(upper_iter, upper_temp, &upper_list,
                                 list) {
                handle_netdev(ib_dev, port, upper_iter->upper);
                dev_put(upper_iter->upper);
                list_del(&upper_iter->list);
                kfree(upper_iter);
        }
}

static void _roce_del_all_netdev_gids(struct ib_device *ib_dev, u8 port,
                                      struct net_device *event_ndev)
{
        ib_cache_gid_del_all_netdev_gids(ib_dev, port, event_ndev);
}

static void del_netdev_upper_ips(struct ib_device *ib_dev, u8 port,
                                 struct net_device *rdma_ndev, void *cookie)
{
        handle_netdev_upper(ib_dev, port, cookie, _roce_del_all_netdev_gids);
}

static void add_netdev_upper_ips(struct ib_device *ib_dev, u8 port,
                                 struct net_device *rdma_ndev, void *cookie)
{
        handle_netdev_upper(ib_dev, port, cookie, _add_netdev_ips);
}

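/*
 * Look up rdma_ndev's bond master and, if rdma_ndev is an inactive slave of
 * it, delete rdma_ndev's default GIDs; a reference on the master is held
 * across the call.
 */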
static void del_netdev_default_ips_join(struct ib_device *ib_dev, u8 port,
                                        struct net_device *rdma_ndev,
                                        void *cookie)
{
        struct net_device *master_ndev;

        rcu_read_lock();
        master_ndev = netdev_master_upper_dev_get_rcu(rdma_ndev);
        if (master_ndev)
                dev_hold(master_ndev);
        rcu_read_unlock();

        if (master_ndev) {
                bond_delete_netdev_default_gids(ib_dev, port, master_ndev,
                                                rdma_ndev);
                dev_put(master_ndev);
        }
}

static void del_netdev_default_ips(struct ib_device *ib_dev, u8 port,
                                   struct net_device *rdma_ndev, void *cookie)
{
        bond_delete_netdev_default_gids(ib_dev, port, cookie, rdma_ndev);
}

/* The following functions operate on all IB devices. netdevice_event and
 * addr_event execute ib_enum_all_roce_netdevs through a work item.
 * ib_enum_all_roce_netdevs iterates through all IB devices.
 */

static void netdevice_event_work_handler(struct work_struct *_work)
{
        struct netdev_event_work *work =
                container_of(_work, struct netdev_event_work, work);
        unsigned int i;

        for (i = 0; i < ARRAY_SIZE(work->cmds) && work->cmds[i].cb; i++) {
                ib_enum_all_roce_netdevs(work->cmds[i].filter,
                                         work->cmds[i].filter_ndev,
                                         work->cmds[i].cb,
                                         work->cmds[i].ndev);
                dev_put(work->cmds[i].ndev);
                dev_put(work->cmds[i].filter_ndev);
        }

        kfree(work);
}

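/*
 * Copy the command array into a work item, take a reference on each netdev
 * involved, and queue the work on the ordered gid_cache_wq.
 */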
static int netdevice_queue_work(struct netdev_event_work_cmd *cmds,
                                struct net_device *ndev)
{
        unsigned int i;
        struct netdev_event_work *ndev_work =
                kmalloc(sizeof(*ndev_work), GFP_KERNEL);

        if (!ndev_work)
                return NOTIFY_DONE;

        memcpy(ndev_work->cmds, cmds, sizeof(ndev_work->cmds));
        for (i = 0; i < ARRAY_SIZE(ndev_work->cmds) && ndev_work->cmds[i].cb; i++) {
                if (!ndev_work->cmds[i].ndev)
                        ndev_work->cmds[i].ndev = ndev;
                if (!ndev_work->cmds[i].filter_ndev)
                        ndev_work->cmds[i].filter_ndev = ndev;
                dev_hold(ndev_work->cmds[i].ndev);
                dev_hold(ndev_work->cmds[i].filter_ndev);
        }
        INIT_WORK(&ndev_work->work, netdevice_event_work_handler);

        queue_work(gid_cache_wq, &ndev_work->work);

        return NOTIFY_DONE;
}

static const struct netdev_event_work_cmd add_cmd = {
        .cb = add_netdev_ips, .filter = is_eth_port_of_netdev};
static const struct netdev_event_work_cmd add_cmd_upper_ips = {
        .cb = add_netdev_upper_ips, .filter = is_eth_port_of_netdev};

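/*
 * Build the command list for NETDEV_CHANGEUPPER: on unlink, drop the GIDs
 * that belonged to the upper device and re-add the remaining ones; on link,
 * drop the default GIDs of now-inactive slaves and add the upper device's
 * IPs.
 */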
static void netdevice_event_changeupper(struct netdev_notifier_changeupper_info *changeupper_info,
                                        struct netdev_event_work_cmd *cmds)
{
        static const struct netdev_event_work_cmd upper_ips_del_cmd = {
                .cb = del_netdev_upper_ips, .filter = upper_device_filter};
        static const struct netdev_event_work_cmd bonding_default_del_cmd = {
                .cb = del_netdev_default_ips, .filter = is_eth_port_inactive_slave};

        if (changeupper_info->linking == false) {
                cmds[0] = upper_ips_del_cmd;
                cmds[0].ndev = changeupper_info->upper_dev;
                cmds[1] = add_cmd;
        } else {
                cmds[0] = bonding_default_del_cmd;
                cmds[0].ndev = changeupper_info->upper_dev;
                cmds[1] = add_cmd_upper_ips;
                cmds[1].ndev = changeupper_info->upper_dev;
                cmds[1].filter_ndev = changeupper_info->upper_dev;
        }
}

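/*
 * Netdevice notifier: translate each event of interest into up to
 * ROCE_NETDEV_CALLBACK_SZ commands and queue them for execution on every
 * RoCE port.
 */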
static int netdevice_event(struct notifier_block *this, unsigned long event,
                           void *ptr)
{
        static const struct netdev_event_work_cmd del_cmd = {
                .cb = del_netdev_ips, .filter = pass_all_filter};
        static const struct netdev_event_work_cmd bonding_default_del_cmd_join = {
                .cb = del_netdev_default_ips_join, .filter = is_eth_port_inactive_slave};
        static const struct netdev_event_work_cmd default_del_cmd = {
                .cb = del_netdev_default_ips, .filter = pass_all_filter};
        static const struct netdev_event_work_cmd bonding_event_ips_del_cmd = {
                .cb = del_netdev_upper_ips, .filter = upper_device_filter};
        struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
        struct netdev_event_work_cmd cmds[ROCE_NETDEV_CALLBACK_SZ] = { {NULL} };

        if (ndev->type != ARPHRD_ETHER)
                return NOTIFY_DONE;

        switch (event) {
        case NETDEV_REGISTER:
        case NETDEV_UP:
                cmds[0] = bonding_default_del_cmd_join;
                cmds[1] = add_cmd;
                break;

        case NETDEV_UNREGISTER:
                if (ndev->reg_state < NETREG_UNREGISTERED)
                        cmds[0] = del_cmd;
                else
                        return NOTIFY_DONE;
                break;

        case NETDEV_CHANGEADDR:
                cmds[0] = default_del_cmd;
                cmds[1] = add_cmd;
                break;

        case NETDEV_CHANGEUPPER:
                netdevice_event_changeupper(
                        container_of(ptr, struct netdev_notifier_changeupper_info, info),
                        cmds);
                break;

        case NETDEV_BONDING_FAILOVER:
                cmds[0] = bonding_event_ips_del_cmd;
                cmds[1] = bonding_default_del_cmd_join;
                cmds[2] = add_cmd_upper_ips;
                break;

        default:
                return NOTIFY_DONE;
        }

        return netdevice_queue_work(cmds, ndev);
}

static void update_gid_event_work_handler(struct work_struct *_work)
{
        struct update_gid_event_work *work =
                container_of(_work, struct update_gid_event_work, work);

        ib_enum_all_roce_netdevs(is_eth_port_of_netdev, work->gid_attr.ndev,
                                 callback_for_addr_gid_device_scan, work);

        dev_put(work->gid_attr.ndev);
        kfree(work);
}

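/*
 * Common helper for the inetaddr/inet6addr notifiers: map NETDEV_UP/DOWN on
 * an Ethernet device to a GID add/del and defer the actual cache update to
 * gid_cache_wq.
 */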
static int addr_event(struct notifier_block *this, unsigned long event,
                      struct sockaddr *sa, struct net_device *ndev)
{
        struct update_gid_event_work *work;
        enum gid_op_type gid_op;

        if (ndev->type != ARPHRD_ETHER)
                return NOTIFY_DONE;

        switch (event) {
        case NETDEV_UP:
                gid_op = GID_ADD;
                break;

        case NETDEV_DOWN:
                gid_op = GID_DEL;
                break;

        default:
                return NOTIFY_DONE;
        }

        work = kmalloc(sizeof(*work), GFP_ATOMIC);
        if (!work)
                return NOTIFY_DONE;

        INIT_WORK(&work->work, update_gid_event_work_handler);

        rdma_ip2gid(sa, &work->gid);
        work->gid_op = gid_op;

        memset(&work->gid_attr, 0, sizeof(work->gid_attr));
        dev_hold(ndev);
        work->gid_attr.ndev   = ndev;

        queue_work(gid_cache_wq, &work->work);

        return NOTIFY_DONE;
}

static int inetaddr_event(struct notifier_block *this, unsigned long event,
                          void *ptr)
{
        struct sockaddr_in      in;
        struct net_device       *ndev;
        struct in_ifaddr        *ifa = ptr;

        in.sin_family = AF_INET;
        in.sin_addr.s_addr = ifa->ifa_address;
        ndev = ifa->ifa_dev->dev;

        return addr_event(this, event, (struct sockaddr *)&in, ndev);
}

static int inet6addr_event(struct notifier_block *this, unsigned long event,
                           void *ptr)
{
        struct sockaddr_in6     in6;
        struct net_device       *ndev;
        struct inet6_ifaddr     *ifa6 = ptr;

        in6.sin6_family = AF_INET6;
        in6.sin6_addr = ifa6->addr;
        ndev = ifa6->idev->dev;

        return addr_event(this, event, (struct sockaddr *)&in6, ndev);
}

static struct notifier_block nb_netdevice = {
        .notifier_call = netdevice_event
};

static struct notifier_block nb_inetaddr = {
        .notifier_call = inetaddr_event
};

static struct notifier_block nb_inet6addr = {
        .notifier_call = inet6addr_event
};

int __init roce_gid_mgmt_init(void)
{
        gid_cache_wq = alloc_ordered_workqueue("gid-cache-wq", 0);
        if (!gid_cache_wq)
                return -ENOMEM;

        register_inetaddr_notifier(&nb_inetaddr);
        if (IS_ENABLED(CONFIG_IPV6))
                register_inet6addr_notifier(&nb_inet6addr);
        /* We rely on the netdevice notifier to enumerate all
         * existing devices in the system. Register to this notifier
         * last to make sure we will not miss any IP add/del
         * callbacks.
         */
        register_netdevice_notifier(&nb_netdevice);

        return 0;
}

void __exit roce_gid_mgmt_cleanup(void)
{
        if (IS_ENABLED(CONFIG_IPV6))
                unregister_inet6addr_notifier(&nb_inet6addr);
        unregister_inetaddr_notifier(&nb_inetaddr);
        unregister_netdevice_notifier(&nb_netdevice);
        /* Ensure all gid deletion tasks complete before we go down,
         * to avoid any reference to free'd memory. By the time
         * ib-core is removed, all physical devices have been removed,
         * so no issue with remaining hardware contexts.
         */
        destroy_workqueue(gid_cache_wq);
}