/*
 * Copyright (C) 2017 Netronome Systems, Inc.
 *
 * This software is dual licensed under the GNU General Public License
 * Version 2, June 1991 as shown in the file COPYING in the top-level
 * directory of this source tree or the BSD 2-Clause License provided
 * below.  You have the option to license this software under the
 * complete terms of either license.
 *
 * The BSD 2-Clause License:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      1. Redistributions of source code must retain the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer.
 *
 *      2. Redistributions in binary form must reproduce the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer in the documentation and/or other materials
 *         provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/etherdevice.h>
#include <linux/inetdevice.h>
#include <net/netevent.h>
#include <linux/idr.h>
#include <net/dst_metadata.h>
#include <net/arp.h>

#include "cmsg.h"
#include "main.h"
#include "../nfp_net_repr.h"
#include "../nfp_net.h"

#define NFP_FL_MAX_ROUTES               32

/**
 * struct nfp_tun_active_tuns - periodic message of active tunnels
 * @seq:                sequence number of the message
 * @count:              number of tunnels reported in the message
 * @flags:              options part of the request
 * @tun_info.ipv4:              destination IPv4 address of the active route
 * @tun_info.egress_port:       port over which the encapsulated packet egressed
 * @tun_info.extra:             reserved for future use
 * @tun_info:           tunnels that have sent traffic in the reported period
 */
struct nfp_tun_active_tuns {
        __be32 seq;
        __be32 count;
        __be32 flags;
        struct route_ip_info {
                __be32 ipv4;
                __be32 egress_port;
                __be32 extra[2];
        } tun_info[];
};

/**
 * struct nfp_tun_neigh - neighbour/route entry on the NFP
 * @dst_ipv4:   destination IPv4 address
 * @src_ipv4:   source IPv4 address
 * @dst_addr:   destination MAC address
 * @src_addr:   source MAC address
 * @port_id:    NFP port to output the packet on - associated with source IPv4
 */
struct nfp_tun_neigh {
        __be32 dst_ipv4;
        __be32 src_ipv4;
        u8 dst_addr[ETH_ALEN];
        u8 src_addr[ETH_ALEN];
        __be32 port_id;
};

/**
 * struct nfp_tun_req_route_ipv4 - NFP requests a route/neighbour lookup
 * @ingress_port:       ingress port of the packet that signalled the request
 * @ipv4_addr:          destination IPv4 address for the route
 * @reserved:           reserved for future use
 */
struct nfp_tun_req_route_ipv4 {
        __be32 ingress_port;
        __be32 ipv4_addr;
        __be32 reserved[2];
};

/**
 * struct nfp_ipv4_route_entry - routes that are offloaded to the NFP
 * @ipv4_addr:  destination of the route
 * @list:       list pointer
 */
struct nfp_ipv4_route_entry {
        __be32 ipv4_addr;
        struct list_head list;
};

#define NFP_FL_IPV4_ADDRS_MAX        32

/**
 * struct nfp_tun_ipv4_addr - set the IP address list on the NFP
 * @count:      number of IPs populated in the array
 * @ipv4_addr:  array of up to NFP_FL_IPV4_ADDRS_MAX 32-bit IPv4 addresses
 */
struct nfp_tun_ipv4_addr {
        __be32 count;
        __be32 ipv4_addr[NFP_FL_IPV4_ADDRS_MAX];
};

/**
 * struct nfp_ipv4_addr_entry - cached IPv4 addresses
 * @ipv4_addr:  IP address
 * @ref_count:  number of rules currently using this IP
 * @list:       list pointer
 */
struct nfp_ipv4_addr_entry {
        __be32 ipv4_addr;
        int ref_count;
        struct list_head list;
};

/**
 * struct nfp_tun_mac_addr - configure MAC addresses of tunnel endpoints on NFP
 * @reserved:   reserved for future use
 * @count:      number of MAC addresses in the message
 * @addresses.index:    index of the MAC address in the lookup table
 * @addresses.addr:     interface MAC address
 * @addresses:  series of MACs to offload
 */
struct nfp_tun_mac_addr {
        __be16 reserved;
        __be16 count;
        struct index_mac_addr {
                __be16 index;
                u8 addr[ETH_ALEN];
        } addresses[];
};

/**
 * struct nfp_tun_mac_offload_entry - list of MACs to offload
 * @index:      index of the MAC address for offloading
 * @addr:       interface MAC address
 * @list:       list pointer
 */
struct nfp_tun_mac_offload_entry {
        __be16 index;
        u8 addr[ETH_ALEN];
        struct list_head list;
};

#define NFP_MAX_MAC_INDEX       0xff

/**
 * struct nfp_tun_mac_non_nfp_idx - maps a non-NFP netdev ifindex to an 8-bit id
 * @ifindex:    netdev ifindex of the device
 * @index:      index of the netdev's MAC address on the NFP
 * @list:       list pointer
 */
struct nfp_tun_mac_non_nfp_idx {
        int ifindex;
        u8 index;
        struct list_head list;
};

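/* Handler for the periodic active-tunnels message from the firmware.
 * For every tunnel destination reported as carrying traffic, refresh
 * the matching kernel neighbour entry so it is not aged out while the
 * datapath is offloaded.
 */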
void nfp_tunnel_keep_alive(struct nfp_app *app, struct sk_buff *skb)
{
        struct nfp_tun_active_tuns *payload;
        struct net_device *netdev;
        int count, i, pay_len;
        struct neighbour *n;
        __be32 ipv4_addr;
        u32 port;

        payload = nfp_flower_cmsg_get_data(skb);
        count = be32_to_cpu(payload->count);
        if (count > NFP_FL_MAX_ROUTES) {
                nfp_flower_cmsg_warn(app, "Tunnel keep-alive request exceeds max routes.\n");
                return;
        }

        pay_len = nfp_flower_cmsg_get_data_len(skb);
        if (pay_len != sizeof(struct nfp_tun_active_tuns) +
            sizeof(struct route_ip_info) * count) {
                nfp_flower_cmsg_warn(app, "Corruption in tunnel keep-alive message.\n");
                return;
        }

        rcu_read_lock();
        for (i = 0; i < count; i++) {
                ipv4_addr = payload->tun_info[i].ipv4;
                port = be32_to_cpu(payload->tun_info[i].egress_port);
                netdev = nfp_app_repr_get(app, port);
                if (!netdev)
                        continue;

                n = neigh_lookup(&arp_tbl, &ipv4_addr, netdev);
                if (!n)
                        continue;

                /* Update the used timestamp of neighbour */
                neigh_event_send(n, NULL);
                neigh_release(n);
        }
        rcu_read_unlock();
}

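/* Identify non-repr netdevs whose MACs should still be offloaded:
 * OvS internal ports and VXLAN devices may act as tunnel endpoints,
 * so the firmware needs their addresses to recognise tunnel traffic
 * destined to them.
 */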
static bool nfp_tun_is_netdev_to_offload(struct net_device *netdev)
{
        if (!netdev->rtnl_link_ops)
                return false;
        if (!strcmp(netdev->rtnl_link_ops->kind, "openvswitch"))
                return true;
        if (!strcmp(netdev->rtnl_link_ops->kind, "vxlan"))
                return true;

        return false;
}

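/* Allocate a control message of type @mtype, copy @plen bytes of
 * @pdata into its body and queue it on the control vNIC. @flag is
 * passed through to the skb allocation so atomic callers (e.g.
 * netevent notifiers) can use GFP_ATOMIC.
 */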
static int
nfp_flower_xmit_tun_conf(struct nfp_app *app, u8 mtype, u16 plen, void *pdata,
                         gfp_t flag)
{
        struct sk_buff *skb;
        unsigned char *msg;

        skb = nfp_flower_cmsg_alloc(app, plen, mtype, flag);
        if (!skb)
                return -ENOMEM;

        msg = nfp_flower_cmsg_get_data(skb);
        memcpy(msg, pdata, nfp_flower_cmsg_get_data_len(skb));

        nfp_ctrl_tx(app->ctrl, skb);
        return 0;
}

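/* nfp_neigh_off_list caches the destination IPs of neighbour entries
 * written to the NFP so only netevents relevant to offloaded routes
 * are acted upon. It is protected by a spinlock rather than a mutex
 * because it is accessed from atomic notifier context.
 */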
static bool nfp_tun_has_route(struct nfp_app *app, __be32 ipv4_addr)
{
        struct nfp_flower_priv *priv = app->priv;
        struct nfp_ipv4_route_entry *entry;
        struct list_head *ptr, *storage;

        spin_lock_bh(&priv->nfp_neigh_off_lock);
        list_for_each_safe(ptr, storage, &priv->nfp_neigh_off_list) {
                entry = list_entry(ptr, struct nfp_ipv4_route_entry, list);
                if (entry->ipv4_addr == ipv4_addr) {
                        spin_unlock_bh(&priv->nfp_neigh_off_lock);
                        return true;
                }
        }
        spin_unlock_bh(&priv->nfp_neigh_off_lock);
        return false;
}

static void nfp_tun_add_route_to_cache(struct nfp_app *app, __be32 ipv4_addr)
{
        struct nfp_flower_priv *priv = app->priv;
        struct nfp_ipv4_route_entry *entry;
        struct list_head *ptr, *storage;

        spin_lock_bh(&priv->nfp_neigh_off_lock);
        list_for_each_safe(ptr, storage, &priv->nfp_neigh_off_list) {
                entry = list_entry(ptr, struct nfp_ipv4_route_entry, list);
                if (entry->ipv4_addr == ipv4_addr) {
                        spin_unlock_bh(&priv->nfp_neigh_off_lock);
                        return;
                }
        }
        entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
        if (!entry) {
                spin_unlock_bh(&priv->nfp_neigh_off_lock);
                nfp_flower_cmsg_warn(app, "Mem error when storing new route.\n");
                return;
        }

        entry->ipv4_addr = ipv4_addr;
        list_add_tail(&entry->list, &priv->nfp_neigh_off_list);
        spin_unlock_bh(&priv->nfp_neigh_off_lock);
}

static void nfp_tun_del_route_from_cache(struct nfp_app *app, __be32 ipv4_addr)
{
        struct nfp_flower_priv *priv = app->priv;
        struct nfp_ipv4_route_entry *entry;
        struct list_head *ptr, *storage;

        spin_lock_bh(&priv->nfp_neigh_off_lock);
        list_for_each_safe(ptr, storage, &priv->nfp_neigh_off_list) {
                entry = list_entry(ptr, struct nfp_ipv4_route_entry, list);
                if (entry->ipv4_addr == ipv4_addr) {
                        list_del(&entry->list);
                        kfree(entry);
                        break;
                }
        }
        spin_unlock_bh(&priv->nfp_neigh_off_lock);
}

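/* Write the neighbour entry for @flow's destination to the NFP. If
 * the kernel entry is invalid or dead, a payload with only dst_ipv4
 * set (everything else zeroed) is sent, signalling the firmware that
 * the entry is no longer valid, and an ARP probe is triggered to
 * re-resolve it.
 */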
static void
nfp_tun_write_neigh(struct net_device *netdev, struct nfp_app *app,
                    struct flowi4 *flow, struct neighbour *neigh, gfp_t flag)
{
        struct nfp_tun_neigh payload;

        /* Only offload representor IPv4s for now. */
        if (!nfp_netdev_is_nfp_repr(netdev))
                return;

        memset(&payload, 0, sizeof(struct nfp_tun_neigh));
        payload.dst_ipv4 = flow->daddr;

        /* If entry has expired send dst IP with all other fields 0. */
        if (!(neigh->nud_state & NUD_VALID) || neigh->dead) {
                nfp_tun_del_route_from_cache(app, payload.dst_ipv4);
                /* Trigger ARP to verify invalid neighbour state. */
                neigh_event_send(neigh, NULL);
                goto send_msg;
        }

        /* Have a valid neighbour so populate rest of entry. */
        payload.src_ipv4 = flow->saddr;
        ether_addr_copy(payload.src_addr, netdev->dev_addr);
        neigh_ha_snapshot(payload.dst_addr, neigh, netdev);
        payload.port_id = cpu_to_be32(nfp_repr_get_port_id(netdev));
        /* Add destination of new route to NFP cache. */
        nfp_tun_add_route_to_cache(app, payload.dst_ipv4);

send_msg:
        nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_NEIGH,
                                 sizeof(struct nfp_tun_neigh),
                                 (unsigned char *)&payload, flag);
}

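/* Netevent notifier: on NETEVENT_NEIGH_UPDATE or NETEVENT_REDIRECT
 * for a destination already offloaded to the NFP, rewrite the
 * affected neighbour entry. Runs in atomic context, hence the
 * GFP_ATOMIC control message allocation.
 */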
static int
nfp_tun_neigh_event_handler(struct notifier_block *nb, unsigned long event,
                            void *ptr)
{
        struct nfp_flower_priv *app_priv;
        struct netevent_redirect *redir;
        struct flowi4 flow = {};
        struct neighbour *n;
        struct nfp_app *app;
        struct rtable *rt;
        int err;

        switch (event) {
        case NETEVENT_REDIRECT:
                redir = (struct netevent_redirect *)ptr;
                n = redir->neigh;
                break;
        case NETEVENT_NEIGH_UPDATE:
                n = (struct neighbour *)ptr;
                break;
        default:
                return NOTIFY_DONE;
        }

        flow.daddr = *(__be32 *)n->primary_key;

        /* Only concerned with route changes for representors. */
        if (!nfp_netdev_is_nfp_repr(n->dev))
                return NOTIFY_DONE;

        app_priv = container_of(nb, struct nfp_flower_priv, nfp_tun_neigh_nb);
        app = app_priv->app;

        /* Only concerned with changes to routes already added to NFP. */
        if (!nfp_tun_has_route(app, flow.daddr))
                return NOTIFY_DONE;

#if IS_ENABLED(CONFIG_INET)
        /* Do a route lookup to populate flow data. */
        rt = ip_route_output_key(dev_net(n->dev), &flow);
        err = PTR_ERR_OR_ZERO(rt);
        if (err)
                return NOTIFY_DONE;

        ip_rt_put(rt);
#else
        return NOTIFY_DONE;
#endif

        flow.flowi4_proto = IPPROTO_UDP;
        nfp_tun_write_neigh(n->dev, app, &flow, n, GFP_ATOMIC);

        return NOTIFY_OK;
}

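/* Handler for a route lookup request, sent by the firmware when it
 * lacks a neighbour entry for a tunnel destination. Resolve the route
 * and neighbour in the namespace of the ingress representor and reply
 * through nfp_tun_write_neigh().
 */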
void nfp_tunnel_request_route(struct nfp_app *app, struct sk_buff *skb)
{
        struct nfp_tun_req_route_ipv4 *payload;
        struct net_device *netdev;
        struct flowi4 flow = {};
        struct neighbour *n;
        struct rtable *rt;
        int err;

        payload = nfp_flower_cmsg_get_data(skb);

        rcu_read_lock();
        netdev = nfp_app_repr_get(app, be32_to_cpu(payload->ingress_port));
        if (!netdev)
                goto fail_rcu_unlock;

        flow.daddr = payload->ipv4_addr;
        flow.flowi4_proto = IPPROTO_UDP;

#if IS_ENABLED(CONFIG_INET)
        /* Do a route lookup on same namespace as ingress port. */
        rt = ip_route_output_key(dev_net(netdev), &flow);
        err = PTR_ERR_OR_ZERO(rt);
        if (err)
                goto fail_rcu_unlock;
#else
        goto fail_rcu_unlock;
#endif

        /* Get the neighbour entry for the lookup */
        n = dst_neigh_lookup(&rt->dst, &flow.daddr);
        ip_rt_put(rt);
        if (!n)
                goto fail_rcu_unlock;
        nfp_tun_write_neigh(n->dev, app, &flow, n, GFP_ATOMIC);
        neigh_release(n);
        rcu_read_unlock();
        return;

fail_rcu_unlock:
        rcu_read_unlock();
        nfp_flower_cmsg_warn(app, "Requested route not found.\n");
}

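/* Push the complete list of cached tunnel endpoint IPv4 addresses to
 * the firmware. The full list is rewritten on every add or delete
 * rather than sending incremental updates.
 */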
static void nfp_tun_write_ipv4_list(struct nfp_app *app)
{
        struct nfp_flower_priv *priv = app->priv;
        struct nfp_ipv4_addr_entry *entry;
        struct nfp_tun_ipv4_addr payload;
        struct list_head *ptr, *storage;
        int count;

        memset(&payload, 0, sizeof(struct nfp_tun_ipv4_addr));
        mutex_lock(&priv->nfp_ipv4_off_lock);
        count = 0;
        list_for_each_safe(ptr, storage, &priv->nfp_ipv4_off_list) {
                if (count >= NFP_FL_IPV4_ADDRS_MAX) {
                        mutex_unlock(&priv->nfp_ipv4_off_lock);
                        nfp_flower_cmsg_warn(app, "IPv4 offload exceeds limit.\n");
                        return;
                }
                entry = list_entry(ptr, struct nfp_ipv4_addr_entry, list);
                payload.ipv4_addr[count++] = entry->ipv4_addr;
        }
        payload.count = cpu_to_be32(count);
        mutex_unlock(&priv->nfp_ipv4_off_lock);

        nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_IPS,
                                 sizeof(struct nfp_tun_ipv4_addr),
                                 &payload, GFP_KERNEL);
}

void nfp_tunnel_add_ipv4_off(struct nfp_app *app, __be32 ipv4)
{
        struct nfp_flower_priv *priv = app->priv;
        struct nfp_ipv4_addr_entry *entry;
        struct list_head *ptr, *storage;

        mutex_lock(&priv->nfp_ipv4_off_lock);
        list_for_each_safe(ptr, storage, &priv->nfp_ipv4_off_list) {
                entry = list_entry(ptr, struct nfp_ipv4_addr_entry, list);
                if (entry->ipv4_addr == ipv4) {
                        entry->ref_count++;
                        mutex_unlock(&priv->nfp_ipv4_off_lock);
                        return;
                }
        }

        entry = kmalloc(sizeof(*entry), GFP_KERNEL);
        if (!entry) {
                mutex_unlock(&priv->nfp_ipv4_off_lock);
                nfp_flower_cmsg_warn(app, "Mem error when offloading IP address.\n");
                return;
        }
        entry->ipv4_addr = ipv4;
        entry->ref_count = 1;
        list_add_tail(&entry->list, &priv->nfp_ipv4_off_list);
        mutex_unlock(&priv->nfp_ipv4_off_lock);

        nfp_tun_write_ipv4_list(app);
}

void nfp_tunnel_del_ipv4_off(struct nfp_app *app, __be32 ipv4)
{
        struct nfp_flower_priv *priv = app->priv;
        struct nfp_ipv4_addr_entry *entry;
        struct list_head *ptr, *storage;

        mutex_lock(&priv->nfp_ipv4_off_lock);
        list_for_each_safe(ptr, storage, &priv->nfp_ipv4_off_list) {
                entry = list_entry(ptr, struct nfp_ipv4_addr_entry, list);
                if (entry->ipv4_addr == ipv4) {
                        entry->ref_count--;
                        if (!entry->ref_count) {
                                list_del(&entry->list);
                                kfree(entry);
                        }
                        break;
                }
        }
        mutex_unlock(&priv->nfp_ipv4_off_lock);

        nfp_tun_write_ipv4_list(app);
}

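/* Send all queued MAC addresses to the firmware in one message. On
 * success the pending list is flushed; on failure it is retained so a
 * later call can retry the write.
 */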
void nfp_tunnel_write_macs(struct nfp_app *app)
{
        struct nfp_flower_priv *priv = app->priv;
        struct nfp_tun_mac_offload_entry *entry;
        struct nfp_tun_mac_addr *payload;
        struct list_head *ptr, *storage;
        int mac_count, err, pay_size;

        mutex_lock(&priv->nfp_mac_off_lock);
        if (!priv->nfp_mac_off_count) {
                mutex_unlock(&priv->nfp_mac_off_lock);
                return;
        }

        pay_size = sizeof(struct nfp_tun_mac_addr) +
                   sizeof(struct index_mac_addr) * priv->nfp_mac_off_count;

        payload = kzalloc(pay_size, GFP_KERNEL);
        if (!payload) {
                mutex_unlock(&priv->nfp_mac_off_lock);
                return;
        }

        payload->count = cpu_to_be16(priv->nfp_mac_off_count);

        mac_count = 0;
        list_for_each_safe(ptr, storage, &priv->nfp_mac_off_list) {
                entry = list_entry(ptr, struct nfp_tun_mac_offload_entry,
                                   list);
                payload->addresses[mac_count].index = entry->index;
                ether_addr_copy(payload->addresses[mac_count].addr,
                                entry->addr);
                mac_count++;
        }

        err = nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_MAC,
                                       pay_size, payload, GFP_KERNEL);

        kfree(payload);

        if (err) {
                mutex_unlock(&priv->nfp_mac_off_lock);
                /* Write failed so retain list for future retry. */
                return;
        }

        /* If list was successfully offloaded, flush it. */
        list_for_each_safe(ptr, storage, &priv->nfp_mac_off_list) {
                entry = list_entry(ptr, struct nfp_tun_mac_offload_entry,
                                   list);
                list_del(&entry->list);
                kfree(entry);
        }

        priv->nfp_mac_off_count = 0;
        mutex_unlock(&priv->nfp_mac_off_lock);
}

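/* Map a non-NFP netdev ifindex to a small id for the MAC lookup
 * table. Ids come from an IDA; as the upper bound of ida_simple_get()
 * is exclusive, values range from 0 to NFP_MAX_MAC_INDEX - 1 and
 * always fit the 8-bit index field.
 */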
static int nfp_tun_get_mac_idx(struct nfp_app *app, int ifindex)
{
        struct nfp_flower_priv *priv = app->priv;
        struct nfp_tun_mac_non_nfp_idx *entry;
        struct list_head *ptr, *storage;
        int idx;

        mutex_lock(&priv->nfp_mac_index_lock);
        list_for_each_safe(ptr, storage, &priv->nfp_mac_index_list) {
                entry = list_entry(ptr, struct nfp_tun_mac_non_nfp_idx, list);
                if (entry->ifindex == ifindex) {
                        idx = entry->index;
                        mutex_unlock(&priv->nfp_mac_index_lock);
                        return idx;
                }
        }

        idx = ida_simple_get(&priv->nfp_mac_off_ids, 0,
                             NFP_MAX_MAC_INDEX, GFP_KERNEL);
        if (idx < 0) {
                mutex_unlock(&priv->nfp_mac_index_lock);
                return idx;
        }

        entry = kmalloc(sizeof(*entry), GFP_KERNEL);
        if (!entry) {
                mutex_unlock(&priv->nfp_mac_index_lock);
                return -ENOMEM;
        }
        entry->ifindex = ifindex;
        entry->index = idx;
        list_add_tail(&entry->list, &priv->nfp_mac_index_list);
        mutex_unlock(&priv->nfp_mac_index_lock);

        return idx;
}

static void nfp_tun_del_mac_idx(struct nfp_app *app, int ifindex)
{
        struct nfp_flower_priv *priv = app->priv;
        struct nfp_tun_mac_non_nfp_idx *entry;
        struct list_head *ptr, *storage;

        mutex_lock(&priv->nfp_mac_index_lock);
        list_for_each_safe(ptr, storage, &priv->nfp_mac_index_list) {
                entry = list_entry(ptr, struct nfp_tun_mac_non_nfp_idx, list);
                if (entry->ifindex == ifindex) {
                        ida_simple_remove(&priv->nfp_mac_off_ids,
                                          entry->index);
                        list_del(&entry->list);
                        kfree(entry);
                        break;
                }
        }
        mutex_unlock(&priv->nfp_mac_index_lock);
}

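/* Queue a netdev's MAC address for offload. The 16-bit index encodes
 * the port type in its low byte and a port-specific identifier
 * (physical port, vNIC id, or an IDA-allocated id for non-NFP
 * devices) above it.
 */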
static void nfp_tun_add_to_mac_offload_list(struct net_device *netdev,
                                            struct nfp_app *app)
{
        struct nfp_flower_priv *priv = app->priv;
        struct nfp_tun_mac_offload_entry *entry;
        u16 nfp_mac_idx;
        int port = 0;

        /* Check if MAC should be offloaded. */
        if (!is_valid_ether_addr(netdev->dev_addr))
                return;

        if (nfp_netdev_is_nfp_repr(netdev))
                port = nfp_repr_get_port_id(netdev);
        else if (!nfp_tun_is_netdev_to_offload(netdev))
                return;

        entry = kmalloc(sizeof(*entry), GFP_KERNEL);
        if (!entry) {
                nfp_flower_cmsg_warn(app, "Mem fail when offloading MAC.\n");
                return;
        }

        if (FIELD_GET(NFP_FLOWER_CMSG_PORT_TYPE, port) ==
            NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT) {
                nfp_mac_idx = port << 8 | NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT;
        } else if (FIELD_GET(NFP_FLOWER_CMSG_PORT_TYPE, port) ==
                   NFP_FLOWER_CMSG_PORT_TYPE_PCIE_PORT) {
                port = FIELD_GET(NFP_FLOWER_CMSG_PORT_VNIC, port);
                nfp_mac_idx = port << 8 | NFP_FLOWER_CMSG_PORT_TYPE_PCIE_PORT;
        } else {
                /* Must assign our own unique 8-bit index. */
                int idx = nfp_tun_get_mac_idx(app, netdev->ifindex);

                if (idx < 0) {
                        nfp_flower_cmsg_warn(app, "Can't assign non-repr MAC index.\n");
                        kfree(entry);
                        return;
                }
                nfp_mac_idx = idx << 8 | NFP_FLOWER_CMSG_PORT_TYPE_OTHER_PORT;
        }

        entry->index = cpu_to_be16(nfp_mac_idx);
        ether_addr_copy(entry->addr, netdev->dev_addr);

        mutex_lock(&priv->nfp_mac_off_lock);
        priv->nfp_mac_off_count++;
        list_add_tail(&entry->list, &priv->nfp_mac_off_list);
        mutex_unlock(&priv->nfp_mac_off_lock);
}

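/* Netdev notifier keeping the offloaded MAC list in sync: MACs are
 * (re)queued on register/up/address-change events, and the IDA index
 * of a non-NFP netdev is released on down/unregister.
 */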
static int nfp_tun_mac_event_handler(struct notifier_block *nb,
                                     unsigned long event, void *ptr)
{
        struct nfp_flower_priv *app_priv;
        struct net_device *netdev;
        struct nfp_app *app;

        if (event == NETDEV_DOWN || event == NETDEV_UNREGISTER) {
                app_priv = container_of(nb, struct nfp_flower_priv,
                                        nfp_tun_mac_nb);
                app = app_priv->app;
                netdev = netdev_notifier_info_to_dev(ptr);

                /* If non-nfp netdev then free its offload index. */
                if (nfp_tun_is_netdev_to_offload(netdev))
                        nfp_tun_del_mac_idx(app, netdev->ifindex);
        } else if (event == NETDEV_UP || event == NETDEV_CHANGEADDR ||
                   event == NETDEV_REGISTER) {
                app_priv = container_of(nb, struct nfp_flower_priv,
                                        nfp_tun_mac_nb);
                app = app_priv->app;
                netdev = netdev_notifier_info_to_dev(ptr);

                nfp_tun_add_to_mac_offload_list(netdev, app);

                /* Force a list write to keep NFP up to date. */
                nfp_tunnel_write_macs(app);
        }
        return NOTIFY_OK;
}

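/* Initialise tunnel offload state: set up the MAC, IPv4 and neighbour
 * caches, register the netdev and netevent notifiers, and walk the
 * netdevs already present in init_net so their MACs are queued for
 * offload.
 */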
int nfp_tunnel_config_start(struct nfp_app *app)
{
        struct nfp_flower_priv *priv = app->priv;
        struct net_device *netdev;
        int err;

        /* Initialise priv data for MAC offloading. */
        priv->nfp_mac_off_count = 0;
        mutex_init(&priv->nfp_mac_off_lock);
        INIT_LIST_HEAD(&priv->nfp_mac_off_list);
        priv->nfp_tun_mac_nb.notifier_call = nfp_tun_mac_event_handler;
        mutex_init(&priv->nfp_mac_index_lock);
        INIT_LIST_HEAD(&priv->nfp_mac_index_list);
        ida_init(&priv->nfp_mac_off_ids);

        /* Initialise priv data for IPv4 offloading. */
        mutex_init(&priv->nfp_ipv4_off_lock);
        INIT_LIST_HEAD(&priv->nfp_ipv4_off_list);

        /* Initialise priv data for neighbour offloading. */
        spin_lock_init(&priv->nfp_neigh_off_lock);
        INIT_LIST_HEAD(&priv->nfp_neigh_off_list);
        priv->nfp_tun_neigh_nb.notifier_call = nfp_tun_neigh_event_handler;

        err = register_netdevice_notifier(&priv->nfp_tun_mac_nb);
        if (err)
                goto err_free_mac_ida;

        err = register_netevent_notifier(&priv->nfp_tun_neigh_nb);
        if (err)
                goto err_unreg_mac_nb;

        /* Parse netdevs already registered for MACs that need to be offloaded. */
        rtnl_lock();
        for_each_netdev(&init_net, netdev)
                nfp_tun_add_to_mac_offload_list(netdev, app);
        rtnl_unlock();

        return 0;

err_unreg_mac_nb:
        unregister_netdevice_notifier(&priv->nfp_tun_mac_nb);
err_free_mac_ida:
        ida_destroy(&priv->nfp_mac_off_ids);
        return err;
}

void nfp_tunnel_config_stop(struct nfp_app *app)
{
        struct nfp_tun_mac_offload_entry *mac_entry;
        struct nfp_flower_priv *priv = app->priv;
        struct nfp_ipv4_route_entry *route_entry;
        struct nfp_tun_mac_non_nfp_idx *mac_idx;
        struct nfp_ipv4_addr_entry *ip_entry;
        struct list_head *ptr, *storage;

        unregister_netdevice_notifier(&priv->nfp_tun_mac_nb);
        unregister_netevent_notifier(&priv->nfp_tun_neigh_nb);

        /* Free any memory that may be occupied by MAC list. */
        list_for_each_safe(ptr, storage, &priv->nfp_mac_off_list) {
                mac_entry = list_entry(ptr, struct nfp_tun_mac_offload_entry,
                                       list);
                list_del(&mac_entry->list);
                kfree(mac_entry);
        }

        /* Free any memory that may be occupied by MAC index list. */
        list_for_each_safe(ptr, storage, &priv->nfp_mac_index_list) {
                mac_idx = list_entry(ptr, struct nfp_tun_mac_non_nfp_idx,
                                     list);
                list_del(&mac_idx->list);
                kfree(mac_idx);
        }

        ida_destroy(&priv->nfp_mac_off_ids);

        /* Free any memory that may be occupied by IPv4 list. */
        list_for_each_safe(ptr, storage, &priv->nfp_ipv4_off_list) {
                ip_entry = list_entry(ptr, struct nfp_ipv4_addr_entry, list);
                list_del(&ip_entry->list);
                kfree(ip_entry);
        }

        /* Free any memory that may be occupied by the route list. */
        list_for_each_safe(ptr, storage, &priv->nfp_neigh_off_list) {
                route_entry = list_entry(ptr, struct nfp_ipv4_route_entry,
                                         list);
                list_del(&route_entry->list);
                kfree(route_entry);
        }
}