GNU Linux-libre 4.19.286-gnu1
drivers/net/ethernet/netronome/nfp/flower/match.c
/*
 * Copyright (C) 2017 Netronome Systems, Inc.
 *
 * This software is dual licensed under the GNU General License Version 2,
 * June 1991 as shown in the file COPYING in the top-level directory of this
 * source tree or the BSD 2-Clause License provided below.  You have the
 * option to license this software under the complete terms of either license.
 *
 * The BSD 2-Clause License:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      1. Redistributions of source code must retain the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer.
 *
 *      2. Redistributions in binary form must reproduce the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer in the documentation and/or other materials
 *         provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/bitfield.h>
#include <net/pkt_cls.h>

#include "cmsg.h"
#include "main.h"

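/* nfp_flower_compile_meta_tci() - Build the metadata/TCI section of the key.
 * Records the key-layer bitmap and, if the rule matches on VLAN, the PCP and
 * VID fields of the TCI with the VLAN-present bit set. With @mask_version
 * true the fields are taken from @flow->mask so the same routine produces
 * the mask half of the entry.
 */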
static void
nfp_flower_compile_meta_tci(struct nfp_flower_meta_tci *frame,
                            struct tc_cls_flower_offload *flow, u8 key_type,
                            bool mask_version)
{
        struct fl_flow_key *target = mask_version ? flow->mask : flow->key;
        struct flow_dissector_key_vlan *flow_vlan;
        u16 tmp_tci;

        memset(frame, 0, sizeof(struct nfp_flower_meta_tci));
        /* Populate the metadata frame. */
        frame->nfp_flow_key_layer = key_type;
        frame->mask_id = ~0;

        if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
                flow_vlan = skb_flow_dissector_target(flow->dissector,
                                                      FLOW_DISSECTOR_KEY_VLAN,
                                                      target);
                /* Populate the tci field. */
                tmp_tci = NFP_FLOWER_MASK_VLAN_PRESENT;
                tmp_tci |= FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
                                      flow_vlan->vlan_priority) |
                           FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
                                      flow_vlan->vlan_id);
                frame->tci = cpu_to_be16(tmp_tci);
        }
}

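/* nfp_flower_compile_ext_meta() - Write the extended metadata word, i.e. the
 * second key-layer bitmap (key_layer_two), in big-endian form.
 */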
static void
nfp_flower_compile_ext_meta(struct nfp_flower_ext_meta *frame, u32 key_ext)
{
        frame->nfp_flow_key_layer2 = cpu_to_be32(key_ext);
}

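/* nfp_flower_compile_port() - Encode the ingress port. The mask version is
 * always an exact (all-ones) match; otherwise the port is either the tunnel
 * type tagged with NFP_FL_PORT_TYPE_TUN or the plain cmsg port id.
 */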
static int
nfp_flower_compile_port(struct nfp_flower_in_port *frame, u32 cmsg_port,
                        bool mask_version, enum nfp_flower_tun_type tun_type)
{
        if (mask_version) {
                frame->in_port = cpu_to_be32(~0);
                return 0;
        }

        if (tun_type)
                frame->in_port = cpu_to_be32(NFP_FL_PORT_TYPE_TUN | tun_type);
        else
                frame->in_port = cpu_to_be32(cmsg_port);

        return 0;
}

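/* nfp_flower_compile_mac() - Fill in the Ethernet source/destination
 * addresses and the MPLS label stack entry. If the rule only matches an MPLS
 * ethertype, just NFP_FLOWER_MASK_MPLS_Q is set to flag MPLS with no label
 * fields.
 */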
static void
nfp_flower_compile_mac(struct nfp_flower_mac_mpls *frame,
                       struct tc_cls_flower_offload *flow,
                       bool mask_version)
{
        struct fl_flow_key *target = mask_version ? flow->mask : flow->key;
        struct flow_dissector_key_eth_addrs *addr;

        memset(frame, 0, sizeof(struct nfp_flower_mac_mpls));

        if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
                addr = skb_flow_dissector_target(flow->dissector,
                                                 FLOW_DISSECTOR_KEY_ETH_ADDRS,
                                                 target);
                /* Populate mac frame. */
                ether_addr_copy(frame->mac_dst, &addr->dst[0]);
                ether_addr_copy(frame->mac_src, &addr->src[0]);
        }

        if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_MPLS)) {
                struct flow_dissector_key_mpls *mpls;
                u32 t_mpls;

                mpls = skb_flow_dissector_target(flow->dissector,
                                                 FLOW_DISSECTOR_KEY_MPLS,
                                                 target);

                t_mpls = FIELD_PREP(NFP_FLOWER_MASK_MPLS_LB, mpls->mpls_label) |
                         FIELD_PREP(NFP_FLOWER_MASK_MPLS_TC, mpls->mpls_tc) |
                         FIELD_PREP(NFP_FLOWER_MASK_MPLS_BOS, mpls->mpls_bos) |
                         NFP_FLOWER_MASK_MPLS_Q;

                frame->mpls_lse = cpu_to_be32(t_mpls);
        } else if (dissector_uses_key(flow->dissector,
                                      FLOW_DISSECTOR_KEY_BASIC)) {
                /* Check for mpls ether type and set NFP_FLOWER_MASK_MPLS_Q
                 * bit, which indicates an mpls ether type but without any
                 * mpls fields.
                 */
                struct flow_dissector_key_basic *key_basic;

                key_basic = skb_flow_dissector_target(flow->dissector,
                                                      FLOW_DISSECTOR_KEY_BASIC,
                                                      flow->key);
                if (key_basic->n_proto == cpu_to_be16(ETH_P_MPLS_UC) ||
                    key_basic->n_proto == cpu_to_be16(ETH_P_MPLS_MC))
                        frame->mpls_lse = cpu_to_be32(NFP_FLOWER_MASK_MPLS_Q);
        }
}

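/* nfp_flower_compile_tport() - Copy the L4 source and destination ports,
 * already big-endian in the dissector key, into the match entry.
 */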
static void
nfp_flower_compile_tport(struct nfp_flower_tp_ports *frame,
                         struct tc_cls_flower_offload *flow,
                         bool mask_version)
{
        struct fl_flow_key *target = mask_version ? flow->mask : flow->key;
        struct flow_dissector_key_ports *tp;

        memset(frame, 0, sizeof(struct nfp_flower_tp_ports));

        if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
                tp = skb_flow_dissector_target(flow->dissector,
                                               FLOW_DISSECTOR_KEY_PORTS,
                                               target);
                frame->port_src = tp->src;
                frame->port_dst = tp->dst;
        }
}

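/* nfp_flower_compile_ip_ext() - Shared IPv4/IPv6 extension fields: IP
 * protocol, TOS/TTL, TCP flags (FIN/SYN/RST/PSH/URG) and IP fragmentation
 * flags.
 */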
static void
nfp_flower_compile_ip_ext(struct nfp_flower_ip_ext *frame,
                          struct tc_cls_flower_offload *flow,
                          bool mask_version)
{
        struct fl_flow_key *target = mask_version ? flow->mask : flow->key;

        if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
                struct flow_dissector_key_basic *basic;

                basic = skb_flow_dissector_target(flow->dissector,
                                                  FLOW_DISSECTOR_KEY_BASIC,
                                                  target);
                frame->proto = basic->ip_proto;
        }

        if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_IP)) {
                struct flow_dissector_key_ip *flow_ip;

                flow_ip = skb_flow_dissector_target(flow->dissector,
                                                    FLOW_DISSECTOR_KEY_IP,
                                                    target);
                frame->tos = flow_ip->tos;
                frame->ttl = flow_ip->ttl;
        }

        if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_TCP)) {
                struct flow_dissector_key_tcp *tcp;
                u32 tcp_flags;

                tcp = skb_flow_dissector_target(flow->dissector,
                                                FLOW_DISSECTOR_KEY_TCP, target);
                tcp_flags = be16_to_cpu(tcp->flags);

                if (tcp_flags & TCPHDR_FIN)
                        frame->flags |= NFP_FL_TCP_FLAG_FIN;
                if (tcp_flags & TCPHDR_SYN)
                        frame->flags |= NFP_FL_TCP_FLAG_SYN;
                if (tcp_flags & TCPHDR_RST)
                        frame->flags |= NFP_FL_TCP_FLAG_RST;
                if (tcp_flags & TCPHDR_PSH)
                        frame->flags |= NFP_FL_TCP_FLAG_PSH;
                if (tcp_flags & TCPHDR_URG)
                        frame->flags |= NFP_FL_TCP_FLAG_URG;
        }

        if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
                struct flow_dissector_key_control *key;

                key = skb_flow_dissector_target(flow->dissector,
                                                FLOW_DISSECTOR_KEY_CONTROL,
                                                target);
                if (key->flags & FLOW_DIS_IS_FRAGMENT)
                        frame->flags |= NFP_FL_IP_FRAGMENTED;
                if (key->flags & FLOW_DIS_FIRST_FRAG)
                        frame->flags |= NFP_FL_IP_FRAG_FIRST;
        }
}

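/* nfp_flower_compile_ipv4() - IPv4 source/destination addresses plus the
 * shared IP extension fields.
 */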
static void
nfp_flower_compile_ipv4(struct nfp_flower_ipv4 *frame,
                        struct tc_cls_flower_offload *flow,
                        bool mask_version)
{
        struct fl_flow_key *target = mask_version ? flow->mask : flow->key;
        struct flow_dissector_key_ipv4_addrs *addr;

        memset(frame, 0, sizeof(struct nfp_flower_ipv4));

        if (dissector_uses_key(flow->dissector,
                               FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
                addr = skb_flow_dissector_target(flow->dissector,
                                                 FLOW_DISSECTOR_KEY_IPV4_ADDRS,
                                                 target);
                frame->ipv4_src = addr->src;
                frame->ipv4_dst = addr->dst;
        }

        nfp_flower_compile_ip_ext(&frame->ip_ext, flow, mask_version);
}

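/* nfp_flower_compile_ipv6() - IPv6 source/destination addresses plus the
 * shared IP extension fields.
 */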
static void
nfp_flower_compile_ipv6(struct nfp_flower_ipv6 *frame,
                        struct tc_cls_flower_offload *flow,
                        bool mask_version)
{
        struct fl_flow_key *target = mask_version ? flow->mask : flow->key;
        struct flow_dissector_key_ipv6_addrs *addr;

        memset(frame, 0, sizeof(struct nfp_flower_ipv6));

        if (dissector_uses_key(flow->dissector,
                               FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
                addr = skb_flow_dissector_target(flow->dissector,
                                                 FLOW_DISSECTOR_KEY_IPV6_ADDRS,
                                                 target);
                frame->ipv6_src = addr->src;
                frame->ipv6_dst = addr->dst;
        }

        nfp_flower_compile_ip_ext(&frame->ip_ext, flow, mask_version);
}

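/* nfp_flower_compile_geneve_opt() - Copy the raw Geneve option TLVs from the
 * dissector target (key or mask) into the supplied key buffer.
 */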
static int
nfp_flower_compile_geneve_opt(void *key_buf, struct tc_cls_flower_offload *flow,
                              bool mask_version)
{
        struct fl_flow_key *target = mask_version ? flow->mask : flow->key;
        struct flow_dissector_key_enc_opts *opts;

        opts = skb_flow_dissector_target(flow->dissector,
                                         FLOW_DISSECTOR_KEY_ENC_OPTS,
                                         target);
        memcpy(key_buf, opts->data, opts->len);

        return 0;
}

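/* nfp_flower_compile_ipv4_udp_tun() - Outer-header match for IPv4 UDP
 * tunnels (VXLAN/Geneve): tunnel id (VNI), outer source/destination
 * addresses and outer TOS/TTL.
 */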
static void
nfp_flower_compile_ipv4_udp_tun(struct nfp_flower_ipv4_udp_tun *frame,
                                struct tc_cls_flower_offload *flow,
                                bool mask_version)
{
        struct fl_flow_key *target = mask_version ? flow->mask : flow->key;
        struct flow_dissector_key_ipv4_addrs *tun_ips;
        struct flow_dissector_key_keyid *vni;
        struct flow_dissector_key_ip *ip;

        memset(frame, 0, sizeof(struct nfp_flower_ipv4_udp_tun));

        if (dissector_uses_key(flow->dissector,
                               FLOW_DISSECTOR_KEY_ENC_KEYID)) {
                u32 temp_vni;

                vni = skb_flow_dissector_target(flow->dissector,
                                                FLOW_DISSECTOR_KEY_ENC_KEYID,
                                                target);
                temp_vni = be32_to_cpu(vni->keyid) << NFP_FL_TUN_VNI_OFFSET;
                frame->tun_id = cpu_to_be32(temp_vni);
        }

        if (dissector_uses_key(flow->dissector,
                               FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
                tun_ips =
                   skb_flow_dissector_target(flow->dissector,
                                             FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
                                             target);
                frame->ip_src = tun_ips->src;
                frame->ip_dst = tun_ips->dst;
        }

        if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_ENC_IP)) {
                ip = skb_flow_dissector_target(flow->dissector,
                                               FLOW_DISSECTOR_KEY_ENC_IP,
                                               target);
                frame->tos = ip->tos;
                frame->ttl = ip->ttl;
        }
}

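/* nfp_flower_compile_flow_match() - Serialise the exact-match and mask
 * buffers of @nfp_flow section by section, following the key layers selected
 * in @key_ls. For tunnel matches the tunnel destination IP is also stored in
 * the rule and registered with the tunnel offload code.
 */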
int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow,
                                  struct nfp_fl_key_ls *key_ls,
                                  struct net_device *netdev,
                                  struct nfp_fl_payload *nfp_flow,
                                  enum nfp_flower_tun_type tun_type)
{
        struct nfp_repr *netdev_repr;
        int err;
        u8 *ext;
        u8 *msk;

        memset(nfp_flow->unmasked_data, 0, key_ls->key_size);
        memset(nfp_flow->mask_data, 0, key_ls->key_size);

        ext = nfp_flow->unmasked_data;
        msk = nfp_flow->mask_data;

        /* Populate Exact Metadata. */
        nfp_flower_compile_meta_tci((struct nfp_flower_meta_tci *)ext,
                                    flow, key_ls->key_layer, false);
        /* Populate Mask Metadata. */
        nfp_flower_compile_meta_tci((struct nfp_flower_meta_tci *)msk,
                                    flow, key_ls->key_layer, true);
        ext += sizeof(struct nfp_flower_meta_tci);
        msk += sizeof(struct nfp_flower_meta_tci);

        /* Populate Extended Metadata if Required. */
        if (NFP_FLOWER_LAYER_EXT_META & key_ls->key_layer) {
                nfp_flower_compile_ext_meta((struct nfp_flower_ext_meta *)ext,
                                            key_ls->key_layer_two);
                nfp_flower_compile_ext_meta((struct nfp_flower_ext_meta *)msk,
                                            key_ls->key_layer_two);
                ext += sizeof(struct nfp_flower_ext_meta);
                msk += sizeof(struct nfp_flower_ext_meta);
        }

        /* Populate Exact Port data. */
        err = nfp_flower_compile_port((struct nfp_flower_in_port *)ext,
                                      nfp_repr_get_port_id(netdev),
                                      false, tun_type);
        if (err)
                return err;

        /* Populate Mask Port Data. */
        err = nfp_flower_compile_port((struct nfp_flower_in_port *)msk,
                                      nfp_repr_get_port_id(netdev),
                                      true, tun_type);
        if (err)
                return err;

        ext += sizeof(struct nfp_flower_in_port);
        msk += sizeof(struct nfp_flower_in_port);

        if (NFP_FLOWER_LAYER_MAC & key_ls->key_layer) {
                /* Populate Exact MAC Data. */
                nfp_flower_compile_mac((struct nfp_flower_mac_mpls *)ext,
                                       flow, false);
                /* Populate Mask MAC Data. */
                nfp_flower_compile_mac((struct nfp_flower_mac_mpls *)msk,
                                       flow, true);
                ext += sizeof(struct nfp_flower_mac_mpls);
                msk += sizeof(struct nfp_flower_mac_mpls);
        }

        if (NFP_FLOWER_LAYER_TP & key_ls->key_layer) {
                /* Populate Exact TP Data. */
                nfp_flower_compile_tport((struct nfp_flower_tp_ports *)ext,
                                         flow, false);
                /* Populate Mask TP Data. */
                nfp_flower_compile_tport((struct nfp_flower_tp_ports *)msk,
                                         flow, true);
                ext += sizeof(struct nfp_flower_tp_ports);
                msk += sizeof(struct nfp_flower_tp_ports);
        }

        if (NFP_FLOWER_LAYER_IPV4 & key_ls->key_layer) {
                /* Populate Exact IPv4 Data. */
                nfp_flower_compile_ipv4((struct nfp_flower_ipv4 *)ext,
                                        flow, false);
                /* Populate Mask IPv4 Data. */
                nfp_flower_compile_ipv4((struct nfp_flower_ipv4 *)msk,
                                        flow, true);
                ext += sizeof(struct nfp_flower_ipv4);
                msk += sizeof(struct nfp_flower_ipv4);
        }

        if (NFP_FLOWER_LAYER_IPV6 & key_ls->key_layer) {
                /* Populate Exact IPv6 Data. */
                nfp_flower_compile_ipv6((struct nfp_flower_ipv6 *)ext,
                                        flow, false);
                /* Populate Mask IPv6 Data. */
                nfp_flower_compile_ipv6((struct nfp_flower_ipv6 *)msk,
                                        flow, true);
                ext += sizeof(struct nfp_flower_ipv6);
                msk += sizeof(struct nfp_flower_ipv6);
        }

        if (key_ls->key_layer & NFP_FLOWER_LAYER_VXLAN ||
            key_ls->key_layer_two & NFP_FLOWER_LAYER2_GENEVE) {
                __be32 tun_dst;

                /* Populate Exact IPv4 UDP Tunnel Data. */
                nfp_flower_compile_ipv4_udp_tun((void *)ext, flow, false);
                /* Populate Mask IPv4 UDP Tunnel Data. */
                nfp_flower_compile_ipv4_udp_tun((void *)msk, flow, true);
                tun_dst = ((struct nfp_flower_ipv4_udp_tun *)ext)->ip_dst;
                ext += sizeof(struct nfp_flower_ipv4_udp_tun);
                msk += sizeof(struct nfp_flower_ipv4_udp_tun);

                /* Configure tunnel end point MAC. */
                if (nfp_netdev_is_nfp_repr(netdev)) {
                        netdev_repr = netdev_priv(netdev);
                        nfp_tunnel_write_macs(netdev_repr->app);

                        /* Store the tunnel destination in the rule data.
                         * This must be present and be an exact match.
                         */
                        nfp_flow->nfp_tun_ipv4_addr = tun_dst;
                        nfp_tunnel_add_ipv4_off(netdev_repr->app, tun_dst);
                }

                if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_GENEVE_OP) {
                        err = nfp_flower_compile_geneve_opt(ext, flow, false);
                        if (err)
                                return err;

                        err = nfp_flower_compile_geneve_opt(msk, flow, true);
                        if (err)
                                return err;
                }
        }

        return 0;
}