GNU Linux-libre 4.14.290-gnu1
net/core/flow_dissector.c
1 #include <linux/kernel.h>
2 #include <linux/skbuff.h>
3 #include <linux/export.h>
4 #include <linux/ip.h>
5 #include <linux/ipv6.h>
6 #include <linux/if_vlan.h>
7 #include <net/dsa.h>
8 #include <net/ip.h>
9 #include <net/ipv6.h>
10 #include <net/gre.h>
11 #include <net/pptp.h>
12 #include <linux/igmp.h>
13 #include <linux/icmp.h>
14 #include <linux/sctp.h>
15 #include <linux/dccp.h>
16 #include <linux/if_tunnel.h>
17 #include <linux/if_pppox.h>
18 #include <linux/ppp_defs.h>
19 #include <linux/stddef.h>
20 #include <linux/if_ether.h>
21 #include <linux/mpls.h>
22 #include <linux/tcp.h>
23 #include <net/flow_dissector.h>
24 #include <scsi/fc/fc_fcoe.h>
25
26 static void dissector_set_key(struct flow_dissector *flow_dissector,
27                               enum flow_dissector_key_id key_id)
28 {
29         flow_dissector->used_keys |= (1 << key_id);
30 }
31
32 void skb_flow_dissector_init(struct flow_dissector *flow_dissector,
33                              const struct flow_dissector_key *key,
34                              unsigned int key_count)
35 {
36         unsigned int i;
37
38         memset(flow_dissector, 0, sizeof(*flow_dissector));
39
40         for (i = 0; i < key_count; i++, key++) {
41                 /* The caller must ensure that every key target offset is within
42                  * the boundaries of an unsigned short.
43                  */
44                 BUG_ON(key->offset > USHRT_MAX);
45                 BUG_ON(dissector_uses_key(flow_dissector,
46                                           key->key_id));
47
48                 dissector_set_key(flow_dissector, key->key_id);
49                 flow_dissector->offset[key->key_id] = key->offset;
50         }
51
52         /* Ensure that the dissector always includes the control and basic keys.
53          * That way we can avoid handling their absence in the fast path.
54          */
55         BUG_ON(!dissector_uses_key(flow_dissector,
56                                    FLOW_DISSECTOR_KEY_CONTROL));
57         BUG_ON(!dissector_uses_key(flow_dissector,
58                                    FLOW_DISSECTOR_KEY_BASIC));
59 }
60 EXPORT_SYMBOL(skb_flow_dissector_init);
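
/* A minimal usage sketch: a hypothetical caller that only wants the L4 ports
 * dissected into its own container.  "example_flow", "example_keys" and
 * "example_dissector_setup" are illustrative names, not kernel API; the only
 * hard requirements checked above are that the CONTROL and BASIC keys are
 * present and that every offset fits in an unsigned short.
 */
struct example_flow {
        struct flow_dissector_key_control control;
        struct flow_dissector_key_basic basic;
        struct flow_dissector_key_ports ports;
};

static const struct flow_dissector_key example_keys[] = {
        {
                .key_id = FLOW_DISSECTOR_KEY_CONTROL,
                .offset = offsetof(struct example_flow, control),
        },
        {
                .key_id = FLOW_DISSECTOR_KEY_BASIC,
                .offset = offsetof(struct example_flow, basic),
        },
        {
                .key_id = FLOW_DISSECTOR_KEY_PORTS,
                .offset = offsetof(struct example_flow, ports),
        },
};

static struct flow_dissector example_dissector __read_mostly;

static void __maybe_unused example_dissector_setup(void)
{
        /* BUG()s in skb_flow_dissector_init() if CONTROL or BASIC were missing */
        skb_flow_dissector_init(&example_dissector, example_keys,
                                ARRAY_SIZE(example_keys));
}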
61
62 /**
63  * skb_flow_get_be16 - extract be16 entity
64  * @skb: sk_buff to extract from
65  * @poff: offset to extract at
66  * @data: raw buffer pointer to the packet
67  * @hlen: packet header length
68  *
69  * The function will try to retrieve a be16 entity at
70  * offset poff
71  */
72 static __be16 skb_flow_get_be16(const struct sk_buff *skb, int poff,
73                                 void *data, int hlen)
74 {
75         __be16 *u, _u;
76
77         u = __skb_header_pointer(skb, poff, sizeof(_u), data, hlen, &_u);
78         if (u)
79                 return *u;
80
81         return 0;
82 }
83
84 /**
85  * __skb_flow_get_ports - extract the upper layer ports and return them
86  * @skb: sk_buff to extract the ports from
87  * @thoff: transport header offset
88  * @ip_proto: protocol for which to get port offset
89  * @data: raw buffer pointer to the packet, if NULL use skb->data
90  * @hlen: packet header length, if @data is NULL use skb_headlen(skb)
91  *
92  * The function will try to retrieve the ports at offset thoff + poff where poff
93  * is the protocol-specific port offset returned by proto_ports_offset().
94  */
95 __be32 __skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto,
96                             void *data, int hlen)
97 {
98         int poff = proto_ports_offset(ip_proto);
99
100         if (!data) {
101                 data = skb->data;
102                 hlen = skb_headlen(skb);
103         }
104
105         if (poff >= 0) {
106                 __be32 *ports, _ports;
107
108                 ports = __skb_header_pointer(skb, thoff + poff,
109                                              sizeof(_ports), data, hlen, &_ports);
110                 if (ports)
111                         return *ports;
112         }
113
114         return 0;
115 }
116 EXPORT_SYMBOL(__skb_flow_get_ports);
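
/* Usage sketch, assuming the caller already knows the transport header offset
 * and IP protocol (e.g. from a previous dissection).  The helper returns both
 * ports packed into one __be32 in wire order, so reading it back through
 * flow_dissector_key_ports recovers src/dst; a return of 0 means the protocol
 * has no known port offset or the header bytes could not be read.
 * "example_log_ports" is a hypothetical name.
 */
static void __maybe_unused example_log_ports(const struct sk_buff *skb,
                                             int thoff, u8 ip_proto)
{
        struct flow_dissector_key_ports key;

        key.ports = __skb_flow_get_ports(skb, thoff, ip_proto, NULL, 0);
        if (key.ports)
                pr_debug("sport=%u dport=%u\n",
                         ntohs(key.src), ntohs(key.dst));
}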
117
118 static enum flow_dissect_ret
119 __skb_flow_dissect_mpls(const struct sk_buff *skb,
120                         struct flow_dissector *flow_dissector,
121                         void *target_container, void *data, int nhoff, int hlen)
122 {
123         struct flow_dissector_key_keyid *key_keyid;
124         struct mpls_label *hdr, _hdr[2];
125         u32 entry, label;
126
127         if (!dissector_uses_key(flow_dissector,
128                                 FLOW_DISSECTOR_KEY_MPLS_ENTROPY) &&
129             !dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_MPLS))
130                 return FLOW_DISSECT_RET_OUT_GOOD;
131
132         hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data,
133                                    hlen, &_hdr);
134         if (!hdr)
135                 return FLOW_DISSECT_RET_OUT_BAD;
136
137         entry = ntohl(hdr[0].entry);
138         label = (entry & MPLS_LS_LABEL_MASK) >> MPLS_LS_LABEL_SHIFT;
139
140         if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_MPLS)) {
141                 struct flow_dissector_key_mpls *key_mpls;
142
143                 key_mpls = skb_flow_dissector_target(flow_dissector,
144                                                      FLOW_DISSECTOR_KEY_MPLS,
145                                                      target_container);
146                 key_mpls->mpls_label = label;
147                 key_mpls->mpls_ttl = (entry & MPLS_LS_TTL_MASK)
148                                         >> MPLS_LS_TTL_SHIFT;
149                 key_mpls->mpls_tc = (entry & MPLS_LS_TC_MASK)
150                                         >> MPLS_LS_TC_SHIFT;
151                 key_mpls->mpls_bos = (entry & MPLS_LS_S_MASK)
152                                         >> MPLS_LS_S_SHIFT;
153         }
154
155         if (label == MPLS_LABEL_ENTROPY) {
156                 key_keyid = skb_flow_dissector_target(flow_dissector,
157                                                       FLOW_DISSECTOR_KEY_MPLS_ENTROPY,
158                                                       target_container);
159                 key_keyid->keyid = hdr[1].entry & htonl(MPLS_LS_LABEL_MASK);
160         }
161         return FLOW_DISSECT_RET_OUT_GOOD;
162 }
163
164 static enum flow_dissect_ret
165 __skb_flow_dissect_arp(const struct sk_buff *skb,
166                        struct flow_dissector *flow_dissector,
167                        void *target_container, void *data, int nhoff, int hlen)
168 {
169         struct flow_dissector_key_arp *key_arp;
170         struct {
171                 unsigned char ar_sha[ETH_ALEN];
172                 unsigned char ar_sip[4];
173                 unsigned char ar_tha[ETH_ALEN];
174                 unsigned char ar_tip[4];
175         } *arp_eth, _arp_eth;
176         const struct arphdr *arp;
177         struct arphdr _arp;
178
179         if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_ARP))
180                 return FLOW_DISSECT_RET_OUT_GOOD;
181
182         arp = __skb_header_pointer(skb, nhoff, sizeof(_arp), data,
183                                    hlen, &_arp);
184         if (!arp)
185                 return FLOW_DISSECT_RET_OUT_BAD;
186
187         if (arp->ar_hrd != htons(ARPHRD_ETHER) ||
188             arp->ar_pro != htons(ETH_P_IP) ||
189             arp->ar_hln != ETH_ALEN ||
190             arp->ar_pln != 4 ||
191             (arp->ar_op != htons(ARPOP_REPLY) &&
192              arp->ar_op != htons(ARPOP_REQUEST)))
193                 return FLOW_DISSECT_RET_OUT_BAD;
194
195         arp_eth = __skb_header_pointer(skb, nhoff + sizeof(_arp),
196                                        sizeof(_arp_eth), data,
197                                        hlen, &_arp_eth);
198         if (!arp_eth)
199                 return FLOW_DISSECT_RET_OUT_BAD;
200
201         key_arp = skb_flow_dissector_target(flow_dissector,
202                                             FLOW_DISSECTOR_KEY_ARP,
203                                             target_container);
204
205         memcpy(&key_arp->sip, arp_eth->ar_sip, sizeof(key_arp->sip));
206         memcpy(&key_arp->tip, arp_eth->ar_tip, sizeof(key_arp->tip));
207
208         /* Only store the lower byte of the opcode;
209          * this covers ARPOP_REPLY and ARPOP_REQUEST.
210          */
211         key_arp->op = ntohs(arp->ar_op) & 0xff;
212
213         ether_addr_copy(key_arp->sha, arp_eth->ar_sha);
214         ether_addr_copy(key_arp->tha, arp_eth->ar_tha);
215
216         return FLOW_DISSECT_RET_OUT_GOOD;
217 }
218
219 static enum flow_dissect_ret
220 __skb_flow_dissect_gre(const struct sk_buff *skb,
221                        struct flow_dissector_key_control *key_control,
222                        struct flow_dissector *flow_dissector,
223                        void *target_container, void *data,
224                        __be16 *p_proto, int *p_nhoff, int *p_hlen,
225                        unsigned int flags)
226 {
227         struct flow_dissector_key_keyid *key_keyid;
228         struct gre_base_hdr *hdr, _hdr;
229         int offset = 0;
230         u16 gre_ver;
231
232         hdr = __skb_header_pointer(skb, *p_nhoff, sizeof(_hdr),
233                                    data, *p_hlen, &_hdr);
234         if (!hdr)
235                 return FLOW_DISSECT_RET_OUT_BAD;
236
237         /* Only look inside GRE without routing */
238         if (hdr->flags & GRE_ROUTING)
239                 return FLOW_DISSECT_RET_OUT_GOOD;
240
241         /* Only look inside GRE for versions 0 and 1 */
242         gre_ver = ntohs(hdr->flags & GRE_VERSION);
243         if (gre_ver > 1)
244                 return FLOW_DISSECT_RET_OUT_GOOD;
245
246         *p_proto = hdr->protocol;
247         if (gre_ver) {
248                 /* Version 1 must be PPTP, and the key flag must be set */
249                 if (!(*p_proto == GRE_PROTO_PPP && (hdr->flags & GRE_KEY)))
250                         return FLOW_DISSECT_RET_OUT_GOOD;
251         }
252
253         offset += sizeof(struct gre_base_hdr);
254
255         if (hdr->flags & GRE_CSUM)
256                 offset += sizeof(((struct gre_full_hdr *) 0)->csum) +
257                           sizeof(((struct gre_full_hdr *) 0)->reserved1);
258
259         if (hdr->flags & GRE_KEY) {
260                 const __be32 *keyid;
261                 __be32 _keyid;
262
263                 keyid = __skb_header_pointer(skb, *p_nhoff + offset,
264                                              sizeof(_keyid),
265                                              data, *p_hlen, &_keyid);
266                 if (!keyid)
267                         return FLOW_DISSECT_RET_OUT_BAD;
268
269                 if (dissector_uses_key(flow_dissector,
270                                        FLOW_DISSECTOR_KEY_GRE_KEYID)) {
271                         key_keyid = skb_flow_dissector_target(flow_dissector,
272                                                               FLOW_DISSECTOR_KEY_GRE_KEYID,
273                                                               target_container);
274                         if (gre_ver == 0)
275                                 key_keyid->keyid = *keyid;
276                         else
277                                 key_keyid->keyid = *keyid & GRE_PPTP_KEY_MASK;
278                 }
279                 offset += sizeof(((struct gre_full_hdr *) 0)->key);
280         }
281
282         if (hdr->flags & GRE_SEQ)
283                 offset += sizeof(((struct pptp_gre_header *) 0)->seq);
284
285         if (gre_ver == 0) {
286                 if (*p_proto == htons(ETH_P_TEB)) {
287                         const struct ethhdr *eth;
288                         struct ethhdr _eth;
289
290                         eth = __skb_header_pointer(skb, *p_nhoff + offset,
291                                                    sizeof(_eth),
292                                                    data, *p_hlen, &_eth);
293                         if (!eth)
294                                 return FLOW_DISSECT_RET_OUT_BAD;
295                         *p_proto = eth->h_proto;
296                         offset += sizeof(*eth);
297
298                         /* Cap headers that we access via pointers at the
299                          * end of the Ethernet header as our maximum alignment
300                          * at that point is only 2 bytes.
301                          */
302                         if (NET_IP_ALIGN)
303                                 *p_hlen = *p_nhoff + offset;
304                 }
305         } else { /* version 1, must be PPTP */
306                 u8 _ppp_hdr[PPP_HDRLEN];
307                 u8 *ppp_hdr;
308
309                 if (hdr->flags & GRE_ACK)
310                         offset += sizeof(((struct pptp_gre_header *) 0)->ack);
311
312                 ppp_hdr = __skb_header_pointer(skb, *p_nhoff + offset,
313                                                sizeof(_ppp_hdr),
314                                                data, *p_hlen, _ppp_hdr);
315                 if (!ppp_hdr)
316                         return FLOW_DISSECT_RET_OUT_BAD;
317
318                 switch (PPP_PROTOCOL(ppp_hdr)) {
319                 case PPP_IP:
320                         *p_proto = htons(ETH_P_IP);
321                         break;
322                 case PPP_IPV6:
323                         *p_proto = htons(ETH_P_IPV6);
324                         break;
325                 default:
326                         /* Could probably catch some more like MPLS */
327                         break;
328                 }
329
330                 offset += PPP_HDRLEN;
331         }
332
333         *p_nhoff += offset;
334         key_control->flags |= FLOW_DIS_ENCAPSULATION;
335         if (flags & FLOW_DISSECTOR_F_STOP_AT_ENCAP)
336                 return FLOW_DISSECT_RET_OUT_GOOD;
337
338         return FLOW_DISSECT_RET_PROTO_AGAIN;
339 }
340
341 static void
342 __skb_flow_dissect_tcp(const struct sk_buff *skb,
343                        struct flow_dissector *flow_dissector,
344                        void *target_container, void *data, int thoff, int hlen)
345 {
346         struct flow_dissector_key_tcp *key_tcp;
347         struct tcphdr *th, _th;
348
349         if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_TCP))
350                 return;
351
352         th = __skb_header_pointer(skb, thoff, sizeof(_th), data, hlen, &_th);
353         if (!th)
354                 return;
355
356         if (unlikely(__tcp_hdrlen(th) < sizeof(_th)))
357                 return;
358
359         key_tcp = skb_flow_dissector_target(flow_dissector,
360                                             FLOW_DISSECTOR_KEY_TCP,
361                                             target_container);
362         key_tcp->flags = (*(__be16 *) &tcp_flag_word(th) & htons(0x0FFF));
363 }
364
365 static void
366 __skb_flow_dissect_ipv4(const struct sk_buff *skb,
367                         struct flow_dissector *flow_dissector,
368                         void *target_container, void *data, const struct iphdr *iph)
369 {
370         struct flow_dissector_key_ip *key_ip;
371
372         if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_IP))
373                 return;
374
375         key_ip = skb_flow_dissector_target(flow_dissector,
376                                            FLOW_DISSECTOR_KEY_IP,
377                                            target_container);
378         key_ip->tos = iph->tos;
379         key_ip->ttl = iph->ttl;
380 }
381
382 static void
383 __skb_flow_dissect_ipv6(const struct sk_buff *skb,
384                         struct flow_dissector *flow_dissector,
385                         void *target_container, void *data, const struct ipv6hdr *iph)
386 {
387         struct flow_dissector_key_ip *key_ip;
388
389         if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_IP))
390                 return;
391
392         key_ip = skb_flow_dissector_target(flow_dissector,
393                                            FLOW_DISSECTOR_KEY_IP,
394                                            target_container);
395         key_ip->tos = ipv6_get_dsfield(iph);
396         key_ip->ttl = iph->hop_limit;
397 }
398
399 /* Maximum number of protocol headers that can be parsed in
400  * __skb_flow_dissect
401  */
402 #define MAX_FLOW_DISSECT_HDRS   15
403
404 static bool skb_flow_dissect_allowed(int *num_hdrs)
405 {
406         ++*num_hdrs;
407
408         return (*num_hdrs <= MAX_FLOW_DISSECT_HDRS);
409 }
410
411 /**
412  * __skb_flow_dissect - extract the flow_keys struct and return it
413  * @skb: sk_buff to extract the flow from, can be NULL if the rest are specified
414  * @flow_dissector: list of keys to dissect
415  * @target_container: target structure to put dissected values into
416  * @data: raw buffer pointer to the packet, if NULL use skb->data
417  * @proto: protocol for which to get the flow, if @data is NULL use skb->protocol
418  * @nhoff: network header offset, if @data is NULL use skb_network_offset(skb)
419  * @hlen: packet header length, if @data is NULL use skb_headlen(skb)
420  *
421  * The function will try to retrieve individual keys into the target specified
422  * by flow_dissector from either the skbuff or a raw buffer specified by the
423  * remaining parameters.
424  *
425  * Caller must take care of zeroing target container memory.
426  */
427 bool __skb_flow_dissect(const struct sk_buff *skb,
428                         struct flow_dissector *flow_dissector,
429                         void *target_container,
430                         void *data, __be16 proto, int nhoff, int hlen,
431                         unsigned int flags)
432 {
433         struct flow_dissector_key_control *key_control;
434         struct flow_dissector_key_basic *key_basic;
435         struct flow_dissector_key_addrs *key_addrs;
436         struct flow_dissector_key_ports *key_ports;
437         struct flow_dissector_key_icmp *key_icmp;
438         struct flow_dissector_key_tags *key_tags;
439         struct flow_dissector_key_vlan *key_vlan;
440         enum flow_dissect_ret fdret;
441         bool skip_vlan = false;
442         int num_hdrs = 0;
443         u8 ip_proto = 0;
444         bool ret;
445
446         if (!data) {
447                 data = skb->data;
448                 proto = skb_vlan_tag_present(skb) ?
449                          skb->vlan_proto : skb->protocol;
450                 nhoff = skb_network_offset(skb);
451                 hlen = skb_headlen(skb);
452 #if IS_ENABLED(CONFIG_NET_DSA)
453                 if (unlikely(skb->dev && netdev_uses_dsa(skb->dev) &&
454                              proto == htons(ETH_P_XDSA))) {
455                         const struct dsa_device_ops *ops;
456                         int offset = 0;
457
458                         ops = skb->dev->dsa_ptr->tag_ops;
459                         if (ops->flow_dissect &&
460                             !ops->flow_dissect(skb, &proto, &offset)) {
461                                 hlen -= offset;
462                                 nhoff += offset;
463                         }
464                 }
465 #endif
466         }
467
468         /* It is ensured by skb_flow_dissector_init() that the control key will
469          * always be present.
470          */
471         key_control = skb_flow_dissector_target(flow_dissector,
472                                                 FLOW_DISSECTOR_KEY_CONTROL,
473                                                 target_container);
474
475         /* It is ensured by skb_flow_dissector_init() that the basic key will
476          * always be present.
477          */
478         key_basic = skb_flow_dissector_target(flow_dissector,
479                                               FLOW_DISSECTOR_KEY_BASIC,
480                                               target_container);
481
482         if (dissector_uses_key(flow_dissector,
483                                FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
484                 struct ethhdr *eth = eth_hdr(skb);
485                 struct flow_dissector_key_eth_addrs *key_eth_addrs;
486
487                 key_eth_addrs = skb_flow_dissector_target(flow_dissector,
488                                                           FLOW_DISSECTOR_KEY_ETH_ADDRS,
489                                                           target_container);
490                 memcpy(key_eth_addrs, &eth->h_dest, sizeof(*key_eth_addrs));
491         }
492
493 proto_again:
494         fdret = FLOW_DISSECT_RET_CONTINUE;
495
496         switch (proto) {
497         case htons(ETH_P_IP): {
498                 const struct iphdr *iph;
499                 struct iphdr _iph;
500
501                 iph = __skb_header_pointer(skb, nhoff, sizeof(_iph), data, hlen, &_iph);
502                 if (!iph || iph->ihl < 5) {
503                         fdret = FLOW_DISSECT_RET_OUT_BAD;
504                         break;
505                 }
506
507                 nhoff += iph->ihl * 4;
508
509                 ip_proto = iph->protocol;
510
511                 if (dissector_uses_key(flow_dissector,
512                                        FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
513                         key_addrs = skb_flow_dissector_target(flow_dissector,
514                                                               FLOW_DISSECTOR_KEY_IPV4_ADDRS,
515                                                               target_container);
516
517                         memcpy(&key_addrs->v4addrs.src, &iph->saddr,
518                                sizeof(key_addrs->v4addrs.src));
519                         memcpy(&key_addrs->v4addrs.dst, &iph->daddr,
520                                sizeof(key_addrs->v4addrs.dst));
521                         key_control->addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
522                 }
523
524                 if (ip_is_fragment(iph)) {
525                         key_control->flags |= FLOW_DIS_IS_FRAGMENT;
526
527                         if (iph->frag_off & htons(IP_OFFSET)) {
528                                 fdret = FLOW_DISSECT_RET_OUT_GOOD;
529                                 break;
530                         } else {
531                                 key_control->flags |= FLOW_DIS_FIRST_FRAG;
532                                 if (!(flags &
533                                       FLOW_DISSECTOR_F_PARSE_1ST_FRAG)) {
534                                         fdret = FLOW_DISSECT_RET_OUT_GOOD;
535                                         break;
536                                 }
537                         }
538                 }
539
540                 __skb_flow_dissect_ipv4(skb, flow_dissector,
541                                         target_container, data, iph);
542
543                 if (flags & FLOW_DISSECTOR_F_STOP_AT_L3) {
544                         fdret = FLOW_DISSECT_RET_OUT_GOOD;
545                         break;
546                 }
547
548                 break;
549         }
550         case htons(ETH_P_IPV6): {
551                 const struct ipv6hdr *iph;
552                 struct ipv6hdr _iph;
553
554                 iph = __skb_header_pointer(skb, nhoff, sizeof(_iph), data, hlen, &_iph);
555                 if (!iph) {
556                         fdret = FLOW_DISSECT_RET_OUT_BAD;
557                         break;
558                 }
559
560                 ip_proto = iph->nexthdr;
561                 nhoff += sizeof(struct ipv6hdr);
562
563                 if (dissector_uses_key(flow_dissector,
564                                        FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
565                         key_addrs = skb_flow_dissector_target(flow_dissector,
566                                                               FLOW_DISSECTOR_KEY_IPV6_ADDRS,
567                                                               target_container);
568
569                         memcpy(&key_addrs->v6addrs.src, &iph->saddr,
570                                sizeof(key_addrs->v6addrs.src));
571                         memcpy(&key_addrs->v6addrs.dst, &iph->daddr,
572                                sizeof(key_addrs->v6addrs.dst));
573                         key_control->addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
574                 }
575
576                 if ((dissector_uses_key(flow_dissector,
577                                         FLOW_DISSECTOR_KEY_FLOW_LABEL) ||
578                      (flags & FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL)) &&
579                     ip6_flowlabel(iph)) {
580                         __be32 flow_label = ip6_flowlabel(iph);
581
582                         if (dissector_uses_key(flow_dissector,
583                                                FLOW_DISSECTOR_KEY_FLOW_LABEL)) {
584                                 key_tags = skb_flow_dissector_target(flow_dissector,
585                                                                      FLOW_DISSECTOR_KEY_FLOW_LABEL,
586                                                                      target_container);
587                                 key_tags->flow_label = ntohl(flow_label);
588                         }
589                         if (flags & FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL) {
590                                 fdret = FLOW_DISSECT_RET_OUT_GOOD;
591                                 break;
592                         }
593                 }
594
595                 __skb_flow_dissect_ipv6(skb, flow_dissector,
596                                         target_container, data, iph);
597
598                 if (flags & FLOW_DISSECTOR_F_STOP_AT_L3)
599                         fdret = FLOW_DISSECT_RET_OUT_GOOD;
600
601                 break;
602         }
603         case htons(ETH_P_8021AD):
604         case htons(ETH_P_8021Q): {
605                 const struct vlan_hdr *vlan;
606                 struct vlan_hdr _vlan;
607                 bool vlan_tag_present = skb && skb_vlan_tag_present(skb);
608
609                 if (vlan_tag_present)
610                         proto = skb->protocol;
611
612                 if (!vlan_tag_present || eth_type_vlan(skb->protocol)) {
613                         vlan = __skb_header_pointer(skb, nhoff, sizeof(_vlan),
614                                                     data, hlen, &_vlan);
615                         if (!vlan) {
616                                 fdret = FLOW_DISSECT_RET_OUT_BAD;
617                                 break;
618                         }
619
620                         proto = vlan->h_vlan_encapsulated_proto;
621                         nhoff += sizeof(*vlan);
622                         if (skip_vlan) {
623                                 fdret = FLOW_DISSECT_RET_PROTO_AGAIN;
624                                 break;
625                         }
626                 }
627
628                 skip_vlan = true;
629                 if (dissector_uses_key(flow_dissector,
630                                        FLOW_DISSECTOR_KEY_VLAN)) {
631                         key_vlan = skb_flow_dissector_target(flow_dissector,
632                                                              FLOW_DISSECTOR_KEY_VLAN,
633                                                              target_container);
634
635                         if (vlan_tag_present) {
636                                 key_vlan->vlan_id = skb_vlan_tag_get_id(skb);
637                                 key_vlan->vlan_priority =
638                                         (skb_vlan_tag_get_prio(skb) >> VLAN_PRIO_SHIFT);
639                         } else {
640                                 key_vlan->vlan_id = ntohs(vlan->h_vlan_TCI) &
641                                         VLAN_VID_MASK;
642                                 key_vlan->vlan_priority =
643                                         (ntohs(vlan->h_vlan_TCI) &
644                                          VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
645                         }
646                 }
647
648                 fdret = FLOW_DISSECT_RET_PROTO_AGAIN;
649                 break;
650         }
651         case htons(ETH_P_PPP_SES): {
652                 struct {
653                         struct pppoe_hdr hdr;
654                         __be16 proto;
655                 } *hdr, _hdr;
656                 hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data, hlen, &_hdr);
657                 if (!hdr) {
658                         fdret = FLOW_DISSECT_RET_OUT_BAD;
659                         break;
660                 }
661
662                 proto = hdr->proto;
663                 nhoff += PPPOE_SES_HLEN;
664                 switch (proto) {
665                 case htons(PPP_IP):
666                         proto = htons(ETH_P_IP);
667                         fdret = FLOW_DISSECT_RET_PROTO_AGAIN;
668                         break;
669                 case htons(PPP_IPV6):
670                         proto = htons(ETH_P_IPV6);
671                         fdret = FLOW_DISSECT_RET_PROTO_AGAIN;
672                         break;
673                 default:
674                         fdret = FLOW_DISSECT_RET_OUT_BAD;
675                         break;
676                 }
677                 break;
678         }
679         case htons(ETH_P_TIPC): {
680                 struct {
681                         __be32 pre[3];
682                         __be32 srcnode;
683                 } *hdr, _hdr;
684                 hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data, hlen, &_hdr);
685                 if (!hdr) {
686                         fdret = FLOW_DISSECT_RET_OUT_BAD;
687                         break;
688                 }
689
690                 if (dissector_uses_key(flow_dissector,
691                                        FLOW_DISSECTOR_KEY_TIPC_ADDRS)) {
692                         key_addrs = skb_flow_dissector_target(flow_dissector,
693                                                               FLOW_DISSECTOR_KEY_TIPC_ADDRS,
694                                                               target_container);
695                         key_addrs->tipcaddrs.srcnode = hdr->srcnode;
696                         key_control->addr_type = FLOW_DISSECTOR_KEY_TIPC_ADDRS;
697                 }
698                 fdret = FLOW_DISSECT_RET_OUT_GOOD;
699                 break;
700         }
701
702         case htons(ETH_P_MPLS_UC):
703         case htons(ETH_P_MPLS_MC):
704                 fdret = __skb_flow_dissect_mpls(skb, flow_dissector,
705                                                 target_container, data,
706                                                 nhoff, hlen);
707                 break;
708         case htons(ETH_P_FCOE):
709                 if ((hlen - nhoff) < FCOE_HEADER_LEN) {
710                         fdret = FLOW_DISSECT_RET_OUT_BAD;
711                         break;
712                 }
713
714                 nhoff += FCOE_HEADER_LEN;
715                 fdret = FLOW_DISSECT_RET_OUT_GOOD;
716                 break;
717
718         case htons(ETH_P_ARP):
719         case htons(ETH_P_RARP):
720                 fdret = __skb_flow_dissect_arp(skb, flow_dissector,
721                                                target_container, data,
722                                                nhoff, hlen);
723                 break;
724
725         default:
726                 fdret = FLOW_DISSECT_RET_OUT_BAD;
727                 break;
728         }
729
730         /* Process result of proto processing */
731         switch (fdret) {
732         case FLOW_DISSECT_RET_OUT_GOOD:
733                 goto out_good;
734         case FLOW_DISSECT_RET_PROTO_AGAIN:
735                 if (skb_flow_dissect_allowed(&num_hdrs))
736                         goto proto_again;
737                 goto out_good;
738         case FLOW_DISSECT_RET_CONTINUE:
739         case FLOW_DISSECT_RET_IPPROTO_AGAIN:
740                 break;
741         case FLOW_DISSECT_RET_OUT_BAD:
742         default:
743                 goto out_bad;
744         }
745
746 ip_proto_again:
747         fdret = FLOW_DISSECT_RET_CONTINUE;
748
749         switch (ip_proto) {
750         case IPPROTO_GRE:
751                 fdret = __skb_flow_dissect_gre(skb, key_control, flow_dissector,
752                                                target_container, data,
753                                                &proto, &nhoff, &hlen, flags);
754                 break;
755
756         case NEXTHDR_HOP:
757         case NEXTHDR_ROUTING:
758         case NEXTHDR_DEST: {
759                 u8 _opthdr[2], *opthdr;
760
761                 if (proto != htons(ETH_P_IPV6))
762                         break;
763
764                 opthdr = __skb_header_pointer(skb, nhoff, sizeof(_opthdr),
765                                               data, hlen, &_opthdr);
766                 if (!opthdr) {
767                         fdret = FLOW_DISSECT_RET_OUT_BAD;
768                         break;
769                 }
770
771                 ip_proto = opthdr[0];
772                 nhoff += (opthdr[1] + 1) << 3;
773
774                 fdret = FLOW_DISSECT_RET_IPPROTO_AGAIN;
775                 break;
776         }
777         case NEXTHDR_FRAGMENT: {
778                 struct frag_hdr _fh, *fh;
779
780                 if (proto != htons(ETH_P_IPV6))
781                         break;
782
783                 fh = __skb_header_pointer(skb, nhoff, sizeof(_fh),
784                                           data, hlen, &_fh);
785
786                 if (!fh) {
787                         fdret = FLOW_DISSECT_RET_OUT_BAD;
788                         break;
789                 }
790
791                 key_control->flags |= FLOW_DIS_IS_FRAGMENT;
792
793                 nhoff += sizeof(_fh);
794                 ip_proto = fh->nexthdr;
795
796                 if (!(fh->frag_off & htons(IP6_OFFSET))) {
797                         key_control->flags |= FLOW_DIS_FIRST_FRAG;
798                         if (flags & FLOW_DISSECTOR_F_PARSE_1ST_FRAG) {
799                                 fdret = FLOW_DISSECT_RET_IPPROTO_AGAIN;
800                                 break;
801                         }
802                 }
803
804                 fdret = FLOW_DISSECT_RET_OUT_GOOD;
805                 break;
806         }
807         case IPPROTO_IPIP:
808                 proto = htons(ETH_P_IP);
809
810                 key_control->flags |= FLOW_DIS_ENCAPSULATION;
811                 if (flags & FLOW_DISSECTOR_F_STOP_AT_ENCAP) {
812                         fdret = FLOW_DISSECT_RET_OUT_GOOD;
813                         break;
814                 }
815
816                 fdret = FLOW_DISSECT_RET_PROTO_AGAIN;
817                 break;
818
819         case IPPROTO_IPV6:
820                 proto = htons(ETH_P_IPV6);
821
822                 key_control->flags |= FLOW_DIS_ENCAPSULATION;
823                 if (flags & FLOW_DISSECTOR_F_STOP_AT_ENCAP) {
824                         fdret = FLOW_DISSECT_RET_OUT_GOOD;
825                         break;
826                 }
827
828                 fdret = FLOW_DISSECT_RET_PROTO_AGAIN;
829                 break;
830
831
832         case IPPROTO_MPLS:
833                 proto = htons(ETH_P_MPLS_UC);
834                 fdret = FLOW_DISSECT_RET_PROTO_AGAIN;
835                 break;
836
837         case IPPROTO_TCP:
838                 __skb_flow_dissect_tcp(skb, flow_dissector, target_container,
839                                        data, nhoff, hlen);
840                 break;
841
842         default:
843                 break;
844         }
845
846         if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_PORTS) &&
847             !(key_control->flags & FLOW_DIS_IS_FRAGMENT)) {
848                 key_ports = skb_flow_dissector_target(flow_dissector,
849                                                       FLOW_DISSECTOR_KEY_PORTS,
850                                                       target_container);
851                 key_ports->ports = __skb_flow_get_ports(skb, nhoff, ip_proto,
852                                                         data, hlen);
853         }
854
855         if (dissector_uses_key(flow_dissector,
856                                FLOW_DISSECTOR_KEY_ICMP)) {
857                 key_icmp = skb_flow_dissector_target(flow_dissector,
858                                                      FLOW_DISSECTOR_KEY_ICMP,
859                                                      target_container);
860                 key_icmp->icmp = skb_flow_get_be16(skb, nhoff, data, hlen);
861         }
862
863         /* Process result of IP proto processing */
864         switch (fdret) {
865         case FLOW_DISSECT_RET_PROTO_AGAIN:
866                 if (skb_flow_dissect_allowed(&num_hdrs))
867                         goto proto_again;
868                 break;
869         case FLOW_DISSECT_RET_IPPROTO_AGAIN:
870                 if (skb_flow_dissect_allowed(&num_hdrs))
871                         goto ip_proto_again;
872                 break;
873         case FLOW_DISSECT_RET_OUT_GOOD:
874         case FLOW_DISSECT_RET_CONTINUE:
875                 break;
876         case FLOW_DISSECT_RET_OUT_BAD:
877         default:
878                 goto out_bad;
879         }
880
881 out_good:
882         ret = true;
883
884 out:
885         key_control->thoff = min_t(u16, nhoff, skb ? skb->len : hlen);
886         key_basic->n_proto = proto;
887         key_basic->ip_proto = ip_proto;
888
889         return ret;
890
891 out_bad:
892         ret = false;
893         goto out;
894 }
895 EXPORT_SYMBOL(__skb_flow_dissect);
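
/* Usage sketch: dissecting an skb into the generic struct flow_keys with the
 * default dissector defined at the bottom of this file.  This mirrors what
 * the skb_flow_dissect_flow_keys() wrapper in skbuff.h does; the target
 * container must be zeroed by the caller, as noted above.  "example_dissect_skb"
 * is a hypothetical name.
 */
static bool __maybe_unused example_dissect_skb(const struct sk_buff *skb,
                                               struct flow_keys *keys)
{
        memset(keys, 0, sizeof(*keys));
        return __skb_flow_dissect(skb, &flow_keys_dissector, keys,
                                  NULL, 0, 0, 0, 0);
}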
896
897 static siphash_key_t hashrnd __read_mostly;
898 static __always_inline void __flow_hash_secret_init(void)
899 {
900         net_get_random_once(&hashrnd, sizeof(hashrnd));
901 }
902
903 static const void *flow_keys_hash_start(const struct flow_keys *flow)
904 {
905         BUILD_BUG_ON(FLOW_KEYS_HASH_OFFSET % SIPHASH_ALIGNMENT);
906         return &flow->FLOW_KEYS_HASH_START_FIELD;
907 }
908
909 static inline size_t flow_keys_hash_length(const struct flow_keys *flow)
910 {
911         size_t len = offsetof(typeof(*flow), addrs) - FLOW_KEYS_HASH_OFFSET;
912
913         switch (flow->control.addr_type) {
914         case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
915                 len += sizeof(flow->addrs.v4addrs);
916                 break;
917         case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
918                 len += sizeof(flow->addrs.v6addrs);
919                 break;
920         case FLOW_DISSECTOR_KEY_TIPC_ADDRS:
921                 len += sizeof(flow->addrs.tipcaddrs);
922                 break;
923         }
924         return len;
925 }
926
927 __be32 flow_get_u32_src(const struct flow_keys *flow)
928 {
929         switch (flow->control.addr_type) {
930         case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
931                 return flow->addrs.v4addrs.src;
932         case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
933                 return (__force __be32)ipv6_addr_hash(
934                         &flow->addrs.v6addrs.src);
935         case FLOW_DISSECTOR_KEY_TIPC_ADDRS:
936                 return flow->addrs.tipcaddrs.srcnode;
937         default:
938                 return 0;
939         }
940 }
941 EXPORT_SYMBOL(flow_get_u32_src);
942
943 __be32 flow_get_u32_dst(const struct flow_keys *flow)
944 {
945         switch (flow->control.addr_type) {
946         case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
947                 return flow->addrs.v4addrs.dst;
948         case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
949                 return (__force __be32)ipv6_addr_hash(
950                         &flow->addrs.v6addrs.dst);
951         default:
952                 return 0;
953         }
954 }
955 EXPORT_SYMBOL(flow_get_u32_dst);
956
957 static inline void __flow_hash_consistentify(struct flow_keys *keys)
958 {
959         int addr_diff, i;
960
961         switch (keys->control.addr_type) {
962         case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
963                 addr_diff = (__force u32)keys->addrs.v4addrs.dst -
964                             (__force u32)keys->addrs.v4addrs.src;
965                 if ((addr_diff < 0) ||
966                     (addr_diff == 0 &&
967                      ((__force u16)keys->ports.dst <
968                       (__force u16)keys->ports.src))) {
969                         swap(keys->addrs.v4addrs.src, keys->addrs.v4addrs.dst);
970                         swap(keys->ports.src, keys->ports.dst);
971                 }
972                 break;
973         case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
974                 addr_diff = memcmp(&keys->addrs.v6addrs.dst,
975                                    &keys->addrs.v6addrs.src,
976                                    sizeof(keys->addrs.v6addrs.dst));
977                 if ((addr_diff < 0) ||
978                     (addr_diff == 0 &&
979                      ((__force u16)keys->ports.dst <
980                       (__force u16)keys->ports.src))) {
981                         for (i = 0; i < 4; i++)
982                                 swap(keys->addrs.v6addrs.src.s6_addr32[i],
983                                      keys->addrs.v6addrs.dst.s6_addr32[i]);
984                         swap(keys->ports.src, keys->ports.dst);
985                 }
986                 break;
987         }
988 }
989
990 static inline u32 __flow_hash_from_keys(struct flow_keys *keys,
991                                         const siphash_key_t *keyval)
992 {
993         u32 hash;
994
995         __flow_hash_consistentify(keys);
996
997         hash = siphash(flow_keys_hash_start(keys),
998                        flow_keys_hash_length(keys), keyval);
999         if (!hash)
1000                 hash = 1;
1001
1002         return hash;
1003 }
1004
1005 u32 flow_hash_from_keys(struct flow_keys *keys)
1006 {
1007         __flow_hash_secret_init();
1008         return __flow_hash_from_keys(keys, &hashrnd);
1009 }
1010 EXPORT_SYMBOL(flow_hash_from_keys);
1011
1012 static inline u32 ___skb_get_hash(const struct sk_buff *skb,
1013                                   struct flow_keys *keys,
1014                                   const siphash_key_t *keyval)
1015 {
1016         skb_flow_dissect_flow_keys(skb, keys,
1017                                    FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);
1018
1019         return __flow_hash_from_keys(keys, keyval);
1020 }
1021
1022 struct _flow_keys_digest_data {
1023         __be16  n_proto;
1024         u8      ip_proto;
1025         u8      padding;
1026         __be32  ports;
1027         __be32  src;
1028         __be32  dst;
1029 };
1030
1031 void make_flow_keys_digest(struct flow_keys_digest *digest,
1032                            const struct flow_keys *flow)
1033 {
1034         struct _flow_keys_digest_data *data =
1035             (struct _flow_keys_digest_data *)digest;
1036
1037         BUILD_BUG_ON(sizeof(*data) > sizeof(*digest));
1038
1039         memset(digest, 0, sizeof(*digest));
1040
1041         data->n_proto = flow->basic.n_proto;
1042         data->ip_proto = flow->basic.ip_proto;
1043         data->ports = flow->ports.ports;
1044         data->src = flow->addrs.v4addrs.src;
1045         data->dst = flow->addrs.v4addrs.dst;
1046 }
1047 EXPORT_SYMBOL(make_flow_keys_digest);
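
/* Usage sketch: the digest is a compact, fixed-size summary of a dissected
 * flow, handy for storing and later comparing flows with memcmp().
 * "example_same_flow" is a hypothetical helper name.
 */
static bool __maybe_unused example_same_flow(const struct flow_keys *flow,
                                             const struct flow_keys_digest *known)
{
        struct flow_keys_digest digest;

        make_flow_keys_digest(&digest, flow);
        return !memcmp(&digest, known, sizeof(digest));
}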
1048
1049 static struct flow_dissector flow_keys_dissector_symmetric __read_mostly;
1050
1051 u32 __skb_get_hash_symmetric(const struct sk_buff *skb)
1052 {
1053         struct flow_keys keys;
1054
1055         __flow_hash_secret_init();
1056
1057         memset(&keys, 0, sizeof(keys));
1058         __skb_flow_dissect(skb, &flow_keys_dissector_symmetric, &keys,
1059                            NULL, 0, 0, 0,
1060                            FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);
1061
1062         return __flow_hash_from_keys(&keys, &hashrnd);
1063 }
1064 EXPORT_SYMBOL_GPL(__skb_get_hash_symmetric);
1065
1066 /**
1067  * __skb_get_hash: calculate a flow hash
1068  * @skb: sk_buff to calculate flow hash from
1069  *
1070  * This function calculates a flow hash based on src/dst addresses
1071  * and src/dst port numbers.  Sets hash in skb to non-zero hash value
1072  * on success, zero indicates no valid hash.  Also, sets l4_hash in skb
1073  * if hash is a canonical 4-tuple hash over transport ports.
1074  */
1075 void __skb_get_hash(struct sk_buff *skb)
1076 {
1077         struct flow_keys keys;
1078         u32 hash;
1079
1080         __flow_hash_secret_init();
1081
1082         hash = ___skb_get_hash(skb, &keys, &hashrnd);
1083
1084         __skb_set_sw_hash(skb, hash, flow_keys_have_l4(&keys));
1085 }
1086 EXPORT_SYMBOL(__skb_get_hash);
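
/* Usage sketch: consumers normally go through skb_get_hash() (skbuff.h),
 * which returns the cached skb->hash when it is still valid and only falls
 * back to __skb_get_hash() above otherwise.  reciprocal_scale() then maps
 * the 32-bit hash onto a small range, e.g. a queue index.
 * "example_pick_queue" is a hypothetical name.
 */
static u32 __maybe_unused example_pick_queue(struct sk_buff *skb,
                                             unsigned int nr_queues)
{
        return reciprocal_scale(skb_get_hash(skb), nr_queues);
}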
1087
1088 __u32 skb_get_hash_perturb(const struct sk_buff *skb,
1089                            const siphash_key_t *perturb)
1090 {
1091         struct flow_keys keys;
1092
1093         return ___skb_get_hash(skb, &keys, perturb);
1094 }
1095 EXPORT_SYMBOL(skb_get_hash_perturb);
1096
1097 u32 __skb_get_poff(const struct sk_buff *skb, void *data,
1098                    const struct flow_keys *keys, int hlen)
1099 {
1100         u32 poff = keys->control.thoff;
1101
1102         /* skip L4 headers for fragments after the first */
1103         if ((keys->control.flags & FLOW_DIS_IS_FRAGMENT) &&
1104             !(keys->control.flags & FLOW_DIS_FIRST_FRAG))
1105                 return poff;
1106
1107         switch (keys->basic.ip_proto) {
1108         case IPPROTO_TCP: {
1109                 /* access doff as u8 to avoid unaligned access */
1110                 const u8 *doff;
1111                 u8 _doff;
1112
1113                 doff = __skb_header_pointer(skb, poff + 12, sizeof(_doff),
1114                                             data, hlen, &_doff);
1115                 if (!doff)
1116                         return poff;
1117
1118                 poff += max_t(u32, sizeof(struct tcphdr), (*doff & 0xF0) >> 2);
1119                 break;
1120         }
1121         case IPPROTO_UDP:
1122         case IPPROTO_UDPLITE:
1123                 poff += sizeof(struct udphdr);
1124                 break;
1125         /* For the rest, we do not really care about header
1126          * extensions at this point for now.
1127          */
1128         case IPPROTO_ICMP:
1129                 poff += sizeof(struct icmphdr);
1130                 break;
1131         case IPPROTO_ICMPV6:
1132                 poff += sizeof(struct icmp6hdr);
1133                 break;
1134         case IPPROTO_IGMP:
1135                 poff += sizeof(struct igmphdr);
1136                 break;
1137         case IPPROTO_DCCP:
1138                 poff += sizeof(struct dccp_hdr);
1139                 break;
1140         case IPPROTO_SCTP:
1141                 poff += sizeof(struct sctphdr);
1142                 break;
1143         }
1144
1145         return poff;
1146 }
1147
1148 /**
1149  * skb_get_poff - get the offset to the payload
1150  * @skb: sk_buff to get the payload offset from
1151  *
1152  * The function will get the offset to the payload as far as it could
1153  * be dissected.  The main user is currently BPF, so that we can dynamically
1154  * truncate packets without needing to push the actual payload to user
1155  * space, and can instead analyze headers only.
1156  */
1157 u32 skb_get_poff(const struct sk_buff *skb)
1158 {
1159         struct flow_keys keys;
1160
1161         if (!skb_flow_dissect_flow_keys(skb, &keys, 0))
1162                 return 0;
1163
1164         return __skb_get_poff(skb, skb->data, &keys, skb_headlen(skb));
1165 }
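
/* Usage sketch: bounding how many bytes of a packet are "headers", e.g. to
 * decide how much to copy for header-only inspection.  A return of 0 from
 * skb_get_poff() means the flow could not be dissected, so fall back to the
 * linear length.  "example_header_len" is a hypothetical name.
 */
static unsigned int __maybe_unused example_header_len(const struct sk_buff *skb)
{
        u32 poff = skb_get_poff(skb);

        return poff ? min_t(u32, poff, skb->len) : skb_headlen(skb);
}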
1166
1167 __u32 __get_hash_from_flowi6(const struct flowi6 *fl6, struct flow_keys *keys)
1168 {
1169         memset(keys, 0, sizeof(*keys));
1170
1171         memcpy(&keys->addrs.v6addrs.src, &fl6->saddr,
1172             sizeof(keys->addrs.v6addrs.src));
1173         memcpy(&keys->addrs.v6addrs.dst, &fl6->daddr,
1174             sizeof(keys->addrs.v6addrs.dst));
1175         keys->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
1176         keys->ports.src = fl6->fl6_sport;
1177         keys->ports.dst = fl6->fl6_dport;
1178         keys->keyid.keyid = fl6->fl6_gre_key;
1179         keys->tags.flow_label = (__force u32)flowi6_get_flowlabel(fl6);
1180         keys->basic.ip_proto = fl6->flowi6_proto;
1181
1182         return flow_hash_from_keys(keys);
1183 }
1184 EXPORT_SYMBOL(__get_hash_from_flowi6);
1185
1186 __u32 __get_hash_from_flowi4(const struct flowi4 *fl4, struct flow_keys *keys)
1187 {
1188         memset(keys, 0, sizeof(*keys));
1189
1190         keys->addrs.v4addrs.src = fl4->saddr;
1191         keys->addrs.v4addrs.dst = fl4->daddr;
1192         keys->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
1193         keys->ports.src = fl4->fl4_sport;
1194         keys->ports.dst = fl4->fl4_dport;
1195         keys->keyid.keyid = fl4->fl4_gre_key;
1196         keys->basic.ip_proto = fl4->flowi4_proto;
1197
1198         return flow_hash_from_keys(keys);
1199 }
1200 EXPORT_SYMBOL(__get_hash_from_flowi4);
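
/* Usage sketch: hashing a flowi4 lookup key with the same keyed hash that is
 * used for packets, e.g. to spread connections consistently.  A
 * get_hash_from_flowi4() wrapper in net/flow.h hides the scratch flow_keys;
 * the field values below are illustrative and "example_hash_flowi4" is a
 * hypothetical name.
 */
static u32 __maybe_unused example_hash_flowi4(__be32 saddr, __be32 daddr,
                                              __be16 sport, __be16 dport)
{
        struct flow_keys keys;
        struct flowi4 fl4 = {
                .saddr        = saddr,
                .daddr        = daddr,
                .fl4_sport    = sport,
                .fl4_dport    = dport,
                .flowi4_proto = IPPROTO_UDP,
        };

        return __get_hash_from_flowi4(&fl4, &keys);
}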
1201
1202 static const struct flow_dissector_key flow_keys_dissector_keys[] = {
1203         {
1204                 .key_id = FLOW_DISSECTOR_KEY_CONTROL,
1205                 .offset = offsetof(struct flow_keys, control),
1206         },
1207         {
1208                 .key_id = FLOW_DISSECTOR_KEY_BASIC,
1209                 .offset = offsetof(struct flow_keys, basic),
1210         },
1211         {
1212                 .key_id = FLOW_DISSECTOR_KEY_IPV4_ADDRS,
1213                 .offset = offsetof(struct flow_keys, addrs.v4addrs),
1214         },
1215         {
1216                 .key_id = FLOW_DISSECTOR_KEY_IPV6_ADDRS,
1217                 .offset = offsetof(struct flow_keys, addrs.v6addrs),
1218         },
1219         {
1220                 .key_id = FLOW_DISSECTOR_KEY_TIPC_ADDRS,
1221                 .offset = offsetof(struct flow_keys, addrs.tipcaddrs),
1222         },
1223         {
1224                 .key_id = FLOW_DISSECTOR_KEY_PORTS,
1225                 .offset = offsetof(struct flow_keys, ports),
1226         },
1227         {
1228                 .key_id = FLOW_DISSECTOR_KEY_VLAN,
1229                 .offset = offsetof(struct flow_keys, vlan),
1230         },
1231         {
1232                 .key_id = FLOW_DISSECTOR_KEY_FLOW_LABEL,
1233                 .offset = offsetof(struct flow_keys, tags),
1234         },
1235         {
1236                 .key_id = FLOW_DISSECTOR_KEY_GRE_KEYID,
1237                 .offset = offsetof(struct flow_keys, keyid),
1238         },
1239 };
1240
1241 static const struct flow_dissector_key flow_keys_dissector_symmetric_keys[] = {
1242         {
1243                 .key_id = FLOW_DISSECTOR_KEY_CONTROL,
1244                 .offset = offsetof(struct flow_keys, control),
1245         },
1246         {
1247                 .key_id = FLOW_DISSECTOR_KEY_BASIC,
1248                 .offset = offsetof(struct flow_keys, basic),
1249         },
1250         {
1251                 .key_id = FLOW_DISSECTOR_KEY_IPV4_ADDRS,
1252                 .offset = offsetof(struct flow_keys, addrs.v4addrs),
1253         },
1254         {
1255                 .key_id = FLOW_DISSECTOR_KEY_IPV6_ADDRS,
1256                 .offset = offsetof(struct flow_keys, addrs.v6addrs),
1257         },
1258         {
1259                 .key_id = FLOW_DISSECTOR_KEY_PORTS,
1260                 .offset = offsetof(struct flow_keys, ports),
1261         },
1262 };
1263
1264 static const struct flow_dissector_key flow_keys_buf_dissector_keys[] = {
1265         {
1266                 .key_id = FLOW_DISSECTOR_KEY_CONTROL,
1267                 .offset = offsetof(struct flow_keys, control),
1268         },
1269         {
1270                 .key_id = FLOW_DISSECTOR_KEY_BASIC,
1271                 .offset = offsetof(struct flow_keys, basic),
1272         },
1273 };
1274
1275 struct flow_dissector flow_keys_dissector __read_mostly;
1276 EXPORT_SYMBOL(flow_keys_dissector);
1277
1278 struct flow_dissector flow_keys_buf_dissector __read_mostly;
1279
1280 static int __init init_default_flow_dissectors(void)
1281 {
1282         skb_flow_dissector_init(&flow_keys_dissector,
1283                                 flow_keys_dissector_keys,
1284                                 ARRAY_SIZE(flow_keys_dissector_keys));
1285         skb_flow_dissector_init(&flow_keys_dissector_symmetric,
1286                                 flow_keys_dissector_symmetric_keys,
1287                                 ARRAY_SIZE(flow_keys_dissector_symmetric_keys));
1288         skb_flow_dissector_init(&flow_keys_buf_dissector,
1289                                 flow_keys_buf_dissector_keys,
1290                                 ARRAY_SIZE(flow_keys_buf_dissector_keys));
1291         return 0;
1292 }
1293
1294 core_initcall(init_default_flow_dissectors);