/*
 * Testsuite for eBPF verifier
 *
 * Copyright (c) 2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2017 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
13 #include <asm/types.h>
14 #include <linux/types.h>
26 #include <sys/capability.h>
28 #include <linux/unistd.h>
29 #include <linux/filter.h>
30 #include <linux/bpf_perf_event.h>
31 #include <linux/bpf.h>
32 #include <linux/if_ether.h>
37 # include "autoconf.h"
39 # if defined(__i386) || defined(__x86_64) || defined(__s390x__) || defined(__aarch64__)
40 # define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 1
43 #include "bpf_rlimit.h"
46 #include "../../../include/linux/filter.h"
48 #define MAX_INSNS BPF_MAXINSNS
51 #define POINTER_VALUE 0xcafe4all
52 #define TEST_DATA_LEN 64
54 #define F_NEEDS_EFFICIENT_UNALIGNED_ACCESS (1 << 0)
55 #define F_LOAD_WITH_STRICT_ALIGNMENT (1 << 1)
57 #define UNPRIV_SYSCTL "kernel/unprivileged_bpf_disabled"
58 static bool unpriv_disabled = false;
62 struct bpf_insn insns[MAX_INSNS];
63 int fixup_map1[MAX_FIXUPS];
64 int fixup_map2[MAX_FIXUPS];
65 int fixup_map3[MAX_FIXUPS];
66 int fixup_map4[MAX_FIXUPS];
67 int fixup_prog1[MAX_FIXUPS];
68 int fixup_prog2[MAX_FIXUPS];
69 int fixup_map_in_map[MAX_FIXUPS];
70 int fixup_cgroup_storage[MAX_FIXUPS];
72 const char *errstr_unpriv;
73 uint32_t retval, retval_unpriv;
78 } result, result_unpriv;
79 enum bpf_prog_type prog_type;
81 __u8 data[TEST_DATA_LEN];
82 void (*fill_helper)(struct bpf_test *self);
/* Note we want this to be 64 bit aligned so that the end of our array is
 * actually the end of the structure.
 */
88 #define MAX_ENTRIES 11
100 static void bpf_fill_ld_abs_vlan_push_pop(struct bpf_test *self)
102 /* test: {skb->data[0], vlan_push} x 68 + {skb->data[0], vlan_pop} x 68 */
104 unsigned int len = BPF_MAXINSNS;
105 struct bpf_insn *insn = self->insns;
108 insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
110 for (j = 0; j < PUSH_CNT; j++) {
111 insn[i++] = BPF_LD_ABS(BPF_B, 0);
112 insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x34, len - i - 2);
114 insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_6);
115 insn[i++] = BPF_MOV64_IMM(BPF_REG_2, 1);
116 insn[i++] = BPF_MOV64_IMM(BPF_REG_3, 2);
117 insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
118 BPF_FUNC_skb_vlan_push),
119 insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, len - i - 2);
123 for (j = 0; j < PUSH_CNT; j++) {
124 insn[i++] = BPF_LD_ABS(BPF_B, 0);
125 insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x34, len - i - 2);
127 insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_6);
128 insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
129 BPF_FUNC_skb_vlan_pop),
130 insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, len - i - 2);
136 for (; i < len - 1; i++)
137 insn[i] = BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 0xbef);
138 insn[len - 1] = BPF_EXIT_INSN();
141 static void bpf_fill_jump_around_ld_abs(struct bpf_test *self)
143 struct bpf_insn *insn = self->insns;
144 unsigned int len = BPF_MAXINSNS;
147 insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
148 insn[i++] = BPF_LD_ABS(BPF_B, 0);
149 insn[i] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 10, len - i - 2);
152 insn[i++] = BPF_LD_ABS(BPF_B, 1);
153 insn[i] = BPF_EXIT_INSN();
156 static void bpf_fill_rand_ld_dw(struct bpf_test *self)
158 struct bpf_insn *insn = self->insns;
162 insn[i++] = BPF_MOV32_IMM(BPF_REG_0, 0);
163 while (i < self->retval) {
164 uint64_t val = bpf_semi_rand_get();
165 struct bpf_insn tmp[2] = { BPF_LD_IMM64(BPF_REG_1, val) };
170 insn[i++] = BPF_ALU64_REG(BPF_XOR, BPF_REG_0, BPF_REG_1);
172 insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_0);
173 insn[i++] = BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 32);
174 insn[i++] = BPF_ALU64_REG(BPF_XOR, BPF_REG_0, BPF_REG_1);
175 insn[i] = BPF_EXIT_INSN();
177 self->retval = (uint32_t)res;
180 static struct bpf_test tests[] = {
184 BPF_MOV64_IMM(BPF_REG_1, 1),
185 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 2),
186 BPF_MOV64_IMM(BPF_REG_2, 3),
187 BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_2),
188 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -1),
189 BPF_ALU64_IMM(BPF_MUL, BPF_REG_1, 3),
190 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
197 "DIV32 by 0, zero check 1",
199 BPF_MOV32_IMM(BPF_REG_0, 42),
200 BPF_MOV32_IMM(BPF_REG_1, 0),
201 BPF_MOV32_IMM(BPF_REG_2, 1),
202 BPF_ALU32_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
209 "DIV32 by 0, zero check 2",
211 BPF_MOV32_IMM(BPF_REG_0, 42),
212 BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
213 BPF_MOV32_IMM(BPF_REG_2, 1),
214 BPF_ALU32_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
221 "DIV64 by 0, zero check",
223 BPF_MOV32_IMM(BPF_REG_0, 42),
224 BPF_MOV32_IMM(BPF_REG_1, 0),
225 BPF_MOV32_IMM(BPF_REG_2, 1),
226 BPF_ALU64_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
233 "MOD32 by 0, zero check 1",
235 BPF_MOV32_IMM(BPF_REG_0, 42),
236 BPF_MOV32_IMM(BPF_REG_1, 0),
237 BPF_MOV32_IMM(BPF_REG_2, 1),
238 BPF_ALU32_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
245 "MOD32 by 0, zero check 2",
247 BPF_MOV32_IMM(BPF_REG_0, 42),
248 BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
249 BPF_MOV32_IMM(BPF_REG_2, 1),
250 BPF_ALU32_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
257 "MOD64 by 0, zero check",
259 BPF_MOV32_IMM(BPF_REG_0, 42),
260 BPF_MOV32_IMM(BPF_REG_1, 0),
261 BPF_MOV32_IMM(BPF_REG_2, 1),
262 BPF_ALU64_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
269 "DIV32 by 0, zero check ok, cls",
271 BPF_MOV32_IMM(BPF_REG_0, 42),
272 BPF_MOV32_IMM(BPF_REG_1, 2),
273 BPF_MOV32_IMM(BPF_REG_2, 16),
274 BPF_ALU32_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
275 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
278 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
283 "DIV32 by 0, zero check 1, cls",
285 BPF_MOV32_IMM(BPF_REG_1, 0),
286 BPF_MOV32_IMM(BPF_REG_0, 1),
287 BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
290 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
295 "DIV32 by 0, zero check 2, cls",
297 BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
298 BPF_MOV32_IMM(BPF_REG_0, 1),
299 BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
302 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
307 "DIV64 by 0, zero check, cls",
309 BPF_MOV32_IMM(BPF_REG_1, 0),
310 BPF_MOV32_IMM(BPF_REG_0, 1),
311 BPF_ALU64_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
314 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
319 "MOD32 by 0, zero check ok, cls",
321 BPF_MOV32_IMM(BPF_REG_0, 42),
322 BPF_MOV32_IMM(BPF_REG_1, 3),
323 BPF_MOV32_IMM(BPF_REG_2, 5),
324 BPF_ALU32_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
325 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
328 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
333 "MOD32 by 0, zero check 1, cls",
335 BPF_MOV32_IMM(BPF_REG_1, 0),
336 BPF_MOV32_IMM(BPF_REG_0, 1),
337 BPF_ALU32_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
340 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
345 "MOD32 by 0, zero check 2, cls",
347 BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
348 BPF_MOV32_IMM(BPF_REG_0, 1),
349 BPF_ALU32_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
352 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
357 "MOD64 by 0, zero check 1, cls",
359 BPF_MOV32_IMM(BPF_REG_1, 0),
360 BPF_MOV32_IMM(BPF_REG_0, 2),
361 BPF_ALU64_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
364 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
369 "MOD64 by 0, zero check 2, cls",
371 BPF_MOV32_IMM(BPF_REG_1, 0),
372 BPF_MOV32_IMM(BPF_REG_0, -1),
373 BPF_ALU64_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
376 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
380 /* Just make sure that JITs used udiv/umod as otherwise we get
381 * an exception from INT_MIN/-1 overflow similarly as with div
385 "DIV32 overflow, check 1",
387 BPF_MOV32_IMM(BPF_REG_1, -1),
388 BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
389 BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
392 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
397 "DIV32 overflow, check 2",
399 BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
400 BPF_ALU32_IMM(BPF_DIV, BPF_REG_0, -1),
403 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
408 "DIV64 overflow, check 1",
410 BPF_MOV64_IMM(BPF_REG_1, -1),
411 BPF_LD_IMM64(BPF_REG_0, LLONG_MIN),
412 BPF_ALU64_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
415 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
420 "DIV64 overflow, check 2",
422 BPF_LD_IMM64(BPF_REG_0, LLONG_MIN),
423 BPF_ALU64_IMM(BPF_DIV, BPF_REG_0, -1),
426 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
431 "MOD32 overflow, check 1",
433 BPF_MOV32_IMM(BPF_REG_1, -1),
434 BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
435 BPF_ALU32_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
438 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
443 "MOD32 overflow, check 2",
445 BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
446 BPF_ALU32_IMM(BPF_MOD, BPF_REG_0, -1),
449 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
454 "MOD64 overflow, check 1",
456 BPF_MOV64_IMM(BPF_REG_1, -1),
457 BPF_LD_IMM64(BPF_REG_2, LLONG_MIN),
458 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
459 BPF_ALU64_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
460 BPF_MOV32_IMM(BPF_REG_0, 0),
461 BPF_JMP_REG(BPF_JNE, BPF_REG_3, BPF_REG_2, 1),
462 BPF_MOV32_IMM(BPF_REG_0, 1),
465 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
470 "MOD64 overflow, check 2",
472 BPF_LD_IMM64(BPF_REG_2, LLONG_MIN),
473 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
474 BPF_ALU64_IMM(BPF_MOD, BPF_REG_2, -1),
475 BPF_MOV32_IMM(BPF_REG_0, 0),
476 BPF_JMP_REG(BPF_JNE, BPF_REG_3, BPF_REG_2, 1),
477 BPF_MOV32_IMM(BPF_REG_0, 1),
480 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
485 "xor32 zero extend check",
487 BPF_MOV32_IMM(BPF_REG_2, -1),
488 BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 32),
489 BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 0xffff),
490 BPF_ALU32_REG(BPF_XOR, BPF_REG_2, BPF_REG_2),
491 BPF_MOV32_IMM(BPF_REG_0, 2),
492 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0, 1),
493 BPF_MOV32_IMM(BPF_REG_0, 1),
496 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
504 .errstr = "unknown opcode 00",
512 .errstr = "R0 !read_ok",
521 .errstr = "unreachable",
527 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
528 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
531 .errstr = "unreachable",
537 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
540 .errstr = "jump out of range",
544 "out of range jump2",
546 BPF_JMP_IMM(BPF_JA, 0, 0, -2),
549 .errstr = "jump out of range",
555 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
556 BPF_LD_IMM64(BPF_REG_0, 0),
557 BPF_LD_IMM64(BPF_REG_0, 0),
558 BPF_LD_IMM64(BPF_REG_0, 1),
559 BPF_LD_IMM64(BPF_REG_0, 1),
560 BPF_MOV64_IMM(BPF_REG_0, 2),
563 .errstr = "invalid BPF_LD_IMM insn",
564 .errstr_unpriv = "R1 pointer comparison",
570 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
571 BPF_LD_IMM64(BPF_REG_0, 0),
572 BPF_LD_IMM64(BPF_REG_0, 0),
573 BPF_LD_IMM64(BPF_REG_0, 1),
574 BPF_LD_IMM64(BPF_REG_0, 1),
577 .errstr = "invalid BPF_LD_IMM insn",
578 .errstr_unpriv = "R1 pointer comparison",
584 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
585 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
586 BPF_LD_IMM64(BPF_REG_0, 0),
587 BPF_LD_IMM64(BPF_REG_0, 0),
588 BPF_LD_IMM64(BPF_REG_0, 1),
589 BPF_LD_IMM64(BPF_REG_0, 1),
592 .errstr = "invalid bpf_ld_imm64 insn",
598 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
601 .errstr = "invalid bpf_ld_imm64 insn",
607 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
609 .errstr = "invalid bpf_ld_imm64 insn",
615 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
616 BPF_RAW_INSN(0, 0, 0, 0, 0),
624 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
625 BPF_RAW_INSN(0, 0, 0, 0, 1),
634 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 1, 1),
635 BPF_RAW_INSN(0, 0, 0, 0, 1),
638 .errstr = "uses reserved fields",
644 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
645 BPF_RAW_INSN(0, 0, 0, 1, 1),
648 .errstr = "invalid bpf_ld_imm64 insn",
654 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
655 BPF_RAW_INSN(0, BPF_REG_1, 0, 0, 1),
658 .errstr = "invalid bpf_ld_imm64 insn",
664 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
665 BPF_RAW_INSN(0, 0, BPF_REG_1, 0, 1),
668 .errstr = "invalid bpf_ld_imm64 insn",
674 BPF_MOV64_IMM(BPF_REG_1, 0),
675 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, BPF_REG_1, 0, 1),
676 BPF_RAW_INSN(0, 0, 0, 0, 1),
679 .errstr = "not pointing to valid bpf_map",
685 BPF_MOV64_IMM(BPF_REG_1, 0),
686 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, BPF_REG_1, 0, 1),
687 BPF_RAW_INSN(0, 0, BPF_REG_1, 0, 1),
690 .errstr = "invalid bpf_ld_imm64 insn",
696 BPF_MOV64_IMM(BPF_REG_0, 1),
697 BPF_ALU32_IMM(BPF_ARSH, BPF_REG_0, 5),
701 .errstr = "unknown opcode c4",
706 BPF_MOV64_IMM(BPF_REG_0, 1),
707 BPF_MOV64_IMM(BPF_REG_1, 5),
708 BPF_ALU32_REG(BPF_ARSH, BPF_REG_0, BPF_REG_1),
712 .errstr = "unknown opcode cc",
717 BPF_MOV64_IMM(BPF_REG_0, 1),
718 BPF_ALU64_IMM(BPF_ARSH, BPF_REG_0, 5),
726 BPF_MOV64_IMM(BPF_REG_0, 1),
727 BPF_MOV64_IMM(BPF_REG_1, 5),
728 BPF_ALU64_REG(BPF_ARSH, BPF_REG_0, BPF_REG_1),
736 BPF_ALU64_REG(BPF_MOV, BPF_REG_0, BPF_REG_2),
738 .errstr = "not an exit",
744 BPF_JMP_IMM(BPF_JA, 0, 0, -1),
747 .errstr = "back-edge",
753 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
754 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
755 BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
756 BPF_JMP_IMM(BPF_JA, 0, 0, -4),
759 .errstr = "back-edge",
765 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
766 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
767 BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
768 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3),
771 .errstr = "back-edge",
775 "read uninitialized register",
777 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
780 .errstr = "R2 !read_ok",
784 "read invalid register",
786 BPF_MOV64_REG(BPF_REG_0, -1),
789 .errstr = "R15 is invalid",
793 "program doesn't init R0 before exit",
795 BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_1),
798 .errstr = "R0 !read_ok",
802 "program doesn't init R0 before exit in all branches",
804 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
805 BPF_MOV64_IMM(BPF_REG_0, 1),
806 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2),
809 .errstr = "R0 !read_ok",
810 .errstr_unpriv = "R1 pointer comparison",
814 "stack out of bounds",
816 BPF_ST_MEM(BPF_DW, BPF_REG_10, 8, 0),
819 .errstr = "invalid stack",
823 "invalid call insn1",
825 BPF_RAW_INSN(BPF_JMP | BPF_CALL | BPF_X, 0, 0, 0, 0),
828 .errstr = "unknown opcode 8d",
832 "invalid call insn2",
834 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 1, 0),
837 .errstr = "BPF_CALL uses reserved",
841 "invalid function call",
843 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 1234567),
846 .errstr = "invalid func unknown#1234567",
850 "uninitialized stack1",
852 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
853 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
854 BPF_LD_MAP_FD(BPF_REG_1, 0),
855 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
856 BPF_FUNC_map_lookup_elem),
860 .errstr = "invalid indirect read from stack",
864 "uninitialized stack2",
866 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
867 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -8),
870 .errstr = "invalid read from stack",
874 "invalid fp arithmetic",
875 /* If this gets ever changed, make sure JITs can deal with it. */
877 BPF_MOV64_IMM(BPF_REG_0, 0),
878 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
879 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 8),
880 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
883 .errstr = "R1 subtraction from stack pointer",
887 "non-invalid fp arithmetic",
889 BPF_MOV64_IMM(BPF_REG_0, 0),
890 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
896 "invalid argument register",
898 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
899 BPF_FUNC_get_cgroup_classid),
900 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
901 BPF_FUNC_get_cgroup_classid),
904 .errstr = "R1 !read_ok",
906 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
909 "non-invalid argument register",
911 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
912 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
913 BPF_FUNC_get_cgroup_classid),
914 BPF_ALU64_REG(BPF_MOV, BPF_REG_1, BPF_REG_6),
915 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
916 BPF_FUNC_get_cgroup_classid),
920 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
923 "check valid spill/fill",
925 /* spill R1(ctx) into stack */
926 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
927 /* fill it back into R2 */
928 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8),
929 /* should be able to access R0 = *(R2 + 8) */
930 /* BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 8), */
931 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
934 .errstr_unpriv = "R0 leaks addr",
936 .result_unpriv = REJECT,
937 .retval = POINTER_VALUE,
940 "check valid spill/fill, skb mark",
942 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
943 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
944 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
945 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
946 offsetof(struct __sk_buff, mark)),
950 .result_unpriv = ACCEPT,
953 "check corrupted spill/fill",
955 /* spill R1(ctx) into stack */
956 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
957 /* mess up with R1 pointer on stack */
958 BPF_ST_MEM(BPF_B, BPF_REG_10, -7, 0x23),
959 /* fill back into R0 is fine for priv.
960 * R0 now becomes SCALAR_VALUE.
962 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
963 /* Load from R0 should fail. */
964 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 8),
967 .errstr_unpriv = "attempt to corrupt spilled",
968 .errstr = "R0 invalid mem access 'inv",
970 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
973 "check corrupted spill/fill, LSB",
975 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
976 BPF_ST_MEM(BPF_H, BPF_REG_10, -8, 0xcafe),
977 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
980 .errstr_unpriv = "attempt to corrupt spilled",
981 .result_unpriv = REJECT,
983 .retval = POINTER_VALUE,
986 "check corrupted spill/fill, MSB",
988 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
989 BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0x12345678),
990 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
993 .errstr_unpriv = "attempt to corrupt spilled",
994 .result_unpriv = REJECT,
996 .retval = POINTER_VALUE,
999 "invalid src register in STX",
1001 BPF_STX_MEM(BPF_B, BPF_REG_10, -1, -1),
1004 .errstr = "R15 is invalid",
1008 "invalid dst register in STX",
1010 BPF_STX_MEM(BPF_B, 14, BPF_REG_10, -1),
1013 .errstr = "R14 is invalid",
1017 "invalid dst register in ST",
1019 BPF_ST_MEM(BPF_B, 14, -1, -1),
1022 .errstr = "R14 is invalid",
1026 "invalid src register in LDX",
1028 BPF_LDX_MEM(BPF_B, BPF_REG_0, 12, 0),
1031 .errstr = "R12 is invalid",
1035 "invalid dst register in LDX",
1037 BPF_LDX_MEM(BPF_B, 11, BPF_REG_1, 0),
1040 .errstr = "R11 is invalid",
1046 BPF_RAW_INSN(0, 0, 0, 0, 0),
1049 .errstr = "unknown opcode 00",
1055 BPF_RAW_INSN(1, 0, 0, 0, 0),
1058 .errstr = "BPF_LDX uses reserved fields",
1064 BPF_RAW_INSN(-1, 0, 0, 0, 0),
1067 .errstr = "unknown opcode ff",
1073 BPF_RAW_INSN(-1, -1, -1, -1, -1),
1076 .errstr = "unknown opcode ff",
1082 BPF_RAW_INSN(0x7f, -1, -1, -1, -1),
1085 .errstr = "BPF_ALU uses reserved fields",
1089 "misaligned read from stack",
1091 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1092 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -4),
1095 .errstr = "misaligned stack access",
1099 "invalid map_fd for function call",
1101 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1102 BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_10),
1103 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1104 BPF_LD_MAP_FD(BPF_REG_1, 0),
1105 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1106 BPF_FUNC_map_delete_elem),
1109 .errstr = "fd 0 is not pointing to valid bpf_map",
1113 "don't check return value before access",
1115 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1116 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1117 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1118 BPF_LD_MAP_FD(BPF_REG_1, 0),
1119 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1120 BPF_FUNC_map_lookup_elem),
1121 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
1124 .fixup_map1 = { 3 },
1125 .errstr = "R0 invalid mem access 'map_value_or_null'",
1129 "access memory with incorrect alignment",
1131 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1132 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1133 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1134 BPF_LD_MAP_FD(BPF_REG_1, 0),
1135 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1136 BPF_FUNC_map_lookup_elem),
1137 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
1138 BPF_ST_MEM(BPF_DW, BPF_REG_0, 4, 0),
1141 .fixup_map1 = { 3 },
1142 .errstr = "misaligned value access",
1144 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
1147 "sometimes access memory with incorrect alignment",
1149 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1150 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1151 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1152 BPF_LD_MAP_FD(BPF_REG_1, 0),
1153 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1154 BPF_FUNC_map_lookup_elem),
1155 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
1156 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
1158 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 1),
1161 .fixup_map1 = { 3 },
1162 .errstr = "R0 invalid mem access",
1163 .errstr_unpriv = "R0 leaks addr",
1165 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
1170 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1171 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -8),
1172 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
1173 BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
1174 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 1),
1175 BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 1),
1176 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 1),
1177 BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 2),
1178 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 1),
1179 BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 3),
1180 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 1),
1181 BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 4),
1182 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
1183 BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 5),
1184 BPF_MOV64_IMM(BPF_REG_0, 0),
1187 .errstr_unpriv = "R1 pointer comparison",
1188 .result_unpriv = REJECT,
1194 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1195 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
1196 BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
1197 BPF_JMP_IMM(BPF_JA, 0, 0, 14),
1198 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 2),
1199 BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
1200 BPF_JMP_IMM(BPF_JA, 0, 0, 11),
1201 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 2),
1202 BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
1203 BPF_JMP_IMM(BPF_JA, 0, 0, 8),
1204 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 2),
1205 BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
1206 BPF_JMP_IMM(BPF_JA, 0, 0, 5),
1207 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 2),
1208 BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
1209 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1210 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
1211 BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
1212 BPF_MOV64_IMM(BPF_REG_0, 0),
1215 .errstr_unpriv = "R1 pointer comparison",
1216 .result_unpriv = REJECT,
1222 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1223 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
1224 BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
1225 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1226 BPF_JMP_IMM(BPF_JA, 0, 0, 19),
1227 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 3),
1228 BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
1229 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
1230 BPF_JMP_IMM(BPF_JA, 0, 0, 15),
1231 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 3),
1232 BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
1233 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -32),
1234 BPF_JMP_IMM(BPF_JA, 0, 0, 11),
1235 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 3),
1236 BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
1237 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -40),
1238 BPF_JMP_IMM(BPF_JA, 0, 0, 7),
1239 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 3),
1240 BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
1241 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -48),
1242 BPF_JMP_IMM(BPF_JA, 0, 0, 3),
1243 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 0),
1244 BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
1245 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -56),
1246 BPF_LD_MAP_FD(BPF_REG_1, 0),
1247 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1248 BPF_FUNC_map_delete_elem),
1251 .fixup_map1 = { 24 },
1252 .errstr_unpriv = "R1 pointer comparison",
1253 .result_unpriv = REJECT,
1260 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1261 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1262 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1263 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1264 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1265 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1266 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1267 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1268 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1269 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1270 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1271 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1272 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1273 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1274 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1275 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1276 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1277 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1278 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1279 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1280 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1281 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1282 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1283 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1284 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1285 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1286 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1287 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1288 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1289 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1290 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1291 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1292 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1293 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1294 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1295 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1296 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
1297 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
1298 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
1299 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
1300 BPF_MOV64_IMM(BPF_REG_0, 0),
1303 .errstr_unpriv = "R1 pointer comparison",
1304 .result_unpriv = REJECT,
1310 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1311 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
1312 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
1313 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
1314 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1315 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
1316 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
1317 BPF_MOV64_IMM(BPF_REG_0, 0),
1318 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
1319 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
1320 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1321 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
1322 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
1323 BPF_MOV64_IMM(BPF_REG_0, 0),
1324 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
1325 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
1326 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1327 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
1328 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
1329 BPF_MOV64_IMM(BPF_REG_0, 0),
1330 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
1331 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
1332 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1333 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
1334 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
1335 BPF_MOV64_IMM(BPF_REG_0, 0),
1336 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
1337 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
1338 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1339 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
1340 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
1341 BPF_MOV64_IMM(BPF_REG_0, 0),
1344 .errstr_unpriv = "R1 pointer comparison",
1345 .result_unpriv = REJECT,
1349 "access skb fields ok",
1351 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1352 offsetof(struct __sk_buff, len)),
1353 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
1354 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1355 offsetof(struct __sk_buff, mark)),
1356 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
1357 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1358 offsetof(struct __sk_buff, pkt_type)),
1359 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
1360 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1361 offsetof(struct __sk_buff, queue_mapping)),
1362 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
1363 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1364 offsetof(struct __sk_buff, protocol)),
1365 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
1366 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1367 offsetof(struct __sk_buff, vlan_present)),
1368 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
1369 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1370 offsetof(struct __sk_buff, vlan_tci)),
1371 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
1372 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1373 offsetof(struct __sk_buff, napi_id)),
1374 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
1380 "access skb fields bad1",
1382 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -4),
1385 .errstr = "invalid bpf_context access",
1389 "access skb fields bad2",
1391 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 9),
1392 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1393 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1394 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1395 BPF_LD_MAP_FD(BPF_REG_1, 0),
1396 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1397 BPF_FUNC_map_lookup_elem),
1398 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
1400 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
1401 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1402 offsetof(struct __sk_buff, pkt_type)),
1405 .fixup_map1 = { 4 },
1406 .errstr = "different pointers",
1407 .errstr_unpriv = "R1 pointer comparison",
1411 "access skb fields bad3",
1413 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
1414 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1415 offsetof(struct __sk_buff, pkt_type)),
1417 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1418 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1419 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1420 BPF_LD_MAP_FD(BPF_REG_1, 0),
1421 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1422 BPF_FUNC_map_lookup_elem),
1423 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
1425 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
1426 BPF_JMP_IMM(BPF_JA, 0, 0, -12),
1428 .fixup_map1 = { 6 },
1429 .errstr = "different pointers",
1430 .errstr_unpriv = "R1 pointer comparison",
1434 "access skb fields bad4",
1436 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 3),
1437 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
1438 offsetof(struct __sk_buff, len)),
1439 BPF_MOV64_IMM(BPF_REG_0, 0),
1441 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1442 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1443 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1444 BPF_LD_MAP_FD(BPF_REG_1, 0),
1445 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1446 BPF_FUNC_map_lookup_elem),
1447 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
1449 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
1450 BPF_JMP_IMM(BPF_JA, 0, 0, -13),
1452 .fixup_map1 = { 7 },
1453 .errstr = "different pointers",
1454 .errstr_unpriv = "R1 pointer comparison",
1458 "invalid access __sk_buff family",
1460 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1461 offsetof(struct __sk_buff, family)),
1464 .errstr = "invalid bpf_context access",
1468 "invalid access __sk_buff remote_ip4",
1470 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1471 offsetof(struct __sk_buff, remote_ip4)),
1474 .errstr = "invalid bpf_context access",
1478 "invalid access __sk_buff local_ip4",
1480 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1481 offsetof(struct __sk_buff, local_ip4)),
1484 .errstr = "invalid bpf_context access",
1488 "invalid access __sk_buff remote_ip6",
1490 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1491 offsetof(struct __sk_buff, remote_ip6)),
1494 .errstr = "invalid bpf_context access",
1498 "invalid access __sk_buff local_ip6",
1500 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1501 offsetof(struct __sk_buff, local_ip6)),
1504 .errstr = "invalid bpf_context access",
1508 "invalid access __sk_buff remote_port",
1510 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1511 offsetof(struct __sk_buff, remote_port)),
1514 .errstr = "invalid bpf_context access",
1518 "invalid access __sk_buff remote_port",
1520 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1521 offsetof(struct __sk_buff, local_port)),
1524 .errstr = "invalid bpf_context access",
1528 "valid access __sk_buff family",
1530 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1531 offsetof(struct __sk_buff, family)),
1535 .prog_type = BPF_PROG_TYPE_SK_SKB,
1538 "valid access __sk_buff remote_ip4",
1540 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1541 offsetof(struct __sk_buff, remote_ip4)),
1545 .prog_type = BPF_PROG_TYPE_SK_SKB,
1548 "valid access __sk_buff local_ip4",
1550 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1551 offsetof(struct __sk_buff, local_ip4)),
1555 .prog_type = BPF_PROG_TYPE_SK_SKB,
1558 "valid access __sk_buff remote_ip6",
1560 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1561 offsetof(struct __sk_buff, remote_ip6[0])),
1562 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1563 offsetof(struct __sk_buff, remote_ip6[1])),
1564 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1565 offsetof(struct __sk_buff, remote_ip6[2])),
1566 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1567 offsetof(struct __sk_buff, remote_ip6[3])),
1571 .prog_type = BPF_PROG_TYPE_SK_SKB,
1574 "valid access __sk_buff local_ip6",
1576 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1577 offsetof(struct __sk_buff, local_ip6[0])),
1578 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1579 offsetof(struct __sk_buff, local_ip6[1])),
1580 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1581 offsetof(struct __sk_buff, local_ip6[2])),
1582 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1583 offsetof(struct __sk_buff, local_ip6[3])),
1587 .prog_type = BPF_PROG_TYPE_SK_SKB,
1590 "valid access __sk_buff remote_port",
1592 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1593 offsetof(struct __sk_buff, remote_port)),
1597 .prog_type = BPF_PROG_TYPE_SK_SKB,
/* NOTE(review): copy-paste defect — the test loads local_port but reused the
 * "remote_port" name from the previous entry; renamed to match the field
 * actually accessed. */
1600 "valid access __sk_buff local_port",
1602 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1603 offsetof(struct __sk_buff, local_port)),
1607 .prog_type = BPF_PROG_TYPE_SK_SKB,
1610 "invalid access of tc_classid for SK_SKB",
1612 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1613 offsetof(struct __sk_buff, tc_classid)),
1617 .prog_type = BPF_PROG_TYPE_SK_SKB,
1618 .errstr = "invalid bpf_context access",
1621 "invalid access of skb->mark for SK_SKB",
1623 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1624 offsetof(struct __sk_buff, mark)),
1628 .prog_type = BPF_PROG_TYPE_SK_SKB,
1629 .errstr = "invalid bpf_context access",
1632 "check skb->mark is not writeable by SK_SKB",
1634 BPF_MOV64_IMM(BPF_REG_0, 0),
1635 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1636 offsetof(struct __sk_buff, mark)),
1640 .prog_type = BPF_PROG_TYPE_SK_SKB,
1641 .errstr = "invalid bpf_context access",
1644 "check skb->tc_index is writeable by SK_SKB",
1646 BPF_MOV64_IMM(BPF_REG_0, 0),
1647 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1648 offsetof(struct __sk_buff, tc_index)),
1652 .prog_type = BPF_PROG_TYPE_SK_SKB,
1655 "check skb->priority is writeable by SK_SKB",
1657 BPF_MOV64_IMM(BPF_REG_0, 0),
1658 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1659 offsetof(struct __sk_buff, priority)),
1663 .prog_type = BPF_PROG_TYPE_SK_SKB,
1666 "direct packet read for SK_SKB",
1668 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1669 offsetof(struct __sk_buff, data)),
1670 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1671 offsetof(struct __sk_buff, data_end)),
1672 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1673 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1674 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
1675 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
1676 BPF_MOV64_IMM(BPF_REG_0, 0),
1680 .prog_type = BPF_PROG_TYPE_SK_SKB,
1683 "direct packet write for SK_SKB",
1685 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1686 offsetof(struct __sk_buff, data)),
1687 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1688 offsetof(struct __sk_buff, data_end)),
1689 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1690 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1691 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
1692 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
1693 BPF_MOV64_IMM(BPF_REG_0, 0),
1697 .prog_type = BPF_PROG_TYPE_SK_SKB,
1700 "overlapping checks for direct packet access SK_SKB",
1702 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1703 offsetof(struct __sk_buff, data)),
1704 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1705 offsetof(struct __sk_buff, data_end)),
1706 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1707 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1708 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4),
1709 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
1710 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
1711 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
1712 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6),
1713 BPF_MOV64_IMM(BPF_REG_0, 0),
1717 .prog_type = BPF_PROG_TYPE_SK_SKB,
1720 "valid access family in SK_MSG",
1722 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1723 offsetof(struct sk_msg_md, family)),
1727 .prog_type = BPF_PROG_TYPE_SK_MSG,
1730 "valid access remote_ip4 in SK_MSG",
1732 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1733 offsetof(struct sk_msg_md, remote_ip4)),
1737 .prog_type = BPF_PROG_TYPE_SK_MSG,
1740 "valid access local_ip4 in SK_MSG",
1742 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1743 offsetof(struct sk_msg_md, local_ip4)),
1747 .prog_type = BPF_PROG_TYPE_SK_MSG,
1750 "valid access remote_port in SK_MSG",
1752 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1753 offsetof(struct sk_msg_md, remote_port)),
1757 .prog_type = BPF_PROG_TYPE_SK_MSG,
1760 "valid access local_port in SK_MSG",
1762 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1763 offsetof(struct sk_msg_md, local_port)),
1767 .prog_type = BPF_PROG_TYPE_SK_MSG,
/* NOTE(review): this is an SK_MSG-context test (it reads struct sk_msg_md
 * fields) but was loaded as BPF_PROG_TYPE_SK_SKB; changed to SK_MSG so the
 * verifier checks the intended context, consistent with the other SK_MSG
 * tests in this table. */
1770 "valid access remote_ip6 in SK_MSG",
1772 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1773 offsetof(struct sk_msg_md, remote_ip6[0])),
1774 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1775 offsetof(struct sk_msg_md, remote_ip6[1])),
1776 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1777 offsetof(struct sk_msg_md, remote_ip6[2])),
1778 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1779 offsetof(struct sk_msg_md, remote_ip6[3])),
1783 .prog_type = BPF_PROG_TYPE_SK_MSG,
/* NOTE(review): same defect as the remote_ip6 entry — reads struct sk_msg_md
 * fields but was loaded as BPF_PROG_TYPE_SK_SKB; changed to SK_MSG so the
 * verifier checks the intended context. */
1786 "valid access local_ip6 in SK_MSG",
1788 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1789 offsetof(struct sk_msg_md, local_ip6[0])),
1790 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1791 offsetof(struct sk_msg_md, local_ip6[1])),
1792 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1793 offsetof(struct sk_msg_md, local_ip6[2])),
1794 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1795 offsetof(struct sk_msg_md, local_ip6[3])),
1799 .prog_type = BPF_PROG_TYPE_SK_MSG,
1802 "invalid 64B read of family in SK_MSG",
1804 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1,
1805 offsetof(struct sk_msg_md, family)),
1808 .errstr = "invalid bpf_context access",
1810 .prog_type = BPF_PROG_TYPE_SK_MSG,
1811 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
1814 "invalid read past end of SK_MSG",
1816 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1817 offsetof(struct sk_msg_md, local_port) + 4),
1820 .errstr = "R0 !read_ok",
1822 .prog_type = BPF_PROG_TYPE_SK_MSG,
1825 "invalid read offset in SK_MSG",
1827 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1828 offsetof(struct sk_msg_md, family) + 1),
1831 .errstr = "invalid bpf_context access",
1833 .prog_type = BPF_PROG_TYPE_SK_MSG,
1834 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
1837 "direct packet read for SK_MSG",
1839 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1,
1840 offsetof(struct sk_msg_md, data)),
1841 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1,
1842 offsetof(struct sk_msg_md, data_end)),
1843 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1844 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1845 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
1846 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
1847 BPF_MOV64_IMM(BPF_REG_0, 0),
1851 .prog_type = BPF_PROG_TYPE_SK_MSG,
1854 "direct packet write for SK_MSG",
1856 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1,
1857 offsetof(struct sk_msg_md, data)),
1858 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1,
1859 offsetof(struct sk_msg_md, data_end)),
1860 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1861 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1862 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
1863 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
1864 BPF_MOV64_IMM(BPF_REG_0, 0),
1868 .prog_type = BPF_PROG_TYPE_SK_MSG,
1871 "overlapping checks for direct packet access SK_MSG",
1873 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1,
1874 offsetof(struct sk_msg_md, data)),
1875 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1,
1876 offsetof(struct sk_msg_md, data_end)),
1877 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1878 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1879 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4),
1880 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
1881 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
1882 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
1883 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6),
1884 BPF_MOV64_IMM(BPF_REG_0, 0),
1888 .prog_type = BPF_PROG_TYPE_SK_MSG,
1891 "check skb->mark is not writeable by sockets",
1893 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
1894 offsetof(struct __sk_buff, mark)),
1897 .errstr = "invalid bpf_context access",
1898 .errstr_unpriv = "R1 leaks addr",
1902 "check skb->tc_index is not writeable by sockets",
1904 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
1905 offsetof(struct __sk_buff, tc_index)),
1908 .errstr = "invalid bpf_context access",
1909 .errstr_unpriv = "R1 leaks addr",
1913 "check cb access: byte",
1915 BPF_MOV64_IMM(BPF_REG_0, 0),
1916 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1917 offsetof(struct __sk_buff, cb[0])),
1918 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1919 offsetof(struct __sk_buff, cb[0]) + 1),
1920 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1921 offsetof(struct __sk_buff, cb[0]) + 2),
1922 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1923 offsetof(struct __sk_buff, cb[0]) + 3),
1924 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1925 offsetof(struct __sk_buff, cb[1])),
1926 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1927 offsetof(struct __sk_buff, cb[1]) + 1),
1928 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1929 offsetof(struct __sk_buff, cb[1]) + 2),
1930 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1931 offsetof(struct __sk_buff, cb[1]) + 3),
1932 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1933 offsetof(struct __sk_buff, cb[2])),
1934 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1935 offsetof(struct __sk_buff, cb[2]) + 1),
1936 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1937 offsetof(struct __sk_buff, cb[2]) + 2),
1938 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1939 offsetof(struct __sk_buff, cb[2]) + 3),
1940 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1941 offsetof(struct __sk_buff, cb[3])),
1942 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1943 offsetof(struct __sk_buff, cb[3]) + 1),
1944 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1945 offsetof(struct __sk_buff, cb[3]) + 2),
1946 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1947 offsetof(struct __sk_buff, cb[3]) + 3),
1948 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1949 offsetof(struct __sk_buff, cb[4])),
1950 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1951 offsetof(struct __sk_buff, cb[4]) + 1),
1952 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1953 offsetof(struct __sk_buff, cb[4]) + 2),
1954 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1955 offsetof(struct __sk_buff, cb[4]) + 3),
1956 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1957 offsetof(struct __sk_buff, cb[0])),
1958 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1959 offsetof(struct __sk_buff, cb[0]) + 1),
1960 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1961 offsetof(struct __sk_buff, cb[0]) + 2),
1962 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1963 offsetof(struct __sk_buff, cb[0]) + 3),
1964 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1965 offsetof(struct __sk_buff, cb[1])),
1966 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1967 offsetof(struct __sk_buff, cb[1]) + 1),
1968 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1969 offsetof(struct __sk_buff, cb[1]) + 2),
1970 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1971 offsetof(struct __sk_buff, cb[1]) + 3),
1972 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1973 offsetof(struct __sk_buff, cb[2])),
1974 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1975 offsetof(struct __sk_buff, cb[2]) + 1),
1976 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1977 offsetof(struct __sk_buff, cb[2]) + 2),
1978 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1979 offsetof(struct __sk_buff, cb[2]) + 3),
1980 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1981 offsetof(struct __sk_buff, cb[3])),
1982 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1983 offsetof(struct __sk_buff, cb[3]) + 1),
1984 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1985 offsetof(struct __sk_buff, cb[3]) + 2),
1986 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1987 offsetof(struct __sk_buff, cb[3]) + 3),
1988 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1989 offsetof(struct __sk_buff, cb[4])),
1990 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1991 offsetof(struct __sk_buff, cb[4]) + 1),
1992 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1993 offsetof(struct __sk_buff, cb[4]) + 2),
1994 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1995 offsetof(struct __sk_buff, cb[4]) + 3),
2001 "__sk_buff->hash, offset 0, byte store not permitted",
2003 BPF_MOV64_IMM(BPF_REG_0, 0),
2004 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
2005 offsetof(struct __sk_buff, hash)),
2008 .errstr = "invalid bpf_context access",
2012 "__sk_buff->tc_index, offset 3, byte store not permitted",
2014 BPF_MOV64_IMM(BPF_REG_0, 0),
2015 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
2016 offsetof(struct __sk_buff, tc_index) + 3),
2019 .errstr = "invalid bpf_context access",
2023 "check skb->hash byte load permitted",
2025 BPF_MOV64_IMM(BPF_REG_0, 0),
2026 #if __BYTE_ORDER == __LITTLE_ENDIAN
2027 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2028 offsetof(struct __sk_buff, hash)),
2030 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2031 offsetof(struct __sk_buff, hash) + 3),
2038 "check skb->hash byte load permitted 1",
2040 BPF_MOV64_IMM(BPF_REG_0, 0),
2041 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2042 offsetof(struct __sk_buff, hash) + 1),
2048 "check skb->hash byte load permitted 2",
2050 BPF_MOV64_IMM(BPF_REG_0, 0),
2051 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2052 offsetof(struct __sk_buff, hash) + 2),
2058 "check skb->hash byte load permitted 3",
2060 BPF_MOV64_IMM(BPF_REG_0, 0),
2061 #if __BYTE_ORDER == __LITTLE_ENDIAN
2062 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2063 offsetof(struct __sk_buff, hash) + 3),
2065 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2066 offsetof(struct __sk_buff, hash)),
2073 "check cb access: byte, wrong type",
2075 BPF_MOV64_IMM(BPF_REG_0, 0),
2076 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
2077 offsetof(struct __sk_buff, cb[0])),
2080 .errstr = "invalid bpf_context access",
2082 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
2085 "check cb access: half",
2087 BPF_MOV64_IMM(BPF_REG_0, 0),
2088 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2089 offsetof(struct __sk_buff, cb[0])),
2090 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2091 offsetof(struct __sk_buff, cb[0]) + 2),
2092 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2093 offsetof(struct __sk_buff, cb[1])),
2094 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2095 offsetof(struct __sk_buff, cb[1]) + 2),
2096 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2097 offsetof(struct __sk_buff, cb[2])),
2098 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2099 offsetof(struct __sk_buff, cb[2]) + 2),
2100 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2101 offsetof(struct __sk_buff, cb[3])),
2102 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2103 offsetof(struct __sk_buff, cb[3]) + 2),
2104 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2105 offsetof(struct __sk_buff, cb[4])),
2106 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2107 offsetof(struct __sk_buff, cb[4]) + 2),
2108 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2109 offsetof(struct __sk_buff, cb[0])),
2110 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2111 offsetof(struct __sk_buff, cb[0]) + 2),
2112 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2113 offsetof(struct __sk_buff, cb[1])),
2114 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2115 offsetof(struct __sk_buff, cb[1]) + 2),
2116 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2117 offsetof(struct __sk_buff, cb[2])),
2118 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2119 offsetof(struct __sk_buff, cb[2]) + 2),
2120 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2121 offsetof(struct __sk_buff, cb[3])),
2122 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2123 offsetof(struct __sk_buff, cb[3]) + 2),
2124 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2125 offsetof(struct __sk_buff, cb[4])),
2126 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2127 offsetof(struct __sk_buff, cb[4]) + 2),
2133 "check cb access: half, unaligned",
2135 BPF_MOV64_IMM(BPF_REG_0, 0),
2136 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2137 offsetof(struct __sk_buff, cb[0]) + 1),
2140 .errstr = "misaligned context access",
2142 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
2145 "check __sk_buff->hash, offset 0, half store not permitted",
2147 BPF_MOV64_IMM(BPF_REG_0, 0),
2148 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2149 offsetof(struct __sk_buff, hash)),
2152 .errstr = "invalid bpf_context access",
2156 "check __sk_buff->tc_index, offset 2, half store not permitted",
2158 BPF_MOV64_IMM(BPF_REG_0, 0),
2159 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2160 offsetof(struct __sk_buff, tc_index) + 2),
2163 .errstr = "invalid bpf_context access",
2167 "check skb->hash half load permitted",
2169 BPF_MOV64_IMM(BPF_REG_0, 0),
2170 #if __BYTE_ORDER == __LITTLE_ENDIAN
2171 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2172 offsetof(struct __sk_buff, hash)),
2174 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2175 offsetof(struct __sk_buff, hash) + 2),
2182 "check skb->hash half load permitted 2",
2184 BPF_MOV64_IMM(BPF_REG_0, 0),
2185 #if __BYTE_ORDER == __LITTLE_ENDIAN
2186 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2187 offsetof(struct __sk_buff, hash) + 2),
2189 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2190 offsetof(struct __sk_buff, hash)),
2197 "check skb->hash half load not permitted, unaligned 1",
2199 BPF_MOV64_IMM(BPF_REG_0, 0),
2200 #if __BYTE_ORDER == __LITTLE_ENDIAN
2201 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2202 offsetof(struct __sk_buff, hash) + 1),
2204 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2205 offsetof(struct __sk_buff, hash) + 3),
2209 .errstr = "invalid bpf_context access",
2211 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
2214 "check skb->hash half load not permitted, unaligned 3",
2216 BPF_MOV64_IMM(BPF_REG_0, 0),
2217 #if __BYTE_ORDER == __LITTLE_ENDIAN
2218 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2219 offsetof(struct __sk_buff, hash) + 3),
2221 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2222 offsetof(struct __sk_buff, hash) + 1),
2226 .errstr = "invalid bpf_context access",
2228 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
2231 "check cb access: half, wrong type",
2233 BPF_MOV64_IMM(BPF_REG_0, 0),
2234 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2235 offsetof(struct __sk_buff, cb[0])),
2238 .errstr = "invalid bpf_context access",
2240 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
2243 "check cb access: word",
2245 BPF_MOV64_IMM(BPF_REG_0, 0),
2246 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2247 offsetof(struct __sk_buff, cb[0])),
2248 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2249 offsetof(struct __sk_buff, cb[1])),
2250 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2251 offsetof(struct __sk_buff, cb[2])),
2252 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2253 offsetof(struct __sk_buff, cb[3])),
2254 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2255 offsetof(struct __sk_buff, cb[4])),
2256 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2257 offsetof(struct __sk_buff, cb[0])),
2258 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2259 offsetof(struct __sk_buff, cb[1])),
2260 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2261 offsetof(struct __sk_buff, cb[2])),
2262 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2263 offsetof(struct __sk_buff, cb[3])),
2264 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2265 offsetof(struct __sk_buff, cb[4])),
2271 "check cb access: word, unaligned 1",
2273 BPF_MOV64_IMM(BPF_REG_0, 0),
2274 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2275 offsetof(struct __sk_buff, cb[0]) + 2),
2278 .errstr = "misaligned context access",
2280 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
2283 "check cb access: word, unaligned 2",
2285 BPF_MOV64_IMM(BPF_REG_0, 0),
2286 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2287 offsetof(struct __sk_buff, cb[4]) + 1),
2290 .errstr = "misaligned context access",
2292 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
2295 "check cb access: word, unaligned 3",
2297 BPF_MOV64_IMM(BPF_REG_0, 0),
2298 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2299 offsetof(struct __sk_buff, cb[4]) + 2),
2302 .errstr = "misaligned context access",
2304 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
2307 "check cb access: word, unaligned 4",
2309 BPF_MOV64_IMM(BPF_REG_0, 0),
2310 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2311 offsetof(struct __sk_buff, cb[4]) + 3),
2314 .errstr = "misaligned context access",
2316 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
2319 "check cb access: double",
2321 BPF_MOV64_IMM(BPF_REG_0, 0),
2322 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2323 offsetof(struct __sk_buff, cb[0])),
2324 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2325 offsetof(struct __sk_buff, cb[2])),
2326 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
2327 offsetof(struct __sk_buff, cb[0])),
2328 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
2329 offsetof(struct __sk_buff, cb[2])),
2335 "check cb access: double, unaligned 1",
2337 BPF_MOV64_IMM(BPF_REG_0, 0),
2338 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2339 offsetof(struct __sk_buff, cb[1])),
2342 .errstr = "misaligned context access",
2344 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
2347 "check cb access: double, unaligned 2",
2349 BPF_MOV64_IMM(BPF_REG_0, 0),
2350 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2351 offsetof(struct __sk_buff, cb[3])),
2354 .errstr = "misaligned context access",
2356 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
2359 "check cb access: double, oob 1",
2361 BPF_MOV64_IMM(BPF_REG_0, 0),
2362 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2363 offsetof(struct __sk_buff, cb[4])),
2366 .errstr = "invalid bpf_context access",
2370 "check cb access: double, oob 2",
2372 BPF_MOV64_IMM(BPF_REG_0, 0),
2373 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
2374 offsetof(struct __sk_buff, cb[4])),
2377 .errstr = "invalid bpf_context access",
2381 "check __sk_buff->ifindex dw store not permitted",
2383 BPF_MOV64_IMM(BPF_REG_0, 0),
2384 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2385 offsetof(struct __sk_buff, ifindex)),
2388 .errstr = "invalid bpf_context access",
2392 "check __sk_buff->ifindex dw load not permitted",
2394 BPF_MOV64_IMM(BPF_REG_0, 0),
2395 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
2396 offsetof(struct __sk_buff, ifindex)),
2399 .errstr = "invalid bpf_context access",
2403 "check cb access: double, wrong type",
2405 BPF_MOV64_IMM(BPF_REG_0, 0),
2406 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2407 offsetof(struct __sk_buff, cb[0])),
2410 .errstr = "invalid bpf_context access",
2412 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
2415 "check out of range skb->cb access",
2417 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2418 offsetof(struct __sk_buff, cb[0]) + 256),
2421 .errstr = "invalid bpf_context access",
2422 .errstr_unpriv = "",
2424 .prog_type = BPF_PROG_TYPE_SCHED_ACT,
2427 "write skb fields from socket prog",
2429 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2430 offsetof(struct __sk_buff, cb[4])),
2431 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
2432 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2433 offsetof(struct __sk_buff, mark)),
2434 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2435 offsetof(struct __sk_buff, tc_index)),
2436 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
2437 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
2438 offsetof(struct __sk_buff, cb[0])),
2439 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
2440 offsetof(struct __sk_buff, cb[2])),
2444 .errstr_unpriv = "R1 leaks addr",
2445 .result_unpriv = REJECT,
2448 "write skb fields from tc_cls_act prog",
2450 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2451 offsetof(struct __sk_buff, cb[0])),
2452 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2453 offsetof(struct __sk_buff, mark)),
2454 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2455 offsetof(struct __sk_buff, tc_index)),
2456 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2457 offsetof(struct __sk_buff, tc_index)),
2458 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2459 offsetof(struct __sk_buff, cb[3])),
2462 .errstr_unpriv = "",
2463 .result_unpriv = REJECT,
2465 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2468 "PTR_TO_STACK store/load",
2470 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
2471 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
2472 BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
2473 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
2477 .retval = 0xfaceb00c,
2480 "PTR_TO_STACK store/load - bad alignment on off",
2482 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
2483 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
2484 BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
2485 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
2489 .errstr = "misaligned stack access off (0x0; 0x0)+-8+2 size 8",
2492 "PTR_TO_STACK store/load - bad alignment on reg",
2494 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
2495 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
2496 BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
2497 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
2501 .errstr = "misaligned stack access off (0x0; 0x0)+-10+8 size 8",
2504 "PTR_TO_STACK store/load - out of bounds low",
2506 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
2507 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -80000),
2508 BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
2509 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
2513 .errstr = "invalid stack off=-79992 size=8",
2514 .errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
2517 "PTR_TO_STACK store/load - out of bounds high",
2519 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
2520 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
2521 BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
2522 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
2526 .errstr = "invalid stack off=0 size=8",
2529 "unpriv: return pointer",
2531 BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
2535 .result_unpriv = REJECT,
2536 .errstr_unpriv = "R0 leaks addr",
2537 .retval = POINTER_VALUE,
2540 "unpriv: add const to pointer",
2542 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
2543 BPF_MOV64_IMM(BPF_REG_0, 0),
2549 "unpriv: add pointer to pointer",
2551 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10),
2552 BPF_MOV64_IMM(BPF_REG_0, 0),
2556 .errstr = "R1 pointer += pointer",
2559 "unpriv: neg pointer",
2561 BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0),
2562 BPF_MOV64_IMM(BPF_REG_0, 0),
2566 .result_unpriv = REJECT,
2567 .errstr_unpriv = "R1 pointer arithmetic",
2570 "unpriv: cmp pointer with const",
2572 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
2573 BPF_MOV64_IMM(BPF_REG_0, 0),
2577 .result_unpriv = REJECT,
2578 .errstr_unpriv = "R1 pointer comparison",
2581 "unpriv: cmp pointer with pointer",
2583 BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
2584 BPF_MOV64_IMM(BPF_REG_0, 0),
2588 .result_unpriv = REJECT,
2589 .errstr_unpriv = "R10 pointer comparison",
2592 "unpriv: check that printk is disallowed",
2594 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
2595 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
2596 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
2597 BPF_MOV64_IMM(BPF_REG_2, 8),
2598 BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
2599 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2600 BPF_FUNC_trace_printk),
2601 BPF_MOV64_IMM(BPF_REG_0, 0),
2604 .errstr_unpriv = "unknown func bpf_trace_printk#6",
2605 .result_unpriv = REJECT,
2609 "unpriv: pass pointer to helper function",
2611 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
2612 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2613 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2614 BPF_LD_MAP_FD(BPF_REG_1, 0),
2615 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
2616 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
2617 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2618 BPF_FUNC_map_update_elem),
2619 BPF_MOV64_IMM(BPF_REG_0, 0),
2622 .fixup_map1 = { 3 },
2623 .errstr_unpriv = "R4 leaks addr",
2624 .result_unpriv = REJECT,
2628 "unpriv: indirectly pass pointer on stack to helper function",
2630 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
2631 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2632 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2633 BPF_LD_MAP_FD(BPF_REG_1, 0),
2634 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2635 BPF_FUNC_map_lookup_elem),
2636 BPF_MOV64_IMM(BPF_REG_0, 0),
2639 .fixup_map1 = { 3 },
2640 .errstr = "invalid indirect read from stack off -8+0 size 8",
2644 "unpriv: mangle pointer on stack 1",
2646 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
2647 BPF_ST_MEM(BPF_W, BPF_REG_10, -8, 0),
2648 BPF_MOV64_IMM(BPF_REG_0, 0),
2651 .errstr_unpriv = "attempt to corrupt spilled",
2652 .result_unpriv = REJECT,
2656 "unpriv: mangle pointer on stack 2",
2658 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
2659 BPF_ST_MEM(BPF_B, BPF_REG_10, -1, 0),
2660 BPF_MOV64_IMM(BPF_REG_0, 0),
2663 .errstr_unpriv = "attempt to corrupt spilled",
2664 .result_unpriv = REJECT,
2668 "unpriv: read pointer from stack in small chunks",
2670 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
2671 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -8),
2672 BPF_MOV64_IMM(BPF_REG_0, 0),
2675 .errstr = "invalid size",
2679 "unpriv: write pointer into ctx",
2681 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0),
2682 BPF_MOV64_IMM(BPF_REG_0, 0),
2685 .errstr_unpriv = "R1 leaks addr",
2686 .result_unpriv = REJECT,
2687 .errstr = "invalid bpf_context access",
2691 "unpriv: spill/fill of ctx",
2693 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2694 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2695 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2696 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2697 BPF_MOV64_IMM(BPF_REG_0, 0),
2703 "unpriv: spill/fill of ctx 2",
2705 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2706 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2707 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2708 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2709 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2710 BPF_FUNC_get_hash_recalc),
2711 BPF_MOV64_IMM(BPF_REG_0, 0),
2715 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2718 "unpriv: spill/fill of ctx 3",
2720 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2721 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2722 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2723 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, 0),
2724 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2725 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2726 BPF_FUNC_get_hash_recalc),
2730 .errstr = "R1 type=fp expected=ctx",
2731 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2734 "unpriv: spill/fill of ctx 4",
2736 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2737 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2738 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2739 BPF_MOV64_IMM(BPF_REG_0, 1),
2740 BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_DW, BPF_REG_10,
2742 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2743 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2744 BPF_FUNC_get_hash_recalc),
2748 .errstr = "R1 type=inv expected=ctx",
2749 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2752 "unpriv: spill/fill of different pointers stx",
2754 BPF_MOV64_IMM(BPF_REG_3, 42),
2755 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2756 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2757 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
2758 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2759 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
2760 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
2761 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
2762 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2763 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2764 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
2765 offsetof(struct __sk_buff, mark)),
2766 BPF_MOV64_IMM(BPF_REG_0, 0),
2770 .errstr = "same insn cannot be used with different pointers",
2771 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2774 "unpriv: spill/fill of different pointers ldx",
2776 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2777 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2778 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
2779 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2780 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
2781 -(__s32)offsetof(struct bpf_perf_event_data,
2782 sample_period) - 8),
2783 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
2784 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
2785 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2786 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2787 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1,
2788 offsetof(struct bpf_perf_event_data,
2790 BPF_MOV64_IMM(BPF_REG_0, 0),
2794 .errstr = "same insn cannot be used with different pointers",
2795 .prog_type = BPF_PROG_TYPE_PERF_EVENT,
2798 "unpriv: write pointer into map elem value",
2800 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
2801 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2802 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2803 BPF_LD_MAP_FD(BPF_REG_1, 0),
2804 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2805 BPF_FUNC_map_lookup_elem),
2806 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
2807 BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
2810 .fixup_map1 = { 3 },
2811 .errstr_unpriv = "R0 leaks addr",
2812 .result_unpriv = REJECT,
2816 "alu32: mov u32 const",
2818 BPF_MOV32_IMM(BPF_REG_7, 0),
2819 BPF_ALU32_IMM(BPF_AND, BPF_REG_7, 1),
2820 BPF_MOV32_REG(BPF_REG_0, BPF_REG_7),
2821 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
2822 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
2825 .errstr_unpriv = "R7 invalid mem access 'inv'",
2826 .result_unpriv = REJECT,
2831 "unpriv: partial copy of pointer",
2833 BPF_MOV32_REG(BPF_REG_1, BPF_REG_10),
2834 BPF_MOV64_IMM(BPF_REG_0, 0),
2837 .errstr_unpriv = "R10 partial copy",
2838 .result_unpriv = REJECT,
2842 "unpriv: pass pointer to tail_call",
2844 BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
2845 BPF_LD_MAP_FD(BPF_REG_2, 0),
2846 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2847 BPF_FUNC_tail_call),
2848 BPF_MOV64_IMM(BPF_REG_0, 0),
2851 .fixup_prog1 = { 1 },
2852 .errstr_unpriv = "R3 leaks addr into helper",
2853 .result_unpriv = REJECT,
2857 "unpriv: cmp map pointer with zero",
2859 BPF_MOV64_IMM(BPF_REG_1, 0),
2860 BPF_LD_MAP_FD(BPF_REG_1, 0),
2861 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
2862 BPF_MOV64_IMM(BPF_REG_0, 0),
2865 .fixup_map1 = { 1 },
2866 .errstr_unpriv = "R1 pointer comparison",
2867 .result_unpriv = REJECT,
2871 "unpriv: write into frame pointer",
2873 BPF_MOV64_REG(BPF_REG_10, BPF_REG_1),
2874 BPF_MOV64_IMM(BPF_REG_0, 0),
2877 .errstr = "frame pointer is read only",
2881 "unpriv: spill/fill frame pointer",
2883 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2884 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2885 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, 0),
2886 BPF_LDX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, 0),
2887 BPF_MOV64_IMM(BPF_REG_0, 0),
2890 .errstr = "frame pointer is read only",
2894 "unpriv: cmp of frame pointer",
2896 BPF_JMP_IMM(BPF_JEQ, BPF_REG_10, 0, 0),
2897 BPF_MOV64_IMM(BPF_REG_0, 0),
2900 .errstr_unpriv = "R10 pointer comparison",
2901 .result_unpriv = REJECT,
2905 "unpriv: adding of fp, reg",
2907 BPF_MOV64_IMM(BPF_REG_0, 0),
2908 BPF_MOV64_IMM(BPF_REG_1, 0),
2909 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10),
2910 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, -8),
2913 .errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
2914 .result_unpriv = REJECT,
2918 "unpriv: adding of fp, imm",
2920 BPF_MOV64_IMM(BPF_REG_0, 0),
2921 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
2922 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0),
2923 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, -8),
2926 .errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
2927 .result_unpriv = REJECT,
2931 "unpriv: cmp of stack pointer",
2933 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2934 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2935 BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 0),
2936 BPF_MOV64_IMM(BPF_REG_0, 0),
2939 .errstr_unpriv = "R2 pointer comparison",
2940 .result_unpriv = REJECT,
2944 "runtime/jit: tail_call within bounds, prog once",
2946 BPF_MOV64_IMM(BPF_REG_3, 0),
2947 BPF_LD_MAP_FD(BPF_REG_2, 0),
2948 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2949 BPF_FUNC_tail_call),
2950 BPF_MOV64_IMM(BPF_REG_0, 1),
2953 .fixup_prog1 = { 1 },
2958 "runtime/jit: tail_call within bounds, prog loop",
2960 BPF_MOV64_IMM(BPF_REG_3, 1),
2961 BPF_LD_MAP_FD(BPF_REG_2, 0),
2962 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2963 BPF_FUNC_tail_call),
2964 BPF_MOV64_IMM(BPF_REG_0, 1),
2967 .fixup_prog1 = { 1 },
2972 "runtime/jit: tail_call within bounds, no prog",
2974 BPF_MOV64_IMM(BPF_REG_3, 2),
2975 BPF_LD_MAP_FD(BPF_REG_2, 0),
2976 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2977 BPF_FUNC_tail_call),
2978 BPF_MOV64_IMM(BPF_REG_0, 1),
2981 .fixup_prog1 = { 1 },
2986 "runtime/jit: tail_call out of bounds",
2988 BPF_MOV64_IMM(BPF_REG_3, 256),
2989 BPF_LD_MAP_FD(BPF_REG_2, 0),
2990 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2991 BPF_FUNC_tail_call),
2992 BPF_MOV64_IMM(BPF_REG_0, 2),
2995 .fixup_prog1 = { 1 },
3000 "runtime/jit: pass negative index to tail_call",
3002 BPF_MOV64_IMM(BPF_REG_3, -1),
3003 BPF_LD_MAP_FD(BPF_REG_2, 0),
3004 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3005 BPF_FUNC_tail_call),
3006 BPF_MOV64_IMM(BPF_REG_0, 2),
3009 .fixup_prog1 = { 1 },
3014 "runtime/jit: pass > 32bit index to tail_call",
3016 BPF_LD_IMM64(BPF_REG_3, 0x100000000ULL),
3017 BPF_LD_MAP_FD(BPF_REG_2, 0),
3018 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3019 BPF_FUNC_tail_call),
3020 BPF_MOV64_IMM(BPF_REG_0, 2),
3023 .fixup_prog1 = { 2 },
3026 /* Verifier rewrite for unpriv skips tail call here. */
3030 "stack pointer arithmetic",
3032 BPF_MOV64_IMM(BPF_REG_1, 4),
3033 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
3034 BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
3035 BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -10),
3036 BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -10),
3037 BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
3038 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_1),
3039 BPF_ST_MEM(0, BPF_REG_2, 4, 0),
3040 BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
3041 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
3042 BPF_ST_MEM(0, BPF_REG_2, 4, 0),
3043 BPF_MOV64_IMM(BPF_REG_0, 0),
3049 "raw_stack: no skb_load_bytes",
3051 BPF_MOV64_IMM(BPF_REG_2, 4),
3052 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3053 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
3054 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3055 BPF_MOV64_IMM(BPF_REG_4, 8),
3056 /* Call to skb_load_bytes() omitted. */
3057 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3061 .errstr = "invalid read from stack off -8+0 size 8",
3062 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3065 "raw_stack: skb_load_bytes, negative len",
3067 BPF_MOV64_IMM(BPF_REG_2, 4),
3068 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3069 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
3070 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3071 BPF_MOV64_IMM(BPF_REG_4, -8),
3072 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3073 BPF_FUNC_skb_load_bytes),
3074 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3078 .errstr = "R4 min value is negative",
3079 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3082 "raw_stack: skb_load_bytes, negative len 2",
3084 BPF_MOV64_IMM(BPF_REG_2, 4),
3085 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3086 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
3087 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3088 BPF_MOV64_IMM(BPF_REG_4, ~0),
3089 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3090 BPF_FUNC_skb_load_bytes),
3091 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3095 .errstr = "R4 min value is negative",
3096 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3099 "raw_stack: skb_load_bytes, zero len",
3101 BPF_MOV64_IMM(BPF_REG_2, 4),
3102 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3103 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
3104 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3105 BPF_MOV64_IMM(BPF_REG_4, 0),
3106 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3107 BPF_FUNC_skb_load_bytes),
3108 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3112 .errstr = "invalid stack type R3",
3113 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3116 "raw_stack: skb_load_bytes, no init",
3118 BPF_MOV64_IMM(BPF_REG_2, 4),
3119 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3120 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
3121 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3122 BPF_MOV64_IMM(BPF_REG_4, 8),
3123 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3124 BPF_FUNC_skb_load_bytes),
3125 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3129 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3132 "raw_stack: skb_load_bytes, init",
3134 BPF_MOV64_IMM(BPF_REG_2, 4),
3135 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3136 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
3137 BPF_ST_MEM(BPF_DW, BPF_REG_6, 0, 0xcafe),
3138 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3139 BPF_MOV64_IMM(BPF_REG_4, 8),
3140 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3141 BPF_FUNC_skb_load_bytes),
3142 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3146 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3149 "raw_stack: skb_load_bytes, spilled regs around bounds",
3151 BPF_MOV64_IMM(BPF_REG_2, 4),
3152 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3153 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
3154 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
3155 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
3156 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3157 BPF_MOV64_IMM(BPF_REG_4, 8),
3158 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3159 BPF_FUNC_skb_load_bytes),
3160 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
3161 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8),
3162 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
3163 offsetof(struct __sk_buff, mark)),
3164 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
3165 offsetof(struct __sk_buff, priority)),
3166 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
3170 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3173 "raw_stack: skb_load_bytes, spilled regs corruption",
3175 BPF_MOV64_IMM(BPF_REG_2, 4),
3176 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3177 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
3178 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
3179 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3180 BPF_MOV64_IMM(BPF_REG_4, 8),
3181 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3182 BPF_FUNC_skb_load_bytes),
3183 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3184 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
3185 offsetof(struct __sk_buff, mark)),
3189 .errstr = "R0 invalid mem access 'inv'",
3190 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3191 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3194 "raw_stack: skb_load_bytes, spilled regs corruption 2",
3196 BPF_MOV64_IMM(BPF_REG_2, 4),
3197 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3198 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
3199 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
3200 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
3201 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
3202 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3203 BPF_MOV64_IMM(BPF_REG_4, 8),
3204 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3205 BPF_FUNC_skb_load_bytes),
3206 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
3207 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8),
3208 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6, 0),
3209 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
3210 offsetof(struct __sk_buff, mark)),
3211 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
3212 offsetof(struct __sk_buff, priority)),
3213 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
3214 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_3,
3215 offsetof(struct __sk_buff, pkt_type)),
3216 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
3220 .errstr = "R3 invalid mem access 'inv'",
3221 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3222 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3225 "raw_stack: skb_load_bytes, spilled regs + data",
3227 BPF_MOV64_IMM(BPF_REG_2, 4),
3228 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3229 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
3230 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
3231 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
3232 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
3233 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3234 BPF_MOV64_IMM(BPF_REG_4, 8),
3235 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3236 BPF_FUNC_skb_load_bytes),
3237 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
3238 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8),
3239 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6, 0),
3240 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
3241 offsetof(struct __sk_buff, mark)),
3242 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
3243 offsetof(struct __sk_buff, priority)),
3244 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
3245 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
3249 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3252 "raw_stack: skb_load_bytes, invalid access 1",
3254 BPF_MOV64_IMM(BPF_REG_2, 4),
3255 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3256 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -513),
3257 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3258 BPF_MOV64_IMM(BPF_REG_4, 8),
3259 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3260 BPF_FUNC_skb_load_bytes),
3261 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3265 .errstr = "invalid stack type R3 off=-513 access_size=8",
3266 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3269 "raw_stack: skb_load_bytes, invalid access 2",
3271 BPF_MOV64_IMM(BPF_REG_2, 4),
3272 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3273 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1),
3274 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3275 BPF_MOV64_IMM(BPF_REG_4, 8),
3276 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3277 BPF_FUNC_skb_load_bytes),
3278 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3282 .errstr = "invalid stack type R3 off=-1 access_size=8",
3283 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3286 "raw_stack: skb_load_bytes, invalid access 3",
3288 BPF_MOV64_IMM(BPF_REG_2, 4),
3289 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3290 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 0xffffffff),
3291 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3292 BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
3293 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3294 BPF_FUNC_skb_load_bytes),
3295 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3299 .errstr = "R4 min value is negative",
3300 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3303 "raw_stack: skb_load_bytes, invalid access 4",
3305 BPF_MOV64_IMM(BPF_REG_2, 4),
3306 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3307 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1),
3308 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3309 BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff),
3310 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3311 BPF_FUNC_skb_load_bytes),
3312 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3316 .errstr = "R4 unbounded memory access, use 'var &= const' or 'if (var < const)'",
3317 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3320 "raw_stack: skb_load_bytes, invalid access 5",
3322 BPF_MOV64_IMM(BPF_REG_2, 4),
3323 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3324 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
3325 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3326 BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff),
3327 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3328 BPF_FUNC_skb_load_bytes),
3329 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3333 .errstr = "R4 unbounded memory access, use 'var &= const' or 'if (var < const)'",
3334 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3337 "raw_stack: skb_load_bytes, invalid access 6",
3339 BPF_MOV64_IMM(BPF_REG_2, 4),
3340 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3341 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
3342 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3343 BPF_MOV64_IMM(BPF_REG_4, 0),
3344 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3345 BPF_FUNC_skb_load_bytes),
3346 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3350 .errstr = "invalid stack type R3 off=-512 access_size=0",
3351 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3354 "raw_stack: skb_load_bytes, large access",
3356 BPF_MOV64_IMM(BPF_REG_2, 4),
3357 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3358 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
3359 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3360 BPF_MOV64_IMM(BPF_REG_4, 512),
3361 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3362 BPF_FUNC_skb_load_bytes),
3363 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3367 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3370 "context stores via ST",
3372 BPF_MOV64_IMM(BPF_REG_0, 0),
3373 BPF_ST_MEM(BPF_DW, BPF_REG_1, offsetof(struct __sk_buff, mark), 0),
3376 .errstr = "BPF_ST stores into R1 context is not allowed",
3378 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3381 "context stores via XADD",
3383 BPF_MOV64_IMM(BPF_REG_0, 0),
3384 BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_W, BPF_REG_1,
3385 BPF_REG_0, offsetof(struct __sk_buff, mark), 0),
3388 .errstr = "BPF_XADD stores into R1 context is not allowed",
3390 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3393 "direct packet access: test1",
3395 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3396 offsetof(struct __sk_buff, data)),
3397 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3398 offsetof(struct __sk_buff, data_end)),
3399 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3400 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3401 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3402 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3403 BPF_MOV64_IMM(BPF_REG_0, 0),
3407 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3410 "direct packet access: test2",
3412 BPF_MOV64_IMM(BPF_REG_0, 1),
3413 BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
3414 offsetof(struct __sk_buff, data_end)),
3415 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3416 offsetof(struct __sk_buff, data)),
3417 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
3418 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
3419 BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_4, 15),
3420 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 7),
3421 BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_3, 12),
3422 BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 14),
3423 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3424 offsetof(struct __sk_buff, data)),
3425 BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_4),
3426 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3427 offsetof(struct __sk_buff, len)),
3428 BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 49),
3429 BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 49),
3430 BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_2),
3431 BPF_MOV64_REG(BPF_REG_2, BPF_REG_3),
3432 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
3433 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
3434 offsetof(struct __sk_buff, data_end)),
3435 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
3436 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_3, 4),
3437 BPF_MOV64_IMM(BPF_REG_0, 0),
3441 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3444 "direct packet access: test3",
3446 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3447 offsetof(struct __sk_buff, data)),
3448 BPF_MOV64_IMM(BPF_REG_0, 0),
3451 .errstr = "invalid bpf_context access off=76",
3453 .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
3456 "direct packet access: test4 (write)",
3458 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3459 offsetof(struct __sk_buff, data)),
3460 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3461 offsetof(struct __sk_buff, data_end)),
3462 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3463 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3464 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3465 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
3466 BPF_MOV64_IMM(BPF_REG_0, 0),
3470 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3473 "direct packet access: test5 (pkt_end >= reg, good access)",
3475 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3476 offsetof(struct __sk_buff, data)),
3477 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3478 offsetof(struct __sk_buff, data_end)),
3479 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3480 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3481 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 2),
3482 BPF_MOV64_IMM(BPF_REG_0, 1),
3484 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3485 BPF_MOV64_IMM(BPF_REG_0, 0),
3489 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3492 "direct packet access: test6 (pkt_end >= reg, bad access)",
3494 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3495 offsetof(struct __sk_buff, data)),
3496 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3497 offsetof(struct __sk_buff, data_end)),
3498 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3499 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3500 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 3),
3501 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3502 BPF_MOV64_IMM(BPF_REG_0, 1),
3504 BPF_MOV64_IMM(BPF_REG_0, 0),
3507 .errstr = "invalid access to packet",
3509 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3512 "direct packet access: test7 (pkt_end >= reg, both accesses)",
3514 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3515 offsetof(struct __sk_buff, data)),
3516 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3517 offsetof(struct __sk_buff, data_end)),
3518 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3519 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3520 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 3),
3521 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3522 BPF_MOV64_IMM(BPF_REG_0, 1),
3524 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3525 BPF_MOV64_IMM(BPF_REG_0, 0),
3528 .errstr = "invalid access to packet",
3530 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3533 "direct packet access: test8 (double test, variant 1)",
3535 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3536 offsetof(struct __sk_buff, data)),
3537 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3538 offsetof(struct __sk_buff, data_end)),
3539 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3540 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3541 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 4),
3542 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3543 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3544 BPF_MOV64_IMM(BPF_REG_0, 1),
3546 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3547 BPF_MOV64_IMM(BPF_REG_0, 0),
3551 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3554 "direct packet access: test9 (double test, variant 2)",
3556 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3557 offsetof(struct __sk_buff, data)),
3558 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3559 offsetof(struct __sk_buff, data_end)),
3560 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3561 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3562 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 2),
3563 BPF_MOV64_IMM(BPF_REG_0, 1),
3565 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3566 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3567 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3568 BPF_MOV64_IMM(BPF_REG_0, 0),
3572 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3575 "direct packet access: test10 (write invalid)",
3577 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3578 offsetof(struct __sk_buff, data)),
3579 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3580 offsetof(struct __sk_buff, data_end)),
3581 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3582 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3583 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
3584 BPF_MOV64_IMM(BPF_REG_0, 0),
3586 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
3587 BPF_MOV64_IMM(BPF_REG_0, 0),
3590 .errstr = "invalid access to packet",
3592 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3595 "direct packet access: test11 (shift, good access)",
3597 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3598 offsetof(struct __sk_buff, data)),
3599 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3600 offsetof(struct __sk_buff, data_end)),
3601 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3602 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
3603 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
3604 BPF_MOV64_IMM(BPF_REG_3, 144),
3605 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
3606 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
3607 BPF_ALU64_IMM(BPF_RSH, BPF_REG_5, 3),
3608 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
3609 BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
3610 BPF_MOV64_IMM(BPF_REG_0, 1),
3612 BPF_MOV64_IMM(BPF_REG_0, 0),
3616 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3620 "direct packet access: test12 (and, good access)",
3622 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3623 offsetof(struct __sk_buff, data)),
3624 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3625 offsetof(struct __sk_buff, data_end)),
3626 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3627 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
3628 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
3629 BPF_MOV64_IMM(BPF_REG_3, 144),
3630 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
3631 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
3632 BPF_ALU64_IMM(BPF_AND, BPF_REG_5, 15),
3633 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
3634 BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
3635 BPF_MOV64_IMM(BPF_REG_0, 1),
3637 BPF_MOV64_IMM(BPF_REG_0, 0),
3641 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3645 "direct packet access: test13 (branches, good access)",
3647 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3648 offsetof(struct __sk_buff, data)),
3649 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3650 offsetof(struct __sk_buff, data_end)),
3651 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3652 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
3653 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 13),
3654 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3655 offsetof(struct __sk_buff, mark)),
3656 BPF_MOV64_IMM(BPF_REG_4, 1),
3657 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_4, 2),
3658 BPF_MOV64_IMM(BPF_REG_3, 14),
3659 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
3660 BPF_MOV64_IMM(BPF_REG_3, 24),
3661 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
3662 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
3663 BPF_ALU64_IMM(BPF_AND, BPF_REG_5, 15),
3664 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
3665 BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
3666 BPF_MOV64_IMM(BPF_REG_0, 1),
3668 BPF_MOV64_IMM(BPF_REG_0, 0),
3672 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3676 "direct packet access: test14 (pkt_ptr += 0, CONST_IMM, good access)",
3678 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3679 offsetof(struct __sk_buff, data)),
3680 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3681 offsetof(struct __sk_buff, data_end)),
3682 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3683 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
3684 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 7),
3685 BPF_MOV64_IMM(BPF_REG_5, 12),
3686 BPF_ALU64_IMM(BPF_RSH, BPF_REG_5, 4),
3687 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
3688 BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
3689 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_6, 0),
3690 BPF_MOV64_IMM(BPF_REG_0, 1),
3692 BPF_MOV64_IMM(BPF_REG_0, 0),
3696 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3700 "direct packet access: test15 (spill with xadd)",
3702 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3703 offsetof(struct __sk_buff, data)),
3704 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3705 offsetof(struct __sk_buff, data_end)),
3706 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3707 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3708 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
3709 BPF_MOV64_IMM(BPF_REG_5, 4096),
3710 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
3711 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
3712 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
3713 BPF_STX_XADD(BPF_DW, BPF_REG_4, BPF_REG_5, 0),
3714 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
3715 BPF_STX_MEM(BPF_W, BPF_REG_2, BPF_REG_5, 0),
3716 BPF_MOV64_IMM(BPF_REG_0, 0),
3719 .errstr = "R2 invalid mem access 'inv'",
3721 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3722 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3725 "direct packet access: test16 (arith on data_end)",
3727 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3728 offsetof(struct __sk_buff, data)),
3729 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3730 offsetof(struct __sk_buff, data_end)),
3731 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3732 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3733 BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 16),
3734 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3735 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
3736 BPF_MOV64_IMM(BPF_REG_0, 0),
3739 .errstr = "R3 pointer arithmetic on PTR_TO_PACKET_END",
3741 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3744 "direct packet access: test17 (pruning, alignment)",
3746 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3747 offsetof(struct __sk_buff, data)),
3748 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3749 offsetof(struct __sk_buff, data_end)),
3750 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3751 offsetof(struct __sk_buff, mark)),
3752 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3753 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 14),
3754 BPF_JMP_IMM(BPF_JGT, BPF_REG_7, 1, 4),
3755 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3756 BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, -4),
3757 BPF_MOV64_IMM(BPF_REG_0, 0),
3759 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
3762 .errstr = "misaligned packet access off 2+(0x0; 0x0)+15+-4 size 4",
3764 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3765 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
3768 "direct packet access: test18 (imm += pkt_ptr, 1)",
3770 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3771 offsetof(struct __sk_buff, data)),
3772 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3773 offsetof(struct __sk_buff, data_end)),
3774 BPF_MOV64_IMM(BPF_REG_0, 8),
3775 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
3776 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3777 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
3778 BPF_MOV64_IMM(BPF_REG_0, 0),
3782 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3785 "direct packet access: test19 (imm += pkt_ptr, 2)",
3787 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3788 offsetof(struct __sk_buff, data)),
3789 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3790 offsetof(struct __sk_buff, data_end)),
3791 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3792 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3793 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
3794 BPF_MOV64_IMM(BPF_REG_4, 4),
3795 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
3796 BPF_STX_MEM(BPF_B, BPF_REG_4, BPF_REG_4, 0),
3797 BPF_MOV64_IMM(BPF_REG_0, 0),
3801 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3804 "direct packet access: test20 (x += pkt_ptr, 1)",
3806 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3807 offsetof(struct __sk_buff, data)),
3808 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3809 offsetof(struct __sk_buff, data_end)),
3810 BPF_MOV64_IMM(BPF_REG_0, 0xffffffff),
3811 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
3812 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
3813 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0x7fff),
3814 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3815 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
3816 BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
3817 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0x7fff - 1),
3818 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
3819 BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
3820 BPF_MOV64_IMM(BPF_REG_0, 0),
3823 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3825 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3828 "direct packet access: test21 (x += pkt_ptr, 2)",
3830 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3831 offsetof(struct __sk_buff, data)),
3832 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3833 offsetof(struct __sk_buff, data_end)),
3834 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3835 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3836 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 9),
3837 BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
3838 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -8),
3839 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
3840 BPF_ALU64_IMM(BPF_AND, BPF_REG_4, 0x7fff),
3841 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
3842 BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
3843 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0x7fff - 1),
3844 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
3845 BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
3846 BPF_MOV64_IMM(BPF_REG_0, 0),
3849 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3851 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3854 "direct packet access: test22 (x += pkt_ptr, 3)",
3856 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3857 offsetof(struct __sk_buff, data)),
3858 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3859 offsetof(struct __sk_buff, data_end)),
3860 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3861 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3862 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -8),
3863 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_3, -16),
3864 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_10, -16),
3865 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 11),
3866 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8),
3867 BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
3868 BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_4, -8),
3869 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
3870 BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 49),
3871 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
3872 BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
3873 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2),
3874 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
3875 BPF_MOV64_IMM(BPF_REG_2, 1),
3876 BPF_STX_MEM(BPF_H, BPF_REG_4, BPF_REG_2, 0),
3877 BPF_MOV64_IMM(BPF_REG_0, 0),
3880 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3882 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3885 "direct packet access: test23 (x += pkt_ptr, 4)",
3887 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3888 offsetof(struct __sk_buff, data)),
3889 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3890 offsetof(struct __sk_buff, data_end)),
3891 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
3892 offsetof(struct __sk_buff, mark)),
3893 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
3894 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
3895 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xffff),
3896 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3897 BPF_MOV64_IMM(BPF_REG_0, 31),
3898 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
3899 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
3900 BPF_MOV64_REG(BPF_REG_5, BPF_REG_0),
3901 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0xffff - 1),
3902 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3903 BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_0, 0),
3904 BPF_MOV64_IMM(BPF_REG_0, 0),
3907 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3909 .errstr = "invalid access to packet, off=0 size=8, R5(id=1,off=0,r=0)",
3910 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3913 "direct packet access: test24 (x += pkt_ptr, 5)",
3915 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3916 offsetof(struct __sk_buff, data)),
3917 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3918 offsetof(struct __sk_buff, data_end)),
3919 BPF_MOV64_IMM(BPF_REG_0, 0xffffffff),
3920 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
3921 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
3922 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xff),
3923 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3924 BPF_MOV64_IMM(BPF_REG_0, 64),
3925 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
3926 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
3927 BPF_MOV64_REG(BPF_REG_5, BPF_REG_0),
3928 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x7fff - 1),
3929 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3930 BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_0, 0),
3931 BPF_MOV64_IMM(BPF_REG_0, 0),
3934 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3936 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3939 "direct packet access: test25 (marking on <, good access)",
3941 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3942 offsetof(struct __sk_buff, data)),
3943 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3944 offsetof(struct __sk_buff, data_end)),
3945 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3946 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3947 BPF_JMP_REG(BPF_JLT, BPF_REG_0, BPF_REG_3, 2),
3948 BPF_MOV64_IMM(BPF_REG_0, 0),
3950 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3951 BPF_JMP_IMM(BPF_JA, 0, 0, -4),
3954 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3957 "direct packet access: test26 (marking on <, bad access)",
3959 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3960 offsetof(struct __sk_buff, data)),
3961 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3962 offsetof(struct __sk_buff, data_end)),
3963 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3964 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3965 BPF_JMP_REG(BPF_JLT, BPF_REG_0, BPF_REG_3, 3),
3966 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3967 BPF_MOV64_IMM(BPF_REG_0, 0),
3969 BPF_JMP_IMM(BPF_JA, 0, 0, -3),
3972 .errstr = "invalid access to packet",
3973 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3976 "direct packet access: test27 (marking on <=, good access)",
3978 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3979 offsetof(struct __sk_buff, data)),
3980 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3981 offsetof(struct __sk_buff, data_end)),
3982 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3983 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3984 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_0, 1),
3985 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3986 BPF_MOV64_IMM(BPF_REG_0, 1),
3990 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3994 "direct packet access: test28 (marking on <=, bad access)",
3996 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3997 offsetof(struct __sk_buff, data)),
3998 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3999 offsetof(struct __sk_buff, data_end)),
4000 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4001 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4002 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_0, 2),
4003 BPF_MOV64_IMM(BPF_REG_0, 1),
4005 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
4006 BPF_JMP_IMM(BPF_JA, 0, 0, -4),
4009 .errstr = "invalid access to packet",
4010 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4013 "helper access to packet: test1, valid packet_ptr range",
4015 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4016 offsetof(struct xdp_md, data)),
4017 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4018 offsetof(struct xdp_md, data_end)),
4019 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
4020 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
4021 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 5),
4022 BPF_LD_MAP_FD(BPF_REG_1, 0),
4023 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
4024 BPF_MOV64_IMM(BPF_REG_4, 0),
4025 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4026 BPF_FUNC_map_update_elem),
4027 BPF_MOV64_IMM(BPF_REG_0, 0),
4030 .fixup_map1 = { 5 },
4031 .result_unpriv = ACCEPT,
4033 .prog_type = BPF_PROG_TYPE_XDP,
4036 "helper access to packet: test2, unchecked packet_ptr",
4038 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4039 offsetof(struct xdp_md, data)),
4040 BPF_LD_MAP_FD(BPF_REG_1, 0),
4041 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4042 BPF_FUNC_map_lookup_elem),
4043 BPF_MOV64_IMM(BPF_REG_0, 0),
4046 .fixup_map1 = { 1 },
4048 .errstr = "invalid access to packet",
4049 .prog_type = BPF_PROG_TYPE_XDP,
4052 "helper access to packet: test3, variable add",
4054 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4055 offsetof(struct xdp_md, data)),
4056 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4057 offsetof(struct xdp_md, data_end)),
4058 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
4059 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
4060 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 10),
4061 BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_2, 0),
4062 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
4063 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_5),
4064 BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
4065 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 8),
4066 BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 4),
4067 BPF_LD_MAP_FD(BPF_REG_1, 0),
4068 BPF_MOV64_REG(BPF_REG_2, BPF_REG_4),
4069 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4070 BPF_FUNC_map_lookup_elem),
4071 BPF_MOV64_IMM(BPF_REG_0, 0),
4074 .fixup_map1 = { 11 },
4076 .prog_type = BPF_PROG_TYPE_XDP,
4079 "helper access to packet: test4, packet_ptr with bad range",
4081 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4082 offsetof(struct xdp_md, data)),
4083 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4084 offsetof(struct xdp_md, data_end)),
4085 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
4086 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
4087 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 2),
4088 BPF_MOV64_IMM(BPF_REG_0, 0),
4090 BPF_LD_MAP_FD(BPF_REG_1, 0),
4091 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4092 BPF_FUNC_map_lookup_elem),
4093 BPF_MOV64_IMM(BPF_REG_0, 0),
4096 .fixup_map1 = { 7 },
4098 .errstr = "invalid access to packet",
4099 .prog_type = BPF_PROG_TYPE_XDP,
4102 "helper access to packet: test5, packet_ptr with too short range",
4104 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4105 offsetof(struct xdp_md, data)),
4106 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4107 offsetof(struct xdp_md, data_end)),
4108 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
4109 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
4110 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 7),
4111 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 3),
4112 BPF_LD_MAP_FD(BPF_REG_1, 0),
4113 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4114 BPF_FUNC_map_lookup_elem),
4115 BPF_MOV64_IMM(BPF_REG_0, 0),
4118 .fixup_map1 = { 6 },
4120 .errstr = "invalid access to packet",
4121 .prog_type = BPF_PROG_TYPE_XDP,
4124 "helper access to packet: test6, cls valid packet_ptr range",
4126 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4127 offsetof(struct __sk_buff, data)),
4128 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4129 offsetof(struct __sk_buff, data_end)),
4130 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
4131 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
4132 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 5),
4133 BPF_LD_MAP_FD(BPF_REG_1, 0),
4134 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
4135 BPF_MOV64_IMM(BPF_REG_4, 0),
4136 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4137 BPF_FUNC_map_update_elem),
4138 BPF_MOV64_IMM(BPF_REG_0, 0),
4141 .fixup_map1 = { 5 },
4143 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4146 "helper access to packet: test7, cls unchecked packet_ptr",
4148 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4149 offsetof(struct __sk_buff, data)),
4150 BPF_LD_MAP_FD(BPF_REG_1, 0),
4151 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4152 BPF_FUNC_map_lookup_elem),
4153 BPF_MOV64_IMM(BPF_REG_0, 0),
4156 .fixup_map1 = { 1 },
4158 .errstr = "invalid access to packet",
4159 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4162 "helper access to packet: test8, cls variable add",
4164 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4165 offsetof(struct __sk_buff, data)),
4166 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4167 offsetof(struct __sk_buff, data_end)),
4168 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
4169 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
4170 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 10),
4171 BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_2, 0),
4172 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
4173 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_5),
4174 BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
4175 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 8),
4176 BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 4),
4177 BPF_LD_MAP_FD(BPF_REG_1, 0),
4178 BPF_MOV64_REG(BPF_REG_2, BPF_REG_4),
4179 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4180 BPF_FUNC_map_lookup_elem),
4181 BPF_MOV64_IMM(BPF_REG_0, 0),
4184 .fixup_map1 = { 11 },
4186 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4189 "helper access to packet: test9, cls packet_ptr with bad range",
4191 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4192 offsetof(struct __sk_buff, data)),
4193 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4194 offsetof(struct __sk_buff, data_end)),
4195 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
4196 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
4197 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 2),
4198 BPF_MOV64_IMM(BPF_REG_0, 0),
4200 BPF_LD_MAP_FD(BPF_REG_1, 0),
4201 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4202 BPF_FUNC_map_lookup_elem),
4203 BPF_MOV64_IMM(BPF_REG_0, 0),
4206 .fixup_map1 = { 7 },
4208 .errstr = "invalid access to packet",
4209 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4212 "helper access to packet: test10, cls packet_ptr with too short range",
4214 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4215 offsetof(struct __sk_buff, data)),
4216 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4217 offsetof(struct __sk_buff, data_end)),
4218 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
4219 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
4220 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 7),
4221 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 3),
4222 BPF_LD_MAP_FD(BPF_REG_1, 0),
4223 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4224 BPF_FUNC_map_lookup_elem),
4225 BPF_MOV64_IMM(BPF_REG_0, 0),
4228 .fixup_map1 = { 6 },
4230 .errstr = "invalid access to packet",
4231 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4234 "helper access to packet: test11, cls unsuitable helper 1",
4236 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4237 offsetof(struct __sk_buff, data)),
4238 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4239 offsetof(struct __sk_buff, data_end)),
4240 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4241 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
4242 BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 7),
4243 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_7, 4),
4244 BPF_MOV64_IMM(BPF_REG_2, 0),
4245 BPF_MOV64_IMM(BPF_REG_4, 42),
4246 BPF_MOV64_IMM(BPF_REG_5, 0),
4247 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4248 BPF_FUNC_skb_store_bytes),
4249 BPF_MOV64_IMM(BPF_REG_0, 0),
4253 .errstr = "helper access to the packet",
4254 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4257 "helper access to packet: test12, cls unsuitable helper 2",
4259 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4260 offsetof(struct __sk_buff, data)),
4261 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4262 offsetof(struct __sk_buff, data_end)),
4263 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
4264 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8),
4265 BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_7, 3),
4266 BPF_MOV64_IMM(BPF_REG_2, 0),
4267 BPF_MOV64_IMM(BPF_REG_4, 4),
4268 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4269 BPF_FUNC_skb_load_bytes),
4270 BPF_MOV64_IMM(BPF_REG_0, 0),
4274 .errstr = "helper access to the packet",
4275 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4278 "helper access to packet: test13, cls helper ok",
4280 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4281 offsetof(struct __sk_buff, data)),
4282 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4283 offsetof(struct __sk_buff, data_end)),
4284 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4285 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4286 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4287 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4288 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4289 BPF_MOV64_IMM(BPF_REG_2, 4),
4290 BPF_MOV64_IMM(BPF_REG_3, 0),
4291 BPF_MOV64_IMM(BPF_REG_4, 0),
4292 BPF_MOV64_IMM(BPF_REG_5, 0),
4293 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4294 BPF_FUNC_csum_diff),
4295 BPF_MOV64_IMM(BPF_REG_0, 0),
4299 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4302 "helper access to packet: test14, cls helper ok sub",
4304 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4305 offsetof(struct __sk_buff, data)),
4306 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4307 offsetof(struct __sk_buff, data_end)),
4308 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4309 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4310 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4311 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4312 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 4),
4313 BPF_MOV64_IMM(BPF_REG_2, 4),
4314 BPF_MOV64_IMM(BPF_REG_3, 0),
4315 BPF_MOV64_IMM(BPF_REG_4, 0),
4316 BPF_MOV64_IMM(BPF_REG_5, 0),
4317 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4318 BPF_FUNC_csum_diff),
4319 BPF_MOV64_IMM(BPF_REG_0, 0),
4323 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4326 "helper access to packet: test15, cls helper fail sub",
4328 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4329 offsetof(struct __sk_buff, data)),
4330 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4331 offsetof(struct __sk_buff, data_end)),
4332 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4333 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4334 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4335 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4336 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 12),
4337 BPF_MOV64_IMM(BPF_REG_2, 4),
4338 BPF_MOV64_IMM(BPF_REG_3, 0),
4339 BPF_MOV64_IMM(BPF_REG_4, 0),
4340 BPF_MOV64_IMM(BPF_REG_5, 0),
4341 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4342 BPF_FUNC_csum_diff),
4343 BPF_MOV64_IMM(BPF_REG_0, 0),
4347 .errstr = "invalid access to packet",
4348 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4351 "helper access to packet: test16, cls helper fail range 1",
4353 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4354 offsetof(struct __sk_buff, data)),
4355 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4356 offsetof(struct __sk_buff, data_end)),
4357 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4358 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4359 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4360 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4361 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4362 BPF_MOV64_IMM(BPF_REG_2, 8),
4363 BPF_MOV64_IMM(BPF_REG_3, 0),
4364 BPF_MOV64_IMM(BPF_REG_4, 0),
4365 BPF_MOV64_IMM(BPF_REG_5, 0),
4366 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4367 BPF_FUNC_csum_diff),
4368 BPF_MOV64_IMM(BPF_REG_0, 0),
4372 .errstr = "invalid access to packet",
4373 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4376 "helper access to packet: test17, cls helper fail range 2",
4378 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4379 offsetof(struct __sk_buff, data)),
4380 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4381 offsetof(struct __sk_buff, data_end)),
4382 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4383 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4384 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4385 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4386 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4387 BPF_MOV64_IMM(BPF_REG_2, -9),
4388 BPF_MOV64_IMM(BPF_REG_3, 0),
4389 BPF_MOV64_IMM(BPF_REG_4, 0),
4390 BPF_MOV64_IMM(BPF_REG_5, 0),
4391 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4392 BPF_FUNC_csum_diff),
4393 BPF_MOV64_IMM(BPF_REG_0, 0),
4397 .errstr = "R2 min value is negative",
4398 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4401 "helper access to packet: test18, cls helper fail range 3",
4403 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4404 offsetof(struct __sk_buff, data)),
4405 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4406 offsetof(struct __sk_buff, data_end)),
4407 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4408 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4409 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4410 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4411 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4412 BPF_MOV64_IMM(BPF_REG_2, ~0),
4413 BPF_MOV64_IMM(BPF_REG_3, 0),
4414 BPF_MOV64_IMM(BPF_REG_4, 0),
4415 BPF_MOV64_IMM(BPF_REG_5, 0),
4416 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4417 BPF_FUNC_csum_diff),
4418 BPF_MOV64_IMM(BPF_REG_0, 0),
4422 .errstr = "R2 min value is negative",
4423 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4426 "helper access to packet: test19, cls helper range zero",
4428 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4429 offsetof(struct __sk_buff, data)),
4430 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4431 offsetof(struct __sk_buff, data_end)),
4432 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4433 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4434 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4435 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4436 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4437 BPF_MOV64_IMM(BPF_REG_2, 0),
4438 BPF_MOV64_IMM(BPF_REG_3, 0),
4439 BPF_MOV64_IMM(BPF_REG_4, 0),
4440 BPF_MOV64_IMM(BPF_REG_5, 0),
4441 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4442 BPF_FUNC_csum_diff),
4443 BPF_MOV64_IMM(BPF_REG_0, 0),
4447 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4450 "helper access to packet: test20, pkt end as input",
4452 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4453 offsetof(struct __sk_buff, data)),
4454 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4455 offsetof(struct __sk_buff, data_end)),
4456 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4457 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4458 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4459 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4460 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
4461 BPF_MOV64_IMM(BPF_REG_2, 4),
4462 BPF_MOV64_IMM(BPF_REG_3, 0),
4463 BPF_MOV64_IMM(BPF_REG_4, 0),
4464 BPF_MOV64_IMM(BPF_REG_5, 0),
4465 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4466 BPF_FUNC_csum_diff),
4467 BPF_MOV64_IMM(BPF_REG_0, 0),
4471 .errstr = "R1 type=pkt_end expected=fp",
4472 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4475 "helper access to packet: test21, wrong reg",
4477 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4478 offsetof(struct __sk_buff, data)),
4479 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4480 offsetof(struct __sk_buff, data_end)),
4481 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4482 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4483 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4484 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4485 BPF_MOV64_IMM(BPF_REG_2, 4),
4486 BPF_MOV64_IMM(BPF_REG_3, 0),
4487 BPF_MOV64_IMM(BPF_REG_4, 0),
4488 BPF_MOV64_IMM(BPF_REG_5, 0),
4489 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4490 BPF_FUNC_csum_diff),
4491 BPF_MOV64_IMM(BPF_REG_0, 0),
4495 .errstr = "invalid access to packet",
4496 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4499 "valid map access into an array with a constant",
4501 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4502 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4503 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4504 BPF_LD_MAP_FD(BPF_REG_1, 0),
4505 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4506 BPF_FUNC_map_lookup_elem),
4507 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
4508 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4509 offsetof(struct test_val, foo)),
4512 .fixup_map2 = { 3 },
4513 .errstr_unpriv = "R0 leaks addr",
4514 .result_unpriv = REJECT,
4518 "valid map access into an array with a register",
4520 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4521 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4522 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4523 BPF_LD_MAP_FD(BPF_REG_1, 0),
4524 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4525 BPF_FUNC_map_lookup_elem),
4526 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4527 BPF_MOV64_IMM(BPF_REG_1, 4),
4528 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
4529 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
4530 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4531 offsetof(struct test_val, foo)),
4534 .fixup_map2 = { 3 },
4535 .errstr_unpriv = "R0 leaks addr",
4536 .result_unpriv = REJECT,
4538 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4541 "valid map access into an array with a variable",
4543 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4544 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4545 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4546 BPF_LD_MAP_FD(BPF_REG_1, 0),
4547 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4548 BPF_FUNC_map_lookup_elem),
4549 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
4550 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
4551 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES, 3),
4552 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
4553 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
4554 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4555 offsetof(struct test_val, foo)),
4558 .fixup_map2 = { 3 },
4559 .errstr_unpriv = "R0 leaks addr",
4560 .result_unpriv = REJECT,
4562 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4565 "valid map access into an array with a signed variable",
4567 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4568 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4569 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4570 BPF_LD_MAP_FD(BPF_REG_1, 0),
4571 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4572 BPF_FUNC_map_lookup_elem),
4573 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
4574 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
4575 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 0xffffffff, 1),
4576 BPF_MOV32_IMM(BPF_REG_1, 0),
4577 BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
4578 BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
4579 BPF_MOV32_IMM(BPF_REG_1, 0),
4580 BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
4581 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
4582 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4583 offsetof(struct test_val, foo)),
4586 .fixup_map2 = { 3 },
4587 .errstr_unpriv = "R0 leaks addr",
4588 .result_unpriv = REJECT,
4590 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4593 "invalid map access into an array with a constant",
4595 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4596 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4597 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4598 BPF_LD_MAP_FD(BPF_REG_1, 0),
4599 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4600 BPF_FUNC_map_lookup_elem),
4601 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
4602 BPF_ST_MEM(BPF_DW, BPF_REG_0, (MAX_ENTRIES + 1) << 2,
4603 offsetof(struct test_val, foo)),
4606 .fixup_map2 = { 3 },
4607 .errstr = "invalid access to map value, value_size=48 off=48 size=8",
4611 "invalid map access into an array with a register",
4613 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4614 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4615 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4616 BPF_LD_MAP_FD(BPF_REG_1, 0),
4617 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4618 BPF_FUNC_map_lookup_elem),
4619 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4620 BPF_MOV64_IMM(BPF_REG_1, MAX_ENTRIES + 1),
4621 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
4622 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
4623 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4624 offsetof(struct test_val, foo)),
4627 .fixup_map2 = { 3 },
4628 .errstr = "R0 min value is outside of the array range",
4630 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4633 "invalid map access into an array with a variable",
4635 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4636 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4637 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4638 BPF_LD_MAP_FD(BPF_REG_1, 0),
4639 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4640 BPF_FUNC_map_lookup_elem),
4641 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4642 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
4643 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
4644 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
4645 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4646 offsetof(struct test_val, foo)),
4649 .fixup_map2 = { 3 },
4650 .errstr = "R0 unbounded memory access, make sure to bounds check any array access into a map",
4652 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4655 "invalid map access into an array with no floor check",
4657 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4658 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4659 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4660 BPF_LD_MAP_FD(BPF_REG_1, 0),
4661 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4662 BPF_FUNC_map_lookup_elem),
4663 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
4664 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
4665 BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
4666 BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
4667 BPF_MOV32_IMM(BPF_REG_1, 0),
4668 BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
4669 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
4670 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4671 offsetof(struct test_val, foo)),
4674 .fixup_map2 = { 3 },
4675 .errstr_unpriv = "R0 leaks addr",
4676 .errstr = "R0 unbounded memory access",
4677 .result_unpriv = REJECT,
4679 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4682 "invalid map access into an array with a invalid max check",
4684 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4685 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4686 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4687 BPF_LD_MAP_FD(BPF_REG_1, 0),
4688 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4689 BPF_FUNC_map_lookup_elem),
4690 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
4691 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
4692 BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES + 1),
4693 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
4694 BPF_MOV32_IMM(BPF_REG_1, 0),
4695 BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
4696 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
4697 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4698 offsetof(struct test_val, foo)),
4701 .fixup_map2 = { 3 },
4702 .errstr_unpriv = "R0 leaks addr",
4703 .errstr = "invalid access to map value, value_size=48 off=44 size=8",
4704 .result_unpriv = REJECT,
4706 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4709 "invalid map access into an array with a invalid max check",
4711 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4712 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4713 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4714 BPF_LD_MAP_FD(BPF_REG_1, 0),
4715 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4716 BPF_FUNC_map_lookup_elem),
4717 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
4718 BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
4719 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4720 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4721 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4722 BPF_LD_MAP_FD(BPF_REG_1, 0),
4723 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4724 BPF_FUNC_map_lookup_elem),
4725 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
4726 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
4727 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
4728 offsetof(struct test_val, foo)),
4731 .fixup_map2 = { 3, 11 },
4732 .errstr = "R0 pointer += pointer",
4734 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4737 "valid cgroup storage access",
4739 BPF_MOV64_IMM(BPF_REG_2, 0),
4740 BPF_LD_MAP_FD(BPF_REG_1, 0),
4741 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4742 BPF_FUNC_get_local_storage),
4743 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
4744 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
4745 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
4748 .fixup_cgroup_storage = { 1 },
4750 .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
4753 "invalid cgroup storage access 1",
4755 BPF_MOV64_IMM(BPF_REG_2, 0),
4756 BPF_LD_MAP_FD(BPF_REG_1, 0),
4757 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4758 BPF_FUNC_get_local_storage),
4759 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
4760 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
4761 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
4764 .fixup_map1 = { 1 },
4766 .errstr = "cannot pass map_type 1 into func bpf_get_local_storage",
4767 .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
4770 "invalid cgroup storage access 2",
4772 BPF_MOV64_IMM(BPF_REG_2, 0),
4773 BPF_LD_MAP_FD(BPF_REG_1, 1),
4774 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4775 BPF_FUNC_get_local_storage),
4776 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
4780 .errstr = "fd 1 is not pointing to valid bpf_map",
4781 .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
4784 "invalid per-cgroup storage access 3",
4786 BPF_MOV64_IMM(BPF_REG_2, 0),
4787 BPF_LD_MAP_FD(BPF_REG_1, 0),
4788 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4789 BPF_FUNC_get_local_storage),
4790 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 256),
4791 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
4792 BPF_MOV64_IMM(BPF_REG_0, 0),
4795 .fixup_cgroup_storage = { 1 },
4797 .errstr = "invalid access to map value, value_size=64 off=256 size=4",
4798 .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
4801 "invalid cgroup storage access 4",
4803 BPF_MOV64_IMM(BPF_REG_2, 0),
4804 BPF_LD_MAP_FD(BPF_REG_1, 0),
4805 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4806 BPF_FUNC_get_local_storage),
4807 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, -2),
4808 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
4809 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
4812 .fixup_cgroup_storage = { 1 },
4814 .errstr = "invalid access to map value, value_size=64 off=-2 size=4",
4815 .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
4816 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4819 "invalid cgroup storage access 5",
4821 BPF_MOV64_IMM(BPF_REG_2, 7),
4822 BPF_LD_MAP_FD(BPF_REG_1, 0),
4823 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4824 BPF_FUNC_get_local_storage),
4825 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
4826 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
4827 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
4830 .fixup_cgroup_storage = { 1 },
4832 .errstr = "get_local_storage() doesn't support non-zero flags",
4833 .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
4836 "invalid cgroup storage access 6",
4838 BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
4839 BPF_LD_MAP_FD(BPF_REG_1, 0),
4840 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4841 BPF_FUNC_get_local_storage),
4842 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
4843 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
4844 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
4847 .fixup_cgroup_storage = { 1 },
4849 .errstr = "get_local_storage() doesn't support non-zero flags",
4850 .errstr_unpriv = "R2 leaks addr into helper function",
4851 .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
4854 "multiple registers share map_lookup_elem result",
4856 BPF_MOV64_IMM(BPF_REG_1, 10),
4857 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
4858 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4859 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4860 BPF_LD_MAP_FD(BPF_REG_1, 0),
4861 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4862 BPF_FUNC_map_lookup_elem),
4863 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
4864 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
4865 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
4868 .fixup_map1 = { 4 },
4870 .prog_type = BPF_PROG_TYPE_SCHED_CLS
4873 "alu ops on ptr_to_map_value_or_null, 1",
4875 BPF_MOV64_IMM(BPF_REG_1, 10),
4876 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
4877 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4878 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4879 BPF_LD_MAP_FD(BPF_REG_1, 0),
4880 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4881 BPF_FUNC_map_lookup_elem),
4882 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
4883 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -2),
4884 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 2),
4885 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
4886 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
4889 .fixup_map1 = { 4 },
4890 .errstr = "R4 pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL",
4892 .prog_type = BPF_PROG_TYPE_SCHED_CLS
4895 "alu ops on ptr_to_map_value_or_null, 2",
4897 BPF_MOV64_IMM(BPF_REG_1, 10),
4898 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
4899 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4900 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4901 BPF_LD_MAP_FD(BPF_REG_1, 0),
4902 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4903 BPF_FUNC_map_lookup_elem),
4904 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
4905 BPF_ALU64_IMM(BPF_AND, BPF_REG_4, -1),
4906 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
4907 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
4910 .fixup_map1 = { 4 },
4911 .errstr = "R4 pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL",
4913 .prog_type = BPF_PROG_TYPE_SCHED_CLS
4916 "alu ops on ptr_to_map_value_or_null, 3",
4918 BPF_MOV64_IMM(BPF_REG_1, 10),
4919 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
4920 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4921 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4922 BPF_LD_MAP_FD(BPF_REG_1, 0),
4923 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4924 BPF_FUNC_map_lookup_elem),
4925 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
4926 BPF_ALU64_IMM(BPF_LSH, BPF_REG_4, 1),
4927 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
4928 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
4931 .fixup_map1 = { 4 },
4932 .errstr = "R4 pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL",
4934 .prog_type = BPF_PROG_TYPE_SCHED_CLS
4937 "invalid memory access with multiple map_lookup_elem calls",
4939 BPF_MOV64_IMM(BPF_REG_1, 10),
4940 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
4941 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4942 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4943 BPF_LD_MAP_FD(BPF_REG_1, 0),
4944 BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
4945 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
4946 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4947 BPF_FUNC_map_lookup_elem),
4948 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
4949 BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
4950 BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
4951 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4952 BPF_FUNC_map_lookup_elem),
4953 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
4954 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
4957 .fixup_map1 = { 4 },
4959 .errstr = "R4 !read_ok",
4960 .prog_type = BPF_PROG_TYPE_SCHED_CLS
4963 "valid indirect map_lookup_elem access with 2nd lookup in branch",
4965 BPF_MOV64_IMM(BPF_REG_1, 10),
4966 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
4967 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4968 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4969 BPF_LD_MAP_FD(BPF_REG_1, 0),
4970 BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
4971 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
4972 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4973 BPF_FUNC_map_lookup_elem),
4974 BPF_MOV64_IMM(BPF_REG_2, 10),
4975 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0, 3),
4976 BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
4977 BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
4978 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4979 BPF_FUNC_map_lookup_elem),
4980 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
4981 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
4982 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
4985 .fixup_map1 = { 4 },
4987 .prog_type = BPF_PROG_TYPE_SCHED_CLS
4990 "invalid map access from else condition",
4992 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4993 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4994 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4995 BPF_LD_MAP_FD(BPF_REG_1, 0),
4996 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
4997 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
4998 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
4999 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES-1, 1),
5000 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
5001 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
5002 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
5003 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
5006 .fixup_map2 = { 3 },
5007 .errstr = "R0 unbounded memory access",
5009 .errstr_unpriv = "R0 leaks addr",
5010 .result_unpriv = REJECT,
5011 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
5014 "constant register |= constant should keep constant type",
5016 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5017 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
5018 BPF_MOV64_IMM(BPF_REG_2, 34),
5019 BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 13),
5020 BPF_MOV64_IMM(BPF_REG_3, 0),
5021 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5025 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5028 "constant register |= constant should not bypass stack boundary checks",
5030 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5031 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
5032 BPF_MOV64_IMM(BPF_REG_2, 34),
5033 BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 24),
5034 BPF_MOV64_IMM(BPF_REG_3, 0),
5035 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5038 .errstr = "invalid stack type R1 off=-48 access_size=58",
5040 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5043 "constant register |= constant register should keep constant type",
5045 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5046 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
5047 BPF_MOV64_IMM(BPF_REG_2, 34),
5048 BPF_MOV64_IMM(BPF_REG_4, 13),
5049 BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_4),
5050 BPF_MOV64_IMM(BPF_REG_3, 0),
5051 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5055 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5058 "constant register |= constant register should not bypass stack boundary checks",
5060 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5061 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
5062 BPF_MOV64_IMM(BPF_REG_2, 34),
5063 BPF_MOV64_IMM(BPF_REG_4, 24),
5064 BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_4),
5065 BPF_MOV64_IMM(BPF_REG_3, 0),
5066 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5069 .errstr = "invalid stack type R1 off=-48 access_size=58",
5071 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5074 "invalid direct packet write for LWT_IN",
5076 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
5077 offsetof(struct __sk_buff, data)),
5078 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
5079 offsetof(struct __sk_buff, data_end)),
5080 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
5081 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
5082 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
5083 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
5084 BPF_MOV64_IMM(BPF_REG_0, 0),
5087 .errstr = "cannot write into packet",
5089 .prog_type = BPF_PROG_TYPE_LWT_IN,
5092 "invalid direct packet write for LWT_OUT",
5094 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
5095 offsetof(struct __sk_buff, data)),
5096 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
5097 offsetof(struct __sk_buff, data_end)),
5098 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
5099 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
5100 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
5101 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
5102 BPF_MOV64_IMM(BPF_REG_0, 0),
5105 .errstr = "cannot write into packet",
5107 .prog_type = BPF_PROG_TYPE_LWT_OUT,
5110 "direct packet write for LWT_XMIT",
5112 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
5113 offsetof(struct __sk_buff, data)),
5114 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
5115 offsetof(struct __sk_buff, data_end)),
5116 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
5117 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
5118 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
5119 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
5120 BPF_MOV64_IMM(BPF_REG_0, 0),
5124 .prog_type = BPF_PROG_TYPE_LWT_XMIT,
5127 "direct packet read for LWT_IN",
5129 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
5130 offsetof(struct __sk_buff, data)),
5131 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
5132 offsetof(struct __sk_buff, data_end)),
5133 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
5134 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
5135 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
5136 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
5137 BPF_MOV64_IMM(BPF_REG_0, 0),
5141 .prog_type = BPF_PROG_TYPE_LWT_IN,
5144 "direct packet read for LWT_OUT",
5146 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
5147 offsetof(struct __sk_buff, data)),
5148 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
5149 offsetof(struct __sk_buff, data_end)),
5150 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
5151 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
5152 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
5153 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
5154 BPF_MOV64_IMM(BPF_REG_0, 0),
5158 .prog_type = BPF_PROG_TYPE_LWT_OUT,
5161 "direct packet read for LWT_XMIT",
5163 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
5164 offsetof(struct __sk_buff, data)),
5165 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
5166 offsetof(struct __sk_buff, data_end)),
5167 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
5168 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
5169 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
5170 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
5171 BPF_MOV64_IMM(BPF_REG_0, 0),
5175 .prog_type = BPF_PROG_TYPE_LWT_XMIT,
5178 "overlapping checks for direct packet access",
5180 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
5181 offsetof(struct __sk_buff, data)),
5182 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
5183 offsetof(struct __sk_buff, data_end)),
5184 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
5185 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
5186 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4),
5187 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
5188 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
5189 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
5190 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6),
5191 BPF_MOV64_IMM(BPF_REG_0, 0),
5195 .prog_type = BPF_PROG_TYPE_LWT_XMIT,
5198 "make headroom for LWT_XMIT",
5200 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5201 BPF_MOV64_IMM(BPF_REG_2, 34),
5202 BPF_MOV64_IMM(BPF_REG_3, 0),
5203 BPF_EMIT_CALL(BPF_FUNC_skb_change_head),
5204 /* split for s390 to succeed */
5205 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
5206 BPF_MOV64_IMM(BPF_REG_2, 42),
5207 BPF_MOV64_IMM(BPF_REG_3, 0),
5208 BPF_EMIT_CALL(BPF_FUNC_skb_change_head),
5209 BPF_MOV64_IMM(BPF_REG_0, 0),
5213 .prog_type = BPF_PROG_TYPE_LWT_XMIT,
5216 "invalid access of tc_classid for LWT_IN",
5218 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
5219 offsetof(struct __sk_buff, tc_classid)),
5223 .errstr = "invalid bpf_context access",
5226 "invalid access of tc_classid for LWT_OUT",
5228 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
5229 offsetof(struct __sk_buff, tc_classid)),
5233 .errstr = "invalid bpf_context access",
5236 "invalid access of tc_classid for LWT_XMIT",
5238 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
5239 offsetof(struct __sk_buff, tc_classid)),
5243 .errstr = "invalid bpf_context access",
5246 "leak pointer into ctx 1",
5248 BPF_MOV64_IMM(BPF_REG_0, 0),
5249 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
5250 offsetof(struct __sk_buff, cb[0])),
5251 BPF_LD_MAP_FD(BPF_REG_2, 0),
5252 BPF_STX_XADD(BPF_DW, BPF_REG_1, BPF_REG_2,
5253 offsetof(struct __sk_buff, cb[0])),
5256 .fixup_map1 = { 2 },
5257 .errstr_unpriv = "R2 leaks addr into mem",
5258 .result_unpriv = REJECT,
5260 .errstr = "BPF_XADD stores into R1 context is not allowed",
5263 "leak pointer into ctx 2",
5265 BPF_MOV64_IMM(BPF_REG_0, 0),
5266 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
5267 offsetof(struct __sk_buff, cb[0])),
5268 BPF_STX_XADD(BPF_DW, BPF_REG_1, BPF_REG_10,
5269 offsetof(struct __sk_buff, cb[0])),
5272 .errstr_unpriv = "R10 leaks addr into mem",
5273 .result_unpriv = REJECT,
5275 .errstr = "BPF_XADD stores into R1 context is not allowed",
5278 "leak pointer into ctx 3",
5280 BPF_MOV64_IMM(BPF_REG_0, 0),
5281 BPF_LD_MAP_FD(BPF_REG_2, 0),
5282 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2,
5283 offsetof(struct __sk_buff, cb[0])),
5286 .fixup_map1 = { 1 },
5287 .errstr_unpriv = "R2 leaks addr into ctx",
5288 .result_unpriv = REJECT,
5292 "leak pointer into map val",
5294 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5295 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
5296 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5297 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5298 BPF_LD_MAP_FD(BPF_REG_1, 0),
5299 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5300 BPF_FUNC_map_lookup_elem),
5301 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
5302 BPF_MOV64_IMM(BPF_REG_3, 0),
5303 BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
5304 BPF_STX_XADD(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
5305 BPF_MOV64_IMM(BPF_REG_0, 0),
5308 .fixup_map1 = { 4 },
5309 .errstr_unpriv = "R6 leaks addr into mem",
5310 .result_unpriv = REJECT,
5314 "helper access to map: full range",
5316 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5317 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5318 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5319 BPF_LD_MAP_FD(BPF_REG_1, 0),
5320 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5321 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5322 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5323 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
5324 BPF_MOV64_IMM(BPF_REG_3, 0),
5325 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5328 .fixup_map2 = { 3 },
5330 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5333 "helper access to map: partial range",
5335 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5336 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5337 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5338 BPF_LD_MAP_FD(BPF_REG_1, 0),
5339 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5340 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5341 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5342 BPF_MOV64_IMM(BPF_REG_2, 8),
5343 BPF_MOV64_IMM(BPF_REG_3, 0),
5344 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5347 .fixup_map2 = { 3 },
5349 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5352 "helper access to map: empty range",
5354 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5355 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5356 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5357 BPF_LD_MAP_FD(BPF_REG_1, 0),
5358 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5359 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
5360 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5361 BPF_MOV64_IMM(BPF_REG_2, 0),
5362 BPF_EMIT_CALL(BPF_FUNC_trace_printk),
5365 .fixup_map2 = { 3 },
5366 .errstr = "invalid access to map value, value_size=48 off=0 size=0",
5368 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5371 "helper access to map: out-of-bound range",
5373 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5374 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5375 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5376 BPF_LD_MAP_FD(BPF_REG_1, 0),
5377 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5378 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5379 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5380 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val) + 8),
5381 BPF_MOV64_IMM(BPF_REG_3, 0),
5382 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5385 .fixup_map2 = { 3 },
5386 .errstr = "invalid access to map value, value_size=48 off=0 size=56",
5388 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5391 "helper access to map: negative range",
5393 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5394 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5395 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5396 BPF_LD_MAP_FD(BPF_REG_1, 0),
5397 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5398 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5399 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5400 BPF_MOV64_IMM(BPF_REG_2, -8),
5401 BPF_MOV64_IMM(BPF_REG_3, 0),
5402 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5405 .fixup_map2 = { 3 },
5406 .errstr = "R2 min value is negative",
5408 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5411 "helper access to adjusted map (via const imm): full range",
5413 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5414 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5415 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5416 BPF_LD_MAP_FD(BPF_REG_1, 0),
5417 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5418 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
5419 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5420 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
5421 offsetof(struct test_val, foo)),
5422 BPF_MOV64_IMM(BPF_REG_2,
5423 sizeof(struct test_val) -
5424 offsetof(struct test_val, foo)),
5425 BPF_MOV64_IMM(BPF_REG_3, 0),
5426 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5429 .fixup_map2 = { 3 },
5431 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5434 "helper access to adjusted map (via const imm): partial range",
5436 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5437 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5438 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5439 BPF_LD_MAP_FD(BPF_REG_1, 0),
5440 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5441 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
5442 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5443 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
5444 offsetof(struct test_val, foo)),
5445 BPF_MOV64_IMM(BPF_REG_2, 8),
5446 BPF_MOV64_IMM(BPF_REG_3, 0),
5447 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5450 .fixup_map2 = { 3 },
5452 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5455 "helper access to adjusted map (via const imm): empty range",
5457 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5458 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5459 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5460 BPF_LD_MAP_FD(BPF_REG_1, 0),
5461 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5462 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5463 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5464 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
5465 offsetof(struct test_val, foo)),
5466 BPF_MOV64_IMM(BPF_REG_2, 0),
5467 BPF_EMIT_CALL(BPF_FUNC_trace_printk),
5470 .fixup_map2 = { 3 },
5471 .errstr = "invalid access to map value, value_size=48 off=4 size=0",
5473 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5476 "helper access to adjusted map (via const imm): out-of-bound range",
5478 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5479 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5480 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5481 BPF_LD_MAP_FD(BPF_REG_1, 0),
5482 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5483 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
5484 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5485 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
5486 offsetof(struct test_val, foo)),
5487 BPF_MOV64_IMM(BPF_REG_2,
5488 sizeof(struct test_val) -
5489 offsetof(struct test_val, foo) + 8),
5490 BPF_MOV64_IMM(BPF_REG_3, 0),
5491 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5494 .fixup_map2 = { 3 },
5495 .errstr = "invalid access to map value, value_size=48 off=4 size=52",
5497 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5500 "helper access to adjusted map (via const imm): negative range (> adjustment)",
5502 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5503 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5504 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5505 BPF_LD_MAP_FD(BPF_REG_1, 0),
5506 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5507 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
5508 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5509 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
5510 offsetof(struct test_val, foo)),
5511 BPF_MOV64_IMM(BPF_REG_2, -8),
5512 BPF_MOV64_IMM(BPF_REG_3, 0),
5513 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5516 .fixup_map2 = { 3 },
5517 .errstr = "R2 min value is negative",
5519 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5522 "helper access to adjusted map (via const imm): negative range (< adjustment)",
5524 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5525 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5526 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5527 BPF_LD_MAP_FD(BPF_REG_1, 0),
5528 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5529 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
5530 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5531 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
5532 offsetof(struct test_val, foo)),
5533 BPF_MOV64_IMM(BPF_REG_2, -1),
5534 BPF_MOV64_IMM(BPF_REG_3, 0),
5535 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5538 .fixup_map2 = { 3 },
5539 .errstr = "R2 min value is negative",
5541 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5544 "helper access to adjusted map (via const reg): full range",
5546 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5547 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5548 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5549 BPF_LD_MAP_FD(BPF_REG_1, 0),
5550 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5551 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
5552 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5553 BPF_MOV64_IMM(BPF_REG_3,
5554 offsetof(struct test_val, foo)),
5555 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5556 BPF_MOV64_IMM(BPF_REG_2,
5557 sizeof(struct test_val) -
5558 offsetof(struct test_val, foo)),
5559 BPF_MOV64_IMM(BPF_REG_3, 0),
5560 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5563 .fixup_map2 = { 3 },
5565 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5568 "helper access to adjusted map (via const reg): partial range",
5570 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5571 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5572 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5573 BPF_LD_MAP_FD(BPF_REG_1, 0),
5574 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5575 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
5576 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5577 BPF_MOV64_IMM(BPF_REG_3,
5578 offsetof(struct test_val, foo)),
5579 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5580 BPF_MOV64_IMM(BPF_REG_2, 8),
5581 BPF_MOV64_IMM(BPF_REG_3, 0),
5582 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5585 .fixup_map2 = { 3 },
5587 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5590 "helper access to adjusted map (via const reg): empty range",
5592 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5593 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5594 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5595 BPF_LD_MAP_FD(BPF_REG_1, 0),
5596 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5597 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
5598 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5599 BPF_MOV64_IMM(BPF_REG_3, 0),
5600 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5601 BPF_MOV64_IMM(BPF_REG_2, 0),
5602 BPF_EMIT_CALL(BPF_FUNC_trace_printk),
5605 .fixup_map2 = { 3 },
5606 .errstr = "R1 min value is outside of the array range",
5608 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5611 "helper access to adjusted map (via const reg): out-of-bound range",
5613 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5614 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5615 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5616 BPF_LD_MAP_FD(BPF_REG_1, 0),
5617 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5618 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
5619 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5620 BPF_MOV64_IMM(BPF_REG_3,
5621 offsetof(struct test_val, foo)),
5622 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5623 BPF_MOV64_IMM(BPF_REG_2,
5624 sizeof(struct test_val) -
5625 offsetof(struct test_val, foo) + 8),
5626 BPF_MOV64_IMM(BPF_REG_3, 0),
5627 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5630 .fixup_map2 = { 3 },
5631 .errstr = "invalid access to map value, value_size=48 off=4 size=52",
5633 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5636 "helper access to adjusted map (via const reg): negative range (> adjustment)",
5638 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5639 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5640 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5641 BPF_LD_MAP_FD(BPF_REG_1, 0),
5642 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5643 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
5644 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5645 BPF_MOV64_IMM(BPF_REG_3,
5646 offsetof(struct test_val, foo)),
5647 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5648 BPF_MOV64_IMM(BPF_REG_2, -8),
5649 BPF_MOV64_IMM(BPF_REG_3, 0),
5650 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5653 .fixup_map2 = { 3 },
5654 .errstr = "R2 min value is negative",
5656 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5659 "helper access to adjusted map (via const reg): negative range (< adjustment)",
5661 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5662 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5663 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5664 BPF_LD_MAP_FD(BPF_REG_1, 0),
5665 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5666 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
5667 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5668 BPF_MOV64_IMM(BPF_REG_3,
5669 offsetof(struct test_val, foo)),
5670 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5671 BPF_MOV64_IMM(BPF_REG_2, -1),
5672 BPF_MOV64_IMM(BPF_REG_3, 0),
5673 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5676 .fixup_map2 = { 3 },
5677 .errstr = "R2 min value is negative",
5679 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5682 "helper access to adjusted map (via variable): full range",
5684 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5685 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5686 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5687 BPF_LD_MAP_FD(BPF_REG_1, 0),
5688 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5689 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
5690 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5691 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5692 BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
5693 offsetof(struct test_val, foo), 4),
5694 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5695 BPF_MOV64_IMM(BPF_REG_2,
5696 sizeof(struct test_val) -
5697 offsetof(struct test_val, foo)),
5698 BPF_MOV64_IMM(BPF_REG_3, 0),
5699 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5702 .fixup_map2 = { 3 },
5704 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5707 "helper access to adjusted map (via variable): partial range",
5709 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5710 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5711 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5712 BPF_LD_MAP_FD(BPF_REG_1, 0),
5713 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5714 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
5715 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5716 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5717 BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
5718 offsetof(struct test_val, foo), 4),
5719 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5720 BPF_MOV64_IMM(BPF_REG_2, 8),
5721 BPF_MOV64_IMM(BPF_REG_3, 0),
5722 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5725 .fixup_map2 = { 3 },
5727 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5730 "helper access to adjusted map (via variable): empty range",
5732 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5733 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5734 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5735 BPF_LD_MAP_FD(BPF_REG_1, 0),
5736 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5737 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
5738 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5739 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5740 BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
5741 offsetof(struct test_val, foo), 3),
5742 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5743 BPF_MOV64_IMM(BPF_REG_2, 0),
5744 BPF_EMIT_CALL(BPF_FUNC_trace_printk),
5747 .fixup_map2 = { 3 },
5748 .errstr = "R1 min value is outside of the array range",
5750 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5753 "helper access to adjusted map (via variable): no max check",
5755 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5756 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5757 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5758 BPF_LD_MAP_FD(BPF_REG_1, 0),
5759 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5760 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
5761 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5762 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5763 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5764 BPF_MOV64_IMM(BPF_REG_2, 1),
5765 BPF_MOV64_IMM(BPF_REG_3, 0),
5766 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5769 .fixup_map2 = { 3 },
5770 .errstr = "R1 unbounded memory access",
5772 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5775 "helper access to adjusted map (via variable): wrong max check",
5777 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5778 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5779 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5780 BPF_LD_MAP_FD(BPF_REG_1, 0),
5781 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5782 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
5783 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5784 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5785 BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
5786 offsetof(struct test_val, foo), 4),
5787 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5788 BPF_MOV64_IMM(BPF_REG_2,
5789 sizeof(struct test_val) -
5790 offsetof(struct test_val, foo) + 1),
5791 BPF_MOV64_IMM(BPF_REG_3, 0),
5792 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5795 .fixup_map2 = { 3 },
5796 .errstr = "invalid access to map value, value_size=48 off=4 size=45",
5798 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5801 "helper access to map: bounds check using <, good access",
5803 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5804 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5805 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5806 BPF_LD_MAP_FD(BPF_REG_1, 0),
5807 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5808 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5809 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5810 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5811 BPF_JMP_IMM(BPF_JLT, BPF_REG_3, 32, 2),
5812 BPF_MOV64_IMM(BPF_REG_0, 0),
5814 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5815 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
5816 BPF_MOV64_IMM(BPF_REG_0, 0),
5819 .fixup_map2 = { 3 },
5821 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5824 "helper access to map: bounds check using <, bad access",
5826 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5827 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5828 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5829 BPF_LD_MAP_FD(BPF_REG_1, 0),
5830 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5831 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5832 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5833 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5834 BPF_JMP_IMM(BPF_JLT, BPF_REG_3, 32, 4),
5835 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5836 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
5837 BPF_MOV64_IMM(BPF_REG_0, 0),
5839 BPF_MOV64_IMM(BPF_REG_0, 0),
5842 .fixup_map2 = { 3 },
5844 .errstr = "R1 unbounded memory access",
5845 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5848 "helper access to map: bounds check using <=, good access",
5850 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5851 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5852 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5853 BPF_LD_MAP_FD(BPF_REG_1, 0),
5854 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5855 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5856 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5857 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5858 BPF_JMP_IMM(BPF_JLE, BPF_REG_3, 32, 2),
5859 BPF_MOV64_IMM(BPF_REG_0, 0),
5861 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5862 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
5863 BPF_MOV64_IMM(BPF_REG_0, 0),
5866 .fixup_map2 = { 3 },
5868 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5871 "helper access to map: bounds check using <=, bad access",
5873 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5874 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5875 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5876 BPF_LD_MAP_FD(BPF_REG_1, 0),
5877 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5878 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5879 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5880 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5881 BPF_JMP_IMM(BPF_JLE, BPF_REG_3, 32, 4),
5882 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5883 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
5884 BPF_MOV64_IMM(BPF_REG_0, 0),
5886 BPF_MOV64_IMM(BPF_REG_0, 0),
5889 .fixup_map2 = { 3 },
5891 .errstr = "R1 unbounded memory access",
5892 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5895 "helper access to map: bounds check using s<, good access",
5897 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5898 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5899 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5900 BPF_LD_MAP_FD(BPF_REG_1, 0),
5901 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5902 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5903 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5904 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5905 BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2),
5906 BPF_MOV64_IMM(BPF_REG_0, 0),
5908 BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 0, -3),
5909 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5910 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
5911 BPF_MOV64_IMM(BPF_REG_0, 0),
5914 .fixup_map2 = { 3 },
5916 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5919 "helper access to map: bounds check using s<, good access 2",
5921 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5922 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5923 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5924 BPF_LD_MAP_FD(BPF_REG_1, 0),
5925 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5926 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5927 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5928 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5929 BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2),
5930 BPF_MOV64_IMM(BPF_REG_0, 0),
5932 BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, -3, -3),
5933 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5934 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
5935 BPF_MOV64_IMM(BPF_REG_0, 0),
5938 .fixup_map2 = { 3 },
5940 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5943 "helper access to map: bounds check using s<, bad access",
5945 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5946 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5947 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5948 BPF_LD_MAP_FD(BPF_REG_1, 0),
5949 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5950 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5951 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5952 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
5953 BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2),
5954 BPF_MOV64_IMM(BPF_REG_0, 0),
5956 BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, -3, -3),
5957 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5958 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
5959 BPF_MOV64_IMM(BPF_REG_0, 0),
5962 .fixup_map2 = { 3 },
5964 .errstr = "R1 min value is negative",
5965 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5968 "helper access to map: bounds check using s<=, good access",
5970 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5971 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5972 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5973 BPF_LD_MAP_FD(BPF_REG_1, 0),
5974 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5975 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5976 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5977 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5978 BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2),
5979 BPF_MOV64_IMM(BPF_REG_0, 0),
5981 BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 0, -3),
5982 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5983 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
5984 BPF_MOV64_IMM(BPF_REG_0, 0),
5987 .fixup_map2 = { 3 },
5989 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5992 "helper access to map: bounds check using s<=, good access 2",
5994 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5995 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5996 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5997 BPF_LD_MAP_FD(BPF_REG_1, 0),
5998 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5999 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6000 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6001 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6002 BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2),
6003 BPF_MOV64_IMM(BPF_REG_0, 0),
6005 BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, -3, -3),
6006 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6007 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
6008 BPF_MOV64_IMM(BPF_REG_0, 0),
6011 .fixup_map2 = { 3 },
6013 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6016 "helper access to map: bounds check using s<=, bad access",
6018 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6019 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6020 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6021 BPF_LD_MAP_FD(BPF_REG_1, 0),
6022 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6023 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6024 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6025 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
6026 BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2),
6027 BPF_MOV64_IMM(BPF_REG_0, 0),
6029 BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, -3, -3),
6030 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6031 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
6032 BPF_MOV64_IMM(BPF_REG_0, 0),
6035 .fixup_map2 = { 3 },
6037 .errstr = "R1 min value is negative",
6038 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
/* NOTE(review): throughout this table the extraction dropped each entry's
 * surrounding braces, '.insns = {', BPF_EXIT_INSN()s and most '.result'
 * lines; only comments are added here.
 */
/* A map value pointer returned by one lookup is passed as the key of a
 * second lookup (both fixups point at fixup_map3).
 */
6041 "map lookup helper access to map",
6043 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6044 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6045 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6046 BPF_LD_MAP_FD(BPF_REG_1, 0),
6047 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6048 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6049 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6050 BPF_LD_MAP_FD(BPF_REG_1, 0),
6051 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6054 .fixup_map3 = { 3, 8 },
6056 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
/* Map value pointer used as both key (R2) and value (R3) arguments of
 * map_update_elem(); R4 = flags = 0.
 */
6059 "map update helper access to map",
6061 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6062 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6063 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6064 BPF_LD_MAP_FD(BPF_REG_1, 0),
6065 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6066 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6067 BPF_MOV64_IMM(BPF_REG_4, 0),
6068 BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
6069 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6070 BPF_LD_MAP_FD(BPF_REG_1, 0),
6071 BPF_EMIT_CALL(BPF_FUNC_map_update_elem),
6074 .fixup_map3 = { 3, 10 },
6076 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
/* Value looked up from the 8-byte-value map (fixup_map1) fed to an
 * update on fixup_map3, whose value is 16 bytes -> rejected with the
 * size-mismatch message below.
 */
6079 "map update helper access to map: wrong size",
6081 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6082 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6083 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6084 BPF_LD_MAP_FD(BPF_REG_1, 0),
6085 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6086 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6087 BPF_MOV64_IMM(BPF_REG_4, 0),
6088 BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
6089 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6090 BPF_LD_MAP_FD(BPF_REG_1, 0),
6091 BPF_EMIT_CALL(BPF_FUNC_map_update_elem),
6094 .fixup_map1 = { 3 },
6095 .fixup_map3 = { 10 },
6097 .errstr = "invalid access to map value, value_size=8 off=0 size=16",
6098 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
/* Map value pointer adjusted by a constant immediate (offsetof bar, an
 * in-bounds offset) then used as a lookup key -> accepted.
 */
6101 "map helper access to adjusted map (via const imm)",
6103 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6104 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6105 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6106 BPF_LD_MAP_FD(BPF_REG_1, 0),
6107 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6108 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
6109 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6110 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
6111 offsetof(struct other_val, bar)),
6112 BPF_LD_MAP_FD(BPF_REG_1, 0),
6113 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6116 .fixup_map3 = { 3, 9 },
6118 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
/* Constant offset sizeof(other_val)-4 = 12 leaves only 4 bytes, but the
 * 8-byte key read needs 8 -> rejected (off=12 size=8).
 */
6121 "map helper access to adjusted map (via const imm): out-of-bound 1",
6123 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6124 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6125 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6126 BPF_LD_MAP_FD(BPF_REG_1, 0),
6127 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6128 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
6129 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6130 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
6131 sizeof(struct other_val) - 4),
6132 BPF_LD_MAP_FD(BPF_REG_1, 0),
6133 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6136 .fixup_map3 = { 3, 9 },
6138 .errstr = "invalid access to map value, value_size=16 off=12 size=8",
6139 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
/* Negative constant offset (-4) moves the pointer before the value ->
 * rejected (off=-4).
 */
6142 "map helper access to adjusted map (via const imm): out-of-bound 2",
6144 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6145 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6146 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6147 BPF_LD_MAP_FD(BPF_REG_1, 0),
6148 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6149 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
6150 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6151 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
6152 BPF_LD_MAP_FD(BPF_REG_1, 0),
6153 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6156 .fixup_map3 = { 3, 9 },
6158 .errstr = "invalid access to map value, value_size=16 off=-4 size=8",
6159 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
/* Same three scenarios as above, but the constant arrives through a
 * register (R3) instead of an immediate: in-bounds offset -> accepted.
 */
6162 "map helper access to adjusted map (via const reg)",
6164 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6165 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6166 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6167 BPF_LD_MAP_FD(BPF_REG_1, 0),
6168 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6169 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6170 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6171 BPF_MOV64_IMM(BPF_REG_3,
6172 offsetof(struct other_val, bar)),
6173 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
6174 BPF_LD_MAP_FD(BPF_REG_1, 0),
6175 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6178 .fixup_map3 = { 3, 10 },
6180 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
/* Known-constant register offset of 12 -> same off=12 rejection. */
6183 "map helper access to adjusted map (via const reg): out-of-bound 1",
6185 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6186 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6187 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6188 BPF_LD_MAP_FD(BPF_REG_1, 0),
6189 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6190 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6191 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6192 BPF_MOV64_IMM(BPF_REG_3,
6193 sizeof(struct other_val) - 4),
6194 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
6195 BPF_LD_MAP_FD(BPF_REG_1, 0),
6196 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6199 .fixup_map3 = { 3, 10 },
6201 .errstr = "invalid access to map value, value_size=16 off=12 size=8",
6202 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
/* Known-constant register offset of -4 -> same off=-4 rejection. */
6205 "map helper access to adjusted map (via const reg): out-of-bound 2",
6207 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6208 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6209 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6210 BPF_LD_MAP_FD(BPF_REG_1, 0),
6211 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6212 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6213 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6214 BPF_MOV64_IMM(BPF_REG_3, -4),
6215 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
6216 BPF_LD_MAP_FD(BPF_REG_1, 0),
6217 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6220 .fixup_map3 = { 3, 10 },
6222 .errstr = "invalid access to map value, value_size=16 off=-4 size=8",
6223 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
/* Variable offset loaded from the map, upper-bounded with JGT against
 * offsetof(bar) so the remaining 8-byte key access stays in bounds ->
 * accepted.
 */
6226 "map helper access to adjusted map (via variable)",
6228 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6229 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6230 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6231 BPF_LD_MAP_FD(BPF_REG_1, 0),
6232 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6233 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
6234 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6235 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6236 BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
6237 offsetof(struct other_val, bar), 4),
6238 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
6239 BPF_LD_MAP_FD(BPF_REG_1, 0),
6240 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6243 .fixup_map3 = { 3, 11 },
6245 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
/* No upper bound on the variable offset at all -> rejected as unbounded
 * (message below names R2, the key argument of the second lookup).
 */
6248 "map helper access to adjusted map (via variable): no max check",
6250 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6251 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6252 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6253 BPF_LD_MAP_FD(BPF_REG_1, 0),
6254 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6255 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6256 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6257 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6258 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
6259 BPF_LD_MAP_FD(BPF_REG_1, 0),
6260 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6263 .fixup_map3 = { 3, 10 },
6265 .errstr = "R2 unbounded memory access, make sure to bounds check any array access into a map",
6266 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
/* Off-by-one bound (offsetof(bar) + 1 = 9) permits off=9 with an 8-byte
 * access past the 16-byte value -> rejected.
 */
6269 "map helper access to adjusted map (via variable): wrong max check",
6271 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6272 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6273 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6274 BPF_LD_MAP_FD(BPF_REG_1, 0),
6275 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6276 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
6277 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6278 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6279 BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
6280 offsetof(struct other_val, bar) + 1, 4),
6281 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
6282 BPF_LD_MAP_FD(BPF_REG_1, 0),
6283 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6286 .fixup_map3 = { 3, 11 },
6288 .errstr = "invalid access to map value, value_size=16 off=9 size=8",
6289 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
/* Spill the map-value pointer R0 to the stack (fp-184), refill it into
 * R3, and store through it — the verifier must keep the map-value type
 * across the spill.  Unprivileged: rejected for leaking the pointer.
 */
6292 "map element value is preserved across register spilling",
6294 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6295 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6296 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6297 BPF_LD_MAP_FD(BPF_REG_1, 0),
6298 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6299 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6300 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
6301 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
6302 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -184),
6303 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
6304 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
6305 BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
6308 .fixup_map2 = { 3 },
6309 .errstr_unpriv = "R0 leaks addr",
6311 .result_unpriv = REJECT,
/* Spill R0 while it is still map_value_or_null, NULL-check afterwards,
 * then use the refilled pointer — the or-null mark must survive the
 * spill/fill round trip.
 */
6314 "map element value or null is marked on register spilling",
6316 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6317 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6318 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6319 BPF_LD_MAP_FD(BPF_REG_1, 0),
6320 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6321 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
6322 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -152),
6323 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
6324 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
6325 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
6326 BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
6329 .fixup_map2 = { 3 },
6330 .errstr_unpriv = "R0 leaks addr",
6332 .result_unpriv = REJECT,
/* R1-R5 are clobbered by the helper call; storing R1 afterwards must be
 * rejected as a read of an uninitialized register.
 */
6335 "map element value store of cleared call register",
6337 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6338 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6339 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6340 BPF_LD_MAP_FD(BPF_REG_1, 0),
6341 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6342 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
6343 BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
6346 .fixup_map2 = { 3 },
6347 .errstr_unpriv = "R1 !read_ok",
6348 .errstr = "R1 !read_ok",
6350 .result_unpriv = REJECT,
/* Misaligned 8-byte stores at assorted positive and negative offsets
 * from a map-value pointer (after +3/+5 adjustments) — accepted only on
 * architectures with efficient unaligned access (see .flags).
 */
6353 "map element value with unaligned store",
6355 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6356 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6357 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6358 BPF_LD_MAP_FD(BPF_REG_1, 0),
6359 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6360 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 17),
6361 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 3),
6362 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
6363 BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 43),
6364 BPF_ST_MEM(BPF_DW, BPF_REG_0, -2, 44),
6365 BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
6366 BPF_ST_MEM(BPF_DW, BPF_REG_8, 0, 32),
6367 BPF_ST_MEM(BPF_DW, BPF_REG_8, 2, 33),
6368 BPF_ST_MEM(BPF_DW, BPF_REG_8, -2, 34),
6369 BPF_ALU64_IMM(BPF_ADD, BPF_REG_8, 5),
6370 BPF_ST_MEM(BPF_DW, BPF_REG_8, 0, 22),
6371 BPF_ST_MEM(BPF_DW, BPF_REG_8, 4, 23),
6372 BPF_ST_MEM(BPF_DW, BPF_REG_8, -7, 24),
6373 BPF_MOV64_REG(BPF_REG_7, BPF_REG_8),
6374 BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, 3),
6375 BPF_ST_MEM(BPF_DW, BPF_REG_7, 0, 22),
6376 BPF_ST_MEM(BPF_DW, BPF_REG_7, 4, 23),
6377 BPF_ST_MEM(BPF_DW, BPF_REG_7, -4, 24),
6380 .fixup_map2 = { 3 },
6381 .errstr_unpriv = "R0 leaks addr",
6383 .result_unpriv = REJECT,
6384 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
/* Same idea with loads: the first word is bounds-checked against
 * MAX_ENTRIES, then misaligned 8-byte loads follow.
 */
6387 "map element value with unaligned load",
6389 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6390 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6391 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6392 BPF_LD_MAP_FD(BPF_REG_1, 0),
6393 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6394 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
6395 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
6396 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES, 9),
6397 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 3),
6398 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
6399 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 2),
6400 BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
6401 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_8, 0),
6402 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_8, 2),
6403 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 5),
6404 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
6405 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 4),
6408 .fixup_map2 = { 3 },
6409 .errstr_unpriv = "R0 leaks addr",
6411 .result_unpriv = REJECT,
6412 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
/* Series: arithmetic that is forbidden on pointers.  Each variant applies
 * one illegal ALU op to the map-value pointer in R0, then dereferences.
 */
/* 1: bitwise AND on a pointer. */
6415 "map element value illegal alu op, 1",
6417 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6418 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6419 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6420 BPF_LD_MAP_FD(BPF_REG_1, 0),
6421 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6422 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
6423 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 8),
6424 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
6427 .fixup_map2 = { 3 },
6428 .errstr = "R0 bitwise operator &= on pointer",
/* 2: 32-bit ALU on a pointer (truncates the upper half). */
6432 "map element value illegal alu op, 2",
6434 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6435 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6436 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6437 BPF_LD_MAP_FD(BPF_REG_1, 0),
6438 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6439 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
6440 BPF_ALU32_IMM(BPF_ADD, BPF_REG_0, 0),
6441 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
6444 .fixup_map2 = { 3 },
6445 .errstr = "R0 32-bit pointer arithmetic prohibited",
/* 3: division of a pointer. */
6449 "map element value illegal alu op, 3",
6451 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6452 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6453 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6454 BPF_LD_MAP_FD(BPF_REG_1, 0),
6455 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6456 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
6457 BPF_ALU64_IMM(BPF_DIV, BPF_REG_0, 42),
6458 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
6461 .fixup_map2 = { 3 },
6462 .errstr = "R0 pointer arithmetic with /= operator",
/* 4: byte-swap of a pointer turns it into a scalar ('inv'), so the
 * following store through it is an invalid mem access.
 */
6466 "map element value illegal alu op, 4",
6468 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6469 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6470 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6471 BPF_LD_MAP_FD(BPF_REG_1, 0),
6472 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6473 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
6474 BPF_ENDIAN(BPF_FROM_BE, BPF_REG_0, 64),
6475 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
6478 .fixup_map2 = { 3 },
6479 .errstr_unpriv = "R0 pointer arithmetic prohibited",
6480 .errstr = "invalid mem access 'inv'",
6482 .result_unpriv = REJECT,
6483 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
/* 5: atomic add (XADD) on a spilled pointer slot corrupts the pointer;
 * the refilled value is a scalar and dereferencing it is rejected.
 */
6486 "map element value illegal alu op, 5",
6488 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6489 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6490 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6491 BPF_LD_MAP_FD(BPF_REG_1, 0),
6492 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6493 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
6494 BPF_MOV64_IMM(BPF_REG_3, 4096),
6495 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6496 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6497 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
6498 BPF_STX_XADD(BPF_DW, BPF_REG_2, BPF_REG_3, 0),
6499 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 0),
6500 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
6503 .fixup_map2 = { 3 },
6504 .errstr = "R0 invalid mem access 'inv'",
6506 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
/* Like the earlier spilling test, but the spilled pointer has already
 * been adjusted by a constant (offsetof(test_val, foo)); the offset must
 * also survive the spill/fill.  Same name as the earlier entry —
 * NOTE(review): presumably intentional upstream duplication; verify.
 */
6509 "map element value is preserved across register spilling",
6511 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6512 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6513 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6514 BPF_LD_MAP_FD(BPF_REG_1, 0),
6515 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6516 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
6517 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0,
6518 offsetof(struct test_val, foo)),
6519 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
6520 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
6521 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -184),
6522 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
6523 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
6524 BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
6527 .fixup_map2 = { 3 },
6528 .errstr_unpriv = "R0 leaks addr",
6530 .result_unpriv = REJECT,
6531 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
/* Variable-size probe_read() into a 64-byte, zero-initialized stack
 * buffer: size register masked with AND 64 (so max 64) and JGE-guarded
 * against zero -> accepted.
 */
6534 "helper access to variable memory: stack, bitwise AND + JMP, correct bounds",
6536 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
6537 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
6538 BPF_MOV64_IMM(BPF_REG_0, 0),
6539 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
6540 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
6541 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
6542 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
6543 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
6544 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
6545 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
6546 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
6547 BPF_MOV64_IMM(BPF_REG_2, 16),
6548 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
6549 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
6550 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
6551 BPF_MOV64_IMM(BPF_REG_4, 0),
6552 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
6553 BPF_MOV64_IMM(BPF_REG_3, 0),
6554 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6555 BPF_MOV64_IMM(BPF_REG_0, 0),
6559 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
/* AND-only bound keeps size in [0, 64] but size 0 is allowed through,
 * and the stack slot was never initialized -> invalid indirect read.
 */
6562 "helper access to variable memory: stack, bitwise AND, zero included",
6564 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, 8),
6565 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
6566 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
6567 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
6568 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
6569 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
6570 BPF_MOV64_IMM(BPF_REG_3, 0),
6571 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6574 .errstr = "invalid indirect read from stack off -64+0 size 64",
6576 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
/* AND 65 allows a 65-byte access into a 64-byte buffer -> rejected. */
6579 "helper access to variable memory: stack, bitwise AND + JMP, wrong max",
6581 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, 8),
6582 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
6583 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
6584 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
6585 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
6586 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 65),
6587 BPF_MOV64_IMM(BPF_REG_4, 0),
6588 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
6589 BPF_MOV64_IMM(BPF_REG_3, 0),
6590 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6591 BPF_MOV64_IMM(BPF_REG_0, 0),
6594 .errstr = "invalid stack type R1 off=-64 access_size=65",
6596 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
/* As above, but size bounded with conditional jumps instead of AND:
 * unsigned JGT 64 (max) plus JGE-vs-zero (min) -> accepted.
 */
6599 "helper access to variable memory: stack, JMP, correct bounds",
6601 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
6602 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
6603 BPF_MOV64_IMM(BPF_REG_0, 0),
6604 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
6605 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
6606 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
6607 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
6608 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
6609 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
6610 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
6611 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
6612 BPF_MOV64_IMM(BPF_REG_2, 16),
6613 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
6614 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
6615 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 4),
6616 BPF_MOV64_IMM(BPF_REG_4, 0),
6617 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
6618 BPF_MOV64_IMM(BPF_REG_3, 0),
6619 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6620 BPF_MOV64_IMM(BPF_REG_0, 0),
6624 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
/* Signed-comparison variant of the same bounds (JSGT/JSGE). */
6627 "helper access to variable memory: stack, JMP (signed), correct bounds",
6629 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
6630 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
6631 BPF_MOV64_IMM(BPF_REG_0, 0),
6632 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
6633 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
6634 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
6635 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
6636 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
6637 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
6638 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
6639 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
6640 BPF_MOV64_IMM(BPF_REG_2, 16),
6641 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
6642 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
6643 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 64, 4),
6644 BPF_MOV64_IMM(BPF_REG_4, 0),
6645 BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
6646 BPF_MOV64_IMM(BPF_REG_3, 0),
6647 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6648 BPF_MOV64_IMM(BPF_REG_0, 0),
6652 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
/* Bounds checked for [1, 64] but then the size is incremented by one
 * after the checks -> effective max 65 on a 64-byte buffer, rejected.
 */
6655 "helper access to variable memory: stack, JMP, bounds + offset",
6657 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, 8),
6658 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
6659 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
6660 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
6661 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
6662 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 5),
6663 BPF_MOV64_IMM(BPF_REG_4, 0),
6664 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 3),
6665 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
6666 BPF_MOV64_IMM(BPF_REG_3, 0),
6667 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6668 BPF_MOV64_IMM(BPF_REG_0, 0),
6671 .errstr = "invalid stack type R1 off=-64 access_size=65",
6673 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
/* Upper bound of 65 exceeds the 64-byte buffer -> rejected. */
6676 "helper access to variable memory: stack, JMP, wrong max",
6678 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, 8),
6679 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
6680 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
6681 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
6682 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
6683 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 65, 4),
6684 BPF_MOV64_IMM(BPF_REG_4, 0),
6685 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
6686 BPF_MOV64_IMM(BPF_REG_3, 0),
6687 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6688 BPF_MOV64_IMM(BPF_REG_0, 0),
6691 .errstr = "invalid stack type R1 off=-64 access_size=65",
6693 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
/* Only a min check (JGE vs 0): size still unbounded above, and the
 * signed interpretation can be negative -> rejected.
 */
6696 "helper access to variable memory: stack, JMP, no max check",
6698 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, 8),
6699 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
6700 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
6701 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
6702 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
6703 BPF_MOV64_IMM(BPF_REG_4, 0),
6704 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
6705 BPF_MOV64_IMM(BPF_REG_3, 0),
6706 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6707 BPF_MOV64_IMM(BPF_REG_0, 0),
6710 /* because max wasn't checked, signed min is negative */
6711 .errstr = "R2 min value is negative, either use unsigned or 'var &= const'",
6713 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
/* Unsigned max check only: size 0 allowed through onto an
 * uninitialized buffer -> invalid indirect read.
 */
6716 "helper access to variable memory: stack, JMP, no min check",
6718 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, 8),
6719 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
6720 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
6721 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
6722 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
6723 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 3),
6724 BPF_MOV64_IMM(BPF_REG_3, 0),
6725 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6726 BPF_MOV64_IMM(BPF_REG_0, 0),
6729 .errstr = "invalid indirect read from stack off -64+0 size 64",
6731 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
/* Signed max check only: negative sizes pass the JSGT -> rejected. */
6734 "helper access to variable memory: stack, JMP (signed), no min check",
6736 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, 8),
6737 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
6738 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
6739 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
6740 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
6741 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 64, 3),
6742 BPF_MOV64_IMM(BPF_REG_3, 0),
6743 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6744 BPF_MOV64_IMM(BPF_REG_0, 0),
6747 .errstr = "R2 min value is negative",
6749 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
/* Variable-size probe_read() whose destination is a map value; size is a
 * known constant round-tripped through the stack, signed-bounded to
 * (0, sizeof(test_val)] -> accepted.
 */
6752 "helper access to variable memory: map, JMP, correct bounds",
6754 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6755 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6756 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6757 BPF_LD_MAP_FD(BPF_REG_1, 0),
6758 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6759 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
6760 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6761 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
6762 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
6763 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
6764 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
6765 sizeof(struct test_val), 4),
6766 BPF_MOV64_IMM(BPF_REG_4, 0),
6767 BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
6768 BPF_MOV64_IMM(BPF_REG_3, 0),
6769 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6770 BPF_MOV64_IMM(BPF_REG_0, 0),
6773 .fixup_map2 = { 3 },
6775 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
/* Unknown size bounded with sizeof+1 -> may write 49 bytes into a
 * 48-byte value, rejected.
 */
6778 "helper access to variable memory: map, JMP, wrong max",
6780 BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
6781 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6782 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6783 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6784 BPF_LD_MAP_FD(BPF_REG_1, 0),
6785 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6786 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
6787 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6788 BPF_MOV64_REG(BPF_REG_2, BPF_REG_6),
6789 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
6790 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
6791 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
6792 sizeof(struct test_val) + 1, 4),
6793 BPF_MOV64_IMM(BPF_REG_4, 0),
6794 BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
6795 BPF_MOV64_IMM(BPF_REG_3, 0),
6796 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6797 BPF_MOV64_IMM(BPF_REG_0, 0),
6800 .fixup_map2 = { 4 },
6801 .errstr = "invalid access to map value, value_size=48 off=0 size=49",
6803 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
/* Destination pointer advanced by 20; size bounded to sizeof-20 keeps
 * the access inside the value -> accepted.
 */
6806 "helper access to variable memory: map adjusted, JMP, correct bounds",
6808 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6809 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6810 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6811 BPF_LD_MAP_FD(BPF_REG_1, 0),
6812 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6813 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
6814 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6815 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 20),
6816 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
6817 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
6818 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
6819 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
6820 sizeof(struct test_val) - 20, 4),
6821 BPF_MOV64_IMM(BPF_REG_4, 0),
6822 BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
6823 BPF_MOV64_IMM(BPF_REG_3, 0),
6824 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6825 BPF_MOV64_IMM(BPF_REG_0, 0),
6828 .fixup_map2 = { 3 },
6830 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
/* Off-by-one bound (sizeof-19) with a +20 pointer -> one byte past the
 * value, rejected ("min value is outside of the array range").
 */
6833 "helper access to variable memory: map adjusted, JMP, wrong max",
6835 BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
6836 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6837 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6838 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6839 BPF_LD_MAP_FD(BPF_REG_1, 0),
6840 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6841 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
6842 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6843 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 20),
6844 BPF_MOV64_REG(BPF_REG_2, BPF_REG_6),
6845 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
6846 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
6847 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
6848 sizeof(struct test_val) - 19, 4),
6849 BPF_MOV64_IMM(BPF_REG_4, 0),
6850 BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
6851 BPF_MOV64_IMM(BPF_REG_3, 0),
6852 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6853 BPF_MOV64_IMM(BPF_REG_0, 0),
6856 .fixup_map2 = { 4 },
6857 .errstr = "R1 min value is outside of the array range",
6859 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
/* csum_diff() takes ARG_PTR_TO_MEM_OR_NULL: a NULL pointer is fine when
 * the paired size is 0 -> accepted.
 */
6862 "helper access to variable memory: size = 0 allowed on NULL (ARG_PTR_TO_MEM_OR_NULL)",
6864 BPF_MOV64_IMM(BPF_REG_1, 0),
6865 BPF_MOV64_IMM(BPF_REG_2, 0),
6866 BPF_MOV64_IMM(BPF_REG_3, 0),
6867 BPF_MOV64_IMM(BPF_REG_4, 0),
6868 BPF_MOV64_IMM(BPF_REG_5, 0),
6869 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
6873 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
/* NULL pointer with a possibly non-zero size (AND 64) -> rejected. */
6876 "helper access to variable memory: size > 0 not allowed on NULL (ARG_PTR_TO_MEM_OR_NULL)",
6878 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
6879 BPF_MOV64_IMM(BPF_REG_1, 0),
6880 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
6881 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
6882 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
6883 BPF_MOV64_IMM(BPF_REG_3, 0),
6884 BPF_MOV64_IMM(BPF_REG_4, 0),
6885 BPF_MOV64_IMM(BPF_REG_5, 0),
6886 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
6889 .errstr = "R1 type=inv expected=fp",
6891 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
/* Non-NULL stack pointer with size 0 (R2 = 0 AND 8 = 0) -> accepted. */
6894 "helper access to variable memory: size = 0 allowed on != NULL stack pointer (ARG_PTR_TO_MEM_OR_NULL)",
6896 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
6897 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
6898 BPF_MOV64_IMM(BPF_REG_2, 0),
6899 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, 0),
6900 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 8),
6901 BPF_MOV64_IMM(BPF_REG_3, 0),
6902 BPF_MOV64_IMM(BPF_REG_4, 0),
6903 BPF_MOV64_IMM(BPF_REG_5, 0),
6904 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
6908 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
/* Non-NULL map-value pointer with size 0 -> accepted. */
6911 "helper access to variable memory: size = 0 allowed on != NULL map pointer (ARG_PTR_TO_MEM_OR_NULL)",
6913 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6914 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6915 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6916 BPF_LD_MAP_FD(BPF_REG_1, 0),
6917 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6918 BPF_FUNC_map_lookup_elem),
6919 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6920 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6921 BPF_MOV64_IMM(BPF_REG_2, 0),
6922 BPF_MOV64_IMM(BPF_REG_3, 0),
6923 BPF_MOV64_IMM(BPF_REG_4, 0),
6924 BPF_MOV64_IMM(BPF_REG_5, 0),
6925 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
6928 .fixup_map1 = { 3 },
6930 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
/* Non-NULL stack pointer, size in [0, 8] read from the map -> accepted
 * (zero size permitted because the pointer is known non-NULL).
 */
6933 "helper access to variable memory: size possible = 0 allowed on != NULL stack pointer (ARG_PTR_TO_MEM_OR_NULL)",
6935 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6936 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6937 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6938 BPF_LD_MAP_FD(BPF_REG_1, 0),
6939 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6940 BPF_FUNC_map_lookup_elem),
6941 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
6942 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
6943 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 7),
6944 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
6945 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
6946 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, 0),
6947 BPF_MOV64_IMM(BPF_REG_3, 0),
6948 BPF_MOV64_IMM(BPF_REG_4, 0),
6949 BPF_MOV64_IMM(BPF_REG_5, 0),
6950 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
6953 .fixup_map1 = { 3 },
6955 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
/* Same, but the memory argument is the map value itself. */
6958 "helper access to variable memory: size possible = 0 allowed on != NULL map pointer (ARG_PTR_TO_MEM_OR_NULL)",
6960 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6961 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6962 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6963 BPF_LD_MAP_FD(BPF_REG_1, 0),
6964 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6965 BPF_FUNC_map_lookup_elem),
6966 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
6967 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6968 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
6969 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 4),
6970 BPF_MOV64_IMM(BPF_REG_3, 0),
6971 BPF_MOV64_IMM(BPF_REG_4, 0),
6972 BPF_MOV64_IMM(BPF_REG_5, 0),
6973 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
6976 .fixup_map1 = { 3 },
6978 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
/* Packet pointer validated against data_end, size in [0, 8] read from
 * the packet -> accepted; runtime retval documented inline below.
 */
6981 "helper access to variable memory: size possible = 0 allowed on != NULL packet pointer (ARG_PTR_TO_MEM_OR_NULL)",
6983 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
6984 offsetof(struct __sk_buff, data)),
6985 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
6986 offsetof(struct __sk_buff, data_end)),
6987 BPF_MOV64_REG(BPF_REG_0, BPF_REG_6),
6988 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
6989 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 7),
6990 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
6991 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 0),
6992 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 4),
6993 BPF_MOV64_IMM(BPF_REG_3, 0),
6994 BPF_MOV64_IMM(BPF_REG_4, 0),
6995 BPF_MOV64_IMM(BPF_REG_5, 0),
6996 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
7000 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
7001 .retval = 0 /* csum_diff of 64-byte packet */,
7002 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
/* probe_read()'s memory argument is NOT _OR_NULL: passing NULL is
 * rejected even with size 0.
 */
7005 "helper access to variable memory: size = 0 not allowed on NULL (!ARG_PTR_TO_MEM_OR_NULL)",
7007 BPF_MOV64_IMM(BPF_REG_1, 0),
7008 BPF_MOV64_IMM(BPF_REG_2, 0),
7009 BPF_MOV64_IMM(BPF_REG_3, 0),
7010 BPF_EMIT_CALL(BPF_FUNC_probe_read),
7013 .errstr = "R1 type=inv expected=fp",
7015 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
/* NULL with size 1 -> rejected for the same reason. */
7018 "helper access to variable memory: size > 0 not allowed on NULL (!ARG_PTR_TO_MEM_OR_NULL)",
7020 BPF_MOV64_IMM(BPF_REG_1, 0),
7021 BPF_MOV64_IMM(BPF_REG_2, 1),
7022 BPF_MOV64_IMM(BPF_REG_3, 0),
7023 BPF_EMIT_CALL(BPF_FUNC_probe_read),
7026 .errstr = "R1 type=inv expected=fp",
7028 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
/* Valid stack pointer with size 0 -> accepted. */
7031 "helper access to variable memory: size = 0 allowed on != NULL stack pointer (!ARG_PTR_TO_MEM_OR_NULL)",
7033 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7034 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
7035 BPF_MOV64_IMM(BPF_REG_2, 0),
7036 BPF_MOV64_IMM(BPF_REG_3, 0),
7037 BPF_EMIT_CALL(BPF_FUNC_probe_read),
7041 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
/* Valid map-value pointer with size 0 -> accepted. */
7044 "helper access to variable memory: size = 0 allowed on != NULL map pointer (!ARG_PTR_TO_MEM_OR_NULL)",
7046 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7047 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7048 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7049 BPF_LD_MAP_FD(BPF_REG_1, 0),
7050 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7051 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
7052 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
7053 BPF_MOV64_IMM(BPF_REG_2, 0),
7054 BPF_MOV64_IMM(BPF_REG_3, 0),
7055 BPF_EMIT_CALL(BPF_FUNC_probe_read),
7058 .fixup_map1 = { 3 },
7060 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
/* Stack pointer with size in [0, 8] read from the map -> accepted. */
7063 "helper access to variable memory: size possible = 0 allowed on != NULL stack pointer (!ARG_PTR_TO_MEM_OR_NULL)",
7065 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7066 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7067 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7068 BPF_LD_MAP_FD(BPF_REG_1, 0),
7069 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7070 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
7071 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
7072 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 4),
7073 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7074 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
7075 BPF_MOV64_IMM(BPF_REG_3, 0),
7076 BPF_EMIT_CALL(BPF_FUNC_probe_read),
7079 .fixup_map1 = { 3 },
7081 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
/* Map-value pointer with size in [0, 8] -> accepted.
 * NOTE(review): the entry's closing brace and any '.result' line fall
 * outside/after this chunk.
 */
7084 "helper access to variable memory: size possible = 0 allowed on != NULL map pointer (!ARG_PTR_TO_MEM_OR_NULL)",
7086 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7087 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7088 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7089 BPF_LD_MAP_FD(BPF_REG_1, 0),
7090 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7091 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
7092 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
7093 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
7094 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 2),
7095 BPF_MOV64_IMM(BPF_REG_3, 0),
7096 BPF_EMIT_CALL(BPF_FUNC_probe_read),
7099 .fixup_map1 = { 3 },
7101 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
7104 "helper access to variable memory: 8 bytes leak",
7106 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, 8),
7107 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7108 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
7109 BPF_MOV64_IMM(BPF_REG_0, 0),
7110 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
7111 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
7112 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
7113 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
7114 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
7115 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
7116 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
7117 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
7118 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
7119 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 63),
7120 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
7121 BPF_MOV64_IMM(BPF_REG_3, 0),
7122 BPF_EMIT_CALL(BPF_FUNC_probe_read),
7123 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7126 .errstr = "invalid indirect read from stack off -64+32 size 64",
7128 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
7131 "helper access to variable memory: 8 bytes no leak (init memory)",
7133 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7134 BPF_MOV64_IMM(BPF_REG_0, 0),
7135 BPF_MOV64_IMM(BPF_REG_0, 0),
7136 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
7137 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
7138 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
7139 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
7140 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
7141 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
7142 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
7143 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
7144 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
7145 BPF_MOV64_IMM(BPF_REG_2, 0),
7146 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 32),
7147 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 32),
7148 BPF_MOV64_IMM(BPF_REG_3, 0),
7149 BPF_EMIT_CALL(BPF_FUNC_probe_read),
7150 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7154 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
7157 "invalid and of negative number",
7159 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7160 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7161 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7162 BPF_LD_MAP_FD(BPF_REG_1, 0),
7163 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7164 BPF_FUNC_map_lookup_elem),
7165 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
7166 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7167 BPF_ALU64_IMM(BPF_AND, BPF_REG_1, -4),
7168 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
7169 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7170 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
7171 offsetof(struct test_val, foo)),
7174 .fixup_map2 = { 3 },
7175 .errstr = "R0 max value is outside of the array range",
7177 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7180 "invalid range check",
7182 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7183 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7184 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7185 BPF_LD_MAP_FD(BPF_REG_1, 0),
7186 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7187 BPF_FUNC_map_lookup_elem),
7188 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 12),
7189 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
7190 BPF_MOV64_IMM(BPF_REG_9, 1),
7191 BPF_ALU32_IMM(BPF_MOD, BPF_REG_1, 2),
7192 BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 1),
7193 BPF_ALU32_REG(BPF_AND, BPF_REG_9, BPF_REG_1),
7194 BPF_ALU32_IMM(BPF_ADD, BPF_REG_9, 1),
7195 BPF_ALU32_IMM(BPF_RSH, BPF_REG_9, 1),
7196 BPF_MOV32_IMM(BPF_REG_3, 1),
7197 BPF_ALU32_REG(BPF_SUB, BPF_REG_3, BPF_REG_9),
7198 BPF_ALU32_IMM(BPF_MUL, BPF_REG_3, 0x10000000),
7199 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
7200 BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_3, 0),
7201 BPF_MOV64_REG(BPF_REG_0, 0),
7204 .fixup_map2 = { 3 },
7205 .errstr = "R0 max value is outside of the array range",
7207 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7210 "map in map access",
7212 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
7213 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7214 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
7215 BPF_LD_MAP_FD(BPF_REG_1, 0),
7216 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7217 BPF_FUNC_map_lookup_elem),
7218 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
7219 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
7220 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7221 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
7222 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
7223 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7224 BPF_FUNC_map_lookup_elem),
7225 BPF_MOV64_IMM(BPF_REG_0, 0),
7228 .fixup_map_in_map = { 3 },
7232 "invalid inner map pointer",
7234 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
7235 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7236 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
7237 BPF_LD_MAP_FD(BPF_REG_1, 0),
7238 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7239 BPF_FUNC_map_lookup_elem),
7240 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
7241 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
7242 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7243 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
7244 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
7245 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7246 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7247 BPF_FUNC_map_lookup_elem),
7248 BPF_MOV64_IMM(BPF_REG_0, 0),
7251 .fixup_map_in_map = { 3 },
7252 .errstr = "R1 pointer arithmetic on CONST_PTR_TO_MAP prohibited",
7256 "forgot null checking on the inner map pointer",
7258 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
7259 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7260 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
7261 BPF_LD_MAP_FD(BPF_REG_1, 0),
7262 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7263 BPF_FUNC_map_lookup_elem),
7264 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
7265 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7266 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
7267 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
7268 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7269 BPF_FUNC_map_lookup_elem),
7270 BPF_MOV64_IMM(BPF_REG_0, 0),
7273 .fixup_map_in_map = { 3 },
7274 .errstr = "R1 type=map_value_or_null expected=map_ptr",
7278 "ld_abs: check calling conv, r1",
7280 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
7281 BPF_MOV64_IMM(BPF_REG_1, 0),
7282 BPF_LD_ABS(BPF_W, -0x200000),
7283 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
7286 .errstr = "R1 !read_ok",
7290 "ld_abs: check calling conv, r2",
7292 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
7293 BPF_MOV64_IMM(BPF_REG_2, 0),
7294 BPF_LD_ABS(BPF_W, -0x200000),
7295 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
7298 .errstr = "R2 !read_ok",
7302 "ld_abs: check calling conv, r3",
7304 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
7305 BPF_MOV64_IMM(BPF_REG_3, 0),
7306 BPF_LD_ABS(BPF_W, -0x200000),
7307 BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
7310 .errstr = "R3 !read_ok",
7314 "ld_abs: check calling conv, r4",
7316 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
7317 BPF_MOV64_IMM(BPF_REG_4, 0),
7318 BPF_LD_ABS(BPF_W, -0x200000),
7319 BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
7322 .errstr = "R4 !read_ok",
7326 "ld_abs: check calling conv, r5",
7328 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
7329 BPF_MOV64_IMM(BPF_REG_5, 0),
7330 BPF_LD_ABS(BPF_W, -0x200000),
7331 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
7334 .errstr = "R5 !read_ok",
7338 "ld_abs: check calling conv, r7",
7340 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
7341 BPF_MOV64_IMM(BPF_REG_7, 0),
7342 BPF_LD_ABS(BPF_W, -0x200000),
7343 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
7349 "ld_abs: tests on r6 and skb data reload helper",
7351 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
7352 BPF_LD_ABS(BPF_B, 0),
7353 BPF_LD_ABS(BPF_H, 0),
7354 BPF_LD_ABS(BPF_W, 0),
7355 BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
7356 BPF_MOV64_IMM(BPF_REG_6, 0),
7357 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
7358 BPF_MOV64_IMM(BPF_REG_2, 1),
7359 BPF_MOV64_IMM(BPF_REG_3, 2),
7360 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7361 BPF_FUNC_skb_vlan_push),
7362 BPF_MOV64_REG(BPF_REG_6, BPF_REG_7),
7363 BPF_LD_ABS(BPF_B, 0),
7364 BPF_LD_ABS(BPF_H, 0),
7365 BPF_LD_ABS(BPF_W, 0),
7366 BPF_MOV64_IMM(BPF_REG_0, 42),
7369 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
7371 .retval = 42 /* ultimate return value */,
7374 "ld_ind: check calling conv, r1",
7376 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
7377 BPF_MOV64_IMM(BPF_REG_1, 1),
7378 BPF_LD_IND(BPF_W, BPF_REG_1, -0x200000),
7379 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
7382 .errstr = "R1 !read_ok",
7386 "ld_ind: check calling conv, r2",
7388 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
7389 BPF_MOV64_IMM(BPF_REG_2, 1),
7390 BPF_LD_IND(BPF_W, BPF_REG_2, -0x200000),
7391 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
7394 .errstr = "R2 !read_ok",
7398 "ld_ind: check calling conv, r3",
7400 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
7401 BPF_MOV64_IMM(BPF_REG_3, 1),
7402 BPF_LD_IND(BPF_W, BPF_REG_3, -0x200000),
7403 BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
7406 .errstr = "R3 !read_ok",
7410 "ld_ind: check calling conv, r4",
7412 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
7413 BPF_MOV64_IMM(BPF_REG_4, 1),
7414 BPF_LD_IND(BPF_W, BPF_REG_4, -0x200000),
7415 BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
7418 .errstr = "R4 !read_ok",
7422 "ld_ind: check calling conv, r5",
7424 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
7425 BPF_MOV64_IMM(BPF_REG_5, 1),
7426 BPF_LD_IND(BPF_W, BPF_REG_5, -0x200000),
7427 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
7430 .errstr = "R5 !read_ok",
7434 "ld_ind: check calling conv, r7",
7436 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
7437 BPF_MOV64_IMM(BPF_REG_7, 1),
7438 BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000),
7439 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
7446 "check bpf_perf_event_data->sample_period byte load permitted",
7448 BPF_MOV64_IMM(BPF_REG_0, 0),
7449 #if __BYTE_ORDER == __LITTLE_ENDIAN
7450 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
7451 offsetof(struct bpf_perf_event_data, sample_period)),
7453 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
7454 offsetof(struct bpf_perf_event_data, sample_period) + 7),
7459 .prog_type = BPF_PROG_TYPE_PERF_EVENT,
7462 "check bpf_perf_event_data->sample_period half load permitted",
7464 BPF_MOV64_IMM(BPF_REG_0, 0),
7465 #if __BYTE_ORDER == __LITTLE_ENDIAN
7466 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
7467 offsetof(struct bpf_perf_event_data, sample_period)),
7469 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
7470 offsetof(struct bpf_perf_event_data, sample_period) + 6),
7475 .prog_type = BPF_PROG_TYPE_PERF_EVENT,
7478 "check bpf_perf_event_data->sample_period word load permitted",
7480 BPF_MOV64_IMM(BPF_REG_0, 0),
7481 #if __BYTE_ORDER == __LITTLE_ENDIAN
7482 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
7483 offsetof(struct bpf_perf_event_data, sample_period)),
7485 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
7486 offsetof(struct bpf_perf_event_data, sample_period) + 4),
7491 .prog_type = BPF_PROG_TYPE_PERF_EVENT,
7494 "check bpf_perf_event_data->sample_period dword load permitted",
7496 BPF_MOV64_IMM(BPF_REG_0, 0),
7497 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
7498 offsetof(struct bpf_perf_event_data, sample_period)),
7502 .prog_type = BPF_PROG_TYPE_PERF_EVENT,
7505 "check skb->data half load not permitted",
7507 BPF_MOV64_IMM(BPF_REG_0, 0),
7508 #if __BYTE_ORDER == __LITTLE_ENDIAN
7509 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
7510 offsetof(struct __sk_buff, data)),
7512 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
7513 offsetof(struct __sk_buff, data) + 2),
7518 .errstr = "invalid bpf_context access",
7521 "check skb->tc_classid half load not permitted for lwt prog",
7523 BPF_MOV64_IMM(BPF_REG_0, 0),
7524 #if __BYTE_ORDER == __LITTLE_ENDIAN
7525 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
7526 offsetof(struct __sk_buff, tc_classid)),
7528 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
7529 offsetof(struct __sk_buff, tc_classid) + 2),
7534 .errstr = "invalid bpf_context access",
7535 .prog_type = BPF_PROG_TYPE_LWT_IN,
7538 "bounds checks mixing signed and unsigned, positive bounds",
7540 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7541 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7542 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7543 BPF_LD_MAP_FD(BPF_REG_1, 0),
7544 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7545 BPF_FUNC_map_lookup_elem),
7546 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
7547 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
7548 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7549 BPF_MOV64_IMM(BPF_REG_2, 2),
7550 BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 3),
7551 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 4, 2),
7552 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7553 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
7554 BPF_MOV64_IMM(BPF_REG_0, 0),
7557 .fixup_map1 = { 3 },
7558 .errstr = "unbounded min value",
7562 "bounds checks mixing signed and unsigned",
7564 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7565 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7566 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7567 BPF_LD_MAP_FD(BPF_REG_1, 0),
7568 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7569 BPF_FUNC_map_lookup_elem),
7570 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
7571 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
7572 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7573 BPF_MOV64_IMM(BPF_REG_2, -1),
7574 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 3),
7575 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
7576 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7577 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
7578 BPF_MOV64_IMM(BPF_REG_0, 0),
7581 .fixup_map1 = { 3 },
7582 .errstr = "unbounded min value",
7586 "bounds checks mixing signed and unsigned, variant 2",
7588 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7589 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7590 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7591 BPF_LD_MAP_FD(BPF_REG_1, 0),
7592 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7593 BPF_FUNC_map_lookup_elem),
7594 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
7595 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
7596 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7597 BPF_MOV64_IMM(BPF_REG_2, -1),
7598 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 5),
7599 BPF_MOV64_IMM(BPF_REG_8, 0),
7600 BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_1),
7601 BPF_JMP_IMM(BPF_JSGT, BPF_REG_8, 1, 2),
7602 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
7603 BPF_ST_MEM(BPF_B, BPF_REG_8, 0, 0),
7604 BPF_MOV64_IMM(BPF_REG_0, 0),
7607 .fixup_map1 = { 3 },
7608 .errstr = "unbounded min value",
7612 "bounds checks mixing signed and unsigned, variant 3",
7614 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7615 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7616 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7617 BPF_LD_MAP_FD(BPF_REG_1, 0),
7618 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7619 BPF_FUNC_map_lookup_elem),
7620 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
7621 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
7622 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7623 BPF_MOV64_IMM(BPF_REG_2, -1),
7624 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 4),
7625 BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
7626 BPF_JMP_IMM(BPF_JSGT, BPF_REG_8, 1, 2),
7627 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
7628 BPF_ST_MEM(BPF_B, BPF_REG_8, 0, 0),
7629 BPF_MOV64_IMM(BPF_REG_0, 0),
7632 .fixup_map1 = { 3 },
7633 .errstr = "unbounded min value",
7637 "bounds checks mixing signed and unsigned, variant 4",
7639 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7640 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7641 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7642 BPF_LD_MAP_FD(BPF_REG_1, 0),
7643 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7644 BPF_FUNC_map_lookup_elem),
7645 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
7646 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
7647 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7648 BPF_MOV64_IMM(BPF_REG_2, 1),
7649 BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
7650 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
7651 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7652 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
7653 BPF_MOV64_IMM(BPF_REG_0, 0),
7656 .fixup_map1 = { 3 },
7660 "bounds checks mixing signed and unsigned, variant 5",
7662 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7663 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7664 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7665 BPF_LD_MAP_FD(BPF_REG_1, 0),
7666 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7667 BPF_FUNC_map_lookup_elem),
7668 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
7669 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
7670 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7671 BPF_MOV64_IMM(BPF_REG_2, -1),
7672 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 5),
7673 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 4),
7674 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 4),
7675 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
7676 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
7677 BPF_MOV64_IMM(BPF_REG_0, 0),
7680 .fixup_map1 = { 3 },
7681 .errstr = "unbounded min value",
7685 "bounds checks mixing signed and unsigned, variant 6",
7687 BPF_MOV64_IMM(BPF_REG_2, 0),
7688 BPF_MOV64_REG(BPF_REG_3, BPF_REG_10),
7689 BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, -512),
7690 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
7691 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -16),
7692 BPF_MOV64_IMM(BPF_REG_6, -1),
7693 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_6, 5),
7694 BPF_JMP_IMM(BPF_JSGT, BPF_REG_4, 1, 4),
7695 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 1),
7696 BPF_MOV64_IMM(BPF_REG_5, 0),
7697 BPF_ST_MEM(BPF_H, BPF_REG_10, -512, 0),
7698 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7699 BPF_FUNC_skb_load_bytes),
7700 BPF_MOV64_IMM(BPF_REG_0, 0),
7703 .errstr = "R4 min value is negative, either use unsigned",
7707 "bounds checks mixing signed and unsigned, variant 7",
7709 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7710 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7711 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7712 BPF_LD_MAP_FD(BPF_REG_1, 0),
7713 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7714 BPF_FUNC_map_lookup_elem),
7715 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
7716 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
7717 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7718 BPF_MOV64_IMM(BPF_REG_2, 1024 * 1024 * 1024),
7719 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 3),
7720 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
7721 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7722 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
7723 BPF_MOV64_IMM(BPF_REG_0, 0),
7726 .fixup_map1 = { 3 },
7730 "bounds checks mixing signed and unsigned, variant 8",
7732 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7733 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7734 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7735 BPF_LD_MAP_FD(BPF_REG_1, 0),
7736 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7737 BPF_FUNC_map_lookup_elem),
7738 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
7739 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
7740 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7741 BPF_MOV64_IMM(BPF_REG_2, -1),
7742 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
7743 BPF_MOV64_IMM(BPF_REG_0, 0),
7745 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
7746 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7747 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
7748 BPF_MOV64_IMM(BPF_REG_0, 0),
7751 .fixup_map1 = { 3 },
7752 .errstr = "unbounded min value",
7756 "bounds checks mixing signed and unsigned, variant 9",
7758 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7759 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7760 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7761 BPF_LD_MAP_FD(BPF_REG_1, 0),
7762 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7763 BPF_FUNC_map_lookup_elem),
7764 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
7765 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
7766 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7767 BPF_LD_IMM64(BPF_REG_2, -9223372036854775808ULL),
7768 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
7769 BPF_MOV64_IMM(BPF_REG_0, 0),
7771 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
7772 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7773 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
7774 BPF_MOV64_IMM(BPF_REG_0, 0),
7777 .fixup_map1 = { 3 },
7781 "bounds checks mixing signed and unsigned, variant 10",
7783 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7784 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7785 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7786 BPF_LD_MAP_FD(BPF_REG_1, 0),
7787 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7788 BPF_FUNC_map_lookup_elem),
7789 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
7790 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
7791 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7792 BPF_MOV64_IMM(BPF_REG_2, 0),
7793 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
7794 BPF_MOV64_IMM(BPF_REG_0, 0),
7796 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
7797 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7798 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
7799 BPF_MOV64_IMM(BPF_REG_0, 0),
7802 .fixup_map1 = { 3 },
7803 .errstr = "unbounded min value",
7807 "bounds checks mixing signed and unsigned, variant 11",
7809 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7810 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7811 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7812 BPF_LD_MAP_FD(BPF_REG_1, 0),
7813 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7814 BPF_FUNC_map_lookup_elem),
7815 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
7816 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
7817 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7818 BPF_MOV64_IMM(BPF_REG_2, -1),
7819 BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
7821 BPF_MOV64_IMM(BPF_REG_0, 0),
7823 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
7824 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7825 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
7826 BPF_MOV64_IMM(BPF_REG_0, 0),
7829 .fixup_map1 = { 3 },
7830 .errstr = "unbounded min value",
7834 "bounds checks mixing signed and unsigned, variant 12",
7836 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7837 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7838 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7839 BPF_LD_MAP_FD(BPF_REG_1, 0),
7840 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7841 BPF_FUNC_map_lookup_elem),
7842 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
7843 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
7844 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7845 BPF_MOV64_IMM(BPF_REG_2, -6),
7846 BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
7847 BPF_MOV64_IMM(BPF_REG_0, 0),
7849 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
7850 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7851 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
7852 BPF_MOV64_IMM(BPF_REG_0, 0),
7855 .fixup_map1 = { 3 },
7856 .errstr = "unbounded min value",
7860 "bounds checks mixing signed and unsigned, variant 13",
7862 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7863 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7864 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7865 BPF_LD_MAP_FD(BPF_REG_1, 0),
7866 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7867 BPF_FUNC_map_lookup_elem),
7868 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
7869 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
7870 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7871 BPF_MOV64_IMM(BPF_REG_2, 2),
7872 BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
7873 BPF_MOV64_IMM(BPF_REG_7, 1),
7874 BPF_JMP_IMM(BPF_JSGT, BPF_REG_7, 0, 2),
7875 BPF_MOV64_IMM(BPF_REG_0, 0),
7877 BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_1),
7878 BPF_JMP_IMM(BPF_JSGT, BPF_REG_7, 4, 2),
7879 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_7),
7880 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
7881 BPF_MOV64_IMM(BPF_REG_0, 0),
7884 .fixup_map1 = { 3 },
7885 .errstr = "unbounded min value",
7889 "bounds checks mixing signed and unsigned, variant 14",
7891 BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
7892 offsetof(struct __sk_buff, mark)),
7893 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7894 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7895 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7896 BPF_LD_MAP_FD(BPF_REG_1, 0),
7897 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7898 BPF_FUNC_map_lookup_elem),
7899 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
7900 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
7901 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7902 BPF_MOV64_IMM(BPF_REG_2, -1),
7903 BPF_MOV64_IMM(BPF_REG_8, 2),
7904 BPF_JMP_IMM(BPF_JEQ, BPF_REG_9, 42, 6),
7905 BPF_JMP_REG(BPF_JSGT, BPF_REG_8, BPF_REG_1, 3),
7906 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
7907 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7908 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
7909 BPF_MOV64_IMM(BPF_REG_0, 0),
7911 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, -3),
7912 BPF_JMP_IMM(BPF_JA, 0, 0, -7),
7914 .fixup_map1 = { 4 },
7915 .errstr = "unbounded min value",
7919 "bounds checks mixing signed and unsigned, variant 15",
7921 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7922 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7923 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7924 BPF_LD_MAP_FD(BPF_REG_1, 0),
7925 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7926 BPF_FUNC_map_lookup_elem),
7927 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
7928 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
7929 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7930 BPF_MOV64_IMM(BPF_REG_2, -6),
7931 BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
7932 BPF_MOV64_IMM(BPF_REG_0, 0),
7934 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7935 BPF_JMP_IMM(BPF_JGT, BPF_REG_0, 1, 2),
7936 BPF_MOV64_IMM(BPF_REG_0, 0),
7938 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
7939 BPF_MOV64_IMM(BPF_REG_0, 0),
7942 .fixup_map1 = { 3 },
7943 .errstr = "unbounded min value",
7945 .result_unpriv = REJECT,
7948 "subtraction bounds (map value) variant 1",
7950 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7951 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7952 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7953 BPF_LD_MAP_FD(BPF_REG_1, 0),
7954 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7955 BPF_FUNC_map_lookup_elem),
7956 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
7957 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7958 BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 0xff, 7),
7959 BPF_LDX_MEM(BPF_B, BPF_REG_3, BPF_REG_0, 1),
7960 BPF_JMP_IMM(BPF_JGT, BPF_REG_3, 0xff, 5),
7961 BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_3),
7962 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 56),
7963 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7964 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
7966 BPF_MOV64_IMM(BPF_REG_0, 0),
7969 .fixup_map1 = { 3 },
7970 .errstr = "R0 max value is outside of the array range",
7974 "subtraction bounds (map value) variant 2",
7976 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7977 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7978 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7979 BPF_LD_MAP_FD(BPF_REG_1, 0),
7980 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7981 BPF_FUNC_map_lookup_elem),
7982 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
7983 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7984 BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 0xff, 6),
7985 BPF_LDX_MEM(BPF_B, BPF_REG_3, BPF_REG_0, 1),
7986 BPF_JMP_IMM(BPF_JGT, BPF_REG_3, 0xff, 4),
7987 BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_3),
7988 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7989 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
7991 BPF_MOV64_IMM(BPF_REG_0, 0),
7994 .fixup_map1 = { 3 },
7995 .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
7996 .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
8000 "bounds check based on zero-extended MOV",
8002 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8003 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8004 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8005 BPF_LD_MAP_FD(BPF_REG_1, 0),
8006 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8007 BPF_FUNC_map_lookup_elem),
8008 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
8009 /* r2 = 0x0000'0000'ffff'ffff */
8010 BPF_MOV32_IMM(BPF_REG_2, 0xffffffff),
8012 BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 32),
8014 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
8015 /* access at offset 0 */
8016 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
8018 BPF_MOV64_IMM(BPF_REG_0, 0),
8021 .fixup_map1 = { 3 },
8025 "bounds check based on sign-extended MOV. test1",
8027 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8028 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8029 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8030 BPF_LD_MAP_FD(BPF_REG_1, 0),
8031 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8032 BPF_FUNC_map_lookup_elem),
8033 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
8034 /* r2 = 0xffff'ffff'ffff'ffff */
8035 BPF_MOV64_IMM(BPF_REG_2, 0xffffffff),
8036 /* r2 = 0xffff'ffff */
8037 BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 32),
8038 /* r0 = <oob pointer> */
8039 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
8040 /* access to OOB pointer */
8041 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
8043 BPF_MOV64_IMM(BPF_REG_0, 0),
8046 .fixup_map1 = { 3 },
8047 .errstr = "map_value pointer and 4294967295",
8051 "bounds check based on sign-extended MOV. test2",
8053 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8054 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8055 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8056 BPF_LD_MAP_FD(BPF_REG_1, 0),
8057 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8058 BPF_FUNC_map_lookup_elem),
8059 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
8060 /* r2 = 0xffff'ffff'ffff'ffff */
8061 BPF_MOV64_IMM(BPF_REG_2, 0xffffffff),
8062 /* r2 = 0xfff'ffff */
8063 BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 36),
8064 /* r0 = <oob pointer> */
8065 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
8066 /* access to OOB pointer */
8067 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
8069 BPF_MOV64_IMM(BPF_REG_0, 0),
8072 .fixup_map1 = { 3 },
8073 .errstr = "R0 min value is outside of the array range",
8077 "bounds check based on reg_off + var_off + insn_off. test1",
8079 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
8080 offsetof(struct __sk_buff, mark)),
8081 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8082 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8083 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8084 BPF_LD_MAP_FD(BPF_REG_1, 0),
8085 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8086 BPF_FUNC_map_lookup_elem),
8087 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
8088 BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 1),
8089 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, (1 << 29) - 1),
8090 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_6),
8091 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, (1 << 29) - 1),
8092 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 3),
8093 BPF_MOV64_IMM(BPF_REG_0, 0),
8096 .fixup_map1 = { 4 },
8097 .errstr = "value_size=8 off=1073741825",
8099 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
8102 "bounds check based on reg_off + var_off + insn_off. test2",
8104 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
8105 offsetof(struct __sk_buff, mark)),
8106 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8107 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8108 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8109 BPF_LD_MAP_FD(BPF_REG_1, 0),
8110 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8111 BPF_FUNC_map_lookup_elem),
8112 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
8113 BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 1),
8114 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, (1 << 30) - 1),
8115 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_6),
8116 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, (1 << 29) - 1),
8117 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 3),
8118 BPF_MOV64_IMM(BPF_REG_0, 0),
8121 .fixup_map1 = { 4 },
8122 .errstr = "value 1073741823",
8124 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
8127 "bounds check after truncation of non-boundary-crossing range",
8129 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8130 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8131 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8132 BPF_LD_MAP_FD(BPF_REG_1, 0),
8133 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8134 BPF_FUNC_map_lookup_elem),
8135 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
8136 /* r1 = [0x00, 0xff] */
8137 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
8138 BPF_MOV64_IMM(BPF_REG_2, 1),
8139 /* r2 = 0x10'0000'0000 */
8140 BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 36),
8141 /* r1 = [0x10'0000'0000, 0x10'0000'00ff] */
8142 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
8143 /* r1 = [0x10'7fff'ffff, 0x10'8000'00fe] */
8144 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
8145 /* r1 = [0x00, 0xff] */
8146 BPF_ALU32_IMM(BPF_SUB, BPF_REG_1, 0x7fffffff),
8148 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
8150 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8151 /* access at offset 0 */
8152 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
8154 BPF_MOV64_IMM(BPF_REG_0, 0),
8157 .fixup_map1 = { 3 },
8161 "bounds check after truncation of boundary-crossing range (1)",
8163 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8164 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8165 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8166 BPF_LD_MAP_FD(BPF_REG_1, 0),
8167 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8168 BPF_FUNC_map_lookup_elem),
8169 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
8170 /* r1 = [0x00, 0xff] */
8171 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
8172 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
8173 /* r1 = [0xffff'ff80, 0x1'0000'007f] */
8174 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
8175 /* r1 = [0xffff'ff80, 0xffff'ffff] or
8176 * [0x0000'0000, 0x0000'007f]
8178 BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 0),
8179 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
8180 /* r1 = [0x00, 0xff] or
8181 * [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff]
8183 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
8185 * [0x00ff'ffff'ff00'0000, 0x00ff'ffff'ffff'ffff]
8187 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
8188 /* no-op or OOB pointer computation */
8189 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8190 /* potentially OOB access */
8191 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
8193 BPF_MOV64_IMM(BPF_REG_0, 0),
8196 .fixup_map1 = { 3 },
8197 /* not actually fully unbounded, but the bound is very high */
8198 .errstr = "R0 unbounded memory access",
8202 "bounds check after truncation of boundary-crossing range (2)",
8204 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8205 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8206 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8207 BPF_LD_MAP_FD(BPF_REG_1, 0),
8208 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8209 BPF_FUNC_map_lookup_elem),
8210 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
8211 /* r1 = [0x00, 0xff] */
8212 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
8213 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
8214 /* r1 = [0xffff'ff80, 0x1'0000'007f] */
8215 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
8216 /* r1 = [0xffff'ff80, 0xffff'ffff] or
8217 * [0x0000'0000, 0x0000'007f]
8218 * difference to previous test: truncation via MOV32
8221 BPF_MOV32_REG(BPF_REG_1, BPF_REG_1),
8222 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
8223 /* r1 = [0x00, 0xff] or
8224 * [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff]
8226 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
8228 * [0x00ff'ffff'ff00'0000, 0x00ff'ffff'ffff'ffff]
8230 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
8231 /* no-op or OOB pointer computation */
8232 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8233 /* potentially OOB access */
8234 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
8236 BPF_MOV64_IMM(BPF_REG_0, 0),
8239 .fixup_map1 = { 3 },
8240 /* not actually fully unbounded, but the bound is very high */
8241 .errstr = "R0 unbounded memory access",
8245 "bounds check after wrapping 32-bit addition",
8247 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8248 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8249 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8250 BPF_LD_MAP_FD(BPF_REG_1, 0),
8251 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8252 BPF_FUNC_map_lookup_elem),
8253 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
8254 /* r1 = 0x7fff'ffff */
8255 BPF_MOV64_IMM(BPF_REG_1, 0x7fffffff),
8256 /* r1 = 0xffff'fffe */
8257 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
8259 BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 2),
8261 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8262 /* access at offset 0 */
8263 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
8265 BPF_MOV64_IMM(BPF_REG_0, 0),
8268 .fixup_map1 = { 3 },
8272 "bounds check after shift with oversized count operand",
8274 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8275 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8276 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8277 BPF_LD_MAP_FD(BPF_REG_1, 0),
8278 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8279 BPF_FUNC_map_lookup_elem),
8280 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
8281 BPF_MOV64_IMM(BPF_REG_2, 32),
8282 BPF_MOV64_IMM(BPF_REG_1, 1),
8283 /* r1 = (u32)1 << (u32)32 = ? */
8284 BPF_ALU32_REG(BPF_LSH, BPF_REG_1, BPF_REG_2),
8285 /* r1 = [0x0000, 0xffff] */
8286 BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xffff),
8287 /* computes unknown pointer, potentially OOB */
8288 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8289 /* potentially OOB access */
8290 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
8292 BPF_MOV64_IMM(BPF_REG_0, 0),
8295 .fixup_map1 = { 3 },
8296 .errstr = "R0 max value is outside of the array range",
8300 "bounds check after right shift of maybe-negative number",
8302 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8303 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8304 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8305 BPF_LD_MAP_FD(BPF_REG_1, 0),
8306 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8307 BPF_FUNC_map_lookup_elem),
8308 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
8309 /* r1 = [0x00, 0xff] */
8310 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
8311 /* r1 = [-0x01, 0xfe] */
8312 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 1),
8313 /* r1 = 0 or 0xff'ffff'ffff'ffff */
8314 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
8315 /* r1 = 0 or 0xffff'ffff'ffff */
8316 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
8317 /* computes unknown pointer, potentially OOB */
8318 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8319 /* potentially OOB access */
8320 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
8322 BPF_MOV64_IMM(BPF_REG_0, 0),
8325 .fixup_map1 = { 3 },
8326 .errstr = "R0 unbounded memory access",
8330 "bounds check map access with off+size signed 32bit overflow. test1",
8332 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8333 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8334 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8335 BPF_LD_MAP_FD(BPF_REG_1, 0),
8336 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8337 BPF_FUNC_map_lookup_elem),
8338 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
8340 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x7ffffffe),
8341 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
8345 .fixup_map1 = { 3 },
8346 .errstr = "map_value pointer and 2147483646",
8350 "bounds check map access with off+size signed 32bit overflow. test2",
8352 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8353 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8354 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8355 BPF_LD_MAP_FD(BPF_REG_1, 0),
8356 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8357 BPF_FUNC_map_lookup_elem),
8358 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
8360 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
8361 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
8362 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
8363 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
8367 .fixup_map1 = { 3 },
8368 .errstr = "pointer offset 1073741822",
8369 .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
8373 "bounds check map access with off+size signed 32bit overflow. test3",
8375 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8376 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8377 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8378 BPF_LD_MAP_FD(BPF_REG_1, 0),
8379 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8380 BPF_FUNC_map_lookup_elem),
8381 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
8383 BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 0x1fffffff),
8384 BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 0x1fffffff),
8385 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 2),
8389 .fixup_map1 = { 3 },
8390 .errstr = "pointer offset -1073741822",
8391 .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
8395 "bounds check map access with off+size signed 32bit overflow. test4",
8397 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8398 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8399 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8400 BPF_LD_MAP_FD(BPF_REG_1, 0),
8401 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8402 BPF_FUNC_map_lookup_elem),
8403 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
8405 BPF_MOV64_IMM(BPF_REG_1, 1000000),
8406 BPF_ALU64_IMM(BPF_MUL, BPF_REG_1, 1000000),
8407 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8408 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 2),
8412 .fixup_map1 = { 3 },
8413 .errstr = "map_value pointer and 1000000000000",
8417 "pointer/scalar confusion in state equality check (way 1)",
8419 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8420 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8421 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8422 BPF_LD_MAP_FD(BPF_REG_1, 0),
8423 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8424 BPF_FUNC_map_lookup_elem),
8425 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
8426 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
8428 BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
8432 .fixup_map1 = { 3 },
8434 .retval = POINTER_VALUE,
8435 .result_unpriv = REJECT,
8436 .errstr_unpriv = "R0 leaks addr as return value"
8439 "pointer/scalar confusion in state equality check (way 2)",
8441 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8442 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8443 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8444 BPF_LD_MAP_FD(BPF_REG_1, 0),
8445 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8446 BPF_FUNC_map_lookup_elem),
8447 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
8448 BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
8450 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
8453 .fixup_map1 = { 3 },
8455 .retval = POINTER_VALUE,
8456 .result_unpriv = REJECT,
8457 .errstr_unpriv = "R0 leaks addr as return value"
8460 "variable-offset ctx access",
8462 /* Get an unknown value */
8463 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
8464 /* Make it small and 4-byte aligned */
8465 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
8466 /* add it to skb. We now have either &skb->len or
8467 * &skb->pkt_type, but we don't know which
8469 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
8470 /* dereference it */
8471 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
8474 .errstr = "variable ctx access var_off=(0x0; 0x4)",
8476 .prog_type = BPF_PROG_TYPE_LWT_IN,
8479 "variable-offset stack access",
8481 /* Fill the top 8 bytes of the stack */
8482 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8483 /* Get an unknown value */
8484 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
8485 /* Make it small and 4-byte aligned */
8486 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
8487 BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 8),
8488 /* add it to fp. We now have either fp-4 or fp-8, but
8489 * we don't know which
8491 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10),
8492 /* dereference it */
8493 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0),
8496 .errstr = "variable stack access var_off=(0xfffffffffffffff8; 0x4)",
8498 .prog_type = BPF_PROG_TYPE_LWT_IN,
8501 "indirect variable-offset stack access, out of bound",
8503 /* Fill the top 8 bytes of the stack */
8504 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8505 /* Get an unknown value */
8506 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
8507 /* Make it small and 4-byte aligned */
8508 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
8509 BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 8),
8510 /* add it to fp. We now have either fp-4 or fp-8, but
8511 * we don't know which
8513 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10),
8514 /* dereference it indirectly */
8515 BPF_LD_MAP_FD(BPF_REG_1, 0),
8516 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8517 BPF_FUNC_map_lookup_elem),
8518 BPF_MOV64_IMM(BPF_REG_0, 0),
8521 .fixup_map1 = { 5 },
8522 .errstr = "invalid stack type R2 var_off",
8524 .prog_type = BPF_PROG_TYPE_LWT_IN,
8527 "indirect variable-offset stack access, max_off+size > max_initialized",
8529 /* Fill only the second from top 8 bytes of the stack. */
8530 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, 0),
8531 /* Get an unknown value. */
8532 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
8533 /* Make it small and 4-byte aligned. */
8534 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
8535 BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 16),
8536 /* Add it to fp. We now have either fp-12 or fp-16, but we don't know
8537 * which. fp-12 size 8 is partially uninitialized stack.
8539 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10),
8540 /* Dereference it indirectly. */
8541 BPF_LD_MAP_FD(BPF_REG_1, 0),
8542 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
8543 BPF_MOV64_IMM(BPF_REG_0, 0),
8546 .fixup_map1 = { 5 },
8547 .errstr = "invalid indirect read from stack var_off",
8549 .prog_type = BPF_PROG_TYPE_LWT_IN,
8552 "indirect variable-offset stack access, min_off < min_initialized",
8554 /* Fill only the top 8 bytes of the stack. */
8555 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8556 /* Get an unknown value */
8557 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
8558 /* Make it small and 4-byte aligned. */
8559 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
8560 BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 16),
8561 /* Add it to fp. We now have either fp-12 or fp-16, but we don't know
8562 * which. fp-16 size 8 is partially uninitialized stack.
8564 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10),
8565 /* Dereference it indirectly. */
8566 BPF_LD_MAP_FD(BPF_REG_1, 0),
8567 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
8568 BPF_MOV64_IMM(BPF_REG_0, 0),
8571 .fixup_map1 = { 5 },
8572 .errstr = "invalid indirect read from stack var_off",
8574 .prog_type = BPF_PROG_TYPE_LWT_IN,
8577 "indirect variable-offset stack access, ok",
8579 /* Fill the top 16 bytes of the stack. */
8580 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, 0),
8581 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8582 /* Get an unknown value. */
8583 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
8584 /* Make it small and 4-byte aligned. */
8585 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
8586 BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 16),
8587 /* Add it to fp. We now have either fp-12 or fp-16, we don't know
8588 * which, but either way it points to initialized stack.
8590 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10),
8591 /* Dereference it indirectly. */
8592 BPF_LD_MAP_FD(BPF_REG_1, 0),
8593 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
8594 BPF_MOV64_IMM(BPF_REG_0, 0),
8597 .fixup_map1 = { 6 },
8599 .prog_type = BPF_PROG_TYPE_LWT_IN,
8602 "direct stack access with 32-bit wraparound. test1",
8604 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
8605 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
8606 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
8607 BPF_MOV32_IMM(BPF_REG_0, 0),
8608 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
8611 .errstr = "fp pointer and 2147483647",
8615 "direct stack access with 32-bit wraparound. test2",
8617 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
8618 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x3fffffff),
8619 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x3fffffff),
8620 BPF_MOV32_IMM(BPF_REG_0, 0),
8621 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
8624 .errstr = "fp pointer and 1073741823",
8628 "direct stack access with 32-bit wraparound. test3",
8630 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
8631 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x1fffffff),
8632 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x1fffffff),
8633 BPF_MOV32_IMM(BPF_REG_0, 0),
8634 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
8637 .errstr = "fp pointer offset 1073741822",
8638 .errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
8642 "liveness pruning and write screening",
8644 /* Get an unknown value */
8645 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
8646 /* branch conditions teach us nothing about R2 */
8647 BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1),
8648 BPF_MOV64_IMM(BPF_REG_0, 0),
8649 BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1),
8650 BPF_MOV64_IMM(BPF_REG_0, 0),
8653 .errstr = "R0 !read_ok",
8655 .prog_type = BPF_PROG_TYPE_LWT_IN,
8658 "varlen_map_value_access pruning",
8660 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8661 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8662 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8663 BPF_LD_MAP_FD(BPF_REG_1, 0),
8664 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8665 BPF_FUNC_map_lookup_elem),
8666 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
8667 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
8668 BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
8669 BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
8670 BPF_MOV32_IMM(BPF_REG_1, 0),
8671 BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
8672 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8673 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
8674 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
8675 offsetof(struct test_val, foo)),
8678 .fixup_map2 = { 3 },
8679 .errstr_unpriv = "R0 leaks addr",
8680 .errstr = "R0 unbounded memory access",
8681 .result_unpriv = REJECT,
8683 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8686 "invalid 64-bit BPF_END",
8688 BPF_MOV32_IMM(BPF_REG_0, 0),
8690 .code = BPF_ALU64 | BPF_END | BPF_TO_LE,
8691 .dst_reg = BPF_REG_0,
8698 .errstr = "unknown opcode d7",
8702 "XDP, using ifindex from netdev",
8704 BPF_MOV64_IMM(BPF_REG_0, 0),
8705 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8706 offsetof(struct xdp_md, ingress_ifindex)),
8707 BPF_JMP_IMM(BPF_JLT, BPF_REG_2, 1, 1),
8708 BPF_MOV64_IMM(BPF_REG_0, 1),
8712 .prog_type = BPF_PROG_TYPE_XDP,
8716 "meta access, test1",
8718 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8719 offsetof(struct xdp_md, data_meta)),
8720 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8721 offsetof(struct xdp_md, data)),
8722 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
8723 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
8724 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
8725 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
8726 BPF_MOV64_IMM(BPF_REG_0, 0),
8730 .prog_type = BPF_PROG_TYPE_XDP,
8733 "meta access, test2",
8735 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8736 offsetof(struct xdp_md, data_meta)),
8737 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8738 offsetof(struct xdp_md, data)),
8739 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
8740 BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 8),
8741 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
8742 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
8743 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
8744 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
8745 BPF_MOV64_IMM(BPF_REG_0, 0),
8749 .errstr = "invalid access to packet, off=-8",
8750 .prog_type = BPF_PROG_TYPE_XDP,
8753 "meta access, test3",
8755 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8756 offsetof(struct xdp_md, data_meta)),
8757 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8758 offsetof(struct xdp_md, data_end)),
8759 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
8760 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
8761 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
8762 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
8763 BPF_MOV64_IMM(BPF_REG_0, 0),
8767 .errstr = "invalid access to packet",
8768 .prog_type = BPF_PROG_TYPE_XDP,
8771 "meta access, test4",
8773 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8774 offsetof(struct xdp_md, data_meta)),
8775 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8776 offsetof(struct xdp_md, data_end)),
8777 BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
8778 offsetof(struct xdp_md, data)),
8779 BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
8780 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
8781 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
8782 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
8783 BPF_MOV64_IMM(BPF_REG_0, 0),
8787 .errstr = "invalid access to packet",
8788 .prog_type = BPF_PROG_TYPE_XDP,
8791 "meta access, test5",
8793 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8794 offsetof(struct xdp_md, data_meta)),
8795 BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
8796 offsetof(struct xdp_md, data)),
8797 BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
8798 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
8799 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_4, 3),
8800 BPF_MOV64_IMM(BPF_REG_2, -8),
8801 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8802 BPF_FUNC_xdp_adjust_meta),
8803 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 0),
8804 BPF_MOV64_IMM(BPF_REG_0, 0),
8808 .errstr = "R3 !read_ok",
8809 .prog_type = BPF_PROG_TYPE_XDP,
8812 "meta access, test6",
8814 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8815 offsetof(struct xdp_md, data_meta)),
8816 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8817 offsetof(struct xdp_md, data)),
8818 BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
8819 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
8820 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
8821 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
8822 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_0, 1),
8823 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
8824 BPF_MOV64_IMM(BPF_REG_0, 0),
8828 .errstr = "invalid access to packet",
8829 .prog_type = BPF_PROG_TYPE_XDP,
8832 "meta access, test7",
8834 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8835 offsetof(struct xdp_md, data_meta)),
8836 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8837 offsetof(struct xdp_md, data)),
8838 BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
8839 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
8840 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
8841 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
8842 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
8843 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
8844 BPF_MOV64_IMM(BPF_REG_0, 0),
8848 .prog_type = BPF_PROG_TYPE_XDP,
8851 "meta access, test8",
8853 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8854 offsetof(struct xdp_md, data_meta)),
8855 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8856 offsetof(struct xdp_md, data)),
8857 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
8858 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0xFFFF),
8859 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
8860 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
8861 BPF_MOV64_IMM(BPF_REG_0, 0),
8865 .prog_type = BPF_PROG_TYPE_XDP,
8868 "meta access, test9",
8870 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8871 offsetof(struct xdp_md, data_meta)),
8872 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8873 offsetof(struct xdp_md, data)),
8874 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
8875 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0xFFFF),
8876 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 1),
8877 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
8878 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
8879 BPF_MOV64_IMM(BPF_REG_0, 0),
8883 .errstr = "invalid access to packet",
8884 .prog_type = BPF_PROG_TYPE_XDP,
8887 "meta access, test10",
8889 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8890 offsetof(struct xdp_md, data_meta)),
8891 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8892 offsetof(struct xdp_md, data)),
8893 BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
8894 offsetof(struct xdp_md, data_end)),
8895 BPF_MOV64_IMM(BPF_REG_5, 42),
8896 BPF_MOV64_IMM(BPF_REG_6, 24),
8897 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_5, -8),
8898 BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
8899 BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -8),
8900 BPF_JMP_IMM(BPF_JGT, BPF_REG_5, 100, 6),
8901 BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_5),
8902 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
8903 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
8904 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8),
8905 BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_5, 1),
8906 BPF_LDX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
8907 BPF_MOV64_IMM(BPF_REG_0, 0),
8911 .errstr = "invalid access to packet",
8912 .prog_type = BPF_PROG_TYPE_XDP,
8915 "meta access, test11",
8917 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8918 offsetof(struct xdp_md, data_meta)),
8919 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8920 offsetof(struct xdp_md, data)),
8921 BPF_MOV64_IMM(BPF_REG_5, 42),
8922 BPF_MOV64_IMM(BPF_REG_6, 24),
8923 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_5, -8),
8924 BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
8925 BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -8),
8926 BPF_JMP_IMM(BPF_JGT, BPF_REG_5, 100, 6),
8927 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_5),
8928 BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
8929 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
8930 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8),
8931 BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_3, 1),
8932 BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_5, 0),
8933 BPF_MOV64_IMM(BPF_REG_0, 0),
8937 .prog_type = BPF_PROG_TYPE_XDP,
8940 "meta access, test12",
8942 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8943 offsetof(struct xdp_md, data_meta)),
8944 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8945 offsetof(struct xdp_md, data)),
8946 BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
8947 offsetof(struct xdp_md, data_end)),
8948 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
8949 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 16),
8950 BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_4, 5),
8951 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 0),
8952 BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
8953 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 16),
8954 BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 1),
8955 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
8956 BPF_MOV64_IMM(BPF_REG_0, 0),
8960 .prog_type = BPF_PROG_TYPE_XDP,
8963 "arithmetic ops make PTR_TO_CTX unusable",
8965 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
8966 offsetof(struct __sk_buff, data) -
8967 offsetof(struct __sk_buff, mark)),
8968 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
8969 offsetof(struct __sk_buff, mark)),
8972 .errstr = "dereference of modified ctx ptr",
8974 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
8977 "pkt_end - pkt_start is allowed",
8979 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
8980 offsetof(struct __sk_buff, data_end)),
8981 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8982 offsetof(struct __sk_buff, data)),
8983 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_2),
8987 .retval = TEST_DATA_LEN,
8988 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
8991 "XDP pkt read, pkt_end mangling, bad access 1",
8993 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8994 offsetof(struct xdp_md, data)),
8995 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8996 offsetof(struct xdp_md, data_end)),
8997 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8998 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8999 BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 8),
9000 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
9001 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9002 BPF_MOV64_IMM(BPF_REG_0, 0),
9005 .errstr = "R3 pointer arithmetic on PTR_TO_PACKET_END",
9007 .prog_type = BPF_PROG_TYPE_XDP,
9010 "XDP pkt read, pkt_end mangling, bad access 2",
9012 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9013 offsetof(struct xdp_md, data)),
9014 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9015 offsetof(struct xdp_md, data_end)),
9016 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9017 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9018 BPF_ALU64_IMM(BPF_SUB, BPF_REG_3, 8),
9019 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
9020 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9021 BPF_MOV64_IMM(BPF_REG_0, 0),
9024 .errstr = "R3 pointer arithmetic on PTR_TO_PACKET_END",
9026 .prog_type = BPF_PROG_TYPE_XDP,
9029 "XDP pkt read, pkt_data' > pkt_end, good access",
9031 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9032 offsetof(struct xdp_md, data)),
9033 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9034 offsetof(struct xdp_md, data_end)),
9035 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9036 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9037 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
9038 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9039 BPF_MOV64_IMM(BPF_REG_0, 0),
9043 .prog_type = BPF_PROG_TYPE_XDP,
9044 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9047 "XDP pkt read, pkt_data' > pkt_end, bad access 1",
9049 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9050 offsetof(struct xdp_md, data)),
9051 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9052 offsetof(struct xdp_md, data_end)),
9053 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9054 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9055 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
9056 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
9057 BPF_MOV64_IMM(BPF_REG_0, 0),
9060 .errstr = "R1 offset is outside of the packet",
9062 .prog_type = BPF_PROG_TYPE_XDP,
9063 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9066 "XDP pkt read, pkt_data' > pkt_end, bad access 2",
9068 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9069 offsetof(struct xdp_md, data)),
9070 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9071 offsetof(struct xdp_md, data_end)),
9072 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9073 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9074 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 0),
9075 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9076 BPF_MOV64_IMM(BPF_REG_0, 0),
9079 .errstr = "R1 offset is outside of the packet",
9081 .prog_type = BPF_PROG_TYPE_XDP,
9082 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9085 "XDP pkt read, pkt_end > pkt_data', good access",
9087 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9088 offsetof(struct xdp_md, data)),
9089 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9090 offsetof(struct xdp_md, data_end)),
9091 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9092 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9093 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
9094 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9095 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
9096 BPF_MOV64_IMM(BPF_REG_0, 0),
9100 .prog_type = BPF_PROG_TYPE_XDP,
9101 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9104 "XDP pkt read, pkt_end > pkt_data', bad access 1",
9106 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9107 offsetof(struct xdp_md, data)),
9108 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9109 offsetof(struct xdp_md, data_end)),
9110 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9111 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
9112 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
9113 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9114 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6),
9115 BPF_MOV64_IMM(BPF_REG_0, 0),
9118 .errstr = "R1 offset is outside of the packet",
9120 .prog_type = BPF_PROG_TYPE_XDP,
9121 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9124 "XDP pkt read, pkt_end > pkt_data', bad access 2",
9126 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9127 offsetof(struct xdp_md, data)),
9128 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9129 offsetof(struct xdp_md, data_end)),
9130 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9131 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9132 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
9133 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9134 BPF_MOV64_IMM(BPF_REG_0, 0),
9137 .errstr = "R1 offset is outside of the packet",
9139 .prog_type = BPF_PROG_TYPE_XDP,
9140 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9143 "XDP pkt read, pkt_data' < pkt_end, good access",
9145 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9146 offsetof(struct xdp_md, data)),
9147 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9148 offsetof(struct xdp_md, data_end)),
9149 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9150 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9151 BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
9152 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9153 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
9154 BPF_MOV64_IMM(BPF_REG_0, 0),
9158 .prog_type = BPF_PROG_TYPE_XDP,
9159 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9162 "XDP pkt read, pkt_data' < pkt_end, bad access 1",
9164 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9165 offsetof(struct xdp_md, data)),
9166 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9167 offsetof(struct xdp_md, data_end)),
9168 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9169 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
9170 BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
9171 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9172 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6),
9173 BPF_MOV64_IMM(BPF_REG_0, 0),
9176 .errstr = "R1 offset is outside of the packet",
9178 .prog_type = BPF_PROG_TYPE_XDP,
9179 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9182 "XDP pkt read, pkt_data' < pkt_end, bad access 2",
9184 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9185 offsetof(struct xdp_md, data)),
9186 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9187 offsetof(struct xdp_md, data_end)),
9188 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9189 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9190 BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
9191 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9192 BPF_MOV64_IMM(BPF_REG_0, 0),
9195 .errstr = "R1 offset is outside of the packet",
9197 .prog_type = BPF_PROG_TYPE_XDP,
9198 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9201 "XDP pkt read, pkt_end < pkt_data', good access",
9203 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9204 offsetof(struct xdp_md, data)),
9205 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9206 offsetof(struct xdp_md, data_end)),
9207 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9208 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9209 BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
9210 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9211 BPF_MOV64_IMM(BPF_REG_0, 0),
9215 .prog_type = BPF_PROG_TYPE_XDP,
9216 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9219 "XDP pkt read, pkt_end < pkt_data', bad access 1",
9221 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9222 offsetof(struct xdp_md, data)),
9223 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9224 offsetof(struct xdp_md, data_end)),
9225 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9226 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9227 BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
9228 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
9229 BPF_MOV64_IMM(BPF_REG_0, 0),
9232 .errstr = "R1 offset is outside of the packet",
9234 .prog_type = BPF_PROG_TYPE_XDP,
9235 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9238 "XDP pkt read, pkt_end < pkt_data', bad access 2",
9240 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9241 offsetof(struct xdp_md, data)),
9242 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9243 offsetof(struct xdp_md, data_end)),
9244 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9245 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9246 BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 0),
9247 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9248 BPF_MOV64_IMM(BPF_REG_0, 0),
9251 .errstr = "R1 offset is outside of the packet",
9253 .prog_type = BPF_PROG_TYPE_XDP,
9254 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9257 "XDP pkt read, pkt_data' >= pkt_end, good access",
9259 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9260 offsetof(struct xdp_md, data)),
9261 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9262 offsetof(struct xdp_md, data_end)),
9263 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9264 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9265 BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
9266 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
9267 BPF_MOV64_IMM(BPF_REG_0, 0),
9271 .prog_type = BPF_PROG_TYPE_XDP,
9272 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9275 "XDP pkt read, pkt_data' >= pkt_end, bad access 1",
9277 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9278 offsetof(struct xdp_md, data)),
9279 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9280 offsetof(struct xdp_md, data_end)),
9281 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9282 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
9283 BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
9284 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6),
9285 BPF_MOV64_IMM(BPF_REG_0, 0),
9288 .errstr = "R1 offset is outside of the packet",
9290 .prog_type = BPF_PROG_TYPE_XDP,
9291 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9294 "XDP pkt read, pkt_data' >= pkt_end, bad access 2",
9296 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9297 offsetof(struct xdp_md, data)),
9298 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9299 offsetof(struct xdp_md, data_end)),
9300 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9301 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9302 BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 0),
9303 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
9304 BPF_MOV64_IMM(BPF_REG_0, 0),
9307 .errstr = "R1 offset is outside of the packet",
9309 .prog_type = BPF_PROG_TYPE_XDP,
9310 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9313 "XDP pkt read, pkt_end >= pkt_data', good access",
9315 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9316 offsetof(struct xdp_md, data)),
9317 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9318 offsetof(struct xdp_md, data_end)),
9319 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9320 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9321 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
9322 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9323 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9324 BPF_MOV64_IMM(BPF_REG_0, 0),
9328 .prog_type = BPF_PROG_TYPE_XDP,
9329 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9332 "XDP pkt read, pkt_end >= pkt_data', bad access 1",
9334 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9335 offsetof(struct xdp_md, data)),
9336 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9337 offsetof(struct xdp_md, data_end)),
9338 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9339 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9340 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
9341 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9342 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
9343 BPF_MOV64_IMM(BPF_REG_0, 0),
9346 .errstr = "R1 offset is outside of the packet",
9348 .prog_type = BPF_PROG_TYPE_XDP,
9349 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9352 "XDP pkt read, pkt_end >= pkt_data', bad access 2",
9354 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9355 offsetof(struct xdp_md, data)),
9356 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9357 offsetof(struct xdp_md, data_end)),
9358 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9359 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9360 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
9361 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9362 BPF_MOV64_IMM(BPF_REG_0, 0),
9365 .errstr = "R1 offset is outside of the packet",
9367 .prog_type = BPF_PROG_TYPE_XDP,
9368 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9371 "XDP pkt read, pkt_data' <= pkt_end, good access",
9373 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9374 offsetof(struct xdp_md, data)),
9375 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9376 offsetof(struct xdp_md, data_end)),
9377 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9378 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9379 BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
9380 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9381 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9382 BPF_MOV64_IMM(BPF_REG_0, 0),
9386 .prog_type = BPF_PROG_TYPE_XDP,
9387 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9390 "XDP pkt read, pkt_data' <= pkt_end, bad access 1",
9392 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9393 offsetof(struct xdp_md, data)),
9394 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9395 offsetof(struct xdp_md, data_end)),
9396 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9397 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9398 BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
9399 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9400 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
9401 BPF_MOV64_IMM(BPF_REG_0, 0),
9404 .errstr = "R1 offset is outside of the packet",
9406 .prog_type = BPF_PROG_TYPE_XDP,
9407 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9410 "XDP pkt read, pkt_data' <= pkt_end, bad access 2",
9412 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9413 offsetof(struct xdp_md, data)),
9414 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9415 offsetof(struct xdp_md, data_end)),
9416 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9417 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9418 BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
9419 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9420 BPF_MOV64_IMM(BPF_REG_0, 0),
9423 .errstr = "R1 offset is outside of the packet",
9425 .prog_type = BPF_PROG_TYPE_XDP,
9426 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9429 "XDP pkt read, pkt_end <= pkt_data', good access",
9431 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9432 offsetof(struct xdp_md, data)),
9433 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9434 offsetof(struct xdp_md, data_end)),
9435 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9436 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9437 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
9438 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
9439 BPF_MOV64_IMM(BPF_REG_0, 0),
9443 .prog_type = BPF_PROG_TYPE_XDP,
9444 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9447 "XDP pkt read, pkt_end <= pkt_data', bad access 1",
9449 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9450 offsetof(struct xdp_md, data)),
9451 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9452 offsetof(struct xdp_md, data_end)),
9453 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9454 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
9455 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
9456 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6),
9457 BPF_MOV64_IMM(BPF_REG_0, 0),
9460 .errstr = "R1 offset is outside of the packet",
9462 .prog_type = BPF_PROG_TYPE_XDP,
9463 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9466 "XDP pkt read, pkt_end <= pkt_data', bad access 2",
9468 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9469 offsetof(struct xdp_md, data)),
9470 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9471 offsetof(struct xdp_md, data_end)),
9472 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9473 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9474 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 0),
9475 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
9476 BPF_MOV64_IMM(BPF_REG_0, 0),
9479 .errstr = "R1 offset is outside of the packet",
9481 .prog_type = BPF_PROG_TYPE_XDP,
9482 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9485 "XDP pkt read, pkt_meta' > pkt_data, good access",
9487 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9488 offsetof(struct xdp_md, data_meta)),
9489 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9490 offsetof(struct xdp_md, data)),
9491 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9492 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9493 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
9494 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9495 BPF_MOV64_IMM(BPF_REG_0, 0),
9499 .prog_type = BPF_PROG_TYPE_XDP,
9500 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9503 "XDP pkt read, pkt_meta' > pkt_data, bad access 1",
9505 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9506 offsetof(struct xdp_md, data_meta)),
9507 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9508 offsetof(struct xdp_md, data)),
9509 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9510 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9511 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
9512 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
9513 BPF_MOV64_IMM(BPF_REG_0, 0),
9516 .errstr = "R1 offset is outside of the packet",
9518 .prog_type = BPF_PROG_TYPE_XDP,
9519 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9522 "XDP pkt read, pkt_meta' > pkt_data, bad access 2",
9524 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9525 offsetof(struct xdp_md, data_meta)),
9526 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9527 offsetof(struct xdp_md, data)),
9528 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9529 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9530 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 0),
9531 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9532 BPF_MOV64_IMM(BPF_REG_0, 0),
9535 .errstr = "R1 offset is outside of the packet",
9537 .prog_type = BPF_PROG_TYPE_XDP,
9538 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9541 "XDP pkt read, pkt_data > pkt_meta', good access",
9543 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9544 offsetof(struct xdp_md, data_meta)),
9545 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9546 offsetof(struct xdp_md, data)),
9547 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9548 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9549 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
9550 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9551 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
9552 BPF_MOV64_IMM(BPF_REG_0, 0),
9556 .prog_type = BPF_PROG_TYPE_XDP,
9557 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9560 "XDP pkt read, pkt_data > pkt_meta', bad access 1",
9562 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9563 offsetof(struct xdp_md, data_meta)),
9564 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9565 offsetof(struct xdp_md, data)),
9566 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9567 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
9568 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
9569 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9570 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6),
9571 BPF_MOV64_IMM(BPF_REG_0, 0),
9574 .errstr = "R1 offset is outside of the packet",
9576 .prog_type = BPF_PROG_TYPE_XDP,
9577 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9580 "XDP pkt read, pkt_data > pkt_meta', bad access 2",
9582 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9583 offsetof(struct xdp_md, data_meta)),
9584 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9585 offsetof(struct xdp_md, data)),
9586 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9587 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9588 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
9589 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9590 BPF_MOV64_IMM(BPF_REG_0, 0),
9593 .errstr = "R1 offset is outside of the packet",
9595 .prog_type = BPF_PROG_TYPE_XDP,
9596 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9599 "XDP pkt read, pkt_meta' < pkt_data, good access",
9601 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9602 offsetof(struct xdp_md, data_meta)),
9603 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9604 offsetof(struct xdp_md, data)),
9605 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9606 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9607 BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
9608 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9609 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
9610 BPF_MOV64_IMM(BPF_REG_0, 0),
9614 .prog_type = BPF_PROG_TYPE_XDP,
9615 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9618 "XDP pkt read, pkt_meta' < pkt_data, bad access 1",
9620 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9621 offsetof(struct xdp_md, data_meta)),
9622 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9623 offsetof(struct xdp_md, data)),
9624 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9625 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
9626 BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
9627 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9628 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6),
9629 BPF_MOV64_IMM(BPF_REG_0, 0),
9632 .errstr = "R1 offset is outside of the packet",
9634 .prog_type = BPF_PROG_TYPE_XDP,
9635 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9638 "XDP pkt read, pkt_meta' < pkt_data, bad access 2",
9640 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9641 offsetof(struct xdp_md, data_meta)),
9642 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9643 offsetof(struct xdp_md, data)),
9644 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9645 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9646 BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
9647 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9648 BPF_MOV64_IMM(BPF_REG_0, 0),
9651 .errstr = "R1 offset is outside of the packet",
9653 .prog_type = BPF_PROG_TYPE_XDP,
9654 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9657 "XDP pkt read, pkt_data < pkt_meta', good access",
9659 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9660 offsetof(struct xdp_md, data_meta)),
9661 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9662 offsetof(struct xdp_md, data)),
9663 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9664 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9665 BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
9666 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9667 BPF_MOV64_IMM(BPF_REG_0, 0),
9671 .prog_type = BPF_PROG_TYPE_XDP,
9672 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9675 "XDP pkt read, pkt_data < pkt_meta', bad access 1",
9677 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9678 offsetof(struct xdp_md, data_meta)),
9679 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9680 offsetof(struct xdp_md, data)),
9681 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9682 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9683 BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
9684 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
9685 BPF_MOV64_IMM(BPF_REG_0, 0),
9688 .errstr = "R1 offset is outside of the packet",
9690 .prog_type = BPF_PROG_TYPE_XDP,
9691 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9694 "XDP pkt read, pkt_data < pkt_meta', bad access 2",
9696 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9697 offsetof(struct xdp_md, data_meta)),
9698 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9699 offsetof(struct xdp_md, data)),
9700 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9701 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9702 BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 0),
9703 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9704 BPF_MOV64_IMM(BPF_REG_0, 0),
9707 .errstr = "R1 offset is outside of the packet",
9709 .prog_type = BPF_PROG_TYPE_XDP,
9710 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9713 "XDP pkt read, pkt_meta' >= pkt_data, good access",
9715 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9716 offsetof(struct xdp_md, data_meta)),
9717 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9718 offsetof(struct xdp_md, data)),
9719 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9720 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9721 BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
9722 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
9723 BPF_MOV64_IMM(BPF_REG_0, 0),
9727 .prog_type = BPF_PROG_TYPE_XDP,
9728 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9731 "XDP pkt read, pkt_meta' >= pkt_data, bad access 1",
9733 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9734 offsetof(struct xdp_md, data_meta)),
9735 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9736 offsetof(struct xdp_md, data)),
9737 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9738 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
9739 BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
9740 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6),
9741 BPF_MOV64_IMM(BPF_REG_0, 0),
9744 .errstr = "R1 offset is outside of the packet",
9746 .prog_type = BPF_PROG_TYPE_XDP,
9747 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9750 "XDP pkt read, pkt_meta' >= pkt_data, bad access 2",
9752 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9753 offsetof(struct xdp_md, data_meta)),
9754 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9755 offsetof(struct xdp_md, data)),
9756 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9757 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9758 BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 0),
9759 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
9760 BPF_MOV64_IMM(BPF_REG_0, 0),
9763 .errstr = "R1 offset is outside of the packet",
9765 .prog_type = BPF_PROG_TYPE_XDP,
9766 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9769 "XDP pkt read, pkt_data >= pkt_meta', good access",
9771 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9772 offsetof(struct xdp_md, data_meta)),
9773 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9774 offsetof(struct xdp_md, data)),
9775 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9776 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9777 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
9778 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9779 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9780 BPF_MOV64_IMM(BPF_REG_0, 0),
9784 .prog_type = BPF_PROG_TYPE_XDP,
9785 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9788 "XDP pkt read, pkt_data >= pkt_meta', bad access 1",
9790 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9791 offsetof(struct xdp_md, data_meta)),
9792 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9793 offsetof(struct xdp_md, data)),
9794 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9795 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9796 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
9797 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9798 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
9799 BPF_MOV64_IMM(BPF_REG_0, 0),
9802 .errstr = "R1 offset is outside of the packet",
9804 .prog_type = BPF_PROG_TYPE_XDP,
9805 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9808 "XDP pkt read, pkt_data >= pkt_meta', bad access 2",
9810 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9811 offsetof(struct xdp_md, data_meta)),
9812 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9813 offsetof(struct xdp_md, data)),
9814 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9815 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9816 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
9817 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9818 BPF_MOV64_IMM(BPF_REG_0, 0),
9821 .errstr = "R1 offset is outside of the packet",
9823 .prog_type = BPF_PROG_TYPE_XDP,
9824 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9827 "XDP pkt read, pkt_meta' <= pkt_data, good access",
9829 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9830 offsetof(struct xdp_md, data_meta)),
9831 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9832 offsetof(struct xdp_md, data)),
9833 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9834 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9835 BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
9836 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9837 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9838 BPF_MOV64_IMM(BPF_REG_0, 0),
9842 .prog_type = BPF_PROG_TYPE_XDP,
9843 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9846 "XDP pkt read, pkt_meta' <= pkt_data, bad access 1",
9848 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9849 offsetof(struct xdp_md, data_meta)),
9850 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9851 offsetof(struct xdp_md, data)),
9852 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9853 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9854 BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
9855 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9856 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
9857 BPF_MOV64_IMM(BPF_REG_0, 0),
9860 .errstr = "R1 offset is outside of the packet",
9862 .prog_type = BPF_PROG_TYPE_XDP,
9863 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9866 "XDP pkt read, pkt_meta' <= pkt_data, bad access 2",
9868 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9869 offsetof(struct xdp_md, data_meta)),
9870 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9871 offsetof(struct xdp_md, data)),
9872 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9873 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9874 BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
9875 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9876 BPF_MOV64_IMM(BPF_REG_0, 0),
9879 .errstr = "R1 offset is outside of the packet",
9881 .prog_type = BPF_PROG_TYPE_XDP,
9882 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9885 "XDP pkt read, pkt_data <= pkt_meta', good access",
9887 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9888 offsetof(struct xdp_md, data_meta)),
9889 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9890 offsetof(struct xdp_md, data)),
9891 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9892 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9893 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
9894 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
9895 BPF_MOV64_IMM(BPF_REG_0, 0),
9899 .prog_type = BPF_PROG_TYPE_XDP,
9900 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9903 "XDP pkt read, pkt_data <= pkt_meta', bad access 1",
9905 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9906 offsetof(struct xdp_md, data_meta)),
9907 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9908 offsetof(struct xdp_md, data)),
9909 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9910 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
9911 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
9912 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6),
9913 BPF_MOV64_IMM(BPF_REG_0, 0),
9916 .errstr = "R1 offset is outside of the packet",
9918 .prog_type = BPF_PROG_TYPE_XDP,
9919 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9922 "XDP pkt read, pkt_data <= pkt_meta', bad access 2",
9924 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9925 offsetof(struct xdp_md, data_meta)),
9926 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9927 offsetof(struct xdp_md, data)),
9928 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9929 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9930 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 0),
9931 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
9932 BPF_MOV64_IMM(BPF_REG_0, 0),
9935 .errstr = "R1 offset is outside of the packet",
9937 .prog_type = BPF_PROG_TYPE_XDP,
9938 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9941 "check deducing bounds from const, 1",
9943 BPF_MOV64_IMM(BPF_REG_0, 1),
9944 BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 1, 0),
9945 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
9948 .errstr_unpriv = "R1 has pointer with unsupported alu operation",
9949 .errstr = "R0 tried to subtract pointer from scalar",
9953 "check deducing bounds from const, 2",
9955 BPF_MOV64_IMM(BPF_REG_0, 1),
9956 BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 1, 1),
9958 BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 1, 1),
9960 BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
9963 .errstr_unpriv = "R1 has pointer with unsupported alu operation",
9964 .result_unpriv = REJECT,
9969 "check deducing bounds from const, 3",
9971 BPF_MOV64_IMM(BPF_REG_0, 0),
9972 BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 0),
9973 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
9976 .errstr_unpriv = "R1 has pointer with unsupported alu operation",
9977 .errstr = "R0 tried to subtract pointer from scalar",
9981 "check deducing bounds from const, 4",
9983 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
9984 BPF_MOV64_IMM(BPF_REG_0, 0),
9985 BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 1),
9987 BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
9989 BPF_ALU64_REG(BPF_SUB, BPF_REG_6, BPF_REG_0),
9992 .errstr_unpriv = "R6 has pointer with unsupported alu operation",
9993 .result_unpriv = REJECT,
9997 "check deducing bounds from const, 5",
9999 BPF_MOV64_IMM(BPF_REG_0, 0),
10000 BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 1, 1),
10001 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
10004 .errstr_unpriv = "R1 has pointer with unsupported alu operation",
10005 .errstr = "R0 tried to subtract pointer from scalar",
10009 "check deducing bounds from const, 6",
10011 BPF_MOV64_IMM(BPF_REG_0, 0),
10012 BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
10014 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
10017 .errstr_unpriv = "R1 has pointer with unsupported alu operation",
10018 .errstr = "R0 tried to subtract pointer from scalar",
10022 "check deducing bounds from const, 7",
10024 BPF_MOV64_IMM(BPF_REG_0, ~0),
10025 BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 0),
10026 BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
10027 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10028 offsetof(struct __sk_buff, mark)),
10031 .errstr_unpriv = "R1 has pointer with unsupported alu operation",
10032 .errstr = "dereference of modified ctx ptr",
10034 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10037 "check deducing bounds from const, 8",
10039 BPF_MOV64_IMM(BPF_REG_0, ~0),
10040 BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
10041 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
10042 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10043 offsetof(struct __sk_buff, mark)),
10046 .errstr_unpriv = "R1 has pointer with unsupported alu operation",
10047 .errstr = "dereference of modified ctx ptr",
10049 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10052 "check deducing bounds from const, 9",
10054 BPF_MOV64_IMM(BPF_REG_0, 0),
10055 BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 0),
10056 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
10059 .errstr_unpriv = "R1 has pointer with unsupported alu operation",
10060 .errstr = "R0 tried to subtract pointer from scalar",
10064 "check deducing bounds from const, 10",
10066 BPF_MOV64_IMM(BPF_REG_0, 0),
10067 BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 0),
10068 /* Marks reg as unknown. */
10069 BPF_ALU64_IMM(BPF_NEG, BPF_REG_0, 0),
10070 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
10073 .errstr = "math between ctx pointer and register with unbounded min value is not allowed",
10077 "bpf_exit with invalid return code. test1",
10079 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
10082 .errstr = "R0 has value (0x0; 0xffffffff)",
10084 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
10087 "bpf_exit with invalid return code. test2",
10089 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
10090 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
10094 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
10097 "bpf_exit with invalid return code. test3",
10099 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
10100 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 3),
10103 .errstr = "R0 has value (0x0; 0x3)",
10105 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
10108 "bpf_exit with invalid return code. test4",
10110 BPF_MOV64_IMM(BPF_REG_0, 1),
10114 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
10117 "bpf_exit with invalid return code. test5",
10119 BPF_MOV64_IMM(BPF_REG_0, 2),
10122 .errstr = "R0 has value (0x2; 0x0)",
10124 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
10127 "bpf_exit with invalid return code. test6",
10129 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
10132 .errstr = "R0 is not a known value (ctx)",
10134 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
10137 "bpf_exit with invalid return code. test7",
10139 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
10140 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 4),
10141 BPF_ALU64_REG(BPF_MUL, BPF_REG_0, BPF_REG_2),
10144 .errstr = "R0 has unknown scalar value",
10146 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
10149 "calls: basic sanity",
10151 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
10152 BPF_MOV64_IMM(BPF_REG_0, 1),
10154 BPF_MOV64_IMM(BPF_REG_0, 2),
10157 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10161 "calls: not on unpriviledged",
10163 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
10164 BPF_MOV64_IMM(BPF_REG_0, 1),
10166 BPF_MOV64_IMM(BPF_REG_0, 2),
10169 .errstr_unpriv = "function calls to other bpf functions are allowed for root only",
10170 .result_unpriv = REJECT,
10175 "calls: div by 0 in subprog",
10177 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10178 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
10179 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
10180 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
10181 offsetof(struct __sk_buff, data_end)),
10182 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
10183 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
10184 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
10185 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
10186 BPF_MOV64_IMM(BPF_REG_0, 1),
10188 BPF_MOV32_IMM(BPF_REG_2, 0),
10189 BPF_MOV32_IMM(BPF_REG_3, 1),
10190 BPF_ALU32_REG(BPF_DIV, BPF_REG_3, BPF_REG_2),
10191 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10192 offsetof(struct __sk_buff, data)),
10195 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
10200 "calls: multiple ret types in subprog 1",
10202 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10203 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
10204 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
10205 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
10206 offsetof(struct __sk_buff, data_end)),
10207 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
10208 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
10209 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
10210 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
10211 BPF_MOV64_IMM(BPF_REG_0, 1),
10213 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10214 offsetof(struct __sk_buff, data)),
10215 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
10216 BPF_MOV32_IMM(BPF_REG_0, 42),
10219 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
10221 .errstr = "R0 invalid mem access 'inv'",
10224 "calls: multiple ret types in subprog 2",
10226 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10227 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
10228 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
10229 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
10230 offsetof(struct __sk_buff, data_end)),
10231 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
10232 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
10233 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
10234 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
10235 BPF_MOV64_IMM(BPF_REG_0, 1),
10237 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10238 offsetof(struct __sk_buff, data)),
10239 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10240 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 9),
10241 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
10242 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10243 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
10244 BPF_LD_MAP_FD(BPF_REG_1, 0),
10245 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
10246 BPF_FUNC_map_lookup_elem),
10247 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
10248 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6,
10249 offsetof(struct __sk_buff, data)),
10250 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 64),
10253 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
10254 .fixup_map1 = { 16 },
10256 .errstr = "R0 min value is outside of the array range",
10259 "calls: overlapping caller/callee",
10261 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 0),
10262 BPF_MOV64_IMM(BPF_REG_0, 1),
10265 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10266 .errstr = "last insn is not an exit or jmp",
10270 "calls: wrong recursive calls",
10272 BPF_JMP_IMM(BPF_JA, 0, 0, 4),
10273 BPF_JMP_IMM(BPF_JA, 0, 0, 4),
10274 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
10275 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
10276 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
10277 BPF_MOV64_IMM(BPF_REG_0, 1),
10280 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10281 .errstr = "jump out of range",
10285 "calls: wrong src reg",
10287 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 2, 0, 0),
10288 BPF_MOV64_IMM(BPF_REG_0, 1),
10291 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10292 .errstr = "BPF_CALL uses reserved fields",
10296 "calls: wrong off value",
10298 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, -1, 2),
10299 BPF_MOV64_IMM(BPF_REG_0, 1),
10301 BPF_MOV64_IMM(BPF_REG_0, 2),
10304 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10305 .errstr = "BPF_CALL uses reserved fields",
10309 "calls: jump back loop",
10311 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -1),
10312 BPF_MOV64_IMM(BPF_REG_0, 1),
10315 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10316 .errstr = "back-edge from insn 0 to 0",
10320 "calls: conditional call",
10322 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10323 offsetof(struct __sk_buff, mark)),
10324 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
10325 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
10326 BPF_MOV64_IMM(BPF_REG_0, 1),
10328 BPF_MOV64_IMM(BPF_REG_0, 2),
10331 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10332 .errstr = "jump out of range",
10336 "calls: conditional call 2",
10338 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10339 offsetof(struct __sk_buff, mark)),
10340 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
10341 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
10342 BPF_MOV64_IMM(BPF_REG_0, 1),
10344 BPF_MOV64_IMM(BPF_REG_0, 2),
10346 BPF_MOV64_IMM(BPF_REG_0, 3),
10349 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10353 "calls: conditional call 3",
10355 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10356 offsetof(struct __sk_buff, mark)),
10357 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
10358 BPF_JMP_IMM(BPF_JA, 0, 0, 4),
10359 BPF_MOV64_IMM(BPF_REG_0, 1),
10361 BPF_MOV64_IMM(BPF_REG_0, 1),
10362 BPF_JMP_IMM(BPF_JA, 0, 0, -6),
10363 BPF_MOV64_IMM(BPF_REG_0, 3),
10364 BPF_JMP_IMM(BPF_JA, 0, 0, -6),
10366 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10367 .errstr = "back-edge from insn",
10371 "calls: conditional call 4",
10373 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10374 offsetof(struct __sk_buff, mark)),
10375 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
10376 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
10377 BPF_MOV64_IMM(BPF_REG_0, 1),
10379 BPF_MOV64_IMM(BPF_REG_0, 1),
10380 BPF_JMP_IMM(BPF_JA, 0, 0, -5),
10381 BPF_MOV64_IMM(BPF_REG_0, 3),
10384 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10388 "calls: conditional call 5",
10390 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10391 offsetof(struct __sk_buff, mark)),
10392 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
10393 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
10394 BPF_MOV64_IMM(BPF_REG_0, 1),
10396 BPF_MOV64_IMM(BPF_REG_0, 1),
10397 BPF_JMP_IMM(BPF_JA, 0, 0, -6),
10398 BPF_MOV64_IMM(BPF_REG_0, 3),
10401 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10402 .errstr = "back-edge from insn",
10406 "calls: conditional call 6",
10408 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
10409 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -2),
10411 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10412 offsetof(struct __sk_buff, mark)),
10415 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10416 .errstr = "back-edge from insn",
10420 "calls: using r0 returned by callee",
10422 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10424 BPF_MOV64_IMM(BPF_REG_0, 2),
10427 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10431 "calls: using uninit r0 from callee",
10433 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10437 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10438 .errstr = "!read_ok",
10442 "calls: callee is using r1",
10444 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10446 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10447 offsetof(struct __sk_buff, len)),
10450 .prog_type = BPF_PROG_TYPE_SCHED_ACT,
10452 .retval = TEST_DATA_LEN,
10455 "calls: callee using args1",
10457 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10459 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
10462 .errstr_unpriv = "allowed for root only",
10463 .result_unpriv = REJECT,
10465 .retval = POINTER_VALUE,
10468 "calls: callee using wrong args2",
10470 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10472 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
10475 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10476 .errstr = "R2 !read_ok",
10480 "calls: callee using two args",
10482 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10483 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_6,
10484 offsetof(struct __sk_buff, len)),
10485 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_6,
10486 offsetof(struct __sk_buff, len)),
10487 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10489 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
10490 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
10493 .errstr_unpriv = "allowed for root only",
10494 .result_unpriv = REJECT,
10496 .retval = TEST_DATA_LEN + TEST_DATA_LEN - ETH_HLEN - ETH_HLEN,
10499 "calls: callee changing pkt pointers",
10501 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
10502 offsetof(struct xdp_md, data)),
10503 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
10504 offsetof(struct xdp_md, data_end)),
10505 BPF_MOV64_REG(BPF_REG_8, BPF_REG_6),
10506 BPF_ALU64_IMM(BPF_ADD, BPF_REG_8, 8),
10507 BPF_JMP_REG(BPF_JGT, BPF_REG_8, BPF_REG_7, 2),
10508 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
10509 /* clear_all_pkt_pointers() has to walk all frames
10510 * to make sure that pkt pointers in the caller
10511 * are cleared when callee is calling a helper that
10512 * adjusts packet size
10514 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
10515 BPF_MOV32_IMM(BPF_REG_0, 0),
10517 BPF_MOV64_IMM(BPF_REG_2, 0),
10518 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
10519 BPF_FUNC_xdp_adjust_head),
10523 .errstr = "R6 invalid mem access 'inv'",
10524 .prog_type = BPF_PROG_TYPE_XDP,
10525 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10528 "calls: two calls with args",
10530 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10532 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10533 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
10534 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
10535 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
10536 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
10537 BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
10538 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
10540 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10541 offsetof(struct __sk_buff, len)),
10544 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
10546 .retval = TEST_DATA_LEN + TEST_DATA_LEN,
10549 "calls: calls with stack arith",
10551 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10552 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
10553 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10555 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
10556 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10558 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
10559 BPF_MOV64_IMM(BPF_REG_0, 42),
10560 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
10563 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
10568 "calls: calls with misaligned stack access",
10570 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10571 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -63),
10572 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10574 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -61),
10575 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10577 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -63),
10578 BPF_MOV64_IMM(BPF_REG_0, 42),
10579 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
10582 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
10583 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
10584 .errstr = "misaligned stack access",
10588 "calls: calls control flow, jump test",
10590 BPF_MOV64_IMM(BPF_REG_0, 42),
10591 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
10592 BPF_MOV64_IMM(BPF_REG_0, 43),
10593 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
10594 BPF_JMP_IMM(BPF_JA, 0, 0, -3),
10597 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
10602 "calls: calls control flow, jump test 2",
10604 BPF_MOV64_IMM(BPF_REG_0, 42),
10605 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
10606 BPF_MOV64_IMM(BPF_REG_0, 43),
10607 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
10608 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -3),
10611 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
10612 .errstr = "jump out of range from insn 1 to 4",
10616 "calls: two calls with bad jump",
10618 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10620 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10621 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
10622 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
10623 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
10624 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
10625 BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
10626 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
10628 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10629 offsetof(struct __sk_buff, len)),
10630 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -3),
10633 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10634 .errstr = "jump out of range from insn 11 to 9",
10638 "calls: recursive call. test1",
10640 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10642 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -1),
10645 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10646 .errstr = "back-edge",
10650 "calls: recursive call. test2",
10652 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10654 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -3),
10657 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10658 .errstr = "back-edge",
10662 "calls: unreachable code",
10664 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10666 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10668 BPF_MOV64_IMM(BPF_REG_0, 0),
10670 BPF_MOV64_IMM(BPF_REG_0, 0),
10673 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10674 .errstr = "unreachable insn 6",
10678 "calls: invalid call",
10680 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10682 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -4),
10685 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10686 .errstr = "invalid destination",
10690 "calls: invalid call 2",
10692 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10694 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 0x7fffffff),
10697 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10698 .errstr = "invalid destination",
10702 "calls: jumping across function bodies. test1",
10704 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
10705 BPF_MOV64_IMM(BPF_REG_0, 0),
10707 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3),
10710 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10711 .errstr = "jump out of range",
10715 "calls: jumping across function bodies. test2",
10717 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
10718 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
10719 BPF_MOV64_IMM(BPF_REG_0, 0),
10723 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10724 .errstr = "jump out of range",
10728 "calls: call without exit",
10730 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10732 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10734 BPF_MOV64_IMM(BPF_REG_0, 0),
10735 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -2),
10737 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10738 .errstr = "not an exit",
10742 "calls: call into middle of ld_imm64",
10744 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
10745 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
10746 BPF_MOV64_IMM(BPF_REG_0, 0),
10748 BPF_LD_IMM64(BPF_REG_0, 0),
10751 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10752 .errstr = "last insn",
10756 "calls: call into middle of other call",
10758 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
10759 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
10760 BPF_MOV64_IMM(BPF_REG_0, 0),
10762 BPF_MOV64_IMM(BPF_REG_0, 0),
10763 BPF_MOV64_IMM(BPF_REG_0, 0),
10766 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10767 .errstr = "last insn",
10771 "calls: ld_abs with changing ctx data in callee",
10773 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10774 BPF_LD_ABS(BPF_B, 0),
10775 BPF_LD_ABS(BPF_H, 0),
10776 BPF_LD_ABS(BPF_W, 0),
10777 BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
10778 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
10779 BPF_MOV64_REG(BPF_REG_6, BPF_REG_7),
10780 BPF_LD_ABS(BPF_B, 0),
10781 BPF_LD_ABS(BPF_H, 0),
10782 BPF_LD_ABS(BPF_W, 0),
10784 BPF_MOV64_IMM(BPF_REG_2, 1),
10785 BPF_MOV64_IMM(BPF_REG_3, 2),
10786 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
10787 BPF_FUNC_skb_vlan_push),
10790 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
10791 .errstr = "BPF_LD_[ABS|IND] instructions cannot be mixed",
10795 "calls: two calls with bad fallthrough",
10797 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10799 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10800 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
10801 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
10802 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
10803 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
10804 BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
10805 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
10806 BPF_MOV64_REG(BPF_REG_0, BPF_REG_0),
10807 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10808 offsetof(struct __sk_buff, len)),
10811 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10812 .errstr = "not an exit",
10816 "calls: two calls with stack read",
10818 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
10819 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
10820 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
10821 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10823 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10824 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
10825 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
10826 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
10827 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
10828 BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
10829 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
10831 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
10834 .prog_type = BPF_PROG_TYPE_XDP,
10838 "calls: two calls with stack write",
10841 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
10842 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
10843 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
10844 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10845 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
10846 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
10847 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
10851 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10852 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
10853 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 7),
10854 BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
10855 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
10856 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
10857 BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_0),
10858 BPF_MOV64_REG(BPF_REG_0, BPF_REG_8),
10859 /* write into stack frame of main prog */
10860 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
10864 /* read from stack frame of main prog */
10865 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
10868 .prog_type = BPF_PROG_TYPE_XDP,
10872 "calls: stack overflow using two frames (pre-call access)",
10875 BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
10876 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1),
10880 BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
10881 BPF_MOV64_IMM(BPF_REG_0, 0),
10884 .prog_type = BPF_PROG_TYPE_XDP,
10885 .errstr = "combined stack size",
10889 "calls: stack overflow using two frames (post-call access)",
10892 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 2),
10893 BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
10897 BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
10898 BPF_MOV64_IMM(BPF_REG_0, 0),
10901 .prog_type = BPF_PROG_TYPE_XDP,
10902 .errstr = "combined stack size",
10906 "calls: stack depth check using three frames. test1",
10909 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
10910 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 5), /* call B */
10911 BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0),
10912 BPF_MOV64_IMM(BPF_REG_0, 0),
10915 BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
10918 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -3), /* call A */
10919 BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
10922 .prog_type = BPF_PROG_TYPE_XDP,
10923 /* stack_main=32, stack_A=256, stack_B=64
10924 * and max(main+A, main+A+B) < 512
10929 "calls: stack depth check using three frames. test2",
10932 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
10933 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 5), /* call B */
10934 BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0),
10935 BPF_MOV64_IMM(BPF_REG_0, 0),
10938 BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
10941 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -3), /* call A */
10942 BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
10945 .prog_type = BPF_PROG_TYPE_XDP,
10946 /* stack_main=32, stack_A=64, stack_B=256
10947 * and max(main+A, main+A+B) < 512
10952 "calls: stack depth check using three frames. test3",
10955 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10956 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 6), /* call A */
10957 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
10958 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 8), /* call B */
10959 BPF_JMP_IMM(BPF_JGE, BPF_REG_6, 0, 1),
10960 BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
10961 BPF_MOV64_IMM(BPF_REG_0, 0),
10964 BPF_JMP_IMM(BPF_JLT, BPF_REG_1, 10, 1),
10966 BPF_ST_MEM(BPF_B, BPF_REG_10, -224, 0),
10967 BPF_JMP_IMM(BPF_JA, 0, 0, -3),
10969 BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 2, 1),
10970 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -6), /* call A */
10971 BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
10974 .prog_type = BPF_PROG_TYPE_XDP,
10975 /* stack_main=64, stack_A=224, stack_B=256
10976 * and max(main+A, main+A+B) > 512
10978 .errstr = "combined stack",
10982 "calls: stack depth check using three frames. test4",
10983 /* void main(void) {
10988 * void func1(int alloc_or_recurse) {
10989 * if (alloc_or_recurse) {
10990 * frame_pointer[-300] = 1;
10992 * func2(alloc_or_recurse);
10995 * void func2(int alloc_or_recurse) {
10996 * if (alloc_or_recurse) {
10997 * frame_pointer[-300] = 1;
11003 BPF_MOV64_IMM(BPF_REG_1, 0),
11004 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 6), /* call A */
11005 BPF_MOV64_IMM(BPF_REG_1, 1),
11006 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
11007 BPF_MOV64_IMM(BPF_REG_1, 1),
11008 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 7), /* call B */
11009 BPF_MOV64_IMM(BPF_REG_0, 0),
11012 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
11013 BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
11015 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call B */
11018 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
11019 BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
11022 .prog_type = BPF_PROG_TYPE_XDP,
11024 .errstr = "combined stack",
11027 "calls: stack depth check using three frames. test5",
11030 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call A */
11033 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call B */
11036 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call C */
11039 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call D */
11042 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call E */
11045 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call F */
11048 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call G */
11051 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call H */
11054 BPF_MOV64_IMM(BPF_REG_0, 0),
11057 .prog_type = BPF_PROG_TYPE_XDP,
11058 .errstr = "call stack",
11062 "calls: spill into caller stack frame",
11064 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
11065 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
11066 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
11067 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11069 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0),
11070 BPF_MOV64_IMM(BPF_REG_0, 0),
11073 .prog_type = BPF_PROG_TYPE_XDP,
11074 .errstr = "cannot spill",
11078 "calls: write into caller stack frame",
11080 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
11081 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
11082 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11083 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11084 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
11086 BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 42),
11087 BPF_MOV64_IMM(BPF_REG_0, 0),
11090 .prog_type = BPF_PROG_TYPE_XDP,
11095 "calls: write into callee stack frame",
11097 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11098 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
11100 BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
11101 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, -8),
11104 .prog_type = BPF_PROG_TYPE_XDP,
11105 .errstr = "cannot return stack pointer",
11109 "calls: two calls with stack write and void return",
11112 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
11113 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
11114 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
11115 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11116 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
11117 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11118 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
11122 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11123 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
11124 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
11125 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
11126 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11130 /* write into stack frame of main prog */
11131 BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0),
11132 BPF_EXIT_INSN(), /* void return */
11134 .prog_type = BPF_PROG_TYPE_XDP,
11138 "calls: ambiguous return value",
11140 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11141 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
11142 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
11143 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
11144 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11145 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
11147 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
11148 BPF_MOV64_IMM(BPF_REG_0, 0),
11151 .errstr_unpriv = "allowed for root only",
11152 .result_unpriv = REJECT,
11153 .errstr = "R0 !read_ok",
11157 "calls: two calls that return map_value",
11160 /* pass fp-16, fp-8 into a function */
11161 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
11162 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
11163 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11164 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
11165 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
11167 /* fetch map_value_ptr from the stack of this function */
11168 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
11169 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
11170 /* write into map value */
11171 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
11172 /* fetch second map_value_ptr from the stack */
11173 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
11174 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
11175 /* write into map value */
11176 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
11177 BPF_MOV64_IMM(BPF_REG_0, 0),
11181 /* call 3rd function twice */
11182 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11183 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
11184 /* first time with fp-8 */
11185 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
11186 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
11187 /* second time with fp-16 */
11188 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11192 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11193 /* lookup from map */
11194 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
11195 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11196 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
11197 BPF_LD_MAP_FD(BPF_REG_1, 0),
11198 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
11199 BPF_FUNC_map_lookup_elem),
11200 /* write map_value_ptr into stack frame of main prog */
11201 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
11202 BPF_MOV64_IMM(BPF_REG_0, 0),
11203 BPF_EXIT_INSN(), /* return 0 */
11205 .prog_type = BPF_PROG_TYPE_XDP,
11206 .fixup_map1 = { 23 },
11210 "calls: two calls that return map_value with bool condition",
11213 /* pass fp-16, fp-8 into a function */
11214 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
11215 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
11216 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11217 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
11218 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11219 BPF_MOV64_IMM(BPF_REG_0, 0),
11223 /* call 3rd function twice */
11224 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11225 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
11226 /* first time with fp-8 */
11227 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 9),
11228 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
11229 /* fetch map_value_ptr from the stack of this function */
11230 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
11231 /* write into map value */
11232 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
11233 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
11234 /* second time with fp-16 */
11235 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
11236 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
11237 /* fetch second map_value_ptr from the stack */
11238 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
11239 /* write into map value */
11240 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
11244 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11245 /* lookup from map */
11246 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
11247 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11248 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
11249 BPF_LD_MAP_FD(BPF_REG_1, 0),
11250 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
11251 BPF_FUNC_map_lookup_elem),
11252 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
11253 BPF_MOV64_IMM(BPF_REG_0, 0),
11254 BPF_EXIT_INSN(), /* return 0 */
11255 /* write map_value_ptr into stack frame of main prog */
11256 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
11257 BPF_MOV64_IMM(BPF_REG_0, 1),
11258 BPF_EXIT_INSN(), /* return 1 */
11260 .prog_type = BPF_PROG_TYPE_XDP,
11261 .fixup_map1 = { 23 },
11265 "calls: two calls that return map_value with incorrect bool check",
11268 /* pass fp-16, fp-8 into a function */
11269 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
11270 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
11271 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11272 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
11273 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11274 BPF_MOV64_IMM(BPF_REG_0, 0),
11278 /* call 3rd function twice */
11279 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11280 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
11281 /* first time with fp-8 */
11282 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 9),
11283 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
11284 /* fetch map_value_ptr from the stack of this function */
11285 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
11286 /* write into map value */
11287 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
11288 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
11289 /* second time with fp-16 */
11290 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
11291 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
11292 /* fetch second map_value_ptr from the stack */
11293 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
11294 /* write into map value */
11295 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
11299 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11300 /* lookup from map */
11301 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
11302 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11303 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
11304 BPF_LD_MAP_FD(BPF_REG_1, 0),
11305 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
11306 BPF_FUNC_map_lookup_elem),
11307 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
11308 BPF_MOV64_IMM(BPF_REG_0, 0),
11309 BPF_EXIT_INSN(), /* return 0 */
11310 /* write map_value_ptr into stack frame of main prog */
11311 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
11312 BPF_MOV64_IMM(BPF_REG_0, 1),
11313 BPF_EXIT_INSN(), /* return 1 */
11315 .prog_type = BPF_PROG_TYPE_XDP,
11316 .fixup_map1 = { 23 },
11318 .errstr = "invalid read from stack off -16+0 size 8",
11321 "calls: two calls that receive map_value via arg=ptr_stack_of_caller. test1",
11324 /* pass fp-16, fp-8 into a function */
11325 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
11326 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
11327 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11328 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
11329 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11330 BPF_MOV64_IMM(BPF_REG_0, 0),
11334 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11335 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
11336 /* 1st lookup from map */
11337 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
11338 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11339 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
11340 BPF_LD_MAP_FD(BPF_REG_1, 0),
11341 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
11342 BPF_FUNC_map_lookup_elem),
11343 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
11344 BPF_MOV64_IMM(BPF_REG_8, 0),
11345 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
11346 /* write map_value_ptr into stack frame of main prog at fp-8 */
11347 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
11348 BPF_MOV64_IMM(BPF_REG_8, 1),
11350 /* 2nd lookup from map */
11351 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), /* 20 */
11352 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
11353 BPF_LD_MAP_FD(BPF_REG_1, 0),
11354 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, /* 24 */
11355 BPF_FUNC_map_lookup_elem),
11356 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
11357 BPF_MOV64_IMM(BPF_REG_9, 0),
11358 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
11359 /* write map_value_ptr into stack frame of main prog at fp-16 */
11360 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
11361 BPF_MOV64_IMM(BPF_REG_9, 1),
11363 /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
11364 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
11365 BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
11366 BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
11367 BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
11368 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), /* 34 */
11372 /* if arg2 == 1 do *arg1 = 0 */
11373 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
11374 /* fetch map_value_ptr from the stack of this function */
11375 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
11376 /* write into map value */
11377 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
11379 /* if arg4 == 1 do *arg3 = 0 */
11380 BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
11381 /* fetch map_value_ptr from the stack of this function */
11382 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
11383 /* write into map value */
11384 BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 0),
11387 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
11388 .fixup_map1 = { 12, 22 },
11390 .errstr = "invalid access to map value, value_size=8 off=2 size=8",
11391 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
11394 "calls: two calls that receive map_value via arg=ptr_stack_of_caller. test2",
11397 /* pass fp-16, fp-8 into a function */
11398 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
11399 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
11400 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11401 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
11402 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11403 BPF_MOV64_IMM(BPF_REG_0, 0),
11407 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11408 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
11409 /* 1st lookup from map */
11410 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
11411 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11412 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
11413 BPF_LD_MAP_FD(BPF_REG_1, 0),
11414 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
11415 BPF_FUNC_map_lookup_elem),
11416 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
11417 BPF_MOV64_IMM(BPF_REG_8, 0),
11418 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
11419 /* write map_value_ptr into stack frame of main prog at fp-8 */
11420 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
11421 BPF_MOV64_IMM(BPF_REG_8, 1),
11423 /* 2nd lookup from map */
11424 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), /* 20 */
11425 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
11426 BPF_LD_MAP_FD(BPF_REG_1, 0),
11427 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, /* 24 */
11428 BPF_FUNC_map_lookup_elem),
11429 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
11430 BPF_MOV64_IMM(BPF_REG_9, 0),
11431 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
11432 /* write map_value_ptr into stack frame of main prog at fp-16 */
11433 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
11434 BPF_MOV64_IMM(BPF_REG_9, 1),
11436 /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
11437 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
11438 BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
11439 BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
11440 BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
11441 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), /* 34 */
11445 /* if arg2 == 1 do *arg1 = 0 */
11446 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
11447 /* fetch map_value_ptr from the stack of this function */
11448 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
11449 /* write into map value */
11450 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
11452 /* if arg4 == 1 do *arg3 = 0 */
11453 BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
11454 /* fetch map_value_ptr from the stack of this function */
11455 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
11456 /* write into map value */
11457 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
11460 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
11461 .fixup_map1 = { 12, 22 },
11465 "calls: two jumps that receive map_value via arg=ptr_stack_of_jumper. test3",
11468 /* pass fp-16, fp-8 into a function */
11469 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
11470 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
11471 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11472 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
11473 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
11474 BPF_MOV64_IMM(BPF_REG_0, 0),
11478 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11479 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
11480 /* 1st lookup from map */
11481 BPF_ST_MEM(BPF_DW, BPF_REG_10, -24, 0),
11482 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11483 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -24),
11484 BPF_LD_MAP_FD(BPF_REG_1, 0),
11485 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
11486 BPF_FUNC_map_lookup_elem),
11487 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
11488 BPF_MOV64_IMM(BPF_REG_8, 0),
11489 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
11490 /* write map_value_ptr into stack frame of main prog at fp-8 */
11491 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
11492 BPF_MOV64_IMM(BPF_REG_8, 1),
11494 /* 2nd lookup from map */
11495 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11496 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -24),
11497 BPF_LD_MAP_FD(BPF_REG_1, 0),
11498 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
11499 BPF_FUNC_map_lookup_elem),
11500 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
11501 BPF_MOV64_IMM(BPF_REG_9, 0), // 26
11502 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
11503 /* write map_value_ptr into stack frame of main prog at fp-16 */
11504 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
11505 BPF_MOV64_IMM(BPF_REG_9, 1),
11507 /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
11508 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), // 30
11509 BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
11510 BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
11511 BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
11512 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1), // 34
11513 BPF_JMP_IMM(BPF_JA, 0, 0, -30),
11516 /* if arg2 == 1 do *arg1 = 0 */
11517 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
11518 /* fetch map_value_ptr from the stack of this function */
11519 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
11520 /* write into map value */
11521 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
11523 /* if arg4 == 1 do *arg3 = 0 */
11524 BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
11525 /* fetch map_value_ptr from the stack of this function */
11526 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
11527 /* write into map value */
11528 BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 0),
11529 BPF_JMP_IMM(BPF_JA, 0, 0, -8),
11531 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
11532 .fixup_map1 = { 12, 22 },
11534 .errstr = "invalid access to map value, value_size=8 off=2 size=8",
11535 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
11538 "calls: two calls that receive map_value_ptr_or_null via arg. test1",
11541 /* pass fp-16, fp-8 into a function */
11542 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
11543 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
11544 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11545 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
11546 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11547 BPF_MOV64_IMM(BPF_REG_0, 0),
11551 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11552 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
11553 /* 1st lookup from map */
11554 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
11555 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11556 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
11557 BPF_LD_MAP_FD(BPF_REG_1, 0),
11558 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
11559 BPF_FUNC_map_lookup_elem),
11560 /* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
11561 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
11562 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
11563 BPF_MOV64_IMM(BPF_REG_8, 0),
11564 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
11565 BPF_MOV64_IMM(BPF_REG_8, 1),
11567 /* 2nd lookup from map */
11568 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11569 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
11570 BPF_LD_MAP_FD(BPF_REG_1, 0),
11571 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
11572 BPF_FUNC_map_lookup_elem),
11573 /* write map_value_ptr_or_null into stack frame of main prog at fp-16 */
11574 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
11575 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
11576 BPF_MOV64_IMM(BPF_REG_9, 0),
11577 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
11578 BPF_MOV64_IMM(BPF_REG_9, 1),
11580 /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
11581 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
11582 BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
11583 BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
11584 BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
11585 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11589 /* if arg2 == 1 do *arg1 = 0 */
11590 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
11591 /* fetch map_value_ptr from the stack of this function */
11592 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
11593 /* write into map value */
11594 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
11596 /* if arg4 == 1 do *arg3 = 0 */
11597 BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
11598 /* fetch map_value_ptr from the stack of this function */
11599 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
11600 /* write into map value */
11601 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
11604 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
11605 .fixup_map1 = { 12, 22 },
11609 "calls: two calls that receive map_value_ptr_or_null via arg. test2",
11612 /* pass fp-16, fp-8 into a function */
11613 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
11614 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
11615 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11616 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
11617 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11618 BPF_MOV64_IMM(BPF_REG_0, 0),
11622 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11623 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
11624 /* 1st lookup from map */
11625 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
11626 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11627 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
11628 BPF_LD_MAP_FD(BPF_REG_1, 0),
11629 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
11630 BPF_FUNC_map_lookup_elem),
11631 /* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
11632 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
11633 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
11634 BPF_MOV64_IMM(BPF_REG_8, 0),
11635 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
11636 BPF_MOV64_IMM(BPF_REG_8, 1),
11638 /* 2nd lookup from map */
11639 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11640 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
11641 BPF_LD_MAP_FD(BPF_REG_1, 0),
11642 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
11643 BPF_FUNC_map_lookup_elem),
11644 /* write map_value_ptr_or_null into stack frame of main prog at fp-16 */
11645 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
11646 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
11647 BPF_MOV64_IMM(BPF_REG_9, 0),
11648 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
11649 BPF_MOV64_IMM(BPF_REG_9, 1),
11651 /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
11652 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
11653 BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
11654 BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
11655 BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
11656 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11660 /* if arg2 == 1 do *arg1 = 0 */
11661 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
11662 /* fetch map_value_ptr from the stack of this function */
11663 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
11664 /* write into map value */
11665 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
11667 /* if arg4 == 0 do *arg3 = 0 */
11668 BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 0, 2),
11669 /* fetch map_value_ptr from the stack of this function */
11670 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
11671 /* write into map value */
11672 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
11675 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
11676 .fixup_map1 = { 12, 22 },
11678 .errstr = "R0 invalid mem access 'inv'",
11681 "calls: pkt_ptr spill into caller stack",
11683 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
11684 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
11685 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11689 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11690 offsetof(struct __sk_buff, data)),
11691 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
11692 offsetof(struct __sk_buff, data_end)),
11693 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
11694 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
11695 /* spill unchecked pkt_ptr into stack of caller */
11696 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
11697 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
11698 /* now the pkt range is verified, read pkt_ptr from stack */
11699 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
11700 /* write 4 bytes into packet */
11701 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
11705 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
11706 .retval = POINTER_VALUE,
11707 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
11710 "calls: pkt_ptr spill into caller stack 2",
11712 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
11713 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
11714 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
11715 /* Marking is still kept, but not in all cases safe. */
11716 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
11717 BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
11721 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11722 offsetof(struct __sk_buff, data)),
11723 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
11724 offsetof(struct __sk_buff, data_end)),
11725 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
11726 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
11727 /* spill unchecked pkt_ptr into stack of caller */
11728 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
11729 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
11730 /* now the pkt range is verified, read pkt_ptr from stack */
11731 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
11732 /* write 4 bytes into packet */
11733 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
11736 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
11737 .errstr = "invalid access to packet",
11739 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
11742 "calls: pkt_ptr spill into caller stack 3",
11744 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
11745 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
11746 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
11747 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
11748 /* Marking is still kept and safe here. */
11749 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
11750 BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
11754 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11755 offsetof(struct __sk_buff, data)),
11756 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
11757 offsetof(struct __sk_buff, data_end)),
11758 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
11759 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
11760 /* spill unchecked pkt_ptr into stack of caller */
11761 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
11762 BPF_MOV64_IMM(BPF_REG_5, 0),
11763 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
11764 BPF_MOV64_IMM(BPF_REG_5, 1),
11765 /* now the pkt range is verified, read pkt_ptr from stack */
11766 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
11767 /* write 4 bytes into packet */
11768 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
11769 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
11772 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
11775 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
11778 "calls: pkt_ptr spill into caller stack 4",
11780 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
11781 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
11782 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
11783 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
11784 /* Check marking propagated. */
11785 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
11786 BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
11790 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11791 offsetof(struct __sk_buff, data)),
11792 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
11793 offsetof(struct __sk_buff, data_end)),
11794 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
11795 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
11796 /* spill unchecked pkt_ptr into stack of caller */
11797 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
11798 BPF_MOV64_IMM(BPF_REG_5, 0),
11799 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
11800 BPF_MOV64_IMM(BPF_REG_5, 1),
11801 /* don't read back pkt_ptr from stack here */
11802 /* write 4 bytes into packet */
11803 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
11804 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
11807 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
11810 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
11813 "calls: pkt_ptr spill into caller stack 5",
11815 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
11816 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
11817 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_1, 0),
11818 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
11819 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
11820 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
11824 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11825 offsetof(struct __sk_buff, data)),
11826 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
11827 offsetof(struct __sk_buff, data_end)),
11828 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
11829 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
11830 BPF_MOV64_IMM(BPF_REG_5, 0),
11831 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
11832 /* spill checked pkt_ptr into stack of caller */
11833 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
11834 BPF_MOV64_IMM(BPF_REG_5, 1),
11835 /* don't read back pkt_ptr from stack here */
11836 /* write 4 bytes into packet */
11837 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
11838 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
11841 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
11842 .errstr = "same insn cannot be used with different",
11844 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
11847 "calls: pkt_ptr spill into caller stack 6",
11849 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11850 offsetof(struct __sk_buff, data_end)),
11851 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
11852 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
11853 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
11854 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
11855 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
11856 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
11860 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11861 offsetof(struct __sk_buff, data)),
11862 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
11863 offsetof(struct __sk_buff, data_end)),
11864 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
11865 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
11866 BPF_MOV64_IMM(BPF_REG_5, 0),
11867 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
11868 /* spill checked pkt_ptr into stack of caller */
11869 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
11870 BPF_MOV64_IMM(BPF_REG_5, 1),
11871 /* don't read back pkt_ptr from stack here */
11872 /* write 4 bytes into packet */
11873 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
11874 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
11877 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
11878 .errstr = "R4 invalid mem access",
11880 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
11883 "calls: pkt_ptr spill into caller stack 7",
11885 BPF_MOV64_IMM(BPF_REG_2, 0),
11886 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
11887 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
11888 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
11889 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
11890 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
11891 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
11895 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11896 offsetof(struct __sk_buff, data)),
11897 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
11898 offsetof(struct __sk_buff, data_end)),
11899 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
11900 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
11901 BPF_MOV64_IMM(BPF_REG_5, 0),
11902 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
11903 /* spill checked pkt_ptr into stack of caller */
11904 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
11905 BPF_MOV64_IMM(BPF_REG_5, 1),
11906 /* don't read back pkt_ptr from stack here */
11907 /* write 4 bytes into packet */
11908 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
11909 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
11912 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
11913 .errstr = "R4 invalid mem access",
11915 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
11918 "calls: pkt_ptr spill into caller stack 8",
11920 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11921 offsetof(struct __sk_buff, data)),
11922 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
11923 offsetof(struct __sk_buff, data_end)),
11924 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
11925 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
11926 BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
11928 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
11929 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
11930 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
11931 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
11932 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
11933 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
11937 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11938 offsetof(struct __sk_buff, data)),
11939 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
11940 offsetof(struct __sk_buff, data_end)),
11941 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
11942 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
11943 BPF_MOV64_IMM(BPF_REG_5, 0),
11944 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
11945 /* spill checked pkt_ptr into stack of caller */
11946 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
11947 BPF_MOV64_IMM(BPF_REG_5, 1),
11948 /* don't read back pkt_ptr from stack here */
11949 /* write 4 bytes into packet */
11950 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
11951 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
11954 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
11956 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
11959 "calls: pkt_ptr spill into caller stack 9",
11961 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11962 offsetof(struct __sk_buff, data)),
11963 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
11964 offsetof(struct __sk_buff, data_end)),
11965 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
11966 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
11967 BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
11969 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
11970 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
11971 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
11972 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
11973 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
11974 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
11978 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11979 offsetof(struct __sk_buff, data)),
11980 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
11981 offsetof(struct __sk_buff, data_end)),
11982 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
11983 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
11984 BPF_MOV64_IMM(BPF_REG_5, 0),
11985 /* spill unchecked pkt_ptr into stack of caller */
11986 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
11987 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
11988 BPF_MOV64_IMM(BPF_REG_5, 1),
11989 /* don't read back pkt_ptr from stack here */
11990 /* write 4 bytes into packet */
11991 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
11992 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
11995 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
11996 .errstr = "invalid access to packet",
11998 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
12001 "calls: caller stack init to zero or map_value_or_null",
12003 BPF_MOV64_IMM(BPF_REG_0, 0),
12004 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
12005 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12006 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12007 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
12008 /* fetch map_value_or_null or const_zero from stack */
12009 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
12010 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
12011 /* store into map_value */
12012 BPF_ST_MEM(BPF_W, BPF_REG_0, 0, 0),
12016 /* if (ctx == 0) return; */
12017 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 8),
12018 /* else bpf_map_lookup() and *(fp - 8) = r0 */
12019 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
12020 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12021 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12022 BPF_LD_MAP_FD(BPF_REG_1, 0),
12023 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
12024 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12025 BPF_FUNC_map_lookup_elem),
12026 /* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
12027 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
12030 .fixup_map1 = { 13 },
12032 .prog_type = BPF_PROG_TYPE_XDP,
12035 "calls: stack init to zero and pruning",
12037 /* first make allocated_stack 16 byte */
12038 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, 0),
12039 /* now fork the execution such that the false branch
12040 * of JGT insn will be verified second and it skips zero
12041 * init of fp-8 stack slot. If stack liveness marking
12042 * is missing live_read marks from call map_lookup
12043 * processing then pruning will incorrectly assume
12044 * that fp-8 stack slot was unused in the fall-through
12045 * branch and will accept the program incorrectly
12047 BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 2, 2),
12048 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
12049 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
12050 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12051 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12052 BPF_LD_MAP_FD(BPF_REG_1, 0),
12053 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12054 BPF_FUNC_map_lookup_elem),
12057 .fixup_map2 = { 6 },
12058 .errstr = "invalid indirect read from stack off -8+0 size 8",
12060 .prog_type = BPF_PROG_TYPE_XDP,
12063 "calls: two calls returning different map pointers for lookup (hash, array)",
12066 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
12068 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
12070 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
12071 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
12072 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12073 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12074 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12075 BPF_FUNC_map_lookup_elem),
12076 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
12077 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
12078 offsetof(struct test_val, foo)),
12079 BPF_MOV64_IMM(BPF_REG_0, 1),
12082 BPF_LD_MAP_FD(BPF_REG_0, 0),
12085 BPF_LD_MAP_FD(BPF_REG_0, 0),
12088 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12089 .fixup_map2 = { 13 },
12090 .fixup_map4 = { 16 },
12095 "calls: two calls returning different map pointers for lookup (hash, map in map)",
12098 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
12100 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
12102 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
12103 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
12104 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12105 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12106 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12107 BPF_FUNC_map_lookup_elem),
12108 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
12109 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
12110 offsetof(struct test_val, foo)),
12111 BPF_MOV64_IMM(BPF_REG_0, 1),
12114 BPF_LD_MAP_FD(BPF_REG_0, 0),
12117 BPF_LD_MAP_FD(BPF_REG_0, 0),
12120 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12121 .fixup_map_in_map = { 16 },
12122 .fixup_map4 = { 13 },
12124 .errstr = "R0 invalid mem access 'map_ptr'",
12127 "cond: two branches returning different map pointers for lookup (tail, tail)",
12129 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
12130 offsetof(struct __sk_buff, mark)),
12131 BPF_JMP_IMM(BPF_JNE, BPF_REG_6, 0, 3),
12132 BPF_LD_MAP_FD(BPF_REG_2, 0),
12133 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
12134 BPF_LD_MAP_FD(BPF_REG_2, 0),
12135 BPF_MOV64_IMM(BPF_REG_3, 7),
12136 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12137 BPF_FUNC_tail_call),
12138 BPF_MOV64_IMM(BPF_REG_0, 1),
12141 .fixup_prog1 = { 5 },
12142 .fixup_prog2 = { 2 },
12143 .result_unpriv = REJECT,
12144 .errstr_unpriv = "tail_call abusing map_ptr",
12149 "cond: two branches returning same map pointers for lookup (tail, tail)",
12151 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
12152 offsetof(struct __sk_buff, mark)),
12153 BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 3),
12154 BPF_LD_MAP_FD(BPF_REG_2, 0),
12155 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
12156 BPF_LD_MAP_FD(BPF_REG_2, 0),
12157 BPF_MOV64_IMM(BPF_REG_3, 7),
12158 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12159 BPF_FUNC_tail_call),
12160 BPF_MOV64_IMM(BPF_REG_0, 1),
12163 .fixup_prog2 = { 2, 5 },
12164 .result_unpriv = ACCEPT,
12169 "search pruning: all branches should be verified (nop operation)",
12171 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12172 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12173 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
12174 BPF_LD_MAP_FD(BPF_REG_1, 0),
12175 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
12176 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
12177 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
12178 BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0xbeef, 2),
12179 BPF_MOV64_IMM(BPF_REG_4, 0),
12181 BPF_MOV64_IMM(BPF_REG_4, 1),
12182 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -16),
12183 BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
12184 BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -16),
12185 BPF_JMP_IMM(BPF_JEQ, BPF_REG_5, 0, 2),
12186 BPF_MOV64_IMM(BPF_REG_6, 0),
12187 BPF_ST_MEM(BPF_DW, BPF_REG_6, 0, 0xdead),
12190 .fixup_map1 = { 3 },
12191 .errstr = "R6 invalid mem access 'inv'",
12193 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
12196 "search pruning: all branches should be verified (invalid stack access)",
12198 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12199 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12200 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
12201 BPF_LD_MAP_FD(BPF_REG_1, 0),
12202 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
12203 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
12204 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
12205 BPF_MOV64_IMM(BPF_REG_4, 0),
12206 BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0xbeef, 2),
12207 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -16),
12209 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -24),
12210 BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
12211 BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -16),
12214 .fixup_map1 = { 3 },
12215 .errstr = "invalid read from stack off -16+0 size 8",
12217 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
12220 "jit: lsh, rsh, arsh by 1",
12222 BPF_MOV64_IMM(BPF_REG_0, 1),
12223 BPF_MOV64_IMM(BPF_REG_1, 0xff),
12224 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 1),
12225 BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 1),
12226 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0x3fc, 1),
12228 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 1),
12229 BPF_ALU32_IMM(BPF_RSH, BPF_REG_1, 1),
12230 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0xff, 1),
12232 BPF_ALU64_IMM(BPF_ARSH, BPF_REG_1, 1),
12233 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0x7f, 1),
12235 BPF_MOV64_IMM(BPF_REG_0, 2),
12242 "jit: mov32 for ldimm64, 1",
12244 BPF_MOV64_IMM(BPF_REG_0, 2),
12245 BPF_LD_IMM64(BPF_REG_1, 0xfeffffffffffffffULL),
12246 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 32),
12247 BPF_LD_IMM64(BPF_REG_2, 0xfeffffffULL),
12248 BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_2, 1),
12249 BPF_MOV64_IMM(BPF_REG_0, 1),
12256 "jit: mov32 for ldimm64, 2",
12258 BPF_MOV64_IMM(BPF_REG_0, 1),
12259 BPF_LD_IMM64(BPF_REG_1, 0x1ffffffffULL),
12260 BPF_LD_IMM64(BPF_REG_2, 0xffffffffULL),
12261 BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_2, 1),
12262 BPF_MOV64_IMM(BPF_REG_0, 2),
12269 "jit: various mul tests",
12271 BPF_LD_IMM64(BPF_REG_2, 0xeeff0d413122ULL),
12272 BPF_LD_IMM64(BPF_REG_0, 0xfefefeULL),
12273 BPF_LD_IMM64(BPF_REG_1, 0xefefefULL),
12274 BPF_ALU64_REG(BPF_MUL, BPF_REG_0, BPF_REG_1),
12275 BPF_JMP_REG(BPF_JEQ, BPF_REG_0, BPF_REG_2, 2),
12276 BPF_MOV64_IMM(BPF_REG_0, 1),
12278 BPF_LD_IMM64(BPF_REG_3, 0xfefefeULL),
12279 BPF_ALU64_REG(BPF_MUL, BPF_REG_3, BPF_REG_1),
12280 BPF_JMP_REG(BPF_JEQ, BPF_REG_3, BPF_REG_2, 2),
12281 BPF_MOV64_IMM(BPF_REG_0, 1),
12283 BPF_MOV32_REG(BPF_REG_2, BPF_REG_2),
12284 BPF_LD_IMM64(BPF_REG_0, 0xfefefeULL),
12285 BPF_ALU32_REG(BPF_MUL, BPF_REG_0, BPF_REG_1),
12286 BPF_JMP_REG(BPF_JEQ, BPF_REG_0, BPF_REG_2, 2),
12287 BPF_MOV64_IMM(BPF_REG_0, 1),
12289 BPF_LD_IMM64(BPF_REG_3, 0xfefefeULL),
12290 BPF_ALU32_REG(BPF_MUL, BPF_REG_3, BPF_REG_1),
12291 BPF_JMP_REG(BPF_JEQ, BPF_REG_3, BPF_REG_2, 2),
12292 BPF_MOV64_IMM(BPF_REG_0, 1),
12294 BPF_LD_IMM64(BPF_REG_0, 0x952a7bbcULL),
12295 BPF_LD_IMM64(BPF_REG_1, 0xfefefeULL),
12296 BPF_LD_IMM64(BPF_REG_2, 0xeeff0d413122ULL),
12297 BPF_ALU32_REG(BPF_MUL, BPF_REG_2, BPF_REG_1),
12298 BPF_JMP_REG(BPF_JEQ, BPF_REG_2, BPF_REG_0, 2),
12299 BPF_MOV64_IMM(BPF_REG_0, 1),
12301 BPF_MOV64_IMM(BPF_REG_0, 2),
12308 "xadd/w check unaligned stack",
12310 BPF_MOV64_IMM(BPF_REG_0, 1),
12311 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
12312 BPF_STX_XADD(BPF_W, BPF_REG_10, BPF_REG_0, -7),
12313 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
12317 .errstr = "misaligned stack access off",
12318 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12321 "xadd/w check unaligned map",
12323 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
12324 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12325 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12326 BPF_LD_MAP_FD(BPF_REG_1, 0),
12327 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12328 BPF_FUNC_map_lookup_elem),
12329 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
12331 BPF_MOV64_IMM(BPF_REG_1, 1),
12332 BPF_STX_XADD(BPF_W, BPF_REG_0, BPF_REG_1, 3),
12333 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 3),
12336 .fixup_map1 = { 3 },
12338 .errstr = "misaligned value access off",
12339 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12342 "xadd/w check unaligned pkt",
12344 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
12345 offsetof(struct xdp_md, data)),
12346 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
12347 offsetof(struct xdp_md, data_end)),
12348 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
12349 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
12350 BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 2),
12351 BPF_MOV64_IMM(BPF_REG_0, 99),
12352 BPF_JMP_IMM(BPF_JA, 0, 0, 6),
12353 BPF_MOV64_IMM(BPF_REG_0, 1),
12354 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
12355 BPF_ST_MEM(BPF_W, BPF_REG_2, 3, 0),
12356 BPF_STX_XADD(BPF_W, BPF_REG_2, BPF_REG_0, 1),
12357 BPF_STX_XADD(BPF_W, BPF_REG_2, BPF_REG_0, 2),
12358 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 1),
12362 .errstr = "BPF_XADD stores into R2 packet",
12363 .prog_type = BPF_PROG_TYPE_XDP,
12364 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
12367 "xadd/w check whether src/dst got mangled, 1",
12369 BPF_MOV64_IMM(BPF_REG_0, 1),
12370 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
12371 BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
12372 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
12373 BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
12374 BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
12375 BPF_JMP_REG(BPF_JNE, BPF_REG_6, BPF_REG_0, 3),
12376 BPF_JMP_REG(BPF_JNE, BPF_REG_7, BPF_REG_10, 2),
12377 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
12379 BPF_MOV64_IMM(BPF_REG_0, 42),
12383 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12387 "xadd/w check whether src/dst got mangled, 2",
12389 BPF_MOV64_IMM(BPF_REG_0, 1),
12390 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
12391 BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
12392 BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -8),
12393 BPF_STX_XADD(BPF_W, BPF_REG_10, BPF_REG_0, -8),
12394 BPF_STX_XADD(BPF_W, BPF_REG_10, BPF_REG_0, -8),
12395 BPF_JMP_REG(BPF_JNE, BPF_REG_6, BPF_REG_0, 3),
12396 BPF_JMP_REG(BPF_JNE, BPF_REG_7, BPF_REG_10, 2),
12397 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -8),
12399 BPF_MOV64_IMM(BPF_REG_0, 42),
12403 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12407 "bpf_get_stack return R0 within range",
12409 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
12410 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
12411 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12412 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12413 BPF_LD_MAP_FD(BPF_REG_1, 0),
12414 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12415 BPF_FUNC_map_lookup_elem),
12416 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 28),
12417 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
12418 BPF_MOV64_IMM(BPF_REG_9, sizeof(struct test_val)/2),
12419 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
12420 BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
12421 BPF_MOV64_IMM(BPF_REG_3, sizeof(struct test_val)/2),
12422 BPF_MOV64_IMM(BPF_REG_4, 256),
12423 BPF_EMIT_CALL(BPF_FUNC_get_stack),
12424 BPF_MOV64_IMM(BPF_REG_1, 0),
12425 BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
12426 BPF_ALU64_IMM(BPF_LSH, BPF_REG_8, 32),
12427 BPF_ALU64_IMM(BPF_ARSH, BPF_REG_8, 32),
12428 BPF_JMP_REG(BPF_JSLT, BPF_REG_8, BPF_REG_1, 16),
12429 BPF_ALU64_REG(BPF_SUB, BPF_REG_9, BPF_REG_8),
12430 BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
12431 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_8),
12432 BPF_MOV64_REG(BPF_REG_1, BPF_REG_9),
12433 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 32),
12434 BPF_ALU64_IMM(BPF_ARSH, BPF_REG_1, 32),
12435 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
12436 BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_1),
12437 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
12438 BPF_MOV64_IMM(BPF_REG_5, sizeof(struct test_val)/2),
12439 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_5),
12440 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 4),
12441 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
12442 BPF_MOV64_REG(BPF_REG_3, BPF_REG_9),
12443 BPF_MOV64_IMM(BPF_REG_4, 0),
12444 BPF_EMIT_CALL(BPF_FUNC_get_stack),
12447 .fixup_map2 = { 4 },
12449 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
/* tests[] excerpt: LD_ABS/LD_IND opcode validation plus a reduced
 * packet-parsing sequence.  NOTE(review): this excerpt is missing
 * several original lines (.insns/.result braces etc.), comments below
 * describe only what is visible.
 */
12452 "ld_abs: invalid op 1",
/* BPF_DW is not a legal size for BPF_LD_ABS; must be rejected. */
12454 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
12455 BPF_LD_ABS(BPF_DW, 0),
12458 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12460 .errstr = "unknown opcode",
12463 "ld_abs: invalid op 2",
/* Same check for the indirect form: BPF_LD_IND with BPF_DW. */
12465 BPF_MOV32_IMM(BPF_REG_0, 256),
12466 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
12467 BPF_LD_IND(BPF_DW, BPF_REG_0, 0),
12470 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12472 .errstr = "unknown opcode",
12475 "ld_abs: nmap reduced",
/* Reduced nmap-style parse: repeatedly reloads the halfword at packet
 * offset 12 and bails out (via the forward jumps) unless it equals
 * 0x806 (ARP ethertype); fields fetched via LD_IND are spilled to the
 * stack and compared against constants with ALU32 SUB.
 */
12477 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
12478 BPF_LD_ABS(BPF_H, 12),
12479 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x806, 28),
12480 BPF_LD_ABS(BPF_H, 12),
12481 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x806, 26),
12482 BPF_MOV32_IMM(BPF_REG_0, 18),
12483 BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -64),
12484 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_10, -64),
12485 BPF_LD_IND(BPF_W, BPF_REG_7, 14),
12486 BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -60),
12487 BPF_MOV32_IMM(BPF_REG_0, 280971478),
12488 BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -56),
12489 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_10, -56),
12490 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -60),
12491 BPF_ALU32_REG(BPF_SUB, BPF_REG_0, BPF_REG_7),
12492 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 15),
12493 BPF_LD_ABS(BPF_H, 12),
12494 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x806, 13),
12495 BPF_MOV32_IMM(BPF_REG_0, 22),
12496 BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -56),
12497 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_10, -56),
12498 BPF_LD_IND(BPF_H, BPF_REG_7, 14),
12499 BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -52),
12500 BPF_MOV32_IMM(BPF_REG_0, 17366),
12501 BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -48),
12502 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_10, -48),
12503 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -52),
12504 BPF_ALU32_REG(BPF_SUB, BPF_REG_0, BPF_REG_7),
12505 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
/* All comparisons matched: return 256; otherwise fall through to 0. */
12506 BPF_MOV32_IMM(BPF_REG_0, 256),
12508 BPF_MOV32_IMM(BPF_REG_0, 0),
/* Test packet: 0x08 0x06 at offsets 12..13 is the ARP ethertype the
 * jumps above check for; the trailing bytes feed the LD_IND loads.
 */
12512 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0x06, 0,
12513 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
12514 0x10, 0xbf, 0x48, 0xd6, 0x43, 0xd6,
12516 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
/* tests[] excerpt: LD_ABS combined with division (including div-by-zero
 * and out-of-packet offsets), vlan push/pop interaction, and the
 * randomized LD_DW immediate tests.  Entries are truncated in this
 * excerpt (.result/.retval lines not visible).
 */
12521 "ld_abs: div + abs, test 1",
/* 32-bit DIV between two LD_ABS loads, then an LD_IND with a negative
 * offset derived from the packet bytes.
 */
12523 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
12524 BPF_LD_ABS(BPF_B, 3),
12525 BPF_ALU64_IMM(BPF_MOV, BPF_REG_2, 2),
12526 BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_2),
12527 BPF_ALU64_REG(BPF_MOV, BPF_REG_8, BPF_REG_0),
12528 BPF_LD_ABS(BPF_B, 4),
12529 BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_0),
12530 BPF_LD_IND(BPF_B, BPF_REG_8, -70),
12534 10, 20, 30, 40, 50,
12536 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12541 "ld_abs: div + abs, test 2",
/* As test 1, but the second LD_ABS reads offset 128, beyond the
 * 5-byte test packet above.
 */
12543 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
12544 BPF_LD_ABS(BPF_B, 3),
12545 BPF_ALU64_IMM(BPF_MOV, BPF_REG_2, 2),
12546 BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_2),
12547 BPF_ALU64_REG(BPF_MOV, BPF_REG_8, BPF_REG_0),
12548 BPF_LD_ABS(BPF_B, 128),
12549 BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_0),
12550 BPF_LD_IND(BPF_B, BPF_REG_8, -70),
12554 10, 20, 30, 40, 50,
12556 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12561 "ld_abs: div + abs, test 3",
/* Runtime division by zero (r7 == 0) after an LD_ABS. */
12563 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
12564 BPF_ALU64_IMM(BPF_MOV, BPF_REG_7, 0),
12565 BPF_LD_ABS(BPF_B, 3),
12566 BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_7),
12570 10, 20, 30, 40, 50,
12572 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12577 "ld_abs: div + abs, test 4",
/* Division by zero combined with an out-of-packet LD_ABS offset. */
12579 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
12580 BPF_ALU64_IMM(BPF_MOV, BPF_REG_7, 0),
12581 BPF_LD_ABS(BPF_B, 256),
12582 BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_7),
12586 10, 20, 30, 40, 50,
12588 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12593 "ld_abs: vlan + abs, test 1",
/* Instructions generated at load time by the fill helper. */
12598 .fill_helper = bpf_fill_ld_abs_vlan_push_pop,
12599 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12604 "ld_abs: vlan + abs, test 2",
/* LD_ABS loads before and after skb_vlan_push: the helper may
 * invalidate packet data, so r6 (skb) is re-established first.
 */
12606 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
12607 BPF_LD_ABS(BPF_B, 0),
12608 BPF_LD_ABS(BPF_H, 0),
12609 BPF_LD_ABS(BPF_W, 0),
12610 BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
12611 BPF_MOV64_IMM(BPF_REG_6, 0),
12612 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
12613 BPF_MOV64_IMM(BPF_REG_2, 1),
12614 BPF_MOV64_IMM(BPF_REG_3, 2),
12615 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12616 BPF_FUNC_skb_vlan_push),
12617 BPF_MOV64_REG(BPF_REG_6, BPF_REG_7),
12618 BPF_LD_ABS(BPF_B, 0),
12619 BPF_LD_ABS(BPF_H, 0),
12620 BPF_LD_ABS(BPF_W, 0),
12621 BPF_MOV64_IMM(BPF_REG_0, 42),
12627 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12632 "ld_abs: jump around ld_abs",
12637 .fill_helper = bpf_fill_jump_around_ld_abs,
12638 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
/* Four instances of the randomized 64-bit immediate-load test, all
 * generated by bpf_fill_rand_ld_dw (expected retvals not visible in
 * this excerpt).
 */
12643 "ld_dw: xor semi-random 64 bit imms, test 1",
12646 .fill_helper = bpf_fill_rand_ld_dw,
12647 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12652 "ld_dw: xor semi-random 64 bit imms, test 2",
12655 .fill_helper = bpf_fill_rand_ld_dw,
12656 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12661 "ld_dw: xor semi-random 64 bit imms, test 3",
12664 .fill_helper = bpf_fill_rand_ld_dw,
12665 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12670 "ld_dw: xor semi-random 64 bit imms, test 4",
12673 .fill_helper = bpf_fill_rand_ld_dw,
12674 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
/* tests[] excerpt: passing (possibly modified) ctx pointers to helpers,
 * mov64 bounds propagation, and ctx access at subprog entry.  Entries
 * are truncated in this excerpt.
 */
12679 "pass unmodified ctx pointer to helper",
/* An untouched r1 (ctx) handed to csum_update must be accepted. */
12681 BPF_MOV64_IMM(BPF_REG_2, 0),
12682 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12683 BPF_FUNC_csum_update),
12684 BPF_MOV64_IMM(BPF_REG_0, 0),
12687 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12691 "pass modified ctx pointer to helper, 1",
/* ctx offset by a constant must be rejected. */
12693 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -612),
12694 BPF_MOV64_IMM(BPF_REG_2, 0),
12695 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12696 BPF_FUNC_csum_update),
12697 BPF_MOV64_IMM(BPF_REG_0, 0),
12700 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12702 .errstr = "dereference of modified ctx ptr",
12705 "pass modified ctx pointer to helper, 2",
/* Same rejection for privileged and unprivileged loads. */
12707 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -612),
12708 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12709 BPF_FUNC_get_socket_cookie),
12710 BPF_MOV64_IMM(BPF_REG_0, 0),
12713 .result_unpriv = REJECT,
12715 .errstr_unpriv = "dereference of modified ctx ptr",
12716 .errstr = "dereference of modified ctx ptr",
12719 "pass modified ctx pointer to helper, 3",
/* ctx offset by a variable (even a bounded one) must be rejected. */
12721 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 0),
12722 BPF_ALU64_IMM(BPF_AND, BPF_REG_3, 4),
12723 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
12724 BPF_MOV64_IMM(BPF_REG_2, 0),
12725 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12726 BPF_FUNC_csum_update),
12727 BPF_MOV64_IMM(BPF_REG_0, 0),
12730 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12732 .errstr = "variable ctx access var_off=(0x0; 0x4)",
12735 "mov64 src == dst",
/* Self-move must keep the known-zero bounds so the add to ctx is OK. */
12737 BPF_MOV64_IMM(BPF_REG_2, 0),
12738 BPF_MOV64_REG(BPF_REG_2, BPF_REG_2),
12739 // Check bounds are OK
12740 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
12741 BPF_MOV64_IMM(BPF_REG_0, 0),
12744 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12748 "mov64 src != dst",
/* Move between distinct regs must propagate the bounds likewise. */
12750 BPF_MOV64_IMM(BPF_REG_3, 0),
12751 BPF_MOV64_REG(BPF_REG_2, BPF_REG_3),
12752 // Check bounds are OK
12753 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
12754 BPF_MOV64_IMM(BPF_REG_0, 0),
12757 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12761 "calls: ctx read at start of subprog",
/* BPF-to-BPF calls: ctx must be readable at subprog entry; the whole
 * construct is root-only for unprivileged users.
 */
12763 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
12764 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
12765 BPF_JMP_REG(BPF_JSGT, BPF_REG_0, BPF_REG_0, 0),
12766 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
12767 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
12768 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
12770 BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_1, 0),
12771 BPF_MOV64_IMM(BPF_REG_0, 0),
12774 .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
12775 .errstr_unpriv = "function calls to other bpf functions are allowed for root only",
12776 .result_unpriv = REJECT,
12781 static int probe_filter_length(const struct bpf_insn *fp)
12785 for (len = MAX_INSNS - 1; len > 0; --len)
12786 if (fp[len].code != 0 || fp[len].imm != 0)
12791 static int create_map(uint32_t type, uint32_t size_key,
12792 uint32_t size_value, uint32_t max_elem)
12796 fd = bpf_create_map(type, size_key, size_value, max_elem,
12797 type == BPF_MAP_TYPE_HASH ? BPF_F_NO_PREALLOC : 0);
12799 printf("Failed to create hash map '%s'!\n", strerror(errno));
12804 static int create_prog_dummy1(enum bpf_map_type prog_type)
12806 struct bpf_insn prog[] = {
12807 BPF_MOV64_IMM(BPF_REG_0, 42),
12811 return bpf_load_program(prog_type, prog,
12812 ARRAY_SIZE(prog), "GPL", 0, NULL, 0);
12815 static int create_prog_dummy2(enum bpf_map_type prog_type, int mfd, int idx)
12817 struct bpf_insn prog[] = {
12818 BPF_MOV64_IMM(BPF_REG_3, idx),
12819 BPF_LD_MAP_FD(BPF_REG_2, mfd),
12820 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12821 BPF_FUNC_tail_call),
12822 BPF_MOV64_IMM(BPF_REG_0, 41),
12826 return bpf_load_program(prog_type, prog,
12827 ARRAY_SIZE(prog), "GPL", 0, NULL, 0);
12830 static int create_prog_array(enum bpf_map_type prog_type, uint32_t max_elem,
12834 int mfd, p1fd, p2fd;
12836 mfd = bpf_create_map(BPF_MAP_TYPE_PROG_ARRAY, sizeof(int),
12837 sizeof(int), max_elem, 0);
12839 printf("Failed to create prog array '%s'!\n", strerror(errno));
12843 p1fd = create_prog_dummy1(prog_type);
12844 p2fd = create_prog_dummy2(prog_type, mfd, p2key);
12845 if (p1fd < 0 || p2fd < 0)
12847 if (bpf_map_update_elem(mfd, &p1key, &p1fd, BPF_ANY) < 0)
12849 if (bpf_map_update_elem(mfd, &p2key, &p2fd, BPF_ANY) < 0)
12862 static int create_map_in_map(void)
12864 int inner_map_fd, outer_map_fd;
12866 inner_map_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
12867 sizeof(int), 1, 0);
12868 if (inner_map_fd < 0) {
12869 printf("Failed to create array '%s'!\n", strerror(errno));
12870 return inner_map_fd;
12873 outer_map_fd = bpf_create_map_in_map(BPF_MAP_TYPE_ARRAY_OF_MAPS, NULL,
12874 sizeof(int), inner_map_fd, 1, 0);
12875 if (outer_map_fd < 0)
12876 printf("Failed to create array of maps '%s'!\n",
12879 close(inner_map_fd);
12881 return outer_map_fd;
12884 static int create_cgroup_storage(void)
12888 fd = bpf_create_map(BPF_MAP_TYPE_CGROUP_STORAGE,
12889 sizeof(struct bpf_cgroup_storage_key),
12890 TEST_DATA_LEN, 0, 0);
12892 printf("Failed to create array '%s'!\n", strerror(errno));
12897 static char bpf_vlog[UINT_MAX >> 8];
12899 static void do_test_fixup(struct bpf_test *test, enum bpf_map_type prog_type,
12900 struct bpf_insn *prog, int *map_fds)
12902 int *fixup_map1 = test->fixup_map1;
12903 int *fixup_map2 = test->fixup_map2;
12904 int *fixup_map3 = test->fixup_map3;
12905 int *fixup_map4 = test->fixup_map4;
12906 int *fixup_prog1 = test->fixup_prog1;
12907 int *fixup_prog2 = test->fixup_prog2;
12908 int *fixup_map_in_map = test->fixup_map_in_map;
12909 int *fixup_cgroup_storage = test->fixup_cgroup_storage;
12911 if (test->fill_helper)
12912 test->fill_helper(test);
12914 /* Allocating HTs with 1 elem is fine here, since we only test
12915 * for verifier and not do a runtime lookup, so the only thing
12916 * that really matters is value size in this case.
12919 map_fds[0] = create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
12920 sizeof(long long), 1);
12922 prog[*fixup_map1].imm = map_fds[0];
12924 } while (*fixup_map1);
12928 map_fds[1] = create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
12929 sizeof(struct test_val), 1);
12931 prog[*fixup_map2].imm = map_fds[1];
12933 } while (*fixup_map2);
12937 map_fds[2] = create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
12938 sizeof(struct other_val), 1);
12940 prog[*fixup_map3].imm = map_fds[2];
12942 } while (*fixup_map3);
12946 map_fds[3] = create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
12947 sizeof(struct test_val), 1);
12949 prog[*fixup_map4].imm = map_fds[3];
12951 } while (*fixup_map4);
12954 if (*fixup_prog1) {
12955 map_fds[4] = create_prog_array(prog_type, 4, 0);
12957 prog[*fixup_prog1].imm = map_fds[4];
12959 } while (*fixup_prog1);
12962 if (*fixup_prog2) {
12963 map_fds[5] = create_prog_array(prog_type, 8, 7);
12965 prog[*fixup_prog2].imm = map_fds[5];
12967 } while (*fixup_prog2);
12970 if (*fixup_map_in_map) {
12971 map_fds[6] = create_map_in_map();
12973 prog[*fixup_map_in_map].imm = map_fds[6];
12974 fixup_map_in_map++;
12975 } while (*fixup_map_in_map);
12978 if (*fixup_cgroup_storage) {
12979 map_fds[7] = create_cgroup_storage();
12981 prog[*fixup_cgroup_storage].imm = map_fds[7];
12982 fixup_cgroup_storage++;
12983 } while (*fixup_cgroup_storage);
12987 static int set_admin(bool admin)
12990 const cap_value_t cap_val = CAP_SYS_ADMIN;
12993 caps = cap_get_proc();
12995 perror("cap_get_proc");
12998 if (cap_set_flag(caps, CAP_EFFECTIVE, 1, &cap_val,
12999 admin ? CAP_SET : CAP_CLEAR)) {
13000 perror("cap_set_flag");
13003 if (cap_set_proc(caps)) {
13004 perror("cap_set_proc");
13009 if (cap_free(caps))
13010 perror("cap_free");
/* Load one test's program (optionally as an unprivileged user), compare
 * the verifier verdict and log against expectations, and for accepted
 * programs run them over the test packet and check the return value;
 * bumps *passes or *errors accordingly.
 * NOTE(review): this excerpt is missing several original lines
 * (declarations, error-handling branches and cleanup labels); the
 * comments below describe only what is visible.
 */
13014 static void do_test_single(struct bpf_test *test, bool unpriv,
13015 int *passes, int *errors)
13017 int fd_prog, expected_ret, alignment_prevented_execution;
13018 int prog_len, prog_type = test->prog_type;
13019 struct bpf_insn *prog = test->insns;
13020 int map_fds[MAX_NR_MAPS];
13021 const char *expected_err;
13022 uint32_t expected_val;
/* Pre-set every map fd slot (initializer line not in excerpt). */
13027 for (i = 0; i < MAX_NR_MAPS; i++)
/* Tests that leave prog_type at 0 default to SOCKET_FILTER. */
13031 prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
/* Create maps/prog arrays and patch their fds into the insns. */
13032 do_test_fixup(test, prog_type, prog, map_fds);
13033 prog_len = probe_filter_length(prog);
/* Translate the test's alignment flags into load flags. */
13036 if (test->flags & F_LOAD_WITH_STRICT_ALIGNMENT)
13037 pflags |= BPF_F_STRICT_ALIGNMENT;
13038 if (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS)
13039 pflags |= BPF_F_ANY_ALIGNMENT;
13040 fd_prog = bpf_verify_program(prog_type, prog, prog_len, pflags,
13041 "GPL", 0, bpf_vlog, sizeof(bpf_vlog), 1);
/* Unprivileged runs may override verdict, message and retval. */
13043 expected_ret = unpriv && test->result_unpriv != UNDEF ?
13044 test->result_unpriv : test->result;
13045 expected_err = unpriv && test->errstr_unpriv ?
13046 test->errstr_unpriv : test->errstr;
13047 expected_val = unpriv && test->retval_unpriv ?
13048 test->retval_unpriv : test->retval;
13050 alignment_prevented_execution = 0;
13052 if (expected_ret == ACCEPT) {
/* ACCEPT expected but the load failed (branch body truncated). */
13054 printf("FAIL\nFailed to load prog '%s'!\n",
13058 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
/* On strict-alignment arches the prog may load but is not executed. */
13059 if (fd_prog >= 0 &&
13060 (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS)) {
13061 alignment_prevented_execution = 1;
/* REJECT path: loading must have failed ... */
13066 if (fd_prog >= 0) {
13067 printf("FAIL\nUnexpected success to load!\n");
/* ... and the verifier log must contain the expected message. */
13070 if (!strstr(bpf_vlog, expected_err)) {
13071 printf("FAIL\nUnexpected error message!\n\tEXP: %s\n\tRES: %s\n",
13072 expected_err, bpf_vlog);
/* Execute accepted programs over the test's packet data. */
13077 if (fd_prog >= 0) {
13078 __u8 tmp[TEST_DATA_LEN << 2];
13079 __u32 size_tmp = sizeof(tmp);
13083 err = bpf_prog_test_run(fd_prog, 1, test->data,
13084 sizeof(test->data), tmp, &size_tmp,
/* 524 is kernel-internal ENOTSUPP, absent from userspace errno.h. */
13088 if (err && errno != 524/*ENOTSUPP*/ && errno != EPERM) {
13089 printf("Unexpected bpf_prog_test_run error\n");
/* POINTER_VALUE retvals are not comparable across runs; skip them. */
13092 if (!err && retval != expected_val &&
13093 expected_val != POINTER_VALUE) {
13094 printf("FAIL retval %d != %d\n", retval, expected_val);
13098 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
13102 printf("OK%s\n", alignment_prevented_execution ?
13103 " (NOTE: not executed due to unknown alignment)" : "");
/* Close every map fd created by do_test_fixup. */
13106 for (i = 0; i < MAX_NR_MAPS; i++)
/* Failure path: dump the full verifier log for diagnosis. */
13112 printf("%s", bpf_vlog);
13116 static bool is_admin(void)
13119 cap_flag_value_t sysadmin = CAP_CLEAR;
13120 const cap_value_t cap_val = CAP_SYS_ADMIN;
13122 #ifdef CAP_IS_SUPPORTED
13123 if (!CAP_IS_SUPPORTED(CAP_SETFCAP)) {
13124 perror("cap_get_flag");
13128 caps = cap_get_proc();
13130 perror("cap_get_proc");
13133 if (cap_get_flag(caps, cap_val, CAP_EFFECTIVE, &sysadmin))
13134 perror("cap_get_flag");
13135 if (cap_free(caps))
13136 perror("cap_free");
13137 return (sysadmin == CAP_SET);
13140 static void get_unpriv_disabled()
13145 fd = fopen("/proc/sys/"UNPRIV_SYSCTL, "r");
13147 perror("fopen /proc/sys/"UNPRIV_SYSCTL);
13148 unpriv_disabled = true;
13151 if (fgets(buf, 2, fd) == buf && atoi(buf))
13152 unpriv_disabled = true;
13156 static bool test_as_unpriv(struct bpf_test *test)
13158 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
13159 /* Some architectures have strict alignment requirements. In
13160 * that case, the BPF verifier detects if a program has
13161 * unaligned accesses and rejects them. A user can pass
13162 * BPF_F_ANY_ALIGNMENT to a program to override this
13163 * check. That, however, will only work when a privileged user
13164 * loads a program. An unprivileged user loading a program
13165 * with this flag will be rejected prior entering the
13168 if (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS)
13171 return !test->prog_type ||
13172 test->prog_type == BPF_PROG_TYPE_SOCKET_FILTER ||
13173 test->prog_type == BPF_PROG_TYPE_CGROUP_SKB;
13176 static int do_test(bool unpriv, unsigned int from, unsigned int to)
13178 int i, passes = 0, errors = 0, skips = 0;
13180 for (i = from; i < to; i++) {
13181 struct bpf_test *test = &tests[i];
13183 /* Program types that are not supported by non-root we
13186 if (test_as_unpriv(test) && unpriv_disabled) {
13187 printf("#%d/u %s SKIP\n", i, test->descr);
13189 } else if (test_as_unpriv(test)) {
13192 printf("#%d/u %s ", i, test->descr);
13193 do_test_single(test, true, &passes, &errors);
13199 printf("#%d/p %s SKIP\n", i, test->descr);
13202 printf("#%d/p %s ", i, test->descr);
13203 do_test_single(test, false, &passes, &errors);
13207 printf("Summary: %d PASSED, %d SKIPPED, %d FAILED\n", passes,
13209 return errors ? EXIT_FAILURE : EXIT_SUCCESS;
13212 int main(int argc, char **argv)
13214 unsigned int from = 0, to = ARRAY_SIZE(tests);
13215 bool unpriv = !is_admin();
13218 unsigned int l = atoi(argv[argc - 2]);
13219 unsigned int u = atoi(argv[argc - 1]);
13221 if (l < to && u < to) {
13225 } else if (argc == 2) {
13226 unsigned int t = atoi(argv[argc - 1]);
13234 get_unpriv_disabled();
13235 if (unpriv && unpriv_disabled) {
13236 printf("Cannot run as unprivileged user with sysctl %s.\n",
13238 return EXIT_FAILURE;
13241 bpf_semi_rand_init();
13242 return do_test(unpriv, from, to);