2 * Linux Socket Filter - Kernel level socket filtering
4 * Based on the design of the Berkeley Packet Filter. The new
5 * internal format has been designed by PLUMgrid:
7 * Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
11 * Jay Schulist <jschlst@samba.org>
12 * Alexei Starovoitov <ast@plumgrid.com>
13 * Daniel Borkmann <dborkman@redhat.com>
15 * This program is free software; you can redistribute it and/or
16 * modify it under the terms of the GNU General Public License
17 * as published by the Free Software Foundation; either version
18 * 2 of the License, or (at your option) any later version.
20 * Andi Kleen - Fix a few bad bugs and races.
21 * Kris Katterjohn - Added many additional checks in bpf_check_classic()
24 #include <linux/filter.h>
25 #include <linux/skbuff.h>
26 #include <linux/vmalloc.h>
27 #include <linux/random.h>
28 #include <linux/moduleloader.h>
29 #include <linux/bpf.h>
30 #include <linux/frame.h>
31 #include <linux/rbtree_latch.h>
32 #include <linux/kallsyms.h>
33 #include <linux/rcupdate.h>
34 #include <linux/perf_event.h>
35 #include <linux/nospec.h>
37 #include <asm/barrier.h>
38 #include <asm/unaligned.h>
41 #define BPF_R0 regs[BPF_REG_0]
42 #define BPF_R1 regs[BPF_REG_1]
43 #define BPF_R2 regs[BPF_REG_2]
44 #define BPF_R3 regs[BPF_REG_3]
45 #define BPF_R4 regs[BPF_REG_4]
46 #define BPF_R5 regs[BPF_REG_5]
47 #define BPF_R6 regs[BPF_REG_6]
48 #define BPF_R7 regs[BPF_REG_7]
49 #define BPF_R8 regs[BPF_REG_8]
50 #define BPF_R9 regs[BPF_REG_9]
51 #define BPF_R10 regs[BPF_REG_10]
54 #define DST regs[insn->dst_reg]
55 #define SRC regs[insn->src_reg]
56 #define FP regs[BPF_REG_FP]
57 #define AX regs[BPF_REG_AX]
58 #define ARG1 regs[BPF_REG_ARG1]
59 #define CTX regs[BPF_REG_CTX]
62 /* No hurry in this branch
64 * Exported for the bpf jit load helper.
66 void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, unsigned int size)
70 if (k >= SKF_NET_OFF) {
71 ptr = skb_network_header(skb) + k - SKF_NET_OFF;
72 } else if (k >= SKF_LL_OFF) {
73 if (unlikely(!skb_mac_header_was_set(skb)))
75 ptr = skb_mac_header(skb) + k - SKF_LL_OFF;
77 if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))
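/* Illustrative sketch (not part of the original source): a classic BPF
 * filter can use the special negative offsets decoded above to load
 * relative to the network or link-layer header, e.g. reading the IPv4
 * protocol byte independently of the link-layer header length:
 *
 *	struct sock_filter insns[] = {
 *		BPF_STMT(BPF_LD | BPF_B | BPF_ABS, SKF_NET_OFF + 9),
 *		BPF_STMT(BPF_RET | BPF_K, 0xffff),
 *	};
 *
 * JIT load helpers fall back to bpf_internal_load_pointer_neg_helper()
 * for such negative offsets.
 */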
83 struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags)
85 gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
86 struct bpf_prog_aux *aux;
89 size = round_up(size, PAGE_SIZE);
90 fp = __vmalloc(size, gfp_flags, PAGE_KERNEL);
94 aux = kzalloc(sizeof(*aux), GFP_KERNEL | gfp_extra_flags);
100 fp->pages = size / PAGE_SIZE;
103 fp->jit_requested = ebpf_jit_enabled();
105 INIT_LIST_HEAD_RCU(&fp->aux->ksym_lnode);
109 EXPORT_SYMBOL_GPL(bpf_prog_alloc);
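/* Usage sketch (illustrative; caller-side variable names are assumptions):
 * in-kernel users allocate a program rounded up to pages and then copy the
 * instructions in, e.g.:
 *
 *	struct bpf_prog *fp;
 *
 *	fp = bpf_prog_alloc(bpf_prog_size(insn_cnt), GFP_USER);
 *	if (!fp)
 *		return -ENOMEM;
 *	fp->len = insn_cnt;
 *	memcpy(fp->insnsi, insns, bpf_prog_insn_size(fp));
 *
 * bpf_prog_size() and bpf_prog_insn_size() are the <linux/filter.h> helpers
 * used elsewhere in this file.
 */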
111 struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
112 gfp_t gfp_extra_flags)
114 gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
119 BUG_ON(fp_old == NULL);
121 size = round_up(size, PAGE_SIZE);
122 pages = size / PAGE_SIZE;
123 if (pages <= fp_old->pages)
126 delta = pages - fp_old->pages;
127 ret = __bpf_prog_charge(fp_old->aux->user, delta);
131 fp = __vmalloc(size, gfp_flags, PAGE_KERNEL);
133 __bpf_prog_uncharge(fp_old->aux->user, delta);
135 memcpy(fp, fp_old, fp_old->pages * PAGE_SIZE);
139 /* We keep fp->aux from fp_old around in the new
140 * reallocated structure.
143 __bpf_prog_free(fp_old);
149 void __bpf_prog_free(struct bpf_prog *fp)
155 int bpf_prog_calc_tag(struct bpf_prog *fp)
157 const u32 bits_offset = SHA_MESSAGE_BYTES - sizeof(__be64);
158 u32 raw_size = bpf_prog_tag_scratch_size(fp);
159 u32 digest[SHA_DIGEST_WORDS];
160 u32 ws[SHA_WORKSPACE_WORDS];
161 u32 i, bsize, psize, blocks;
162 struct bpf_insn *dst;
168 raw = vmalloc(raw_size);
173 memset(ws, 0, sizeof(ws));
175 /* We need to take out the map fds for the digest calculation
176 * since they are unstable from the user-space side.
179 for (i = 0, was_ld_map = false; i < fp->len; i++) {
180 dst[i] = fp->insnsi[i];
182 dst[i].code == (BPF_LD | BPF_IMM | BPF_DW) &&
183 dst[i].src_reg == BPF_PSEUDO_MAP_FD) {
186 } else if (was_ld_map &&
188 dst[i].dst_reg == 0 &&
189 dst[i].src_reg == 0 &&
198 psize = bpf_prog_insn_size(fp);
199 memset(&raw[psize], 0, raw_size - psize);
202 bsize = round_up(psize, SHA_MESSAGE_BYTES);
203 blocks = bsize / SHA_MESSAGE_BYTES;
205 if (bsize - psize >= sizeof(__be64)) {
206 bits = (__be64 *)(todo + bsize - sizeof(__be64));
208 bits = (__be64 *)(todo + bsize + bits_offset);
211 *bits = cpu_to_be64((psize - 1) << 3);
214 sha_transform(digest, todo, ws);
215 todo += SHA_MESSAGE_BYTES;
218 result = (__force __be32 *)digest;
219 for (i = 0; i < SHA_DIGEST_WORDS; i++)
220 result[i] = cpu_to_be32(digest[i]);
221 memcpy(fp->tag, result, sizeof(fp->tag));
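/* Illustrative note (not part of the original source): fp->tag ends up being
 * the first BPF_TAG_SIZE (8) bytes of a SHA-1 digest over the SHA-padded
 * instruction image with map fds zeroed out. Conceptually (sha1() below is a
 * hypothetical helper, not a kernel API):
 *
 *	u8 digest[20];
 *
 *	sha1(padded_insns_with_map_fds_cleared, padded_len, digest);
 *	memcpy(tag, digest, BPF_TAG_SIZE);
 *
 * which is why the same program text always shows the same tag to user space
 * (e.g. in bpftool prog show), regardless of the map fds used at load time.
 */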
227 static int bpf_adj_delta_to_imm(struct bpf_insn *insn, u32 pos, u32 delta,
228 u32 curr, const bool probe_pass)
230 const s64 imm_min = S32_MIN, imm_max = S32_MAX;
233 if (curr < pos && curr + imm + 1 > pos)
235 else if (curr > pos + delta && curr + imm + 1 <= pos + delta)
237 if (imm < imm_min || imm > imm_max)
244 static int bpf_adj_delta_to_off(struct bpf_insn *insn, u32 pos, u32 delta,
245 u32 curr, const bool probe_pass)
247 const s32 off_min = S16_MIN, off_max = S16_MAX;
250 if (curr < pos && curr + off + 1 > pos)
252 else if (curr > pos + delta && curr + off + 1 <= pos + delta)
254 if (off < off_min || off > off_max)
261 static int bpf_adj_branches(struct bpf_prog *prog, u32 pos, u32 delta,
262 const bool probe_pass)
264 u32 i, insn_cnt = prog->len + (probe_pass ? delta : 0);
265 struct bpf_insn *insn = prog->insnsi;
268 for (i = 0; i < insn_cnt; i++, insn++) {
271 /* In the probing pass we still operate on the original,
272 * unpatched image in order to check overflows before we
273 * do any other adjustments. Therefore skip the patchlet.
275 if (probe_pass && i == pos) {
280 if (BPF_CLASS(code) != BPF_JMP ||
281 BPF_OP(code) == BPF_EXIT)
283 /* Adjust offset of jmps if we cross patch boundaries. */
284 if (BPF_OP(code) == BPF_CALL) {
285 if (insn->src_reg != BPF_PSEUDO_CALL)
287 ret = bpf_adj_delta_to_imm(insn, pos, delta, i,
290 ret = bpf_adj_delta_to_off(insn, pos, delta, i,
300 struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
301 const struct bpf_insn *patch, u32 len)
303 u32 insn_adj_cnt, insn_rest, insn_delta = len - 1;
304 const u32 cnt_max = S16_MAX;
305 struct bpf_prog *prog_adj;
307 /* Since our patchlet doesn't expand the image, we're done. */
308 if (insn_delta == 0) {
309 memcpy(prog->insnsi + off, patch, sizeof(*patch));
313 insn_adj_cnt = prog->len + insn_delta;
315 /* Reject anything that would potentially let the insn->off
316 * target overflow when we have excessive program expansions.
317 * We need to probe here before we do any reallocation, since
318 * afterwards we may no longer be able to fail.
320 if (insn_adj_cnt > cnt_max &&
321 bpf_adj_branches(prog, off, insn_delta, true))
324 /* Several new instructions need to be inserted. Make room
325 * for them. Likely, there's no need for a new allocation as
326 * the last page could have large enough tailroom.
328 prog_adj = bpf_prog_realloc(prog, bpf_prog_size(insn_adj_cnt),
333 prog_adj->len = insn_adj_cnt;
335 /* Patching happens in 3 steps:
337 * 1) Move over tail of insnsi from next instruction onwards,
338 * so we can patch the single target insn with one or more
339 * new ones (patching is always from 1 to n insns, n > 0).
340 * 2) Inject new instructions at the target location.
341 * 3) Adjust branch offsets if necessary.
343 insn_rest = insn_adj_cnt - off - len;
345 memmove(prog_adj->insnsi + off + len, prog_adj->insnsi + off + 1,
346 sizeof(*patch) * insn_rest);
347 memcpy(prog_adj->insnsi + off, patch, sizeof(*patch) * len);
349 /* We are guaranteed not to fail at this point; the ship has
350 * sailed and we cannot revert to the original state. An
351 * overflow cannot happen at this point.
353 BUG_ON(bpf_adj_branches(prog_adj, off, insn_delta, false));
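/* Worked example (illustrative): assume a program where insn 1 jumps over
 * insn 2 to the exit at insn 3:
 *
 *	0: r0 = 0
 *	1: if r1 == 0 goto +1	// off = 1, targets insn 3
 *	2: r0 = 1
 *	3: exit
 *
 * Patching insn 2 with a 3-insn patchlet (len = 3, insn_delta = 2) moves the
 * exit to index 5, so bpf_adj_branches() bumps the branch at insn 1 from
 * off = 1 to off = 3 to keep it pointing at the exit.
 */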
358 void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp)
362 for (i = 0; i < fp->aux->func_cnt; i++)
363 bpf_prog_kallsyms_del(fp->aux->func[i]);
366 void bpf_prog_kallsyms_del_all(struct bpf_prog *fp)
368 bpf_prog_kallsyms_del_subprogs(fp);
369 bpf_prog_kallsyms_del(fp);
372 #ifdef CONFIG_BPF_JIT
373 /* All BPF JIT sysctl knobs here. */
374 int bpf_jit_enable __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_ALWAYS_ON);
375 int bpf_jit_harden __read_mostly;
376 int bpf_jit_kallsyms __read_mostly;
377 long bpf_jit_limit __read_mostly;
378 long bpf_jit_limit_max __read_mostly;
380 static __always_inline void
381 bpf_get_prog_addr_region(const struct bpf_prog *prog,
382 unsigned long *symbol_start,
383 unsigned long *symbol_end)
385 const struct bpf_binary_header *hdr = bpf_jit_binary_hdr(prog);
386 unsigned long addr = (unsigned long)hdr;
388 WARN_ON_ONCE(!bpf_prog_ebpf_jited(prog));
390 *symbol_start = addr;
391 *symbol_end = addr + hdr->pages * PAGE_SIZE;
394 static void bpf_get_prog_name(const struct bpf_prog *prog, char *sym)
396 const char *end = sym + KSYM_NAME_LEN;
398 BUILD_BUG_ON(sizeof("bpf_prog_") +
399 sizeof(prog->tag) * 2 +
400 /* name has been null terminated.
401 * We would need +1 for the '_' preceding
402 * the name. However, the null character
403 * is double counted between the name and the
404 * sizeof("bpf_prog_") above, so we omit
407 sizeof(prog->aux->name) > KSYM_NAME_LEN);
409 sym += snprintf(sym, KSYM_NAME_LEN, "bpf_prog_");
410 sym = bin2hex(sym, prog->tag, sizeof(prog->tag));
411 if (prog->aux->name[0])
412 snprintf(sym, (size_t)(end - sym), "_%s", prog->aux->name);
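/* For illustration (the address and tag below are made up): with
 * bpf_jit_kallsyms enabled, the name built above appears in /proc/kallsyms
 * roughly as:
 *
 *	ffffffffc02a4000 t bpf_prog_5a2e1b9c4d3f8e71_my_prog
 *
 * i.e. "bpf_prog_" + 16 hex digits of the tag + an optional "_" + aux->name.
 */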
417 static __always_inline unsigned long
418 bpf_get_prog_addr_start(struct latch_tree_node *n)
420 unsigned long symbol_start, symbol_end;
421 const struct bpf_prog_aux *aux;
423 aux = container_of(n, struct bpf_prog_aux, ksym_tnode);
424 bpf_get_prog_addr_region(aux->prog, &symbol_start, &symbol_end);
429 static __always_inline bool bpf_tree_less(struct latch_tree_node *a,
430 struct latch_tree_node *b)
432 return bpf_get_prog_addr_start(a) < bpf_get_prog_addr_start(b);
435 static __always_inline int bpf_tree_comp(void *key, struct latch_tree_node *n)
437 unsigned long val = (unsigned long)key;
438 unsigned long symbol_start, symbol_end;
439 const struct bpf_prog_aux *aux;
441 aux = container_of(n, struct bpf_prog_aux, ksym_tnode);
442 bpf_get_prog_addr_region(aux->prog, &symbol_start, &symbol_end);
444 if (val < symbol_start)
446 if (val >= symbol_end)
452 static const struct latch_tree_ops bpf_tree_ops = {
453 .less = bpf_tree_less,
454 .comp = bpf_tree_comp,
457 static DEFINE_SPINLOCK(bpf_lock);
458 static LIST_HEAD(bpf_kallsyms);
459 static struct latch_tree_root bpf_tree __cacheline_aligned;
461 static void bpf_prog_ksym_node_add(struct bpf_prog_aux *aux)
463 WARN_ON_ONCE(!list_empty(&aux->ksym_lnode));
464 list_add_tail_rcu(&aux->ksym_lnode, &bpf_kallsyms);
465 latch_tree_insert(&aux->ksym_tnode, &bpf_tree, &bpf_tree_ops);
468 static void bpf_prog_ksym_node_del(struct bpf_prog_aux *aux)
470 if (list_empty(&aux->ksym_lnode))
473 latch_tree_erase(&aux->ksym_tnode, &bpf_tree, &bpf_tree_ops);
474 list_del_rcu(&aux->ksym_lnode);
477 static bool bpf_prog_kallsyms_candidate(const struct bpf_prog *fp)
479 return fp->jited && !bpf_prog_was_classic(fp);
482 static bool bpf_prog_kallsyms_verify_off(const struct bpf_prog *fp)
484 return list_empty(&fp->aux->ksym_lnode) ||
485 fp->aux->ksym_lnode.prev == LIST_POISON2;
488 void bpf_prog_kallsyms_add(struct bpf_prog *fp)
490 if (!bpf_prog_kallsyms_candidate(fp) ||
491 !capable(CAP_SYS_ADMIN))
494 spin_lock_bh(&bpf_lock);
495 bpf_prog_ksym_node_add(fp->aux);
496 spin_unlock_bh(&bpf_lock);
499 void bpf_prog_kallsyms_del(struct bpf_prog *fp)
501 if (!bpf_prog_kallsyms_candidate(fp))
504 spin_lock_bh(&bpf_lock);
505 bpf_prog_ksym_node_del(fp->aux);
506 spin_unlock_bh(&bpf_lock);
509 static struct bpf_prog *bpf_prog_kallsyms_find(unsigned long addr)
511 struct latch_tree_node *n;
513 if (!bpf_jit_kallsyms_enabled())
516 n = latch_tree_find((void *)addr, &bpf_tree, &bpf_tree_ops);
518 container_of(n, struct bpf_prog_aux, ksym_tnode)->prog :
522 const char *__bpf_address_lookup(unsigned long addr, unsigned long *size,
523 unsigned long *off, char *sym)
525 unsigned long symbol_start, symbol_end;
526 struct bpf_prog *prog;
530 prog = bpf_prog_kallsyms_find(addr);
532 bpf_get_prog_addr_region(prog, &symbol_start, &symbol_end);
533 bpf_get_prog_name(prog, sym);
537 *size = symbol_end - symbol_start;
539 *off = addr - symbol_start;
546 bool is_bpf_text_address(unsigned long addr)
551 ret = bpf_prog_kallsyms_find(addr) != NULL;
557 int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
560 unsigned long symbol_start, symbol_end;
561 struct bpf_prog_aux *aux;
565 if (!bpf_jit_kallsyms_enabled())
569 list_for_each_entry_rcu(aux, &bpf_kallsyms, ksym_lnode) {
573 bpf_get_prog_addr_region(aux->prog, &symbol_start, &symbol_end);
574 bpf_get_prog_name(aux->prog, sym);
576 *value = symbol_start;
577 *type = BPF_SYM_ELF_TYPE;
587 static atomic_long_t bpf_jit_current;
589 /* Can be overridden by an arch's JIT compiler if it has a custom,
590 * dedicated BPF backend memory area, or if neither of the two
593 u64 __weak bpf_jit_alloc_exec_limit(void)
595 #if defined(MODULES_VADDR)
596 return MODULES_END - MODULES_VADDR;
598 return VMALLOC_END - VMALLOC_START;
602 static int __init bpf_jit_charge_init(void)
604 /* Only used as heuristic here to derive limit. */
605 bpf_jit_limit_max = bpf_jit_alloc_exec_limit();
606 bpf_jit_limit = min_t(u64, round_up(bpf_jit_limit_max >> 1,
607 PAGE_SIZE), LONG_MAX);
610 pure_initcall(bpf_jit_charge_init);
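/* Worked example (numbers are illustrative only): on an arch whose module
 * area spans 1 GiB, bpf_jit_alloc_exec_limit() returns 1 GiB, so the default
 * charge limit becomes round_up((1 GiB) >> 1, PAGE_SIZE) = 512 MiB, i.e.
 * unprivileged JITed images may consume at most half of the module space
 * unless an admin raises the bpf_jit_limit sysctl.
 */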
612 static int bpf_jit_charge_modmem(u32 pages)
614 if (atomic_long_add_return(pages, &bpf_jit_current) >
615 (bpf_jit_limit >> PAGE_SHIFT)) {
616 if (!capable(CAP_SYS_ADMIN)) {
617 atomic_long_sub(pages, &bpf_jit_current);
625 static void bpf_jit_uncharge_modmem(u32 pages)
627 atomic_long_sub(pages, &bpf_jit_current);
630 struct bpf_binary_header *
631 bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
632 unsigned int alignment,
633 bpf_jit_fill_hole_t bpf_fill_ill_insns)
635 struct bpf_binary_header *hdr;
636 u32 size, hole, start, pages;
638 /* Most BPF filters are really small, but if some of them
639 * fill a page, allow at least 128 extra bytes to insert a
640 * random section of illegal instructions.
642 size = round_up(proglen + sizeof(*hdr) + 128, PAGE_SIZE);
643 pages = size / PAGE_SIZE;
645 if (bpf_jit_charge_modmem(pages))
647 hdr = module_alloc(size);
649 bpf_jit_uncharge_modmem(pages);
653 /* Fill space with illegal/arch-dep instructions. */
654 bpf_fill_ill_insns(hdr, size);
657 hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)),
658 PAGE_SIZE - sizeof(*hdr));
659 start = (get_random_int() % hole) & ~(alignment - 1);
661 /* Leave a random number of instructions before BPF code. */
662 *image_ptr = &hdr->image[start];
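/* Worked example (illustrative): for proglen = 200 on a system with 4 KiB
 * pages, size = round_up(200 + sizeof(*hdr) + 128, 4096) = 4096, so one page
 * is charged. The hole is 4096 - (200 + sizeof(*hdr)) bytes and the image
 * starts at a random, alignment-masked offset inside it, so the JITed code
 * never sits at a predictable location within the page.
 */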
667 void bpf_jit_binary_free(struct bpf_binary_header *hdr)
669 u32 pages = hdr->pages;
672 bpf_jit_uncharge_modmem(pages);
675 /* This symbol is only overridden by archs that have different
676 * requirements than the usual eBPF JITs, f.e. when they only
677 * implement cBPF JIT, do not set images read-only, etc.
679 void __weak bpf_jit_free(struct bpf_prog *fp)
682 struct bpf_binary_header *hdr = bpf_jit_binary_hdr(fp);
684 bpf_jit_binary_unlock_ro(hdr);
685 bpf_jit_binary_free(hdr);
687 WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(fp));
690 bpf_prog_unlock_free(fp);
693 static int bpf_jit_blind_insn(const struct bpf_insn *from,
694 const struct bpf_insn *aux,
695 struct bpf_insn *to_buff)
697 struct bpf_insn *to = to_buff;
698 u32 imm_rnd = get_random_int();
701 BUILD_BUG_ON(BPF_REG_AX + 1 != MAX_BPF_JIT_REG);
702 BUILD_BUG_ON(MAX_BPF_REG + 1 != MAX_BPF_JIT_REG);
704 /* Constraints on AX register:
706 * AX register is inaccessible from user space. It is mapped in
707 * all JITs, and used here for constant blinding rewrites. It is
708 * typically "stateless" meaning its contents are only valid within
709 * the executed instruction, but not across several instructions.
710 * There are a few exceptions however which are further detailed
713 * Constant blinding is only used by JITs, not in the interpreter.
714 * In restricted circumstances, the verifier can also use the AX
715 * register for rewrites as long as they do not interfere with
718 if (from->dst_reg == BPF_REG_AX || from->src_reg == BPF_REG_AX)
721 if (from->imm == 0 &&
722 (from->code == (BPF_ALU | BPF_MOV | BPF_K) ||
723 from->code == (BPF_ALU64 | BPF_MOV | BPF_K))) {
724 *to++ = BPF_ALU64_REG(BPF_XOR, from->dst_reg, from->dst_reg);
728 switch (from->code) {
729 case BPF_ALU | BPF_ADD | BPF_K:
730 case BPF_ALU | BPF_SUB | BPF_K:
731 case BPF_ALU | BPF_AND | BPF_K:
732 case BPF_ALU | BPF_OR | BPF_K:
733 case BPF_ALU | BPF_XOR | BPF_K:
734 case BPF_ALU | BPF_MUL | BPF_K:
735 case BPF_ALU | BPF_MOV | BPF_K:
736 case BPF_ALU | BPF_DIV | BPF_K:
737 case BPF_ALU | BPF_MOD | BPF_K:
738 *to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
739 *to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
740 *to++ = BPF_ALU32_REG(from->code, from->dst_reg, BPF_REG_AX);
743 case BPF_ALU64 | BPF_ADD | BPF_K:
744 case BPF_ALU64 | BPF_SUB | BPF_K:
745 case BPF_ALU64 | BPF_AND | BPF_K:
746 case BPF_ALU64 | BPF_OR | BPF_K:
747 case BPF_ALU64 | BPF_XOR | BPF_K:
748 case BPF_ALU64 | BPF_MUL | BPF_K:
749 case BPF_ALU64 | BPF_MOV | BPF_K:
750 case BPF_ALU64 | BPF_DIV | BPF_K:
751 case BPF_ALU64 | BPF_MOD | BPF_K:
752 *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
753 *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
754 *to++ = BPF_ALU64_REG(from->code, from->dst_reg, BPF_REG_AX);
757 case BPF_JMP | BPF_JEQ | BPF_K:
758 case BPF_JMP | BPF_JNE | BPF_K:
759 case BPF_JMP | BPF_JGT | BPF_K:
760 case BPF_JMP | BPF_JLT | BPF_K:
761 case BPF_JMP | BPF_JGE | BPF_K:
762 case BPF_JMP | BPF_JLE | BPF_K:
763 case BPF_JMP | BPF_JSGT | BPF_K:
764 case BPF_JMP | BPF_JSLT | BPF_K:
765 case BPF_JMP | BPF_JSGE | BPF_K:
766 case BPF_JMP | BPF_JSLE | BPF_K:
767 case BPF_JMP | BPF_JSET | BPF_K:
768 /* Account for the extra offset in case of a backjump. */
772 *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
773 *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
774 *to++ = BPF_JMP_REG(from->code, from->dst_reg, BPF_REG_AX, off);
777 case BPF_LD | BPF_IMM | BPF_DW:
778 *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[1].imm);
779 *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
780 *to++ = BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32);
781 *to++ = BPF_ALU64_REG(BPF_MOV, aux[0].dst_reg, BPF_REG_AX);
783 case 0: /* Part 2 of BPF_LD | BPF_IMM | BPF_DW. */
784 *to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[0].imm);
785 *to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
786 *to++ = BPF_ALU64_REG(BPF_OR, aux[0].dst_reg, BPF_REG_AX);
789 case BPF_ST | BPF_MEM | BPF_DW:
790 case BPF_ST | BPF_MEM | BPF_W:
791 case BPF_ST | BPF_MEM | BPF_H:
792 case BPF_ST | BPF_MEM | BPF_B:
793 *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
794 *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
795 *to++ = BPF_STX_MEM(from->code, from->dst_reg, BPF_REG_AX, from->off);
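/* Concrete example (the imm_rnd value is made up): blinding a single
 * "r2 += 42" (BPF_ALU64 | BPF_ADD | BPF_K) with imm_rnd = 0x1234 rewrites
 * it, per the BPF_ALU64 case above, into:
 *
 *	BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, 0x1234 ^ 42);
 *	BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, 0x1234);
 *	BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_AX);
 *
 * so the attacker-controlled constant 42 never appears verbatim in the JITed
 * image, which is the point of constant blinding against JIT spraying.
 */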
802 static struct bpf_prog *bpf_prog_clone_create(struct bpf_prog *fp_other,
803 gfp_t gfp_extra_flags)
805 gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
808 fp = __vmalloc(fp_other->pages * PAGE_SIZE, gfp_flags, PAGE_KERNEL);
810 /* aux->prog still points to the fp_other one, so
811 * when promoting the clone to the real program,
812 * this still needs to be adapted.
814 memcpy(fp, fp_other, fp_other->pages * PAGE_SIZE);
820 static void bpf_prog_clone_free(struct bpf_prog *fp)
822 /* aux was stolen by the other clone, so we cannot free
823 * it from this path! It will be freed eventually by the
824 * other program on release.
826 * At this point, we don't need a deferred release since
827 * clone is guaranteed to not be locked.
833 void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other)
835 /* We have to repoint aux->prog to self, as we don't
836 * know whether fp here is the clone or the original.
839 bpf_prog_clone_free(fp_other);
842 struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *prog)
844 struct bpf_insn insn_buff[16], aux[2];
845 struct bpf_prog *clone, *tmp;
846 int insn_delta, insn_cnt;
847 struct bpf_insn *insn;
850 if (!bpf_jit_blinding_enabled(prog) || prog->blinded)
853 clone = bpf_prog_clone_create(prog, GFP_USER);
855 return ERR_PTR(-ENOMEM);
857 insn_cnt = clone->len;
858 insn = clone->insnsi;
860 for (i = 0; i < insn_cnt; i++, insn++) {
861 /* We temporarily need to hold the original ld64 insn
862 * so that we can still access the first part in the
863 * second blinding run.
865 if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW) &&
867 memcpy(aux, insn, sizeof(aux));
869 rewritten = bpf_jit_blind_insn(insn, aux, insn_buff);
873 tmp = bpf_patch_insn_single(clone, i, insn_buff, rewritten);
875 /* Patching may have repointed aux->prog during
876 * realloc from the original one, so we need to
877 * fix it up here on error.
879 bpf_jit_prog_release_other(prog, clone);
880 return ERR_PTR(-ENOMEM);
884 insn_delta = rewritten - 1;
886 /* Walk new program and skip insns we just inserted. */
887 insn = clone->insnsi + i + insn_delta;
888 insn_cnt += insn_delta;
895 #endif /* CONFIG_BPF_JIT */
897 /* Base function for offset calculation. Needs to go into .text section,
898 * therefore keeping it non-static as well; will also be used by JITs
899 * anyway later on, so do not let the compiler omit it. This also needs
900 * to go into kallsyms for correlation from e.g. bpftool, so naming
903 noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
907 EXPORT_SYMBOL_GPL(__bpf_call_base);
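/* Illustrative sketch (not part of the original source): helper calls are
 * encoded with an imm relative to __bpf_call_base, so the interpreter can
 * dispatch them as seen in JMP_CALL below. The verifier's fixup pass does
 * roughly:
 *
 *	insn->imm = (long) helper_fn_addr - (long) __bpf_call_base;
 *
 * and the interpreter later computes (__bpf_call_base + insn->imm)(...).
 */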
909 /* All UAPI available opcodes. */
910 #define BPF_INSN_MAP(INSN_2, INSN_3) \
911 /* 32 bit ALU operations. */ \
912 /* Register based. */ \
913 INSN_3(ALU, ADD, X), \
914 INSN_3(ALU, SUB, X), \
915 INSN_3(ALU, AND, X), \
916 INSN_3(ALU, OR, X), \
917 INSN_3(ALU, LSH, X), \
918 INSN_3(ALU, RSH, X), \
919 INSN_3(ALU, XOR, X), \
920 INSN_3(ALU, MUL, X), \
921 INSN_3(ALU, MOV, X), \
922 INSN_3(ALU, DIV, X), \
923 INSN_3(ALU, MOD, X), \
925 INSN_3(ALU, END, TO_BE), \
926 INSN_3(ALU, END, TO_LE), \
927 /* Immediate based. */ \
928 INSN_3(ALU, ADD, K), \
929 INSN_3(ALU, SUB, K), \
930 INSN_3(ALU, AND, K), \
931 INSN_3(ALU, OR, K), \
932 INSN_3(ALU, LSH, K), \
933 INSN_3(ALU, RSH, K), \
934 INSN_3(ALU, XOR, K), \
935 INSN_3(ALU, MUL, K), \
936 INSN_3(ALU, MOV, K), \
937 INSN_3(ALU, DIV, K), \
938 INSN_3(ALU, MOD, K), \
939 /* 64 bit ALU operations. */ \
940 /* Register based. */ \
941 INSN_3(ALU64, ADD, X), \
942 INSN_3(ALU64, SUB, X), \
943 INSN_3(ALU64, AND, X), \
944 INSN_3(ALU64, OR, X), \
945 INSN_3(ALU64, LSH, X), \
946 INSN_3(ALU64, RSH, X), \
947 INSN_3(ALU64, XOR, X), \
948 INSN_3(ALU64, MUL, X), \
949 INSN_3(ALU64, MOV, X), \
950 INSN_3(ALU64, ARSH, X), \
951 INSN_3(ALU64, DIV, X), \
952 INSN_3(ALU64, MOD, X), \
953 INSN_2(ALU64, NEG), \
954 /* Immediate based. */ \
955 INSN_3(ALU64, ADD, K), \
956 INSN_3(ALU64, SUB, K), \
957 INSN_3(ALU64, AND, K), \
958 INSN_3(ALU64, OR, K), \
959 INSN_3(ALU64, LSH, K), \
960 INSN_3(ALU64, RSH, K), \
961 INSN_3(ALU64, XOR, K), \
962 INSN_3(ALU64, MUL, K), \
963 INSN_3(ALU64, MOV, K), \
964 INSN_3(ALU64, ARSH, K), \
965 INSN_3(ALU64, DIV, K), \
966 INSN_3(ALU64, MOD, K), \
967 /* Call instruction. */ \
969 /* Exit instruction. */ \
971 /* Jump instructions. */ \
972 /* Register based. */ \
973 INSN_3(JMP, JEQ, X), \
974 INSN_3(JMP, JNE, X), \
975 INSN_3(JMP, JGT, X), \
976 INSN_3(JMP, JLT, X), \
977 INSN_3(JMP, JGE, X), \
978 INSN_3(JMP, JLE, X), \
979 INSN_3(JMP, JSGT, X), \
980 INSN_3(JMP, JSLT, X), \
981 INSN_3(JMP, JSGE, X), \
982 INSN_3(JMP, JSLE, X), \
983 INSN_3(JMP, JSET, X), \
984 /* Immediate based. */ \
985 INSN_3(JMP, JEQ, K), \
986 INSN_3(JMP, JNE, K), \
987 INSN_3(JMP, JGT, K), \
988 INSN_3(JMP, JLT, K), \
989 INSN_3(JMP, JGE, K), \
990 INSN_3(JMP, JLE, K), \
991 INSN_3(JMP, JSGT, K), \
992 INSN_3(JMP, JSLT, K), \
993 INSN_3(JMP, JSGE, K), \
994 INSN_3(JMP, JSLE, K), \
995 INSN_3(JMP, JSET, K), \
997 /* Store instructions. */ \
998 /* Register based. */ \
999 INSN_3(STX, MEM, B), \
1000 INSN_3(STX, MEM, H), \
1001 INSN_3(STX, MEM, W), \
1002 INSN_3(STX, MEM, DW), \
1003 INSN_3(STX, XADD, W), \
1004 INSN_3(STX, XADD, DW), \
1005 /* Immediate based. */ \
1006 INSN_3(ST, MEM, B), \
1007 INSN_3(ST, MEM, H), \
1008 INSN_3(ST, MEM, W), \
1009 INSN_3(ST, MEM, DW), \
1010 /* Load instructions. */ \
1011 /* Register based. */ \
1012 INSN_3(LDX, MEM, B), \
1013 INSN_3(LDX, MEM, H), \
1014 INSN_3(LDX, MEM, W), \
1015 INSN_3(LDX, MEM, DW), \
1016 /* Immediate based. */ \
1019 bool bpf_opcode_in_insntable(u8 code)
1021 #define BPF_INSN_2_TBL(x, y) [BPF_##x | BPF_##y] = true
1022 #define BPF_INSN_3_TBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = true
1023 static const bool public_insntable[256] = {
1024 [0 ... 255] = false,
1025 /* Now overwrite non-defaults ... */
1026 BPF_INSN_MAP(BPF_INSN_2_TBL, BPF_INSN_3_TBL),
1027 /* UAPI exposed, but rewritten opcodes. cBPF carry-over. */
1028 [BPF_LD | BPF_ABS | BPF_B] = true,
1029 [BPF_LD | BPF_ABS | BPF_H] = true,
1030 [BPF_LD | BPF_ABS | BPF_W] = true,
1031 [BPF_LD | BPF_IND | BPF_B] = true,
1032 [BPF_LD | BPF_IND | BPF_H] = true,
1033 [BPF_LD | BPF_IND | BPF_W] = true,
1035 #undef BPF_INSN_3_TBL
1036 #undef BPF_INSN_2_TBL
1037 return public_insntable[code];
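/* Usage sketch (illustrative error path): callers such as the verifier can
 * use this table to reject opcodes that are not exposed via UAPI, e.g.:
 *
 *	if (!bpf_opcode_in_insntable(insn->code))
 *		return -EINVAL;
 *
 * Internal-only opcodes like BPF_JMP | BPF_CALL_ARGS are deliberately absent
 * here and only appear in the interpreter's jumptable below.
 */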
1040 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
1042 * ___bpf_prog_run - run eBPF program on a given context
1043 * @regs: is the array of MAX_BPF_EXT_REG eBPF pseudo-registers
1044 * @insn: is the array of eBPF instructions
1046 * Decode and execute eBPF instructions.
1048 static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn, u64 *stack)
1050 #define BPF_INSN_2_LBL(x, y) [BPF_##x | BPF_##y] = &&x##_##y
1051 #define BPF_INSN_3_LBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = &&x##_##y##_##z
1052 static const void *jumptable[256] = {
1053 [0 ... 255] = &&default_label,
1054 /* Now overwrite non-defaults ... */
1055 BPF_INSN_MAP(BPF_INSN_2_LBL, BPF_INSN_3_LBL),
1056 /* Non-UAPI available opcodes. */
1057 [BPF_JMP | BPF_CALL_ARGS] = &&JMP_CALL_ARGS,
1058 [BPF_JMP | BPF_TAIL_CALL] = &&JMP_TAIL_CALL,
1059 [BPF_ST | BPF_NOSPEC] = &&ST_NOSPEC,
1061 #undef BPF_INSN_3_LBL
1062 #undef BPF_INSN_2_LBL
1063 u32 tail_call_cnt = 0;
1066 #define CONT ({ insn++; goto select_insn; })
1067 #define CONT_JMP ({ insn++; goto select_insn; })
1070 goto *jumptable[insn->code];
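	/* Dispatch note (illustrative): with the labels-as-values jumptable
	 * above, every CONT ends up doing a direct threaded dispatch, roughly:
	 *
	 *	insn++;
	 *	goto *jumptable[insn->code];
	 *
	 * instead of re-entering a large switch statement per instruction.
	 */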
1073 #define ALU(OPCODE, OP) \
1074 ALU64_##OPCODE##_X: \
1078 DST = (u32) DST OP (u32) SRC; \
1080 ALU64_##OPCODE##_K: \
1084 DST = (u32) DST OP (u32) IMM; \
1115 DST = (u64) (u32) insn[0].imm | ((u64) (u32) insn[1].imm) << 32;
1119 (*(s64 *) &DST) >>= SRC;
1122 (*(s64 *) &DST) >>= IMM;
1125 div64_u64_rem(DST, SRC, &tmp);
1130 DST = do_div(tmp, (u32) SRC);
1133 div64_u64_rem(DST, IMM, &tmp);
1138 DST = do_div(tmp, (u32) IMM);
1141 DST = div64_u64(DST, SRC);
1145 do_div(tmp, (u32) SRC);
1149 DST = div64_u64(DST, IMM);
1153 do_div(tmp, (u32) IMM);
1159 DST = (__force u16) cpu_to_be16(DST);
1162 DST = (__force u32) cpu_to_be32(DST);
1165 DST = (__force u64) cpu_to_be64(DST);
1172 DST = (__force u16) cpu_to_le16(DST);
1175 DST = (__force u32) cpu_to_le32(DST);
1178 DST = (__force u64) cpu_to_le64(DST);
1185 /* Function call scratches BPF_R1-BPF_R5 registers,
1186 * preserves BPF_R6-BPF_R9, and stores return value
1189 BPF_R0 = (__bpf_call_base + insn->imm)(BPF_R1, BPF_R2, BPF_R3,
1194 BPF_R0 = (__bpf_call_base_args + insn->imm)(BPF_R1, BPF_R2,
1197 insn + insn->off + 1);
1201 struct bpf_map *map = (struct bpf_map *) (unsigned long) BPF_R2;
1202 struct bpf_array *array = container_of(map, struct bpf_array, map);
1203 struct bpf_prog *prog;
1206 if (unlikely(index >= array->map.max_entries))
1208 if (unlikely(tail_call_cnt > MAX_TAIL_CALL_CNT))
1213 prog = READ_ONCE(array->ptrs[index]);
1217 /* ARG1 at this point is guaranteed to point to CTX from
1218 * the verifier side due to the fact that the tail call is
1219 * handled like a helper, that is, bpf_tail_call_proto,
1220 * where arg1_type is ARG_PTR_TO_CTX.
1222 insn = prog->insnsi;
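	/* Illustrative example (BPF program side, not part of this file): this
	 * path is reached when a program executes something like
	 *
	 *	bpf_tail_call(ctx, &jmp_table, index);
	 *	// only falls through here if the tail call failed
	 *
	 * where jmp_table is a BPF_MAP_TYPE_PROG_ARRAY map; on success the
	 * target program's instructions replace the current ones via the insn
	 * pointer assignment above and execution never returns to the caller.
	 */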
1304 if (((s64) DST) > ((s64) SRC)) {
1310 if (((s64) DST) > ((s64) IMM)) {
1316 if (((s64) DST) < ((s64) SRC)) {
1322 if (((s64) DST) < ((s64) IMM)) {
1328 if (((s64) DST) >= ((s64) SRC)) {
1334 if (((s64) DST) >= ((s64) IMM)) {
1340 if (((s64) DST) <= ((s64) SRC)) {
1346 if (((s64) DST) <= ((s64) IMM)) {
1366 /* ST, STX and LDX */
1368 /* Speculation barrier for mitigating Speculative Store Bypass.
1369 * In case of arm64, we rely on the firmware mitigation as
1370 * controlled via the ssbd kernel parameter. Whenever the
1371 * mitigation is enabled, it works for all of the kernel code
1372 * with no need to provide any additional instructions here.
1373 * In case of x86, we use 'lfence' insn for mitigation. We
1374 * reuse preexisting logic from Spectre v1 mitigation that
1375 * happens to produce the required code on x86 for v4 as well.
1379 #define LDST(SIZEOP, SIZE) \
1381 *(SIZE *)(unsigned long) (DST + insn->off) = SRC; \
1384 *(SIZE *)(unsigned long) (DST + insn->off) = IMM; \
1387 DST = *(SIZE *)(unsigned long) (SRC + insn->off); \
1395 STX_XADD_W: /* lock xadd *(u32 *)(dst_reg + off16) += src_reg */
1396 atomic_add((u32) SRC, (atomic_t *)(unsigned long)
1399 STX_XADD_DW: /* lock xadd *(u64 *)(dst_reg + off16) += src_reg */
1400 atomic64_add((u64) SRC, (atomic64_t *)(unsigned long)
1405 /* If we ever reach this, we have a bug somewhere. Die hard here
1406 * instead of just returning 0; we could be somewhere in a subprog,
1407 * so execution could continue otherwise which we do /not/ want.
1409 * Note, verifier whitelists all opcodes in bpf_opcode_in_insntable().
1411 pr_warn("BPF interpreter: unknown opcode %02x\n", insn->code);
1415 STACK_FRAME_NON_STANDARD(___bpf_prog_run); /* jump table */
1417 #define PROG_NAME(stack_size) __bpf_prog_run##stack_size
1418 #define DEFINE_BPF_PROG_RUN(stack_size) \
1419 static unsigned int PROG_NAME(stack_size)(const void *ctx, const struct bpf_insn *insn) \
1421 u64 stack[stack_size / sizeof(u64)]; \
1422 u64 regs[MAX_BPF_EXT_REG]; \
1424 FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
1425 ARG1 = (u64) (unsigned long) ctx; \
1426 return ___bpf_prog_run(regs, insn, stack); \
1429 #define PROG_NAME_ARGS(stack_size) __bpf_prog_run_args##stack_size
1430 #define DEFINE_BPF_PROG_RUN_ARGS(stack_size) \
1431 static u64 PROG_NAME_ARGS(stack_size)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5, \
1432 const struct bpf_insn *insn) \
1434 u64 stack[stack_size / sizeof(u64)]; \
1435 u64 regs[MAX_BPF_EXT_REG]; \
1437 FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
1443 return ___bpf_prog_run(regs, insn, stack); \
1446 #define EVAL1(FN, X) FN(X)
1447 #define EVAL2(FN, X, Y...) FN(X) EVAL1(FN, Y)
1448 #define EVAL3(FN, X, Y...) FN(X) EVAL2(FN, Y)
1449 #define EVAL4(FN, X, Y...) FN(X) EVAL3(FN, Y)
1450 #define EVAL5(FN, X, Y...) FN(X) EVAL4(FN, Y)
1451 #define EVAL6(FN, X, Y...) FN(X) EVAL5(FN, Y)
1453 EVAL6(DEFINE_BPF_PROG_RUN, 32, 64, 96, 128, 160, 192);
1454 EVAL6(DEFINE_BPF_PROG_RUN, 224, 256, 288, 320, 352, 384);
1455 EVAL4(DEFINE_BPF_PROG_RUN, 416, 448, 480, 512);
1457 EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 32, 64, 96, 128, 160, 192);
1458 EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 224, 256, 288, 320, 352, 384);
1459 EVAL4(DEFINE_BPF_PROG_RUN_ARGS, 416, 448, 480, 512);
1461 #define PROG_NAME_LIST(stack_size) PROG_NAME(stack_size),
1463 static unsigned int (*interpreters[])(const void *ctx,
1464 const struct bpf_insn *insn) = {
1465 EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
1466 EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
1467 EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
1469 #undef PROG_NAME_LIST
1470 #define PROG_NAME_LIST(stack_size) PROG_NAME_ARGS(stack_size),
1471 static u64 (*interpreters_args[])(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5,
1472 const struct bpf_insn *insn) = {
1473 EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
1474 EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
1475 EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
1477 #undef PROG_NAME_LIST
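/* Worked example (illustrative): a program with aux->stack_depth = 100 gets
 * rounded up to 128, so bpf_prog_select_func() below picks
 * interpreters[(128 / 32) - 1], i.e. __bpf_prog_run128 with a 128-byte
 * on-stack scratch area. The _args variants serve BPF-to-BPF calls patched
 * in via bpf_patch_call_args() below.
 */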
1479 void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth)
1481 stack_depth = max_t(u32, stack_depth, 1);
1482 insn->off = (s16) insn->imm;
1483 insn->imm = interpreters_args[(round_up(stack_depth, 32) / 32) - 1] -
1484 __bpf_call_base_args;
1485 insn->code = BPF_JMP | BPF_CALL_ARGS;
1489 static unsigned int __bpf_prog_ret0_warn(const void *ctx,
1490 const struct bpf_insn *insn)
1492 /* If this handler ever gets executed, then BPF_JIT_ALWAYS_ON
1493 * is not working properly, so warn about it!
1500 bool bpf_prog_array_compatible(struct bpf_array *array,
1501 const struct bpf_prog *fp)
1503 if (fp->kprobe_override)
1506 if (!array->owner_prog_type) {
1507 /* There's no owner yet where we could check for
1510 array->owner_prog_type = fp->type;
1511 array->owner_jited = fp->jited;
1516 return array->owner_prog_type == fp->type &&
1517 array->owner_jited == fp->jited;
1520 static int bpf_check_tail_call(const struct bpf_prog *fp)
1522 struct bpf_prog_aux *aux = fp->aux;
1525 for (i = 0; i < aux->used_map_cnt; i++) {
1526 struct bpf_map *map = aux->used_maps[i];
1527 struct bpf_array *array;
1529 if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
1532 array = container_of(map, struct bpf_array, map);
1533 if (!bpf_prog_array_compatible(array, fp))
1540 static void bpf_prog_select_func(struct bpf_prog *fp)
1542 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
1543 u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1);
1545 fp->bpf_func = interpreters[(round_up(stack_depth, 32) / 32) - 1];
1547 fp->bpf_func = __bpf_prog_ret0_warn;
1552 * bpf_prog_select_runtime - select exec runtime for BPF program
1553 * @fp: bpf_prog populated with internal BPF program
1554 * @err: pointer to error variable
1556 * Try to JIT eBPF program, if JIT is not available, use interpreter.
1557 * The BPF program will be executed via BPF_PROG_RUN() macro.
1559 struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
1561 /* In case of BPF-to-BPF calls, the verifier did all the prep
1562 * work with regard to JITing, etc.
1567 bpf_prog_select_func(fp);
1569 /* eBPF JITs can rewrite the program in case constant
1570 * blinding is active. However, in case of error during
1571 * blinding, bpf_int_jit_compile() must always return a
1572 * valid program, which in this case would simply not
1573 * be JITed, but falls back to the interpreter.
1575 if (!bpf_prog_is_dev_bound(fp->aux)) {
1576 fp = bpf_int_jit_compile(fp);
1577 #ifdef CONFIG_BPF_JIT_ALWAYS_ON
1584 *err = bpf_prog_offload_compile(fp);
1590 bpf_prog_lock_ro(fp);
1592 /* The tail call compatibility check can only be done at
1593 * this late stage, as we need to determine whether we deal
1594 * with JITed or non-JITed program concatenations, and not
1595 * all eBPF JITs might immediately support all features.
1597 *err = bpf_check_tail_call(fp);
1601 EXPORT_SYMBOL_GPL(bpf_prog_select_runtime);
1603 static unsigned int __bpf_prog_ret1(const void *ctx,
1604 const struct bpf_insn *insn)
1609 static struct bpf_prog_dummy {
1610 struct bpf_prog prog;
1611 } dummy_bpf_prog = {
1613 .bpf_func = __bpf_prog_ret1,
1617 /* To avoid allocating an empty bpf_prog_array for cgroups that
1618 * don't have a bpf program attached, use one global 'empty_prog_array'.
1619 * It will not be modified by the caller of bpf_prog_array_alloc()
1620 * (since the caller requested prog_cnt == 0), and
1621 * that pointer should still be 'freed' by bpf_prog_array_free()
1624 struct bpf_prog_array hdr;
1625 struct bpf_prog *null_prog;
1626 } empty_prog_array = {
1630 struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags)
1633 return kzalloc(sizeof(struct bpf_prog_array) +
1634 sizeof(struct bpf_prog_array_item) *
1638 return &empty_prog_array.hdr;
1641 void bpf_prog_array_free(struct bpf_prog_array __rcu *progs)
1644 progs == (struct bpf_prog_array __rcu *)&empty_prog_array.hdr)
1646 kfree_rcu(progs, rcu);
1649 int bpf_prog_array_length(struct bpf_prog_array __rcu *array)
1651 struct bpf_prog_array_item *item;
1655 item = rcu_dereference(array)->items;
1656 for (; item->prog; item++)
1657 if (item->prog != &dummy_bpf_prog.prog)
1664 static bool bpf_prog_array_copy_core(struct bpf_prog_array __rcu *array,
1668 struct bpf_prog_array_item *item;
1671 item = rcu_dereference_check(array, 1)->items;
1672 for (; item->prog; item++) {
1673 if (item->prog == &dummy_bpf_prog.prog)
1675 prog_ids[i] = item->prog->aux->id;
1676 if (++i == request_cnt) {
1682 return !!(item->prog);
1685 int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *array,
1686 __u32 __user *prog_ids, u32 cnt)
1688 unsigned long err = 0;
1692 /* Users of this function are doing:
1693 * cnt = bpf_prog_array_length();
1695 * bpf_prog_array_copy_to_user(..., cnt);
1696 * so the kcalloc() below doesn't need an extra cnt > 0 check. However,
1697 * bpf_prog_array_length() releases the RCU lock, and the
1698 * prog array could have been swapped with an empty or larger array,
1699 * so always copy 'cnt' prog_ids to the user.
1700 * In a rare race the user will see zero prog_ids.
1702 ids = kcalloc(cnt, sizeof(u32), GFP_USER | __GFP_NOWARN);
1706 nospc = bpf_prog_array_copy_core(array, ids, cnt);
1708 err = copy_to_user(prog_ids, ids, cnt * sizeof(u32));
1717 void bpf_prog_array_delete_safe(struct bpf_prog_array __rcu *array,
1718 struct bpf_prog *old_prog)
1720 struct bpf_prog_array_item *item = array->items;
1722 for (; item->prog; item++)
1723 if (item->prog == old_prog) {
1724 WRITE_ONCE(item->prog, &dummy_bpf_prog.prog);
1729 int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array,
1730 struct bpf_prog *exclude_prog,
1731 struct bpf_prog *include_prog,
1732 struct bpf_prog_array **new_array)
1734 int new_prog_cnt, carry_prog_cnt = 0;
1735 struct bpf_prog_array_item *existing;
1736 struct bpf_prog_array *array;
1737 bool found_exclude = false;
1738 int new_prog_idx = 0;
1740 /* Figure out how many existing progs we need to carry over to
1744 existing = old_array->items;
1745 for (; existing->prog; existing++) {
1746 if (existing->prog == exclude_prog) {
1747 found_exclude = true;
1750 if (existing->prog != &dummy_bpf_prog.prog)
1752 if (existing->prog == include_prog)
1757 if (exclude_prog && !found_exclude)
1760 /* How many progs (not NULL) will be in the new array? */
1761 new_prog_cnt = carry_prog_cnt;
1765 /* Do we have any prog (not NULL) in the new array? */
1766 if (!new_prog_cnt) {
1771 /* +1 as the end of prog_array is marked with NULL */
1772 array = bpf_prog_array_alloc(new_prog_cnt + 1, GFP_KERNEL);
1776 /* Fill in the new prog array */
1777 if (carry_prog_cnt) {
1778 existing = old_array->items;
1779 for (; existing->prog; existing++)
1780 if (existing->prog != exclude_prog &&
1781 existing->prog != &dummy_bpf_prog.prog) {
1782 array->items[new_prog_idx++].prog =
1787 array->items[new_prog_idx++].prog = include_prog;
1788 array->items[new_prog_idx].prog = NULL;
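/* Usage sketch (illustrative): callers such as the perf/tracing attach path
 * build a replacement array in one step, e.g.
 *
 *	struct bpf_prog_array *new_array;
 *	int ret = bpf_prog_array_copy(old_array, NULL, new_prog, &new_array);
 *
 * and then publish new_array with rcu_assign_pointer(), freeing the old one
 * via bpf_prog_array_free() after readers are done.
 */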
1793 int bpf_prog_array_copy_info(struct bpf_prog_array __rcu *array,
1794 u32 *prog_ids, u32 request_cnt,
1800 cnt = bpf_prog_array_length(array);
1804 /* return early if user requested only program count or nothing to copy */
1805 if (!request_cnt || !cnt)
1808 /* this function is called under trace/bpf_trace.c: bpf_event_mutex */
1809 return bpf_prog_array_copy_core(array, prog_ids, request_cnt) ? -ENOSPC
1813 static void bpf_prog_free_deferred(struct work_struct *work)
1815 struct bpf_prog_aux *aux;
1818 aux = container_of(work, struct bpf_prog_aux, work);
1819 if (bpf_prog_is_dev_bound(aux))
1820 bpf_prog_offload_destroy(aux->prog);
1821 #ifdef CONFIG_PERF_EVENTS
1822 if (aux->prog->has_callchain_buf)
1823 put_callchain_buffers();
1825 for (i = 0; i < aux->func_cnt; i++)
1826 bpf_jit_free(aux->func[i]);
1827 if (aux->func_cnt) {
1829 bpf_prog_unlock_free(aux->prog);
1831 bpf_jit_free(aux->prog);
1835 /* Free internal BPF program */
1836 void bpf_prog_free(struct bpf_prog *fp)
1838 struct bpf_prog_aux *aux = fp->aux;
1840 INIT_WORK(&aux->work, bpf_prog_free_deferred);
1841 schedule_work(&aux->work);
1843 EXPORT_SYMBOL_GPL(bpf_prog_free);
1845 /* RNG for unprivileged user space, with state separate from prandom_u32(). */
1846 static DEFINE_PER_CPU(struct rnd_state, bpf_user_rnd_state);
1848 void bpf_user_rnd_init_once(void)
1850 prandom_init_once(&bpf_user_rnd_state);
1853 BPF_CALL_0(bpf_user_rnd_u32)
1855 /* Should someone ever have the rather unwise idea to use some
1856 * of the registers passed into this function, then note that
1857 * this function is called from native eBPF and classic-to-eBPF
1858 * transformations. Register assignments from both sides are
1859 * different, f.e. classic always sets fn(ctx, A, X) here.
1861 struct rnd_state *state;
1864 state = &get_cpu_var(bpf_user_rnd_state);
1865 res = prandom_u32_state(state);
1866 put_cpu_var(bpf_user_rnd_state);
1871 /* Weak definitions of helper functions in case we don't have bpf syscall. */
1872 const struct bpf_func_proto bpf_map_lookup_elem_proto __weak;
1873 const struct bpf_func_proto bpf_map_update_elem_proto __weak;
1874 const struct bpf_func_proto bpf_map_delete_elem_proto __weak;
1876 const struct bpf_func_proto bpf_get_prandom_u32_proto __weak;
1877 const struct bpf_func_proto bpf_get_smp_processor_id_proto __weak;
1878 const struct bpf_func_proto bpf_get_numa_node_id_proto __weak;
1879 const struct bpf_func_proto bpf_ktime_get_ns_proto __weak;
1881 const struct bpf_func_proto bpf_get_current_pid_tgid_proto __weak;
1882 const struct bpf_func_proto bpf_get_current_uid_gid_proto __weak;
1883 const struct bpf_func_proto bpf_get_current_comm_proto __weak;
1884 const struct bpf_func_proto bpf_sock_map_update_proto __weak;
1885 const struct bpf_func_proto bpf_sock_hash_update_proto __weak;
1886 const struct bpf_func_proto bpf_get_current_cgroup_id_proto __weak;
1887 const struct bpf_func_proto bpf_get_local_storage_proto __weak;
1889 const struct bpf_func_proto * __weak bpf_get_trace_printk_proto(void)
1895 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
1896 void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
1900 EXPORT_SYMBOL_GPL(bpf_event_output);
1902 /* Always built-in helper functions. */
1903 const struct bpf_func_proto bpf_tail_call_proto = {
1906 .ret_type = RET_VOID,
1907 .arg1_type = ARG_PTR_TO_CTX,
1908 .arg2_type = ARG_CONST_MAP_PTR,
1909 .arg3_type = ARG_ANYTHING,
1912 /* Stub for JITs that only support cBPF. eBPF programs are interpreted.
1913 * It is encouraged to implement bpf_int_jit_compile() instead, so that
1914 * eBPF and implicitly also cBPF can get JITed!
1916 struct bpf_prog * __weak bpf_int_jit_compile(struct bpf_prog *prog)
1921 /* Stub for JITs that support eBPF. All cBPF code gets transformed into
1922 * eBPF by the kernel and is later compiled by bpf_int_jit_compile().
1924 void __weak bpf_jit_compile(struct bpf_prog *prog)
1928 bool __weak bpf_helper_changes_pkt_data(void *func)
1933 /* To execute LD_ABS/LD_IND instructions __bpf_prog_run() may call
1934 * skb_copy_bits(), so provide a weak definition of it for NET-less config.
1936 int __weak skb_copy_bits(const struct sk_buff *skb, int offset, void *to,
1942 /* All definitions of tracepoints related to BPF. */
1943 #define CREATE_TRACE_POINTS
1944 #include <linux/bpf_trace.h>
1946 EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_exception);