/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/bpf.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mmzone.h>
#include <linux/anon_inodes.h>
#include <linux/file.h>
#include <linux/license.h>
#include <linux/filter.h>
#include <linux/version.h>
DEFINE_PER_CPU(int, bpf_prog_active);

int sysctl_unprivileged_bpf_disabled __read_mostly =
	IS_BUILTIN(CONFIG_BPF_UNPRIV_DEFAULT_OFF) ? 2 : 0;

static LIST_HEAD(bpf_map_types);
static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
{
	struct bpf_map_type_list *tl;
	struct bpf_map *map;

	list_for_each_entry(tl, &bpf_map_types, list_node) {
		if (tl->type == attr->map_type) {
			map = tl->ops->map_alloc(attr);
			if (IS_ERR(map))
				return map;
			map->ops = tl->ops;
			map->map_type = attr->map_type;
			return map;
		}
	}
	return ERR_PTR(-EINVAL);
}
/* boot time registration of different map implementations */
void bpf_register_map_type(struct bpf_map_type_list *tl)
{
	list_add(&tl->list_node, &bpf_map_types);
}
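/* Illustrative sketch (not part of this file): a map implementation,
 * e.g. the array map, registers itself at boot roughly like this. The
 * names array_ops/array_type below are placeholders for whatever the
 * implementation actually defines:
 *
 *	static struct bpf_map_type_list array_type __read_mostly = {
 *		.ops = &array_ops,
 *		.type = BPF_MAP_TYPE_ARRAY,
 *	};
 *
 *	static int __init register_array_map(void)
 *	{
 *		bpf_register_map_type(&array_type);
 *		return 0;
 *	}
 *	late_initcall(register_array_map);
 */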
void *bpf_map_area_alloc(size_t size)
{
	/* We definitely need __GFP_NORETRY, so the OOM killer doesn't
	 * trigger under memory pressure as we really just want to
	 * fail the kmalloc attempt and fall back to vmalloc instead.
	 */
	const gfp_t flags = __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO;
	void *area;

	if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
		area = kmalloc(size, GFP_USER | flags);
		if (area != NULL)
			return area;
	}

	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | flags,
			 PAGE_KERNEL);
}

void bpf_map_area_free(void *area)
{
	kvfree(area);
}
int bpf_map_precharge_memlock(u32 pages)
{
	struct user_struct *user = get_current_user();
	unsigned long memlock_limit, cur;

	memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	cur = atomic_long_read(&user->locked_vm);
	free_uid(user);
	if (cur + pages > memlock_limit)
		return -EPERM;
	return 0;
}
static int bpf_map_charge_memlock(struct bpf_map *map)
{
	struct user_struct *user = get_current_user();
	unsigned long memlock_limit;

	memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	atomic_long_add(map->pages, &user->locked_vm);

	if (atomic_long_read(&user->locked_vm) > memlock_limit) {
		atomic_long_sub(map->pages, &user->locked_vm);
		free_uid(user);
		return -EPERM;
	}
	map->user = user;
	return 0;
}

static void bpf_map_uncharge_memlock(struct bpf_map *map)
{
	struct user_struct *user = map->user;

	atomic_long_sub(map->pages, &user->locked_vm);
	free_uid(user);
}
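/* Illustrative sketch (not part of this file): because map pages are
 * charged against RLIMIT_MEMLOCK above, userspace commonly raises the
 * limit before creating maps, e.g.:
 *
 *	#include <sys/resource.h>
 *
 *	struct rlimit r = { RLIM_INFINITY, RLIM_INFINITY };
 *	setrlimit(RLIMIT_MEMLOCK, &r);
 */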
/* called from workqueue */
static void bpf_map_free_deferred(struct work_struct *work)
{
	struct bpf_map *map = container_of(work, struct bpf_map, work);

	bpf_map_uncharge_memlock(map);
	/* implementation dependent freeing */
	map->ops->map_free(map);
}
static void bpf_map_put_uref(struct bpf_map *map)
{
	if (atomic_dec_and_test(&map->usercnt)) {
		if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY)
			bpf_fd_array_map_clear(map);
	}
}
/* decrement map refcnt and schedule it for freeing via workqueue
 * (underlying map implementation ops->map_free() might sleep)
 */
void bpf_map_put(struct bpf_map *map)
{
	if (atomic_dec_and_test(&map->refcnt)) {
		INIT_WORK(&map->work, bpf_map_free_deferred);
		schedule_work(&map->work);
	}
}

void bpf_map_put_with_uref(struct bpf_map *map)
{
	bpf_map_put_uref(map);
	bpf_map_put(map);
}
static int bpf_map_release(struct inode *inode, struct file *filp)
{
	struct bpf_map *map = filp->private_data;

	if (map->ops->map_release)
		map->ops->map_release(map, filp);

	bpf_map_put_with_uref(map);
	return 0;
}
#ifdef CONFIG_PROC_FS
static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
{
	const struct bpf_map *map = filp->private_data;

	seq_printf(m,
		   "map_type:\t%u\n"
		   "key_size:\t%u\n"
		   "value_size:\t%u\n"
		   "max_entries:\t%u\n"
		   "map_flags:\t%#x\n",
		   map->map_type,
		   map->key_size,
		   map->value_size,
		   map->max_entries,
		   map->map_flags);
}
#endif

static const struct file_operations bpf_map_fops = {
#ifdef CONFIG_PROC_FS
	.show_fdinfo	= bpf_map_show_fdinfo,
#endif
	.release	= bpf_map_release,
};

int bpf_map_new_fd(struct bpf_map *map)
{
	return anon_inode_getfd("bpf-map", &bpf_map_fops, map,
				O_RDWR | O_CLOEXEC);
}
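/* With CONFIG_PROC_FS, the attributes printed above can be inspected
 * from userspace (sample output; the values depend on the map):
 *
 *	$ cat /proc/self/fdinfo/<map fd>
 *	map_type:	1
 *	key_size:	4
 *	value_size:	8
 *	max_entries:	256
 *	map_flags:	0x0
 */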
/* helper macro to check that unused fields 'union bpf_attr' are zero */
#define CHECK_ATTR(CMD) \
	memchr_inv((void *) &attr->CMD##_LAST_FIELD + \
		   sizeof(attr->CMD##_LAST_FIELD), 0, \
		   sizeof(*attr) - \
		   offsetof(union bpf_attr, CMD##_LAST_FIELD) - \
		   sizeof(attr->CMD##_LAST_FIELD)) != NULL
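/* Worked example: CHECK_ATTR(BPF_MAP_CREATE), with
 * BPF_MAP_CREATE_LAST_FIELD defined as map_flags below, scans every
 * byte of the union past 'map_flags' and evaluates to true if any of
 * those trailing bytes is non-zero, i.e. if userspace filled in a
 * field this kernel does not know about.
 */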
#define BPF_MAP_CREATE_LAST_FIELD map_flags
/* called via syscall */
static int map_create(union bpf_attr *attr)
{
	struct bpf_map *map;
	int err;

	err = CHECK_ATTR(BPF_MAP_CREATE);
	if (err)
		return -EINVAL;

	/* find map type and init map: hashtable vs rbtree vs bloom vs ... */
	map = find_and_alloc_map(attr);
	if (IS_ERR(map))
		return PTR_ERR(map);

	atomic_set(&map->refcnt, 1);
	atomic_set(&map->usercnt, 1);

	err = bpf_map_charge_memlock(map);
	if (err)
		goto free_map_nouncharge;

	err = bpf_map_new_fd(map);
	if (err < 0)
		/* failed to allocate fd */
		goto free_map;

	return err;

free_map:
	bpf_map_uncharge_memlock(map);
free_map_nouncharge:
	map->ops->map_free(map);
	return err;
}
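/* Illustrative sketch (not part of this file): creating a map from
 * userspace via the raw syscall; there is no glibc wrapper for bpf(2):
 *
 *	#include <unistd.h>
 *	#include <string.h>
 *	#include <sys/syscall.h>
 *	#include <linux/bpf.h>
 *
 *	int create_hash_map(void)
 *	{
 *		union bpf_attr attr;
 *
 *		memset(&attr, 0, sizeof(attr));
 *		attr.map_type = BPF_MAP_TYPE_HASH;
 *		attr.key_size = sizeof(int);
 *		attr.value_size = sizeof(long long);
 *		attr.max_entries = 256;
 *		return syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
 *	}
 */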
/* if error is returned, fd is released.
 * On success caller should complete fd access with matching fdput()
 */
struct bpf_map *__bpf_map_get(struct fd f)
{
	if (!f.file)
		return ERR_PTR(-EBADF);
	if (f.file->f_op != &bpf_map_fops) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}

	return f.file->private_data;
}
/* prog's and map's refcnt limit */
#define BPF_MAX_REFCNT 32768

struct bpf_map *bpf_map_inc(struct bpf_map *map, bool uref)
{
	if (atomic_inc_return(&map->refcnt) > BPF_MAX_REFCNT) {
		atomic_dec(&map->refcnt);
		return ERR_PTR(-EBUSY);
	}
	if (uref)
		atomic_inc(&map->usercnt);
	return map;
}

struct bpf_map *bpf_map_get_with_uref(u32 ufd)
{
	struct fd f = fdget(ufd);
	struct bpf_map *map;

	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return map;

	map = bpf_map_inc(map, true);
	fdput(f);

	return map;
}
/* helper to convert user pointers passed inside __aligned_u64 fields */
static void __user *u64_to_ptr(__u64 val)
{
	return (void __user *) (unsigned long) val;
}

int __weak bpf_stackmap_copy(struct bpf_map *map, void *key, void *value)
{
	return -ENOTSUPP;
}
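/* Userspace performs the inverse of u64_to_ptr() when filling the
 * __aligned_u64 fields of union bpf_attr; samples typically define
 * something like (illustrative):
 *
 *	#define ptr_to_u64(ptr) ((__u64)(unsigned long)(ptr))
 */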
/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_LOOKUP_ELEM_LAST_FIELD value

static int map_lookup_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_ptr(attr->key);
	void __user *uvalue = u64_to_ptr(attr->value);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *value, *ptr;
	u32 value_size;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = -ENOMEM;
	key = kmalloc(map->key_size, GFP_USER);
	if (!key)
		goto err_put;

	err = -EFAULT;
	if (copy_from_user(key, ukey, map->key_size) != 0)
		goto free_key;

	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		value_size = round_up(map->value_size, 8) * num_possible_cpus();
	else
		value_size = map->value_size;

	err = -ENOMEM;
	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		goto free_key;

	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH) {
		err = bpf_percpu_hash_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		err = bpf_percpu_array_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_STACK_TRACE) {
		err = bpf_stackmap_copy(map, key, value);
	} else {
		rcu_read_lock();
		ptr = map->ops->map_lookup_elem(map, key);
		if (ptr)
			memcpy(value, ptr, value_size);
		rcu_read_unlock();
		err = ptr ? 0 : -ENOENT;
	}

	if (err)
		goto free_value;

	err = -EFAULT;
	if (copy_to_user(uvalue, value, value_size) != 0)
		goto free_value;

	err = 0;

free_value:
	kfree(value);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}
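/* Illustrative sketch (not part of this file): the userspace side of a
 * lookup fills the key/value pointers via the ptr_to_u64() idiom above:
 *
 *	int bpf_lookup_elem(int fd, const void *key, void *value)
 *	{
 *		union bpf_attr attr;
 *
 *		memset(&attr, 0, sizeof(attr));
 *		attr.map_fd = fd;
 *		attr.key = ptr_to_u64(key);
 *		attr.value = ptr_to_u64(value);
 *		return syscall(__NR_bpf, BPF_MAP_LOOKUP_ELEM, &attr,
 *			       sizeof(attr));
 *	}
 */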
#define BPF_MAP_UPDATE_ELEM_LAST_FIELD flags

static int map_update_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_ptr(attr->key);
	void __user *uvalue = u64_to_ptr(attr->value);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *value;
	u32 value_size;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_UPDATE_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = -ENOMEM;
	key = kmalloc(map->key_size, GFP_USER);
	if (!key)
		goto err_put;

	err = -EFAULT;
	if (copy_from_user(key, ukey, map->key_size) != 0)
		goto free_key;

	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		value_size = round_up(map->value_size, 8) * num_possible_cpus();
	else
		value_size = map->value_size;

	err = -ENOMEM;
	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		goto free_key;

	err = -EFAULT;
	if (copy_from_user(value, uvalue, value_size) != 0)
		goto free_value;

	/* must increment bpf_prog_active to avoid kprobe+bpf triggering from
	 * inside bpf map update or delete otherwise deadlocks are possible
	 */
	preempt_disable();
	__this_cpu_inc(bpf_prog_active);
	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH) {
		err = bpf_percpu_hash_update(map, key, value, attr->flags);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		err = bpf_percpu_array_update(map, key, value, attr->flags);
	} else if (map->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY ||
		   map->map_type == BPF_MAP_TYPE_PROG_ARRAY ||
		   map->map_type == BPF_MAP_TYPE_CGROUP_ARRAY) {
		rcu_read_lock();
		err = bpf_fd_array_map_update_elem(map, f.file, key, value,
						   attr->flags);
		rcu_read_unlock();
	} else {
		rcu_read_lock();
		err = map->ops->map_update_elem(map, key, value, attr->flags);
		rcu_read_unlock();
	}
	__this_cpu_dec(bpf_prog_active);
	preempt_enable();

free_value:
	kfree(value);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}
#define BPF_MAP_DELETE_ELEM_LAST_FIELD key

static int map_delete_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_ptr(attr->key);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	struct fd f;
	void *key;
	int err;

	if (CHECK_ATTR(BPF_MAP_DELETE_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = -ENOMEM;
	key = kmalloc(map->key_size, GFP_USER);
	if (!key)
		goto err_put;

	err = -EFAULT;
	if (copy_from_user(key, ukey, map->key_size) != 0)
		goto free_key;

	preempt_disable();
	__this_cpu_inc(bpf_prog_active);
	rcu_read_lock();
	err = map->ops->map_delete_elem(map, key);
	rcu_read_unlock();
	__this_cpu_dec(bpf_prog_active);
	preempt_enable();

free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}
/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_GET_NEXT_KEY_LAST_FIELD next_key

static int map_get_next_key(union bpf_attr *attr)
{
	void __user *ukey = u64_to_ptr(attr->key);
	void __user *unext_key = u64_to_ptr(attr->next_key);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *next_key;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_GET_NEXT_KEY))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = -ENOMEM;
	key = kmalloc(map->key_size, GFP_USER);
	if (!key)
		goto err_put;

	err = -EFAULT;
	if (copy_from_user(key, ukey, map->key_size) != 0)
		goto free_key;

	err = -ENOMEM;
	next_key = kmalloc(map->key_size, GFP_USER);
	if (!next_key)
		goto free_key;

	rcu_read_lock();
	err = map->ops->map_get_next_key(map, key, next_key);
	rcu_read_unlock();
	if (err)
		goto free_next_key;

	err = -EFAULT;
	if (copy_to_user(unext_key, next_key, map->key_size) != 0)
		goto free_next_key;

	err = 0;

free_next_key:
	kfree(next_key);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}
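/* Illustrative sketch (not part of this file): userspace walks a map by
 * feeding each returned key back in. On this kernel the first key is
 * commonly obtained by passing a key that is not present in the map
 * (hash map behavior); that detail is an assumption of this sketch:
 *
 *	union bpf_attr attr;
 *	int key = -1, next_key;
 *
 *	memset(&attr, 0, sizeof(attr));
 *	attr.map_fd = fd;
 *	attr.key = ptr_to_u64(&key);
 *	attr.next_key = ptr_to_u64(&next_key);
 *	while (syscall(__NR_bpf, BPF_MAP_GET_NEXT_KEY, &attr,
 *		       sizeof(attr)) == 0) {
 *		process(next_key);
 *		key = next_key;
 *	}
 */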
static LIST_HEAD(bpf_prog_types);

static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog)
{
	struct bpf_prog_type_list *tl;

	list_for_each_entry(tl, &bpf_prog_types, list_node) {
		if (tl->type == type) {
			prog->aux->ops = tl->ops;
			prog->type = type;
			return 0;
		}
	}

	return -EINVAL;
}

void bpf_register_prog_type(struct bpf_prog_type_list *tl)
{
	list_add(&tl->list_node, &bpf_prog_types);
}
/* drop refcnt on maps used by eBPF program and free auxiliary data */
static void free_used_maps(struct bpf_prog_aux *aux)
{
	int i;

	for (i = 0; i < aux->used_map_cnt; i++)
		bpf_map_put(aux->used_maps[i]);

	kfree(aux->used_maps);
}
int __bpf_prog_charge(struct user_struct *user, u32 pages)
{
	unsigned long memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	unsigned long user_bufs;

	if (user) {
		user_bufs = atomic_long_add_return(pages, &user->locked_vm);
		if (user_bufs > memlock_limit) {
			atomic_long_sub(pages, &user->locked_vm);
			return -EPERM;
		}
	}

	return 0;
}

void __bpf_prog_uncharge(struct user_struct *user, u32 pages)
{
	if (user)
		atomic_long_sub(pages, &user->locked_vm);
}
static int bpf_prog_charge_memlock(struct bpf_prog *prog)
{
	struct user_struct *user = get_current_user();
	int ret;

	ret = __bpf_prog_charge(user, prog->pages);
	if (ret) {
		free_uid(user);
		return ret;
	}

	prog->aux->user = user;
	return 0;
}

static void bpf_prog_uncharge_memlock(struct bpf_prog *prog)
{
	struct user_struct *user = prog->aux->user;

	__bpf_prog_uncharge(user, prog->pages);
	free_uid(user);
}
static void __bpf_prog_put_rcu(struct rcu_head *rcu)
{
	struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu);

	free_used_maps(aux);
	bpf_prog_uncharge_memlock(aux->prog);
	bpf_prog_free(aux->prog);
}

void bpf_prog_put(struct bpf_prog *prog)
{
	if (atomic_dec_and_test(&prog->aux->refcnt))
		call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
}
EXPORT_SYMBOL_GPL(bpf_prog_put);
static int bpf_prog_release(struct inode *inode, struct file *filp)
{
	struct bpf_prog *prog = filp->private_data;

	bpf_prog_put(prog);
	return 0;
}

static const struct file_operations bpf_prog_fops = {
	.release = bpf_prog_release,
};

int bpf_prog_new_fd(struct bpf_prog *prog)
{
	return anon_inode_getfd("bpf-prog", &bpf_prog_fops, prog,
				O_RDWR | O_CLOEXEC);
}
static struct bpf_prog *____bpf_prog_get(struct fd f)
{
	if (!f.file)
		return ERR_PTR(-EBADF);
	if (f.file->f_op != &bpf_prog_fops) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}

	return f.file->private_data;
}
struct bpf_prog *bpf_prog_add(struct bpf_prog *prog, int i)
{
	if (atomic_add_return(i, &prog->aux->refcnt) > BPF_MAX_REFCNT) {
		atomic_sub(i, &prog->aux->refcnt);
		return ERR_PTR(-EBUSY);
	}
	return prog;
}
EXPORT_SYMBOL_GPL(bpf_prog_add);

struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog)
{
	return bpf_prog_add(prog, 1);
}
static struct bpf_prog *__bpf_prog_get(u32 ufd, enum bpf_prog_type *type)
{
	struct fd f = fdget(ufd);
	struct bpf_prog *prog;

	prog = ____bpf_prog_get(f);
	if (IS_ERR(prog))
		return prog;
	if (type && prog->type != *type) {
		prog = ERR_PTR(-EINVAL);
		goto out;
	}

	prog = bpf_prog_inc(prog);
out:
	fdput(f);
	return prog;
}

struct bpf_prog *bpf_prog_get(u32 ufd)
{
	return __bpf_prog_get(ufd, NULL);
}

struct bpf_prog *bpf_prog_get_type(u32 ufd, enum bpf_prog_type type)
{
	return __bpf_prog_get(ufd, &type);
}
EXPORT_SYMBOL_GPL(bpf_prog_get_type);
/* last field in 'union bpf_attr' used by this command */
#define BPF_PROG_LOAD_LAST_FIELD kern_version

static int bpf_prog_load(union bpf_attr *attr)
{
	enum bpf_prog_type type = attr->prog_type;
	struct bpf_prog *prog;
	int err;
	char license[128];
	bool is_gpl;

	if (CHECK_ATTR(BPF_PROG_LOAD))
		return -EINVAL;

	/* copy eBPF program license from user space */
	if (strncpy_from_user(license, u64_to_ptr(attr->license),
			      sizeof(license) - 1) < 0)
		return -EFAULT;
	license[sizeof(license) - 1] = 0;

	/* eBPF programs must be GPL compatible to use GPL-ed functions */
	is_gpl = license_is_gpl_compatible(license);

	if (attr->insn_cnt >= BPF_MAXINSNS)
		return -E2BIG;

	if (type == BPF_PROG_TYPE_KPROBE &&
	    attr->kern_version != LINUX_VERSION_CODE)
		return -EINVAL;

	if (type != BPF_PROG_TYPE_SOCKET_FILTER && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	/* plain bpf_prog allocation */
	prog = bpf_prog_alloc(bpf_prog_size(attr->insn_cnt), GFP_USER);
	if (!prog)
		return -ENOMEM;

	err = bpf_prog_charge_memlock(prog);
	if (err)
		goto free_prog_nouncharge;

	prog->len = attr->insn_cnt;

	err = -EFAULT;
	if (copy_from_user(prog->insns, u64_to_ptr(attr->insns),
			   prog->len * sizeof(struct bpf_insn)) != 0)
		goto free_prog;

	prog->orig_prog = NULL;
	prog->jited = 0;

	atomic_set(&prog->aux->refcnt, 1);
	prog->gpl_compatible = is_gpl ? 1 : 0;

	/* find program type: socket_filter vs tracing_filter */
	err = find_prog_type(type, prog);
	if (err < 0)
		goto free_prog;

	/* run eBPF verifier */
	err = bpf_check(&prog, attr);
	if (err < 0)
		goto free_used_maps;

	/* eBPF program is ready to be JITed */
	prog = bpf_prog_select_runtime(prog, &err);
	if (err < 0)
		goto free_used_maps;

	err = bpf_prog_new_fd(prog);
	if (err < 0)
		/* failed to allocate fd */
		goto free_used_maps;

	return err;

free_used_maps:
	free_used_maps(prog->aux);
free_prog:
	bpf_prog_uncharge_memlock(prog);
free_prog_nouncharge:
	bpf_prog_free(prog);
	return err;
}
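/* Illustrative sketch (not part of this file): loading a trivial
 * "return 0" socket filter from userspace. The two instructions are
 * BPF_MOV64_IMM(BPF_REG_0, 0) and BPF_EXIT_INSN() spelled out as raw
 * struct bpf_insn initializers:
 *
 *	struct bpf_insn insns[] = {
 *		{ .code = BPF_ALU64 | BPF_MOV | BPF_K,
 *		  .dst_reg = BPF_REG_0, .src_reg = 0, .off = 0, .imm = 0 },
 *		{ .code = BPF_JMP | BPF_EXIT,
 *		  .dst_reg = 0, .src_reg = 0, .off = 0, .imm = 0 },
 *	};
 *	union bpf_attr attr;
 *	int prog_fd;
 *
 *	memset(&attr, 0, sizeof(attr));
 *	attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
 *	attr.insns = ptr_to_u64(insns);
 *	attr.insn_cnt = 2;
 *	attr.license = ptr_to_u64("GPL");
 *	prog_fd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
 */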
#define BPF_OBJ_LAST_FIELD bpf_fd

static int bpf_obj_pin(const union bpf_attr *attr)
{
	if (CHECK_ATTR(BPF_OBJ))
		return -EINVAL;

	return bpf_obj_pin_user(attr->bpf_fd, u64_to_ptr(attr->pathname));
}

static int bpf_obj_get(const union bpf_attr *attr)
{
	if (CHECK_ATTR(BPF_OBJ) || attr->bpf_fd != 0)
		return -EINVAL;

	return bpf_obj_get_user(u64_to_ptr(attr->pathname));
}
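/* Illustrative sketch (not part of this file): pinning an object and
 * retrieving it later. This assumes a bpffs instance is mounted at
 * /sys/fs/bpf (mount -t bpf bpf /sys/fs/bpf); the path is an example:
 *
 *	memset(&attr, 0, sizeof(attr));
 *	attr.pathname = ptr_to_u64("/sys/fs/bpf/my_map");
 *	attr.bpf_fd = map_fd;
 *	syscall(__NR_bpf, BPF_OBJ_PIN, &attr, sizeof(attr));
 *
 *	memset(&attr, 0, sizeof(attr));
 *	attr.pathname = ptr_to_u64("/sys/fs/bpf/my_map");
 *	new_fd = syscall(__NR_bpf, BPF_OBJ_GET, &attr, sizeof(attr));
 */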
SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size)
{
	union bpf_attr attr;
	int err;

	if (sysctl_unprivileged_bpf_disabled && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!access_ok(VERIFY_READ, uattr, 1))
		return -EFAULT;

	if (size > PAGE_SIZE)	/* silly large */
		return -E2BIG;

	/* If we're handed a bigger struct than we know of,
	 * ensure all the unknown bits are 0 - i.e. new
	 * user-space does not rely on any kernel feature
	 * extensions we don't know about yet.
	 */
	if (size > sizeof(attr)) {
		unsigned char __user *addr;
		unsigned char __user *end;
		unsigned char val;

		addr = (void __user *)uattr + sizeof(attr);
		end  = (void __user *)uattr + size;

		for (; addr < end; addr++) {
			err = get_user(val, addr);
			if (err)
				return err;
			if (val)
				return -E2BIG;
		}
		size = sizeof(attr);
	}

	/* copy attributes from user space, may be less than sizeof(bpf_attr) */
	memset(&attr, 0, sizeof(attr));
	if (copy_from_user(&attr, uattr, size) != 0)
		return -EFAULT;

	switch (cmd) {
	case BPF_MAP_CREATE:
		err = map_create(&attr);
		break;
	case BPF_MAP_LOOKUP_ELEM:
		err = map_lookup_elem(&attr);
		break;
	case BPF_MAP_UPDATE_ELEM:
		err = map_update_elem(&attr);
		break;
	case BPF_MAP_DELETE_ELEM:
		err = map_delete_elem(&attr);
		break;
	case BPF_MAP_GET_NEXT_KEY:
		err = map_get_next_key(&attr);
		break;
	case BPF_PROG_LOAD:
		err = bpf_prog_load(&attr);
		break;
	case BPF_OBJ_PIN:
		err = bpf_obj_pin(&attr);
		break;
	case BPF_OBJ_GET:
		err = bpf_obj_get(&attr);
		break;
	default:
		err = -EINVAL;
		break;
	}

	return err;
}