GNU Linux-libre 4.19.286-gnu1
net/bpf/test_run.c
/* Copyright (c) 2017 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
#include <linux/bpf.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/etherdevice.h>
#include <linux/filter.h>
#include <linux/sched/signal.h>

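/* Run the program once: the cgroup storage is made visible to the
 * program, and the invocation happens under rcu_read_lock() with
 * preemption disabled, mirroring the context BPF programs normally
 * execute in.
 */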
static __always_inline u32 bpf_test_run_one(struct bpf_prog *prog, void *ctx,
                                            struct bpf_cgroup_storage *storage)
{
        u32 ret;

        preempt_disable();
        rcu_read_lock();
        bpf_cgroup_storage_set(storage);
        ret = BPF_PROG_RUN(prog, ctx);
        rcu_read_unlock();
        preempt_enable();

        return ret;
}

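/* Invoke the program @repeat times and report the mean runtime in
 * nanoseconds through @time. The loop yields the CPU when a reschedule
 * is due and aborts early if a signal is pending, so a large repeat
 * count cannot wedge the task.
 */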
static u32 bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, u32 *time)
{
        struct bpf_cgroup_storage *storage = NULL;
        u64 time_start, time_spent = 0;
        u32 ret = 0, i;

        storage = bpf_cgroup_storage_alloc(prog);
        if (IS_ERR(storage))
                return PTR_ERR(storage);

        if (!repeat)
                repeat = 1;
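        /* Only the program invocations are timed; any pause for
         * cond_resched() is accumulated before yielding and excluded
         * from the measurement.
         */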
        time_start = ktime_get_ns();
        for (i = 0; i < repeat; i++) {
                ret = bpf_test_run_one(prog, ctx, storage);
                if (need_resched()) {
                        if (signal_pending(current))
                                break;
                        time_spent += ktime_get_ns() - time_start;
                        cond_resched();
                        time_start = ktime_get_ns();
                }
        }
        time_spent += ktime_get_ns() - time_start;
        do_div(time_spent, repeat);
        *time = time_spent > U32_MAX ? U32_MAX : (u32)time_spent;

        bpf_cgroup_storage_free(storage);

        return ret;
}

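/* Copy the run results back to userspace: the (possibly modified)
 * packet data, its size, the program's return value and the measured
 * duration.
 */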
static int bpf_test_finish(const union bpf_attr *kattr,
                           union bpf_attr __user *uattr, const void *data,
                           u32 size, u32 retval, u32 duration)
{
        void __user *data_out = u64_to_user_ptr(kattr->test.data_out);
        int err = -EFAULT;

        if (data_out && copy_to_user(data_out, data, size))
                goto out;
        if (copy_to_user(&uattr->test.data_size_out, &size, sizeof(size)))
                goto out;
        if (copy_to_user(&uattr->test.retval, &retval, sizeof(retval)))
                goto out;
        if (copy_to_user(&uattr->test.duration, &duration, sizeof(duration)))
                goto out;
        err = 0;
out:
        return err;
}

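/* Validate the user-supplied packet size, allocate a buffer with the
 * requested headroom and tailroom, and copy the packet in from
 * userspace.
 */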
static void *bpf_test_init(const union bpf_attr *kattr, u32 size,
                           u32 headroom, u32 tailroom)
{
        void __user *data_in = u64_to_user_ptr(kattr->test.data_in);
        void *data;

        if (size < ETH_HLEN || size > PAGE_SIZE - headroom - tailroom)
                return ERR_PTR(-EINVAL);

        size = SKB_DATA_ALIGN(size);
        data = kzalloc(size + headroom + tailroom, GFP_USER);
        if (!data)
                return ERR_PTR(-ENOMEM);

        if (copy_from_user(data + headroom, data_in, size)) {
                kfree(data);
                return ERR_PTR(-EFAULT);
        }
        return data;
}

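/* BPF_PROG_TEST_RUN handler for skb-based program types: wrap the
 * user-supplied data in a freshly built sk_buff, run the program, and
 * copy the resulting packet back out to userspace.
 */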
int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
                          union bpf_attr __user *uattr)
{
        bool is_l2 = false, is_direct_pkt_access = false;
        u32 size = kattr->test.data_size_in;
        u32 repeat = kattr->test.repeat;
        u32 retval, duration;
        int hh_len = ETH_HLEN;
        struct sk_buff *skb;
        void *data;
        int ret;

        data = bpf_test_init(kattr, size, NET_SKB_PAD + NET_IP_ALIGN,
                             SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
        if (IS_ERR(data))
                return PTR_ERR(data);

        switch (prog->type) {
        case BPF_PROG_TYPE_SCHED_CLS:
        case BPF_PROG_TYPE_SCHED_ACT:
                is_l2 = true;
                /* fall through */
        case BPF_PROG_TYPE_LWT_IN:
        case BPF_PROG_TYPE_LWT_OUT:
        case BPF_PROG_TYPE_LWT_XMIT:
                is_direct_pkt_access = true;
                break;
        default:
                break;
        }

        skb = build_skb(data, 0);
        if (!skb) {
                kfree(data);
                return -ENOMEM;
        }

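        /* Populate the skb as if the packet had just been received on
         * the loopback device of the current netns.
         */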
        skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
        __skb_put(skb, size);
        skb->protocol = eth_type_trans(skb, current->nsproxy->net_ns->loopback_dev);
        skb_reset_network_header(skb);

        if (is_l2)
                __skb_push(skb, hh_len);
        if (is_direct_pkt_access)
                bpf_compute_data_pointers(skb);
        retval = bpf_test_run(prog, skb, repeat, &duration);
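        /* For non-L2 programs the MAC header was consumed by
         * eth_type_trans() above; push a zeroed one back so userspace
         * always receives a full Ethernet frame, expanding the headroom
         * first if the program shrank it.
         */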
        if (!is_l2) {
                if (skb_headroom(skb) < hh_len) {
                        int nhead = HH_DATA_ALIGN(hh_len - skb_headroom(skb));

                        if (pskb_expand_head(skb, nhead, 0, GFP_USER)) {
                                kfree_skb(skb);
                                return -ENOMEM;
                        }
                }
                memset(__skb_push(skb, hh_len), 0, hh_len);
        }

        size = skb->len;
        /* bpf program can never convert linear skb to non-linear */
        if (WARN_ON_ONCE(skb_is_nonlinear(skb)))
                size = skb_headlen(skb);
        ret = bpf_test_finish(kattr, uattr, skb->data, size, retval, duration);
        kfree_skb(skb);
        return ret;
}

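/* BPF_PROG_TEST_RUN handler for XDP programs: run the program against
 * an xdp_buff built around the user-supplied data and copy the result
 * back out to userspace.
 */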
int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
                          union bpf_attr __user *uattr)
{
        u32 size = kattr->test.data_size_in;
        u32 repeat = kattr->test.repeat;
        struct netdev_rx_queue *rxqueue;
        struct xdp_buff xdp = {};
        u32 retval, duration;
        void *data;
        int ret;

        data = bpf_test_init(kattr, size, XDP_PACKET_HEADROOM + NET_IP_ALIGN, 0);
        if (IS_ERR(data))
                return PTR_ERR(data);

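        /* Build the xdp_buff by hand, leaving the standard XDP headroom
         * in front of the packet so the program can grow it with
         * bpf_xdp_adjust_head().
         */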
        xdp.data_hard_start = data;
        xdp.data = data + XDP_PACKET_HEADROOM + NET_IP_ALIGN;
        xdp.data_meta = xdp.data;
        xdp.data_end = xdp.data + size;

        rxqueue = __netif_get_rx_queue(current->nsproxy->net_ns->loopback_dev, 0);
        xdp.rxq = &rxqueue->xdp_rxq;

        retval = bpf_test_run(prog, &xdp, repeat, &duration);
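        /* If the program moved the packet boundaries (e.g. via
         * bpf_xdp_adjust_head() or bpf_xdp_adjust_tail()), report the
         * adjusted size.
         */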
        if (xdp.data != data + XDP_PACKET_HEADROOM + NET_IP_ALIGN ||
            xdp.data_end != xdp.data + size)
                size = xdp.data_end - xdp.data;
        ret = bpf_test_finish(kattr, uattr, xdp.data, size, retval, duration);
        kfree(data);
        return ret;
}