// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/*
 * Copyright (C) 2018 Netronome Systems, Inc.
 *
 * This software is dual licensed under the GNU General License Version 2,
 * June 1991 as shown in the file COPYING in the top-level directory of this
 * source tree or the BSD 2-Clause License provided below. You have the
 * option to license this software under the complete terms of either license.
 *
 * The BSD 2-Clause License:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      1. Redistributions of source code must retain the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer.
 *
 *      2. Redistributions in binary form must reproduce the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer in the documentation and/or other materials
 *         provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/list.h>
#include <stdlib.h>
#include <string.h>

#include "cfg.h"
#include "main.h"
#include "xlated_dumper.h"

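/* In-memory CFG representation: a 'struct cfg' holds a list of functions,
 * each function holds an address-ordered list of basic blocks, and each
 * basic block keeps separate lists of predecessor and successor edges.
 */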
struct cfg {
	struct list_head funcs;
	int func_num;
};

struct func_node {
	struct list_head l;
	struct list_head bbs;
	struct bpf_insn *start;
	struct bpf_insn *end;
	int idx;
	int bb_num;
};

struct bb_node {
	struct list_head l;
	struct list_head e_prevs;
	struct list_head e_succs;
	struct bpf_insn *head;
	struct bpf_insn *tail;
	int idx;
};

#define EDGE_FLAG_EMPTY		0x0
#define EDGE_FLAG_FALLTHROUGH	0x1
#define EDGE_FLAG_JUMP		0x2

struct edge_node {
	struct list_head l;
	struct bb_node *src;
	struct bb_node *dst;
	int flags;
};

#define ENTRY_BLOCK_INDEX	0
#define EXIT_BLOCK_INDEX	1
#define NUM_FIXED_BLOCKS	2
#define func_prev(func)		list_prev_entry(func, l)
#define func_next(func)		list_next_entry(func, l)
#define bb_prev(bb)		list_prev_entry(bb, l)
#define bb_next(bb)		list_next_entry(bb, l)
#define entry_bb(func)		func_first_bb(func)
#define exit_bb(func)		func_last_bb(func)
#define cfg_first_func(cfg)	\
	list_first_entry(&cfg->funcs, struct func_node, l)
#define cfg_last_func(cfg)	\
	list_last_entry(&cfg->funcs, struct func_node, l)
#define func_first_bb(func)	\
	list_first_entry(&func->bbs, struct bb_node, l)
#define func_last_bb(func)	\
	list_last_entry(&func->bbs, struct bb_node, l)

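/* Return the function starting at 'insn', creating and inserting a new
 * func_node (kept in ascending order of start address) if none exists yet.
 */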
static struct func_node *cfg_append_func(struct cfg *cfg, struct bpf_insn *insn)
{
	struct func_node *new_func, *func;

	list_for_each_entry(func, &cfg->funcs, l) {
		if (func->start == insn)
			return func;
		else if (func->start > insn)
			break;
	}

	func = func_prev(func);
	new_func = calloc(1, sizeof(*new_func));
	if (!new_func) {
		p_err("OOM when allocating FUNC node");
		return NULL;
	}
	new_func->start = insn;
	new_func->idx = cfg->func_num;
	list_add(&new_func->l, &func->l);
	cfg->func_num++;

	return new_func;
}

static struct bb_node *func_append_bb(struct func_node *func,
				      struct bpf_insn *insn)
{
	struct bb_node *new_bb, *bb;

	list_for_each_entry(bb, &func->bbs, l) {
		if (bb->head == insn)
			return bb;
		else if (bb->head > insn)
			break;
	}

	bb = bb_prev(bb);
	new_bb = calloc(1, sizeof(*new_bb));
	if (!new_bb) {
		p_err("OOM when allocating BB node");
		return NULL;
	}
	new_bb->head = insn;
	INIT_LIST_HEAD(&new_bb->e_prevs);
	INIT_LIST_HEAD(&new_bb->e_succs);
	list_add(&new_bb->l, &bb->l);

	return new_bb;
}

static struct bb_node *func_insert_dummy_bb(struct list_head *after)
{
	struct bb_node *bb;

	bb = calloc(1, sizeof(*bb));
	if (!bb) {
		p_err("OOM when allocating BB node");
		return NULL;
	}
	INIT_LIST_HEAD(&bb->e_prevs);
	INIT_LIST_HEAD(&bb->e_succs);
	list_add(&bb->l, after);

	return bb;
}

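/* Split the instruction stream into functions: the program entry starts the
 * first function, and every BPF_PSEUDO_CALL target starts another one. Each
 * function's 'end' is then set to the instruction preceding the next start.
 */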
static bool cfg_partition_funcs(struct cfg *cfg, struct bpf_insn *cur,
				struct bpf_insn *end)
{
	struct func_node *func, *last_func;

	func = cfg_append_func(cfg, cur);
	if (!func)
		return true;

	for (; cur < end; cur++) {
		if (cur->code != (BPF_JMP | BPF_CALL))
			continue;
		if (cur->src_reg != BPF_PSEUDO_CALL)
			continue;
		func = cfg_append_func(cfg, cur + cur->off + 1);
		if (!func)
			return true;
	}

	last_func = cfg_last_func(cfg);
	last_func->end = end - 1;
	func = cfg_first_func(cfg);
	list_for_each_entry_from(func, &last_func->l, l) {
		func->end = func_next(func)->start - 1;
	}

	return false;
}

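/* First pass over one function: record basic block leaders. A new block
 * starts at each jump target and at the instruction following a conditional
 * jump; BPF_EXIT and BPF_CALL do not terminate a block here.
 */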
static bool func_partition_bb_head(struct func_node *func)
{
	struct bpf_insn *cur, *end;
	struct bb_node *bb;

	cur = func->start;
	end = func->end;
	INIT_LIST_HEAD(&func->bbs);
	bb = func_append_bb(func, cur);
	if (!bb)
		return true;

	for (; cur <= end; cur++) {
		if (BPF_CLASS(cur->code) == BPF_JMP) {
			u8 opcode = BPF_OP(cur->code);

			if (opcode == BPF_EXIT || opcode == BPF_CALL)
				continue;

			bb = func_append_bb(func, cur + cur->off + 1);
			if (!bb)
				return true;

			if (opcode != BPF_JA) {
				bb = func_append_bb(func, cur + 1);
				if (!bb)
					return true;
			}
		}
	}

	return false;
}

static void func_partition_bb_tail(struct func_node *func)
{
	unsigned int bb_idx = NUM_FIXED_BLOCKS;
	struct bb_node *bb, *last;

	last = func_last_bb(func);
	last->tail = func->end;
	bb = func_first_bb(func);
	list_for_each_entry_from(bb, &last->l, l) {
		bb->tail = bb_next(bb)->head - 1;
		bb->idx = bb_idx++;
	}
	last->idx = bb_idx++;
	func->bb_num = bb_idx;
}

static bool func_add_special_bb(struct func_node *func)
{
	struct bb_node *bb;

	bb = func_insert_dummy_bb(&func->bbs);
	if (!bb)
		return true;
	bb->idx = ENTRY_BLOCK_INDEX;

	bb = func_insert_dummy_bb(&func_last_bb(func)->l);
	if (!bb)
		return true;
	bb->idx = EXIT_BLOCK_INDEX;

	return false;
}

static bool func_partition_bb(struct func_node *func)
{
	if (func_partition_bb_head(func))
		return true;

	func_partition_bb_tail(func);

	return false;
}

static struct bb_node *func_search_bb_with_head(struct func_node *func,
						struct bpf_insn *insn)
{
	struct bb_node *bb;

	list_for_each_entry(bb, &func->bbs, l) {
		if (bb->head == insn)
			return bb;
	}

	return NULL;
}

static struct edge_node *new_edge(struct bb_node *src, struct bb_node *dst,
				  int flags)
{
	struct edge_node *e;

	e = calloc(1, sizeof(*e));
	if (!e) {
		p_err("OOM when allocating edge node");
		return NULL;
	}
	e->src = src;
	e->dst = dst;
	e->flags |= flags;

	return e;
}

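/* Connect the blocks: ENTRY falls through to the first real block and the
 * last real block falls through to EXIT. For every other block the edge type
 * is derived from its last instruction: non-jumps and BPF_EXIT fall through,
 * BPF_JA jumps to its target, and conditional jumps get both a fall-through
 * and a jump edge.
 */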
static bool func_add_bb_edges(struct func_node *func)
{
	struct bpf_insn *insn;
	struct edge_node *e;
	struct bb_node *bb;

	bb = entry_bb(func);
	e = new_edge(bb, bb_next(bb), EDGE_FLAG_FALLTHROUGH);
	if (!e)
		return true;
	list_add_tail(&e->l, &bb->e_succs);

	bb = exit_bb(func);
	e = new_edge(bb_prev(bb), bb, EDGE_FLAG_FALLTHROUGH);
	if (!e)
		return true;
	list_add_tail(&e->l, &bb->e_prevs);

	bb = bb_next(entry_bb(func));
	list_for_each_entry_from(bb, &exit_bb(func)->l, l) {
		e = new_edge(bb, NULL, EDGE_FLAG_EMPTY);
		if (!e)
			return true;

		insn = bb->tail;
		if (BPF_CLASS(insn->code) != BPF_JMP ||
		    BPF_OP(insn->code) == BPF_EXIT) {
			e->dst = bb_next(bb);
			e->flags |= EDGE_FLAG_FALLTHROUGH;
			list_add_tail(&e->l, &bb->e_succs);
			continue;
		} else if (BPF_OP(insn->code) == BPF_JA) {
			e->dst = func_search_bb_with_head(func,
							  insn + insn->off + 1);
			e->flags |= EDGE_FLAG_JUMP;
			list_add_tail(&e->l, &bb->e_succs);
			continue;
		}

		/* Conditional jump: fall-through edge plus jump edge. */
		e->dst = bb_next(bb);
		e->flags |= EDGE_FLAG_FALLTHROUGH;
		list_add_tail(&e->l, &bb->e_succs);

		e = new_edge(bb, NULL, EDGE_FLAG_JUMP);
		if (!e)
			return true;
		e->dst = func_search_bb_with_head(func, insn + insn->off + 1);
		list_add_tail(&e->l, &bb->e_succs);
	}

	return false;
}

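/* Build the whole CFG from an xlated instruction buffer: partition into
 * functions, split each function into basic blocks, add the ENTRY/EXIT dummy
 * blocks, then wire up the edges. Returns true on failure.
 */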
static bool cfg_build(struct cfg *cfg, struct bpf_insn *insn, unsigned int len)
{
	int cnt = len / sizeof(*insn);
	struct func_node *func;

	INIT_LIST_HEAD(&cfg->funcs);

	if (cfg_partition_funcs(cfg, insn, insn + cnt))
		return true;

	list_for_each_entry(func, &cfg->funcs, l) {
		if (func_partition_bb(func) || func_add_special_bb(func))
			return true;

		if (func_add_bb_edges(func))
			return true;
	}

	return false;
}

static void cfg_destroy(struct cfg *cfg)
{
	struct func_node *func, *func2;

	list_for_each_entry_safe(func, func2, &cfg->funcs, l) {
		struct bb_node *bb, *bb2;

		list_for_each_entry_safe(bb, bb2, &func->bbs, l) {
			struct edge_node *e, *e2;

			list_for_each_entry_safe(e, e2, &bb->e_prevs, l) {
				list_del(&e->l);
				free(e);
			}
			list_for_each_entry_safe(e, e2, &bb->e_succs, l) {
				list_del(&e->l);
				free(e);
			}
			list_del(&bb->l);
			free(bb);
		}
		list_del(&func->l);
		free(func);
	}
}

static void draw_bb_node(struct func_node *func, struct bb_node *bb)
{
	const char *shape;

	if (bb->idx == ENTRY_BLOCK_INDEX || bb->idx == EXIT_BLOCK_INDEX)
		shape = "Mdiamond";
	else
		shape = "record";

	printf("\tfn_%d_bb_%d [shape=%s,style=filled,label=\"",
	       func->idx, bb->idx, shape);

	if (bb->idx == ENTRY_BLOCK_INDEX) {
		printf("ENTRY");
	} else if (bb->idx == EXIT_BLOCK_INDEX) {
		printf("EXIT");
	} else {
		unsigned int start_idx;
		struct dump_data dd = {};

		printf("{");
		kernel_syms_load(&dd);
		start_idx = bb->head - func->start;
		dump_xlated_for_graph(&dd, bb->head, bb->tail, start_idx);
		kernel_syms_destroy(&dd);
		printf("}");
	}

	printf("\"];\n\n");
}

static void draw_bb_succ_edges(struct func_node *func, struct bb_node *bb)
{
	const char *style = "\"solid,bold\"";
	const char *color = "black";
	int func_idx = func->idx;
	struct edge_node *e;
	int weight = 10;

	if (list_empty(&bb->e_succs))
		return;

	list_for_each_entry(e, &bb->e_succs, l) {
		printf("\tfn_%d_bb_%d:s -> fn_%d_bb_%d:n [style=%s, color=%s, weight=%d, constraint=true",
		       func_idx, e->src->idx, func_idx, e->dst->idx,
		       style, color, weight);
		printf("];\n");
	}
}

static void func_output_bb_def(struct func_node *func)
{
	struct bb_node *bb;

	list_for_each_entry(bb, &func->bbs, l) {
		draw_bb_node(func, bb);
	}
}

static void func_output_edges(struct func_node *func)
{
	int func_idx = func->idx;
	struct bb_node *bb;

	list_for_each_entry(bb, &func->bbs, l) {
		draw_bb_succ_edges(func, bb);
	}

	/* Add an invisible edge from ENTRY to EXIT, this is to
	 * improve the graph layout.
	 */
	printf("\tfn_%d_bb_%d:s -> fn_%d_bb_%d:n [style=\"invis\", constraint=true];\n",
	       func_idx, ENTRY_BLOCK_INDEX, func_idx, EXIT_BLOCK_INDEX);
}

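/* Emit the CFG as DOT: one digraph for the program, one dashed subgraph
 * cluster per function, with nodes and edges produced by the helpers above.
 */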
static void cfg_dump(struct cfg *cfg)
{
	struct func_node *func;

	printf("digraph \"DOT graph for eBPF program\" {\n");
	list_for_each_entry(func, &cfg->funcs, l) {
		printf("subgraph \"cluster_%d\" {\n\tstyle=\"dashed\";\n\tcolor=\"black\";\n\tlabel=\"func_%d ()\";\n",
		       func->idx, func->idx);
		func_output_bb_def(func);
		func_output_edges(func);
		printf("}\n");
	}
	printf("}\n");
}

void dump_xlated_cfg(void *buf, unsigned int len)
{
	struct bpf_insn *insn = buf;
	struct cfg cfg;

	memset(&cfg, 0, sizeof(cfg));
	if (cfg_build(&cfg, insn, len))
		return;

	cfg_dump(&cfg);

	cfg_destroy(&cfg);
}
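
/* Usage sketch (not part of this file): dump_xlated_cfg() backs bpftool's
 * 'visual' mode for xlated dumps, so the DOT output can be rendered with
 * Graphviz, e.g.:
 *
 *   # bpftool prog dump xlated id 40 visual > cfg.dot
 *   $ dot -Tpng cfg.dot -o cfg.png
 *
 * The id value is only an example; use the id of a program loaded on your
 * system.
 */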