// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include "kprobe_multi.skel.h"
#include "trace_helpers.h"
#include "bpf/libbpf_internal.h"
static void kprobe_multi_testmod_check(struct kprobe_multi *skel)
{
ASSERT_EQ(skel->bss->kprobe_testmod_test1_result, 1, "kprobe_test1_result");
ASSERT_EQ(skel->bss->kprobe_testmod_test2_result, 1, "kprobe_test2_result");
ASSERT_EQ(skel->bss->kprobe_testmod_test3_result, 1, "kprobe_test3_result");
ASSERT_EQ(skel->bss->kretprobe_testmod_test1_result, 1, "kretprobe_test1_result");
ASSERT_EQ(skel->bss->kretprobe_testmod_test2_result, 1, "kretprobe_test2_result");
ASSERT_EQ(skel->bss->kretprobe_testmod_test3_result, 1, "kretprobe_test3_result");
}
static void test_testmod_attach_api(struct bpf_kprobe_multi_opts *opts)
{
struct kprobe_multi *skel = NULL;
skel = kprobe_multi__open_and_load();
if (!ASSERT_OK_PTR(skel, "fentry_raw_skel_load"))
return;
skel->bss->pid = getpid();
skel->links.test_kprobe_testmod = bpf_program__attach_kprobe_multi_opts(
skel->progs.test_kprobe_testmod,
NULL, opts);
if (!skel->links.test_kprobe_testmod)
goto cleanup;
opts->retprobe = true;
skel->links.test_kretprobe_testmod = bpf_program__attach_kprobe_multi_opts(
skel->progs.test_kretprobe_testmod,
NULL, opts);
if (!skel->links.test_kretprobe_testmod)
goto cleanup;
ASSERT_OK(trigger_module_test_read(1), "trigger_read");
kprobe_multi_testmod_check(skel);
cleanup:
kprobe_multi__destroy(skel);
}
static void test_testmod_attach_api_addrs(void)
{
LIBBPF_OPTS(bpf_kprobe_multi_opts, opts);
unsigned long long addrs[3];
addrs[0] = ksym_get_addr("bpf_testmod_fentry_test1");
ASSERT_NEQ(addrs[0], 0, "ksym_get_addr");
addrs[1] = ksym_get_addr("bpf_testmod_fentry_test2");
ASSERT_NEQ(addrs[1], 0, "ksym_get_addr");
addrs[2] = ksym_get_addr("bpf_testmod_fentry_test3");
ASSERT_NEQ(addrs[2], 0, "ksym_get_addr");
opts.addrs = (const unsigned long *) addrs;
opts.cnt = ARRAY_SIZE(addrs);
test_testmod_attach_api(&opts);
}
static void test_testmod_attach_api_syms(void)
{
LIBBPF_OPTS(bpf_kprobe_multi_opts, opts);
const char *syms[3] = {
"bpf_testmod_fentry_test1",
"bpf_testmod_fentry_test2",
"bpf_testmod_fentry_test3",
};
opts.syms = syms;
opts.cnt = ARRAY_SIZE(syms);
test_testmod_attach_api(&opts);
}
void serial_test_kprobe_multi_testmod_test(void)
{
if (!ASSERT_OK(load_kallsyms_refresh(), "load_kallsyms_refresh"))
return;
if (test__start_subtest("testmod_attach_api_syms"))
test_testmod_attach_api_syms();
if (test__start_subtest("testmod_attach_api_addrs"))
test_testmod_attach_api_addrs();
}
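/*
 * For context: the *_result flags asserted above are set by kprobe.multi
 * programs in kprobe_multi.bpf.c. A minimal sketch of such a handler
 * (illustrative only, not the actual selftest source; it assumes a
 * vmlinux.h generated with bpftool):
 *
 *	#include "vmlinux.h"
 *	#include <bpf/bpf_helpers.h>
 *
 *	int pid = 0;
 *	__u64 kprobe_testmod_test1_result = 0;
 *
 *	SEC("kprobe.multi")
 *	int test_kprobe_testmod(struct pt_regs *ctx)
 *	{
 *		if ((bpf_get_current_pid_tgid() >> 32) != pid)
 *			return 0;
 *		kprobe_testmod_test1_result = 1;
 *		return 0;
 *	}
 *
 *	char LICENSE[] SEC("license") = "GPL";
 */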
/* linux-master: tools/testing/selftests/bpf/prog_tests/kprobe_multi_testmod_test.c */
// SPDX-License-Identifier: GPL-2.0-only
#define _GNU_SOURCE
#include <test_progs.h>
struct inst {
struct bpf_object *obj;
struct bpf_link *link;
};
static struct bpf_program *load_prog(char *file, char *name, struct inst *inst)
{
struct bpf_object *obj;
struct bpf_program *prog;
int err;
obj = bpf_object__open_file(file, NULL);
if (!ASSERT_OK_PTR(obj, "obj_open_file"))
return NULL;
inst->obj = obj;
err = bpf_object__load(obj);
if (!ASSERT_OK(err, "obj_load"))
return NULL;
prog = bpf_object__find_program_by_name(obj, name);
if (!ASSERT_OK_PTR(prog, "obj_find_prog"))
return NULL;
return prog;
}
/* TODO: use different target function to run in concurrent mode */
void serial_test_trampoline_count(void)
{
char *file = "test_trampoline_count.bpf.o";
char *const progs[] = { "fentry_test", "fmod_ret_test", "fexit_test" };
int bpf_max_tramp_links, err, i, prog_fd;
struct bpf_program *prog;
struct bpf_link *link;
struct inst *inst;
LIBBPF_OPTS(bpf_test_run_opts, opts);
bpf_max_tramp_links = get_bpf_max_tramp_links();
if (!ASSERT_GE(bpf_max_tramp_links, 1, "bpf_max_tramp_links"))
return;
inst = calloc(bpf_max_tramp_links + 1, sizeof(*inst));
if (!ASSERT_OK_PTR(inst, "inst"))
return;
/* attach 'allowed' trampoline programs */
for (i = 0; i < bpf_max_tramp_links; i++) {
prog = load_prog(file, progs[i % ARRAY_SIZE(progs)], &inst[i]);
if (!prog)
goto cleanup;
link = bpf_program__attach(prog);
if (!ASSERT_OK_PTR(link, "attach_prog"))
goto cleanup;
inst[i].link = link;
}
/* and try 1 extra.. */
prog = load_prog(file, "fmod_ret_test", &inst[i]);
if (!prog)
goto cleanup;
/* ..that needs to fail */
link = bpf_program__attach(prog);
if (!ASSERT_ERR_PTR(link, "attach_prog")) {
inst[i].link = link;
goto cleanup;
}
/* ..with E2BIG: in libbpf 1.0 mode a failed attach returns NULL and sets
* errno, so libbpf_get_error() on the NULL link yields -errno
*/
if (!ASSERT_EQ(libbpf_get_error(link), -E2BIG, "E2BIG"))
goto cleanup;
if (!ASSERT_EQ(link, NULL, "ptr_is_null"))
goto cleanup;
/* and finally execute the probe */
prog_fd = bpf_program__fd(prog);
if (!ASSERT_GE(prog_fd, 0, "bpf_program__fd"))
goto cleanup;
err = bpf_prog_test_run_opts(prog_fd, &opts);
if (!ASSERT_OK(err, "bpf_prog_test_run_opts"))
goto cleanup;
ASSERT_EQ(opts.retval & 0xffff, 33, "bpf_modify_return_test.result");
ASSERT_EQ(opts.retval >> 16, 2, "bpf_modify_return_test.side_effect");
cleanup:
for (; i >= 0; i--) {
bpf_link__destroy(inst[i].link);
bpf_object__close(inst[i].obj);
}
free(inst);
}
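/*
 * get_bpf_max_tramp_links() comes from testing_helpers.c and discovers the
 * kernel's per-trampoline link limit at run time. A sketch of one way to do
 * that, assuming the limit is visible as the BPF_MAX_TRAMP_LINKS enumerator
 * in vmlinux BTF (the real helper may be implemented differently):
 *
 *	struct btf *btf = btf__load_vmlinux_btf();
 *	int i, j, val = -1, nr = btf__type_cnt(btf);
 *
 *	for (i = 1; i < nr; i++) {
 *		const struct btf_type *t = btf__type_by_id(btf, i);
 *		const struct btf_enum *e;
 *
 *		if (!btf_is_enum(t))
 *			continue;
 *		e = btf_enum(t);
 *		for (j = 0; j < btf_vlen(t); j++)
 *			if (!strcmp(btf__name_by_offset(btf, e[j].name_off),
 *				    "BPF_MAX_TRAMP_LINKS"))
 *				val = e[j].val;
 *	}
 *	btf__free(btf);
 */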
/* linux-master: tools/testing/selftests/bpf/prog_tests/trampoline_count.c */
// SPDX-License-Identifier: GPL-2.0
#include <unistd.h>
#include <pthread.h>
#include <sys/mman.h>
#include <stdatomic.h>
#include <test_progs.h>
#include <sys/syscall.h>
#include <linux/module.h>
#include <linux/userfaultfd.h>
#include "ksym_race.skel.h"
#include "bpf_mod_race.skel.h"
#include "kfunc_call_race.skel.h"
#include "testing_helpers.h"
/* This test crafts a race between btf_try_get_module and do_init_module, and
* checks whether btf_try_get_module handles the invocation for a well-formed
* but uninitialized module correctly. Unless the module has completed its
* initcalls, the verifier should fail the program load and return ENXIO.
*
* userfaultfd is used to trigger a fault in an fmod_ret program and make it
* sleep; then the BPF program is loaded and the return value from the
* verifier is inspected. After this, the userfaultfd is closed so that the
* module loading thread makes forward progress, and fmod_ret injects an
* error so that the module load fails and it is freed.
*
* If the verifier succeeded in loading the supplied program, it would end up
* taking a reference to the freed module and trigger a crash when the program
* fd is closed later. This is true for both kfuncs and ksyms. In both cases,
* the crash is triggered inside bpf_prog_free_deferred, when the module
* reference is finally released.
*/
struct test_config {
const char *str_open;
void *(*bpf_open_and_load)();
void (*bpf_destroy)(void *);
};
enum bpf_test_state {
_TS_INVALID,
TS_MODULE_LOAD,
TS_MODULE_LOAD_FAIL,
};
static _Atomic enum bpf_test_state state = _TS_INVALID;
static void *load_module_thread(void *p)
{
if (!ASSERT_NEQ(load_bpf_testmod(false), 0, "load_module_thread must fail"))
atomic_store(&state, TS_MODULE_LOAD);
else
atomic_store(&state, TS_MODULE_LOAD_FAIL);
return p;
}
static int sys_userfaultfd(int flags)
{
return syscall(__NR_userfaultfd, flags);
}
static int test_setup_uffd(void *fault_addr)
{
struct uffdio_register uffd_register = {};
struct uffdio_api uffd_api = {};
int uffd;
uffd = sys_userfaultfd(O_CLOEXEC);
if (uffd < 0)
return -errno;
uffd_api.api = UFFD_API;
uffd_api.features = 0;
if (ioctl(uffd, UFFDIO_API, &uffd_api)) {
close(uffd);
return -1;
}
uffd_register.range.start = (unsigned long)fault_addr;
uffd_register.range.len = 4096;
uffd_register.mode = UFFDIO_REGISTER_MODE_MISSING;
if (ioctl(uffd, UFFDIO_REGISTER, &uffd_register)) {
close(uffd);
return -1;
}
return uffd;
}
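/*
 * Note that this test never resolves the fault: the fmod_ret program stays
 * blocked until the uffd is closed, which is exactly the race window being
 * exercised. For reference, a cooperative fault handler would resolve a
 * missing page with UFFDIO_COPY, roughly like this (sketch; 'page' is an
 * assumed page-sized, page-aligned source buffer):
 *
 *	struct uffdio_copy copy = {
 *		.dst = uffd_msg.arg.pagefault.address & ~4095UL,
 *		.src = (unsigned long)page,
 *		.len = 4096,
 *	};
 *	ioctl(uffd, UFFDIO_COPY, &copy);
 */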
static void test_bpf_mod_race_config(const struct test_config *config)
{
void *fault_addr, *skel_fail;
struct bpf_mod_race *skel;
struct uffd_msg uffd_msg;
pthread_t load_mod_thrd;
_Atomic int *blockingp;
int uffd, ret;
fault_addr = mmap(0, 4096, PROT_READ, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
if (!ASSERT_NEQ(fault_addr, MAP_FAILED, "mmap for uffd registration"))
return;
if (!ASSERT_OK(unload_bpf_testmod(false), "unload bpf_testmod"))
goto end_mmap;
skel = bpf_mod_race__open();
if (!ASSERT_OK_PTR(skel, "bpf_mod_kfunc_race__open"))
goto end_module;
skel->rodata->bpf_mod_race_config.tgid = getpid();
skel->rodata->bpf_mod_race_config.inject_error = -4242;
skel->rodata->bpf_mod_race_config.fault_addr = fault_addr;
if (!ASSERT_OK(bpf_mod_race__load(skel), "bpf_mod___load"))
goto end_destroy;
blockingp = (_Atomic int *)&skel->bss->bpf_blocking;
if (!ASSERT_OK(bpf_mod_race__attach(skel), "bpf_mod_kfunc_race__attach"))
goto end_destroy;
uffd = test_setup_uffd(fault_addr);
if (!ASSERT_GE(uffd, 0, "userfaultfd open + register address"))
goto end_destroy;
if (!ASSERT_OK(pthread_create(&load_mod_thrd, NULL, load_module_thread, NULL),
"load module thread"))
goto end_uffd;
/* Now we either fail to load the module or block in the bpf prog; spin to find out */
while (!atomic_load(&state) && !atomic_load(blockingp))
;
if (!ASSERT_EQ(state, _TS_INVALID, "module load should block"))
goto end_join;
if (!ASSERT_EQ(*blockingp, 1, "module load blocked")) {
pthread_kill(load_mod_thrd, SIGKILL);
goto end_uffd;
}
/* We might have set bpf_blocking to 1, but may have not blocked in
* bpf_copy_from_user. Read userfaultfd descriptor to verify that.
*/
if (!ASSERT_EQ(read(uffd, &uffd_msg, sizeof(uffd_msg)), sizeof(uffd_msg),
"read uffd block event"))
goto end_join;
if (!ASSERT_EQ(uffd_msg.event, UFFD_EVENT_PAGEFAULT, "read uffd event is pagefault"))
goto end_join;
/* We know that load_mod_thrd is blocked in the fmod_ret program, the
* module state is still MODULE_STATE_COMING because mod->init hasn't
* returned. This is the time we try to load a program calling kfunc and
* check if we get ENXIO from verifier.
*/
skel_fail = config->bpf_open_and_load();
ret = errno;
if (!ASSERT_EQ(skel_fail, NULL, config->str_open)) {
/* Close uffd to unblock load_mod_thrd */
close(uffd);
uffd = -1;
while (atomic_load(blockingp) != 2)
;
ASSERT_OK(kern_sync_rcu(), "kern_sync_rcu");
config->bpf_destroy(skel_fail);
goto end_join;
}
ASSERT_EQ(ret, ENXIO, "verifier returns ENXIO");
ASSERT_EQ(skel->data->res_try_get_module, false, "btf_try_get_module == false");
close(uffd);
uffd = -1;
end_join:
pthread_join(load_mod_thrd, NULL);
if (uffd < 0)
ASSERT_EQ(atomic_load(&state), TS_MODULE_LOAD_FAIL, "load_mod_thrd success");
end_uffd:
if (uffd >= 0)
close(uffd);
end_destroy:
bpf_mod_race__destroy(skel);
ASSERT_OK(kern_sync_rcu(), "kern_sync_rcu");
end_module:
unload_bpf_testmod(false);
ASSERT_OK(load_bpf_testmod(false), "restore bpf_testmod");
end_mmap:
munmap(fault_addr, 4096);
atomic_store(&state, _TS_INVALID);
}
static const struct test_config ksym_config = {
.str_open = "ksym_race__open_and_load",
.bpf_open_and_load = (void *)ksym_race__open_and_load,
.bpf_destroy = (void *)ksym_race__destroy,
};
static const struct test_config kfunc_config = {
.str_open = "kfunc_call_race__open_and_load",
.bpf_open_and_load = (void *)kfunc_call_race__open_and_load,
.bpf_destroy = (void *)kfunc_call_race__destroy,
};
void serial_test_bpf_mod_race(void)
{
if (test__start_subtest("ksym (used_btfs UAF)"))
test_bpf_mod_race_config(&ksym_config);
if (test__start_subtest("kfunc (kfunc_btf_tab UAF)"))
test_bpf_mod_race_config(&kfunc_config);
}
/* linux-master: tools/testing/selftests/bpf/prog_tests/bpf_mod_race.c */
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#include <test_progs.h>
#include <bpf/btf.h>
#include "btf_helpers.h"
static void test_split_simple() {
const struct btf_type *t;
struct btf *btf1, *btf2;
int str_off, err;
btf1 = btf__new_empty();
if (!ASSERT_OK_PTR(btf1, "empty_main_btf"))
return;
btf__set_pointer_size(btf1, 8); /* enforce 64-bit arch */
btf__add_int(btf1, "int", 4, BTF_INT_SIGNED); /* [1] int */
btf__add_ptr(btf1, 1); /* [2] ptr to int */
btf__add_struct(btf1, "s1", 4); /* [3] struct s1 { */
btf__add_field(btf1, "f1", 1, 0, 0); /* int f1; */
/* } */
VALIDATE_RAW_BTF(
btf1,
"[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED",
"[2] PTR '(anon)' type_id=1",
"[3] STRUCT 's1' size=4 vlen=1\n"
"\t'f1' type_id=1 bits_offset=0");
ASSERT_STREQ(btf_type_c_dump(btf1), "\
struct s1 {\n\
int f1;\n\
};\n\n", "c_dump");
btf2 = btf__new_empty_split(btf1);
if (!ASSERT_OK_PTR(btf2, "empty_split_btf"))
goto cleanup;
/* pointer size should be "inherited" from main BTF */
ASSERT_EQ(btf__pointer_size(btf2), 8, "inherit_ptr_sz");
str_off = btf__find_str(btf2, "int");
ASSERT_NEQ(str_off, -ENOENT, "str_int_missing");
t = btf__type_by_id(btf2, 1);
if (!ASSERT_OK_PTR(t, "int_type"))
goto cleanup;
ASSERT_EQ(btf_is_int(t), true, "int_kind");
ASSERT_STREQ(btf__str_by_offset(btf2, t->name_off), "int", "int_name");
btf__add_struct(btf2, "s2", 16); /* [4] struct s2 { */
btf__add_field(btf2, "f1", 6, 0, 0); /* struct s1 f1; */
btf__add_field(btf2, "f2", 5, 32, 0); /* int f2; */
btf__add_field(btf2, "f3", 2, 64, 0); /* int *f3; */
/* } */
/* duplicated int */
btf__add_int(btf2, "int", 4, BTF_INT_SIGNED); /* [5] int */
/* duplicated struct s1 */
btf__add_struct(btf2, "s1", 4); /* [6] struct s1 { */
btf__add_field(btf2, "f1", 5, 0, 0); /* int f1; */
/* } */
VALIDATE_RAW_BTF(
btf2,
"[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED",
"[2] PTR '(anon)' type_id=1",
"[3] STRUCT 's1' size=4 vlen=1\n"
"\t'f1' type_id=1 bits_offset=0",
"[4] STRUCT 's2' size=16 vlen=3\n"
"\t'f1' type_id=6 bits_offset=0\n"
"\t'f2' type_id=5 bits_offset=32\n"
"\t'f3' type_id=2 bits_offset=64",
"[5] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED",
"[6] STRUCT 's1' size=4 vlen=1\n"
"\t'f1' type_id=5 bits_offset=0");
ASSERT_STREQ(btf_type_c_dump(btf2), "\
struct s1 {\n\
int f1;\n\
};\n\
\n\
struct s1___2 {\n\
int f1;\n\
};\n\
\n\
struct s2 {\n\
struct s1___2 f1;\n\
int f2;\n\
int *f3;\n\
};\n\n", "c_dump");
err = btf__dedup(btf2, NULL);
if (!ASSERT_OK(err, "btf_dedup"))
goto cleanup;
VALIDATE_RAW_BTF(
btf2,
"[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED",
"[2] PTR '(anon)' type_id=1",
"[3] STRUCT 's1' size=4 vlen=1\n"
"\t'f1' type_id=1 bits_offset=0",
"[4] STRUCT 's2' size=16 vlen=3\n"
"\t'f1' type_id=3 bits_offset=0\n"
"\t'f2' type_id=1 bits_offset=32\n"
"\t'f3' type_id=2 bits_offset=64");
ASSERT_STREQ(btf_type_c_dump(btf2), "\
struct s1 {\n\
int f1;\n\
};\n\
\n\
struct s2 {\n\
struct s1 f1;\n\
int f2;\n\
int *f3;\n\
};\n\n", "c_dump");
cleanup:
btf__free(btf2);
btf__free(btf1);
}
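/*
 * btf__dedup() also accepts an options struct; passing NULL (as above)
 * selects the defaults. When a .BTF.ext section has to be kept in sync,
 * the dedup can be told about it, e.g. (sketch, assuming the libbpf 1.x
 * btf_dedup_opts fields):
 *
 *	LIBBPF_OPTS(btf_dedup_opts, dedup_opts, .btf_ext = btf_ext);
 *	err = btf__dedup(btf2, &dedup_opts);
 */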
static void test_split_fwd_resolve() {
struct btf *btf1, *btf2;
int err;
btf1 = btf__new_empty();
if (!ASSERT_OK_PTR(btf1, "empty_main_btf"))
return;
btf__set_pointer_size(btf1, 8); /* enforce 64-bit arch */
btf__add_int(btf1, "int", 4, BTF_INT_SIGNED); /* [1] int */
btf__add_ptr(btf1, 4); /* [2] ptr to struct s1 */
btf__add_ptr(btf1, 5); /* [3] ptr to struct s2 */
btf__add_struct(btf1, "s1", 16); /* [4] struct s1 { */
btf__add_field(btf1, "f1", 2, 0, 0); /* struct s1 *f1; */
btf__add_field(btf1, "f2", 3, 64, 0); /* struct s2 *f2; */
/* } */
btf__add_struct(btf1, "s2", 4); /* [5] struct s2 { */
btf__add_field(btf1, "f1", 1, 0, 0); /* int f1; */
/* } */
/* keep this one out of the type graph to test btf_dedup_resolve_fwds */
btf__add_struct(btf1, "s3", 4); /* [6] struct s3 { */
btf__add_field(btf1, "f1", 1, 0, 0); /* int f1; */
/* } */
VALIDATE_RAW_BTF(
btf1,
"[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED",
"[2] PTR '(anon)' type_id=4",
"[3] PTR '(anon)' type_id=5",
"[4] STRUCT 's1' size=16 vlen=2\n"
"\t'f1' type_id=2 bits_offset=0\n"
"\t'f2' type_id=3 bits_offset=64",
"[5] STRUCT 's2' size=4 vlen=1\n"
"\t'f1' type_id=1 bits_offset=0",
"[6] STRUCT 's3' size=4 vlen=1\n"
"\t'f1' type_id=1 bits_offset=0");
btf2 = btf__new_empty_split(btf1);
if (!ASSERT_OK_PTR(btf2, "empty_split_btf"))
goto cleanup;
btf__add_int(btf2, "int", 4, BTF_INT_SIGNED); /* [7] int */
btf__add_ptr(btf2, 11); /* [8] ptr to struct s1 */
btf__add_fwd(btf2, "s2", BTF_FWD_STRUCT); /* [9] fwd for struct s2 */
btf__add_ptr(btf2, 9); /* [10] ptr to fwd struct s2 */
btf__add_struct(btf2, "s1", 16); /* [11] struct s1 { */
btf__add_field(btf2, "f1", 8, 0, 0); /* struct s1 *f1; */
btf__add_field(btf2, "f2", 10, 64, 0); /* struct s2 *f2; */
/* } */
btf__add_fwd(btf2, "s3", BTF_FWD_STRUCT); /* [12] fwd for struct s3 */
btf__add_ptr(btf2, 12); /* [13] ptr to struct s1 */
VALIDATE_RAW_BTF(
btf2,
"[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED",
"[2] PTR '(anon)' type_id=4",
"[3] PTR '(anon)' type_id=5",
"[4] STRUCT 's1' size=16 vlen=2\n"
"\t'f1' type_id=2 bits_offset=0\n"
"\t'f2' type_id=3 bits_offset=64",
"[5] STRUCT 's2' size=4 vlen=1\n"
"\t'f1' type_id=1 bits_offset=0",
"[6] STRUCT 's3' size=4 vlen=1\n"
"\t'f1' type_id=1 bits_offset=0",
"[7] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED",
"[8] PTR '(anon)' type_id=11",
"[9] FWD 's2' fwd_kind=struct",
"[10] PTR '(anon)' type_id=9",
"[11] STRUCT 's1' size=16 vlen=2\n"
"\t'f1' type_id=8 bits_offset=0\n"
"\t'f2' type_id=10 bits_offset=64",
"[12] FWD 's3' fwd_kind=struct",
"[13] PTR '(anon)' type_id=12");
err = btf__dedup(btf2, NULL);
if (!ASSERT_OK(err, "btf_dedup"))
goto cleanup;
VALIDATE_RAW_BTF(
btf2,
"[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED",
"[2] PTR '(anon)' type_id=4",
"[3] PTR '(anon)' type_id=5",
"[4] STRUCT 's1' size=16 vlen=2\n"
"\t'f1' type_id=2 bits_offset=0\n"
"\t'f2' type_id=3 bits_offset=64",
"[5] STRUCT 's2' size=4 vlen=1\n"
"\t'f1' type_id=1 bits_offset=0",
"[6] STRUCT 's3' size=4 vlen=1\n"
"\t'f1' type_id=1 bits_offset=0",
"[7] PTR '(anon)' type_id=6");
cleanup:
btf__free(btf2);
btf__free(btf1);
}
static void test_split_struct_duped() {
struct btf *btf1, *btf2;
int err;
btf1 = btf__new_empty();
if (!ASSERT_OK_PTR(btf1, "empty_main_btf"))
return;
btf__set_pointer_size(btf1, 8); /* enforce 64-bit arch */
btf__add_int(btf1, "int", 4, BTF_INT_SIGNED); /* [1] int */
btf__add_ptr(btf1, 5); /* [2] ptr to struct s1 */
btf__add_fwd(btf1, "s2", BTF_FWD_STRUCT); /* [3] fwd for struct s2 */
btf__add_ptr(btf1, 3); /* [4] ptr to fwd struct s2 */
btf__add_struct(btf1, "s1", 16); /* [5] struct s1 { */
btf__add_field(btf1, "f1", 2, 0, 0); /* struct s1 *f1; */
btf__add_field(btf1, "f2", 4, 64, 0); /* struct s2 *f2; */
/* } */
VALIDATE_RAW_BTF(
btf1,
"[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED",
"[2] PTR '(anon)' type_id=5",
"[3] FWD 's2' fwd_kind=struct",
"[4] PTR '(anon)' type_id=3",
"[5] STRUCT 's1' size=16 vlen=2\n"
"\t'f1' type_id=2 bits_offset=0\n"
"\t'f2' type_id=4 bits_offset=64");
btf2 = btf__new_empty_split(btf1);
if (!ASSERT_OK_PTR(btf2, "empty_split_btf"))
goto cleanup;
btf__add_int(btf2, "int", 4, BTF_INT_SIGNED); /* [6] int */
btf__add_ptr(btf2, 10); /* [7] ptr to struct s1 */
btf__add_fwd(btf2, "s2", BTF_FWD_STRUCT); /* [8] fwd for struct s2 */
btf__add_ptr(btf2, 11); /* [9] ptr to struct s2 */
btf__add_struct(btf2, "s1", 16); /* [10] struct s1 { */
btf__add_field(btf2, "f1", 7, 0, 0); /* struct s1 *f1; */
btf__add_field(btf2, "f2", 9, 64, 0); /* struct s2 *f2; */
/* } */
btf__add_struct(btf2, "s2", 40); /* [11] struct s2 { */
btf__add_field(btf2, "f1", 7, 0, 0); /* struct s1 *f1; */
btf__add_field(btf2, "f2", 9, 64, 0); /* struct s2 *f2; */
btf__add_field(btf2, "f3", 6, 128, 0); /* int f3; */
btf__add_field(btf2, "f4", 10, 192, 0); /* struct s1 f4; */
/* } */
btf__add_ptr(btf2, 8); /* [12] ptr to fwd struct s2 */
btf__add_struct(btf2, "s3", 8); /* [13] struct s3 { */
btf__add_field(btf2, "f1", 12, 0, 0); /* struct s2 *f1; (fwd) */
/* } */
VALIDATE_RAW_BTF(
btf2,
"[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED",
"[2] PTR '(anon)' type_id=5",
"[3] FWD 's2' fwd_kind=struct",
"[4] PTR '(anon)' type_id=3",
"[5] STRUCT 's1' size=16 vlen=2\n"
"\t'f1' type_id=2 bits_offset=0\n"
"\t'f2' type_id=4 bits_offset=64",
"[6] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED",
"[7] PTR '(anon)' type_id=10",
"[8] FWD 's2' fwd_kind=struct",
"[9] PTR '(anon)' type_id=11",
"[10] STRUCT 's1' size=16 vlen=2\n"
"\t'f1' type_id=7 bits_offset=0\n"
"\t'f2' type_id=9 bits_offset=64",
"[11] STRUCT 's2' size=40 vlen=4\n"
"\t'f1' type_id=7 bits_offset=0\n"
"\t'f2' type_id=9 bits_offset=64\n"
"\t'f3' type_id=6 bits_offset=128\n"
"\t'f4' type_id=10 bits_offset=192",
"[12] PTR '(anon)' type_id=8",
"[13] STRUCT 's3' size=8 vlen=1\n"
"\t'f1' type_id=12 bits_offset=0");
err = btf__dedup(btf2, NULL);
if (!ASSERT_OK(err, "btf_dedup"))
goto cleanup;
VALIDATE_RAW_BTF(
btf2,
"[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED",
"[2] PTR '(anon)' type_id=5",
"[3] FWD 's2' fwd_kind=struct",
"[4] PTR '(anon)' type_id=3",
"[5] STRUCT 's1' size=16 vlen=2\n"
"\t'f1' type_id=2 bits_offset=0\n"
"\t'f2' type_id=4 bits_offset=64",
"[6] PTR '(anon)' type_id=8",
"[7] PTR '(anon)' type_id=9",
"[8] STRUCT 's1' size=16 vlen=2\n"
"\t'f1' type_id=6 bits_offset=0\n"
"\t'f2' type_id=7 bits_offset=64",
"[9] STRUCT 's2' size=40 vlen=4\n"
"\t'f1' type_id=6 bits_offset=0\n"
"\t'f2' type_id=7 bits_offset=64\n"
"\t'f3' type_id=1 bits_offset=128\n"
"\t'f4' type_id=8 bits_offset=192",
"[10] STRUCT 's3' size=8 vlen=1\n"
"\t'f1' type_id=7 bits_offset=0");
cleanup:
btf__free(btf2);
btf__free(btf1);
}
static void btf_add_dup_struct_in_cu(struct btf *btf, int start_id)
{
#define ID(n) (start_id + n)
btf__set_pointer_size(btf, 8); /* enforce 64-bit arch */
btf__add_int(btf, "int", 4, BTF_INT_SIGNED); /* [1] int */
btf__add_struct(btf, "s", 8); /* [2] struct s { */
btf__add_field(btf, "a", ID(3), 0, 0); /* struct anon a; */
btf__add_field(btf, "b", ID(4), 0, 0); /* struct anon b; */
/* } */
btf__add_struct(btf, "(anon)", 8); /* [3] struct anon { */
btf__add_field(btf, "f1", ID(1), 0, 0); /* int f1; */
btf__add_field(btf, "f2", ID(1), 32, 0); /* int f2; */
/* } */
btf__add_struct(btf, "(anon)", 8); /* [4] struct anon { */
btf__add_field(btf, "f1", ID(1), 0, 0); /* int f1; */
btf__add_field(btf, "f2", ID(1), 32, 0); /* int f2; */
/* } */
#undef ID
}
static void test_split_dup_struct_in_cu()
{
struct btf *btf1, *btf2 = NULL;
int err;
/* generate the base data.. */
btf1 = btf__new_empty();
if (!ASSERT_OK_PTR(btf1, "empty_main_btf"))
return;
btf_add_dup_struct_in_cu(btf1, 0);
VALIDATE_RAW_BTF(
btf1,
"[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED",
"[2] STRUCT 's' size=8 vlen=2\n"
"\t'a' type_id=3 bits_offset=0\n"
"\t'b' type_id=4 bits_offset=0",
"[3] STRUCT '(anon)' size=8 vlen=2\n"
"\t'f1' type_id=1 bits_offset=0\n"
"\t'f2' type_id=1 bits_offset=32",
"[4] STRUCT '(anon)' size=8 vlen=2\n"
"\t'f1' type_id=1 bits_offset=0\n"
"\t'f2' type_id=1 bits_offset=32");
/* ..dedup them... */
err = btf__dedup(btf1, NULL);
if (!ASSERT_OK(err, "btf_dedup"))
goto cleanup;
VALIDATE_RAW_BTF(
btf1,
"[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED",
"[2] STRUCT 's' size=8 vlen=2\n"
"\t'a' type_id=3 bits_offset=0\n"
"\t'b' type_id=3 bits_offset=0",
"[3] STRUCT '(anon)' size=8 vlen=2\n"
"\t'f1' type_id=1 bits_offset=0\n"
"\t'f2' type_id=1 bits_offset=32");
/* and add the same data on top of it */
btf2 = btf__new_empty_split(btf1);
if (!ASSERT_OK_PTR(btf2, "empty_split_btf"))
goto cleanup;
btf_add_dup_struct_in_cu(btf2, 3);
VALIDATE_RAW_BTF(
btf2,
"[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED",
"[2] STRUCT 's' size=8 vlen=2\n"
"\t'a' type_id=3 bits_offset=0\n"
"\t'b' type_id=3 bits_offset=0",
"[3] STRUCT '(anon)' size=8 vlen=2\n"
"\t'f1' type_id=1 bits_offset=0\n"
"\t'f2' type_id=1 bits_offset=32",
"[4] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED",
"[5] STRUCT 's' size=8 vlen=2\n"
"\t'a' type_id=6 bits_offset=0\n"
"\t'b' type_id=7 bits_offset=0",
"[6] STRUCT '(anon)' size=8 vlen=2\n"
"\t'f1' type_id=4 bits_offset=0\n"
"\t'f2' type_id=4 bits_offset=32",
"[7] STRUCT '(anon)' size=8 vlen=2\n"
"\t'f1' type_id=4 bits_offset=0\n"
"\t'f2' type_id=4 bits_offset=32");
err = btf__dedup(btf2, NULL);
if (!ASSERT_OK(err, "btf_dedup"))
goto cleanup;
/* after dedup it should match the original data */
VALIDATE_RAW_BTF(
btf2,
"[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED",
"[2] STRUCT 's' size=8 vlen=2\n"
"\t'a' type_id=3 bits_offset=0\n"
"\t'b' type_id=3 bits_offset=0",
"[3] STRUCT '(anon)' size=8 vlen=2\n"
"\t'f1' type_id=1 bits_offset=0\n"
"\t'f2' type_id=1 bits_offset=32");
cleanup:
btf__free(btf2);
btf__free(btf1);
}
void test_btf_dedup_split()
{
if (test__start_subtest("split_simple"))
test_split_simple();
if (test__start_subtest("split_struct_duped"))
test_split_struct_duped();
if (test__start_subtest("split_fwd_resolve"))
test_split_fwd_resolve();
if (test__start_subtest("split_dup_struct_in_cu"))
test_split_dup_struct_in_cu();
}
/* linux-master: tools/testing/selftests/bpf/prog_tests/btf_dedup_split.c */
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include "test_subprogs_extable.skel.h"
void test_subprogs_extable(void)
{
const int read_sz = 456;
struct test_subprogs_extable *skel;
int err;
skel = test_subprogs_extable__open_and_load();
if (!ASSERT_OK_PTR(skel, "skel_open_and_load"))
return;
err = test_subprogs_extable__attach(skel);
if (!ASSERT_OK(err, "skel_attach"))
goto cleanup;
/* trigger tracepoint */
ASSERT_OK(trigger_module_test_read(read_sz), "trigger_read");
ASSERT_NEQ(skel->bss->triggered, 0, "verify at least one program ran");
test_subprogs_extable__detach(skel);
cleanup:
test_subprogs_extable__destroy(skel);
}
/* linux-master: tools/testing/selftests/bpf/prog_tests/subprogs_extable.c */
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook */
#include <test_progs.h>
#include <sys/mman.h>
#include <sys/utsname.h>
#include <linux/version.h>
#include "test_core_extern.skel.h"
static uint32_t get_kernel_version(void)
{
uint32_t major, minor, patch;
struct utsname info;
uname(&info);
if (sscanf(info.release, "%u.%u.%u", &major, &minor, &patch) != 3)
return 0;
return KERNEL_VERSION(major, minor, patch);
}
#define CFG "CONFIG_BPF_SYSCALL=n\n"
static struct test_case {
const char *name;
const char *cfg;
bool fails;
struct test_core_extern__data data;
} test_cases[] = {
{ .name = "default search path", .data = { .bpf_syscall = true } },
{
.name = "custom values",
.cfg = "CONFIG_BPF_SYSCALL=n\n"
"CONFIG_TRISTATE=m\n"
"CONFIG_BOOL=y\n"
"CONFIG_CHAR=100\n"
"CONFIG_USHORT=30000\n"
"CONFIG_INT=123456\n"
"CONFIG_ULONG=0xDEADBEEFC0DE\n"
"CONFIG_STR=\"abracad\"\n"
"CONFIG_MISSING=0",
.data = {
.unkn_virt_val = 0,
.bpf_syscall = false,
.tristate_val = TRI_MODULE,
.bool_val = true,
.char_val = 100,
.ushort_val = 30000,
.int_val = 123456,
.ulong_val = 0xDEADBEEFC0DE,
.str_val = "abracad",
},
},
/* TRISTATE */
{ .name = "tristate (y)", .cfg = CFG"CONFIG_TRISTATE=y\n",
.data = { .tristate_val = TRI_YES } },
{ .name = "tristate (n)", .cfg = CFG"CONFIG_TRISTATE=n\n",
.data = { .tristate_val = TRI_NO } },
{ .name = "tristate (m)", .cfg = CFG"CONFIG_TRISTATE=m\n",
.data = { .tristate_val = TRI_MODULE } },
{ .name = "tristate (int)", .fails = 1, .cfg = CFG"CONFIG_TRISTATE=1" },
{ .name = "tristate (bad)", .fails = 1, .cfg = CFG"CONFIG_TRISTATE=M" },
/* BOOL */
{ .name = "bool (y)", .cfg = CFG"CONFIG_BOOL=y\n",
.data = { .bool_val = true } },
{ .name = "bool (n)", .cfg = CFG"CONFIG_BOOL=n\n",
.data = { .bool_val = false } },
{ .name = "bool (tristate)", .fails = 1, .cfg = CFG"CONFIG_BOOL=m" },
{ .name = "bool (int)", .fails = 1, .cfg = CFG"CONFIG_BOOL=1" },
/* CHAR */
{ .name = "char (tristate)", .cfg = CFG"CONFIG_CHAR=m\n",
.data = { .char_val = 'm' } },
{ .name = "char (bad)", .fails = 1, .cfg = CFG"CONFIG_CHAR=q\n" },
{ .name = "char (empty)", .fails = 1, .cfg = CFG"CONFIG_CHAR=\n" },
{ .name = "char (str)", .fails = 1, .cfg = CFG"CONFIG_CHAR=\"y\"\n" },
/* STRING */
{ .name = "str (empty)", .cfg = CFG"CONFIG_STR=\"\"\n",
.data = { .str_val = "\0\0\0\0\0\0\0" } },
{ .name = "str (padded)", .cfg = CFG"CONFIG_STR=\"abra\"\n",
.data = { .str_val = "abra\0\0\0" } },
{ .name = "str (too long)", .cfg = CFG"CONFIG_STR=\"abracada\"\n",
.data = { .str_val = "abracad" } },
{ .name = "str (no value)", .fails = 1, .cfg = CFG"CONFIG_STR=\n" },
{ .name = "str (bad value)", .fails = 1, .cfg = CFG"CONFIG_STR=bla\n" },
/* INTEGERS */
{
.name = "integer forms",
.cfg = CFG
"CONFIG_CHAR=0xA\n"
"CONFIG_USHORT=0462\n"
"CONFIG_INT=-100\n"
"CONFIG_ULONG=+1000000000000",
.data = {
.char_val = 0xA,
.ushort_val = 0462,
.int_val = -100,
.ulong_val = 1000000000000,
},
},
{ .name = "int (bad)", .fails = 1, .cfg = CFG"CONFIG_INT=abc" },
{ .name = "int (str)", .fails = 1, .cfg = CFG"CONFIG_INT=\"abc\"" },
{ .name = "int (empty)", .fails = 1, .cfg = CFG"CONFIG_INT=" },
{ .name = "int (mixed)", .fails = 1, .cfg = CFG"CONFIG_INT=123abc" },
{ .name = "int (max)", .cfg = CFG"CONFIG_INT=2147483647",
.data = { .int_val = 2147483647 } },
{ .name = "int (min)", .cfg = CFG"CONFIG_INT=-2147483648",
.data = { .int_val = -2147483648 } },
{ .name = "int (max+1)", .fails = 1, .cfg = CFG"CONFIG_INT=2147483648" },
{ .name = "int (min-1)", .fails = 1, .cfg = CFG"CONFIG_INT=-2147483649" },
{ .name = "ushort (max)", .cfg = CFG"CONFIG_USHORT=65535",
.data = { .ushort_val = 65535 } },
{ .name = "ushort (min)", .cfg = CFG"CONFIG_USHORT=0",
.data = { .ushort_val = 0 } },
{ .name = "ushort (max+1)", .fails = 1, .cfg = CFG"CONFIG_USHORT=65536" },
{ .name = "ushort (min-1)", .fails = 1, .cfg = CFG"CONFIG_USHORT=-1" },
{ .name = "u64 (max)", .cfg = CFG"CONFIG_ULONG=0xffffffffffffffff",
.data = { .ulong_val = 0xffffffffffffffff } },
{ .name = "u64 (min)", .cfg = CFG"CONFIG_ULONG=0",
.data = { .ulong_val = 0 } },
{ .name = "u64 (max+1)", .fails = 1, .cfg = CFG"CONFIG_ULONG=0x10000000000000000" },
};
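/*
 * The .cfg strings above are passed to libbpf via
 * bpf_object_open_opts.kconfig and materialize as extern values on the BPF
 * side. A sketch of the matching declarations (illustrative; mirrors
 * test_core_extern.bpf.c in spirit, exact names may differ):
 *
 *	extern int LINUX_KERNEL_VERSION __kconfig;
 *	extern bool CONFIG_BPF_SYSCALL __kconfig;
 *	extern enum libbpf_tristate CONFIG_TRISTATE __kconfig __weak;
 *	extern char CONFIG_STR[8] __kconfig __weak;
 *	extern unsigned long CONFIG_ULONG __kconfig __weak;
 */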
void test_core_extern(void)
{
const uint32_t kern_ver = get_kernel_version();
int err, i, j;
struct test_core_extern *skel = NULL;
uint64_t *got, *exp;
int n = sizeof(*skel->data) / sizeof(uint64_t);
for (i = 0; i < ARRAY_SIZE(test_cases); i++) {
struct test_case *t = &test_cases[i];
DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts,
.kconfig = t->cfg,
);
if (!test__start_subtest(t->name))
continue;
skel = test_core_extern__open_opts(&opts);
if (!ASSERT_OK_PTR(skel, "skel_open"))
goto cleanup;
err = test_core_extern__load(skel);
if (t->fails) {
ASSERT_ERR(err, "skel_load_should_fail");
goto cleanup;
} else if (!ASSERT_OK(err, "skel_load")) {
goto cleanup;
}
err = test_core_extern__attach(skel);
if (!ASSERT_OK(err, "attach_raw_tp"))
goto cleanup;
usleep(1);
t->data.kern_ver = kern_ver;
t->data.missing_val = 0xDEADC0DE;
got = (uint64_t *)skel->data;
exp = (uint64_t *)&t->data;
for (j = 0; j < n; j++) {
ASSERT_EQ(got[j], exp[j], "result");
}
cleanup:
test_core_extern__destroy(skel);
skel = NULL;
}
}
/* linux-master: tools/testing/selftests/bpf/prog_tests/core_extern.c */
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include "connect4_dropper.skel.h"
#include "cgroup_helpers.h"
#include "network_helpers.h"
static int run_test(int cgroup_fd, int server_fd, bool classid)
{
struct network_helper_opts opts = {
.must_fail = true,
};
struct connect4_dropper *skel;
int fd, err = 0;
skel = connect4_dropper__open_and_load();
if (!ASSERT_OK_PTR(skel, "skel_open"))
return -1;
skel->links.connect_v4_dropper =
bpf_program__attach_cgroup(skel->progs.connect_v4_dropper,
cgroup_fd);
if (!ASSERT_OK_PTR(skel->links.connect_v4_dropper, "prog_attach")) {
err = -1;
goto out;
}
if (classid && !ASSERT_OK(join_classid(), "join_classid")) {
err = -1;
goto out;
}
fd = connect_to_fd_opts(server_fd, &opts);
if (fd < 0)
err = -1;
else
close(fd);
out:
connect4_dropper__destroy(skel);
return err;
}
void test_cgroup_v1v2(void)
{
struct network_helper_opts opts = {};
int server_fd, client_fd, cgroup_fd;
static const int port = 60120;
/* Step 1: Check base connectivity works without any BPF. */
server_fd = start_server(AF_INET, SOCK_STREAM, NULL, port, 0);
if (!ASSERT_GE(server_fd, 0, "server_fd"))
return;
client_fd = connect_to_fd_opts(server_fd, &opts);
if (!ASSERT_GE(client_fd, 0, "client_fd")) {
close(server_fd);
return;
}
close(client_fd);
close(server_fd);
/* Step 2: Check BPF policy prog attached to cgroups drops connectivity. */
cgroup_fd = test__join_cgroup("/connect_dropper");
if (!ASSERT_GE(cgroup_fd, 0, "cgroup_fd"))
return;
server_fd = start_server(AF_INET, SOCK_STREAM, NULL, port, 0);
if (!ASSERT_GE(server_fd, 0, "server_fd")) {
close(cgroup_fd);
return;
}
ASSERT_OK(run_test(cgroup_fd, server_fd, false), "cgroup-v2-only");
setup_classid_environment();
set_classid(42);
ASSERT_OK(run_test(cgroup_fd, server_fd, true), "cgroup-v1v2");
cleanup_classid_environment();
close(server_fd);
close(cgroup_fd);
}
/* linux-master: tools/testing/selftests/bpf/prog_tests/cgroup_v1v2.c */
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Carlos Neira [email protected] */
#define _GNU_SOURCE
#include <test_progs.h>
#include "test_ns_current_pid_tgid.skel.h"
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <sched.h>
#include <sys/wait.h>
#include <sys/mount.h>
#include <sys/fcntl.h>
#define STACK_SIZE (1024 * 1024)
static char child_stack[STACK_SIZE];
static int test_current_pid_tgid(void *args)
{
struct test_ns_current_pid_tgid__bss *bss;
struct test_ns_current_pid_tgid *skel;
int err = -1, duration = 0;
pid_t tgid, pid;
struct stat st;
skel = test_ns_current_pid_tgid__open_and_load();
if (CHECK(!skel, "skel_open_load", "failed to load skeleton\n"))
goto cleanup;
pid = syscall(SYS_gettid);
tgid = getpid();
err = stat("/proc/self/ns/pid", &st);
if (CHECK(err, "stat", "failed /proc/self/ns/pid: %d\n", err))
goto cleanup;
bss = skel->bss;
bss->dev = st.st_dev;
bss->ino = st.st_ino;
bss->user_pid = 0;
bss->user_tgid = 0;
err = test_ns_current_pid_tgid__attach(skel);
if (CHECK(err, "skel_attach", "skeleton attach failed: %d\n", err))
goto cleanup;
/* trigger tracepoint */
usleep(1);
ASSERT_EQ(bss->user_pid, pid, "pid");
ASSERT_EQ(bss->user_tgid, tgid, "tgid");
err = 0;
cleanup:
test_ns_current_pid_tgid__destroy(skel);
return err;
}
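/*
 * The dev/ino pair stored in the skeleton's BSS above parameterizes the
 * bpf_get_ns_current_pid_tgid() helper on the BPF side, which translates
 * the current task's ids into that pid namespace. Sketch (illustrative;
 * mirrors test_ns_current_pid_tgid.bpf.c in spirit):
 *
 *	struct bpf_pidns_info nsdata;
 *
 *	if (!bpf_get_ns_current_pid_tgid(dev, ino, &nsdata, sizeof(nsdata))) {
 *		user_pid = nsdata.pid;
 *		user_tgid = nsdata.tgid;
 *	}
 */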
static void test_ns_current_pid_tgid_new_ns(void)
{
int wstatus, duration = 0;
pid_t cpid;
/* Create a process in a new pid namespace; as the init process of that
* namespace it will have pid 1.
*/
cpid = clone(test_current_pid_tgid, child_stack + STACK_SIZE,
CLONE_NEWPID | SIGCHLD, NULL);
if (CHECK(cpid == -1, "clone", "%s\n", strerror(errno)))
return;
if (CHECK(waitpid(cpid, &wstatus, 0) == -1, "waitpid", "%s\n", strerror(errno)))
return;
if (CHECK(WEXITSTATUS(wstatus) != 0, "newns_pidtgid", "failed"))
return;
}
/* TODO: use a different tracepoint */
void serial_test_ns_current_pid_tgid(void)
{
if (test__start_subtest("ns_current_pid_tgid_root_ns"))
test_current_pid_tgid(NULL);
if (test__start_subtest("ns_current_pid_tgid_new_ns"))
test_ns_current_pid_tgid_new_ns();
}
/* linux-master: tools/testing/selftests/bpf/prog_tests/ns_current_pid_tgid.c */
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook */
#include <linux/err.h>
#include <netinet/tcp.h>
#include <test_progs.h>
#include "network_helpers.h"
#include "bpf_dctcp.skel.h"
#include "bpf_cubic.skel.h"
#include "bpf_tcp_nogpl.skel.h"
#include "tcp_ca_update.skel.h"
#include "bpf_dctcp_release.skel.h"
#include "tcp_ca_write_sk_pacing.skel.h"
#include "tcp_ca_incompl_cong_ops.skel.h"
#include "tcp_ca_unsupp_cong_op.skel.h"
#ifndef ENOTSUPP
#define ENOTSUPP 524
#endif
static const unsigned int total_bytes = 10 * 1024 * 1024;
static int expected_stg = 0xeB9F;
static int stop, duration;
static int settcpca(int fd, const char *tcp_ca)
{
int err;
err = setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, tcp_ca, strlen(tcp_ca));
if (CHECK(err == -1, "setsockopt(fd, TCP_CONGESTION)", "errno:%d\n",
errno))
return -1;
return 0;
}
static void *server(void *arg)
{
int lfd = (int)(long)arg, err = 0, fd;
ssize_t nr_sent = 0, bytes = 0;
char batch[1500];
/* retry accept() on EINTR instead of spinning forever on a stale fd */
while ((fd = accept(lfd, NULL, NULL)) == -1) {
if (errno == EINTR)
continue;
err = -errno;
goto done;
}
if (settimeo(fd, 0)) {
err = -errno;
goto done;
}
while (bytes < total_bytes && !READ_ONCE(stop)) {
nr_sent = send(fd, &batch,
MIN(total_bytes - bytes, sizeof(batch)), 0);
if (nr_sent == -1 && errno == EINTR)
continue;
if (nr_sent == -1) {
err = -errno;
break;
}
bytes += nr_sent;
}
CHECK(bytes != total_bytes, "send", "%zd != %u nr_sent:%zd errno:%d\n",
bytes, total_bytes, nr_sent, errno);
done:
if (fd >= 0)
close(fd);
if (err) {
WRITE_ONCE(stop, 1);
return ERR_PTR(err);
}
return NULL;
}
static void do_test(const char *tcp_ca, const struct bpf_map *sk_stg_map)
{
struct sockaddr_in6 sa6 = {};
ssize_t nr_recv = 0, bytes = 0;
int lfd = -1, fd = -1;
pthread_t srv_thread;
socklen_t addrlen = sizeof(sa6);
void *thread_ret;
char batch[1500];
int err;
WRITE_ONCE(stop, 0);
lfd = socket(AF_INET6, SOCK_STREAM, 0);
if (CHECK(lfd == -1, "socket", "errno:%d\n", errno))
return;
fd = socket(AF_INET6, SOCK_STREAM, 0);
if (CHECK(fd == -1, "socket", "errno:%d\n", errno)) {
close(lfd);
return;
}
if (settcpca(lfd, tcp_ca) || settcpca(fd, tcp_ca) ||
settimeo(lfd, 0) || settimeo(fd, 0))
goto done;
/* bind, listen and start server thread to accept */
sa6.sin6_family = AF_INET6;
sa6.sin6_addr = in6addr_loopback;
err = bind(lfd, (struct sockaddr *)&sa6, addrlen);
if (CHECK(err == -1, "bind", "errno:%d\n", errno))
goto done;
err = getsockname(lfd, (struct sockaddr *)&sa6, &addrlen);
if (CHECK(err == -1, "getsockname", "errno:%d\n", errno))
goto done;
err = listen(lfd, 1);
if (CHECK(err == -1, "listen", "errno:%d\n", errno))
goto done;
if (sk_stg_map) {
err = bpf_map_update_elem(bpf_map__fd(sk_stg_map), &fd,
&expected_stg, BPF_NOEXIST);
if (CHECK(err, "bpf_map_update_elem(sk_stg_map)",
"err:%d errno:%d\n", err, errno))
goto done;
}
/* connect to server */
err = connect(fd, (struct sockaddr *)&sa6, addrlen);
if (CHECK(err == -1, "connect", "errno:%d\n", errno))
goto done;
if (sk_stg_map) {
int tmp_stg;
err = bpf_map_lookup_elem(bpf_map__fd(sk_stg_map), &fd,
&tmp_stg);
if (CHECK(!err || errno != ENOENT,
"bpf_map_lookup_elem(sk_stg_map)",
"err:%d errno:%d\n", err, errno))
goto done;
}
err = pthread_create(&srv_thread, NULL, server, (void *)(long)lfd);
if (CHECK(err != 0, "pthread_create", "err:%d errno:%d\n", err, errno))
goto done;
/* recv total_bytes */
while (bytes < total_bytes && !READ_ONCE(stop)) {
nr_recv = recv(fd, &batch,
MIN(total_bytes - bytes, sizeof(batch)), 0);
if (nr_recv == -1 && errno == EINTR)
continue;
if (nr_recv == -1)
break;
bytes += nr_recv;
}
CHECK(bytes != total_bytes, "recv", "%zd != %u nr_recv:%zd errno:%d\n",
bytes, total_bytes, nr_recv, errno);
WRITE_ONCE(stop, 1);
pthread_join(srv_thread, &thread_ret);
CHECK(IS_ERR(thread_ret), "pthread_join", "thread_ret:%ld",
PTR_ERR(thread_ret));
done:
close(lfd);
close(fd);
}
static void test_cubic(void)
{
struct bpf_cubic *cubic_skel;
struct bpf_link *link;
cubic_skel = bpf_cubic__open_and_load();
if (CHECK(!cubic_skel, "bpf_cubic__open_and_load", "failed\n"))
return;
link = bpf_map__attach_struct_ops(cubic_skel->maps.cubic);
if (!ASSERT_OK_PTR(link, "bpf_map__attach_struct_ops")) {
bpf_cubic__destroy(cubic_skel);
return;
}
do_test("bpf_cubic", NULL);
bpf_link__destroy(link);
bpf_cubic__destroy(cubic_skel);
}
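/*
 * The 'cubic' map attached above is the BPF-side struct_ops registration.
 * A sketch of how such a map is declared in bpf_cubic.c (abridged; the
 * real initializer wires up more callbacks, and the function names here
 * are illustrative):
 *
 *	SEC(".struct_ops")
 *	struct tcp_congestion_ops cubic = {
 *		.init		= (void *)bpf_cubic_init,
 *		.ssthresh	= (void *)bpf_cubic_recalc_ssthresh,
 *		.cong_avoid	= (void *)bpf_cubic_cong_avoid,
 *		.undo_cwnd	= (void *)bpf_cubic_undo_cwnd,
 *		.name		= "bpf_cubic",
 *	};
 */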
static void test_dctcp(void)
{
struct bpf_dctcp *dctcp_skel;
struct bpf_link *link;
dctcp_skel = bpf_dctcp__open_and_load();
if (CHECK(!dctcp_skel, "bpf_dctcp__open_and_load", "failed\n"))
return;
link = bpf_map__attach_struct_ops(dctcp_skel->maps.dctcp);
if (!ASSERT_OK_PTR(link, "bpf_map__attach_struct_ops")) {
bpf_dctcp__destroy(dctcp_skel);
return;
}
do_test("bpf_dctcp", dctcp_skel->maps.sk_stg_map);
CHECK(dctcp_skel->bss->stg_result != expected_stg,
"Unexpected stg_result", "stg_result (%x) != expected_stg (%x)\n",
dctcp_skel->bss->stg_result, expected_stg);
bpf_link__destroy(link);
bpf_dctcp__destroy(dctcp_skel);
}
static char *err_str;
static bool found;
static int libbpf_debug_print(enum libbpf_print_level level,
const char *format, va_list args)
{
const char *prog_name, *log_buf;
if (level != LIBBPF_WARN ||
!strstr(format, "-- BEGIN PROG LOAD LOG --")) {
vprintf(format, args);
return 0;
}
prog_name = va_arg(args, char *);
log_buf = va_arg(args, char *);
if (!log_buf)
goto out;
if (err_str && strstr(log_buf, err_str) != NULL)
found = true;
out:
printf(format, prog_name, log_buf);
return 0;
}
static void test_invalid_license(void)
{
libbpf_print_fn_t old_print_fn;
struct bpf_tcp_nogpl *skel;
err_str = "struct ops programs must have a GPL compatible license";
found = false;
old_print_fn = libbpf_set_print(libbpf_debug_print);
skel = bpf_tcp_nogpl__open_and_load();
ASSERT_NULL(skel, "bpf_tcp_nogpl");
ASSERT_EQ(found, true, "expected_err_msg");
bpf_tcp_nogpl__destroy(skel);
libbpf_set_print(old_print_fn);
}
static void test_dctcp_fallback(void)
{
int err, lfd = -1, cli_fd = -1, srv_fd = -1;
struct network_helper_opts opts = {
.cc = "cubic",
};
struct bpf_dctcp *dctcp_skel;
struct bpf_link *link = NULL;
char srv_cc[16];
socklen_t cc_len = sizeof(srv_cc);
dctcp_skel = bpf_dctcp__open();
if (!ASSERT_OK_PTR(dctcp_skel, "dctcp_skel"))
return;
strcpy(dctcp_skel->rodata->fallback, "cubic");
if (!ASSERT_OK(bpf_dctcp__load(dctcp_skel), "bpf_dctcp__load"))
goto done;
link = bpf_map__attach_struct_ops(dctcp_skel->maps.dctcp);
if (!ASSERT_OK_PTR(link, "dctcp link"))
goto done;
lfd = start_server(AF_INET6, SOCK_STREAM, "::1", 0, 0);
if (!ASSERT_GE(lfd, 0, "lfd") ||
!ASSERT_OK(settcpca(lfd, "bpf_dctcp"), "lfd=>bpf_dctcp"))
goto done;
cli_fd = connect_to_fd_opts(lfd, &opts);
if (!ASSERT_GE(cli_fd, 0, "cli_fd"))
goto done;
srv_fd = accept(lfd, NULL, 0);
if (!ASSERT_GE(srv_fd, 0, "srv_fd"))
goto done;
ASSERT_STREQ(dctcp_skel->bss->cc_res, "cubic", "cc_res");
ASSERT_EQ(dctcp_skel->bss->tcp_cdg_res, -ENOTSUPP, "tcp_cdg_res");
/* All setsockopt(TCP_CONGESTION) calls made from the recursively
* entered bpf_dctcp->init() should fail with -EBUSY.
*/
ASSERT_EQ(dctcp_skel->bss->ebusy_cnt, 3, "ebusy_cnt");
err = getsockopt(srv_fd, SOL_TCP, TCP_CONGESTION, srv_cc, &cc_len);
if (!ASSERT_OK(err, "getsockopt(srv_fd, TCP_CONGESTION)"))
goto done;
ASSERT_STREQ(srv_cc, "cubic", "srv_fd cc");
done:
bpf_link__destroy(link);
bpf_dctcp__destroy(dctcp_skel);
if (lfd != -1)
close(lfd);
if (srv_fd != -1)
close(srv_fd);
if (cli_fd != -1)
close(cli_fd);
}
static void test_rel_setsockopt(void)
{
struct bpf_dctcp_release *rel_skel;
libbpf_print_fn_t old_print_fn;
err_str = "unknown func bpf_setsockopt";
found = false;
old_print_fn = libbpf_set_print(libbpf_debug_print);
rel_skel = bpf_dctcp_release__open_and_load();
libbpf_set_print(old_print_fn);
ASSERT_ERR_PTR(rel_skel, "rel_skel");
ASSERT_TRUE(found, "expected_err_msg");
bpf_dctcp_release__destroy(rel_skel);
}
static void test_write_sk_pacing(void)
{
struct tcp_ca_write_sk_pacing *skel;
struct bpf_link *link;
skel = tcp_ca_write_sk_pacing__open_and_load();
if (!ASSERT_OK_PTR(skel, "open_and_load"))
return;
link = bpf_map__attach_struct_ops(skel->maps.write_sk_pacing);
ASSERT_OK_PTR(link, "attach_struct_ops");
bpf_link__destroy(link);
tcp_ca_write_sk_pacing__destroy(skel);
}
static void test_incompl_cong_ops(void)
{
struct tcp_ca_incompl_cong_ops *skel;
struct bpf_link *link;
skel = tcp_ca_incompl_cong_ops__open_and_load();
if (!ASSERT_OK_PTR(skel, "open_and_load"))
return;
/* That cong_avoid() and cong_control() are missing is only reported at
* this point:
*/
link = bpf_map__attach_struct_ops(skel->maps.incompl_cong_ops);
ASSERT_ERR_PTR(link, "attach_struct_ops");
bpf_link__destroy(link);
tcp_ca_incompl_cong_ops__destroy(skel);
}
static void test_unsupp_cong_op(void)
{
libbpf_print_fn_t old_print_fn;
struct tcp_ca_unsupp_cong_op *skel;
err_str = "attach to unsupported member get_info";
found = false;
old_print_fn = libbpf_set_print(libbpf_debug_print);
skel = tcp_ca_unsupp_cong_op__open_and_load();
ASSERT_NULL(skel, "open_and_load");
ASSERT_EQ(found, true, "expected_err_msg");
tcp_ca_unsupp_cong_op__destroy(skel);
libbpf_set_print(old_print_fn);
}
static void test_update_ca(void)
{
struct tcp_ca_update *skel;
struct bpf_link *link;
int saved_ca1_cnt;
int err;
skel = tcp_ca_update__open_and_load();
if (!ASSERT_OK_PTR(skel, "open"))
return;
link = bpf_map__attach_struct_ops(skel->maps.ca_update_1);
ASSERT_OK_PTR(link, "attach_struct_ops");
do_test("tcp_ca_update", NULL);
saved_ca1_cnt = skel->bss->ca1_cnt;
ASSERT_GT(saved_ca1_cnt, 0, "ca1_ca1_cnt");
err = bpf_link__update_map(link, skel->maps.ca_update_2);
ASSERT_OK(err, "update_map");
do_test("tcp_ca_update", NULL);
ASSERT_EQ(skel->bss->ca1_cnt, saved_ca1_cnt, "ca2_ca1_cnt");
ASSERT_GT(skel->bss->ca2_cnt, 0, "ca2_ca2_cnt");
bpf_link__destroy(link);
tcp_ca_update__destroy(skel);
}
static void test_update_wrong(void)
{
struct tcp_ca_update *skel;
struct bpf_link *link;
int saved_ca1_cnt;
int err;
skel = tcp_ca_update__open_and_load();
if (!ASSERT_OK_PTR(skel, "open"))
return;
link = bpf_map__attach_struct_ops(skel->maps.ca_update_1);
ASSERT_OK_PTR(link, "attach_struct_ops");
do_test("tcp_ca_update", NULL);
saved_ca1_cnt = skel->bss->ca1_cnt;
ASSERT_GT(saved_ca1_cnt, 0, "ca1_ca1_cnt");
err = bpf_link__update_map(link, skel->maps.ca_wrong);
ASSERT_ERR(err, "update_map");
do_test("tcp_ca_update", NULL);
ASSERT_GT(skel->bss->ca1_cnt, saved_ca1_cnt, "ca2_ca1_cnt");
bpf_link__destroy(link);
tcp_ca_update__destroy(skel);
}
static void test_mixed_links(void)
{
struct tcp_ca_update *skel;
struct bpf_link *link, *link_nl;
int err;
skel = tcp_ca_update__open_and_load();
if (!ASSERT_OK_PTR(skel, "open"))
return;
link_nl = bpf_map__attach_struct_ops(skel->maps.ca_no_link);
ASSERT_OK_PTR(link_nl, "attach_struct_ops_nl");
link = bpf_map__attach_struct_ops(skel->maps.ca_update_1);
ASSERT_OK_PTR(link, "attach_struct_ops");
do_test("tcp_ca_update", NULL);
ASSERT_GT(skel->bss->ca1_cnt, 0, "ca1_ca1_cnt");
err = bpf_link__update_map(link, skel->maps.ca_no_link);
ASSERT_ERR(err, "update_map");
bpf_link__destroy(link);
bpf_link__destroy(link_nl);
tcp_ca_update__destroy(skel);
}
static void test_multi_links(void)
{
struct tcp_ca_update *skel;
struct bpf_link *link;
skel = tcp_ca_update__open_and_load();
if (!ASSERT_OK_PTR(skel, "open"))
return;
link = bpf_map__attach_struct_ops(skel->maps.ca_update_1);
ASSERT_OK_PTR(link, "attach_struct_ops_1st");
bpf_link__destroy(link);
/* The same map should be usable for creating links multiple
* times.
*/
link = bpf_map__attach_struct_ops(skel->maps.ca_update_1);
ASSERT_OK_PTR(link, "attach_struct_ops_2nd");
bpf_link__destroy(link);
tcp_ca_update__destroy(skel);
}
static void test_link_replace(void)
{
DECLARE_LIBBPF_OPTS(bpf_link_update_opts, opts);
struct tcp_ca_update *skel;
struct bpf_link *link;
int err;
skel = tcp_ca_update__open_and_load();
if (!ASSERT_OK_PTR(skel, "open"))
return;
link = bpf_map__attach_struct_ops(skel->maps.ca_update_1);
ASSERT_OK_PTR(link, "attach_struct_ops_1st");
bpf_link__destroy(link);
link = bpf_map__attach_struct_ops(skel->maps.ca_update_2);
ASSERT_OK_PTR(link, "attach_struct_ops_2nd");
/* BPF_F_REPLACE with a wrong old map fd. It should fail!
*
* With BPF_F_REPLACE, the link should be updated only if the
* old map fd given here matches the map backing the link.
*/
opts.old_map_fd = bpf_map__fd(skel->maps.ca_update_1);
opts.flags = BPF_F_REPLACE;
err = bpf_link_update(bpf_link__fd(link),
bpf_map__fd(skel->maps.ca_update_1),
&opts);
ASSERT_ERR(err, "bpf_link_update_fail");
/* BPF_F_REPLACE with the correct old map fd. It should succeed! */
opts.old_map_fd = bpf_map__fd(skel->maps.ca_update_2);
err = bpf_link_update(bpf_link__fd(link),
bpf_map__fd(skel->maps.ca_update_1),
&opts);
ASSERT_OK(err, "bpf_link_update_success");
bpf_link__destroy(link);
tcp_ca_update__destroy(skel);
}
void test_bpf_tcp_ca(void)
{
if (test__start_subtest("dctcp"))
test_dctcp();
if (test__start_subtest("cubic"))
test_cubic();
if (test__start_subtest("invalid_license"))
test_invalid_license();
if (test__start_subtest("dctcp_fallback"))
test_dctcp_fallback();
if (test__start_subtest("rel_setsockopt"))
test_rel_setsockopt();
if (test__start_subtest("write_sk_pacing"))
test_write_sk_pacing();
if (test__start_subtest("incompl_cong_ops"))
test_incompl_cong_ops();
if (test__start_subtest("unsupp_cong_op"))
test_unsupp_cong_op();
if (test__start_subtest("update_ca"))
test_update_ca();
if (test__start_subtest("update_wrong"))
test_update_wrong();
if (test__start_subtest("mixed_links"))
test_mixed_links();
if (test__start_subtest("multi_links"))
test_multi_links();
if (test__start_subtest("link_replace"))
test_link_replace();
}
/* linux-master: tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c */
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Facebook */
#define _GNU_SOURCE
#include <pthread.h>
#include <sched.h>
#include <sys/syscall.h>
#include <sys/mman.h>
#include <unistd.h>
#include <test_progs.h>
#include <network_helpers.h>
#include <bpf/btf.h>
#include "test_bpf_cookie.skel.h"
#include "kprobe_multi.skel.h"
#include "uprobe_multi.skel.h"
/* uprobe attach point */
static noinline void trigger_func(void)
{
asm volatile ("");
}
static void kprobe_subtest(struct test_bpf_cookie *skel)
{
DECLARE_LIBBPF_OPTS(bpf_kprobe_opts, opts);
struct bpf_link *link1 = NULL, *link2 = NULL;
struct bpf_link *retlink1 = NULL, *retlink2 = NULL;
/* attach two kprobes */
opts.bpf_cookie = 0x1;
opts.retprobe = false;
link1 = bpf_program__attach_kprobe_opts(skel->progs.handle_kprobe,
SYS_NANOSLEEP_KPROBE_NAME, &opts);
if (!ASSERT_OK_PTR(link1, "link1"))
goto cleanup;
opts.bpf_cookie = 0x2;
opts.retprobe = false;
link2 = bpf_program__attach_kprobe_opts(skel->progs.handle_kprobe,
SYS_NANOSLEEP_KPROBE_NAME, &opts);
if (!ASSERT_OK_PTR(link2, "link2"))
goto cleanup;
/* attach two kretprobes */
opts.bpf_cookie = 0x10;
opts.retprobe = true;
retlink1 = bpf_program__attach_kprobe_opts(skel->progs.handle_kretprobe,
SYS_NANOSLEEP_KPROBE_NAME, &opts);
if (!ASSERT_OK_PTR(retlink1, "retlink1"))
goto cleanup;
opts.bpf_cookie = 0x20;
opts.retprobe = true;
retlink2 = bpf_program__attach_kprobe_opts(skel->progs.handle_kretprobe,
SYS_NANOSLEEP_KPROBE_NAME, &opts);
if (!ASSERT_OK_PTR(retlink2, "retlink2"))
goto cleanup;
/* trigger kprobe && kretprobe */
usleep(1);
ASSERT_EQ(skel->bss->kprobe_res, 0x1 | 0x2, "kprobe_res");
ASSERT_EQ(skel->bss->kretprobe_res, 0x10 | 0x20, "kretprobe_res");
cleanup:
bpf_link__destroy(link1);
bpf_link__destroy(link2);
bpf_link__destroy(retlink1);
bpf_link__destroy(retlink2);
}
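/*
 * On the BPF side, each attachment's cookie is read back with the
 * bpf_get_attach_cookie() helper and OR-ed into a result variable, which
 * is why the assertions above expect 0x1 | 0x2 and 0x10 | 0x20. Sketch
 * (illustrative of what test_bpf_cookie.bpf.c does in spirit):
 *
 *	SEC("kprobe")
 *	int handle_kprobe(struct pt_regs *ctx)
 *	{
 *		kprobe_res |= bpf_get_attach_cookie(ctx);
 *		return 0;
 *	}
 */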
static void kprobe_multi_test_run(struct kprobe_multi *skel)
{
LIBBPF_OPTS(bpf_test_run_opts, topts);
int err, prog_fd;
prog_fd = bpf_program__fd(skel->progs.trigger);
err = bpf_prog_test_run_opts(prog_fd, &topts);
ASSERT_OK(err, "test_run");
ASSERT_EQ(topts.retval, 0, "test_run");
ASSERT_EQ(skel->bss->kprobe_test1_result, 1, "kprobe_test1_result");
ASSERT_EQ(skel->bss->kprobe_test2_result, 1, "kprobe_test2_result");
ASSERT_EQ(skel->bss->kprobe_test3_result, 1, "kprobe_test3_result");
ASSERT_EQ(skel->bss->kprobe_test4_result, 1, "kprobe_test4_result");
ASSERT_EQ(skel->bss->kprobe_test5_result, 1, "kprobe_test5_result");
ASSERT_EQ(skel->bss->kprobe_test6_result, 1, "kprobe_test6_result");
ASSERT_EQ(skel->bss->kprobe_test7_result, 1, "kprobe_test7_result");
ASSERT_EQ(skel->bss->kprobe_test8_result, 1, "kprobe_test8_result");
ASSERT_EQ(skel->bss->kretprobe_test1_result, 1, "kretprobe_test1_result");
ASSERT_EQ(skel->bss->kretprobe_test2_result, 1, "kretprobe_test2_result");
ASSERT_EQ(skel->bss->kretprobe_test3_result, 1, "kretprobe_test3_result");
ASSERT_EQ(skel->bss->kretprobe_test4_result, 1, "kretprobe_test4_result");
ASSERT_EQ(skel->bss->kretprobe_test5_result, 1, "kretprobe_test5_result");
ASSERT_EQ(skel->bss->kretprobe_test6_result, 1, "kretprobe_test6_result");
ASSERT_EQ(skel->bss->kretprobe_test7_result, 1, "kretprobe_test7_result");
ASSERT_EQ(skel->bss->kretprobe_test8_result, 1, "kretprobe_test8_result");
}
static void kprobe_multi_link_api_subtest(void)
{
int prog_fd, link1_fd = -1, link2_fd = -1;
struct kprobe_multi *skel = NULL;
LIBBPF_OPTS(bpf_link_create_opts, opts);
unsigned long long addrs[8];
__u64 cookies[8];
if (!ASSERT_OK(load_kallsyms(), "load_kallsyms"))
goto cleanup;
skel = kprobe_multi__open_and_load();
if (!ASSERT_OK_PTR(skel, "fentry_raw_skel_load"))
goto cleanup;
skel->bss->pid = getpid();
skel->bss->test_cookie = true;
#define GET_ADDR(__sym, __addr) ({ \
__addr = ksym_get_addr(__sym); \
if (!ASSERT_NEQ(__addr, 0, "ksym_get_addr " #__sym)) \
goto cleanup; \
})
GET_ADDR("bpf_fentry_test1", addrs[0]);
GET_ADDR("bpf_fentry_test3", addrs[1]);
GET_ADDR("bpf_fentry_test4", addrs[2]);
GET_ADDR("bpf_fentry_test5", addrs[3]);
GET_ADDR("bpf_fentry_test6", addrs[4]);
GET_ADDR("bpf_fentry_test7", addrs[5]);
GET_ADDR("bpf_fentry_test2", addrs[6]);
GET_ADDR("bpf_fentry_test8", addrs[7]);
#undef GET_ADDR
cookies[0] = 1; /* bpf_fentry_test1 */
cookies[1] = 2; /* bpf_fentry_test3 */
cookies[2] = 3; /* bpf_fentry_test4 */
cookies[3] = 4; /* bpf_fentry_test5 */
cookies[4] = 5; /* bpf_fentry_test6 */
cookies[5] = 6; /* bpf_fentry_test7 */
cookies[6] = 7; /* bpf_fentry_test2 */
cookies[7] = 8; /* bpf_fentry_test8 */
opts.kprobe_multi.addrs = (const unsigned long *) &addrs;
opts.kprobe_multi.cnt = ARRAY_SIZE(addrs);
opts.kprobe_multi.cookies = (const __u64 *) &cookies;
prog_fd = bpf_program__fd(skel->progs.test_kprobe);
link1_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_KPROBE_MULTI, &opts);
if (!ASSERT_GE(link1_fd, 0, "link1_fd"))
goto cleanup;
cookies[0] = 8; /* bpf_fentry_test1 */
cookies[1] = 7; /* bpf_fentry_test3 */
cookies[2] = 6; /* bpf_fentry_test4 */
cookies[3] = 5; /* bpf_fentry_test5 */
cookies[4] = 4; /* bpf_fentry_test6 */
cookies[5] = 3; /* bpf_fentry_test7 */
cookies[6] = 2; /* bpf_fentry_test2 */
cookies[7] = 1; /* bpf_fentry_test8 */
opts.kprobe_multi.flags = BPF_F_KPROBE_MULTI_RETURN;
prog_fd = bpf_program__fd(skel->progs.test_kretprobe);
link2_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_KPROBE_MULTI, &opts);
if (!ASSERT_GE(link2_fd, 0, "link2_fd"))
goto cleanup;
kprobe_multi_test_run(skel);
cleanup:
close(link1_fd);
close(link2_fd);
kprobe_multi__destroy(skel);
}
static void kprobe_multi_attach_api_subtest(void)
{
struct bpf_link *link1 = NULL, *link2 = NULL;
LIBBPF_OPTS(bpf_kprobe_multi_opts, opts);
LIBBPF_OPTS(bpf_test_run_opts, topts);
struct kprobe_multi *skel = NULL;
const char *syms[8] = {
"bpf_fentry_test1",
"bpf_fentry_test3",
"bpf_fentry_test4",
"bpf_fentry_test5",
"bpf_fentry_test6",
"bpf_fentry_test7",
"bpf_fentry_test2",
"bpf_fentry_test8",
};
__u64 cookies[8];
skel = kprobe_multi__open_and_load();
if (!ASSERT_OK_PTR(skel, "fentry_raw_skel_load"))
goto cleanup;
skel->bss->pid = getpid();
skel->bss->test_cookie = true;
cookies[0] = 1; /* bpf_fentry_test1 */
cookies[1] = 2; /* bpf_fentry_test3 */
cookies[2] = 3; /* bpf_fentry_test4 */
cookies[3] = 4; /* bpf_fentry_test5 */
cookies[4] = 5; /* bpf_fentry_test6 */
cookies[5] = 6; /* bpf_fentry_test7 */
cookies[6] = 7; /* bpf_fentry_test2 */
cookies[7] = 8; /* bpf_fentry_test8 */
opts.syms = syms;
opts.cnt = ARRAY_SIZE(syms);
opts.cookies = cookies;
link1 = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kprobe,
NULL, &opts);
if (!ASSERT_OK_PTR(link1, "bpf_program__attach_kprobe_multi_opts"))
goto cleanup;
cookies[0] = 8; /* bpf_fentry_test1 */
cookies[1] = 7; /* bpf_fentry_test3 */
cookies[2] = 6; /* bpf_fentry_test4 */
cookies[3] = 5; /* bpf_fentry_test5 */
cookies[4] = 4; /* bpf_fentry_test6 */
cookies[5] = 3; /* bpf_fentry_test7 */
cookies[6] = 2; /* bpf_fentry_test2 */
cookies[7] = 1; /* bpf_fentry_test8 */
opts.retprobe = true;
link2 = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kretprobe,
NULL, &opts);
if (!ASSERT_OK_PTR(link2, "bpf_program__attach_kprobe_multi_opts"))
goto cleanup;
kprobe_multi_test_run(skel);
cleanup:
bpf_link__destroy(link2);
bpf_link__destroy(link1);
kprobe_multi__destroy(skel);
}
/* defined in prog_tests/uprobe_multi_test.c */
void uprobe_multi_func_1(void);
void uprobe_multi_func_2(void);
void uprobe_multi_func_3(void);
static void uprobe_multi_test_run(struct uprobe_multi *skel)
{
skel->bss->uprobe_multi_func_1_addr = (__u64) uprobe_multi_func_1;
skel->bss->uprobe_multi_func_2_addr = (__u64) uprobe_multi_func_2;
skel->bss->uprobe_multi_func_3_addr = (__u64) uprobe_multi_func_3;
skel->bss->pid = getpid();
skel->bss->test_cookie = true;
uprobe_multi_func_1();
uprobe_multi_func_2();
uprobe_multi_func_3();
ASSERT_EQ(skel->bss->uprobe_multi_func_1_result, 1, "uprobe_multi_func_1_result");
ASSERT_EQ(skel->bss->uprobe_multi_func_2_result, 1, "uprobe_multi_func_2_result");
ASSERT_EQ(skel->bss->uprobe_multi_func_3_result, 1, "uprobe_multi_func_3_result");
ASSERT_EQ(skel->bss->uretprobe_multi_func_1_result, 1, "uretprobe_multi_func_1_result");
ASSERT_EQ(skel->bss->uretprobe_multi_func_2_result, 1, "uretprobe_multi_func_2_result");
ASSERT_EQ(skel->bss->uretprobe_multi_func_3_result, 1, "uretprobe_multi_func_3_result");
}
static void uprobe_multi_attach_api_subtest(void)
{
struct bpf_link *link1 = NULL, *link2 = NULL;
struct uprobe_multi *skel = NULL;
LIBBPF_OPTS(bpf_uprobe_multi_opts, opts);
const char *syms[3] = {
"uprobe_multi_func_1",
"uprobe_multi_func_2",
"uprobe_multi_func_3",
};
__u64 cookies[3];
cookies[0] = 3; /* uprobe_multi_func_1 */
cookies[1] = 1; /* uprobe_multi_func_2 */
cookies[2] = 2; /* uprobe_multi_func_3 */
opts.syms = syms;
opts.cnt = ARRAY_SIZE(syms);
opts.cookies = &cookies[0];
skel = uprobe_multi__open_and_load();
if (!ASSERT_OK_PTR(skel, "uprobe_multi"))
goto cleanup;
link1 = bpf_program__attach_uprobe_multi(skel->progs.uprobe, -1,
"/proc/self/exe", NULL, &opts);
if (!ASSERT_OK_PTR(link1, "bpf_program__attach_uprobe_multi"))
goto cleanup;
cookies[0] = 2; /* uprobe_multi_func_1 */
cookies[1] = 3; /* uprobe_multi_func_2 */
cookies[2] = 1; /* uprobe_multi_func_3 */
opts.retprobe = true;
link2 = bpf_program__attach_uprobe_multi(skel->progs.uretprobe, -1,
"/proc/self/exe", NULL, &opts);
if (!ASSERT_OK_PTR(link2, "bpf_program__attach_uprobe_multi_retprobe"))
goto cleanup;
uprobe_multi_test_run(skel);
cleanup:
bpf_link__destroy(link2);
bpf_link__destroy(link1);
uprobe_multi__destroy(skel);
}
static void uprobe_subtest(struct test_bpf_cookie *skel)
{
DECLARE_LIBBPF_OPTS(bpf_uprobe_opts, opts);
struct bpf_link *link1 = NULL, *link2 = NULL;
struct bpf_link *retlink1 = NULL, *retlink2 = NULL;
ssize_t uprobe_offset;
uprobe_offset = get_uprobe_offset(&trigger_func);
if (!ASSERT_GE(uprobe_offset, 0, "uprobe_offset"))
goto cleanup;
/* attach two uprobes */
opts.bpf_cookie = 0x100;
opts.retprobe = false;
link1 = bpf_program__attach_uprobe_opts(skel->progs.handle_uprobe, 0 /* self pid */,
"/proc/self/exe", uprobe_offset, &opts);
if (!ASSERT_OK_PTR(link1, "link1"))
goto cleanup;
opts.bpf_cookie = 0x200;
opts.retprobe = false;
link2 = bpf_program__attach_uprobe_opts(skel->progs.handle_uprobe, -1 /* any pid */,
"/proc/self/exe", uprobe_offset, &opts);
if (!ASSERT_OK_PTR(link2, "link2"))
goto cleanup;
/* attach two uretprobes */
opts.bpf_cookie = 0x1000;
opts.retprobe = true;
retlink1 = bpf_program__attach_uprobe_opts(skel->progs.handle_uretprobe, -1 /* any pid */,
"/proc/self/exe", uprobe_offset, &opts);
if (!ASSERT_OK_PTR(retlink1, "retlink1"))
goto cleanup;
opts.bpf_cookie = 0x2000;
opts.retprobe = true;
retlink2 = bpf_program__attach_uprobe_opts(skel->progs.handle_uretprobe, 0 /* self pid */,
"/proc/self/exe", uprobe_offset, &opts);
if (!ASSERT_OK_PTR(retlink2, "retlink2"))
goto cleanup;
/* trigger uprobe && uretprobe */
trigger_func();
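/* each handler ORs bpf_get_attach_cookie() into its result, so both the
 * self-pid and any-pid attachments must have fired
 */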
ASSERT_EQ(skel->bss->uprobe_res, 0x100 | 0x200, "uprobe_res");
ASSERT_EQ(skel->bss->uretprobe_res, 0x1000 | 0x2000, "uretprobe_res");
cleanup:
bpf_link__destroy(link1);
bpf_link__destroy(link2);
bpf_link__destroy(retlink1);
bpf_link__destroy(retlink2);
}
static void tp_subtest(struct test_bpf_cookie *skel)
{
DECLARE_LIBBPF_OPTS(bpf_tracepoint_opts, opts);
struct bpf_link *link1 = NULL, *link2 = NULL, *link3 = NULL;
/* attach first tp prog */
opts.bpf_cookie = 0x10000;
link1 = bpf_program__attach_tracepoint_opts(skel->progs.handle_tp1,
"syscalls", "sys_enter_nanosleep", &opts);
if (!ASSERT_OK_PTR(link1, "link1"))
goto cleanup;
/* attach second tp prog */
opts.bpf_cookie = 0x20000;
link2 = bpf_program__attach_tracepoint_opts(skel->progs.handle_tp2,
"syscalls", "sys_enter_nanosleep", &opts);
if (!ASSERT_OK_PTR(link2, "link2"))
goto cleanup;
/* trigger tracepoints */
usleep(1);
ASSERT_EQ(skel->bss->tp_res, 0x10000 | 0x20000, "tp_res1");
/* now we detach first prog and will attach third one, which causes
* two internal calls to bpf_prog_array_copy(), shuffling
* bpf_prog_array_items around. We test here that we don't lose track
* of associated bpf_cookies.
*/
bpf_link__destroy(link1);
link1 = NULL;
kern_sync_rcu();
skel->bss->tp_res = 0;
/* attach third tp prog */
opts.bpf_cookie = 0x40000;
link3 = bpf_program__attach_tracepoint_opts(skel->progs.handle_tp3,
"syscalls", "sys_enter_nanosleep", &opts);
if (!ASSERT_OK_PTR(link3, "link3"))
goto cleanup;
/* trigger tracepoints */
usleep(1);
ASSERT_EQ(skel->bss->tp_res, 0x20000 | 0x40000, "tp_res2");
cleanup:
bpf_link__destroy(link1);
bpf_link__destroy(link2);
bpf_link__destroy(link3);
}
static void burn_cpu(void)
{
volatile int j = 0;
cpu_set_t cpu_set;
int i, err;
/* generate some branches on cpu 0 */
CPU_ZERO(&cpu_set);
CPU_SET(0, &cpu_set);
err = pthread_setaffinity_np(pthread_self(), sizeof(cpu_set), &cpu_set);
ASSERT_OK(err, "set_thread_affinity");
/* spin the loop for a while (random high number) */
for (i = 0; i < 1000000; ++i)
++j;
}
static void pe_subtest(struct test_bpf_cookie *skel)
{
DECLARE_LIBBPF_OPTS(bpf_perf_event_opts, opts);
struct bpf_link *link = NULL;
struct perf_event_attr attr;
int pfd = -1;
/* create perf event */
memset(&attr, 0, sizeof(attr));
attr.size = sizeof(attr);
attr.type = PERF_TYPE_SOFTWARE;
attr.config = PERF_COUNT_SW_CPU_CLOCK;
attr.freq = 1;
attr.sample_freq = 1000;
pfd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, PERF_FLAG_FD_CLOEXEC);
if (!ASSERT_GE(pfd, 0, "perf_fd"))
goto cleanup;
opts.bpf_cookie = 0x100000;
link = bpf_program__attach_perf_event_opts(skel->progs.handle_pe, pfd, &opts);
if (!ASSERT_OK_PTR(link, "link1"))
goto cleanup;
burn_cpu(); /* trigger BPF prog */
ASSERT_EQ(skel->bss->pe_res, 0x100000, "pe_res1");
/* prevent bpf_link__destroy() closing pfd itself */
bpf_link__disconnect(link);
/* close BPF link's FD explicitly */
close(bpf_link__fd(link));
/* free up memory used by struct bpf_link */
bpf_link__destroy(link);
link = NULL;
kern_sync_rcu();
skel->bss->pe_res = 0;
opts.bpf_cookie = 0x200000;
link = bpf_program__attach_perf_event_opts(skel->progs.handle_pe, pfd, &opts);
if (!ASSERT_OK_PTR(link, "link2"))
goto cleanup;
burn_cpu(); /* trigger BPF prog */
ASSERT_EQ(skel->bss->pe_res, 0x200000, "pe_res2");
cleanup:
close(pfd);
bpf_link__destroy(link);
}
static void tracing_subtest(struct test_bpf_cookie *skel)
{
__u64 cookie;
int prog_fd;
int fentry_fd = -1, fexit_fd = -1, fmod_ret_fd = -1;
LIBBPF_OPTS(bpf_test_run_opts, opts);
LIBBPF_OPTS(bpf_link_create_opts, link_opts);
skel->bss->fentry_res = 0;
skel->bss->fexit_res = 0;
cookie = 0x10000000000000L;
prog_fd = bpf_program__fd(skel->progs.fentry_test1);
link_opts.tracing.cookie = cookie;
fentry_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_FENTRY, &link_opts);
if (!ASSERT_GE(fentry_fd, 0, "fentry.link_create"))
goto cleanup;
cookie = 0x20000000000000L;
prog_fd = bpf_program__fd(skel->progs.fexit_test1);
link_opts.tracing.cookie = cookie;
fexit_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_FEXIT, &link_opts);
if (!ASSERT_GE(fexit_fd, 0, "fexit.link_create"))
goto cleanup;
cookie = 0x30000000000000L;
prog_fd = bpf_program__fd(skel->progs.fmod_ret_test);
link_opts.tracing.cookie = cookie;
fmod_ret_fd = bpf_link_create(prog_fd, 0, BPF_MODIFY_RETURN, &link_opts);
if (!ASSERT_GE(fmod_ret_fd, 0, "fmod_ret.link_create"))
goto cleanup;
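/* test_run of a tracing prog invokes its attach target, so running
 * fentry_test1 fires both the fentry and fexit programs attached to
 * bpf_fentry_test1, and fmod_ret_test fires the fmod_ret program
 */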
prog_fd = bpf_program__fd(skel->progs.fentry_test1);
bpf_prog_test_run_opts(prog_fd, &opts);
prog_fd = bpf_program__fd(skel->progs.fmod_ret_test);
bpf_prog_test_run_opts(prog_fd, &opts);
ASSERT_EQ(skel->bss->fentry_res, 0x10000000000000L, "fentry_res");
ASSERT_EQ(skel->bss->fexit_res, 0x20000000000000L, "fexit_res");
ASSERT_EQ(skel->bss->fmod_ret_res, 0x30000000000000L, "fmod_ret_res");
cleanup:
if (fentry_fd >= 0)
close(fentry_fd);
if (fexit_fd >= 0)
close(fexit_fd);
if (fmod_ret_fd >= 0)
close(fmod_ret_fd);
}
int stack_mprotect(void);
static void lsm_subtest(struct test_bpf_cookie *skel)
{
__u64 cookie;
int prog_fd;
int lsm_fd = -1;
LIBBPF_OPTS(bpf_link_create_opts, link_opts);
int err;
skel->bss->lsm_res = 0;
cookie = 0x90000000000090L;
prog_fd = bpf_program__fd(skel->progs.test_int_hook);
link_opts.tracing.cookie = cookie;
lsm_fd = bpf_link_create(prog_fd, 0, BPF_LSM_MAC, &link_opts);
if (!ASSERT_GE(lsm_fd, 0, "lsm.link_create"))
goto cleanup;
err = stack_mprotect();
if (!ASSERT_EQ(err, -1, "stack_mprotect") ||
!ASSERT_EQ(errno, EPERM, "stack_mprotect"))
goto cleanup;
usleep(1);
ASSERT_EQ(skel->bss->lsm_res, 0x90000000000090L, "fentry_res");
cleanup:
if (lsm_fd >= 0)
close(lsm_fd);
}
void test_bpf_cookie(void)
{
struct test_bpf_cookie *skel;
skel = test_bpf_cookie__open_and_load();
if (!ASSERT_OK_PTR(skel, "skel_open"))
return;
skel->bss->my_tid = syscall(SYS_gettid);
if (test__start_subtest("kprobe"))
kprobe_subtest(skel);
if (test__start_subtest("multi_kprobe_link_api"))
kprobe_multi_link_api_subtest();
if (test__start_subtest("multi_kprobe_attach_api"))
kprobe_multi_attach_api_subtest();
if (test__start_subtest("uprobe"))
uprobe_subtest(skel);
if (test__start_subtest("multi_uprobe_attach_api"))
uprobe_multi_attach_api_subtest();
if (test__start_subtest("tracepoint"))
tp_subtest(skel);
if (test__start_subtest("perf_event"))
pe_subtest(skel);
if (test__start_subtest("trampoline"))
tracing_subtest(skel);
if (test__start_subtest("lsm"))
lsm_subtest(skel);
test_bpf_cookie__destroy(skel);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/bpf_cookie.c |
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include "cgroup_helpers.h"
#define PING_CMD "ping -q -c1 -w1 127.0.0.1 > /dev/null"
static char bpf_log_buf[BPF_LOG_BUF_SIZE];
static int prog_load(void)
{
struct bpf_insn prog[] = {
BPF_MOV64_IMM(BPF_REG_0, 1), /* r0 = 1 */
BPF_EXIT_INSN(),
};
size_t insns_cnt = ARRAY_SIZE(prog);
return bpf_test_load_program(BPF_PROG_TYPE_CGROUP_SKB,
prog, insns_cnt, "GPL", 0,
bpf_log_buf, BPF_LOG_BUF_SIZE);
}
void serial_test_cgroup_attach_autodetach(void)
{
__u32 duration = 0, prog_cnt = 2 /* ARRAY_SIZE(prog_ids) */, attach_flags;
int allow_prog[2] = {-1, -1}; /* -1 so the error path never closes fd 0 */
__u32 prog_ids[2] = {0};
void *ptr = NULL;
int cg = 0, i;
int attempts;
for (i = 0; i < ARRAY_SIZE(allow_prog); i++) {
allow_prog[i] = prog_load();
if (CHECK(allow_prog[i] < 0, "prog_load",
"verifier output:\n%s\n-------\n", bpf_log_buf))
goto err;
}
if (CHECK_FAIL(setup_cgroup_environment()))
goto err;
/* create a cgroup, attach two programs and remember their ids */
cg = create_and_get_cgroup("/cg_autodetach");
if (CHECK_FAIL(cg < 0))
goto err;
if (CHECK_FAIL(join_cgroup("/cg_autodetach")))
goto err;
for (i = 0; i < ARRAY_SIZE(allow_prog); i++)
if (CHECK(bpf_prog_attach(allow_prog[i], cg,
BPF_CGROUP_INET_EGRESS,
BPF_F_ALLOW_MULTI),
"prog_attach", "prog[%d], errno=%d\n", i, errno))
goto err;
/* make sure that programs are attached and run some traffic */
if (CHECK(bpf_prog_query(cg, BPF_CGROUP_INET_EGRESS, 0, &attach_flags,
prog_ids, &prog_cnt),
"prog_query", "errno=%d\n", errno))
goto err;
if (CHECK_FAIL(system(PING_CMD)))
goto err;
/* allocate some memory (4Mb) to pin the original cgroup */
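/* the charged pages keep the removed cgroup alive in a dying state,
 * which is why the auto-detachment below happens asynchronously
 */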
ptr = malloc(4 * (1 << 20));
if (CHECK_FAIL(!ptr))
goto err;
/* close programs and cgroup fd */
for (i = 0; i < ARRAY_SIZE(allow_prog); i++) {
close(allow_prog[i]);
allow_prog[i] = -1;
}
close(cg);
cg = 0;
/* leave the cgroup and remove it. don't detach programs */
cleanup_cgroup_environment();
/* wait for the asynchronous auto-detachment,
 * retrying for up to 5 seconds before giving up
 */
for (i = 0; i < ARRAY_SIZE(prog_ids); i++) {
for (attempts = 5; attempts >= 0; attempts--) {
int fd = bpf_prog_get_fd_by_id(prog_ids[i]);
if (fd < 0)
break;
/* don't leave the fd open */
close(fd);
if (CHECK_FAIL(!attempts))
goto err;
sleep(1);
}
}
err:
for (i = 0; i < ARRAY_SIZE(allow_prog); i++)
if (allow_prog[i] >= 0)
close(allow_prog[i]);
if (cg)
close(cg);
free(ptr);
cleanup_cgroup_environment();
}
| linux-master | tools/testing/selftests/bpf/prog_tests/cgroup_attach_autodetach.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Facebook */
#include <test_progs.h>
#include <network_helpers.h>
#include "kfunc_call_fail.skel.h"
#include "kfunc_call_test.skel.h"
#include "kfunc_call_test.lskel.h"
#include "kfunc_call_test_subprog.skel.h"
#include "kfunc_call_test_subprog.lskel.h"
#include "kfunc_call_destructive.skel.h"
#include "cap_helpers.h"
#define OBJ_LOG_BUF_SZ (1024 * 1024) /* 1 MiB */
static size_t log_buf_sz = OBJ_LOG_BUF_SZ;
static char obj_log_buf[OBJ_LOG_BUF_SZ];
enum kfunc_test_type {
tc_test = 0,
syscall_test,
syscall_null_ctx_test,
};
struct kfunc_test_params {
const char *prog_name;
unsigned long lskel_prog_desc_offset;
int retval;
enum kfunc_test_type test_type;
const char *expected_err_msg;
};
#define __BPF_TEST_SUCCESS(name, __retval, type) \
{ \
.prog_name = #name, \
.lskel_prog_desc_offset = offsetof(struct kfunc_call_test_lskel, progs.name), \
.retval = __retval, \
.test_type = type, \
.expected_err_msg = NULL, \
}
#define __BPF_TEST_FAIL(name, __retval, type, error_msg) \
{ \
.prog_name = #name, \
.lskel_prog_desc_offset = 0 /* unused when test is failing */, \
.retval = __retval, \
.test_type = type, \
.expected_err_msg = error_msg, \
}
#define TC_TEST(name, retval) __BPF_TEST_SUCCESS(name, retval, tc_test)
#define SYSCALL_TEST(name, retval) __BPF_TEST_SUCCESS(name, retval, syscall_test)
#define SYSCALL_NULL_CTX_TEST(name, retval) __BPF_TEST_SUCCESS(name, retval, syscall_null_ctx_test)
#define TC_FAIL(name, retval, error_msg) __BPF_TEST_FAIL(name, retval, tc_test, error_msg)
#define SYSCALL_NULL_CTX_FAIL(name, retval, error_msg) \
__BPF_TEST_FAIL(name, retval, syscall_null_ctx_test, error_msg)
static struct kfunc_test_params kfunc_tests[] = {
/* failure cases:
* if retval is 0 -> the program will fail to load and the error message is an error
* if retval is not 0 -> the program can be loaded but running it will give the
* provided return value. The error message is thus the one
* from a successful load
*/
SYSCALL_NULL_CTX_FAIL(kfunc_syscall_test_fail, -EINVAL, "processed 4 insns"),
SYSCALL_NULL_CTX_FAIL(kfunc_syscall_test_null_fail, -EINVAL, "processed 4 insns"),
TC_FAIL(kfunc_call_test_get_mem_fail_rdonly, 0, "R0 cannot write into rdonly_mem"),
TC_FAIL(kfunc_call_test_get_mem_fail_use_after_free, 0, "invalid mem access 'scalar'"),
TC_FAIL(kfunc_call_test_get_mem_fail_oob, 0, "min value is outside of the allowed memory range"),
TC_FAIL(kfunc_call_test_get_mem_fail_not_const, 0, "is not a const"),
TC_FAIL(kfunc_call_test_mem_acquire_fail, 0, "acquire kernel function does not return PTR_TO_BTF_ID"),
/* success cases */
TC_TEST(kfunc_call_test1, 12),
TC_TEST(kfunc_call_test2, 3),
TC_TEST(kfunc_call_test4, -1234),
TC_TEST(kfunc_call_test_ref_btf_id, 0),
TC_TEST(kfunc_call_test_get_mem, 42),
SYSCALL_TEST(kfunc_syscall_test, 0),
SYSCALL_NULL_CTX_TEST(kfunc_syscall_test_null, 0),
TC_TEST(kfunc_call_test_static_unused_arg, 0),
};
struct syscall_test_args {
__u8 data[16];
size_t size;
};
static void verify_success(struct kfunc_test_params *param)
{
struct kfunc_call_test_lskel *lskel = NULL;
LIBBPF_OPTS(bpf_test_run_opts, topts);
struct bpf_prog_desc *lskel_prog;
struct kfunc_call_test *skel;
struct bpf_program *prog;
int prog_fd, err;
struct syscall_test_args args = {
.size = 10,
};
switch (param->test_type) {
case syscall_test:
topts.ctx_in = &args;
topts.ctx_size_in = sizeof(args);
/* fallthrough */
case syscall_null_ctx_test:
break;
case tc_test:
topts.data_in = &pkt_v4;
topts.data_size_in = sizeof(pkt_v4);
topts.repeat = 1;
break;
}
/* first test with normal libbpf */
skel = kfunc_call_test__open_and_load();
if (!ASSERT_OK_PTR(skel, "skel"))
return;
prog = bpf_object__find_program_by_name(skel->obj, param->prog_name);
if (!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name"))
goto cleanup;
prog_fd = bpf_program__fd(prog);
err = bpf_prog_test_run_opts(prog_fd, &topts);
if (!ASSERT_OK(err, param->prog_name))
goto cleanup;
if (!ASSERT_EQ(topts.retval, param->retval, "retval"))
goto cleanup;
/* second test with light skeletons */
lskel = kfunc_call_test_lskel__open_and_load();
if (!ASSERT_OK_PTR(lskel, "lskel"))
goto cleanup;
lskel_prog = (struct bpf_prog_desc *)((char *)lskel + param->lskel_prog_desc_offset);
prog_fd = lskel_prog->prog_fd;
err = bpf_prog_test_run_opts(prog_fd, &topts);
if (!ASSERT_OK(err, param->prog_name))
goto cleanup;
ASSERT_EQ(topts.retval, param->retval, "retval");
cleanup:
kfunc_call_test__destroy(skel);
if (lskel)
kfunc_call_test_lskel__destroy(lskel);
}
static void verify_fail(struct kfunc_test_params *param)
{
LIBBPF_OPTS(bpf_object_open_opts, opts);
LIBBPF_OPTS(bpf_test_run_opts, topts);
struct bpf_program *prog;
struct kfunc_call_fail *skel;
int prog_fd, err;
struct syscall_test_args args = {
.size = 10,
};
opts.kernel_log_buf = obj_log_buf;
opts.kernel_log_size = log_buf_sz;
opts.kernel_log_level = 1;
switch (param->test_type) {
case syscall_test:
topts.ctx_in = &args;
topts.ctx_size_in = sizeof(args);
/* fallthrough */
case syscall_null_ctx_test:
break;
case tc_test:
topts.data_in = &pkt_v4;
topts.data_size_in = sizeof(pkt_v4);
topts.repeat = 1;
break;
}
skel = kfunc_call_fail__open_opts(&opts);
if (!ASSERT_OK_PTR(skel, "kfunc_call_fail__open_opts"))
goto cleanup;
prog = bpf_object__find_program_by_name(skel->obj, param->prog_name);
if (!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name"))
goto cleanup;
bpf_program__set_autoload(prog, true);
err = kfunc_call_fail__load(skel);
if (!param->retval) {
/* the verifier is supposed to complain and refuses to load */
if (!ASSERT_ERR(err, "unexpected load success"))
goto out_err;
} else {
/* the program is loaded but must dynamically fail */
if (!ASSERT_OK(err, "unexpected load error"))
goto out_err;
prog_fd = bpf_program__fd(prog);
err = bpf_prog_test_run_opts(prog_fd, &topts);
if (!ASSERT_EQ(err, param->retval, param->prog_name))
goto out_err;
}
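/* in both cases the verifier log must contain the expected message */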
out_err:
if (!ASSERT_OK_PTR(strstr(obj_log_buf, param->expected_err_msg), "expected_err_msg")) {
fprintf(stderr, "Expected err_msg: %s\n", param->expected_err_msg);
fprintf(stderr, "Verifier output: %s\n", obj_log_buf);
}
cleanup:
kfunc_call_fail__destroy(skel);
}
static void test_main(void)
{
int i;
for (i = 0; i < ARRAY_SIZE(kfunc_tests); i++) {
if (!test__start_subtest(kfunc_tests[i].prog_name))
continue;
if (!kfunc_tests[i].expected_err_msg)
verify_success(&kfunc_tests[i]);
else
verify_fail(&kfunc_tests[i]);
}
}
static void test_subprog(void)
{
struct kfunc_call_test_subprog *skel;
int prog_fd, err;
LIBBPF_OPTS(bpf_test_run_opts, topts,
.data_in = &pkt_v4,
.data_size_in = sizeof(pkt_v4),
.repeat = 1,
);
skel = kfunc_call_test_subprog__open_and_load();
if (!ASSERT_OK_PTR(skel, "skel"))
return;
prog_fd = bpf_program__fd(skel->progs.kfunc_call_test1);
err = bpf_prog_test_run_opts(prog_fd, &topts);
ASSERT_OK(err, "bpf_prog_test_run(test1)");
ASSERT_EQ(topts.retval, 10, "test1-retval");
ASSERT_NEQ(skel->data->active_res, -1, "active_res");
ASSERT_EQ(skel->data->sk_state_res, BPF_TCP_CLOSE, "sk_state_res");
kfunc_call_test_subprog__destroy(skel);
}
static void test_subprog_lskel(void)
{
struct kfunc_call_test_subprog_lskel *skel;
int prog_fd, err;
LIBBPF_OPTS(bpf_test_run_opts, topts,
.data_in = &pkt_v4,
.data_size_in = sizeof(pkt_v4),
.repeat = 1,
);
skel = kfunc_call_test_subprog_lskel__open_and_load();
if (!ASSERT_OK_PTR(skel, "skel"))
return;
prog_fd = skel->progs.kfunc_call_test1.prog_fd;
err = bpf_prog_test_run_opts(prog_fd, &topts);
ASSERT_OK(err, "bpf_prog_test_run(test1)");
ASSERT_EQ(topts.retval, 10, "test1-retval");
ASSERT_NEQ(skel->data->active_res, -1, "active_res");
ASSERT_EQ(skel->data->sk_state_res, BPF_TCP_CLOSE, "sk_state_res");
kfunc_call_test_subprog_lskel__destroy(skel);
}
static int test_destructive_open_and_load(void)
{
struct kfunc_call_destructive *skel;
int err;
skel = kfunc_call_destructive__open();
if (!ASSERT_OK_PTR(skel, "prog_open"))
return -1;
err = kfunc_call_destructive__load(skel);
kfunc_call_destructive__destroy(skel);
return err;
}
static void test_destructive(void)
{
__u64 save_caps = 0;
ASSERT_OK(test_destructive_open_and_load(), "successful_load");
if (!ASSERT_OK(cap_disable_effective(1ULL << CAP_SYS_BOOT, &save_caps), "drop_caps"))
return;
ASSERT_EQ(test_destructive_open_and_load(), -13, "no_caps_failure");
cap_enable_effective(save_caps, NULL);
}
void test_kfunc_call(void)
{
test_main();
if (test__start_subtest("subprog"))
test_subprog();
if (test__start_subtest("subprog_lskel"))
test_subprog_lskel();
if (test__start_subtest("destructive"))
test_destructive();
}
| linux-master | tools/testing/selftests/bpf/prog_tests/kfunc_call.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#include <test_progs.h>
#include <bpf/btf.h>
/* real layout and sizes according to the test's (32-bit) BTF;
 * this struct needs to be defined before the skeleton is included */
struct test_struct___real {
unsigned int ptr; /* can't use `void *`, it is always 8 byte in BPF target */
unsigned int val2;
unsigned long long val1;
unsigned short val3;
unsigned char val4;
unsigned char _pad;
};
#include "test_core_autosize.skel.h"
static int duration = 0;
static struct {
unsigned long long ptr_samesized;
unsigned long long val1_samesized;
unsigned long long val2_samesized;
unsigned long long val3_samesized;
unsigned long long val4_samesized;
struct test_struct___real output_samesized;
unsigned long long ptr_downsized;
unsigned long long val1_downsized;
unsigned long long val2_downsized;
unsigned long long val3_downsized;
unsigned long long val4_downsized;
struct test_struct___real output_downsized;
unsigned long long ptr_probed;
unsigned long long val1_probed;
unsigned long long val2_probed;
unsigned long long val3_probed;
unsigned long long val4_probed;
unsigned long long ptr_signed;
unsigned long long val1_signed;
unsigned long long val2_signed;
unsigned long long val3_signed;
unsigned long long val4_signed;
struct test_struct___real output_signed;
} out;
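/* The test below builds a custom "kernel" BTF where pointers and longs are
 * 4 bytes wide and checks that CO-RE relocations auto-size field accesses:
 * same-sized and downsized (8- to 4-byte) loads as well as size-probed
 * reads must work, while the signed-downsize case must fail to load.
 */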
void test_core_autosize(void)
{
char btf_file[] = "/tmp/core_autosize.btf.XXXXXX";
int err, fd = -1, zero = 0;
int char_id, short_id, int_id, long_long_id, void_ptr_id, id;
DECLARE_LIBBPF_OPTS(bpf_object_open_opts, open_opts);
struct test_core_autosize* skel = NULL;
struct bpf_program *prog;
struct bpf_map *bss_map;
struct btf *btf = NULL;
size_t written;
const void *raw_data;
__u32 raw_sz;
FILE *f = NULL;
btf = btf__new_empty();
if (!ASSERT_OK_PTR(btf, "empty_btf"))
return;
/* Emit the following struct with 32-bit pointer size:
*
* struct test_struct {
* void *ptr;
* unsigned long val2;
* unsigned long long val1;
* unsigned short val3;
* unsigned char val4;
* char: 8;
* };
*
* This struct is going to be used as the "kernel BTF" for this test.
* It's equivalent memory-layout-wise to test_struct___real above.
*/
/* force 32-bit pointer size */
btf__set_pointer_size(btf, 4);
char_id = btf__add_int(btf, "unsigned char", 1, 0);
ASSERT_EQ(char_id, 1, "char_id");
short_id = btf__add_int(btf, "unsigned short", 2, 0);
ASSERT_EQ(short_id, 2, "short_id");
/* "long unsigned int" of 4 byte size tells BTF that sizeof(void *) == 4 */
int_id = btf__add_int(btf, "long unsigned int", 4, 0);
ASSERT_EQ(int_id, 3, "int_id");
long_long_id = btf__add_int(btf, "unsigned long long", 8, 0);
ASSERT_EQ(long_long_id, 4, "long_long_id");
void_ptr_id = btf__add_ptr(btf, 0);
ASSERT_EQ(void_ptr_id, 5, "void_ptr_id");
id = btf__add_struct(btf, "test_struct", 20 /* bytes */);
ASSERT_EQ(id, 6, "struct_id");
err = btf__add_field(btf, "ptr", void_ptr_id, 0, 0);
err = err ?: btf__add_field(btf, "val2", int_id, 32, 0);
err = err ?: btf__add_field(btf, "val1", long_long_id, 64, 0);
err = err ?: btf__add_field(btf, "val3", short_id, 128, 0);
err = err ?: btf__add_field(btf, "val4", char_id, 144, 0);
ASSERT_OK(err, "struct_fields");
fd = mkstemp(btf_file);
if (CHECK(fd < 0, "btf_tmp", "failed to create file: %d\n", fd))
goto cleanup;
f = fdopen(fd, "w");
if (!ASSERT_OK_PTR(f, "btf_fdopen"))
goto cleanup;
raw_data = btf__raw_data(btf, &raw_sz);
if (!ASSERT_OK_PTR(raw_data, "raw_data"))
goto cleanup;
written = fwrite(raw_data, 1, raw_sz, f);
if (CHECK(written != raw_sz, "btf_write", "written: %zu, errno: %d\n", written, errno))
goto cleanup;
fflush(f);
fclose(f);
f = NULL;
close(fd);
fd = -1;
/* open and load BPF program with custom BTF as the kernel BTF */
open_opts.btf_custom_path = btf_file;
skel = test_core_autosize__open_opts(&open_opts);
if (!ASSERT_OK_PTR(skel, "skel_open"))
goto cleanup;
/* disable handle_signed() for now */
prog = bpf_object__find_program_by_name(skel->obj, "handle_signed");
if (!ASSERT_OK_PTR(prog, "prog_find"))
goto cleanup;
bpf_program__set_autoload(prog, false);
err = bpf_object__load(skel->obj);
if (!ASSERT_OK(err, "prog_load"))
goto cleanup;
prog = bpf_object__find_program_by_name(skel->obj, "handle_samesize");
if (!ASSERT_OK_PTR(prog, "prog_find"))
goto cleanup;
skel->links.handle_samesize = bpf_program__attach(prog);
if (!ASSERT_OK_PTR(skel->links.handle_samesize, "prog_attach"))
goto cleanup;
prog = bpf_object__find_program_by_name(skel->obj, "handle_downsize");
if (!ASSERT_OK_PTR(prog, "prog_find"))
goto cleanup;
skel->links.handle_downsize = bpf_program__attach(prog);
if (!ASSERT_OK_PTR(skel->links.handle_downsize, "prog_attach"))
goto cleanup;
prog = bpf_object__find_program_by_name(skel->obj, "handle_probed");
if (!ASSERT_OK_PTR(prog, "prog_find"))
goto cleanup;
skel->links.handle_probed = bpf_program__attach(prog);
if (!ASSERT_OK_PTR(skel->links.handle_probed, "prog_attach"))
goto cleanup;
usleep(1);
bss_map = bpf_object__find_map_by_name(skel->obj, ".bss");
if (!ASSERT_OK_PTR(bss_map, "bss_map_find"))
goto cleanup;
err = bpf_map__lookup_elem(bss_map, &zero, sizeof(zero), &out, sizeof(out), 0);
if (!ASSERT_OK(err, "bss_lookup"))
goto cleanup;
ASSERT_EQ(out.ptr_samesized, 0x01020304, "ptr_samesized");
ASSERT_EQ(out.val1_samesized, 0x1020304050607080, "val1_samesized");
ASSERT_EQ(out.val2_samesized, 0x0a0b0c0d, "val2_samesized");
ASSERT_EQ(out.val3_samesized, 0xfeed, "val3_samesized");
ASSERT_EQ(out.val4_samesized, 0xb9, "val4_samesized");
ASSERT_EQ(out.output_samesized.ptr, 0x01020304, "ptr_samesized");
ASSERT_EQ(out.output_samesized.val1, 0x1020304050607080, "val1_samesized");
ASSERT_EQ(out.output_samesized.val2, 0x0a0b0c0d, "val2_samesized");
ASSERT_EQ(out.output_samesized.val3, 0xfeed, "val3_samesized");
ASSERT_EQ(out.output_samesized.val4, 0xb9, "val4_samesized");
ASSERT_EQ(out.ptr_downsized, 0x01020304, "ptr_downsized");
ASSERT_EQ(out.val1_downsized, 0x1020304050607080, "val1_downsized");
ASSERT_EQ(out.val2_downsized, 0x0a0b0c0d, "val2_downsized");
ASSERT_EQ(out.val3_downsized, 0xfeed, "val3_downsized");
ASSERT_EQ(out.val4_downsized, 0xb9, "val4_downsized");
ASSERT_EQ(out.output_downsized.ptr, 0x01020304, "ptr_downsized");
ASSERT_EQ(out.output_downsized.val1, 0x1020304050607080, "val1_downsized");
ASSERT_EQ(out.output_downsized.val2, 0x0a0b0c0d, "val2_downsized");
ASSERT_EQ(out.output_downsized.val3, 0xfeed, "val3_downsized");
ASSERT_EQ(out.output_downsized.val4, 0xb9, "val4_downsized");
ASSERT_EQ(out.ptr_probed, 0x01020304, "ptr_probed");
ASSERT_EQ(out.val1_probed, 0x1020304050607080, "val1_probed");
ASSERT_EQ(out.val2_probed, 0x0a0b0c0d, "val2_probed");
ASSERT_EQ(out.val3_probed, 0xfeed, "val3_probed");
ASSERT_EQ(out.val4_probed, 0xb9, "val4_probed");
test_core_autosize__destroy(skel);
skel = NULL;
/* now re-load with handle_signed() enabled, it should fail loading */
open_opts.btf_custom_path = btf_file;
skel = test_core_autosize__open_opts(&open_opts);
if (!ASSERT_OK_PTR(skel, "skel_open"))
goto cleanup;
err = test_core_autosize__load(skel);
if (!ASSERT_ERR(err, "skel_load"))
goto cleanup;
cleanup:
if (f)
fclose(f);
if (fd >= 0)
close(fd);
remove(btf_file);
btf__free(btf);
test_core_autosize__destroy(skel);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/core_autosize.c |
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* This test sets up 3 netns (src <-> fwd <-> dst). There is no direct veth link
* between src and dst. The netns fwd has veth links to each src and dst. The
* client is in src and server in dst. The test installs a TC BPF program to each
* host facing veth in fwd which calls into i) bpf_redirect_neigh() to perform the
* neigh addr population and redirect or ii) bpf_redirect_peer() for namespace
* switch from ingress side; it also installs a checker prog on the egress side
* to drop unexpected traffic.
*/
#include <arpa/inet.h>
#include <linux/if_tun.h>
#include <linux/limits.h>
#include <linux/sysctl.h>
#include <linux/time_types.h>
#include <linux/net_tstamp.h>
#include <net/if.h>
#include <stdbool.h>
#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>
#include "test_progs.h"
#include "network_helpers.h"
#include "test_tc_neigh_fib.skel.h"
#include "test_tc_neigh.skel.h"
#include "test_tc_peer.skel.h"
#include "test_tc_dtime.skel.h"
#ifndef TCP_TX_DELAY
#define TCP_TX_DELAY 37
#endif
#define NS_SRC "ns_src"
#define NS_FWD "ns_fwd"
#define NS_DST "ns_dst"
#define IP4_SRC "172.16.1.100"
#define IP4_DST "172.16.2.100"
#define IP4_TUN_SRC "172.17.1.100"
#define IP4_TUN_FWD "172.17.1.200"
#define IP4_PORT 9004
#define IP6_SRC "0::1:dead:beef:cafe"
#define IP6_DST "0::2:dead:beef:cafe"
#define IP6_TUN_SRC "1::1:dead:beef:cafe"
#define IP6_TUN_FWD "1::2:dead:beef:cafe"
#define IP6_PORT 9006
#define IP4_SLL "169.254.0.1"
#define IP4_DLL "169.254.0.2"
#define IP4_NET "169.254.0.0"
#define MAC_DST_FWD "00:11:22:33:44:55"
#define MAC_DST "00:22:33:44:55:66"
#define IFADDR_STR_LEN 18
#define PING_ARGS "-i 0.2 -c 3 -w 10 -q"
#define TIMEOUT_MILLIS 10000
#define NSEC_PER_SEC 1000000000ULL
#define log_err(MSG, ...) \
fprintf(stderr, "(%s:%d: errno: %s) " MSG "\n", \
__FILE__, __LINE__, strerror(errno), ##__VA_ARGS__)
static const char * const namespaces[] = {NS_SRC, NS_FWD, NS_DST, NULL};
static int write_file(const char *path, const char *newval)
{
FILE *f;
f = fopen(path, "r+");
if (!f)
return -1;
if (fwrite(newval, strlen(newval), 1, f) != 1) {
log_err("writing to %s failed", path);
fclose(f);
return -1;
}
fclose(f);
return 0;
}
static int netns_setup_namespaces(const char *verb)
{
const char * const *ns = namespaces;
char cmd[128];
while (*ns) {
snprintf(cmd, sizeof(cmd), "ip netns %s %s", verb, *ns);
if (!ASSERT_OK(system(cmd), cmd))
return -1;
ns++;
}
return 0;
}
static void netns_setup_namespaces_nofail(const char *verb)
{
const char * const *ns = namespaces;
char cmd[128];
while (*ns) {
snprintf(cmd, sizeof(cmd), "ip netns %s %s > /dev/null 2>&1", verb, *ns);
system(cmd);
ns++;
}
}
struct netns_setup_result {
int ifindex_veth_src;
int ifindex_veth_src_fwd;
int ifindex_veth_dst;
int ifindex_veth_dst_fwd;
};
static int get_ifaddr(const char *name, char *ifaddr)
{
char path[PATH_MAX];
FILE *f;
int ret;
snprintf(path, PATH_MAX, "/sys/class/net/%s/address", name);
f = fopen(path, "r");
if (!ASSERT_OK_PTR(f, path))
return -1;
ret = fread(ifaddr, 1, IFADDR_STR_LEN, f);
if (!ASSERT_EQ(ret, IFADDR_STR_LEN, "fread ifaddr")) {
fclose(f);
return -1;
}
fclose(f);
return 0;
}
static int netns_setup_links_and_routes(struct netns_setup_result *result)
{
struct nstoken *nstoken = NULL;
char veth_src_fwd_addr[IFADDR_STR_LEN+1] = {};
SYS(fail, "ip link add veth_src type veth peer name veth_src_fwd");
SYS(fail, "ip link add veth_dst type veth peer name veth_dst_fwd");
SYS(fail, "ip link set veth_dst_fwd address " MAC_DST_FWD);
SYS(fail, "ip link set veth_dst address " MAC_DST);
if (get_ifaddr("veth_src_fwd", veth_src_fwd_addr))
goto fail;
result->ifindex_veth_src = if_nametoindex("veth_src");
if (!ASSERT_GT(result->ifindex_veth_src, 0, "ifindex_veth_src"))
goto fail;
result->ifindex_veth_src_fwd = if_nametoindex("veth_src_fwd");
if (!ASSERT_GT(result->ifindex_veth_src_fwd, 0, "ifindex_veth_src_fwd"))
goto fail;
result->ifindex_veth_dst = if_nametoindex("veth_dst");
if (!ASSERT_GT(result->ifindex_veth_dst, 0, "ifindex_veth_dst"))
goto fail;
result->ifindex_veth_dst_fwd = if_nametoindex("veth_dst_fwd");
if (!ASSERT_GT(result->ifindex_veth_dst_fwd, 0, "ifindex_veth_dst_fwd"))
goto fail;
SYS(fail, "ip link set veth_src netns " NS_SRC);
SYS(fail, "ip link set veth_src_fwd netns " NS_FWD);
SYS(fail, "ip link set veth_dst_fwd netns " NS_FWD);
SYS(fail, "ip link set veth_dst netns " NS_DST);
/** setup in 'src' namespace */
nstoken = open_netns(NS_SRC);
if (!ASSERT_OK_PTR(nstoken, "setns src"))
goto fail;
SYS(fail, "ip addr add " IP4_SRC "/32 dev veth_src");
SYS(fail, "ip addr add " IP6_SRC "/128 dev veth_src nodad");
SYS(fail, "ip link set dev veth_src up");
SYS(fail, "ip route add " IP4_DST "/32 dev veth_src scope global");
SYS(fail, "ip route add " IP4_NET "/16 dev veth_src scope global");
SYS(fail, "ip route add " IP6_DST "/128 dev veth_src scope global");
SYS(fail, "ip neigh add " IP4_DST " dev veth_src lladdr %s",
veth_src_fwd_addr);
SYS(fail, "ip neigh add " IP6_DST " dev veth_src lladdr %s",
veth_src_fwd_addr);
close_netns(nstoken);
/** setup in 'fwd' namespace */
nstoken = open_netns(NS_FWD);
if (!ASSERT_OK_PTR(nstoken, "setns fwd"))
goto fail;
/* The fwd netns automatically gets a v6 LL address / routes, but also
* needs v4 one in order to start ARP probing. IP4_NET route is added
* to the endpoints so that the ARP processing will reply.
*/
SYS(fail, "ip addr add " IP4_SLL "/32 dev veth_src_fwd");
SYS(fail, "ip addr add " IP4_DLL "/32 dev veth_dst_fwd");
SYS(fail, "ip link set dev veth_src_fwd up");
SYS(fail, "ip link set dev veth_dst_fwd up");
SYS(fail, "ip route add " IP4_SRC "/32 dev veth_src_fwd scope global");
SYS(fail, "ip route add " IP6_SRC "/128 dev veth_src_fwd scope global");
SYS(fail, "ip route add " IP4_DST "/32 dev veth_dst_fwd scope global");
SYS(fail, "ip route add " IP6_DST "/128 dev veth_dst_fwd scope global");
close_netns(nstoken);
/** setup in 'dst' namespace */
nstoken = open_netns(NS_DST);
if (!ASSERT_OK_PTR(nstoken, "setns dst"))
goto fail;
SYS(fail, "ip addr add " IP4_DST "/32 dev veth_dst");
SYS(fail, "ip addr add " IP6_DST "/128 dev veth_dst nodad");
SYS(fail, "ip link set dev veth_dst up");
SYS(fail, "ip route add " IP4_SRC "/32 dev veth_dst scope global");
SYS(fail, "ip route add " IP4_NET "/16 dev veth_dst scope global");
SYS(fail, "ip route add " IP6_SRC "/128 dev veth_dst scope global");
SYS(fail, "ip neigh add " IP4_SRC " dev veth_dst lladdr " MAC_DST_FWD);
SYS(fail, "ip neigh add " IP6_SRC " dev veth_dst lladdr " MAC_DST_FWD);
close_netns(nstoken);
return 0;
fail:
if (nstoken)
close_netns(nstoken);
return -1;
}
static int qdisc_clsact_create(struct bpf_tc_hook *qdisc_hook, int ifindex)
{
char err_str[128], ifname[16];
int err;
qdisc_hook->ifindex = ifindex;
qdisc_hook->attach_point = BPF_TC_INGRESS | BPF_TC_EGRESS;
err = bpf_tc_hook_create(qdisc_hook);
snprintf(err_str, sizeof(err_str),
"qdisc add dev %s clsact",
if_indextoname(qdisc_hook->ifindex, ifname) ? : "<unknown_iface>");
err_str[sizeof(err_str) - 1] = 0;
ASSERT_OK(err, err_str);
return err;
}
static int xgress_filter_add(struct bpf_tc_hook *qdisc_hook,
enum bpf_tc_attach_point xgress,
const struct bpf_program *prog, int priority)
{
LIBBPF_OPTS(bpf_tc_opts, tc_attach);
char err_str[128], ifname[16];
int err;
qdisc_hook->attach_point = xgress;
tc_attach.prog_fd = bpf_program__fd(prog);
tc_attach.priority = priority;
err = bpf_tc_attach(qdisc_hook, &tc_attach);
snprintf(err_str, sizeof(err_str),
"filter add dev %s %s prio %d bpf da %s",
if_indextoname(qdisc_hook->ifindex, ifname) ? : "<unknown_iface>",
xgress == BPF_TC_INGRESS ? "ingress" : "egress",
priority, bpf_program__name(prog));
err_str[sizeof(err_str) - 1] = 0;
ASSERT_OK(err, err_str);
return err;
}
#define QDISC_CLSACT_CREATE(qdisc_hook, ifindex) ({ \
if ((err = qdisc_clsact_create(qdisc_hook, ifindex))) \
goto fail; \
})
#define XGRESS_FILTER_ADD(qdisc_hook, xgress, prog, priority) ({ \
if ((err = xgress_filter_add(qdisc_hook, xgress, prog, priority))) \
goto fail; \
})
static int netns_load_bpf(const struct bpf_program *src_prog,
const struct bpf_program *dst_prog,
const struct bpf_program *chk_prog,
const struct netns_setup_result *setup_result)
{
LIBBPF_OPTS(bpf_tc_hook, qdisc_veth_src_fwd);
LIBBPF_OPTS(bpf_tc_hook, qdisc_veth_dst_fwd);
int err;
/* tc qdisc add dev veth_src_fwd clsact */
QDISC_CLSACT_CREATE(&qdisc_veth_src_fwd, setup_result->ifindex_veth_src_fwd);
/* tc filter add dev veth_src_fwd ingress bpf da src_prog */
XGRESS_FILTER_ADD(&qdisc_veth_src_fwd, BPF_TC_INGRESS, src_prog, 0);
/* tc filter add dev veth_src_fwd egress bpf da chk_prog */
XGRESS_FILTER_ADD(&qdisc_veth_src_fwd, BPF_TC_EGRESS, chk_prog, 0);
/* tc qdisc add dev veth_dst_fwd clsact */
QDISC_CLSACT_CREATE(&qdisc_veth_dst_fwd, setup_result->ifindex_veth_dst_fwd);
/* tc filter add dev veth_dst_fwd ingress bpf da dst_prog */
XGRESS_FILTER_ADD(&qdisc_veth_dst_fwd, BPF_TC_INGRESS, dst_prog, 0);
/* tc filter add dev veth_dst_fwd egress bpf da chk_prog */
XGRESS_FILTER_ADD(&qdisc_veth_dst_fwd, BPF_TC_EGRESS, chk_prog, 0);
return 0;
fail:
return -1;
}
static void test_tcp(int family, const char *addr, __u16 port)
{
int listen_fd = -1, accept_fd = -1, client_fd = -1;
char buf[] = "testing testing";
int n;
struct nstoken *nstoken;
nstoken = open_netns(NS_DST);
if (!ASSERT_OK_PTR(nstoken, "setns dst"))
return;
listen_fd = start_server(family, SOCK_STREAM, addr, port, 0);
if (!ASSERT_GE(listen_fd, 0, "listen"))
goto done;
close_netns(nstoken);
nstoken = open_netns(NS_SRC);
if (!ASSERT_OK_PTR(nstoken, "setns src"))
goto done;
client_fd = connect_to_fd(listen_fd, TIMEOUT_MILLIS);
if (!ASSERT_GE(client_fd, 0, "connect_to_fd"))
goto done;
accept_fd = accept(listen_fd, NULL, NULL);
if (!ASSERT_GE(accept_fd, 0, "accept"))
goto done;
if (!ASSERT_OK(settimeo(accept_fd, TIMEOUT_MILLIS), "settimeo"))
goto done;
n = write(client_fd, buf, sizeof(buf));
if (!ASSERT_EQ(n, sizeof(buf), "send to server"))
goto done;
n = read(accept_fd, buf, sizeof(buf));
ASSERT_EQ(n, sizeof(buf), "recv from server");
done:
if (nstoken)
close_netns(nstoken);
if (listen_fd >= 0)
close(listen_fd);
if (accept_fd >= 0)
close(accept_fd);
if (client_fd >= 0)
close(client_fd);
}
static int test_ping(int family, const char *addr)
{
SYS(fail, "ip netns exec " NS_SRC " %s " PING_ARGS " %s > /dev/null", ping_command(family), addr);
return 0;
fail:
return -1;
}
static void test_connectivity(void)
{
test_tcp(AF_INET, IP4_DST, IP4_PORT);
test_ping(AF_INET, IP4_DST);
test_tcp(AF_INET6, IP6_DST, IP6_PORT);
test_ping(AF_INET6, IP6_DST);
}
static int set_forwarding(bool enable)
{
int err;
err = write_file("/proc/sys/net/ipv4/ip_forward", enable ? "1" : "0");
if (!ASSERT_OK(err, "set ipv4.ip_forward"))
return err;
err = write_file("/proc/sys/net/ipv6/conf/all/forwarding", enable ? "1" : "0");
if (!ASSERT_OK(err, "set ipv6.forwarding"))
return err;
return 0;
}
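/* receive one message and check both the payload and that the kernel's
 * SO_TIMESTAMPNS_NEW receive timestamp is sane (non-zero and recent)
 */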
static void rcv_tstamp(int fd, const char *expected, size_t s)
{
struct __kernel_timespec pkt_ts = {};
char ctl[CMSG_SPACE(sizeof(pkt_ts))];
struct timespec now_ts;
struct msghdr msg = {};
__u64 now_ns, pkt_ns;
struct cmsghdr *cmsg;
struct iovec iov;
char data[32];
int ret;
iov.iov_base = data;
iov.iov_len = sizeof(data);
msg.msg_iov = &iov;
msg.msg_iovlen = 1;
msg.msg_control = &ctl;
msg.msg_controllen = sizeof(ctl);
ret = recvmsg(fd, &msg, 0);
if (!ASSERT_EQ(ret, s, "recvmsg"))
return;
ASSERT_STRNEQ(data, expected, s, "expected rcv data");
cmsg = CMSG_FIRSTHDR(&msg);
if (cmsg && cmsg->cmsg_level == SOL_SOCKET &&
cmsg->cmsg_type == SO_TIMESTAMPNS_NEW)
memcpy(&pkt_ts, CMSG_DATA(cmsg), sizeof(pkt_ts));
pkt_ns = pkt_ts.tv_sec * NSEC_PER_SEC + pkt_ts.tv_nsec;
ASSERT_NEQ(pkt_ns, 0, "pkt rcv tstamp");
ret = clock_gettime(CLOCK_REALTIME, &now_ts);
ASSERT_OK(ret, "clock_gettime");
now_ns = now_ts.tv_sec * NSEC_PER_SEC + now_ts.tv_nsec;
if (ASSERT_GE(now_ns, pkt_ns, "check rcv tstamp"))
ASSERT_LT(now_ns - pkt_ns, 5 * NSEC_PER_SEC,
"check rcv tstamp");
}
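/* send one message carrying an SCM_TXTIME cmsg so the kernel places a
 * delivery time (EDT) on the outgoing skb
 */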
static void snd_tstamp(int fd, char *b, size_t s)
{
struct sock_txtime opt = { .clockid = CLOCK_TAI };
char ctl[CMSG_SPACE(sizeof(__u64))];
struct timespec now_ts;
struct msghdr msg = {};
struct cmsghdr *cmsg;
struct iovec iov;
__u64 now_ns;
int ret;
ret = clock_gettime(CLOCK_TAI, &now_ts);
ASSERT_OK(ret, "clock_get_time(CLOCK_TAI)");
now_ns = now_ts.tv_sec * NSEC_PER_SEC + now_ts.tv_nsec;
iov.iov_base = b;
iov.iov_len = s;
msg.msg_iov = &iov;
msg.msg_iovlen = 1;
msg.msg_control = &ctl;
msg.msg_controllen = sizeof(ctl);
cmsg = CMSG_FIRSTHDR(&msg);
cmsg->cmsg_level = SOL_SOCKET;
cmsg->cmsg_type = SCM_TXTIME;
cmsg->cmsg_len = CMSG_LEN(sizeof(now_ns));
*(__u64 *)CMSG_DATA(cmsg) = now_ns;
ret = setsockopt(fd, SOL_SOCKET, SO_TXTIME, &opt, sizeof(opt));
ASSERT_OK(ret, "setsockopt(SO_TXTIME)");
ret = sendmsg(fd, &msg, 0);
ASSERT_EQ(ret, s, "sendmsg");
}
static void test_inet_dtime(int family, int type, const char *addr, __u16 port)
{
int opt = 1, accept_fd = -1, client_fd = -1, listen_fd, err;
char buf[] = "testing testing";
struct nstoken *nstoken;
nstoken = open_netns(NS_DST);
if (!ASSERT_OK_PTR(nstoken, "setns dst"))
return;
listen_fd = start_server(family, type, addr, port, 0);
close_netns(nstoken);
if (!ASSERT_GE(listen_fd, 0, "listen"))
return;
/* Ensure the kernel puts the (rcv) timestamp on all skbs */
err = setsockopt(listen_fd, SOL_SOCKET, SO_TIMESTAMPNS_NEW,
&opt, sizeof(opt));
if (!ASSERT_OK(err, "setsockopt(SO_TIMESTAMPNS_NEW)"))
goto done;
if (type == SOCK_STREAM) {
/* Ensure the kernel set EDT when sending out rst/ack
* from the kernel's ctl_sk.
*/
err = setsockopt(listen_fd, SOL_TCP, TCP_TX_DELAY, &opt,
sizeof(opt));
if (!ASSERT_OK(err, "setsockopt(TCP_TX_DELAY)"))
goto done;
}
nstoken = open_netns(NS_SRC);
if (!ASSERT_OK_PTR(nstoken, "setns src"))
goto done;
client_fd = connect_to_fd(listen_fd, TIMEOUT_MILLIS);
close_netns(nstoken);
if (!ASSERT_GE(client_fd, 0, "connect_to_fd"))
goto done;
if (type == SOCK_STREAM) {
int n;
accept_fd = accept(listen_fd, NULL, NULL);
if (!ASSERT_GE(accept_fd, 0, "accept"))
goto done;
n = write(client_fd, buf, sizeof(buf));
if (!ASSERT_EQ(n, sizeof(buf), "send to server"))
goto done;
rcv_tstamp(accept_fd, buf, sizeof(buf));
} else {
snd_tstamp(client_fd, buf, sizeof(buf));
rcv_tstamp(listen_fd, buf, sizeof(buf));
}
done:
close(listen_fd);
if (accept_fd != -1)
close(accept_fd);
if (client_fd != -1)
close(client_fd);
}
static int netns_load_dtime_bpf(struct test_tc_dtime *skel,
const struct netns_setup_result *setup_result)
{
LIBBPF_OPTS(bpf_tc_hook, qdisc_veth_src_fwd);
LIBBPF_OPTS(bpf_tc_hook, qdisc_veth_dst_fwd);
LIBBPF_OPTS(bpf_tc_hook, qdisc_veth_src);
LIBBPF_OPTS(bpf_tc_hook, qdisc_veth_dst);
struct nstoken *nstoken;
int err;
/* setup ns_src tc progs */
nstoken = open_netns(NS_SRC);
if (!ASSERT_OK_PTR(nstoken, "setns " NS_SRC))
return -1;
/* tc qdisc add dev veth_src clsact */
QDISC_CLSACT_CREATE(&qdisc_veth_src, setup_result->ifindex_veth_src);
/* tc filter add dev veth_src ingress bpf da ingress_host */
XGRESS_FILTER_ADD(&qdisc_veth_src, BPF_TC_INGRESS, skel->progs.ingress_host, 0);
/* tc filter add dev veth_src egress bpf da egress_host */
XGRESS_FILTER_ADD(&qdisc_veth_src, BPF_TC_EGRESS, skel->progs.egress_host, 0);
close_netns(nstoken);
/* setup ns_dst tc progs */
nstoken = open_netns(NS_DST);
if (!ASSERT_OK_PTR(nstoken, "setns " NS_DST))
return -1;
/* tc qdisc add dev veth_dst clsact */
QDISC_CLSACT_CREATE(&qdisc_veth_dst, setup_result->ifindex_veth_dst);
/* tc filter add dev veth_dst ingress bpf da ingress_host */
XGRESS_FILTER_ADD(&qdisc_veth_dst, BPF_TC_INGRESS, skel->progs.ingress_host, 0);
/* tc filter add dev veth_dst egress bpf da egress_host */
XGRESS_FILTER_ADD(&qdisc_veth_dst, BPF_TC_EGRESS, skel->progs.egress_host, 0);
close_netns(nstoken);
/* setup ns_fwd tc progs */
nstoken = open_netns(NS_FWD);
if (!ASSERT_OK_PTR(nstoken, "setns " NS_FWD))
return -1;
/* tc qdisc add dev veth_dst_fwd clsact */
QDISC_CLSACT_CREATE(&qdisc_veth_dst_fwd, setup_result->ifindex_veth_dst_fwd);
/* tc filter add dev veth_dst_fwd ingress prio 100 bpf da ingress_fwdns_prio100 */
XGRESS_FILTER_ADD(&qdisc_veth_dst_fwd, BPF_TC_INGRESS,
skel->progs.ingress_fwdns_prio100, 100);
/* tc filter add dev veth_dst_fwd ingress prio 101 bpf da ingress_fwdns_prio101 */
XGRESS_FILTER_ADD(&qdisc_veth_dst_fwd, BPF_TC_INGRESS,
skel->progs.ingress_fwdns_prio101, 101);
/* tc filter add dev veth_dst_fwd egress prio 100 bpf da egress_fwdns_prio100 */
XGRESS_FILTER_ADD(&qdisc_veth_dst_fwd, BPF_TC_EGRESS,
skel->progs.egress_fwdns_prio100, 100);
/* tc filter add dev veth_dst_fwd egress prio 101 bpf da egress_fwdns_prio101 */
XGRESS_FILTER_ADD(&qdisc_veth_dst_fwd, BPF_TC_EGRESS,
skel->progs.egress_fwdns_prio101, 101);
/* tc qdisc add dev veth_src_fwd clsact */
QDISC_CLSACT_CREATE(&qdisc_veth_src_fwd, setup_result->ifindex_veth_src_fwd);
/* tc filter add dev veth_src_fwd ingress prio 100 bpf da ingress_fwdns_prio100 */
XGRESS_FILTER_ADD(&qdisc_veth_src_fwd, BPF_TC_INGRESS,
skel->progs.ingress_fwdns_prio100, 100);
/* tc filter add dev veth_src_fwd ingress prio 101 bpf da ingress_fwdns_prio101 */
XGRESS_FILTER_ADD(&qdisc_veth_src_fwd, BPF_TC_INGRESS,
skel->progs.ingress_fwdns_prio101, 101);
/* tc filter add dev veth_src_fwd egress prio 100 bpf da egress_fwdns_prio100 */
XGRESS_FILTER_ADD(&qdisc_veth_src_fwd, BPF_TC_EGRESS,
skel->progs.egress_fwdns_prio100, 100);
/* tc filter add dev veth_src_fwd egress prio 101 bpf da egress_fwdns_prio101 */
XGRESS_FILTER_ADD(&qdisc_veth_src_fwd, BPF_TC_EGRESS,
skel->progs.egress_fwdns_prio101, 101);
close_netns(nstoken);
return 0;
fail:
close_netns(nstoken);
return err;
}
enum {
INGRESS_FWDNS_P100,
INGRESS_FWDNS_P101,
EGRESS_FWDNS_P100,
EGRESS_FWDNS_P101,
INGRESS_ENDHOST,
EGRESS_ENDHOST,
SET_DTIME,
__MAX_CNT,
};
const char *cnt_names[] = {
"ingress_fwdns_p100",
"ingress_fwdns_p101",
"egress_fwdns_p100",
"egress_fwdns_p101",
"ingress_endhost",
"egress_endhost",
"set_dtime",
};
enum {
TCP_IP6_CLEAR_DTIME,
TCP_IP4,
TCP_IP6,
UDP_IP4,
UDP_IP6,
TCP_IP4_RT_FWD,
TCP_IP6_RT_FWD,
UDP_IP4_RT_FWD,
UDP_IP6_RT_FWD,
UKN_TEST,
__NR_TESTS,
};
const char *test_names[] = {
"tcp ip6 clear dtime",
"tcp ip4",
"tcp ip6",
"udp ip4",
"udp ip6",
"tcp ip4 rt fwd",
"tcp ip6 rt fwd",
"udp ip4 rt fwd",
"udp ip6 rt fwd",
};
static const char *dtime_cnt_str(int test, int cnt)
{
static char name[64];
snprintf(name, sizeof(name), "%s %s", test_names[test], cnt_names[cnt]);
return name;
}
static const char *dtime_err_str(int test, int cnt)
{
static char name[64];
snprintf(name, sizeof(name), "%s %s errs", test_names[test],
cnt_names[cnt]);
return name;
}
static void test_tcp_clear_dtime(struct test_tc_dtime *skel)
{
int i, t = TCP_IP6_CLEAR_DTIME;
__u32 *dtimes = skel->bss->dtimes[t];
__u32 *errs = skel->bss->errs[t];
skel->bss->test = t;
test_inet_dtime(AF_INET6, SOCK_STREAM, IP6_DST, 50000 + t);
ASSERT_EQ(dtimes[INGRESS_FWDNS_P100], 0,
dtime_cnt_str(t, INGRESS_FWDNS_P100));
ASSERT_EQ(dtimes[INGRESS_FWDNS_P101], 0,
dtime_cnt_str(t, INGRESS_FWDNS_P101));
ASSERT_GT(dtimes[EGRESS_FWDNS_P100], 0,
dtime_cnt_str(t, EGRESS_FWDNS_P100));
ASSERT_EQ(dtimes[EGRESS_FWDNS_P101], 0,
dtime_cnt_str(t, EGRESS_FWDNS_P101));
ASSERT_GT(dtimes[EGRESS_ENDHOST], 0,
dtime_cnt_str(t, EGRESS_ENDHOST));
ASSERT_GT(dtimes[INGRESS_ENDHOST], 0,
dtime_cnt_str(t, INGRESS_ENDHOST));
for (i = INGRESS_FWDNS_P100; i < __MAX_CNT; i++)
ASSERT_EQ(errs[i], 0, dtime_err_str(t, i));
}
static void test_tcp_dtime(struct test_tc_dtime *skel, int family, bool bpf_fwd)
{
__u32 *dtimes, *errs;
const char *addr;
int i, t;
if (family == AF_INET) {
t = bpf_fwd ? TCP_IP4 : TCP_IP4_RT_FWD;
addr = IP4_DST;
} else {
t = bpf_fwd ? TCP_IP6 : TCP_IP6_RT_FWD;
addr = IP6_DST;
}
dtimes = skel->bss->dtimes[t];
errs = skel->bss->errs[t];
skel->bss->test = t;
test_inet_dtime(family, SOCK_STREAM, addr, 50000 + t);
/* fwdns_prio100 prog does not read delivery_time_type, so
* kernel puts the (rcv) timestamp in __sk_buff->tstamp
*/
ASSERT_EQ(dtimes[INGRESS_FWDNS_P100], 0,
dtime_cnt_str(t, INGRESS_FWDNS_P100));
for (i = INGRESS_FWDNS_P101; i < SET_DTIME; i++)
ASSERT_GT(dtimes[i], 0, dtime_cnt_str(t, i));
for (i = INGRESS_FWDNS_P100; i < __MAX_CNT; i++)
ASSERT_EQ(errs[i], 0, dtime_err_str(t, i));
}
static void test_udp_dtime(struct test_tc_dtime *skel, int family, bool bpf_fwd)
{
__u32 *dtimes, *errs;
const char *addr;
int i, t;
if (family == AF_INET) {
t = bpf_fwd ? UDP_IP4 : UDP_IP4_RT_FWD;
addr = IP4_DST;
} else {
t = bpf_fwd ? UDP_IP6 : UDP_IP6_RT_FWD;
addr = IP6_DST;
}
dtimes = skel->bss->dtimes[t];
errs = skel->bss->errs[t];
skel->bss->test = t;
test_inet_dtime(family, SOCK_DGRAM, addr, 50000 + t);
ASSERT_EQ(dtimes[INGRESS_FWDNS_P100], 0,
dtime_cnt_str(t, INGRESS_FWDNS_P100));
/* non-monotonic delivery time is not forwarded */
ASSERT_EQ(dtimes[INGRESS_FWDNS_P101], 0,
dtime_cnt_str(t, INGRESS_FWDNS_P101));
for (i = EGRESS_FWDNS_P100; i < SET_DTIME; i++)
ASSERT_GT(dtimes[i], 0, dtime_cnt_str(t, i));
for (i = INGRESS_FWDNS_P100; i < __MAX_CNT; i++)
ASSERT_EQ(errs[i], 0, dtime_err_str(t, i));
}
static void test_tc_redirect_dtime(struct netns_setup_result *setup_result)
{
struct test_tc_dtime *skel;
struct nstoken *nstoken;
int err;
skel = test_tc_dtime__open();
if (!ASSERT_OK_PTR(skel, "test_tc_dtime__open"))
return;
skel->rodata->IFINDEX_SRC = setup_result->ifindex_veth_src_fwd;
skel->rodata->IFINDEX_DST = setup_result->ifindex_veth_dst_fwd;
err = test_tc_dtime__load(skel);
if (!ASSERT_OK(err, "test_tc_dtime__load"))
goto done;
if (netns_load_dtime_bpf(skel, setup_result))
goto done;
nstoken = open_netns(NS_FWD);
if (!ASSERT_OK_PTR(nstoken, "setns fwd"))
goto done;
err = set_forwarding(false);
close_netns(nstoken);
if (!ASSERT_OK(err, "disable forwarding"))
goto done;
test_tcp_clear_dtime(skel);
test_tcp_dtime(skel, AF_INET, true);
test_tcp_dtime(skel, AF_INET6, true);
test_udp_dtime(skel, AF_INET, true);
test_udp_dtime(skel, AF_INET6, true);
/* Test the kernel ip[6]_forward path instead
* of bpf_redirect_neigh().
*/
nstoken = open_netns(NS_FWD);
if (!ASSERT_OK_PTR(nstoken, "setns fwd"))
goto done;
err = set_forwarding(true);
close_netns(nstoken);
if (!ASSERT_OK(err, "enable forwarding"))
goto done;
test_tcp_dtime(skel, AF_INET, false);
test_tcp_dtime(skel, AF_INET6, false);
test_udp_dtime(skel, AF_INET, false);
test_udp_dtime(skel, AF_INET6, false);
done:
test_tc_dtime__destroy(skel);
}
static void test_tc_redirect_neigh_fib(struct netns_setup_result *setup_result)
{
struct nstoken *nstoken = NULL;
struct test_tc_neigh_fib *skel = NULL;
nstoken = open_netns(NS_FWD);
if (!ASSERT_OK_PTR(nstoken, "setns fwd"))
return;
skel = test_tc_neigh_fib__open();
if (!ASSERT_OK_PTR(skel, "test_tc_neigh_fib__open"))
goto done;
if (!ASSERT_OK(test_tc_neigh_fib__load(skel), "test_tc_neigh_fib__load"))
goto done;
if (netns_load_bpf(skel->progs.tc_src, skel->progs.tc_dst,
skel->progs.tc_chk, setup_result))
goto done;
/* bpf_fib_lookup() checks if forwarding is enabled */
if (!ASSERT_OK(set_forwarding(true), "enable forwarding"))
goto done;
test_connectivity();
done:
if (skel)
test_tc_neigh_fib__destroy(skel);
close_netns(nstoken);
}
static void test_tc_redirect_neigh(struct netns_setup_result *setup_result)
{
struct nstoken *nstoken = NULL;
struct test_tc_neigh *skel = NULL;
int err;
nstoken = open_netns(NS_FWD);
if (!ASSERT_OK_PTR(nstoken, "setns fwd"))
return;
skel = test_tc_neigh__open();
if (!ASSERT_OK_PTR(skel, "test_tc_neigh__open"))
goto done;
skel->rodata->IFINDEX_SRC = setup_result->ifindex_veth_src_fwd;
skel->rodata->IFINDEX_DST = setup_result->ifindex_veth_dst_fwd;
err = test_tc_neigh__load(skel);
if (!ASSERT_OK(err, "test_tc_neigh__load"))
goto done;
if (netns_load_bpf(skel->progs.tc_src, skel->progs.tc_dst,
skel->progs.tc_chk, setup_result))
goto done;
if (!ASSERT_OK(set_forwarding(false), "disable forwarding"))
goto done;
test_connectivity();
done:
if (skel)
test_tc_neigh__destroy(skel);
close_netns(nstoken);
}
static void test_tc_redirect_peer(struct netns_setup_result *setup_result)
{
struct nstoken *nstoken;
struct test_tc_peer *skel;
int err;
nstoken = open_netns(NS_FWD);
if (!ASSERT_OK_PTR(nstoken, "setns fwd"))
return;
skel = test_tc_peer__open();
if (!ASSERT_OK_PTR(skel, "test_tc_peer__open"))
goto done;
skel->rodata->IFINDEX_SRC = setup_result->ifindex_veth_src_fwd;
skel->rodata->IFINDEX_DST = setup_result->ifindex_veth_dst_fwd;
err = test_tc_peer__load(skel);
if (!ASSERT_OK(err, "test_tc_peer__load"))
goto done;
if (netns_load_bpf(skel->progs.tc_src, skel->progs.tc_dst,
skel->progs.tc_chk, setup_result))
goto done;
if (!ASSERT_OK(set_forwarding(false), "disable forwarding"))
goto done;
test_connectivity();
done:
if (skel)
test_tc_peer__destroy(skel);
close_netns(nstoken);
}
static int tun_open(char *name)
{
struct ifreq ifr;
int fd, err;
fd = open("/dev/net/tun", O_RDWR);
if (!ASSERT_GE(fd, 0, "open /dev/net/tun"))
return -1;
memset(&ifr, 0, sizeof(ifr));
ifr.ifr_flags = IFF_TUN | IFF_NO_PI;
if (*name)
strncpy(ifr.ifr_name, name, IFNAMSIZ);
err = ioctl(fd, TUNSETIFF, &ifr);
if (!ASSERT_OK(err, "ioctl TUNSETIFF"))
goto fail;
SYS(fail, "ip link set dev %s up", name);
return fd;
fail:
close(fd);
return -1;
}
enum {
SRC_TO_TARGET = 0,
TARGET_TO_SRC = 1,
};
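/* shuttle packets between the two TUN fds in both directions, acting as
 * the wire between the src and fwd namespaces
 */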
static int tun_relay_loop(int src_fd, int target_fd)
{
fd_set rfds, wfds;
FD_ZERO(&rfds);
FD_ZERO(&wfds);
for (;;) {
char buf[1500];
int direction, nread, nwrite;
FD_SET(src_fd, &rfds);
FD_SET(target_fd, &rfds);
if (select(1 + MAX(src_fd, target_fd), &rfds, NULL, NULL, NULL) < 0) {
log_err("select failed");
return 1;
}
direction = FD_ISSET(src_fd, &rfds) ? SRC_TO_TARGET : TARGET_TO_SRC;
nread = read(direction == SRC_TO_TARGET ? src_fd : target_fd, buf, sizeof(buf));
if (nread < 0) {
log_err("read failed");
return 1;
}
nwrite = write(direction == SRC_TO_TARGET ? target_fd : src_fd, buf, nread);
if (nwrite != nread) {
log_err("write failed");
return 1;
}
}
}
static void test_tc_redirect_peer_l3(struct netns_setup_result *setup_result)
{
LIBBPF_OPTS(bpf_tc_hook, qdisc_tun_fwd);
LIBBPF_OPTS(bpf_tc_hook, qdisc_veth_dst_fwd);
struct test_tc_peer *skel = NULL;
struct nstoken *nstoken = NULL;
int err;
int tunnel_pid = -1;
int src_fd = -1, target_fd = -1;
int ifindex;
/* Start a L3 TUN/TAP tunnel between the src and dst namespaces.
* This test is using TUN/TAP instead of e.g. IPIP or GRE tunnel as those
* expose the L2 headers encapsulating the IP packet to BPF and hence
* don't have skb in suitable state for this test. Alternative to TUN/TAP
* would be e.g. Wireguard which would appear as a pure L3 device to BPF,
* but that requires much more complicated setup.
*/
nstoken = open_netns(NS_SRC);
if (!ASSERT_OK_PTR(nstoken, "setns " NS_SRC))
return;
src_fd = tun_open("tun_src");
if (!ASSERT_GE(src_fd, 0, "tun_open tun_src"))
goto fail;
close_netns(nstoken);
nstoken = open_netns(NS_FWD);
if (!ASSERT_OK_PTR(nstoken, "setns " NS_FWD))
goto fail;
target_fd = tun_open("tun_fwd");
if (!ASSERT_GE(target_fd, 0, "tun_open tun_fwd"))
goto fail;
tunnel_pid = fork();
if (!ASSERT_GE(tunnel_pid, 0, "fork tun_relay_loop"))
goto fail;
if (tunnel_pid == 0)
exit(tun_relay_loop(src_fd, target_fd));
skel = test_tc_peer__open();
if (!ASSERT_OK_PTR(skel, "test_tc_peer__open"))
goto fail;
ifindex = if_nametoindex("tun_fwd");
if (!ASSERT_GT(ifindex, 0, "if_nametoindex tun_fwd"))
goto fail;
skel->rodata->IFINDEX_SRC = ifindex;
skel->rodata->IFINDEX_DST = setup_result->ifindex_veth_dst_fwd;
err = test_tc_peer__load(skel);
if (!ASSERT_OK(err, "test_tc_peer__load"))
goto fail;
/* Load "tc_src_l3" on the tun_fwd interface to redirect packets
 * towards dst, "tc_dst_l3" on veth_dst_fwd ingress to redirect them
 * towards src, and "tc_chk" on veth_dst_fwd egress to drop
 * non-redirected packets.
 */
/* tc qdisc add dev tun_fwd clsact */
QDISC_CLSACT_CREATE(&qdisc_tun_fwd, ifindex);
/* tc filter add dev tun_fwd ingress bpf da tc_src_l3 */
XGRESS_FILTER_ADD(&qdisc_tun_fwd, BPF_TC_INGRESS, skel->progs.tc_src_l3, 0);
/* tc qdisc add dev veth_dst_fwd clsact */
QDISC_CLSACT_CREATE(&qdisc_veth_dst_fwd, setup_result->ifindex_veth_dst_fwd);
/* tc filter add dev veth_dst_fwd ingress bpf da tc_dst_l3 */
XGRESS_FILTER_ADD(&qdisc_veth_dst_fwd, BPF_TC_INGRESS, skel->progs.tc_dst_l3, 0);
/* tc filter add dev veth_dst_fwd egress bpf da tc_chk */
XGRESS_FILTER_ADD(&qdisc_veth_dst_fwd, BPF_TC_EGRESS, skel->progs.tc_chk, 0);
/* Setup route and neigh tables */
SYS(fail, "ip -netns " NS_SRC " addr add dev tun_src " IP4_TUN_SRC "/24");
SYS(fail, "ip -netns " NS_FWD " addr add dev tun_fwd " IP4_TUN_FWD "/24");
SYS(fail, "ip -netns " NS_SRC " addr add dev tun_src " IP6_TUN_SRC "/64 nodad");
SYS(fail, "ip -netns " NS_FWD " addr add dev tun_fwd " IP6_TUN_FWD "/64 nodad");
SYS(fail, "ip -netns " NS_SRC " route del " IP4_DST "/32 dev veth_src scope global");
SYS(fail, "ip -netns " NS_SRC " route add " IP4_DST "/32 via " IP4_TUN_FWD
" dev tun_src scope global");
SYS(fail, "ip -netns " NS_DST " route add " IP4_TUN_SRC "/32 dev veth_dst scope global");
SYS(fail, "ip -netns " NS_SRC " route del " IP6_DST "/128 dev veth_src scope global");
SYS(fail, "ip -netns " NS_SRC " route add " IP6_DST "/128 via " IP6_TUN_FWD
" dev tun_src scope global");
SYS(fail, "ip -netns " NS_DST " route add " IP6_TUN_SRC "/128 dev veth_dst scope global");
SYS(fail, "ip -netns " NS_DST " neigh add " IP4_TUN_SRC " dev veth_dst lladdr " MAC_DST_FWD);
SYS(fail, "ip -netns " NS_DST " neigh add " IP6_TUN_SRC " dev veth_dst lladdr " MAC_DST_FWD);
if (!ASSERT_OK(set_forwarding(false), "disable forwarding"))
goto fail;
test_connectivity();
fail:
if (tunnel_pid > 0) {
kill(tunnel_pid, SIGTERM);
waitpid(tunnel_pid, NULL, 0);
}
if (src_fd >= 0)
close(src_fd);
if (target_fd >= 0)
close(target_fd);
if (skel)
test_tc_peer__destroy(skel);
if (nstoken)
close_netns(nstoken);
}
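/* run a subtest against freshly created namespaces, links and routes,
 * tearing the namespaces down afterwards
 */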
#define RUN_TEST(name) \
({ \
struct netns_setup_result setup_result; \
if (test__start_subtest(#name)) \
if (ASSERT_OK(netns_setup_namespaces("add"), "setup namespaces")) { \
if (ASSERT_OK(netns_setup_links_and_routes(&setup_result), \
"setup links and routes")) \
test_ ## name(&setup_result); \
netns_setup_namespaces("delete"); \
} \
})
static void *test_tc_redirect_run_tests(void *arg)
{
netns_setup_namespaces_nofail("delete");
RUN_TEST(tc_redirect_peer);
RUN_TEST(tc_redirect_peer_l3);
RUN_TEST(tc_redirect_neigh);
RUN_TEST(tc_redirect_neigh_fib);
RUN_TEST(tc_redirect_dtime);
return NULL;
}
void test_tc_redirect(void)
{
pthread_t test_thread;
int err;
/* Run the tests in their own thread to isolate the namespace changes
* so they do not affect the environment of other tests.
* (specifically needed because of unshare(CLONE_NEWNS) in open_netns())
*/
err = pthread_create(&test_thread, NULL, &test_tc_redirect_run_tests, NULL);
if (ASSERT_OK(err, "pthread_create"))
ASSERT_OK(pthread_join(test_thread, NULL), "pthread_join");
}
| linux-master | tools/testing/selftests/bpf/prog_tests/tc_redirect.c |
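/* A minimal, self-contained sketch of the thread-isolation idiom used by
 * test_tc_redirect() above: namespace-mutating work runs in a dedicated
 * pthread so unshare()/setns() side effects cannot leak into the main test
 * process. The worker body is a stand-in, not part of the selftests.
 */
#define _GNU_SOURCE
#include <pthread.h>
#include <sched.h>
#include <stdio.h>

static void *netns_worker(void *arg)
{
	/* CLONE_NEWNET affects only the calling task, i.e. this thread */
	if (unshare(CLONE_NEWNET))
		perror("unshare(CLONE_NEWNET)");
	return NULL;
}

int main(void)
{
	pthread_t t;

	if (pthread_create(&t, NULL, netns_worker, NULL))
		return 1;
	return pthread_join(t, NULL);
}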
// SPDX-License-Identifier: GPL-2.0
#define _GNU_SOURCE
#include <pthread.h>
#include <sched.h>
#include <sys/socket.h>
#include <test_progs.h>
#define MAX_CNT_RAWTP 10ull
#define MAX_STACK_RAWTP 100
static int duration = 0;
struct get_stack_trace_t {
int pid;
int kern_stack_size;
int user_stack_size;
int user_stack_buildid_size;
__u64 kern_stack[MAX_STACK_RAWTP];
__u64 user_stack[MAX_STACK_RAWTP];
struct bpf_stack_build_id user_stack_buildid[MAX_STACK_RAWTP];
};
static void get_stack_print_output(void *ctx, int cpu, void *data, __u32 size)
{
bool good_kern_stack = false, good_user_stack = false;
const char *nonjit_func = "___bpf_prog_run";
/* perfbuf-submitted data is only 4-byte aligned, but we need 8-byte
 * alignment to read the __u64 stack entries, so copy the data into an
 * aligned local variable for simplicity
 */
struct get_stack_trace_t e;
int i, num_stack;
struct ksym *ks;
memset(&e, 0, sizeof(e));
memcpy(&e, data, size <= sizeof(e) ? size : sizeof(e));
if (size < sizeof(struct get_stack_trace_t)) {
__u64 *raw_data = data;
bool found = false;
num_stack = size / sizeof(__u64);
/* If jit is enabled, we do not have a good way to
* verify the sanity of the kernel stack. So we
* just assume it is good if the stack is not empty.
* This could be improved in the future.
*/
if (env.jit_enabled) {
found = num_stack > 0;
} else {
for (i = 0; i < num_stack; i++) {
ks = ksym_search(raw_data[i]);
if (ks && (strcmp(ks->name, nonjit_func) == 0)) {
found = true;
break;
}
}
}
if (found) {
good_kern_stack = true;
good_user_stack = true;
}
} else {
num_stack = e.kern_stack_size / sizeof(__u64);
if (env.jit_enabled) {
good_kern_stack = num_stack > 0;
} else {
for (i = 0; i < num_stack; i++) {
ks = ksym_search(e.kern_stack[i]);
if (ks && (strcmp(ks->name, nonjit_func) == 0)) {
good_kern_stack = true;
break;
}
}
}
if (e.user_stack_size > 0 && e.user_stack_buildid_size > 0)
good_user_stack = true;
}
if (!good_kern_stack)
CHECK(!good_kern_stack, "kern_stack", "corrupted kernel stack\n");
if (!good_user_stack)
CHECK(!good_user_stack, "user_stack", "corrupted user stack\n");
}
void test_get_stack_raw_tp(void)
{
const char *file = "./test_get_stack_rawtp.bpf.o";
const char *file_err = "./test_get_stack_rawtp_err.bpf.o";
const char *prog_name = "bpf_prog1";
int i, err, prog_fd, exp_cnt = MAX_CNT_RAWTP;
struct perf_buffer *pb = NULL;
struct bpf_link *link = NULL;
struct timespec tv = {0, 10};
struct bpf_program *prog;
struct bpf_object *obj;
struct bpf_map *map;
cpu_set_t cpu_set;
err = bpf_prog_test_load(file_err, BPF_PROG_TYPE_RAW_TRACEPOINT, &obj, &prog_fd);
if (CHECK(err >= 0, "prog_load raw tp", "err %d errno %d\n", err, errno))
return;
err = bpf_prog_test_load(file, BPF_PROG_TYPE_RAW_TRACEPOINT, &obj, &prog_fd);
if (CHECK(err, "prog_load raw tp", "err %d errno %d\n", err, errno))
return;
prog = bpf_object__find_program_by_name(obj, prog_name);
if (CHECK(!prog, "find_probe", "prog '%s' not found\n", prog_name))
goto close_prog;
map = bpf_object__find_map_by_name(obj, "perfmap");
if (CHECK(!map, "bpf_find_map", "not found\n"))
goto close_prog;
err = load_kallsyms();
if (CHECK(err < 0, "load_kallsyms", "err %d errno %d\n", err, errno))
goto close_prog;
CPU_ZERO(&cpu_set);
CPU_SET(0, &cpu_set);
err = pthread_setaffinity_np(pthread_self(), sizeof(cpu_set), &cpu_set);
if (CHECK(err, "set_affinity", "err %d, errno %d\n", err, errno))
goto close_prog;
link = bpf_program__attach_raw_tracepoint(prog, "sys_enter");
if (!ASSERT_OK_PTR(link, "attach_raw_tp"))
goto close_prog;
pb = perf_buffer__new(bpf_map__fd(map), 8, get_stack_print_output,
NULL, NULL, NULL);
if (!ASSERT_OK_PTR(pb, "perf_buf__new"))
goto close_prog;
/* trigger some syscall action */
for (i = 0; i < MAX_CNT_RAWTP; i++)
nanosleep(&tv, NULL);
while (exp_cnt > 0) {
err = perf_buffer__poll(pb, 100);
if (err < 0 && CHECK(err < 0, "pb__poll", "err %d\n", err))
goto close_prog;
exp_cnt -= err;
}
close_prog:
bpf_link__destroy(link);
perf_buffer__free(pb);
bpf_object__close(obj);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/get_stack_raw_tp.c |
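/* A minimal sketch of the alignment-copy idiom from get_stack_print_output()
 * above: perf-buffer records are only guaranteed 4-byte alignment, so 8-byte
 * fields are read through memcpy() into an aligned local. The event layout
 * here is a hypothetical example, not the selftest's struct.
 */
#include <inttypes.h>
#include <stdio.h>
#include <string.h>

struct event {
	int pid;
	int pad;
	uint64_t stack[4];	/* 8-byte fields need 8-byte alignment */
};

static void on_sample(void *ctx, int cpu, void *data, unsigned int size)
{
	struct event e;

	memset(&e, 0, sizeof(e));
	memcpy(&e, data, size <= sizeof(e) ? size : sizeof(e));
	printf("pid=%d first frame=0x%" PRIx64 "\n", e.pid, e.stack[0]);
}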
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022 Red Hat */
#include <test_progs.h>
#include <bpf/btf.h>
#include "bpf/libbpf_internal.h"
#include "cgroup_helpers.h"
static const char *module_name = "bpf_testmod";
static const char *symbol_name = "bpf_fentry_shadow_test";
static int get_bpf_testmod_btf_fd(void)
{
struct bpf_btf_info info;
char name[64];
__u32 id = 0, len;
int err, fd;
while (true) {
err = bpf_btf_get_next_id(id, &id);
if (err) {
log_err("failed to iterate BTF objects");
return err;
}
fd = bpf_btf_get_fd_by_id(id);
if (fd < 0) {
if (errno == ENOENT)
continue; /* expected race: BTF was unloaded */
err = -errno;
log_err("failed to get FD for BTF object #%d", id);
return err;
}
len = sizeof(info);
memset(&info, 0, sizeof(info));
info.name = ptr_to_u64(name);
info.name_len = sizeof(name);
err = bpf_obj_get_info_by_fd(fd, &info, &len);
if (err) {
err = -errno;
log_err("failed to get info for BTF object #%d", id);
close(fd);
return err;
}
if (strcmp(name, module_name) == 0)
return fd;
close(fd);
}
return -ENOENT;
}
void test_module_fentry_shadow(void)
{
struct btf *vmlinux_btf = NULL, *mod_btf = NULL;
int err, i;
int btf_fd[2] = {};
int prog_fd[2] = {};
int link_fd[2] = {};
__s32 btf_id[2] = {};
LIBBPF_OPTS(bpf_prog_load_opts, load_opts,
.expected_attach_type = BPF_TRACE_FENTRY,
);
const struct bpf_insn trace_program[] = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
};
vmlinux_btf = btf__load_vmlinux_btf();
if (!ASSERT_OK_PTR(vmlinux_btf, "load_vmlinux_btf"))
return;
btf_fd[1] = get_bpf_testmod_btf_fd();
if (!ASSERT_GE(btf_fd[1], 0, "get_bpf_testmod_btf_fd"))
goto out;
mod_btf = btf_get_from_fd(btf_fd[1], vmlinux_btf);
if (!ASSERT_OK_PTR(mod_btf, "btf_get_from_fd"))
goto out;
btf_id[0] = btf__find_by_name_kind(vmlinux_btf, symbol_name, BTF_KIND_FUNC);
if (!ASSERT_GT(btf_id[0], 0, "btf_find_by_name"))
goto out;
btf_id[1] = btf__find_by_name_kind(mod_btf, symbol_name, BTF_KIND_FUNC);
if (!ASSERT_GT(btf_id[1], 0, "btf_find_by_name"))
goto out;
for (i = 0; i < 2; i++) {
load_opts.attach_btf_id = btf_id[i];
load_opts.attach_btf_obj_fd = btf_fd[i];
prog_fd[i] = bpf_prog_load(BPF_PROG_TYPE_TRACING, NULL, "GPL",
trace_program,
sizeof(trace_program) / sizeof(struct bpf_insn),
&load_opts);
if (!ASSERT_GE(prog_fd[i], 0, "bpf_prog_load"))
goto out;
/* If the verifier incorrectly resolves addresses of the
* shadowed functions and uses the same address for both the
* vmlinux and the bpf_testmod functions, this will fail on
* attempting to create two trampolines for the same address,
* which is forbidden.
*/
link_fd[i] = bpf_link_create(prog_fd[i], 0, BPF_TRACE_FENTRY, NULL);
if (!ASSERT_GE(link_fd[i], 0, "bpf_link_create"))
goto out;
}
err = bpf_prog_test_run_opts(prog_fd[0], NULL);
ASSERT_OK(err, "running test");
out:
btf__free(vmlinux_btf);
btf__free(mod_btf);
for (i = 0; i < 2; i++) {
if (btf_fd[i])
close(btf_fd[i]);
if (prog_fd[i] > 0)
close(prog_fd[i]);
if (link_fd[i] > 0)
close(link_fd[i]);
}
}
| linux-master | tools/testing/selftests/bpf/prog_tests/module_fentry_shadow.c |
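/* A minimal sketch of resolving a function's BTF type ID from vmlinux BTF,
 * using the same libbpf calls test_module_fentry_shadow() relies on above;
 * the symbol name is just an example.
 */
#include <bpf/btf.h>
#include <stdio.h>

int main(void)
{
	struct btf *vmlinux_btf = btf__load_vmlinux_btf();
	int id;

	if (!vmlinux_btf)
		return 1;
	id = btf__find_by_name_kind(vmlinux_btf, "bpf_fentry_test1",
				    BTF_KIND_FUNC);
	printf("BTF id of bpf_fentry_test1: %d\n", id);
	btf__free(vmlinux_btf);
	return id > 0 ? 0 : 1;
}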
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
#include <test_progs.h>
#include <network_helpers.h>
#include "jit_probe_mem.skel.h"
void test_jit_probe_mem(void)
{
LIBBPF_OPTS(bpf_test_run_opts, opts,
.data_in = &pkt_v4,
.data_size_in = sizeof(pkt_v4),
.repeat = 1,
);
struct jit_probe_mem *skel;
int ret;
skel = jit_probe_mem__open_and_load();
if (!ASSERT_OK_PTR(skel, "jit_probe_mem__open_and_load"))
return;
ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_jit_probe_mem), &opts);
ASSERT_OK(ret, "jit_probe_mem ret");
ASSERT_OK(opts.retval, "jit_probe_mem opts.retval");
ASSERT_EQ(skel->data->total_sum, 192, "jit_probe_mem total_sum");
jit_probe_mem__destroy(skel);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/jit_probe_mem.c |
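/* A minimal sketch of the LIBBPF_OPTS + bpf_prog_test_run_opts() pattern
 * test_jit_probe_mem() uses above; prog_fd is assumed to be the fd of an
 * already loaded, test-runnable program.
 */
#include <bpf/bpf.h>
#include <bpf/libbpf.h>

static int run_once(int prog_fd, void *pkt, unsigned int pkt_len)
{
	LIBBPF_OPTS(bpf_test_run_opts, opts,
		.data_in = pkt,
		.data_size_in = pkt_len,
		.repeat = 1,
	);
	int err = bpf_prog_test_run_opts(prog_fd, &opts);

	return err ? err : (int)opts.retval;
}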
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2021. Huawei Technologies Co., Ltd */
#include <test_progs.h>
#include "dummy_st_ops_success.skel.h"
#include "dummy_st_ops_fail.skel.h"
#include "trace_dummy_st_ops.skel.h"
/* Must be kept consistent with the definition in include/linux/bpf.h */
struct bpf_dummy_ops_state {
int val;
};
static void test_dummy_st_ops_attach(void)
{
struct dummy_st_ops_success *skel;
struct bpf_link *link;
skel = dummy_st_ops_success__open_and_load();
if (!ASSERT_OK_PTR(skel, "dummy_st_ops_load"))
return;
link = bpf_map__attach_struct_ops(skel->maps.dummy_1);
ASSERT_EQ(libbpf_get_error(link), -EOPNOTSUPP, "dummy_st_ops_attach");
dummy_st_ops_success__destroy(skel);
}
static void test_dummy_init_ret_value(void)
{
__u64 args[1] = {0};
LIBBPF_OPTS(bpf_test_run_opts, attr,
.ctx_in = args,
.ctx_size_in = sizeof(args),
);
struct dummy_st_ops_success *skel;
int fd, err;
skel = dummy_st_ops_success__open_and_load();
if (!ASSERT_OK_PTR(skel, "dummy_st_ops_load"))
return;
fd = bpf_program__fd(skel->progs.test_1);
err = bpf_prog_test_run_opts(fd, &attr);
ASSERT_OK(err, "test_run");
ASSERT_EQ(attr.retval, 0xf2f3f4f5, "test_ret");
dummy_st_ops_success__destroy(skel);
}
static void test_dummy_init_ptr_arg(void)
{
int exp_retval = 0xbeef;
struct bpf_dummy_ops_state in_state = {
.val = exp_retval,
};
__u64 args[1] = {(unsigned long)&in_state};
LIBBPF_OPTS(bpf_test_run_opts, attr,
.ctx_in = args,
.ctx_size_in = sizeof(args),
);
struct trace_dummy_st_ops *trace_skel;
struct dummy_st_ops_success *skel;
int fd, err;
skel = dummy_st_ops_success__open_and_load();
if (!ASSERT_OK_PTR(skel, "dummy_st_ops_load"))
return;
fd = bpf_program__fd(skel->progs.test_1);
trace_skel = trace_dummy_st_ops__open();
if (!ASSERT_OK_PTR(trace_skel, "trace_dummy_st_ops__open"))
goto done;
err = bpf_program__set_attach_target(trace_skel->progs.fentry_test_1,
fd, "test_1");
if (!ASSERT_OK(err, "set_attach_target(fentry_test_1)"))
goto done;
err = trace_dummy_st_ops__load(trace_skel);
if (!ASSERT_OK(err, "load(trace_skel)"))
goto done;
err = trace_dummy_st_ops__attach(trace_skel);
if (!ASSERT_OK(err, "attach(trace_skel)"))
goto done;
err = bpf_prog_test_run_opts(fd, &attr);
ASSERT_OK(err, "test_run");
ASSERT_EQ(in_state.val, 0x5a, "test_ptr_ret");
ASSERT_EQ(attr.retval, exp_retval, "test_ret");
ASSERT_EQ(trace_skel->bss->val, exp_retval, "fentry_val");
done:
dummy_st_ops_success__destroy(skel);
trace_dummy_st_ops__destroy(trace_skel);
}
static void test_dummy_multiple_args(void)
{
__u64 args[5] = {0, -100, 0x8a5f, 'c', 0x1234567887654321ULL};
LIBBPF_OPTS(bpf_test_run_opts, attr,
.ctx_in = args,
.ctx_size_in = sizeof(args),
);
struct dummy_st_ops_success *skel;
int fd, err;
size_t i;
char name[8];
skel = dummy_st_ops_success__open_and_load();
if (!ASSERT_OK_PTR(skel, "dummy_st_ops_load"))
return;
fd = bpf_program__fd(skel->progs.test_2);
err = bpf_prog_test_run_opts(fd, &attr);
ASSERT_OK(err, "test_run");
for (i = 0; i < ARRAY_SIZE(args); i++) {
snprintf(name, sizeof(name), "arg %zu", i);
ASSERT_EQ(skel->bss->test_2_args[i], args[i], name);
}
dummy_st_ops_success__destroy(skel);
}
static void test_dummy_sleepable(void)
{
__u64 args[1] = {0};
LIBBPF_OPTS(bpf_test_run_opts, attr,
.ctx_in = args,
.ctx_size_in = sizeof(args),
);
struct dummy_st_ops_success *skel;
int fd, err;
skel = dummy_st_ops_success__open_and_load();
if (!ASSERT_OK_PTR(skel, "dummy_st_ops_load"))
return;
fd = bpf_program__fd(skel->progs.test_sleepable);
err = bpf_prog_test_run_opts(fd, &attr);
ASSERT_OK(err, "test_run");
dummy_st_ops_success__destroy(skel);
}
void test_dummy_st_ops(void)
{
if (test__start_subtest("dummy_st_ops_attach"))
test_dummy_st_ops_attach();
if (test__start_subtest("dummy_init_ret_value"))
test_dummy_init_ret_value();
if (test__start_subtest("dummy_init_ptr_arg"))
test_dummy_init_ptr_arg();
if (test__start_subtest("dummy_multiple_args"))
test_dummy_multiple_args();
if (test__start_subtest("dummy_sleepable"))
test_dummy_sleepable();
RUN_TESTS(dummy_st_ops_fail);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/dummy_st_ops.c |
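/* A minimal sketch of how the dummy_st_ops tests above pass arguments into
 * a struct_ops program via bpf_prog_test_run_opts(): each __u64 slot of
 * ctx_in maps to one argument of the ops function. Values are arbitrary.
 */
#include <bpf/bpf.h>
#include <bpf/libbpf.h>

static int run_with_args(int prog_fd)
{
	__u64 args[2] = { 7, 42 };	/* args[0], args[1] of the ops func */
	LIBBPF_OPTS(bpf_test_run_opts, opts,
		.ctx_in = args,
		.ctx_size_in = sizeof(args),
	);

	return bpf_prog_test_run_opts(prog_fd, &opts);
}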
// SPDX-License-Identifier: GPL-2.0
#include <linux/err.h>
#include <string.h>
#include <bpf/btf.h>
#include <bpf/libbpf.h>
#include <linux/btf.h>
#include <linux/kernel.h>
#define CONFIG_DEBUG_INFO_BTF
#include <linux/btf_ids.h>
#include "test_progs.h"
static int duration;
struct symbol {
const char *name;
int type;
int id;
};
struct symbol test_symbols[] = {
{ "unused", BTF_KIND_UNKN, 0 },
{ "S", BTF_KIND_TYPEDEF, -1 },
{ "T", BTF_KIND_TYPEDEF, -1 },
{ "U", BTF_KIND_TYPEDEF, -1 },
{ "S", BTF_KIND_STRUCT, -1 },
{ "U", BTF_KIND_UNION, -1 },
{ "func", BTF_KIND_FUNC, -1 },
};
/* Align the .BTF_ids section to 4 bytes */
asm (
".pushsection " BTF_IDS_SECTION " ,\"a\"; \n"
".balign 4, 0; \n"
".popsection; \n");
BTF_ID_LIST(test_list_local)
BTF_ID_UNUSED
BTF_ID(typedef, S)
BTF_ID(typedef, T)
BTF_ID(typedef, U)
BTF_ID(struct, S)
BTF_ID(union, U)
BTF_ID(func, func)
extern __u32 test_list_global[];
BTF_ID_LIST_GLOBAL(test_list_global, 1)
BTF_ID_UNUSED
BTF_ID(typedef, S)
BTF_ID(typedef, T)
BTF_ID(typedef, U)
BTF_ID(struct, S)
BTF_ID(union, U)
BTF_ID(func, func)
BTF_SET_START(test_set)
BTF_ID(typedef, S)
BTF_ID(typedef, T)
BTF_ID(typedef, U)
BTF_ID(struct, S)
BTF_ID(union, U)
BTF_ID(func, func)
BTF_SET_END(test_set)
static int
__resolve_symbol(struct btf *btf, int type_id)
{
const struct btf_type *type;
const char *str;
unsigned int i;
type = btf__type_by_id(btf, type_id);
if (!type) {
PRINT_FAIL("Failed to get type for ID %d\n", type_id);
return -1;
}
for (i = 0; i < ARRAY_SIZE(test_symbols); i++) {
if (test_symbols[i].id >= 0)
continue;
if (BTF_INFO_KIND(type->info) != test_symbols[i].type)
continue;
str = btf__name_by_offset(btf, type->name_off);
if (!str) {
PRINT_FAIL("Failed to get name for BTF ID %d\n", type_id);
return -1;
}
if (!strcmp(str, test_symbols[i].name))
test_symbols[i].id = type_id;
}
return 0;
}
static int resolve_symbols(void)
{
struct btf *btf;
int type_id;
__u32 nr;
btf = btf__parse_elf("btf_data.bpf.o", NULL);
if (CHECK(libbpf_get_error(btf), "resolve",
"Failed to load BTF from btf_data.o\n"))
return -1;
nr = btf__type_cnt(btf);
for (type_id = 1; type_id < nr; type_id++) {
if (__resolve_symbol(btf, type_id))
break;
}
btf__free(btf);
return 0;
}
void test_resolve_btfids(void)
{
__u32 *test_list, *test_lists[] = { test_list_local, test_list_global };
unsigned int i, j;
int ret = 0;
if (resolve_symbols())
return;
/* Check BTF_ID_LIST(test_list_local) and
* BTF_ID_LIST_GLOBAL(test_list_global) IDs
*/
for (j = 0; j < ARRAY_SIZE(test_lists); j++) {
test_list = test_lists[j];
for (i = 0; i < ARRAY_SIZE(test_symbols); i++) {
ret = CHECK(test_list[i] != test_symbols[i].id,
"id_check",
"wrong ID for %s (%d != %d)\n",
test_symbols[i].name,
test_list[i], test_symbols[i].id);
if (ret)
return;
}
}
/* Check BTF_SET_START(test_set) IDs */
for (i = 0; i < test_set.cnt; i++) {
bool found = false;
for (j = 0; j < ARRAY_SIZE(test_symbols); j++) {
if (test_symbols[j].id != test_set.ids[i])
continue;
found = true;
break;
}
ret = CHECK(!found, "id_check",
"ID %d not found in test_symbols\n",
test_set.ids[i]);
if (ret)
break;
if (i > 0) {
if (!ASSERT_LE(test_set.ids[i - 1], test_set.ids[i], "sort_check"))
return;
}
}
}
| linux-master | tools/testing/selftests/bpf/prog_tests/resolve_btfids.c |
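/* A minimal sketch of the type-walk loop resolve_symbols() uses above:
 * iterate every type in a BTF blob and look at its name. The object path
 * is a placeholder.
 */
#include <bpf/btf.h>
#include <stdio.h>

int main(void)
{
	struct btf *btf = btf__parse_elf("some_object.bpf.o", NULL);
	__u32 id, n;

	if (!btf)
		return 1;
	n = btf__type_cnt(btf);
	for (id = 1; id < n; id++) {
		const struct btf_type *t = btf__type_by_id(btf, id);

		printf("[%u] %s\n", id, btf__name_by_offset(btf, t->name_off));
	}
	btf__free(btf);
	return 0;
}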
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#include <test_progs.h>
#include <sys/stat.h>
#include "test_link_pinning.skel.h"
static int duration = 0;
void test_link_pinning_subtest(struct bpf_program *prog,
struct test_link_pinning__bss *bss)
{
const char *link_pin_path = "/sys/fs/bpf/pinned_link_test";
struct stat statbuf = {};
struct bpf_link *link;
int err, i;
link = bpf_program__attach(prog);
if (!ASSERT_OK_PTR(link, "link_attach"))
goto cleanup;
bss->in = 1;
usleep(1);
CHECK(bss->out != 1, "res_check1", "exp %d, got %d\n", 1, bss->out);
/* pin link */
err = bpf_link__pin(link, link_pin_path);
if (CHECK(err, "link_pin", "err: %d\n", err))
goto cleanup;
CHECK(strcmp(link_pin_path, bpf_link__pin_path(link)), "pin_path1",
"exp %s, got %s\n", link_pin_path, bpf_link__pin_path(link));
/* check that link was pinned */
err = stat(link_pin_path, &statbuf);
if (CHECK(err, "stat_link", "err %d errno %d\n", err, errno))
goto cleanup;
bss->in = 2;
usleep(1);
CHECK(bss->out != 2, "res_check2", "exp %d, got %d\n", 2, bss->out);
/* destroy link, pinned link should keep program attached */
bpf_link__destroy(link);
link = NULL;
bss->in = 3;
usleep(1);
CHECK(bss->out != 3, "res_check3", "exp %d, got %d\n", 3, bss->out);
/* re-open link from BPFFS */
link = bpf_link__open(link_pin_path);
if (!ASSERT_OK_PTR(link, "link_open"))
goto cleanup;
CHECK(strcmp(link_pin_path, bpf_link__pin_path(link)), "pin_path2",
"exp %s, got %s\n", link_pin_path, bpf_link__pin_path(link));
/* unpin link from BPFFS, program still attached */
err = bpf_link__unpin(link);
if (CHECK(err, "link_unpin", "err: %d\n", err))
goto cleanup;
/* still active, as we have FD open now */
bss->in = 4;
usleep(1);
CHECK(bss->out != 4, "res_check4", "exp %d, got %d\n", 4, bss->out);
bpf_link__destroy(link);
link = NULL;
/* Validate it's finally detached.
 * Actual detachment might get delayed a bit, so there is no reliable
 * way to validate it immediately here; count up for long enough and
 * see if the output eventually stops being updated.
 */
for (i = 5; i < 10000; i++) {
bss->in = i;
usleep(1);
if (bss->out == i - 1)
break;
}
CHECK(i == 10000, "link_attached", "got to iteration #%d\n", i);
cleanup:
bpf_link__destroy(link);
}
void test_link_pinning(void)
{
struct test_link_pinning* skel;
skel = test_link_pinning__open_and_load();
if (CHECK(!skel, "skel_open", "failed to open skeleton\n"))
return;
if (test__start_subtest("pin_raw_tp"))
test_link_pinning_subtest(skel->progs.raw_tp_prog, skel->bss);
if (test__start_subtest("pin_tp_btf"))
test_link_pinning_subtest(skel->progs.tp_btf_prog, skel->bss);
test_link_pinning__destroy(skel);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/link_pinning.c |
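/* A minimal sketch of the pin/re-open lifecycle test_link_pinning_subtest()
 * exercises above; `link` is assumed to come from a successful
 * bpf_program__attach() and the BPFFS path is a placeholder.
 */
#include <bpf/libbpf.h>

static int pin_cycle(struct bpf_link *link)
{
	const char *path = "/sys/fs/bpf/example_link";
	int err = bpf_link__pin(link, path);

	if (err)
		return err;
	bpf_link__destroy(link);	/* pin keeps the program attached */
	link = bpf_link__open(path);	/* re-acquire the link from BPFFS */
	if (!link)
		return -1;
	bpf_link__unpin(link);		/* program stays attached while fd lives */
	return bpf_link__destroy(link);
}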
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020, Tessares SA. */
/* Copyright (c) 2022, SUSE. */
#include <linux/const.h>
#include <netinet/in.h>
#include <test_progs.h>
#include "cgroup_helpers.h"
#include "network_helpers.h"
#include "mptcp_sock.skel.h"
#include "mptcpify.skel.h"
#define NS_TEST "mptcp_ns"
#ifndef IPPROTO_MPTCP
#define IPPROTO_MPTCP 262
#endif
#ifndef SOL_MPTCP
#define SOL_MPTCP 284
#endif
#ifndef MPTCP_INFO
#define MPTCP_INFO 1
#endif
#ifndef MPTCP_INFO_FLAG_FALLBACK
#define MPTCP_INFO_FLAG_FALLBACK _BITUL(0)
#endif
#ifndef MPTCP_INFO_FLAG_REMOTE_KEY_RECEIVED
#define MPTCP_INFO_FLAG_REMOTE_KEY_RECEIVED _BITUL(1)
#endif
#ifndef TCP_CA_NAME_MAX
#define TCP_CA_NAME_MAX 16
#endif
struct __mptcp_info {
__u8 mptcpi_subflows;
__u8 mptcpi_add_addr_signal;
__u8 mptcpi_add_addr_accepted;
__u8 mptcpi_subflows_max;
__u8 mptcpi_add_addr_signal_max;
__u8 mptcpi_add_addr_accepted_max;
__u32 mptcpi_flags;
__u32 mptcpi_token;
__u64 mptcpi_write_seq;
__u64 mptcpi_snd_una;
__u64 mptcpi_rcv_nxt;
__u8 mptcpi_local_addr_used;
__u8 mptcpi_local_addr_max;
__u8 mptcpi_csum_enabled;
__u32 mptcpi_retransmits;
__u64 mptcpi_bytes_retrans;
__u64 mptcpi_bytes_sent;
__u64 mptcpi_bytes_received;
__u64 mptcpi_bytes_acked;
};
struct mptcp_storage {
__u32 invoked;
__u32 is_mptcp;
struct sock *sk;
__u32 token;
struct sock *first;
char ca_name[TCP_CA_NAME_MAX];
};
static struct nstoken *create_netns(void)
{
SYS(fail, "ip netns add %s", NS_TEST);
SYS(fail, "ip -net %s link set dev lo up", NS_TEST);
return open_netns(NS_TEST);
fail:
return NULL;
}
static void cleanup_netns(struct nstoken *nstoken)
{
if (nstoken)
close_netns(nstoken);
SYS_NOFAIL("ip netns del %s &> /dev/null", NS_TEST);
}
static int verify_tsk(int map_fd, int client_fd)
{
int err, cfd = client_fd;
struct mptcp_storage val;
err = bpf_map_lookup_elem(map_fd, &cfd, &val);
if (!ASSERT_OK(err, "bpf_map_lookup_elem"))
return err;
if (!ASSERT_EQ(val.invoked, 1, "unexpected invoked count"))
err++;
if (!ASSERT_EQ(val.is_mptcp, 0, "unexpected is_mptcp"))
err++;
return err;
}
static void get_msk_ca_name(char ca_name[])
{
ssize_t len;	/* read() may return -1, so keep this signed */
int fd;
fd = open("/proc/sys/net/ipv4/tcp_congestion_control", O_RDONLY);
if (!ASSERT_GE(fd, 0, "failed to open tcp_congestion_control"))
return;
len = read(fd, ca_name, TCP_CA_NAME_MAX);
if (!ASSERT_GT(len, 0, "failed to read ca_name"))
goto err;
if (len > 0 && ca_name[len - 1] == '\n')
ca_name[len - 1] = '\0';
err:
close(fd);
}
static int verify_msk(int map_fd, int client_fd, __u32 token)
{
char ca_name[TCP_CA_NAME_MAX];
int err, cfd = client_fd;
struct mptcp_storage val;
if (!ASSERT_GT(token, 0, "invalid token"))
return -1;
get_msk_ca_name(ca_name);
err = bpf_map_lookup_elem(map_fd, &cfd, &val);
if (!ASSERT_OK(err, "bpf_map_lookup_elem"))
return err;
if (!ASSERT_EQ(val.invoked, 1, "unexpected invoked count"))
err++;
if (!ASSERT_EQ(val.is_mptcp, 1, "unexpected is_mptcp"))
err++;
if (!ASSERT_EQ(val.token, token, "unexpected token"))
err++;
if (!ASSERT_EQ(val.first, val.sk, "unexpected first"))
err++;
if (!ASSERT_STRNEQ(val.ca_name, ca_name, TCP_CA_NAME_MAX, "unexpected ca_name"))
err++;
return err;
}
static int run_test(int cgroup_fd, int server_fd, bool is_mptcp)
{
int client_fd, prog_fd, map_fd, err;
struct mptcp_sock *sock_skel;
sock_skel = mptcp_sock__open_and_load();
if (!ASSERT_OK_PTR(sock_skel, "skel_open_load"))
return libbpf_get_error(sock_skel);
err = mptcp_sock__attach(sock_skel);
if (!ASSERT_OK(err, "skel_attach"))
goto out;
prog_fd = bpf_program__fd(sock_skel->progs._sockops);
map_fd = bpf_map__fd(sock_skel->maps.socket_storage_map);
err = bpf_prog_attach(prog_fd, cgroup_fd, BPF_CGROUP_SOCK_OPS, 0);
if (!ASSERT_OK(err, "bpf_prog_attach"))
goto out;
client_fd = connect_to_fd(server_fd, 0);
if (!ASSERT_GE(client_fd, 0, "connect to fd")) {
err = -EIO;
goto out;
}
err += is_mptcp ? verify_msk(map_fd, client_fd, sock_skel->bss->token) :
verify_tsk(map_fd, client_fd);
close(client_fd);
out:
mptcp_sock__destroy(sock_skel);
return err;
}
static void test_base(void)
{
struct nstoken *nstoken = NULL;
int server_fd, cgroup_fd;
cgroup_fd = test__join_cgroup("/mptcp");
if (!ASSERT_GE(cgroup_fd, 0, "test__join_cgroup"))
return;
nstoken = create_netns();
if (!ASSERT_OK_PTR(nstoken, "create_netns"))
goto fail;
/* without MPTCP */
server_fd = start_server(AF_INET, SOCK_STREAM, NULL, 0, 0);
if (!ASSERT_GE(server_fd, 0, "start_server"))
goto with_mptcp;
ASSERT_OK(run_test(cgroup_fd, server_fd, false), "run_test tcp");
close(server_fd);
with_mptcp:
/* with MPTCP */
server_fd = start_mptcp_server(AF_INET, NULL, 0, 0);
if (!ASSERT_GE(server_fd, 0, "start_mptcp_server"))
goto fail;
ASSERT_OK(run_test(cgroup_fd, server_fd, true), "run_test mptcp");
close(server_fd);
fail:
cleanup_netns(nstoken);
close(cgroup_fd);
}
static void send_byte(int fd)
{
char b = 0x55;
ASSERT_EQ(write(fd, &b, sizeof(b)), 1, "send single byte");
}
static int verify_mptcpify(int server_fd, int client_fd)
{
struct __mptcp_info info;
socklen_t optlen;
int protocol;
int err = 0;
optlen = sizeof(protocol);
if (!ASSERT_OK(getsockopt(server_fd, SOL_SOCKET, SO_PROTOCOL, &protocol, &optlen),
"getsockopt(SOL_PROTOCOL)"))
return -1;
if (!ASSERT_EQ(protocol, IPPROTO_MPTCP, "protocol isn't MPTCP"))
err++;
optlen = sizeof(info);
if (!ASSERT_OK(getsockopt(client_fd, SOL_MPTCP, MPTCP_INFO, &info, &optlen),
"getsockopt(MPTCP_INFO)"))
return -1;
if (!ASSERT_GE(info.mptcpi_flags, 0, "unexpected mptcpi_flags"))
err++;
if (!ASSERT_FALSE(info.mptcpi_flags & MPTCP_INFO_FLAG_FALLBACK,
"MPTCP fallback"))
err++;
if (!ASSERT_TRUE(info.mptcpi_flags & MPTCP_INFO_FLAG_REMOTE_KEY_RECEIVED,
"no remote key received"))
err++;
return err;
}
static int run_mptcpify(int cgroup_fd)
{
int server_fd, client_fd, err = 0;
struct mptcpify *mptcpify_skel;
mptcpify_skel = mptcpify__open_and_load();
if (!ASSERT_OK_PTR(mptcpify_skel, "skel_open_load"))
return libbpf_get_error(mptcpify_skel);
err = mptcpify__attach(mptcpify_skel);
if (!ASSERT_OK(err, "skel_attach"))
goto out;
/* without MPTCP */
server_fd = start_server(AF_INET, SOCK_STREAM, NULL, 0, 0);
if (!ASSERT_GE(server_fd, 0, "start_server")) {
err = -EIO;
goto out;
}
client_fd = connect_to_fd(server_fd, 0);
if (!ASSERT_GE(client_fd, 0, "connect to fd")) {
err = -EIO;
goto close_server;
}
send_byte(client_fd);
err = verify_mptcpify(server_fd, client_fd);
close(client_fd);
close_server:
close(server_fd);
out:
mptcpify__destroy(mptcpify_skel);
return err;
}
static void test_mptcpify(void)
{
struct nstoken *nstoken = NULL;
int cgroup_fd;
cgroup_fd = test__join_cgroup("/mptcpify");
if (!ASSERT_GE(cgroup_fd, 0, "test__join_cgroup"))
return;
nstoken = create_netns();
if (!ASSERT_OK_PTR(nstoken, "create_netns"))
goto fail;
ASSERT_OK(run_mptcpify(cgroup_fd), "run_mptcpify");
fail:
cleanup_netns(nstoken);
close(cgroup_fd);
}
void test_mptcp(void)
{
if (test__start_subtest("base"))
test_base();
if (test__start_subtest("mptcpify"))
test_mptcpify();
}
| linux-master | tools/testing/selftests/bpf/prog_tests/mptcp.c |
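/* A minimal sketch of the SO_PROTOCOL probe verify_mptcpify() builds on
 * above: ask the kernel which protocol a socket actually speaks; `fd` is
 * assumed to be any open TCP/MPTCP socket.
 */
#include <netinet/in.h>
#include <stdbool.h>
#include <sys/socket.h>

#ifndef IPPROTO_MPTCP
#define IPPROTO_MPTCP 262
#endif

static bool is_mptcp_socket(int fd)
{
	int protocol = 0;
	socklen_t len = sizeof(protocol);

	if (getsockopt(fd, SOL_SOCKET, SO_PROTOCOL, &protocol, &len))
		return false;
	return protocol == IPPROTO_MPTCP;
}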
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include <sys/time.h>
#include <sys/resource.h>
#include "test_send_signal_kern.skel.h"
static int sigusr1_received;
static void sigusr1_handler(int signum)
{
sigusr1_received = 1;
}
static void test_send_signal_common(struct perf_event_attr *attr,
bool signal_thread)
{
struct test_send_signal_kern *skel;
int pipe_c2p[2], pipe_p2c[2];
int err = -1, pmu_fd = -1;
char buf[256];
pid_t pid;
if (!ASSERT_OK(pipe(pipe_c2p), "pipe_c2p"))
return;
if (!ASSERT_OK(pipe(pipe_p2c), "pipe_p2c")) {
close(pipe_c2p[0]);
close(pipe_c2p[1]);
return;
}
pid = fork();
if (!ASSERT_GE(pid, 0, "fork")) {
close(pipe_c2p[0]);
close(pipe_c2p[1]);
close(pipe_p2c[0]);
close(pipe_p2c[1]);
return;
}
if (pid == 0) {
int old_prio;
volatile int j = 0;
/* install signal handler and notify parent */
ASSERT_NEQ(signal(SIGUSR1, sigusr1_handler), SIG_ERR, "signal");
close(pipe_c2p[0]); /* close read */
close(pipe_p2c[1]); /* close write */
/* boost to a high priority so we get a higher chance
* that if an interrupt happens, the underlying task
* is this process.
*/
errno = 0;
old_prio = getpriority(PRIO_PROCESS, 0);
ASSERT_OK(errno, "getpriority");
ASSERT_OK(setpriority(PRIO_PROCESS, 0, -20), "setpriority");
/* notify parent signal handler is installed */
ASSERT_EQ(write(pipe_c2p[1], buf, 1), 1, "pipe_write");
/* make sure parent enabled bpf program to send_signal */
ASSERT_EQ(read(pipe_p2c[0], buf, 1), 1, "pipe_read");
/* wait a little for signal handler */
for (int i = 0; i < 1000000000 && !sigusr1_received; i++) {
j /= i + j + 1;
if (!attr)
/* trigger the nanosleep tracepoint program. */
usleep(1);
}
buf[0] = sigusr1_received ? '2' : '0';
ASSERT_EQ(sigusr1_received, 1, "sigusr1_received");
ASSERT_EQ(write(pipe_c2p[1], buf, 1), 1, "pipe_write");
/* wait for parent notification and exit */
ASSERT_EQ(read(pipe_p2c[0], buf, 1), 1, "pipe_read");
/* restore the old priority */
ASSERT_OK(setpriority(PRIO_PROCESS, 0, old_prio), "setpriority");
close(pipe_c2p[1]);
close(pipe_p2c[0]);
exit(0);
}
close(pipe_c2p[1]); /* close write */
close(pipe_p2c[0]); /* close read */
skel = test_send_signal_kern__open_and_load();
if (!ASSERT_OK_PTR(skel, "skel_open_and_load"))
goto skel_open_load_failure;
if (!attr) {
err = test_send_signal_kern__attach(skel);
if (!ASSERT_OK(err, "skel_attach")) {
err = -1;
goto destroy_skel;
}
} else {
pmu_fd = syscall(__NR_perf_event_open, attr, pid, -1 /* cpu */,
-1 /* group id */, 0 /* flags */);
if (!ASSERT_GE(pmu_fd, 0, "perf_event_open")) {
err = -1;
goto destroy_skel;
}
skel->links.send_signal_perf =
bpf_program__attach_perf_event(skel->progs.send_signal_perf, pmu_fd);
if (!ASSERT_OK_PTR(skel->links.send_signal_perf, "attach_perf_event"))
goto disable_pmu;
}
/* wait until child signal handler installed */
ASSERT_EQ(read(pipe_c2p[0], buf, 1), 1, "pipe_read");
/* trigger the bpf send_signal */
skel->bss->signal_thread = signal_thread;
skel->bss->sig = SIGUSR1;
skel->bss->pid = pid;
/* notify child that bpf program can send_signal now */
ASSERT_EQ(write(pipe_p2c[1], buf, 1), 1, "pipe_write");
/* wait for result */
err = read(pipe_c2p[0], buf, 1);
if (!ASSERT_GE(err, 0, "reading pipe"))
goto disable_pmu;
if (!ASSERT_GT(err, 0, "reading pipe error: size 0")) {
err = -1;
goto disable_pmu;
}
ASSERT_EQ(buf[0], '2', "incorrect result");
/* notify child safe to exit */
ASSERT_EQ(write(pipe_p2c[1], buf, 1), 1, "pipe_write");
disable_pmu:
close(pmu_fd);
destroy_skel:
test_send_signal_kern__destroy(skel);
skel_open_load_failure:
close(pipe_c2p[0]);
close(pipe_p2c[1]);
wait(NULL);
}
static void test_send_signal_tracepoint(bool signal_thread)
{
test_send_signal_common(NULL, signal_thread);
}
static void test_send_signal_perf(bool signal_thread)
{
struct perf_event_attr attr = {
.sample_period = 1,
.type = PERF_TYPE_SOFTWARE,
.config = PERF_COUNT_SW_CPU_CLOCK,
};
test_send_signal_common(&attr, signal_thread);
}
static void test_send_signal_nmi(bool signal_thread)
{
struct perf_event_attr attr = {
.sample_period = 1,
.type = PERF_TYPE_HARDWARE,
.config = PERF_COUNT_HW_CPU_CYCLES,
};
int pmu_fd;
/* Some setups (e.g. virtual machines) might run with hardware
* perf events disabled. If this is the case, skip this test.
*/
pmu_fd = syscall(__NR_perf_event_open, &attr, 0 /* pid */,
-1 /* cpu */, -1 /* group_fd */, 0 /* flags */);
if (pmu_fd == -1) {
if (errno == ENOENT) {
printf("%s:SKIP:no PERF_COUNT_HW_CPU_CYCLES\n",
__func__);
test__skip();
return;
}
/* Let the test fail with a more informative message */
} else {
close(pmu_fd);
}
test_send_signal_common(&attr, signal_thread);
}
void test_send_signal(void)
{
if (test__start_subtest("send_signal_tracepoint"))
test_send_signal_tracepoint(false);
if (test__start_subtest("send_signal_perf"))
test_send_signal_perf(false);
if (test__start_subtest("send_signal_nmi"))
test_send_signal_nmi(false);
if (test__start_subtest("send_signal_tracepoint_thread"))
test_send_signal_tracepoint(true);
if (test__start_subtest("send_signal_perf_thread"))
test_send_signal_perf(true);
if (test__start_subtest("send_signal_nmi_thread"))
test_send_signal_nmi(true);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/send_signal.c |
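/* A minimal sketch of the two-pipe parent/child handshake
 * test_send_signal_common() is built around above: one byte in each
 * direction acts as a barrier. Error handling is trimmed for brevity.
 */
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	int c2p[2], p2c[2];
	char b = 0;

	if (pipe(c2p) || pipe(p2c))
		return 1;
	if (fork() == 0) {
		write(c2p[1], &b, 1);	/* child: "I am ready" */
		read(p2c[0], &b, 1);	/* child: wait for parent's go */
		_exit(0);
	}
	read(c2p[0], &b, 1);		/* parent: wait for child */
	write(p2c[1], &b, 1);		/* parent: release child */
	wait(NULL);
	return 0;
}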
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include <network_helpers.h>
#include "test_tcpbpf.h"
#include "test_tcpbpf_kern.skel.h"
#define LO_ADDR6 "::1"
#define CG_NAME "/tcpbpf-user-test"
static void verify_result(struct tcpbpf_globals *result)
{
__u32 expected_events = ((1 << BPF_SOCK_OPS_TIMEOUT_INIT) |
(1 << BPF_SOCK_OPS_RWND_INIT) |
(1 << BPF_SOCK_OPS_TCP_CONNECT_CB) |
(1 << BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB) |
(1 << BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB) |
(1 << BPF_SOCK_OPS_NEEDS_ECN) |
(1 << BPF_SOCK_OPS_STATE_CB) |
(1 << BPF_SOCK_OPS_TCP_LISTEN_CB));
/* check global map */
ASSERT_EQ(expected_events, result->event_map, "event_map");
ASSERT_EQ(result->bytes_received, 501, "bytes_received");
ASSERT_EQ(result->bytes_acked, 1002, "bytes_acked");
ASSERT_EQ(result->data_segs_in, 1, "data_segs_in");
ASSERT_EQ(result->data_segs_out, 1, "data_segs_out");
ASSERT_EQ(result->bad_cb_test_rv, 0x80, "bad_cb_test_rv");
ASSERT_EQ(result->good_cb_test_rv, 0, "good_cb_test_rv");
ASSERT_EQ(result->num_listen, 1, "num_listen");
/* 3 comes from one listening socket + both ends of the connection */
ASSERT_EQ(result->num_close_events, 3, "num_close_events");
/* check setsockopt for SAVE_SYN */
ASSERT_EQ(result->tcp_save_syn, 0, "tcp_save_syn");
/* check getsockopt for SAVED_SYN */
ASSERT_EQ(result->tcp_saved_syn, 1, "tcp_saved_syn");
/* check getsockopt for window_clamp */
ASSERT_EQ(result->window_clamp_client, 9216, "window_clamp_client");
ASSERT_EQ(result->window_clamp_server, 9216, "window_clamp_server");
}
static void run_test(struct tcpbpf_globals *result)
{
int listen_fd = -1, cli_fd = -1, accept_fd = -1;
char buf[1000];
int err = -1;
int i, rv;
listen_fd = start_server(AF_INET6, SOCK_STREAM, LO_ADDR6, 0, 0);
if (!ASSERT_NEQ(listen_fd, -1, "start_server"))
goto done;
cli_fd = connect_to_fd(listen_fd, 0);
if (!ASSERT_NEQ(cli_fd, -1, "connect_to_fd(listen_fd)"))
goto done;
accept_fd = accept(listen_fd, NULL, NULL);
if (!ASSERT_NEQ(accept_fd, -1, "accept(listen_fd)"))
goto done;
/* Send 1000B of '+'s from cli_fd -> accept_fd */
for (i = 0; i < 1000; i++)
buf[i] = '+';
rv = send(cli_fd, buf, 1000, 0);
if (!ASSERT_EQ(rv, 1000, "send(cli_fd)"))
goto done;
rv = recv(accept_fd, buf, 1000, 0);
if (!ASSERT_EQ(rv, 1000, "recv(accept_fd)"))
goto done;
/* Send 500B of '.'s from accept_fd ->cli_fd */
for (i = 0; i < 500; i++)
buf[i] = '.';
rv = send(accept_fd, buf, 500, 0);
if (!ASSERT_EQ(rv, 500, "send(accept_fd)"))
goto done;
rv = recv(cli_fd, buf, 500, 0);
if (!ASSERT_EQ(rv, 500, "recv(cli_fd)"))
goto done;
/*
* shutdown accept first to guarantee correct ordering for
* bytes_received and bytes_acked when we go to verify the results.
*/
shutdown(accept_fd, SHUT_WR);
err = recv(cli_fd, buf, 1, 0);
if (!ASSERT_OK(err, "recv(cli_fd) for fin"))
goto done;
shutdown(cli_fd, SHUT_WR);
err = recv(accept_fd, buf, 1, 0);
ASSERT_OK(err, "recv(accept_fd) for fin");
done:
if (accept_fd != -1)
close(accept_fd);
if (cli_fd != -1)
close(cli_fd);
if (listen_fd != -1)
close(listen_fd);
if (!err)
verify_result(result);
}
void test_tcpbpf_user(void)
{
struct test_tcpbpf_kern *skel;
int cg_fd = -1;
skel = test_tcpbpf_kern__open_and_load();
if (!ASSERT_OK_PTR(skel, "open and load skel"))
return;
cg_fd = test__join_cgroup(CG_NAME);
if (!ASSERT_GE(cg_fd, 0, "test__join_cgroup(" CG_NAME ")"))
goto err;
skel->links.bpf_testcb = bpf_program__attach_cgroup(skel->progs.bpf_testcb, cg_fd);
if (!ASSERT_OK_PTR(skel->links.bpf_testcb, "attach_cgroup(bpf_testcb)"))
goto err;
run_test(&skel->bss->global);
err:
if (cg_fd != -1)
close(cg_fd);
test_tcpbpf_kern__destroy(skel);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/tcpbpf_user.c |
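/* A minimal sketch of the shutdown/recv ordering run_test() uses above to
 * drain both FINs deterministically before the BPF counters are checked;
 * `a` and `b` stand for the two ends of an established TCP connection.
 */
#include <sys/socket.h>

static int close_gracefully(int a, int b)
{
	char c;

	shutdown(a, SHUT_WR);		/* a -> b: FIN */
	if (recv(b, &c, 1, 0) != 0)	/* b must see EOF */
		return -1;
	shutdown(b, SHUT_WR);		/* b -> a: FIN */
	if (recv(a, &c, 1, 0) != 0)	/* a must see EOF */
		return -1;
	return 0;
}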
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2021 Hengqi Chen */
#include <test_progs.h>
#include <bpf/btf.h>
static const char *module_name = "bpf_testmod";
static const char *symbol_name = "bpf_testmod_test_read";
void test_btf_module(void)
{
struct btf *vmlinux_btf, *module_btf;
__s32 type_id;
if (!env.has_testmod) {
test__skip();
return;
}
vmlinux_btf = btf__load_vmlinux_btf();
if (!ASSERT_OK_PTR(vmlinux_btf, "could not load vmlinux BTF"))
return;
module_btf = btf__load_module_btf(module_name, vmlinux_btf);
if (!ASSERT_OK_PTR(module_btf, "could not load module BTF"))
goto cleanup;
type_id = btf__find_by_name(module_btf, symbol_name);
ASSERT_GT(type_id, 0, "func not found");
cleanup:
btf__free(module_btf);
btf__free(vmlinux_btf);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/btf_module.c |
// SPDX-License-Identifier: GPL-2.0
#define _GNU_SOURCE
#include <linux/compiler.h>
#include <asm/barrier.h>
#include <test_progs.h>
#include <sys/mman.h>
#include <sys/epoll.h>
#include <time.h>
#include <sched.h>
#include <signal.h>
#include <pthread.h>
#include <sys/sysinfo.h>
#include <linux/perf_event.h>
#include <linux/ring_buffer.h>
#include "test_ringbuf.lskel.h"
#include "test_ringbuf_map_key.lskel.h"
#define EDONE 7777
static int duration = 0;
struct sample {
int pid;
int seq;
long value;
char comm[16];
};
static int sample_cnt;
static void atomic_inc(int *cnt)
{
__atomic_add_fetch(cnt, 1, __ATOMIC_SEQ_CST);
}
static int atomic_xchg(int *cnt, int val)
{
return __atomic_exchange_n(cnt, val, __ATOMIC_SEQ_CST);
}
static int process_sample(void *ctx, void *data, size_t len)
{
struct sample *s = data;
atomic_inc(&sample_cnt);
switch (s->seq) {
case 0:
CHECK(s->value != 333, "sample1_value", "exp %ld, got %ld\n",
333L, s->value);
return 0;
case 1:
CHECK(s->value != 777, "sample2_value", "exp %ld, got %ld\n",
777L, s->value);
return -EDONE;
default:
/* we don't care about the rest */
return 0;
}
}
static struct test_ringbuf_map_key_lskel *skel_map_key;
static struct test_ringbuf_lskel *skel;
static struct ring_buffer *ringbuf;
static void trigger_samples(void)
{
skel->bss->dropped = 0;
skel->bss->total = 0;
skel->bss->discarded = 0;
/* trigger exactly two samples */
skel->bss->value = 333;
syscall(__NR_getpgid);
skel->bss->value = 777;
syscall(__NR_getpgid);
}
static void *poll_thread(void *input)
{
long timeout = (long)input;
return (void *)(long)ring_buffer__poll(ringbuf, timeout);
}
static void ringbuf_subtest(void)
{
const size_t rec_sz = BPF_RINGBUF_HDR_SZ + sizeof(struct sample);
pthread_t thread;
long bg_ret = -1;
int err, cnt, rb_fd;
int page_size = getpagesize();
void *mmap_ptr, *tmp_ptr;
skel = test_ringbuf_lskel__open();
if (CHECK(!skel, "skel_open", "skeleton open failed\n"))
return;
skel->maps.ringbuf.max_entries = page_size;
err = test_ringbuf_lskel__load(skel);
if (CHECK(err != 0, "skel_load", "skeleton load failed\n"))
goto cleanup;
rb_fd = skel->maps.ringbuf.map_fd;
/* good read/write cons_pos */
mmap_ptr = mmap(NULL, page_size, PROT_READ | PROT_WRITE, MAP_SHARED, rb_fd, 0);
ASSERT_OK_PTR(mmap_ptr, "rw_cons_pos");
tmp_ptr = mremap(mmap_ptr, page_size, 2 * page_size, MREMAP_MAYMOVE);
if (!ASSERT_ERR_PTR(tmp_ptr, "rw_extend"))
goto cleanup;
ASSERT_ERR(mprotect(mmap_ptr, page_size, PROT_EXEC), "exec_cons_pos_protect");
ASSERT_OK(munmap(mmap_ptr, page_size), "unmap_rw");
/* bad writeable prod_pos */
mmap_ptr = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, rb_fd, page_size);
err = -errno;
ASSERT_ERR_PTR(mmap_ptr, "wr_prod_pos");
ASSERT_EQ(err, -EPERM, "wr_prod_pos_err");
/* bad writeable data pages */
mmap_ptr = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, rb_fd, 2 * page_size);
err = -errno;
ASSERT_ERR_PTR(mmap_ptr, "wr_data_page_one");
ASSERT_EQ(err, -EPERM, "wr_data_page_one_err");
mmap_ptr = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, rb_fd, 3 * page_size);
ASSERT_ERR_PTR(mmap_ptr, "wr_data_page_two");
mmap_ptr = mmap(NULL, 2 * page_size, PROT_WRITE, MAP_SHARED, rb_fd, 2 * page_size);
ASSERT_ERR_PTR(mmap_ptr, "wr_data_page_all");
/* good read-only pages */
mmap_ptr = mmap(NULL, 4 * page_size, PROT_READ, MAP_SHARED, rb_fd, 0);
if (!ASSERT_OK_PTR(mmap_ptr, "ro_prod_pos"))
goto cleanup;
ASSERT_ERR(mprotect(mmap_ptr, 4 * page_size, PROT_WRITE), "write_protect");
ASSERT_ERR(mprotect(mmap_ptr, 4 * page_size, PROT_EXEC), "exec_protect");
ASSERT_ERR_PTR(mremap(mmap_ptr, 0, 4 * page_size, MREMAP_MAYMOVE), "ro_remap");
ASSERT_OK(munmap(mmap_ptr, 4 * page_size), "unmap_ro");
/* good read-only pages with initial offset */
mmap_ptr = mmap(NULL, page_size, PROT_READ, MAP_SHARED, rb_fd, page_size);
if (!ASSERT_OK_PTR(mmap_ptr, "ro_prod_pos"))
goto cleanup;
ASSERT_ERR(mprotect(mmap_ptr, page_size, PROT_WRITE), "write_protect");
ASSERT_ERR(mprotect(mmap_ptr, page_size, PROT_EXEC), "exec_protect");
ASSERT_ERR_PTR(mremap(mmap_ptr, 0, 3 * page_size, MREMAP_MAYMOVE), "ro_remap");
ASSERT_OK(munmap(mmap_ptr, page_size), "unmap_ro");
/* only trigger BPF program for current process */
skel->bss->pid = getpid();
ringbuf = ring_buffer__new(skel->maps.ringbuf.map_fd,
process_sample, NULL, NULL);
if (CHECK(!ringbuf, "ringbuf_create", "failed to create ringbuf\n"))
goto cleanup;
err = test_ringbuf_lskel__attach(skel);
if (CHECK(err, "skel_attach", "skeleton attachment failed: %d\n", err))
goto cleanup;
trigger_samples();
/* 2 submitted + 1 discarded records */
CHECK(skel->bss->avail_data != 3 * rec_sz,
"err_avail_size", "exp %ld, got %ld\n",
3L * rec_sz, skel->bss->avail_data);
CHECK(skel->bss->ring_size != page_size,
"err_ring_size", "exp %ld, got %ld\n",
(long)page_size, skel->bss->ring_size);
CHECK(skel->bss->cons_pos != 0,
"err_cons_pos", "exp %ld, got %ld\n",
0L, skel->bss->cons_pos);
CHECK(skel->bss->prod_pos != 3 * rec_sz,
"err_prod_pos", "exp %ld, got %ld\n",
3L * rec_sz, skel->bss->prod_pos);
/* poll for samples */
err = ring_buffer__poll(ringbuf, -1);
/* -EDONE is used as an indicator that we are done */
if (CHECK(err != -EDONE, "err_done", "done err: %d\n", err))
goto cleanup;
cnt = atomic_xchg(&sample_cnt, 0);
CHECK(cnt != 2, "cnt", "exp %d samples, got %d\n", 2, cnt);
/* we expect extra polling to return nothing */
err = ring_buffer__poll(ringbuf, 0);
if (CHECK(err != 0, "extra_samples", "poll result: %d\n", err))
goto cleanup;
cnt = atomic_xchg(&sample_cnt, 0);
CHECK(cnt != 0, "cnt", "exp %d samples, got %d\n", 0, cnt);
CHECK(skel->bss->dropped != 0, "err_dropped", "exp %ld, got %ld\n",
0L, skel->bss->dropped);
CHECK(skel->bss->total != 2, "err_total", "exp %ld, got %ld\n",
2L, skel->bss->total);
CHECK(skel->bss->discarded != 1, "err_discarded", "exp %ld, got %ld\n",
1L, skel->bss->discarded);
/* now validate consumer position is updated and returned */
trigger_samples();
CHECK(skel->bss->cons_pos != 3 * rec_sz,
"err_cons_pos", "exp %ld, got %ld\n",
3L * rec_sz, skel->bss->cons_pos);
err = ring_buffer__poll(ringbuf, -1);
CHECK(err <= 0, "poll_err", "err %d\n", err);
cnt = atomic_xchg(&sample_cnt, 0);
CHECK(cnt != 2, "cnt", "exp %d samples, got %d\n", 2, cnt);
/* start poll in background w/ long timeout */
err = pthread_create(&thread, NULL, poll_thread, (void *)(long)10000);
if (CHECK(err, "bg_poll", "pthread_create failed: %d\n", err))
goto cleanup;
/* turn off notifications now */
skel->bss->flags = BPF_RB_NO_WAKEUP;
/* give background thread a bit of a time */
usleep(50000);
trigger_samples();
/* sleeping arbitrarily is bad, but no better way to know that
* epoll_wait() **DID NOT** unblock in background thread
*/
usleep(50000);
/* background poll should still be blocked */
err = pthread_tryjoin_np(thread, (void **)&bg_ret);
if (CHECK(err != EBUSY, "try_join", "err %d\n", err))
goto cleanup;
/* BPF side did everything right */
CHECK(skel->bss->dropped != 0, "err_dropped", "exp %ld, got %ld\n",
0L, skel->bss->dropped);
CHECK(skel->bss->total != 2, "err_total", "exp %ld, got %ld\n",
2L, skel->bss->total);
CHECK(skel->bss->discarded != 1, "err_discarded", "exp %ld, got %ld\n",
1L, skel->bss->discarded);
cnt = atomic_xchg(&sample_cnt, 0);
CHECK(cnt != 0, "cnt", "exp %d samples, got %d\n", 0, cnt);
/* clear flags to return to "adaptive" notification mode */
skel->bss->flags = 0;
/* produce new samples; no notification should be triggered because
 * the consumer is now behind
 */
trigger_samples();
/* background poll should still be blocked */
err = pthread_tryjoin_np(thread, (void **)&bg_ret);
if (CHECK(err != EBUSY, "try_join", "err %d\n", err))
goto cleanup;
/* still no samples, because consumer is behind */
cnt = atomic_xchg(&sample_cnt, 0);
CHECK(cnt != 0, "cnt", "exp %d samples, got %d\n", 0, cnt);
skel->bss->dropped = 0;
skel->bss->total = 0;
skel->bss->discarded = 0;
skel->bss->value = 333;
syscall(__NR_getpgid);
/* now force notifications */
skel->bss->flags = BPF_RB_FORCE_WAKEUP;
skel->bss->value = 777;
syscall(__NR_getpgid);
/* now we should get a pending notification */
usleep(50000);
err = pthread_tryjoin_np(thread, (void **)&bg_ret);
if (CHECK(err, "join_bg", "err %d\n", err))
goto cleanup;
if (CHECK(bg_ret <= 0, "bg_ret", "epoll_wait result: %ld", bg_ret))
goto cleanup;
/* due to timing variations, there could still be non-notified
* samples, so consume them here to collect all the samples
*/
err = ring_buffer__consume(ringbuf);
CHECK(err < 0, "rb_consume", "failed: %d\b", err);
/* 3 rounds, 2 samples each */
cnt = atomic_xchg(&sample_cnt, 0);
CHECK(cnt != 6, "cnt", "exp %d samples, got %d\n", 6, cnt);
/* BPF side did everything right */
CHECK(skel->bss->dropped != 0, "err_dropped", "exp %ld, got %ld\n",
0L, skel->bss->dropped);
CHECK(skel->bss->total != 2, "err_total", "exp %ld, got %ld\n",
2L, skel->bss->total);
CHECK(skel->bss->discarded != 1, "err_discarded", "exp %ld, got %ld\n",
1L, skel->bss->discarded);
test_ringbuf_lskel__detach(skel);
cleanup:
ring_buffer__free(ringbuf);
test_ringbuf_lskel__destroy(skel);
}
static int process_map_key_sample(void *ctx, void *data, size_t len)
{
struct sample *s;
int err, val;
s = data;
switch (s->seq) {
case 1:
ASSERT_EQ(s->value, 42, "sample_value");
err = bpf_map_lookup_elem(skel_map_key->maps.hash_map.map_fd,
s, &val);
ASSERT_OK(err, "hash_map bpf_map_lookup_elem");
ASSERT_EQ(val, 1, "hash_map val");
return -EDONE;
default:
return 0;
}
}
static void ringbuf_map_key_subtest(void)
{
int err;
skel_map_key = test_ringbuf_map_key_lskel__open();
if (!ASSERT_OK_PTR(skel_map_key, "test_ringbuf_map_key_lskel__open"))
return;
skel_map_key->maps.ringbuf.max_entries = getpagesize();
skel_map_key->bss->pid = getpid();
err = test_ringbuf_map_key_lskel__load(skel_map_key);
if (!ASSERT_OK(err, "test_ringbuf_map_key_lskel__load"))
goto cleanup;
ringbuf = ring_buffer__new(skel_map_key->maps.ringbuf.map_fd,
process_map_key_sample, NULL, NULL);
if (!ASSERT_OK_PTR(ringbuf, "ring_buffer__new"))
goto cleanup;
err = test_ringbuf_map_key_lskel__attach(skel_map_key);
if (!ASSERT_OK(err, "test_ringbuf_map_key_lskel__attach"))
goto cleanup_ringbuf;
syscall(__NR_getpgid);
ASSERT_EQ(skel_map_key->bss->seq, 1, "skel_map_key->bss->seq");
err = ring_buffer__poll(ringbuf, -1);
ASSERT_EQ(err, -EDONE, "ring_buffer__poll");
cleanup_ringbuf:
ring_buffer__free(ringbuf);
cleanup:
test_ringbuf_map_key_lskel__destroy(skel_map_key);
}
void test_ringbuf(void)
{
if (test__start_subtest("ringbuf"))
ringbuf_subtest();
if (test__start_subtest("ringbuf_map_key"))
ringbuf_map_key_subtest();
}
| linux-master | tools/testing/selftests/bpf/prog_tests/ringbuf.c |
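/* A minimal sketch of the user-space ring buffer consumer that
 * ringbuf_subtest() above builds on; `map_fd` is assumed to be the fd of a
 * BPF_MAP_TYPE_RINGBUF map from a loaded skeleton.
 */
#include <bpf/libbpf.h>
#include <stdio.h>

static int on_event(void *ctx, void *data, size_t len)
{
	printf("got %zu-byte sample\n", len);
	return 0;	/* a negative return aborts the poll loop */
}

static int consume(int map_fd)
{
	struct ring_buffer *rb;
	int err;

	rb = ring_buffer__new(map_fd, on_event, NULL, NULL);
	if (!rb)
		return -1;
	err = ring_buffer__poll(rb, 100 /* timeout, ms */);
	ring_buffer__free(rb);
	return err < 0 ? err : 0;
}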
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Facebook */
#define _GNU_SOURCE
#include <sched.h>
#include <test_progs.h>
#include "network_helpers.h"
#include "bpf_dctcp.skel.h"
#include "bpf_cubic.skel.h"
#include "bpf_iter_setsockopt.skel.h"
static int create_netns(void)
{
if (!ASSERT_OK(unshare(CLONE_NEWNET), "create netns"))
return -1;
if (!ASSERT_OK(system("ip link set dev lo up"), "bring up lo"))
return -1;
return 0;
}
static unsigned int set_bpf_cubic(int *fds, unsigned int nr_fds)
{
unsigned int i;
for (i = 0; i < nr_fds; i++) {
if (setsockopt(fds[i], SOL_TCP, TCP_CONGESTION, "bpf_cubic",
sizeof("bpf_cubic")))
return i;
}
return nr_fds;
}
static unsigned int check_bpf_dctcp(int *fds, unsigned int nr_fds)
{
char tcp_cc[16];
socklen_t optlen = sizeof(tcp_cc);
unsigned int i;
for (i = 0; i < nr_fds; i++) {
if (getsockopt(fds[i], SOL_TCP, TCP_CONGESTION,
tcp_cc, &optlen) ||
strcmp(tcp_cc, "bpf_dctcp"))
return i;
}
return nr_fds;
}
static int *make_established(int listen_fd, unsigned int nr_est,
int **paccepted_fds)
{
int *est_fds, *accepted_fds;
unsigned int i;
est_fds = malloc(sizeof(*est_fds) * nr_est);
if (!est_fds)
return NULL;
accepted_fds = malloc(sizeof(*accepted_fds) * nr_est);
if (!accepted_fds) {
free(est_fds);
return NULL;
}
for (i = 0; i < nr_est; i++) {
est_fds[i] = connect_to_fd(listen_fd, 0);
if (est_fds[i] == -1)
break;
if (set_bpf_cubic(&est_fds[i], 1) != 1) {
close(est_fds[i]);
break;
}
accepted_fds[i] = accept(listen_fd, NULL, 0);
if (accepted_fds[i] == -1) {
close(est_fds[i]);
break;
}
}
if (!ASSERT_EQ(i, nr_est, "create established fds")) {
free_fds(accepted_fds, i);
free_fds(est_fds, i);
return NULL;
}
*paccepted_fds = accepted_fds;
return est_fds;
}
static unsigned short get_local_port(int fd)
{
struct sockaddr_in6 addr;
socklen_t addrlen = sizeof(addr);
if (!getsockname(fd, (struct sockaddr *)&addr, &addrlen))
return ntohs(addr.sin6_port);
return 0;
}
static void do_bpf_iter_setsockopt(struct bpf_iter_setsockopt *iter_skel,
bool random_retry)
{
int *reuse_listen_fds = NULL, *accepted_fds = NULL, *est_fds = NULL;
unsigned int nr_reuse_listens = 256, nr_est = 256;
int err, iter_fd = -1, listen_fd = -1;
char buf;
/* Prepare non-reuseport listen_fd */
listen_fd = start_server(AF_INET6, SOCK_STREAM, "::1", 0, 0);
if (!ASSERT_GE(listen_fd, 0, "start_server"))
return;
if (!ASSERT_EQ(set_bpf_cubic(&listen_fd, 1), 1,
"set listen_fd to cubic"))
goto done;
iter_skel->bss->listen_hport = get_local_port(listen_fd);
if (!ASSERT_NEQ(iter_skel->bss->listen_hport, 0,
"get_local_port(listen_fd)"))
goto done;
/* Connect to non-reuseport listen_fd */
est_fds = make_established(listen_fd, nr_est, &accepted_fds);
if (!ASSERT_OK_PTR(est_fds, "create established"))
goto done;
/* Prepare reuseport listen fds */
reuse_listen_fds = start_reuseport_server(AF_INET6, SOCK_STREAM,
"::1", 0, 0,
nr_reuse_listens);
if (!ASSERT_OK_PTR(reuse_listen_fds, "start_reuseport_server"))
goto done;
if (!ASSERT_EQ(set_bpf_cubic(reuse_listen_fds, nr_reuse_listens),
nr_reuse_listens, "set reuse_listen_fds to cubic"))
goto done;
iter_skel->bss->reuse_listen_hport = get_local_port(reuse_listen_fds[0]);
if (!ASSERT_NEQ(iter_skel->bss->reuse_listen_hport, 0,
"get_local_port(reuse_listen_fds[0])"))
goto done;
/* Run bpf tcp iter to switch from bpf_cubic to bpf_dctcp */
iter_skel->bss->random_retry = random_retry;
iter_fd = bpf_iter_create(bpf_link__fd(iter_skel->links.change_tcp_cc));
if (!ASSERT_GE(iter_fd, 0, "create iter_fd"))
goto done;
while ((err = read(iter_fd, &buf, sizeof(buf))) == -1 &&
errno == EAGAIN)
;
if (!ASSERT_OK(err, "read iter error"))
goto done;
/* Check reuseport listen fds for dctcp */
ASSERT_EQ(check_bpf_dctcp(reuse_listen_fds, nr_reuse_listens),
nr_reuse_listens,
"check reuse_listen_fds dctcp");
/* Check non reuseport listen fd for dctcp */
ASSERT_EQ(check_bpf_dctcp(&listen_fd, 1), 1,
"check listen_fd dctcp");
/* Check established fds for dctcp */
ASSERT_EQ(check_bpf_dctcp(est_fds, nr_est), nr_est,
"check est_fds dctcp");
/* Check accepted fds for dctcp */
ASSERT_EQ(check_bpf_dctcp(accepted_fds, nr_est), nr_est,
"check accepted_fds dctcp");
done:
if (iter_fd != -1)
close(iter_fd);
if (listen_fd != -1)
close(listen_fd);
free_fds(reuse_listen_fds, nr_reuse_listens);
free_fds(accepted_fds, nr_est);
free_fds(est_fds, nr_est);
}
void serial_test_bpf_iter_setsockopt(void)
{
struct bpf_iter_setsockopt *iter_skel = NULL;
struct bpf_cubic *cubic_skel = NULL;
struct bpf_dctcp *dctcp_skel = NULL;
struct bpf_link *cubic_link = NULL;
struct bpf_link *dctcp_link = NULL;
if (create_netns())
return;
/* Load iter_skel */
iter_skel = bpf_iter_setsockopt__open_and_load();
if (!ASSERT_OK_PTR(iter_skel, "iter_skel"))
return;
iter_skel->links.change_tcp_cc = bpf_program__attach_iter(iter_skel->progs.change_tcp_cc, NULL);
if (!ASSERT_OK_PTR(iter_skel->links.change_tcp_cc, "attach iter"))
goto done;
/* Load bpf_cubic */
cubic_skel = bpf_cubic__open_and_load();
if (!ASSERT_OK_PTR(cubic_skel, "cubic_skel"))
goto done;
cubic_link = bpf_map__attach_struct_ops(cubic_skel->maps.cubic);
if (!ASSERT_OK_PTR(cubic_link, "cubic_link"))
goto done;
/* Load bpf_dctcp */
dctcp_skel = bpf_dctcp__open_and_load();
if (!ASSERT_OK_PTR(dctcp_skel, "dctcp_skel"))
goto done;
dctcp_link = bpf_map__attach_struct_ops(dctcp_skel->maps.dctcp);
if (!ASSERT_OK_PTR(dctcp_link, "dctcp_link"))
goto done;
do_bpf_iter_setsockopt(iter_skel, true);
do_bpf_iter_setsockopt(iter_skel, false);
done:
bpf_link__destroy(cubic_link);
bpf_link__destroy(dctcp_link);
bpf_cubic__destroy(cubic_skel);
bpf_dctcp__destroy(dctcp_skel);
bpf_iter_setsockopt__destroy(iter_skel);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/bpf_iter_setsockopt.c |
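/* A minimal sketch of the TCP_CONGESTION set/get pair that
 * set_bpf_cubic()/check_bpf_dctcp() above revolve around; "cubic" here is
 * the stock kernel CC, not the BPF one the test registers.
 */
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <string.h>
#include <sys/socket.h>

static int set_and_check_cc(int fd)
{
	char cc[16] = {};
	socklen_t len = sizeof(cc);

	if (setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, "cubic",
		       sizeof("cubic")))
		return -1;
	if (getsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, cc, &len))
		return -1;
	return strcmp(cc, "cubic") ? -1 : 0;
}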
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2018 Facebook */
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/filter.h>
#include <linux/unistd.h>
#include <bpf/bpf.h>
#include <libelf.h>
#include <gelf.h>
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <unistd.h>
#include <fcntl.h>
#include <errno.h>
#include <assert.h>
#include <bpf/libbpf.h>
#include <bpf/btf.h>
#include "bpf_util.h"
#include "../test_btf.h"
#include "test_progs.h"
#define MAX_INSNS 512
#define MAX_SUBPROGS 16
static int duration = 0;
static bool always_log;
#undef CHECK
#define CHECK(condition, format...) _CHECK(condition, "check", duration, format)
#define NAME_TBD 0xdeadb33f
#define NAME_NTH(N) (0xfffe0000 | N)
#define IS_NAME_NTH(X) ((X & 0xffff0000) == 0xfffe0000)
#define GET_NAME_NTH_IDX(X) (X & 0x0000ffff)
#define MAX_NR_RAW_U32 1024
#define BTF_LOG_BUF_SIZE 65535
static char btf_log_buf[BTF_LOG_BUF_SIZE];
static struct btf_header hdr_tmpl = {
.magic = BTF_MAGIC,
.version = BTF_VERSION,
.hdr_len = sizeof(struct btf_header),
};
/* several different mapv kinds (types) supported by pprint */
enum pprint_mapv_kind_t {
PPRINT_MAPV_KIND_BASIC = 0,
PPRINT_MAPV_KIND_INT128,
};
struct btf_raw_test {
const char *descr;
const char *str_sec;
const char *map_name;
const char *err_str;
__u32 raw_types[MAX_NR_RAW_U32];
__u32 str_sec_size;
enum bpf_map_type map_type;
__u32 key_size;
__u32 value_size;
__u32 key_type_id;
__u32 value_type_id;
__u32 max_entries;
bool btf_load_err;
bool map_create_err;
bool ordered_map;
bool lossless_map;
bool percpu_map;
int hdr_len_delta;
int type_off_delta;
int str_off_delta;
int str_len_delta;
enum pprint_mapv_kind_t mapv_kind;
};
#define BTF_STR_SEC(str) \
.str_sec = str, .str_sec_size = sizeof(str)
static struct btf_raw_test raw_tests[] = {
/* enum E {
* E0,
* E1,
* };
*
* struct A {
* unsigned long long m;
* int n;
* char o;
* [3 bytes hole]
* int p[8];
* int q[4][8];
* enum E r;
* };
*/
{
.descr = "struct test #1",
.raw_types = {
/* int */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
/* unsigned long long */
BTF_TYPE_INT_ENC(0, 0, 0, 64, 8), /* [2] */
/* char */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 8, 1), /* [3] */
/* int[8] */
BTF_TYPE_ARRAY_ENC(1, 1, 8), /* [4] */
/* struct A { */ /* [5] */
BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 6), 180),
BTF_MEMBER_ENC(NAME_TBD, 2, 0), /* unsigned long long m;*/
BTF_MEMBER_ENC(NAME_TBD, 1, 64),/* int n; */
BTF_MEMBER_ENC(NAME_TBD, 3, 96),/* char o; */
BTF_MEMBER_ENC(NAME_TBD, 4, 128),/* int p[8] */
BTF_MEMBER_ENC(NAME_TBD, 6, 384),/* int q[4][8] */
BTF_MEMBER_ENC(NAME_TBD, 7, 1408), /* enum E r */
/* } */
/* int[4][8] */
BTF_TYPE_ARRAY_ENC(4, 1, 4), /* [6] */
/* enum E */ /* [7] */
BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_ENUM, 0, 2), sizeof(int)),
BTF_ENUM_ENC(NAME_TBD, 0),
BTF_ENUM_ENC(NAME_TBD, 1),
BTF_END_RAW,
},
.str_sec = "\0A\0m\0n\0o\0p\0q\0r\0E\0E0\0E1",
.str_sec_size = sizeof("\0A\0m\0n\0o\0p\0q\0r\0E\0E0\0E1"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "struct_test1_map",
.key_size = sizeof(int),
.value_size = 180,
.key_type_id = 1,
.value_type_id = 5,
.max_entries = 4,
},
/* typedef struct b Struct_B;
*
* struct A {
* int m;
* struct b n[4];
* const Struct_B o[4];
* };
*
* struct B {
* int m;
* int n;
* };
*/
{
.descr = "struct test #2",
.raw_types = {
/* int */ /* [1] */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),
/* struct b [4] */ /* [2] */
BTF_TYPE_ARRAY_ENC(4, 1, 4),
/* struct A { */ /* [3] */
BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 3), 68),
BTF_MEMBER_ENC(NAME_TBD, 1, 0), /* int m; */
BTF_MEMBER_ENC(NAME_TBD, 2, 32),/* struct B n[4] */
BTF_MEMBER_ENC(NAME_TBD, 8, 288),/* const Struct_B o[4];*/
/* } */
/* struct B { */ /* [4] */
BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 2), 8),
BTF_MEMBER_ENC(NAME_TBD, 1, 0), /* int m; */
BTF_MEMBER_ENC(NAME_TBD, 1, 32),/* int n; */
/* } */
/* const int */ /* [5] */
BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_CONST, 0, 0), 1),
/* typedef struct b Struct_B */ /* [6] */
BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_TYPEDEF, 0, 0), 4),
/* const Struct_B */ /* [7] */
BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_CONST, 0, 0), 6),
/* const Struct_B [4] */ /* [8] */
BTF_TYPE_ARRAY_ENC(7, 1, 4),
BTF_END_RAW,
},
.str_sec = "\0A\0m\0n\0o\0B\0m\0n\0Struct_B",
.str_sec_size = sizeof("\0A\0m\0n\0o\0B\0m\0n\0Struct_B"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "struct_test2_map",
.key_size = sizeof(int),
.value_size = 68,
.key_type_id = 1,
.value_type_id = 3,
.max_entries = 4,
},
{
.descr = "struct test #3 Invalid member offset",
.raw_types = {
/* int */ /* [1] */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),
/* int64 */ /* [2] */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 64, 8),
/* struct A { */ /* [3] */
BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 2), 16),
BTF_MEMBER_ENC(NAME_TBD, 1, 64), /* int m; */
BTF_MEMBER_ENC(NAME_TBD, 2, 0), /* int64 n; */
/* } */
BTF_END_RAW,
},
.str_sec = "\0A\0m\0n\0",
.str_sec_size = sizeof("\0A\0m\0n\0"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "struct_test3_map",
.key_size = sizeof(int),
.value_size = 16,
.key_type_id = 1,
.value_type_id = 3,
.max_entries = 4,
.btf_load_err = true,
.err_str = "Invalid member bits_offset",
},
/*
* struct A {
* unsigned long long m;
* int n;
* char o;
* [3 bytes hole]
* int p[8];
* };
*/
{
.descr = "global data test #1",
.raw_types = {
/* int */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
/* unsigned long long */
BTF_TYPE_INT_ENC(0, 0, 0, 64, 8), /* [2] */
/* char */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 8, 1), /* [3] */
/* int[8] */
BTF_TYPE_ARRAY_ENC(1, 1, 8), /* [4] */
/* struct A { */ /* [5] */
BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 4), 48),
BTF_MEMBER_ENC(NAME_TBD, 2, 0), /* unsigned long long m;*/
BTF_MEMBER_ENC(NAME_TBD, 1, 64),/* int n; */
BTF_MEMBER_ENC(NAME_TBD, 3, 96),/* char o; */
BTF_MEMBER_ENC(NAME_TBD, 4, 128),/* int p[8] */
/* } */
BTF_END_RAW,
},
.str_sec = "\0A\0m\0n\0o\0p",
.str_sec_size = sizeof("\0A\0m\0n\0o\0p"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "struct_test1_map",
.key_size = sizeof(int),
.value_size = 48,
.key_type_id = 1,
.value_type_id = 5,
.max_entries = 4,
},
/*
* struct A {
* unsigned long long m;
* int n;
* char o;
* [3 bytes hole]
* int p[8];
* };
* static struct A t; <- in .bss
*/
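/* BTF_VAR_ENC(name, type, linkage) declares the variable and
 * BTF_VAR_SECINFO_ENC(type, offset, size) places it inside the
 * DATASEC: var [6] (struct A, linkage 0 == static) sits at offset 0
 * with size 48 in the 48-byte .bss section.
 */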
{
.descr = "global data test #2",
.raw_types = {
/* int */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
/* unsigned long long */
BTF_TYPE_INT_ENC(0, 0, 0, 64, 8), /* [2] */
/* char */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 8, 1), /* [3] */
/* int[8] */
BTF_TYPE_ARRAY_ENC(1, 1, 8), /* [4] */
/* struct A { */ /* [5] */
BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 4), 48),
BTF_MEMBER_ENC(NAME_TBD, 2, 0), /* unsigned long long m;*/
BTF_MEMBER_ENC(NAME_TBD, 1, 64),/* int n; */
BTF_MEMBER_ENC(NAME_TBD, 3, 96),/* char o; */
BTF_MEMBER_ENC(NAME_TBD, 4, 128),/* int p[8] */
/* } */
/* static struct A t */
BTF_VAR_ENC(NAME_TBD, 5, 0), /* [6] */
/* .bss section */ /* [7] */
BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 48),
BTF_VAR_SECINFO_ENC(6, 0, 48),
BTF_END_RAW,
},
.str_sec = "\0A\0m\0n\0o\0p\0t\0.bss",
.str_sec_size = sizeof("\0A\0m\0n\0o\0p\0t\0.bss"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = ".bss",
.key_size = sizeof(int),
.value_size = 48,
.key_type_id = 0,
.value_type_id = 7,
.max_entries = 1,
},
{
.descr = "global data test #3",
.raw_types = {
/* int */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
/* static int t */
BTF_VAR_ENC(NAME_TBD, 1, 0), /* [2] */
/* .bss section */ /* [3] */
BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4),
BTF_VAR_SECINFO_ENC(2, 0, 4),
BTF_END_RAW,
},
.str_sec = "\0t\0.bss",
.str_sec_size = sizeof("\0t\0.bss"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = ".bss",
.key_size = sizeof(int),
.value_size = 4,
.key_type_id = 0,
.value_type_id = 3,
.max_entries = 1,
},
{
.descr = "global data test #4, unsupported linkage",
.raw_types = {
/* int */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
	/* int t with unsupported linkage (2) */
BTF_VAR_ENC(NAME_TBD, 1, 2), /* [2] */
/* .bss section */ /* [3] */
BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4),
BTF_VAR_SECINFO_ENC(2, 0, 4),
BTF_END_RAW,
},
.str_sec = "\0t\0.bss",
.str_sec_size = sizeof("\0t\0.bss"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = ".bss",
.key_size = sizeof(int),
.value_size = 4,
.key_type_id = 0,
.value_type_id = 3,
.max_entries = 1,
.btf_load_err = true,
.err_str = "Linkage not supported",
},
{
.descr = "global data test #5, invalid var type",
.raw_types = {
/* static void t */
BTF_VAR_ENC(NAME_TBD, 0, 0), /* [1] */
/* .bss section */ /* [2] */
BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4),
BTF_VAR_SECINFO_ENC(1, 0, 4),
BTF_END_RAW,
},
.str_sec = "\0t\0.bss",
.str_sec_size = sizeof("\0t\0.bss"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = ".bss",
.key_size = sizeof(int),
.value_size = 4,
.key_type_id = 0,
.value_type_id = 2,
.max_entries = 1,
.btf_load_err = true,
.err_str = "Invalid type_id",
},
{
.descr = "global data test #6, invalid var type (fwd type)",
.raw_types = {
/* union A */
BTF_TYPE_ENC(NAME_TBD,
BTF_INFO_ENC(BTF_KIND_FWD, 1, 0), 0), /* [1] */
/* static union A t */
BTF_VAR_ENC(NAME_TBD, 1, 0), /* [2] */
/* .bss section */ /* [3] */
BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4),
BTF_VAR_SECINFO_ENC(2, 0, 4),
BTF_END_RAW,
},
.str_sec = "\0A\0t\0.bss",
.str_sec_size = sizeof("\0A\0t\0.bss"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = ".bss",
.key_size = sizeof(int),
.value_size = 4,
.key_type_id = 0,
.value_type_id = 2,
.max_entries = 1,
.btf_load_err = true,
.err_str = "Invalid type",
},
{
.descr = "global data test #7, invalid var type (fwd type)",
.raw_types = {
/* union A */
BTF_TYPE_ENC(NAME_TBD,
BTF_INFO_ENC(BTF_KIND_FWD, 1, 0), 0), /* [1] */
/* static union A t */
BTF_VAR_ENC(NAME_TBD, 1, 0), /* [2] */
/* .bss section */ /* [3] */
BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4),
BTF_VAR_SECINFO_ENC(1, 0, 4),
BTF_END_RAW,
},
.str_sec = "\0A\0t\0.bss",
.str_sec_size = sizeof("\0A\0t\0.bss"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = ".bss",
.key_size = sizeof(int),
.value_size = 4,
.key_type_id = 0,
.value_type_id = 2,
.max_entries = 1,
.btf_load_err = true,
.err_str = "Invalid type",
},
{
.descr = "global data test #8, invalid var size",
.raw_types = {
/* int */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
/* unsigned long long */
BTF_TYPE_INT_ENC(0, 0, 0, 64, 8), /* [2] */
/* char */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 8, 1), /* [3] */
/* int[8] */
BTF_TYPE_ARRAY_ENC(1, 1, 8), /* [4] */
/* struct A { */ /* [5] */
BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 4), 48),
BTF_MEMBER_ENC(NAME_TBD, 2, 0), /* unsigned long long m;*/
BTF_MEMBER_ENC(NAME_TBD, 1, 64),/* int n; */
BTF_MEMBER_ENC(NAME_TBD, 3, 96),/* char o; */
BTF_MEMBER_ENC(NAME_TBD, 4, 128),/* int p[8] */
/* } */
/* static struct A t */
BTF_VAR_ENC(NAME_TBD, 5, 0), /* [6] */
/* .bss section */ /* [7] */
BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 48),
BTF_VAR_SECINFO_ENC(6, 0, 47),
BTF_END_RAW,
},
.str_sec = "\0A\0m\0n\0o\0p\0t\0.bss",
.str_sec_size = sizeof("\0A\0m\0n\0o\0p\0t\0.bss"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = ".bss",
.key_size = sizeof(int),
.value_size = 48,
.key_type_id = 0,
.value_type_id = 7,
.max_entries = 1,
.btf_load_err = true,
.err_str = "Invalid size",
},
{
.descr = "global data test #9, invalid var size",
.raw_types = {
/* int */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
/* unsigned long long */
BTF_TYPE_INT_ENC(0, 0, 0, 64, 8), /* [2] */
/* char */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 8, 1), /* [3] */
/* int[8] */
BTF_TYPE_ARRAY_ENC(1, 1, 8), /* [4] */
/* struct A { */ /* [5] */
BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 4), 48),
BTF_MEMBER_ENC(NAME_TBD, 2, 0), /* unsigned long long m;*/
BTF_MEMBER_ENC(NAME_TBD, 1, 64),/* int n; */
BTF_MEMBER_ENC(NAME_TBD, 3, 96),/* char o; */
BTF_MEMBER_ENC(NAME_TBD, 4, 128),/* int p[8] */
/* } */
/* static struct A t */
BTF_VAR_ENC(NAME_TBD, 5, 0), /* [6] */
/* .bss section */ /* [7] */
BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 46),
BTF_VAR_SECINFO_ENC(6, 0, 48),
BTF_END_RAW,
},
.str_sec = "\0A\0m\0n\0o\0p\0t\0.bss",
.str_sec_size = sizeof("\0A\0m\0n\0o\0p\0t\0.bss"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = ".bss",
.key_size = sizeof(int),
.value_size = 48,
.key_type_id = 0,
.value_type_id = 7,
.max_entries = 1,
.btf_load_err = true,
.err_str = "Invalid size",
},
{
.descr = "global data test #10, invalid var size",
.raw_types = {
/* int */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
/* unsigned long long */
BTF_TYPE_INT_ENC(0, 0, 0, 64, 8), /* [2] */
/* char */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 8, 1), /* [3] */
/* int[8] */
BTF_TYPE_ARRAY_ENC(1, 1, 8), /* [4] */
/* struct A { */ /* [5] */
BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 4), 48),
BTF_MEMBER_ENC(NAME_TBD, 2, 0), /* unsigned long long m;*/
BTF_MEMBER_ENC(NAME_TBD, 1, 64),/* int n; */
BTF_MEMBER_ENC(NAME_TBD, 3, 96),/* char o; */
BTF_MEMBER_ENC(NAME_TBD, 4, 128),/* int p[8] */
/* } */
/* static struct A t */
BTF_VAR_ENC(NAME_TBD, 5, 0), /* [6] */
/* .bss section */ /* [7] */
BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 46),
BTF_VAR_SECINFO_ENC(6, 0, 46),
BTF_END_RAW,
},
.str_sec = "\0A\0m\0n\0o\0p\0t\0.bss",
.str_sec_size = sizeof("\0A\0m\0n\0o\0p\0t\0.bss"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = ".bss",
.key_size = sizeof(int),
.value_size = 48,
.key_type_id = 0,
.value_type_id = 7,
.max_entries = 1,
.btf_load_err = true,
.err_str = "Invalid size",
},
{
.descr = "global data test #11, multiple section members",
.raw_types = {
/* int */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
/* unsigned long long */
BTF_TYPE_INT_ENC(0, 0, 0, 64, 8), /* [2] */
/* char */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 8, 1), /* [3] */
/* int[8] */
BTF_TYPE_ARRAY_ENC(1, 1, 8), /* [4] */
/* struct A { */ /* [5] */
BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 4), 48),
BTF_MEMBER_ENC(NAME_TBD, 2, 0), /* unsigned long long m;*/
BTF_MEMBER_ENC(NAME_TBD, 1, 64),/* int n; */
BTF_MEMBER_ENC(NAME_TBD, 3, 96),/* char o; */
BTF_MEMBER_ENC(NAME_TBD, 4, 128),/* int p[8] */
/* } */
/* static struct A t */
BTF_VAR_ENC(NAME_TBD, 5, 0), /* [6] */
/* static int u */
BTF_VAR_ENC(NAME_TBD, 1, 0), /* [7] */
/* .bss section */ /* [8] */
BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 2), 62),
BTF_VAR_SECINFO_ENC(6, 10, 48),
BTF_VAR_SECINFO_ENC(7, 58, 4),
BTF_END_RAW,
},
.str_sec = "\0A\0m\0n\0o\0p\0t\0u\0.bss",
.str_sec_size = sizeof("\0A\0m\0n\0o\0p\0t\0u\0.bss"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = ".bss",
.key_size = sizeof(int),
.value_size = 62,
.key_type_id = 0,
.value_type_id = 8,
.max_entries = 1,
},
{
.descr = "global data test #12, invalid offset",
.raw_types = {
/* int */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
/* unsigned long long */
BTF_TYPE_INT_ENC(0, 0, 0, 64, 8), /* [2] */
/* char */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 8, 1), /* [3] */
/* int[8] */
BTF_TYPE_ARRAY_ENC(1, 1, 8), /* [4] */
/* struct A { */ /* [5] */
BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 4), 48),
BTF_MEMBER_ENC(NAME_TBD, 2, 0), /* unsigned long long m;*/
BTF_MEMBER_ENC(NAME_TBD, 1, 64),/* int n; */
BTF_MEMBER_ENC(NAME_TBD, 3, 96),/* char o; */
BTF_MEMBER_ENC(NAME_TBD, 4, 128),/* int p[8] */
/* } */
/* static struct A t */
BTF_VAR_ENC(NAME_TBD, 5, 0), /* [6] */
/* static int u */
BTF_VAR_ENC(NAME_TBD, 1, 0), /* [7] */
/* .bss section */ /* [8] */
BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 2), 62),
BTF_VAR_SECINFO_ENC(6, 10, 48),
BTF_VAR_SECINFO_ENC(7, 60, 4),
BTF_END_RAW,
},
.str_sec = "\0A\0m\0n\0o\0p\0t\0u\0.bss",
.str_sec_size = sizeof("\0A\0m\0n\0o\0p\0t\0u\0.bss"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = ".bss",
.key_size = sizeof(int),
.value_size = 62,
.key_type_id = 0,
.value_type_id = 8,
.max_entries = 1,
.btf_load_err = true,
.err_str = "Invalid offset+size",
},
{
.descr = "global data test #13, invalid offset",
.raw_types = {
/* int */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
/* unsigned long long */
BTF_TYPE_INT_ENC(0, 0, 0, 64, 8), /* [2] */
/* char */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 8, 1), /* [3] */
/* int[8] */
BTF_TYPE_ARRAY_ENC(1, 1, 8), /* [4] */
/* struct A { */ /* [5] */
BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 4), 48),
BTF_MEMBER_ENC(NAME_TBD, 2, 0), /* unsigned long long m;*/
BTF_MEMBER_ENC(NAME_TBD, 1, 64),/* int n; */
BTF_MEMBER_ENC(NAME_TBD, 3, 96),/* char o; */
BTF_MEMBER_ENC(NAME_TBD, 4, 128),/* int p[8] */
/* } */
/* static struct A t */
BTF_VAR_ENC(NAME_TBD, 5, 0), /* [6] */
/* static int u */
BTF_VAR_ENC(NAME_TBD, 1, 0), /* [7] */
/* .bss section */ /* [8] */
BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 2), 62),
BTF_VAR_SECINFO_ENC(6, 10, 48),
BTF_VAR_SECINFO_ENC(7, 12, 4),
BTF_END_RAW,
},
.str_sec = "\0A\0m\0n\0o\0p\0t\0u\0.bss",
.str_sec_size = sizeof("\0A\0m\0n\0o\0p\0t\0u\0.bss"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = ".bss",
.key_size = sizeof(int),
.value_size = 62,
.key_type_id = 0,
.value_type_id = 8,
.max_entries = 1,
.btf_load_err = true,
.err_str = "Invalid offset",
},
{
.descr = "global data test #14, invalid offset",
.raw_types = {
/* int */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
/* unsigned long long */
BTF_TYPE_INT_ENC(0, 0, 0, 64, 8), /* [2] */
/* char */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 8, 1), /* [3] */
/* int[8] */
BTF_TYPE_ARRAY_ENC(1, 1, 8), /* [4] */
/* struct A { */ /* [5] */
BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 4), 48),
BTF_MEMBER_ENC(NAME_TBD, 2, 0), /* unsigned long long m;*/
BTF_MEMBER_ENC(NAME_TBD, 1, 64),/* int n; */
BTF_MEMBER_ENC(NAME_TBD, 3, 96),/* char o; */
BTF_MEMBER_ENC(NAME_TBD, 4, 128),/* int p[8] */
/* } */
/* static struct A t */
BTF_VAR_ENC(NAME_TBD, 5, 0), /* [6] */
/* static int u */
BTF_VAR_ENC(NAME_TBD, 1, 0), /* [7] */
/* .bss section */ /* [8] */
BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 2), 62),
BTF_VAR_SECINFO_ENC(7, 58, 4),
BTF_VAR_SECINFO_ENC(6, 10, 48),
BTF_END_RAW,
},
.str_sec = "\0A\0m\0n\0o\0p\0t\0u\0.bss",
.str_sec_size = sizeof("\0A\0m\0n\0o\0p\0t\0u\0.bss"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = ".bss",
.key_size = sizeof(int),
.value_size = 62,
.key_type_id = 0,
.value_type_id = 8,
.max_entries = 1,
.btf_load_err = true,
.err_str = "Invalid offset",
},
{
.descr = "global data test #15, not var kind",
.raw_types = {
/* int */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_VAR_ENC(NAME_TBD, 1, 0), /* [2] */
/* .bss section */ /* [3] */
BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4),
BTF_VAR_SECINFO_ENC(1, 0, 4),
BTF_END_RAW,
},
.str_sec = "\0A\0t\0.bss",
.str_sec_size = sizeof("\0A\0t\0.bss"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = ".bss",
.key_size = sizeof(int),
.value_size = 4,
.key_type_id = 0,
.value_type_id = 3,
.max_entries = 1,
.btf_load_err = true,
.err_str = "Not a VAR kind member",
},
{
.descr = "global data test #16, invalid var referencing sec",
.raw_types = {
/* int */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_VAR_ENC(NAME_TBD, 5, 0), /* [2] */
BTF_VAR_ENC(NAME_TBD, 2, 0), /* [3] */
/* a section */ /* [4] */
BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4),
BTF_VAR_SECINFO_ENC(3, 0, 4),
/* a section */ /* [5] */
BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4),
BTF_VAR_SECINFO_ENC(6, 0, 4),
BTF_VAR_ENC(NAME_TBD, 1, 0), /* [6] */
BTF_END_RAW,
},
.str_sec = "\0A\0t\0s\0a\0a",
.str_sec_size = sizeof("\0A\0t\0s\0a\0a"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = ".bss",
.key_size = sizeof(int),
.value_size = 4,
.key_type_id = 0,
.value_type_id = 4,
.max_entries = 1,
.btf_load_err = true,
.err_str = "Invalid type_id",
},
{
.descr = "global data test #17, invalid var referencing var",
.raw_types = {
/* int */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_VAR_ENC(NAME_TBD, 1, 0), /* [2] */
BTF_VAR_ENC(NAME_TBD, 2, 0), /* [3] */
/* a section */ /* [4] */
BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4),
BTF_VAR_SECINFO_ENC(3, 0, 4),
BTF_END_RAW,
},
.str_sec = "\0A\0t\0s\0a\0a",
.str_sec_size = sizeof("\0A\0t\0s\0a\0a"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = ".bss",
.key_size = sizeof(int),
.value_size = 4,
.key_type_id = 0,
.value_type_id = 4,
.max_entries = 1,
.btf_load_err = true,
.err_str = "Invalid type_id",
},
{
.descr = "global data test #18, invalid var loop",
.raw_types = {
/* int */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_VAR_ENC(NAME_TBD, 2, 0), /* [2] */
/* .bss section */ /* [3] */
BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4),
BTF_VAR_SECINFO_ENC(2, 0, 4),
BTF_END_RAW,
},
.str_sec = "\0A\0t\0aaa",
.str_sec_size = sizeof("\0A\0t\0aaa"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = ".bss",
.key_size = sizeof(int),
.value_size = 4,
.key_type_id = 0,
.value_type_id = 4,
.max_entries = 1,
.btf_load_err = true,
.err_str = "Invalid type_id",
},
{
.descr = "global data test #19, invalid var referencing var",
.raw_types = {
/* int */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_VAR_ENC(NAME_TBD, 3, 0), /* [2] */
BTF_VAR_ENC(NAME_TBD, 1, 0), /* [3] */
BTF_END_RAW,
},
.str_sec = "\0A\0t\0s\0a\0a",
.str_sec_size = sizeof("\0A\0t\0s\0a\0a"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = ".bss",
.key_size = sizeof(int),
.value_size = 4,
.key_type_id = 0,
.value_type_id = 4,
.max_entries = 1,
.btf_load_err = true,
.err_str = "Invalid type_id",
},
{
.descr = "global data test #20, invalid ptr referencing var",
.raw_types = {
/* int */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
/* PTR type_id=3 */ /* [2] */
BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), 3),
BTF_VAR_ENC(NAME_TBD, 1, 0), /* [3] */
BTF_END_RAW,
},
.str_sec = "\0A\0t\0s\0a\0a",
.str_sec_size = sizeof("\0A\0t\0s\0a\0a"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = ".bss",
.key_size = sizeof(int),
.value_size = 4,
.key_type_id = 0,
.value_type_id = 4,
.max_entries = 1,
.btf_load_err = true,
.err_str = "Invalid type_id",
},
{
.descr = "global data test #21, var included in struct",
.raw_types = {
/* int */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
/* struct A { */ /* [2] */
BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 2), sizeof(int) * 2),
BTF_MEMBER_ENC(NAME_TBD, 1, 0), /* int m; */
BTF_MEMBER_ENC(NAME_TBD, 3, 32),/* VAR type_id=3; */
/* } */
BTF_VAR_ENC(NAME_TBD, 1, 0), /* [3] */
BTF_END_RAW,
},
.str_sec = "\0A\0t\0s\0a\0a",
.str_sec_size = sizeof("\0A\0t\0s\0a\0a"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = ".bss",
.key_size = sizeof(int),
.value_size = 4,
.key_type_id = 0,
.value_type_id = 4,
.max_entries = 1,
.btf_load_err = true,
.err_str = "Invalid member",
},
{
.descr = "global data test #22, array of var",
.raw_types = {
/* int */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_TYPE_ARRAY_ENC(3, 1, 4), /* [2] */
BTF_VAR_ENC(NAME_TBD, 1, 0), /* [3] */
BTF_END_RAW,
},
.str_sec = "\0A\0t\0s\0a\0a",
.str_sec_size = sizeof("\0A\0t\0s\0a\0a"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = ".bss",
.key_size = sizeof(int),
.value_size = 4,
.key_type_id = 0,
.value_type_id = 4,
.max_entries = 1,
.btf_load_err = true,
.err_str = "Invalid elem",
},
{
.descr = "var after datasec, ptr followed by modifier",
.raw_types = {
/* .bss section */ /* [1] */
BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 2),
sizeof(void*)+4),
BTF_VAR_SECINFO_ENC(4, 0, sizeof(void*)),
BTF_VAR_SECINFO_ENC(6, sizeof(void*), 4),
/* int */ /* [2] */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),
/* int* */ /* [3] */
BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), 2),
BTF_VAR_ENC(NAME_TBD, 3, 0), /* [4] */
/* const int */ /* [5] */
BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_CONST, 0, 0), 2),
BTF_VAR_ENC(NAME_TBD, 5, 0), /* [6] */
BTF_END_RAW,
},
.str_sec = "\0a\0b\0c\0",
.str_sec_size = sizeof("\0a\0b\0c\0"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = ".bss",
.key_size = sizeof(int),
.value_size = sizeof(void*)+4,
.key_type_id = 0,
.value_type_id = 1,
.max_entries = 1,
},
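/* In the test above the DATASEC is encoded as type [1], before the
 * VARs ([4] and [6]) and the types they reference, so validating the
 * section has to chase forward references through PTR and CONST.
 */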
/* Test member exceeds the size of struct.
*
* struct A {
* int m;
* int n;
* };
*/
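/* The struct declares sizeof(int) * 2 - 1 == 7 bytes, but member n
 * at bit offset 32 occupies bytes 4..7, one byte past the declared
 * end, hence the expected "Member exceeds struct_size" error.
 */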
{
.descr = "size check test #1",
.raw_types = {
/* int */ /* [1] */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),
/* struct A { */ /* [2] */
BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 2), sizeof(int) * 2 - 1),
BTF_MEMBER_ENC(NAME_TBD, 1, 0), /* int m; */
BTF_MEMBER_ENC(NAME_TBD, 1, 32),/* int n; */
/* } */
BTF_END_RAW,
},
.str_sec = "\0A\0m\0n",
.str_sec_size = sizeof("\0A\0m\0n"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "size_check1_map",
.key_size = sizeof(int),
.value_size = 1,
.key_type_id = 1,
.value_type_id = 2,
.max_entries = 4,
.btf_load_err = true,
.err_str = "Member exceeds struct_size",
},
/* Test member exceeds the size of struct
*
* struct A {
* int m;
* int n[2];
* };
*/
{
.descr = "size check test #2",
.raw_types = {
/* int */ /* [1] */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, sizeof(int)),
/* int[2] */ /* [2] */
BTF_TYPE_ARRAY_ENC(1, 1, 2),
/* struct A { */ /* [3] */
BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 2), sizeof(int) * 3 - 1),
BTF_MEMBER_ENC(NAME_TBD, 1, 0), /* int m; */
BTF_MEMBER_ENC(NAME_TBD, 2, 32),/* int n[2]; */
/* } */
BTF_END_RAW,
},
.str_sec = "\0A\0m\0n",
.str_sec_size = sizeof("\0A\0m\0n"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "size_check2_map",
.key_size = sizeof(int),
.value_size = 1,
.key_type_id = 1,
.value_type_id = 3,
.max_entries = 4,
.btf_load_err = true,
.err_str = "Member exceeds struct_size",
},
/* Test member exceeds the size of struct
*
* struct A {
* int m;
* void *n;
* };
*/
{
.descr = "size check test #3",
.raw_types = {
/* int */ /* [1] */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, sizeof(int)),
/* void* */ /* [2] */
BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), 0),
/* struct A { */ /* [3] */
BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 2), sizeof(int) + sizeof(void *) - 1),
BTF_MEMBER_ENC(NAME_TBD, 1, 0), /* int m; */
BTF_MEMBER_ENC(NAME_TBD, 2, 32),/* void *n; */
/* } */
BTF_END_RAW,
},
.str_sec = "\0A\0m\0n",
.str_sec_size = sizeof("\0A\0m\0n"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "size_check3_map",
.key_size = sizeof(int),
.value_size = 1,
.key_type_id = 1,
.value_type_id = 3,
.max_entries = 4,
.btf_load_err = true,
.err_str = "Member exceeds struct_size",
},
/* Test member exceeds the size of struct
*
* enum E {
* E0,
* E1,
* };
*
* struct A {
* int m;
* enum E n;
* };
*/
{
.descr = "size check test #4",
.raw_types = {
/* int */ /* [1] */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, sizeof(int)),
/* enum E { */ /* [2] */
BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_ENUM, 0, 2), sizeof(int)),
BTF_ENUM_ENC(NAME_TBD, 0),
BTF_ENUM_ENC(NAME_TBD, 1),
/* } */
/* struct A { */ /* [3] */
BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 2), sizeof(int) * 2 - 1),
BTF_MEMBER_ENC(NAME_TBD, 1, 0), /* int m; */
BTF_MEMBER_ENC(NAME_TBD, 2, 32),/* enum E n; */
/* } */
BTF_END_RAW,
},
.str_sec = "\0E\0E0\0E1\0A\0m\0n",
.str_sec_size = sizeof("\0E\0E0\0E1\0A\0m\0n"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "size_check4_map",
.key_size = sizeof(int),
.value_size = 1,
.key_type_id = 1,
.value_type_id = 3,
.max_entries = 4,
.btf_load_err = true,
.err_str = "Member exceeds struct_size",
},
/* Test member does not exceed the size of struct
*
* enum E {
* E0,
* E1,
* };
*
* struct A {
* char m;
* enum E __attribute__((packed)) n;
* };
*/
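/* Here enum E is encoded with size 1 (a packed enum), so member n at
 * bit offset 8 ends at byte 2 and fits the declared 2-byte struct
 * size; unlike the tests above, this one must load successfully.
 */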
{
.descr = "size check test #5",
.raw_types = {
/* int */ /* [1] */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, sizeof(int)),
/* char */ /* [2] */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 8, 1),
/* enum E { */ /* [3] */
BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_ENUM, 0, 2), 1),
BTF_ENUM_ENC(NAME_TBD, 0),
BTF_ENUM_ENC(NAME_TBD, 1),
/* } */
/* struct A { */ /* [4] */
BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 2), 2),
BTF_MEMBER_ENC(NAME_TBD, 2, 0), /* char m; */
BTF_MEMBER_ENC(NAME_TBD, 3, 8),/* enum E __attribute__((packed)) n; */
/* } */
BTF_END_RAW,
},
.str_sec = "\0E\0E0\0E1\0A\0m\0n",
.str_sec_size = sizeof("\0E\0E0\0E1\0A\0m\0n"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "size_check5_map",
.key_size = sizeof(int),
.value_size = 2,
.key_type_id = 1,
.value_type_id = 4,
.max_entries = 4,
},
/* typedef const void * const_void_ptr;
* struct A {
* const_void_ptr m;
* };
*/
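/* Resolving member m walks typedef -> ptr -> const -> void; the
 * pointer terminates the chain, so the member size is sizeof(void *)
 * and the struct is valid even though the base type is void.
 */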
{
.descr = "void test #1",
.raw_types = {
/* int */ /* [1] */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),
/* const void */ /* [2] */
BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_CONST, 0, 0), 0),
/* const void* */ /* [3] */
BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), 2),
/* typedef const void * const_void_ptr */
BTF_TYPEDEF_ENC(NAME_TBD, 3), /* [4] */
/* struct A { */ /* [5] */
BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), sizeof(void *)),
/* const_void_ptr m; */
BTF_MEMBER_ENC(NAME_TBD, 4, 0),
/* } */
BTF_END_RAW,
},
.str_sec = "\0const_void_ptr\0A\0m",
.str_sec_size = sizeof("\0const_void_ptr\0A\0m"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "void_test1_map",
.key_size = sizeof(int),
.value_size = sizeof(void *),
.key_type_id = 1,
.value_type_id = 4,
.max_entries = 4,
},
/* struct A {
* const void m;
* };
*/
{
.descr = "void test #2",
.raw_types = {
/* int */ /* [1] */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),
/* const void */ /* [2] */
BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_CONST, 0, 0), 0),
/* struct A { */ /* [3] */
BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), 8),
/* const void m; */
BTF_MEMBER_ENC(NAME_TBD, 2, 0),
/* } */
BTF_END_RAW,
},
.str_sec = "\0A\0m",
.str_sec_size = sizeof("\0A\0m"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "void_test2_map",
.key_size = sizeof(int),
.value_size = sizeof(void *),
.key_type_id = 1,
.value_type_id = 3,
.max_entries = 4,
.btf_load_err = true,
.err_str = "Invalid member",
},
/* typedef const void * const_void_ptr;
* const_void_ptr[4]
*/
{
.descr = "void test #3",
.raw_types = {
/* int */ /* [1] */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),
/* const void */ /* [2] */
BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_CONST, 0, 0), 0),
/* const void* */ /* [3] */
BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), 2),
/* typedef const void * const_void_ptr */
BTF_TYPEDEF_ENC(NAME_TBD, 3), /* [4] */
/* const_void_ptr[4] */
BTF_TYPE_ARRAY_ENC(4, 1, 4), /* [5] */
BTF_END_RAW,
},
.str_sec = "\0const_void_ptr",
.str_sec_size = sizeof("\0const_void_ptr"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "void_test3_map",
.key_size = sizeof(int),
.value_size = sizeof(void *) * 4,
.key_type_id = 1,
.value_type_id = 5,
.max_entries = 4,
},
/* const void[4] */
{
.descr = "void test #4",
.raw_types = {
/* int */ /* [1] */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),
/* const void */ /* [2] */
BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_CONST, 0, 0), 0),
/* const void[4] */ /* [3] */
BTF_TYPE_ARRAY_ENC(2, 1, 4),
BTF_END_RAW,
},
.str_sec = "\0A\0m",
.str_sec_size = sizeof("\0A\0m"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "void_test4_map",
.key_size = sizeof(int),
.value_size = sizeof(void *) * 4,
.key_type_id = 1,
.value_type_id = 3,
.max_entries = 4,
.btf_load_err = true,
.err_str = "Invalid elem",
},
/* Array_A <------------------+
* elem_type == Array_B |
* | |
* | |
* Array_B <-------- + |
* elem_type == Array A --+
*/
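/* Concretely: array [2] has elem_type 3 and array [3] has elem_type
 * 2, so resolution never reaches a sized base type and the verifier
 * must report "Loop detected".
 */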
{
.descr = "loop test #1",
.raw_types = {
/* int */ /* [1] */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),
/* Array_A */ /* [2] */
BTF_TYPE_ARRAY_ENC(3, 1, 8),
/* Array_B */ /* [3] */
BTF_TYPE_ARRAY_ENC(2, 1, 8),
BTF_END_RAW,
},
.str_sec = "",
.str_sec_size = sizeof(""),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "loop_test1_map",
.key_size = sizeof(int),
	.value_size = sizeof(int) * 8,
.key_type_id = 1,
.value_type_id = 2,
.max_entries = 4,
.btf_load_err = true,
.err_str = "Loop detected",
},
/* typedef is _before_ the BTF type of Array_A and Array_B
*
* typedef Array_B int_array;
*
* Array_A <------------------+
* elem_type == int_array |
* | |
* | |
* Array_B <-------- + |
* elem_type == Array_A --+
*/
{
.descr = "loop test #2",
.raw_types = {
/* int */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
/* typedef Array_B int_array */
BTF_TYPEDEF_ENC(1, 4), /* [2] */
/* Array_A */
BTF_TYPE_ARRAY_ENC(2, 1, 8), /* [3] */
/* Array_B */
BTF_TYPE_ARRAY_ENC(3, 1, 8), /* [4] */
BTF_END_RAW,
},
	.str_sec = "\0int_array",
.str_sec_size = sizeof("\0int_array"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "loop_test2_map",
.key_size = sizeof(int),
	.value_size = sizeof(int) * 8,
.key_type_id = 1,
.value_type_id = 2,
.max_entries = 4,
.btf_load_err = true,
.err_str = "Loop detected",
},
/* Array_A <------------------+
* elem_type == Array_B |
* | |
* | |
* Array_B <-------- + |
* elem_type == Array_A --+
*/
{
.descr = "loop test #3",
.raw_types = {
/* int */ /* [1] */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),
/* Array_A */ /* [2] */
BTF_TYPE_ARRAY_ENC(3, 1, 8),
/* Array_B */ /* [3] */
BTF_TYPE_ARRAY_ENC(2, 1, 8),
BTF_END_RAW,
},
.str_sec = "",
.str_sec_size = sizeof(""),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "loop_test3_map",
.key_size = sizeof(int),
	.value_size = sizeof(int) * 8,
.key_type_id = 1,
.value_type_id = 2,
.max_entries = 4,
.btf_load_err = true,
.err_str = "Loop detected",
},
/* typedef is _between_ the BTF type of Array_A and Array_B
*
* typedef Array_B int_array;
*
* Array_A <------------------+
* elem_type == int_array |
* | |
* | |
* Array_B <-------- + |
* elem_type == Array_A --+
*/
{
.descr = "loop test #4",
.raw_types = {
/* int */ /* [1] */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),
/* Array_A */ /* [2] */
BTF_TYPE_ARRAY_ENC(3, 1, 8),
/* typedef Array_B int_array */ /* [3] */
BTF_TYPEDEF_ENC(NAME_TBD, 4),
/* Array_B */ /* [4] */
BTF_TYPE_ARRAY_ENC(2, 1, 8),
BTF_END_RAW,
},
	.str_sec = "\0int_array",
.str_sec_size = sizeof("\0int_array"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "loop_test4_map",
.key_size = sizeof(int),
	.value_size = sizeof(int) * 8,
.key_type_id = 1,
.value_type_id = 2,
.max_entries = 4,
.btf_load_err = true,
.err_str = "Loop detected",
},
/* typedef struct B Struct_B
*
* struct A {
* int x;
* Struct_B y;
* };
*
* struct B {
* int x;
* struct A y;
* };
*/
{
.descr = "loop test #5",
.raw_types = {
/* int */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
/* struct A */ /* [2] */
BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 2), 8),
BTF_MEMBER_ENC(NAME_TBD, 1, 0), /* int x; */
BTF_MEMBER_ENC(NAME_TBD, 3, 32),/* Struct_B y; */
/* typedef struct B Struct_B */
BTF_TYPEDEF_ENC(NAME_TBD, 4), /* [3] */
/* struct B */ /* [4] */
BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 2), 8),
BTF_MEMBER_ENC(NAME_TBD, 1, 0), /* int x; */
BTF_MEMBER_ENC(NAME_TBD, 2, 32),/* struct A y; */
BTF_END_RAW,
},
.str_sec = "\0A\0x\0y\0Struct_B\0B\0x\0y",
.str_sec_size = sizeof("\0A\0x\0y\0Struct_B\0B\0x\0y"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "loop_test5_map",
.key_size = sizeof(int),
.value_size = 8,
.key_type_id = 1,
.value_type_id = 2,
.max_entries = 4,
.btf_load_err = true,
.err_str = "Loop detected",
},
/* struct A {
* int x;
* struct A array_a[4];
* };
*/
{
.descr = "loop test #6",
.raw_types = {
/* int */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_TYPE_ARRAY_ENC(3, 1, 4), /* [2] */
/* struct A */ /* [3] */
BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 2), 8),
BTF_MEMBER_ENC(NAME_TBD, 1, 0), /* int x; */
BTF_MEMBER_ENC(NAME_TBD, 2, 32),/* struct A array_a[4]; */
BTF_END_RAW,
},
.str_sec = "\0A\0x\0y",
.str_sec_size = sizeof("\0A\0x\0y"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "loop_test6_map",
.key_size = sizeof(int),
.value_size = 8,
.key_type_id = 1,
.value_type_id = 2,
.max_entries = 4,
.btf_load_err = true,
.err_str = "Loop detected",
},
{
.descr = "loop test #7",
.raw_types = {
/* int */ /* [1] */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),
/* struct A { */ /* [2] */
BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), sizeof(void *)),
/* const void *m; */
BTF_MEMBER_ENC(NAME_TBD, 3, 0),
/* CONST type_id=3 */ /* [3] */
BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_CONST, 0, 0), 4),
/* PTR type_id=2 */ /* [4] */
BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), 3),
BTF_END_RAW,
},
.str_sec = "\0A\0m",
.str_sec_size = sizeof("\0A\0m"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "loop_test7_map",
.key_size = sizeof(int),
.value_size = sizeof(void *),
.key_type_id = 1,
.value_type_id = 2,
.max_entries = 4,
.btf_load_err = true,
.err_str = "Loop detected",
},
{
.descr = "loop test #8",
.raw_types = {
/* int */ /* [1] */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),
/* struct A { */ /* [2] */
BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), sizeof(void *)),
/* const void *m; */
BTF_MEMBER_ENC(NAME_TBD, 4, 0),
/* struct B { */ /* [3] */
BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), sizeof(void *)),
/* const void *n; */
BTF_MEMBER_ENC(NAME_TBD, 6, 0),
/* CONST type_id=5 */ /* [4] */
BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_CONST, 0, 0), 5),
/* PTR type_id=6 */ /* [5] */
BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), 6),
/* CONST type_id=7 */ /* [6] */
BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_CONST, 0, 0), 7),
/* PTR type_id=4 */ /* [7] */
BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), 4),
BTF_END_RAW,
},
.str_sec = "\0A\0m\0B\0n",
.str_sec_size = sizeof("\0A\0m\0B\0n"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "loop_test8_map",
.key_size = sizeof(int),
.value_size = sizeof(void *),
.key_type_id = 1,
.value_type_id = 2,
.max_entries = 4,
.btf_load_err = true,
.err_str = "Loop detected",
},
{
.descr = "string section does not end with null",
.raw_types = {
/* int */ /* [1] */
BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4),
BTF_END_RAW,
},
.str_sec = "\0int",
.str_sec_size = sizeof("\0int") - 1,
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "hdr_test_map",
.key_size = sizeof(int),
.value_size = sizeof(int),
.key_type_id = 1,
.value_type_id = 1,
.max_entries = 4,
.btf_load_err = true,
.err_str = "Invalid string section",
},
{
.descr = "empty string section",
.raw_types = {
/* int */ /* [1] */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),
BTF_END_RAW,
},
.str_sec = "",
.str_sec_size = 0,
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "hdr_test_map",
.key_size = sizeof(int),
.value_size = sizeof(int),
.key_type_id = 1,
.value_type_id = 1,
.max_entries = 4,
.btf_load_err = true,
.err_str = "Invalid string section",
},
{
.descr = "empty type section",
.raw_types = {
BTF_END_RAW,
},
.str_sec = "\0int",
.str_sec_size = sizeof("\0int"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "hdr_test_map",
.key_size = sizeof(int),
.value_size = sizeof(int),
.key_type_id = 1,
.value_type_id = 1,
.max_entries = 4,
.btf_load_err = true,
.err_str = "No type found",
},
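/* The btf_header tests below keep the type and string data intact;
 * the *_delta fields are applied by the test harness to the
 * corresponding btf_header fields (hdr_len, type_off, str_off,
 * str_len) after the blob is built, so each test trips exactly one
 * header sanity check.
 */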
{
.descr = "btf_header test. Longer hdr_len",
.raw_types = {
/* int */ /* [1] */
BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4),
BTF_END_RAW,
},
.str_sec = "\0int",
.str_sec_size = sizeof("\0int"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "hdr_test_map",
.key_size = sizeof(int),
.value_size = sizeof(int),
.key_type_id = 1,
.value_type_id = 1,
.max_entries = 4,
.btf_load_err = true,
.hdr_len_delta = 4,
.err_str = "Unsupported btf_header",
},
{
.descr = "btf_header test. Gap between hdr and type",
.raw_types = {
/* int */ /* [1] */
BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4),
BTF_END_RAW,
},
.str_sec = "\0int",
.str_sec_size = sizeof("\0int"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "hdr_test_map",
.key_size = sizeof(int),
.value_size = sizeof(int),
.key_type_id = 1,
.value_type_id = 1,
.max_entries = 4,
.btf_load_err = true,
.type_off_delta = 4,
.err_str = "Unsupported section found",
},
{
.descr = "btf_header test. Gap between type and str",
.raw_types = {
/* int */ /* [1] */
BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4),
BTF_END_RAW,
},
.str_sec = "\0int",
.str_sec_size = sizeof("\0int"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "hdr_test_map",
.key_size = sizeof(int),
.value_size = sizeof(int),
.key_type_id = 1,
.value_type_id = 1,
.max_entries = 4,
.btf_load_err = true,
.str_off_delta = 4,
.err_str = "Unsupported section found",
},
{
.descr = "btf_header test. Overlap between type and str",
.raw_types = {
/* int */ /* [1] */
BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4),
BTF_END_RAW,
},
.str_sec = "\0int",
.str_sec_size = sizeof("\0int"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "hdr_test_map",
.key_size = sizeof(int),
.value_size = sizeof(int),
.key_type_id = 1,
.value_type_id = 1,
.max_entries = 4,
.btf_load_err = true,
.str_off_delta = -4,
.err_str = "Section overlap found",
},
{
.descr = "btf_header test. Larger BTF size",
.raw_types = {
/* int */ /* [1] */
BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4),
BTF_END_RAW,
},
.str_sec = "\0int",
.str_sec_size = sizeof("\0int"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "hdr_test_map",
.key_size = sizeof(int),
.value_size = sizeof(int),
.key_type_id = 1,
.value_type_id = 1,
.max_entries = 4,
.btf_load_err = true,
.str_len_delta = -4,
.err_str = "Unsupported section found",
},
{
.descr = "btf_header test. Smaller BTF size",
.raw_types = {
/* int */ /* [1] */
BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4),
BTF_END_RAW,
},
.str_sec = "\0int",
.str_sec_size = sizeof("\0int"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "hdr_test_map",
.key_size = sizeof(int),
.value_size = sizeof(int),
.key_type_id = 1,
.value_type_id = 1,
.max_entries = 4,
.btf_load_err = true,
.str_len_delta = 4,
.err_str = "Total section length too long",
},
{
.descr = "array test. index_type/elem_type \"int\"",
.raw_types = {
/* int */ /* [1] */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),
/* int[16] */ /* [2] */
BTF_TYPE_ARRAY_ENC(1, 1, 16),
BTF_END_RAW,
},
.str_sec = "",
.str_sec_size = sizeof(""),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "array_test_map",
.key_size = sizeof(int),
.value_size = sizeof(int),
.key_type_id = 1,
.value_type_id = 1,
.max_entries = 4,
},
{
.descr = "array test. index_type/elem_type \"const int\"",
.raw_types = {
/* int */ /* [1] */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),
	/* const int[16] */		/* [2] */
BTF_TYPE_ARRAY_ENC(3, 3, 16),
/* CONST type_id=1 */ /* [3] */
BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_CONST, 0, 0), 1),
BTF_END_RAW,
},
.str_sec = "",
.str_sec_size = sizeof(""),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "array_test_map",
.key_size = sizeof(int),
.value_size = sizeof(int),
.key_type_id = 1,
.value_type_id = 1,
.max_entries = 4,
},
{
.descr = "array test. index_type \"const int:31\"",
.raw_types = {
/* int */ /* [1] */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),
/* int:31 */ /* [2] */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 31, 4),
/* int[16] */ /* [3] */
BTF_TYPE_ARRAY_ENC(1, 4, 16),
/* CONST type_id=2 */ /* [4] */
BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_CONST, 0, 0), 2),
BTF_END_RAW,
},
.str_sec = "",
.str_sec_size = sizeof(""),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "array_test_map",
.key_size = sizeof(int),
.value_size = sizeof(int),
.key_type_id = 1,
.value_type_id = 1,
.max_entries = 4,
.btf_load_err = true,
.err_str = "Invalid index",
},
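/* An array index type must resolve to a plain 32-bit int: the
 * bitfield-style int:31 above, as well as the void and const void
 * index types below, are all rejected with "Invalid index".
 */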
{
.descr = "array test. elem_type \"const int:31\"",
.raw_types = {
/* int */ /* [1] */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),
/* int:31 */ /* [2] */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 31, 4),
	/* const int:31[16] */		/* [3] */
BTF_TYPE_ARRAY_ENC(4, 1, 16),
/* CONST type_id=2 */ /* [4] */
BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_CONST, 0, 0), 2),
BTF_END_RAW,
},
.str_sec = "",
.str_sec_size = sizeof(""),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "array_test_map",
.key_size = sizeof(int),
.value_size = sizeof(int),
.key_type_id = 1,
.value_type_id = 1,
.max_entries = 4,
.btf_load_err = true,
.err_str = "Invalid array of int",
},
{
.descr = "array test. index_type \"void\"",
.raw_types = {
/* int */ /* [1] */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),
/* int[16] */ /* [2] */
BTF_TYPE_ARRAY_ENC(1, 0, 16),
BTF_END_RAW,
},
.str_sec = "",
.str_sec_size = sizeof(""),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "array_test_map",
.key_size = sizeof(int),
.value_size = sizeof(int),
.key_type_id = 1,
.value_type_id = 1,
.max_entries = 4,
.btf_load_err = true,
.err_str = "Invalid index",
},
{
.descr = "array test. index_type \"const void\"",
.raw_types = {
/* int */ /* [1] */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),
/* int[16] */ /* [2] */
BTF_TYPE_ARRAY_ENC(1, 3, 16),
/* CONST type_id=0 (void) */ /* [3] */
BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_CONST, 0, 0), 0),
BTF_END_RAW,
},
.str_sec = "",
.str_sec_size = sizeof(""),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "array_test_map",
.key_size = sizeof(int),
.value_size = sizeof(int),
.key_type_id = 1,
.value_type_id = 1,
.max_entries = 4,
.btf_load_err = true,
.err_str = "Invalid index",
},
{
.descr = "array test. elem_type \"const void\"",
.raw_types = {
/* int */ /* [1] */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),
	/* const void[16] */		/* [2] */
BTF_TYPE_ARRAY_ENC(3, 1, 16),
/* CONST type_id=0 (void) */ /* [3] */
BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_CONST, 0, 0), 0),
BTF_END_RAW,
},
.str_sec = "",
.str_sec_size = sizeof(""),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "array_test_map",
.key_size = sizeof(int),
.value_size = sizeof(int),
.key_type_id = 1,
.value_type_id = 1,
.max_entries = 4,
.btf_load_err = true,
.err_str = "Invalid elem",
},
{
.descr = "array test. elem_type \"const void *\"",
.raw_types = {
/* int */ /* [1] */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),
/* const void *[16] */ /* [2] */
BTF_TYPE_ARRAY_ENC(3, 1, 16),
/* CONST type_id=4 */ /* [3] */
BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_CONST, 0, 0), 4),
/* void* */ /* [4] */
BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), 0),
BTF_END_RAW,
},
.str_sec = "",
.str_sec_size = sizeof(""),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "array_test_map",
.key_size = sizeof(int),
.value_size = sizeof(int),
.key_type_id = 1,
.value_type_id = 1,
.max_entries = 4,
},
{
.descr = "array test. index_type \"const void *\"",
.raw_types = {
/* int */ /* [1] */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),
/* const void *[16] */ /* [2] */
BTF_TYPE_ARRAY_ENC(3, 3, 16),
/* CONST type_id=4 */ /* [3] */
BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_CONST, 0, 0), 4),
/* void* */ /* [4] */
BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), 0),
BTF_END_RAW,
},
.str_sec = "",
.str_sec_size = sizeof(""),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "array_test_map",
.key_size = sizeof(int),
.value_size = sizeof(int),
.key_type_id = 1,
.value_type_id = 1,
.max_entries = 4,
.btf_load_err = true,
.err_str = "Invalid index",
},
{
.descr = "array test. t->size != 0\"",
.raw_types = {
/* int */ /* [1] */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),
/* int[16] */ /* [2] */
BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_ARRAY, 0, 0), 1),
BTF_ARRAY_ENC(1, 1, 16),
BTF_END_RAW,
},
.str_sec = "",
.str_sec_size = sizeof(""),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "array_test_map",
.key_size = sizeof(int),
.value_size = sizeof(int),
.key_type_id = 1,
.value_type_id = 1,
.max_entries = 4,
.btf_load_err = true,
.err_str = "size != 0",
},
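/* For BTF_KIND_ARRAY the size/type word of struct btf_type must be 0;
 * the array geometry lives entirely in the trailing struct btf_array
 * (BTF_ARRAY_ENC), which is why the encoding above is rejected.
 */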
{
.descr = "int test. invalid int_data",
.raw_types = {
BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_INT, 0, 0), 4),
0x10000000,
BTF_END_RAW,
},
.str_sec = "",
.str_sec_size = sizeof(""),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "array_test_map",
.key_size = sizeof(int),
.value_size = sizeof(int),
.key_type_id = 1,
.value_type_id = 1,
.max_entries = 4,
.btf_load_err = true,
.err_str = "Invalid int_data",
},
{
.descr = "invalid BTF_INFO",
.raw_types = {
/* int */ /* [1] */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),
BTF_TYPE_ENC(0, 0x20000000, 4),
BTF_END_RAW,
},
.str_sec = "",
.str_sec_size = sizeof(""),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "array_test_map",
.key_size = sizeof(int),
.value_size = sizeof(int),
.key_type_id = 1,
.value_type_id = 1,
.max_entries = 4,
.btf_load_err = true,
.err_str = "Invalid btf_info",
},
{
.descr = "fwd test. t->type != 0\"",
.raw_types = {
/* int */ /* [1] */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),
/* fwd type */ /* [2] */
BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FWD, 0, 0), 1),
BTF_END_RAW,
},
.str_sec = "",
.str_sec_size = sizeof(""),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "fwd_test_map",
.key_size = sizeof(int),
.value_size = sizeof(int),
.key_type_id = 1,
.value_type_id = 1,
.max_entries = 4,
.btf_load_err = true,
.err_str = "type != 0",
},
{
.descr = "typedef (invalid name, name_off = 0)",
.raw_types = {
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_TYPEDEF_ENC(0, 1), /* [2] */
BTF_END_RAW,
},
.str_sec = "\0__int",
.str_sec_size = sizeof("\0__int"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "typedef_check_btf",
.key_size = sizeof(int),
.value_size = sizeof(int),
.key_type_id = 1,
.value_type_id = 1,
.max_entries = 4,
.btf_load_err = true,
.err_str = "Invalid name",
},
{
.descr = "typedef (invalid name, invalid identifier)",
.raw_types = {
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_TYPEDEF_ENC(NAME_TBD, 1), /* [2] */
BTF_END_RAW,
},
.str_sec = "\0__!int",
.str_sec_size = sizeof("\0__!int"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "typedef_check_btf",
.key_size = sizeof(int),
.value_size = sizeof(int),
.key_type_id = 1,
.value_type_id = 1,
.max_entries = 4,
.btf_load_err = true,
.err_str = "Invalid name",
},
{
.descr = "ptr type (invalid name, name_off <> 0)",
.raw_types = {
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_TYPE_ENC(NAME_TBD,
BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), 1), /* [2] */
BTF_END_RAW,
},
.str_sec = "\0__int",
.str_sec_size = sizeof("\0__int"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "ptr_type_check_btf",
.key_size = sizeof(int),
.value_size = sizeof(int),
.key_type_id = 1,
.value_type_id = 1,
.max_entries = 4,
.btf_load_err = true,
.err_str = "Invalid name",
},
{
.descr = "volatile type (invalid name, name_off <> 0)",
.raw_types = {
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_TYPE_ENC(NAME_TBD,
BTF_INFO_ENC(BTF_KIND_VOLATILE, 0, 0), 1), /* [2] */
BTF_END_RAW,
},
.str_sec = "\0__int",
.str_sec_size = sizeof("\0__int"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "volatile_type_check_btf",
.key_size = sizeof(int),
.value_size = sizeof(int),
.key_type_id = 1,
.value_type_id = 1,
.max_entries = 4,
.btf_load_err = true,
.err_str = "Invalid name",
},
{
.descr = "const type (invalid name, name_off <> 0)",
.raw_types = {
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_TYPE_ENC(NAME_TBD,
BTF_INFO_ENC(BTF_KIND_CONST, 0, 0), 1), /* [2] */
BTF_END_RAW,
},
.str_sec = "\0__int",
.str_sec_size = sizeof("\0__int"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "const_type_check_btf",
.key_size = sizeof(int),
.value_size = sizeof(int),
.key_type_id = 1,
.value_type_id = 1,
.max_entries = 4,
.btf_load_err = true,
.err_str = "Invalid name",
},
{
.descr = "restrict type (invalid name, name_off <> 0)",
.raw_types = {
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), 1), /* [2] */
BTF_TYPE_ENC(NAME_TBD,
BTF_INFO_ENC(BTF_KIND_RESTRICT, 0, 0), 2), /* [3] */
BTF_END_RAW,
},
.str_sec = "\0__int",
.str_sec_size = sizeof("\0__int"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "restrict_type_check_btf",
.key_size = sizeof(int),
.value_size = sizeof(int),
.key_type_id = 1,
.value_type_id = 1,
.max_entries = 4,
.btf_load_err = true,
.err_str = "Invalid name",
},
{
.descr = "fwd type (invalid name, name_off = 0)",
.raw_types = {
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FWD, 0, 0), 0), /* [2] */
BTF_END_RAW,
},
.str_sec = "\0__skb",
.str_sec_size = sizeof("\0__skb"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "fwd_type_check_btf",
.key_size = sizeof(int),
.value_size = sizeof(int),
.key_type_id = 1,
.value_type_id = 1,
.max_entries = 4,
.btf_load_err = true,
.err_str = "Invalid name",
},
{
.descr = "fwd type (invalid name, invalid identifier)",
.raw_types = {
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_TYPE_ENC(NAME_TBD,
BTF_INFO_ENC(BTF_KIND_FWD, 0, 0), 0), /* [2] */
BTF_END_RAW,
},
.str_sec = "\0__!skb",
.str_sec_size = sizeof("\0__!skb"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "fwd_type_check_btf",
.key_size = sizeof(int),
.value_size = sizeof(int),
.key_type_id = 1,
.value_type_id = 1,
.max_entries = 4,
.btf_load_err = true,
.err_str = "Invalid name",
},
{
.descr = "array type (invalid name, name_off <> 0)",
.raw_types = {
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_TYPE_ENC(NAME_TBD,
BTF_INFO_ENC(BTF_KIND_ARRAY, 0, 0), 0), /* [2] */
BTF_ARRAY_ENC(1, 1, 4),
BTF_END_RAW,
},
.str_sec = "\0__skb",
.str_sec_size = sizeof("\0__skb"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "array_type_check_btf",
.key_size = sizeof(int),
.value_size = sizeof(int),
.key_type_id = 1,
.value_type_id = 1,
.max_entries = 4,
.btf_load_err = true,
.err_str = "Invalid name",
},
{
.descr = "struct type (name_off = 0)",
.raw_types = {
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_TYPE_ENC(0,
BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), 4), /* [2] */
BTF_MEMBER_ENC(NAME_TBD, 1, 0),
BTF_END_RAW,
},
.str_sec = "\0A",
.str_sec_size = sizeof("\0A"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "struct_type_check_btf",
.key_size = sizeof(int),
.value_size = sizeof(int),
.key_type_id = 1,
.value_type_id = 1,
.max_entries = 4,
},
{
.descr = "struct type (invalid name, invalid identifier)",
.raw_types = {
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_TYPE_ENC(NAME_TBD,
BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), 4), /* [2] */
BTF_MEMBER_ENC(NAME_TBD, 1, 0),
BTF_END_RAW,
},
.str_sec = "\0A!\0B",
.str_sec_size = sizeof("\0A!\0B"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "struct_type_check_btf",
.key_size = sizeof(int),
.value_size = sizeof(int),
.key_type_id = 1,
.value_type_id = 1,
.max_entries = 4,
.btf_load_err = true,
.err_str = "Invalid name",
},
{
.descr = "struct member (name_off = 0)",
.raw_types = {
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_TYPE_ENC(0,
BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), 4), /* [2] */
BTF_MEMBER_ENC(NAME_TBD, 1, 0),
BTF_END_RAW,
},
.str_sec = "\0A",
.str_sec_size = sizeof("\0A"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "struct_type_check_btf",
.key_size = sizeof(int),
.value_size = sizeof(int),
.key_type_id = 1,
.value_type_id = 1,
.max_entries = 4,
},
{
.descr = "struct member (invalid name, invalid identifier)",
.raw_types = {
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_TYPE_ENC(NAME_TBD,
BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), 4), /* [2] */
BTF_MEMBER_ENC(NAME_TBD, 1, 0),
BTF_END_RAW,
},
.str_sec = "\0A\0B*",
.str_sec_size = sizeof("\0A\0B*"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "struct_type_check_btf",
.key_size = sizeof(int),
.value_size = sizeof(int),
.key_type_id = 1,
.value_type_id = 1,
.max_entries = 4,
.btf_load_err = true,
.err_str = "Invalid name",
},
{
.descr = "enum type (name_off = 0)",
.raw_types = {
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_TYPE_ENC(0,
BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1),
sizeof(int)), /* [2] */
BTF_ENUM_ENC(NAME_TBD, 0),
BTF_END_RAW,
},
.str_sec = "\0A\0B",
.str_sec_size = sizeof("\0A\0B"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "enum_type_check_btf",
.key_size = sizeof(int),
.value_size = sizeof(int),
.key_type_id = 1,
.value_type_id = 1,
.max_entries = 4,
},
{
.descr = "enum type (invalid name, invalid identifier)",
.raw_types = {
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_TYPE_ENC(NAME_TBD,
BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1),
sizeof(int)), /* [2] */
BTF_ENUM_ENC(NAME_TBD, 0),
BTF_END_RAW,
},
.str_sec = "\0A!\0B",
.str_sec_size = sizeof("\0A!\0B"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "enum_type_check_btf",
.key_size = sizeof(int),
.value_size = sizeof(int),
.key_type_id = 1,
.value_type_id = 1,
.max_entries = 4,
.btf_load_err = true,
.err_str = "Invalid name",
},
{
.descr = "enum member (invalid name, name_off = 0)",
.raw_types = {
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_TYPE_ENC(0,
BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1),
sizeof(int)), /* [2] */
BTF_ENUM_ENC(0, 0),
BTF_END_RAW,
},
.str_sec = "",
.str_sec_size = sizeof(""),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "enum_type_check_btf",
.key_size = sizeof(int),
.value_size = sizeof(int),
.key_type_id = 1,
.value_type_id = 1,
.max_entries = 4,
.btf_load_err = true,
.err_str = "Invalid name",
},
{
.descr = "enum member (invalid name, invalid identifier)",
.raw_types = {
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_TYPE_ENC(0,
BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1),
sizeof(int)), /* [2] */
BTF_ENUM_ENC(NAME_TBD, 0),
BTF_END_RAW,
},
.str_sec = "\0A!",
.str_sec_size = sizeof("\0A!"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "enum_type_check_btf",
.key_size = sizeof(int),
.value_size = sizeof(int),
.key_type_id = 1,
.value_type_id = 1,
.max_entries = 4,
.btf_load_err = true,
.err_str = "Invalid name",
},
{
.descr = "arraymap invalid btf key (a bit field)",
.raw_types = {
/* int */ /* [1] */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),
/* 32 bit int with 32 bit offset */ /* [2] */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 32, 32, 8),
BTF_END_RAW,
},
.str_sec = "",
.str_sec_size = sizeof(""),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "array_map_check_btf",
.key_size = sizeof(int),
.value_size = sizeof(int),
.key_type_id = 2,
.value_type_id = 1,
.max_entries = 4,
.map_create_err = true,
},
{
.descr = "arraymap invalid btf key (!= 32 bits)",
.raw_types = {
/* int */ /* [1] */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),
/* 16 bit int with 0 bit offset */ /* [2] */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 16, 2),
BTF_END_RAW,
},
.str_sec = "",
.str_sec_size = sizeof(""),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "array_map_check_btf",
.key_size = sizeof(int),
.value_size = sizeof(int),
.key_type_id = 2,
.value_type_id = 1,
.max_entries = 4,
.map_create_err = true,
},
{
.descr = "arraymap invalid btf value (too small)",
.raw_types = {
/* int */ /* [1] */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),
BTF_END_RAW,
},
.str_sec = "",
.str_sec_size = sizeof(""),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "array_map_check_btf",
.key_size = sizeof(int),
/* btf_value_size < map->value_size */
.value_size = sizeof(__u64),
.key_type_id = 1,
.value_type_id = 1,
.max_entries = 4,
.map_create_err = true,
},
{
.descr = "arraymap invalid btf value (too big)",
.raw_types = {
/* int */ /* [1] */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),
BTF_END_RAW,
},
.str_sec = "",
.str_sec_size = sizeof(""),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "array_map_check_btf",
.key_size = sizeof(int),
/* btf_value_size > map->value_size */
.value_size = sizeof(__u16),
.key_type_id = 1,
.value_type_id = 1,
.max_entries = 4,
.map_create_err = true,
},
{
.descr = "func proto (int (*)(int, unsigned int))",
.raw_types = {
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_TYPE_INT_ENC(0, 0, 0, 32, 4), /* [2] */
/* int (*)(int, unsigned int) */
BTF_FUNC_PROTO_ENC(1, 2), /* [3] */
BTF_FUNC_PROTO_ARG_ENC(0, 1),
BTF_FUNC_PROTO_ARG_ENC(0, 2),
BTF_END_RAW,
},
.str_sec = "",
.str_sec_size = sizeof(""),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "func_proto_type_check_btf",
.key_size = sizeof(int),
.value_size = sizeof(int),
.key_type_id = 1,
.value_type_id = 1,
.max_entries = 4,
},
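/* FUNC_PROTO encoding: BTF_FUNC_PROTO_ENC(ret_type, nr_args) is
 * followed by nr_args BTF_FUNC_PROTO_ARG_ENC(name, type) entries; a
 * trailing argument with both name and type 0 denotes "...", as the
 * vararg tests below demonstrate.
 */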
{
.descr = "func proto (vararg)",
.raw_types = {
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_TYPE_INT_ENC(0, 0, 0, 32, 4), /* [2] */
/* void (*)(int, unsigned int, ...) */
BTF_FUNC_PROTO_ENC(0, 3), /* [3] */
BTF_FUNC_PROTO_ARG_ENC(0, 1),
BTF_FUNC_PROTO_ARG_ENC(0, 2),
BTF_FUNC_PROTO_ARG_ENC(0, 0),
BTF_END_RAW,
},
.str_sec = "",
.str_sec_size = sizeof(""),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "func_proto_type_check_btf",
.key_size = sizeof(int),
.value_size = sizeof(int),
.key_type_id = 1,
.value_type_id = 1,
.max_entries = 4,
},
{
.descr = "func proto (vararg with name)",
.raw_types = {
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_TYPE_INT_ENC(0, 0, 0, 32, 4), /* [2] */
/* void (*)(int a, unsigned int b, ... c) */
BTF_FUNC_PROTO_ENC(0, 3), /* [3] */
BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1),
BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 2),
BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 0),
BTF_END_RAW,
},
.str_sec = "\0a\0b\0c",
.str_sec_size = sizeof("\0a\0b\0c"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "func_proto_type_check_btf",
.key_size = sizeof(int),
.value_size = sizeof(int),
.key_type_id = 1,
.value_type_id = 1,
.max_entries = 4,
.btf_load_err = true,
.err_str = "Invalid arg#3",
},
{
.descr = "func proto (arg after vararg)",
.raw_types = {
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_TYPE_INT_ENC(0, 0, 0, 32, 4), /* [2] */
/* void (*)(int a, ..., unsigned int b) */
BTF_FUNC_PROTO_ENC(0, 3), /* [3] */
BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1),
BTF_FUNC_PROTO_ARG_ENC(0, 0),
BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 2),
BTF_END_RAW,
},
.str_sec = "\0a\0b",
.str_sec_size = sizeof("\0a\0b"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "func_proto_type_check_btf",
.key_size = sizeof(int),
.value_size = sizeof(int),
.key_type_id = 1,
.value_type_id = 1,
.max_entries = 4,
.btf_load_err = true,
.err_str = "Invalid arg#2",
},
{
.descr = "func proto (CONST=>TYPEDEF=>PTR=>FUNC_PROTO)",
.raw_types = {
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_TYPE_INT_ENC(0, 0, 0, 32, 4), /* [2] */
/* typedef void (*func_ptr)(int, unsigned int) */
BTF_TYPEDEF_ENC(NAME_TBD, 5), /* [3] */
/* const func_ptr */
BTF_CONST_ENC(3), /* [4] */
BTF_PTR_ENC(6), /* [5] */
BTF_FUNC_PROTO_ENC(0, 2), /* [6] */
BTF_FUNC_PROTO_ARG_ENC(0, 1),
BTF_FUNC_PROTO_ARG_ENC(0, 2),
BTF_END_RAW,
},
.str_sec = "\0func_ptr",
.str_sec_size = sizeof("\0func_ptr"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "func_proto_type_check_btf",
.key_size = sizeof(int),
.value_size = sizeof(int),
.key_type_id = 1,
.value_type_id = 1,
.max_entries = 4,
},
{
.descr = "func proto (TYPEDEF=>FUNC_PROTO)",
.raw_types = {
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_TYPE_INT_ENC(0, 0, 0, 32, 4), /* [2] */
BTF_TYPEDEF_ENC(NAME_TBD, 4), /* [3] */
BTF_FUNC_PROTO_ENC(0, 2), /* [4] */
BTF_FUNC_PROTO_ARG_ENC(0, 1),
BTF_FUNC_PROTO_ARG_ENC(0, 2),
BTF_END_RAW,
},
.str_sec = "\0func_typedef",
.str_sec_size = sizeof("\0func_typedef"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "func_proto_type_check_btf",
.key_size = sizeof(int),
.value_size = sizeof(int),
.key_type_id = 1,
.value_type_id = 1,
.max_entries = 4,
},
{
.descr = "func proto (btf_resolve(arg))",
.raw_types = {
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
/* void (*)(const void *) */
BTF_FUNC_PROTO_ENC(0, 1), /* [2] */
BTF_FUNC_PROTO_ARG_ENC(0, 3),
BTF_CONST_ENC(4), /* [3] */
BTF_PTR_ENC(0), /* [4] */
BTF_END_RAW,
},
.str_sec = "",
.str_sec_size = sizeof(""),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "func_proto_type_check_btf",
.key_size = sizeof(int),
.value_size = sizeof(int),
.key_type_id = 1,
.value_type_id = 1,
.max_entries = 4,
},
{
.descr = "func proto (Not all arg has name)",
.raw_types = {
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_TYPE_INT_ENC(0, 0, 0, 32, 4), /* [2] */
/* void (*)(int, unsigned int b) */
BTF_FUNC_PROTO_ENC(0, 2), /* [3] */
BTF_FUNC_PROTO_ARG_ENC(0, 1),
BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 2),
BTF_END_RAW,
},
.str_sec = "\0b",
.str_sec_size = sizeof("\0b"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "func_proto_type_check_btf",
.key_size = sizeof(int),
.value_size = sizeof(int),
.key_type_id = 1,
.value_type_id = 1,
.max_entries = 4,
},
{
.descr = "func proto (Bad arg name_off)",
.raw_types = {
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_TYPE_INT_ENC(0, 0, 0, 32, 4), /* [2] */
/* void (*)(int a, unsigned int <bad_name_off>) */
BTF_FUNC_PROTO_ENC(0, 2), /* [3] */
BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1),
BTF_FUNC_PROTO_ARG_ENC(0x0fffffff, 2),
BTF_END_RAW,
},
.str_sec = "\0a",
.str_sec_size = sizeof("\0a"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "func_proto_type_check_btf",
.key_size = sizeof(int),
.value_size = sizeof(int),
.key_type_id = 1,
.value_type_id = 1,
.max_entries = 4,
.btf_load_err = true,
.err_str = "Invalid arg#2",
},
{
.descr = "func proto (Bad arg name)",
.raw_types = {
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_TYPE_INT_ENC(0, 0, 0, 32, 4), /* [2] */
/* void (*)(int a, unsigned int !!!) */
BTF_FUNC_PROTO_ENC(0, 2), /* [3] */
BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1),
BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 2),
BTF_END_RAW,
},
.str_sec = "\0a\0!!!",
.str_sec_size = sizeof("\0a\0!!!"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "func_proto_type_check_btf",
.key_size = sizeof(int),
.value_size = sizeof(int),
.key_type_id = 1,
.value_type_id = 1,
.max_entries = 4,
.btf_load_err = true,
.err_str = "Invalid arg#2",
},
{
.descr = "func proto (Invalid return type)",
.raw_types = {
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_TYPE_INT_ENC(0, 0, 0, 32, 4), /* [2] */
/* <bad_ret_type> (*)(int, unsigned int) */
BTF_FUNC_PROTO_ENC(100, 2), /* [3] */
BTF_FUNC_PROTO_ARG_ENC(0, 1),
BTF_FUNC_PROTO_ARG_ENC(0, 2),
BTF_END_RAW,
},
.str_sec = "",
.str_sec_size = sizeof(""),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "func_proto_type_check_btf",
.key_size = sizeof(int),
.value_size = sizeof(int),
.key_type_id = 1,
.value_type_id = 1,
.max_entries = 4,
.btf_load_err = true,
.err_str = "Invalid return type",
},
{
.descr = "func proto (with func name)",
.raw_types = {
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_TYPE_INT_ENC(0, 0, 0, 32, 4), /* [2] */
/* void func_proto(int, unsigned int) */
BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 2), 0), /* [3] */
BTF_FUNC_PROTO_ARG_ENC(0, 1),
BTF_FUNC_PROTO_ARG_ENC(0, 2),
BTF_END_RAW,
},
.str_sec = "\0func_proto",
.str_sec_size = sizeof("\0func_proto"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "func_proto_type_check_btf",
.key_size = sizeof(int),
.value_size = sizeof(int),
.key_type_id = 1,
.value_type_id = 1,
.max_entries = 4,
.btf_load_err = true,
.err_str = "Invalid name",
},
{
.descr = "func proto (const void arg)",
.raw_types = {
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_TYPE_INT_ENC(0, 0, 0, 32, 4), /* [2] */
/* void (*)(const void) */
BTF_FUNC_PROTO_ENC(0, 1), /* [3] */
BTF_FUNC_PROTO_ARG_ENC(0, 4),
BTF_CONST_ENC(0), /* [4] */
BTF_END_RAW,
},
.str_sec = "",
.str_sec_size = sizeof(""),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "func_proto_type_check_btf",
.key_size = sizeof(int),
.value_size = sizeof(int),
.key_type_id = 1,
.value_type_id = 1,
.max_entries = 4,
.btf_load_err = true,
.err_str = "Invalid arg#1",
},
{
.descr = "func (void func(int a, unsigned int b))",
.raw_types = {
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_TYPE_INT_ENC(0, 0, 0, 32, 4), /* [2] */
/* void (*)(int a, unsigned int b) */
BTF_FUNC_PROTO_ENC(0, 2), /* [3] */
BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1),
BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 2),
/* void func(int a, unsigned int b) */
BTF_FUNC_ENC(NAME_TBD, 3), /* [4] */
BTF_END_RAW,
},
.str_sec = "\0a\0b\0func",
.str_sec_size = sizeof("\0a\0b\0func"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "func_type_check_btf",
.key_size = sizeof(int),
.value_size = sizeof(int),
.key_type_id = 1,
.value_type_id = 1,
.max_entries = 4,
},
{
.descr = "func (No func name)",
.raw_types = {
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_TYPE_INT_ENC(0, 0, 0, 32, 4), /* [2] */
/* void (*)(int a, unsigned int b) */
BTF_FUNC_PROTO_ENC(0, 2), /* [3] */
BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1),
BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 2),
/* void <no_name>(int a, unsigned int b) */
BTF_FUNC_ENC(0, 3), /* [4] */
BTF_END_RAW,
},
.str_sec = "\0a\0b",
.str_sec_size = sizeof("\0a\0b"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "func_type_check_btf",
.key_size = sizeof(int),
.value_size = sizeof(int),
.key_type_id = 1,
.value_type_id = 1,
.max_entries = 4,
.btf_load_err = true,
.err_str = "Invalid name",
},
{
.descr = "func (Invalid func name)",
.raw_types = {
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_TYPE_INT_ENC(0, 0, 0, 32, 4), /* [2] */
/* void (*)(int a, unsigned int b) */
BTF_FUNC_PROTO_ENC(0, 2), /* [3] */
BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1),
BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 2),
/* void !!!(int a, unsigned int b) */
BTF_FUNC_ENC(NAME_TBD, 3), /* [4] */
BTF_END_RAW,
},
.str_sec = "\0a\0b\0!!!",
.str_sec_size = sizeof("\0a\0b\0!!!"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "func_type_check_btf",
.key_size = sizeof(int),
.value_size = sizeof(int),
.key_type_id = 1,
.value_type_id = 1,
.max_entries = 4,
.btf_load_err = true,
.err_str = "Invalid name",
},
{
.descr = "func (Some arg has no name)",
.raw_types = {
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_TYPE_INT_ENC(0, 0, 0, 32, 4), /* [2] */
/* void (*)(int a, unsigned int) */
BTF_FUNC_PROTO_ENC(0, 2), /* [3] */
BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1),
BTF_FUNC_PROTO_ARG_ENC(0, 2),
/* void func(int a, unsigned int) */
BTF_FUNC_ENC(NAME_TBD, 3), /* [4] */
BTF_END_RAW,
},
.str_sec = "\0a\0func",
.str_sec_size = sizeof("\0a\0func"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "func_type_check_btf",
.key_size = sizeof(int),
.value_size = sizeof(int),
.key_type_id = 1,
.value_type_id = 1,
.max_entries = 4,
.btf_load_err = true,
.err_str = "Invalid arg#2",
},
{
.descr = "func (Non zero vlen)",
.raw_types = {
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_TYPE_INT_ENC(0, 0, 0, 32, 4), /* [2] */
/* void (*)(int a, unsigned int b) */
BTF_FUNC_PROTO_ENC(0, 2), /* [3] */
BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1),
BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 2),
/* void func(int a, unsigned int b) */
BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_FUNC, 0, 2), 3), /* [4] */
BTF_END_RAW,
},
.str_sec = "\0a\0b\0func",
.str_sec_size = sizeof("\0a\0b\0func"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "func_type_check_btf",
.key_size = sizeof(int),
.value_size = sizeof(int),
.key_type_id = 1,
.value_type_id = 1,
.max_entries = 4,
.btf_load_err = true,
.err_str = "Invalid func linkage",
},
{
.descr = "func (Not referring to FUNC_PROTO)",
.raw_types = {
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_FUNC_ENC(NAME_TBD, 1), /* [2] */
BTF_END_RAW,
},
.str_sec = "\0func",
.str_sec_size = sizeof("\0func"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "func_type_check_btf",
.key_size = sizeof(int),
.value_size = sizeof(int),
.key_type_id = 1,
.value_type_id = 1,
.max_entries = 4,
.btf_load_err = true,
.err_str = "Invalid type_id",
},
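{
/*
 * The kind_flag bit in btf_type.info is only meaningful for a few
 * kinds: for STRUCT/UNION it switches member offsets to the
 * bitfield_size/bit_offset encoding, for FWD it distinguishes a
 * union fwd from a struct fwd, and for ENUM/ENUM64 it marks the
 * values as signed. The tests below verify that every other kind
 * must keep the bit zero.
 */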
{
.descr = "invalid int kind_flag",
.raw_types = {
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_INT, 1, 0), 4), /* [2] */
BTF_INT_ENC(0, 0, 32),
BTF_END_RAW,
},
BTF_STR_SEC(""),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "int_type_check_btf",
.key_size = sizeof(int),
.value_size = sizeof(int),
.key_type_id = 1,
.value_type_id = 1,
.max_entries = 4,
.btf_load_err = true,
.err_str = "Invalid btf_info kind_flag",
},
{
.descr = "invalid ptr kind_flag",
.raw_types = {
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_PTR, 1, 0), 1), /* [2] */
BTF_END_RAW,
},
BTF_STR_SEC(""),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "ptr_type_check_btf",
.key_size = sizeof(int),
.value_size = sizeof(int),
.key_type_id = 1,
.value_type_id = 1,
.max_entries = 4,
.btf_load_err = true,
.err_str = "Invalid btf_info kind_flag",
},
{
.descr = "invalid array kind_flag",
.raw_types = {
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_ARRAY, 1, 0), 0), /* [2] */
BTF_ARRAY_ENC(1, 1, 1),
BTF_END_RAW,
},
BTF_STR_SEC(""),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "array_type_check_btf",
.key_size = sizeof(int),
.value_size = sizeof(int),
.key_type_id = 1,
.value_type_id = 1,
.max_entries = 4,
.btf_load_err = true,
.err_str = "Invalid btf_info kind_flag",
},
{
.descr = "valid fwd kind_flag",
.raw_types = {
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_TYPE_ENC(NAME_TBD,
BTF_INFO_ENC(BTF_KIND_FWD, 1, 0), 0), /* [2] */
BTF_END_RAW,
},
BTF_STR_SEC("\0A"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "fwd_type_check_btf",
.key_size = sizeof(int),
.value_size = sizeof(int),
.key_type_id = 1,
.value_type_id = 1,
.max_entries = 4,
},
{
.descr = "invalid typedef kind_flag",
.raw_types = {
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_TYPE_ENC(NAME_TBD,
BTF_INFO_ENC(BTF_KIND_TYPEDEF, 1, 0), 1), /* [2] */
BTF_END_RAW,
},
BTF_STR_SEC("\0A"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "typedef_type_check_btf",
.key_size = sizeof(int),
.value_size = sizeof(int),
.key_type_id = 1,
.value_type_id = 1,
.max_entries = 4,
.btf_load_err = true,
.err_str = "Invalid btf_info kind_flag",
},
{
.descr = "invalid volatile kind_flag",
.raw_types = {
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_VOLATILE, 1, 0), 1), /* [2] */
BTF_END_RAW,
},
BTF_STR_SEC(""),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "volatile_type_check_btf",
.key_size = sizeof(int),
.value_size = sizeof(int),
.key_type_id = 1,
.value_type_id = 1,
.max_entries = 4,
.btf_load_err = true,
.err_str = "Invalid btf_info kind_flag",
},
{
.descr = "invalid const kind_flag",
.raw_types = {
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_CONST, 1, 0), 1), /* [2] */
BTF_END_RAW,
},
BTF_STR_SEC(""),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "const_type_check_btf",
.key_size = sizeof(int),
.value_size = sizeof(int),
.key_type_id = 1,
.value_type_id = 1,
.max_entries = 4,
.btf_load_err = true,
.err_str = "Invalid btf_info kind_flag",
},
{
.descr = "invalid restrict kind_flag",
.raw_types = {
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_RESTRICT, 1, 0), 1), /* [2] */
BTF_END_RAW,
},
BTF_STR_SEC(""),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "restrict_type_check_btf",
.key_size = sizeof(int),
.value_size = sizeof(int),
.key_type_id = 1,
.value_type_id = 1,
.max_entries = 4,
.btf_load_err = true,
.err_str = "Invalid btf_info kind_flag",
},
{
.descr = "invalid func kind_flag",
.raw_types = {
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 0), 0), /* [2] */
BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_FUNC, 1, 0), 2), /* [3] */
BTF_END_RAW,
},
BTF_STR_SEC("\0A"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "func_type_check_btf",
.key_size = sizeof(int),
.value_size = sizeof(int),
.key_type_id = 1,
.value_type_id = 1,
.max_entries = 4,
.btf_load_err = true,
.err_str = "Invalid btf_info kind_flag",
},
{
.descr = "invalid func_proto kind_flag",
.raw_types = {
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 1, 0), 0), /* [2] */
BTF_END_RAW,
},
BTF_STR_SEC(""),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "func_proto_type_check_btf",
.key_size = sizeof(int),
.value_size = sizeof(int),
.key_type_id = 1,
.value_type_id = 1,
.max_entries = 4,
.btf_load_err = true,
.err_str = "Invalid btf_info kind_flag",
},
{
.descr = "valid struct, kind_flag, bitfield_size = 0",
.raw_types = {
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_STRUCT, 1, 2), 8), /* [2] */
BTF_MEMBER_ENC(NAME_TBD, 1, BTF_MEMBER_OFFSET(0, 0)),
BTF_MEMBER_ENC(NAME_TBD, 1, BTF_MEMBER_OFFSET(0, 32)),
BTF_END_RAW,
},
BTF_STR_SEC("\0A\0B"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "struct_type_check_btf",
.key_size = sizeof(int),
.value_size = sizeof(int),
.key_type_id = 1,
.value_type_id = 1,
.max_entries = 4,
},
{
.descr = "valid struct, kind_flag, int member, bitfield_size != 0",
.raw_types = {
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_STRUCT, 1, 2), 4), /* [2] */
BTF_MEMBER_ENC(NAME_TBD, 1, BTF_MEMBER_OFFSET(4, 0)),
BTF_MEMBER_ENC(NAME_TBD, 1, BTF_MEMBER_OFFSET(4, 4)),
BTF_END_RAW,
},
BTF_STR_SEC("\0A\0B"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "struct_type_check_btf",
.key_size = sizeof(int),
.value_size = sizeof(int),
.key_type_id = 1,
.value_type_id = 1,
.max_entries = 4,
},
{
.descr = "valid union, kind_flag, int member, bitfield_size != 0",
.raw_types = {
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_UNION, 1, 2), 4), /* [2] */
BTF_MEMBER_ENC(NAME_TBD, 1, BTF_MEMBER_OFFSET(4, 0)),
BTF_MEMBER_ENC(NAME_TBD, 1, BTF_MEMBER_OFFSET(4, 0)),
BTF_END_RAW,
},
BTF_STR_SEC("\0A\0B"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "union_type_check_btf",
.key_size = sizeof(int),
.value_size = sizeof(int),
.key_type_id = 1,
.value_type_id = 1,
.max_entries = 4,
},
{
.descr = "valid struct, kind_flag, enum member, bitfield_size != 0",
.raw_types = {
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1), 4), /* [2] */
BTF_ENUM_ENC(NAME_TBD, 0),
BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_STRUCT, 1, 2), 4),/* [3] */
BTF_MEMBER_ENC(NAME_TBD, 2, BTF_MEMBER_OFFSET(4, 0)),
BTF_MEMBER_ENC(NAME_TBD, 2, BTF_MEMBER_OFFSET(4, 4)),
BTF_END_RAW,
},
BTF_STR_SEC("\0A\0B\0C"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "struct_type_check_btf",
.key_size = sizeof(int),
.value_size = sizeof(int),
.key_type_id = 1,
.value_type_id = 1,
.max_entries = 4,
},
{
.descr = "valid union, kind_flag, enum member, bitfield_size != 0",
.raw_types = {
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1), 4), /* [2] */
BTF_ENUM_ENC(NAME_TBD, 0),
BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_UNION, 1, 2), 4), /* [3] */
BTF_MEMBER_ENC(NAME_TBD, 2, BTF_MEMBER_OFFSET(4, 0)),
BTF_MEMBER_ENC(NAME_TBD, 2, BTF_MEMBER_OFFSET(4, 0)),
BTF_END_RAW,
},
BTF_STR_SEC("\0A\0B\0C"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "union_type_check_btf",
.key_size = sizeof(int),
.value_size = sizeof(int),
.key_type_id = 1,
.value_type_id = 1,
.max_entries = 4,
},
{
.descr = "valid struct, kind_flag, typedef member, bitfield_size != 0",
.raw_types = {
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1), 4), /* [2] */
BTF_ENUM_ENC(NAME_TBD, 0),
BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_STRUCT, 1, 2), 4),/* [3] */
BTF_MEMBER_ENC(NAME_TBD, 4, BTF_MEMBER_OFFSET(4, 0)),
BTF_MEMBER_ENC(NAME_TBD, 5, BTF_MEMBER_OFFSET(4, 4)),
BTF_TYPEDEF_ENC(NAME_TBD, 1), /* [4] */
BTF_TYPEDEF_ENC(NAME_TBD, 2), /* [5] */
BTF_END_RAW,
},
BTF_STR_SEC("\0A\0B\0C\0D\0E"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "struct_type_check_btf",
.key_size = sizeof(int),
.value_size = sizeof(int),
.key_type_id = 1,
.value_type_id = 1,
.max_entries = 4,
},
{
.descr = "valid union, kind_flag, typedef member, bitfield_size != 0",
.raw_types = {
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1), 4), /* [2] */
BTF_ENUM_ENC(NAME_TBD, 0),
BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_UNION, 1, 2), 4), /* [3] */
BTF_MEMBER_ENC(NAME_TBD, 4, BTF_MEMBER_OFFSET(4, 0)),
BTF_MEMBER_ENC(NAME_TBD, 5, BTF_MEMBER_OFFSET(4, 0)),
BTF_TYPEDEF_ENC(NAME_TBD, 1), /* [4] */
BTF_TYPEDEF_ENC(NAME_TBD, 2), /* [5] */
BTF_END_RAW,
},
BTF_STR_SEC("\0A\0B\0C\0D\0E"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "union_type_check_btf",
.key_size = sizeof(int),
.value_size = sizeof(int),
.key_type_id = 1,
.value_type_id = 1,
.max_entries = 4,
},
{
.descr = "invalid struct, kind_flag, bitfield_size greater than struct size",
.raw_types = {
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_STRUCT, 1, 2), 4), /* [2] */
BTF_MEMBER_ENC(NAME_TBD, 1, BTF_MEMBER_OFFSET(20, 0)),
BTF_MEMBER_ENC(NAME_TBD, 1, BTF_MEMBER_OFFSET(20, 20)),
BTF_END_RAW,
},
BTF_STR_SEC("\0A\0B"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "struct_type_check_btf",
.key_size = sizeof(int),
.value_size = sizeof(int),
.key_type_id = 1,
.value_type_id = 1,
.max_entries = 4,
.btf_load_err = true,
.err_str = "Member exceeds struct_size",
},
{
.descr = "invalid struct, kind_flag, bitfield base_type int not regular",
.raw_types = {
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 20, 4), /* [2] */
BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_STRUCT, 1, 2), 4), /* [3] */
BTF_MEMBER_ENC(NAME_TBD, 2, BTF_MEMBER_OFFSET(20, 0)),
BTF_MEMBER_ENC(NAME_TBD, 2, BTF_MEMBER_OFFSET(20, 20)),
BTF_END_RAW,
},
BTF_STR_SEC("\0A\0B"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "struct_type_check_btf",
.key_size = sizeof(int),
.value_size = sizeof(int),
.key_type_id = 1,
.value_type_id = 1,
.max_entries = 4,
.btf_load_err = true,
.err_str = "Invalid member base type",
},
{
.descr = "invalid struct, kind_flag, base_type int not regular",
.raw_types = {
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 12, 4), /* [2] */
BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_STRUCT, 1, 2), 4), /* [3] */
BTF_MEMBER_ENC(NAME_TBD, 2, BTF_MEMBER_OFFSET(8, 0)),
BTF_MEMBER_ENC(NAME_TBD, 2, BTF_MEMBER_OFFSET(8, 8)),
BTF_END_RAW,
},
BTF_STR_SEC("\0A\0B"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "struct_type_check_btf",
.key_size = sizeof(int),
.value_size = sizeof(int),
.key_type_id = 1,
.value_type_id = 1,
.max_entries = 4,
.btf_load_err = true,
.err_str = "Invalid member base type",
},
{
.descr = "invalid union, kind_flag, bitfield_size greater than struct size",
.raw_types = {
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_UNION, 1, 2), 2), /* [2] */
BTF_MEMBER_ENC(NAME_TBD, 1, BTF_MEMBER_OFFSET(8, 0)),
BTF_MEMBER_ENC(NAME_TBD, 1, BTF_MEMBER_OFFSET(20, 0)),
BTF_END_RAW,
},
BTF_STR_SEC("\0A\0B"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "union_type_check_btf",
.key_size = sizeof(int),
.value_size = sizeof(int),
.key_type_id = 1,
.value_type_id = 1,
.max_entries = 4,
.btf_load_err = true,
.err_str = "Member exceeds struct_size",
},
{
.descr = "invalid struct, kind_flag, int member, bitfield_size = 0, wrong byte alignment",
.raw_types = {
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [2] */
BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_STRUCT, 1, 2), 12), /* [3] */
BTF_MEMBER_ENC(NAME_TBD, 2, BTF_MEMBER_OFFSET(0, 0)),
BTF_MEMBER_ENC(NAME_TBD, 2, BTF_MEMBER_OFFSET(0, 36)),
BTF_END_RAW,
},
BTF_STR_SEC("\0A\0B"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "struct_type_check_btf",
.key_size = sizeof(int),
.value_size = sizeof(int),
.key_type_id = 1,
.value_type_id = 1,
.max_entries = 4,
.btf_load_err = true,
.err_str = "Invalid member offset",
},
{
.descr = "invalid struct, kind_flag, enum member, bitfield_size = 0, wrong byte alignment",
.raw_types = {
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1), 4), /* [2] */
BTF_ENUM_ENC(NAME_TBD, 0),
BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_STRUCT, 1, 2), 12), /* [3] */
BTF_MEMBER_ENC(NAME_TBD, 2, BTF_MEMBER_OFFSET(0, 0)),
BTF_MEMBER_ENC(NAME_TBD, 2, BTF_MEMBER_OFFSET(0, 36)),
BTF_END_RAW,
},
BTF_STR_SEC("\0A\0B\0C"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "struct_type_check_btf",
.key_size = sizeof(int),
.value_size = sizeof(int),
.key_type_id = 1,
.value_type_id = 1,
.max_entries = 4,
.btf_load_err = true,
.err_str = "Invalid member offset",
},
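/*
 * Integers wider than 64 bits: BTF supports up to 128-bit ints, and
 * nr_bits may be smaller than 8 * size (e.g. a 120-bit value backed
 * by a 16-byte type), as the tests below exercise.
 */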
{
.descr = "128-bit int",
.raw_types = {
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 128, 16), /* [2] */
BTF_END_RAW,
},
BTF_STR_SEC("\0A"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "int_type_check_btf",
.key_size = sizeof(int),
.value_size = sizeof(int),
.key_type_id = 1,
.value_type_id = 1,
.max_entries = 4,
},
{
.descr = "struct, 128-bit int member",
.raw_types = {
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 128, 16), /* [2] */
BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), 16), /* [3] */
BTF_MEMBER_ENC(NAME_TBD, 2, 0),
BTF_END_RAW,
},
BTF_STR_SEC("\0A"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "struct_type_check_btf",
.key_size = sizeof(int),
.value_size = sizeof(int),
.key_type_id = 1,
.value_type_id = 1,
.max_entries = 4,
},
{
.descr = "struct, 120-bit int member bitfield",
.raw_types = {
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 120, 16), /* [2] */
BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), 16), /* [3] */
BTF_MEMBER_ENC(NAME_TBD, 2, 0),
BTF_END_RAW,
},
BTF_STR_SEC("\0A"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "struct_type_check_btf",
.key_size = sizeof(int),
.value_size = sizeof(int),
.key_type_id = 1,
.value_type_id = 1,
.max_entries = 4,
},
{
.descr = "struct, kind_flag, 128-bit int member",
.raw_types = {
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 128, 16), /* [2] */
BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_STRUCT, 1, 1), 16), /* [3] */
BTF_MEMBER_ENC(NAME_TBD, 2, BTF_MEMBER_OFFSET(0, 0)),
BTF_END_RAW,
},
BTF_STR_SEC("\0A"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "struct_type_check_btf",
.key_size = sizeof(int),
.value_size = sizeof(int),
.key_type_id = 1,
.value_type_id = 1,
.max_entries = 4,
},
{
.descr = "struct, kind_flag, 120-bit int member bitfield",
.raw_types = {
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 128, 16), /* [2] */
BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_STRUCT, 1, 1), 16), /* [3] */
BTF_MEMBER_ENC(NAME_TBD, 2, BTF_MEMBER_OFFSET(120, 0)),
BTF_END_RAW,
},
BTF_STR_SEC("\0A"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "struct_type_check_btf",
.key_size = sizeof(int),
.value_size = sizeof(int),
.key_type_id = 1,
.value_type_id = 1,
.max_entries = 4,
},
/*
* typedef int arr_t[16];
* struct s {
* arr_t *a;
* };
*/
{
.descr = "struct->ptr->typedef->array->int size resolution",
.raw_types = {
BTF_STRUCT_ENC(NAME_TBD, 1, 8), /* [1] */
BTF_MEMBER_ENC(NAME_TBD, 2, 0),
BTF_PTR_ENC(3), /* [2] */
BTF_TYPEDEF_ENC(NAME_TBD, 4), /* [3] */
BTF_TYPE_ARRAY_ENC(5, 5, 16), /* [4] */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [5] */
BTF_END_RAW,
},
BTF_STR_SEC("\0s\0a\0arr_t"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "ptr_mod_chain_size_resolve_map",
.key_size = sizeof(int),
.value_size = sizeof(int) * 16,
.key_type_id = 5 /* int */,
.value_type_id = 3 /* arr_t */,
.max_entries = 4,
},
/*
* typedef int arr_t[16][8][4];
* struct s {
* arr_t *a;
* };
*/
{
.descr = "struct->ptr->typedef->multi-array->int size resolution",
.raw_types = {
BTF_STRUCT_ENC(NAME_TBD, 1, 8), /* [1] */
BTF_MEMBER_ENC(NAME_TBD, 2, 0),
BTF_PTR_ENC(3), /* [2] */
BTF_TYPEDEF_ENC(NAME_TBD, 4), /* [3] */
BTF_TYPE_ARRAY_ENC(5, 7, 16), /* [4] */
BTF_TYPE_ARRAY_ENC(6, 7, 8), /* [5] */
BTF_TYPE_ARRAY_ENC(7, 7, 4), /* [6] */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [7] */
BTF_END_RAW,
},
BTF_STR_SEC("\0s\0a\0arr_t"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "multi_arr_size_resolve_map",
.key_size = sizeof(int),
.value_size = sizeof(int) * 16 * 8 * 4,
.key_type_id = 7 /* int */,
.value_type_id = 3 /* arr_t */,
.max_entries = 4,
},
/*
* typedef int int_t;
* typedef int_t arr3_t[4];
* typedef arr3_t arr2_t[8];
* typedef arr2_t arr1_t[16];
* struct s {
* arr1_t *a;
* };
*/
{
.descr = "typedef/multi-arr mix size resolution",
.raw_types = {
BTF_STRUCT_ENC(NAME_TBD, 1, 8), /* [1] */
BTF_MEMBER_ENC(NAME_TBD, 2, 0),
BTF_PTR_ENC(3), /* [2] */
BTF_TYPEDEF_ENC(NAME_TBD, 4), /* [3] */
BTF_TYPE_ARRAY_ENC(5, 10, 16), /* [4] */
BTF_TYPEDEF_ENC(NAME_TBD, 6), /* [5] */
BTF_TYPE_ARRAY_ENC(7, 10, 8), /* [6] */
BTF_TYPEDEF_ENC(NAME_TBD, 8), /* [7] */
BTF_TYPE_ARRAY_ENC(9, 10, 4), /* [8] */
BTF_TYPEDEF_ENC(NAME_TBD, 10), /* [9] */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [10] */
BTF_END_RAW,
},
BTF_STR_SEC("\0s\0a\0arr1_t\0arr2_t\0arr3_t\0int_t"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "typedef_arra_mix_size_resolve_map",
.key_size = sizeof(int),
.value_size = sizeof(int) * 16 * 8 * 4,
.key_type_id = 10 /* int */,
.value_type_id = 3 /* arr1_t */,
.max_entries = 4,
},
/*
* ELF .rodata section of size 4 and BTF .rodata DATASEC with vlen 0.
*/
{
.descr = "datasec: vlen == 0",
.raw_types = {
/* int */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
/* .rodata section */
BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 0), 4),
/* [2] */
BTF_END_RAW,
},
BTF_STR_SEC("\0.rodata"),
.map_type = BPF_MAP_TYPE_ARRAY,
.key_size = sizeof(int),
.value_size = sizeof(int),
.key_type_id = 1,
.value_type_id = 1,
.max_entries = 1,
},
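/*
 * BTF_KIND_FLOAT must have vlen == 0, kind_flag == 0 and a size of
 * 2, 4, 8, 12 or 16 bytes; the tests below cover the well-formed
 * encodings as well as each way to violate these constraints.
 */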
{
.descr = "float test #1, well-formed",
.raw_types = {
BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4),
/* [1] */
BTF_TYPE_FLOAT_ENC(NAME_TBD, 2), /* [2] */
BTF_TYPE_FLOAT_ENC(NAME_TBD, 4), /* [3] */
BTF_TYPE_FLOAT_ENC(NAME_TBD, 8), /* [4] */
BTF_TYPE_FLOAT_ENC(NAME_TBD, 12), /* [5] */
BTF_TYPE_FLOAT_ENC(NAME_TBD, 16), /* [6] */
BTF_STRUCT_ENC(NAME_TBD, 5, 48), /* [7] */
BTF_MEMBER_ENC(NAME_TBD, 2, 0),
BTF_MEMBER_ENC(NAME_TBD, 3, 32),
BTF_MEMBER_ENC(NAME_TBD, 4, 64),
BTF_MEMBER_ENC(NAME_TBD, 5, 128),
BTF_MEMBER_ENC(NAME_TBD, 6, 256),
BTF_END_RAW,
},
BTF_STR_SEC("\0int\0_Float16\0float\0double\0_Float80\0long_double"
"\0floats\0a\0b\0c\0d\0e"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "float_type_check_btf",
.key_size = sizeof(int),
.value_size = 48,
.key_type_id = 1,
.value_type_id = 7,
.max_entries = 1,
},
{
.descr = "float test #2, invalid vlen",
.raw_types = {
BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4),
/* [1] */
BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_FLOAT, 0, 1), 4),
/* [2] */
BTF_END_RAW,
},
BTF_STR_SEC("\0int\0float"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "float_type_check_btf",
.key_size = sizeof(int),
.value_size = 4,
.key_type_id = 1,
.value_type_id = 2,
.max_entries = 1,
.btf_load_err = true,
.err_str = "vlen != 0",
},
{
.descr = "float test #3, invalid kind_flag",
.raw_types = {
BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4),
/* [1] */
BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_FLOAT, 1, 0), 4),
/* [2] */
BTF_END_RAW,
},
BTF_STR_SEC("\0int\0float"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "float_type_check_btf",
.key_size = sizeof(int),
.value_size = 4,
.key_type_id = 1,
.value_type_id = 2,
.max_entries = 1,
.btf_load_err = true,
.err_str = "Invalid btf_info kind_flag",
},
{
.descr = "float test #4, member does not fit",
.raw_types = {
BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4),
/* [1] */
BTF_TYPE_FLOAT_ENC(NAME_TBD, 4), /* [2] */
BTF_STRUCT_ENC(NAME_TBD, 1, 2), /* [3] */
BTF_MEMBER_ENC(NAME_TBD, 2, 0),
BTF_END_RAW,
},
BTF_STR_SEC("\0int\0float\0floats\0x"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "float_type_check_btf",
.key_size = sizeof(int),
.value_size = 4,
.key_type_id = 1,
.value_type_id = 3,
.max_entries = 1,
.btf_load_err = true,
.err_str = "Member exceeds struct_size",
},
{
.descr = "float test #5, member is not properly aligned",
.raw_types = {
BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4),
/* [1] */
BTF_TYPE_FLOAT_ENC(NAME_TBD, 4), /* [2] */
BTF_STRUCT_ENC(NAME_TBD, 1, 8), /* [3] */
BTF_MEMBER_ENC(NAME_TBD, 2, 8),
BTF_END_RAW,
},
BTF_STR_SEC("\0int\0float\0floats\0x"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "float_type_check_btf",
.key_size = sizeof(int),
.value_size = 4,
.key_type_id = 1,
.value_type_id = 3,
.max_entries = 1,
.btf_load_err = true,
.err_str = "Member is not properly aligned",
},
{
.descr = "float test #6, invalid size",
.raw_types = {
BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4),
/* [1] */
BTF_TYPE_FLOAT_ENC(NAME_TBD, 6), /* [2] */
BTF_END_RAW,
},
BTF_STR_SEC("\0int\0float"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "float_type_check_btf",
.key_size = sizeof(int),
.value_size = 6,
.key_type_id = 1,
.value_type_id = 2,
.max_entries = 1,
.btf_load_err = true,
.err_str = "Invalid type_size",
},
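/*
 * BTF_KIND_DECL_TAG attaches a tag string to a struct/union, var,
 * typedef or func. component_idx is -1 when the tag applies to the
 * target type itself, otherwise it is the index of the tagged
 * member or function parameter.
 */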
{
.descr = "decl_tag test #1, struct/member, well-formed",
.raw_types = {
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_STRUCT_ENC(0, 2, 8), /* [2] */
BTF_MEMBER_ENC(NAME_TBD, 1, 0),
BTF_MEMBER_ENC(NAME_TBD, 1, 32),
BTF_DECL_TAG_ENC(NAME_TBD, 2, -1),
BTF_DECL_TAG_ENC(NAME_TBD, 2, 0),
BTF_DECL_TAG_ENC(NAME_TBD, 2, 1),
BTF_END_RAW,
},
BTF_STR_SEC("\0m1\0m2\0tag1\0tag2\0tag3"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "tag_type_check_btf",
.key_size = sizeof(int),
.value_size = 8,
.key_type_id = 1,
.value_type_id = 2,
.max_entries = 1,
},
{
.descr = "decl_tag test #2, union/member, well-formed",
.raw_types = {
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_UNION_ENC(NAME_TBD, 2, 4), /* [2] */
BTF_MEMBER_ENC(NAME_TBD, 1, 0),
BTF_MEMBER_ENC(NAME_TBD, 1, 0),
BTF_DECL_TAG_ENC(NAME_TBD, 2, -1),
BTF_DECL_TAG_ENC(NAME_TBD, 2, 0),
BTF_DECL_TAG_ENC(NAME_TBD, 2, 1),
BTF_END_RAW,
},
BTF_STR_SEC("\0t\0m1\0m2\0tag1\0tag2\0tag3"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "tag_type_check_btf",
.key_size = sizeof(int),
.value_size = 4,
.key_type_id = 1,
.value_type_id = 2,
.max_entries = 1,
},
{
.descr = "decl_tag test #3, variable, well-formed",
.raw_types = {
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_VAR_ENC(NAME_TBD, 1, 0), /* [2] */
BTF_VAR_ENC(NAME_TBD, 1, 1), /* [3] */
BTF_DECL_TAG_ENC(NAME_TBD, 2, -1),
BTF_DECL_TAG_ENC(NAME_TBD, 3, -1),
BTF_END_RAW,
},
BTF_STR_SEC("\0local\0global\0tag1\0tag2"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "tag_type_check_btf",
.key_size = sizeof(int),
.value_size = 4,
.key_type_id = 1,
.value_type_id = 1,
.max_entries = 1,
},
{
.descr = "decl_tag test #4, func/parameter, well-formed",
.raw_types = {
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_FUNC_PROTO_ENC(0, 2), /* [2] */
BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1),
BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1),
BTF_FUNC_ENC(NAME_TBD, 2), /* [3] */
BTF_DECL_TAG_ENC(NAME_TBD, 3, -1),
BTF_DECL_TAG_ENC(NAME_TBD, 3, 0),
BTF_DECL_TAG_ENC(NAME_TBD, 3, 1),
BTF_END_RAW,
},
BTF_STR_SEC("\0arg1\0arg2\0f\0tag1\0tag2\0tag3"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "tag_type_check_btf",
.key_size = sizeof(int),
.value_size = 4,
.key_type_id = 1,
.value_type_id = 1,
.max_entries = 1,
},
{
.descr = "decl_tag test #5, invalid value",
.raw_types = {
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_VAR_ENC(NAME_TBD, 1, 0), /* [2] */
BTF_DECL_TAG_ENC(0, 2, -1),
BTF_END_RAW,
},
BTF_STR_SEC("\0local\0tag"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "tag_type_check_btf",
.key_size = sizeof(int),
.value_size = 4,
.key_type_id = 1,
.value_type_id = 1,
.max_entries = 1,
.btf_load_err = true,
.err_str = "Invalid value",
},
{
.descr = "decl_tag test #6, invalid target type",
.raw_types = {
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_DECL_TAG_ENC(NAME_TBD, 1, -1),
BTF_END_RAW,
},
BTF_STR_SEC("\0tag1"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "tag_type_check_btf",
.key_size = sizeof(int),
.value_size = 4,
.key_type_id = 1,
.value_type_id = 1,
.max_entries = 1,
.btf_load_err = true,
.err_str = "Invalid type",
},
{
.descr = "decl_tag test #7, invalid vlen",
.raw_types = {
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_VAR_ENC(NAME_TBD, 1, 0), /* [2] */
BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_DECL_TAG, 0, 1), 2), (0),
BTF_END_RAW,
},
BTF_STR_SEC("\0local\0tag1"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "tag_type_check_btf",
.key_size = sizeof(int),
.value_size = 4,
.key_type_id = 1,
.value_type_id = 1,
.max_entries = 1,
.btf_load_err = true,
.err_str = "vlen != 0",
},
{
.descr = "decl_tag test #8, invalid kflag",
.raw_types = {
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_VAR_ENC(NAME_TBD, 1, 0), /* [2] */
BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_DECL_TAG, 1, 0), 2), (-1),
BTF_END_RAW,
},
BTF_STR_SEC("\0local\0tag1"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "tag_type_check_btf",
.key_size = sizeof(int),
.value_size = 4,
.key_type_id = 1,
.value_type_id = 1,
.max_entries = 1,
.btf_load_err = true,
.err_str = "Invalid btf_info kind_flag",
},
{
.descr = "decl_tag test #9, var, invalid component_idx",
.raw_types = {
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_VAR_ENC(NAME_TBD, 1, 0), /* [2] */
BTF_DECL_TAG_ENC(NAME_TBD, 2, 0),
BTF_END_RAW,
},
BTF_STR_SEC("\0local\0tag"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "tag_type_check_btf",
.key_size = sizeof(int),
.value_size = 4,
.key_type_id = 1,
.value_type_id = 1,
.max_entries = 1,
.btf_load_err = true,
.err_str = "Invalid component_idx",
},
{
.descr = "decl_tag test #10, struct member, invalid component_idx",
.raw_types = {
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_STRUCT_ENC(0, 2, 8), /* [2] */
BTF_MEMBER_ENC(NAME_TBD, 1, 0),
BTF_MEMBER_ENC(NAME_TBD, 1, 32),
BTF_DECL_TAG_ENC(NAME_TBD, 2, 2),
BTF_END_RAW,
},
BTF_STR_SEC("\0m1\0m2\0tag"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "tag_type_check_btf",
.key_size = sizeof(int),
.value_size = 8,
.key_type_id = 1,
.value_type_id = 2,
.max_entries = 1,
.btf_load_err = true,
.err_str = "Invalid component_idx",
},
{
.descr = "decl_tag test #11, func parameter, invalid component_idx",
.raw_types = {
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_FUNC_PROTO_ENC(0, 2), /* [2] */
BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1),
BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1),
BTF_FUNC_ENC(NAME_TBD, 2), /* [3] */
BTF_DECL_TAG_ENC(NAME_TBD, 3, 2),
BTF_END_RAW,
},
BTF_STR_SEC("\0arg1\0arg2\0f\0tag"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "tag_type_check_btf",
.key_size = sizeof(int),
.value_size = 4,
.key_type_id = 1,
.value_type_id = 1,
.max_entries = 1,
.btf_load_err = true,
.err_str = "Invalid component_idx",
},
{
.descr = "decl_tag test #12, < -1 component_idx",
.raw_types = {
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_FUNC_PROTO_ENC(0, 2), /* [2] */
BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1),
BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1),
BTF_FUNC_ENC(NAME_TBD, 2), /* [3] */
BTF_DECL_TAG_ENC(NAME_TBD, 3, -2),
BTF_END_RAW,
},
BTF_STR_SEC("\0arg1\0arg2\0f\0tag"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "tag_type_check_btf",
.key_size = sizeof(int),
.value_size = 4,
.key_type_id = 1,
.value_type_id = 1,
.max_entries = 1,
.btf_load_err = true,
.err_str = "Invalid component_idx",
},
{
.descr = "decl_tag test #13, typedef, well-formed",
.raw_types = {
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_TYPEDEF_ENC(NAME_TBD, 1), /* [2] */
BTF_DECL_TAG_ENC(NAME_TBD, 2, -1),
BTF_END_RAW,
},
BTF_STR_SEC("\0t\0tag"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "tag_type_check_btf",
.key_size = sizeof(int),
.value_size = 4,
.key_type_id = 1,
.value_type_id = 1,
.max_entries = 1,
},
{
.descr = "decl_tag test #14, typedef, invalid component_idx",
.raw_types = {
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_TYPEDEF_ENC(NAME_TBD, 1), /* [2] */
BTF_DECL_TAG_ENC(NAME_TBD, 2, 0),
BTF_END_RAW,
},
BTF_STR_SEC("\0local\0tag"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "tag_type_check_btf",
.key_size = sizeof(int),
.value_size = 4,
.key_type_id = 1,
.value_type_id = 1,
.max_entries = 1,
.btf_load_err = true,
.err_str = "Invalid component_idx",
},
{
.descr = "decl_tag test #15, func, invalid func proto",
.raw_types = {
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_DECL_TAG_ENC(NAME_TBD, 3, 0), /* [2] */
BTF_FUNC_ENC(NAME_TBD, 8), /* [3] */
BTF_END_RAW,
},
BTF_STR_SEC("\0tag\0func"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "tag_type_check_btf",
.key_size = sizeof(int),
.value_size = 4,
.key_type_id = 1,
.value_type_id = 1,
.max_entries = 1,
.btf_load_err = true,
.err_str = "Invalid type_id",
},
{
.descr = "decl_tag test #16, func proto, return type",
.raw_types = {
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_VAR_ENC(NAME_TBD, 1, 0), /* [2] */
BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_DECL_TAG, 0, 0), 2), (-1), /* [3] */
BTF_FUNC_PROTO_ENC(3, 0), /* [4] */
BTF_END_RAW,
},
BTF_STR_SEC("\0local\0tag1"),
.btf_load_err = true,
.err_str = "Invalid return type",
},
{
.descr = "decl_tag test #17, func proto, argument",
.raw_types = {
BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_DECL_TAG, 0, 0), 4), (-1), /* [1] */
BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), 0), /* [2] */
BTF_FUNC_PROTO_ENC(0, 1), /* [3] */
BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1),
BTF_VAR_ENC(NAME_TBD, 2, 0), /* [4] */
BTF_END_RAW,
},
BTF_STR_SEC("\0local\0tag1\0var"),
.btf_load_err = true,
.err_str = "Invalid arg#1",
},
{
.descr = "decl_tag test #18, decl_tag as the map key type",
.raw_types = {
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_STRUCT_ENC(0, 2, 8), /* [2] */
BTF_MEMBER_ENC(NAME_TBD, 1, 0),
BTF_MEMBER_ENC(NAME_TBD, 1, 32),
BTF_DECL_TAG_ENC(NAME_TBD, 2, -1), /* [3] */
BTF_END_RAW,
},
BTF_STR_SEC("\0m1\0m2\0tag"),
.map_type = BPF_MAP_TYPE_HASH,
.map_name = "tag_type_check_btf",
.key_size = 8,
.value_size = 4,
.key_type_id = 3,
.value_type_id = 1,
.max_entries = 1,
.map_create_err = true,
},
{
.descr = "decl_tag test #19, decl_tag as the map value type",
.raw_types = {
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_STRUCT_ENC(0, 2, 8), /* [2] */
BTF_MEMBER_ENC(NAME_TBD, 1, 0),
BTF_MEMBER_ENC(NAME_TBD, 1, 32),
BTF_DECL_TAG_ENC(NAME_TBD, 2, -1), /* [3] */
BTF_END_RAW,
},
BTF_STR_SEC("\0m1\0m2\0tag"),
.map_type = BPF_MAP_TYPE_HASH,
.map_name = "tag_type_check_btf",
.key_size = 4,
.value_size = 8,
.key_type_id = 1,
.value_type_id = 3,
.max_entries = 1,
.map_create_err = true,
},
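/*
 * BTF_KIND_TYPE_TAG: within a modifier chain, all type tags must
 * come before any const/volatile/restrict modifiers; chains where a
 * type tag follows another modifier are rejected with "Type tags
 * don't precede modifiers".
 */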
{
.descr = "type_tag test #1",
.raw_types = {
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_TYPE_TAG_ENC(NAME_TBD, 1), /* [2] */
BTF_PTR_ENC(2), /* [3] */
BTF_END_RAW,
},
BTF_STR_SEC("\0tag"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "tag_type_check_btf",
.key_size = sizeof(int),
.value_size = 4,
.key_type_id = 1,
.value_type_id = 1,
.max_entries = 1,
},
{
.descr = "type_tag test #2, type tag order",
.raw_types = {
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_CONST_ENC(3), /* [2] */
BTF_TYPE_TAG_ENC(NAME_TBD, 1), /* [3] */
BTF_END_RAW,
},
BTF_STR_SEC("\0tag"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "tag_type_check_btf",
.key_size = sizeof(int),
.value_size = 4,
.key_type_id = 1,
.value_type_id = 1,
.max_entries = 1,
.btf_load_err = true,
.err_str = "Type tags don't precede modifiers",
},
{
.descr = "type_tag test #3, type tag order",
.raw_types = {
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_TYPE_TAG_ENC(NAME_TBD, 3), /* [2] */
BTF_CONST_ENC(4), /* [3] */
BTF_TYPE_TAG_ENC(NAME_TBD, 1), /* [4] */
BTF_END_RAW,
},
BTF_STR_SEC("\0tag\0tag"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "tag_type_check_btf",
.key_size = sizeof(int),
.value_size = 4,
.key_type_id = 1,
.value_type_id = 1,
.max_entries = 1,
.btf_load_err = true,
.err_str = "Type tags don't precede modifiers",
},
{
.descr = "type_tag test #4, type tag order",
.raw_types = {
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_TYPEDEF_ENC(NAME_TBD, 3), /* [2] */
BTF_CONST_ENC(4), /* [3] */
BTF_TYPE_TAG_ENC(NAME_TBD, 1), /* [4] */
BTF_END_RAW,
},
BTF_STR_SEC("\0tag\0tag"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "tag_type_check_btf",
.key_size = sizeof(int),
.value_size = 4,
.key_type_id = 1,
.value_type_id = 1,
.max_entries = 1,
.btf_load_err = true,
.err_str = "Type tags don't precede modifiers",
},
{
.descr = "type_tag test #5, type tag order",
.raw_types = {
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_TYPE_TAG_ENC(NAME_TBD, 3), /* [2] */
BTF_CONST_ENC(1), /* [3] */
BTF_TYPE_TAG_ENC(NAME_TBD, 2), /* [4] */
BTF_END_RAW,
},
BTF_STR_SEC("\0tag\0tag"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "tag_type_check_btf",
.key_size = sizeof(int),
.value_size = 4,
.key_type_id = 1,
.value_type_id = 1,
.max_entries = 1,
},
{
.descr = "type_tag test #6, type tag order",
.raw_types = {
BTF_PTR_ENC(2), /* [1] */
BTF_TYPE_TAG_ENC(NAME_TBD, 3), /* [2] */
BTF_CONST_ENC(4), /* [3] */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [4] */
BTF_PTR_ENC(6), /* [5] */
BTF_CONST_ENC(2), /* [6] */
BTF_END_RAW,
},
BTF_STR_SEC("\0tag"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "tag_type_check_btf",
.key_size = sizeof(int),
.value_size = 4,
.key_type_id = 1,
.value_type_id = 1,
.max_entries = 1,
.btf_load_err = true,
.err_str = "Type tags don't precede modifiers",
},
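/*
 * BTF_KIND_ENUM64 stores each enumerator value as a lo32/hi32 pair
 * (see BTF_ENUM64_ENC); kind_flag == 1 marks the values as signed.
 */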
{
.descr = "enum64 test #1, unsigned, size 8",
.raw_types = {
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_ENUM64, 0, 2), 8), /* [2] */
BTF_ENUM64_ENC(NAME_TBD, 0, 0),
BTF_ENUM64_ENC(NAME_TBD, 1, 1),
BTF_END_RAW,
},
BTF_STR_SEC("\0a\0b\0c"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "tag_type_check_btf",
.key_size = sizeof(int),
.value_size = 8,
.key_type_id = 1,
.value_type_id = 2,
.max_entries = 1,
},
{
.descr = "enum64 test #2, signed, size 4",
.raw_types = {
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_ENUM64, 1, 2), 4), /* [2] */
BTF_ENUM64_ENC(NAME_TBD, -1, 0),
BTF_ENUM64_ENC(NAME_TBD, 1, 0),
BTF_END_RAW,
},
BTF_STR_SEC("\0a\0b\0c"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "tag_type_check_btf",
.key_size = sizeof(int),
.value_size = 4,
.key_type_id = 1,
.value_type_id = 2,
.max_entries = 1,
},
}; /* struct btf_raw_test raw_tests[] */
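/*
 * The helpers below turn a test's raw_types/str_sec pair into a
 * complete raw BTF image (header + type section + string section)
 * that can be passed to BPF_BTF_LOAD.
 */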
static const char *get_next_str(const char *start, const char *end)
{
return start < end - 1 ? start + 1 : NULL;
}
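/* Scan backward for the BTF_END_RAW sentinel; returns the size of the
 * type section in bytes, or a negative value if no sentinel is found.
 */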
static int get_raw_sec_size(const __u32 *raw_types)
{
int i;
for (i = MAX_NR_RAW_U32 - 1;
i >= 0 && raw_types[i] != BTF_END_RAW;
i--)
;
return i < 0 ? i : i * sizeof(raw_types[0]);
}
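/*
 * Assemble a loadable raw BTF image: copy the header template, patch
 * NAME_TBD/NAME_NTH placeholders in the type section with real string
 * section offsets, append the string section, and fix up the header's
 * section offsets and lengths.
 */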
static void *btf_raw_create(const struct btf_header *hdr,
const __u32 *raw_types,
const char *str,
unsigned int str_sec_size,
unsigned int *btf_size,
const char **ret_next_str)
{
const char *next_str = str, *end_str = str + str_sec_size;
const char **strs_idx = NULL, **tmp_strs_idx;
int strs_cap = 0, strs_cnt = 0, next_str_idx = 0;
unsigned int size_needed, offset;
struct btf_header *ret_hdr;
int i, type_sec_size, err = 0;
uint32_t *ret_types;
void *raw_btf = NULL;
type_sec_size = get_raw_sec_size(raw_types);
if (CHECK(type_sec_size < 0, "Cannot get nr_raw_types"))
return NULL;
size_needed = sizeof(*hdr) + type_sec_size + str_sec_size;
raw_btf = malloc(size_needed);
if (CHECK(!raw_btf, "Cannot allocate memory for raw_btf"))
return NULL;
/* Copy header */
memcpy(raw_btf, hdr, sizeof(*hdr));
offset = sizeof(*hdr);
/* Index strings */
while ((next_str = get_next_str(next_str, end_str))) {
if (strs_cnt == strs_cap) {
strs_cap += max(16, strs_cap / 2);
tmp_strs_idx = realloc(strs_idx,
sizeof(*strs_idx) * strs_cap);
if (CHECK(!tmp_strs_idx,
"Cannot allocate memory for strs_idx")) {
err = -1;
goto done;
}
strs_idx = tmp_strs_idx;
}
strs_idx[strs_cnt++] = next_str;
next_str += strlen(next_str);
}
/* Copy type section */
ret_types = raw_btf + offset;
for (i = 0; i < type_sec_size / sizeof(raw_types[0]); i++) {
if (raw_types[i] == NAME_TBD) {
if (CHECK(next_str_idx == strs_cnt,
"Error in getting next_str #%d",
next_str_idx)) {
err = -1;
goto done;
}
ret_types[i] = strs_idx[next_str_idx++] - str;
} else if (IS_NAME_NTH(raw_types[i])) {
int idx = GET_NAME_NTH_IDX(raw_types[i]);
if (CHECK(idx <= 0 || idx > strs_cnt,
"Error getting string #%d, strs_cnt:%d",
idx, strs_cnt)) {
err = -1;
goto done;
}
ret_types[i] = strs_idx[idx-1] - str;
} else {
ret_types[i] = raw_types[i];
}
}
offset += type_sec_size;
/* Copy string section */
memcpy(raw_btf + offset, str, str_sec_size);
ret_hdr = (struct btf_header *)raw_btf;
ret_hdr->type_len = type_sec_size;
ret_hdr->str_off = type_sec_size;
ret_hdr->str_len = str_sec_size;
*btf_size = size_needed;
if (ret_next_str)
*ret_next_str =
next_str_idx < strs_cnt ? strs_idx[next_str_idx] : NULL;
done:
free(strs_idx);
if (err) {
free(raw_btf);
return NULL;
}
return raw_btf;
}
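/* Load raw BTF, retrying with the verifier log enabled on failure so
 * that a test's err_str can later be matched against btf_log_buf.
 */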
static int load_raw_btf(const void *raw_data, size_t raw_size)
{
LIBBPF_OPTS(bpf_btf_load_opts, opts);
int btf_fd;
if (always_log) {
opts.log_buf = btf_log_buf;
opts.log_size = BTF_LOG_BUF_SIZE;
opts.log_level = 1;
}
btf_fd = bpf_btf_load(raw_data, raw_size, &opts);
if (btf_fd < 0 && !always_log) {
opts.log_buf = btf_log_buf;
opts.log_size = BTF_LOG_BUF_SIZE;
opts.log_level = 1;
btf_fd = bpf_btf_load(raw_data, raw_size, &opts);
}
return btf_fd;
}
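/*
 * Run one raw_tests[] entry: build the raw BTF image, apply the
 * header field deltas, attempt BPF_BTF_LOAD and compare the outcome
 * with btf_load_err/err_str, then try creating a map that uses the
 * loaded BTF and compare with map_create_err.
 */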
static void do_test_raw(unsigned int test_num)
{
struct btf_raw_test *test = &raw_tests[test_num - 1];
LIBBPF_OPTS(bpf_map_create_opts, opts);
int map_fd = -1, btf_fd = -1;
unsigned int raw_btf_size;
struct btf_header *hdr;
void *raw_btf;
int err;
if (!test__start_subtest(test->descr))
return;
raw_btf = btf_raw_create(&hdr_tmpl,
test->raw_types,
test->str_sec,
test->str_sec_size,
&raw_btf_size, NULL);
if (!raw_btf)
return;
hdr = raw_btf;
hdr->hdr_len = (int)hdr->hdr_len + test->hdr_len_delta;
hdr->type_off = (int)hdr->type_off + test->type_off_delta;
hdr->str_off = (int)hdr->str_off + test->str_off_delta;
hdr->str_len = (int)hdr->str_len + test->str_len_delta;
*btf_log_buf = '\0';
btf_fd = load_raw_btf(raw_btf, raw_btf_size);
free(raw_btf);
err = ((btf_fd < 0) != test->btf_load_err);
if (CHECK(err, "btf_fd:%d test->btf_load_err:%u",
btf_fd, test->btf_load_err) ||
CHECK(test->err_str && !strstr(btf_log_buf, test->err_str),
"expected err_str:%s\n", test->err_str)) {
err = -1;
goto done;
}
if (err || btf_fd < 0)
goto done;
opts.btf_fd = btf_fd;
opts.btf_key_type_id = test->key_type_id;
opts.btf_value_type_id = test->value_type_id;
map_fd = bpf_map_create(test->map_type, test->map_name,
test->key_size, test->value_size, test->max_entries, &opts);
err = ((map_fd < 0) != test->map_create_err);
CHECK(err, "map_fd:%d test->map_create_err:%u",
map_fd, test->map_create_err);
done:
if (*btf_log_buf && (err || always_log))
fprintf(stderr, "\n%s", btf_log_buf);
if (btf_fd >= 0)
close(btf_fd);
if (map_fd >= 0)
close(map_fd);
}
struct btf_get_info_test {
const char *descr;
const char *str_sec;
__u32 raw_types[MAX_NR_RAW_U32];
__u32 str_sec_size;
int btf_size_delta;
int (*special_test)(unsigned int test_num);
};
static int test_big_btf_info(unsigned int test_num);
static int test_btf_id(unsigned int test_num);
const struct btf_get_info_test get_info_tests[] = {
{
.descr = "== raw_btf_size+1",
.raw_types = {
/* int */ /* [1] */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),
BTF_END_RAW,
},
.str_sec = "",
.str_sec_size = sizeof(""),
.btf_size_delta = 1,
},
{
.descr = "== raw_btf_size-3",
.raw_types = {
/* int */ /* [1] */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),
BTF_END_RAW,
},
.str_sec = "",
.str_sec_size = sizeof(""),
.btf_size_delta = -3,
},
{
.descr = "Large bpf_btf_info",
.raw_types = {
/* int */ /* [1] */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),
BTF_END_RAW,
},
.str_sec = "",
.str_sec_size = sizeof(""),
.special_test = test_big_btf_info,
},
{
.descr = "BTF ID",
.raw_types = {
/* int */ /* [1] */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),
/* unsigned int */ /* [2] */
BTF_TYPE_INT_ENC(0, 0, 0, 32, 4),
BTF_END_RAW,
},
.str_sec = "",
.str_sec_size = sizeof(""),
.special_test = test_btf_id,
},
};
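/*
 * Pass a bpf_btf_info larger than what the kernel knows about:
 * GET_INFO must fail while the extra tail is non-zero, and succeed
 * (reporting the kernel-supported length) once the tail is zeroed.
 */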
static int test_big_btf_info(unsigned int test_num)
{
const struct btf_get_info_test *test = &get_info_tests[test_num - 1];
uint8_t *raw_btf = NULL, *user_btf = NULL;
unsigned int raw_btf_size;
struct {
struct bpf_btf_info info;
uint64_t garbage;
} info_garbage;
struct bpf_btf_info *info;
int btf_fd = -1, err;
uint32_t info_len;
raw_btf = btf_raw_create(&hdr_tmpl,
test->raw_types,
test->str_sec,
test->str_sec_size,
&raw_btf_size, NULL);
if (!raw_btf)
return -1;
*btf_log_buf = '\0';
user_btf = malloc(raw_btf_size);
if (CHECK(!user_btf, "!user_btf")) {
err = -1;
goto done;
}
btf_fd = load_raw_btf(raw_btf, raw_btf_size);
if (CHECK(btf_fd < 0, "errno:%d", errno)) {
err = -1;
goto done;
}
/*
* GET_INFO should error out if the userspace info
* has non-zero trailing bytes.
*/
info = &info_garbage.info;
memset(info, 0, sizeof(*info));
info_garbage.garbage = 0xdeadbeef;
info_len = sizeof(info_garbage);
info->btf = ptr_to_u64(user_btf);
info->btf_size = raw_btf_size;
err = bpf_btf_get_info_by_fd(btf_fd, info, &info_len);
if (CHECK(!err, "!err")) {
err = -1;
goto done;
}
/*
* GET_INFO should succeed even if info_len is larger than
* what the kernel supports, as long as the trailing bytes
* are zero. The kernel-supported info length should also
* be returned to userspace.
*/
info_garbage.garbage = 0;
err = bpf_btf_get_info_by_fd(btf_fd, info, &info_len);
if (CHECK(err || info_len != sizeof(*info),
"err:%d errno:%d info_len:%u sizeof(*info):%zu",
err, errno, info_len, sizeof(*info))) {
err = -1;
goto done;
}
fprintf(stderr, "OK");
done:
if (*btf_log_buf && (err || always_log))
fprintf(stderr, "\n%s", btf_log_buf);
free(raw_btf);
free(user_btf);
if (btf_fd >= 0)
close(btf_fd);
return err;
}
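/*
 * Exercise the BTF id interfaces: the fd -> id -> fd round trip must
 * yield identical BTF, a map created against the BTF must report the
 * same btf_id, and the id must disappear once the map, holding the
 * last reference, is closed.
 */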
static int test_btf_id(unsigned int test_num)
{
const struct btf_get_info_test *test = &get_info_tests[test_num - 1];
LIBBPF_OPTS(bpf_map_create_opts, opts);
uint8_t *raw_btf = NULL, *user_btf[2] = {};
int btf_fd[2] = {-1, -1}, map_fd = -1;
struct bpf_map_info map_info = {};
struct bpf_btf_info info[2] = {};
unsigned int raw_btf_size;
uint32_t info_len;
int err, i, ret;
raw_btf = btf_raw_create(&hdr_tmpl,
test->raw_types,
test->str_sec,
test->str_sec_size,
&raw_btf_size, NULL);
if (!raw_btf)
return -1;
*btf_log_buf = '\0';
for (i = 0; i < 2; i++) {
user_btf[i] = malloc(raw_btf_size);
if (CHECK(!user_btf[i], "!user_btf[%d]", i)) {
err = -1;
goto done;
}
info[i].btf = ptr_to_u64(user_btf[i]);
info[i].btf_size = raw_btf_size;
}
btf_fd[0] = load_raw_btf(raw_btf, raw_btf_size);
if (CHECK(btf_fd[0] < 0, "errno:%d", errno)) {
err = -1;
goto done;
}
/* Test BPF_OBJ_GET_INFO_BY_ID on btf_id */
info_len = sizeof(info[0]);
err = bpf_btf_get_info_by_fd(btf_fd[0], &info[0], &info_len);
if (CHECK(err, "errno:%d", errno)) {
err = -1;
goto done;
}
btf_fd[1] = bpf_btf_get_fd_by_id(info[0].id);
if (CHECK(btf_fd[1] < 0, "errno:%d", errno)) {
err = -1;
goto done;
}
ret = 0;
err = bpf_btf_get_info_by_fd(btf_fd[1], &info[1], &info_len);
if (CHECK(err || info[0].id != info[1].id ||
info[0].btf_size != info[1].btf_size ||
(ret = memcmp(user_btf[0], user_btf[1], info[0].btf_size)),
"err:%d errno:%d id0:%u id1:%u btf_size0:%u btf_size1:%u memcmp:%d",
err, errno, info[0].id, info[1].id,
info[0].btf_size, info[1].btf_size, ret)) {
err = -1;
goto done;
}
/* Test btf members in struct bpf_map_info */
opts.btf_fd = btf_fd[0];
opts.btf_key_type_id = 1;
opts.btf_value_type_id = 2;
map_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "test_btf_id",
sizeof(int), sizeof(int), 4, &opts);
if (CHECK(map_fd < 0, "errno:%d", errno)) {
err = -1;
goto done;
}
info_len = sizeof(map_info);
err = bpf_map_get_info_by_fd(map_fd, &map_info, &info_len);
if (CHECK(err || map_info.btf_id != info[0].id ||
map_info.btf_key_type_id != 1 || map_info.btf_value_type_id != 2,
"err:%d errno:%d info.id:%u btf_id:%u btf_key_type_id:%u btf_value_type_id:%u",
err, errno, info[0].id, map_info.btf_id, map_info.btf_key_type_id,
map_info.btf_value_type_id)) {
err = -1;
goto done;
}
for (i = 0; i < 2; i++) {
close(btf_fd[i]);
btf_fd[i] = -1;
}
/* Test BTF ID is removed from the kernel */
btf_fd[0] = bpf_btf_get_fd_by_id(map_info.btf_id);
if (CHECK(btf_fd[0] < 0, "errno:%d", errno)) {
err = -1;
goto done;
}
close(btf_fd[0]);
btf_fd[0] = -1;
/* The map holds the last ref to BTF and its btf_id */
close(map_fd);
map_fd = -1;
btf_fd[0] = bpf_btf_get_fd_by_id(map_info.btf_id);
if (CHECK(btf_fd[0] >= 0, "BTF lingers")) {
err = -1;
goto done;
}
fprintf(stderr, "OK");
done:
if (*btf_log_buf && (err || always_log))
fprintf(stderr, "\n%s", btf_log_buf);
free(raw_btf);
if (map_fd >= 0)
close(map_fd);
for (i = 0; i < 2; i++) {
free(user_btf[i]);
if (btf_fd[i] >= 0)
close(btf_fd[i]);
}
return err;
}
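/*
 * Check the copy semantics of GET_INFO: with a user buffer smaller or
 * larger than the real BTF (test->btf_size_delta), at most
 * min(raw_btf_size, user_btf_size) bytes may be written and the 0xff
 * canary beyond that point must be left untouched.
 */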
static void do_test_get_info(unsigned int test_num)
{
const struct btf_get_info_test *test = &get_info_tests[test_num - 1];
unsigned int raw_btf_size, user_btf_size, expected_nbytes;
uint8_t *raw_btf = NULL, *user_btf = NULL;
struct bpf_btf_info info = {};
int btf_fd = -1, err, ret;
uint32_t info_len;
if (!test__start_subtest(test->descr))
return;
if (test->special_test) {
err = test->special_test(test_num);
if (CHECK(err, "failed: %d\n", err))
return;
}
raw_btf = btf_raw_create(&hdr_tmpl,
test->raw_types,
test->str_sec,
test->str_sec_size,
&raw_btf_size, NULL);
if (!raw_btf)
return;
*btf_log_buf = '\0';
user_btf = malloc(raw_btf_size);
if (CHECK(!user_btf, "!user_btf")) {
err = -1;
goto done;
}
btf_fd = load_raw_btf(raw_btf, raw_btf_size);
if (CHECK(btf_fd < 0, "errno:%d", errno)) {
err = -1;
goto done;
}
user_btf_size = (int)raw_btf_size + test->btf_size_delta;
expected_nbytes = min(raw_btf_size, user_btf_size);
if (raw_btf_size > expected_nbytes)
memset(user_btf + expected_nbytes, 0xff,
raw_btf_size - expected_nbytes);
info_len = sizeof(info);
info.btf = ptr_to_u64(user_btf);
info.btf_size = user_btf_size;
ret = 0;
err = bpf_btf_get_info_by_fd(btf_fd, &info, &info_len);
if (CHECK(err || !info.id || info_len != sizeof(info) ||
info.btf_size != raw_btf_size ||
(ret = memcmp(raw_btf, user_btf, expected_nbytes)),
"err:%d errno:%d info.id:%u info_len:%u sizeof(info):%zu raw_btf_size:%u info.btf_size:%u expected_nbytes:%u memcmp:%d",
err, errno, info.id, info_len, sizeof(info),
raw_btf_size, info.btf_size, expected_nbytes, ret)) {
err = -1;
goto done;
}
while (expected_nbytes < raw_btf_size) {
fprintf(stderr, "%u...", expected_nbytes);
if (CHECK(user_btf[expected_nbytes++] != 0xff,
"user_btf[%u]:%x != 0xff", expected_nbytes - 1,
user_btf[expected_nbytes - 1])) {
err = -1;
goto done;
}
}
fprintf(stderr, "OK");
done:
if (*btf_log_buf && (err || always_log))
fprintf(stderr, "\n%s", btf_log_buf);
free(raw_btf);
free(user_btf);
if (btf_fd >= 0)
close(btf_fd);
}
struct btf_file_test {
const char *file;
bool btf_kv_notfound;
};
static struct btf_file_test file_tests[] = {
{ .file = "test_btf_newkv.bpf.o", },
{ .file = "test_btf_nokv.bpf.o", .btf_kv_notfound = true, },
};
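/*
 * Load a compiled BPF object file, verify its map's BTF key/value
 * type ids, and, when a .BTF.ext section is present, validate the
 * func_info records attached to the loaded program.
 */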
static void do_test_file(unsigned int test_num)
{
const struct btf_file_test *test = &file_tests[test_num - 1];
const char *expected_fnames[] = {"_dummy_tracepoint",
"test_long_fname_1",
"test_long_fname_2"};
struct btf_ext *btf_ext = NULL;
struct bpf_prog_info info = {};
struct bpf_object *obj = NULL;
struct bpf_func_info *finfo;
struct bpf_program *prog;
__u32 info_len, rec_size;
bool has_btf_ext = false;
struct btf *btf = NULL;
void *func_info = NULL;
struct bpf_map *map;
int i, err, prog_fd;
if (!test__start_subtest(test->file))
return;
btf = btf__parse_elf(test->file, &btf_ext);
err = libbpf_get_error(btf);
if (err) {
if (err == -ENOENT) {
printf("%s:SKIP: No ELF %s found", __func__, BTF_ELF_SEC);
test__skip();
return;
}
return;
}
btf__free(btf);
has_btf_ext = btf_ext != NULL;
btf_ext__free(btf_ext);
/* temporarily disable LIBBPF_STRICT_MAP_DEFINITIONS to test legacy maps */
libbpf_set_strict_mode(LIBBPF_STRICT_ALL & ~LIBBPF_STRICT_MAP_DEFINITIONS);
obj = bpf_object__open(test->file);
err = libbpf_get_error(obj);
if (CHECK(err, "obj: %d", err))
return;
prog = bpf_object__next_program(obj, NULL);
if (CHECK(!prog, "Cannot find bpf_prog")) {
err = -1;
goto done;
}
bpf_program__set_type(prog, BPF_PROG_TYPE_TRACEPOINT);
err = bpf_object__load(obj);
if (CHECK(err < 0, "bpf_object__load: %d", err))
goto done;
prog_fd = bpf_program__fd(prog);
map = bpf_object__find_map_by_name(obj, "btf_map");
if (CHECK(!map, "btf_map not found")) {
err = -1;
goto done;
}
err = (bpf_map__btf_key_type_id(map) == 0 || bpf_map__btf_value_type_id(map) == 0)
!= test->btf_kv_notfound;
if (CHECK(err, "btf_key_type_id:%u btf_value_type_id:%u test->btf_kv_notfound:%u",
bpf_map__btf_key_type_id(map), bpf_map__btf_value_type_id(map),
test->btf_kv_notfound))
goto done;
if (!has_btf_ext)
goto skip;
/* get necessary program info */
info_len = sizeof(struct bpf_prog_info);
err = bpf_prog_get_info_by_fd(prog_fd, &info, &info_len);
if (CHECK(err < 0, "invalid get info (1st) errno:%d", errno)) {
fprintf(stderr, "%s\n", btf_log_buf);
err = -1;
goto done;
}
if (CHECK(info.nr_func_info != 3,
"incorrect info.nr_func_info (1st) %d",
info.nr_func_info)) {
err = -1;
goto done;
}
rec_size = info.func_info_rec_size;
if (CHECK(rec_size != sizeof(struct bpf_func_info),
"incorrect info.func_info_rec_size (1st) %d\n", rec_size)) {
err = -1;
goto done;
}
func_info = malloc(info.nr_func_info * rec_size);
if (CHECK(!func_info, "out of memory")) {
err = -1;
goto done;
}
/* reset info to only retrieve func_info related data */
memset(&info, 0, sizeof(info));
info.nr_func_info = 3;
info.func_info_rec_size = rec_size;
info.func_info = ptr_to_u64(func_info);
err = bpf_prog_get_info_by_fd(prog_fd, &info, &info_len);
if (CHECK(err < 0, "invalid get info (2nd) errno:%d", errno)) {
fprintf(stderr, "%s\n", btf_log_buf);
err = -1;
goto done;
}
if (CHECK(info.nr_func_info != 3,
"incorrect info.nr_func_info (2nd) %d",
info.nr_func_info)) {
err = -1;
goto done;
}
if (CHECK(info.func_info_rec_size != rec_size,
"incorrect info.func_info_rec_size (2nd) %d",
info.func_info_rec_size)) {
err = -1;
goto done;
}
btf = btf__load_from_kernel_by_id(info.btf_id);
err = libbpf_get_error(btf);
if (CHECK(err, "cannot get btf from kernel, err: %d", err))
goto done;
/* check three functions */
finfo = func_info;
for (i = 0; i < 3; i++) {
const struct btf_type *t;
const char *fname;
t = btf__type_by_id(btf, finfo->type_id);
if (CHECK(!t, "btf__type_by_id failure: id %u",
finfo->type_id)) {
err = -1;
goto done;
}
fname = btf__name_by_offset(btf, t->name_off);
err = strcmp(fname, expected_fnames[i]);
/* for the second and third functions in .text section,
* the compiler may order them either way.
*/
if (i && err)
err = strcmp(fname, expected_fnames[3 - i]);
if (CHECK(err, "incorrect fname %s", fname ? : "")) {
err = -1;
goto done;
}
finfo = (void *)finfo + rec_size;
}
skip:
fprintf(stderr, "OK");
done:
libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
btf__free(btf);
free(func_info);
bpf_object__close(obj);
}
static const char *pprint_enum_str[] = {
"ENUM_ZERO",
"ENUM_ONE",
"ENUM_TWO",
"ENUM_THREE",
};
struct pprint_mapv {
uint32_t ui32;
uint16_t ui16;
/* 2 bytes hole */
int32_t si32;
uint32_t unused_bits2a:2,
bits28:28,
unused_bits2b:2;
union {
uint64_t ui64;
uint8_t ui8a[8];
};
enum {
ENUM_ZERO,
ENUM_ONE,
ENUM_TWO,
ENUM_THREE,
} aenum;
uint32_t ui32b;
uint32_t bits2c:2;
uint8_t si8_4[2][2];
};
#ifdef __SIZEOF_INT128__
struct pprint_mapv_int128 {
__int128 si128a;
__int128 si128b;
unsigned __int128 bits3:3;
unsigned __int128 bits80:80;
unsigned __int128 ui128;
};
#endif
static struct btf_raw_test pprint_test_template[] = {
{
.raw_types = {
		/* unsigned char */ /* [1] */
BTF_TYPE_INT_ENC(NAME_TBD, 0, 0, 8, 1),
/* unsigned short */ /* [2] */
BTF_TYPE_INT_ENC(NAME_TBD, 0, 0, 16, 2),
/* unsigned int */ /* [3] */
BTF_TYPE_INT_ENC(NAME_TBD, 0, 0, 32, 4),
/* int */ /* [4] */
BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4),
/* unsigned long long */ /* [5] */
BTF_TYPE_INT_ENC(NAME_TBD, 0, 0, 64, 8),
/* 2 bits */ /* [6] */
BTF_TYPE_INT_ENC(0, 0, 0, 2, 2),
/* 28 bits */ /* [7] */
BTF_TYPE_INT_ENC(0, 0, 0, 28, 4),
/* uint8_t[8] */ /* [8] */
BTF_TYPE_ARRAY_ENC(9, 1, 8),
/* typedef unsigned char uint8_t */ /* [9] */
BTF_TYPEDEF_ENC(NAME_TBD, 1),
/* typedef unsigned short uint16_t */ /* [10] */
BTF_TYPEDEF_ENC(NAME_TBD, 2),
/* typedef unsigned int uint32_t */ /* [11] */
BTF_TYPEDEF_ENC(NAME_TBD, 3),
/* typedef int int32_t */ /* [12] */
BTF_TYPEDEF_ENC(NAME_TBD, 4),
/* typedef unsigned long long uint64_t *//* [13] */
BTF_TYPEDEF_ENC(NAME_TBD, 5),
/* union (anon) */ /* [14] */
BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_UNION, 0, 2), 8),
BTF_MEMBER_ENC(NAME_TBD, 13, 0),/* uint64_t ui64; */
BTF_MEMBER_ENC(NAME_TBD, 8, 0), /* uint8_t ui8a[8]; */
/* enum (anon) */ /* [15] */
BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_ENUM, 0, 4), 4),
BTF_ENUM_ENC(NAME_TBD, 0),
BTF_ENUM_ENC(NAME_TBD, 1),
BTF_ENUM_ENC(NAME_TBD, 2),
BTF_ENUM_ENC(NAME_TBD, 3),
/* struct pprint_mapv */ /* [16] */
BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 11), 40),
BTF_MEMBER_ENC(NAME_TBD, 11, 0), /* uint32_t ui32 */
BTF_MEMBER_ENC(NAME_TBD, 10, 32), /* uint16_t ui16 */
BTF_MEMBER_ENC(NAME_TBD, 12, 64), /* int32_t si32 */
BTF_MEMBER_ENC(NAME_TBD, 6, 96), /* unused_bits2a */
BTF_MEMBER_ENC(NAME_TBD, 7, 98), /* bits28 */
BTF_MEMBER_ENC(NAME_TBD, 6, 126), /* unused_bits2b */
BTF_MEMBER_ENC(0, 14, 128), /* union (anon) */
BTF_MEMBER_ENC(NAME_TBD, 15, 192), /* aenum */
BTF_MEMBER_ENC(NAME_TBD, 11, 224), /* uint32_t ui32b */
BTF_MEMBER_ENC(NAME_TBD, 6, 256), /* bits2c */
BTF_MEMBER_ENC(NAME_TBD, 17, 264), /* si8_4 */
BTF_TYPE_ARRAY_ENC(18, 1, 2), /* [17] */
BTF_TYPE_ARRAY_ENC(1, 1, 2), /* [18] */
BTF_END_RAW,
},
BTF_STR_SEC("\0unsigned char\0unsigned short\0unsigned int\0int\0unsigned long long\0uint8_t\0uint16_t\0uint32_t\0int32_t\0uint64_t\0ui64\0ui8a\0ENUM_ZERO\0ENUM_ONE\0ENUM_TWO\0ENUM_THREE\0pprint_mapv\0ui32\0ui16\0si32\0unused_bits2a\0bits28\0unused_bits2b\0aenum\0ui32b\0bits2c\0si8_4"),
.key_size = sizeof(unsigned int),
.value_size = sizeof(struct pprint_mapv),
.key_type_id = 3, /* unsigned int */
.value_type_id = 16, /* struct pprint_mapv */
.max_entries = 128,
},
{
	/* This test has the same layout as the first .raw_types
	 * definition, but the struct type is encoded with kind_flag
	 * set.
	 */
.raw_types = {
		/* unsigned char */ /* [1] */
BTF_TYPE_INT_ENC(NAME_TBD, 0, 0, 8, 1),
/* unsigned short */ /* [2] */
BTF_TYPE_INT_ENC(NAME_TBD, 0, 0, 16, 2),
/* unsigned int */ /* [3] */
BTF_TYPE_INT_ENC(NAME_TBD, 0, 0, 32, 4),
/* int */ /* [4] */
BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4),
/* unsigned long long */ /* [5] */
BTF_TYPE_INT_ENC(NAME_TBD, 0, 0, 64, 8),
BTF_TYPE_INT_ENC(0, 0, 0, 32, 4), /* [6] */
BTF_TYPE_INT_ENC(0, 0, 0, 32, 4), /* [7] */
/* uint8_t[8] */ /* [8] */
BTF_TYPE_ARRAY_ENC(9, 1, 8),
/* typedef unsigned char uint8_t */ /* [9] */
BTF_TYPEDEF_ENC(NAME_TBD, 1),
/* typedef unsigned short uint16_t */ /* [10] */
BTF_TYPEDEF_ENC(NAME_TBD, 2),
/* typedef unsigned int uint32_t */ /* [11] */
BTF_TYPEDEF_ENC(NAME_TBD, 3),
/* typedef int int32_t */ /* [12] */
BTF_TYPEDEF_ENC(NAME_TBD, 4),
/* typedef unsigned long long uint64_t *//* [13] */
BTF_TYPEDEF_ENC(NAME_TBD, 5),
/* union (anon) */ /* [14] */
BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_UNION, 0, 2), 8),
BTF_MEMBER_ENC(NAME_TBD, 13, 0),/* uint64_t ui64; */
BTF_MEMBER_ENC(NAME_TBD, 8, 0), /* uint8_t ui8a[8]; */
/* enum (anon) */ /* [15] */
BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_ENUM, 0, 4), 4),
BTF_ENUM_ENC(NAME_TBD, 0),
BTF_ENUM_ENC(NAME_TBD, 1),
BTF_ENUM_ENC(NAME_TBD, 2),
BTF_ENUM_ENC(NAME_TBD, 3),
/* struct pprint_mapv */ /* [16] */
BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 1, 11), 40),
BTF_MEMBER_ENC(NAME_TBD, 11, BTF_MEMBER_OFFSET(0, 0)), /* uint32_t ui32 */
BTF_MEMBER_ENC(NAME_TBD, 10, BTF_MEMBER_OFFSET(0, 32)), /* uint16_t ui16 */
BTF_MEMBER_ENC(NAME_TBD, 12, BTF_MEMBER_OFFSET(0, 64)), /* int32_t si32 */
BTF_MEMBER_ENC(NAME_TBD, 6, BTF_MEMBER_OFFSET(2, 96)), /* unused_bits2a */
BTF_MEMBER_ENC(NAME_TBD, 7, BTF_MEMBER_OFFSET(28, 98)), /* bits28 */
BTF_MEMBER_ENC(NAME_TBD, 6, BTF_MEMBER_OFFSET(2, 126)), /* unused_bits2b */
BTF_MEMBER_ENC(0, 14, BTF_MEMBER_OFFSET(0, 128)), /* union (anon) */
BTF_MEMBER_ENC(NAME_TBD, 15, BTF_MEMBER_OFFSET(0, 192)), /* aenum */
BTF_MEMBER_ENC(NAME_TBD, 11, BTF_MEMBER_OFFSET(0, 224)), /* uint32_t ui32b */
BTF_MEMBER_ENC(NAME_TBD, 6, BTF_MEMBER_OFFSET(2, 256)), /* bits2c */
BTF_MEMBER_ENC(NAME_TBD, 17, 264), /* si8_4 */
BTF_TYPE_ARRAY_ENC(18, 1, 2), /* [17] */
BTF_TYPE_ARRAY_ENC(1, 1, 2), /* [18] */
BTF_END_RAW,
},
BTF_STR_SEC("\0unsigned char\0unsigned short\0unsigned int\0int\0unsigned long long\0uint8_t\0uint16_t\0uint32_t\0int32_t\0uint64_t\0ui64\0ui8a\0ENUM_ZERO\0ENUM_ONE\0ENUM_TWO\0ENUM_THREE\0pprint_mapv\0ui32\0ui16\0si32\0unused_bits2a\0bits28\0unused_bits2b\0aenum\0ui32b\0bits2c\0si8_4"),
.key_size = sizeof(unsigned int),
.value_size = sizeof(struct pprint_mapv),
.key_type_id = 3, /* unsigned int */
.value_type_id = 16, /* struct pprint_mapv */
.max_entries = 128,
},
{
	/* This test has the same layout as the first .raw_types
	 * definition. The struct type is encoded with kind_flag set,
	 * bitfield members are wrapped in typedef/const/volatile,
	 * and bitfield members use both int and enum types.
	 */
.raw_types = {
		/* unsigned char */ /* [1] */
BTF_TYPE_INT_ENC(NAME_TBD, 0, 0, 8, 1),
/* unsigned short */ /* [2] */
BTF_TYPE_INT_ENC(NAME_TBD, 0, 0, 16, 2),
/* unsigned int */ /* [3] */
BTF_TYPE_INT_ENC(NAME_TBD, 0, 0, 32, 4),
/* int */ /* [4] */
BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4),
/* unsigned long long */ /* [5] */
BTF_TYPE_INT_ENC(NAME_TBD, 0, 0, 64, 8),
BTF_TYPE_INT_ENC(0, 0, 0, 32, 4), /* [6] */
BTF_TYPE_INT_ENC(0, 0, 0, 32, 4), /* [7] */
/* uint8_t[8] */ /* [8] */
BTF_TYPE_ARRAY_ENC(9, 1, 8),
/* typedef unsigned char uint8_t */ /* [9] */
BTF_TYPEDEF_ENC(NAME_TBD, 1),
/* typedef unsigned short uint16_t */ /* [10] */
BTF_TYPEDEF_ENC(NAME_TBD, 2),
/* typedef unsigned int uint32_t */ /* [11] */
BTF_TYPEDEF_ENC(NAME_TBD, 3),
/* typedef int int32_t */ /* [12] */
BTF_TYPEDEF_ENC(NAME_TBD, 4),
/* typedef unsigned long long uint64_t *//* [13] */
BTF_TYPEDEF_ENC(NAME_TBD, 5),
/* union (anon) */ /* [14] */
BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_UNION, 0, 2), 8),
BTF_MEMBER_ENC(NAME_TBD, 13, 0),/* uint64_t ui64; */
BTF_MEMBER_ENC(NAME_TBD, 8, 0), /* uint8_t ui8a[8]; */
/* enum (anon) */ /* [15] */
BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_ENUM, 0, 4), 4),
BTF_ENUM_ENC(NAME_TBD, 0),
BTF_ENUM_ENC(NAME_TBD, 1),
BTF_ENUM_ENC(NAME_TBD, 2),
BTF_ENUM_ENC(NAME_TBD, 3),
/* struct pprint_mapv */ /* [16] */
BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 1, 11), 40),
BTF_MEMBER_ENC(NAME_TBD, 11, BTF_MEMBER_OFFSET(0, 0)), /* uint32_t ui32 */
BTF_MEMBER_ENC(NAME_TBD, 10, BTF_MEMBER_OFFSET(0, 32)), /* uint16_t ui16 */
BTF_MEMBER_ENC(NAME_TBD, 12, BTF_MEMBER_OFFSET(0, 64)), /* int32_t si32 */
BTF_MEMBER_ENC(NAME_TBD, 17, BTF_MEMBER_OFFSET(2, 96)), /* unused_bits2a */
BTF_MEMBER_ENC(NAME_TBD, 7, BTF_MEMBER_OFFSET(28, 98)), /* bits28 */
BTF_MEMBER_ENC(NAME_TBD, 19, BTF_MEMBER_OFFSET(2, 126)),/* unused_bits2b */
BTF_MEMBER_ENC(0, 14, BTF_MEMBER_OFFSET(0, 128)), /* union (anon) */
BTF_MEMBER_ENC(NAME_TBD, 15, BTF_MEMBER_OFFSET(0, 192)), /* aenum */
BTF_MEMBER_ENC(NAME_TBD, 11, BTF_MEMBER_OFFSET(0, 224)), /* uint32_t ui32b */
BTF_MEMBER_ENC(NAME_TBD, 17, BTF_MEMBER_OFFSET(2, 256)), /* bits2c */
BTF_MEMBER_ENC(NAME_TBD, 20, BTF_MEMBER_OFFSET(0, 264)), /* si8_4 */
/* typedef unsigned int ___int */ /* [17] */
BTF_TYPEDEF_ENC(NAME_TBD, 18),
BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_VOLATILE, 0, 0), 6), /* [18] */
BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_CONST, 0, 0), 15), /* [19] */
BTF_TYPE_ARRAY_ENC(21, 1, 2), /* [20] */
BTF_TYPE_ARRAY_ENC(1, 1, 2), /* [21] */
BTF_END_RAW,
},
BTF_STR_SEC("\0unsigned char\0unsigned short\0unsigned int\0int\0unsigned long long\0uint8_t\0uint16_t\0uint32_t\0int32_t\0uint64_t\0ui64\0ui8a\0ENUM_ZERO\0ENUM_ONE\0ENUM_TWO\0ENUM_THREE\0pprint_mapv\0ui32\0ui16\0si32\0unused_bits2a\0bits28\0unused_bits2b\0aenum\0ui32b\0bits2c\0___int\0si8_4"),
.key_size = sizeof(unsigned int),
.value_size = sizeof(struct pprint_mapv),
.key_type_id = 3, /* unsigned int */
.value_type_id = 16, /* struct pprint_mapv */
.max_entries = 128,
},
#ifdef __SIZEOF_INT128__
{
/* test int128 */
.raw_types = {
/* unsigned int */ /* [1] */
BTF_TYPE_INT_ENC(NAME_TBD, 0, 0, 32, 4),
/* __int128 */ /* [2] */
BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 128, 16),
/* unsigned __int128 */ /* [3] */
BTF_TYPE_INT_ENC(NAME_TBD, 0, 0, 128, 16),
/* struct pprint_mapv_int128 */ /* [4] */
BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 1, 5), 64),
BTF_MEMBER_ENC(NAME_TBD, 2, BTF_MEMBER_OFFSET(0, 0)), /* si128a */
BTF_MEMBER_ENC(NAME_TBD, 2, BTF_MEMBER_OFFSET(0, 128)), /* si128b */
BTF_MEMBER_ENC(NAME_TBD, 3, BTF_MEMBER_OFFSET(3, 256)), /* bits3 */
BTF_MEMBER_ENC(NAME_TBD, 3, BTF_MEMBER_OFFSET(80, 259)), /* bits80 */
BTF_MEMBER_ENC(NAME_TBD, 3, BTF_MEMBER_OFFSET(0, 384)), /* ui128 */
BTF_END_RAW,
},
BTF_STR_SEC("\0unsigned int\0__int128\0unsigned __int128\0pprint_mapv_int128\0si128a\0si128b\0bits3\0bits80\0ui128"),
.key_size = sizeof(unsigned int),
.value_size = sizeof(struct pprint_mapv_int128),
.key_type_id = 1,
.value_type_id = 4,
.max_entries = 128,
.mapv_kind = PPRINT_MAPV_KIND_INT128,
},
#endif
};
static struct btf_pprint_test_meta {
const char *descr;
enum bpf_map_type map_type;
const char *map_name;
bool ordered_map;
bool lossless_map;
bool percpu_map;
} pprint_tests_meta[] = {
{
.descr = "BTF pretty print array",
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "pprint_test_array",
.ordered_map = true,
.lossless_map = true,
.percpu_map = false,
},
{
.descr = "BTF pretty print hash",
.map_type = BPF_MAP_TYPE_HASH,
.map_name = "pprint_test_hash",
.ordered_map = false,
.lossless_map = true,
.percpu_map = false,
},
{
.descr = "BTF pretty print lru hash",
.map_type = BPF_MAP_TYPE_LRU_HASH,
.map_name = "pprint_test_lru_hash",
.ordered_map = false,
.lossless_map = false,
.percpu_map = false,
},
{
.descr = "BTF pretty print percpu array",
.map_type = BPF_MAP_TYPE_PERCPU_ARRAY,
.map_name = "pprint_test_percpu_array",
.ordered_map = true,
.lossless_map = true,
.percpu_map = true,
},
{
.descr = "BTF pretty print percpu hash",
.map_type = BPF_MAP_TYPE_PERCPU_HASH,
.map_name = "pprint_test_percpu_hash",
.ordered_map = false,
.lossless_map = true,
.percpu_map = true,
},
{
.descr = "BTF pretty print lru percpu hash",
.map_type = BPF_MAP_TYPE_LRU_PERCPU_HASH,
.map_name = "pprint_test_lru_percpu_hash",
.ordered_map = false,
.lossless_map = false,
.percpu_map = true,
},
};
static size_t get_pprint_mapv_size(enum pprint_mapv_kind_t mapv_kind)
{
if (mapv_kind == PPRINT_MAPV_KIND_BASIC)
return sizeof(struct pprint_mapv);
#ifdef __SIZEOF_INT128__
if (mapv_kind == PPRINT_MAPV_KIND_INT128)
return sizeof(struct pprint_mapv_int128);
#endif
	assert(0);
	return 0;	/* unreachable, but keeps -Wreturn-type quiet if NDEBUG */
}
static void set_pprint_mapv(enum pprint_mapv_kind_t mapv_kind,
void *mapv, uint32_t i,
int num_cpus, int rounded_value_size)
{
int cpu;
if (mapv_kind == PPRINT_MAPV_KIND_BASIC) {
struct pprint_mapv *v = mapv;
for (cpu = 0; cpu < num_cpus; cpu++) {
v->ui32 = i + cpu;
v->si32 = -i;
v->unused_bits2a = 3;
v->bits28 = i;
v->unused_bits2b = 3;
v->ui64 = i;
v->aenum = i & 0x03;
v->ui32b = 4;
v->bits2c = 1;
v->si8_4[0][0] = (cpu + i) & 0xff;
v->si8_4[0][1] = (cpu + i + 1) & 0xff;
v->si8_4[1][0] = (cpu + i + 2) & 0xff;
v->si8_4[1][1] = (cpu + i + 3) & 0xff;
v = (void *)v + rounded_value_size;
}
}
#ifdef __SIZEOF_INT128__
if (mapv_kind == PPRINT_MAPV_KIND_INT128) {
struct pprint_mapv_int128 *v = mapv;
for (cpu = 0; cpu < num_cpus; cpu++) {
v->si128a = i;
v->si128b = -i;
v->bits3 = i & 0x07;
v->bits80 = (((unsigned __int128)1) << 64) + i;
v->ui128 = (((unsigned __int128)2) << 64) + i;
v = (void *)v + rounded_value_size;
}
}
#endif
}
static ssize_t get_pprint_expected_line(enum pprint_mapv_kind_t mapv_kind,
char *expected_line, ssize_t line_size,
bool percpu_map, unsigned int next_key,
int cpu, void *mapv)
{
ssize_t nexpected_line = -1;
if (mapv_kind == PPRINT_MAPV_KIND_BASIC) {
struct pprint_mapv *v = mapv;
nexpected_line = snprintf(expected_line, line_size,
"%s%u: {%u,0,%d,0x%x,0x%x,0x%x,"
"{%llu|[%u,%u,%u,%u,%u,%u,%u,%u]},%s,"
"%u,0x%x,[[%d,%d],[%d,%d]]}\n",
percpu_map ? "\tcpu" : "",
percpu_map ? cpu : next_key,
v->ui32, v->si32,
v->unused_bits2a,
v->bits28,
v->unused_bits2b,
(__u64)v->ui64,
v->ui8a[0], v->ui8a[1],
v->ui8a[2], v->ui8a[3],
v->ui8a[4], v->ui8a[5],
v->ui8a[6], v->ui8a[7],
pprint_enum_str[v->aenum],
v->ui32b,
v->bits2c,
v->si8_4[0][0], v->si8_4[0][1],
v->si8_4[1][0], v->si8_4[1][1]);
}
#ifdef __SIZEOF_INT128__
if (mapv_kind == PPRINT_MAPV_KIND_INT128) {
struct pprint_mapv_int128 *v = mapv;
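		/* the kernel prints a >64-bit int as 0x<hi><lo padded to 16
		 * hex digits>, or just 0x<lo> when the upper 64 bits are zero
		 */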
nexpected_line = snprintf(expected_line, line_size,
"%s%u: {0x%lx,0x%lx,0x%lx,"
"0x%lx%016lx,0x%lx%016lx}\n",
percpu_map ? "\tcpu" : "",
percpu_map ? cpu : next_key,
(uint64_t)v->si128a,
(uint64_t)v->si128b,
(uint64_t)v->bits3,
(uint64_t)(v->bits80 >> 64),
(uint64_t)v->bits80,
(uint64_t)(v->ui128 >> 64),
(uint64_t)v->ui128);
}
#endif
return nexpected_line;
}
static int check_line(const char *expected_line, int nexpected_line,
int expected_line_len, const char *line)
{
	if (CHECK(nexpected_line >= expected_line_len,
		  "expected_line is too long"))
return -1;
if (strcmp(expected_line, line)) {
fprintf(stderr, "unexpected pprint output\n");
fprintf(stderr, "expected: %s", expected_line);
fprintf(stderr, " read: %s", line);
return -1;
}
return 0;
}
static void do_test_pprint(int test_num)
{
const struct btf_raw_test *test = &pprint_test_template[test_num];
enum pprint_mapv_kind_t mapv_kind = test->mapv_kind;
LIBBPF_OPTS(bpf_map_create_opts, opts);
bool ordered_map, lossless_map, percpu_map;
int err, ret, num_cpus, rounded_value_size;
unsigned int key, nr_read_elems;
int map_fd = -1, btf_fd = -1;
unsigned int raw_btf_size;
char expected_line[255];
FILE *pin_file = NULL;
char pin_path[255];
size_t line_len = 0;
char *line = NULL;
void *mapv = NULL;
uint8_t *raw_btf;
ssize_t nread;
if (!test__start_subtest(test->descr))
return;
raw_btf = btf_raw_create(&hdr_tmpl, test->raw_types,
test->str_sec, test->str_sec_size,
&raw_btf_size, NULL);
if (!raw_btf)
return;
*btf_log_buf = '\0';
btf_fd = load_raw_btf(raw_btf, raw_btf_size);
free(raw_btf);
if (CHECK(btf_fd < 0, "errno:%d\n", errno)) {
err = -1;
goto done;
}
opts.btf_fd = btf_fd;
opts.btf_key_type_id = test->key_type_id;
opts.btf_value_type_id = test->value_type_id;
map_fd = bpf_map_create(test->map_type, test->map_name,
test->key_size, test->value_size, test->max_entries, &opts);
if (CHECK(map_fd < 0, "errno:%d", errno)) {
err = -1;
goto done;
}
ret = snprintf(pin_path, sizeof(pin_path), "%s/%s",
"/sys/fs/bpf", test->map_name);
if (CHECK(ret >= sizeof(pin_path), "pin_path %s/%s is too long",
"/sys/fs/bpf", test->map_name)) {
err = -1;
goto done;
}
err = bpf_obj_pin(map_fd, pin_path);
if (CHECK(err, "bpf_obj_pin(%s): errno:%d.", pin_path, errno))
goto done;
percpu_map = test->percpu_map;
num_cpus = percpu_map ? bpf_num_possible_cpus() : 1;
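	/* the kernel returns per-cpu values in chunks rounded up to 8 bytes,
	 * so size each per-cpu slot accordingly
	 */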
rounded_value_size = round_up(get_pprint_mapv_size(mapv_kind), 8);
mapv = calloc(num_cpus, rounded_value_size);
if (CHECK(!mapv, "mapv allocation failure")) {
err = -1;
goto done;
}
for (key = 0; key < test->max_entries; key++) {
set_pprint_mapv(mapv_kind, mapv, key, num_cpus, rounded_value_size);
bpf_map_update_elem(map_fd, &key, mapv, 0);
}
pin_file = fopen(pin_path, "r");
if (CHECK(!pin_file, "fopen(%s): errno:%d", pin_path, errno)) {
err = -1;
goto done;
}
	/* Skip lines that start with '#' */
while ((nread = getline(&line, &line_len, pin_file)) > 0 &&
*line == '#')
;
if (CHECK(nread <= 0, "Unexpected EOF")) {
err = -1;
goto done;
}
nr_read_elems = 0;
ordered_map = test->ordered_map;
lossless_map = test->lossless_map;
do {
ssize_t nexpected_line;
unsigned int next_key;
void *cmapv;
int cpu;
next_key = ordered_map ? nr_read_elems : atoi(line);
set_pprint_mapv(mapv_kind, mapv, next_key, num_cpus, rounded_value_size);
cmapv = mapv;
for (cpu = 0; cpu < num_cpus; cpu++) {
if (percpu_map) {
/* for percpu map, the format looks like:
* <key>: {
* cpu0: <value_on_cpu0>
* cpu1: <value_on_cpu1>
* ...
* cpun: <value_on_cpun>
* }
*
* let us verify the line containing the key here.
*/
if (cpu == 0) {
nexpected_line = snprintf(expected_line,
sizeof(expected_line),
"%u: {\n",
next_key);
err = check_line(expected_line, nexpected_line,
sizeof(expected_line), line);
if (err < 0)
goto done;
}
/* read value@cpu */
nread = getline(&line, &line_len, pin_file);
if (nread < 0)
break;
}
nexpected_line = get_pprint_expected_line(mapv_kind, expected_line,
sizeof(expected_line),
percpu_map, next_key,
cpu, cmapv);
err = check_line(expected_line, nexpected_line,
sizeof(expected_line), line);
if (err < 0)
goto done;
cmapv = cmapv + rounded_value_size;
}
if (percpu_map) {
/* skip the last bracket for the percpu map */
nread = getline(&line, &line_len, pin_file);
if (nread < 0)
break;
}
nread = getline(&line, &line_len, pin_file);
} while (++nr_read_elems < test->max_entries && nread > 0);
if (lossless_map &&
CHECK(nr_read_elems < test->max_entries,
"Unexpected EOF. nr_read_elems:%u test->max_entries:%u",
nr_read_elems, test->max_entries)) {
err = -1;
goto done;
}
if (CHECK(nread > 0, "Unexpected extra pprint output: %s", line)) {
err = -1;
goto done;
}
err = 0;
done:
if (mapv)
free(mapv);
if (!err)
fprintf(stderr, "OK");
if (*btf_log_buf && (err || always_log))
fprintf(stderr, "\n%s", btf_log_buf);
if (btf_fd >= 0)
close(btf_fd);
if (map_fd >= 0)
close(map_fd);
if (pin_file)
fclose(pin_file);
unlink(pin_path);
free(line);
}
static void test_pprint(void)
{
unsigned int i;
/* test various maps with the first test template */
for (i = 0; i < ARRAY_SIZE(pprint_tests_meta); i++) {
pprint_test_template[0].descr = pprint_tests_meta[i].descr;
pprint_test_template[0].map_type = pprint_tests_meta[i].map_type;
pprint_test_template[0].map_name = pprint_tests_meta[i].map_name;
pprint_test_template[0].ordered_map = pprint_tests_meta[i].ordered_map;
pprint_test_template[0].lossless_map = pprint_tests_meta[i].lossless_map;
pprint_test_template[0].percpu_map = pprint_tests_meta[i].percpu_map;
do_test_pprint(0);
}
/* test rest test templates with the first map */
for (i = 1; i < ARRAY_SIZE(pprint_test_template); i++) {
pprint_test_template[i].descr = pprint_tests_meta[0].descr;
pprint_test_template[i].map_type = pprint_tests_meta[0].map_type;
pprint_test_template[i].map_name = pprint_tests_meta[0].map_name;
pprint_test_template[i].ordered_map = pprint_tests_meta[0].ordered_map;
pprint_test_template[i].lossless_map = pprint_tests_meta[0].lossless_map;
pprint_test_template[i].percpu_map = pprint_tests_meta[0].percpu_map;
do_test_pprint(i);
}
}
#define BPF_LINE_INFO_ENC(insn_off, file_off, line_off, line_num, line_col) \
(insn_off), (file_off), (line_off), ((line_num) << 10 | ((line_col) & 0x3ff))
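/* Example: line 5, column 3 is encoded as (5 << 10) | (3 & 0x3ff) == 0x1403
 * and is unpacked with BPF_LINE_INFO_LINE_NUM()/BPF_LINE_INFO_LINE_COL().
 */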
static struct prog_info_raw_test {
const char *descr;
const char *str_sec;
const char *err_str;
__u32 raw_types[MAX_NR_RAW_U32];
__u32 str_sec_size;
struct bpf_insn insns[MAX_INSNS];
__u32 prog_type;
__u32 func_info[MAX_SUBPROGS][2];
__u32 func_info_rec_size;
__u32 func_info_cnt;
__u32 line_info[MAX_NR_RAW_U32];
__u32 line_info_rec_size;
__u32 nr_jited_ksyms;
bool expected_prog_load_failure;
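	/* bit i set in dead_code_mask means line_info[i] is expected to be
	 * removed by dead code elimination; dead_func_mask marks func_info
	 * entries the same way
	 */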
__u32 dead_code_cnt;
__u32 dead_code_mask;
__u32 dead_func_cnt;
__u32 dead_func_mask;
} info_raw_tests[] = {
{
.descr = "func_type (main func + one sub)",
.raw_types = {
BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_TYPE_INT_ENC(NAME_TBD, 0, 0, 32, 4), /* [2] */
BTF_FUNC_PROTO_ENC(1, 2), /* [3] */
BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1),
BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 2),
BTF_FUNC_PROTO_ENC(1, 2), /* [4] */
BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 2),
BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1),
BTF_FUNC_ENC(NAME_TBD, 3), /* [5] */
BTF_FUNC_ENC(NAME_TBD, 4), /* [6] */
BTF_END_RAW,
},
.str_sec = "\0int\0unsigned int\0a\0b\0c\0d\0funcA\0funcB",
.str_sec_size = sizeof("\0int\0unsigned int\0a\0b\0c\0d\0funcA\0funcB"),
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
BPF_MOV64_IMM(BPF_REG_0, 2),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
.func_info = { {0, 5}, {3, 6} },
.func_info_rec_size = 8,
.func_info_cnt = 2,
.line_info = { BTF_END_RAW },
},
{
.descr = "func_type (Incorrect func_info_rec_size)",
.raw_types = {
BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_TYPE_INT_ENC(NAME_TBD, 0, 0, 32, 4), /* [2] */
BTF_FUNC_PROTO_ENC(1, 2), /* [3] */
BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1),
BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 2),
BTF_FUNC_PROTO_ENC(1, 2), /* [4] */
BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 2),
BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1),
BTF_FUNC_ENC(NAME_TBD, 3), /* [5] */
BTF_FUNC_ENC(NAME_TBD, 4), /* [6] */
BTF_END_RAW,
},
.str_sec = "\0int\0unsigned int\0a\0b\0c\0d\0funcA\0funcB",
.str_sec_size = sizeof("\0int\0unsigned int\0a\0b\0c\0d\0funcA\0funcB"),
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
BPF_MOV64_IMM(BPF_REG_0, 2),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
.func_info = { {0, 5}, {3, 6} },
.func_info_rec_size = 4,
.func_info_cnt = 2,
.line_info = { BTF_END_RAW },
.expected_prog_load_failure = true,
},
{
.descr = "func_type (Incorrect func_info_cnt)",
.raw_types = {
BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_TYPE_INT_ENC(NAME_TBD, 0, 0, 32, 4), /* [2] */
BTF_FUNC_PROTO_ENC(1, 2), /* [3] */
BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1),
BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 2),
BTF_FUNC_PROTO_ENC(1, 2), /* [4] */
BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 2),
BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1),
BTF_FUNC_ENC(NAME_TBD, 3), /* [5] */
BTF_FUNC_ENC(NAME_TBD, 4), /* [6] */
BTF_END_RAW,
},
.str_sec = "\0int\0unsigned int\0a\0b\0c\0d\0funcA\0funcB",
.str_sec_size = sizeof("\0int\0unsigned int\0a\0b\0c\0d\0funcA\0funcB"),
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
BPF_MOV64_IMM(BPF_REG_0, 2),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
.func_info = { {0, 5}, {3, 6} },
.func_info_rec_size = 8,
.func_info_cnt = 1,
.line_info = { BTF_END_RAW },
.expected_prog_load_failure = true,
},
{
.descr = "func_type (Incorrect bpf_func_info.insn_off)",
.raw_types = {
BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_TYPE_INT_ENC(NAME_TBD, 0, 0, 32, 4), /* [2] */
BTF_FUNC_PROTO_ENC(1, 2), /* [3] */
BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1),
BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 2),
BTF_FUNC_PROTO_ENC(1, 2), /* [4] */
BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 2),
BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1),
BTF_FUNC_ENC(NAME_TBD, 3), /* [5] */
BTF_FUNC_ENC(NAME_TBD, 4), /* [6] */
BTF_END_RAW,
},
.str_sec = "\0int\0unsigned int\0a\0b\0c\0d\0funcA\0funcB",
.str_sec_size = sizeof("\0int\0unsigned int\0a\0b\0c\0d\0funcA\0funcB"),
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
BPF_MOV64_IMM(BPF_REG_0, 2),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
.func_info = { {0, 5}, {2, 6} },
.func_info_rec_size = 8,
.func_info_cnt = 2,
.line_info = { BTF_END_RAW },
.expected_prog_load_failure = true,
},
{
.descr = "line_info (No subprog)",
.raw_types = {
BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_END_RAW,
},
BTF_STR_SEC("\0int\0int a=1;\0int b=2;\0return a + b;\0return a + b;"),
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_MOV64_IMM(BPF_REG_1, 2),
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
.func_info_cnt = 0,
.line_info = {
BPF_LINE_INFO_ENC(0, 0, NAME_TBD, 1, 10),
BPF_LINE_INFO_ENC(1, 0, NAME_TBD, 2, 9),
BPF_LINE_INFO_ENC(2, 0, NAME_TBD, 3, 8),
BPF_LINE_INFO_ENC(3, 0, NAME_TBD, 4, 7),
BTF_END_RAW,
},
.line_info_rec_size = sizeof(struct bpf_line_info),
.nr_jited_ksyms = 1,
},
{
.descr = "line_info (No subprog. insn_off >= prog->len)",
.raw_types = {
BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_END_RAW,
},
BTF_STR_SEC("\0int\0int a=1;\0int b=2;\0return a + b;\0return a + b;"),
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_MOV64_IMM(BPF_REG_1, 2),
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
.func_info_cnt = 0,
.line_info = {
BPF_LINE_INFO_ENC(0, 0, NAME_TBD, 1, 10),
BPF_LINE_INFO_ENC(1, 0, NAME_TBD, 2, 9),
BPF_LINE_INFO_ENC(2, 0, NAME_TBD, 3, 8),
BPF_LINE_INFO_ENC(3, 0, NAME_TBD, 4, 7),
BPF_LINE_INFO_ENC(4, 0, 0, 5, 6),
BTF_END_RAW,
},
.line_info_rec_size = sizeof(struct bpf_line_info),
.nr_jited_ksyms = 1,
.err_str = "line_info[4].insn_off",
.expected_prog_load_failure = true,
},
{
.descr = "line_info (Zero bpf insn code)",
.raw_types = {
BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_TYPE_INT_ENC(NAME_TBD, 0, 0, 64, 8), /* [2] */
BTF_TYPEDEF_ENC(NAME_TBD, 2), /* [3] */
BTF_END_RAW,
},
BTF_STR_SEC("\0int\0unsigned long\0u64\0u64 a=1;\0return a;"),
.insns = {
BPF_LD_IMM64(BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
.func_info_cnt = 0,
.line_info = {
BPF_LINE_INFO_ENC(0, 0, NAME_TBD, 1, 10),
BPF_LINE_INFO_ENC(1, 0, 0, 2, 9),
BPF_LINE_INFO_ENC(2, 0, NAME_TBD, 3, 8),
BTF_END_RAW,
},
.line_info_rec_size = sizeof(struct bpf_line_info),
.nr_jited_ksyms = 1,
.err_str = "Invalid insn code at line_info[1]",
.expected_prog_load_failure = true,
},
{
.descr = "line_info (No subprog. zero tailing line_info",
.raw_types = {
BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_END_RAW,
},
BTF_STR_SEC("\0int\0int a=1;\0int b=2;\0return a + b;\0return a + b;"),
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_MOV64_IMM(BPF_REG_1, 2),
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
.func_info_cnt = 0,
.line_info = {
BPF_LINE_INFO_ENC(0, 0, NAME_TBD, 1, 10), 0,
BPF_LINE_INFO_ENC(1, 0, NAME_TBD, 2, 9), 0,
BPF_LINE_INFO_ENC(2, 0, NAME_TBD, 3, 8), 0,
BPF_LINE_INFO_ENC(3, 0, NAME_TBD, 4, 7), 0,
BTF_END_RAW,
},
.line_info_rec_size = sizeof(struct bpf_line_info) + sizeof(__u32),
.nr_jited_ksyms = 1,
},
{
.descr = "line_info (No subprog. nonzero tailing line_info)",
.raw_types = {
BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_END_RAW,
},
BTF_STR_SEC("\0int\0int a=1;\0int b=2;\0return a + b;\0return a + b;"),
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_MOV64_IMM(BPF_REG_1, 2),
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
.func_info_cnt = 0,
.line_info = {
BPF_LINE_INFO_ENC(0, 0, NAME_TBD, 1, 10), 0,
BPF_LINE_INFO_ENC(1, 0, NAME_TBD, 2, 9), 0,
BPF_LINE_INFO_ENC(2, 0, NAME_TBD, 3, 8), 0,
BPF_LINE_INFO_ENC(3, 0, NAME_TBD, 4, 7), 1,
BTF_END_RAW,
},
.line_info_rec_size = sizeof(struct bpf_line_info) + sizeof(__u32),
.nr_jited_ksyms = 1,
.err_str = "nonzero tailing record in line_info",
.expected_prog_load_failure = true,
},
{
.descr = "line_info (subprog)",
.raw_types = {
BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_END_RAW,
},
BTF_STR_SEC("\0int\0int a=1+1;\0return func(a);\0b+=1;\0return b;"),
.insns = {
BPF_MOV64_IMM(BPF_REG_2, 1),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
BPF_CALL_REL(1),
BPF_EXIT_INSN(),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
.func_info_cnt = 0,
.line_info = {
BPF_LINE_INFO_ENC(0, 0, NAME_TBD, 1, 10),
BPF_LINE_INFO_ENC(2, 0, NAME_TBD, 2, 9),
BPF_LINE_INFO_ENC(5, 0, NAME_TBD, 3, 8),
BPF_LINE_INFO_ENC(7, 0, NAME_TBD, 4, 7),
BTF_END_RAW,
},
.line_info_rec_size = sizeof(struct bpf_line_info),
.nr_jited_ksyms = 2,
},
{
.descr = "line_info (subprog + func_info)",
.raw_types = {
BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_FUNC_PROTO_ENC(1, 1), /* [2] */
BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1),
BTF_FUNC_ENC(NAME_TBD, 2), /* [3] */
BTF_FUNC_ENC(NAME_TBD, 2), /* [4] */
BTF_END_RAW,
},
BTF_STR_SEC("\0int\0x\0sub\0main\0int a=1+1;\0return func(a);\0b+=1;\0return b;"),
.insns = {
BPF_MOV64_IMM(BPF_REG_2, 1),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
BPF_CALL_REL(1),
BPF_EXIT_INSN(),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
.func_info_cnt = 2,
.func_info_rec_size = 8,
.func_info = { {0, 4}, {5, 3} },
.line_info = {
BPF_LINE_INFO_ENC(0, 0, NAME_TBD, 1, 10),
BPF_LINE_INFO_ENC(2, 0, NAME_TBD, 2, 9),
BPF_LINE_INFO_ENC(5, 0, NAME_TBD, 3, 8),
BPF_LINE_INFO_ENC(7, 0, NAME_TBD, 4, 7),
BTF_END_RAW,
},
.line_info_rec_size = sizeof(struct bpf_line_info),
.nr_jited_ksyms = 2,
},
{
.descr = "line_info (subprog. missing 1st func line info)",
.raw_types = {
BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_END_RAW,
},
BTF_STR_SEC("\0int\0int a=1+1;\0return func(a);\0b+=1;\0return b;"),
.insns = {
BPF_MOV64_IMM(BPF_REG_2, 1),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
BPF_CALL_REL(1),
BPF_EXIT_INSN(),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
.func_info_cnt = 0,
.line_info = {
BPF_LINE_INFO_ENC(1, 0, NAME_TBD, 1, 10),
BPF_LINE_INFO_ENC(2, 0, NAME_TBD, 2, 9),
BPF_LINE_INFO_ENC(5, 0, NAME_TBD, 3, 8),
BPF_LINE_INFO_ENC(7, 0, NAME_TBD, 4, 7),
BTF_END_RAW,
},
.line_info_rec_size = sizeof(struct bpf_line_info),
.nr_jited_ksyms = 2,
.err_str = "missing bpf_line_info for func#0",
.expected_prog_load_failure = true,
},
{
.descr = "line_info (subprog. missing 2nd func line info)",
.raw_types = {
BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_END_RAW,
},
BTF_STR_SEC("\0int\0int a=1+1;\0return func(a);\0b+=1;\0return b;"),
.insns = {
BPF_MOV64_IMM(BPF_REG_2, 1),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
BPF_CALL_REL(1),
BPF_EXIT_INSN(),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
.func_info_cnt = 0,
.line_info = {
BPF_LINE_INFO_ENC(0, 0, NAME_TBD, 1, 10),
BPF_LINE_INFO_ENC(2, 0, NAME_TBD, 2, 9),
BPF_LINE_INFO_ENC(6, 0, NAME_TBD, 3, 8),
BPF_LINE_INFO_ENC(7, 0, NAME_TBD, 4, 7),
BTF_END_RAW,
},
.line_info_rec_size = sizeof(struct bpf_line_info),
.nr_jited_ksyms = 2,
.err_str = "missing bpf_line_info for func#1",
.expected_prog_load_failure = true,
},
{
.descr = "line_info (subprog. unordered insn offset)",
.raw_types = {
BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_END_RAW,
},
BTF_STR_SEC("\0int\0int a=1+1;\0return func(a);\0b+=1;\0return b;"),
.insns = {
BPF_MOV64_IMM(BPF_REG_2, 1),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
BPF_CALL_REL(1),
BPF_EXIT_INSN(),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
.func_info_cnt = 0,
.line_info = {
BPF_LINE_INFO_ENC(0, 0, NAME_TBD, 1, 10),
BPF_LINE_INFO_ENC(5, 0, NAME_TBD, 2, 9),
BPF_LINE_INFO_ENC(2, 0, NAME_TBD, 3, 8),
BPF_LINE_INFO_ENC(7, 0, NAME_TBD, 4, 7),
BTF_END_RAW,
},
.line_info_rec_size = sizeof(struct bpf_line_info),
.nr_jited_ksyms = 2,
.err_str = "Invalid line_info[2].insn_off",
.expected_prog_load_failure = true,
},
{
.descr = "line_info (dead start)",
.raw_types = {
BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_END_RAW,
},
BTF_STR_SEC("\0int\0/* dead jmp */\0int a=1;\0int b=2;\0return a + b;\0return a + b;"),
.insns = {
BPF_JMP_IMM(BPF_JA, 0, 0, 0),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_MOV64_IMM(BPF_REG_1, 2),
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
.func_info_cnt = 0,
.line_info = {
BPF_LINE_INFO_ENC(0, 0, NAME_TBD, 1, 10),
BPF_LINE_INFO_ENC(1, 0, NAME_TBD, 2, 9),
BPF_LINE_INFO_ENC(2, 0, NAME_TBD, 3, 8),
BPF_LINE_INFO_ENC(3, 0, NAME_TBD, 4, 7),
BPF_LINE_INFO_ENC(4, 0, NAME_TBD, 5, 6),
BTF_END_RAW,
},
.line_info_rec_size = sizeof(struct bpf_line_info),
.nr_jited_ksyms = 1,
.dead_code_cnt = 1,
.dead_code_mask = 0x01,
},
{
.descr = "line_info (dead end)",
.raw_types = {
BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_END_RAW,
},
BTF_STR_SEC("\0int\0int a=1;\0int b=2;\0return a + b;\0/* dead jmp */\0return a + b;\0/* dead exit */"),
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_MOV64_IMM(BPF_REG_1, 2),
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 10, 1),
BPF_EXIT_INSN(),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
.func_info_cnt = 0,
.line_info = {
BPF_LINE_INFO_ENC(0, 0, NAME_TBD, 1, 12),
BPF_LINE_INFO_ENC(1, 0, NAME_TBD, 2, 11),
BPF_LINE_INFO_ENC(2, 0, NAME_TBD, 3, 10),
BPF_LINE_INFO_ENC(3, 0, NAME_TBD, 4, 9),
BPF_LINE_INFO_ENC(4, 0, NAME_TBD, 5, 8),
BPF_LINE_INFO_ENC(5, 0, NAME_TBD, 6, 7),
BTF_END_RAW,
},
.line_info_rec_size = sizeof(struct bpf_line_info),
.nr_jited_ksyms = 1,
.dead_code_cnt = 2,
.dead_code_mask = 0x28,
},
{
.descr = "line_info (dead code + subprog + func_info)",
.raw_types = {
BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_FUNC_PROTO_ENC(1, 1), /* [2] */
BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1),
BTF_FUNC_ENC(NAME_TBD, 2), /* [3] */
BTF_FUNC_ENC(NAME_TBD, 2), /* [4] */
BTF_END_RAW,
},
BTF_STR_SEC("\0int\0x\0sub\0main\0int a=1+1;\0/* dead jmp */"
"\0/* dead */\0/* dead */\0/* dead */\0/* dead */"
"\0/* dead */\0/* dead */\0/* dead */\0/* dead */"
"\0return func(a);\0b+=1;\0return b;"),
.insns = {
BPF_MOV64_IMM(BPF_REG_2, 1),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 8),
BPF_MOV64_IMM(BPF_REG_2, 1),
BPF_MOV64_IMM(BPF_REG_2, 1),
BPF_MOV64_IMM(BPF_REG_2, 1),
BPF_MOV64_IMM(BPF_REG_2, 1),
BPF_MOV64_IMM(BPF_REG_2, 1),
BPF_MOV64_IMM(BPF_REG_2, 1),
BPF_MOV64_IMM(BPF_REG_2, 1),
BPF_MOV64_IMM(BPF_REG_2, 1),
BPF_CALL_REL(1),
BPF_EXIT_INSN(),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
.func_info_cnt = 2,
.func_info_rec_size = 8,
.func_info = { {0, 4}, {14, 3} },
.line_info = {
BPF_LINE_INFO_ENC(0, 0, NAME_TBD, 1, 10),
BPF_LINE_INFO_ENC(3, 0, NAME_TBD, 1, 10),
BPF_LINE_INFO_ENC(4, 0, NAME_TBD, 1, 10),
BPF_LINE_INFO_ENC(5, 0, NAME_TBD, 1, 10),
BPF_LINE_INFO_ENC(6, 0, NAME_TBD, 1, 10),
BPF_LINE_INFO_ENC(7, 0, NAME_TBD, 1, 10),
BPF_LINE_INFO_ENC(8, 0, NAME_TBD, 1, 10),
BPF_LINE_INFO_ENC(9, 0, NAME_TBD, 1, 10),
BPF_LINE_INFO_ENC(10, 0, NAME_TBD, 1, 10),
BPF_LINE_INFO_ENC(11, 0, NAME_TBD, 2, 9),
BPF_LINE_INFO_ENC(12, 0, NAME_TBD, 2, 9),
BPF_LINE_INFO_ENC(14, 0, NAME_TBD, 3, 8),
BPF_LINE_INFO_ENC(16, 0, NAME_TBD, 4, 7),
BTF_END_RAW,
},
.line_info_rec_size = sizeof(struct bpf_line_info),
.nr_jited_ksyms = 2,
.dead_code_cnt = 9,
.dead_code_mask = 0x3fe,
},
{
.descr = "line_info (dead subprog)",
.raw_types = {
BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_FUNC_PROTO_ENC(1, 1), /* [2] */
BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1),
BTF_FUNC_ENC(NAME_TBD, 2), /* [3] */
BTF_FUNC_ENC(NAME_TBD, 2), /* [4] */
BTF_FUNC_ENC(NAME_TBD, 2), /* [5] */
BTF_END_RAW,
},
BTF_STR_SEC("\0int\0x\0dead\0main\0func\0int a=1+1;\0/* live call */"
"\0return 0;\0return 0;\0/* dead */\0/* dead */"
"\0/* dead */\0return bla + 1;\0return bla + 1;"
"\0return bla + 1;\0return func(a);\0b+=1;\0return b;"),
.insns = {
BPF_MOV64_IMM(BPF_REG_2, 1),
BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1),
BPF_CALL_REL(3),
BPF_CALL_REL(5),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_CALL_REL(1),
BPF_EXIT_INSN(),
BPF_MOV64_REG(BPF_REG_0, 2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
.func_info_cnt = 3,
.func_info_rec_size = 8,
.func_info = { {0, 4}, {6, 3}, {9, 5} },
.line_info = {
BPF_LINE_INFO_ENC(0, 0, NAME_TBD, 1, 10),
BPF_LINE_INFO_ENC(3, 0, NAME_TBD, 1, 10),
BPF_LINE_INFO_ENC(4, 0, NAME_TBD, 1, 10),
BPF_LINE_INFO_ENC(5, 0, NAME_TBD, 1, 10),
BPF_LINE_INFO_ENC(6, 0, NAME_TBD, 1, 10),
BPF_LINE_INFO_ENC(7, 0, NAME_TBD, 1, 10),
BPF_LINE_INFO_ENC(8, 0, NAME_TBD, 1, 10),
BPF_LINE_INFO_ENC(9, 0, NAME_TBD, 1, 10),
BPF_LINE_INFO_ENC(10, 0, NAME_TBD, 1, 10),
BPF_LINE_INFO_ENC(11, 0, NAME_TBD, 2, 9),
BTF_END_RAW,
},
.line_info_rec_size = sizeof(struct bpf_line_info),
.nr_jited_ksyms = 2,
.dead_code_cnt = 3,
.dead_code_mask = 0x70,
.dead_func_cnt = 1,
.dead_func_mask = 0x2,
},
{
.descr = "line_info (dead last subprog)",
.raw_types = {
BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_FUNC_PROTO_ENC(1, 1), /* [2] */
BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1),
BTF_FUNC_ENC(NAME_TBD, 2), /* [3] */
		BTF_FUNC_ENC(NAME_TBD, 2), /* [4] */
BTF_END_RAW,
},
BTF_STR_SEC("\0int\0x\0dead\0main\0int a=1+1;\0/* live call */"
"\0return 0;\0/* dead */\0/* dead */"),
.insns = {
BPF_MOV64_IMM(BPF_REG_2, 1),
BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1),
BPF_CALL_REL(2),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
.func_info_cnt = 2,
.func_info_rec_size = 8,
.func_info = { {0, 4}, {5, 3} },
.line_info = {
BPF_LINE_INFO_ENC(0, 0, NAME_TBD, 1, 10),
BPF_LINE_INFO_ENC(3, 0, NAME_TBD, 1, 10),
BPF_LINE_INFO_ENC(4, 0, NAME_TBD, 1, 10),
BPF_LINE_INFO_ENC(5, 0, NAME_TBD, 1, 10),
BPF_LINE_INFO_ENC(6, 0, NAME_TBD, 1, 10),
BTF_END_RAW,
},
.line_info_rec_size = sizeof(struct bpf_line_info),
.nr_jited_ksyms = 1,
.dead_code_cnt = 2,
.dead_code_mask = 0x18,
.dead_func_cnt = 1,
.dead_func_mask = 0x2,
},
{
.descr = "line_info (dead subprog + dead start)",
.raw_types = {
BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_FUNC_PROTO_ENC(1, 1), /* [2] */
BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1),
BTF_FUNC_ENC(NAME_TBD, 2), /* [3] */
BTF_FUNC_ENC(NAME_TBD, 2), /* [4] */
BTF_FUNC_ENC(NAME_TBD, 2), /* [5] */
BTF_END_RAW,
},
BTF_STR_SEC("\0int\0x\0dead\0main\0func\0int a=1+1;\0/* dead */"
"\0return 0;\0return 0;\0return 0;"
"\0/* dead */\0/* dead */\0/* dead */\0/* dead */"
"\0return b + 1;\0return b + 1;\0return b + 1;"),
.insns = {
BPF_JMP_IMM(BPF_JA, 0, 0, 0),
BPF_MOV64_IMM(BPF_REG_2, 1),
BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1),
BPF_CALL_REL(3),
BPF_CALL_REL(5),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_CALL_REL(1),
BPF_EXIT_INSN(),
BPF_JMP_IMM(BPF_JA, 0, 0, 0),
BPF_MOV64_REG(BPF_REG_0, 2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
.func_info_cnt = 3,
.func_info_rec_size = 8,
.func_info = { {0, 4}, {7, 3}, {10, 5} },
.line_info = {
BPF_LINE_INFO_ENC(0, 0, NAME_TBD, 1, 10),
BPF_LINE_INFO_ENC(3, 0, NAME_TBD, 1, 10),
BPF_LINE_INFO_ENC(4, 0, NAME_TBD, 1, 10),
BPF_LINE_INFO_ENC(5, 0, NAME_TBD, 1, 10),
BPF_LINE_INFO_ENC(6, 0, NAME_TBD, 1, 10),
BPF_LINE_INFO_ENC(7, 0, NAME_TBD, 1, 10),
BPF_LINE_INFO_ENC(8, 0, NAME_TBD, 1, 10),
BPF_LINE_INFO_ENC(9, 0, NAME_TBD, 1, 10),
BPF_LINE_INFO_ENC(10, 0, NAME_TBD, 1, 10),
BPF_LINE_INFO_ENC(11, 0, NAME_TBD, 2, 9),
BPF_LINE_INFO_ENC(12, 0, NAME_TBD, 2, 9),
BPF_LINE_INFO_ENC(13, 0, NAME_TBD, 2, 9),
BTF_END_RAW,
},
.line_info_rec_size = sizeof(struct bpf_line_info),
.nr_jited_ksyms = 2,
.dead_code_cnt = 5,
.dead_code_mask = 0x1e2,
.dead_func_cnt = 1,
.dead_func_mask = 0x2,
},
{
.descr = "line_info (dead subprog + dead start w/ move)",
.raw_types = {
BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_FUNC_PROTO_ENC(1, 1), /* [2] */
BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1),
BTF_FUNC_ENC(NAME_TBD, 2), /* [3] */
BTF_FUNC_ENC(NAME_TBD, 2), /* [4] */
BTF_FUNC_ENC(NAME_TBD, 2), /* [5] */
BTF_END_RAW,
},
BTF_STR_SEC("\0int\0x\0dead\0main\0func\0int a=1+1;\0/* live call */"
"\0return 0;\0return 0;\0/* dead */\0/* dead */"
"\0/* dead */\0return bla + 1;\0return bla + 1;"
"\0return bla + 1;\0return func(a);\0b+=1;\0return b;"),
.insns = {
BPF_MOV64_IMM(BPF_REG_2, 1),
BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1),
BPF_CALL_REL(3),
BPF_CALL_REL(5),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_CALL_REL(1),
BPF_EXIT_INSN(),
BPF_JMP_IMM(BPF_JA, 0, 0, 0),
BPF_MOV64_REG(BPF_REG_0, 2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
.func_info_cnt = 3,
.func_info_rec_size = 8,
.func_info = { {0, 4}, {6, 3}, {9, 5} },
.line_info = {
BPF_LINE_INFO_ENC(0, 0, NAME_TBD, 1, 10),
BPF_LINE_INFO_ENC(3, 0, NAME_TBD, 1, 10),
BPF_LINE_INFO_ENC(4, 0, NAME_TBD, 1, 10),
BPF_LINE_INFO_ENC(5, 0, NAME_TBD, 1, 10),
BPF_LINE_INFO_ENC(6, 0, NAME_TBD, 1, 10),
BPF_LINE_INFO_ENC(7, 0, NAME_TBD, 1, 10),
BPF_LINE_INFO_ENC(8, 0, NAME_TBD, 1, 10),
BPF_LINE_INFO_ENC(9, 0, NAME_TBD, 1, 10),
BPF_LINE_INFO_ENC(11, 0, NAME_TBD, 1, 10),
BPF_LINE_INFO_ENC(12, 0, NAME_TBD, 2, 9),
BTF_END_RAW,
},
.line_info_rec_size = sizeof(struct bpf_line_info),
.nr_jited_ksyms = 2,
.dead_code_cnt = 3,
.dead_code_mask = 0x70,
.dead_func_cnt = 1,
.dead_func_mask = 0x2,
},
{
.descr = "line_info (dead end + subprog start w/ no linfo)",
.raw_types = {
BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_FUNC_PROTO_ENC(1, 1), /* [2] */
BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1),
BTF_FUNC_ENC(NAME_TBD, 2), /* [3] */
BTF_FUNC_ENC(NAME_TBD, 2), /* [4] */
BTF_END_RAW,
},
BTF_STR_SEC("\0int\0x\0main\0func\0/* main linfo */\0/* func linfo */"),
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 1, 3),
BPF_CALL_REL(3),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
BPF_EXIT_INSN(),
BPF_JMP_IMM(BPF_JA, 0, 0, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
.func_info_cnt = 2,
.func_info_rec_size = 8,
.func_info = { {0, 3}, {6, 4}, },
.line_info = {
BPF_LINE_INFO_ENC(0, 0, NAME_TBD, 1, 10),
BPF_LINE_INFO_ENC(6, 0, NAME_TBD, 1, 10),
BTF_END_RAW,
},
.line_info_rec_size = sizeof(struct bpf_line_info),
.nr_jited_ksyms = 2,
},
};
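/* Count the instructions up to and including the last non-zero one, so the
 * zero-initialized tail of the fixed-size insns[] array is not loaded.
 */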
static size_t probe_prog_length(const struct bpf_insn *fp)
{
size_t len;
for (len = MAX_INSNS - 1; len > 0; --len)
if (fp[len].code != 0 || fp[len].imm != 0)
break;
return len + 1;
}
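/* Return a malloc'ed copy of @raw_u32 with every NAME_TBD placeholder
 * replaced by the offset of the next string in @str, or an ERR_PTR on
 * failure.  A zero-sized section yields NULL with *ret_size == 0.
 */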
static __u32 *patch_name_tbd(const __u32 *raw_u32,
const char *str, __u32 str_off,
unsigned int str_sec_size,
unsigned int *ret_size)
{
int i, raw_u32_size = get_raw_sec_size(raw_u32);
const char *end_str = str + str_sec_size;
const char *next_str = str + str_off;
__u32 *new_u32 = NULL;
if (raw_u32_size == -1)
return ERR_PTR(-EINVAL);
if (!raw_u32_size) {
*ret_size = 0;
return NULL;
}
new_u32 = malloc(raw_u32_size);
if (!new_u32)
return ERR_PTR(-ENOMEM);
for (i = 0; i < raw_u32_size / sizeof(raw_u32[0]); i++) {
if (raw_u32[i] == NAME_TBD) {
next_str = get_next_str(next_str, end_str);
if (CHECK(!next_str, "Error in getting next_str\n")) {
free(new_u32);
return ERR_PTR(-EINVAL);
}
new_u32[i] = next_str - str;
next_str += strlen(next_str);
} else {
new_u32[i] = raw_u32[i];
}
}
*ret_size = raw_u32_size;
return new_u32;
}
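/* Validate func_info with the usual two-step pattern: one
 * bpf_prog_get_info_by_fd() call to learn the record count and size,
 * then a second call with a buffer to fetch and check the records.
 */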
static int test_get_finfo(const struct prog_info_raw_test *test,
int prog_fd)
{
struct bpf_prog_info info = {};
struct bpf_func_info *finfo;
__u32 info_len, rec_size, i;
void *func_info = NULL;
__u32 nr_func_info;
int err;
/* get necessary lens */
info_len = sizeof(struct bpf_prog_info);
err = bpf_prog_get_info_by_fd(prog_fd, &info, &info_len);
if (CHECK(err < 0, "invalid get info (1st) errno:%d", errno)) {
fprintf(stderr, "%s\n", btf_log_buf);
return -1;
}
nr_func_info = test->func_info_cnt - test->dead_func_cnt;
if (CHECK(info.nr_func_info != nr_func_info,
"incorrect info.nr_func_info (1st) %d",
info.nr_func_info)) {
return -1;
}
rec_size = info.func_info_rec_size;
if (CHECK(rec_size != sizeof(struct bpf_func_info),
"incorrect info.func_info_rec_size (1st) %d", rec_size)) {
return -1;
}
if (!info.nr_func_info)
return 0;
func_info = malloc(info.nr_func_info * rec_size);
if (CHECK(!func_info, "out of memory"))
return -1;
/* reset info to only retrieve func_info related data */
memset(&info, 0, sizeof(info));
info.nr_func_info = nr_func_info;
info.func_info_rec_size = rec_size;
info.func_info = ptr_to_u64(func_info);
err = bpf_prog_get_info_by_fd(prog_fd, &info, &info_len);
if (CHECK(err < 0, "invalid get info (2nd) errno:%d", errno)) {
fprintf(stderr, "%s\n", btf_log_buf);
err = -1;
goto done;
}
if (CHECK(info.nr_func_info != nr_func_info,
"incorrect info.nr_func_info (2nd) %d",
info.nr_func_info)) {
err = -1;
goto done;
}
if (CHECK(info.func_info_rec_size != rec_size,
"incorrect info.func_info_rec_size (2nd) %d",
info.func_info_rec_size)) {
err = -1;
goto done;
}
finfo = func_info;
for (i = 0; i < nr_func_info; i++) {
if (test->dead_func_mask & (1 << i))
continue;
if (CHECK(finfo->type_id != test->func_info[i][1],
"incorrect func_type %u expected %u",
finfo->type_id, test->func_info[i][1])) {
err = -1;
goto done;
}
finfo = (void *)finfo + rec_size;
}
err = 0;
done:
free(func_info);
return err;
}
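/* Same two-step retrieval for line_info, plus the jited_line_info,
 * jited_ksyms and jited_func_lens arrays when the prog was jited.
 */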
static int test_get_linfo(const struct prog_info_raw_test *test,
const void *patched_linfo,
__u32 cnt, int prog_fd)
{
__u32 i, info_len, nr_jited_ksyms, nr_jited_func_lens;
__u64 *jited_linfo = NULL, *jited_ksyms = NULL;
__u32 rec_size, jited_rec_size, jited_cnt;
struct bpf_line_info *linfo = NULL;
__u32 cur_func_len, ksyms_found;
struct bpf_prog_info info = {};
__u32 *jited_func_lens = NULL;
__u64 cur_func_ksyms;
__u32 dead_insns;
int err;
jited_cnt = cnt;
rec_size = sizeof(*linfo);
jited_rec_size = sizeof(*jited_linfo);
if (test->nr_jited_ksyms)
nr_jited_ksyms = test->nr_jited_ksyms;
else
nr_jited_ksyms = test->func_info_cnt - test->dead_func_cnt;
nr_jited_func_lens = nr_jited_ksyms;
info_len = sizeof(struct bpf_prog_info);
err = bpf_prog_get_info_by_fd(prog_fd, &info, &info_len);
if (CHECK(err < 0, "err:%d errno:%d", err, errno)) {
err = -1;
goto done;
}
if (!info.jited_prog_len) {
/* prog is not jited */
jited_cnt = 0;
nr_jited_ksyms = 1;
nr_jited_func_lens = 1;
}
if (CHECK(info.nr_line_info != cnt ||
info.nr_jited_line_info != jited_cnt ||
info.nr_jited_ksyms != nr_jited_ksyms ||
info.nr_jited_func_lens != nr_jited_func_lens ||
(!info.nr_line_info && info.nr_jited_line_info),
"info: nr_line_info:%u(expected:%u) nr_jited_line_info:%u(expected:%u) nr_jited_ksyms:%u(expected:%u) nr_jited_func_lens:%u(expected:%u)",
info.nr_line_info, cnt,
info.nr_jited_line_info, jited_cnt,
info.nr_jited_ksyms, nr_jited_ksyms,
info.nr_jited_func_lens, nr_jited_func_lens)) {
err = -1;
goto done;
}
if (CHECK(info.line_info_rec_size != sizeof(struct bpf_line_info) ||
info.jited_line_info_rec_size != sizeof(__u64),
"info: line_info_rec_size:%u(userspace expected:%u) jited_line_info_rec_size:%u(userspace expected:%u)",
info.line_info_rec_size, rec_size,
info.jited_line_info_rec_size, jited_rec_size)) {
err = -1;
goto done;
}
if (!cnt)
return 0;
rec_size = info.line_info_rec_size;
jited_rec_size = info.jited_line_info_rec_size;
memset(&info, 0, sizeof(info));
linfo = calloc(cnt, rec_size);
if (CHECK(!linfo, "!linfo")) {
err = -1;
goto done;
}
info.nr_line_info = cnt;
info.line_info_rec_size = rec_size;
info.line_info = ptr_to_u64(linfo);
if (jited_cnt) {
jited_linfo = calloc(jited_cnt, jited_rec_size);
jited_ksyms = calloc(nr_jited_ksyms, sizeof(*jited_ksyms));
jited_func_lens = calloc(nr_jited_func_lens,
sizeof(*jited_func_lens));
if (CHECK(!jited_linfo || !jited_ksyms || !jited_func_lens,
"jited_linfo:%p jited_ksyms:%p jited_func_lens:%p",
jited_linfo, jited_ksyms, jited_func_lens)) {
err = -1;
goto done;
}
info.nr_jited_line_info = jited_cnt;
info.jited_line_info_rec_size = jited_rec_size;
info.jited_line_info = ptr_to_u64(jited_linfo);
info.nr_jited_ksyms = nr_jited_ksyms;
info.jited_ksyms = ptr_to_u64(jited_ksyms);
info.nr_jited_func_lens = nr_jited_func_lens;
info.jited_func_lens = ptr_to_u64(jited_func_lens);
}
err = bpf_prog_get_info_by_fd(prog_fd, &info, &info_len);
/*
* Only recheck the info.*line_info* fields.
* Other fields are not the concern of this test.
*/
if (CHECK(err < 0 ||
info.nr_line_info != cnt ||
(jited_cnt && !info.jited_line_info) ||
info.nr_jited_line_info != jited_cnt ||
info.line_info_rec_size != rec_size ||
info.jited_line_info_rec_size != jited_rec_size,
"err:%d errno:%d info: nr_line_info:%u(expected:%u) nr_jited_line_info:%u(expected:%u) line_info_rec_size:%u(expected:%u) jited_linfo_rec_size:%u(expected:%u) line_info:%p jited_line_info:%p",
err, errno,
info.nr_line_info, cnt,
info.nr_jited_line_info, jited_cnt,
info.line_info_rec_size, rec_size,
info.jited_line_info_rec_size, jited_rec_size,
(void *)(long)info.line_info,
(void *)(long)info.jited_line_info)) {
err = -1;
goto done;
}
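	/* walk the expected line_info, skipping entries the verifier removed
	 * as dead code, and check ordering and content of what survived
	 */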
dead_insns = 0;
while (test->dead_code_mask & (1 << dead_insns))
dead_insns++;
CHECK(linfo[0].insn_off, "linfo[0].insn_off:%u",
linfo[0].insn_off);
for (i = 1; i < cnt; i++) {
const struct bpf_line_info *expected_linfo;
while (test->dead_code_mask & (1 << (i + dead_insns)))
dead_insns++;
expected_linfo = patched_linfo +
((i + dead_insns) * test->line_info_rec_size);
if (CHECK(linfo[i].insn_off <= linfo[i - 1].insn_off,
"linfo[%u].insn_off:%u <= linfo[%u].insn_off:%u",
i, linfo[i].insn_off,
i - 1, linfo[i - 1].insn_off)) {
err = -1;
goto done;
}
if (CHECK(linfo[i].file_name_off != expected_linfo->file_name_off ||
linfo[i].line_off != expected_linfo->line_off ||
linfo[i].line_col != expected_linfo->line_col,
"linfo[%u] (%u, %u, %u) != (%u, %u, %u)", i,
linfo[i].file_name_off,
linfo[i].line_off,
linfo[i].line_col,
expected_linfo->file_name_off,
expected_linfo->line_off,
expected_linfo->line_col)) {
err = -1;
goto done;
}
}
if (!jited_cnt) {
fprintf(stderr, "not jited. skipping jited_line_info check. ");
err = 0;
goto done;
}
if (CHECK(jited_linfo[0] != jited_ksyms[0],
"jited_linfo[0]:%lx != jited_ksyms[0]:%lx",
(long)(jited_linfo[0]), (long)(jited_ksyms[0]))) {
err = -1;
goto done;
}
ksyms_found = 1;
cur_func_len = jited_func_lens[0];
cur_func_ksyms = jited_ksyms[0];
for (i = 1; i < jited_cnt; i++) {
if (ksyms_found < nr_jited_ksyms &&
jited_linfo[i] == jited_ksyms[ksyms_found]) {
cur_func_ksyms = jited_ksyms[ksyms_found];
			cur_func_len = jited_func_lens[ksyms_found];
ksyms_found++;
continue;
}
if (CHECK(jited_linfo[i] <= jited_linfo[i - 1],
"jited_linfo[%u]:%lx <= jited_linfo[%u]:%lx",
i, (long)jited_linfo[i],
i - 1, (long)(jited_linfo[i - 1]))) {
err = -1;
goto done;
}
if (CHECK(jited_linfo[i] - cur_func_ksyms > cur_func_len,
"jited_linfo[%u]:%lx - %lx > %u",
i, (long)jited_linfo[i], (long)cur_func_ksyms,
cur_func_len)) {
err = -1;
goto done;
}
}
if (CHECK(ksyms_found != nr_jited_ksyms,
"ksyms_found:%u != nr_jited_ksyms:%u",
ksyms_found, nr_jited_ksyms)) {
err = -1;
goto done;
}
err = 0;
done:
free(linfo);
free(jited_linfo);
free(jited_ksyms);
free(jited_func_lens);
return err;
}
static void do_test_info_raw(unsigned int test_num)
{
const struct prog_info_raw_test *test = &info_raw_tests[test_num - 1];
unsigned int raw_btf_size, linfo_str_off, linfo_size = 0;
int btf_fd = -1, prog_fd = -1, err = 0;
void *raw_btf, *patched_linfo = NULL;
const char *ret_next_str;
union bpf_attr attr = {};
if (!test__start_subtest(test->descr))
return;
raw_btf = btf_raw_create(&hdr_tmpl, test->raw_types,
test->str_sec, test->str_sec_size,
&raw_btf_size, &ret_next_str);
if (!raw_btf)
return;
*btf_log_buf = '\0';
btf_fd = load_raw_btf(raw_btf, raw_btf_size);
free(raw_btf);
if (CHECK(btf_fd < 0, "invalid btf_fd errno:%d", errno)) {
err = -1;
goto done;
}
if (*btf_log_buf && always_log)
fprintf(stderr, "\n%s", btf_log_buf);
*btf_log_buf = '\0';
linfo_str_off = ret_next_str - test->str_sec;
patched_linfo = patch_name_tbd(test->line_info,
test->str_sec, linfo_str_off,
test->str_sec_size, &linfo_size);
err = libbpf_get_error(patched_linfo);
if (err) {
fprintf(stderr, "error in creating raw bpf_line_info");
err = -1;
goto done;
}
attr.prog_type = test->prog_type;
attr.insns = ptr_to_u64(test->insns);
attr.insn_cnt = probe_prog_length(test->insns);
attr.license = ptr_to_u64("GPL");
attr.prog_btf_fd = btf_fd;
attr.func_info_rec_size = test->func_info_rec_size;
attr.func_info_cnt = test->func_info_cnt;
attr.func_info = ptr_to_u64(test->func_info);
attr.log_buf = ptr_to_u64(btf_log_buf);
attr.log_size = BTF_LOG_BUF_SIZE;
attr.log_level = 1;
if (linfo_size) {
attr.line_info_rec_size = test->line_info_rec_size;
attr.line_info = ptr_to_u64(patched_linfo);
attr.line_info_cnt = linfo_size / attr.line_info_rec_size;
}
prog_fd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
err = ((prog_fd < 0) != test->expected_prog_load_failure);
if (CHECK(err, "prog_fd:%d expected_prog_load_failure:%u errno:%d",
prog_fd, test->expected_prog_load_failure, errno) ||
CHECK(test->err_str && !strstr(btf_log_buf, test->err_str),
"expected err_str:%s", test->err_str)) {
err = -1;
goto done;
}
if (prog_fd < 0)
goto done;
err = test_get_finfo(test, prog_fd);
if (err)
goto done;
err = test_get_linfo(test, patched_linfo,
attr.line_info_cnt - test->dead_code_cnt,
prog_fd);
if (err)
goto done;
done:
if (*btf_log_buf && (err || always_log))
fprintf(stderr, "\n%s", btf_log_buf);
if (btf_fd >= 0)
close(btf_fd);
if (prog_fd >= 0)
close(prog_fd);
if (!libbpf_get_error(patched_linfo))
free(patched_linfo);
}
struct btf_raw_data {
__u32 raw_types[MAX_NR_RAW_U32];
const char *str_sec;
__u32 str_sec_size;
};
struct btf_dedup_test {
const char *descr;
struct btf_raw_data input;
struct btf_raw_data expect;
struct btf_dedup_opts opts;
};
static struct btf_dedup_test dedup_tests[] = {
{
.descr = "dedup: unused strings filtering",
.input = {
.raw_types = {
BTF_TYPE_INT_ENC(NAME_NTH(2), BTF_INT_SIGNED, 0, 32, 4),
BTF_TYPE_INT_ENC(NAME_NTH(5), BTF_INT_SIGNED, 0, 64, 8),
BTF_END_RAW,
},
BTF_STR_SEC("\0unused\0int\0foo\0bar\0long"),
},
.expect = {
.raw_types = {
BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_SIGNED, 0, 32, 4),
BTF_TYPE_INT_ENC(NAME_NTH(2), BTF_INT_SIGNED, 0, 64, 8),
BTF_END_RAW,
},
BTF_STR_SEC("\0int\0long"),
},
},
{
.descr = "dedup: strings deduplication",
.input = {
.raw_types = {
BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_SIGNED, 0, 32, 4),
BTF_TYPE_INT_ENC(NAME_NTH(2), BTF_INT_SIGNED, 0, 64, 8),
BTF_TYPE_INT_ENC(NAME_NTH(3), BTF_INT_SIGNED, 0, 32, 4),
BTF_TYPE_INT_ENC(NAME_NTH(4), BTF_INT_SIGNED, 0, 64, 8),
BTF_TYPE_INT_ENC(NAME_NTH(5), BTF_INT_SIGNED, 0, 32, 4),
BTF_END_RAW,
},
BTF_STR_SEC("\0int\0long int\0int\0long int\0int"),
},
.expect = {
.raw_types = {
BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_SIGNED, 0, 32, 4),
BTF_TYPE_INT_ENC(NAME_NTH(2), BTF_INT_SIGNED, 0, 64, 8),
BTF_END_RAW,
},
BTF_STR_SEC("\0int\0long int"),
},
},
{
.descr = "dedup: struct example #1",
/*
* struct s {
* struct s *next;
* const int *a;
* int b[16];
* int c;
* }
*/
.input = {
.raw_types = {
/* int */
BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_SIGNED, 0, 32, 4), /* [1] */
/* int[16] */
BTF_TYPE_ARRAY_ENC(1, 1, 16), /* [2] */
/* struct s { */
BTF_STRUCT_ENC(NAME_NTH(2), 5, 88), /* [3] */
BTF_MEMBER_ENC(NAME_NTH(3), 4, 0), /* struct s *next; */
BTF_MEMBER_ENC(NAME_NTH(4), 5, 64), /* const int *a; */
BTF_MEMBER_ENC(NAME_NTH(5), 2, 128), /* int b[16]; */
BTF_MEMBER_ENC(NAME_NTH(6), 1, 640), /* int c; */
BTF_MEMBER_ENC(NAME_NTH(8), 15, 672), /* float d; */
/* ptr -> [3] struct s */
BTF_PTR_ENC(3), /* [4] */
/* ptr -> [6] const int */
BTF_PTR_ENC(6), /* [5] */
/* const -> [1] int */
BTF_CONST_ENC(1), /* [6] */
/* tag -> [3] struct s */
BTF_DECL_TAG_ENC(NAME_NTH(2), 3, -1), /* [7] */
/* tag -> [3] struct s, member 1 */
BTF_DECL_TAG_ENC(NAME_NTH(2), 3, 1), /* [8] */
/* full copy of the above */
BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_SIGNED, 0, 32, 4), /* [9] */
BTF_TYPE_ARRAY_ENC(9, 9, 16), /* [10] */
BTF_STRUCT_ENC(NAME_NTH(2), 5, 88), /* [11] */
BTF_MEMBER_ENC(NAME_NTH(3), 12, 0),
BTF_MEMBER_ENC(NAME_NTH(4), 13, 64),
BTF_MEMBER_ENC(NAME_NTH(5), 10, 128),
BTF_MEMBER_ENC(NAME_NTH(6), 9, 640),
BTF_MEMBER_ENC(NAME_NTH(8), 15, 672),
BTF_PTR_ENC(11), /* [12] */
BTF_PTR_ENC(14), /* [13] */
BTF_CONST_ENC(9), /* [14] */
BTF_TYPE_FLOAT_ENC(NAME_NTH(7), 4), /* [15] */
BTF_DECL_TAG_ENC(NAME_NTH(2), 11, -1), /* [16] */
BTF_DECL_TAG_ENC(NAME_NTH(2), 11, 1), /* [17] */
BTF_END_RAW,
},
BTF_STR_SEC("\0int\0s\0next\0a\0b\0c\0float\0d"),
},
.expect = {
.raw_types = {
/* int */
BTF_TYPE_INT_ENC(NAME_NTH(5), BTF_INT_SIGNED, 0, 32, 4), /* [1] */
/* int[16] */
BTF_TYPE_ARRAY_ENC(1, 1, 16), /* [2] */
/* struct s { */
BTF_STRUCT_ENC(NAME_NTH(8), 5, 88), /* [3] */
BTF_MEMBER_ENC(NAME_NTH(7), 4, 0), /* struct s *next; */
BTF_MEMBER_ENC(NAME_NTH(1), 5, 64), /* const int *a; */
BTF_MEMBER_ENC(NAME_NTH(2), 2, 128), /* int b[16]; */
BTF_MEMBER_ENC(NAME_NTH(3), 1, 640), /* int c; */
BTF_MEMBER_ENC(NAME_NTH(4), 9, 672), /* float d; */
/* ptr -> [3] struct s */
BTF_PTR_ENC(3), /* [4] */
/* ptr -> [6] const int */
BTF_PTR_ENC(6), /* [5] */
/* const -> [1] int */
BTF_CONST_ENC(1), /* [6] */
BTF_DECL_TAG_ENC(NAME_NTH(2), 3, -1), /* [7] */
BTF_DECL_TAG_ENC(NAME_NTH(2), 3, 1), /* [8] */
BTF_TYPE_FLOAT_ENC(NAME_NTH(7), 4), /* [9] */
BTF_END_RAW,
},
BTF_STR_SEC("\0a\0b\0c\0d\0int\0float\0next\0s"),
},
},
{
.descr = "dedup: struct <-> fwd resolution w/ hash collision",
/*
* // CU 1:
* struct x;
* struct s {
* struct x *x;
* };
* // CU 2:
* struct x {};
* struct s {
* struct x *x;
* };
*/
.input = {
.raw_types = {
/* CU 1 */
BTF_FWD_ENC(NAME_TBD, 0 /* struct fwd */), /* [1] fwd x */
BTF_PTR_ENC(1), /* [2] ptr -> [1] */
BTF_STRUCT_ENC(NAME_TBD, 1, 8), /* [3] struct s */
BTF_MEMBER_ENC(NAME_TBD, 2, 0),
/* CU 2 */
BTF_STRUCT_ENC(NAME_TBD, 0, 0), /* [4] struct x */
BTF_PTR_ENC(4), /* [5] ptr -> [4] */
BTF_STRUCT_ENC(NAME_TBD, 1, 8), /* [6] struct s */
BTF_MEMBER_ENC(NAME_TBD, 5, 0),
BTF_END_RAW,
},
BTF_STR_SEC("\0x\0s\0x\0x\0s\0x\0"),
},
.expect = {
.raw_types = {
BTF_PTR_ENC(3), /* [1] ptr -> [3] */
BTF_STRUCT_ENC(NAME_TBD, 1, 8), /* [2] struct s */
BTF_MEMBER_ENC(NAME_TBD, 1, 0),
BTF_STRUCT_ENC(NAME_NTH(2), 0, 0), /* [3] struct x */
BTF_END_RAW,
},
BTF_STR_SEC("\0s\0x"),
},
.opts = {
.force_collisions = true, /* force hash collisions */
},
},
{
.descr = "dedup: void equiv check",
/*
* // CU 1:
* struct s {
* struct {} *x;
* };
* // CU 2:
* struct s {
* int *x;
* };
*/
.input = {
.raw_types = {
/* CU 1 */
BTF_STRUCT_ENC(0, 0, 1), /* [1] struct {} */
BTF_PTR_ENC(1), /* [2] ptr -> [1] */
BTF_STRUCT_ENC(NAME_NTH(1), 1, 8), /* [3] struct s */
BTF_MEMBER_ENC(NAME_NTH(2), 2, 0),
/* CU 2 */
BTF_PTR_ENC(0), /* [4] ptr -> void */
BTF_STRUCT_ENC(NAME_NTH(1), 1, 8), /* [5] struct s */
BTF_MEMBER_ENC(NAME_NTH(2), 4, 0),
BTF_END_RAW,
},
BTF_STR_SEC("\0s\0x"),
},
.expect = {
.raw_types = {
/* CU 1 */
BTF_STRUCT_ENC(0, 0, 1), /* [1] struct {} */
BTF_PTR_ENC(1), /* [2] ptr -> [1] */
BTF_STRUCT_ENC(NAME_NTH(1), 1, 8), /* [3] struct s */
BTF_MEMBER_ENC(NAME_NTH(2), 2, 0),
/* CU 2 */
BTF_PTR_ENC(0), /* [4] ptr -> void */
BTF_STRUCT_ENC(NAME_NTH(1), 1, 8), /* [5] struct s */
BTF_MEMBER_ENC(NAME_NTH(2), 4, 0),
BTF_END_RAW,
},
BTF_STR_SEC("\0s\0x"),
},
.opts = {
.force_collisions = true, /* force hash collisions */
},
},
{
.descr = "dedup: all possible kinds (no duplicates)",
.input = {
.raw_types = {
BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 8), /* [1] int */
BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_ENUM, 0, 2), 4), /* [2] enum */
BTF_ENUM_ENC(NAME_TBD, 0),
BTF_ENUM_ENC(NAME_TBD, 1),
BTF_FWD_ENC(NAME_TBD, 1 /* union kind_flag */), /* [3] fwd */
BTF_TYPE_ARRAY_ENC(2, 1, 7), /* [4] array */
BTF_STRUCT_ENC(NAME_TBD, 1, 4), /* [5] struct */
BTF_MEMBER_ENC(NAME_TBD, 1, 0),
BTF_UNION_ENC(NAME_TBD, 1, 4), /* [6] union */
BTF_MEMBER_ENC(NAME_TBD, 1, 0),
BTF_TYPEDEF_ENC(NAME_TBD, 1), /* [7] typedef */
BTF_PTR_ENC(0), /* [8] ptr */
BTF_CONST_ENC(8), /* [9] const */
BTF_VOLATILE_ENC(8), /* [10] volatile */
BTF_RESTRICT_ENC(8), /* [11] restrict */
BTF_FUNC_PROTO_ENC(1, 2), /* [12] func_proto */
BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1),
BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 18),
BTF_FUNC_ENC(NAME_TBD, 12), /* [13] func */
BTF_TYPE_FLOAT_ENC(NAME_TBD, 2), /* [14] float */
BTF_DECL_TAG_ENC(NAME_TBD, 13, -1), /* [15] decl_tag */
BTF_DECL_TAG_ENC(NAME_TBD, 13, 1), /* [16] decl_tag */
BTF_DECL_TAG_ENC(NAME_TBD, 7, -1), /* [17] decl_tag */
BTF_TYPE_TAG_ENC(NAME_TBD, 8), /* [18] type_tag */
BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_ENUM64, 0, 2), 8), /* [19] enum64 */
BTF_ENUM64_ENC(NAME_TBD, 0, 0),
BTF_ENUM64_ENC(NAME_TBD, 1, 1),
BTF_END_RAW,
},
BTF_STR_SEC("\0A\0B\0C\0D\0E\0F\0G\0H\0I\0J\0K\0L\0M\0N\0O\0P\0Q\0R\0S\0T\0U"),
},
.expect = {
.raw_types = {
BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 8), /* [1] int */
BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_ENUM, 0, 2), 4), /* [2] enum */
BTF_ENUM_ENC(NAME_TBD, 0),
BTF_ENUM_ENC(NAME_TBD, 1),
BTF_FWD_ENC(NAME_TBD, 1 /* union kind_flag */), /* [3] fwd */
BTF_TYPE_ARRAY_ENC(2, 1, 7), /* [4] array */
BTF_STRUCT_ENC(NAME_TBD, 1, 4), /* [5] struct */
BTF_MEMBER_ENC(NAME_TBD, 1, 0),
BTF_UNION_ENC(NAME_TBD, 1, 4), /* [6] union */
BTF_MEMBER_ENC(NAME_TBD, 1, 0),
BTF_TYPEDEF_ENC(NAME_TBD, 1), /* [7] typedef */
BTF_PTR_ENC(0), /* [8] ptr */
BTF_CONST_ENC(8), /* [9] const */
BTF_VOLATILE_ENC(8), /* [10] volatile */
BTF_RESTRICT_ENC(8), /* [11] restrict */
BTF_FUNC_PROTO_ENC(1, 2), /* [12] func_proto */
BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1),
BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 18),
BTF_FUNC_ENC(NAME_TBD, 12), /* [13] func */
BTF_TYPE_FLOAT_ENC(NAME_TBD, 2), /* [14] float */
BTF_DECL_TAG_ENC(NAME_TBD, 13, -1), /* [15] decl_tag */
BTF_DECL_TAG_ENC(NAME_TBD, 13, 1), /* [16] decl_tag */
BTF_DECL_TAG_ENC(NAME_TBD, 7, -1), /* [17] decl_tag */
BTF_TYPE_TAG_ENC(NAME_TBD, 8), /* [18] type_tag */
BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_ENUM64, 0, 2), 8), /* [19] enum64 */
BTF_ENUM64_ENC(NAME_TBD, 0, 0),
BTF_ENUM64_ENC(NAME_TBD, 1, 1),
BTF_END_RAW,
},
BTF_STR_SEC("\0A\0B\0C\0D\0E\0F\0G\0H\0I\0J\0K\0L\0M\0N\0O\0P\0Q\0R\0S\0T\0U"),
},
},
{
.descr = "dedup: no int/float duplicates",
.input = {
.raw_types = {
BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_SIGNED, 0, 32, 8),
/* different name */
BTF_TYPE_INT_ENC(NAME_NTH(2), BTF_INT_SIGNED, 0, 32, 8),
/* different encoding */
BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_CHAR, 0, 32, 8),
BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_BOOL, 0, 32, 8),
/* different bit offset */
BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_SIGNED, 8, 32, 8),
/* different bit size */
BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_SIGNED, 0, 27, 8),
/* different byte size */
BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_SIGNED, 0, 32, 4),
/* all allowed sizes */
BTF_TYPE_FLOAT_ENC(NAME_NTH(3), 2),
BTF_TYPE_FLOAT_ENC(NAME_NTH(3), 4),
BTF_TYPE_FLOAT_ENC(NAME_NTH(3), 8),
BTF_TYPE_FLOAT_ENC(NAME_NTH(3), 12),
BTF_TYPE_FLOAT_ENC(NAME_NTH(3), 16),
BTF_END_RAW,
},
BTF_STR_SEC("\0int\0some other int\0float"),
},
.expect = {
.raw_types = {
BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_SIGNED, 0, 32, 8),
/* different name */
BTF_TYPE_INT_ENC(NAME_NTH(2), BTF_INT_SIGNED, 0, 32, 8),
/* different encoding */
BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_CHAR, 0, 32, 8),
BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_BOOL, 0, 32, 8),
/* different bit offset */
BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_SIGNED, 8, 32, 8),
/* different bit size */
BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_SIGNED, 0, 27, 8),
/* different byte size */
BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_SIGNED, 0, 32, 4),
/* all allowed sizes */
BTF_TYPE_FLOAT_ENC(NAME_NTH(3), 2),
BTF_TYPE_FLOAT_ENC(NAME_NTH(3), 4),
BTF_TYPE_FLOAT_ENC(NAME_NTH(3), 8),
BTF_TYPE_FLOAT_ENC(NAME_NTH(3), 12),
BTF_TYPE_FLOAT_ENC(NAME_NTH(3), 16),
BTF_END_RAW,
},
BTF_STR_SEC("\0int\0some other int\0float"),
},
},
{
.descr = "dedup: enum fwd resolution",
.input = {
.raw_types = {
/* [1] fwd enum 'e1' before full enum */
BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 0), 4),
/* [2] full enum 'e1' after fwd */
BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1), 4),
BTF_ENUM_ENC(NAME_NTH(2), 123),
/* [3] full enum 'e2' before fwd */
BTF_TYPE_ENC(NAME_NTH(3), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1), 4),
BTF_ENUM_ENC(NAME_NTH(4), 456),
/* [4] fwd enum 'e2' after full enum */
BTF_TYPE_ENC(NAME_NTH(3), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 0), 4),
/* [5] fwd enum with different size, size does not matter for fwd */
BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 0), 1),
/* [6] incompatible full enum with different value */
BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1), 4),
BTF_ENUM_ENC(NAME_NTH(2), 321),
BTF_END_RAW,
},
BTF_STR_SEC("\0e1\0e1_val\0e2\0e2_val"),
},
.expect = {
.raw_types = {
/* [1] full enum 'e1' */
BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1), 4),
BTF_ENUM_ENC(NAME_NTH(2), 123),
/* [2] full enum 'e2' */
BTF_TYPE_ENC(NAME_NTH(3), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1), 4),
BTF_ENUM_ENC(NAME_NTH(4), 456),
/* [3] incompatible full enum with different value */
BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1), 4),
BTF_ENUM_ENC(NAME_NTH(2), 321),
BTF_END_RAW,
},
BTF_STR_SEC("\0e1\0e1_val\0e2\0e2_val"),
},
},
{
.descr = "dedup: datasec and vars pass-through",
.input = {
.raw_types = {
/* int */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
/* static int t */
BTF_VAR_ENC(NAME_NTH(2), 1, 0), /* [2] */
/* .bss section */ /* [3] */
BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4),
BTF_VAR_SECINFO_ENC(2, 0, 4),
/* int, referenced from [5] */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [4] */
/* another static int t */
BTF_VAR_ENC(NAME_NTH(2), 4, 0), /* [5] */
/* another .bss section */ /* [6] */
BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4),
BTF_VAR_SECINFO_ENC(5, 0, 4),
BTF_END_RAW,
},
BTF_STR_SEC("\0.bss\0t"),
},
.expect = {
.raw_types = {
/* int */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
/* static int t */
BTF_VAR_ENC(NAME_NTH(2), 1, 0), /* [2] */
/* .bss section */ /* [3] */
BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4),
BTF_VAR_SECINFO_ENC(2, 0, 4),
/* another static int t */
BTF_VAR_ENC(NAME_NTH(2), 1, 0), /* [4] */
/* another .bss section */ /* [5] */
BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4),
BTF_VAR_SECINFO_ENC(4, 0, 4),
BTF_END_RAW,
},
BTF_STR_SEC("\0.bss\0t"),
},
.opts = {
.force_collisions = true
},
},
{
.descr = "dedup: func/func_arg/var tags",
.input = {
.raw_types = {
/* int */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
/* static int t */
BTF_VAR_ENC(NAME_NTH(1), 1, 0), /* [2] */
/* void f(int a1, int a2) */
BTF_FUNC_PROTO_ENC(0, 2), /* [3] */
BTF_FUNC_PROTO_ARG_ENC(NAME_NTH(2), 1),
BTF_FUNC_PROTO_ARG_ENC(NAME_NTH(3), 1),
BTF_FUNC_ENC(NAME_NTH(4), 2), /* [4] */
/* tag -> t */
BTF_DECL_TAG_ENC(NAME_NTH(5), 2, -1), /* [5] */
BTF_DECL_TAG_ENC(NAME_NTH(5), 2, -1), /* [6] */
/* tag -> func */
BTF_DECL_TAG_ENC(NAME_NTH(5), 4, -1), /* [7] */
BTF_DECL_TAG_ENC(NAME_NTH(5), 4, -1), /* [8] */
/* tag -> func arg a1 */
BTF_DECL_TAG_ENC(NAME_NTH(5), 4, 1), /* [9] */
BTF_DECL_TAG_ENC(NAME_NTH(5), 4, 1), /* [10] */
BTF_END_RAW,
},
BTF_STR_SEC("\0t\0a1\0a2\0f\0tag"),
},
.expect = {
.raw_types = {
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_VAR_ENC(NAME_NTH(1), 1, 0), /* [2] */
BTF_FUNC_PROTO_ENC(0, 2), /* [3] */
BTF_FUNC_PROTO_ARG_ENC(NAME_NTH(2), 1),
BTF_FUNC_PROTO_ARG_ENC(NAME_NTH(3), 1),
BTF_FUNC_ENC(NAME_NTH(4), 2), /* [4] */
BTF_DECL_TAG_ENC(NAME_NTH(5), 2, -1), /* [5] */
BTF_DECL_TAG_ENC(NAME_NTH(5), 4, -1), /* [6] */
BTF_DECL_TAG_ENC(NAME_NTH(5), 4, 1), /* [7] */
BTF_END_RAW,
},
BTF_STR_SEC("\0t\0a1\0a2\0f\0tag"),
},
},
{
.descr = "dedup: func/func_param tags",
.input = {
.raw_types = {
/* int */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
/* void f(int a1, int a2) */
BTF_FUNC_PROTO_ENC(0, 2), /* [2] */
BTF_FUNC_PROTO_ARG_ENC(NAME_NTH(1), 1),
BTF_FUNC_PROTO_ARG_ENC(NAME_NTH(2), 1),
BTF_FUNC_ENC(NAME_NTH(3), 2), /* [3] */
/* void f(int a1, int a2) */
BTF_FUNC_PROTO_ENC(0, 2), /* [4] */
BTF_FUNC_PROTO_ARG_ENC(NAME_NTH(1), 1),
BTF_FUNC_PROTO_ARG_ENC(NAME_NTH(2), 1),
BTF_FUNC_ENC(NAME_NTH(3), 4), /* [5] */
/* tag -> f: tag1, tag2 */
BTF_DECL_TAG_ENC(NAME_NTH(4), 3, -1), /* [6] */
BTF_DECL_TAG_ENC(NAME_NTH(5), 3, -1), /* [7] */
/* tag -> f/a2: tag1, tag2 */
BTF_DECL_TAG_ENC(NAME_NTH(4), 3, 1), /* [8] */
BTF_DECL_TAG_ENC(NAME_NTH(5), 3, 1), /* [9] */
/* tag -> f: tag1, tag3 */
BTF_DECL_TAG_ENC(NAME_NTH(4), 5, -1), /* [10] */
BTF_DECL_TAG_ENC(NAME_NTH(6), 5, -1), /* [11] */
/* tag -> f/a2: tag1, tag3 */
BTF_DECL_TAG_ENC(NAME_NTH(4), 5, 1), /* [12] */
BTF_DECL_TAG_ENC(NAME_NTH(6), 5, 1), /* [13] */
BTF_END_RAW,
},
BTF_STR_SEC("\0a1\0a2\0f\0tag1\0tag2\0tag3"),
},
.expect = {
.raw_types = {
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_FUNC_PROTO_ENC(0, 2), /* [2] */
BTF_FUNC_PROTO_ARG_ENC(NAME_NTH(1), 1),
BTF_FUNC_PROTO_ARG_ENC(NAME_NTH(2), 1),
BTF_FUNC_ENC(NAME_NTH(3), 2), /* [3] */
BTF_DECL_TAG_ENC(NAME_NTH(4), 3, -1), /* [4] */
BTF_DECL_TAG_ENC(NAME_NTH(5), 3, -1), /* [5] */
BTF_DECL_TAG_ENC(NAME_NTH(6), 3, -1), /* [6] */
BTF_DECL_TAG_ENC(NAME_NTH(4), 3, 1), /* [7] */
BTF_DECL_TAG_ENC(NAME_NTH(5), 3, 1), /* [8] */
BTF_DECL_TAG_ENC(NAME_NTH(6), 3, 1), /* [9] */
BTF_END_RAW,
},
BTF_STR_SEC("\0a1\0a2\0f\0tag1\0tag2\0tag3"),
},
},
{
.descr = "dedup: struct/struct_member tags",
.input = {
.raw_types = {
/* int */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_STRUCT_ENC(NAME_NTH(1), 2, 8), /* [2] */
BTF_MEMBER_ENC(NAME_NTH(2), 1, 0),
BTF_MEMBER_ENC(NAME_NTH(3), 1, 32),
BTF_STRUCT_ENC(NAME_NTH(1), 2, 8), /* [3] */
BTF_MEMBER_ENC(NAME_NTH(2), 1, 0),
BTF_MEMBER_ENC(NAME_NTH(3), 1, 32),
/* tag -> t: tag1, tag2 */
BTF_DECL_TAG_ENC(NAME_NTH(4), 2, -1), /* [4] */
BTF_DECL_TAG_ENC(NAME_NTH(5), 2, -1), /* [5] */
/* tag -> t/m2: tag1, tag2 */
BTF_DECL_TAG_ENC(NAME_NTH(4), 2, 1), /* [6] */
BTF_DECL_TAG_ENC(NAME_NTH(5), 2, 1), /* [7] */
/* tag -> t: tag1, tag3 */
BTF_DECL_TAG_ENC(NAME_NTH(4), 3, -1), /* [8] */
BTF_DECL_TAG_ENC(NAME_NTH(6), 3, -1), /* [9] */
/* tag -> t/m2: tag1, tag3 */
BTF_DECL_TAG_ENC(NAME_NTH(4), 3, 1), /* [10] */
BTF_DECL_TAG_ENC(NAME_NTH(6), 3, 1), /* [11] */
BTF_END_RAW,
},
BTF_STR_SEC("\0t\0m1\0m2\0tag1\0tag2\0tag3"),
},
.expect = {
.raw_types = {
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_STRUCT_ENC(NAME_NTH(1), 2, 8), /* [2] */
BTF_MEMBER_ENC(NAME_NTH(2), 1, 0),
BTF_MEMBER_ENC(NAME_NTH(3), 1, 32),
BTF_DECL_TAG_ENC(NAME_NTH(4), 2, -1), /* [3] */
BTF_DECL_TAG_ENC(NAME_NTH(5), 2, -1), /* [4] */
BTF_DECL_TAG_ENC(NAME_NTH(6), 2, -1), /* [5] */
BTF_DECL_TAG_ENC(NAME_NTH(4), 2, 1), /* [6] */
BTF_DECL_TAG_ENC(NAME_NTH(5), 2, 1), /* [7] */
BTF_DECL_TAG_ENC(NAME_NTH(6), 2, 1), /* [8] */
BTF_END_RAW,
},
BTF_STR_SEC("\0t\0m1\0m2\0tag1\0tag2\0tag3"),
},
},
{
.descr = "dedup: typedef tags",
.input = {
.raw_types = {
/* int */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_TYPEDEF_ENC(NAME_NTH(1), 1), /* [2] */
BTF_TYPEDEF_ENC(NAME_NTH(1), 1), /* [3] */
/* tag -> t: tag1, tag2 */
BTF_DECL_TAG_ENC(NAME_NTH(2), 2, -1), /* [4] */
BTF_DECL_TAG_ENC(NAME_NTH(3), 2, -1), /* [5] */
/* tag -> t: tag1, tag3 */
BTF_DECL_TAG_ENC(NAME_NTH(2), 3, -1), /* [6] */
BTF_DECL_TAG_ENC(NAME_NTH(4), 3, -1), /* [7] */
BTF_END_RAW,
},
BTF_STR_SEC("\0t\0tag1\0tag2\0tag3"),
},
.expect = {
.raw_types = {
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_TYPEDEF_ENC(NAME_NTH(1), 1), /* [2] */
BTF_DECL_TAG_ENC(NAME_NTH(2), 2, -1), /* [3] */
BTF_DECL_TAG_ENC(NAME_NTH(3), 2, -1), /* [4] */
BTF_DECL_TAG_ENC(NAME_NTH(4), 2, -1), /* [5] */
BTF_END_RAW,
},
BTF_STR_SEC("\0t\0tag1\0tag2\0tag3"),
},
},
{
.descr = "dedup: btf_type_tag #1",
.input = {
.raw_types = {
/* ptr -> tag2 -> tag1 -> int */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_TYPE_TAG_ENC(NAME_NTH(1), 1), /* [2] */
BTF_TYPE_TAG_ENC(NAME_NTH(2), 2), /* [3] */
BTF_PTR_ENC(3), /* [4] */
/* ptr -> tag2 -> tag1 -> int */
BTF_TYPE_TAG_ENC(NAME_NTH(1), 1), /* [5] */
BTF_TYPE_TAG_ENC(NAME_NTH(2), 5), /* [6] */
BTF_PTR_ENC(6), /* [7] */
/* ptr -> tag1 -> int */
BTF_TYPE_TAG_ENC(NAME_NTH(1), 1), /* [8] */
BTF_PTR_ENC(8), /* [9] */
BTF_END_RAW,
},
BTF_STR_SEC("\0tag1\0tag2"),
},
.expect = {
.raw_types = {
/* ptr -> tag2 -> tag1 -> int */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_TYPE_TAG_ENC(NAME_NTH(1), 1), /* [2] */
BTF_TYPE_TAG_ENC(NAME_NTH(2), 2), /* [3] */
BTF_PTR_ENC(3), /* [4] */
/* ptr -> tag1 -> int */
BTF_PTR_ENC(2), /* [5] */
BTF_END_RAW,
},
BTF_STR_SEC("\0tag1\0tag2"),
},
},
{
.descr = "dedup: btf_type_tag #2",
.input = {
.raw_types = {
/* ptr -> tag2 -> tag1 -> int */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_TYPE_TAG_ENC(NAME_NTH(1), 1), /* [2] */
BTF_TYPE_TAG_ENC(NAME_NTH(2), 2), /* [3] */
BTF_PTR_ENC(3), /* [4] */
/* ptr -> tag2 -> int */
BTF_TYPE_TAG_ENC(NAME_NTH(2), 1), /* [5] */
BTF_PTR_ENC(5), /* [6] */
BTF_END_RAW,
},
BTF_STR_SEC("\0tag1\0tag2"),
},
.expect = {
.raw_types = {
/* ptr -> tag2 -> tag1 -> int */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_TYPE_TAG_ENC(NAME_NTH(1), 1), /* [2] */
BTF_TYPE_TAG_ENC(NAME_NTH(2), 2), /* [3] */
BTF_PTR_ENC(3), /* [4] */
/* ptr -> tag2 -> int */
BTF_TYPE_TAG_ENC(NAME_NTH(2), 1), /* [5] */
BTF_PTR_ENC(5), /* [6] */
BTF_END_RAW,
},
BTF_STR_SEC("\0tag1\0tag2"),
},
},
{
.descr = "dedup: btf_type_tag #3",
.input = {
.raw_types = {
/* ptr -> tag2 -> tag1 -> int */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_TYPE_TAG_ENC(NAME_NTH(1), 1), /* [2] */
BTF_TYPE_TAG_ENC(NAME_NTH(2), 2), /* [3] */
BTF_PTR_ENC(3), /* [4] */
/* ptr -> tag1 -> tag2 -> int */
BTF_TYPE_TAG_ENC(NAME_NTH(2), 1), /* [5] */
BTF_TYPE_TAG_ENC(NAME_NTH(1), 5), /* [6] */
BTF_PTR_ENC(6), /* [7] */
BTF_END_RAW,
},
BTF_STR_SEC("\0tag1\0tag2"),
},
.expect = {
.raw_types = {
/* ptr -> tag2 -> tag1 -> int */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_TYPE_TAG_ENC(NAME_NTH(1), 1), /* [2] */
BTF_TYPE_TAG_ENC(NAME_NTH(2), 2), /* [3] */
BTF_PTR_ENC(3), /* [4] */
/* ptr -> tag1 -> tag2 -> int */
BTF_TYPE_TAG_ENC(NAME_NTH(2), 1), /* [5] */
BTF_TYPE_TAG_ENC(NAME_NTH(1), 5), /* [6] */
BTF_PTR_ENC(6), /* [7] */
BTF_END_RAW,
},
BTF_STR_SEC("\0tag1\0tag2"),
},
},
{
.descr = "dedup: btf_type_tag #4",
.input = {
.raw_types = {
/* ptr -> tag1 -> int */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_TYPE_TAG_ENC(NAME_NTH(1), 1), /* [2] */
BTF_PTR_ENC(2), /* [3] */
/* ptr -> tag1 -> long */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 64, 8), /* [4] */
BTF_TYPE_TAG_ENC(NAME_NTH(1), 4), /* [5] */
BTF_PTR_ENC(5), /* [6] */
BTF_END_RAW,
},
BTF_STR_SEC("\0tag1"),
},
.expect = {
.raw_types = {
/* ptr -> tag1 -> int */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_TYPE_TAG_ENC(NAME_NTH(1), 1), /* [2] */
BTF_PTR_ENC(2), /* [3] */
/* ptr -> tag1 -> long */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 64, 8), /* [4] */
BTF_TYPE_TAG_ENC(NAME_NTH(1), 4), /* [5] */
BTF_PTR_ENC(5), /* [6] */
BTF_END_RAW,
},
BTF_STR_SEC("\0tag1"),
},
},
{
.descr = "dedup: btf_type_tag #5, struct",
.input = {
.raw_types = {
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_TYPE_TAG_ENC(NAME_NTH(1), 1), /* [2] */
BTF_TYPE_ENC(NAME_NTH(2), BTF_INFO_ENC(BTF_KIND_STRUCT, 1, 1), 4), /* [3] */
BTF_MEMBER_ENC(NAME_NTH(3), 2, BTF_MEMBER_OFFSET(0, 0)),
BTF_TYPE_TAG_ENC(NAME_NTH(1), 1), /* [4] */
BTF_TYPE_ENC(NAME_NTH(2), BTF_INFO_ENC(BTF_KIND_STRUCT, 1, 1), 4), /* [5] */
BTF_MEMBER_ENC(NAME_NTH(3), 4, BTF_MEMBER_OFFSET(0, 0)),
BTF_END_RAW,
},
BTF_STR_SEC("\0tag1\0t\0m"),
},
.expect = {
.raw_types = {
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_TYPE_TAG_ENC(NAME_NTH(1), 1), /* [2] */
BTF_TYPE_ENC(NAME_NTH(2), BTF_INFO_ENC(BTF_KIND_STRUCT, 1, 1), 4), /* [3] */
BTF_MEMBER_ENC(NAME_NTH(3), 2, BTF_MEMBER_OFFSET(0, 0)),
BTF_END_RAW,
},
BTF_STR_SEC("\0tag1\0t\0m"),
},
},
{
.descr = "dedup: enum64, standalone",
.input = {
.raw_types = {
BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM64, 0, 1), 8),
BTF_ENUM64_ENC(NAME_NTH(2), 1, 123),
BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM64, 0, 1), 8),
BTF_ENUM64_ENC(NAME_NTH(2), 1, 123),
BTF_END_RAW,
},
BTF_STR_SEC("\0e1\0e1_val"),
},
.expect = {
.raw_types = {
BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM64, 0, 1), 8),
BTF_ENUM64_ENC(NAME_NTH(2), 1, 123),
BTF_END_RAW,
},
BTF_STR_SEC("\0e1\0e1_val"),
},
},
{
.descr = "dedup: enum64, fwd resolution",
.input = {
.raw_types = {
/* [1] fwd enum64 'e1' before full enum */
BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM64, 0, 0), 8),
/* [2] full enum64 'e1' after fwd */
BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM64, 0, 1), 8),
BTF_ENUM64_ENC(NAME_NTH(2), 1, 123),
/* [3] full enum64 'e2' before fwd */
BTF_TYPE_ENC(NAME_NTH(3), BTF_INFO_ENC(BTF_KIND_ENUM64, 0, 1), 8),
BTF_ENUM64_ENC(NAME_NTH(4), 0, 456),
/* [4] fwd enum64 'e2' after full enum */
BTF_TYPE_ENC(NAME_NTH(3), BTF_INFO_ENC(BTF_KIND_ENUM64, 0, 0), 8),
/* [5] incompatible full enum64 with different value */
BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM64, 0, 1), 8),
BTF_ENUM64_ENC(NAME_NTH(2), 0, 321),
BTF_END_RAW,
},
BTF_STR_SEC("\0e1\0e1_val\0e2\0e2_val"),
},
.expect = {
.raw_types = {
/* [1] full enum64 'e1' */
BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM64, 0, 1), 8),
BTF_ENUM64_ENC(NAME_NTH(2), 1, 123),
/* [2] full enum64 'e2' */
BTF_TYPE_ENC(NAME_NTH(3), BTF_INFO_ENC(BTF_KIND_ENUM64, 0, 1), 8),
BTF_ENUM64_ENC(NAME_NTH(4), 0, 456),
/* [3] incompatible full enum64 with different value */
BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM64, 0, 1), 8),
BTF_ENUM64_ENC(NAME_NTH(2), 0, 321),
BTF_END_RAW,
},
BTF_STR_SEC("\0e1\0e1_val\0e2\0e2_val"),
},
},
{
.descr = "dedup: enum and enum64, no dedup",
.input = {
.raw_types = {
/* [1] enum 'e1' */
BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1), 4),
BTF_ENUM_ENC(NAME_NTH(2), 1),
/* [2] enum64 'e1' */
BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM64, 0, 1), 4),
BTF_ENUM64_ENC(NAME_NTH(2), 1, 0),
BTF_END_RAW,
},
BTF_STR_SEC("\0e1\0e1_val"),
},
.expect = {
.raw_types = {
/* [1] enum 'e1' */
BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1), 4),
BTF_ENUM_ENC(NAME_NTH(2), 1),
/* [2] enum64 'e1' */
BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM64, 0, 1), 4),
BTF_ENUM64_ENC(NAME_NTH(2), 1, 0),
BTF_END_RAW,
},
BTF_STR_SEC("\0e1\0e1_val"),
},
},
{
.descr = "dedup: enum of different size: no dedup",
.input = {
.raw_types = {
/* [1] enum 'e1' */
BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1), 4),
BTF_ENUM_ENC(NAME_NTH(2), 1),
/* [2] enum 'e1' */
BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1), 2),
BTF_ENUM_ENC(NAME_NTH(2), 1),
BTF_END_RAW,
},
BTF_STR_SEC("\0e1\0e1_val"),
},
.expect = {
.raw_types = {
/* [1] enum 'e1' */
BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1), 4),
BTF_ENUM_ENC(NAME_NTH(2), 1),
/* [2] enum 'e1' */
BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1), 2),
BTF_ENUM_ENC(NAME_NTH(2), 1),
BTF_END_RAW,
},
BTF_STR_SEC("\0e1\0e1_val"),
},
},
{
.descr = "dedup: enum fwd to enum64",
.input = {
.raw_types = {
/* [1] enum64 'e1' */
BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM64, 0, 1), 8),
BTF_ENUM64_ENC(NAME_NTH(2), 1, 0),
/* [2] enum 'e1' fwd */
BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 0), 4),
/* [3] typedef enum 'e1' td */
BTF_TYPE_ENC(NAME_NTH(3), BTF_INFO_ENC(BTF_KIND_TYPEDEF, 0, 0), 2),
BTF_END_RAW,
},
BTF_STR_SEC("\0e1\0e1_val\0td"),
},
.expect = {
.raw_types = {
/* [1] enum64 'e1' */
BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM64, 0, 1), 8),
BTF_ENUM64_ENC(NAME_NTH(2), 1, 0),
/* [2] typedef enum 'e1' td */
BTF_TYPE_ENC(NAME_NTH(3), BTF_INFO_ENC(BTF_KIND_TYPEDEF, 0, 0), 1),
BTF_END_RAW,
},
BTF_STR_SEC("\0e1\0e1_val\0td"),
},
},
{
.descr = "dedup: enum64 fwd to enum",
.input = {
.raw_types = {
/* [1] enum 'e1' */
BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1), 4),
BTF_ENUM_ENC(NAME_NTH(2), 1),
/* [2] enum64 'e1' fwd */
BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM64, 0, 0), 8),
/* [3] typedef enum 'e1' td */
BTF_TYPE_ENC(NAME_NTH(3), BTF_INFO_ENC(BTF_KIND_TYPEDEF, 0, 0), 2),
BTF_END_RAW,
},
BTF_STR_SEC("\0e1\0e1_val\0td"),
},
.expect = {
.raw_types = {
/* [1] enum 'e1' */
BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1), 4),
BTF_ENUM_ENC(NAME_NTH(2), 1),
/* [2] typedef enum 'e1' td */
BTF_TYPE_ENC(NAME_NTH(3), BTF_INFO_ENC(BTF_KIND_TYPEDEF, 0, 0), 1),
BTF_END_RAW,
},
BTF_STR_SEC("\0e1\0e1_val\0td"),
},
},
{
.descr = "dedup: standalone fwd declaration struct",
/*
* Verify that CU1:foo and CU2:foo would be unified and that
* typedef/ptr would be updated to point to CU1:foo.
*
* // CU 1:
* struct foo { int x; };
*
* // CU 2:
* struct foo;
* typedef struct foo *foo_ptr;
*/
.input = {
.raw_types = {
/* CU 1 */
BTF_STRUCT_ENC(NAME_NTH(1), 1, 4), /* [1] */
BTF_MEMBER_ENC(NAME_NTH(2), 2, 0),
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [2] */
/* CU 2 */
BTF_FWD_ENC(NAME_NTH(1), 0), /* [3] */
BTF_PTR_ENC(3), /* [4] */
BTF_TYPEDEF_ENC(NAME_NTH(3), 4), /* [5] */
BTF_END_RAW,
},
BTF_STR_SEC("\0foo\0x\0foo_ptr"),
},
.expect = {
.raw_types = {
BTF_STRUCT_ENC(NAME_NTH(1), 1, 4), /* [1] */
BTF_MEMBER_ENC(NAME_NTH(2), 2, 0),
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [2] */
BTF_PTR_ENC(1), /* [3] */
BTF_TYPEDEF_ENC(NAME_NTH(3), 3), /* [4] */
BTF_END_RAW,
},
BTF_STR_SEC("\0foo\0x\0foo_ptr"),
},
},
{
.descr = "dedup: standalone fwd declaration union",
/*
* Verify that CU1:foo and CU2:foo would be unified and that
* typedef/ptr would be updated to point to CU1:foo.
* Same as "dedup: standalone fwd declaration struct" but for unions.
*
* // CU 1:
* union foo { int x; };
*
* // CU 2:
* union foo;
* typedef union foo *foo_ptr;
*/
.input = {
.raw_types = {
/* CU 1 */
BTF_UNION_ENC(NAME_NTH(1), 1, 4), /* [1] */
BTF_MEMBER_ENC(NAME_NTH(2), 2, 0),
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [2] */
/* CU 2 */
BTF_FWD_ENC(NAME_TBD, 1), /* [3] */
BTF_PTR_ENC(3), /* [4] */
BTF_TYPEDEF_ENC(NAME_NTH(3), 4), /* [5] */
BTF_END_RAW,
},
BTF_STR_SEC("\0foo\0x\0foo_ptr"),
},
.expect = {
.raw_types = {
BTF_UNION_ENC(NAME_NTH(1), 1, 4), /* [1] */
BTF_MEMBER_ENC(NAME_NTH(2), 2, 0),
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [2] */
BTF_PTR_ENC(1), /* [3] */
BTF_TYPEDEF_ENC(NAME_NTH(3), 3), /* [4] */
BTF_END_RAW,
},
BTF_STR_SEC("\0foo\0x\0foo_ptr"),
},
},
{
.descr = "dedup: standalone fwd declaration wrong kind",
/*
* Negative test for btf_dedup_resolve_fwds:
* - CU1:foo is a struct, C2:foo is a union, thus CU2:foo is not deduped;
* - typedef/ptr should remain unchanged as well.
*
* // CU 1:
* struct foo { int x; };
*
* // CU 2:
* union foo;
* typedef union foo *foo_ptr;
*/
.input = {
.raw_types = {
/* CU 1 */
BTF_STRUCT_ENC(NAME_NTH(1), 1, 4), /* [1] */
BTF_MEMBER_ENC(NAME_NTH(2), 2, 0),
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [2] */
/* CU 2 */
BTF_FWD_ENC(NAME_NTH(3), 1), /* [3] */
BTF_PTR_ENC(3), /* [4] */
BTF_TYPEDEF_ENC(NAME_NTH(3), 4), /* [5] */
BTF_END_RAW,
},
BTF_STR_SEC("\0foo\0x\0foo_ptr"),
},
.expect = {
.raw_types = {
/* CU 1 */
BTF_STRUCT_ENC(NAME_NTH(1), 1, 4), /* [1] */
BTF_MEMBER_ENC(NAME_NTH(2), 2, 0),
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [2] */
/* CU 2 */
BTF_FWD_ENC(NAME_NTH(3), 1), /* [3] */
BTF_PTR_ENC(3), /* [4] */
BTF_TYPEDEF_ENC(NAME_NTH(3), 4), /* [5] */
BTF_END_RAW,
},
BTF_STR_SEC("\0foo\0x\0foo_ptr"),
},
},
{
.descr = "dedup: standalone fwd declaration name conflict",
/*
* Negative test for btf_dedup_resolve_fwds:
* - two candidates for CU2:foo dedup, thus it is unchanged;
* - typedef/ptr should remain unchanged as well.
*
* // CU 1:
* struct foo { int x; };
*
* // CU 2:
* struct foo;
* typedef struct foo *foo_ptr;
*
* // CU 3:
* struct foo { int x; int y; };
*/
.input = {
.raw_types = {
/* CU 1 */
BTF_STRUCT_ENC(NAME_NTH(1), 1, 4), /* [1] */
BTF_MEMBER_ENC(NAME_NTH(2), 2, 0),
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [2] */
/* CU 2 */
BTF_FWD_ENC(NAME_NTH(1), 0), /* [3] */
BTF_PTR_ENC(3), /* [4] */
BTF_TYPEDEF_ENC(NAME_NTH(4), 4), /* [5] */
/* CU 3 */
BTF_STRUCT_ENC(NAME_NTH(1), 2, 8), /* [6] */
BTF_MEMBER_ENC(NAME_NTH(2), 2, 0),
BTF_MEMBER_ENC(NAME_NTH(3), 2, 0),
BTF_END_RAW,
},
BTF_STR_SEC("\0foo\0x\0y\0foo_ptr"),
},
.expect = {
.raw_types = {
/* CU 1 */
BTF_STRUCT_ENC(NAME_NTH(1), 1, 4), /* [1] */
BTF_MEMBER_ENC(NAME_NTH(2), 2, 0),
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [2] */
/* CU 2 */
BTF_FWD_ENC(NAME_NTH(1), 0), /* [3] */
BTF_PTR_ENC(3), /* [4] */
BTF_TYPEDEF_ENC(NAME_NTH(4), 4), /* [5] */
/* CU 3 */
BTF_STRUCT_ENC(NAME_NTH(1), 2, 8), /* [6] */
BTF_MEMBER_ENC(NAME_NTH(2), 2, 0),
BTF_MEMBER_ENC(NAME_NTH(3), 2, 0),
BTF_END_RAW,
},
BTF_STR_SEC("\0foo\0x\0y\0foo_ptr"),
},
},
};
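/*
* Raw size of a single BTF type record: struct btf_type plus the
* kind-specific trailing data (e.g. one __u32 for INT, vlen struct
* btf_member entries for STRUCT/UNION). Used below to compare deduped
* type records against the expected ones.
*/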
static int btf_type_size(const struct btf_type *t)
{
int base_size = sizeof(struct btf_type);
__u16 vlen = BTF_INFO_VLEN(t->info);
__u16 kind = BTF_INFO_KIND(t->info);
switch (kind) {
case BTF_KIND_FWD:
case BTF_KIND_CONST:
case BTF_KIND_VOLATILE:
case BTF_KIND_RESTRICT:
case BTF_KIND_PTR:
case BTF_KIND_TYPEDEF:
case BTF_KIND_FUNC:
case BTF_KIND_FLOAT:
case BTF_KIND_TYPE_TAG:
return base_size;
case BTF_KIND_INT:
return base_size + sizeof(__u32);
case BTF_KIND_ENUM:
return base_size + vlen * sizeof(struct btf_enum);
case BTF_KIND_ENUM64:
return base_size + vlen * sizeof(struct btf_enum64);
case BTF_KIND_ARRAY:
return base_size + sizeof(struct btf_array);
case BTF_KIND_STRUCT:
case BTF_KIND_UNION:
return base_size + vlen * sizeof(struct btf_member);
case BTF_KIND_FUNC_PROTO:
return base_size + vlen * sizeof(struct btf_param);
case BTF_KIND_VAR:
return base_size + sizeof(struct btf_var);
case BTF_KIND_DATASEC:
return base_size + vlen * sizeof(struct btf_var_secinfo);
case BTF_KIND_DECL_TAG:
return base_size + sizeof(struct btf_decl_tag);
default:
fprintf(stderr, "Unsupported BTF_KIND:%u\n", kind);
return -EINVAL;
}
}
static void dump_btf_strings(const char *strs, __u32 len)
{
const char *cur = strs;
int i = 0;
while (cur < strs + len) {
fprintf(stderr, "string #%d: '%s'\n", i, cur);
cur += strlen(cur) + 1;
i++;
}
}
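/*
* do_test_dedup() constructs two in-memory BTFs from raw templates -- the
* test input and the expected result -- runs btf__dedup() on the former,
* and then checks that the string section and every type record of the
* deduped BTF match the expected BTF.
*/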
static void do_test_dedup(unsigned int test_num)
{
struct btf_dedup_test *test = &dedup_tests[test_num - 1];
__u32 test_nr_types, expect_nr_types, test_btf_size, expect_btf_size;
const struct btf_header *test_hdr, *expect_hdr;
struct btf *test_btf = NULL, *expect_btf = NULL;
const void *test_btf_data, *expect_btf_data;
const char *ret_test_next_str, *ret_expect_next_str;
const char *test_strs, *expect_strs;
const char *test_str_cur;
const char *expect_str_cur, *expect_str_end;
unsigned int raw_btf_size;
void *raw_btf;
int err = 0, i;
if (!test__start_subtest(test->descr))
return;
raw_btf = btf_raw_create(&hdr_tmpl, test->input.raw_types,
test->input.str_sec, test->input.str_sec_size,
&raw_btf_size, &ret_test_next_str);
if (!raw_btf)
return;
test_btf = btf__new((__u8 *)raw_btf, raw_btf_size);
err = libbpf_get_error(test_btf);
free(raw_btf);
if (CHECK(err, "invalid test_btf errno:%d", err)) {
err = -1;
goto done;
}
raw_btf = btf_raw_create(&hdr_tmpl, test->expect.raw_types,
test->expect.str_sec,
test->expect.str_sec_size,
&raw_btf_size, &ret_expect_next_str);
if (!raw_btf)
return;
expect_btf = btf__new((__u8 *)raw_btf, raw_btf_size);
err = libbpf_get_error(expect_btf);
free(raw_btf);
if (CHECK(err, "invalid expect_btf errno:%d", err)) {
err = -1;
goto done;
}
test->opts.sz = sizeof(test->opts);
err = btf__dedup(test_btf, &test->opts);
if (CHECK(err, "btf_dedup failed errno:%d", err)) {
err = -1;
goto done;
}
test_btf_data = btf__raw_data(test_btf, &test_btf_size);
expect_btf_data = btf__raw_data(expect_btf, &expect_btf_size);
if (CHECK(test_btf_size != expect_btf_size,
"test_btf_size:%u != expect_btf_size:%u",
test_btf_size, expect_btf_size)) {
err = -1;
goto done;
}
test_hdr = test_btf_data;
test_strs = test_btf_data + sizeof(*test_hdr) + test_hdr->str_off;
expect_hdr = expect_btf_data;
expect_strs = expect_btf_data + sizeof(*expect_hdr) + expect_hdr->str_off;
if (CHECK(test_hdr->str_len != expect_hdr->str_len,
"test_hdr->str_len:%u != expect_hdr->str_len:%u",
test_hdr->str_len, expect_hdr->str_len)) {
fprintf(stderr, "\ntest strings:\n");
dump_btf_strings(test_strs, test_hdr->str_len);
fprintf(stderr, "\nexpected strings:\n");
dump_btf_strings(expect_strs, expect_hdr->str_len);
err = -1;
goto done;
}
expect_str_cur = expect_strs;
expect_str_end = expect_strs + expect_hdr->str_len;
while (expect_str_cur < expect_str_end) {
size_t test_len, expect_len;
int off;
off = btf__find_str(test_btf, expect_str_cur);
if (CHECK(off < 0, "exp str '%s' not found: %d\n", expect_str_cur, off)) {
err = -1;
goto done;
}
test_str_cur = btf__str_by_offset(test_btf, off);
test_len = strlen(test_str_cur);
expect_len = strlen(expect_str_cur);
if (CHECK(test_len != expect_len,
"test_len:%zu != expect_len:%zu "
"(test_str:%s, expect_str:%s)",
test_len, expect_len, test_str_cur, expect_str_cur)) {
err = -1;
goto done;
}
if (CHECK(strcmp(test_str_cur, expect_str_cur),
"test_str:%s != expect_str:%s",
test_str_cur, expect_str_cur)) {
err = -1;
goto done;
}
expect_str_cur += expect_len + 1;
}
test_nr_types = btf__type_cnt(test_btf);
expect_nr_types = btf__type_cnt(expect_btf);
if (CHECK(test_nr_types != expect_nr_types,
"test_nr_types:%u != expect_nr_types:%u",
test_nr_types, expect_nr_types)) {
err = -1;
goto done;
}
for (i = 1; i < test_nr_types; i++) {
const struct btf_type *test_type, *expect_type;
int test_size, expect_size;
test_type = btf__type_by_id(test_btf, i);
expect_type = btf__type_by_id(expect_btf, i);
test_size = btf_type_size(test_type);
expect_size = btf_type_size(expect_type);
if (CHECK(test_size != expect_size,
"type #%d: test_size:%d != expect_size:%d",
i, test_size, expect_size)) {
err = -1;
goto done;
}
if (CHECK(btf_kind(test_type) != btf_kind(expect_type),
"type %d kind: exp %u != got %u\n",
i, btf_kind(expect_type), btf_kind(test_type))) {
err = -1;
goto done;
}
if (CHECK(test_type->info != expect_type->info,
"type %d info: exp %u != got %u\n",
i, expect_type->info, test_type->info)) {
err = -1;
goto done;
}
if (CHECK(test_type->size != expect_type->size,
"type %d size/type: exp %u != got %u\n",
i, expect_type->size, test_type->size)) {
err = -1;
goto done;
}
}
done:
btf__free(test_btf);
btf__free(expect_btf);
}
void test_btf(void)
{
int i;
always_log = env.verbosity > VERBOSE_NONE;
for (i = 1; i <= ARRAY_SIZE(raw_tests); i++)
do_test_raw(i);
for (i = 1; i <= ARRAY_SIZE(get_info_tests); i++)
do_test_get_info(i);
for (i = 1; i <= ARRAY_SIZE(file_tests); i++)
do_test_file(i);
for (i = 1; i <= ARRAY_SIZE(info_raw_tests); i++)
do_test_info_raw(i);
for (i = 1; i <= ARRAY_SIZE(dedup_tests); i++)
do_test_dedup(i);
test_pprint();
}
| linux-master | tools/testing/selftests/bpf/prog_tests/btf.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Facebook */
#define _GNU_SOURCE
#include <pthread.h>
#include <sched.h>
#include <test_progs.h>
#include "test_perf_link.skel.h"
static void burn_cpu(void)
{
volatile int j = 0;
cpu_set_t cpu_set;
int i, err;
/* generate some branches on cpu 0 */
CPU_ZERO(&cpu_set);
CPU_SET(0, &cpu_set);
err = pthread_setaffinity_np(pthread_self(), sizeof(cpu_set), &cpu_set);
ASSERT_OK(err, "set_thread_affinity");
/* spin the loop for a while (random high number) */
for (i = 0; i < 1000000; ++i)
++j;
}
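/*
* Create a software cpu-clock perf event, attach the BPF program to it via
* bpf_link_create(BPF_PERF_EVENT), and sanity-check the link info. Then
* verify that closing the link (while the perf event itself stays open)
* really stops the program: run_cnt must not change across another
* burn_cpu() pass.
*/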
/* TODO: often fails in concurrent mode */
void serial_test_perf_link(void)
{
struct test_perf_link *skel = NULL;
struct perf_event_attr attr;
int pfd = -1, link_fd = -1, err;
int run_cnt_before, run_cnt_after;
struct bpf_link_info info;
__u32 info_len = sizeof(info);
/* create perf event */
memset(&attr, 0, sizeof(attr));
attr.size = sizeof(attr);
attr.type = PERF_TYPE_SOFTWARE;
attr.config = PERF_COUNT_SW_CPU_CLOCK;
attr.freq = 1;
attr.sample_freq = 1000;
pfd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, PERF_FLAG_FD_CLOEXEC);
if (!ASSERT_GE(pfd, 0, "perf_fd"))
goto cleanup;
skel = test_perf_link__open_and_load();
if (!ASSERT_OK_PTR(skel, "skel_load"))
goto cleanup;
link_fd = bpf_link_create(bpf_program__fd(skel->progs.handler), pfd,
BPF_PERF_EVENT, NULL);
if (!ASSERT_GE(link_fd, 0, "link_fd"))
goto cleanup;
memset(&info, 0, sizeof(info));
err = bpf_link_get_info_by_fd(link_fd, &info, &info_len);
if (!ASSERT_OK(err, "link_get_info"))
goto cleanup;
ASSERT_EQ(info.type, BPF_LINK_TYPE_PERF_EVENT, "link_type");
ASSERT_GT(info.id, 0, "link_id");
ASSERT_GT(info.prog_id, 0, "link_prog_id");
/* ensure we get at least one perf_event prog execution */
burn_cpu();
ASSERT_GT(skel->bss->run_cnt, 0, "run_cnt");
/* the perf_event is still active, but once we close the link the BPF
* program shouldn't be executed anymore
*/
close(link_fd);
link_fd = -1;
/* make sure there are no stragglers */
kern_sync_rcu();
run_cnt_before = skel->bss->run_cnt;
burn_cpu();
run_cnt_after = skel->bss->run_cnt;
ASSERT_EQ(run_cnt_before, run_cnt_after, "run_cnt_before_after");
cleanup:
if (link_fd >= 0)
close(link_fd);
if (pfd >= 0)
close(pfd);
test_perf_link__destroy(skel);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/perf_link.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2023 Isovalent */
#include <uapi/linux/if_link.h>
#include <test_progs.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>
#include "network_helpers.h"
#include "test_assign_reuse.skel.h"
#define NS_TEST "assign_reuse"
#define LOOPBACK 1
#define PORT 4443
static int attach_reuseport(int sock_fd, int prog_fd)
{
return setsockopt(sock_fd, SOL_SOCKET, SO_ATTACH_REUSEPORT_EBPF,
&prog_fd, sizeof(prog_fd));
}
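/*
* Fetch the kernel-assigned socket cookie via SO_COOKIE. The BPF side
* records the cookie of the socket it selected in sk_cookie_seen, so
* comparing the two proves the reuseport program picked exactly this
* server socket.
*/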
static __u64 cookie(int fd)
{
__u64 cookie = 0;
socklen_t cookie_len = sizeof(cookie);
int ret;
ret = getsockopt(fd, SOL_SOCKET, SO_COOKIE, &cookie, &cookie_len);
ASSERT_OK(ret, "cookie");
ASSERT_GT(cookie, 0, "cookie_invalid");
return cookie;
}
static int echo_test_udp(int fd_sv)
{
struct sockaddr_storage addr = {};
socklen_t len = sizeof(addr);
char buff[1] = {};
int fd_cl = -1, ret;
fd_cl = connect_to_fd(fd_sv, 100);
ASSERT_GT(fd_cl, 0, "create_client");
ASSERT_EQ(getsockname(fd_cl, (void *)&addr, &len), 0, "getsockname");
ASSERT_EQ(send(fd_cl, buff, sizeof(buff), 0), 1, "send_client");
ret = recv(fd_sv, buff, sizeof(buff), 0);
if (ret < 0) {
close(fd_cl);
return errno;
}
ASSERT_EQ(ret, 1, "recv_server");
ASSERT_EQ(sendto(fd_sv, buff, sizeof(buff), 0, (void *)&addr, len), 1, "send_server");
ASSERT_EQ(recv(fd_cl, buff, sizeof(buff), 0), 1, "recv_client");
close(fd_cl);
return 0;
}
static int echo_test_tcp(int fd_sv)
{
char buff[1] = {};
int fd_cl = -1, fd_sv_cl = -1;
fd_cl = connect_to_fd(fd_sv, 100);
if (fd_cl < 0)
return errno;
fd_sv_cl = accept(fd_sv, NULL, NULL);
ASSERT_GE(fd_sv_cl, 0, "accept_fd");
ASSERT_EQ(send(fd_cl, buff, sizeof(buff), 0), 1, "send_client");
ASSERT_EQ(recv(fd_sv_cl, buff, sizeof(buff), 0), 1, "recv_server");
ASSERT_EQ(send(fd_sv_cl, buff, sizeof(buff), 0), 1, "send_server");
ASSERT_EQ(recv(fd_cl, buff, sizeof(buff), 0), 1, "recv_client");
close(fd_sv_cl);
close(fd_cl);
return 0;
}
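/*
* One subtest run: a TC ingress program steers packets destined to
* dest_port into the socket stored in sk_map (presumably via
* bpf_sk_assign(); the BPF side lives in progs/test_assign_reuse.c),
* while a reuseport program on that socket either drops or accepts.
* With reuse_drop attached the echo must fail (ECONNREFUSED for TCP,
* EAGAIN for UDP); after swapping in reuse_accept it must succeed and
* the cookie recorded by BPF must match the server socket.
*/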
static void run_assign_reuse(int family, int sotype, const char *ip, __u16 port)
{
DECLARE_LIBBPF_OPTS(bpf_tc_hook, tc_hook,
.ifindex = LOOPBACK,
.attach_point = BPF_TC_INGRESS,
);
DECLARE_LIBBPF_OPTS(bpf_tc_opts, tc_opts,
.handle = 1,
.priority = 1,
);
bool hook_created = false, tc_attached = false;
int ret, fd_tc, fd_accept, fd_drop, fd_map;
int *fd_sv = NULL;
__u64 fd_val;
struct test_assign_reuse *skel;
const int zero = 0;
skel = test_assign_reuse__open();
if (!ASSERT_OK_PTR(skel, "skel_open"))
goto cleanup;
skel->rodata->dest_port = port;
ret = test_assign_reuse__load(skel);
if (!ASSERT_OK(ret, "skel_load"))
goto cleanup;
ASSERT_EQ(skel->bss->sk_cookie_seen, 0, "cookie_init");
fd_tc = bpf_program__fd(skel->progs.tc_main);
fd_accept = bpf_program__fd(skel->progs.reuse_accept);
fd_drop = bpf_program__fd(skel->progs.reuse_drop);
fd_map = bpf_map__fd(skel->maps.sk_map);
fd_sv = start_reuseport_server(family, sotype, ip, port, 100, 1);
if (!ASSERT_NEQ(fd_sv, NULL, "start_reuseport_server"))
goto cleanup;
ret = attach_reuseport(*fd_sv, fd_drop);
if (!ASSERT_OK(ret, "attach_reuseport"))
goto cleanup;
fd_val = *fd_sv;
ret = bpf_map_update_elem(fd_map, &zero, &fd_val, BPF_NOEXIST);
if (!ASSERT_OK(ret, "bpf_sk_map"))
goto cleanup;
ret = bpf_tc_hook_create(&tc_hook);
if (ret == 0)
hook_created = true;
ret = ret == -EEXIST ? 0 : ret;
if (!ASSERT_OK(ret, "bpf_tc_hook_create"))
goto cleanup;
tc_opts.prog_fd = fd_tc;
ret = bpf_tc_attach(&tc_hook, &tc_opts);
if (!ASSERT_OK(ret, "bpf_tc_attach"))
goto cleanup;
tc_attached = true;
if (sotype == SOCK_STREAM)
ASSERT_EQ(echo_test_tcp(*fd_sv), ECONNREFUSED, "drop_tcp");
else
ASSERT_EQ(echo_test_udp(*fd_sv), EAGAIN, "drop_udp");
ASSERT_EQ(skel->bss->reuseport_executed, 1, "program executed once");
skel->bss->sk_cookie_seen = 0;
skel->bss->reuseport_executed = 0;
ASSERT_OK(attach_reuseport(*fd_sv, fd_accept), "attach_reuseport(accept)");
if (sotype == SOCK_STREAM)
ASSERT_EQ(echo_test_tcp(*fd_sv), 0, "echo_tcp");
else
ASSERT_EQ(echo_test_udp(*fd_sv), 0, "echo_udp");
ASSERT_EQ(skel->bss->sk_cookie_seen, cookie(*fd_sv),
"cookie_mismatch");
ASSERT_EQ(skel->bss->reuseport_executed, 1, "program executed once");
cleanup:
if (tc_attached) {
tc_opts.flags = tc_opts.prog_fd = tc_opts.prog_id = 0;
ret = bpf_tc_detach(&tc_hook, &tc_opts);
ASSERT_OK(ret, "bpf_tc_detach");
}
if (hook_created) {
tc_hook.attach_point = BPF_TC_INGRESS | BPF_TC_EGRESS;
bpf_tc_hook_destroy(&tc_hook);
}
test_assign_reuse__destroy(skel);
free_fds(fd_sv, 1);
}
void test_assign_reuse(void)
{
struct nstoken *tok = NULL;
SYS(out, "ip netns add %s", NS_TEST);
SYS(cleanup, "ip -net %s link set dev lo up", NS_TEST);
tok = open_netns(NS_TEST);
if (!ASSERT_OK_PTR(tok, "netns token"))
return;
if (test__start_subtest("tcpv4"))
run_assign_reuse(AF_INET, SOCK_STREAM, "127.0.0.1", PORT);
if (test__start_subtest("tcpv6"))
run_assign_reuse(AF_INET6, SOCK_STREAM, "::1", PORT);
if (test__start_subtest("udpv4"))
run_assign_reuse(AF_INET, SOCK_DGRAM, "127.0.0.1", PORT);
if (test__start_subtest("udpv6"))
run_assign_reuse(AF_INET6, SOCK_DGRAM, "::1", PORT);
cleanup:
close_netns(tok);
SYS_NOFAIL("ip netns delete %s", NS_TEST);
out:
return;
}
| linux-master | tools/testing/selftests/bpf/prog_tests/assign_reuse.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook */
#include <test_progs.h>
#include <network_helpers.h>
#include <bpf/btf.h>
#include "bind4_prog.skel.h"
#include "freplace_progmap.skel.h"
#include "xdp_dummy.skel.h"
typedef int (*test_cb)(struct bpf_object *obj);
static int check_data_map(struct bpf_object *obj, int prog_cnt, bool reset)
{
struct bpf_map *data_map = NULL, *map;
__u64 *result = NULL;
const int zero = 0;
__u32 duration = 0;
int ret = -1, i;
result = malloc((prog_cnt + 32 /* spare */) * sizeof(__u64));
if (CHECK(!result, "alloc_memory", "failed to alloc memory"))
return -ENOMEM;
bpf_object__for_each_map(map, obj)
if (bpf_map__is_internal(map)) {
data_map = map;
break;
}
if (CHECK(!data_map, "find_data_map", "data map not found\n"))
goto out;
ret = bpf_map_lookup_elem(bpf_map__fd(data_map), &zero, result);
if (CHECK(ret, "get_result",
"failed to get output data: %d\n", ret))
goto out;
for (i = 0; i < prog_cnt; i++) {
if (CHECK(result[i] != 1, "result",
"fexit_bpf2bpf result[%d] failed err %llu\n",
i, result[i]))
goto out;
result[i] = 0;
}
if (reset) {
ret = bpf_map_update_elem(bpf_map__fd(data_map), &zero, result, 0);
if (CHECK(ret, "reset_result", "failed to reset result\n"))
goto out;
}
ret = 0;
out:
free(result);
return ret;
}
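/*
* Common driver for the fexit/freplace subtests: load the target object,
* then open the test object and point each of its programs at the target
* before loading, roughly:
*
*   bpf_object__for_each_program(p, obj)
*       bpf_program__set_attach_target(p, tgt_fd, NULL);
*
* Every entry of prog_name[] ("fexit/<func>" or "freplace/<func>") is then
* attached with bpf_program__attach_trace() and its link info is validated.
* If run_prog is set, the target program is run once and the per-program
* results map is checked via check_data_map().
*/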
static void test_fexit_bpf2bpf_common(const char *obj_file,
const char *target_obj_file,
int prog_cnt,
const char **prog_name,
bool run_prog,
test_cb cb)
{
struct bpf_object *obj = NULL, *tgt_obj;
__u32 tgt_prog_id, info_len;
struct bpf_prog_info prog_info = {};
struct bpf_program **prog = NULL, *p;
struct bpf_link **link = NULL;
int err, tgt_fd, i;
struct btf *btf;
LIBBPF_OPTS(bpf_test_run_opts, topts,
.data_in = &pkt_v6,
.data_size_in = sizeof(pkt_v6),
.repeat = 1,
);
err = bpf_prog_test_load(target_obj_file, BPF_PROG_TYPE_UNSPEC,
&tgt_obj, &tgt_fd);
if (!ASSERT_OK(err, "tgt_prog_load"))
return;
info_len = sizeof(prog_info);
err = bpf_prog_get_info_by_fd(tgt_fd, &prog_info, &info_len);
if (!ASSERT_OK(err, "tgt_fd_get_info"))
goto close_prog;
tgt_prog_id = prog_info.id;
btf = bpf_object__btf(tgt_obj);
link = calloc(prog_cnt, sizeof(struct bpf_link *));
if (!ASSERT_OK_PTR(link, "link_ptr"))
goto close_prog;
prog = calloc(prog_cnt, sizeof(struct bpf_program *));
if (!ASSERT_OK_PTR(prog, "prog_ptr"))
goto close_prog;
obj = bpf_object__open_file(obj_file, NULL);
if (!ASSERT_OK_PTR(obj, "obj_open"))
goto close_prog;
bpf_object__for_each_program(p, obj) {
err = bpf_program__set_attach_target(p, tgt_fd, NULL);
ASSERT_OK(err, "set_attach_target");
}
err = bpf_object__load(obj);
if (!ASSERT_OK(err, "obj_load"))
goto close_prog;
for (i = 0; i < prog_cnt; i++) {
struct bpf_link_info link_info;
struct bpf_program *pos;
const char *pos_sec_name;
char *tgt_name;
__s32 btf_id;
tgt_name = strstr(prog_name[i], "/");
if (!ASSERT_OK_PTR(tgt_name, "tgt_name"))
goto close_prog;
btf_id = btf__find_by_name_kind(btf, tgt_name + 1, BTF_KIND_FUNC);
prog[i] = NULL;
bpf_object__for_each_program(pos, obj) {
pos_sec_name = bpf_program__section_name(pos);
if (pos_sec_name && !strcmp(pos_sec_name, prog_name[i])) {
prog[i] = pos;
break;
}
}
if (!ASSERT_OK_PTR(prog[i], prog_name[i]))
goto close_prog;
link[i] = bpf_program__attach_trace(prog[i]);
if (!ASSERT_OK_PTR(link[i], "attach_trace"))
goto close_prog;
info_len = sizeof(link_info);
memset(&link_info, 0, sizeof(link_info));
err = bpf_link_get_info_by_fd(bpf_link__fd(link[i]),
&link_info, &info_len);
ASSERT_OK(err, "link_fd_get_info");
ASSERT_EQ(link_info.tracing.attach_type,
bpf_program__expected_attach_type(prog[i]),
"link_attach_type");
ASSERT_EQ(link_info.tracing.target_obj_id, tgt_prog_id, "link_tgt_obj_id");
ASSERT_EQ(link_info.tracing.target_btf_id, btf_id, "link_tgt_btf_id");
}
if (cb) {
err = cb(obj);
if (err)
goto close_prog;
}
if (!run_prog)
goto close_prog;
err = bpf_prog_test_run_opts(tgt_fd, &topts);
ASSERT_OK(err, "prog_run");
ASSERT_EQ(topts.retval, 0, "prog_run_ret");
if (check_data_map(obj, prog_cnt, false))
goto close_prog;
close_prog:
for (i = 0; i < prog_cnt; i++)
bpf_link__destroy(link[i]);
bpf_object__close(obj);
bpf_object__close(tgt_obj);
free(link);
free(prog);
}
static void test_target_no_callees(void)
{
const char *prog_name[] = {
"fexit/test_pkt_md_access",
};
test_fexit_bpf2bpf_common("./fexit_bpf2bpf_simple.bpf.o",
"./test_pkt_md_access.bpf.o",
ARRAY_SIZE(prog_name),
prog_name, true, NULL);
}
static void test_target_yes_callees(void)
{
const char *prog_name[] = {
"fexit/test_pkt_access",
"fexit/test_pkt_access_subprog1",
"fexit/test_pkt_access_subprog2",
"fexit/test_pkt_access_subprog3",
};
test_fexit_bpf2bpf_common("./fexit_bpf2bpf.bpf.o",
"./test_pkt_access.bpf.o",
ARRAY_SIZE(prog_name),
prog_name, true, NULL);
}
static void test_func_replace(void)
{
const char *prog_name[] = {
"fexit/test_pkt_access",
"fexit/test_pkt_access_subprog1",
"fexit/test_pkt_access_subprog2",
"fexit/test_pkt_access_subprog3",
"freplace/get_skb_len",
"freplace/get_skb_ifindex",
"freplace/get_constant",
"freplace/test_pkt_write_access_subprog",
};
test_fexit_bpf2bpf_common("./fexit_bpf2bpf.bpf.o",
"./test_pkt_access.bpf.o",
ARRAY_SIZE(prog_name),
prog_name, true, NULL);
}
static void test_func_replace_verify(void)
{
const char *prog_name[] = {
"freplace/do_bind",
};
test_fexit_bpf2bpf_common("./freplace_connect4.bpf.o",
"./connect4_prog.bpf.o",
ARRAY_SIZE(prog_name),
prog_name, false, NULL);
}
static int test_second_attach(struct bpf_object *obj)
{
const char *prog_name = "security_new_get_constant";
const char *tgt_name = "get_constant";
const char *tgt_obj_file = "./test_pkt_access.bpf.o";
struct bpf_program *prog = NULL;
struct bpf_object *tgt_obj;
struct bpf_link *link;
int err = 0, tgt_fd;
LIBBPF_OPTS(bpf_test_run_opts, topts,
.data_in = &pkt_v6,
.data_size_in = sizeof(pkt_v6),
.repeat = 1,
);
prog = bpf_object__find_program_by_name(obj, prog_name);
if (!ASSERT_OK_PTR(prog, "find_prog"))
return -ENOENT;
err = bpf_prog_test_load(tgt_obj_file, BPF_PROG_TYPE_UNSPEC,
&tgt_obj, &tgt_fd);
if (!ASSERT_OK(err, "second_prog_load"))
return err;
link = bpf_program__attach_freplace(prog, tgt_fd, tgt_name);
if (!ASSERT_OK_PTR(link, "second_link"))
goto out;
err = bpf_prog_test_run_opts(tgt_fd, &topts);
if (!ASSERT_OK(err, "ipv6 test_run"))
goto out;
if (!ASSERT_OK(topts.retval, "ipv6 retval"))
goto out;
err = check_data_map(obj, 1, true);
if (err)
goto out;
out:
bpf_link__destroy(link);
bpf_object__close(tgt_obj);
return err;
}
static void test_func_replace_multi(void)
{
const char *prog_name[] = {
"freplace/get_constant",
};
test_fexit_bpf2bpf_common("./freplace_get_constant.bpf.o",
"./test_pkt_access.bpf.o",
ARRAY_SIZE(prog_name),
prog_name, true, test_second_attach);
}
static void test_fmod_ret_freplace(void)
{
struct bpf_object *freplace_obj = NULL, *pkt_obj, *fmod_obj = NULL;
const char *freplace_name = "./freplace_get_constant.bpf.o";
const char *fmod_ret_name = "./fmod_ret_freplace.bpf.o";
DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts);
const char *tgt_name = "./test_pkt_access.bpf.o";
struct bpf_link *freplace_link = NULL;
struct bpf_program *prog;
__u32 duration = 0;
int err, pkt_fd, attach_prog_fd;
err = bpf_prog_test_load(tgt_name, BPF_PROG_TYPE_UNSPEC,
&pkt_obj, &pkt_fd);
/* the target prog should load fine */
if (CHECK(err, "tgt_prog_load", "file %s err %d errno %d\n",
tgt_name, err, errno))
return;
freplace_obj = bpf_object__open_file(freplace_name, NULL);
if (!ASSERT_OK_PTR(freplace_obj, "freplace_obj_open"))
goto out;
prog = bpf_object__next_program(freplace_obj, NULL);
err = bpf_program__set_attach_target(prog, pkt_fd, NULL);
ASSERT_OK(err, "freplace__set_attach_target");
err = bpf_object__load(freplace_obj);
if (CHECK(err, "freplace_obj_load", "err %d\n", err))
goto out;
freplace_link = bpf_program__attach_trace(prog);
if (!ASSERT_OK_PTR(freplace_link, "freplace_attach_trace"))
goto out;
fmod_obj = bpf_object__open_file(fmod_ret_name, NULL);
if (!ASSERT_OK_PTR(fmod_obj, "fmod_obj_open"))
goto out;
attach_prog_fd = bpf_program__fd(prog);
prog = bpf_object__next_program(fmod_obj, NULL);
err = bpf_program__set_attach_target(prog, attach_prog_fd, NULL);
ASSERT_OK(err, "fmod_ret_set_attach_target");
err = bpf_object__load(fmod_obj);
if (CHECK(!err, "fmod_obj_load", "loading fmod_ret should fail\n"))
goto out;
out:
bpf_link__destroy(freplace_link);
bpf_object__close(freplace_obj);
bpf_object__close(fmod_obj);
bpf_object__close(pkt_obj);
}
static void test_func_sockmap_update(void)
{
const char *prog_name[] = {
"freplace/cls_redirect",
};
test_fexit_bpf2bpf_common("./freplace_cls_redirect.bpf.o",
"./test_cls_redirect.bpf.o",
ARRAY_SIZE(prog_name),
prog_name, false, NULL);
}
static void test_obj_load_failure_common(const char *obj_file,
const char *target_obj_file)
{
/*
* common helper asserting that the freplace prog fails to load against
* the given target object (invalid return code, incompatible map, etc.).
*/
struct bpf_object *obj = NULL, *pkt_obj;
struct bpf_program *prog;
int err, pkt_fd;
__u32 duration = 0;
err = bpf_prog_test_load(target_obj_file, BPF_PROG_TYPE_UNSPEC,
&pkt_obj, &pkt_fd);
/* the target prog should load fine */
if (CHECK(err, "tgt_prog_load", "file %s err %d errno %d\n",
target_obj_file, err, errno))
return;
obj = bpf_object__open_file(obj_file, NULL);
if (!ASSERT_OK_PTR(obj, "obj_open"))
goto close_prog;
prog = bpf_object__next_program(obj, NULL);
err = bpf_program__set_attach_target(prog, pkt_fd, NULL);
ASSERT_OK(err, "set_attach_target");
/* It should fail to load the program */
err = bpf_object__load(obj);
if (CHECK(!err, "bpf_obj_load should fail", "err %d\n", err))
goto close_prog;
close_prog:
bpf_object__close(obj);
bpf_object__close(pkt_obj);
}
static void test_func_replace_return_code(void)
{
/* test invalid return code in the replaced program */
test_obj_load_failure_common("./freplace_connect_v4_prog.bpf.o",
"./connect4_prog.bpf.o");
}
static void test_func_map_prog_compatibility(void)
{
/* test with spin lock map value in the replaced program */
test_obj_load_failure_common("./freplace_attach_probe.bpf.o",
"./test_attach_probe.bpf.o");
}
static void test_func_replace_global_func(void)
{
const char *prog_name[] = {
"freplace/test_pkt_access",
};
test_fexit_bpf2bpf_common("./freplace_global_func.bpf.o",
"./test_pkt_access.bpf.o",
ARRAY_SIZE(prog_name),
prog_name, false, NULL);
}
static int find_prog_btf_id(const char *name, __u32 attach_prog_fd)
{
struct bpf_prog_info info = {};
__u32 info_len = sizeof(info);
struct btf *btf;
int ret;
ret = bpf_prog_get_info_by_fd(attach_prog_fd, &info, &info_len);
if (ret)
return ret;
if (!info.btf_id)
return -EINVAL;
btf = btf__load_from_kernel_by_id(info.btf_id);
ret = libbpf_get_error(btf);
if (ret)
return ret;
ret = btf__find_by_name_kind(btf, name, BTF_KIND_FUNC);
btf__free(btf);
return ret;
}
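/*
* Hand-roll the smallest possible fentry program -- just "r0 = 0; exit" --
* and load it with attach_prog_fd/attach_btf_id pointing at another BPF
* program instead of a kernel function.
*/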
static int load_fentry(int attach_prog_fd, int attach_btf_id)
{
LIBBPF_OPTS(bpf_prog_load_opts, opts,
.expected_attach_type = BPF_TRACE_FENTRY,
.attach_prog_fd = attach_prog_fd,
.attach_btf_id = attach_btf_id,
);
struct bpf_insn insns[] = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
};
return bpf_prog_load(BPF_PROG_TYPE_TRACING,
"bind4_fentry",
"GPL",
insns,
ARRAY_SIZE(insns),
&opts);
}
static void test_fentry_to_cgroup_bpf(void)
{
struct bind4_prog *skel = NULL;
struct bpf_prog_info info = {};
__u32 info_len = sizeof(info);
int cgroup_fd = -1;
int fentry_fd = -1;
int btf_id;
cgroup_fd = test__join_cgroup("/fentry_to_cgroup_bpf");
if (!ASSERT_GE(cgroup_fd, 0, "cgroup_fd"))
return;
skel = bind4_prog__open_and_load();
if (!ASSERT_OK_PTR(skel, "skel"))
goto cleanup;
skel->links.bind_v4_prog = bpf_program__attach_cgroup(skel->progs.bind_v4_prog, cgroup_fd);
if (!ASSERT_OK_PTR(skel->links.bind_v4_prog, "bpf_program__attach_cgroup"))
goto cleanup;
btf_id = find_prog_btf_id("bind_v4_prog", bpf_program__fd(skel->progs.bind_v4_prog));
if (!ASSERT_GE(btf_id, 0, "find_prog_btf_id"))
goto cleanup;
fentry_fd = load_fentry(bpf_program__fd(skel->progs.bind_v4_prog), btf_id);
if (!ASSERT_GE(fentry_fd, 0, "load_fentry"))
goto cleanup;
/* Make sure bpf_prog_get_info_by_fd works correctly when attaching
* to another BPF program.
*/
ASSERT_OK(bpf_prog_get_info_by_fd(fentry_fd, &info, &info_len),
"bpf_prog_get_info_by_fd");
ASSERT_EQ(info.btf_id, 0, "info.btf_id");
ASSERT_EQ(info.attach_btf_id, btf_id, "info.attach_btf_id");
ASSERT_GT(info.attach_btf_obj_id, 0, "info.attach_btf_obj_id");
cleanup:
if (cgroup_fd >= 0)
close(cgroup_fd);
if (fentry_fd >= 0)
close(fentry_fd);
bind4_prog__destroy(skel);
}
static void test_func_replace_progmap(void)
{
struct bpf_cpumap_val value = { .qsize = 1 };
struct freplace_progmap *skel = NULL;
struct xdp_dummy *tgt_skel = NULL;
__u32 key = 0;
int err;
skel = freplace_progmap__open();
if (!ASSERT_OK_PTR(skel, "prog_open"))
return;
tgt_skel = xdp_dummy__open_and_load();
if (!ASSERT_OK_PTR(tgt_skel, "tgt_prog_load"))
goto out;
err = bpf_program__set_attach_target(skel->progs.xdp_cpumap_prog,
bpf_program__fd(tgt_skel->progs.xdp_dummy_prog),
"xdp_dummy_prog");
if (!ASSERT_OK(err, "set_attach_target"))
goto out;
err = freplace_progmap__load(skel);
if (!ASSERT_OK(err, "obj_load"))
goto out;
/* Prior to fixing the kernel, loading the PROG_TYPE_EXT 'redirect'
* program above will cause the map owner type of 'cpumap' to be set to
* PROG_TYPE_EXT. This in turn will cause the bpf_map_update_elem()
* below to fail, because the program we are inserting into the map is
* of PROG_TYPE_XDP. After fixing the kernel, the initial ownership will
* be correctly resolved to the *target* of the PROG_TYPE_EXT program
* (i.e., PROG_TYPE_XDP) and the map update will succeed.
*/
value.bpf_prog.fd = bpf_program__fd(skel->progs.xdp_drop_prog);
err = bpf_map_update_elem(bpf_map__fd(skel->maps.cpu_map),
&key, &value, 0);
ASSERT_OK(err, "map_update");
out:
xdp_dummy__destroy(tgt_skel);
freplace_progmap__destroy(skel);
}
/* NOTE: affect other tests, must run in serial mode */
void serial_test_fexit_bpf2bpf(void)
{
if (test__start_subtest("target_no_callees"))
test_target_no_callees();
if (test__start_subtest("target_yes_callees"))
test_target_yes_callees();
if (test__start_subtest("func_replace"))
test_func_replace();
if (test__start_subtest("func_replace_verify"))
test_func_replace_verify();
if (test__start_subtest("func_sockmap_update"))
test_func_sockmap_update();
if (test__start_subtest("func_replace_return_code"))
test_func_replace_return_code();
if (test__start_subtest("func_map_prog_compatibility"))
test_func_map_prog_compatibility();
if (test__start_subtest("func_replace_multi"))
test_func_replace_multi();
if (test__start_subtest("fmod_ret_freplace"))
test_fmod_ret_freplace();
if (test__start_subtest("func_replace_global_func"))
test_func_replace_global_func();
if (test__start_subtest("fentry_to_cgroup_bpf"))
test_fentry_to_cgroup_bpf();
if (test__start_subtest("func_replace_progmap"))
test_func_replace_progmap();
}
| linux-master | tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c |
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Test suite for lwt_xmit BPF programs that redirect packets.
 * The tests check not only that these programs work as expected in the
 * normal case, but also that they handle abnormal situations gracefully.
*
* WARNING
* -------
* This test suite may crash the kernel, thus should be run in a VM.
*
* Setup:
* ---------
 * All tests are performed in a single netns. Two lwt encap routes are set up
 * for each subtest:
*
* ip route add 10.0.0.0/24 encap bpf xmit <obj> sec "<ingress_sec>" dev link_err
* ip route add 20.0.0.0/24 encap bpf xmit <obj> sec "<egress_sec>" dev link_err
*
 * Here <obj> is statically defined to test_lwt_redirect.bpf.o, and each section
 * of this object holds a program entry under test. The BPF object is built from
 * progs/test_lwt_redirect.c. We don't use a generated BPF skeleton since
 * attachment of lwt programs is not supported by libbpf yet.
*
* For testing, ping commands are run in the test netns:
*
* ping 10.0.0.<ifindex> -c 1 -w 1 -s 100
* ping 20.0.0.<ifindex> -c 1 -w 1 -s 100
*
* Scenarios:
* --------------------------------
* 1. Redirect to a running tap/tun device
* 2. Redirect to a down tap/tun device
* 3. Redirect to a vlan device with lower layer down
*
 * In case 1, ping packets should be received by a packet socket on the target
 * device when redirected to ingress, and by the tun/tap fd when redirected to
 * egress.
 *
 * Cases 2 and 3 are considered successful as long as they do not crash the
 * kernel, i.e. no regression.
 *
 * Cases 1 and 2 use a tap device to test redirecting to a device that requires
 * a MAC header, and a tun device to test the case where no MAC header is added.
*/
#include <sys/socket.h>
#include <net/if.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>
#include <linux/if_tun.h>
#include <linux/icmp.h>
#include <arpa/inet.h>
#include <unistd.h>
#include <errno.h>
#include <stdbool.h>
#include <stdlib.h>
#include "lwt_helpers.h"
#include "test_progs.h"
#include "network_helpers.h"
#define BPF_OBJECT "test_lwt_redirect.bpf.o"
#define INGRESS_SEC(need_mac) ((need_mac) ? "redir_ingress" : "redir_ingress_nomac")
#define EGRESS_SEC(need_mac) ((need_mac) ? "redir_egress" : "redir_egress_nomac")
#define LOCAL_SRC "10.0.0.1"
#define CIDR_TO_INGRESS "10.0.0.0/24"
#define CIDR_TO_EGRESS "20.0.0.0/24"
/* Ping to trigger a redirect toward the given device; the last byte of the
 * destination IP encodes the target device index.
 *
 * Note: the ping command inside BPF CI is the busybox version, which lacks
 * some options, such as -m to set the packet mark.
*/
static void ping_dev(const char *dev, bool is_ingress)
{
int link_index = if_nametoindex(dev);
char ip[256];
	/* if_nametoindex() returns 0 on failure */
	if (!ASSERT_NEQ(link_index, 0, "if_nametoindex"))
return;
if (is_ingress)
snprintf(ip, sizeof(ip), "10.0.0.%d", link_index);
else
snprintf(ip, sizeof(ip), "20.0.0.%d", link_index);
/* We won't get a reply. Don't fail here */
SYS_NOFAIL("ping %s -c1 -W1 -s %d >/dev/null 2>&1",
ip, ICMP_PAYLOAD_SIZE);
}
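/* Open a non-blocking AF_PACKET socket bound to @ifname, capturing only
 * incoming IPv4 packets (PACKET_IGNORE_OUTGOING drops egress traffic).
 */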
static int new_packet_sock(const char *ifname)
{
int err = 0;
int ignore_outgoing = 1;
int ifindex = -1;
int s = -1;
s = socket(AF_PACKET, SOCK_RAW, 0);
if (!ASSERT_GE(s, 0, "socket(AF_PACKET)"))
return -1;
ifindex = if_nametoindex(ifname);
	if (!ASSERT_NEQ(ifindex, 0, "if_nametoindex")) {
close(s);
return -1;
}
struct sockaddr_ll addr = {
.sll_family = AF_PACKET,
.sll_protocol = htons(ETH_P_IP),
.sll_ifindex = ifindex,
};
err = bind(s, (struct sockaddr *)&addr, sizeof(addr));
if (!ASSERT_OK(err, "bind(AF_PACKET)")) {
close(s);
return -1;
}
	/* Use a packet socket to capture only ingress traffic, so we can
	 * distinguish the case where a regression actually redirects the
	 * packet to egress.
	 */
err = setsockopt(s, SOL_PACKET, PACKET_IGNORE_OUTGOING,
&ignore_outgoing, sizeof(ignore_outgoing));
if (!ASSERT_OK(err, "setsockopt(PACKET_IGNORE_OUTGOING)")) {
close(s);
return -1;
}
err = fcntl(s, F_SETFL, O_NONBLOCK);
if (!ASSERT_OK(err, "fcntl(O_NONBLOCK)")) {
close(s);
return -1;
}
return s;
}
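/* Filter callbacks for wait_for_packet(): match an ICMPv4 packet with
 * (expect_icmp) or without (expect_icmp_nomac) a leading Ethernet header.
 */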
static int expect_icmp(char *buf, ssize_t len)
{
struct ethhdr *eth = (struct ethhdr *)buf;
if (len < (ssize_t)sizeof(*eth))
return -1;
if (eth->h_proto == htons(ETH_P_IP))
return __expect_icmp_ipv4((char *)(eth + 1), len - sizeof(*eth));
return -1;
}
static int expect_icmp_nomac(char *buf, ssize_t len)
{
return __expect_icmp_ipv4(buf, len);
}
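/* Ping toward egress and expect the packet on the tun/tap fd, then ping
 * toward ingress and expect it on a packet socket bound to the target.
 */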
static void send_and_capture_test_packets(const char *test_name, int tap_fd,
const char *target_dev, bool need_mac)
{
int psock = -1;
struct timeval timeo = {
.tv_sec = 0,
.tv_usec = 250000,
};
int ret = -1;
filter_t filter = need_mac ? expect_icmp : expect_icmp_nomac;
ping_dev(target_dev, false);
ret = wait_for_packet(tap_fd, filter, &timeo);
if (!ASSERT_EQ(ret, 1, "wait_for_epacket")) {
log_err("%s egress test fails", test_name);
goto out;
}
psock = new_packet_sock(target_dev);
ping_dev(target_dev, true);
ret = wait_for_packet(psock, filter, &timeo);
if (!ASSERT_EQ(ret, 1, "wait_for_ipacket")) {
log_err("%s ingress test fails", test_name);
goto out;
}
out:
if (psock >= 0)
close(psock);
}
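/* Create the tun/tap target device and a dummy "link_err" device, then
 * install the two bpf xmit encap routes on link_err; the attached programs
 * redirect the packets to the target device. Returns the tun/tap fd on
 * success, -1 on failure.
 */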
static int setup_redirect_target(const char *target_dev, bool need_mac)
{
int target_index = -1;
int tap_fd = -1;
tap_fd = open_tuntap(target_dev, need_mac);
if (!ASSERT_GE(tap_fd, 0, "open_tuntap"))
goto fail;
target_index = if_nametoindex(target_dev);
	if (!ASSERT_NEQ(target_index, 0, "if_nametoindex"))
goto fail;
SYS(fail, "ip link add link_err type dummy");
SYS(fail, "ip link set lo up");
SYS(fail, "ip addr add dev lo " LOCAL_SRC "/32");
SYS(fail, "ip link set link_err up");
SYS(fail, "ip link set %s up", target_dev);
SYS(fail, "ip route add %s dev link_err encap bpf xmit obj %s sec %s",
CIDR_TO_INGRESS, BPF_OBJECT, INGRESS_SEC(need_mac));
SYS(fail, "ip route add %s dev link_err encap bpf xmit obj %s sec %s",
CIDR_TO_EGRESS, BPF_OBJECT, EGRESS_SEC(need_mac));
return tap_fd;
fail:
if (tap_fd >= 0)
close(tap_fd);
return -1;
}
static void test_lwt_redirect_normal(void)
{
const char *target_dev = "tap0";
int tap_fd = -1;
bool need_mac = true;
tap_fd = setup_redirect_target(target_dev, need_mac);
if (!ASSERT_GE(tap_fd, 0, "setup_redirect_target"))
return;
send_and_capture_test_packets(__func__, tap_fd, target_dev, need_mac);
close(tap_fd);
}
static void test_lwt_redirect_normal_nomac(void)
{
const char *target_dev = "tun0";
int tap_fd = -1;
bool need_mac = false;
tap_fd = setup_redirect_target(target_dev, need_mac);
if (!ASSERT_GE(tap_fd, 0, "setup_redirect_target"))
return;
send_and_capture_test_packets(__func__, tap_fd, target_dev, need_mac);
close(tap_fd);
}
/* This test guards against future regressions. As long as the kernel does
 * not panic, it is considered a success.
 */
static void __test_lwt_redirect_dev_down(bool need_mac)
{
const char *target_dev = "tap0";
int tap_fd = -1;
tap_fd = setup_redirect_target(target_dev, need_mac);
if (!ASSERT_GE(tap_fd, 0, "setup_redirect_target"))
return;
SYS(out, "ip link set %s down", target_dev);
ping_dev(target_dev, true);
ping_dev(target_dev, false);
out:
close(tap_fd);
}
static void test_lwt_redirect_dev_down(void)
{
__test_lwt_redirect_dev_down(true);
}
static void test_lwt_redirect_dev_down_nomac(void)
{
__test_lwt_redirect_dev_down(false);
}
/* This test guards against future regressions. As long as the kernel does
 * not panic, it is considered a success.
 */
static void test_lwt_redirect_dev_carrier_down(void)
{
const char *lower_dev = "tap0";
const char *vlan_dev = "vlan100";
int tap_fd = -1;
tap_fd = setup_redirect_target(lower_dev, true);
if (!ASSERT_GE(tap_fd, 0, "setup_redirect_target"))
return;
SYS(out, "ip link add vlan100 link %s type vlan id 100", lower_dev);
SYS(out, "ip link set %s up", vlan_dev);
SYS(out, "ip link set %s down", lower_dev);
ping_dev(vlan_dev, true);
ping_dev(vlan_dev, false);
out:
close(tap_fd);
}
static void *test_lwt_redirect_run(void *arg)
{
netns_delete();
RUN_TEST(lwt_redirect_normal);
RUN_TEST(lwt_redirect_normal_nomac);
RUN_TEST(lwt_redirect_dev_down);
RUN_TEST(lwt_redirect_dev_down_nomac);
RUN_TEST(lwt_redirect_dev_carrier_down);
return NULL;
}
void test_lwt_redirect(void)
{
pthread_t test_thread;
int err;
/* Run the tests in their own thread to isolate the namespace changes
* so they do not affect the environment of other tests.
* (specifically needed because of unshare(CLONE_NEWNS) in open_netns())
*/
err = pthread_create(&test_thread, NULL, &test_lwt_redirect_run, NULL);
if (ASSERT_OK(err, "pthread_create"))
ASSERT_OK(pthread_join(test_thread, NULL), "pthread_join");
}
| linux-master | tools/testing/selftests/bpf/prog_tests/lwt_redirect.c |
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include <network_helpers.h>
enum {
QUEUE,
STACK,
};
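/* Push random values into map_in from userspace, then run a SCHED_CLS prog
 * that pops them (FIFO for QUEUE, LIFO for STACK) into iph.daddr while
 * pushing iph.saddr into map_out, and verify both directions.
 */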
static void test_queue_stack_map_by_type(int type)
{
const int MAP_SIZE = 32;
__u32 vals[MAP_SIZE], val;
int i, err, prog_fd, map_in_fd, map_out_fd;
char file[32], buf[128];
struct bpf_object *obj;
struct iphdr iph;
LIBBPF_OPTS(bpf_test_run_opts, topts,
.data_in = &pkt_v4,
.data_size_in = sizeof(pkt_v4),
.data_out = buf,
.data_size_out = sizeof(buf),
.repeat = 1,
);
/* Fill test values to be used */
for (i = 0; i < MAP_SIZE; i++)
vals[i] = rand();
if (type == QUEUE)
strncpy(file, "./test_queue_map.bpf.o", sizeof(file));
else if (type == STACK)
strncpy(file, "./test_stack_map.bpf.o", sizeof(file));
else
return;
err = bpf_prog_test_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd);
if (CHECK_FAIL(err))
return;
map_in_fd = bpf_find_map(__func__, obj, "map_in");
if (map_in_fd < 0)
goto out;
map_out_fd = bpf_find_map(__func__, obj, "map_out");
if (map_out_fd < 0)
goto out;
/* Push 32 elements to the input map */
for (i = 0; i < MAP_SIZE; i++) {
err = bpf_map_update_elem(map_in_fd, NULL, &vals[i], 0);
if (CHECK_FAIL(err))
goto out;
}
	/* The eBPF program pushes iph.saddr into the output map,
	 * pops a value from the input map and saves it in iph.daddr.
	 */
for (i = 0; i < MAP_SIZE; i++) {
if (type == QUEUE) {
val = vals[i];
pkt_v4.iph.saddr = vals[i] * 5;
} else if (type == STACK) {
val = vals[MAP_SIZE - 1 - i];
pkt_v4.iph.saddr = vals[MAP_SIZE - 1 - i] * 5;
}
topts.data_size_out = sizeof(buf);
err = bpf_prog_test_run_opts(prog_fd, &topts);
if (err || topts.retval ||
topts.data_size_out != sizeof(pkt_v4))
break;
memcpy(&iph, buf + sizeof(struct ethhdr), sizeof(iph));
if (iph.daddr != val)
break;
}
ASSERT_OK(err, "bpf_map_pop_elem");
ASSERT_OK(topts.retval, "bpf_map_pop_elem test retval");
ASSERT_EQ(topts.data_size_out, sizeof(pkt_v4),
"bpf_map_pop_elem data_size_out");
ASSERT_EQ(iph.daddr, val, "bpf_map_pop_elem iph.daddr");
/* Queue is empty, program should return TC_ACT_SHOT */
topts.data_size_out = sizeof(buf);
err = bpf_prog_test_run_opts(prog_fd, &topts);
ASSERT_OK(err, "check-queue-stack-map-empty");
ASSERT_EQ(topts.retval, 2 /* TC_ACT_SHOT */,
"check-queue-stack-map-empty test retval");
ASSERT_EQ(topts.data_size_out, sizeof(pkt_v4),
"check-queue-stack-map-empty data_size_out");
/* Check that the program pushed elements correctly */
for (i = 0; i < MAP_SIZE; i++) {
err = bpf_map_lookup_and_delete_elem(map_out_fd, NULL, &val);
ASSERT_OK(err, "bpf_map_lookup_and_delete_elem");
ASSERT_EQ(val, vals[i] * 5, "bpf_map_push_elem val");
}
out:
pkt_v4.iph.saddr = 0;
bpf_object__close(obj);
}
void test_queue_stack_map(void)
{
test_queue_stack_map_by_type(QUEUE);
test_queue_stack_map_by_type(STACK);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/queue_stack_map.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/socket.h>
#include <linux/compiler.h>
#include "test_progs.h"
#include "cgroup_helpers.h"
#include "network_helpers.h"
#include "test_tcp_hdr_options.h"
#include "test_tcp_hdr_options.skel.h"
#include "test_misc_tcp_hdr_options.skel.h"
#define LO_ADDR6 "::1"
#define CG_NAME "/tcpbpf-hdr-opt-test"
static struct bpf_test_option exp_passive_estab_in;
static struct bpf_test_option exp_active_estab_in;
static struct bpf_test_option exp_passive_fin_in;
static struct bpf_test_option exp_active_fin_in;
static struct hdr_stg exp_passive_hdr_stg;
static struct hdr_stg exp_active_hdr_stg = { .active = true, };
static struct test_misc_tcp_hdr_options *misc_skel;
static struct test_tcp_hdr_options *skel;
static int lport_linum_map_fd;
static int hdr_stg_map_fd;
static __u32 duration;
static int cg_fd;
struct sk_fds {
int srv_fd;
int passive_fd;
int active_fd;
int passive_lport;
int active_lport;
};
static int create_netns(void)
{
if (!ASSERT_OK(unshare(CLONE_NEWNET), "create netns"))
return -1;
if (!ASSERT_OK(system("ip link set dev lo up"), "run ip cmd"))
return -1;
return 0;
}
static void print_hdr_stg(const struct hdr_stg *hdr_stg, const char *prefix)
{
fprintf(stderr, "%s{active:%u, resend_syn:%u, syncookie:%u, fastopen:%u}\n",
prefix ? : "", hdr_stg->active, hdr_stg->resend_syn,
hdr_stg->syncookie, hdr_stg->fastopen);
}
static void print_option(const struct bpf_test_option *opt, const char *prefix)
{
fprintf(stderr, "%s{flags:0x%x, max_delack_ms:%u, rand:0x%x}\n",
prefix ? : "", opt->flags, opt->max_delack_ms, opt->rand);
}
static void sk_fds_close(struct sk_fds *sk_fds)
{
close(sk_fds->srv_fd);
close(sk_fds->passive_fd);
close(sk_fds->active_fd);
}
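/* Shut down both directions in turn so each side sends its FIN and reads
 * EOF from the peer.
 */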
static int sk_fds_shutdown(struct sk_fds *sk_fds)
{
int ret, abyte;
shutdown(sk_fds->active_fd, SHUT_WR);
ret = read(sk_fds->passive_fd, &abyte, sizeof(abyte));
if (!ASSERT_EQ(ret, 0, "read-after-shutdown(passive_fd):"))
return -1;
shutdown(sk_fds->passive_fd, SHUT_WR);
ret = read(sk_fds->active_fd, &abyte, sizeof(abyte));
if (!ASSERT_EQ(ret, 0, "read-after-shutdown(active_fd):"))
return -1;
return 0;
}
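/* Establish a loopback TCP connection, optionally via TCP Fast Open, and
 * record the local port of each end for later error-map lookups.
 */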
static int sk_fds_connect(struct sk_fds *sk_fds, bool fast_open)
{
const char fast[] = "FAST!!!";
struct sockaddr_in6 addr6;
socklen_t len;
sk_fds->srv_fd = start_server(AF_INET6, SOCK_STREAM, LO_ADDR6, 0, 0);
if (!ASSERT_NEQ(sk_fds->srv_fd, -1, "start_server"))
goto error;
if (fast_open)
sk_fds->active_fd = fastopen_connect(sk_fds->srv_fd, fast,
sizeof(fast), 0);
else
sk_fds->active_fd = connect_to_fd(sk_fds->srv_fd, 0);
if (!ASSERT_NEQ(sk_fds->active_fd, -1, "")) {
close(sk_fds->srv_fd);
goto error;
}
len = sizeof(addr6);
if (!ASSERT_OK(getsockname(sk_fds->srv_fd, (struct sockaddr *)&addr6,
&len), "getsockname(srv_fd)"))
goto error_close;
sk_fds->passive_lport = ntohs(addr6.sin6_port);
len = sizeof(addr6);
if (!ASSERT_OK(getsockname(sk_fds->active_fd, (struct sockaddr *)&addr6,
&len), "getsockname(active_fd)"))
goto error_close;
sk_fds->active_lport = ntohs(addr6.sin6_port);
sk_fds->passive_fd = accept(sk_fds->srv_fd, NULL, 0);
if (!ASSERT_NEQ(sk_fds->passive_fd, -1, "accept(srv_fd)"))
goto error_close;
if (fast_open) {
char bytes_in[sizeof(fast)];
int ret;
ret = read(sk_fds->passive_fd, bytes_in, sizeof(bytes_in));
if (!ASSERT_EQ(ret, sizeof(fast), "read fastopen syn data")) {
close(sk_fds->passive_fd);
goto error_close;
}
}
return 0;
error_close:
close(sk_fds->active_fd);
close(sk_fds->srv_fd);
error:
memset(sk_fds, -1, sizeof(*sk_fds));
return -1;
}
static int check_hdr_opt(const struct bpf_test_option *exp,
const struct bpf_test_option *act,
const char *hdr_desc)
{
if (!ASSERT_EQ(memcmp(exp, act, sizeof(*exp)), 0, hdr_desc)) {
print_option(exp, "expected: ");
print_option(act, " actual: ");
return -1;
}
return 0;
}
static int check_hdr_stg(const struct hdr_stg *exp, int fd,
const char *stg_desc)
{
struct hdr_stg act;
if (!ASSERT_OK(bpf_map_lookup_elem(hdr_stg_map_fd, &fd, &act),
"map_lookup(hdr_stg_map_fd)"))
return -1;
if (!ASSERT_EQ(memcmp(exp, &act, sizeof(*exp)), 0, stg_desc)) {
print_hdr_stg(exp, "expected: ");
print_hdr_stg(&act, " actual: ");
return -1;
}
return 0;
}
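/* Return the number of error records the bpf prog left in
 * lport_linum_map for either end of the connection.
 */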
static int check_error_linum(const struct sk_fds *sk_fds)
{
unsigned int nr_errors = 0;
struct linum_err linum_err;
int lport;
lport = sk_fds->passive_lport;
if (!bpf_map_lookup_elem(lport_linum_map_fd, &lport, &linum_err)) {
fprintf(stderr,
"bpf prog error out at lport:passive(%d), linum:%u err:%d\n",
lport, linum_err.linum, linum_err.err);
nr_errors++;
}
lport = sk_fds->active_lport;
if (!bpf_map_lookup_elem(lport_linum_map_fd, &lport, &linum_err)) {
fprintf(stderr,
"bpf prog error out at lport:active(%d), linum:%u err:%d\n",
lport, linum_err.linum, linum_err.err);
nr_errors++;
}
return nr_errors;
}
static void check_hdr_and_close_fds(struct sk_fds *sk_fds)
{
const __u32 expected_inherit_cb_flags =
BPF_SOCK_OPS_PARSE_UNKNOWN_HDR_OPT_CB_FLAG |
BPF_SOCK_OPS_WRITE_HDR_OPT_CB_FLAG |
BPF_SOCK_OPS_STATE_CB_FLAG;
if (sk_fds_shutdown(sk_fds))
goto check_linum;
if (!ASSERT_EQ(expected_inherit_cb_flags, skel->bss->inherit_cb_flags,
"inherit_cb_flags"))
goto check_linum;
if (check_hdr_stg(&exp_passive_hdr_stg, sk_fds->passive_fd,
"passive_hdr_stg"))
goto check_linum;
if (check_hdr_stg(&exp_active_hdr_stg, sk_fds->active_fd,
"active_hdr_stg"))
goto check_linum;
if (check_hdr_opt(&exp_passive_estab_in, &skel->bss->passive_estab_in,
"passive_estab_in"))
goto check_linum;
if (check_hdr_opt(&exp_active_estab_in, &skel->bss->active_estab_in,
"active_estab_in"))
goto check_linum;
if (check_hdr_opt(&exp_passive_fin_in, &skel->bss->passive_fin_in,
"passive_fin_in"))
goto check_linum;
check_hdr_opt(&exp_active_fin_in, &skel->bss->active_fin_in,
"active_fin_in");
check_linum:
ASSERT_FALSE(check_error_linum(sk_fds), "check_error_linum");
sk_fds_close(sk_fds);
}
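/* Mirror the expected incoming options into the prog's outgoing options,
 * so what one side writes is exactly what the other side expects to read.
 */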
static void prepare_out(void)
{
skel->bss->active_syn_out = exp_passive_estab_in;
skel->bss->passive_synack_out = exp_active_estab_in;
skel->bss->active_fin_out = exp_passive_fin_in;
skel->bss->passive_fin_out = exp_active_fin_in;
}
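/* Reset all skeleton globals, expected values and the per-port error map
 * between subtests.
 */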
static void reset_test(void)
{
size_t optsize = sizeof(struct bpf_test_option);
int lport, err;
memset(&skel->bss->passive_synack_out, 0, optsize);
memset(&skel->bss->passive_fin_out, 0, optsize);
memset(&skel->bss->passive_estab_in, 0, optsize);
memset(&skel->bss->passive_fin_in, 0, optsize);
memset(&skel->bss->active_syn_out, 0, optsize);
memset(&skel->bss->active_fin_out, 0, optsize);
memset(&skel->bss->active_estab_in, 0, optsize);
memset(&skel->bss->active_fin_in, 0, optsize);
skel->bss->inherit_cb_flags = 0;
skel->data->test_kind = TCPOPT_EXP;
skel->data->test_magic = 0xeB9F;
memset(&exp_passive_estab_in, 0, optsize);
memset(&exp_active_estab_in, 0, optsize);
memset(&exp_passive_fin_in, 0, optsize);
memset(&exp_active_fin_in, 0, optsize);
memset(&exp_passive_hdr_stg, 0, sizeof(exp_passive_hdr_stg));
memset(&exp_active_hdr_stg, 0, sizeof(exp_active_hdr_stg));
exp_active_hdr_stg.active = true;
err = bpf_map_get_next_key(lport_linum_map_fd, NULL, &lport);
while (!err) {
bpf_map_delete_elem(lport_linum_map_fd, &lport);
err = bpf_map_get_next_key(lport_linum_map_fd, &lport, &lport);
}
}
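/* Each subtest below programs the expected header options, tweaks the
 * relevant sysctl, attaches the sock_ops prog to the cgroup, makes one
 * connection and verifies the options observed on both ends.
 */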
static void fastopen_estab(void)
{
struct bpf_link *link;
struct sk_fds sk_fds;
hdr_stg_map_fd = bpf_map__fd(skel->maps.hdr_stg_map);
lport_linum_map_fd = bpf_map__fd(skel->maps.lport_linum_map);
exp_passive_estab_in.flags = OPTION_F_RAND | OPTION_F_MAX_DELACK_MS;
exp_passive_estab_in.rand = 0xfa;
exp_passive_estab_in.max_delack_ms = 11;
exp_active_estab_in.flags = OPTION_F_RAND | OPTION_F_MAX_DELACK_MS;
exp_active_estab_in.rand = 0xce;
exp_active_estab_in.max_delack_ms = 22;
exp_passive_hdr_stg.fastopen = true;
prepare_out();
/* Allow fastopen without fastopen cookie */
if (write_sysctl("/proc/sys/net/ipv4/tcp_fastopen", "1543"))
return;
link = bpf_program__attach_cgroup(skel->progs.estab, cg_fd);
if (!ASSERT_OK_PTR(link, "attach_cgroup(estab)"))
return;
if (sk_fds_connect(&sk_fds, true)) {
bpf_link__destroy(link);
return;
}
check_hdr_and_close_fds(&sk_fds);
bpf_link__destroy(link);
}
static void syncookie_estab(void)
{
struct bpf_link *link;
struct sk_fds sk_fds;
hdr_stg_map_fd = bpf_map__fd(skel->maps.hdr_stg_map);
lport_linum_map_fd = bpf_map__fd(skel->maps.lport_linum_map);
exp_passive_estab_in.flags = OPTION_F_RAND | OPTION_F_MAX_DELACK_MS;
exp_passive_estab_in.rand = 0xfa;
exp_passive_estab_in.max_delack_ms = 11;
exp_active_estab_in.flags = OPTION_F_RAND | OPTION_F_MAX_DELACK_MS |
OPTION_F_RESEND;
exp_active_estab_in.rand = 0xce;
exp_active_estab_in.max_delack_ms = 22;
exp_passive_hdr_stg.syncookie = true;
exp_active_hdr_stg.resend_syn = true;
prepare_out();
	/* Clear the RESEND flag to ensure the bpf prog can learn
	 * want_cookie and set RESEND by itself.
	 */
skel->bss->passive_synack_out.flags &= ~OPTION_F_RESEND;
/* Enforce syncookie mode */
if (write_sysctl("/proc/sys/net/ipv4/tcp_syncookies", "2"))
return;
link = bpf_program__attach_cgroup(skel->progs.estab, cg_fd);
if (!ASSERT_OK_PTR(link, "attach_cgroup(estab)"))
return;
if (sk_fds_connect(&sk_fds, false)) {
bpf_link__destroy(link);
return;
}
check_hdr_and_close_fds(&sk_fds);
bpf_link__destroy(link);
}
static void fin(void)
{
struct bpf_link *link;
struct sk_fds sk_fds;
hdr_stg_map_fd = bpf_map__fd(skel->maps.hdr_stg_map);
lport_linum_map_fd = bpf_map__fd(skel->maps.lport_linum_map);
exp_passive_fin_in.flags = OPTION_F_RAND;
exp_passive_fin_in.rand = 0xfa;
exp_active_fin_in.flags = OPTION_F_RAND;
exp_active_fin_in.rand = 0xce;
prepare_out();
if (write_sysctl("/proc/sys/net/ipv4/tcp_syncookies", "1"))
return;
link = bpf_program__attach_cgroup(skel->progs.estab, cg_fd);
if (!ASSERT_OK_PTR(link, "attach_cgroup(estab)"))
return;
if (sk_fds_connect(&sk_fds, false)) {
bpf_link__destroy(link);
return;
}
check_hdr_and_close_fds(&sk_fds);
bpf_link__destroy(link);
}
static void __simple_estab(bool exprm)
{
struct bpf_link *link;
struct sk_fds sk_fds;
hdr_stg_map_fd = bpf_map__fd(skel->maps.hdr_stg_map);
lport_linum_map_fd = bpf_map__fd(skel->maps.lport_linum_map);
exp_passive_estab_in.flags = OPTION_F_RAND | OPTION_F_MAX_DELACK_MS;
exp_passive_estab_in.rand = 0xfa;
exp_passive_estab_in.max_delack_ms = 11;
exp_active_estab_in.flags = OPTION_F_RAND | OPTION_F_MAX_DELACK_MS;
exp_active_estab_in.rand = 0xce;
exp_active_estab_in.max_delack_ms = 22;
prepare_out();
if (!exprm) {
skel->data->test_kind = 0xB9;
skel->data->test_magic = 0;
}
if (write_sysctl("/proc/sys/net/ipv4/tcp_syncookies", "1"))
return;
link = bpf_program__attach_cgroup(skel->progs.estab, cg_fd);
if (!ASSERT_OK_PTR(link, "attach_cgroup(estab)"))
return;
if (sk_fds_connect(&sk_fds, false)) {
bpf_link__destroy(link);
return;
}
check_hdr_and_close_fds(&sk_fds);
bpf_link__destroy(link);
}
static void no_exprm_estab(void)
{
__simple_estab(false);
}
static void simple_estab(void)
{
__simple_estab(true);
}
static void misc(void)
{
const char send_msg[] = "MISC!!!";
char recv_msg[sizeof(send_msg)];
const unsigned int nr_data = 2;
struct bpf_link *link;
struct sk_fds sk_fds;
int i, ret;
lport_linum_map_fd = bpf_map__fd(misc_skel->maps.lport_linum_map);
if (write_sysctl("/proc/sys/net/ipv4/tcp_syncookies", "1"))
return;
link = bpf_program__attach_cgroup(misc_skel->progs.misc_estab, cg_fd);
if (!ASSERT_OK_PTR(link, "attach_cgroup(misc_estab)"))
return;
if (sk_fds_connect(&sk_fds, false)) {
bpf_link__destroy(link);
return;
}
for (i = 0; i < nr_data; i++) {
/* MSG_EOR to ensure skb will not be combined */
ret = send(sk_fds.active_fd, send_msg, sizeof(send_msg),
MSG_EOR);
if (!ASSERT_EQ(ret, sizeof(send_msg), "send(msg)"))
goto check_linum;
ret = read(sk_fds.passive_fd, recv_msg, sizeof(recv_msg));
if (!ASSERT_EQ(ret, sizeof(send_msg), "read(msg)"))
goto check_linum;
}
if (sk_fds_shutdown(&sk_fds))
goto check_linum;
ASSERT_EQ(misc_skel->bss->nr_syn, 1, "unexpected nr_syn");
ASSERT_EQ(misc_skel->bss->nr_data, nr_data, "unexpected nr_data");
/* The last ACK may have been delayed, so it is either 1 or 2. */
CHECK(misc_skel->bss->nr_pure_ack != 1 &&
misc_skel->bss->nr_pure_ack != 2,
"unexpected nr_pure_ack",
"expected (1 or 2) != actual (%u)\n",
misc_skel->bss->nr_pure_ack);
ASSERT_EQ(misc_skel->bss->nr_fin, 1, "unexpected nr_fin");
ASSERT_EQ(misc_skel->bss->nr_hwtstamp, 0, "nr_hwtstamp");
check_linum:
ASSERT_FALSE(check_error_linum(&sk_fds), "check_error_linum");
sk_fds_close(&sk_fds);
bpf_link__destroy(link);
}
struct test {
const char *desc;
void (*run)(void);
};
#define DEF_TEST(name) { #name, name }
static struct test tests[] = {
DEF_TEST(simple_estab),
DEF_TEST(no_exprm_estab),
DEF_TEST(syncookie_estab),
DEF_TEST(fastopen_estab),
DEF_TEST(fin),
DEF_TEST(misc),
};
void test_tcp_hdr_options(void)
{
int i;
skel = test_tcp_hdr_options__open_and_load();
if (!ASSERT_OK_PTR(skel, "open and load skel"))
return;
misc_skel = test_misc_tcp_hdr_options__open_and_load();
if (!ASSERT_OK_PTR(misc_skel, "open and load misc test skel"))
goto skel_destroy;
cg_fd = test__join_cgroup(CG_NAME);
if (!ASSERT_GE(cg_fd, 0, "join_cgroup"))
goto skel_destroy;
for (i = 0; i < ARRAY_SIZE(tests); i++) {
if (!test__start_subtest(tests[i].desc))
continue;
if (create_netns())
break;
tests[i].run();
reset_test();
}
close(cg_fd);
skel_destroy:
test_misc_tcp_hdr_options__destroy(misc_skel);
test_tcp_hdr_options__destroy(skel);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/tcp_hdr_options.c |
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include "cgroup_helpers.h"
#include "testing_helpers.h"
#include "test_cgroup_link.skel.h"
static __u32 duration = 0;
#define PING_CMD "ping -q -c1 -w1 127.0.0.1 > /dev/null"
static struct test_cgroup_link *skel = NULL;
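/* Ping over loopback and check how many times each egress prog ran:
 * exp_calls for egress, exp_alt_calls for egress_alt.
 */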
static int ping_and_check(int exp_calls, int exp_alt_calls)
{
skel->bss->calls = 0;
skel->bss->alt_calls = 0;
CHECK_FAIL(system(PING_CMD));
if (CHECK(skel->bss->calls != exp_calls, "call_cnt",
"exp %d, got %d\n", exp_calls, skel->bss->calls))
return -EINVAL;
if (CHECK(skel->bss->alt_calls != exp_alt_calls, "alt_call_cnt",
"exp %d, got %d\n", exp_alt_calls, skel->bss->alt_calls))
return -EINVAL;
return 0;
}
void serial_test_cgroup_link(void)
{
struct {
const char *path;
int fd;
} cgs[] = {
{ "/cg1" },
{ "/cg1/cg2" },
{ "/cg1/cg2/cg3" },
{ "/cg1/cg2/cg3/cg4" },
};
int last_cg = ARRAY_SIZE(cgs) - 1, cg_nr = ARRAY_SIZE(cgs);
DECLARE_LIBBPF_OPTS(bpf_link_update_opts, link_upd_opts);
struct bpf_link *links[ARRAY_SIZE(cgs)] = {}, *tmp_link;
__u32 prog_ids[ARRAY_SIZE(cgs)], prog_cnt = 0, attach_flags, prog_id;
struct bpf_link_info info;
int i = 0, err, prog_fd;
bool detach_legacy = false;
skel = test_cgroup_link__open_and_load();
if (CHECK(!skel, "skel_open_load", "failed to open/load skeleton\n"))
return;
prog_fd = bpf_program__fd(skel->progs.egress);
err = setup_cgroup_environment();
if (CHECK(err, "cg_init", "failed: %d\n", err))
goto cleanup;
for (i = 0; i < cg_nr; i++) {
cgs[i].fd = create_and_get_cgroup(cgs[i].path);
if (!ASSERT_GE(cgs[i].fd, 0, "cg_create"))
goto cleanup;
}
err = join_cgroup(cgs[last_cg].path);
if (CHECK(err, "cg_join", "fail: %d\n", err))
goto cleanup;
for (i = 0; i < cg_nr; i++) {
links[i] = bpf_program__attach_cgroup(skel->progs.egress,
cgs[i].fd);
if (!ASSERT_OK_PTR(links[i], "cg_attach"))
goto cleanup;
}
ping_and_check(cg_nr, 0);
/* query the number of attached progs and attach flags in root cg */
err = bpf_prog_query(cgs[0].fd, BPF_CGROUP_INET_EGRESS,
0, &attach_flags, NULL, &prog_cnt);
CHECK_FAIL(err);
CHECK_FAIL(attach_flags != BPF_F_ALLOW_MULTI);
if (CHECK(prog_cnt != 1, "effect_cnt", "exp %d, got %d\n", 1, prog_cnt))
goto cleanup;
/* query the number of effective progs in last cg */
err = bpf_prog_query(cgs[last_cg].fd, BPF_CGROUP_INET_EGRESS,
BPF_F_QUERY_EFFECTIVE, NULL, NULL,
&prog_cnt);
CHECK_FAIL(err);
if (CHECK(prog_cnt != cg_nr, "effect_cnt", "exp %d, got %d\n",
cg_nr, prog_cnt))
goto cleanup;
/* query the effective prog IDs in last cg */
err = bpf_prog_query(cgs[last_cg].fd, BPF_CGROUP_INET_EGRESS,
BPF_F_QUERY_EFFECTIVE, NULL, prog_ids,
&prog_cnt);
CHECK_FAIL(err);
if (CHECK(prog_cnt != cg_nr, "effect_cnt", "exp %d, got %d\n",
cg_nr, prog_cnt))
goto cleanup;
for (i = 1; i < prog_cnt; i++) {
CHECK(prog_ids[i - 1] != prog_ids[i], "prog_id_check",
"idx %d, prev id %d, cur id %d\n",
i, prog_ids[i - 1], prog_ids[i]);
}
/* detach bottom program and ping again */
bpf_link__destroy(links[last_cg]);
links[last_cg] = NULL;
ping_and_check(cg_nr - 1, 0);
/* mix in with non link-based multi-attachments */
err = bpf_prog_attach(prog_fd, cgs[last_cg].fd,
BPF_CGROUP_INET_EGRESS, BPF_F_ALLOW_MULTI);
if (CHECK(err, "cg_attach_legacy", "errno=%d\n", errno))
goto cleanup;
detach_legacy = true;
links[last_cg] = bpf_program__attach_cgroup(skel->progs.egress,
cgs[last_cg].fd);
if (!ASSERT_OK_PTR(links[last_cg], "cg_attach"))
goto cleanup;
ping_and_check(cg_nr + 1, 0);
/* detach link */
bpf_link__destroy(links[last_cg]);
links[last_cg] = NULL;
/* detach legacy */
err = bpf_prog_detach2(prog_fd, cgs[last_cg].fd, BPF_CGROUP_INET_EGRESS);
if (CHECK(err, "cg_detach_legacy", "errno=%d\n", errno))
goto cleanup;
detach_legacy = false;
/* attach legacy exclusive prog attachment */
err = bpf_prog_attach(prog_fd, cgs[last_cg].fd,
BPF_CGROUP_INET_EGRESS, 0);
if (CHECK(err, "cg_attach_exclusive", "errno=%d\n", errno))
goto cleanup;
detach_legacy = true;
/* attempt to mix in with multi-attach bpf_link */
tmp_link = bpf_program__attach_cgroup(skel->progs.egress,
cgs[last_cg].fd);
if (!ASSERT_ERR_PTR(tmp_link, "cg_attach_fail")) {
bpf_link__destroy(tmp_link);
goto cleanup;
}
ping_and_check(cg_nr, 0);
/* detach */
err = bpf_prog_detach2(prog_fd, cgs[last_cg].fd, BPF_CGROUP_INET_EGRESS);
if (CHECK(err, "cg_detach_legacy", "errno=%d\n", errno))
goto cleanup;
detach_legacy = false;
ping_and_check(cg_nr - 1, 0);
/* attach back link-based one */
links[last_cg] = bpf_program__attach_cgroup(skel->progs.egress,
cgs[last_cg].fd);
if (!ASSERT_OK_PTR(links[last_cg], "cg_attach"))
goto cleanup;
ping_and_check(cg_nr, 0);
/* check legacy exclusive prog can't be attached */
err = bpf_prog_attach(prog_fd, cgs[last_cg].fd,
BPF_CGROUP_INET_EGRESS, 0);
if (CHECK(!err, "cg_attach_exclusive", "unexpected success")) {
bpf_prog_detach2(prog_fd, cgs[last_cg].fd, BPF_CGROUP_INET_EGRESS);
goto cleanup;
}
/* replace BPF programs inside their links for all but first link */
for (i = 1; i < cg_nr; i++) {
err = bpf_link__update_program(links[i], skel->progs.egress_alt);
if (CHECK(err, "prog_upd", "link #%d\n", i))
goto cleanup;
}
ping_and_check(1, cg_nr - 1);
/* Attempt program update with wrong expected BPF program */
link_upd_opts.old_prog_fd = bpf_program__fd(skel->progs.egress_alt);
link_upd_opts.flags = BPF_F_REPLACE;
err = bpf_link_update(bpf_link__fd(links[0]),
bpf_program__fd(skel->progs.egress_alt),
&link_upd_opts);
if (CHECK(err == 0 || errno != EPERM, "prog_cmpxchg1",
"unexpectedly succeeded, err %d, errno %d\n", err, -errno))
goto cleanup;
/* Compare-exchange single link program from egress to egress_alt */
link_upd_opts.old_prog_fd = bpf_program__fd(skel->progs.egress);
link_upd_opts.flags = BPF_F_REPLACE;
err = bpf_link_update(bpf_link__fd(links[0]),
bpf_program__fd(skel->progs.egress_alt),
&link_upd_opts);
if (CHECK(err, "prog_cmpxchg2", "errno %d\n", -errno))
goto cleanup;
/* ping */
ping_and_check(0, cg_nr);
/* close cgroup FDs before detaching links */
for (i = 0; i < cg_nr; i++) {
if (cgs[i].fd > 0) {
close(cgs[i].fd);
cgs[i].fd = -1;
}
}
/* BPF programs should still get called */
ping_and_check(0, cg_nr);
prog_id = link_info_prog_id(links[0], &info);
CHECK(prog_id == 0, "link_info", "failed\n");
CHECK(info.cgroup.cgroup_id == 0, "cgroup_id", "unexpected %llu\n", info.cgroup.cgroup_id);
err = bpf_link__detach(links[0]);
if (CHECK(err, "link_detach", "failed %d\n", err))
goto cleanup;
/* cgroup_id should be zero in link_info */
prog_id = link_info_prog_id(links[0], &info);
CHECK(prog_id == 0, "link_info", "failed\n");
CHECK(info.cgroup.cgroup_id != 0, "cgroup_id", "unexpected %llu\n", info.cgroup.cgroup_id);
/* First BPF program shouldn't be called anymore */
ping_and_check(0, cg_nr - 1);
/* leave cgroup and remove them, don't detach programs */
cleanup_cgroup_environment();
/* BPF programs should have been auto-detached */
ping_and_check(0, 0);
cleanup:
if (detach_legacy)
bpf_prog_detach2(prog_fd, cgs[last_cg].fd,
BPF_CGROUP_INET_EGRESS);
for (i = 0; i < cg_nr; i++) {
bpf_link__destroy(links[i]);
}
test_cgroup_link__destroy(skel);
for (i = 0; i < cg_nr; i++) {
if (cgs[i].fd > 0)
close(cgs[i].fd);
}
cleanup_cgroup_environment();
}
| linux-master | tools/testing/selftests/bpf/prog_tests/cgroup_link.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2022 Google LLC.
*/
#define _GNU_SOURCE
#include <sys/mount.h>
#include "test_progs.h"
#include "cgroup_helpers.h"
#include "network_helpers.h"
#include "connect_ping.skel.h"
/* 2001:db8::1 */
#define BINDADDR_V6 { { { 0x20,0x01,0x0d,0xb8,0,0,0,0,0,0,0,0,0,0,0,1 } } }
static const struct in6_addr bindaddr_v6 = BINDADDR_V6;
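/* Connect a ping socket of the given family to loopback and verify which
 * BPF hook fired, that no error was recorded, and the resulting bound
 * address (explicitly bound by the prog when do_bind is set).
 */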
static void subtest(int cgroup_fd, struct connect_ping *skel,
int family, int do_bind)
{
struct sockaddr_in sa4 = {
.sin_family = AF_INET,
.sin_addr.s_addr = htonl(INADDR_LOOPBACK),
};
struct sockaddr_in6 sa6 = {
.sin6_family = AF_INET6,
.sin6_addr = IN6ADDR_LOOPBACK_INIT,
};
struct sockaddr *sa;
socklen_t sa_len;
int protocol;
int sock_fd;
switch (family) {
case AF_INET:
sa = (struct sockaddr *)&sa4;
sa_len = sizeof(sa4);
protocol = IPPROTO_ICMP;
break;
case AF_INET6:
sa = (struct sockaddr *)&sa6;
sa_len = sizeof(sa6);
protocol = IPPROTO_ICMPV6;
break;
}
memset(skel->bss, 0, sizeof(*skel->bss));
skel->bss->do_bind = do_bind;
sock_fd = socket(family, SOCK_DGRAM, protocol);
if (!ASSERT_GE(sock_fd, 0, "sock-create"))
return;
if (!ASSERT_OK(connect(sock_fd, sa, sa_len), "connect"))
goto close_sock;
if (!ASSERT_EQ(skel->bss->invocations_v4, family == AF_INET ? 1 : 0,
"invocations_v4"))
goto close_sock;
if (!ASSERT_EQ(skel->bss->invocations_v6, family == AF_INET6 ? 1 : 0,
"invocations_v6"))
goto close_sock;
if (!ASSERT_EQ(skel->bss->has_error, 0, "has_error"))
goto close_sock;
if (!ASSERT_OK(getsockname(sock_fd, sa, &sa_len),
"getsockname"))
goto close_sock;
switch (family) {
case AF_INET:
if (!ASSERT_EQ(sa4.sin_family, family, "sin_family"))
goto close_sock;
if (!ASSERT_EQ(sa4.sin_addr.s_addr,
htonl(do_bind ? 0x01010101 : INADDR_LOOPBACK),
"sin_addr"))
goto close_sock;
break;
case AF_INET6:
if (!ASSERT_EQ(sa6.sin6_family, AF_INET6, "sin6_family"))
goto close_sock;
if (!ASSERT_EQ(memcmp(&sa6.sin6_addr,
do_bind ? &bindaddr_v6 : &in6addr_loopback,
sizeof(sa6.sin6_addr)),
0, "sin6_addr"))
goto close_sock;
break;
}
close_sock:
close(sock_fd);
}
void test_connect_ping(void)
{
struct connect_ping *skel;
int cgroup_fd;
if (!ASSERT_OK(unshare(CLONE_NEWNET | CLONE_NEWNS), "unshare"))
return;
	/* Overmount sysfs, making the original sysfs mount private so the
	 * overmount does not propagate to other mount namespaces.
	 */
if (!ASSERT_OK(mount("none", "/sys", NULL, MS_PRIVATE, NULL),
"remount-private-sys"))
return;
if (!ASSERT_OK(mount("sysfs", "/sys", "sysfs", 0, NULL),
"mount-sys"))
return;
if (!ASSERT_OK(mount("bpffs", "/sys/fs/bpf", "bpf", 0, NULL),
"mount-bpf"))
goto clean_mount;
if (!ASSERT_OK(system("ip link set dev lo up"), "lo-up"))
goto clean_mount;
if (!ASSERT_OK(system("ip addr add 1.1.1.1 dev lo"), "lo-addr-v4"))
goto clean_mount;
if (!ASSERT_OK(system("ip -6 addr add 2001:db8::1 dev lo"), "lo-addr-v6"))
goto clean_mount;
if (write_sysctl("/proc/sys/net/ipv4/ping_group_range", "0 0"))
goto clean_mount;
cgroup_fd = test__join_cgroup("/connect_ping");
if (!ASSERT_GE(cgroup_fd, 0, "cg-create"))
goto clean_mount;
skel = connect_ping__open_and_load();
if (!ASSERT_OK_PTR(skel, "skel-load"))
goto close_cgroup;
skel->links.connect_v4_prog =
bpf_program__attach_cgroup(skel->progs.connect_v4_prog, cgroup_fd);
if (!ASSERT_OK_PTR(skel->links.connect_v4_prog, "cg-attach-v4"))
goto skel_destroy;
skel->links.connect_v6_prog =
bpf_program__attach_cgroup(skel->progs.connect_v6_prog, cgroup_fd);
if (!ASSERT_OK_PTR(skel->links.connect_v6_prog, "cg-attach-v6"))
goto skel_destroy;
	/* Connect a v4 ping socket to localhost, assert that only the v4 prog
	 * is called, exactly once, and that the socket's bound address is the
	 * original loopback address.
	 */
if (test__start_subtest("ipv4"))
subtest(cgroup_fd, skel, AF_INET, 0);
	/* Connect a v4 ping socket to localhost, assert that only the v4 prog
	 * is called, exactly once, and that the socket's bound address is the
	 * address we explicitly bound.
	 */
if (test__start_subtest("ipv4-bind"))
subtest(cgroup_fd, skel, AF_INET, 1);
	/* Connect a v6 ping socket to localhost, assert that only the v6 prog
	 * is called, exactly once, and that the socket's bound address is the
	 * original loopback address.
	 */
if (test__start_subtest("ipv6"))
subtest(cgroup_fd, skel, AF_INET6, 0);
	/* Connect a v6 ping socket to localhost, assert that only the v6 prog
	 * is called, exactly once, and that the socket's bound address is the
	 * address we explicitly bound.
	 */
if (test__start_subtest("ipv6-bind"))
subtest(cgroup_fd, skel, AF_INET6, 1);
skel_destroy:
connect_ping__destroy(skel);
close_cgroup:
close(cgroup_fd);
clean_mount:
umount2("/sys", MNT_DETACH);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/connect_ping.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Facebook */
#include <test_progs.h>
#include <bpf/btf.h>
#include "test_log_buf.skel.h"
static size_t libbpf_log_pos;
static char libbpf_log_buf[1024 * 1024];
static bool libbpf_log_error;
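/* Print callback that appends all libbpf output to libbpf_log_buf so the
 * test can inspect it; flags an error if the buffer would overflow.
 */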
static int libbpf_print_cb(enum libbpf_print_level level, const char *fmt, va_list args)
{
int emitted_cnt;
size_t left_cnt;
left_cnt = sizeof(libbpf_log_buf) - libbpf_log_pos;
emitted_cnt = vsnprintf(libbpf_log_buf + libbpf_log_pos, left_cnt, fmt, args);
if (emitted_cnt < 0 || emitted_cnt + 1 > left_cnt) {
libbpf_log_error = true;
return 0;
}
libbpf_log_pos += emitted_cnt;
return 0;
}
static void obj_load_log_buf(void)
{
libbpf_print_fn_t old_print_cb = libbpf_set_print(libbpf_print_cb);
LIBBPF_OPTS(bpf_object_open_opts, opts);
const size_t log_buf_sz = 1024 * 1024;
struct test_log_buf* skel;
char *obj_log_buf, *good_log_buf, *bad_log_buf;
int err;
obj_log_buf = malloc(3 * log_buf_sz);
if (!ASSERT_OK_PTR(obj_log_buf, "obj_log_buf"))
return;
good_log_buf = obj_log_buf + log_buf_sz;
bad_log_buf = obj_log_buf + 2 * log_buf_sz;
obj_log_buf[0] = good_log_buf[0] = bad_log_buf[0] = '\0';
opts.kernel_log_buf = obj_log_buf;
opts.kernel_log_size = log_buf_sz;
opts.kernel_log_level = 4; /* for BTF this will turn into 1 */
	/* In the first round every prog has its own log_buf, so libbpf's own
	 * log doesn't contain program failure logs.
	 */
skel = test_log_buf__open_opts(&opts);
if (!ASSERT_OK_PTR(skel, "skel_open"))
goto cleanup;
/* set very verbose level for good_prog so we always get detailed logs */
bpf_program__set_log_buf(skel->progs.good_prog, good_log_buf, log_buf_sz);
bpf_program__set_log_level(skel->progs.good_prog, 2);
bpf_program__set_log_buf(skel->progs.bad_prog, bad_log_buf, log_buf_sz);
/* log_level 0 with custom log_buf means that verbose logs are not
* requested if program load is successful, but libbpf should retry
* with log_level 1 on error and put program's verbose load log into
* custom log_buf
*/
bpf_program__set_log_level(skel->progs.bad_prog, 0);
err = test_log_buf__load(skel);
if (!ASSERT_ERR(err, "unexpected_load_success"))
goto cleanup;
ASSERT_FALSE(libbpf_log_error, "libbpf_log_error");
/* there should be no prog loading log because we specified per-prog log buf */
ASSERT_NULL(strstr(libbpf_log_buf, "-- BEGIN PROG LOAD LOG --"), "unexp_libbpf_log");
ASSERT_OK_PTR(strstr(libbpf_log_buf, "prog 'bad_prog': BPF program load failed"),
"libbpf_log_not_empty");
ASSERT_OK_PTR(strstr(obj_log_buf, "DATASEC license"), "obj_log_not_empty");
ASSERT_OK_PTR(strstr(good_log_buf, "0: R1=ctx(off=0,imm=0) R10=fp0"),
"good_log_verbose");
ASSERT_OK_PTR(strstr(bad_log_buf, "invalid access to map value, value_size=16 off=16000 size=4"),
"bad_log_not_empty");
if (env.verbosity > VERBOSE_NONE) {
printf("LIBBPF LOG: \n=================\n%s=================\n", libbpf_log_buf);
printf("OBJ LOG: \n=================\n%s=================\n", obj_log_buf);
printf("GOOD_PROG LOG:\n=================\n%s=================\n", good_log_buf);
printf("BAD_PROG LOG:\n=================\n%s=================\n", bad_log_buf);
}
/* reset everything */
test_log_buf__destroy(skel);
obj_log_buf[0] = good_log_buf[0] = bad_log_buf[0] = '\0';
libbpf_log_buf[0] = '\0';
libbpf_log_pos = 0;
libbpf_log_error = false;
/* In the second round we let bad_prog's failure be logged through print callback */
opts.kernel_log_buf = NULL; /* let everything through into print callback */
opts.kernel_log_size = 0;
opts.kernel_log_level = 1;
skel = test_log_buf__open_opts(&opts);
if (!ASSERT_OK_PTR(skel, "skel_open"))
goto cleanup;
/* set normal verbose level for good_prog to check log_level is taken into account */
bpf_program__set_log_buf(skel->progs.good_prog, good_log_buf, log_buf_sz);
bpf_program__set_log_level(skel->progs.good_prog, 1);
err = test_log_buf__load(skel);
if (!ASSERT_ERR(err, "unexpected_load_success"))
goto cleanup;
ASSERT_FALSE(libbpf_log_error, "libbpf_log_error");
/* this time prog loading error should be logged through print callback */
ASSERT_OK_PTR(strstr(libbpf_log_buf, "libbpf: prog 'bad_prog': -- BEGIN PROG LOAD LOG --"),
"libbpf_log_correct");
ASSERT_STREQ(obj_log_buf, "", "obj_log__empty");
ASSERT_STREQ(good_log_buf, "processed 4 insns (limit 1000000) max_states_per_insn 0 total_states 0 peak_states 0 mark_read 0\n",
"good_log_ok");
ASSERT_STREQ(bad_log_buf, "", "bad_log_empty");
if (env.verbosity > VERBOSE_NONE) {
printf("LIBBPF LOG: \n=================\n%s=================\n", libbpf_log_buf);
printf("OBJ LOG: \n=================\n%s=================\n", obj_log_buf);
printf("GOOD_PROG LOG:\n=================\n%s=================\n", good_log_buf);
printf("BAD_PROG LOG:\n=================\n%s=================\n", bad_log_buf);
}
cleanup:
free(obj_log_buf);
test_log_buf__destroy(skel);
libbpf_set_print(old_print_cb);
}
static void bpf_prog_load_log_buf(void)
{
const struct bpf_insn good_prog_insns[] = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
};
const size_t good_prog_insn_cnt = sizeof(good_prog_insns) / sizeof(struct bpf_insn);
const struct bpf_insn bad_prog_insns[] = {
BPF_EXIT_INSN(),
};
size_t bad_prog_insn_cnt = sizeof(bad_prog_insns) / sizeof(struct bpf_insn);
LIBBPF_OPTS(bpf_prog_load_opts, opts);
const size_t log_buf_sz = 1024 * 1024;
char *log_buf;
int fd = -1;
log_buf = malloc(log_buf_sz);
if (!ASSERT_OK_PTR(log_buf, "log_buf_alloc"))
return;
opts.log_buf = log_buf;
opts.log_size = log_buf_sz;
	/* with log_level == 0 log_buf should stay empty for good prog */
log_buf[0] = '\0';
opts.log_level = 0;
fd = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, "good_prog", "GPL",
good_prog_insns, good_prog_insn_cnt, &opts);
ASSERT_STREQ(log_buf, "", "good_log_0");
ASSERT_GE(fd, 0, "good_fd1");
if (fd >= 0)
close(fd);
fd = -1;
/* log_level == 2 should always fill log_buf, even for good prog */
log_buf[0] = '\0';
opts.log_level = 2;
fd = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, "good_prog", "GPL",
good_prog_insns, good_prog_insn_cnt, &opts);
ASSERT_OK_PTR(strstr(log_buf, "0: R1=ctx(off=0,imm=0) R10=fp0"), "good_log_2");
ASSERT_GE(fd, 0, "good_fd2");
if (fd >= 0)
close(fd);
fd = -1;
/* log_level == 0 should fill log_buf for bad prog */
log_buf[0] = '\0';
opts.log_level = 0;
fd = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, "bad_prog", "GPL",
bad_prog_insns, bad_prog_insn_cnt, &opts);
ASSERT_OK_PTR(strstr(log_buf, "R0 !read_ok"), "bad_log_0");
ASSERT_LT(fd, 0, "bad_fd");
if (fd >= 0)
close(fd);
fd = -1;
free(log_buf);
}
static void bpf_btf_load_log_buf(void)
{
LIBBPF_OPTS(bpf_btf_load_opts, opts);
const size_t log_buf_sz = 1024 * 1024;
const void *raw_btf_data;
__u32 raw_btf_size;
struct btf *btf;
char *log_buf = NULL;
int fd = -1;
btf = btf__new_empty();
if (!ASSERT_OK_PTR(btf, "empty_btf"))
return;
ASSERT_GT(btf__add_int(btf, "int", 4, 0), 0, "int_type");
raw_btf_data = btf__raw_data(btf, &raw_btf_size);
if (!ASSERT_OK_PTR(raw_btf_data, "raw_btf_data_good"))
goto cleanup;
log_buf = malloc(log_buf_sz);
if (!ASSERT_OK_PTR(log_buf, "log_buf_alloc"))
goto cleanup;
opts.log_buf = log_buf;
opts.log_size = log_buf_sz;
	/* with log_level == 0 log_buf should stay empty for good BTF */
log_buf[0] = '\0';
opts.log_level = 0;
fd = bpf_btf_load(raw_btf_data, raw_btf_size, &opts);
ASSERT_STREQ(log_buf, "", "good_log_0");
ASSERT_GE(fd, 0, "good_fd1");
if (fd >= 0)
close(fd);
fd = -1;
/* log_level == 2 should always fill log_buf, even for good BTF */
log_buf[0] = '\0';
opts.log_level = 2;
fd = bpf_btf_load(raw_btf_data, raw_btf_size, &opts);
printf("LOG_BUF: %s\n", log_buf);
ASSERT_OK_PTR(strstr(log_buf, "magic: 0xeb9f"), "good_log_2");
ASSERT_GE(fd, 0, "good_fd2");
if (fd >= 0)
close(fd);
fd = -1;
/* make BTF bad, add pointer pointing to non-existing type */
ASSERT_GT(btf__add_ptr(btf, 100), 0, "bad_ptr_type");
raw_btf_data = btf__raw_data(btf, &raw_btf_size);
if (!ASSERT_OK_PTR(raw_btf_data, "raw_btf_data_bad"))
goto cleanup;
/* log_level == 0 should fill log_buf for bad BTF */
log_buf[0] = '\0';
opts.log_level = 0;
fd = bpf_btf_load(raw_btf_data, raw_btf_size, &opts);
printf("LOG_BUF: %s\n", log_buf);
ASSERT_OK_PTR(strstr(log_buf, "[2] PTR (anon) type_id=100 Invalid type_id"), "bad_log_0");
ASSERT_LT(fd, 0, "bad_fd");
if (fd >= 0)
close(fd);
fd = -1;
cleanup:
free(log_buf);
btf__free(btf);
}
void test_log_buf(void)
{
if (test__start_subtest("obj_load_log_buf"))
obj_load_log_buf();
if (test__start_subtest("bpf_prog_load_log_buf"))
bpf_prog_load_log_buf();
if (test__start_subtest("bpf_btf_load_log_buf"))
bpf_btf_load_log_buf();
}
| linux-master | tools/testing/selftests/bpf/prog_tests/log_buf.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
#include <test_progs.h>
#include "nested_trust_failure.skel.h"
#include "nested_trust_success.skel.h"
void test_nested_trust(void)
{
RUN_TESTS(nested_trust_success);
RUN_TESTS(nested_trust_failure);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/nested_trust.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
#define _GNU_SOURCE
#include <cgroup_helpers.h>
#include <test_progs.h>
#include "cgrp_kfunc_failure.skel.h"
#include "cgrp_kfunc_success.skel.h"
static struct cgrp_kfunc_success *open_load_cgrp_kfunc_skel(void)
{
struct cgrp_kfunc_success *skel;
int err;
skel = cgrp_kfunc_success__open();
if (!ASSERT_OK_PTR(skel, "skel_open"))
return NULL;
skel->bss->pid = getpid();
err = cgrp_kfunc_success__load(skel);
if (!ASSERT_OK(err, "skel_load"))
goto cleanup;
return skel;
cleanup:
cgrp_kfunc_success__destroy(skel);
return NULL;
}
static int mkdir_rm_test_dir(void)
{
int fd;
const char *cgrp_path = "cgrp_kfunc";
fd = create_and_get_cgroup(cgrp_path);
if (!ASSERT_GT(fd, 0, "mkdir_cgrp_fd"))
return -1;
close(fd);
remove_cgroup(cgrp_path);
return 0;
}
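/* Attach one success-case program, create and remove a cgroup to trigger
 * it, and check the invocation count and error state it reports.
 */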
static void run_success_test(const char *prog_name)
{
struct cgrp_kfunc_success *skel;
struct bpf_program *prog;
struct bpf_link *link = NULL;
skel = open_load_cgrp_kfunc_skel();
if (!ASSERT_OK_PTR(skel, "open_load_skel"))
return;
if (!ASSERT_OK(skel->bss->err, "pre_mkdir_err"))
goto cleanup;
prog = bpf_object__find_program_by_name(skel->obj, prog_name);
if (!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name"))
goto cleanup;
link = bpf_program__attach(prog);
if (!ASSERT_OK_PTR(link, "attached_link"))
goto cleanup;
ASSERT_EQ(skel->bss->invocations, 0, "pre_rmdir_count");
if (!ASSERT_OK(mkdir_rm_test_dir(), "cgrp_mkdir"))
goto cleanup;
ASSERT_EQ(skel->bss->invocations, 1, "post_rmdir_count");
ASSERT_OK(skel->bss->err, "post_rmdir_err");
cleanup:
bpf_link__destroy(link);
cgrp_kfunc_success__destroy(skel);
}
static const char * const success_tests[] = {
"test_cgrp_acquire_release_argument",
"test_cgrp_acquire_leave_in_map",
"test_cgrp_xchg_release",
"test_cgrp_get_release",
"test_cgrp_get_ancestors",
"test_cgrp_from_id",
};
void test_cgrp_kfunc(void)
{
int i, err;
err = setup_cgroup_environment();
if (!ASSERT_OK(err, "cgrp_env_setup"))
goto cleanup;
for (i = 0; i < ARRAY_SIZE(success_tests); i++) {
if (!test__start_subtest(success_tests[i]))
continue;
run_success_test(success_tests[i]);
}
RUN_TESTS(cgrp_kfunc_failure);
cleanup:
cleanup_cgroup_environment();
}
| linux-master | tools/testing/selftests/bpf/prog_tests/cgrp_kfunc.c |
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include <network_helpers.h>
static void test_global_data_number(struct bpf_object *obj, __u32 duration)
{
int i, err, map_fd;
__u64 num;
map_fd = bpf_find_map(__func__, obj, "result_number");
if (CHECK_FAIL(map_fd < 0))
return;
struct {
char *name;
uint32_t key;
__u64 num;
} tests[] = {
{ "relocate .bss reference", 0, 0 },
{ "relocate .data reference", 1, 42 },
{ "relocate .rodata reference", 2, 24 },
{ "relocate .bss reference", 3, 0 },
{ "relocate .data reference", 4, 0xffeeff },
{ "relocate .rodata reference", 5, 0xabab },
{ "relocate .bss reference", 6, 1234 },
{ "relocate .bss reference", 7, 0 },
{ "relocate .rodata reference", 8, 0xab },
{ "relocate .rodata reference", 9, 0x1111111111111111 },
{ "relocate .rodata reference", 10, ~0 },
};
for (i = 0; i < ARRAY_SIZE(tests); i++) {
err = bpf_map_lookup_elem(map_fd, &tests[i].key, &num);
CHECK(err || num != tests[i].num, tests[i].name,
"err %d result %llx expected %llx\n",
err, num, tests[i].num);
}
}
static void test_global_data_string(struct bpf_object *obj, __u32 duration)
{
int i, err, map_fd;
char str[32];
map_fd = bpf_find_map(__func__, obj, "result_string");
if (CHECK_FAIL(map_fd < 0))
return;
struct {
char *name;
uint32_t key;
char str[32];
} tests[] = {
{ "relocate .rodata reference", 0, "abcdefghijklmnopqrstuvwxyz" },
{ "relocate .data reference", 1, "abcdefghijklmnopqrstuvwxyz" },
{ "relocate .bss reference", 2, "" },
{ "relocate .data reference", 3, "abcdexghijklmnopqrstuvwxyz" },
{ "relocate .bss reference", 4, "\0\0hello" },
};
for (i = 0; i < ARRAY_SIZE(tests); i++) {
err = bpf_map_lookup_elem(map_fd, &tests[i].key, str);
CHECK(err || memcmp(str, tests[i].str, sizeof(str)),
tests[i].name, "err %d result \'%s\' expected \'%s\'\n",
err, str, tests[i].str);
}
}
struct foo {
__u8 a;
__u32 b;
__u64 c;
};
static void test_global_data_struct(struct bpf_object *obj, __u32 duration)
{
int i, err, map_fd;
struct foo val;
map_fd = bpf_find_map(__func__, obj, "result_struct");
if (CHECK_FAIL(map_fd < 0))
return;
struct {
char *name;
uint32_t key;
struct foo val;
} tests[] = {
{ "relocate .rodata reference", 0, { 42, 0xfefeefef, 0x1111111111111111ULL, } },
{ "relocate .bss reference", 1, { } },
{ "relocate .rodata reference", 2, { } },
{ "relocate .data reference", 3, { 41, 0xeeeeefef, 0x2111111111111111ULL, } },
};
for (i = 0; i < ARRAY_SIZE(tests); i++) {
err = bpf_map_lookup_elem(map_fd, &tests[i].key, &val);
CHECK(err || memcmp(&val, &tests[i].val, sizeof(val)),
tests[i].name, "err %d result { %u, %u, %llu } expected { %u, %u, %llu }\n",
err, val.a, val.b, val.c, tests[i].val.a, tests[i].val.b, tests[i].val.c);
}
}
static void test_global_data_rdonly(struct bpf_object *obj, __u32 duration)
{
int err = -ENOMEM, map_fd, zero = 0;
struct bpf_map *map, *map2;
__u8 *buff;
map = bpf_object__find_map_by_name(obj, "test_glo.rodata");
if (!ASSERT_OK_PTR(map, "map"))
return;
if (!ASSERT_TRUE(bpf_map__is_internal(map), "is_internal"))
return;
	/* ensure we can look up internal maps by their ELF names */
map2 = bpf_object__find_map_by_name(obj, ".rodata");
if (!ASSERT_EQ(map, map2, "same_maps"))
return;
map_fd = bpf_map__fd(map);
if (CHECK_FAIL(map_fd < 0))
return;
buff = malloc(bpf_map__value_size(map));
if (buff)
err = bpf_map_update_elem(map_fd, &zero, buff, 0);
free(buff);
CHECK(!err || errno != EPERM, "test .rodata read-only map",
"err %d errno %d\n", err, errno);
}
void test_global_data(void)
{
const char *file = "./test_global_data.bpf.o";
struct bpf_object *obj;
int err, prog_fd;
LIBBPF_OPTS(bpf_test_run_opts, topts,
.data_in = &pkt_v4,
.data_size_in = sizeof(pkt_v4),
.repeat = 1,
);
err = bpf_prog_test_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd);
if (!ASSERT_OK(err, "load program"))
return;
err = bpf_prog_test_run_opts(prog_fd, &topts);
ASSERT_OK(err, "pass global data run err");
ASSERT_OK(topts.retval, "pass global data run retval");
test_global_data_number(obj, topts.duration);
test_global_data_string(obj, topts.duration);
test_global_data_struct(obj, topts.duration);
test_global_data_rdonly(obj, topts.duration);
bpf_object__close(obj);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/global_data.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2020 Facebook
#define _GNU_SOURCE
#include <pthread.h>
#include <sched.h>
#include <test_progs.h>
#include "perf_event_stackmap.skel.h"
#ifndef noinline
#define noinline __attribute__((noinline))
#endif
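/* Chain of noinline calls, func_6() -> ... -> func_1(), produces a deep,
 * recognizable user stack for the sampled stack traces.
 */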
noinline int func_1(void)
{
static int val = 1;
val += 1;
usleep(100);
return val;
}
noinline int func_2(void)
{
return func_1();
}
noinline int func_3(void)
{
return func_2();
}
noinline int func_4(void)
{
return func_3();
}
noinline int func_5(void)
{
return func_4();
}
noinline int func_6(void)
{
int i, val = 1;
for (i = 0; i < 100; i++)
val += func_5();
return val;
}
void test_perf_event_stackmap(void)
{
struct perf_event_attr attr = {
/* .type = PERF_TYPE_SOFTWARE, */
.type = PERF_TYPE_HARDWARE,
.config = PERF_COUNT_HW_CPU_CYCLES,
.precise_ip = 2,
.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_BRANCH_STACK |
PERF_SAMPLE_CALLCHAIN,
.branch_sample_type = PERF_SAMPLE_BRANCH_USER |
PERF_SAMPLE_BRANCH_NO_FLAGS |
PERF_SAMPLE_BRANCH_NO_CYCLES |
PERF_SAMPLE_BRANCH_CALL_STACK,
.freq = 1,
.sample_freq = read_perf_max_sample_freq(),
.size = sizeof(struct perf_event_attr),
};
struct perf_event_stackmap *skel;
__u32 duration = 0;
cpu_set_t cpu_set;
int pmu_fd, err;
skel = perf_event_stackmap__open();
if (CHECK(!skel, "skel_open", "skeleton open failed\n"))
return;
err = perf_event_stackmap__load(skel);
if (CHECK(err, "skel_load", "skeleton load failed: %d\n", err))
goto cleanup;
CPU_ZERO(&cpu_set);
CPU_SET(0, &cpu_set);
err = pthread_setaffinity_np(pthread_self(), sizeof(cpu_set), &cpu_set);
if (CHECK(err, "set_affinity", "err %d, errno %d\n", err, errno))
goto cleanup;
pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
0 /* cpu 0 */, -1 /* group id */,
0 /* flags */);
if (pmu_fd < 0) {
printf("%s:SKIP:cpu doesn't support the event\n", __func__);
test__skip();
goto cleanup;
}
skel->links.oncpu = bpf_program__attach_perf_event(skel->progs.oncpu,
pmu_fd);
if (!ASSERT_OK_PTR(skel->links.oncpu, "attach_perf_event")) {
close(pmu_fd);
goto cleanup;
}
/* create kernel and user stack traces for testing */
func_6();
CHECK(skel->data->stackid_kernel != 2, "get_stackid_kernel", "failed\n");
CHECK(skel->data->stackid_user != 2, "get_stackid_user", "failed\n");
CHECK(skel->data->stack_kernel != 2, "get_stack_kernel", "failed\n");
CHECK(skel->data->stack_user != 2, "get_stack_user", "failed\n");
cleanup:
perf_event_stackmap__destroy(skel);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/perf_event_stackmap.c |
// SPDX-License-Identifier: GPL-2.0
#include <uapi/linux/bpf.h>
#include <linux/if_link.h>
#include <test_progs.h>
#include "test_xdp_with_cpumap_frags_helpers.skel.h"
#include "test_xdp_with_cpumap_helpers.skel.h"
#define IFINDEX_LO 1
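/* Verify that cpumap entries only accept BPF_XDP_CPUMAP programs (with a
 * matching frags mode) and that such programs cannot be attached to a
 * netdev directly.
 */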
static void test_xdp_with_cpumap_helpers(void)
{
struct test_xdp_with_cpumap_helpers *skel;
struct bpf_prog_info info = {};
__u32 len = sizeof(info);
struct bpf_cpumap_val val = {
.qsize = 192,
};
int err, prog_fd, map_fd;
__u32 idx = 0;
skel = test_xdp_with_cpumap_helpers__open_and_load();
if (!ASSERT_OK_PTR(skel, "test_xdp_with_cpumap_helpers__open_and_load"))
return;
prog_fd = bpf_program__fd(skel->progs.xdp_redir_prog);
err = bpf_xdp_attach(IFINDEX_LO, prog_fd, XDP_FLAGS_SKB_MODE, NULL);
if (!ASSERT_OK(err, "Generic attach of program with 8-byte CPUMAP"))
goto out_close;
err = bpf_xdp_detach(IFINDEX_LO, XDP_FLAGS_SKB_MODE, NULL);
ASSERT_OK(err, "XDP program detach");
prog_fd = bpf_program__fd(skel->progs.xdp_dummy_cm);
map_fd = bpf_map__fd(skel->maps.cpu_map);
err = bpf_prog_get_info_by_fd(prog_fd, &info, &len);
if (!ASSERT_OK(err, "bpf_prog_get_info_by_fd"))
goto out_close;
val.bpf_prog.fd = prog_fd;
err = bpf_map_update_elem(map_fd, &idx, &val, 0);
ASSERT_OK(err, "Add program to cpumap entry");
err = bpf_map_lookup_elem(map_fd, &idx, &val);
ASSERT_OK(err, "Read cpumap entry");
ASSERT_EQ(info.id, val.bpf_prog.id, "Match program id to cpumap entry prog_id");
/* cannot attach a BPF_XDP_CPUMAP program to a device */
err = bpf_xdp_attach(IFINDEX_LO, prog_fd, XDP_FLAGS_SKB_MODE, NULL);
if (!ASSERT_NEQ(err, 0, "Attach of BPF_XDP_CPUMAP program"))
bpf_xdp_detach(IFINDEX_LO, XDP_FLAGS_SKB_MODE, NULL);
val.qsize = 192;
val.bpf_prog.fd = bpf_program__fd(skel->progs.xdp_dummy_prog);
err = bpf_map_update_elem(map_fd, &idx, &val, 0);
ASSERT_NEQ(err, 0, "Add non-BPF_XDP_CPUMAP program to cpumap entry");
/* Try to attach BPF_XDP program with frags to cpumap when we have
* already loaded a BPF_XDP program on the map
*/
idx = 1;
val.qsize = 192;
val.bpf_prog.fd = bpf_program__fd(skel->progs.xdp_dummy_cm_frags);
err = bpf_map_update_elem(map_fd, &idx, &val, 0);
ASSERT_NEQ(err, 0, "Add BPF_XDP program with frags to cpumap entry");
out_close:
test_xdp_with_cpumap_helpers__destroy(skel);
}
static void test_xdp_with_cpumap_frags_helpers(void)
{
struct test_xdp_with_cpumap_frags_helpers *skel;
struct bpf_prog_info info = {};
__u32 len = sizeof(info);
struct bpf_cpumap_val val = {
.qsize = 192,
};
int err, frags_prog_fd, map_fd;
__u32 idx = 0;
skel = test_xdp_with_cpumap_frags_helpers__open_and_load();
if (!ASSERT_OK_PTR(skel, "test_xdp_with_cpumap_helpers__open_and_load"))
return;
frags_prog_fd = bpf_program__fd(skel->progs.xdp_dummy_cm_frags);
map_fd = bpf_map__fd(skel->maps.cpu_map);
err = bpf_prog_get_info_by_fd(frags_prog_fd, &info, &len);
if (!ASSERT_OK(err, "bpf_prog_get_info_by_fd"))
goto out_close;
val.bpf_prog.fd = frags_prog_fd;
err = bpf_map_update_elem(map_fd, &idx, &val, 0);
ASSERT_OK(err, "Add program to cpumap entry");
err = bpf_map_lookup_elem(map_fd, &idx, &val);
ASSERT_OK(err, "Read cpumap entry");
ASSERT_EQ(info.id, val.bpf_prog.id,
"Match program id to cpumap entry prog_id");
/* Try to attach BPF_XDP program to cpumap when we have
* already loaded a BPF_XDP program with frags on the map
*/
idx = 1;
val.qsize = 192;
val.bpf_prog.fd = bpf_program__fd(skel->progs.xdp_dummy_cm);
err = bpf_map_update_elem(map_fd, &idx, &val, 0);
ASSERT_NEQ(err, 0, "Add BPF_XDP program to cpumap entry");
out_close:
test_xdp_with_cpumap_frags_helpers__destroy(skel);
}
void serial_test_xdp_cpumap_attach(void)
{
if (test__start_subtest("CPUMAP with programs in entries"))
test_xdp_with_cpumap_helpers();
if (test__start_subtest("CPUMAP with frags programs in entries"))
test_xdp_with_cpumap_frags_helpers();
}
| linux-master | tools/testing/selftests/bpf/prog_tests/xdp_cpumap_attach.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022 Google */
#include <test_progs.h>
#include "test_autoattach.skel.h"
void test_autoattach(void)
{
struct test_autoattach *skel;
skel = test_autoattach__open_and_load();
if (!ASSERT_OK_PTR(skel, "skel_open_and_load"))
goto cleanup;
/* disable auto-attach for prog2 */
bpf_program__set_autoattach(skel->progs.prog2, false);
ASSERT_TRUE(bpf_program__autoattach(skel->progs.prog1), "autoattach_prog1");
ASSERT_FALSE(bpf_program__autoattach(skel->progs.prog2), "autoattach_prog2");
if (!ASSERT_OK(test_autoattach__attach(skel), "skel_attach"))
goto cleanup;
usleep(1);
ASSERT_TRUE(skel->bss->prog1_called, "attached_prog1");
ASSERT_FALSE(skel->bss->prog2_called, "attached_prog2");
cleanup:
test_autoattach__destroy(skel);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/autoattach.c |
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include <network_helpers.h>
#include "test_xdp_context_test_run.skel.h"
void test_xdp_context_error(int prog_fd, struct bpf_test_run_opts opts,
__u32 data_meta, __u32 data, __u32 data_end,
__u32 ingress_ifindex, __u32 rx_queue_index,
__u32 egress_ifindex)
{
struct xdp_md ctx = {
.data = data,
.data_end = data_end,
.data_meta = data_meta,
.ingress_ifindex = ingress_ifindex,
.rx_queue_index = rx_queue_index,
.egress_ifindex = egress_ifindex,
};
int err;
opts.ctx_in = &ctx;
opts.ctx_size_in = sizeof(ctx);
err = bpf_prog_test_run_opts(prog_fd, &opts);
ASSERT_EQ(errno, EINVAL, "errno-EINVAL");
ASSERT_ERR(err, "bpf_prog_test_run");
}
void test_xdp_context_test_run(void)
{
struct test_xdp_context_test_run *skel = NULL;
char data[sizeof(pkt_v4) + sizeof(__u32)];
char bad_ctx[sizeof(struct xdp_md) + 1];
struct xdp_md ctx_in, ctx_out;
DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts,
.data_in = &data,
.data_size_in = sizeof(data),
.ctx_out = &ctx_out,
.ctx_size_out = sizeof(ctx_out),
.repeat = 1,
);
int err, prog_fd;
skel = test_xdp_context_test_run__open_and_load();
if (!ASSERT_OK_PTR(skel, "skel"))
return;
prog_fd = bpf_program__fd(skel->progs.xdp_context);
/* Data past the end of the kernel's struct xdp_md must be 0 */
bad_ctx[sizeof(bad_ctx) - 1] = 1;
opts.ctx_in = bad_ctx;
opts.ctx_size_in = sizeof(bad_ctx);
err = bpf_prog_test_run_opts(prog_fd, &opts);
ASSERT_EQ(errno, E2BIG, "extradata-errno");
ASSERT_ERR(err, "bpf_prog_test_run(extradata)");
*(__u32 *)data = XDP_PASS;
*(struct ipv4_packet *)(data + sizeof(__u32)) = pkt_v4;
opts.ctx_in = &ctx_in;
opts.ctx_size_in = sizeof(ctx_in);
memset(&ctx_in, 0, sizeof(ctx_in));
ctx_in.data_meta = 0;
ctx_in.data = sizeof(__u32);
ctx_in.data_end = ctx_in.data + sizeof(pkt_v4);
err = bpf_prog_test_run_opts(prog_fd, &opts);
ASSERT_OK(err, "bpf_prog_test_run(valid)");
ASSERT_EQ(opts.retval, XDP_PASS, "valid-retval");
ASSERT_EQ(opts.data_size_out, sizeof(pkt_v4), "valid-datasize");
ASSERT_EQ(opts.ctx_size_out, opts.ctx_size_in, "valid-ctxsize");
ASSERT_EQ(ctx_out.data_meta, 0, "valid-datameta");
ASSERT_EQ(ctx_out.data, 0, "valid-data");
ASSERT_EQ(ctx_out.data_end, sizeof(pkt_v4), "valid-dataend");
/* Meta data's size must be a multiple of 4 */
test_xdp_context_error(prog_fd, opts, 0, 1, sizeof(data), 0, 0, 0);
/* data_meta must reference the start of data */
test_xdp_context_error(prog_fd, opts, 4, sizeof(__u32), sizeof(data),
0, 0, 0);
/* Meta data must be 32 bytes or smaller */
test_xdp_context_error(prog_fd, opts, 0, 36, sizeof(data), 0, 0, 0);
/* Total size of data must match data_end - data_meta */
test_xdp_context_error(prog_fd, opts, 0, sizeof(__u32),
sizeof(data) - 1, 0, 0, 0);
test_xdp_context_error(prog_fd, opts, 0, sizeof(__u32),
sizeof(data) + 1, 0, 0, 0);
/* RX queue cannot be specified without specifying an ingress */
test_xdp_context_error(prog_fd, opts, 0, sizeof(__u32), sizeof(data),
0, 1, 0);
/* Interface 1 is always the loopback interface which always has only
* one RX queue (index 0). This makes index 1 an invalid rx queue index
* for interface 1.
*/
test_xdp_context_error(prog_fd, opts, 0, sizeof(__u32), sizeof(data),
1, 1, 0);
/* The egress cannot be specified */
test_xdp_context_error(prog_fd, opts, 0, sizeof(__u32), sizeof(data),
0, 0, 1);
test_xdp_context_test_run__destroy(skel);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/xdp_context_test_run.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2023 Yafang Shao <[email protected]> */
#include <string.h>
#include <linux/bpf.h>
#include <test_progs.h>
#include "test_ptr_untrusted.skel.h"
#define TP_NAME "sched_switch"
void serial_test_ptr_untrusted(void)
{
struct test_ptr_untrusted *skel;
int err;
skel = test_ptr_untrusted__open_and_load();
if (!ASSERT_OK_PTR(skel, "skel_open"))
goto cleanup;
/* First, attach lsm prog */
skel->links.lsm_run = bpf_program__attach_lsm(skel->progs.lsm_run);
if (!ASSERT_OK_PTR(skel->links.lsm_run, "lsm_attach"))
goto cleanup;
/* Second, attach raw_tp prog. The lsm prog will be triggered. */
skel->links.raw_tp_run = bpf_program__attach_raw_tracepoint(skel->progs.raw_tp_run,
TP_NAME);
if (!ASSERT_OK_PTR(skel->links.raw_tp_run, "raw_tp_attach"))
goto cleanup;
err = strncmp(skel->bss->tp_name, TP_NAME, strlen(TP_NAME));
ASSERT_EQ(err, 0, "cmp_tp_name");
cleanup:
test_ptr_untrusted__destroy(skel);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/ptr_untrusted.c |
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2021 Hengqi Chen */
#include <test_progs.h>
#include <sys/un.h>
#include "test_skc_to_unix_sock.skel.h"
static const char *sock_path = "@skc_to_unix_sock";
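/* The leading '@' is the conventional notation for an abstract unix
 * socket; the test overwrites it with a NUL byte below so the address
 * lives in the abstract namespace rather than on the filesystem.
 */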
void test_skc_to_unix_sock(void)
{
struct test_skc_to_unix_sock *skel;
struct sockaddr_un sockaddr;
int err, sockfd = 0;
skel = test_skc_to_unix_sock__open();
if (!ASSERT_OK_PTR(skel, "could not open BPF object"))
return;
skel->rodata->my_pid = getpid();
err = test_skc_to_unix_sock__load(skel);
if (!ASSERT_OK(err, "could not load BPF object"))
goto cleanup;
err = test_skc_to_unix_sock__attach(skel);
if (!ASSERT_OK(err, "could not attach BPF object"))
goto cleanup;
/* trigger unix_listen */
sockfd = socket(AF_UNIX, SOCK_STREAM, 0);
if (!ASSERT_GT(sockfd, 0, "socket failed"))
goto cleanup;
memset(&sockaddr, 0, sizeof(sockaddr));
sockaddr.sun_family = AF_UNIX;
strncpy(sockaddr.sun_path, sock_path, strlen(sock_path));
sockaddr.sun_path[0] = '\0';
err = bind(sockfd, (struct sockaddr *)&sockaddr, sizeof(sockaddr));
if (!ASSERT_OK(err, "bind failed"))
goto cleanup;
err = listen(sockfd, 1);
if (!ASSERT_OK(err, "listen failed"))
goto cleanup;
ASSERT_EQ(strcmp(skel->bss->path, sock_path), 0, "bpf_skc_to_unix_sock failed");
cleanup:
if (sockfd)
close(sockfd);
test_skc_to_unix_sock__destroy(skel);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/skc_to_unix_sock.c |
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include <network_helpers.h>
#include "skb_load_bytes.skel.h"
void test_skb_load_bytes(void)
{
struct skb_load_bytes *skel;
int err, prog_fd, test_result;
struct __sk_buff skb = { 0 };
LIBBPF_OPTS(bpf_test_run_opts, tattr,
.data_in = &pkt_v4,
.data_size_in = sizeof(pkt_v4),
.ctx_in = &skb,
.ctx_size_in = sizeof(skb),
);
skel = skb_load_bytes__open_and_load();
if (!ASSERT_OK_PTR(skel, "skel_open_and_load"))
return;
prog_fd = bpf_program__fd(skel->progs.skb_process);
if (!ASSERT_GE(prog_fd, 0, "prog_fd"))
goto out;
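/* An offset of (u32)-1 is far beyond any packet, so the program's
 * bpf_skb_load_bytes() call should fail and report -EFAULT via
 * test_result.
 */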
skel->bss->load_offset = (uint32_t)(-1);
err = bpf_prog_test_run_opts(prog_fd, &tattr);
if (!ASSERT_OK(err, "bpf_prog_test_run_opts"))
goto out;
test_result = skel->bss->test_result;
if (!ASSERT_EQ(test_result, -EFAULT, "offset -1"))
goto out;
skel->bss->load_offset = (uint32_t)10;
err = bpf_prog_test_run_opts(prog_fd, &tattr);
if (!ASSERT_OK(err, "bpf_prog_test_run_opts"))
goto out;
test_result = skel->bss->test_result;
if (!ASSERT_EQ(test_result, 0, "offset 10"))
goto out;
out:
skb_load_bytes__destroy(skel);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/skb_load_bytes.c |
// SPDX-License-Identifier: GPL-2.0-only
#include <test_progs.h>
#include "cap_helpers.h"
#include "verifier_and.skel.h"
#include "verifier_array_access.skel.h"
#include "verifier_basic_stack.skel.h"
#include "verifier_bounds.skel.h"
#include "verifier_bounds_deduction.skel.h"
#include "verifier_bounds_deduction_non_const.skel.h"
#include "verifier_bounds_mix_sign_unsign.skel.h"
#include "verifier_bpf_get_stack.skel.h"
#include "verifier_bswap.skel.h"
#include "verifier_btf_ctx_access.skel.h"
#include "verifier_cfg.skel.h"
#include "verifier_cgroup_inv_retcode.skel.h"
#include "verifier_cgroup_skb.skel.h"
#include "verifier_cgroup_storage.skel.h"
#include "verifier_const_or.skel.h"
#include "verifier_ctx.skel.h"
#include "verifier_ctx_sk_msg.skel.h"
#include "verifier_d_path.skel.h"
#include "verifier_direct_packet_access.skel.h"
#include "verifier_direct_stack_access_wraparound.skel.h"
#include "verifier_div0.skel.h"
#include "verifier_div_overflow.skel.h"
#include "verifier_gotol.skel.h"
#include "verifier_helper_access_var_len.skel.h"
#include "verifier_helper_packet_access.skel.h"
#include "verifier_helper_restricted.skel.h"
#include "verifier_helper_value_access.skel.h"
#include "verifier_int_ptr.skel.h"
#include "verifier_jeq_infer_not_null.skel.h"
#include "verifier_ld_ind.skel.h"
#include "verifier_ldsx.skel.h"
#include "verifier_leak_ptr.skel.h"
#include "verifier_loops1.skel.h"
#include "verifier_lwt.skel.h"
#include "verifier_map_in_map.skel.h"
#include "verifier_map_ptr.skel.h"
#include "verifier_map_ptr_mixing.skel.h"
#include "verifier_map_ret_val.skel.h"
#include "verifier_masking.skel.h"
#include "verifier_meta_access.skel.h"
#include "verifier_movsx.skel.h"
#include "verifier_netfilter_ctx.skel.h"
#include "verifier_netfilter_retcode.skel.h"
#include "verifier_prevent_map_lookup.skel.h"
#include "verifier_raw_stack.skel.h"
#include "verifier_raw_tp_writable.skel.h"
#include "verifier_reg_equal.skel.h"
#include "verifier_ref_tracking.skel.h"
#include "verifier_regalloc.skel.h"
#include "verifier_ringbuf.skel.h"
#include "verifier_runtime_jit.skel.h"
#include "verifier_scalar_ids.skel.h"
#include "verifier_sdiv.skel.h"
#include "verifier_search_pruning.skel.h"
#include "verifier_sock.skel.h"
#include "verifier_spill_fill.skel.h"
#include "verifier_spin_lock.skel.h"
#include "verifier_stack_ptr.skel.h"
#include "verifier_subprog_precision.skel.h"
#include "verifier_subreg.skel.h"
#include "verifier_typedef.skel.h"
#include "verifier_uninit.skel.h"
#include "verifier_unpriv.skel.h"
#include "verifier_unpriv_perf.skel.h"
#include "verifier_value_adj_spill.skel.h"
#include "verifier_value.skel.h"
#include "verifier_value_illegal_alu.skel.h"
#include "verifier_value_or_null.skel.h"
#include "verifier_value_ptr_arith.skel.h"
#include "verifier_var_off.skel.h"
#include "verifier_xadd.skel.h"
#include "verifier_xdp.skel.h"
#include "verifier_xdp_direct_packet_access.skel.h"
#define MAX_ENTRIES 11
struct test_val {
unsigned int index;
int foo[MAX_ENTRIES];
};
__maybe_unused
static void run_tests_aux(const char *skel_name,
skel_elf_bytes_fn elf_bytes_factory,
pre_execution_cb pre_execution_cb)
{
struct test_loader tester = {};
__u64 old_caps;
int err;
/* test_verifier tests are executed w/o CAP_SYS_ADMIN, do the same here */
err = cap_disable_effective(1ULL << CAP_SYS_ADMIN, &old_caps);
if (err) {
PRINT_FAIL("failed to drop CAP_SYS_ADMIN: %i, %s\n", err, strerror(err));
return;
}
test_loader__set_pre_execution_cb(&tester, pre_execution_cb);
test_loader__run_subtests(&tester, skel_name, elf_bytes_factory);
test_loader_fini(&tester);
err = cap_enable_effective(old_caps, NULL);
if (err)
PRINT_FAIL("failed to restore CAP_SYS_ADMIN: %i, %s\n", err, strerror(err));
}
#define RUN(skel) run_tests_aux(#skel, skel##__elf_bytes, NULL)
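/* For example, RUN(verifier_and) expands to
 * run_tests_aux("verifier_and", verifier_and__elf_bytes, NULL).
 */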
void test_verifier_and(void) { RUN(verifier_and); }
void test_verifier_basic_stack(void) { RUN(verifier_basic_stack); }
void test_verifier_bounds(void) { RUN(verifier_bounds); }
void test_verifier_bounds_deduction(void) { RUN(verifier_bounds_deduction); }
void test_verifier_bounds_deduction_non_const(void) { RUN(verifier_bounds_deduction_non_const); }
void test_verifier_bounds_mix_sign_unsign(void) { RUN(verifier_bounds_mix_sign_unsign); }
void test_verifier_bpf_get_stack(void) { RUN(verifier_bpf_get_stack); }
void test_verifier_bswap(void) { RUN(verifier_bswap); }
void test_verifier_btf_ctx_access(void) { RUN(verifier_btf_ctx_access); }
void test_verifier_cfg(void) { RUN(verifier_cfg); }
void test_verifier_cgroup_inv_retcode(void) { RUN(verifier_cgroup_inv_retcode); }
void test_verifier_cgroup_skb(void) { RUN(verifier_cgroup_skb); }
void test_verifier_cgroup_storage(void) { RUN(verifier_cgroup_storage); }
void test_verifier_const_or(void) { RUN(verifier_const_or); }
void test_verifier_ctx(void) { RUN(verifier_ctx); }
void test_verifier_ctx_sk_msg(void) { RUN(verifier_ctx_sk_msg); }
void test_verifier_d_path(void) { RUN(verifier_d_path); }
void test_verifier_direct_packet_access(void) { RUN(verifier_direct_packet_access); }
void test_verifier_direct_stack_access_wraparound(void) { RUN(verifier_direct_stack_access_wraparound); }
void test_verifier_div0(void) { RUN(verifier_div0); }
void test_verifier_div_overflow(void) { RUN(verifier_div_overflow); }
void test_verifier_gotol(void) { RUN(verifier_gotol); }
void test_verifier_helper_access_var_len(void) { RUN(verifier_helper_access_var_len); }
void test_verifier_helper_packet_access(void) { RUN(verifier_helper_packet_access); }
void test_verifier_helper_restricted(void) { RUN(verifier_helper_restricted); }
void test_verifier_helper_value_access(void) { RUN(verifier_helper_value_access); }
void test_verifier_int_ptr(void) { RUN(verifier_int_ptr); }
void test_verifier_jeq_infer_not_null(void) { RUN(verifier_jeq_infer_not_null); }
void test_verifier_ld_ind(void) { RUN(verifier_ld_ind); }
void test_verifier_ldsx(void) { RUN(verifier_ldsx); }
void test_verifier_leak_ptr(void) { RUN(verifier_leak_ptr); }
void test_verifier_loops1(void) { RUN(verifier_loops1); }
void test_verifier_lwt(void) { RUN(verifier_lwt); }
void test_verifier_map_in_map(void) { RUN(verifier_map_in_map); }
void test_verifier_map_ptr(void) { RUN(verifier_map_ptr); }
void test_verifier_map_ptr_mixing(void) { RUN(verifier_map_ptr_mixing); }
void test_verifier_map_ret_val(void) { RUN(verifier_map_ret_val); }
void test_verifier_masking(void) { RUN(verifier_masking); }
void test_verifier_meta_access(void) { RUN(verifier_meta_access); }
void test_verifier_movsx(void) { RUN(verifier_movsx); }
void test_verifier_netfilter_ctx(void) { RUN(verifier_netfilter_ctx); }
void test_verifier_netfilter_retcode(void) { RUN(verifier_netfilter_retcode); }
void test_verifier_prevent_map_lookup(void) { RUN(verifier_prevent_map_lookup); }
void test_verifier_raw_stack(void) { RUN(verifier_raw_stack); }
void test_verifier_raw_tp_writable(void) { RUN(verifier_raw_tp_writable); }
void test_verifier_reg_equal(void) { RUN(verifier_reg_equal); }
void test_verifier_ref_tracking(void) { RUN(verifier_ref_tracking); }
void test_verifier_regalloc(void) { RUN(verifier_regalloc); }
void test_verifier_ringbuf(void) { RUN(verifier_ringbuf); }
void test_verifier_runtime_jit(void) { RUN(verifier_runtime_jit); }
void test_verifier_scalar_ids(void) { RUN(verifier_scalar_ids); }
void test_verifier_sdiv(void) { RUN(verifier_sdiv); }
void test_verifier_search_pruning(void) { RUN(verifier_search_pruning); }
void test_verifier_sock(void) { RUN(verifier_sock); }
void test_verifier_spill_fill(void) { RUN(verifier_spill_fill); }
void test_verifier_spin_lock(void) { RUN(verifier_spin_lock); }
void test_verifier_stack_ptr(void) { RUN(verifier_stack_ptr); }
void test_verifier_subprog_precision(void) { RUN(verifier_subprog_precision); }
void test_verifier_subreg(void) { RUN(verifier_subreg); }
void test_verifier_typedef(void) { RUN(verifier_typedef); }
void test_verifier_uninit(void) { RUN(verifier_uninit); }
void test_verifier_unpriv(void) { RUN(verifier_unpriv); }
void test_verifier_unpriv_perf(void) { RUN(verifier_unpriv_perf); }
void test_verifier_value_adj_spill(void) { RUN(verifier_value_adj_spill); }
void test_verifier_value(void) { RUN(verifier_value); }
void test_verifier_value_illegal_alu(void) { RUN(verifier_value_illegal_alu); }
void test_verifier_value_or_null(void) { RUN(verifier_value_or_null); }
void test_verifier_var_off(void) { RUN(verifier_var_off); }
void test_verifier_xadd(void) { RUN(verifier_xadd); }
void test_verifier_xdp(void) { RUN(verifier_xdp); }
void test_verifier_xdp_direct_packet_access(void) { RUN(verifier_xdp_direct_packet_access); }
static int init_test_val_map(struct bpf_object *obj, char *map_name)
{
struct test_val value = {
.index = (6 + 1) * sizeof(int),
.foo[6] = 0xabcdef12,
};
struct bpf_map *map;
int err, key = 0;
map = bpf_object__find_map_by_name(obj, map_name);
if (!map) {
PRINT_FAIL("Can't find map '%s'\n", map_name);
return -EINVAL;
}
err = bpf_map_update_elem(bpf_map__fd(map), &key, &value, 0);
if (err) {
PRINT_FAIL("Error while updating map '%s': %d\n", map_name, err);
return err;
}
return 0;
}
static int init_array_access_maps(struct bpf_object *obj)
{
return init_test_val_map(obj, "map_array_ro");
}
void test_verifier_array_access(void)
{
run_tests_aux("verifier_array_access",
verifier_array_access__elf_bytes,
init_array_access_maps);
}
static int init_value_ptr_arith_maps(struct bpf_object *obj)
{
return init_test_val_map(obj, "map_array_48b");
}
void test_verifier_value_ptr_arith(void)
{
run_tests_aux("verifier_value_ptr_arith",
verifier_value_ptr_arith__elf_bytes,
init_value_ptr_arith_maps);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/verifier.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#include <test_progs.h>
#include "test_endian.skel.h"
static int duration;
#define IN16 0x1234
#define IN32 0x12345678U
#define IN64 0x123456789abcdef0ULL
#define OUT16 0x3412
#define OUT32 0x78563412U
#define OUT64 0xf0debc9a78563412ULL
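/* Each OUT* constant is simply the byte-swapped IN* value,
 * e.g. 0x1234 <-> 0x3412.
 */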
void test_endian(void)
{
struct test_endian* skel;
struct test_endian__bss *bss;
int err;
skel = test_endian__open_and_load();
if (CHECK(!skel, "skel_open", "failed to open skeleton\n"))
return;
bss = skel->bss;
bss->in16 = IN16;
bss->in32 = IN32;
bss->in64 = IN64;
err = test_endian__attach(skel);
if (CHECK(err, "skel_attach", "skeleton attach failed: %d\n", err))
goto cleanup;
usleep(1);
CHECK(bss->out16 != OUT16, "out16", "got 0x%llx != exp 0x%llx\n",
(__u64)bss->out16, (__u64)OUT16);
CHECK(bss->out32 != OUT32, "out32", "got 0x%llx != exp 0x%llx\n",
(__u64)bss->out32, (__u64)OUT32);
CHECK(bss->out64 != OUT64, "out16", "got 0x%llx != exp 0x%llx\n",
(__u64)bss->out64, (__u64)OUT64);
CHECK(bss->const16 != OUT16, "const16", "got 0x%llx != exp 0x%llx\n",
(__u64)bss->const16, (__u64)OUT16);
CHECK(bss->const32 != OUT32, "const32", "got 0x%llx != exp 0x%llx\n",
(__u64)bss->const32, (__u64)OUT32);
CHECK(bss->const64 != OUT64, "const64", "got 0x%llx != exp 0x%llx\n",
(__u64)bss->const64, (__u64)OUT64);
cleanup:
test_endian__destroy(skel);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/endian.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
#include <test_progs.h>
#include "iters.skel.h"
#include "iters_state_safety.skel.h"
#include "iters_looping.skel.h"
#include "iters_num.skel.h"
#include "iters_testmod_seq.skel.h"
static void subtest_num_iters(void)
{
struct iters_num *skel;
int err;
skel = iters_num__open_and_load();
if (!ASSERT_OK_PTR(skel, "skel_open_and_load"))
return;
err = iters_num__attach(skel);
if (!ASSERT_OK(err, "skel_attach"))
goto cleanup;
usleep(1);
iters_num__detach(skel);
#define VALIDATE_CASE(case_name) \
ASSERT_EQ(skel->bss->res_##case_name, \
skel->rodata->exp_##case_name, \
#case_name)
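/* e.g. VALIDATE_CASE(simple_sum) checks res_simple_sum == exp_simple_sum */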
VALIDATE_CASE(empty_zero);
VALIDATE_CASE(empty_int_min);
VALIDATE_CASE(empty_int_max);
VALIDATE_CASE(empty_minus_one);
VALIDATE_CASE(simple_sum);
VALIDATE_CASE(neg_sum);
VALIDATE_CASE(very_neg_sum);
VALIDATE_CASE(neg_pos_sum);
VALIDATE_CASE(invalid_range);
VALIDATE_CASE(max_range);
VALIDATE_CASE(e2big_range);
VALIDATE_CASE(succ_elem_cnt);
VALIDATE_CASE(overfetched_elem_cnt);
VALIDATE_CASE(fail_elem_cnt);
#undef VALIDATE_CASE
cleanup:
iters_num__destroy(skel);
}
static void subtest_testmod_seq_iters(void)
{
struct iters_testmod_seq *skel;
int err;
if (!env.has_testmod) {
test__skip();
return;
}
skel = iters_testmod_seq__open_and_load();
if (!ASSERT_OK_PTR(skel, "skel_open_and_load"))
return;
err = iters_testmod_seq__attach(skel);
if (!ASSERT_OK(err, "skel_attach"))
goto cleanup;
usleep(1);
iters_testmod_seq__detach(skel);
#define VALIDATE_CASE(case_name) \
ASSERT_EQ(skel->bss->res_##case_name, \
skel->rodata->exp_##case_name, \
#case_name)
VALIDATE_CASE(empty);
VALIDATE_CASE(full);
VALIDATE_CASE(truncated);
#undef VALIDATE_CASE
cleanup:
iters_testmod_seq__destroy(skel);
}
void test_iters(void)
{
RUN_TESTS(iters_state_safety);
RUN_TESTS(iters_looping);
RUN_TESTS(iters);
if (env.has_testmod)
RUN_TESTS(iters_testmod_seq);
if (test__start_subtest("num"))
subtest_num_iters();
if (test__start_subtest("testmod_seq"))
subtest_testmod_seq_iters();
}
| linux-master | tools/testing/selftests/bpf/prog_tests/iters.c |
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include <network_helpers.h>
#include "test_pkt_access.skel.h"
static const __u32 duration;
static void check_run_cnt(int prog_fd, __u64 run_cnt)
{
struct bpf_prog_info info = {};
__u32 info_len = sizeof(info);
int err;
err = bpf_prog_get_info_by_fd(prog_fd, &info, &info_len);
if (CHECK(err, "get_prog_info", "failed to get bpf_prog_info for fd %d\n", prog_fd))
return;
CHECK(run_cnt != info.run_cnt, "run_cnt",
"incorrect number of repetitions, want %llu have %llu\n", run_cnt, info.run_cnt);
}
void test_prog_run_opts(void)
{
struct test_pkt_access *skel;
int err, stats_fd = -1, prog_fd;
char buf[10] = {};
__u64 run_cnt = 0;
LIBBPF_OPTS(bpf_test_run_opts, topts,
.repeat = 1,
.data_in = &pkt_v4,
.data_size_in = sizeof(pkt_v4),
.data_out = buf,
.data_size_out = 5,
);
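/* data_size_out (5) is deliberately smaller than the packet, so the first
 * run exercises the ENOSPC truncation path asserted below.
 */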
stats_fd = bpf_enable_stats(BPF_STATS_RUN_TIME);
if (!ASSERT_GE(stats_fd, 0, "enable_stats good fd"))
return;
skel = test_pkt_access__open_and_load();
if (!ASSERT_OK_PTR(skel, "open_and_load"))
goto cleanup;
prog_fd = bpf_program__fd(skel->progs.test_pkt_access);
err = bpf_prog_test_run_opts(prog_fd, &topts);
ASSERT_EQ(errno, ENOSPC, "test_run errno");
ASSERT_ERR(err, "test_run");
ASSERT_OK(topts.retval, "test_run retval");
ASSERT_EQ(topts.data_size_out, sizeof(pkt_v4), "test_run data_size_out");
ASSERT_EQ(buf[5], 0, "overflow, BPF_PROG_TEST_RUN ignored size hint");
run_cnt += topts.repeat;
check_run_cnt(prog_fd, run_cnt);
topts.data_out = NULL;
topts.data_size_out = 0;
topts.repeat = 2;
errno = 0;
err = bpf_prog_test_run_opts(prog_fd, &topts);
ASSERT_OK(errno, "run_no_output errno");
ASSERT_OK(err, "run_no_output err");
ASSERT_OK(topts.retval, "run_no_output retval");
run_cnt += topts.repeat;
check_run_cnt(prog_fd, run_cnt);
cleanup:
if (skel)
test_pkt_access__destroy(skel);
if (stats_fd >= 0)
close(stats_fd);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/prog_run_opts.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2020 Facebook
#include <test_progs.h>
#include "network_helpers.h"
#include "cgroup_skb_sk_lookup_kern.skel.h"
static void run_lookup_test(__u16 *g_serv_port, int out_sk)
{
int serv_sk = -1, in_sk = -1, serv_in_sk = -1, err;
struct sockaddr_in6 addr = {};
socklen_t addr_len = sizeof(addr);
__u32 duration = 0;
serv_sk = start_server(AF_INET6, SOCK_STREAM, NULL, 0, 0);
if (CHECK(serv_sk < 0, "start_server", "failed to start server\n"))
return;
err = getsockname(serv_sk, (struct sockaddr *)&addr, &addr_len);
if (CHECK(err, "getsockname", "errno %d\n", errno))
goto cleanup;
*g_serv_port = addr.sin6_port;
/* A client outside the test cgroup should fail to connect and time out. */
err = connect_fd_to_fd(out_sk, serv_sk, 1000);
if (CHECK(!err || errno != EINPROGRESS, "connect_fd_to_fd",
"unexpected result err %d errno %d\n", err, errno))
goto cleanup;
/* Client inside test cgroup should connect just fine. */
in_sk = connect_to_fd(serv_sk, 0);
if (CHECK(in_sk < 0, "connect_to_fd", "errno %d\n", errno))
goto cleanup;
serv_in_sk = accept(serv_sk, NULL, NULL);
if (CHECK(serv_in_sk < 0, "accept", "errno %d\n", errno))
goto cleanup;
cleanup:
close(serv_in_sk);
close(in_sk);
close(serv_sk);
}
static void run_cgroup_bpf_test(const char *cg_path, int out_sk)
{
struct cgroup_skb_sk_lookup_kern *skel;
struct bpf_link *link;
__u32 duration = 0;
int cgfd = -1;
skel = cgroup_skb_sk_lookup_kern__open_and_load();
if (CHECK(!skel, "skel_open_load", "open_load failed\n"))
return;
cgfd = test__join_cgroup(cg_path);
if (CHECK(cgfd < 0, "cgroup_join", "cgroup setup failed\n"))
goto cleanup;
link = bpf_program__attach_cgroup(skel->progs.ingress_lookup, cgfd);
if (!ASSERT_OK_PTR(link, "cgroup_attach"))
goto cleanup;
run_lookup_test(&skel->bss->g_serv_port, out_sk);
bpf_link__destroy(link);
cleanup:
close(cgfd);
cgroup_skb_sk_lookup_kern__destroy(skel);
}
void test_cgroup_skb_sk_lookup(void)
{
const char *cg_path = "/foo";
int out_sk;
/* Create a socket before joining the testing cgroup so that its cgroup id
 * differs from that of the testing cgroup. Moving the selftests process to
 * the testing cgroup won't change the cgroup id of an already created
 * socket.
 */
out_sk = socket(AF_INET6, SOCK_STREAM, 0);
if (CHECK_FAIL(out_sk < 0))
return;
run_cgroup_bpf_test(cg_path, out_sk);
close(out_sk);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/cgroup_skb_sk_lookup.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2020 Google LLC.
*/
#include <test_progs.h>
#include <network_helpers.h>
void test_load_bytes_relative(void)
{
int server_fd, cgroup_fd, prog_fd, map_fd, client_fd;
int err;
struct bpf_object *obj;
struct bpf_program *prog;
struct bpf_map *test_result;
__u32 duration = 0;
__u32 map_key = 0;
__u32 map_value = 0;
cgroup_fd = test__join_cgroup("/load_bytes_relative");
if (CHECK_FAIL(cgroup_fd < 0))
return;
server_fd = start_server(AF_INET, SOCK_STREAM, NULL, 0, 0);
if (CHECK_FAIL(server_fd < 0))
goto close_cgroup_fd;
err = bpf_prog_test_load("./load_bytes_relative.bpf.o", BPF_PROG_TYPE_CGROUP_SKB,
&obj, &prog_fd);
if (CHECK_FAIL(err))
goto close_server_fd;
test_result = bpf_object__find_map_by_name(obj, "test_result");
if (CHECK_FAIL(!test_result))
goto close_bpf_object;
map_fd = bpf_map__fd(test_result);
if (map_fd < 0)
goto close_bpf_object;
prog = bpf_object__find_program_by_name(obj, "load_bytes_relative");
if (CHECK_FAIL(!prog))
goto close_bpf_object;
err = bpf_prog_attach(prog_fd, cgroup_fd, BPF_CGROUP_INET_EGRESS,
BPF_F_ALLOW_MULTI);
if (CHECK_FAIL(err))
goto close_bpf_object;
client_fd = connect_to_fd(server_fd, 0);
if (CHECK_FAIL(client_fd < 0))
goto close_bpf_object;
close(client_fd);
err = bpf_map_lookup_elem(map_fd, &map_key, &map_value);
if (CHECK_FAIL(err))
goto close_bpf_object;
CHECK(map_value != 1, "bpf", "bpf program returned failure");
close_bpf_object:
bpf_object__close(obj);
close_server_fd:
close(server_fd);
close_cgroup_fd:
close(cgroup_fd);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/load_bytes_relative.c |
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
void test_stacktrace_map(void)
{
int control_map_fd, stackid_hmap_fd, stackmap_fd, stack_amap_fd;
const char *prog_name = "oncpu";
int err, prog_fd, stack_trace_len;
const char *file = "./test_stacktrace_map.bpf.o";
__u32 key, val, duration = 0;
struct bpf_program *prog;
struct bpf_object *obj;
struct bpf_link *link;
err = bpf_prog_test_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
return;
prog = bpf_object__find_program_by_name(obj, prog_name);
if (CHECK(!prog, "find_prog", "prog '%s' not found\n", prog_name))
goto close_prog;
link = bpf_program__attach_tracepoint(prog, "sched", "sched_switch");
if (!ASSERT_OK_PTR(link, "attach_tp"))
goto close_prog;
/* find map fds */
control_map_fd = bpf_find_map(__func__, obj, "control_map");
if (CHECK_FAIL(control_map_fd < 0))
goto disable_pmu;
stackid_hmap_fd = bpf_find_map(__func__, obj, "stackid_hmap");
if (CHECK_FAIL(stackid_hmap_fd < 0))
goto disable_pmu;
stackmap_fd = bpf_find_map(__func__, obj, "stackmap");
if (CHECK_FAIL(stackmap_fd < 0))
goto disable_pmu;
stack_amap_fd = bpf_find_map(__func__, obj, "stack_amap");
if (CHECK_FAIL(stack_amap_fd < 0))
goto disable_pmu;
/* give some time for bpf program run */
sleep(1);
/* disable stack trace collection */
key = 0;
val = 1;
bpf_map_update_elem(control_map_fd, &key, &val, 0);
/* for every element in stackid_hmap, we can find a corresponding one
* in stackmap, and vice versa.
*/
err = compare_map_keys(stackid_hmap_fd, stackmap_fd);
if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap",
"err %d errno %d\n", err, errno))
goto disable_pmu;
err = compare_map_keys(stackmap_fd, stackid_hmap_fd);
if (CHECK(err, "compare_map_keys stackmap vs. stackid_hmap",
"err %d errno %d\n", err, errno))
goto disable_pmu;
stack_trace_len = PERF_MAX_STACK_DEPTH * sizeof(__u64);
err = compare_stack_ips(stackmap_fd, stack_amap_fd, stack_trace_len);
if (CHECK(err, "compare_stack_ips stackmap vs. stack_amap",
"err %d errno %d\n", err, errno))
goto disable_pmu;
disable_pmu:
bpf_link__destroy(link);
close_prog:
bpf_object__close(obj);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/stacktrace_map.c |
// SPDX-License-Identifier: GPL-2.0
#include "test_progs.h"
#include "core_kern_overflow.lskel.h"
void test_core_kern_overflow_lskel(void)
{
struct core_kern_overflow_lskel *skel;
skel = core_kern_overflow_lskel__open_and_load();
if (!ASSERT_NULL(skel, "open_and_load"))
core_kern_overflow_lskel__destroy(skel);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/core_kern_overflow.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* This test makes sure BPF stats collection using rstat works correctly.
* The test uses 3 BPF progs:
* (a) counter: This BPF prog is invoked every time we attach a process to a
* cgroup and locklessly increments a percpu counter.
* The program then calls cgroup_rstat_updated() to inform rstat
* of an update on the (cpu, cgroup) pair.
*
* (b) flusher: This BPF prog is invoked when an rstat flush is ongoing, it
* aggregates all percpu counters to a total counter, and also
* propagates the changes to the ancestor cgroups.
*
* (c) dumper: This BPF prog is a cgroup_iter. It is used to output the total
* counter of a cgroup through reading a file in userspace.
*
* The test sets up a cgroup hierarchy, and the above programs. It spawns a few
* processes in the leaf cgroups and makes sure all the counters are aggregated
* correctly.
*
* Copyright 2022 Google LLC.
*/
#include <asm-generic/errno.h>
#include <errno.h>
#include <sys/types.h>
#include <sys/mount.h>
#include <sys/stat.h>
#include <unistd.h>
#include <test_progs.h>
#include <bpf/libbpf.h>
#include <bpf/bpf.h>
#include "cgroup_helpers.h"
#include "cgroup_hierarchical_stats.skel.h"
#define PAGE_SIZE 4096
#define MB(x) (x << 20)
#define PROCESSES_PER_CGROUP 3
#define BPFFS_ROOT "/sys/fs/bpf/"
#define BPFFS_ATTACH_COUNTERS BPFFS_ROOT "attach_counters/"
#define CG_ROOT_NAME "root"
#define CG_ROOT_ID 1
#define CGROUP_PATH(p, n) {.path = p"/"n, .name = n}
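/* e.g. CGROUP_PATH("/test", "child1") yields
 * {.path = "/test/child1", .name = "child1"}
 */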
static struct {
const char *path, *name;
unsigned long long id;
int fd;
} cgroups[] = {
CGROUP_PATH("/", "test"),
CGROUP_PATH("/test", "child1"),
CGROUP_PATH("/test", "child2"),
CGROUP_PATH("/test/child1", "child1_1"),
CGROUP_PATH("/test/child1", "child1_2"),
CGROUP_PATH("/test/child2", "child2_1"),
CGROUP_PATH("/test/child2", "child2_2"),
};
#define N_CGROUPS ARRAY_SIZE(cgroups)
#define N_NON_LEAF_CGROUPS 3
static int root_cgroup_fd;
static bool mounted_bpffs;
/* Reads the file at 'path' into 'buf'; returns 0 on success. */
static int read_from_file(const char *path, char *buf, size_t size)
{
int fd, len;
fd = open(path, O_RDONLY);
if (fd < 0)
return fd;
len = read(fd, buf, size);
close(fd);
if (len < 0)
return len;
buf[len] = 0;
return 0;
}
/* mounts bpffs and mkdir for reading stats, returns 0 on success. */
static int setup_bpffs(void)
{
int err;
/* Mount bpffs */
err = mount("bpf", BPFFS_ROOT, "bpf", 0, NULL);
mounted_bpffs = !err;
if (ASSERT_FALSE(err && errno != EBUSY, "mount"))
return err;
/* Create a directory to contain stat files in bpffs */
err = mkdir(BPFFS_ATTACH_COUNTERS, 0755);
if (!ASSERT_OK(err, "mkdir"))
return err;
return 0;
}
static void cleanup_bpffs(void)
{
/* Remove created directory in bpffs */
ASSERT_OK(rmdir(BPFFS_ATTACH_COUNTERS), "rmdir "BPFFS_ATTACH_COUNTERS);
/* Unmount bpffs, if it wasn't already mounted when we started */
if (mounted_bpffs)
return;
ASSERT_OK(umount(BPFFS_ROOT), "unmount bpffs");
}
/* sets up cgroups, returns 0 on success. */
static int setup_cgroups(void)
{
int i, fd, err;
err = setup_cgroup_environment();
if (!ASSERT_OK(err, "setup_cgroup_environment"))
return err;
root_cgroup_fd = get_root_cgroup();
if (!ASSERT_GE(root_cgroup_fd, 0, "get_root_cgroup"))
return root_cgroup_fd;
for (i = 0; i < N_CGROUPS; i++) {
fd = create_and_get_cgroup(cgroups[i].path);
if (!ASSERT_GE(fd, 0, "create_and_get_cgroup"))
return fd;
cgroups[i].fd = fd;
cgroups[i].id = get_cgroup_id(cgroups[i].path);
}
return 0;
}
static void cleanup_cgroups(void)
{
close(root_cgroup_fd);
for (int i = 0; i < N_CGROUPS; i++)
close(cgroups[i].fd);
cleanup_cgroup_environment();
}
/* Sets up the cgroup hierarchy, returns 0 on success. */
static int setup_hierarchy(void)
{
return setup_bpffs() || setup_cgroups();
}
static void destroy_hierarchy(void)
{
cleanup_cgroups();
cleanup_bpffs();
}
static int attach_processes(void)
{
int i, j, status;
/* In every leaf cgroup, attach 3 processes */
for (i = N_NON_LEAF_CGROUPS; i < N_CGROUPS; i++) {
for (j = 0; j < PROCESSES_PER_CGROUP; j++) {
pid_t pid;
/* Create child and attach to cgroup */
pid = fork();
if (pid == 0) {
if (join_parent_cgroup(cgroups[i].path))
exit(EACCES);
exit(0);
}
/* Cleanup child */
waitpid(pid, &status, 0);
if (!ASSERT_TRUE(WIFEXITED(status), "child process exited"))
return 1;
if (!ASSERT_EQ(WEXITSTATUS(status), 0,
"child process exit code"))
return 1;
}
}
return 0;
}
static unsigned long long
get_attach_counter(unsigned long long cgroup_id, const char *file_name)
{
unsigned long long attach_counter = 0, id = 0;
static char buf[128], path[128];
/* For every cgroup, read the file generated by cgroup_iter */
snprintf(path, 128, "%s%s", BPFFS_ATTACH_COUNTERS, file_name);
if (!ASSERT_OK(read_from_file(path, buf, 128), "read cgroup_iter"))
return 0;
/* Check the output file formatting */
ASSERT_EQ(sscanf(buf, "cg_id: %llu, attach_counter: %llu\n",
&id, &attach_counter), 2, "output format");
/* Check that the cgroup_id is displayed correctly */
ASSERT_EQ(id, cgroup_id, "cgroup_id");
/* Check that the counter is non-zero */
ASSERT_GT(attach_counter, 0, "attach counter non-zero");
return attach_counter;
}
static void check_attach_counters(void)
{
unsigned long long attach_counters[N_CGROUPS], root_attach_counter;
int i;
for (i = 0; i < N_CGROUPS; i++)
attach_counters[i] = get_attach_counter(cgroups[i].id,
cgroups[i].name);
/* Read stats for root too */
root_attach_counter = get_attach_counter(CG_ROOT_ID, CG_ROOT_NAME);
/* Check that all leaf cgroups have an attach counter of 3 */
for (i = N_NON_LEAF_CGROUPS; i < N_CGROUPS; i++)
ASSERT_EQ(attach_counters[i], PROCESSES_PER_CGROUP,
"leaf cgroup attach counter");
/* Check that child1 == child1_1 + child1_2 */
ASSERT_EQ(attach_counters[1], attach_counters[3] + attach_counters[4],
"child1_counter");
/* Check that child2 == child2_1 + child2_2 */
ASSERT_EQ(attach_counters[2], attach_counters[5] + attach_counters[6],
"child2_counter");
/* Check that test == child1 + child2 */
ASSERT_EQ(attach_counters[0], attach_counters[1] + attach_counters[2],
"test_counter");
/* Check that root >= test */
ASSERT_GE(root_attach_counter, attach_counters[1], "root_counter");
}
/* Creates iter link and pins in bpffs, returns 0 on success, -errno on failure.
*/
static int setup_cgroup_iter(struct cgroup_hierarchical_stats *obj,
int cgroup_fd, const char *file_name)
{
DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
union bpf_iter_link_info linfo = {};
struct bpf_link *link;
static char path[128];
int err;
/*
* Create an iter link, parameterized by cgroup_fd. We only want to
* traverse one cgroup, so set the traversal order to "self".
*/
linfo.cgroup.cgroup_fd = cgroup_fd;
linfo.cgroup.order = BPF_CGROUP_ITER_SELF_ONLY;
opts.link_info = &linfo;
opts.link_info_len = sizeof(linfo);
link = bpf_program__attach_iter(obj->progs.dumper, &opts);
if (!ASSERT_OK_PTR(link, "attach_iter"))
return -EFAULT;
/* Pin the link to a bpffs file */
snprintf(path, 128, "%s%s", BPFFS_ATTACH_COUNTERS, file_name);
err = bpf_link__pin(link, path);
ASSERT_OK(err, "pin cgroup_iter");
/* Remove the link, leaving only the ref held by the pinned file */
bpf_link__destroy(link);
return err;
}
/* Sets up programs for collecting stats, returns 0 on success. */
static int setup_progs(struct cgroup_hierarchical_stats **skel)
{
int i, err;
*skel = cgroup_hierarchical_stats__open_and_load();
if (!ASSERT_OK_PTR(*skel, "open_and_load"))
return 1;
/* Attach cgroup_iter program that will dump the stats to cgroups */
for (i = 0; i < N_CGROUPS; i++) {
err = setup_cgroup_iter(*skel, cgroups[i].fd, cgroups[i].name);
if (!ASSERT_OK(err, "setup_cgroup_iter"))
return err;
}
/* Also dump stats for root */
err = setup_cgroup_iter(*skel, root_cgroup_fd, CG_ROOT_NAME);
if (!ASSERT_OK(err, "setup_cgroup_iter"))
return err;
bpf_program__set_autoattach((*skel)->progs.dumper, false);
err = cgroup_hierarchical_stats__attach(*skel);
if (!ASSERT_OK(err, "attach"))
return err;
return 0;
}
static void destroy_progs(struct cgroup_hierarchical_stats *skel)
{
static char path[128];
int i;
for (i = 0; i < N_CGROUPS; i++) {
/* Delete files in bpffs that cgroup_iters are pinned in */
snprintf(path, 128, "%s%s", BPFFS_ATTACH_COUNTERS,
cgroups[i].name);
ASSERT_OK(remove(path), "remove cgroup_iter pin");
}
/* Delete root file in bpffs */
snprintf(path, 128, "%s%s", BPFFS_ATTACH_COUNTERS, CG_ROOT_NAME);
ASSERT_OK(remove(path), "remove cgroup_iter root pin");
cgroup_hierarchical_stats__destroy(skel);
}
void test_cgroup_hierarchical_stats(void)
{
struct cgroup_hierarchical_stats *skel = NULL;
if (setup_hierarchy())
goto hierarchy_cleanup;
if (setup_progs(&skel))
goto cleanup;
if (attach_processes())
goto cleanup;
check_attach_counters();
cleanup:
destroy_progs(skel);
hierarchy_cleanup:
destroy_hierarchy();
}
| linux-master | tools/testing/selftests/bpf/prog_tests/cgroup_hierarchical_stats.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Facebook */
#include <test_progs.h>
#include "trace_vprintk.lskel.h"
#define TRACEFS_PIPE "/sys/kernel/tracing/trace_pipe"
#define DEBUGFS_PIPE "/sys/kernel/debug/tracing/trace_pipe"
#define SEARCHMSG "1,2,3,4,5,6,7,8,9,10"
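/* The attached BPF program is assumed to emit SEARCHMSG via
 * bpf_trace_vprintk(), so each run should add one matching trace_pipe
 * line.
 */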
void serial_test_trace_vprintk(void)
{
struct trace_vprintk_lskel__bss *bss;
int err = 0, iter = 0, found = 0;
struct trace_vprintk_lskel *skel;
char *buf = NULL;
FILE *fp = NULL;
size_t buflen;
skel = trace_vprintk_lskel__open_and_load();
if (!ASSERT_OK_PTR(skel, "trace_vprintk__open_and_load"))
goto cleanup;
bss = skel->bss;
err = trace_vprintk_lskel__attach(skel);
if (!ASSERT_OK(err, "trace_vprintk__attach"))
goto cleanup;
if (access(TRACEFS_PIPE, F_OK) == 0)
fp = fopen(TRACEFS_PIPE, "r");
else
fp = fopen(DEBUGFS_PIPE, "r");
if (!ASSERT_OK_PTR(fp, "fopen(TRACE_PIPE)"))
goto cleanup;
/* We do not want to wait forever if this test fails... */
fcntl(fileno(fp), F_SETFL, O_NONBLOCK);
/* wait for tracepoint to trigger */
usleep(1);
trace_vprintk_lskel__detach(skel);
if (!ASSERT_GT(bss->trace_vprintk_ran, 0, "bss->trace_vprintk_ran"))
goto cleanup;
if (!ASSERT_GT(bss->trace_vprintk_ret, 0, "bss->trace_vprintk_ret"))
goto cleanup;
/* verify our search string is in the trace buffer */
while (getline(&buf, &buflen, fp) >= 0 || errno == EAGAIN) {
if (strstr(buf, SEARCHMSG) != NULL)
found++;
if (found == bss->trace_vprintk_ran)
break;
if (++iter > 1000)
break;
}
if (!ASSERT_EQ(found, bss->trace_vprintk_ran, "found"))
goto cleanup;
if (!ASSERT_LT(bss->null_data_vprintk_ret, 0, "bss->null_data_vprintk_ret"))
goto cleanup;
cleanup:
trace_vprintk_lskel__destroy(skel);
free(buf);
if (fp)
fclose(fp);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/trace_vprintk.c |
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include "test_xdp_attach_fail.skel.h"
#define IFINDEX_LO 1
#define XDP_FLAGS_REPLACE (1U << 4)
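/* Presumably a local copy of the UAPI XDP_FLAGS_REPLACE bit from
 * <linux/if_link.h>, redefined so the test builds against older headers.
 */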
static void test_xdp_attach(const char *file)
{
__u32 duration = 0, id1, id2, id0 = 0, len;
struct bpf_object *obj1, *obj2, *obj3;
struct bpf_prog_info info = {};
int err, fd1, fd2, fd3;
LIBBPF_OPTS(bpf_xdp_attach_opts, opts);
len = sizeof(info);
err = bpf_prog_test_load(file, BPF_PROG_TYPE_XDP, &obj1, &fd1);
if (CHECK_FAIL(err))
return;
err = bpf_prog_get_info_by_fd(fd1, &info, &len);
if (CHECK_FAIL(err))
goto out_1;
id1 = info.id;
err = bpf_prog_test_load(file, BPF_PROG_TYPE_XDP, &obj2, &fd2);
if (CHECK_FAIL(err))
goto out_1;
memset(&info, 0, sizeof(info));
err = bpf_prog_get_info_by_fd(fd2, &info, &len);
if (CHECK_FAIL(err))
goto out_2;
id2 = info.id;
err = bpf_prog_test_load(file, BPF_PROG_TYPE_XDP, &obj3, &fd3);
if (CHECK_FAIL(err))
goto out_2;
err = bpf_xdp_attach(IFINDEX_LO, fd1, XDP_FLAGS_REPLACE, &opts);
if (CHECK(err, "load_ok", "initial load failed"))
goto out_close;
err = bpf_xdp_query_id(IFINDEX_LO, 0, &id0);
if (CHECK(err || id0 != id1, "id1_check",
"loaded prog id %u != id1 %u, err %d", id0, id1, err))
goto out_close;
err = bpf_xdp_attach(IFINDEX_LO, fd2, XDP_FLAGS_REPLACE, &opts);
if (CHECK(!err, "load_fail", "load with expected id didn't fail"))
goto out;
opts.old_prog_fd = fd1;
err = bpf_xdp_attach(IFINDEX_LO, fd2, 0, &opts);
if (CHECK(err, "replace_ok", "replace valid old_fd failed"))
goto out;
err = bpf_xdp_query_id(IFINDEX_LO, 0, &id0);
if (CHECK(err || id0 != id2, "id2_check",
"loaded prog id %u != id2 %u, err %d", id0, id2, err))
goto out_close;
err = bpf_xdp_attach(IFINDEX_LO, fd3, 0, &opts);
if (CHECK(!err, "replace_fail", "replace invalid old_fd didn't fail"))
goto out;
err = bpf_xdp_detach(IFINDEX_LO, 0, &opts);
if (CHECK(!err, "remove_fail", "remove invalid old_fd didn't fail"))
goto out;
opts.old_prog_fd = fd2;
err = bpf_xdp_detach(IFINDEX_LO, 0, &opts);
if (CHECK(err, "remove_ok", "remove valid old_fd failed"))
goto out;
err = bpf_xdp_query_id(IFINDEX_LO, 0, &id0);
if (CHECK(err || id0 != 0, "unload_check",
"loaded prog id %u != 0, err %d", id0, err))
goto out_close;
out:
bpf_xdp_detach(IFINDEX_LO, 0, NULL);
out_close:
bpf_object__close(obj3);
out_2:
bpf_object__close(obj2);
out_1:
bpf_object__close(obj1);
}
#define ERRMSG_LEN 64
struct xdp_errmsg {
char msg[ERRMSG_LEN];
};
static void on_xdp_errmsg(void *ctx, int cpu, void *data, __u32 size)
{
struct xdp_errmsg *ctx_errmg = ctx, *tp_errmsg = data;
memcpy(&ctx_errmg->msg, &tp_errmsg->msg, ERRMSG_LEN);
}
static const char tgt_errmsg[] = "Invalid XDP flags for BPF link attachment";
static void test_xdp_attach_fail(const char *file)
{
struct test_xdp_attach_fail *skel = NULL;
struct xdp_errmsg errmsg = {};
struct perf_buffer *pb = NULL;
struct bpf_object *obj = NULL;
int err, fd_xdp;
LIBBPF_OPTS(bpf_link_create_opts, opts);
skel = test_xdp_attach_fail__open_and_load();
if (!ASSERT_OK_PTR(skel, "test_xdp_attach_fail__open_and_load"))
goto out_close;
err = test_xdp_attach_fail__attach(skel);
if (!ASSERT_EQ(err, 0, "test_xdp_attach_fail__attach"))
goto out_close;
/* set up perf buffer */
pb = perf_buffer__new(bpf_map__fd(skel->maps.xdp_errmsg_pb), 1,
on_xdp_errmsg, NULL, &errmsg, NULL);
if (!ASSERT_OK_PTR(pb, "perf_buffer__new"))
goto out_close;
err = bpf_prog_test_load(file, BPF_PROG_TYPE_XDP, &obj, &fd_xdp);
if (!ASSERT_EQ(err, 0, "bpf_prog_test_load"))
goto out_close;
opts.flags = 0xFF; // invalid flags to fail to attach XDP prog
err = bpf_link_create(fd_xdp, IFINDEX_LO, BPF_XDP, &opts);
if (!ASSERT_EQ(err, -EINVAL, "bpf_link_create"))
goto out_close;
/* read perf buffer */
err = perf_buffer__poll(pb, 100);
if (!ASSERT_GT(err, -1, "perf_buffer__poll"))
goto out_close;
ASSERT_STRNEQ((const char *) errmsg.msg, tgt_errmsg,
42 /* sizeof(tgt_errmsg): full string including NUL */, "check error message");
out_close:
perf_buffer__free(pb);
bpf_object__close(obj);
test_xdp_attach_fail__destroy(skel);
}
void serial_test_xdp_attach(void)
{
if (test__start_subtest("xdp_attach"))
test_xdp_attach("./test_xdp.bpf.o");
if (test__start_subtest("xdp_attach_dynptr"))
test_xdp_attach("./test_xdp_dynptr.bpf.o");
if (test__start_subtest("xdp_attach_failed"))
test_xdp_attach_fail("./xdp_dummy.bpf.o");
}
| linux-master | tools/testing/selftests/bpf/prog_tests/xdp_attach.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021, Oracle and/or its affiliates. */
#include <test_progs.h>
/* Test that verifies exception handling is working. fork()
* triggers task_newtask tracepoint; that new task will have a
* NULL pointer task_works, and the associated task->task_works->func
* should not be NULL if task_works itself is non-NULL.
*
* So to verify exception handling we want to see both task_works and
* task_works->func read back as NULL; if we do, we can conclude that the
* exception handler ran when we attempted to dereference task->task_works
* and zeroed the destination register.
*/
#include "exhandler_kern.skel.h"
void test_exhandler(void)
{
int err = 0, duration = 0, status;
struct exhandler_kern *skel;
pid_t cpid;
skel = exhandler_kern__open_and_load();
if (CHECK(!skel, "skel_load", "skeleton failed: %d\n", err))
goto cleanup;
skel->bss->test_pid = getpid();
err = exhandler_kern__attach(skel);
if (!ASSERT_OK(err, "attach"))
goto cleanup;
cpid = fork();
if (!ASSERT_GT(cpid, -1, "fork failed"))
goto cleanup;
if (cpid == 0)
_exit(0);
waitpid(cpid, &status, 0);
ASSERT_NEQ(skel->bss->exception_triggered, 0, "verify exceptions occurred");
cleanup:
exhandler_kern__destroy(skel);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/exhandler.c |
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include "cgroup_helpers.h"
#include "sockopt_inherit.skel.h"
#define SOL_CUSTOM 0xdeadbeef
#define CUSTOM_INHERIT1 0
#define CUSTOM_INHERIT2 1
#define CUSTOM_LISTENER 2
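/* SOL_CUSTOM options are assumed to be handled entirely by the attached
 * BPF programs: the two INHERIT options should be copied to accepted
 * sockets, while the LISTENER option should stay on the listener only, as
 * the assertions below expect.
 */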
static int connect_to_server(int server_fd)
{
struct sockaddr_storage addr;
socklen_t len = sizeof(addr);
int fd;
fd = socket(AF_INET, SOCK_STREAM, 0);
if (fd < 0) {
log_err("Failed to create client socket");
return -1;
}
if (getsockname(server_fd, (struct sockaddr *)&addr, &len)) {
log_err("Failed to get server addr");
goto out;
}
if (connect(fd, (const struct sockaddr *)&addr, len) < 0) {
log_err("Fail to connect to server");
goto out;
}
return fd;
out:
close(fd);
return -1;
}
static int verify_sockopt(int fd, int optname, const char *msg, char expected)
{
socklen_t optlen = 1;
char buf = 0;
int err;
err = getsockopt(fd, SOL_CUSTOM, optname, &buf, &optlen);
if (err) {
log_err("%s: failed to call getsockopt", msg);
return 1;
}
printf("%s %d: got=0x%x ? expected=0x%x\n", msg, optname, buf, expected);
if (buf != expected) {
log_err("%s: unexpected getsockopt value %d != %d", msg,
buf, expected);
return 1;
}
return 0;
}
static pthread_mutex_t server_started_mtx = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t server_started = PTHREAD_COND_INITIALIZER;
static void *server_thread(void *arg)
{
struct sockaddr_storage addr;
socklen_t len = sizeof(addr);
int fd = *(int *)arg;
int client_fd;
int err = 0;
err = listen(fd, 1);
pthread_mutex_lock(&server_started_mtx);
pthread_cond_signal(&server_started);
pthread_mutex_unlock(&server_started_mtx);
if (!ASSERT_GE(err, 0, "listed on socket"))
return NULL;
err += verify_sockopt(fd, CUSTOM_INHERIT1, "listen", 1);
err += verify_sockopt(fd, CUSTOM_INHERIT2, "listen", 1);
err += verify_sockopt(fd, CUSTOM_LISTENER, "listen", 1);
client_fd = accept(fd, (struct sockaddr *)&addr, &len);
if (!ASSERT_GE(client_fd, 0, "accept client"))
return NULL;
err += verify_sockopt(client_fd, CUSTOM_INHERIT1, "accept", 1);
err += verify_sockopt(client_fd, CUSTOM_INHERIT2, "accept", 1);
err += verify_sockopt(client_fd, CUSTOM_LISTENER, "accept", 0);
close(client_fd);
return (void *)(long)err;
}
static int start_server(void)
{
struct sockaddr_in addr = {
.sin_family = AF_INET,
.sin_addr.s_addr = htonl(INADDR_LOOPBACK),
};
char buf;
int err;
int fd;
int i;
fd = socket(AF_INET, SOCK_STREAM, 0);
if (fd < 0) {
log_err("Failed to create server socket");
return -1;
}
for (i = CUSTOM_INHERIT1; i <= CUSTOM_LISTENER; i++) {
buf = 0x01;
err = setsockopt(fd, SOL_CUSTOM, i, &buf, 1);
if (err) {
log_err("Failed to call setsockopt(%d)", i);
close(fd);
return -1;
}
}
if (bind(fd, (const struct sockaddr *)&addr, sizeof(addr)) < 0) {
log_err("Failed to bind socket");
close(fd);
return -1;
}
return fd;
}
static void run_test(int cgroup_fd)
{
struct bpf_link *link_getsockopt = NULL;
struct bpf_link *link_setsockopt = NULL;
int server_fd = -1, client_fd;
struct sockopt_inherit *obj;
void *server_err;
pthread_t tid;
int err;
obj = sockopt_inherit__open_and_load();
if (!ASSERT_OK_PTR(obj, "skel-load"))
return;
obj->bss->page_size = sysconf(_SC_PAGESIZE);
link_getsockopt = bpf_program__attach_cgroup(obj->progs._getsockopt,
cgroup_fd);
if (!ASSERT_OK_PTR(link_getsockopt, "cg-attach-getsockopt"))
goto close_bpf_object;
link_setsockopt = bpf_program__attach_cgroup(obj->progs._setsockopt,
cgroup_fd);
if (!ASSERT_OK_PTR(link_setsockopt, "cg-attach-setsockopt"))
goto close_bpf_object;
server_fd = start_server();
if (!ASSERT_GE(server_fd, 0, "start_server"))
goto close_bpf_object;
pthread_mutex_lock(&server_started_mtx);
if (!ASSERT_OK(pthread_create(&tid, NULL, server_thread,
(void *)&server_fd), "pthread_create")) {
pthread_mutex_unlock(&server_started_mtx);
goto close_server_fd;
}
pthread_cond_wait(&server_started, &server_started_mtx);
pthread_mutex_unlock(&server_started_mtx);
client_fd = connect_to_server(server_fd);
if (!ASSERT_GE(client_fd, 0, "connect_to_server"))
goto close_server_fd;
ASSERT_OK(verify_sockopt(client_fd, CUSTOM_INHERIT1, "connect", 0), "verify_sockopt1");
ASSERT_OK(verify_sockopt(client_fd, CUSTOM_INHERIT2, "connect", 0), "verify_sockopt2");
ASSERT_OK(verify_sockopt(client_fd, CUSTOM_LISTENER, "connect", 0), "verify_sockopt listener");
pthread_join(tid, &server_err);
err = (int)(long)server_err;
ASSERT_OK(err, "pthread_join retval");
close(client_fd);
close_server_fd:
close(server_fd);
close_bpf_object:
bpf_link__destroy(link_getsockopt);
bpf_link__destroy(link_setsockopt);
sockopt_inherit__destroy(obj);
}
void test_sockopt_inherit(void)
{
int cgroup_fd;
cgroup_fd = test__join_cgroup("/sockopt_inherit");
if (!ASSERT_GE(cgroup_fd, 0, "join_cgroup"))
return;
run_test(cgroup_fd);
close(cgroup_fd);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/sockopt_inherit.c |
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include "cgroup_helpers.h"
#include "network_helpers.h"
#include "tcp_rtt.skel.h"
struct tcp_rtt_storage {
__u32 invoked;
__u32 dsack_dups;
__u32 delivered;
__u32 delivered_ce;
__u32 icsk_retransmits;
};
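/* Assumed to mirror the value type the BPF program keeps in
 * socket_storage_map; verify_sk() compares the fields one by one.
 */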
static void send_byte(int fd)
{
char b = 0x55;
ASSERT_EQ(write(fd, &b, sizeof(b)), 1, "send single byte");
}
static int wait_for_ack(int fd, int retries)
{
struct tcp_info info;
socklen_t optlen;
int i, err;
for (i = 0; i < retries; i++) {
optlen = sizeof(info);
err = getsockopt(fd, SOL_TCP, TCP_INFO, &info, &optlen);
if (err < 0) {
log_err("Failed to lookup TCP stats");
return err;
}
if (info.tcpi_unacked == 0)
return 0;
usleep(10);
}
log_err("Did not receive ACK");
return -1;
}
static int verify_sk(int map_fd, int client_fd, const char *msg, __u32 invoked,
__u32 dsack_dups, __u32 delivered, __u32 delivered_ce,
__u32 icsk_retransmits)
{
int err = 0;
struct tcp_rtt_storage val;
if (!ASSERT_GE(bpf_map_lookup_elem(map_fd, &client_fd, &val), 0, "read socket storage"))
return -1;
if (val.invoked != invoked) {
log_err("%s: unexpected bpf_tcp_sock.invoked %d != %d",
msg, val.invoked, invoked);
err++;
}
if (val.dsack_dups != dsack_dups) {
log_err("%s: unexpected bpf_tcp_sock.dsack_dups %d != %d",
msg, val.dsack_dups, dsack_dups);
err++;
}
if (val.delivered != delivered) {
log_err("%s: unexpected bpf_tcp_sock.delivered %d != %d",
msg, val.delivered, delivered);
err++;
}
if (val.delivered_ce != delivered_ce) {
log_err("%s: unexpected bpf_tcp_sock.delivered_ce %d != %d",
msg, val.delivered_ce, delivered_ce);
err++;
}
if (val.icsk_retransmits != icsk_retransmits) {
log_err("%s: unexpected bpf_tcp_sock.icsk_retransmits %d != %d",
msg, val.icsk_retransmits, icsk_retransmits);
err++;
}
return err;
}
static int run_test(int cgroup_fd, int server_fd)
{
struct tcp_rtt *skel;
int client_fd;
int prog_fd;
int map_fd;
int err;
skel = tcp_rtt__open_and_load();
if (!ASSERT_OK_PTR(skel, "skel_open_load"))
return -1;
map_fd = bpf_map__fd(skel->maps.socket_storage_map);
prog_fd = bpf_program__fd(skel->progs._sockops);
err = bpf_prog_attach(prog_fd, cgroup_fd, BPF_CGROUP_SOCK_OPS, 0);
if (err) {
log_err("Failed to attach BPF program");
goto close_bpf_object;
}
client_fd = connect_to_fd(server_fd, 0);
if (client_fd < 0) {
err = -1;
goto close_bpf_object;
}
err += verify_sk(map_fd, client_fd, "syn-ack",
/*invoked=*/1,
/*dsack_dups=*/0,
/*delivered=*/1,
/*delivered_ce=*/0,
/*icsk_retransmits=*/0);
send_byte(client_fd);
if (wait_for_ack(client_fd, 100) < 0) {
err = -1;
goto close_client_fd;
}
err += verify_sk(map_fd, client_fd, "first payload byte",
/*invoked=*/2,
/*dsack_dups=*/0,
/*delivered=*/2,
/*delivered_ce=*/0,
/*icsk_retransmits=*/0);
close_client_fd:
close(client_fd);
close_bpf_object:
tcp_rtt__destroy(skel);
return err;
}
void test_tcp_rtt(void)
{
int server_fd, cgroup_fd;
cgroup_fd = test__join_cgroup("/tcp_rtt");
if (!ASSERT_GE(cgroup_fd, 0, "join_cgroup /tcp_rtt"))
return;
server_fd = start_server(AF_INET, SOCK_STREAM, NULL, 0, 0);
if (!ASSERT_GE(server_fd, 0, "start_server"))
goto close_cgroup_fd;
ASSERT_OK(run_test(cgroup_fd, server_fd), "run_test");
close(server_fd);
close_cgroup_fd:
close(cgroup_fd);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/tcp_rtt.c |
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Topology:
* ---------
* NS0 namespace | NS1 namespace
* |
* +--------------+ | +--------------+
* | veth01 |----------| veth10 |
* | 172.16.1.100 | | | 172.16.1.200 |
* | bpf | | +--------------+
* +--------------+ |
* server(UDP/TCP) |
* +-------------------+ |
* | vrf1 | |
* | +--------------+ | | +--------------+
* | | veth02 |----------| veth20 |
* | | 172.16.2.100 | | | | 172.16.2.200 |
* | | bpf | | | +--------------+
* | +--------------+ | |
* | server(UDP/TCP) | |
* +-------------------+ |
*
* Test flow
* -----------
* The test verifies that socket lookup via TC and XDP is VRF-aware:
* 1) Creates two veth pairs between NS0 and NS1:
* a) veth01 <-> veth10 outside the VRF
* b) veth02 <-> veth20 in the VRF
* 2) Attaches TC and XDP programs to veth01 and veth02 that call:
* a) bpf_skc_lookup_tcp() for TCP when tcp_skc is true
* b) bpf_sk_lookup_tcp() for TCP when tcp_skc is false
* c) bpf_sk_lookup_udp() for UDP
* Each program stores its lookup result in bss->lookup_status.
* 3) Creates TCP/UDP servers both inside and outside the VRF.
* 4) The test expects lookup_status to be:
* a) 0 from device in VRF to server outside VRF
* b) 0 from device outside VRF to server in VRF
* c) 1 from device in VRF to server in VRF
* d) 1 from device outside VRF to server outside VRF
*/
#include <net/if.h>
#include "test_progs.h"
#include "network_helpers.h"
#include "vrf_socket_lookup.skel.h"
#define NS0 "vrf_socket_lookup_0"
#define NS1 "vrf_socket_lookup_1"
#define IP4_ADDR_VETH01 "172.16.1.100"
#define IP4_ADDR_VETH10 "172.16.1.200"
#define IP4_ADDR_VETH02 "172.16.2.100"
#define IP4_ADDR_VETH20 "172.16.2.200"
#define NON_VRF_PORT 5000
#define IN_VRF_PORT 5001
#define TIMEOUT_MS 3000
static int make_socket(int sotype, const char *ip, int port,
struct sockaddr_storage *addr)
{
int err, fd;
err = make_sockaddr(AF_INET, ip, port, addr, NULL);
if (!ASSERT_OK(err, "make_address"))
return -1;
fd = socket(AF_INET, sotype, 0);
if (!ASSERT_GE(fd, 0, "socket"))
return -1;
if (!ASSERT_OK(settimeo(fd, TIMEOUT_MS), "settimeo"))
goto fail;
return fd;
fail:
close(fd);
return -1;
}
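/* Start a TCP/UDP server; when ifname is given, SO_BINDTODEVICE scopes the
 * socket to that device, which is how the in-VRF server is created below.
 */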
static int make_server(int sotype, const char *ip, int port, const char *ifname)
{
int err, fd = -1;
fd = start_server(AF_INET, sotype, ip, port, TIMEOUT_MS);
if (!ASSERT_GE(fd, 0, "start_server"))
return -1;
if (ifname) {
err = setsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE,
ifname, strlen(ifname) + 1);
if (!ASSERT_OK(err, "setsockopt(SO_BINDTODEVICE)"))
goto fail;
}
return fd;
fail:
close(fd);
return -1;
}
static int attach_progs(char *ifname, int tc_prog_fd, int xdp_prog_fd)
{
LIBBPF_OPTS(bpf_tc_hook, hook, .attach_point = BPF_TC_INGRESS);
LIBBPF_OPTS(bpf_tc_opts, opts, .handle = 1, .priority = 1,
.prog_fd = tc_prog_fd);
int ret, ifindex;
ifindex = if_nametoindex(ifname);
if (!ASSERT_NEQ(ifindex, 0, "if_nametoindex"))
return -1;
hook.ifindex = ifindex;
ret = bpf_tc_hook_create(&hook);
if (!ASSERT_OK(ret, "bpf_tc_hook_create"))
return ret;
ret = bpf_tc_attach(&hook, &opts);
if (!ASSERT_OK(ret, "bpf_tc_attach")) {
bpf_tc_hook_destroy(&hook);
return ret;
}
ret = bpf_xdp_attach(ifindex, xdp_prog_fd, 0, NULL);
if (!ASSERT_OK(ret, "bpf_xdp_attach")) {
bpf_tc_hook_destroy(&hook);
return ret;
}
return 0;
}
static void cleanup(void)
{
SYS_NOFAIL("test -f /var/run/netns/" NS0 " && ip netns delete "
NS0);
SYS_NOFAIL("test -f /var/run/netns/" NS1 " && ip netns delete "
NS1);
}
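/* Build the topology from the diagram above (namespaces, veth pairs, vrf1)
 * and attach the TC and XDP lookup programs to veth01 and veth02 in NS0.
 */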
static int setup(struct vrf_socket_lookup *skel)
{
int tc_prog_fd, xdp_prog_fd, ret = 0;
struct nstoken *nstoken = NULL;
SYS(fail, "ip netns add " NS0);
SYS(fail, "ip netns add " NS1);
/* NS0 <-> NS1 [veth01 <-> veth10] */
SYS(fail, "ip link add veth01 netns " NS0 " type veth peer name veth10"
" netns " NS1);
SYS(fail, "ip -net " NS0 " addr add " IP4_ADDR_VETH01 "/24 dev veth01");
SYS(fail, "ip -net " NS0 " link set dev veth01 up");
SYS(fail, "ip -net " NS1 " addr add " IP4_ADDR_VETH10 "/24 dev veth10");
SYS(fail, "ip -net " NS1 " link set dev veth10 up");
/* NS0 <-> NS1 [veth02 <-> veth20] */
SYS(fail, "ip link add veth02 netns " NS0 " type veth peer name veth20"
" netns " NS1);
SYS(fail, "ip -net " NS0 " addr add " IP4_ADDR_VETH02 "/24 dev veth02");
SYS(fail, "ip -net " NS0 " link set dev veth02 up");
SYS(fail, "ip -net " NS1 " addr add " IP4_ADDR_VETH20 "/24 dev veth20");
SYS(fail, "ip -net " NS1 " link set dev veth20 up");
/* veth02 -> vrf1 */
SYS(fail, "ip -net " NS0 " link add vrf1 type vrf table 11");
SYS(fail, "ip -net " NS0 " route add vrf vrf1 unreachable default"
" metric 4278198272");
SYS(fail, "ip -net " NS0 " link set vrf1 alias vrf");
SYS(fail, "ip -net " NS0 " link set vrf1 up");
SYS(fail, "ip -net " NS0 " link set veth02 master vrf1");
/* Attach TC and XDP progs to veth devices in NS0 */
nstoken = open_netns(NS0);
if (!ASSERT_OK_PTR(nstoken, "setns " NS0))
goto fail;
tc_prog_fd = bpf_program__fd(skel->progs.tc_socket_lookup);
if (!ASSERT_GE(tc_prog_fd, 0, "bpf_program__tc_fd"))
goto fail;
xdp_prog_fd = bpf_program__fd(skel->progs.xdp_socket_lookup);
if (!ASSERT_GE(xdp_prog_fd, 0, "bpf_program__xdp_fd"))
goto fail;
if (attach_progs("veth01", tc_prog_fd, xdp_prog_fd))
goto fail;
if (attach_progs("veth02", tc_prog_fd, xdp_prog_fd))
goto fail;
goto close;
fail:
ret = -1;
close:
if (nstoken)
close_netns(nstoken);
return ret;
}
static int test_lookup(struct vrf_socket_lookup *skel, int sotype,
const char *ip, int port, bool test_xdp, bool tcp_skc,
int lookup_status_exp)
{
static const char msg[] = "Hello Server";
struct sockaddr_storage addr = {};
int fd, ret = 0;
fd = make_socket(sotype, ip, port, &addr);
if (fd < 0)
return -1;
skel->bss->test_xdp = test_xdp;
skel->bss->tcp_skc = tcp_skc;
skel->bss->lookup_status = -1;
if (sotype == SOCK_STREAM)
connect(fd, (void *)&addr, sizeof(struct sockaddr_in));
else
sendto(fd, msg, sizeof(msg), 0, (void *)&addr,
sizeof(struct sockaddr_in));
if (!ASSERT_EQ(skel->bss->lookup_status, lookup_status_exp,
"lookup_status"))
goto fail;
goto close;
fail:
ret = -1;
close:
close(fd);
return ret;
}
static void _test_vrf_socket_lookup(struct vrf_socket_lookup *skel, int sotype,
bool test_xdp, bool tcp_skc)
{
int in_vrf_server = -1, non_vrf_server = -1;
struct nstoken *nstoken = NULL;
nstoken = open_netns(NS0);
if (!ASSERT_OK_PTR(nstoken, "setns " NS0))
goto done;
/* Open sockets in and outside VRF */
non_vrf_server = make_server(sotype, "0.0.0.0", NON_VRF_PORT, NULL);
if (!ASSERT_GE(non_vrf_server, 0, "make_server__outside_vrf_fd"))
goto done;
in_vrf_server = make_server(sotype, "0.0.0.0", IN_VRF_PORT, "veth02");
if (!ASSERT_GE(in_vrf_server, 0, "make_server__in_vrf_fd"))
goto done;
/* Perform test from NS1 */
close_netns(nstoken);
nstoken = open_netns(NS1);
if (!ASSERT_OK_PTR(nstoken, "setns " NS1))
goto done;
if (!ASSERT_OK(test_lookup(skel, sotype, IP4_ADDR_VETH02, NON_VRF_PORT,
test_xdp, tcp_skc, 0), "in_to_out"))
goto done;
if (!ASSERT_OK(test_lookup(skel, sotype, IP4_ADDR_VETH02, IN_VRF_PORT,
test_xdp, tcp_skc, 1), "in_to_in"))
goto done;
if (!ASSERT_OK(test_lookup(skel, sotype, IP4_ADDR_VETH01, NON_VRF_PORT,
test_xdp, tcp_skc, 1), "out_to_out"))
goto done;
if (!ASSERT_OK(test_lookup(skel, sotype, IP4_ADDR_VETH01, IN_VRF_PORT,
test_xdp, tcp_skc, 0), "out_to_in"))
goto done;
done:
if (non_vrf_server >= 0)
close(non_vrf_server);
if (in_vrf_server >= 0)
close(in_vrf_server);
if (nstoken)
close_netns(nstoken);
}
void test_vrf_socket_lookup(void)
{
struct vrf_socket_lookup *skel;
cleanup();
skel = vrf_socket_lookup__open_and_load();
if (!ASSERT_OK_PTR(skel, "vrf_socket_lookup__open_and_load"))
return;
if (!ASSERT_OK(setup(skel), "setup"))
goto done;
if (test__start_subtest("tc_socket_lookup_tcp"))
_test_vrf_socket_lookup(skel, SOCK_STREAM, false, false);
if (test__start_subtest("tc_socket_lookup_tcp_skc"))
_test_vrf_socket_lookup(skel, SOCK_STREAM, false, true);
if (test__start_subtest("tc_socket_lookup_udp"))
_test_vrf_socket_lookup(skel, SOCK_DGRAM, false, false);
if (test__start_subtest("xdp_socket_lookup_tcp"))
_test_vrf_socket_lookup(skel, SOCK_STREAM, true, false);
if (test__start_subtest("xdp_socket_lookup_tcp_skc"))
_test_vrf_socket_lookup(skel, SOCK_STREAM, true, true);
if (test__start_subtest("xdp_socket_lookup_udp"))
_test_vrf_socket_lookup(skel, SOCK_DGRAM, true, false);
done:
vrf_socket_lookup__destroy(skel);
cleanup();
}
| linux-master | tools/testing/selftests/bpf/prog_tests/vrf_socket_lookup.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
#define _GNU_SOURCE
#include <sys/wait.h>
#include <test_progs.h>
#include <unistd.h>
#include "task_kfunc_failure.skel.h"
#include "task_kfunc_success.skel.h"
static struct task_kfunc_success *open_load_task_kfunc_skel(void)
{
struct task_kfunc_success *skel;
int err;
skel = task_kfunc_success__open();
if (!ASSERT_OK_PTR(skel, "skel_open"))
return NULL;
skel->bss->pid = getpid();
err = task_kfunc_success__load(skel);
if (!ASSERT_OK(err, "skel_load"))
goto cleanup;
return skel;
cleanup:
task_kfunc_success__destroy(skel);
return NULL;
}
static void run_success_test(const char *prog_name)
{
struct task_kfunc_success *skel;
int status;
pid_t child_pid;
struct bpf_program *prog;
struct bpf_link *link = NULL;
skel = open_load_task_kfunc_skel();
if (!ASSERT_OK_PTR(skel, "open_load_skel"))
return;
if (!ASSERT_OK(skel->bss->err, "pre_spawn_err"))
goto cleanup;
prog = bpf_object__find_program_by_name(skel->obj, prog_name);
if (!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name"))
goto cleanup;
link = bpf_program__attach(prog);
if (!ASSERT_OK_PTR(link, "attached_link"))
goto cleanup;
child_pid = fork();
if (!ASSERT_GT(child_pid, -1, "child_pid"))
goto cleanup;
if (child_pid == 0)
_exit(0);
waitpid(child_pid, &status, 0);
ASSERT_OK(skel->bss->err, "post_wait_err");
cleanup:
bpf_link__destroy(link);
task_kfunc_success__destroy(skel);
}
static const char * const success_tests[] = {
"test_task_acquire_release_argument",
"test_task_acquire_release_current",
"test_task_acquire_leave_in_map",
"test_task_xchg_release",
"test_task_map_acquire_release",
"test_task_current_acquire_release",
"test_task_from_pid_arg",
"test_task_from_pid_current",
"test_task_from_pid_invalid",
"task_kfunc_acquire_trusted_walked",
"test_task_kfunc_flavor_relo",
"test_task_kfunc_flavor_relo_not_found",
};
void test_task_kfunc(void)
{
int i;
for (i = 0; i < ARRAY_SIZE(success_tests); i++) {
if (!test__start_subtest(success_tests[i]))
continue;
run_success_test(success_tests[i]);
}
RUN_TESTS(task_kfunc_failure);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/task_kfunc.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
#define _GNU_SOURCE
#include <test_progs.h>
#include <bpf/btf.h>
#include <fcntl.h>
#include <unistd.h>
#include <linux/unistd.h>
#include <linux/mount.h>
#include <sys/syscall.h>
#include "bpf/libbpf_internal.h"
static inline int sys_fsopen(const char *fsname, unsigned flags)
{
return syscall(__NR_fsopen, fsname, flags);
}
static inline int sys_fsconfig(int fs_fd, unsigned cmd, const char *key, const void *val, int aux)
{
return syscall(__NR_fsconfig, fs_fd, cmd, key, val, aux);
}
static inline int sys_fsmount(int fs_fd, unsigned flags, unsigned ms_flags)
{
return syscall(__NR_fsmount, fs_fd, flags, ms_flags);
}
__attribute__((unused))
static inline int sys_move_mount(int from_dfd, const char *from_path,
int to_dfd, const char *to_path,
unsigned int ms_flags)
{
return syscall(__NR_move_mount, from_dfd, from_path, to_dfd, to_path, ms_flags);
}
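/* fsopen(2), fsconfig(2), fsmount(2) and move_mount(2) belong to the new
 * mount API and typically lack libc wrappers, hence the raw syscall() stubs
 * above. The detached-mount sequence used below boils down to:
 *
 *	fs_fd = sys_fsopen("bpf", 0);
 *	sys_fsconfig(fs_fd, FSCONFIG_CMD_CREATE, NULL, NULL, 0);
 *	mnt_fd = sys_fsmount(fs_fd, 0, 0);
 */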
static void bpf_obj_pinning_detached(void)
{
LIBBPF_OPTS(bpf_obj_pin_opts, pin_opts);
LIBBPF_OPTS(bpf_obj_get_opts, get_opts);
int fs_fd = -1, mnt_fd = -1;
int map_fd = -1, map_fd2 = -1;
int zero = 0, src_value, dst_value, err;
const char *map_name = "fsmount_map";
/* Several of the UAPI calls below are modeled on the walkthrough in:
* https://brauner.io/2023/02/28/mounting-into-mount-namespaces.html
*/
/* create VFS context */
fs_fd = sys_fsopen("bpf", 0);
if (!ASSERT_GE(fs_fd, 0, "fs_fd"))
goto cleanup;
/* instantiate FS object */
err = sys_fsconfig(fs_fd, FSCONFIG_CMD_CREATE, NULL, NULL, 0);
if (!ASSERT_OK(err, "fs_create"))
goto cleanup;
/* create O_PATH fd for detached mount */
mnt_fd = sys_fsmount(fs_fd, 0, 0);
if (!ASSERT_GE(mnt_fd, 0, "mnt_fd"))
goto cleanup;
/* If we wanted to expose detached mount in the file system, we'd do
* something like below. But the whole point is that we actually don't
* even have to expose BPF FS in the file system to be able to work
* (pin/get objects) with it.
*
* err = sys_move_mount(mnt_fd, "", -EBADF, mnt_path, MOVE_MOUNT_F_EMPTY_PATH);
* if (!ASSERT_OK(err, "move_mount"))
* goto cleanup;
*/
/* create BPF map to pin */
map_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, map_name, 4, 4, 1, NULL);
if (!ASSERT_GE(map_fd, 0, "map_fd"))
goto cleanup;
/* pin BPF map into detached BPF FS through mnt_fd */
pin_opts.file_flags = BPF_F_PATH_FD;
pin_opts.path_fd = mnt_fd;
err = bpf_obj_pin_opts(map_fd, map_name, &pin_opts);
if (!ASSERT_OK(err, "map_pin"))
goto cleanup;
/* get BPF map from detached BPF FS through mnt_fd */
get_opts.file_flags = BPF_F_PATH_FD;
get_opts.path_fd = mnt_fd;
map_fd2 = bpf_obj_get_opts(map_name, &get_opts);
if (!ASSERT_GE(map_fd2, 0, "map_get"))
goto cleanup;
/* update map through one FD */
src_value = 0xcafebeef;
err = bpf_map_update_elem(map_fd, &zero, &src_value, 0);
ASSERT_OK(err, "map_update");
/* check values written/read through different FDs do match */
dst_value = 0;
err = bpf_map_lookup_elem(map_fd2, &zero, &dst_value);
ASSERT_OK(err, "map_lookup");
ASSERT_EQ(dst_value, src_value, "map_value_eq1");
ASSERT_EQ(dst_value, 0xcafebeef, "map_value_eq2");
cleanup:
if (map_fd >= 0)
ASSERT_OK(close(map_fd), "close_map_fd");
if (map_fd2 >= 0)
ASSERT_OK(close(map_fd2), "close_map_fd2");
if (fs_fd >= 0)
ASSERT_OK(close(fs_fd), "close_fs_fd");
if (mnt_fd >= 0)
ASSERT_OK(close(mnt_fd), "close_mnt_fd");
}
enum path_kind
{
PATH_STR_ABS,
PATH_STR_REL,
PATH_FD_REL,
};
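/* validate_pin()/validate_get() exercise three ways of naming a pin: an
 * absolute path string, a path relative to the current working directory,
 * and a path relative to an O_PATH directory fd passed via BPF_F_PATH_FD.
 */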
static void validate_pin(int map_fd, const char *map_name, int src_value,
enum path_kind path_kind)
{
LIBBPF_OPTS(bpf_obj_pin_opts, pin_opts);
char abs_path[PATH_MAX], old_cwd[PATH_MAX];
const char *pin_path = NULL;
int zero = 0, dst_value, map_fd2, err;
snprintf(abs_path, sizeof(abs_path), "/sys/fs/bpf/%s", map_name);
old_cwd[0] = '\0';
switch (path_kind) {
case PATH_STR_ABS:
/* absolute path */
pin_path = abs_path;
break;
case PATH_STR_REL:
/* cwd + relative path */
ASSERT_OK_PTR(getcwd(old_cwd, sizeof(old_cwd)), "getcwd");
ASSERT_OK(chdir("/sys/fs/bpf"), "chdir");
pin_path = map_name;
break;
case PATH_FD_REL:
/* dir fd + relative path */
pin_opts.file_flags = BPF_F_PATH_FD;
pin_opts.path_fd = open("/sys/fs/bpf", O_PATH);
ASSERT_GE(pin_opts.path_fd, 0, "path_fd");
pin_path = map_name;
break;
}
/* pin BPF map using specified path definition */
err = bpf_obj_pin_opts(map_fd, pin_path, &pin_opts);
ASSERT_OK(err, "obj_pin");
/* cleanup */
if (path_kind == PATH_FD_REL && pin_opts.path_fd >= 0)
close(pin_opts.path_fd);
if (old_cwd[0])
ASSERT_OK(chdir(old_cwd), "restore_cwd");
map_fd2 = bpf_obj_get(abs_path);
if (!ASSERT_GE(map_fd2, 0, "map_get"))
goto cleanup;
/* update map through one FD */
err = bpf_map_update_elem(map_fd, &zero, &src_value, 0);
ASSERT_OK(err, "map_update");
/* check values written/read through different FDs do match */
dst_value = 0;
err = bpf_map_lookup_elem(map_fd2, &zero, &dst_value);
ASSERT_OK(err, "map_lookup");
ASSERT_EQ(dst_value, src_value, "map_value_eq");
cleanup:
if (map_fd2 >= 0)
ASSERT_OK(close(map_fd2), "close_map_fd2");
unlink(abs_path);
}
static void validate_get(int map_fd, const char *map_name, int src_value,
enum path_kind path_kind)
{
LIBBPF_OPTS(bpf_obj_get_opts, get_opts);
char abs_path[PATH_MAX], old_cwd[PATH_MAX];
const char *pin_path = NULL;
int zero = 0, dst_value, map_fd2, err;
snprintf(abs_path, sizeof(abs_path), "/sys/fs/bpf/%s", map_name);
/* pin BPF map using specified path definition */
err = bpf_obj_pin(map_fd, abs_path);
if (!ASSERT_OK(err, "pin_map"))
return;
old_cwd[0] = '\0';
switch (path_kind) {
case PATH_STR_ABS:
/* absolute path */
pin_path = abs_path;
break;
case PATH_STR_REL:
/* cwd + relative path */
ASSERT_OK_PTR(getcwd(old_cwd, sizeof(old_cwd)), "getcwd");
ASSERT_OK(chdir("/sys/fs/bpf"), "chdir");
pin_path = map_name;
break;
case PATH_FD_REL:
/* dir fd + relative path */
get_opts.file_flags = BPF_F_PATH_FD;
get_opts.path_fd = open("/sys/fs/bpf", O_PATH);
ASSERT_GE(get_opts.path_fd, 0, "path_fd");
pin_path = map_name;
break;
}
map_fd2 = bpf_obj_get_opts(pin_path, &get_opts);
if (!ASSERT_GE(map_fd2, 0, "map_get"))
goto cleanup;
/* cleanup */
if (path_kind == PATH_FD_REL && get_opts.path_fd >= 0)
close(get_opts.path_fd);
if (old_cwd[0])
ASSERT_OK(chdir(old_cwd), "restore_cwd");
/* update map through one FD */
err = bpf_map_update_elem(map_fd, &zero, &src_value, 0);
ASSERT_OK(err, "map_update");
/* check values written/read through different FDs do match */
dst_value = 0;
err = bpf_map_lookup_elem(map_fd2, &zero, &dst_value);
ASSERT_OK(err, "map_lookup");
ASSERT_EQ(dst_value, src_value, "map_value_eq");
cleanup:
if (map_fd2 >= 0)
ASSERT_OK(close(map_fd2), "close_map_fd2");
unlink(abs_path);
}
static void bpf_obj_pinning_mounted(enum path_kind path_kind)
{
const char *map_name = "mounted_map";
int map_fd;
/* create BPF map to pin */
map_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, map_name, 4, 4, 1, NULL);
if (!ASSERT_GE(map_fd, 0, "map_fd"))
return;
validate_pin(map_fd, map_name, 100 + (int)path_kind, path_kind);
validate_get(map_fd, map_name, 200 + (int)path_kind, path_kind);
ASSERT_OK(close(map_fd), "close_map_fd");
}
void test_bpf_obj_pinning()
{
if (test__start_subtest("detached"))
bpf_obj_pinning_detached();
if (test__start_subtest("mounted-str-abs"))
bpf_obj_pinning_mounted(PATH_STR_ABS);
if (test__start_subtest("mounted-str-rel"))
bpf_obj_pinning_mounted(PATH_STR_REL);
if (test__start_subtest("mounted-fd-rel"))
bpf_obj_pinning_mounted(PATH_FD_REL);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/bpf_obj_pinning.c |
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
void serial_test_tp_attach_query(void)
{
const int num_progs = 3;
int i, j, bytes, efd, err, prog_fd[num_progs], pmu_fd[num_progs];
__u32 duration = 0, info_len, saved_prog_ids[num_progs];
const char *file = "./test_tracepoint.bpf.o";
struct perf_event_query_bpf *query;
struct perf_event_attr attr = {};
struct bpf_object *obj[num_progs];
struct bpf_prog_info prog_info;
char buf[256];
for (i = 0; i < num_progs; i++)
obj[i] = NULL;
if (access("/sys/kernel/tracing/trace", F_OK) == 0) {
snprintf(buf, sizeof(buf),
"/sys/kernel/tracing/events/sched/sched_switch/id");
} else {
snprintf(buf, sizeof(buf),
"/sys/kernel/debug/tracing/events/sched/sched_switch/id");
}
efd = open(buf, O_RDONLY, 0);
if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno))
return;
bytes = read(efd, buf, sizeof(buf));
close(efd);
if (CHECK(bytes <= 0 || bytes >= sizeof(buf),
"read", "bytes %d errno %d\n", bytes, errno))
return;
attr.config = strtol(buf, NULL, 0);
attr.type = PERF_TYPE_TRACEPOINT;
attr.sample_type = PERF_SAMPLE_RAW | PERF_SAMPLE_CALLCHAIN;
attr.sample_period = 1;
attr.wakeup_events = 1;
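/* attr.config carries the sched_switch tracepoint id read from tracefs
 * above; with PERF_TYPE_TRACEPOINT this opens an event on that tracepoint.
 */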
query = malloc(sizeof(*query) + sizeof(__u32) * num_progs);
for (i = 0; i < num_progs; i++) {
err = bpf_prog_test_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj[i],
&prog_fd[i]);
if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
goto cleanup1;
bzero(&prog_info, sizeof(prog_info));
prog_info.jited_prog_len = 0;
prog_info.xlated_prog_len = 0;
prog_info.nr_map_ids = 0;
info_len = sizeof(prog_info);
err = bpf_prog_get_info_by_fd(prog_fd[i], &prog_info,
&info_len);
if (CHECK(err, "bpf_prog_get_info_by_fd", "err %d errno %d\n",
err, errno))
goto cleanup1;
saved_prog_ids[i] = prog_info.id;
pmu_fd[i] = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
0 /* cpu 0 */, -1 /* group id */,
0 /* flags */);
if (CHECK(pmu_fd[i] < 0, "perf_event_open", "err %d errno %d\n",
pmu_fd[i], errno))
goto cleanup2;
err = ioctl(pmu_fd[i], PERF_EVENT_IOC_ENABLE, 0);
if (CHECK(err, "perf_event_ioc_enable", "err %d errno %d\n",
err, errno))
goto cleanup3;
if (i == 0) {
/* check NULL prog array query */
query->ids_len = num_progs;
err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query);
if (CHECK(err || query->prog_cnt != 0,
"perf_event_ioc_query_bpf",
"err %d errno %d query->prog_cnt %u\n",
err, errno, query->prog_cnt))
goto cleanup3;
}
err = ioctl(pmu_fd[i], PERF_EVENT_IOC_SET_BPF, prog_fd[i]);
if (CHECK(err, "perf_event_ioc_set_bpf", "err %d errno %d\n",
err, errno))
goto cleanup3;
if (i == 1) {
/* try to get # of programs only */
query->ids_len = 0;
err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query);
if (CHECK(err || query->prog_cnt != 2,
"perf_event_ioc_query_bpf",
"err %d errno %d query->prog_cnt %u\n",
err, errno, query->prog_cnt))
goto cleanup3;
/* try a few negative tests */
/* invalid query pointer */
err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF,
(struct perf_event_query_bpf *)0x1);
if (CHECK(!err || errno != EFAULT,
"perf_event_ioc_query_bpf",
"err %d errno %d\n", err, errno))
goto cleanup3;
/* not enough space */
query->ids_len = 1;
err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query);
if (CHECK(!err || errno != ENOSPC || query->prog_cnt != 2,
"perf_event_ioc_query_bpf",
"err %d errno %d query->prog_cnt %u\n",
err, errno, query->prog_cnt))
goto cleanup3;
}
query->ids_len = num_progs;
err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query);
if (CHECK(err || query->prog_cnt != (i + 1),
"perf_event_ioc_query_bpf",
"err %d errno %d query->prog_cnt %u\n",
err, errno, query->prog_cnt))
goto cleanup3;
for (j = 0; j < i + 1; j++)
if (CHECK(saved_prog_ids[j] != query->ids[j],
"perf_event_ioc_query_bpf",
"#%d saved_prog_id %x query prog_id %x\n",
j, saved_prog_ids[j], query->ids[j]))
goto cleanup3;
}
i = num_progs - 1;
for (; i >= 0; i--) {
cleanup3:
ioctl(pmu_fd[i], PERF_EVENT_IOC_DISABLE);
cleanup2:
close(pmu_fd[i]);
cleanup1:
bpf_object__close(obj[i]);
}
free(query);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/tp_attach_query.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Facebook */
#include <test_progs.h>
#include "test_legacy_printk.skel.h"
static int execute_one_variant(bool legacy)
{
struct test_legacy_printk *skel;
int err, zero = 0, my_pid = getpid(), res, map_fd;
skel = test_legacy_printk__open();
if (!ASSERT_OK_PTR(skel, "skel_open"))
return -errno;
bpf_program__set_autoload(skel->progs.handle_legacy, legacy);
bpf_program__set_autoload(skel->progs.handle_modern, !legacy);
err = test_legacy_printk__load(skel);
/* no ASSERT_OK here: the modern variant may legitimately fail to load on older kernels */
if (err)
goto err_out;
if (legacy) {
map_fd = bpf_map__fd(skel->maps.my_pid_map);
err = bpf_map_update_elem(map_fd, &zero, &my_pid, BPF_ANY);
if (!ASSERT_OK(err, "my_pid_map_update"))
goto err_out;
err = bpf_map_lookup_elem(map_fd, &zero, &res);
} else {
skel->bss->my_pid_var = my_pid;
}
err = test_legacy_printk__attach(skel);
if (!ASSERT_OK(err, "skel_attach"))
goto err_out;
usleep(1); /* trigger */
if (legacy) {
map_fd = bpf_map__fd(skel->maps.res_map);
err = bpf_map_lookup_elem(map_fd, &zero, &res);
if (!ASSERT_OK(err, "res_map_lookup"))
goto err_out;
} else {
res = skel->bss->res_var;
}
if (!ASSERT_GT(res, 0, "res")) {
err = -EINVAL;
goto err_out;
}
err_out:
test_legacy_printk__destroy(skel);
return err;
}
void test_legacy_printk(void)
{
/* legacy variant should work everywhere */
ASSERT_OK(execute_one_variant(true /* legacy */), "legacy_case");
/* execute modern variant, can fail the load on old kernels */
execute_one_variant(false);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/legacy_printk.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2023 Facebook */
#include <test_progs.h>
#include <bpf/libbpf.h>
#include <sys/types.h>
#include <sys/socket.h>
#include "sk_storage_omem_uncharge.skel.h"
void test_sk_storage_omem_uncharge(void)
{
struct sk_storage_omem_uncharge *skel;
int sk_fd = -1, map_fd, err, value;
socklen_t optlen;
skel = sk_storage_omem_uncharge__open_and_load();
if (!ASSERT_OK_PTR(skel, "skel open_and_load"))
return;
map_fd = bpf_map__fd(skel->maps.sk_storage);
/* A standalone socket not binding to addr:port,
* so nentns is not needed.
*/
sk_fd = socket(AF_INET6, SOCK_STREAM, 0);
if (!ASSERT_GE(sk_fd, 0, "socket"))
goto done;
optlen = sizeof(skel->bss->cookie);
err = getsockopt(sk_fd, SOL_SOCKET, SO_COOKIE, &skel->bss->cookie, &optlen);
if (!ASSERT_OK(err, "getsockopt(SO_COOKIE)"))
goto done;
value = 0;
err = bpf_map_update_elem(map_fd, &sk_fd, &value, 0);
if (!ASSERT_OK(err, "bpf_map_update_elem(value=0)"))
goto done;
value = 0xdeadbeef;
err = bpf_map_update_elem(map_fd, &sk_fd, &value, 0);
if (!ASSERT_OK(err, "bpf_map_update_elem(value=0xdeadbeef)"))
goto done;
err = sk_storage_omem_uncharge__attach(skel);
if (!ASSERT_OK(err, "attach"))
goto done;
close(sk_fd);
sk_fd = -1;
ASSERT_EQ(skel->bss->cookie_found, 2, "cookie_found");
ASSERT_EQ(skel->bss->omem, 0, "omem");
done:
sk_storage_omem_uncharge__destroy(skel);
if (sk_fd != -1)
close(sk_fd);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/sk_storage_omem_uncharge.c |
// SPDX-License-Identifier: GPL-2.0-only
#include <test_progs.h>
#include "test_lookup_and_delete.skel.h"
#define START_VALUE 1234
#define NEW_VALUE 4321
#define MAX_ENTRIES 2
static int duration;
static int nr_cpus;
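/* Seed keys 1..MAX_ENTRIES with START_VALUE; the percpu variant writes the
 * same value for every possible CPU.
 */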
static int fill_values(int map_fd)
{
__u64 key, value = START_VALUE;
int err;
for (key = 1; key < MAX_ENTRIES + 1; key++) {
err = bpf_map_update_elem(map_fd, &key, &value, BPF_NOEXIST);
if (!ASSERT_OK(err, "bpf_map_update_elem"))
return -1;
}
return 0;
}
static int fill_values_percpu(int map_fd)
{
__u64 key, value[nr_cpus];
int i, err;
for (i = 0; i < nr_cpus; i++)
value[i] = START_VALUE;
for (key = 1; key < MAX_ENTRIES + 1; key++) {
err = bpf_map_update_elem(map_fd, &key, value, BPF_NOEXIST);
if (!ASSERT_OK(err, "bpf_map_update_elem"))
return -1;
}
return 0;
}
static struct test_lookup_and_delete *setup_prog(enum bpf_map_type map_type,
int *map_fd)
{
struct test_lookup_and_delete *skel;
int err;
skel = test_lookup_and_delete__open();
if (!ASSERT_OK_PTR(skel, "test_lookup_and_delete__open"))
return NULL;
err = bpf_map__set_type(skel->maps.hash_map, map_type);
if (!ASSERT_OK(err, "bpf_map__set_type"))
goto cleanup;
err = bpf_map__set_max_entries(skel->maps.hash_map, MAX_ENTRIES);
if (!ASSERT_OK(err, "bpf_map__set_max_entries"))
goto cleanup;
err = test_lookup_and_delete__load(skel);
if (!ASSERT_OK(err, "test_lookup_and_delete__load"))
goto cleanup;
*map_fd = bpf_map__fd(skel->maps.hash_map);
if (!ASSERT_GE(*map_fd, 0, "bpf_map__fd"))
goto cleanup;
return skel;
cleanup:
test_lookup_and_delete__destroy(skel);
return NULL;
}
/* Triggers BPF program that updates map with given key and value */
static int trigger_tp(struct test_lookup_and_delete *skel, __u64 key,
__u64 value)
{
int err;
skel->bss->set_pid = getpid();
skel->bss->set_key = key;
skel->bss->set_value = value;
err = test_lookup_and_delete__attach(skel);
if (!ASSERT_OK(err, "test_lookup_and_delete__attach"))
return -1;
syscall(__NR_getpgid);
test_lookup_and_delete__detach(skel);
return 0;
}
static void test_lookup_and_delete_hash(void)
{
struct test_lookup_and_delete *skel;
__u64 key, value;
int map_fd, err;
/* Setup program and fill the map. */
skel = setup_prog(BPF_MAP_TYPE_HASH, &map_fd);
if (!ASSERT_OK_PTR(skel, "setup_prog"))
return;
err = fill_values(map_fd);
if (!ASSERT_OK(err, "fill_values"))
goto cleanup;
/* Lookup and delete element. */
key = 1;
err = bpf_map__lookup_and_delete_elem(skel->maps.hash_map,
&key, sizeof(key), &value, sizeof(value), 0);
if (!ASSERT_OK(err, "bpf_map_lookup_and_delete_elem"))
goto cleanup;
/* Fetched value should match the initially set value. */
if (CHECK(value != START_VALUE, "bpf_map_lookup_and_delete_elem",
"unexpected value=%lld\n", value))
goto cleanup;
/* Check that the entry is non existent. */
err = bpf_map_lookup_elem(map_fd, &key, &value);
if (!ASSERT_ERR(err, "bpf_map_lookup_elem"))
goto cleanup;
cleanup:
test_lookup_and_delete__destroy(skel);
}
static void test_lookup_and_delete_percpu_hash(void)
{
struct test_lookup_and_delete *skel;
__u64 key, val, value[nr_cpus];
int map_fd, err, i;
/* Setup program and fill the map. */
skel = setup_prog(BPF_MAP_TYPE_PERCPU_HASH, &map_fd);
if (!ASSERT_OK_PTR(skel, "setup_prog"))
return;
err = fill_values_percpu(map_fd);
if (!ASSERT_OK(err, "fill_values_percpu"))
goto cleanup;
/* Lookup and delete element. */
key = 1;
err = bpf_map__lookup_and_delete_elem(skel->maps.hash_map,
&key, sizeof(key), value, sizeof(value), 0);
if (!ASSERT_OK(err, "bpf_map_lookup_and_delete_elem"))
goto cleanup;
for (i = 0; i < nr_cpus; i++) {
val = value[i];
/* Fetched value should match the initially set value. */
if (CHECK(val != START_VALUE, "map value",
"unexpected for cpu %d: %lld\n", i, val))
goto cleanup;
}
/* Check that the entry is non existent. */
err = bpf_map_lookup_elem(map_fd, &key, value);
if (!ASSERT_ERR(err, "bpf_map_lookup_elem"))
goto cleanup;
cleanup:
test_lookup_and_delete__destroy(skel);
}
static void test_lookup_and_delete_lru_hash(void)
{
struct test_lookup_and_delete *skel;
__u64 key, value;
int map_fd, err;
/* Setup program and fill the LRU map. */
skel = setup_prog(BPF_MAP_TYPE_LRU_HASH, &map_fd);
if (!ASSERT_OK_PTR(skel, "setup_prog"))
return;
err = fill_values(map_fd);
if (!ASSERT_OK(err, "fill_values"))
goto cleanup;
/* Insert new element at key=3, should reuse LRU element. */
key = 3;
err = trigger_tp(skel, key, NEW_VALUE);
if (!ASSERT_OK(err, "trigger_tp"))
goto cleanup;
/* Lookup and delete element 3. */
err = bpf_map__lookup_and_delete_elem(skel->maps.hash_map,
&key, sizeof(key), &value, sizeof(value), 0);
if (!ASSERT_OK(err, "bpf_map_lookup_and_delete_elem"))
goto cleanup;
/* Value should match the new value. */
if (CHECK(value != NEW_VALUE, "bpf_map_lookup_and_delete_elem",
"unexpected value=%lld\n", value))
goto cleanup;
/* Check that entries 3 and 1 are non existent. */
err = bpf_map_lookup_elem(map_fd, &key, &value);
if (!ASSERT_ERR(err, "bpf_map_lookup_elem"))
goto cleanup;
key = 1;
err = bpf_map_lookup_elem(map_fd, &key, &value);
if (!ASSERT_ERR(err, "bpf_map_lookup_elem"))
goto cleanup;
cleanup:
test_lookup_and_delete__destroy(skel);
}
static void test_lookup_and_delete_lru_percpu_hash(void)
{
struct test_lookup_and_delete *skel;
__u64 key, val, value[nr_cpus];
int map_fd, err, i, cpucnt = 0;
/* Setup program and fill the LRU map. */
skel = setup_prog(BPF_MAP_TYPE_LRU_PERCPU_HASH, &map_fd);
if (!ASSERT_OK_PTR(skel, "setup_prog"))
return;
err = fill_values_percpu(map_fd);
if (!ASSERT_OK(err, "fill_values_percpu"))
goto cleanup;
/* Insert new element at key=3, should reuse LRU element 1. */
key = 3;
err = trigger_tp(skel, key, NEW_VALUE);
if (!ASSERT_OK(err, "trigger_tp"))
goto cleanup;
/* Clean value. */
for (i = 0; i < nr_cpus; i++)
value[i] = 0;
/* Lookup and delete element 3. */
err = bpf_map__lookup_and_delete_elem(skel->maps.hash_map,
&key, sizeof(key), value, sizeof(value), 0);
if (!ASSERT_OK(err, "bpf_map_lookup_and_delete_elem"))
goto cleanup;
/* Check if only one CPU has set the value. */
for (i = 0; i < nr_cpus; i++) {
val = value[i];
if (val) {
if (CHECK(val != NEW_VALUE, "map value",
"unexpected for cpu %d: %lld\n", i, val))
goto cleanup;
cpucnt++;
}
}
if (CHECK(cpucnt != 1, "map value", "set for %d CPUs instead of 1!\n",
cpucnt))
goto cleanup;
/* Check that entries 3 and 1 are non existent. */
err = bpf_map_lookup_elem(map_fd, &key, value);
if (!ASSERT_ERR(err, "bpf_map_lookup_elem"))
goto cleanup;
key = 1;
err = bpf_map_lookup_elem(map_fd, &key, value);
if (!ASSERT_ERR(err, "bpf_map_lookup_elem"))
goto cleanup;
cleanup:
test_lookup_and_delete__destroy(skel);
}
void test_lookup_and_delete(void)
{
nr_cpus = bpf_num_possible_cpus();
if (test__start_subtest("lookup_and_delete"))
test_lookup_and_delete_hash();
if (test__start_subtest("lookup_and_delete_percpu"))
test_lookup_and_delete_percpu_hash();
if (test__start_subtest("lookup_and_delete_lru"))
test_lookup_and_delete_lru_hash();
if (test__start_subtest("lookup_and_delete_lru_percpu"))
test_lookup_and_delete_lru_percpu_hash();
}
| linux-master | tools/testing/selftests/bpf/prog_tests/lookup_and_delete.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Facebook */
#include <test_progs.h>
#include <sys/syscall.h>
#include "linked_maps.skel.h"
void test_linked_maps(void)
{
int err;
struct linked_maps *skel;
skel = linked_maps__open_and_load();
if (!ASSERT_OK_PTR(skel, "skel_open"))
return;
err = linked_maps__attach(skel);
if (!ASSERT_OK(err, "skel_attach"))
goto cleanup;
/* trigger */
syscall(SYS_getpgid);
ASSERT_EQ(skel->bss->output_first1, 2000, "output_first1");
ASSERT_EQ(skel->bss->output_second1, 2, "output_second1");
ASSERT_EQ(skel->bss->output_weak1, 2, "output_weak1");
cleanup:
linked_maps__destroy(skel);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/linked_maps.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Facebook */
#include <test_progs.h>
#include <network_helpers.h>
#include "bpf_loop.skel.h"
static void check_nr_loops(struct bpf_loop *skel)
{
struct bpf_link *link;
link = bpf_program__attach(skel->progs.test_prog);
if (!ASSERT_OK_PTR(link, "link"))
return;
/* test 0 loops */
skel->bss->nr_loops = 0;
usleep(1);
ASSERT_EQ(skel->bss->nr_loops_returned, skel->bss->nr_loops,
"0 loops");
/* test 500 loops */
skel->bss->nr_loops = 500;
usleep(1);
ASSERT_EQ(skel->bss->nr_loops_returned, skel->bss->nr_loops,
"500 loops");
ASSERT_EQ(skel->bss->g_output, (500 * 499) / 2, "g_output");
/* test exceeding the max limit */
skel->bss->nr_loops = -1;
usleep(1);
ASSERT_EQ(skel->bss->err, -E2BIG, "over max limit");
bpf_link__destroy(link);
}
static void check_callback_fn_stop(struct bpf_loop *skel)
{
struct bpf_link *link;
link = bpf_program__attach(skel->progs.test_prog);
if (!ASSERT_OK_PTR(link, "link"))
return;
/* testing that loop is stopped when callback_fn returns 1 */
skel->bss->nr_loops = 400;
skel->data->stop_index = 50;
usleep(1);
ASSERT_EQ(skel->bss->nr_loops_returned, skel->data->stop_index + 1,
"nr_loops_returned");
ASSERT_EQ(skel->bss->g_output, (50 * 49) / 2,
"g_output");
bpf_link__destroy(link);
}
static void check_null_callback_ctx(struct bpf_loop *skel)
{
struct bpf_link *link;
/* check that user is able to pass in a null callback_ctx */
link = bpf_program__attach(skel->progs.prog_null_ctx);
if (!ASSERT_OK_PTR(link, "link"))
return;
skel->bss->nr_loops = 10;
usleep(1);
ASSERT_EQ(skel->bss->nr_loops_returned, skel->bss->nr_loops,
"nr_loops_returned");
bpf_link__destroy(link);
}
static void check_invalid_flags(struct bpf_loop *skel)
{
struct bpf_link *link;
/* check that passing in non-zero flags returns -EINVAL */
link = bpf_program__attach(skel->progs.prog_invalid_flags);
if (!ASSERT_OK_PTR(link, "link"))
return;
usleep(1);
ASSERT_EQ(skel->bss->err, -EINVAL, "err");
bpf_link__destroy(link);
}
static void check_nested_calls(struct bpf_loop *skel)
{
__u32 nr_loops = 100, nested_callback_nr_loops = 4;
struct bpf_link *link;
/* check that nested calls are supported */
link = bpf_program__attach(skel->progs.prog_nested_calls);
if (!ASSERT_OK_PTR(link, "link"))
return;
skel->bss->nr_loops = nr_loops;
skel->bss->nested_callback_nr_loops = nested_callback_nr_loops;
usleep(1);
ASSERT_EQ(skel->bss->nr_loops_returned, nr_loops * nested_callback_nr_loops
* nested_callback_nr_loops, "nr_loops_returned");
ASSERT_EQ(skel->bss->g_output, (4 * 3) / 2 * nested_callback_nr_loops
* nr_loops, "g_output");
bpf_link__destroy(link);
}
static void check_non_constant_callback(struct bpf_loop *skel)
{
struct bpf_link *link =
bpf_program__attach(skel->progs.prog_non_constant_callback);
if (!ASSERT_OK_PTR(link, "link"))
return;
skel->bss->callback_selector = 0x0F;
usleep(1);
ASSERT_EQ(skel->bss->g_output, 0x0F, "g_output #1");
skel->bss->callback_selector = 0xF0;
usleep(1);
ASSERT_EQ(skel->bss->g_output, 0xF0, "g_output #2");
bpf_link__destroy(link);
}
static void check_stack(struct bpf_loop *skel)
{
struct bpf_link *link = bpf_program__attach(skel->progs.stack_check);
const int max_key = 12;
int key;
int map_fd;
if (!ASSERT_OK_PTR(link, "link"))
return;
map_fd = bpf_map__fd(skel->maps.map1);
if (!ASSERT_GE(map_fd, 0, "bpf_map__fd"))
goto out;
for (key = 1; key <= max_key; ++key) {
int val = key;
int err = bpf_map_update_elem(map_fd, &key, &val, BPF_NOEXIST);
if (!ASSERT_OK(err, "bpf_map_update_elem"))
goto out;
}
usleep(1);
for (key = 1; key <= max_key; ++key) {
int val;
int err = bpf_map_lookup_elem(map_fd, &key, &val);
if (!ASSERT_OK(err, "bpf_map_lookup_elem"))
goto out;
if (!ASSERT_EQ(val, key + 1, "bad value in the map"))
goto out;
}
out:
bpf_link__destroy(link);
}
void test_bpf_loop(void)
{
struct bpf_loop *skel;
skel = bpf_loop__open_and_load();
if (!ASSERT_OK_PTR(skel, "bpf_loop__open_and_load"))
return;
skel->bss->pid = getpid();
if (test__start_subtest("check_nr_loops"))
check_nr_loops(skel);
if (test__start_subtest("check_callback_fn_stop"))
check_callback_fn_stop(skel);
if (test__start_subtest("check_null_callback_ctx"))
check_null_callback_ctx(skel);
if (test__start_subtest("check_invalid_flags"))
check_invalid_flags(skel);
if (test__start_subtest("check_nested_calls"))
check_nested_calls(skel);
if (test__start_subtest("check_non_constant_callback"))
check_non_constant_callback(skel);
if (test__start_subtest("check_stack"))
check_stack(skel);
bpf_loop__destroy(skel);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/bpf_loop.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#include <test_progs.h>
#include <stdbool.h>
#include "test_module_attach.skel.h"
#include "testing_helpers.h"
static int duration;
static int trigger_module_test_writable(int *val)
{
int fd, err;
char buf[65];
ssize_t rd;
fd = open(BPF_TESTMOD_TEST_FILE, O_RDONLY);
err = -errno;
if (!ASSERT_GE(fd, 0, "testmode_file_open"))
return err;
rd = read(fd, buf, sizeof(buf) - 1);
err = -errno;
if (!ASSERT_GT(rd, 0, "testmod_file_rd_val")) {
close(fd);
return err;
}
buf[rd] = '\0';
*val = strtol(buf, NULL, 0);
close(fd);
return 0;
}
void test_module_attach(void)
{
const int READ_SZ = 456;
const int WRITE_SZ = 457;
struct test_module_attach *skel;
struct test_module_attach__bss *bss;
struct bpf_link *link;
int err;
int writable_val = 0;
skel = test_module_attach__open();
if (CHECK(!skel, "skel_open", "failed to open skeleton\n"))
return;
err = bpf_program__set_attach_target(skel->progs.handle_fentry_manual,
0, "bpf_testmod_test_read");
ASSERT_OK(err, "set_attach_target");
err = test_module_attach__load(skel);
if (CHECK(err, "skel_load", "failed to load skeleton\n"))
return;
bss = skel->bss;
err = test_module_attach__attach(skel);
if (CHECK(err, "skel_attach", "skeleton attach failed: %d\n", err))
goto cleanup;
/* trigger tracepoint */
ASSERT_OK(trigger_module_test_read(READ_SZ), "trigger_read");
ASSERT_OK(trigger_module_test_write(WRITE_SZ), "trigger_write");
ASSERT_EQ(bss->raw_tp_read_sz, READ_SZ, "raw_tp");
ASSERT_EQ(bss->raw_tp_bare_write_sz, WRITE_SZ, "raw_tp_bare");
ASSERT_EQ(bss->tp_btf_read_sz, READ_SZ, "tp_btf");
ASSERT_EQ(bss->fentry_read_sz, READ_SZ, "fentry");
ASSERT_EQ(bss->fentry_manual_read_sz, READ_SZ, "fentry_manual");
ASSERT_EQ(bss->fexit_read_sz, READ_SZ, "fexit");
ASSERT_EQ(bss->fexit_ret, -EIO, "fexit_ret");
ASSERT_EQ(bss->fmod_ret_read_sz, READ_SZ, "fmod_ret");
bss->raw_tp_writable_bare_early_ret = true;
bss->raw_tp_writable_bare_out_val = 0xf1f2f3f4;
ASSERT_OK(trigger_module_test_writable(&writable_val),
"trigger_writable");
ASSERT_EQ(bss->raw_tp_writable_bare_in_val, 1024, "writable_test_in");
ASSERT_EQ(bss->raw_tp_writable_bare_out_val, writable_val,
"writable_test_out");
test_module_attach__detach(skel);
/* attach fentry/fexit and make sure they take a module reference */
link = bpf_program__attach(skel->progs.handle_fentry);
if (!ASSERT_OK_PTR(link, "attach_fentry"))
goto cleanup;
ASSERT_ERR(unload_bpf_testmod(false), "unload_bpf_testmod");
bpf_link__destroy(link);
link = bpf_program__attach(skel->progs.handle_fexit);
if (!ASSERT_OK_PTR(link, "attach_fexit"))
goto cleanup;
ASSERT_ERR(unload_bpf_testmod(false), "unload_bpf_testmod");
bpf_link__destroy(link);
link = bpf_program__attach(skel->progs.kprobe_multi);
if (!ASSERT_OK_PTR(link, "attach_kprobe_multi"))
goto cleanup;
ASSERT_ERR(unload_bpf_testmod(false), "unload_bpf_testmod");
bpf_link__destroy(link);
cleanup:
test_module_attach__destroy(skel);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/module_attach.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Facebook */
#include <test_progs.h>
#include <sys/syscall.h>
#include "linked_vars.skel.h"
void test_linked_vars(void)
{
int err;
struct linked_vars *skel;
skel = linked_vars__open();
if (!ASSERT_OK_PTR(skel, "skel_open"))
return;
skel->bss->input_bss1 = 1000;
skel->bss->input_bss2 = 2000;
skel->bss->input_bss_weak = 3000;
err = linked_vars__load(skel);
if (!ASSERT_OK(err, "skel_load"))
goto cleanup;
err = linked_vars__attach(skel);
if (!ASSERT_OK(err, "skel_attach"))
goto cleanup;
/* trigger */
syscall(SYS_getpgid);
ASSERT_EQ(skel->bss->output_bss1, 1000 + 2000 + 3000, "output_bss1");
ASSERT_EQ(skel->bss->output_bss2, 1000 + 2000 + 3000, "output_bss2");
/* 10 comes from "winner" input_data_weak in first obj file */
ASSERT_EQ(skel->bss->output_data1, 1 + 2 + 10, "output_bss1");
ASSERT_EQ(skel->bss->output_data2, 1 + 2 + 10, "output_bss2");
/* 100 comes from "winner" input_rodata_weak in first obj file */
ASSERT_EQ(skel->bss->output_rodata1, 11 + 22 + 100, "output_weak1");
ASSERT_EQ(skel->bss->output_rodata2, 11 + 22 + 100, "output_weak2");
cleanup:
linked_vars__destroy(skel);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/linked_vars.c |
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include <network_helpers.h>
static void test_xdp_update_frags(void)
{
const char *file = "./test_xdp_update_frags.bpf.o";
int err, prog_fd, max_skb_frags, buf_size, num;
struct bpf_program *prog;
struct bpf_object *obj;
__u32 *offset;
__u8 *buf;
FILE *f;
LIBBPF_OPTS(bpf_test_run_opts, topts);
obj = bpf_object__open(file);
if (libbpf_get_error(obj))
return;
prog = bpf_object__next_program(obj, NULL);
if (bpf_object__load(obj))
return;
prog_fd = bpf_program__fd(prog);
buf = malloc(128);
if (!ASSERT_OK_PTR(buf, "alloc buf 128b"))
goto out;
memset(buf, 0, 128);
offset = (__u32 *)buf;
*offset = 16;
buf[*offset] = 0xaa; /* marker at offset 16 (head) */
buf[*offset + 15] = 0xaa; /* marker at offset 31 (head) */
topts.data_in = buf;
topts.data_out = buf;
topts.data_size_in = 128;
topts.data_size_out = 128;
err = bpf_prog_test_run_opts(prog_fd, &topts);
/* test_xdp_update_frags: buf[16,31]: 0xaa -> 0xbb */
ASSERT_OK(err, "xdp_update_frag");
ASSERT_EQ(topts.retval, XDP_PASS, "xdp_update_frag retval");
ASSERT_EQ(buf[16], 0xbb, "xdp_update_frag buf[16]");
ASSERT_EQ(buf[31], 0xbb, "xdp_update_frag buf[31]");
free(buf);
buf = malloc(9000);
if (!ASSERT_OK_PTR(buf, "alloc buf 9Kb"))
goto out;
memset(buf, 0, 9000);
offset = (__u32 *)buf;
*offset = 5000;
buf[*offset] = 0xaa; /* marker at offset 5000 (frag0) */
buf[*offset + 15] = 0xaa; /* marker at offset 5015 (frag0) */
topts.data_in = buf;
topts.data_out = buf;
topts.data_size_in = 9000;
topts.data_size_out = 9000;
err = bpf_prog_test_run_opts(prog_fd, &topts);
/* test_xdp_update_frags: buf[5000,5015]: 0xaa -> 0xbb */
ASSERT_OK(err, "xdp_update_frag");
ASSERT_EQ(topts.retval, XDP_PASS, "xdp_update_frag retval");
ASSERT_EQ(buf[5000], 0xbb, "xdp_update_frag buf[5000]");
ASSERT_EQ(buf[5015], 0xbb, "xdp_update_frag buf[5015]");
memset(buf, 0, 9000);
offset = (__u32 *)buf;
*offset = 3510;
buf[*offset] = 0xaa; /* marker at offset 3510 (head) */
buf[*offset + 15] = 0xaa; /* marker at offset 3525 (frag0) */
err = bpf_prog_test_run_opts(prog_fd, &topts);
/* test_xdp_update_frags: buf[3510,3525]: 0xaa -> 0xbb */
ASSERT_OK(err, "xdp_update_frag");
ASSERT_EQ(topts.retval, XDP_PASS, "xdp_update_frag retval");
ASSERT_EQ(buf[3510], 0xbb, "xdp_update_frag buf[3510]");
ASSERT_EQ(buf[3525], 0xbb, "xdp_update_frag buf[3525]");
memset(buf, 0, 9000);
offset = (__u32 *)buf;
*offset = 7606;
buf[*offset] = 0xaa; /* marker at offset 7606 (frag0) */
buf[*offset + 15] = 0xaa; /* marker at offset 7621 (frag1) */
err = bpf_prog_test_run_opts(prog_fd, &topts);
/* test_xdp_update_frags: buf[7606,7621]: 0xaa -> 0xbb */
ASSERT_OK(err, "xdp_update_frag");
ASSERT_EQ(topts.retval, XDP_PASS, "xdp_update_frag retval");
ASSERT_EQ(buf[7606], 0xbb, "xdp_update_frag buf[7606]");
ASSERT_EQ(buf[7621], 0xbb, "xdp_update_frag buf[7621]");
free(buf);
/* test_xdp_update_frags: unsupported buffer size */
f = fopen("/proc/sys/net/core/max_skb_frags", "r");
if (!ASSERT_OK_PTR(f, "max_skb_frag file pointer"))
goto out;
num = fscanf(f, "%d", &max_skb_frags);
fclose(f);
if (!ASSERT_EQ(num, 1, "max_skb_frags read failed"))
goto out;
/* xdp_buff linear area size is always set to 4096 in the
* bpf_prog_test_run_xdp routine.
*/
buf_size = 4096 + (max_skb_frags + 1) * sysconf(_SC_PAGE_SIZE);
buf = malloc(buf_size);
if (!ASSERT_OK_PTR(buf, "alloc buf"))
goto out;
memset(buf, 0, buf_size);
offset = (__u32 *)buf;
*offset = 16;
buf[*offset] = 0xaa;
buf[*offset + 15] = 0xaa;
topts.data_in = buf;
topts.data_out = buf;
topts.data_size_in = buf_size;
topts.data_size_out = buf_size;
err = bpf_prog_test_run_opts(prog_fd, &topts);
ASSERT_EQ(err, -ENOMEM,
"unsupported buf size, possible non-default /proc/sys/net/core/max_skb_flags?");
free(buf);
out:
bpf_object__close(obj);
}
void test_xdp_adjust_frags(void)
{
if (test__start_subtest("xdp_adjust_frags"))
test_xdp_update_frags();
}
| linux-master | tools/testing/selftests/bpf/prog_tests/xdp_adjust_frags.c |
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include "test_stacktrace_build_id.skel.h"
void test_stacktrace_build_id_nmi(void)
{
int control_map_fd, stackid_hmap_fd, stackmap_fd;
struct test_stacktrace_build_id *skel;
int err, pmu_fd;
struct perf_event_attr attr = {
.freq = 1,
.type = PERF_TYPE_HARDWARE,
.config = PERF_COUNT_HW_CPU_CYCLES,
};
__u32 key, prev_key, val, duration = 0;
char buf[BPF_BUILD_ID_SIZE];
struct bpf_stack_build_id id_offs[PERF_MAX_STACK_DEPTH];
int build_id_matches = 0, build_id_size;
int i, retry = 1;
attr.sample_freq = read_perf_max_sample_freq();
retry:
skel = test_stacktrace_build_id__open();
if (CHECK(!skel, "skel_open", "skeleton open failed\n"))
return;
/* override program type */
bpf_program__set_type(skel->progs.oncpu, BPF_PROG_TYPE_PERF_EVENT);
err = test_stacktrace_build_id__load(skel);
if (CHECK(err, "skel_load", "skeleton load failed: %d\n", err))
goto cleanup;
pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
0 /* cpu 0 */, -1 /* group id */,
0 /* flags */);
if (pmu_fd < 0 && errno == ENOENT) {
printf("%s:SKIP:no PERF_COUNT_HW_CPU_CYCLES\n", __func__);
test__skip();
goto cleanup;
}
if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n",
pmu_fd, errno))
goto cleanup;
skel->links.oncpu = bpf_program__attach_perf_event(skel->progs.oncpu,
pmu_fd);
if (!ASSERT_OK_PTR(skel->links.oncpu, "attach_perf_event")) {
close(pmu_fd);
goto cleanup;
}
/* find map fds */
control_map_fd = bpf_map__fd(skel->maps.control_map);
stackid_hmap_fd = bpf_map__fd(skel->maps.stackid_hmap);
stackmap_fd = bpf_map__fd(skel->maps.stackmap);
if (CHECK_FAIL(system("dd if=/dev/urandom of=/dev/zero count=4 2> /dev/null")))
goto cleanup;
if (CHECK_FAIL(system("taskset 0x1 ./urandom_read 100000")))
goto cleanup;
/* disable stack trace collection */
key = 0;
val = 1;
bpf_map_update_elem(control_map_fd, &key, &val, 0);
/* for every element in stackid_hmap, we can find a corresponding one
* in stackmap, and vice versa.
*/
err = compare_map_keys(stackid_hmap_fd, stackmap_fd);
if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap",
"err %d errno %d\n", err, errno))
goto cleanup;
err = compare_map_keys(stackmap_fd, stackid_hmap_fd);
if (CHECK(err, "compare_map_keys stackmap vs. stackid_hmap",
"err %d errno %d\n", err, errno))
goto cleanup;
build_id_size = read_build_id("urandom_read", buf, sizeof(buf));
err = build_id_size < 0 ? build_id_size : 0;
if (CHECK(err, "get build_id with readelf",
"err %d errno %d\n", err, errno))
goto cleanup;
err = bpf_map__get_next_key(skel->maps.stackmap, NULL, &key, sizeof(key));
if (CHECK(err, "get_next_key from stackmap",
"err %d, errno %d\n", err, errno))
goto cleanup;
do {
err = bpf_map__lookup_elem(skel->maps.stackmap, &key, sizeof(key),
id_offs, sizeof(id_offs), 0);
if (CHECK(err, "lookup_elem from stackmap",
"err %d, errno %d\n", err, errno))
goto cleanup;
for (i = 0; i < PERF_MAX_STACK_DEPTH; ++i)
if (id_offs[i].status == BPF_STACK_BUILD_ID_VALID &&
id_offs[i].offset != 0) {
if (memcmp(buf, id_offs[i].build_id, build_id_size) == 0)
build_id_matches = 1;
}
prev_key = key;
} while (bpf_map__get_next_key(skel->maps.stackmap, &prev_key, &key, sizeof(key)) == 0);
/* stack_map_get_build_id_offset() is racy and sometimes can return
* BPF_STACK_BUILD_ID_IP instead of BPF_STACK_BUILD_ID_VALID;
* try it one more time.
*/
if (build_id_matches < 1 && retry--) {
test_stacktrace_build_id__destroy(skel);
printf("%s:WARN:Didn't find expected build ID from the map, retrying\n",
__func__);
goto retry;
}
if (CHECK(build_id_matches < 1, "build id match",
"Didn't find expected build ID from the map\n"))
goto cleanup;
/*
* We intentionally skip compare_stack_ips(). This is because we
* only support one in_nmi() ips-to-build_id translation per cpu
* at any time, thus stack_amap here will always fall back to
* BPF_STACK_BUILD_ID_IP.
*/
cleanup:
test_stacktrace_build_id__destroy(skel);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/stacktrace_build_id_nmi.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2022 Linutronix GmbH */
#include <test_progs.h>
#include <network_helpers.h>
#include "test_time_tai.skel.h"
#include <time.h>
#include <stdint.h>
#define TAI_THRESHOLD 1000000000ULL /* 1s */
#define NSEC_PER_SEC 1000000000ULL
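/* BPF-generated TAI timestamps must lie in the past, but no more than
 * TAI_THRESHOLD behind CLOCK_TAI "now".
 */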
static __u64 ts_to_ns(const struct timespec *ts)
{
return ts->tv_sec * NSEC_PER_SEC + ts->tv_nsec;
}
void test_time_tai(void)
{
struct __sk_buff skb = {
.cb[0] = 0,
.cb[1] = 0,
.tstamp = 0,
};
LIBBPF_OPTS(bpf_test_run_opts, topts,
.data_in = &pkt_v4,
.data_size_in = sizeof(pkt_v4),
.ctx_in = &skb,
.ctx_size_in = sizeof(skb),
.ctx_out = &skb,
.ctx_size_out = sizeof(skb),
);
struct test_time_tai *skel;
struct timespec now_tai;
__u64 ts1, ts2, now;
int ret, prog_fd;
/* Open and load */
skel = test_time_tai__open_and_load();
if (!ASSERT_OK_PTR(skel, "tai_open"))
return;
/* Run test program */
prog_fd = bpf_program__fd(skel->progs.time_tai);
ret = bpf_prog_test_run_opts(prog_fd, &topts);
ASSERT_OK(ret, "test_run");
/* Retrieve generated TAI timestamps */
ts1 = skb.tstamp;
ts2 = skb.cb[0] | ((__u64)skb.cb[1] << 32);
/* TAI != 0 */
ASSERT_NEQ(ts1, 0, "tai_ts1");
ASSERT_NEQ(ts2, 0, "tai_ts2");
/* TAI is moving forward only */
ASSERT_GT(ts2, ts1, "tai_forward");
/* Check for future */
ret = clock_gettime(CLOCK_TAI, &now_tai);
ASSERT_EQ(ret, 0, "tai_gettime");
now = ts_to_ns(&now_tai);
ASSERT_TRUE(now > ts1, "tai_future_ts1");
ASSERT_TRUE(now > ts2, "tai_future_ts2");
/* Check for reasonable range */
ASSERT_TRUE(now - ts1 < TAI_THRESHOLD, "tai_range_ts1");
ASSERT_TRUE(now - ts2 < TAI_THRESHOLD, "tai_range_ts2");
test_time_tai__destroy(skel);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/time_tai.c |
// SPDX-License-Identifier: GPL-2.0
#include <uapi/linux/bpf.h>
#include <linux/if_link.h>
#include <test_progs.h>
#include "test_xdp_devmap_helpers.skel.h"
#include "test_xdp_with_devmap_frags_helpers.skel.h"
#include "test_xdp_with_devmap_helpers.skel.h"
#define IFINDEX_LO 1
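/* All attach and map-update checks below run against loopback, which always
 * has ifindex 1.
 */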
static void test_xdp_with_devmap_helpers(void)
{
struct test_xdp_with_devmap_helpers *skel;
struct bpf_prog_info info = {};
struct bpf_devmap_val val = {
.ifindex = IFINDEX_LO,
};
__u32 len = sizeof(info);
int err, dm_fd, map_fd;
__u32 idx = 0;
skel = test_xdp_with_devmap_helpers__open_and_load();
if (!ASSERT_OK_PTR(skel, "test_xdp_with_devmap_helpers__open_and_load"))
return;
dm_fd = bpf_program__fd(skel->progs.xdp_redir_prog);
err = bpf_xdp_attach(IFINDEX_LO, dm_fd, XDP_FLAGS_SKB_MODE, NULL);
if (!ASSERT_OK(err, "Generic attach of program with 8-byte devmap"))
goto out_close;
err = bpf_xdp_detach(IFINDEX_LO, XDP_FLAGS_SKB_MODE, NULL);
ASSERT_OK(err, "XDP program detach");
dm_fd = bpf_program__fd(skel->progs.xdp_dummy_dm);
map_fd = bpf_map__fd(skel->maps.dm_ports);
err = bpf_prog_get_info_by_fd(dm_fd, &info, &len);
if (!ASSERT_OK(err, "bpf_prog_get_info_by_fd"))
goto out_close;
val.bpf_prog.fd = dm_fd;
err = bpf_map_update_elem(map_fd, &idx, &val, 0);
ASSERT_OK(err, "Add program to devmap entry");
err = bpf_map_lookup_elem(map_fd, &idx, &val);
ASSERT_OK(err, "Read devmap entry");
ASSERT_EQ(info.id, val.bpf_prog.id, "Match program id to devmap entry prog_id");
/* can not attach BPF_XDP_DEVMAP program to a device */
err = bpf_xdp_attach(IFINDEX_LO, dm_fd, XDP_FLAGS_SKB_MODE, NULL);
if (!ASSERT_NEQ(err, 0, "Attach of BPF_XDP_DEVMAP program"))
bpf_xdp_detach(IFINDEX_LO, XDP_FLAGS_SKB_MODE, NULL);
val.ifindex = 1;
val.bpf_prog.fd = bpf_program__fd(skel->progs.xdp_dummy_prog);
err = bpf_map_update_elem(map_fd, &idx, &val, 0);
ASSERT_NEQ(err, 0, "Add non-BPF_XDP_DEVMAP program to devmap entry");
/* Try to attach BPF_XDP program with frags to devmap when we have
* already loaded a BPF_XDP program on the map
*/
idx = 1;
val.ifindex = 1;
val.bpf_prog.fd = bpf_program__fd(skel->progs.xdp_dummy_dm_frags);
err = bpf_map_update_elem(map_fd, &idx, &val, 0);
ASSERT_NEQ(err, 0, "Add BPF_XDP program with frags to devmap entry");
out_close:
test_xdp_with_devmap_helpers__destroy(skel);
}
static void test_neg_xdp_devmap_helpers(void)
{
struct test_xdp_devmap_helpers *skel;
skel = test_xdp_devmap_helpers__open_and_load();
if (!ASSERT_EQ(skel, NULL,
"Load of XDP program accessing egress ifindex without attach type")) {
test_xdp_devmap_helpers__destroy(skel);
}
}
static void test_xdp_with_devmap_frags_helpers(void)
{
struct test_xdp_with_devmap_frags_helpers *skel;
struct bpf_prog_info info = {};
struct bpf_devmap_val val = {
.ifindex = IFINDEX_LO,
};
__u32 len = sizeof(info);
int err, dm_fd_frags, map_fd;
__u32 idx = 0;
skel = test_xdp_with_devmap_frags_helpers__open_and_load();
if (!ASSERT_OK_PTR(skel, "test_xdp_with_devmap_helpers__open_and_load"))
return;
dm_fd_frags = bpf_program__fd(skel->progs.xdp_dummy_dm_frags);
map_fd = bpf_map__fd(skel->maps.dm_ports);
err = bpf_prog_get_info_by_fd(dm_fd_frags, &info, &len);
if (!ASSERT_OK(err, "bpf_prog_get_info_by_fd"))
goto out_close;
val.bpf_prog.fd = dm_fd_frags;
err = bpf_map_update_elem(map_fd, &idx, &val, 0);
ASSERT_OK(err, "Add frags program to devmap entry");
err = bpf_map_lookup_elem(map_fd, &idx, &val);
ASSERT_OK(err, "Read devmap entry");
ASSERT_EQ(info.id, val.bpf_prog.id,
"Match program id to devmap entry prog_id");
/* Try to attach BPF_XDP program to devmap when we have
* already loaded a BPF_XDP program with frags on the map
*/
idx = 1;
val.ifindex = 1;
val.bpf_prog.fd = bpf_program__fd(skel->progs.xdp_dummy_dm);
err = bpf_map_update_elem(map_fd, &idx, &val, 0);
ASSERT_NEQ(err, 0, "Add BPF_XDP program to devmap entry");
out_close:
test_xdp_with_devmap_frags_helpers__destroy(skel);
}
void serial_test_xdp_devmap_attach(void)
{
if (test__start_subtest("DEVMAP with programs in entries"))
test_xdp_with_devmap_helpers();
if (test__start_subtest("DEVMAP with frags programs in entries"))
test_xdp_with_devmap_frags_helpers();
if (test__start_subtest("Verifier check of DEVMAP programs"))
test_neg_xdp_devmap_helpers();
}
| linux-master | tools/testing/selftests/bpf/prog_tests/xdp_devmap_attach.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2020 Google LLC.
*/
#include <test_progs.h>
#include <linux/limits.h>
#include "bprm_opts.skel.h"
#include "network_helpers.h"
#include "task_local_storage_helpers.h"
static const char * const bash_envp[] = { "TMPDIR=shouldnotbeset", NULL };
static int update_storage(int map_fd, int secureexec)
{
int task_fd, ret = 0;
task_fd = sys_pidfd_open(getpid(), 0);
if (task_fd < 0)
return errno;
ret = bpf_map_update_elem(map_fd, &task_fd, &secureexec, BPF_NOEXIST);
if (ret)
ret = errno;
close(task_fd);
return ret;
}
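/* Fork a child that tags itself through the task local storage map and then
 * execs bash; the child's exit code (10 vs 20, see below) tells whether the
 * LSM program forced secureexec.
 */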
static int run_set_secureexec(int map_fd, int secureexec)
{
int child_pid, child_status, ret, null_fd;
child_pid = fork();
if (child_pid == 0) {
null_fd = open("/dev/null", O_WRONLY);
if (null_fd == -1)
exit(errno);
dup2(null_fd, STDOUT_FILENO);
dup2(null_fd, STDERR_FILENO);
close(null_fd);
/* Ensure that all executions from hereon are
* secure by setting a local storage which is read by
* the bprm_creds_for_exec hook and sets bprm->secureexec.
*/
ret = update_storage(map_fd, secureexec);
if (ret)
exit(ret);
/* If the binary is executed with secureexec=1, the dynamic
* loader ignores and unsets certain variables like LD_PRELOAD,
* TMPDIR etc. TMPDIR is used here to simplify the example, as
* LD_PRELOAD requires a real .so file.
*
* If the value of TMPDIR is set, the bash command returns 10
* and if the value is unset, it returns 20.
*/
execle("/bin/bash", "bash", "-c",
"[[ -z \"${TMPDIR}\" ]] || exit 10 && exit 20", NULL,
bash_envp);
exit(errno);
} else if (child_pid > 0) {
waitpid(child_pid, &child_status, 0);
ret = WEXITSTATUS(child_status);
/* If a secureexec occurred, the exit status should be 20 */
if (secureexec && ret == 20)
return 0;
/* If normal execution happened, the exit code should be 10 */
if (!secureexec && ret == 10)
return 0;
}
return -EINVAL;
}
void test_test_bprm_opts(void)
{
int err, duration = 0;
struct bprm_opts *skel = NULL;
skel = bprm_opts__open_and_load();
if (CHECK(!skel, "skel_load", "skeleton failed\n"))
goto close_prog;
err = bprm_opts__attach(skel);
if (CHECK(err, "attach", "attach failed: %d\n", err))
goto close_prog;
/* Run the test with the secureexec bit unset */
err = run_set_secureexec(bpf_map__fd(skel->maps.secure_exec_task_map),
0 /* secureexec */);
if (CHECK(err, "run_set_secureexec:0", "err = %d\n", err))
goto close_prog;
/* Run the test with the secureexec bit set */
err = run_set_secureexec(bpf_map__fd(skel->maps.secure_exec_task_map),
1 /* secureexec */);
if (CHECK(err, "run_set_secureexec:1", "err = %d\n", err))
goto close_prog;
close_prog:
bprm_opts__destroy(skel);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/test_bprm_opts.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2020 Google LLC.
*/
#include <test_progs.h>
#include <cgroup_helpers.h>
#include <network_helpers.h>
#include "metadata_unused.skel.h"
#include "metadata_used.skel.h"
static int duration;
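/* Return 0 if the program behind prog_fd holds a reference to the map
* behind map_fd, i.e. the map's id appears in the program's reported
* map_ids; -ENOENT if it does not, or a negative errno on failure.
*/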
static int prog_holds_map(int prog_fd, int map_fd)
{
struct bpf_prog_info prog_info = {};
struct bpf_map_info map_info = {};
__u32 prog_info_len;
__u32 map_info_len;
__u32 *map_ids;
int nr_maps;
int ret;
int i;
map_info_len = sizeof(map_info);
ret = bpf_map_get_info_by_fd(map_fd, &map_info, &map_info_len);
if (ret)
return -errno;
prog_info_len = sizeof(prog_info);
ret = bpf_prog_get_info_by_fd(prog_fd, &prog_info, &prog_info_len);
if (ret)
return -errno;
map_ids = calloc(prog_info.nr_map_ids, sizeof(__u32));
if (!map_ids)
return -ENOMEM;
nr_maps = prog_info.nr_map_ids;
memset(&prog_info, 0, sizeof(prog_info));
prog_info.nr_map_ids = nr_maps;
prog_info.map_ids = ptr_to_u64(map_ids);
prog_info_len = sizeof(prog_info);
ret = bpf_prog_get_info_by_fd(prog_fd, &prog_info, &prog_info_len);
if (ret) {
ret = -errno;
goto free_map_ids;
}
ret = -ENOENT;
for (i = 0; i < prog_info.nr_map_ids; i++) {
if (map_ids[i] == map_info.id) {
ret = 0;
break;
}
}
free_map_ids:
free(map_ids);
return ret;
}
static void test_metadata_unused(void)
{
struct metadata_unused *obj;
int err;
obj = metadata_unused__open_and_load();
if (CHECK(!obj, "skel-load", "errno %d", errno))
return;
err = prog_holds_map(bpf_program__fd(obj->progs.prog),
bpf_map__fd(obj->maps.rodata));
if (CHECK(err, "prog-holds-rodata", "errno: %d", err))
return;
/* Assert that we can access the metadata in skel and the values are
* what we expect.
*/
if (CHECK(strncmp(obj->rodata->bpf_metadata_a, "foo",
sizeof(obj->rodata->bpf_metadata_a)),
"bpf_metadata_a", "expected \"foo\", value differ"))
goto close_bpf_object;
if (CHECK(obj->rodata->bpf_metadata_b != 1, "bpf_metadata_b",
"expected 1, got %d", obj->rodata->bpf_metadata_b))
goto close_bpf_object;
/* Assert that binding metadata map to prog again succeeds. */
err = bpf_prog_bind_map(bpf_program__fd(obj->progs.prog),
bpf_map__fd(obj->maps.rodata), NULL);
CHECK(err, "rebind_map", "errno %d, expected 0", errno);
close_bpf_object:
metadata_unused__destroy(obj);
}
static void test_metadata_used(void)
{
struct metadata_used *obj;
int err;
obj = metadata_used__open_and_load();
if (CHECK(!obj, "skel-load", "errno %d", errno))
return;
err = prog_holds_map(bpf_program__fd(obj->progs.prog),
bpf_map__fd(obj->maps.rodata));
if (CHECK(err, "prog-holds-rodata", "errno: %d", err))
return;
/* Assert that we can access the metadata in skel and the values are
* what we expect.
*/
if (CHECK(strncmp(obj->rodata->bpf_metadata_a, "bar",
sizeof(obj->rodata->bpf_metadata_a)),
"metadata_a", "expected \"bar\", value differ"))
goto close_bpf_object;
if (CHECK(obj->rodata->bpf_metadata_b != 2, "metadata_b",
"expected 2, got %d", obj->rodata->bpf_metadata_b))
goto close_bpf_object;
/* Assert that binding metadata map to prog again succeeds. */
err = bpf_prog_bind_map(bpf_program__fd(obj->progs.prog),
bpf_map__fd(obj->maps.rodata), NULL);
CHECK(err, "rebind_map", "errno %d, expected 0", errno);
close_bpf_object:
metadata_used__destroy(obj);
}
void test_metadata(void)
{
if (test__start_subtest("unused"))
test_metadata_unused();
if (test__start_subtest("used"))
test_metadata_used();
}
| linux-master | tools/testing/selftests/bpf/prog_tests/metadata.c |
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include <network_helpers.h>
/* test_tailcall_1 checks basic functionality by patching multiple locations
* in a single program for a single tail call slot with nop->jmp, jmp->nop
* and jmp->jmp rewrites. Also checks for nop->nop.
*/
static void test_tailcall_1(void)
{
int err, map_fd, prog_fd, main_fd, i, j;
struct bpf_map *prog_array;
struct bpf_program *prog;
struct bpf_object *obj;
char prog_name[32];
char buff[128] = {};
LIBBPF_OPTS(bpf_test_run_opts, topts,
.data_in = buff,
.data_size_in = sizeof(buff),
.repeat = 1,
);
err = bpf_prog_test_load("tailcall1.bpf.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
&prog_fd);
if (CHECK_FAIL(err))
return;
prog = bpf_object__find_program_by_name(obj, "entry");
if (CHECK_FAIL(!prog))
goto out;
main_fd = bpf_program__fd(prog);
if (CHECK_FAIL(main_fd < 0))
goto out;
prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
if (CHECK_FAIL(!prog_array))
goto out;
map_fd = bpf_map__fd(prog_array);
if (CHECK_FAIL(map_fd < 0))
goto out;
for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);
prog = bpf_object__find_program_by_name(obj, prog_name);
if (CHECK_FAIL(!prog))
goto out;
prog_fd = bpf_program__fd(prog);
if (CHECK_FAIL(prog_fd < 0))
goto out;
err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
if (CHECK_FAIL(err))
goto out;
}
for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
err = bpf_prog_test_run_opts(main_fd, &topts);
ASSERT_OK(err, "tailcall");
ASSERT_EQ(topts.retval, i, "tailcall retval");
err = bpf_map_delete_elem(map_fd, &i);
if (CHECK_FAIL(err))
goto out;
}
err = bpf_prog_test_run_opts(main_fd, &topts);
ASSERT_OK(err, "tailcall");
ASSERT_EQ(topts.retval, 3, "tailcall retval");
for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);
prog = bpf_object__find_program_by_name(obj, prog_name);
if (CHECK_FAIL(!prog))
goto out;
prog_fd = bpf_program__fd(prog);
if (CHECK_FAIL(prog_fd < 0))
goto out;
err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
if (CHECK_FAIL(err))
goto out;
}
err = bpf_prog_test_run_opts(main_fd, &topts);
ASSERT_OK(err, "tailcall");
ASSERT_OK(topts.retval, "tailcall retval");
for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
j = bpf_map__max_entries(prog_array) - 1 - i;
snprintf(prog_name, sizeof(prog_name), "classifier_%d", j);
prog = bpf_object__find_program_by_name(obj, prog_name);
if (CHECK_FAIL(!prog))
goto out;
prog_fd = bpf_program__fd(prog);
if (CHECK_FAIL(prog_fd < 0))
goto out;
err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
if (CHECK_FAIL(err))
goto out;
}
for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
j = bpf_map__max_entries(prog_array) - 1 - i;
err = bpf_prog_test_run_opts(main_fd, &topts);
ASSERT_OK(err, "tailcall");
ASSERT_EQ(topts.retval, j, "tailcall retval");
err = bpf_map_delete_elem(map_fd, &i);
if (CHECK_FAIL(err))
goto out;
}
err = bpf_prog_test_run_opts(main_fd, &topts);
ASSERT_OK(err, "tailcall");
ASSERT_EQ(topts.retval, 3, "tailcall retval");
for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
err = bpf_map_delete_elem(map_fd, &i);
if (CHECK_FAIL(err >= 0 || errno != ENOENT))
goto out;
err = bpf_prog_test_run_opts(main_fd, &topts);
ASSERT_OK(err, "tailcall");
ASSERT_EQ(topts.retval, 3, "tailcall retval");
}
out:
bpf_object__close(obj);
}
/* test_tailcall_2 checks that patching multiple programs for a single
* tail call slot works. It also jumps through several programs and tests
* the tail call limit counter.
*/
static void test_tailcall_2(void)
{
int err, map_fd, prog_fd, main_fd, i;
struct bpf_map *prog_array;
struct bpf_program *prog;
struct bpf_object *obj;
char prog_name[32];
char buff[128] = {};
LIBBPF_OPTS(bpf_test_run_opts, topts,
.data_in = buff,
.data_size_in = sizeof(buff),
.repeat = 1,
);
err = bpf_prog_test_load("tailcall2.bpf.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
&prog_fd);
if (CHECK_FAIL(err))
return;
prog = bpf_object__find_program_by_name(obj, "entry");
if (CHECK_FAIL(!prog))
goto out;
main_fd = bpf_program__fd(prog);
if (CHECK_FAIL(main_fd < 0))
goto out;
prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
if (CHECK_FAIL(!prog_array))
goto out;
map_fd = bpf_map__fd(prog_array);
if (CHECK_FAIL(map_fd < 0))
goto out;
for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);
prog = bpf_object__find_program_by_name(obj, prog_name);
if (CHECK_FAIL(!prog))
goto out;
prog_fd = bpf_program__fd(prog);
if (CHECK_FAIL(prog_fd < 0))
goto out;
err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
if (CHECK_FAIL(err))
goto out;
}
err = bpf_prog_test_run_opts(main_fd, &topts);
ASSERT_OK(err, "tailcall");
ASSERT_EQ(topts.retval, 2, "tailcall retval");
i = 2;
err = bpf_map_delete_elem(map_fd, &i);
if (CHECK_FAIL(err))
goto out;
err = bpf_prog_test_run_opts(main_fd, &topts);
ASSERT_OK(err, "tailcall");
ASSERT_EQ(topts.retval, 1, "tailcall retval");
i = 0;
err = bpf_map_delete_elem(map_fd, &i);
if (CHECK_FAIL(err))
goto out;
err = bpf_prog_test_run_opts(main_fd, &topts);
ASSERT_OK(err, "tailcall");
ASSERT_EQ(topts.retval, 3, "tailcall retval");
out:
bpf_object__close(obj);
}
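/* Common helper for the tail call limit tests: install classifier_0
* into slot 0 of the jmp_table, run the entry program and verify via
* the counter in .bss that the tail call limit was enforced (the
* counter reaches 33).
*/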
static void test_tailcall_count(const char *which)
{
int err, map_fd, prog_fd, main_fd, data_fd, i, val;
struct bpf_map *prog_array, *data_map;
struct bpf_program *prog;
struct bpf_object *obj;
char buff[128] = {};
LIBBPF_OPTS(bpf_test_run_opts, topts,
.data_in = buff,
.data_size_in = sizeof(buff),
.repeat = 1,
);
err = bpf_prog_test_load(which, BPF_PROG_TYPE_SCHED_CLS, &obj,
&prog_fd);
if (CHECK_FAIL(err))
return;
prog = bpf_object__find_program_by_name(obj, "entry");
if (CHECK_FAIL(!prog))
goto out;
main_fd = bpf_program__fd(prog);
if (CHECK_FAIL(main_fd < 0))
goto out;
prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
if (CHECK_FAIL(!prog_array))
goto out;
map_fd = bpf_map__fd(prog_array);
if (CHECK_FAIL(map_fd < 0))
goto out;
prog = bpf_object__find_program_by_name(obj, "classifier_0");
if (CHECK_FAIL(!prog))
goto out;
prog_fd = bpf_program__fd(prog);
if (CHECK_FAIL(prog_fd < 0))
goto out;
i = 0;
err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
if (CHECK_FAIL(err))
goto out;
err = bpf_prog_test_run_opts(main_fd, &topts);
ASSERT_OK(err, "tailcall");
ASSERT_EQ(topts.retval, 1, "tailcall retval");
data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
goto out;
data_fd = bpf_map__fd(data_map);
if (CHECK_FAIL(data_fd < 0))
goto out;
i = 0;
err = bpf_map_lookup_elem(data_fd, &i, &val);
ASSERT_OK(err, "tailcall count");
ASSERT_EQ(val, 33, "tailcall count");
i = 0;
err = bpf_map_delete_elem(map_fd, &i);
if (CHECK_FAIL(err))
goto out;
err = bpf_prog_test_run_opts(main_fd, &topts);
ASSERT_OK(err, "tailcall");
ASSERT_OK(topts.retval, "tailcall retval");
out:
bpf_object__close(obj);
}
/* test_tailcall_3 checks that the count value of the tail call limit
* enforcement matches expectations. The JIT uses a direct jump.
*/
static void test_tailcall_3(void)
{
test_tailcall_count("tailcall3.bpf.o");
}
/* test_tailcall_6 checks that the count value of the tail call limit
* enforcement matches expectations. The JIT uses an indirect jump.
*/
static void test_tailcall_6(void)
{
test_tailcall_count("tailcall6.bpf.o");
}
/* test_tailcall_4 checks that the kernel properly selects an indirect
* jump for the case where the key is not known. The key is passed via
* global data so that different targets can be selected and their
* return values compared.
*/
static void test_tailcall_4(void)
{
int err, map_fd, prog_fd, main_fd, data_fd, i;
struct bpf_map *prog_array, *data_map;
struct bpf_program *prog;
struct bpf_object *obj;
static const int zero = 0;
char buff[128] = {};
char prog_name[32];
LIBBPF_OPTS(bpf_test_run_opts, topts,
.data_in = buff,
.data_size_in = sizeof(buff),
.repeat = 1,
);
err = bpf_prog_test_load("tailcall4.bpf.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
&prog_fd);
if (CHECK_FAIL(err))
return;
prog = bpf_object__find_program_by_name(obj, "entry");
if (CHECK_FAIL(!prog))
goto out;
main_fd = bpf_program__fd(prog);
if (CHECK_FAIL(main_fd < 0))
goto out;
prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
if (CHECK_FAIL(!prog_array))
goto out;
map_fd = bpf_map__fd(prog_array);
if (CHECK_FAIL(map_fd < 0))
goto out;
data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
goto out;
data_fd = bpf_map__fd(data_map);
if (CHECK_FAIL(data_fd < 0))
goto out;
for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);
prog = bpf_object__find_program_by_name(obj, prog_name);
if (CHECK_FAIL(!prog))
goto out;
prog_fd = bpf_program__fd(prog);
if (CHECK_FAIL(prog_fd < 0))
goto out;
err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
if (CHECK_FAIL(err))
goto out;
}
for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
err = bpf_map_update_elem(data_fd, &zero, &i, BPF_ANY);
if (CHECK_FAIL(err))
goto out;
err = bpf_prog_test_run_opts(main_fd, &topts);
ASSERT_OK(err, "tailcall");
ASSERT_EQ(topts.retval, i, "tailcall retval");
}
for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
err = bpf_map_update_elem(data_fd, &zero, &i, BPF_ANY);
if (CHECK_FAIL(err))
goto out;
err = bpf_map_delete_elem(map_fd, &i);
if (CHECK_FAIL(err))
goto out;
err = bpf_prog_test_run_opts(main_fd, &topts);
ASSERT_OK(err, "tailcall");
ASSERT_EQ(topts.retval, 3, "tailcall retval");
}
out:
bpf_object__close(obj);
}
/* test_tailcall_5 probes, similarly to test_tailcall_4, that the kernel
* generates an indirect jump when the keys are constant but differ
* between branches.
*/
static void test_tailcall_5(void)
{
int err, map_fd, prog_fd, main_fd, data_fd, i, key[] = { 1111, 1234, 5678 };
struct bpf_map *prog_array, *data_map;
struct bpf_program *prog;
struct bpf_object *obj;
static const int zero = 0;
char buff[128] = {};
char prog_name[32];
LIBBPF_OPTS(bpf_test_run_opts, topts,
.data_in = buff,
.data_size_in = sizeof(buff),
.repeat = 1,
);
err = bpf_prog_test_load("tailcall5.bpf.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
&prog_fd);
if (CHECK_FAIL(err))
return;
prog = bpf_object__find_program_by_name(obj, "entry");
if (CHECK_FAIL(!prog))
goto out;
main_fd = bpf_program__fd(prog);
if (CHECK_FAIL(main_fd < 0))
goto out;
prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
if (CHECK_FAIL(!prog_array))
goto out;
map_fd = bpf_map__fd(prog_array);
if (CHECK_FAIL(map_fd < 0))
goto out;
data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
goto out;
data_fd = bpf_map__fd(data_map);
if (CHECK_FAIL(data_fd < 0))
goto out;
for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);
prog = bpf_object__find_program_by_name(obj, prog_name);
if (CHECK_FAIL(!prog))
goto out;
prog_fd = bpf_program__fd(prog);
if (CHECK_FAIL(prog_fd < 0))
goto out;
err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
if (CHECK_FAIL(err))
goto out;
}
for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
err = bpf_map_update_elem(data_fd, &zero, &key[i], BPF_ANY);
if (CHECK_FAIL(err))
goto out;
err = bpf_prog_test_run_opts(main_fd, &topts);
ASSERT_OK(err, "tailcall");
ASSERT_EQ(topts.retval, i, "tailcall retval");
}
for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
err = bpf_map_update_elem(data_fd, &zero, &key[i], BPF_ANY);
if (CHECK_FAIL(err))
goto out;
err = bpf_map_delete_elem(map_fd, &i);
if (CHECK_FAIL(err))
goto out;
err = bpf_prog_test_run_opts(main_fd, &topts);
ASSERT_OK(err, "tailcall");
ASSERT_EQ(topts.retval, 3, "tailcall retval");
}
out:
bpf_object__close(obj);
}
/* The purpose of test_tailcall_bpf2bpf_1 is to make sure that tailcalls
* work correctly in combination with BPF subprograms
*/
static void test_tailcall_bpf2bpf_1(void)
{
int err, map_fd, prog_fd, main_fd, i;
struct bpf_map *prog_array;
struct bpf_program *prog;
struct bpf_object *obj;
char prog_name[32];
LIBBPF_OPTS(bpf_test_run_opts, topts,
.data_in = &pkt_v4,
.data_size_in = sizeof(pkt_v4),
.repeat = 1,
);
err = bpf_prog_test_load("tailcall_bpf2bpf1.bpf.o", BPF_PROG_TYPE_SCHED_CLS,
&obj, &prog_fd);
if (CHECK_FAIL(err))
return;
prog = bpf_object__find_program_by_name(obj, "entry");
if (CHECK_FAIL(!prog))
goto out;
main_fd = bpf_program__fd(prog);
if (CHECK_FAIL(main_fd < 0))
goto out;
prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
if (CHECK_FAIL(!prog_array))
goto out;
map_fd = bpf_map__fd(prog_array);
if (CHECK_FAIL(map_fd < 0))
goto out;
/* nop -> jmp */
for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);
prog = bpf_object__find_program_by_name(obj, prog_name);
if (CHECK_FAIL(!prog))
goto out;
prog_fd = bpf_program__fd(prog);
if (CHECK_FAIL(prog_fd < 0))
goto out;
err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
if (CHECK_FAIL(err))
goto out;
}
err = bpf_prog_test_run_opts(main_fd, &topts);
ASSERT_OK(err, "tailcall");
ASSERT_EQ(topts.retval, 1, "tailcall retval");
/* jmp -> nop, call subprog that will do tailcall */
i = 1;
err = bpf_map_delete_elem(map_fd, &i);
if (CHECK_FAIL(err))
goto out;
err = bpf_prog_test_run_opts(main_fd, &topts);
ASSERT_OK(err, "tailcall");
ASSERT_OK(topts.retval, "tailcall retval");
/* make sure that subprog can access ctx and entry prog that
* called this subprog can properly return
*/
i = 0;
err = bpf_map_delete_elem(map_fd, &i);
if (CHECK_FAIL(err))
goto out;
err = bpf_prog_test_run_opts(main_fd, &topts);
ASSERT_OK(err, "tailcall");
ASSERT_EQ(topts.retval, sizeof(pkt_v4) * 2, "tailcall retval");
out:
bpf_object__close(obj);
}
/* test_tailcall_bpf2bpf_2 checks that the count value of the tail call
* limit enforcement matches expectations when the tailcall is preceded
* by a bpf2bpf call.
*/
static void test_tailcall_bpf2bpf_2(void)
{
int err, map_fd, prog_fd, main_fd, data_fd, i, val;
struct bpf_map *prog_array, *data_map;
struct bpf_program *prog;
struct bpf_object *obj;
char buff[128] = {};
LIBBPF_OPTS(bpf_test_run_opts, topts,
.data_in = buff,
.data_size_in = sizeof(buff),
.repeat = 1,
);
err = bpf_prog_test_load("tailcall_bpf2bpf2.bpf.o", BPF_PROG_TYPE_SCHED_CLS,
&obj, &prog_fd);
if (CHECK_FAIL(err))
return;
prog = bpf_object__find_program_by_name(obj, "entry");
if (CHECK_FAIL(!prog))
goto out;
main_fd = bpf_program__fd(prog);
if (CHECK_FAIL(main_fd < 0))
goto out;
prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
if (CHECK_FAIL(!prog_array))
goto out;
map_fd = bpf_map__fd(prog_array);
if (CHECK_FAIL(map_fd < 0))
goto out;
prog = bpf_object__find_program_by_name(obj, "classifier_0");
if (CHECK_FAIL(!prog))
goto out;
prog_fd = bpf_program__fd(prog);
if (CHECK_FAIL(prog_fd < 0))
goto out;
i = 0;
err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
if (CHECK_FAIL(err))
goto out;
err = bpf_prog_test_run_opts(main_fd, &topts);
ASSERT_OK(err, "tailcall");
ASSERT_EQ(topts.retval, 1, "tailcall retval");
data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
goto out;
data_fd = bpf_map__fd(data_map);
if (CHECK_FAIL(data_fd < 0))
goto out;
i = 0;
err = bpf_map_lookup_elem(data_fd, &i, &val);
ASSERT_OK(err, "tailcall count");
ASSERT_EQ(val, 33, "tailcall count");
i = 0;
err = bpf_map_delete_elem(map_fd, &i);
if (CHECK_FAIL(err))
goto out;
err = bpf_prog_test_run_opts(main_fd, &topts);
ASSERT_OK(err, "tailcall");
ASSERT_OK(topts.retval, "tailcall retval");
out:
bpf_object__close(obj);
}
/* test_tailcall_bpf2bpf_3 checks that a non-trivial amount of stack (up
* to 256 bytes) can be used within bpf subprograms that contain
* tailcalls
*/
static void test_tailcall_bpf2bpf_3(void)
{
int err, map_fd, prog_fd, main_fd, i;
struct bpf_map *prog_array;
struct bpf_program *prog;
struct bpf_object *obj;
char prog_name[32];
LIBBPF_OPTS(bpf_test_run_opts, topts,
.data_in = &pkt_v4,
.data_size_in = sizeof(pkt_v4),
.repeat = 1,
);
err = bpf_prog_test_load("tailcall_bpf2bpf3.bpf.o", BPF_PROG_TYPE_SCHED_CLS,
&obj, &prog_fd);
if (CHECK_FAIL(err))
return;
prog = bpf_object__find_program_by_name(obj, "entry");
if (CHECK_FAIL(!prog))
goto out;
main_fd = bpf_program__fd(prog);
if (CHECK_FAIL(main_fd < 0))
goto out;
prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
if (CHECK_FAIL(!prog_array))
goto out;
map_fd = bpf_map__fd(prog_array);
if (CHECK_FAIL(map_fd < 0))
goto out;
for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);
prog = bpf_object__find_program_by_name(obj, prog_name);
if (CHECK_FAIL(!prog))
goto out;
prog_fd = bpf_program__fd(prog);
if (CHECK_FAIL(prog_fd < 0))
goto out;
err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
if (CHECK_FAIL(err))
goto out;
}
err = bpf_prog_test_run_opts(main_fd, &topts);
ASSERT_OK(err, "tailcall");
ASSERT_EQ(topts.retval, sizeof(pkt_v4) * 3, "tailcall retval");
i = 1;
err = bpf_map_delete_elem(map_fd, &i);
if (CHECK_FAIL(err))
goto out;
err = bpf_prog_test_run_opts(main_fd, &topts);
ASSERT_OK(err, "tailcall");
ASSERT_EQ(topts.retval, sizeof(pkt_v4), "tailcall retval");
i = 0;
err = bpf_map_delete_elem(map_fd, &i);
if (CHECK_FAIL(err))
goto out;
err = bpf_prog_test_run_opts(main_fd, &topts);
ASSERT_OK(err, "tailcall");
ASSERT_EQ(topts.retval, sizeof(pkt_v4) * 2, "tailcall retval");
out:
bpf_object__close(obj);
}
#include "tailcall_bpf2bpf4.skel.h"
/* test_tailcall_bpf2bpf_4 checks that the tailcall counter is correctly
* preserved across tailcalls combined with bpf2bpf calls. To make sure
* that the tailcall counter behaves correctly, the bpf program will go
* through the following flow:
*
* entry -> entry_subprog -> tailcall0 -> bpf_func0 -> subprog0 ->
* -> tailcall1 -> bpf_func1 -> subprog1 -> tailcall2 -> bpf_func2 ->
* subprog2 [here bump global counter] --------^
*
* We go through the first two tailcalls and start counting from subprog2,
* where the loop begins. At the end of the test, make sure that the global
* counter is equal to 31, because the tailcall counter includes the first
* two tailcalls whereas the global counter is incremented only in the loop
* presented in the flow above.
*
* The noise parameter is used to insert bpf_map_update calls into the logic
* to force the verifier to patch instructions. This allows us to ensure the
* jump logic remains correct despite instruction movement.
*/
static void test_tailcall_bpf2bpf_4(bool noise)
{
int err, map_fd, prog_fd, main_fd, data_fd, i;
struct tailcall_bpf2bpf4__bss val;
struct bpf_map *prog_array, *data_map;
struct bpf_program *prog;
struct bpf_object *obj;
char prog_name[32];
LIBBPF_OPTS(bpf_test_run_opts, topts,
.data_in = &pkt_v4,
.data_size_in = sizeof(pkt_v4),
.repeat = 1,
);
err = bpf_prog_test_load("tailcall_bpf2bpf4.bpf.o", BPF_PROG_TYPE_SCHED_CLS,
&obj, &prog_fd);
if (CHECK_FAIL(err))
return;
prog = bpf_object__find_program_by_name(obj, "entry");
if (CHECK_FAIL(!prog))
goto out;
main_fd = bpf_program__fd(prog);
if (CHECK_FAIL(main_fd < 0))
goto out;
prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
if (CHECK_FAIL(!prog_array))
goto out;
map_fd = bpf_map__fd(prog_array);
if (CHECK_FAIL(map_fd < 0))
goto out;
for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);
prog = bpf_object__find_program_by_name(obj, prog_name);
if (CHECK_FAIL(!prog))
goto out;
prog_fd = bpf_program__fd(prog);
if (CHECK_FAIL(prog_fd < 0))
goto out;
err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
if (CHECK_FAIL(err))
goto out;
}
data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
goto out;
data_fd = bpf_map__fd(data_map);
if (CHECK_FAIL(data_fd < 0))
goto out;
i = 0;
val.noise = noise;
val.count = 0;
err = bpf_map_update_elem(data_fd, &i, &val, BPF_ANY);
if (CHECK_FAIL(err))
goto out;
err = bpf_prog_test_run_opts(main_fd, &topts);
ASSERT_OK(err, "tailcall");
ASSERT_EQ(topts.retval, sizeof(pkt_v4) * 3, "tailcall retval");
i = 0;
err = bpf_map_lookup_elem(data_fd, &i, &val);
ASSERT_OK(err, "tailcall count");
ASSERT_EQ(val.count, 31, "tailcall count");
out:
bpf_object__close(obj);
}
#include "tailcall_bpf2bpf6.skel.h"
/* Tail call counting works even when there is data on stack which is
* not aligned to 8 bytes.
*/
static void test_tailcall_bpf2bpf_6(void)
{
struct tailcall_bpf2bpf6 *obj;
int err, map_fd, prog_fd, main_fd, data_fd, i, val;
LIBBPF_OPTS(bpf_test_run_opts, topts,
.data_in = &pkt_v4,
.data_size_in = sizeof(pkt_v4),
.repeat = 1,
);
obj = tailcall_bpf2bpf6__open_and_load();
if (!ASSERT_OK_PTR(obj, "open and load"))
return;
main_fd = bpf_program__fd(obj->progs.entry);
if (!ASSERT_GE(main_fd, 0, "entry prog fd"))
goto out;
map_fd = bpf_map__fd(obj->maps.jmp_table);
if (!ASSERT_GE(map_fd, 0, "jmp_table map fd"))
goto out;
prog_fd = bpf_program__fd(obj->progs.classifier_0);
if (!ASSERT_GE(prog_fd, 0, "classifier_0 prog fd"))
goto out;
i = 0;
err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
if (!ASSERT_OK(err, "jmp_table map update"))
goto out;
err = bpf_prog_test_run_opts(main_fd, &topts);
ASSERT_OK(err, "entry prog test run");
ASSERT_EQ(topts.retval, 0, "tailcall retval");
data_fd = bpf_map__fd(obj->maps.bss);
if (!ASSERT_GE(data_fd, 0, "bss map fd"))
goto out;
i = 0;
err = bpf_map_lookup_elem(data_fd, &i, &val);
ASSERT_OK(err, "bss map lookup");
ASSERT_EQ(val, 1, "done flag is set");
out:
tailcall_bpf2bpf6__destroy(obj);
}
void test_tailcalls(void)
{
if (test__start_subtest("tailcall_1"))
test_tailcall_1();
if (test__start_subtest("tailcall_2"))
test_tailcall_2();
if (test__start_subtest("tailcall_3"))
test_tailcall_3();
if (test__start_subtest("tailcall_4"))
test_tailcall_4();
if (test__start_subtest("tailcall_5"))
test_tailcall_5();
if (test__start_subtest("tailcall_6"))
test_tailcall_6();
if (test__start_subtest("tailcall_bpf2bpf_1"))
test_tailcall_bpf2bpf_1();
if (test__start_subtest("tailcall_bpf2bpf_2"))
test_tailcall_bpf2bpf_2();
if (test__start_subtest("tailcall_bpf2bpf_3"))
test_tailcall_bpf2bpf_3();
if (test__start_subtest("tailcall_bpf2bpf_4"))
test_tailcall_bpf2bpf_4(false);
if (test__start_subtest("tailcall_bpf2bpf_5"))
test_tailcall_bpf2bpf_4(true);
if (test__start_subtest("tailcall_bpf2bpf_6"))
test_tailcall_bpf2bpf_6();
}
| linux-master | tools/testing/selftests/bpf/prog_tests/tailcalls.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Test XDP bonding support
*
* Sets up two bonded veth pairs between two fresh namespaces
* and verifies that an XDP_TX program loaded on a bond device
* is correctly loaded onto the slave devices and that XDP_TX'd
* packets are balanced using bonding.
*/
#define _GNU_SOURCE
#include <sched.h>
#include <net/if.h>
#include <linux/if_link.h>
#include "test_progs.h"
#include "network_helpers.h"
#include <linux/if_bonding.h>
#include <linux/limits.h>
#include <linux/udp.h>
#include <uapi/linux/netdev.h>
#include "xdp_dummy.skel.h"
#include "xdp_redirect_multi_kern.skel.h"
#include "xdp_tx.skel.h"
#define BOND1_MAC {0x00, 0x11, 0x22, 0x33, 0x44, 0x55}
#define BOND1_MAC_STR "00:11:22:33:44:55"
#define BOND2_MAC {0x00, 0x22, 0x33, 0x44, 0x55, 0x66}
#define BOND2_MAC_STR "00:22:33:44:55:66"
#define NPACKETS 100
static int root_netns_fd = -1;
static void restore_root_netns(void)
{
ASSERT_OK(setns(root_netns_fd, CLONE_NEWNET), "restore_root_netns");
}
static int setns_by_name(char *name)
{
int nsfd, err;
char nspath[PATH_MAX];
snprintf(nspath, sizeof(nspath), "%s/%s", "/var/run/netns", name);
nsfd = open(nspath, O_RDONLY | O_CLOEXEC);
if (nsfd < 0)
return -1;
err = setns(nsfd, CLONE_NEWNET);
close(nsfd);
return err;
}
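/* Parse /proc/net/dev and return the received-packet count for the
* given interface, or -1 if it cannot be found.
*/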
static int get_rx_packets(const char *iface)
{
FILE *f;
char line[512];
int iface_len = strlen(iface);
f = fopen("/proc/net/dev", "r");
if (!f)
return -1;
while (fgets(line, sizeof(line), f)) {
char *p = line;
while (*p == ' ')
p++; /* skip whitespace */
if (!strncmp(p, iface, iface_len)) {
p += iface_len;
if (*p++ != ':')
continue;
while (*p == ' ')
p++; /* skip whitespace */
while (*p && *p != ' ')
p++; /* skip rx bytes */
while (*p == ' ')
p++; /* skip whitespace */
fclose(f);
return atoi(p);
}
}
fclose(f);
return -1;
}
#define MAX_BPF_LINKS 8
struct skeletons {
struct xdp_dummy *xdp_dummy;
struct xdp_tx *xdp_tx;
struct xdp_redirect_multi_kern *xdp_redirect_multi_kern;
int nlinks;
struct bpf_link *links[MAX_BPF_LINKS];
};
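/* Attach prog to iface and record the resulting bpf_link so that
* bonding_cleanup() can destroy all attachments afterwards.
*/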
static int xdp_attach(struct skeletons *skeletons, struct bpf_program *prog, char *iface)
{
struct bpf_link *link;
int ifindex;
ifindex = if_nametoindex(iface);
if (!ASSERT_GT(ifindex, 0, "get ifindex"))
return -1;
if (!ASSERT_LE(skeletons->nlinks+1, MAX_BPF_LINKS, "too many XDP programs attached"))
return -1;
link = bpf_program__attach_xdp(prog, ifindex);
if (!ASSERT_OK_PTR(link, "attach xdp program"))
return -1;
skeletons->links[skeletons->nlinks++] = link;
return 0;
}
enum {
BOND_ONE_NO_ATTACH = 0,
BOND_BOTH_AND_ATTACH,
};
static const char * const mode_names[] = {
[BOND_MODE_ROUNDROBIN] = "balance-rr",
[BOND_MODE_ACTIVEBACKUP] = "active-backup",
[BOND_MODE_XOR] = "balance-xor",
[BOND_MODE_BROADCAST] = "broadcast",
[BOND_MODE_8023AD] = "802.3ad",
[BOND_MODE_TLB] = "balance-tlb",
[BOND_MODE_ALB] = "balance-alb",
};
static const char * const xmit_policy_names[] = {
[BOND_XMIT_POLICY_LAYER2] = "layer2",
[BOND_XMIT_POLICY_LAYER34] = "layer3+4",
[BOND_XMIT_POLICY_LAYER23] = "layer2+3",
[BOND_XMIT_POLICY_ENCAP23] = "encap2+3",
[BOND_XMIT_POLICY_ENCAP34] = "encap3+4",
};
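/* Set up the test topology: bond1 in the root namespace and bond2 in
* ns_dst, connected by two veth pairs. With BOND_BOTH_AND_ATTACH both
* veth pairs are enslaved and an XDP_TX program is attached to bond2;
* otherwise the second pair stays outside the bonds and a dummy XDP
* program is attached to veth1_2.
*/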
static int bonding_setup(struct skeletons *skeletons, int mode, int xmit_policy,
int bond_both_attach)
{
SYS(fail, "ip netns add ns_dst");
SYS(fail, "ip link add veth1_1 type veth peer name veth2_1 netns ns_dst");
SYS(fail, "ip link add veth1_2 type veth peer name veth2_2 netns ns_dst");
SYS(fail, "ip link add bond1 type bond mode %s xmit_hash_policy %s",
mode_names[mode], xmit_policy_names[xmit_policy]);
SYS(fail, "ip link set bond1 up address " BOND1_MAC_STR " addrgenmode none");
SYS(fail, "ip -netns ns_dst link add bond2 type bond mode %s xmit_hash_policy %s",
mode_names[mode], xmit_policy_names[xmit_policy]);
SYS(fail, "ip -netns ns_dst link set bond2 up address " BOND2_MAC_STR " addrgenmode none");
SYS(fail, "ip link set veth1_1 master bond1");
if (bond_both_attach == BOND_BOTH_AND_ATTACH) {
SYS(fail, "ip link set veth1_2 master bond1");
} else {
SYS(fail, "ip link set veth1_2 up addrgenmode none");
if (xdp_attach(skeletons, skeletons->xdp_dummy->progs.xdp_dummy_prog, "veth1_2"))
return -1;
}
SYS(fail, "ip -netns ns_dst link set veth2_1 master bond2");
if (bond_both_attach == BOND_BOTH_AND_ATTACH)
SYS(fail, "ip -netns ns_dst link set veth2_2 master bond2");
else
SYS(fail, "ip -netns ns_dst link set veth2_2 up addrgenmode none");
/* Load a dummy program on the sending side as the veth peer needs to
* have an XDP program loaded as well.
*/
if (xdp_attach(skeletons, skeletons->xdp_dummy->progs.xdp_dummy_prog, "bond1"))
return -1;
if (bond_both_attach == BOND_BOTH_AND_ATTACH) {
if (!ASSERT_OK(setns_by_name("ns_dst"), "set netns to ns_dst"))
return -1;
if (xdp_attach(skeletons, skeletons->xdp_tx->progs.xdp_tx, "bond2"))
return -1;
restore_root_netns();
}
return 0;
fail:
return -1;
}
static void bonding_cleanup(struct skeletons *skeletons)
{
restore_root_netns();
while (skeletons->nlinks) {
skeletons->nlinks--;
bpf_link__destroy(skeletons->links[skeletons->nlinks]);
}
ASSERT_OK(system("ip link delete bond1"), "delete bond1");
ASSERT_OK(system("ip link delete veth1_1"), "delete veth1_1");
ASSERT_OK(system("ip link delete veth1_2"), "delete veth1_2");
ASSERT_OK(system("ip netns delete ns_dst"), "delete ns_dst");
}
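/* Send NPACKETS raw UDP-in-IPv4 frames out of bond1 via an AF_PACKET
* socket. The destination UDP port changes on every packet (and the
* destination IP as well when vary_dst_ip is set) so that hash-based
* xmit policies can spread the traffic across the slaves.
*/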
static int send_udp_packets(int vary_dst_ip)
{
struct ethhdr eh = {
.h_source = BOND1_MAC,
.h_dest = BOND2_MAC,
.h_proto = htons(ETH_P_IP),
};
struct iphdr iph = {};
struct udphdr uh = {};
uint8_t buf[128];
int i, s = -1;
int ifindex;
s = socket(AF_PACKET, SOCK_RAW, IPPROTO_RAW);
if (!ASSERT_GE(s, 0, "socket"))
goto err;
ifindex = if_nametoindex("bond1");
if (!ASSERT_GT(ifindex, 0, "get bond1 ifindex"))
goto err;
iph.ihl = 5;
iph.version = 4;
iph.tos = 16;
iph.id = 1;
iph.ttl = 64;
iph.protocol = IPPROTO_UDP;
iph.saddr = 1;
iph.daddr = 2;
iph.tot_len = htons(sizeof(buf) - ETH_HLEN);
iph.check = 0;
for (i = 1; i <= NPACKETS; i++) {
int n;
struct sockaddr_ll saddr_ll = {
.sll_ifindex = ifindex,
.sll_halen = ETH_ALEN,
.sll_addr = BOND2_MAC,
};
/* vary the UDP destination port for even distribution with roundrobin/xor modes */
uh.dest++;
if (vary_dst_ip)
iph.daddr++;
/* construct a packet */
memcpy(buf, &eh, sizeof(eh));
memcpy(buf + sizeof(eh), &iph, sizeof(iph));
memcpy(buf + sizeof(eh) + sizeof(iph), &uh, sizeof(uh));
n = sendto(s, buf, sizeof(buf), 0, (struct sockaddr *)&saddr_ll, sizeof(saddr_ll));
if (!ASSERT_EQ(n, sizeof(buf), "sendto"))
goto err;
}
return 0;
err:
if (s >= 0)
close(s);
return -1;
}
static void test_xdp_bonding_with_mode(struct skeletons *skeletons, int mode, int xmit_policy)
{
int bond1_rx;
if (bonding_setup(skeletons, mode, xmit_policy, BOND_BOTH_AND_ATTACH))
goto out;
if (send_udp_packets(xmit_policy != BOND_XMIT_POLICY_LAYER34))
goto out;
bond1_rx = get_rx_packets("bond1");
ASSERT_EQ(bond1_rx, NPACKETS, "expected NPACKETS packets to be received on bond1");
switch (mode) {
case BOND_MODE_ROUNDROBIN:
case BOND_MODE_XOR: {
int veth1_rx = get_rx_packets("veth1_1");
int veth2_rx = get_rx_packets("veth1_2");
int diff = abs(veth1_rx - veth2_rx);
ASSERT_GE(veth1_rx + veth2_rx, NPACKETS, "expected more packets");
switch (xmit_policy) {
case BOND_XMIT_POLICY_LAYER2:
ASSERT_GE(diff, NPACKETS,
"expected packets on only one of the interfaces");
break;
case BOND_XMIT_POLICY_LAYER23:
case BOND_XMIT_POLICY_LAYER34:
ASSERT_LT(diff, NPACKETS/2,
"expected even distribution of packets");
break;
default:
PRINT_FAIL("Unimplemented xmit_policy=%d\n", xmit_policy);
break;
}
break;
}
case BOND_MODE_ACTIVEBACKUP: {
int veth1_rx = get_rx_packets("veth1_1");
int veth2_rx = get_rx_packets("veth1_2");
int diff = abs(veth1_rx - veth2_rx);
ASSERT_GE(diff, NPACKETS,
"expected packets on only one of the interfaces");
break;
}
default:
PRINT_FAIL("Unimplemented xmit_policy=%d\n", xmit_policy);
break;
}
out:
bonding_cleanup(skeletons);
}
/* Test broadcast redirection using xdp_redirect_map_multi_prog: add all
* the interfaces to the map and check that broadcasting won't send the
* packet back to either the ingress bond device (bond2) or its slave
* (veth2_1).
*/
static void test_xdp_bonding_redirect_multi(struct skeletons *skeletons)
{
static const char * const ifaces[] = {"bond2", "veth2_1", "veth2_2"};
int veth1_1_rx, veth1_2_rx;
int err;
if (bonding_setup(skeletons, BOND_MODE_ROUNDROBIN, BOND_XMIT_POLICY_LAYER23,
BOND_ONE_NO_ATTACH))
goto out;
if (!ASSERT_OK(setns_by_name("ns_dst"), "could not set netns to ns_dst"))
goto out;
/* populate the devmap with the relevant interfaces */
for (int i = 0; i < ARRAY_SIZE(ifaces); i++) {
int ifindex = if_nametoindex(ifaces[i]);
int map_fd = bpf_map__fd(skeletons->xdp_redirect_multi_kern->maps.map_all);
if (!ASSERT_GT(ifindex, 0, "could not get interface index"))
goto out;
err = bpf_map_update_elem(map_fd, &ifindex, &ifindex, 0);
if (!ASSERT_OK(err, "add interface to map_all"))
goto out;
}
if (xdp_attach(skeletons,
skeletons->xdp_redirect_multi_kern->progs.xdp_redirect_map_multi_prog,
"bond2"))
goto out;
restore_root_netns();
if (send_udp_packets(BOND_MODE_ROUNDROBIN))
goto out;
veth1_1_rx = get_rx_packets("veth1_1");
veth1_2_rx = get_rx_packets("veth1_2");
ASSERT_EQ(veth1_1_rx, 0, "expected no packets on veth1_1");
ASSERT_GE(veth1_2_rx, NPACKETS, "expected packets on veth1_2");
out:
restore_root_netns();
bonding_cleanup(skeletons);
}
/* Test that XDP programs cannot be attached to both the bond master and slaves simultaneously */
static void test_xdp_bonding_attach(struct skeletons *skeletons)
{
struct bpf_link *link = NULL;
struct bpf_link *link2 = NULL;
int veth, bond, err;
if (!ASSERT_OK(system("ip link add veth type veth"), "add veth"))
goto out;
if (!ASSERT_OK(system("ip link add bond type bond"), "add bond"))
goto out;
veth = if_nametoindex("veth");
if (!ASSERT_GT(veth, 0, "if_nametoindex veth"))
goto out;
bond = if_nametoindex("bond");
if (!ASSERT_GT(bond, 0, "if_nametoindex bond"))
goto out;
/* enslaving with an XDP program loaded is allowed */
link = bpf_program__attach_xdp(skeletons->xdp_dummy->progs.xdp_dummy_prog, veth);
if (!ASSERT_OK_PTR(link, "attach program to veth"))
goto out;
err = system("ip link set veth master bond");
if (!ASSERT_OK(err, "set veth master"))
goto out;
bpf_link__destroy(link);
link = NULL;
/* attaching to slave when master has no program is allowed */
link = bpf_program__attach_xdp(skeletons->xdp_dummy->progs.xdp_dummy_prog, veth);
if (!ASSERT_OK_PTR(link, "attach program to slave when enslaved"))
goto out;
/* attaching to master not allowed when slave has program loaded */
link2 = bpf_program__attach_xdp(skeletons->xdp_dummy->progs.xdp_dummy_prog, bond);
if (!ASSERT_ERR_PTR(link2, "attach program to master when slave has program"))
goto out;
bpf_link__destroy(link);
link = NULL;
/* attaching XDP program to master allowed when slave has no program */
link = bpf_program__attach_xdp(skeletons->xdp_dummy->progs.xdp_dummy_prog, bond);
if (!ASSERT_OK_PTR(link, "attach program to master"))
goto out;
/* attaching to slave not allowed when master has program loaded */
link2 = bpf_program__attach_xdp(skeletons->xdp_dummy->progs.xdp_dummy_prog, veth);
if (!ASSERT_ERR_PTR(link2, "attach program to slave when master has program"))
goto out;
bpf_link__destroy(link);
link = NULL;
/* test program unwinding with a non-XDP slave */
if (!ASSERT_OK(system("ip link add vxlan type vxlan id 1 remote 1.2.3.4 dstport 0 dev lo"),
"add vxlan"))
goto out;
err = system("ip link set vxlan master bond");
if (!ASSERT_OK(err, "set vxlan master"))
goto out;
/* attaching not allowed when one slave does not support XDP */
link = bpf_program__attach_xdp(skeletons->xdp_dummy->progs.xdp_dummy_prog, bond);
if (!ASSERT_ERR_PTR(link, "attach program to master when slave does not support XDP"))
goto out;
out:
bpf_link__destroy(link);
bpf_link__destroy(link2);
system("ip link del veth");
system("ip link del bond");
system("ip link del vxlan");
}
/* Test with nested bonding devices to catch an issue with a negative jump label count */
static void test_xdp_bonding_nested(struct skeletons *skeletons)
{
struct bpf_link *link = NULL;
int bond, err;
if (!ASSERT_OK(system("ip link add bond type bond"), "add bond"))
goto out;
bond = if_nametoindex("bond");
if (!ASSERT_GT(bond, 0, "if_nametoindex bond"))
goto out;
if (!ASSERT_OK(system("ip link add bond_nest1 type bond"), "add bond_nest1"))
goto out;
err = system("ip link set bond_nest1 master bond");
if (!ASSERT_OK(err, "set bond_nest1 master"))
goto out;
if (!ASSERT_OK(system("ip link add bond_nest2 type bond"), "add bond_nest1"))
goto out;
err = system("ip link set bond_nest2 master bond_nest1");
if (!ASSERT_OK(err, "set bond_nest2 master"))
goto out;
link = bpf_program__attach_xdp(skeletons->xdp_dummy->progs.xdp_dummy_prog, bond);
ASSERT_OK_PTR(link, "attach program to master");
out:
bpf_link__destroy(link);
system("ip link del bond");
system("ip link del bond_nest1");
system("ip link del bond_nest2");
}
static void test_xdp_bonding_features(struct skeletons *skeletons)
{
LIBBPF_OPTS(bpf_xdp_query_opts, query_opts);
int bond_idx, veth1_idx, err;
struct bpf_link *link = NULL;
if (!ASSERT_OK(system("ip link add bond type bond"), "add bond"))
goto out;
bond_idx = if_nametoindex("bond");
if (!ASSERT_GT(bond_idx, 0, "if_nametoindex bond"))
goto out;
/* query default xdp-feature for bond device */
err = bpf_xdp_query(bond_idx, XDP_FLAGS_DRV_MODE, &query_opts);
if (!ASSERT_OK(err, "bond bpf_xdp_query"))
goto out;
if (!ASSERT_EQ(query_opts.feature_flags, NETDEV_XDP_ACT_MASK,
"bond query_opts.feature_flags"))
goto out;
if (!ASSERT_OK(system("ip link add veth0 type veth peer name veth1"),
"add veth{0,1} pair"))
goto out;
if (!ASSERT_OK(system("ip link add veth2 type veth peer name veth3"),
"add veth{2,3} pair"))
goto out;
if (!ASSERT_OK(system("ip link set veth0 master bond"),
"add veth0 to master bond"))
goto out;
/* xdp-feature for bond device should be obtained from the single slave
* device (veth0)
*/
err = bpf_xdp_query(bond_idx, XDP_FLAGS_DRV_MODE, &query_opts);
if (!ASSERT_OK(err, "bond bpf_xdp_query"))
goto out;
if (!ASSERT_EQ(query_opts.feature_flags,
NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
NETDEV_XDP_ACT_RX_SG,
"bond query_opts.feature_flags"))
goto out;
veth1_idx = if_nametoindex("veth1");
if (!ASSERT_GT(veth1_idx, 0, "if_nametoindex veth1"))
goto out;
link = bpf_program__attach_xdp(skeletons->xdp_dummy->progs.xdp_dummy_prog,
veth1_idx);
if (!ASSERT_OK_PTR(link, "attach program to veth1"))
goto out;
/* xdp-features for veth0 have changed */
err = bpf_xdp_query(bond_idx, XDP_FLAGS_DRV_MODE, &query_opts);
if (!ASSERT_OK(err, "bond bpf_xdp_query"))
goto out;
if (!ASSERT_EQ(query_opts.feature_flags,
NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
NETDEV_XDP_ACT_RX_SG | NETDEV_XDP_ACT_NDO_XMIT |
NETDEV_XDP_ACT_NDO_XMIT_SG,
"bond query_opts.feature_flags"))
goto out;
if (!ASSERT_OK(system("ip link set veth2 master bond"),
"add veth2 to master bond"))
goto out;
err = bpf_xdp_query(bond_idx, XDP_FLAGS_DRV_MODE, &query_opts);
if (!ASSERT_OK(err, "bond bpf_xdp_query"))
goto out;
/* xdp-feature for the bond device should be set to the most restrictive
* value obtained from the attached slave devices (veth0 and veth2)
*/
if (!ASSERT_EQ(query_opts.feature_flags,
NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
NETDEV_XDP_ACT_RX_SG,
"bond query_opts.feature_flags"))
goto out;
if (!ASSERT_OK(system("ip link set veth2 nomaster"),
"del veth2 to master bond"))
goto out;
err = bpf_xdp_query(bond_idx, XDP_FLAGS_DRV_MODE, &query_opts);
if (!ASSERT_OK(err, "bond bpf_xdp_query"))
goto out;
if (!ASSERT_EQ(query_opts.feature_flags,
NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
NETDEV_XDP_ACT_RX_SG | NETDEV_XDP_ACT_NDO_XMIT |
NETDEV_XDP_ACT_NDO_XMIT_SG,
"bond query_opts.feature_flags"))
goto out;
if (!ASSERT_OK(system("ip link set veth0 nomaster"),
"del veth0 to master bond"))
goto out;
err = bpf_xdp_query(bond_idx, XDP_FLAGS_DRV_MODE, &query_opts);
if (!ASSERT_OK(err, "bond bpf_xdp_query"))
goto out;
ASSERT_EQ(query_opts.feature_flags, NETDEV_XDP_ACT_MASK,
"bond query_opts.feature_flags");
out:
bpf_link__destroy(link);
system("ip link del veth0");
system("ip link del veth2");
system("ip link del bond");
}
static int libbpf_debug_print(enum libbpf_print_level level,
const char *format, va_list args)
{
if (level != LIBBPF_WARN)
vprintf(format, args);
return 0;
}
struct bond_test_case {
char *name;
int mode;
int xmit_policy;
};
static struct bond_test_case bond_test_cases[] = {
{ "xdp_bonding_roundrobin", BOND_MODE_ROUNDROBIN, BOND_XMIT_POLICY_LAYER23, },
{ "xdp_bonding_activebackup", BOND_MODE_ACTIVEBACKUP, BOND_XMIT_POLICY_LAYER23 },
{ "xdp_bonding_xor_layer2", BOND_MODE_XOR, BOND_XMIT_POLICY_LAYER2, },
{ "xdp_bonding_xor_layer23", BOND_MODE_XOR, BOND_XMIT_POLICY_LAYER23, },
{ "xdp_bonding_xor_layer34", BOND_MODE_XOR, BOND_XMIT_POLICY_LAYER34, },
};
void serial_test_xdp_bonding(void)
{
libbpf_print_fn_t old_print_fn;
struct skeletons skeletons = {};
int i;
old_print_fn = libbpf_set_print(libbpf_debug_print);
root_netns_fd = open("/proc/self/ns/net", O_RDONLY);
if (!ASSERT_GE(root_netns_fd, 0, "open /proc/self/ns/net"))
goto out;
skeletons.xdp_dummy = xdp_dummy__open_and_load();
if (!ASSERT_OK_PTR(skeletons.xdp_dummy, "xdp_dummy__open_and_load"))
goto out;
skeletons.xdp_tx = xdp_tx__open_and_load();
if (!ASSERT_OK_PTR(skeletons.xdp_tx, "xdp_tx__open_and_load"))
goto out;
skeletons.xdp_redirect_multi_kern = xdp_redirect_multi_kern__open_and_load();
if (!ASSERT_OK_PTR(skeletons.xdp_redirect_multi_kern,
"xdp_redirect_multi_kern__open_and_load"))
goto out;
if (test__start_subtest("xdp_bonding_attach"))
test_xdp_bonding_attach(&skeletons);
if (test__start_subtest("xdp_bonding_nested"))
test_xdp_bonding_nested(&skeletons);
if (test__start_subtest("xdp_bonding_features"))
test_xdp_bonding_features(&skeletons);
for (i = 0; i < ARRAY_SIZE(bond_test_cases); i++) {
struct bond_test_case *test_case = &bond_test_cases[i];
if (test__start_subtest(test_case->name))
test_xdp_bonding_with_mode(
&skeletons,
test_case->mode,
test_case->xmit_policy);
}
if (test__start_subtest("xdp_bonding_redirect_multi"))
test_xdp_bonding_redirect_multi(&skeletons);
out:
xdp_dummy__destroy(skeletons.xdp_dummy);
xdp_tx__destroy(skeletons.xdp_tx);
xdp_redirect_multi_kern__destroy(skeletons.xdp_redirect_multi_kern);
libbpf_set_print(old_print_fn);
if (root_netns_fd >= 0)
close(root_netns_fd);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/xdp_bonding.c |
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include "test_stack_var_off.skel.h"
/* Test reads and writes to the stack performed with offsets that are not
* statically known.
*/
void test_stack_var_off(void)
{
int duration = 0;
struct test_stack_var_off *skel;
skel = test_stack_var_off__open_and_load();
if (CHECK(!skel, "skel_open", "failed to open skeleton\n"))
return;
/* Give pid to bpf prog so it doesn't trigger for anyone else. */
skel->bss->test_pid = getpid();
/* Initialize the probe's input. */
skel->bss->input[0] = 2;
skel->bss->input[1] = 42; /* This will be returned in probe_res. */
if (!ASSERT_OK(test_stack_var_off__attach(skel), "skel_attach"))
goto cleanup;
/* Trigger probe. */
usleep(1);
if (CHECK(skel->bss->probe_res != 42, "check_probe_res",
"wrong probe res: %d\n", skel->bss->probe_res))
goto cleanup;
cleanup:
test_stack_var_off__destroy(skel);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/stack_var_off.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates.*/
#define _GNU_SOURCE
#include <unistd.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <test_progs.h>
#include "cgrp_ls_tp_btf.skel.h"
#include "cgrp_ls_recursion.skel.h"
#include "cgrp_ls_attach_cgroup.skel.h"
#include "cgrp_ls_negative.skel.h"
#include "cgrp_ls_sleepable.skel.h"
#include "network_helpers.h"
#include "cgroup_helpers.h"
struct socket_cookie {
__u64 cookie_key;
__u64 cookie_value;
};
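/* Exercise user-space map operations on a cgroup local storage map,
* keyed by a cgroup fd, then attach the tp_btf programs and verify
* that sys_enter/sys_exit fire for the target pid without any storage
* mismatches.
*/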
static void test_tp_btf(int cgroup_fd)
{
struct cgrp_ls_tp_btf *skel;
long val1 = 1, val2 = 0;
int err;
skel = cgrp_ls_tp_btf__open_and_load();
if (!ASSERT_OK_PTR(skel, "skel_open_and_load"))
return;
/* populate a value in map_b */
err = bpf_map_update_elem(bpf_map__fd(skel->maps.map_b), &cgroup_fd, &val1, BPF_ANY);
if (!ASSERT_OK(err, "map_update_elem"))
goto out;
/* check value */
err = bpf_map_lookup_elem(bpf_map__fd(skel->maps.map_b), &cgroup_fd, &val2);
if (!ASSERT_OK(err, "map_lookup_elem"))
goto out;
if (!ASSERT_EQ(val2, 1, "map_lookup_elem, invalid val"))
goto out;
/* delete value */
err = bpf_map_delete_elem(bpf_map__fd(skel->maps.map_b), &cgroup_fd);
if (!ASSERT_OK(err, "map_delete_elem"))
goto out;
skel->bss->target_pid = syscall(SYS_gettid);
err = cgrp_ls_tp_btf__attach(skel);
if (!ASSERT_OK(err, "skel_attach"))
goto out;
syscall(SYS_gettid);
syscall(SYS_gettid);
skel->bss->target_pid = 0;
/* 3x syscalls: 1x attach and 2x gettid */
ASSERT_EQ(skel->bss->enter_cnt, 3, "enter_cnt");
ASSERT_EQ(skel->bss->exit_cnt, 3, "exit_cnt");
ASSERT_EQ(skel->bss->mismatch_cnt, 0, "mismatch_cnt");
out:
cgrp_ls_tp_btf__destroy(skel);
}
static void test_attach_cgroup(int cgroup_fd)
{
int server_fd = 0, client_fd = 0, err = 0;
socklen_t addr_len = sizeof(struct sockaddr_in6);
struct cgrp_ls_attach_cgroup *skel;
__u32 cookie_expected_value;
struct sockaddr_in6 addr;
struct socket_cookie val;
skel = cgrp_ls_attach_cgroup__open_and_load();
if (!ASSERT_OK_PTR(skel, "skel_open"))
return;
skel->links.set_cookie = bpf_program__attach_cgroup(
skel->progs.set_cookie, cgroup_fd);
if (!ASSERT_OK_PTR(skel->links.set_cookie, "prog_attach"))
goto out;
skel->links.update_cookie_sockops = bpf_program__attach_cgroup(
skel->progs.update_cookie_sockops, cgroup_fd);
if (!ASSERT_OK_PTR(skel->links.update_cookie_sockops, "prog_attach"))
goto out;
skel->links.update_cookie_tracing = bpf_program__attach(
skel->progs.update_cookie_tracing);
if (!ASSERT_OK_PTR(skel->links.update_cookie_tracing, "prog_attach"))
goto out;
server_fd = start_server(AF_INET6, SOCK_STREAM, "::1", 0, 0);
if (!ASSERT_GE(server_fd, 0, "start_server"))
goto out;
client_fd = connect_to_fd(server_fd, 0);
if (!ASSERT_GE(client_fd, 0, "connect_to_fd"))
goto close_server_fd;
err = bpf_map_lookup_elem(bpf_map__fd(skel->maps.socket_cookies),
&cgroup_fd, &val);
if (!ASSERT_OK(err, "map_lookup(socket_cookies)"))
goto close_client_fd;
err = getsockname(client_fd, (struct sockaddr *)&addr, &addr_len);
if (!ASSERT_OK(err, "getsockname"))
goto close_client_fd;
cookie_expected_value = (ntohs(addr.sin6_port) << 8) | 0xFF;
ASSERT_EQ(val.cookie_value, cookie_expected_value, "cookie_value");
close_client_fd:
close(client_fd);
close_server_fd:
close(server_fd);
out:
cgrp_ls_attach_cgroup__destroy(skel);
}
static void test_recursion(int cgroup_fd)
{
struct cgrp_ls_recursion *skel;
int err;
skel = cgrp_ls_recursion__open_and_load();
if (!ASSERT_OK_PTR(skel, "skel_open_and_load"))
return;
err = cgrp_ls_recursion__attach(skel);
if (!ASSERT_OK(err, "skel_attach"))
goto out;
/* trigger sys_enter, make sure it does not cause deadlock */
syscall(SYS_gettid);
out:
cgrp_ls_recursion__destroy(skel);
}
static void test_negative(void)
{
struct cgrp_ls_negative *skel;
skel = cgrp_ls_negative__open_and_load();
if (!ASSERT_ERR_PTR(skel, "skel_open_and_load")) {
cgrp_ls_negative__destroy(skel);
return;
}
}
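/* Attach a sleepable cgroup iterator scoped to this cgroup only
* (BPF_CGROUP_ITER_SELF_ONLY) and read from it once to trigger the
* program, which records the cgroup id for the check below.
*/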
static void test_cgroup_iter_sleepable(int cgroup_fd, __u64 cgroup_id)
{
DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
union bpf_iter_link_info linfo;
struct cgrp_ls_sleepable *skel;
struct bpf_link *link;
int err, iter_fd;
char buf[16];
skel = cgrp_ls_sleepable__open();
if (!ASSERT_OK_PTR(skel, "skel_open"))
return;
bpf_program__set_autoload(skel->progs.cgroup_iter, true);
err = cgrp_ls_sleepable__load(skel);
if (!ASSERT_OK(err, "skel_load"))
goto out;
memset(&linfo, 0, sizeof(linfo));
linfo.cgroup.cgroup_fd = cgroup_fd;
linfo.cgroup.order = BPF_CGROUP_ITER_SELF_ONLY;
opts.link_info = &linfo;
opts.link_info_len = sizeof(linfo);
link = bpf_program__attach_iter(skel->progs.cgroup_iter, &opts);
if (!ASSERT_OK_PTR(link, "attach_iter"))
goto out;
iter_fd = bpf_iter_create(bpf_link__fd(link));
if (!ASSERT_GE(iter_fd, 0, "iter_create"))
goto out;
/* trigger the program run */
(void)read(iter_fd, buf, sizeof(buf));
ASSERT_EQ(skel->bss->cgroup_id, cgroup_id, "cgroup_id");
close(iter_fd);
out:
cgrp_ls_sleepable__destroy(skel);
}
static void test_yes_rcu_lock(__u64 cgroup_id)
{
struct cgrp_ls_sleepable *skel;
int err;
skel = cgrp_ls_sleepable__open();
if (!ASSERT_OK_PTR(skel, "skel_open"))
return;
skel->bss->target_pid = syscall(SYS_gettid);
bpf_program__set_autoload(skel->progs.yes_rcu_lock, true);
err = cgrp_ls_sleepable__load(skel);
if (!ASSERT_OK(err, "skel_load"))
goto out;
err = cgrp_ls_sleepable__attach(skel);
if (!ASSERT_OK(err, "skel_attach"))
goto out;
syscall(SYS_getpgid);
ASSERT_EQ(skel->bss->cgroup_id, cgroup_id, "cgroup_id");
out:
cgrp_ls_sleepable__destroy(skel);
}
static void test_no_rcu_lock(void)
{
struct cgrp_ls_sleepable *skel;
int err;
skel = cgrp_ls_sleepable__open();
if (!ASSERT_OK_PTR(skel, "skel_open"))
return;
bpf_program__set_autoload(skel->progs.no_rcu_lock, true);
err = cgrp_ls_sleepable__load(skel);
ASSERT_ERR(err, "skel_load");
cgrp_ls_sleepable__destroy(skel);
}
void test_cgrp_local_storage(void)
{
__u64 cgroup_id;
int cgroup_fd;
cgroup_fd = test__join_cgroup("/cgrp_local_storage");
if (!ASSERT_GE(cgroup_fd, 0, "join_cgroup /cgrp_local_storage"))
return;
cgroup_id = get_cgroup_id("/cgrp_local_storage");
if (test__start_subtest("tp_btf"))
test_tp_btf(cgroup_fd);
if (test__start_subtest("attach_cgroup"))
test_attach_cgroup(cgroup_fd);
if (test__start_subtest("recursion"))
test_recursion(cgroup_fd);
if (test__start_subtest("negative"))
test_negative();
if (test__start_subtest("cgroup_iter_sleepable"))
test_cgroup_iter_sleepable(cgroup_fd, cgroup_id);
if (test__start_subtest("yes_rcu_lock"))
test_yes_rcu_lock(cgroup_id);
if (test__start_subtest("no_rcu_lock"))
test_no_rcu_lock();
close(cgroup_fd);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/cgrp_local_storage.c |
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include "cgroup_helpers.h"
#include <linux/tcp.h>
#include <linux/netlink.h>
#include "sockopt_sk.skel.h"
#ifndef SOL_TCP
#define SOL_TCP IPPROTO_TCP
#endif
#define SOL_CUSTOM 0xdeadbeef
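/* Exercise a series of {get,set}sockopt calls against the attached BPF
* hooks: some options are passed through or rewritten, IP_TTL is denied
* with EPERM, and SOL_CUSTOM is a fake level serviced entirely by the
* BPF programs.
*/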
static int getsetsockopt(void)
{
int fd, err;
union {
char u8[4];
__u32 u32;
char cc[16]; /* TCP_CA_NAME_MAX */
struct tcp_zerocopy_receive zc;
} buf = {};
socklen_t optlen;
char *big_buf = NULL;
fd = socket(AF_INET, SOCK_STREAM, 0);
if (fd < 0) {
log_err("Failed to create socket");
return -1;
}
/* IP_TOS - BPF bypass */
optlen = getpagesize() * 2;
big_buf = calloc(1, optlen);
if (!big_buf) {
log_err("Couldn't allocate two pages");
goto err;
}
*(int *)big_buf = 0x08;
err = setsockopt(fd, SOL_IP, IP_TOS, big_buf, optlen);
if (err) {
log_err("Failed to call setsockopt(IP_TOS)");
goto err;
}
memset(big_buf, 0, optlen);
optlen = 1;
err = getsockopt(fd, SOL_IP, IP_TOS, big_buf, &optlen);
if (err) {
log_err("Failed to call getsockopt(IP_TOS)");
goto err;
}
if (*big_buf != 0x08) {
log_err("Unexpected getsockopt(IP_TOS) optval 0x%x != 0x08",
(int)*big_buf);
goto err;
}
/* IP_TTL - EPERM */
buf.u8[0] = 1;
err = setsockopt(fd, SOL_IP, IP_TTL, &buf, 1);
if (!err || errno != EPERM) {
log_err("Unexpected success from setsockopt(IP_TTL)");
goto err;
}
/* SOL_CUSTOM - handled by BPF */
buf.u8[0] = 0x01;
err = setsockopt(fd, SOL_CUSTOM, 0, &buf, 1);
if (err) {
log_err("Failed to call setsockopt");
goto err;
}
buf.u32 = 0x00;
optlen = 4;
err = getsockopt(fd, SOL_CUSTOM, 0, &buf, &optlen);
if (err) {
log_err("Failed to call getsockopt");
goto err;
}
if (optlen != 1) {
log_err("Unexpected optlen %d != 1", optlen);
goto err;
}
if (buf.u8[0] != 0x01) {
log_err("Unexpected buf[0] 0x%02x != 0x01", buf.u8[0]);
goto err;
}
/* IP_FREEBIND - BPF can't access optval past PAGE_SIZE */
optlen = getpagesize() * 2;
memset(big_buf, 0, optlen);
err = setsockopt(fd, SOL_IP, IP_FREEBIND, big_buf, optlen);
if (err != 0) {
log_err("Failed to call setsockopt, ret=%d", err);
goto err;
}
err = getsockopt(fd, SOL_IP, IP_FREEBIND, big_buf, &optlen);
if (err != 0) {
log_err("Failed to call getsockopt, ret=%d", err);
goto err;
}
if (optlen != 1 || *(__u8 *)big_buf != 0x55) {
log_err("Unexpected IP_FREEBIND getsockopt, optlen=%d, optval=0x%x",
optlen, *(__u8 *)big_buf);
}
/* SO_SNDBUF is overwritten */
buf.u32 = 0x01010101;
err = setsockopt(fd, SOL_SOCKET, SO_SNDBUF, &buf, 4);
if (err) {
log_err("Failed to call setsockopt(SO_SNDBUF)");
goto err;
}
buf.u32 = 0x00;
optlen = 4;
err = getsockopt(fd, SOL_SOCKET, SO_SNDBUF, &buf, &optlen);
if (err) {
log_err("Failed to call getsockopt(SO_SNDBUF)");
goto err;
}
if (buf.u32 != 0x55AA*2) {
log_err("Unexpected getsockopt(SO_SNDBUF) 0x%x != 0x55AA*2",
buf.u32);
goto err;
}
/* TCP_CONGESTION can extend the string */
strcpy(buf.cc, "nv");
err = setsockopt(fd, SOL_TCP, TCP_CONGESTION, &buf, strlen("nv"));
if (err) {
log_err("Failed to call setsockopt(TCP_CONGESTION)");
goto err;
}
optlen = sizeof(buf.cc);
err = getsockopt(fd, SOL_TCP, TCP_CONGESTION, &buf, &optlen);
if (err) {
log_err("Failed to call getsockopt(TCP_CONGESTION)");
goto err;
}
if (strcmp(buf.cc, "cubic") != 0) {
log_err("Unexpected getsockopt(TCP_CONGESTION) %s != %s",
buf.cc, "cubic");
goto err;
}
/* TCP_ZEROCOPY_RECEIVE triggers */
memset(&buf, 0, sizeof(buf));
optlen = sizeof(buf.zc);
err = getsockopt(fd, SOL_TCP, TCP_ZEROCOPY_RECEIVE, &buf, &optlen);
if (err) {
log_err("Unexpected getsockopt(TCP_ZEROCOPY_RECEIVE) err=%d errno=%d",
err, errno);
goto err;
}
memset(&buf, 0, sizeof(buf));
buf.zc.address = 12345; /* Not page aligned. Rejected by tcp_zerocopy_receive() */
optlen = sizeof(buf.zc);
errno = 0;
err = getsockopt(fd, SOL_TCP, TCP_ZEROCOPY_RECEIVE, &buf, &optlen);
if (errno != EINVAL) {
log_err("Unexpected getsockopt(TCP_ZEROCOPY_RECEIVE) err=%d errno=%d",
err, errno);
goto err;
}
/* optval=NULL case is handled correctly */
close(fd);
fd = socket(AF_NETLINK, SOCK_RAW, 0);
	if (fd < 0) {
		log_err("Failed to create AF_NETLINK socket");
		free(big_buf);
		return -1;
	}
buf.u32 = 1;
optlen = sizeof(__u32);
err = setsockopt(fd, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP, &buf, optlen);
if (err) {
log_err("Unexpected getsockopt(NETLINK_ADD_MEMBERSHIP) err=%d errno=%d",
err, errno);
goto err;
}
optlen = 0;
err = getsockopt(fd, SOL_NETLINK, NETLINK_LIST_MEMBERSHIPS, NULL, &optlen);
if (err) {
log_err("Unexpected getsockopt(NETLINK_LIST_MEMBERSHIPS) err=%d errno=%d",
err, errno);
goto err;
}
ASSERT_EQ(optlen, 8, "Unexpected NETLINK_LIST_MEMBERSHIPS value");
free(big_buf);
close(fd);
return 0;
err:
free(big_buf);
close(fd);
return -1;
}
static void run_test(int cgroup_fd)
{
struct sockopt_sk *skel;
skel = sockopt_sk__open_and_load();
if (!ASSERT_OK_PTR(skel, "skel_load"))
goto cleanup;
skel->bss->page_size = getpagesize();
skel->links._setsockopt =
bpf_program__attach_cgroup(skel->progs._setsockopt, cgroup_fd);
if (!ASSERT_OK_PTR(skel->links._setsockopt, "setsockopt_link"))
goto cleanup;
skel->links._getsockopt =
bpf_program__attach_cgroup(skel->progs._getsockopt, cgroup_fd);
if (!ASSERT_OK_PTR(skel->links._getsockopt, "getsockopt_link"))
goto cleanup;
ASSERT_OK(getsetsockopt(), "getsetsockopt");
cleanup:
sockopt_sk__destroy(skel);
}
void test_sockopt_sk(void)
{
int cgroup_fd;
cgroup_fd = test__join_cgroup("/sockopt_sk");
if (!ASSERT_GE(cgroup_fd, 0, "join_cgroup /sockopt_sk"))
return;
run_test(cgroup_fd);
close(cgroup_fd);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/sockopt_sk.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook */
#include <test_progs.h>
void serial_test_fexit_stress(void)
{
int bpf_max_tramp_links, err, i;
int *fd, *fexit_fd, *link_fd;
bpf_max_tramp_links = get_bpf_max_tramp_links();
if (!ASSERT_GE(bpf_max_tramp_links, 1, "bpf_max_tramp_links"))
return;
fd = calloc(bpf_max_tramp_links * 2, sizeof(*fd));
if (!ASSERT_OK_PTR(fd, "fd"))
return;
fexit_fd = fd;
link_fd = fd + bpf_max_tramp_links;
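	/* fd[] is split in half: prog fds first, link fds second. calloc()
	 * zero-fills the array, which lets the cleanup loop below skip any
	 * slots that were never populated.
	 */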
const struct bpf_insn trace_program[] = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
};
LIBBPF_OPTS(bpf_prog_load_opts, trace_opts,
.expected_attach_type = BPF_TRACE_FEXIT,
);
LIBBPF_OPTS(bpf_test_run_opts, topts);
err = libbpf_find_vmlinux_btf_id("bpf_fentry_test1",
trace_opts.expected_attach_type);
if (!ASSERT_GT(err, 0, "find_vmlinux_btf_id"))
goto out;
trace_opts.attach_btf_id = err;
for (i = 0; i < bpf_max_tramp_links; i++) {
fexit_fd[i] = bpf_prog_load(BPF_PROG_TYPE_TRACING, NULL, "GPL",
trace_program,
sizeof(trace_program) / sizeof(struct bpf_insn),
&trace_opts);
if (!ASSERT_GE(fexit_fd[i], 0, "fexit load"))
goto out;
link_fd[i] = bpf_link_create(fexit_fd[i], 0, BPF_TRACE_FEXIT, NULL);
if (!ASSERT_GE(link_fd[i], 0, "fexit attach"))
goto out;
}
err = bpf_prog_test_run_opts(fexit_fd[0], &topts);
ASSERT_OK(err, "bpf_prog_test_run_opts");
out:
for (i = 0; i < bpf_max_tramp_links; i++) {
if (link_fd[i])
close(link_fd[i]);
if (fexit_fd[i])
close(fexit_fd[i]);
}
free(fd);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/fexit_stress.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#include <test_progs.h>
#include <bpf/btf.h>
#include "btf_helpers.h"
static void gen_btf(struct btf *btf)
{
const struct btf_var_secinfo *vi;
const struct btf_type *t;
const struct btf_member *m;
const struct btf_enum64 *v64;
const struct btf_enum *v;
const struct btf_param *p;
int id, err, str_off;
str_off = btf__find_str(btf, "int");
ASSERT_EQ(str_off, -ENOENT, "int_str_missing_off");
str_off = btf__add_str(btf, "int");
ASSERT_EQ(str_off, 1, "int_str_off");
str_off = btf__find_str(btf, "int");
ASSERT_EQ(str_off, 1, "int_str_found_off");
/* BTF_KIND_INT */
id = btf__add_int(btf, "int", 4, BTF_INT_SIGNED);
ASSERT_EQ(id, 1, "int_id");
t = btf__type_by_id(btf, 1);
/* should re-use previously added "int" string */
ASSERT_EQ(t->name_off, str_off, "int_name_off");
ASSERT_STREQ(btf__str_by_offset(btf, t->name_off), "int", "int_name");
ASSERT_EQ(btf_kind(t), BTF_KIND_INT, "int_kind");
ASSERT_EQ(t->size, 4, "int_sz");
ASSERT_EQ(btf_int_encoding(t), BTF_INT_SIGNED, "int_enc");
ASSERT_EQ(btf_int_bits(t), 32, "int_bits");
ASSERT_STREQ(btf_type_raw_dump(btf, 1),
"[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED", "raw_dump");
/* invalid int size */
id = btf__add_int(btf, "bad sz int", 7, 0);
ASSERT_ERR(id, "int_bad_sz");
/* invalid encoding */
id = btf__add_int(btf, "bad enc int", 4, 123);
ASSERT_ERR(id, "int_bad_enc");
/* NULL name */
id = btf__add_int(btf, NULL, 4, 0);
ASSERT_ERR(id, "int_bad_null_name");
/* empty name */
id = btf__add_int(btf, "", 4, 0);
ASSERT_ERR(id, "int_bad_empty_name");
/* PTR/CONST/VOLATILE/RESTRICT */
id = btf__add_ptr(btf, 1);
ASSERT_EQ(id, 2, "ptr_id");
t = btf__type_by_id(btf, 2);
ASSERT_EQ(btf_kind(t), BTF_KIND_PTR, "ptr_kind");
ASSERT_EQ(t->type, 1, "ptr_type");
ASSERT_STREQ(btf_type_raw_dump(btf, 2),
"[2] PTR '(anon)' type_id=1", "raw_dump");
id = btf__add_const(btf, 5); /* points forward to restrict */
ASSERT_EQ(id, 3, "const_id");
t = btf__type_by_id(btf, 3);
ASSERT_EQ(btf_kind(t), BTF_KIND_CONST, "const_kind");
ASSERT_EQ(t->type, 5, "const_type");
ASSERT_STREQ(btf_type_raw_dump(btf, 3),
"[3] CONST '(anon)' type_id=5", "raw_dump");
id = btf__add_volatile(btf, 3);
ASSERT_EQ(id, 4, "volatile_id");
t = btf__type_by_id(btf, 4);
ASSERT_EQ(btf_kind(t), BTF_KIND_VOLATILE, "volatile_kind");
ASSERT_EQ(t->type, 3, "volatile_type");
ASSERT_STREQ(btf_type_raw_dump(btf, 4),
"[4] VOLATILE '(anon)' type_id=3", "raw_dump");
id = btf__add_restrict(btf, 4);
ASSERT_EQ(id, 5, "restrict_id");
t = btf__type_by_id(btf, 5);
ASSERT_EQ(btf_kind(t), BTF_KIND_RESTRICT, "restrict_kind");
ASSERT_EQ(t->type, 4, "restrict_type");
ASSERT_STREQ(btf_type_raw_dump(btf, 5),
"[5] RESTRICT '(anon)' type_id=4", "raw_dump");
/* ARRAY */
id = btf__add_array(btf, 1, 2, 10); /* int *[10] */
ASSERT_EQ(id, 6, "array_id");
t = btf__type_by_id(btf, 6);
ASSERT_EQ(btf_kind(t), BTF_KIND_ARRAY, "array_kind");
ASSERT_EQ(btf_array(t)->index_type, 1, "array_index_type");
ASSERT_EQ(btf_array(t)->type, 2, "array_elem_type");
ASSERT_EQ(btf_array(t)->nelems, 10, "array_nelems");
ASSERT_STREQ(btf_type_raw_dump(btf, 6),
"[6] ARRAY '(anon)' type_id=2 index_type_id=1 nr_elems=10", "raw_dump");
/* STRUCT */
err = btf__add_field(btf, "field", 1, 0, 0);
ASSERT_ERR(err, "no_struct_field");
id = btf__add_struct(btf, "s1", 8);
ASSERT_EQ(id, 7, "struct_id");
err = btf__add_field(btf, "f1", 1, 0, 0);
ASSERT_OK(err, "f1_res");
err = btf__add_field(btf, "f2", 1, 32, 16);
ASSERT_OK(err, "f2_res");
t = btf__type_by_id(btf, 7);
ASSERT_STREQ(btf__str_by_offset(btf, t->name_off), "s1", "struct_name");
ASSERT_EQ(btf_kind(t), BTF_KIND_STRUCT, "struct_kind");
ASSERT_EQ(btf_vlen(t), 2, "struct_vlen");
ASSERT_EQ(btf_kflag(t), true, "struct_kflag");
ASSERT_EQ(t->size, 8, "struct_sz");
m = btf_members(t) + 0;
ASSERT_STREQ(btf__str_by_offset(btf, m->name_off), "f1", "f1_name");
ASSERT_EQ(m->type, 1, "f1_type");
ASSERT_EQ(btf_member_bit_offset(t, 0), 0, "f1_bit_off");
ASSERT_EQ(btf_member_bitfield_size(t, 0), 0, "f1_bit_sz");
m = btf_members(t) + 1;
ASSERT_STREQ(btf__str_by_offset(btf, m->name_off), "f2", "f2_name");
ASSERT_EQ(m->type, 1, "f2_type");
ASSERT_EQ(btf_member_bit_offset(t, 1), 32, "f2_bit_off");
ASSERT_EQ(btf_member_bitfield_size(t, 1), 16, "f2_bit_sz");
ASSERT_STREQ(btf_type_raw_dump(btf, 7),
"[7] STRUCT 's1' size=8 vlen=2\n"
"\t'f1' type_id=1 bits_offset=0\n"
"\t'f2' type_id=1 bits_offset=32 bitfield_size=16", "raw_dump");
/* UNION */
id = btf__add_union(btf, "u1", 8);
ASSERT_EQ(id, 8, "union_id");
/* invalid, non-zero offset */
err = btf__add_field(btf, "field", 1, 1, 0);
ASSERT_ERR(err, "no_struct_field");
err = btf__add_field(btf, "f1", 1, 0, 16);
ASSERT_OK(err, "f1_res");
t = btf__type_by_id(btf, 8);
ASSERT_STREQ(btf__str_by_offset(btf, t->name_off), "u1", "union_name");
ASSERT_EQ(btf_kind(t), BTF_KIND_UNION, "union_kind");
ASSERT_EQ(btf_vlen(t), 1, "union_vlen");
ASSERT_EQ(btf_kflag(t), true, "union_kflag");
ASSERT_EQ(t->size, 8, "union_sz");
m = btf_members(t) + 0;
ASSERT_STREQ(btf__str_by_offset(btf, m->name_off), "f1", "f1_name");
ASSERT_EQ(m->type, 1, "f1_type");
ASSERT_EQ(btf_member_bit_offset(t, 0), 0, "f1_bit_off");
ASSERT_EQ(btf_member_bitfield_size(t, 0), 16, "f1_bit_sz");
ASSERT_STREQ(btf_type_raw_dump(btf, 8),
"[8] UNION 'u1' size=8 vlen=1\n"
"\t'f1' type_id=1 bits_offset=0 bitfield_size=16", "raw_dump");
/* ENUM */
id = btf__add_enum(btf, "e1", 4);
ASSERT_EQ(id, 9, "enum_id");
err = btf__add_enum_value(btf, "v1", 1);
ASSERT_OK(err, "v1_res");
err = btf__add_enum_value(btf, "v2", 2);
ASSERT_OK(err, "v2_res");
t = btf__type_by_id(btf, 9);
ASSERT_STREQ(btf__str_by_offset(btf, t->name_off), "e1", "enum_name");
ASSERT_EQ(btf_kind(t), BTF_KIND_ENUM, "enum_kind");
ASSERT_EQ(btf_vlen(t), 2, "enum_vlen");
ASSERT_EQ(t->size, 4, "enum_sz");
v = btf_enum(t) + 0;
ASSERT_STREQ(btf__str_by_offset(btf, v->name_off), "v1", "v1_name");
ASSERT_EQ(v->val, 1, "v1_val");
v = btf_enum(t) + 1;
ASSERT_STREQ(btf__str_by_offset(btf, v->name_off), "v2", "v2_name");
ASSERT_EQ(v->val, 2, "v2_val");
ASSERT_STREQ(btf_type_raw_dump(btf, 9),
"[9] ENUM 'e1' encoding=UNSIGNED size=4 vlen=2\n"
"\t'v1' val=1\n"
"\t'v2' val=2", "raw_dump");
/* FWDs */
id = btf__add_fwd(btf, "struct_fwd", BTF_FWD_STRUCT);
ASSERT_EQ(id, 10, "struct_fwd_id");
t = btf__type_by_id(btf, 10);
ASSERT_STREQ(btf__str_by_offset(btf, t->name_off), "struct_fwd", "fwd_name");
ASSERT_EQ(btf_kind(t), BTF_KIND_FWD, "fwd_kind");
ASSERT_EQ(btf_kflag(t), 0, "fwd_kflag");
ASSERT_STREQ(btf_type_raw_dump(btf, 10),
"[10] FWD 'struct_fwd' fwd_kind=struct", "raw_dump");
id = btf__add_fwd(btf, "union_fwd", BTF_FWD_UNION);
ASSERT_EQ(id, 11, "union_fwd_id");
t = btf__type_by_id(btf, 11);
ASSERT_STREQ(btf__str_by_offset(btf, t->name_off), "union_fwd", "fwd_name");
ASSERT_EQ(btf_kind(t), BTF_KIND_FWD, "fwd_kind");
ASSERT_EQ(btf_kflag(t), 1, "fwd_kflag");
ASSERT_STREQ(btf_type_raw_dump(btf, 11),
"[11] FWD 'union_fwd' fwd_kind=union", "raw_dump");
id = btf__add_fwd(btf, "enum_fwd", BTF_FWD_ENUM);
ASSERT_EQ(id, 12, "enum_fwd_id");
t = btf__type_by_id(btf, 12);
ASSERT_STREQ(btf__str_by_offset(btf, t->name_off), "enum_fwd", "fwd_name");
ASSERT_EQ(btf_kind(t), BTF_KIND_ENUM, "enum_fwd_kind");
	ASSERT_EQ(btf_vlen(t), 0, "enum_fwd_vlen");
ASSERT_EQ(t->size, 4, "enum_fwd_sz");
ASSERT_STREQ(btf_type_raw_dump(btf, 12),
"[12] ENUM 'enum_fwd' encoding=UNSIGNED size=4 vlen=0", "raw_dump");
/* TYPEDEF */
id = btf__add_typedef(btf, "typedef1", 1);
ASSERT_EQ(id, 13, "typedef_fwd_id");
t = btf__type_by_id(btf, 13);
ASSERT_STREQ(btf__str_by_offset(btf, t->name_off), "typedef1", "typedef_name");
ASSERT_EQ(btf_kind(t), BTF_KIND_TYPEDEF, "typedef_kind");
ASSERT_EQ(t->type, 1, "typedef_type");
ASSERT_STREQ(btf_type_raw_dump(btf, 13),
"[13] TYPEDEF 'typedef1' type_id=1", "raw_dump");
/* FUNC & FUNC_PROTO */
id = btf__add_func(btf, "func1", BTF_FUNC_GLOBAL, 15);
ASSERT_EQ(id, 14, "func_id");
t = btf__type_by_id(btf, 14);
ASSERT_STREQ(btf__str_by_offset(btf, t->name_off), "func1", "func_name");
ASSERT_EQ(t->type, 15, "func_type");
ASSERT_EQ(btf_kind(t), BTF_KIND_FUNC, "func_kind");
ASSERT_EQ(btf_vlen(t), BTF_FUNC_GLOBAL, "func_vlen");
ASSERT_STREQ(btf_type_raw_dump(btf, 14),
"[14] FUNC 'func1' type_id=15 linkage=global", "raw_dump");
id = btf__add_func_proto(btf, 1);
ASSERT_EQ(id, 15, "func_proto_id");
err = btf__add_func_param(btf, "p1", 1);
ASSERT_OK(err, "p1_res");
err = btf__add_func_param(btf, "p2", 2);
ASSERT_OK(err, "p2_res");
t = btf__type_by_id(btf, 15);
ASSERT_EQ(btf_kind(t), BTF_KIND_FUNC_PROTO, "func_proto_kind");
ASSERT_EQ(btf_vlen(t), 2, "func_proto_vlen");
ASSERT_EQ(t->type, 1, "func_proto_ret_type");
p = btf_params(t) + 0;
ASSERT_STREQ(btf__str_by_offset(btf, p->name_off), "p1", "p1_name");
ASSERT_EQ(p->type, 1, "p1_type");
p = btf_params(t) + 1;
ASSERT_STREQ(btf__str_by_offset(btf, p->name_off), "p2", "p2_name");
ASSERT_EQ(p->type, 2, "p2_type");
ASSERT_STREQ(btf_type_raw_dump(btf, 15),
"[15] FUNC_PROTO '(anon)' ret_type_id=1 vlen=2\n"
"\t'p1' type_id=1\n"
"\t'p2' type_id=2", "raw_dump");
/* VAR */
id = btf__add_var(btf, "var1", BTF_VAR_GLOBAL_ALLOCATED, 1);
ASSERT_EQ(id, 16, "var_id");
t = btf__type_by_id(btf, 16);
ASSERT_STREQ(btf__str_by_offset(btf, t->name_off), "var1", "var_name");
ASSERT_EQ(btf_kind(t), BTF_KIND_VAR, "var_kind");
ASSERT_EQ(t->type, 1, "var_type");
	ASSERT_EQ(btf_var(t)->linkage, BTF_VAR_GLOBAL_ALLOCATED, "var_linkage");
ASSERT_STREQ(btf_type_raw_dump(btf, 16),
"[16] VAR 'var1' type_id=1, linkage=global-alloc", "raw_dump");
/* DATASECT */
id = btf__add_datasec(btf, "datasec1", 12);
ASSERT_EQ(id, 17, "datasec_id");
err = btf__add_datasec_var_info(btf, 1, 4, 8);
ASSERT_OK(err, "v1_res");
t = btf__type_by_id(btf, 17);
ASSERT_STREQ(btf__str_by_offset(btf, t->name_off), "datasec1", "datasec_name");
ASSERT_EQ(t->size, 12, "datasec_sz");
ASSERT_EQ(btf_kind(t), BTF_KIND_DATASEC, "datasec_kind");
ASSERT_EQ(btf_vlen(t), 1, "datasec_vlen");
vi = btf_var_secinfos(t) + 0;
ASSERT_EQ(vi->type, 1, "v1_type");
ASSERT_EQ(vi->offset, 4, "v1_off");
ASSERT_EQ(vi->size, 8, "v1_sz");
ASSERT_STREQ(btf_type_raw_dump(btf, 17),
"[17] DATASEC 'datasec1' size=12 vlen=1\n"
"\ttype_id=1 offset=4 size=8", "raw_dump");
/* DECL_TAG */
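	/* component_idx == -1 tags the referenced type as a whole; a
	 * non-negative index tags one member/parameter (tag2 below tags
	 * parameter #1, i.e. 'p2', of func1).
	 */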
id = btf__add_decl_tag(btf, "tag1", 16, -1);
ASSERT_EQ(id, 18, "tag_id");
t = btf__type_by_id(btf, 18);
ASSERT_STREQ(btf__str_by_offset(btf, t->name_off), "tag1", "tag_value");
ASSERT_EQ(btf_kind(t), BTF_KIND_DECL_TAG, "tag_kind");
ASSERT_EQ(t->type, 16, "tag_type");
ASSERT_EQ(btf_decl_tag(t)->component_idx, -1, "tag_component_idx");
ASSERT_STREQ(btf_type_raw_dump(btf, 18),
"[18] DECL_TAG 'tag1' type_id=16 component_idx=-1", "raw_dump");
id = btf__add_decl_tag(btf, "tag2", 14, 1);
ASSERT_EQ(id, 19, "tag_id");
t = btf__type_by_id(btf, 19);
ASSERT_STREQ(btf__str_by_offset(btf, t->name_off), "tag2", "tag_value");
ASSERT_EQ(btf_kind(t), BTF_KIND_DECL_TAG, "tag_kind");
ASSERT_EQ(t->type, 14, "tag_type");
ASSERT_EQ(btf_decl_tag(t)->component_idx, 1, "tag_component_idx");
ASSERT_STREQ(btf_type_raw_dump(btf, 19),
"[19] DECL_TAG 'tag2' type_id=14 component_idx=1", "raw_dump");
/* TYPE_TAG */
id = btf__add_type_tag(btf, "tag1", 1);
ASSERT_EQ(id, 20, "tag_id");
t = btf__type_by_id(btf, 20);
ASSERT_STREQ(btf__str_by_offset(btf, t->name_off), "tag1", "tag_value");
ASSERT_EQ(btf_kind(t), BTF_KIND_TYPE_TAG, "tag_kind");
ASSERT_EQ(t->type, 1, "tag_type");
ASSERT_STREQ(btf_type_raw_dump(btf, 20),
"[20] TYPE_TAG 'tag1' type_id=1", "raw_dump");
/* ENUM64 */
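	/* ENUM64 values are stored as two 32-bit halves (val_lo32/val_hi32):
	 * -1 becomes 0xffffffff in both halves, and 0x123456789 becomes
	 * hi32=0x1, lo32=0x23456789, as the asserts below verify.
	 */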
id = btf__add_enum64(btf, "e1", 8, true);
ASSERT_EQ(id, 21, "enum64_id");
err = btf__add_enum64_value(btf, "v1", -1);
ASSERT_OK(err, "v1_res");
err = btf__add_enum64_value(btf, "v2", 0x123456789); /* 4886718345 */
ASSERT_OK(err, "v2_res");
t = btf__type_by_id(btf, 21);
ASSERT_STREQ(btf__str_by_offset(btf, t->name_off), "e1", "enum64_name");
ASSERT_EQ(btf_kind(t), BTF_KIND_ENUM64, "enum64_kind");
ASSERT_EQ(btf_vlen(t), 2, "enum64_vlen");
ASSERT_EQ(t->size, 8, "enum64_sz");
v64 = btf_enum64(t) + 0;
ASSERT_STREQ(btf__str_by_offset(btf, v64->name_off), "v1", "v1_name");
ASSERT_EQ(v64->val_hi32, 0xffffffff, "v1_val");
ASSERT_EQ(v64->val_lo32, 0xffffffff, "v1_val");
v64 = btf_enum64(t) + 1;
ASSERT_STREQ(btf__str_by_offset(btf, v64->name_off), "v2", "v2_name");
ASSERT_EQ(v64->val_hi32, 0x1, "v2_val");
ASSERT_EQ(v64->val_lo32, 0x23456789, "v2_val");
ASSERT_STREQ(btf_type_raw_dump(btf, 21),
"[21] ENUM64 'e1' encoding=SIGNED size=8 vlen=2\n"
"\t'v1' val=-1\n"
"\t'v2' val=4886718345", "raw_dump");
id = btf__add_enum64(btf, "e1", 8, false);
ASSERT_EQ(id, 22, "enum64_id");
err = btf__add_enum64_value(btf, "v1", 0xffffffffFFFFFFFF); /* 18446744073709551615 */
ASSERT_OK(err, "v1_res");
t = btf__type_by_id(btf, 22);
ASSERT_STREQ(btf__str_by_offset(btf, t->name_off), "e1", "enum64_name");
ASSERT_EQ(btf_kind(t), BTF_KIND_ENUM64, "enum64_kind");
ASSERT_EQ(btf_vlen(t), 1, "enum64_vlen");
ASSERT_EQ(t->size, 8, "enum64_sz");
v64 = btf_enum64(t) + 0;
ASSERT_STREQ(btf__str_by_offset(btf, v64->name_off), "v1", "v1_name");
ASSERT_EQ(v64->val_hi32, 0xffffffff, "v1_val");
ASSERT_EQ(v64->val_lo32, 0xffffffff, "v1_val");
ASSERT_STREQ(btf_type_raw_dump(btf, 22),
"[22] ENUM64 'e1' encoding=UNSIGNED size=8 vlen=1\n"
"\t'v1' val=18446744073709551615", "raw_dump");
}
static void test_btf_add(void)
{
struct btf *btf;
btf = btf__new_empty();
if (!ASSERT_OK_PTR(btf, "new_empty"))
return;
gen_btf(btf);
VALIDATE_RAW_BTF(
btf,
"[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED",
"[2] PTR '(anon)' type_id=1",
"[3] CONST '(anon)' type_id=5",
"[4] VOLATILE '(anon)' type_id=3",
"[5] RESTRICT '(anon)' type_id=4",
"[6] ARRAY '(anon)' type_id=2 index_type_id=1 nr_elems=10",
"[7] STRUCT 's1' size=8 vlen=2\n"
"\t'f1' type_id=1 bits_offset=0\n"
"\t'f2' type_id=1 bits_offset=32 bitfield_size=16",
"[8] UNION 'u1' size=8 vlen=1\n"
"\t'f1' type_id=1 bits_offset=0 bitfield_size=16",
"[9] ENUM 'e1' encoding=UNSIGNED size=4 vlen=2\n"
"\t'v1' val=1\n"
"\t'v2' val=2",
"[10] FWD 'struct_fwd' fwd_kind=struct",
"[11] FWD 'union_fwd' fwd_kind=union",
"[12] ENUM 'enum_fwd' encoding=UNSIGNED size=4 vlen=0",
"[13] TYPEDEF 'typedef1' type_id=1",
"[14] FUNC 'func1' type_id=15 linkage=global",
"[15] FUNC_PROTO '(anon)' ret_type_id=1 vlen=2\n"
"\t'p1' type_id=1\n"
"\t'p2' type_id=2",
"[16] VAR 'var1' type_id=1, linkage=global-alloc",
"[17] DATASEC 'datasec1' size=12 vlen=1\n"
"\ttype_id=1 offset=4 size=8",
"[18] DECL_TAG 'tag1' type_id=16 component_idx=-1",
"[19] DECL_TAG 'tag2' type_id=14 component_idx=1",
"[20] TYPE_TAG 'tag1' type_id=1",
"[21] ENUM64 'e1' encoding=SIGNED size=8 vlen=2\n"
"\t'v1' val=-1\n"
"\t'v2' val=4886718345",
"[22] ENUM64 'e1' encoding=UNSIGNED size=8 vlen=1\n"
"\t'v1' val=18446744073709551615");
btf__free(btf);
}
static void test_btf_add_btf(void)
{
struct btf *btf1 = NULL, *btf2 = NULL;
int id;
btf1 = btf__new_empty();
if (!ASSERT_OK_PTR(btf1, "btf1"))
return;
btf2 = btf__new_empty();
if (!ASSERT_OK_PTR(btf2, "btf2"))
goto cleanup;
gen_btf(btf1);
gen_btf(btf2);
id = btf__add_btf(btf1, btf2);
if (!ASSERT_EQ(id, 23, "id"))
goto cleanup;
VALIDATE_RAW_BTF(
btf1,
"[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED",
"[2] PTR '(anon)' type_id=1",
"[3] CONST '(anon)' type_id=5",
"[4] VOLATILE '(anon)' type_id=3",
"[5] RESTRICT '(anon)' type_id=4",
"[6] ARRAY '(anon)' type_id=2 index_type_id=1 nr_elems=10",
"[7] STRUCT 's1' size=8 vlen=2\n"
"\t'f1' type_id=1 bits_offset=0\n"
"\t'f2' type_id=1 bits_offset=32 bitfield_size=16",
"[8] UNION 'u1' size=8 vlen=1\n"
"\t'f1' type_id=1 bits_offset=0 bitfield_size=16",
"[9] ENUM 'e1' encoding=UNSIGNED size=4 vlen=2\n"
"\t'v1' val=1\n"
"\t'v2' val=2",
"[10] FWD 'struct_fwd' fwd_kind=struct",
"[11] FWD 'union_fwd' fwd_kind=union",
"[12] ENUM 'enum_fwd' encoding=UNSIGNED size=4 vlen=0",
"[13] TYPEDEF 'typedef1' type_id=1",
"[14] FUNC 'func1' type_id=15 linkage=global",
"[15] FUNC_PROTO '(anon)' ret_type_id=1 vlen=2\n"
"\t'p1' type_id=1\n"
"\t'p2' type_id=2",
"[16] VAR 'var1' type_id=1, linkage=global-alloc",
"[17] DATASEC 'datasec1' size=12 vlen=1\n"
"\ttype_id=1 offset=4 size=8",
"[18] DECL_TAG 'tag1' type_id=16 component_idx=-1",
"[19] DECL_TAG 'tag2' type_id=14 component_idx=1",
"[20] TYPE_TAG 'tag1' type_id=1",
"[21] ENUM64 'e1' encoding=SIGNED size=8 vlen=2\n"
"\t'v1' val=-1\n"
"\t'v2' val=4886718345",
"[22] ENUM64 'e1' encoding=UNSIGNED size=8 vlen=1\n"
"\t'v1' val=18446744073709551615",
/* types appended from the second BTF */
"[23] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED",
"[24] PTR '(anon)' type_id=23",
"[25] CONST '(anon)' type_id=27",
"[26] VOLATILE '(anon)' type_id=25",
"[27] RESTRICT '(anon)' type_id=26",
"[28] ARRAY '(anon)' type_id=24 index_type_id=23 nr_elems=10",
"[29] STRUCT 's1' size=8 vlen=2\n"
"\t'f1' type_id=23 bits_offset=0\n"
"\t'f2' type_id=23 bits_offset=32 bitfield_size=16",
"[30] UNION 'u1' size=8 vlen=1\n"
"\t'f1' type_id=23 bits_offset=0 bitfield_size=16",
"[31] ENUM 'e1' encoding=UNSIGNED size=4 vlen=2\n"
"\t'v1' val=1\n"
"\t'v2' val=2",
"[32] FWD 'struct_fwd' fwd_kind=struct",
"[33] FWD 'union_fwd' fwd_kind=union",
"[34] ENUM 'enum_fwd' encoding=UNSIGNED size=4 vlen=0",
"[35] TYPEDEF 'typedef1' type_id=23",
"[36] FUNC 'func1' type_id=37 linkage=global",
"[37] FUNC_PROTO '(anon)' ret_type_id=23 vlen=2\n"
"\t'p1' type_id=23\n"
"\t'p2' type_id=24",
"[38] VAR 'var1' type_id=23, linkage=global-alloc",
"[39] DATASEC 'datasec1' size=12 vlen=1\n"
"\ttype_id=23 offset=4 size=8",
"[40] DECL_TAG 'tag1' type_id=38 component_idx=-1",
"[41] DECL_TAG 'tag2' type_id=36 component_idx=1",
"[42] TYPE_TAG 'tag1' type_id=23",
"[43] ENUM64 'e1' encoding=SIGNED size=8 vlen=2\n"
"\t'v1' val=-1\n"
"\t'v2' val=4886718345",
"[44] ENUM64 'e1' encoding=UNSIGNED size=8 vlen=1\n"
"\t'v1' val=18446744073709551615");
cleanup:
btf__free(btf1);
btf__free(btf2);
}
void test_btf_write(void)
{
if (test__start_subtest("btf_add"))
test_btf_add();
if (test__start_subtest("btf_add_btf"))
test_btf_add_btf();
}
| linux-master | tools/testing/selftests/bpf/prog_tests/btf_write.c |
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
void test_global_data_init(void)
{
const char *file = "./test_global_data.bpf.o";
int err = -ENOMEM, map_fd, zero = 0;
__u8 *buff = NULL, *newval = NULL;
struct bpf_object *obj;
struct bpf_map *map;
__u32 duration = 0;
size_t sz;
obj = bpf_object__open_file(file, NULL);
err = libbpf_get_error(obj);
if (CHECK_FAIL(err))
return;
map = bpf_object__find_map_by_name(obj, ".rodata");
if (CHECK_FAIL(!map || !bpf_map__is_internal(map)))
goto out;
sz = bpf_map__value_size(map);
newval = malloc(sz);
if (CHECK_FAIL(!newval))
goto out;
memset(newval, 0, sz);
/* wrong size, should fail */
err = bpf_map__set_initial_value(map, newval, sz - 1);
if (CHECK(!err, "reject set initial value wrong size", "err %d\n", err))
goto out;
err = bpf_map__set_initial_value(map, newval, sz);
if (CHECK(err, "set initial value", "err %d\n", err))
goto out;
err = bpf_object__load(obj);
if (CHECK_FAIL(err))
goto out;
map_fd = bpf_map__fd(map);
if (CHECK_FAIL(map_fd < 0))
goto out;
buff = malloc(sz);
if (buff)
err = bpf_map_lookup_elem(map_fd, &zero, buff);
if (CHECK(!buff || err || memcmp(buff, newval, sz),
"compare .rodata map data override",
"err %d errno %d\n", err, errno))
goto out;
memset(newval, 1, sz);
/* object loaded - should fail */
err = bpf_map__set_initial_value(map, newval, sz);
CHECK(!err, "reject set initial value after load", "err %d\n", err);
out:
free(buff);
free(newval);
bpf_object__close(obj);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/global_data_init.c |
// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
/*
* Tests for libbpf's hashmap.
*
* Copyright (c) 2019 Facebook
*/
#include "test_progs.h"
#include "bpf/hashmap.h"
#include <stddef.h>
static int duration = 0;
static size_t hash_fn(long k, void *ctx)
{
return k;
}
static bool equal_fn(long a, long b, void *ctx)
{
return a == b;
}
static inline size_t next_pow_2(size_t n)
{
size_t r = 1;
while (r < n)
r <<= 1;
return r;
}
static inline size_t exp_cap(size_t sz)
{
size_t r = next_pow_2(sz);
if (sz * 4 / 3 > r)
r <<= 1;
return r;
}
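
/* exp_cap() mirrors the hashmap's growth policy: capacity is the element
 * count rounded up to a power of two, doubled once more if that would
 * exceed a 3/4 load factor. E.g. for the ELEM_CNT == 62 used below:
 * next_pow_2(62) == 64 and 62 * 4 / 3 == 82 > 64, so the expected
 * capacity is 128.
 */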
#define ELEM_CNT 62
static void test_hashmap_generic(void)
{
struct hashmap_entry *entry, *tmp;
int err, bkt, found_cnt, i;
long long found_msk;
struct hashmap *map;
map = hashmap__new(hash_fn, equal_fn, NULL);
if (!ASSERT_OK_PTR(map, "hashmap__new"))
return;
for (i = 0; i < ELEM_CNT; i++) {
long oldk, k = i;
long oldv, v = 1024 + i;
err = hashmap__update(map, k, v, &oldk, &oldv);
if (CHECK(err != -ENOENT, "hashmap__update",
"unexpected result: %d\n", err))
goto cleanup;
if (i % 2) {
err = hashmap__add(map, k, v);
} else {
err = hashmap__set(map, k, v, &oldk, &oldv);
if (CHECK(oldk != 0 || oldv != 0, "check_kv",
"unexpected k/v: %ld=%ld\n", oldk, oldv))
goto cleanup;
}
if (CHECK(err, "elem_add", "failed to add k/v %ld = %ld: %d\n", k, v, err))
goto cleanup;
if (CHECK(!hashmap__find(map, k, &oldv), "elem_find",
"failed to find key %ld\n", k))
goto cleanup;
if (CHECK(oldv != v, "elem_val", "found value is wrong: %ld\n", oldv))
goto cleanup;
}
if (CHECK(hashmap__size(map) != ELEM_CNT, "hashmap__size",
"invalid map size: %zu\n", hashmap__size(map)))
goto cleanup;
if (CHECK(hashmap__capacity(map) != exp_cap(hashmap__size(map)),
"hashmap_cap",
"unexpected map capacity: %zu\n", hashmap__capacity(map)))
goto cleanup;
found_msk = 0;
hashmap__for_each_entry(map, entry, bkt) {
long k = entry->key;
long v = entry->value;
found_msk |= 1ULL << k;
if (CHECK(v - k != 1024, "check_kv",
"invalid k/v pair: %ld = %ld\n", k, v))
goto cleanup;
}
if (CHECK(found_msk != (1ULL << ELEM_CNT) - 1, "elem_cnt",
"not all keys iterated: %llx\n", found_msk))
goto cleanup;
for (i = 0; i < ELEM_CNT; i++) {
long oldk, k = i;
long oldv, v = 256 + i;
err = hashmap__add(map, k, v);
if (CHECK(err != -EEXIST, "hashmap__add",
"unexpected add result: %d\n", err))
goto cleanup;
if (i % 2)
err = hashmap__update(map, k, v, &oldk, &oldv);
else
err = hashmap__set(map, k, v, &oldk, &oldv);
if (CHECK(err, "elem_upd",
"failed to update k/v %ld = %ld: %d\n",
k, v, err))
goto cleanup;
if (CHECK(!hashmap__find(map, k, &oldv), "elem_find",
"failed to find key %ld\n", k))
goto cleanup;
if (CHECK(oldv != v, "elem_val",
"found value is wrong: %ld\n", oldv))
goto cleanup;
}
if (CHECK(hashmap__size(map) != ELEM_CNT, "hashmap__size",
"invalid updated map size: %zu\n", hashmap__size(map)))
goto cleanup;
if (CHECK(hashmap__capacity(map) != exp_cap(hashmap__size(map)),
"hashmap__capacity",
"unexpected map capacity: %zu\n", hashmap__capacity(map)))
goto cleanup;
found_msk = 0;
hashmap__for_each_entry_safe(map, entry, tmp, bkt) {
long k = entry->key;
long v = entry->value;
found_msk |= 1ULL << k;
if (CHECK(v - k != 256, "elem_check",
"invalid updated k/v pair: %ld = %ld\n", k, v))
goto cleanup;
}
if (CHECK(found_msk != (1ULL << ELEM_CNT) - 1, "elem_cnt",
"not all keys iterated after update: %llx\n", found_msk))
goto cleanup;
found_cnt = 0;
hashmap__for_each_key_entry(map, entry, 0) {
found_cnt++;
}
if (CHECK(!found_cnt, "found_cnt",
"didn't find any entries for key 0\n"))
goto cleanup;
found_msk = 0;
found_cnt = 0;
hashmap__for_each_key_entry_safe(map, entry, tmp, 0) {
long oldk, k;
long oldv, v;
k = entry->key;
v = entry->value;
found_cnt++;
found_msk |= 1ULL << k;
if (CHECK(!hashmap__delete(map, k, &oldk, &oldv), "elem_del",
"failed to delete k/v %ld = %ld\n", k, v))
goto cleanup;
if (CHECK(oldk != k || oldv != v, "check_old",
"invalid deleted k/v: expected %ld = %ld, got %ld = %ld\n",
k, v, oldk, oldv))
goto cleanup;
if (CHECK(hashmap__delete(map, k, &oldk, &oldv), "elem_del",
"unexpectedly deleted k/v %ld = %ld\n", oldk, oldv))
goto cleanup;
}
if (CHECK(!found_cnt || !found_msk, "found_entries",
"didn't delete any key entries\n"))
goto cleanup;
if (CHECK(hashmap__size(map) != ELEM_CNT - found_cnt, "elem_cnt",
"invalid updated map size (already deleted: %d): %zu\n",
found_cnt, hashmap__size(map)))
goto cleanup;
if (CHECK(hashmap__capacity(map) != exp_cap(hashmap__size(map)),
"hashmap__capacity",
"unexpected map capacity: %zu\n", hashmap__capacity(map)))
goto cleanup;
hashmap__for_each_entry_safe(map, entry, tmp, bkt) {
long oldk, k;
long oldv, v;
k = entry->key;
v = entry->value;
found_cnt++;
found_msk |= 1ULL << k;
if (CHECK(!hashmap__delete(map, k, &oldk, &oldv), "elem_del",
"failed to delete k/v %ld = %ld\n", k, v))
goto cleanup;
if (CHECK(oldk != k || oldv != v, "elem_check",
"invalid old k/v: expect %ld = %ld, got %ld = %ld\n",
k, v, oldk, oldv))
goto cleanup;
if (CHECK(hashmap__delete(map, k, &oldk, &oldv), "elem_del",
"unexpectedly deleted k/v %ld = %ld\n", k, v))
goto cleanup;
}
if (CHECK(found_cnt != ELEM_CNT || found_msk != (1ULL << ELEM_CNT) - 1,
"found_cnt",
"not all keys were deleted: found_cnt:%d, found_msk:%llx\n",
found_cnt, found_msk))
goto cleanup;
if (CHECK(hashmap__size(map) != 0, "hashmap__size",
"invalid updated map size (already deleted: %d): %zu\n",
found_cnt, hashmap__size(map)))
goto cleanup;
found_cnt = 0;
hashmap__for_each_entry(map, entry, bkt) {
CHECK(false, "elem_exists",
"unexpected map entries left: %ld = %ld\n",
entry->key, entry->value);
goto cleanup;
}
hashmap__clear(map);
hashmap__for_each_entry(map, entry, bkt) {
CHECK(false, "elem_exists",
"unexpected map entries left: %ld = %ld\n",
entry->key, entry->value);
goto cleanup;
}
cleanup:
hashmap__free(map);
}
static size_t str_hash_fn(long a, void *ctx)
{
return str_hash((char *)a);
}
static bool str_equal_fn(long a, long b, void *ctx)
{
return strcmp((char *)a, (char *)b) == 0;
}
/* Verify that hashmap interface works with pointer keys and values */
static void test_hashmap_ptr_iface(void)
{
const char *key, *value, *old_key, *old_value;
struct hashmap_entry *cur;
struct hashmap *map;
int err, i, bkt;
map = hashmap__new(str_hash_fn, str_equal_fn, NULL);
if (CHECK(!map, "hashmap__new", "can't allocate hashmap\n"))
goto cleanup;
#define CHECK_STR(fn, var, expected) \
CHECK(strcmp(var, (expected)), (fn), \
"wrong value of " #var ": '%s' instead of '%s'\n", var, (expected))
err = hashmap__insert(map, "a", "apricot", HASHMAP_ADD, NULL, NULL);
if (CHECK(err, "hashmap__insert", "unexpected error: %d\n", err))
goto cleanup;
err = hashmap__insert(map, "a", "apple", HASHMAP_SET, &old_key, &old_value);
if (CHECK(err, "hashmap__insert", "unexpected error: %d\n", err))
goto cleanup;
CHECK_STR("hashmap__update", old_key, "a");
CHECK_STR("hashmap__update", old_value, "apricot");
err = hashmap__add(map, "b", "banana");
if (CHECK(err, "hashmap__add", "unexpected error: %d\n", err))
goto cleanup;
err = hashmap__set(map, "b", "breadfruit", &old_key, &old_value);
if (CHECK(err, "hashmap__set", "unexpected error: %d\n", err))
goto cleanup;
CHECK_STR("hashmap__set", old_key, "b");
CHECK_STR("hashmap__set", old_value, "banana");
err = hashmap__update(map, "b", "blueberry", &old_key, &old_value);
if (CHECK(err, "hashmap__update", "unexpected error: %d\n", err))
goto cleanup;
CHECK_STR("hashmap__update", old_key, "b");
CHECK_STR("hashmap__update", old_value, "breadfruit");
err = hashmap__append(map, "c", "cherry");
if (CHECK(err, "hashmap__append", "unexpected error: %d\n", err))
goto cleanup;
if (CHECK(!hashmap__delete(map, "c", &old_key, &old_value),
"hashmap__delete", "expected to have entry for 'c'\n"))
goto cleanup;
CHECK_STR("hashmap__delete", old_key, "c");
CHECK_STR("hashmap__delete", old_value, "cherry");
CHECK(!hashmap__find(map, "b", &value), "hashmap__find", "can't find value for 'b'\n");
CHECK_STR("hashmap__find", value, "blueberry");
if (CHECK(!hashmap__delete(map, "b", NULL, NULL),
"hashmap__delete", "expected to have entry for 'b'\n"))
goto cleanup;
i = 0;
hashmap__for_each_entry(map, cur, bkt) {
if (CHECK(i != 0, "hashmap__for_each_entry", "too many entries"))
goto cleanup;
key = cur->pkey;
value = cur->pvalue;
CHECK_STR("entry", key, "a");
CHECK_STR("entry", value, "apple");
i++;
}
#undef CHECK_STR
cleanup:
hashmap__free(map);
}
static size_t collision_hash_fn(long k, void *ctx)
{
return 0;
}
static void test_hashmap_multimap(void)
{
long k1 = 0, k2 = 1;
struct hashmap_entry *entry;
struct hashmap *map;
long found_msk;
int err, bkt;
/* force collisions */
map = hashmap__new(collision_hash_fn, equal_fn, NULL);
if (!ASSERT_OK_PTR(map, "hashmap__new"))
return;
/* set up multimap:
* [0] -> 1, 2, 4;
* [1] -> 8, 16, 32;
*/
err = hashmap__append(map, k1, 1);
if (CHECK(err, "elem_add", "failed to add k/v: %d\n", err))
goto cleanup;
err = hashmap__append(map, k1, 2);
if (CHECK(err, "elem_add", "failed to add k/v: %d\n", err))
goto cleanup;
err = hashmap__append(map, k1, 4);
if (CHECK(err, "elem_add", "failed to add k/v: %d\n", err))
goto cleanup;
err = hashmap__append(map, k2, 8);
if (CHECK(err, "elem_add", "failed to add k/v: %d\n", err))
goto cleanup;
err = hashmap__append(map, k2, 16);
if (CHECK(err, "elem_add", "failed to add k/v: %d\n", err))
goto cleanup;
err = hashmap__append(map, k2, 32);
if (CHECK(err, "elem_add", "failed to add k/v: %d\n", err))
goto cleanup;
if (CHECK(hashmap__size(map) != 6, "hashmap_size",
"invalid map size: %zu\n", hashmap__size(map)))
goto cleanup;
/* verify global iteration still works and sees all values */
found_msk = 0;
hashmap__for_each_entry(map, entry, bkt) {
found_msk |= entry->value;
}
if (CHECK(found_msk != (1 << 6) - 1, "found_msk",
"not all keys iterated: %lx\n", found_msk))
goto cleanup;
/* iterate values for key 1 */
found_msk = 0;
hashmap__for_each_key_entry(map, entry, k1) {
found_msk |= entry->value;
}
if (CHECK(found_msk != (1 | 2 | 4), "found_msk",
"invalid k1 values: %lx\n", found_msk))
goto cleanup;
/* iterate values for key 2 */
found_msk = 0;
hashmap__for_each_key_entry(map, entry, k2) {
found_msk |= entry->value;
}
if (CHECK(found_msk != (8 | 16 | 32), "found_msk",
"invalid k2 values: %lx\n", found_msk))
goto cleanup;
cleanup:
hashmap__free(map);
}
static void test_hashmap_empty(void)
{
struct hashmap_entry *entry;
int bkt;
struct hashmap *map;
long k = 0;
	map = hashmap__new(hash_fn, equal_fn, NULL);
if (!ASSERT_OK_PTR(map, "hashmap__new"))
goto cleanup;
if (CHECK(hashmap__size(map) != 0, "hashmap__size",
"invalid map size: %zu\n", hashmap__size(map)))
goto cleanup;
if (CHECK(hashmap__capacity(map) != 0, "hashmap__capacity",
"invalid map capacity: %zu\n", hashmap__capacity(map)))
goto cleanup;
if (CHECK(hashmap__find(map, k, NULL), "elem_find",
"unexpected find\n"))
goto cleanup;
if (CHECK(hashmap__delete(map, k, NULL, NULL), "elem_del",
"unexpected delete\n"))
goto cleanup;
hashmap__for_each_entry(map, entry, bkt) {
CHECK(false, "elem_found", "unexpected iterated entry\n");
goto cleanup;
}
hashmap__for_each_key_entry(map, entry, k) {
CHECK(false, "key_found", "unexpected key entry\n");
goto cleanup;
}
cleanup:
hashmap__free(map);
}
void test_hashmap(void)
{
if (test__start_subtest("generic"))
test_hashmap_generic();
if (test__start_subtest("multimap"))
test_hashmap_multimap();
if (test__start_subtest("empty"))
test_hashmap_empty();
if (test__start_subtest("ptr_iface"))
test_hashmap_ptr_iface();
}
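
/* Minimal standalone usage sketch of the API exercised above (illustrative
 * only, not hooked into the test runner; reuses the hash_fn/equal_fn
 * callbacks defined at the top of this file):
 */
static __attribute__((unused)) void hashmap_usage_sketch(void)
{
	struct hashmap *map = hashmap__new(hash_fn, equal_fn, NULL);
	long v;

	if (!map)
		return;
	/* insert, look up, then delete a single key/value pair */
	if (!hashmap__add(map, 42, 4242) &&
	    hashmap__find(map, 42, &v) && v == 4242)
		hashmap__delete(map, 42, NULL, NULL);
	hashmap__free(map);
}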
| linux-master | tools/testing/selftests/bpf/prog_tests/hashmap.c |
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include "atomic_bounds.skel.h"
void test_atomic_bounds(void)
{
struct atomic_bounds *skel;
__u32 duration = 0;
skel = atomic_bounds__open_and_load();
if (CHECK(!skel, "skel_load", "couldn't load program\n"))
return;
atomic_bounds__destroy(skel);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/atomic_bounds.c |
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include <network_helpers.h>
static void test_l4lb(const char *file)
{
unsigned int nr_cpus = bpf_num_possible_cpus();
struct vip key = {.protocol = 6};
struct vip_meta {
__u32 flags;
__u32 vip_num;
} value = {.vip_num = VIP_NUM};
__u32 stats_key = VIP_NUM;
struct vip_stats {
__u64 bytes;
__u64 pkts;
} stats[nr_cpus];
struct real_definition {
union {
__be32 dst;
__be32 dstv6[4];
};
__u8 flags;
} real_def = {.dst = MAGIC_VAL};
__u32 ch_key = 11, real_num = 3;
int err, i, prog_fd, map_fd;
__u64 bytes = 0, pkts = 0;
struct bpf_object *obj;
char buf[128];
u32 *magic = (u32 *)buf;
LIBBPF_OPTS(bpf_test_run_opts, topts,
.data_out = buf,
.data_size_out = sizeof(buf),
.repeat = NUM_ITER,
);
err = bpf_prog_test_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd);
if (CHECK_FAIL(err))
return;
map_fd = bpf_find_map(__func__, obj, "vip_map");
if (map_fd < 0)
goto out;
bpf_map_update_elem(map_fd, &key, &value, 0);
map_fd = bpf_find_map(__func__, obj, "ch_rings");
if (map_fd < 0)
goto out;
bpf_map_update_elem(map_fd, &ch_key, &real_num, 0);
map_fd = bpf_find_map(__func__, obj, "reals");
if (map_fd < 0)
goto out;
bpf_map_update_elem(map_fd, &real_num, &real_def, 0);
topts.data_in = &pkt_v4;
topts.data_size_in = sizeof(pkt_v4);
err = bpf_prog_test_run_opts(prog_fd, &topts);
ASSERT_OK(err, "test_run");
ASSERT_EQ(topts.retval, 7 /*TC_ACT_REDIRECT*/, "ipv4 test_run retval");
ASSERT_EQ(topts.data_size_out, 54, "ipv4 test_run data_size_out");
ASSERT_EQ(*magic, MAGIC_VAL, "ipv4 magic");
topts.data_in = &pkt_v6;
topts.data_size_in = sizeof(pkt_v6);
topts.data_size_out = sizeof(buf); /* reset out size */
err = bpf_prog_test_run_opts(prog_fd, &topts);
ASSERT_OK(err, "test_run");
ASSERT_EQ(topts.retval, 7 /*TC_ACT_REDIRECT*/, "ipv6 test_run retval");
ASSERT_EQ(topts.data_size_out, 74, "ipv6 test_run data_size_out");
ASSERT_EQ(*magic, MAGIC_VAL, "ipv6 magic");
map_fd = bpf_find_map(__func__, obj, "stats");
if (map_fd < 0)
goto out;
bpf_map_lookup_elem(map_fd, &stats_key, stats);
for (i = 0; i < nr_cpus; i++) {
bytes += stats[i].bytes;
pkts += stats[i].pkts;
}
if (CHECK_FAIL(bytes != MAGIC_BYTES * NUM_ITER * 2 ||
pkts != NUM_ITER * 2))
printf("test_l4lb:FAIL:stats %lld %lld\n", bytes, pkts);
out:
bpf_object__close(obj);
}
void test_l4lb_all(void)
{
if (test__start_subtest("l4lb_inline"))
test_l4lb("test_l4lb.bpf.o");
if (test__start_subtest("l4lb_noinline"))
test_l4lb("test_l4lb_noinline.bpf.o");
if (test__start_subtest("l4lb_noinline_dynptr"))
test_l4lb("test_l4lb_noinline_dynptr.bpf.o");
}
| linux-master | tools/testing/selftests/bpf/prog_tests/l4lb_all.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) Meta Platforms, Inc. and affiliates. */
#include <test_progs.h>
#include "test_subskeleton.skel.h"
#include "test_subskeleton_lib.subskel.h"
static void subskeleton_lib_setup(struct bpf_object *obj)
{
struct test_subskeleton_lib *lib = test_subskeleton_lib__open(obj);
if (!ASSERT_OK_PTR(lib, "open subskeleton"))
return;
*lib->rodata.var1 = 1;
*lib->data.var2 = 2;
lib->bss.var3->var3_1 = 3;
lib->bss.var3->var3_2 = 4;
test_subskeleton_lib__destroy(lib);
}
static int subskeleton_lib_subresult(struct bpf_object *obj)
{
struct test_subskeleton_lib *lib = test_subskeleton_lib__open(obj);
int result;
if (!ASSERT_OK_PTR(lib, "open subskeleton"))
return -EINVAL;
result = *lib->bss.libout1;
ASSERT_EQ(result, 1 + 2 + 3 + 4 + 5 + 6, "lib subresult");
ASSERT_OK_PTR(lib->progs.lib_perf_handler, "lib_perf_handler");
ASSERT_STREQ(bpf_program__name(lib->progs.lib_perf_handler),
"lib_perf_handler", "program name");
ASSERT_OK_PTR(lib->maps.map1, "map1");
ASSERT_STREQ(bpf_map__name(lib->maps.map1), "map1", "map name");
ASSERT_EQ(*lib->data.var5, 5, "__weak var5");
ASSERT_EQ(*lib->data.var6, 6, "extern var6");
ASSERT_TRUE(*lib->kconfig.CONFIG_BPF_SYSCALL, "CONFIG_BPF_SYSCALL");
test_subskeleton_lib__destroy(lib);
return result;
}
void test_subskeleton(void)
{
int err, result;
struct test_subskeleton *skel;
skel = test_subskeleton__open();
if (!ASSERT_OK_PTR(skel, "skel_open"))
return;
skel->rodata->rovar1 = 10;
skel->rodata->var1 = 1;
subskeleton_lib_setup(skel->obj);
err = test_subskeleton__load(skel);
if (!ASSERT_OK(err, "skel_load"))
goto cleanup;
err = test_subskeleton__attach(skel);
if (!ASSERT_OK(err, "skel_attach"))
goto cleanup;
/* trigger tracepoint */
usleep(1);
result = subskeleton_lib_subresult(skel->obj) * 10;
ASSERT_EQ(skel->bss->out1, result, "unexpected calculation");
cleanup:
test_subskeleton__destroy(skel);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/subskeleton.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Facebook */
#include <test_progs.h>
#include <bpf/btf.h>
#include "test_btf_decl_tag.skel.h"
/* struct btf_type_tag_test is referenced in btf_type_tag.skel.h */
struct btf_type_tag_test {
int **p;
};
#include "btf_type_tag.skel.h"
#include "btf_type_tag_user.skel.h"
#include "btf_type_tag_percpu.skel.h"
static void test_btf_decl_tag(void)
{
struct test_btf_decl_tag *skel;
skel = test_btf_decl_tag__open_and_load();
if (!ASSERT_OK_PTR(skel, "btf_decl_tag"))
return;
if (skel->rodata->skip_tests) {
printf("%s:SKIP: btf_decl_tag attribute not supported", __func__);
test__skip();
}
test_btf_decl_tag__destroy(skel);
}
static void test_btf_type_tag(void)
{
struct btf_type_tag *skel;
skel = btf_type_tag__open_and_load();
if (!ASSERT_OK_PTR(skel, "btf_type_tag"))
return;
if (skel->rodata->skip_tests) {
printf("%s:SKIP: btf_type_tag attribute not supported", __func__);
test__skip();
}
btf_type_tag__destroy(skel);
}
/* Loads vmlinux BTF as well as module BTF. If the caller passes NULL as
 * module_btf, module BTF will not be loaded.
 *
 * Returns 0 on success.
 * Returns -1 on error; in that case any BTF already loaded is freed and
 * the output parameters are reset to NULL.
 */
static int load_btfs(struct btf **vmlinux_btf, struct btf **module_btf,
bool needs_vmlinux_tag)
{
const char *module_name = "bpf_testmod";
__s32 type_id;
if (!env.has_testmod) {
test__skip();
return -1;
}
*vmlinux_btf = btf__load_vmlinux_btf();
if (!ASSERT_OK_PTR(*vmlinux_btf, "could not load vmlinux BTF"))
return -1;
if (!needs_vmlinux_tag)
goto load_module_btf;
/* skip the test if the vmlinux does not have __user tags */
type_id = btf__find_by_name_kind(*vmlinux_btf, "user", BTF_KIND_TYPE_TAG);
if (type_id <= 0) {
printf("%s:SKIP: btf_type_tag attribute not in vmlinux btf", __func__);
test__skip();
goto free_vmlinux_btf;
}
load_module_btf:
/* skip loading module_btf, if not requested by caller */
if (!module_btf)
return 0;
*module_btf = btf__load_module_btf(module_name, *vmlinux_btf);
if (!ASSERT_OK_PTR(*module_btf, "could not load module BTF"))
goto free_vmlinux_btf;
/* skip the test if the module does not have __user tags */
type_id = btf__find_by_name_kind(*module_btf, "user", BTF_KIND_TYPE_TAG);
if (type_id <= 0) {
printf("%s:SKIP: btf_type_tag attribute not in %s", __func__, module_name);
test__skip();
goto free_module_btf;
}
return 0;
free_module_btf:
btf__free(*module_btf);
free_vmlinux_btf:
btf__free(*vmlinux_btf);
*vmlinux_btf = NULL;
if (module_btf)
*module_btf = NULL;
return -1;
}
static void test_btf_type_tag_mod_user(bool load_test_user1)
{
struct btf *vmlinux_btf = NULL, *module_btf = NULL;
struct btf_type_tag_user *skel;
int err;
if (load_btfs(&vmlinux_btf, &module_btf, /*needs_vmlinux_tag=*/false))
return;
skel = btf_type_tag_user__open();
if (!ASSERT_OK_PTR(skel, "btf_type_tag_user"))
goto cleanup;
bpf_program__set_autoload(skel->progs.test_sys_getsockname, false);
if (load_test_user1)
bpf_program__set_autoload(skel->progs.test_user2, false);
else
bpf_program__set_autoload(skel->progs.test_user1, false);
err = btf_type_tag_user__load(skel);
ASSERT_ERR(err, "btf_type_tag_user");
btf_type_tag_user__destroy(skel);
cleanup:
btf__free(module_btf);
btf__free(vmlinux_btf);
}
static void test_btf_type_tag_vmlinux_user(void)
{
struct btf_type_tag_user *skel;
struct btf *vmlinux_btf = NULL;
int err;
if (load_btfs(&vmlinux_btf, NULL, /*needs_vmlinux_tag=*/true))
return;
skel = btf_type_tag_user__open();
if (!ASSERT_OK_PTR(skel, "btf_type_tag_user"))
goto cleanup;
bpf_program__set_autoload(skel->progs.test_user2, false);
bpf_program__set_autoload(skel->progs.test_user1, false);
err = btf_type_tag_user__load(skel);
ASSERT_ERR(err, "btf_type_tag_user");
btf_type_tag_user__destroy(skel);
cleanup:
btf__free(vmlinux_btf);
}
static void test_btf_type_tag_mod_percpu(bool load_test_percpu1)
{
struct btf *vmlinux_btf, *module_btf;
struct btf_type_tag_percpu *skel;
int err;
if (load_btfs(&vmlinux_btf, &module_btf, /*needs_vmlinux_tag=*/false))
return;
skel = btf_type_tag_percpu__open();
if (!ASSERT_OK_PTR(skel, "btf_type_tag_percpu"))
goto cleanup;
bpf_program__set_autoload(skel->progs.test_percpu_load, false);
bpf_program__set_autoload(skel->progs.test_percpu_helper, false);
if (load_test_percpu1)
bpf_program__set_autoload(skel->progs.test_percpu2, false);
else
bpf_program__set_autoload(skel->progs.test_percpu1, false);
err = btf_type_tag_percpu__load(skel);
ASSERT_ERR(err, "btf_type_tag_percpu");
btf_type_tag_percpu__destroy(skel);
cleanup:
btf__free(module_btf);
btf__free(vmlinux_btf);
}
static void test_btf_type_tag_vmlinux_percpu(bool load_test)
{
struct btf_type_tag_percpu *skel;
struct btf *vmlinux_btf = NULL;
int err;
if (load_btfs(&vmlinux_btf, NULL, /*needs_vmlinux_tag=*/true))
return;
skel = btf_type_tag_percpu__open();
if (!ASSERT_OK_PTR(skel, "btf_type_tag_percpu"))
goto cleanup;
bpf_program__set_autoload(skel->progs.test_percpu2, false);
bpf_program__set_autoload(skel->progs.test_percpu1, false);
if (load_test) {
bpf_program__set_autoload(skel->progs.test_percpu_helper, false);
err = btf_type_tag_percpu__load(skel);
ASSERT_ERR(err, "btf_type_tag_percpu_load");
} else {
bpf_program__set_autoload(skel->progs.test_percpu_load, false);
err = btf_type_tag_percpu__load(skel);
ASSERT_OK(err, "btf_type_tag_percpu_helper");
}
btf_type_tag_percpu__destroy(skel);
cleanup:
btf__free(vmlinux_btf);
}
void test_btf_tag(void)
{
if (test__start_subtest("btf_decl_tag"))
test_btf_decl_tag();
if (test__start_subtest("btf_type_tag"))
test_btf_type_tag();
if (test__start_subtest("btf_type_tag_user_mod1"))
test_btf_type_tag_mod_user(true);
if (test__start_subtest("btf_type_tag_user_mod2"))
test_btf_type_tag_mod_user(false);
if (test__start_subtest("btf_type_tag_sys_user_vmlinux"))
test_btf_type_tag_vmlinux_user();
if (test__start_subtest("btf_type_tag_percpu_mod1"))
test_btf_type_tag_mod_percpu(true);
if (test__start_subtest("btf_type_tag_percpu_mod2"))
test_btf_type_tag_mod_percpu(false);
if (test__start_subtest("btf_type_tag_percpu_vmlinux_load"))
test_btf_type_tag_vmlinux_percpu(true);
if (test__start_subtest("btf_type_tag_percpu_vmlinux_helper"))
test_btf_type_tag_vmlinux_percpu(false);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/btf_tag.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2020 Cloudflare
/*
* Test suite for SOCKMAP/SOCKHASH holding listening sockets.
* Covers:
 * 1. BPF map operations - bpf_map_{update,lookup,delete}_elem
* 2. BPF redirect helpers - bpf_{sk,msg}_redirect_map
* 3. BPF reuseport helper - bpf_sk_select_reuseport
*/
#include <linux/compiler.h>
#include <errno.h>
#include <error.h>
#include <limits.h>
#include <netinet/in.h>
#include <pthread.h>
#include <stdlib.h>
#include <string.h>
#include <sys/select.h>
#include <unistd.h>
#include <linux/vm_sockets.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include "bpf_util.h"
#include "test_progs.h"
#include "test_sockmap_listen.skel.h"
#include "sockmap_helpers.h"
static void test_insert_invalid(struct test_sockmap_listen *skel __always_unused,
int family, int sotype, int mapfd)
{
u32 key = 0;
u64 value;
int err;
value = -1;
err = bpf_map_update_elem(mapfd, &key, &value, BPF_NOEXIST);
if (!err || errno != EINVAL)
FAIL_ERRNO("map_update: expected EINVAL");
value = INT_MAX;
err = bpf_map_update_elem(mapfd, &key, &value, BPF_NOEXIST);
if (!err || errno != EBADF)
FAIL_ERRNO("map_update: expected EBADF");
}
static void test_insert_opened(struct test_sockmap_listen *skel __always_unused,
int family, int sotype, int mapfd)
{
u32 key = 0;
u64 value;
int err, s;
s = xsocket(family, sotype, 0);
if (s == -1)
return;
errno = 0;
value = s;
err = bpf_map_update_elem(mapfd, &key, &value, BPF_NOEXIST);
if (sotype == SOCK_STREAM) {
if (!err || errno != EOPNOTSUPP)
FAIL_ERRNO("map_update: expected EOPNOTSUPP");
} else if (err)
FAIL_ERRNO("map_update: expected success");
xclose(s);
}
static void test_insert_bound(struct test_sockmap_listen *skel __always_unused,
int family, int sotype, int mapfd)
{
struct sockaddr_storage addr;
socklen_t len;
u32 key = 0;
u64 value;
int err, s;
init_addr_loopback(family, &addr, &len);
s = xsocket(family, sotype, 0);
if (s == -1)
return;
err = xbind(s, sockaddr(&addr), len);
if (err)
goto close;
errno = 0;
value = s;
err = bpf_map_update_elem(mapfd, &key, &value, BPF_NOEXIST);
if (!err || errno != EOPNOTSUPP)
FAIL_ERRNO("map_update: expected EOPNOTSUPP");
close:
xclose(s);
}
static void test_insert(struct test_sockmap_listen *skel __always_unused,
int family, int sotype, int mapfd)
{
u64 value;
u32 key;
int s;
s = socket_loopback(family, sotype);
if (s < 0)
return;
key = 0;
value = s;
xbpf_map_update_elem(mapfd, &key, &value, BPF_NOEXIST);
xclose(s);
}
static void test_delete_after_insert(struct test_sockmap_listen *skel __always_unused,
int family, int sotype, int mapfd)
{
u64 value;
u32 key;
int s;
s = socket_loopback(family, sotype);
if (s < 0)
return;
key = 0;
value = s;
xbpf_map_update_elem(mapfd, &key, &value, BPF_NOEXIST);
xbpf_map_delete_elem(mapfd, &key);
xclose(s);
}
static void test_delete_after_close(struct test_sockmap_listen *skel __always_unused,
int family, int sotype, int mapfd)
{
int err, s;
u64 value;
u32 key;
s = socket_loopback(family, sotype);
if (s < 0)
return;
key = 0;
value = s;
xbpf_map_update_elem(mapfd, &key, &value, BPF_NOEXIST);
xclose(s);
errno = 0;
err = bpf_map_delete_elem(mapfd, &key);
	if (!err || (errno != EINVAL && errno != ENOENT))
		/* SOCKMAP and SOCKHASH return different error codes */
		FAIL_ERRNO("map_delete: expected EINVAL/ENOENT");
}
static void test_lookup_after_insert(struct test_sockmap_listen *skel __always_unused,
int family, int sotype, int mapfd)
{
u64 cookie, value;
socklen_t len;
u32 key;
int s;
s = socket_loopback(family, sotype);
if (s < 0)
return;
key = 0;
value = s;
xbpf_map_update_elem(mapfd, &key, &value, BPF_NOEXIST);
len = sizeof(cookie);
xgetsockopt(s, SOL_SOCKET, SO_COOKIE, &cookie, &len);
xbpf_map_lookup_elem(mapfd, &key, &value);
if (value != cookie) {
FAIL("map_lookup: have %#llx, want %#llx",
(unsigned long long)value, (unsigned long long)cookie);
}
xclose(s);
}
static void test_lookup_after_delete(struct test_sockmap_listen *skel __always_unused,
int family, int sotype, int mapfd)
{
int err, s;
u64 value;
u32 key;
s = socket_loopback(family, sotype);
if (s < 0)
return;
key = 0;
value = s;
xbpf_map_update_elem(mapfd, &key, &value, BPF_NOEXIST);
xbpf_map_delete_elem(mapfd, &key);
errno = 0;
err = bpf_map_lookup_elem(mapfd, &key, &value);
if (!err || errno != ENOENT)
FAIL_ERRNO("map_lookup: expected ENOENT");
xclose(s);
}
static void test_lookup_32_bit_value(struct test_sockmap_listen *skel __always_unused,
int family, int sotype, int mapfd)
{
u32 key, value32;
int err, s;
s = socket_loopback(family, sotype);
if (s < 0)
return;
mapfd = bpf_map_create(BPF_MAP_TYPE_SOCKMAP, NULL, sizeof(key),
sizeof(value32), 1, NULL);
if (mapfd < 0) {
FAIL_ERRNO("map_create");
goto close;
}
key = 0;
value32 = s;
xbpf_map_update_elem(mapfd, &key, &value32, BPF_NOEXIST);
errno = 0;
err = bpf_map_lookup_elem(mapfd, &key, &value32);
if (!err || errno != ENOSPC)
FAIL_ERRNO("map_lookup: expected ENOSPC");
xclose(mapfd);
close:
xclose(s);
}
static void test_update_existing(struct test_sockmap_listen *skel __always_unused,
int family, int sotype, int mapfd)
{
int s1, s2;
u64 value;
u32 key;
s1 = socket_loopback(family, sotype);
if (s1 < 0)
return;
s2 = socket_loopback(family, sotype);
if (s2 < 0)
goto close_s1;
key = 0;
value = s1;
xbpf_map_update_elem(mapfd, &key, &value, BPF_NOEXIST);
value = s2;
xbpf_map_update_elem(mapfd, &key, &value, BPF_EXIST);
xclose(s2);
close_s1:
xclose(s1);
}
/* Exercise the code path where we destroy child sockets that never
* got accept()'ed, aka orphans, when parent socket gets closed.
*/
static void do_destroy_orphan_child(int family, int sotype, int mapfd)
{
struct sockaddr_storage addr;
socklen_t len;
int err, s, c;
u64 value;
u32 key;
s = socket_loopback(family, sotype);
if (s < 0)
return;
len = sizeof(addr);
err = xgetsockname(s, sockaddr(&addr), &len);
if (err)
goto close_srv;
key = 0;
value = s;
xbpf_map_update_elem(mapfd, &key, &value, BPF_NOEXIST);
c = xsocket(family, sotype, 0);
if (c == -1)
goto close_srv;
xconnect(c, sockaddr(&addr), len);
xclose(c);
close_srv:
xclose(s);
}
static void test_destroy_orphan_child(struct test_sockmap_listen *skel,
int family, int sotype, int mapfd)
{
int msg_verdict = bpf_program__fd(skel->progs.prog_msg_verdict);
int skb_verdict = bpf_program__fd(skel->progs.prog_skb_verdict);
const struct test {
int progfd;
enum bpf_attach_type atype;
} tests[] = {
{ -1, -1 },
{ msg_verdict, BPF_SK_MSG_VERDICT },
{ skb_verdict, BPF_SK_SKB_VERDICT },
};
const struct test *t;
for (t = tests; t < tests + ARRAY_SIZE(tests); t++) {
if (t->progfd != -1 &&
xbpf_prog_attach(t->progfd, mapfd, t->atype, 0) != 0)
return;
do_destroy_orphan_child(family, sotype, mapfd);
if (t->progfd != -1)
xbpf_prog_detach2(t->progfd, mapfd, t->atype);
}
}
/* Perform a passive open after removing listening socket from SOCKMAP
* to ensure that callbacks get restored properly.
*/
static void test_clone_after_delete(struct test_sockmap_listen *skel __always_unused,
int family, int sotype, int mapfd)
{
struct sockaddr_storage addr;
socklen_t len;
int err, s, c;
u64 value;
u32 key;
s = socket_loopback(family, sotype);
if (s < 0)
return;
len = sizeof(addr);
err = xgetsockname(s, sockaddr(&addr), &len);
if (err)
goto close_srv;
key = 0;
value = s;
xbpf_map_update_elem(mapfd, &key, &value, BPF_NOEXIST);
xbpf_map_delete_elem(mapfd, &key);
c = xsocket(family, sotype, 0);
if (c < 0)
goto close_srv;
xconnect(c, sockaddr(&addr), len);
xclose(c);
close_srv:
xclose(s);
}
/* Check that child socket that got created while parent was in a
* SOCKMAP, but got accept()'ed only after the parent has been removed
* from SOCKMAP, gets cloned without parent psock state or callbacks.
*/
static void test_accept_after_delete(struct test_sockmap_listen *skel __always_unused,
int family, int sotype, int mapfd)
{
struct sockaddr_storage addr;
const u32 zero = 0;
int err, s, c, p;
socklen_t len;
u64 value;
s = socket_loopback(family, sotype | SOCK_NONBLOCK);
if (s == -1)
return;
len = sizeof(addr);
err = xgetsockname(s, sockaddr(&addr), &len);
if (err)
goto close_srv;
value = s;
err = xbpf_map_update_elem(mapfd, &zero, &value, BPF_NOEXIST);
if (err)
goto close_srv;
c = xsocket(family, sotype, 0);
if (c == -1)
goto close_srv;
/* Create child while parent is in sockmap */
err = xconnect(c, sockaddr(&addr), len);
if (err)
goto close_cli;
/* Remove parent from sockmap */
err = xbpf_map_delete_elem(mapfd, &zero);
if (err)
goto close_cli;
p = xaccept_nonblock(s, NULL, NULL);
if (p == -1)
goto close_cli;
/* Check that child sk_user_data is not set */
value = p;
xbpf_map_update_elem(mapfd, &zero, &value, BPF_NOEXIST);
xclose(p);
close_cli:
xclose(c);
close_srv:
xclose(s);
}
/* Check that child socket that got created and accepted while parent
* was in a SOCKMAP is cloned without parent psock state or callbacks.
*/
static void test_accept_before_delete(struct test_sockmap_listen *skel __always_unused,
int family, int sotype, int mapfd)
{
struct sockaddr_storage addr;
const u32 zero = 0, one = 1;
int err, s, c, p;
socklen_t len;
u64 value;
s = socket_loopback(family, sotype | SOCK_NONBLOCK);
if (s == -1)
return;
len = sizeof(addr);
err = xgetsockname(s, sockaddr(&addr), &len);
if (err)
goto close_srv;
value = s;
err = xbpf_map_update_elem(mapfd, &zero, &value, BPF_NOEXIST);
if (err)
goto close_srv;
c = xsocket(family, sotype, 0);
if (c == -1)
goto close_srv;
/* Create & accept child while parent is in sockmap */
err = xconnect(c, sockaddr(&addr), len);
if (err)
goto close_cli;
p = xaccept_nonblock(s, NULL, NULL);
if (p == -1)
goto close_cli;
/* Check that child sk_user_data is not set */
value = p;
xbpf_map_update_elem(mapfd, &one, &value, BPF_NOEXIST);
xclose(p);
close_cli:
xclose(c);
close_srv:
xclose(s);
}
struct connect_accept_ctx {
int sockfd;
unsigned int done;
unsigned int nr_iter;
};
static bool is_thread_done(struct connect_accept_ctx *ctx)
{
return READ_ONCE(ctx->done);
}
static void *connect_accept_thread(void *arg)
{
struct connect_accept_ctx *ctx = arg;
struct sockaddr_storage addr;
int family, socktype;
socklen_t len;
int err, i, s;
s = ctx->sockfd;
len = sizeof(addr);
err = xgetsockname(s, sockaddr(&addr), &len);
if (err)
goto done;
len = sizeof(family);
err = xgetsockopt(s, SOL_SOCKET, SO_DOMAIN, &family, &len);
if (err)
goto done;
len = sizeof(socktype);
err = xgetsockopt(s, SOL_SOCKET, SO_TYPE, &socktype, &len);
if (err)
goto done;
for (i = 0; i < ctx->nr_iter; i++) {
int c, p;
c = xsocket(family, socktype, 0);
if (c < 0)
break;
err = xconnect(c, (struct sockaddr *)&addr, sizeof(addr));
if (err) {
xclose(c);
break;
}
p = xaccept_nonblock(s, NULL, NULL);
if (p < 0) {
xclose(c);
break;
}
xclose(p);
xclose(c);
}
done:
WRITE_ONCE(ctx->done, 1);
return NULL;
}
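
/* Race map insert/delete of the listening socket against a thread that
 * keeps connecting to it and accepting, so updates land while the listener
 * has in-flight (SYN_RECV / accept-queue) children.
 */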
static void test_syn_recv_insert_delete(struct test_sockmap_listen *skel __always_unused,
int family, int sotype, int mapfd)
{
struct connect_accept_ctx ctx = { 0 };
struct sockaddr_storage addr;
socklen_t len;
u32 zero = 0;
pthread_t t;
int err, s;
u64 value;
s = socket_loopback(family, sotype | SOCK_NONBLOCK);
if (s < 0)
return;
len = sizeof(addr);
err = xgetsockname(s, sockaddr(&addr), &len);
if (err)
goto close;
ctx.sockfd = s;
ctx.nr_iter = 1000;
err = xpthread_create(&t, NULL, connect_accept_thread, &ctx);
if (err)
goto close;
value = s;
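/* Hammer insert/delete of the listener's map entry while the other
 * thread keeps driving connections through connect()/accept().
 */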
while (!is_thread_done(&ctx)) {
err = xbpf_map_update_elem(mapfd, &zero, &value, BPF_NOEXIST);
if (err)
break;
err = xbpf_map_delete_elem(mapfd, &zero);
if (err)
break;
}
xpthread_join(t, NULL);
close:
xclose(s);
}
static void *listen_thread(void *arg)
{
struct sockaddr unspec = { AF_UNSPEC };
struct connect_accept_ctx *ctx = arg;
int err, i, s;
s = ctx->sockfd;
for (i = 0; i < ctx->nr_iter; i++) {
err = xlisten(s, 1);
if (err)
break;
err = xconnect(s, &unspec, sizeof(unspec));
if (err)
break;
}
WRITE_ONCE(ctx->done, 1);
return NULL;
}
static void test_race_insert_listen(struct test_sockmap_listen *skel __always_unused,
int family, int socktype, int mapfd)
{
struct connect_accept_ctx ctx = { 0 };
const u32 zero = 0;
const int one = 1;
pthread_t t;
int err, s;
u64 value;
s = xsocket(family, socktype, 0);
if (s < 0)
return;
err = xsetsockopt(s, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one));
if (err)
goto close;
ctx.sockfd = s;
ctx.nr_iter = 10000;
err = pthread_create(&t, NULL, listen_thread, &ctx);
if (err)
goto close;
value = s;
while (!is_thread_done(&ctx)) {
err = bpf_map_update_elem(mapfd, &zero, &value, BPF_NOEXIST);
/* Expecting EOPNOTSUPP before listen() */
if (err && errno != EOPNOTSUPP) {
FAIL_ERRNO("map_update");
break;
}
err = bpf_map_delete_elem(mapfd, &zero);
/* Expecting no entry after unhash on connect(AF_UNSPEC) */
if (err && errno != EINVAL && errno != ENOENT) {
FAIL_ERRNO("map_delete");
break;
}
}
xpthread_join(t, NULL);
close:
xclose(s);
}
static void zero_verdict_count(int mapfd)
{
unsigned int zero = 0;
int key;
key = SK_DROP;
xbpf_map_update_elem(mapfd, &key, &zero, BPF_ANY);
key = SK_PASS;
xbpf_map_update_elem(mapfd, &key, &zero, BPF_ANY);
}
enum redir_mode {
REDIR_INGRESS,
REDIR_EGRESS,
};
static const char *redir_mode_str(enum redir_mode mode)
{
switch (mode) {
case REDIR_INGRESS:
return "ingress";
case REDIR_EGRESS:
return "egress";
default:
return "unknown";
}
}
static void redir_to_connected(int family, int sotype, int sock_mapfd,
int verd_mapfd, enum redir_mode mode)
{
const char *log_prefix = redir_mode_str(mode);
int s, c0, c1, p0, p1;
unsigned int pass;
int err, n;
u32 key;
char b;
zero_verdict_count(verd_mapfd);
s = socket_loopback(family, sotype | SOCK_NONBLOCK);
if (s < 0)
return;
err = create_socket_pairs(s, family, sotype, &c0, &c1, &p0, &p1);
if (err)
goto close_srv;
err = add_to_sockmap(sock_mapfd, p0, p1);
if (err)
goto close;
n = write(mode == REDIR_INGRESS ? c1 : p1, "a", 1);
if (n < 0)
FAIL_ERRNO("%s: write", log_prefix);
if (n == 0)
FAIL("%s: incomplete write", log_prefix);
if (n < 1)
goto close;
key = SK_PASS;
err = xbpf_map_lookup_elem(verd_mapfd, &key, &pass);
if (err)
goto close;
if (pass != 1)
FAIL("%s: want pass count 1, have %d", log_prefix, pass);
n = recv_timeout(c0, &b, 1, 0, IO_TIMEOUT_SEC);
if (n < 0)
FAIL_ERRNO("%s: recv_timeout", log_prefix);
if (n == 0)
FAIL("%s: incomplete recv", log_prefix);
close:
xclose(p1);
xclose(c1);
xclose(p0);
xclose(c0);
close_srv:
xclose(s);
}
static void test_skb_redir_to_connected(struct test_sockmap_listen *skel,
struct bpf_map *inner_map, int family,
int sotype)
{
int verdict = bpf_program__fd(skel->progs.prog_stream_verdict);
int parser = bpf_program__fd(skel->progs.prog_stream_parser);
int verdict_map = bpf_map__fd(skel->maps.verdict_map);
int sock_map = bpf_map__fd(inner_map);
int err;
err = xbpf_prog_attach(parser, sock_map, BPF_SK_SKB_STREAM_PARSER, 0);
if (err)
return;
err = xbpf_prog_attach(verdict, sock_map, BPF_SK_SKB_STREAM_VERDICT, 0);
if (err)
goto detach;
redir_to_connected(family, sotype, sock_map, verdict_map,
REDIR_INGRESS);
xbpf_prog_detach2(verdict, sock_map, BPF_SK_SKB_STREAM_VERDICT);
detach:
xbpf_prog_detach2(parser, sock_map, BPF_SK_SKB_STREAM_PARSER);
}
static void test_msg_redir_to_connected(struct test_sockmap_listen *skel,
struct bpf_map *inner_map, int family,
int sotype)
{
int verdict = bpf_program__fd(skel->progs.prog_msg_verdict);
int verdict_map = bpf_map__fd(skel->maps.verdict_map);
int sock_map = bpf_map__fd(inner_map);
int err;
err = xbpf_prog_attach(verdict, sock_map, BPF_SK_MSG_VERDICT, 0);
if (err)
return;
redir_to_connected(family, sotype, sock_map, verdict_map, REDIR_EGRESS);
xbpf_prog_detach2(verdict, sock_map, BPF_SK_MSG_VERDICT);
}
static void redir_to_listening(int family, int sotype, int sock_mapfd,
int verd_mapfd, enum redir_mode mode)
{
const char *log_prefix = redir_mode_str(mode);
struct sockaddr_storage addr;
int s, c, p, err, n;
unsigned int drop;
socklen_t len;
u32 key;
zero_verdict_count(verd_mapfd);
s = socket_loopback(family, sotype | SOCK_NONBLOCK);
if (s < 0)
return;
len = sizeof(addr);
err = xgetsockname(s, sockaddr(&addr), &len);
if (err)
goto close_srv;
c = xsocket(family, sotype, 0);
if (c < 0)
goto close_srv;
err = xconnect(c, sockaddr(&addr), len);
if (err)
goto close_cli;
p = xaccept_nonblock(s, NULL, NULL);
if (p < 0)
goto close_cli;
err = add_to_sockmap(sock_mapfd, s, p);
if (err)
goto close_peer;
n = write(mode == REDIR_INGRESS ? c : p, "a", 1);
if (n < 0 && errno != EACCES)
FAIL_ERRNO("%s: write", log_prefix);
if (n == 0)
FAIL("%s: incomplete write", log_prefix);
if (n < 1)
goto close_peer;
key = SK_DROP;
err = xbpf_map_lookup_elem(verd_mapfd, &key, &drop);
if (err)
goto close_peer;
if (drop != 1)
FAIL("%s: want drop count 1, have %d", log_prefix, drop);
close_peer:
xclose(p);
close_cli:
xclose(c);
close_srv:
xclose(s);
}
static void test_skb_redir_to_listening(struct test_sockmap_listen *skel,
struct bpf_map *inner_map, int family,
int sotype)
{
int verdict = bpf_program__fd(skel->progs.prog_stream_verdict);
int parser = bpf_program__fd(skel->progs.prog_stream_parser);
int verdict_map = bpf_map__fd(skel->maps.verdict_map);
int sock_map = bpf_map__fd(inner_map);
int err;
err = xbpf_prog_attach(parser, sock_map, BPF_SK_SKB_STREAM_PARSER, 0);
if (err)
return;
err = xbpf_prog_attach(verdict, sock_map, BPF_SK_SKB_STREAM_VERDICT, 0);
if (err)
goto detach;
redir_to_listening(family, sotype, sock_map, verdict_map,
REDIR_INGRESS);
xbpf_prog_detach2(verdict, sock_map, BPF_SK_SKB_STREAM_VERDICT);
detach:
xbpf_prog_detach2(parser, sock_map, BPF_SK_SKB_STREAM_PARSER);
}
static void test_msg_redir_to_listening(struct test_sockmap_listen *skel,
struct bpf_map *inner_map, int family,
int sotype)
{
int verdict = bpf_program__fd(skel->progs.prog_msg_verdict);
int verdict_map = bpf_map__fd(skel->maps.verdict_map);
int sock_map = bpf_map__fd(inner_map);
int err;
err = xbpf_prog_attach(verdict, sock_map, BPF_SK_MSG_VERDICT, 0);
if (err)
return;
redir_to_listening(family, sotype, sock_map, verdict_map, REDIR_EGRESS);
xbpf_prog_detach2(verdict, sock_map, BPF_SK_MSG_VERDICT);
}
static void redir_partial(int family, int sotype, int sock_map, int parser_map)
{
int s, c0, c1, p0, p1;
int err, n, key, value;
char buf[] = "abc";
key = 0;
value = sizeof(buf) - 1;
err = xbpf_map_update_elem(parser_map, &key, &value, 0);
if (err)
return;
s = socket_loopback(family, sotype | SOCK_NONBLOCK);
if (s < 0)
goto clean_parser_map;
err = create_socket_pairs(s, family, sotype, &c0, &c1, &p0, &p1);
if (err)
goto close_srv;
err = add_to_sockmap(sock_map, p0, p1);
if (err)
goto close;
n = xsend(c1, buf, sizeof(buf), 0);
if (n < sizeof(buf))
FAIL("incomplete write");
n = xrecv_nonblock(c0, buf, sizeof(buf), 0);
if (n != sizeof(buf) - 1)
FAIL("expect %zu, received %d", sizeof(buf) - 1, n);
close:
xclose(c0);
xclose(p0);
xclose(c1);
xclose(p1);
close_srv:
xclose(s);
clean_parser_map:
key = 0;
value = 0;
xbpf_map_update_elem(parser_map, &key, &value, 0);
}
static void test_skb_redir_partial(struct test_sockmap_listen *skel,
struct bpf_map *inner_map, int family,
int sotype)
{
int verdict = bpf_program__fd(skel->progs.prog_stream_verdict);
int parser = bpf_program__fd(skel->progs.prog_stream_parser);
int parser_map = bpf_map__fd(skel->maps.parser_map);
int sock_map = bpf_map__fd(inner_map);
int err;
err = xbpf_prog_attach(parser, sock_map, BPF_SK_SKB_STREAM_PARSER, 0);
if (err)
return;
err = xbpf_prog_attach(verdict, sock_map, BPF_SK_SKB_STREAM_VERDICT, 0);
if (err)
goto detach;
redir_partial(family, sotype, sock_map, parser_map);
xbpf_prog_detach2(verdict, sock_map, BPF_SK_SKB_STREAM_VERDICT);
detach:
xbpf_prog_detach2(parser, sock_map, BPF_SK_SKB_STREAM_PARSER);
}
static void test_reuseport_select_listening(int family, int sotype,
int sock_map, int verd_map,
int reuseport_prog)
{
struct sockaddr_storage addr;
unsigned int pass;
int s, c, err;
socklen_t len;
u64 value;
u32 key;
zero_verdict_count(verd_map);
s = socket_loopback_reuseport(family, sotype | SOCK_NONBLOCK,
reuseport_prog);
if (s < 0)
return;
len = sizeof(addr);
err = xgetsockname(s, sockaddr(&addr), &len);
if (err)
goto close_srv;
key = 0;
value = s;
err = xbpf_map_update_elem(sock_map, &key, &value, BPF_NOEXIST);
if (err)
goto close_srv;
c = xsocket(family, sotype, 0);
if (c < 0)
goto close_srv;
err = xconnect(c, sockaddr(&addr), len);
if (err)
goto close_cli;
if (sotype == SOCK_STREAM) {
int p;
p = xaccept_nonblock(s, NULL, NULL);
if (p < 0)
goto close_cli;
xclose(p);
} else {
char b = 'a';
ssize_t n;
n = xsend(c, &b, sizeof(b), 0);
if (n == -1)
goto close_cli;
n = xrecv_nonblock(s, &b, sizeof(b), 0);
if (n == -1)
goto close_cli;
}
key = SK_PASS;
err = xbpf_map_lookup_elem(verd_map, &key, &pass);
if (err)
goto close_cli;
if (pass != 1)
FAIL("want pass count 1, have %d", pass);
close_cli:
xclose(c);
close_srv:
xclose(s);
}
static void test_reuseport_select_connected(int family, int sotype,
int sock_map, int verd_map,
int reuseport_prog)
{
struct sockaddr_storage addr;
int s, c0, c1, p0, err;
unsigned int drop;
socklen_t len;
u64 value;
u32 key;
zero_verdict_count(verd_map);
s = socket_loopback_reuseport(family, sotype, reuseport_prog);
if (s < 0)
return;
/* Populate sock_map[0] to avoid ENOENT on first connection */
key = 0;
value = s;
err = xbpf_map_update_elem(sock_map, &key, &value, BPF_NOEXIST);
if (err)
goto close_srv;
len = sizeof(addr);
err = xgetsockname(s, sockaddr(&addr), &len);
if (err)
goto close_srv;
c0 = xsocket(family, sotype, 0);
if (c0 < 0)
goto close_srv;
err = xconnect(c0, sockaddr(&addr), len);
if (err)
goto close_cli0;
if (sotype == SOCK_STREAM) {
p0 = xaccept_nonblock(s, NULL, NULL);
if (p0 < 0)
goto close_cli0;
} else {
p0 = xsocket(family, sotype, 0);
if (p0 < 0)
goto close_cli0;
len = sizeof(addr);
err = xgetsockname(c0, sockaddr(&addr), &len);
if (err)
goto close_cli0;
err = xconnect(p0, sockaddr(&addr), len);
if (err)
goto close_cli0;
}
/* Update sock_map[0] to redirect to a connected socket */
key = 0;
value = p0;
err = xbpf_map_update_elem(sock_map, &key, &value, BPF_EXIST);
if (err)
goto close_peer0;
c1 = xsocket(family, sotype, 0);
if (c1 < 0)
goto close_peer0;
len = sizeof(addr);
err = xgetsockname(s, sockaddr(&addr), &len);
if (err)
goto close_cli1;
errno = 0;
err = connect(c1, sockaddr(&addr), len);
if (sotype == SOCK_DGRAM) {
char b = 'a';
ssize_t n;
n = xsend(c1, &b, sizeof(b), 0);
if (n == -1)
goto close_cli1;
n = recv_timeout(c1, &b, sizeof(b), 0, IO_TIMEOUT_SEC);
err = n == -1;
}
if (!err || errno != ECONNREFUSED)
FAIL_ERRNO("connect: expected ECONNREFUSED");
key = SK_DROP;
err = xbpf_map_lookup_elem(verd_map, &key, &drop);
if (err)
goto close_cli1;
if (drop != 1)
FAIL("want drop count 1, have %d", drop);
close_cli1:
xclose(c1);
close_peer0:
xclose(p0);
close_cli0:
xclose(c0);
close_srv:
xclose(s);
}
/* Check that redirecting across reuseport groups is not allowed. */
static void test_reuseport_mixed_groups(int family, int sotype, int sock_map,
int verd_map, int reuseport_prog)
{
struct sockaddr_storage addr;
int s1, s2, c, err;
unsigned int drop;
socklen_t len;
u32 key;
zero_verdict_count(verd_map);
/* Create two listeners, each in its own reuseport group */
s1 = socket_loopback_reuseport(family, sotype, reuseport_prog);
if (s1 < 0)
return;
s2 = socket_loopback_reuseport(family, sotype, reuseport_prog);
if (s2 < 0)
goto close_srv1;
err = add_to_sockmap(sock_map, s1, s2);
if (err)
goto close_srv2;
/* Connect to s2, reuseport BPF selects s1 via sock_map[0] */
len = sizeof(addr);
err = xgetsockname(s2, sockaddr(&addr), &len);
if (err)
goto close_srv2;
c = xsocket(family, sotype, 0);
if (c < 0)
goto close_srv2;
err = connect(c, sockaddr(&addr), len);
if (sotype == SOCK_DGRAM) {
char b = 'a';
ssize_t n;
n = xsend(c, &b, sizeof(b), 0);
if (n == -1)
goto close_cli;
n = recv_timeout(c, &b, sizeof(b), 0, IO_TIMEOUT_SEC);
err = n == -1;
}
if (!err || errno != ECONNREFUSED) {
FAIL_ERRNO("connect: expected ECONNREFUSED");
goto close_cli;
}
/* Expect drop, can't redirect outside of reuseport group */
key = SK_DROP;
err = xbpf_map_lookup_elem(verd_map, &key, &drop);
if (err)
goto close_cli;
if (drop != 1)
FAIL("want drop count 1, have %d", drop);
close_cli:
xclose(c);
close_srv2:
xclose(s2);
close_srv1:
xclose(s1);
}
#define TEST(fn, ...) \
{ \
fn, #fn, __VA_ARGS__ \
}
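/* For example, TEST(test_insert_bound, SOCK_STREAM) expands to
 * { test_insert_bound, "test_insert_bound", SOCK_STREAM }, pairing each
 * test function with its stringified name for the subtest label.
 */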
static void test_ops_cleanup(const struct bpf_map *map)
{
int err, mapfd;
u32 key;
mapfd = bpf_map__fd(map);
for (key = 0; key < bpf_map__max_entries(map); key++) {
err = bpf_map_delete_elem(mapfd, &key);
if (err && errno != EINVAL && errno != ENOENT)
FAIL_ERRNO("map_delete: expected EINVAL/ENOENT");
}
}
static const char *family_str(sa_family_t family)
{
switch (family) {
case AF_INET:
return "IPv4";
case AF_INET6:
return "IPv6";
case AF_UNIX:
return "Unix";
case AF_VSOCK:
return "VSOCK";
default:
return "unknown";
}
}
static const char *map_type_str(const struct bpf_map *map)
{
int type;
if (!map)
return "invalid";
type = bpf_map__type(map);
switch (type) {
case BPF_MAP_TYPE_SOCKMAP:
return "sockmap";
case BPF_MAP_TYPE_SOCKHASH:
return "sockhash";
default:
return "unknown";
}
}
static const char *sotype_str(int sotype)
{
switch (sotype) {
case SOCK_DGRAM:
return "UDP";
case SOCK_STREAM:
return "TCP";
default:
return "unknown";
}
}
static void test_ops(struct test_sockmap_listen *skel, struct bpf_map *map,
int family, int sotype)
{
const struct op_test {
void (*fn)(struct test_sockmap_listen *skel,
int family, int sotype, int mapfd);
const char *name;
int sotype;
} tests[] = {
/* insert */
TEST(test_insert_invalid),
TEST(test_insert_opened),
TEST(test_insert_bound, SOCK_STREAM),
TEST(test_insert),
/* delete */
TEST(test_delete_after_insert),
TEST(test_delete_after_close),
/* lookup */
TEST(test_lookup_after_insert),
TEST(test_lookup_after_delete),
TEST(test_lookup_32_bit_value),
/* update */
TEST(test_update_existing),
/* races with insert/delete */
TEST(test_destroy_orphan_child, SOCK_STREAM),
TEST(test_syn_recv_insert_delete, SOCK_STREAM),
TEST(test_race_insert_listen, SOCK_STREAM),
/* child clone */
TEST(test_clone_after_delete, SOCK_STREAM),
TEST(test_accept_after_delete, SOCK_STREAM),
TEST(test_accept_before_delete, SOCK_STREAM),
};
const char *family_name, *map_name, *sotype_name;
const struct op_test *t;
char s[MAX_TEST_NAME];
int map_fd;
family_name = family_str(family);
map_name = map_type_str(map);
sotype_name = sotype_str(sotype);
map_fd = bpf_map__fd(map);
for (t = tests; t < tests + ARRAY_SIZE(tests); t++) {
snprintf(s, sizeof(s), "%s %s %s %s", map_name, family_name,
sotype_name, t->name);
if (t->sotype != 0 && t->sotype != sotype)
continue;
if (!test__start_subtest(s))
continue;
t->fn(skel, family, sotype, map_fd);
test_ops_cleanup(map);
}
}
static void test_redir(struct test_sockmap_listen *skel, struct bpf_map *map,
int family, int sotype)
{
const struct redir_test {
void (*fn)(struct test_sockmap_listen *skel,
struct bpf_map *map, int family, int sotype);
const char *name;
} tests[] = {
TEST(test_skb_redir_to_connected),
TEST(test_skb_redir_to_listening),
TEST(test_skb_redir_partial),
TEST(test_msg_redir_to_connected),
TEST(test_msg_redir_to_listening),
};
const char *family_name, *map_name;
const struct redir_test *t;
char s[MAX_TEST_NAME];
family_name = family_str(family);
map_name = map_type_str(map);
for (t = tests; t < tests + ARRAY_SIZE(tests); t++) {
snprintf(s, sizeof(s), "%s %s %s", map_name, family_name,
t->name);
if (!test__start_subtest(s))
continue;
t->fn(skel, map, family, sotype);
}
}
static void unix_redir_to_connected(int sotype, int sock_mapfd,
int verd_mapfd, enum redir_mode mode)
{
const char *log_prefix = redir_mode_str(mode);
int c0, c1, p0, p1;
unsigned int pass;
int err, n;
int sfd[2];
u32 key;
char b;
zero_verdict_count(verd_mapfd);
if (socketpair(AF_UNIX, sotype | SOCK_NONBLOCK, 0, sfd))
return;
c0 = sfd[0], p0 = sfd[1];
if (socketpair(AF_UNIX, sotype | SOCK_NONBLOCK, 0, sfd))
goto close0;
c1 = sfd[0], p1 = sfd[1];
err = add_to_sockmap(sock_mapfd, p0, p1);
if (err)
goto close;
n = write(c1, "a", 1);
if (n < 0)
FAIL_ERRNO("%s: write", log_prefix);
if (n == 0)
FAIL("%s: incomplete write", log_prefix);
if (n < 1)
goto close;
key = SK_PASS;
err = xbpf_map_lookup_elem(verd_mapfd, &key, &pass);
if (err)
goto close;
if (pass != 1)
FAIL("%s: want pass count 1, have %d", log_prefix, pass);
n = recv_timeout(mode == REDIR_INGRESS ? p0 : c0, &b, 1, 0, IO_TIMEOUT_SEC);
if (n < 0)
FAIL_ERRNO("%s: recv_timeout", log_prefix);
if (n == 0)
FAIL("%s: incomplete recv", log_prefix);
close:
xclose(c1);
xclose(p1);
close0:
xclose(c0);
xclose(p0);
}
static void unix_skb_redir_to_connected(struct test_sockmap_listen *skel,
struct bpf_map *inner_map, int sotype)
{
int verdict = bpf_program__fd(skel->progs.prog_skb_verdict);
int verdict_map = bpf_map__fd(skel->maps.verdict_map);
int sock_map = bpf_map__fd(inner_map);
int err;
err = xbpf_prog_attach(verdict, sock_map, BPF_SK_SKB_VERDICT, 0);
if (err)
return;
skel->bss->test_ingress = false;
unix_redir_to_connected(sotype, sock_map, verdict_map, REDIR_EGRESS);
skel->bss->test_ingress = true;
unix_redir_to_connected(sotype, sock_map, verdict_map, REDIR_INGRESS);
xbpf_prog_detach2(verdict, sock_map, BPF_SK_SKB_VERDICT);
}
static void test_unix_redir(struct test_sockmap_listen *skel, struct bpf_map *map,
int sotype)
{
const char *family_name, *map_name;
char s[MAX_TEST_NAME];
family_name = family_str(AF_UNIX);
map_name = map_type_str(map);
snprintf(s, sizeof(s), "%s %s %s", map_name, family_name, __func__);
if (!test__start_subtest(s))
return;
unix_skb_redir_to_connected(skel, map, sotype);
}
/* Returns two connected loopback vsock sockets */
static int vsock_socketpair_connectible(int sotype, int *v0, int *v1)
{
struct sockaddr_storage addr;
socklen_t len = sizeof(addr);
int s, p, c;
s = socket_loopback(AF_VSOCK, sotype);
if (s < 0)
return -1;
c = xsocket(AF_VSOCK, sotype | SOCK_NONBLOCK, 0);
if (c == -1)
goto close_srv;
if (getsockname(s, sockaddr(&addr), &len) < 0)
goto close_cli;
if (connect(c, sockaddr(&addr), len) < 0 && errno != EINPROGRESS) {
FAIL_ERRNO("connect");
goto close_cli;
}
len = sizeof(addr);
p = accept_timeout(s, sockaddr(&addr), &len, IO_TIMEOUT_SEC);
if (p < 0)
goto close_cli;
if (poll_connect(c, IO_TIMEOUT_SEC) < 0) {
FAIL_ERRNO("poll_connect");
goto close_acc;
}
*v0 = p;
*v1 = c;
return 0;
close_acc:
close(p);
close_cli:
close(c);
close_srv:
close(s);
return -1;
}
static void vsock_unix_redir_connectible(int sock_mapfd, int verd_mapfd,
enum redir_mode mode, int sotype)
{
const char *log_prefix = redir_mode_str(mode);
char a = 'a', b = 'b';
int u0, u1, v0, v1;
int sfd[2];
unsigned int pass;
int err, n;
u32 key;
zero_verdict_count(verd_mapfd);
if (socketpair(AF_UNIX, SOCK_STREAM | SOCK_NONBLOCK, 0, sfd))
return;
u0 = sfd[0];
u1 = sfd[1];
err = vsock_socketpair_connectible(sotype, &v0, &v1);
if (err) {
FAIL("vsock_socketpair_connectible() failed");
goto close_uds;
}
err = add_to_sockmap(sock_mapfd, u0, v0);
if (err) {
FAIL("add_to_sockmap failed");
goto close_vsock;
}
n = write(v1, &a, sizeof(a));
if (n < 0)
FAIL_ERRNO("%s: write", log_prefix);
if (n == 0)
FAIL("%s: incomplete write", log_prefix);
if (n < 1)
goto out;
n = xrecv_nonblock(mode == REDIR_INGRESS ? u0 : u1, &b, sizeof(b), 0);
if (n < 0)
FAIL("%s: recv() err, errno=%d", log_prefix, errno);
if (n == 0)
FAIL("%s: incomplete recv", log_prefix);
if (b != a)
FAIL("%s: vsock socket map failed, %c != %c", log_prefix, a, b);
key = SK_PASS;
err = xbpf_map_lookup_elem(verd_mapfd, &key, &pass);
if (err)
goto out;
if (pass != 1)
FAIL("%s: want pass count 1, have %d", log_prefix, pass);
out:
key = 0;
bpf_map_delete_elem(sock_mapfd, &key);
key = 1;
bpf_map_delete_elem(sock_mapfd, &key);
close_vsock:
close(v0);
close(v1);
close_uds:
close(u0);
close(u1);
}
static void vsock_unix_skb_redir_connectible(struct test_sockmap_listen *skel,
struct bpf_map *inner_map,
int sotype)
{
int verdict = bpf_program__fd(skel->progs.prog_skb_verdict);
int verdict_map = bpf_map__fd(skel->maps.verdict_map);
int sock_map = bpf_map__fd(inner_map);
int err;
err = xbpf_prog_attach(verdict, sock_map, BPF_SK_SKB_VERDICT, 0);
if (err)
return;
skel->bss->test_ingress = false;
vsock_unix_redir_connectible(sock_map, verdict_map, REDIR_EGRESS, sotype);
skel->bss->test_ingress = true;
vsock_unix_redir_connectible(sock_map, verdict_map, REDIR_INGRESS, sotype);
xbpf_prog_detach2(verdict, sock_map, BPF_SK_SKB_VERDICT);
}
static void test_vsock_redir(struct test_sockmap_listen *skel, struct bpf_map *map)
{
const char *family_name, *map_name;
char s[MAX_TEST_NAME];
family_name = family_str(AF_VSOCK);
map_name = map_type_str(map);
snprintf(s, sizeof(s), "%s %s %s", map_name, family_name, __func__);
if (!test__start_subtest(s))
return;
vsock_unix_skb_redir_connectible(skel, map, SOCK_STREAM);
vsock_unix_skb_redir_connectible(skel, map, SOCK_SEQPACKET);
}
static void test_reuseport(struct test_sockmap_listen *skel,
struct bpf_map *map, int family, int sotype)
{
const struct reuseport_test {
void (*fn)(int family, int sotype, int socket_map,
int verdict_map, int reuseport_prog);
const char *name;
int sotype;
} tests[] = {
TEST(test_reuseport_select_listening),
TEST(test_reuseport_select_connected),
TEST(test_reuseport_mixed_groups),
};
int socket_map, verdict_map, reuseport_prog;
const char *family_name, *map_name, *sotype_name;
const struct reuseport_test *t;
char s[MAX_TEST_NAME];
family_name = family_str(family);
map_name = map_type_str(map);
sotype_name = sotype_str(sotype);
socket_map = bpf_map__fd(map);
verdict_map = bpf_map__fd(skel->maps.verdict_map);
reuseport_prog = bpf_program__fd(skel->progs.prog_reuseport);
for (t = tests; t < tests + ARRAY_SIZE(tests); t++) {
snprintf(s, sizeof(s), "%s %s %s %s", map_name, family_name,
sotype_name, t->name);
if (t->sotype != 0 && t->sotype != sotype)
continue;
if (!test__start_subtest(s))
continue;
t->fn(family, sotype, socket_map, verdict_map, reuseport_prog);
}
}
static int inet_socketpair(int family, int type, int *s, int *c)
{
struct sockaddr_storage addr;
socklen_t len;
int p0, c0;
int err;
p0 = socket_loopback(family, type | SOCK_NONBLOCK);
if (p0 < 0)
return p0;
len = sizeof(addr);
err = xgetsockname(p0, sockaddr(&addr), &len);
if (err)
goto close_peer0;
c0 = xsocket(family, type | SOCK_NONBLOCK, 0);
if (c0 < 0) {
err = c0;
goto close_peer0;
}
err = xconnect(c0, sockaddr(&addr), len);
if (err)
goto close_cli0;
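/* Learn the client's ephemeral address and connect the peer socket
 * back to it; for SOCK_DGRAM this pins a peer on both ends so plain
 * write()/recv() can be used by the redirect helpers.
 */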
err = xgetsockname(c0, sockaddr(&addr), &len);
if (err)
goto close_cli0;
err = xconnect(p0, sockaddr(&addr), len);
if (err)
goto close_cli0;
*s = p0;
*c = c0;
return 0;
close_cli0:
xclose(c0);
close_peer0:
xclose(p0);
return err;
}
static void udp_redir_to_connected(int family, int sock_mapfd, int verd_mapfd,
enum redir_mode mode)
{
const char *log_prefix = redir_mode_str(mode);
int c0, c1, p0, p1;
unsigned int pass;
int err, n;
u32 key;
char b;
zero_verdict_count(verd_mapfd);
err = inet_socketpair(family, SOCK_DGRAM, &p0, &c0);
if (err)
return;
err = inet_socketpair(family, SOCK_DGRAM, &p1, &c1);
if (err)
goto close_cli0;
err = add_to_sockmap(sock_mapfd, p0, p1);
if (err)
goto close_cli1;
n = write(c1, "a", 1);
if (n < 0)
FAIL_ERRNO("%s: write", log_prefix);
if (n == 0)
FAIL("%s: incomplete write", log_prefix);
if (n < 1)
goto close_cli1;
key = SK_PASS;
err = xbpf_map_lookup_elem(verd_mapfd, &key, &pass);
if (err)
goto close_cli1;
if (pass != 1)
FAIL("%s: want pass count 1, have %d", log_prefix, pass);
n = recv_timeout(mode == REDIR_INGRESS ? p0 : c0, &b, 1, 0, IO_TIMEOUT_SEC);
if (n < 0)
FAIL_ERRNO("%s: recv_timeout", log_prefix);
if (n == 0)
FAIL("%s: incomplete recv", log_prefix);
close_cli1:
xclose(c1);
xclose(p1);
close_cli0:
xclose(c0);
xclose(p0);
}
static void udp_skb_redir_to_connected(struct test_sockmap_listen *skel,
struct bpf_map *inner_map, int family)
{
int verdict = bpf_program__fd(skel->progs.prog_skb_verdict);
int verdict_map = bpf_map__fd(skel->maps.verdict_map);
int sock_map = bpf_map__fd(inner_map);
int err;
err = xbpf_prog_attach(verdict, sock_map, BPF_SK_SKB_VERDICT, 0);
if (err)
return;
skel->bss->test_ingress = false;
udp_redir_to_connected(family, sock_map, verdict_map, REDIR_EGRESS);
skel->bss->test_ingress = true;
udp_redir_to_connected(family, sock_map, verdict_map, REDIR_INGRESS);
xbpf_prog_detach2(verdict, sock_map, BPF_SK_SKB_VERDICT);
}
static void test_udp_redir(struct test_sockmap_listen *skel, struct bpf_map *map,
int family)
{
const char *family_name, *map_name;
char s[MAX_TEST_NAME];
family_name = family_str(family);
map_name = map_type_str(map);
snprintf(s, sizeof(s), "%s %s %s", map_name, family_name, __func__);
if (!test__start_subtest(s))
return;
udp_skb_redir_to_connected(skel, map, family);
}
static void inet_unix_redir_to_connected(int family, int type, int sock_mapfd,
int verd_mapfd, enum redir_mode mode)
{
const char *log_prefix = redir_mode_str(mode);
int c0, c1, p0, p1;
unsigned int pass;
int err, n;
int sfd[2];
u32 key;
char b;
zero_verdict_count(verd_mapfd);
if (socketpair(AF_UNIX, SOCK_DGRAM | SOCK_NONBLOCK, 0, sfd))
return;
c0 = sfd[0], p0 = sfd[1];
err = inet_socketpair(family, SOCK_DGRAM, &p1, &c1);
if (err)
goto close;
err = add_to_sockmap(sock_mapfd, p0, p1);
if (err)
goto close_cli1;
n = write(c1, "a", 1);
if (n < 0)
FAIL_ERRNO("%s: write", log_prefix);
if (n == 0)
FAIL("%s: incomplete write", log_prefix);
if (n < 1)
goto close_cli1;
key = SK_PASS;
err = xbpf_map_lookup_elem(verd_mapfd, &key, &pass);
if (err)
goto close_cli1;
if (pass != 1)
FAIL("%s: want pass count 1, have %d", log_prefix, pass);
n = recv_timeout(mode == REDIR_INGRESS ? p0 : c0, &b, 1, 0, IO_TIMEOUT_SEC);
if (n < 0)
FAIL_ERRNO("%s: recv_timeout", log_prefix);
if (n == 0)
FAIL("%s: incomplete recv", log_prefix);
close_cli1:
xclose(c1);
xclose(p1);
close:
xclose(c0);
xclose(p0);
}
static void inet_unix_skb_redir_to_connected(struct test_sockmap_listen *skel,
struct bpf_map *inner_map, int family)
{
int verdict = bpf_program__fd(skel->progs.prog_skb_verdict);
int verdict_map = bpf_map__fd(skel->maps.verdict_map);
int sock_map = bpf_map__fd(inner_map);
int err;
err = xbpf_prog_attach(verdict, sock_map, BPF_SK_SKB_VERDICT, 0);
if (err)
return;
skel->bss->test_ingress = false;
inet_unix_redir_to_connected(family, SOCK_DGRAM, sock_map, verdict_map,
REDIR_EGRESS);
inet_unix_redir_to_connected(family, SOCK_STREAM, sock_map, verdict_map,
REDIR_EGRESS);
skel->bss->test_ingress = true;
inet_unix_redir_to_connected(family, SOCK_DGRAM, sock_map, verdict_map,
REDIR_INGRESS);
inet_unix_redir_to_connected(family, SOCK_STREAM, sock_map, verdict_map,
REDIR_INGRESS);
xbpf_prog_detach2(verdict, sock_map, BPF_SK_SKB_VERDICT);
}
static void unix_inet_redir_to_connected(int family, int type, int sock_mapfd,
int verd_mapfd, enum redir_mode mode)
{
const char *log_prefix = redir_mode_str(mode);
int c0, c1, p0, p1;
unsigned int pass;
int err, n;
int sfd[2];
u32 key;
char b;
zero_verdict_count(verd_mapfd);
err = inet_socketpair(family, SOCK_DGRAM, &p0, &c0);
if (err)
return;
if (socketpair(AF_UNIX, SOCK_DGRAM | SOCK_NONBLOCK, 0, sfd))
goto close_cli0;
c1 = sfd[0], p1 = sfd[1];
err = add_to_sockmap(sock_mapfd, p0, p1);
if (err)
goto close;
n = write(c1, "a", 1);
if (n < 0)
FAIL_ERRNO("%s: write", log_prefix);
if (n == 0)
FAIL("%s: incomplete write", log_prefix);
if (n < 1)
goto close;
key = SK_PASS;
err = xbpf_map_lookup_elem(verd_mapfd, &key, &pass);
if (err)
goto close;
if (pass != 1)
FAIL("%s: want pass count 1, have %d", log_prefix, pass);
n = recv_timeout(mode == REDIR_INGRESS ? p0 : c0, &b, 1, 0, IO_TIMEOUT_SEC);
if (n < 0)
FAIL_ERRNO("%s: recv_timeout", log_prefix);
if (n == 0)
FAIL("%s: incomplete recv", log_prefix);
close:
xclose(c1);
xclose(p1);
close_cli0:
xclose(c0);
xclose(p0);
}
static void unix_inet_skb_redir_to_connected(struct test_sockmap_listen *skel,
struct bpf_map *inner_map, int family)
{
int verdict = bpf_program__fd(skel->progs.prog_skb_verdict);
int verdict_map = bpf_map__fd(skel->maps.verdict_map);
int sock_map = bpf_map__fd(inner_map);
int err;
err = xbpf_prog_attach(verdict, sock_map, BPF_SK_SKB_VERDICT, 0);
if (err)
return;
skel->bss->test_ingress = false;
unix_inet_redir_to_connected(family, SOCK_DGRAM, sock_map, verdict_map,
REDIR_EGRESS);
unix_inet_redir_to_connected(family, SOCK_STREAM, sock_map, verdict_map,
REDIR_EGRESS);
skel->bss->test_ingress = true;
unix_inet_redir_to_connected(family, SOCK_DGRAM, sock_map, verdict_map,
REDIR_INGRESS);
unix_inet_redir_to_connected(family, SOCK_STREAM, sock_map, verdict_map,
REDIR_INGRESS);
xbpf_prog_detach2(verdict, sock_map, BPF_SK_SKB_VERDICT);
}
static void test_udp_unix_redir(struct test_sockmap_listen *skel, struct bpf_map *map,
int family)
{
const char *family_name, *map_name;
char s[MAX_TEST_NAME];
family_name = family_str(family);
map_name = map_type_str(map);
snprintf(s, sizeof(s), "%s %s %s", map_name, family_name, __func__);
if (!test__start_subtest(s))
return;
inet_unix_skb_redir_to_connected(skel, map, family);
unix_inet_skb_redir_to_connected(skel, map, family);
}
static void run_tests(struct test_sockmap_listen *skel, struct bpf_map *map,
int family)
{
test_ops(skel, map, family, SOCK_STREAM);
test_ops(skel, map, family, SOCK_DGRAM);
test_redir(skel, map, family, SOCK_STREAM);
test_reuseport(skel, map, family, SOCK_STREAM);
test_reuseport(skel, map, family, SOCK_DGRAM);
test_udp_redir(skel, map, family);
test_udp_unix_redir(skel, map, family);
}
void serial_test_sockmap_listen(void)
{
struct test_sockmap_listen *skel;
skel = test_sockmap_listen__open_and_load();
if (!skel) {
FAIL("skeleton open/load failed");
return;
}
skel->bss->test_sockmap = true;
run_tests(skel, skel->maps.sock_map, AF_INET);
run_tests(skel, skel->maps.sock_map, AF_INET6);
test_unix_redir(skel, skel->maps.sock_map, SOCK_DGRAM);
test_unix_redir(skel, skel->maps.sock_map, SOCK_STREAM);
test_vsock_redir(skel, skel->maps.sock_map);
skel->bss->test_sockmap = false;
run_tests(skel, skel->maps.sock_hash, AF_INET);
run_tests(skel, skel->maps.sock_hash, AF_INET6);
test_unix_redir(skel, skel->maps.sock_hash, SOCK_DGRAM);
test_unix_redir(skel, skel->maps.sock_hash, SOCK_STREAM);
test_vsock_redir(skel, skel->maps.sock_hash);
test_sockmap_listen__destroy(skel);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/sockmap_listen.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2018 Facebook
// Copyright (c) 2019 Cloudflare
// Copyright (c) 2020 Isovalent, Inc.
/*
 * Test that the socket assign program is able to redirect traffic towards a
 * socket, regardless of whether the destination port or address of the
 * traffic matches the socket's binding.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <signal.h>
#include <stdlib.h>
#include <unistd.h>
#include "test_progs.h"
#define BIND_PORT 1234
#define CONNECT_PORT 4321
#define TEST_DADDR (0xC0A80203)
#define NS_SELF "/proc/self/ns/net"
#define SERVER_MAP_PATH "/sys/fs/bpf/tc/globals/server_map"
static const struct timeval timeo_sec = { .tv_sec = 3 };
static const size_t timeo_optlen = sizeof(timeo_sec);
static int stop, duration;
static bool
configure_stack(void)
{
char tc_version[128];
char tc_cmd[BUFSIZ];
char *prog;
FILE *tc;
/* Check whether tc is built with libbpf. */
tc = popen("tc -V", "r");
if (CHECK_FAIL(!tc))
return false;
if (CHECK_FAIL(!fgets(tc_version, sizeof(tc_version), tc)))
return false;
if (strstr(tc_version, ", libbpf "))
prog = "test_sk_assign_libbpf.bpf.o";
else
prog = "test_sk_assign.bpf.o";
if (CHECK_FAIL(pclose(tc)))
return false;
/* Move to a new networking namespace */
if (CHECK_FAIL(unshare(CLONE_NEWNET)))
return false;
/* Configure necessary links, routes */
if (CHECK_FAIL(system("ip link set dev lo up")))
return false;
if (CHECK_FAIL(system("ip route add local default dev lo")))
return false;
if (CHECK_FAIL(system("ip -6 route add local default dev lo")))
return false;
/* Load qdisc, BPF program */
if (CHECK_FAIL(system("tc qdisc add dev lo clsact")))
return false;
sprintf(tc_cmd, "%s %s %s %s %s", "tc filter add dev lo ingress bpf",
"direct-action object-file", prog,
"section tc",
(env.verbosity < VERBOSE_VERY) ? " 2>/dev/null" : "verbose");
if (CHECK(system(tc_cmd), "BPF load failed;",
"run with -vv for more info\n"))
return false;
return true;
}
static int
start_server(const struct sockaddr *addr, socklen_t len, int type)
{
int fd;
fd = socket(addr->sa_family, type, 0);
if (CHECK_FAIL(fd == -1))
goto out;
if (CHECK_FAIL(setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, &timeo_sec,
timeo_optlen)))
goto close_out;
if (CHECK_FAIL(bind(fd, addr, len) == -1))
goto close_out;
if (type == SOCK_STREAM && CHECK_FAIL(listen(fd, 128) == -1))
goto close_out;
goto out;
close_out:
close(fd);
fd = -1;
out:
return fd;
}
static int
connect_to_server(const struct sockaddr *addr, socklen_t len, int type)
{
int fd = -1;
fd = socket(addr->sa_family, type, 0);
if (CHECK_FAIL(fd == -1))
goto out;
if (CHECK_FAIL(setsockopt(fd, SOL_SOCKET, SO_SNDTIMEO, &timeo_sec,
timeo_optlen)))
goto close_out;
if (CHECK_FAIL(connect(fd, addr, len)))
goto close_out;
goto out;
close_out:
close(fd);
fd = -1;
out:
return fd;
}
static in_port_t
get_port(int fd)
{
struct sockaddr_storage ss;
socklen_t slen = sizeof(ss);
in_port_t port = 0;
if (CHECK_FAIL(getsockname(fd, (struct sockaddr *)&ss, &slen)))
return port;
switch (ss.ss_family) {
case AF_INET:
port = ((struct sockaddr_in *)&ss)->sin_port;
break;
case AF_INET6:
port = ((struct sockaddr_in6 *)&ss)->sin6_port;
break;
default:
CHECK(1, "Invalid address family", "%d\n", ss.ss_family);
}
return port;
}
static ssize_t
rcv_msg(int srv_client, int type)
{
char buf[BUFSIZ];
if (type == SOCK_STREAM)
return read(srv_client, &buf, sizeof(buf));
else
return recvfrom(srv_client, &buf, sizeof(buf), 0, NULL, NULL);
}
static int
run_test(int server_fd, const struct sockaddr *addr, socklen_t len, int type)
{
int client = -1, srv_client = -1;
char buf[] = "testing";
in_port_t port;
int ret = 1;
client = connect_to_server(addr, len, type);
if (client == -1) {
perror("Cannot connect to server");
goto out;
}
if (type == SOCK_STREAM) {
srv_client = accept(server_fd, NULL, NULL);
if (CHECK_FAIL(srv_client == -1)) {
perror("Can't accept connection");
goto out;
}
} else {
srv_client = server_fd;
}
if (CHECK_FAIL(write(client, buf, sizeof(buf)) != sizeof(buf))) {
perror("Can't write on client");
goto out;
}
if (CHECK_FAIL(rcv_msg(srv_client, type) != sizeof(buf))) {
perror("Can't read on server");
goto out;
}
port = get_port(srv_client);
if (CHECK_FAIL(!port))
goto out;
/* SOCK_STREAM is connected via accept(), so the server's local address
* will be the CONNECT_PORT rather than the BIND port that corresponds
* to the listen socket. SOCK_DGRAM on the other hand is connectionless
* so we can't really do the same check there; the server doesn't ever
* create a socket with CONNECT_PORT.
*/
if (type == SOCK_STREAM &&
CHECK(port != htons(CONNECT_PORT), "Expected", "port %u but got %u",
CONNECT_PORT, ntohs(port)))
goto out;
else if (type == SOCK_DGRAM &&
CHECK(port != htons(BIND_PORT), "Expected",
"port %u but got %u", BIND_PORT, ntohs(port)))
goto out;
ret = 0;
out:
close(client);
if (srv_client != server_fd)
close(srv_client);
if (ret)
WRITE_ONCE(stop, 1);
return ret;
}
static void
prepare_addr(struct sockaddr *addr, int family, __u16 port, bool rewrite_addr)
{
struct sockaddr_in *addr4;
struct sockaddr_in6 *addr6;
switch (family) {
case AF_INET:
addr4 = (struct sockaddr_in *)addr;
memset(addr4, 0, sizeof(*addr4));
addr4->sin_family = family;
addr4->sin_port = htons(port);
if (rewrite_addr)
addr4->sin_addr.s_addr = htonl(TEST_DADDR);
else
addr4->sin_addr.s_addr = htonl(INADDR_LOOPBACK);
break;
case AF_INET6:
addr6 = (struct sockaddr_in6 *)addr;
memset(addr6, 0, sizeof(*addr6));
addr6->sin6_family = family;
addr6->sin6_port = htons(port);
addr6->sin6_addr = in6addr_loopback;
if (rewrite_addr)
addr6->sin6_addr.s6_addr32[3] = htonl(TEST_DADDR);
break;
default:
fprintf(stderr, "Invalid family %d", family);
}
}
struct test_sk_cfg {
const char *name;
int family;
struct sockaddr *addr;
socklen_t len;
int type;
bool rewrite_addr;
};
#define TEST(NAME, FAMILY, TYPE, REWRITE) \
{ \
.name = NAME, \
.family = FAMILY, \
.addr = (FAMILY == AF_INET) ? (struct sockaddr *)&addr4 \
: (struct sockaddr *)&addr6, \
.len = (FAMILY == AF_INET) ? sizeof(addr4) : sizeof(addr6), \
.type = TYPE, \
.rewrite_addr = REWRITE, \
}
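/* For example, TEST("ipv4 tcp port redir", AF_INET, SOCK_STREAM, false)
 * builds a test_sk_cfg that points .addr at the shared addr4 buffer and
 * leaves the destination address unrewritten.
 */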
void test_sk_assign(void)
{
struct sockaddr_in addr4;
struct sockaddr_in6 addr6;
struct test_sk_cfg tests[] = {
TEST("ipv4 tcp port redir", AF_INET, SOCK_STREAM, false),
TEST("ipv4 tcp addr redir", AF_INET, SOCK_STREAM, true),
TEST("ipv6 tcp port redir", AF_INET6, SOCK_STREAM, false),
TEST("ipv6 tcp addr redir", AF_INET6, SOCK_STREAM, true),
TEST("ipv4 udp port redir", AF_INET, SOCK_DGRAM, false),
TEST("ipv4 udp addr redir", AF_INET, SOCK_DGRAM, true),
TEST("ipv6 udp port redir", AF_INET6, SOCK_DGRAM, false),
TEST("ipv6 udp addr redir", AF_INET6, SOCK_DGRAM, true),
};
__s64 server = -1;
int server_map;
int self_net;
int i;
self_net = open(NS_SELF, O_RDONLY);
if (CHECK_FAIL(self_net < 0)) {
perror("Unable to open "NS_SELF);
return;
}
if (!configure_stack()) {
perror("configure_stack");
goto cleanup;
}
server_map = bpf_obj_get(SERVER_MAP_PATH);
if (CHECK_FAIL(server_map < 0)) {
perror("Unable to open " SERVER_MAP_PATH);
goto cleanup;
}
for (i = 0; i < ARRAY_SIZE(tests) && !READ_ONCE(stop); i++) {
struct test_sk_cfg *test = &tests[i];
const struct sockaddr *addr;
const int zero = 0;
int err;
if (!test__start_subtest(test->name))
continue;
prepare_addr(test->addr, test->family, BIND_PORT, false);
addr = (const struct sockaddr *)test->addr;
server = start_server(addr, test->len, test->type);
if (server == -1)
goto close;
err = bpf_map_update_elem(server_map, &zero, &server, BPF_ANY);
if (CHECK_FAIL(err)) {
perror("Unable to update server_map");
goto close;
}
/* connect to unbound ports */
prepare_addr(test->addr, test->family, CONNECT_PORT,
test->rewrite_addr);
if (run_test(server, addr, test->len, test->type))
goto close;
close(server);
server = -1;
}
close:
close(server);
close(server_map);
cleanup:
if (CHECK_FAIL(unlink(SERVER_MAP_PATH)))
perror("Unable to unlink " SERVER_MAP_PATH);
if (CHECK_FAIL(setns(self_net, CLONE_NEWNET)))
perror("Failed to setns("NS_SELF")");
close(self_net);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/sk_assign.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2020 Google LLC.
// Copyright (c) 2018 Facebook
#include <test_progs.h>
#include "socket_cookie_prog.skel.h"
#include "network_helpers.h"
static int duration;
struct socket_cookie {
__u64 cookie_key;
__u32 cookie_value;
};
void test_socket_cookie(void)
{
int server_fd = 0, client_fd = 0, cgroup_fd = 0, err = 0;
socklen_t addr_len = sizeof(struct sockaddr_in6);
struct socket_cookie_prog *skel;
__u32 cookie_expected_value;
struct sockaddr_in6 addr;
struct socket_cookie val;
skel = socket_cookie_prog__open_and_load();
if (!ASSERT_OK_PTR(skel, "skel_open"))
return;
cgroup_fd = test__join_cgroup("/socket_cookie");
if (CHECK(cgroup_fd < 0, "join_cgroup", "cgroup creation failed\n"))
goto out;
skel->links.set_cookie = bpf_program__attach_cgroup(
skel->progs.set_cookie, cgroup_fd);
if (!ASSERT_OK_PTR(skel->links.set_cookie, "prog_attach"))
goto close_cgroup_fd;
skel->links.update_cookie_sockops = bpf_program__attach_cgroup(
skel->progs.update_cookie_sockops, cgroup_fd);
if (!ASSERT_OK_PTR(skel->links.update_cookie_sockops, "prog_attach"))
goto close_cgroup_fd;
skel->links.update_cookie_tracing = bpf_program__attach(
skel->progs.update_cookie_tracing);
if (!ASSERT_OK_PTR(skel->links.update_cookie_tracing, "prog_attach"))
goto close_cgroup_fd;
server_fd = start_server(AF_INET6, SOCK_STREAM, "::1", 0, 0);
if (CHECK(server_fd < 0, "start_server", "errno %d\n", errno))
goto close_cgroup_fd;
client_fd = connect_to_fd(server_fd, 0);
if (CHECK(client_fd < 0, "connect_to_fd", "errno %d\n", errno))
goto close_server_fd;
err = bpf_map_lookup_elem(bpf_map__fd(skel->maps.socket_cookies),
&client_fd, &val);
if (!ASSERT_OK(err, "map_lookup(socket_cookies)"))
goto close_client_fd;
err = getsockname(client_fd, (struct sockaddr *)&addr, &addr_len);
if (!ASSERT_OK(err, "getsockname"))
goto close_client_fd;
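/* Mirror the cookie encoding used on the BPF side (see
 * socket_cookie_prog.c): 0xFF is set at connect time and the client's
 * local port is OR'ed into the upper bits afterwards.
 */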
cookie_expected_value = (ntohs(addr.sin6_port) << 8) | 0xFF;
ASSERT_EQ(val.cookie_value, cookie_expected_value, "cookie_value");
close_client_fd:
close(client_fd);
close_server_fd:
close(server_fd);
close_cgroup_fd:
close(cgroup_fd);
out:
socket_cookie_prog__destroy(skel);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/socket_cookie.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#include <test_progs.h>
#include <network_helpers.h>
#include "skb_pkt_end.skel.h"
static int sanity_run(struct bpf_program *prog)
{
int err, prog_fd;
LIBBPF_OPTS(bpf_test_run_opts, topts,
.data_in = &pkt_v4,
.data_size_in = sizeof(pkt_v4),
.repeat = 1,
);
prog_fd = bpf_program__fd(prog);
err = bpf_prog_test_run_opts(prog_fd, &topts);
if (!ASSERT_OK(err, "test_run"))
return -1;
if (!ASSERT_EQ(topts.retval, 123, "test_run retval"))
return -1;
return 0;
}
void test_test_skb_pkt_end(void)
{
struct skb_pkt_end *skb_pkt_end_skel = NULL;
__u32 duration = 0;
int err;
skb_pkt_end_skel = skb_pkt_end__open_and_load();
if (CHECK(!skb_pkt_end_skel, "skb_pkt_end_skel_load", "skb_pkt_end skeleton failed\n"))
goto cleanup;
err = skb_pkt_end__attach(skb_pkt_end_skel);
if (CHECK(err, "skb_pkt_end_attach", "skb_pkt_end attach failed: %d\n", err))
goto cleanup;
if (sanity_run(skb_pkt_end_skel->progs.main_prog))
goto cleanup;
cleanup:
skb_pkt_end__destroy(skb_pkt_end_skel);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/test_skb_pkt_end.c |
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
void test_stacktrace_map_raw_tp(void)
{
const char *prog_name = "oncpu";
int control_map_fd, stackid_hmap_fd, stackmap_fd;
const char *file = "./test_stacktrace_map.bpf.o";
__u32 key, val, duration = 0;
int err, prog_fd;
struct bpf_program *prog;
struct bpf_object *obj;
struct bpf_link *link = NULL;
err = bpf_prog_test_load(file, BPF_PROG_TYPE_RAW_TRACEPOINT, &obj, &prog_fd);
if (CHECK(err, "prog_load raw tp", "err %d errno %d\n", err, errno))
return;
prog = bpf_object__find_program_by_name(obj, prog_name);
if (CHECK(!prog, "find_prog", "prog '%s' not found\n", prog_name))
goto close_prog;
link = bpf_program__attach_raw_tracepoint(prog, "sched_switch");
if (!ASSERT_OK_PTR(link, "attach_raw_tp"))
goto close_prog;
/* find map fds */
control_map_fd = bpf_find_map(__func__, obj, "control_map");
if (CHECK_FAIL(control_map_fd < 0))
goto close_prog;
stackid_hmap_fd = bpf_find_map(__func__, obj, "stackid_hmap");
if (CHECK_FAIL(stackid_hmap_fd < 0))
goto close_prog;
stackmap_fd = bpf_find_map(__func__, obj, "stackmap");
if (CHECK_FAIL(stackmap_fd < 0))
goto close_prog;
/* give the bpf program some time to run */
sleep(1);
/* disable stack trace collection */
key = 0;
val = 1;
bpf_map_update_elem(control_map_fd, &key, &val, 0);
/* for every element in stackid_hmap, we can find a corresponding one
 * in stackmap, and vice versa.
 */
err = compare_map_keys(stackid_hmap_fd, stackmap_fd);
if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap",
"err %d errno %d\n", err, errno))
goto close_prog;
err = compare_map_keys(stackmap_fd, stackid_hmap_fd);
if (CHECK(err, "compare_map_keys stackmap vs. stackid_hmap",
"err %d errno %d\n", err, errno))
goto close_prog;
close_prog:
bpf_link__destroy(link);
bpf_object__close(obj);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/stacktrace_map_raw_tp.c |
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include <network_helpers.h>
#include <net/if.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>
#include <linux/if_link.h>
#include <linux/ipv6.h>
#include <linux/in6.h>
#include <linux/udp.h>
#include <bpf/bpf_endian.h>
#include <uapi/linux/netdev.h>
#include "test_xdp_do_redirect.skel.h"
struct udp_packet {
struct ethhdr eth;
struct ipv6hdr iph;
struct udphdr udp;
__u8 payload[64 - sizeof(struct udphdr)
- sizeof(struct ethhdr) - sizeof(struct ipv6hdr)];
} __packed;
static struct udp_packet pkt_udp = {
.eth.h_proto = __bpf_constant_htons(ETH_P_IPV6),
.eth.h_dest = {0x00, 0x11, 0x22, 0x33, 0x44, 0x55},
.eth.h_source = {0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb},
.iph.version = 6,
.iph.nexthdr = IPPROTO_UDP,
.iph.payload_len = bpf_htons(sizeof(struct udp_packet)
- offsetof(struct udp_packet, udp)),
.iph.hop_limit = 2,
.iph.saddr.s6_addr16 = {bpf_htons(0xfc00), 0, 0, 0, 0, 0, 0, bpf_htons(1)},
.iph.daddr.s6_addr16 = {bpf_htons(0xfc00), 0, 0, 0, 0, 0, 0, bpf_htons(2)},
.udp.source = bpf_htons(1),
.udp.dest = bpf_htons(1),
.udp.len = bpf_htons(sizeof(struct udp_packet)
- offsetof(struct udp_packet, udp)),
.payload = {0x42}, /* receiver XDP program matches on this */
};
static int attach_tc_prog(struct bpf_tc_hook *hook, int fd)
{
DECLARE_LIBBPF_OPTS(bpf_tc_opts, opts, .handle = 1, .priority = 1, .prog_fd = fd);
int ret;
ret = bpf_tc_hook_create(hook);
if (!ASSERT_OK(ret, "create tc hook"))
return ret;
ret = bpf_tc_attach(hook, &opts);
if (!ASSERT_OK(ret, "bpf_tc_attach")) {
bpf_tc_hook_destroy(hook);
return ret;
}
return 0;
}
/* The maximum permissible size is: PAGE_SIZE - sizeof(struct xdp_page_head) -
* SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) - XDP_PACKET_HEADROOM =
* 3408 bytes for 64-byte cacheline and 3216 for 256-byte one.
*/
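/* A sketch of that arithmetic, assuming 4 KiB pages,
 * XDP_PACKET_HEADROOM == 256 and sizeof(struct skb_shared_info) == 320;
 * the stated totals then imply sizeof(struct xdp_page_head) == 112:
 *   64-byte cacheline:  4096 - 112 - 320 - 256 = 3408
 *   256-byte cacheline: 4096 - 112 - 512 - 256 = 3216
 * (SKB_DATA_ALIGN() rounds 320 up to 512 with 256-byte cachelines.)
 */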
#if defined(__s390x__)
#define MAX_PKT_SIZE 3216
#else
#define MAX_PKT_SIZE 3408
#endif
static void test_max_pkt_size(int fd)
{
char data[MAX_PKT_SIZE + 1] = {};
int err;
DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts,
.data_in = &data,
.data_size_in = MAX_PKT_SIZE,
.flags = BPF_F_TEST_XDP_LIVE_FRAMES,
.repeat = 1,
);
err = bpf_prog_test_run_opts(fd, &opts);
ASSERT_OK(err, "prog_run_max_size");
opts.data_size_in += 1;
err = bpf_prog_test_run_opts(fd, &opts);
ASSERT_EQ(err, -EINVAL, "prog_run_too_big");
}
#define NUM_PKTS 10000
void test_xdp_do_redirect(void)
{
int err, xdp_prog_fd, tc_prog_fd, ifindex_src, ifindex_dst;
char data[sizeof(pkt_udp) + sizeof(__u64)];
struct test_xdp_do_redirect *skel = NULL;
struct nstoken *nstoken = NULL;
struct bpf_link *link;
LIBBPF_OPTS(bpf_xdp_query_opts, query_opts);
struct xdp_md ctx_in = { .data = sizeof(__u64),
.data_end = sizeof(data) };
DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts,
.data_in = &data,
.data_size_in = sizeof(data),
.ctx_in = &ctx_in,
.ctx_size_in = sizeof(ctx_in),
.flags = BPF_F_TEST_XDP_LIVE_FRAMES,
.repeat = NUM_PKTS,
.batch_size = 64,
);
DECLARE_LIBBPF_OPTS(bpf_tc_hook, tc_hook,
.attach_point = BPF_TC_INGRESS);
memcpy(&data[sizeof(__u64)], &pkt_udp, sizeof(pkt_udp));
*((__u32 *)data) = 0x42; /* metadata test value */
*((__u32 *)data + 1) = 0; /* zero the upper half of the 8-byte metadata area */
skel = test_xdp_do_redirect__open();
if (!ASSERT_OK_PTR(skel, "skel"))
return;
/* The XDP program we run with bpf_prog_run() will cycle through all
* three xmit (PASS/TX/REDIRECT) return codes starting from above, and
* ending up with PASS, so we should end up with two packets on the dst
* iface and NUM_PKTS-2 in the TC hook. We match the packets on the UDP
* payload.
*/
SYS(out, "ip netns add testns");
nstoken = open_netns("testns");
if (!ASSERT_OK_PTR(nstoken, "setns"))
goto out;
SYS(out, "ip link add veth_src type veth peer name veth_dst");
SYS(out, "ip link set dev veth_src address 00:11:22:33:44:55");
SYS(out, "ip link set dev veth_dst address 66:77:88:99:aa:bb");
SYS(out, "ip link set dev veth_src up");
SYS(out, "ip link set dev veth_dst up");
SYS(out, "ip addr add dev veth_src fc00::1/64");
SYS(out, "ip addr add dev veth_dst fc00::2/64");
SYS(out, "ip neigh add fc00::2 dev veth_src lladdr 66:77:88:99:aa:bb");
/* We enable forwarding in the test namespace because that will cause
* the packets that go through the kernel stack (with XDP_PASS) to be
* forwarded back out the same interface (because of the packet dst
* combined with the interface addresses). When this happens, the
* regular forwarding path will end up going through the same
* veth_xdp_xmit() call as the XDP_REDIRECT code, which can cause a
* deadlock if it happens on the same CPU. There's a local_bh_disable()
* in the test_run code to prevent this, but an earlier version of the
* code didn't have this, so we keep the test behaviour to make sure the
* bug doesn't resurface.
*/
SYS(out, "sysctl -qw net.ipv6.conf.all.forwarding=1");
ifindex_src = if_nametoindex("veth_src");
ifindex_dst = if_nametoindex("veth_dst");
if (!ASSERT_NEQ(ifindex_src, 0, "ifindex_src") ||
!ASSERT_NEQ(ifindex_dst, 0, "ifindex_dst"))
goto out;
/* Check xdp features supported by veth driver */
err = bpf_xdp_query(ifindex_src, XDP_FLAGS_DRV_MODE, &query_opts);
if (!ASSERT_OK(err, "veth_src bpf_xdp_query"))
goto out;
if (!ASSERT_EQ(query_opts.feature_flags,
NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
NETDEV_XDP_ACT_RX_SG,
"veth_src query_opts.feature_flags"))
goto out;
err = bpf_xdp_query(ifindex_dst, XDP_FLAGS_DRV_MODE, &query_opts);
if (!ASSERT_OK(err, "veth_dst bpf_xdp_query"))
goto out;
if (!ASSERT_EQ(query_opts.feature_flags,
NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
NETDEV_XDP_ACT_RX_SG,
"veth_dst query_opts.feature_flags"))
goto out;
/* Enable GRO */
SYS(out, "ethtool -K veth_src gro on");
SYS(out, "ethtool -K veth_dst gro on");
err = bpf_xdp_query(ifindex_src, XDP_FLAGS_DRV_MODE, &query_opts);
if (!ASSERT_OK(err, "veth_src bpf_xdp_query gro on"))
goto out;
if (!ASSERT_EQ(query_opts.feature_flags,
NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
NETDEV_XDP_ACT_NDO_XMIT | NETDEV_XDP_ACT_RX_SG |
NETDEV_XDP_ACT_NDO_XMIT_SG,
"veth_src query_opts.feature_flags gro on"))
goto out;
err = bpf_xdp_query(ifindex_dst, XDP_FLAGS_DRV_MODE, &query_opts);
if (!ASSERT_OK(err, "veth_dst bpf_xdp_query gro on"))
goto out;
if (!ASSERT_EQ(query_opts.feature_flags,
NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
NETDEV_XDP_ACT_NDO_XMIT | NETDEV_XDP_ACT_RX_SG |
NETDEV_XDP_ACT_NDO_XMIT_SG,
"veth_dst query_opts.feature_flags gro on"))
goto out;
memcpy(skel->rodata->expect_dst, &pkt_udp.eth.h_dest, ETH_ALEN);
skel->rodata->ifindex_out = ifindex_src; /* redirect back to the same iface */
skel->rodata->ifindex_in = ifindex_src;
ctx_in.ingress_ifindex = ifindex_src;
tc_hook.ifindex = ifindex_src;
if (!ASSERT_OK(test_xdp_do_redirect__load(skel), "load"))
goto out;
link = bpf_program__attach_xdp(skel->progs.xdp_count_pkts, ifindex_dst);
if (!ASSERT_OK_PTR(link, "prog_attach"))
goto out;
skel->links.xdp_count_pkts = link;
tc_prog_fd = bpf_program__fd(skel->progs.tc_count_pkts);
if (attach_tc_prog(&tc_hook, tc_prog_fd))
goto out;
xdp_prog_fd = bpf_program__fd(skel->progs.xdp_redirect);
err = bpf_prog_test_run_opts(xdp_prog_fd, &opts);
if (!ASSERT_OK(err, "prog_run"))
goto out_tc;
/* wait for the packets to be flushed */
kern_sync_rcu();
/* There will be one packet sent through XDP_REDIRECT and one through
* XDP_TX; these will show up on the XDP counting program, while the
* rest will be counted at the TC ingress hook (and the counting program
* resets the packet payload so they don't get counted twice even though
* they are retransmitted out the veth device).
*/
ASSERT_EQ(skel->bss->pkts_seen_xdp, 2, "pkt_count_xdp");
ASSERT_EQ(skel->bss->pkts_seen_zero, 2, "pkt_count_zero");
ASSERT_EQ(skel->bss->pkts_seen_tc, NUM_PKTS - 2, "pkt_count_tc");
test_max_pkt_size(bpf_program__fd(skel->progs.xdp_count_pkts));
out_tc:
bpf_tc_hook_destroy(&tc_hook);
out:
if (nstoken)
close_netns(nstoken);
SYS_NOFAIL("ip netns del testns");
test_xdp_do_redirect__destroy(skel);
}
| linux-master | tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c |
// SPDX-License-Identifier: GPL-2.0
#include <arpa/inet.h>
#include <linux/bpf.h>
#include <netinet/in.h>
#include <stdio.h>
#include <errno.h>
#include <string.h>
#include <stdlib.h>
#include <unistd.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include <test_maps.h>
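/* Matches the kernel's struct bpf_lpm_trie_key layout: the prefix length
 * in bits comes first, followed by the key data; prefix = 32 makes every
 * entry below an exact /32 match.
 */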
struct test_lpm_key {
__u32 prefix;
struct in_addr ipv4;
};
static void map_batch_update(int map_fd, __u32 max_entries,
struct test_lpm_key *keys, int *values)
{
__u32 i;
int err;
char buff[16] = { 0 };
DECLARE_LIBBPF_OPTS(bpf_map_batch_opts, opts,
.elem_flags = 0,
.flags = 0,
);
for (i = 0; i < max_entries; i++) {
keys[i].prefix = 32;
snprintf(buff, 16, "192.168.1.%d", i + 1);
inet_pton(AF_INET, buff, &keys[i].ipv4);
values[i] = i + 1;
}
err = bpf_map_update_batch(map_fd, keys, values, &max_entries, &opts);
CHECK(err, "bpf_map_update_batch()", "error:%s\n", strerror(errno));
}
static void map_batch_verify(int *visited, __u32 max_entries,
struct test_lpm_key *keys, int *values)
{
char buff[16] = { 0 };
int lower_byte = 0;
__u32 i;
memset(visited, 0, max_entries * sizeof(*visited));
for (i = 0; i < max_entries; i++) {
inet_ntop(AF_INET, &keys[i].ipv4, buff, sizeof(buff));
CHECK(sscanf(buff, "192.168.1.%d", &lower_byte) == EOF,
"sscanf()", "error: i %d\n", i);
CHECK(lower_byte != values[i], "key/value checking",
"error: i %d key %s value %d\n", i, buff, values[i]);
visited[i] = 1;
}
for (i = 0; i < max_entries; i++) {
CHECK(visited[i] != 1, "visited checking",
"error: keys array at index %d missing\n", i);
}
}
void test_lpm_trie_map_batch_ops(void)
{
LIBBPF_OPTS(bpf_map_create_opts, create_opts, .map_flags = BPF_F_NO_PREALLOC);
struct test_lpm_key *keys, key;
int map_fd, *values, *visited;
__u32 step, count, total, total_success;
const __u32 max_entries = 10;
__u64 batch = 0;
int err;
DECLARE_LIBBPF_OPTS(bpf_map_batch_opts, opts,
.elem_flags = 0,
.flags = 0,
);
map_fd = bpf_map_create(BPF_MAP_TYPE_LPM_TRIE, "lpm_trie_map",
sizeof(struct test_lpm_key), sizeof(int),
max_entries, &create_opts);
CHECK(map_fd == -1, "bpf_map_create()", "error:%s\n",
strerror(errno));
keys = malloc(max_entries * sizeof(struct test_lpm_key));
values = malloc(max_entries * sizeof(int));
visited = malloc(max_entries * sizeof(int));
CHECK(!keys || !values || !visited, "malloc()", "error:%s\n",
strerror(errno));
total_success = 0;
for (step = 1; step < max_entries; step++) {
map_batch_update(map_fd, max_entries, keys, values);
map_batch_verify(visited, max_entries, keys, values);
memset(keys, 0, max_entries * sizeof(*keys));
memset(values, 0, max_entries * sizeof(*values));
batch = 0;
total = 0;
/* iteratively look up and delete elements in batches of
 * 'step' elements each.
 */
count = step;
while (true) {
err = bpf_map_lookup_batch(map_fd,
total ? &batch : NULL, &batch,
keys + total, values + total, &count, &opts);
CHECK((err && errno != ENOENT), "lookup with steps",
"error: %s\n", strerror(errno));
total += count;
if (err)
break;
}
CHECK(total != max_entries, "lookup with steps",
"total = %u, max_entries = %u\n", total, max_entries);
map_batch_verify(visited, max_entries, keys, values);
total = 0;
count = step;
while (total < max_entries) {
if (max_entries - total < step)
count = max_entries - total;
err = bpf_map_delete_batch(map_fd, keys + total, &count,
&opts);
CHECK((err && errno != ENOENT), "delete batch",
"error: %s\n", strerror(errno));
total += count;
if (err)
break;
}
CHECK(total != max_entries, "delete with steps",
"total = %u, max_entries = %u\n", total, max_entries);
/* check that the map is empty: errno == ENOENT */
err = bpf_map_get_next_key(map_fd, NULL, &key);
CHECK(!err || errno != ENOENT, "bpf_map_get_next_key()",
"error: %s\n", strerror(errno));
total_success++;
}
CHECK(total_success == 0, "check total_success",
"unexpected failure\n");
printf("%s:PASS\n", __func__);
free(keys);
free(values);
free(visited);
close(map_fd);
}
| linux-master | tools/testing/selftests/bpf/map_tests/lpm_trie_map_batch_ops.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2023 Isovalent */
#include <errno.h>
#include <unistd.h>
#include <pthread.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include <bpf_util.h>
#include <test_maps.h>
#include "map_percpu_stats.skel.h"
#define MAX_ENTRIES 16384
#define MAX_ENTRIES_HASH_OF_MAPS 64
#define N_THREADS 8
#define MAX_MAP_KEY_SIZE 4
static void map_info(int map_fd, struct bpf_map_info *info)
{
__u32 len = sizeof(*info);
int ret;
memset(info, 0, sizeof(*info));
ret = bpf_obj_get_info_by_fd(map_fd, info, &len);
CHECK(ret < 0, "bpf_obj_get_info_by_fd", "error: %s\n", strerror(errno));
}
static const char *map_type_to_s(__u32 type)
{
switch (type) {
case BPF_MAP_TYPE_HASH:
return "HASH";
case BPF_MAP_TYPE_PERCPU_HASH:
return "PERCPU_HASH";
case BPF_MAP_TYPE_LRU_HASH:
return "LRU_HASH";
case BPF_MAP_TYPE_LRU_PERCPU_HASH:
return "LRU_PERCPU_HASH";
case BPF_MAP_TYPE_HASH_OF_MAPS:
return "BPF_MAP_TYPE_HASH_OF_MAPS";
default:
return "<define-me>";
}
}
static __u32 map_count_elements(__u32 type, int map_fd)
{
__u32 key = -1;
int n = 0;
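/* In-place key walk: asking for the successor of a key that is not in the
* map makes bpf_map_get_next_key() return the first key, and reusing 'key'
* as both cursor and output advances the iteration one element at a time.
*/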
while (!bpf_map_get_next_key(map_fd, &key, &key))
n++;
return n;
}
#define BATCH true
static void delete_and_lookup_batch(int map_fd, void *keys, __u32 count)
{
static __u8 values[(8 << 10) * MAX_ENTRIES];
void *in_batch = NULL, *out_batch;
__u32 save_count = count;
int ret;
ret = bpf_map_lookup_and_delete_batch(map_fd,
&in_batch, &out_batch,
keys, values, &count,
NULL);
/*
* Despite what the uapi header says, lookup_and_delete_batch will return
* -ENOENT once we have successfully deleted all elements, so check
* this separately
*/
CHECK(ret < 0 && (errno != ENOENT || !count), "bpf_map_lookup_and_delete_batch",
"error: %s\n", strerror(errno));
CHECK(count != save_count,
"bpf_map_lookup_and_delete_batch",
"deleted not all elements: removed=%u expected=%u\n",
count, save_count);
}
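/* Hedged usage sketch (not part of the original test): the batch-cursor
* calling convention delete_and_lookup_batch() relies on, written out as a
* minimal standalone drain loop. "example_drain_u32_map" and its parameters
* are illustrative assumptions, not kernel-tree helpers; the -ENOENT
* handling mirrors the comment above.
*/
static inline int example_drain_u32_map(int map_fd, __u32 *keys, __u32 *vals,
__u32 batch_sz)
{
__u64 batch = 0;
__u32 total = 0, count;
int err;

do {
count = batch_sz;
err = bpf_map_lookup_and_delete_batch(map_fd,
total ? &batch : NULL, &batch,
keys + total, vals + total,
&count, NULL);
if (err && errno != ENOENT)
return -errno; /* real failure */
total += count; /* -ENOENT may still return elements */
} while (!err);
return total; /* map fully drained */
}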
static void delete_all_elements(__u32 type, int map_fd, bool batch)
{
static __u8 val[8 << 10]; /* enough for 1024 CPUs */
__u32 key = -1;
void *keys;
__u32 i, n;
int ret;
keys = calloc(MAX_MAP_KEY_SIZE, MAX_ENTRIES);
CHECK(!keys, "calloc", "error: %s\n", strerror(errno));
for (n = 0; !bpf_map_get_next_key(map_fd, &key, &key); n++)
memcpy(keys + n*MAX_MAP_KEY_SIZE, &key, MAX_MAP_KEY_SIZE);
if (batch) {
/* Can't mix delete_batch and delete_and_lookup_batch because
* they have different semantics in relation to the keys
* argument. However, delete_batch utilizes map_delete_elem,
* so we actually test it in the non-batch scenario.
*/
delete_and_lookup_batch(map_fd, keys, n);
} else {
/* Intentionally mix delete and lookup_and_delete so we can test both */
for (i = 0; i < n; i++) {
void *keyp = keys + i*MAX_MAP_KEY_SIZE;
if (i % 2 || type == BPF_MAP_TYPE_HASH_OF_MAPS) {
ret = bpf_map_delete_elem(map_fd, keyp);
CHECK(ret < 0, "bpf_map_delete_elem",
"error: key %u: %s\n", i, strerror(errno));
} else {
ret = bpf_map_lookup_and_delete_elem(map_fd, keyp, val);
CHECK(ret < 0, "bpf_map_lookup_and_delete_elem",
"error: key %u: %s\n", i, strerror(errno));
}
}
}
free(keys);
}
static bool is_lru(__u32 map_type)
{
return map_type == BPF_MAP_TYPE_LRU_HASH ||
map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH;
}
struct upsert_opts {
__u32 map_type;
int map_fd;
__u32 n;
};
static int create_small_hash(void)
{
int map_fd;
map_fd = bpf_map_create(BPF_MAP_TYPE_HASH, "small", 4, 4, 4, NULL);
CHECK(map_fd < 0, "bpf_map_create()", "error:%s (name=%s)\n",
strerror(errno), "small");
return map_fd;
}
static void *patch_map_thread(void *arg)
{
struct upsert_opts *opts = arg;
int val;
int ret;
int i;
for (i = 0; i < opts->n; i++) {
if (opts->map_type == BPF_MAP_TYPE_HASH_OF_MAPS)
val = create_small_hash();
else
val = rand();
ret = bpf_map_update_elem(opts->map_fd, &i, &val, 0);
CHECK(ret < 0, "bpf_map_update_elem", "key=%d error: %s\n", i, strerror(errno));
if (opts->map_type == BPF_MAP_TYPE_HASH_OF_MAPS)
close(val);
}
return NULL;
}
static void upsert_elements(struct upsert_opts *opts)
{
pthread_t threads[N_THREADS];
int ret;
int i;
for (i = 0; i < ARRAY_SIZE(threads); i++) {
ret = pthread_create(&threads[i], NULL, patch_map_thread, opts);
CHECK(ret != 0, "pthread_create", "error: %s\n", strerror(ret));
}
for (i = 0; i < ARRAY_SIZE(threads); i++) {
ret = pthread_join(threads[i], NULL);
CHECK(ret != 0, "pthread_join", "error: %s\n", strerror(ret));
}
}
static __u32 read_cur_elements(int iter_fd)
{
char buf[64];
ssize_t n;
__u32 ret;
n = read(iter_fd, buf, sizeof(buf)-1);
CHECK(n <= 0, "read", "error: %s\n", strerror(errno));
buf[n] = '\0';
errno = 0;
ret = (__u32)strtol(buf, NULL, 10);
CHECK(errno != 0, "strtol", "error: %s\n", strerror(errno));
return ret;
}
static __u32 get_cur_elements(int map_id)
{
struct map_percpu_stats *skel;
struct bpf_link *link;
__u32 n_elements;
int iter_fd;
int ret;
skel = map_percpu_stats__open();
CHECK(skel == NULL, "map_percpu_stats__open", "error: %s\n", strerror(errno));
skel->bss->target_id = map_id;
ret = map_percpu_stats__load(skel);
CHECK(ret != 0, "map_percpu_stats__load", "error: %s\n", strerror(errno));
link = bpf_program__attach_iter(skel->progs.dump_bpf_map, NULL);
CHECK(!link, "bpf_program__attach_iter", "error: %s\n", strerror(errno));
iter_fd = bpf_iter_create(bpf_link__fd(link));
CHECK(iter_fd < 0, "bpf_iter_create", "error: %s\n", strerror(errno));
n_elements = read_cur_elements(iter_fd);
close(iter_fd);
bpf_link__destroy(link);
map_percpu_stats__destroy(skel);
return n_elements;
}
static void check_expected_number_elements(__u32 n_inserted, int map_fd,
struct bpf_map_info *info)
{
__u32 n_real;
__u32 n_iter;
/* Count the current number of elements in the map by iterating through
* all the map keys via bpf_get_next_key
*/
n_real = map_count_elements(info->type, map_fd);
/* The "real" number of elements should be the same as the inserted
* number of elements in all cases except LRU maps, where some elements
* may have been evicted
*/
if (n_inserted == 0 || !is_lru(info->type))
CHECK(n_inserted != n_real, "map_count_elements",
"n_real(%u) != n_inserted(%u)\n", n_real, n_inserted);
/* Count the current number of elements in the map using an iterator */
n_iter = get_cur_elements(info->id);
/* Both counts should be the same, as all updates are over */
CHECK(n_iter != n_real, "get_cur_elements",
"n_iter=%u, expected %u (map_type=%s,map_flags=%08x)\n",
n_iter, n_real, map_type_to_s(info->type), info->map_flags);
}
static void __test(int map_fd)
{
struct upsert_opts opts = {
.map_fd = map_fd,
};
struct bpf_map_info info;
map_info(map_fd, &info);
opts.map_type = info.type;
opts.n = info.max_entries;
/* Reduce the number of elements we are updating such that we don't
* bump into -E2BIG from non-preallocated hash maps, but will still
* have some evictions for LRU maps.
*/
if (opts.map_type != BPF_MAP_TYPE_HASH_OF_MAPS)
opts.n -= 512;
else
opts.n /= 2;
/*
* Upsert keys [0, n) under some competition: with random values from
* N_THREADS threads. Check values, then delete all elements and check
* values again.
*/
upsert_elements(&opts);
check_expected_number_elements(opts.n, map_fd, &info);
delete_all_elements(info.type, map_fd, !BATCH);
check_expected_number_elements(0, map_fd, &info);
/* Now do the same, but using batch delete operations */
upsert_elements(&opts);
check_expected_number_elements(opts.n, map_fd, &info);
delete_all_elements(info.type, map_fd, BATCH);
check_expected_number_elements(0, map_fd, &info);
close(map_fd);
}
static int map_create_opts(__u32 type, const char *name,
struct bpf_map_create_opts *map_opts,
__u32 key_size, __u32 val_size)
{
int max_entries;
int map_fd;
if (type == BPF_MAP_TYPE_HASH_OF_MAPS)
max_entries = MAX_ENTRIES_HASH_OF_MAPS;
else
max_entries = MAX_ENTRIES;
map_fd = bpf_map_create(type, name, key_size, val_size, max_entries, map_opts);
CHECK(map_fd < 0, "bpf_map_create()", "error:%s (name=%s)\n",
strerror(errno), name);
return map_fd;
}
static int map_create(__u32 type, const char *name, struct bpf_map_create_opts *map_opts)
{
return map_create_opts(type, name, map_opts, sizeof(int), sizeof(int));
}
static int create_hash(void)
{
struct bpf_map_create_opts map_opts = {
.sz = sizeof(map_opts),
.map_flags = BPF_F_NO_PREALLOC,
};
return map_create(BPF_MAP_TYPE_HASH, "hash", &map_opts);
}
static int create_percpu_hash(void)
{
struct bpf_map_create_opts map_opts = {
.sz = sizeof(map_opts),
.map_flags = BPF_F_NO_PREALLOC,
};
return map_create(BPF_MAP_TYPE_PERCPU_HASH, "percpu_hash", &map_opts);
}
static int create_hash_prealloc(void)
{
return map_create(BPF_MAP_TYPE_HASH, "hash", NULL);
}
static int create_percpu_hash_prealloc(void)
{
return map_create(BPF_MAP_TYPE_PERCPU_HASH, "percpu_hash_prealloc", NULL);
}
static int create_lru_hash(__u32 type, __u32 map_flags)
{
struct bpf_map_create_opts map_opts = {
.sz = sizeof(map_opts),
.map_flags = map_flags,
};
return map_create(type, "lru_hash", &map_opts);
}
static int create_hash_of_maps(void)
{
struct bpf_map_create_opts map_opts = {
.sz = sizeof(map_opts),
.map_flags = BPF_F_NO_PREALLOC,
.inner_map_fd = create_small_hash(),
};
int ret;
ret = map_create_opts(BPF_MAP_TYPE_HASH_OF_MAPS, "hash_of_maps",
&map_opts, sizeof(int), sizeof(int));
close(map_opts.inner_map_fd);
return ret;
}
static void map_percpu_stats_hash(void)
{
__test(create_hash());
printf("test_%s:PASS\n", __func__);
}
static void map_percpu_stats_percpu_hash(void)
{
__test(create_percpu_hash());
printf("test_%s:PASS\n", __func__);
}
static void map_percpu_stats_hash_prealloc(void)
{
__test(create_hash_prealloc());
printf("test_%s:PASS\n", __func__);
}
static void map_percpu_stats_percpu_hash_prealloc(void)
{
__test(create_percpu_hash_prealloc());
printf("test_%s:PASS\n", __func__);
}
static void map_percpu_stats_lru_hash(void)
{
__test(create_lru_hash(BPF_MAP_TYPE_LRU_HASH, 0));
printf("test_%s:PASS\n", __func__);
}
static void map_percpu_stats_lru_hash_no_common(void)
{
__test(create_lru_hash(BPF_MAP_TYPE_LRU_HASH, BPF_F_NO_COMMON_LRU));
printf("test_%s:PASS\n", __func__);
}
static void map_percpu_stats_percpu_lru_hash(void)
{
__test(create_lru_hash(BPF_MAP_TYPE_LRU_PERCPU_HASH, 0));
printf("test_%s:PASS\n", __func__);
}
static void map_percpu_stats_percpu_lru_hash_no_common(void)
{
__test(create_lru_hash(BPF_MAP_TYPE_LRU_PERCPU_HASH, BPF_F_NO_COMMON_LRU));
printf("test_%s:PASS\n", __func__);
}
static void map_percpu_stats_hash_of_maps(void)
{
__test(create_hash_of_maps());
printf("test_%s:PASS\n", __func__);
}
void test_map_percpu_stats(void)
{
map_percpu_stats_hash();
map_percpu_stats_percpu_hash();
map_percpu_stats_hash_prealloc();
map_percpu_stats_percpu_hash_prealloc();
map_percpu_stats_lru_hash();
map_percpu_stats_lru_hash_no_common();
map_percpu_stats_percpu_lru_hash();
map_percpu_stats_percpu_lru_hash_no_common();
map_percpu_stats_hash_of_maps();
}
| linux-master | tools/testing/selftests/bpf/map_tests/map_percpu_stats.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook */
#include <stdio.h>
#include <errno.h>
#include <string.h>
#include <unistd.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include <bpf_util.h>
#include <test_maps.h>
static void map_batch_update(int map_fd, __u32 max_entries, int *keys,
void *values, bool is_pcpu)
{
typedef BPF_DECLARE_PERCPU(int, value);
value *v = NULL;
int i, j, err;
DECLARE_LIBBPF_OPTS(bpf_map_batch_opts, opts,
.elem_flags = 0,
.flags = 0,
);
if (is_pcpu)
v = (value *)values;
for (i = 0; i < max_entries; i++) {
keys[i] = i + 1;
if (is_pcpu)
for (j = 0; j < bpf_num_possible_cpus(); j++)
bpf_percpu(v[i], j) = i + 2 + j;
else
((int *)values)[i] = i + 2;
}
err = bpf_map_update_batch(map_fd, keys, values, &max_entries, &opts);
CHECK(err, "bpf_map_update_batch()", "error:%s\n", strerror(errno));
}
static void map_batch_verify(int *visited, __u32 max_entries,
int *keys, void *values, bool is_pcpu)
{
typedef BPF_DECLARE_PERCPU(int, value);
value *v = NULL;
int i, j;
if (is_pcpu)
v = (value *)values;
memset(visited, 0, max_entries * sizeof(*visited));
for (i = 0; i < max_entries; i++) {
if (is_pcpu) {
for (j = 0; j < bpf_num_possible_cpus(); j++) {
CHECK(keys[i] + 1 + j != bpf_percpu(v[i], j),
"key/value checking",
"error: i %d j %d key %d value %d\n",
i, j, keys[i], bpf_percpu(v[i], j));
}
} else {
CHECK(keys[i] + 1 != ((int *)values)[i],
"key/value checking",
"error: i %d key %d value %d\n", i, keys[i],
((int *)values)[i]);
}
visited[i] = 1;
}
for (i = 0; i < max_entries; i++) {
CHECK(visited[i] != 1, "visited checking",
"error: keys array at index %d missing\n", i);
}
}
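/* Hedged illustration (not part of the original test): BPF_DECLARE_PERCPU()
* from bpf_util.h lays a value out as one 8-byte-aligned slot per possible
* CPU, and bpf_percpu(v, cpu) indexes that slot. A minimal standalone use
* might look like this; "example_sum_percpu" is an illustrative name.
*/
static inline int example_sum_percpu(int map_fd, int key)
{
typedef BPF_DECLARE_PERCPU(int, pcpu_int);
pcpu_int val;
int cpu, sum = 0;

if (bpf_map_lookup_elem(map_fd, &key, val))
return -errno;
for (cpu = 0; cpu < bpf_num_possible_cpus(); cpu++)
sum += bpf_percpu(val, cpu);
return sum;
}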
void __test_map_lookup_and_delete_batch(bool is_pcpu)
{
__u32 batch, count, total, total_success;
typedef BPF_DECLARE_PERCPU(int, value);
int map_fd, *keys, *visited, key;
const __u32 max_entries = 10;
value pcpu_values[max_entries];
int err, step, value_size;
bool nospace_err;
void *values;
DECLARE_LIBBPF_OPTS(bpf_map_batch_opts, opts,
.elem_flags = 0,
.flags = 0,
);
map_fd = bpf_map_create(is_pcpu ? BPF_MAP_TYPE_PERCPU_HASH : BPF_MAP_TYPE_HASH,
"hash_map", sizeof(int), sizeof(int), max_entries, NULL);
CHECK(map_fd == -1,
"bpf_map_create()", "error:%s\n", strerror(errno));
value_size = is_pcpu ? sizeof(value) : sizeof(int);
keys = malloc(max_entries * sizeof(int));
if (is_pcpu)
values = pcpu_values;
else
values = malloc(max_entries * sizeof(int));
visited = malloc(max_entries * sizeof(int));
CHECK(!keys || !values || !visited, "malloc()",
"error:%s\n", strerror(errno));
/* test 1: lookup/delete an empty hash table, -ENOENT */
count = max_entries;
err = bpf_map_lookup_and_delete_batch(map_fd, NULL, &batch, keys,
values, &count, &opts);
CHECK((err && errno != ENOENT), "empty map",
"error: %s\n", strerror(errno));
/* populate elements to the map */
map_batch_update(map_fd, max_entries, keys, values, is_pcpu);
/* test 2: lookup/delete with count = 0, success */
count = 0;
err = bpf_map_lookup_and_delete_batch(map_fd, NULL, &batch, keys,
values, &count, &opts);
CHECK(err, "count = 0", "error: %s\n", strerror(errno));
/* test 3: lookup/delete with count = max_entries, success */
memset(keys, 0, max_entries * sizeof(*keys));
memset(values, 0, max_entries * value_size);
count = max_entries;
err = bpf_map_lookup_and_delete_batch(map_fd, NULL, &batch, keys,
values, &count, &opts);
CHECK((err && errno != ENOENT), "count = max_entries",
"error: %s\n", strerror(errno));
CHECK(count != max_entries, "count = max_entries",
"count = %u, max_entries = %u\n", count, max_entries);
map_batch_verify(visited, max_entries, keys, values, is_pcpu);
/* bpf_map_get_next_key() should return -ENOENT for an empty map. */
err = bpf_map_get_next_key(map_fd, NULL, &key);
CHECK(!err || errno != ENOENT, "bpf_map_get_next_key()",
"error: %s\n", strerror(errno));
/* test 4: lookup/delete in a loop with various steps. */
total_success = 0;
for (step = 1; step < max_entries; step++) {
map_batch_update(map_fd, max_entries, keys, values, is_pcpu);
memset(keys, 0, max_entries * sizeof(*keys));
memset(values, 0, max_entries * value_size);
total = 0;
/* iteratively look up elements, 'step'
* elements per batch
*/
count = step;
nospace_err = false;
while (true) {
err = bpf_map_lookup_batch(map_fd,
total ? &batch : NULL,
&batch, keys + total,
values +
total * value_size,
&count, &opts);
/* It is possible that we are failing due to buffer size
* not big enough. In such cases, let us just exit and
* go with large steps. Note that a buffer size of
* max_entries should always work.
*/
if (err && errno == ENOSPC) {
nospace_err = true;
break;
}
CHECK((err && errno != ENOENT), "lookup with steps",
"error: %s\n", strerror(errno));
total += count;
if (err)
break;
}
if (nospace_err == true)
continue;
CHECK(total != max_entries, "lookup with steps",
"total = %u, max_entries = %u\n", total, max_entries);
map_batch_verify(visited, max_entries, keys, values, is_pcpu);
total = 0;
count = step;
while (total < max_entries) {
if (max_entries - total < step)
count = max_entries - total;
err = bpf_map_delete_batch(map_fd,
keys + total,
&count, &opts);
CHECK((err && errno != ENOENT), "delete batch",
"error: %s\n", strerror(errno));
total += count;
if (err)
break;
}
CHECK(total != max_entries, "delete with steps",
"total = %u, max_entries = %u\n", total, max_entries);
/* check map is empty, errno == ENOENT */
err = bpf_map_get_next_key(map_fd, NULL, &key);
CHECK(!err || errno != ENOENT, "bpf_map_get_next_key()",
"error: %s\n", strerror(errno));
/* iteratively lookup/delete elements with 'step'
* elements each
*/
map_batch_update(map_fd, max_entries, keys, values, is_pcpu);
memset(keys, 0, max_entries * sizeof(*keys));
memset(values, 0, max_entries * value_size);
total = 0;
count = step;
nospace_err = false;
while (true) {
err = bpf_map_lookup_and_delete_batch(map_fd,
total ? &batch : NULL,
&batch, keys + total,
values +
total * value_size,
&count, &opts);
/* It is possible that we are failing due to buffer size
* not big enough. In such cases, let us just exit and
* go with large steps. Note that a buffer size of
* max_entries should always work.
*/
if (err && errno == ENOSPC) {
nospace_err = true;
break;
}
CHECK((err && errno != ENOENT), "lookup with steps",
"error: %s\n", strerror(errno));
total += count;
if (err)
break;
}
if (nospace_err == true)
continue;
CHECK(total != max_entries, "lookup/delete with steps",
"total = %u, max_entries = %u\n", total, max_entries);
map_batch_verify(visited, max_entries, keys, values, is_pcpu);
err = bpf_map_get_next_key(map_fd, NULL, &key);
CHECK(!err || errno != ENOENT, "bpf_map_get_next_key()",
"error: %s\n", strerror(errno));
total_success++;
}
CHECK(total_success == 0, "check total_success",
"unexpected failure\n");
free(keys);
free(visited);
if (!is_pcpu)
free(values);
close(map_fd);
}
void htab_map_batch_ops(void)
{
__test_map_lookup_and_delete_batch(false);
printf("test_%s:PASS\n", __func__);
}
void htab_percpu_map_batch_ops(void)
{
__test_map_lookup_and_delete_batch(true);
printf("test_%s:PASS\n", __func__);
}
void test_htab_map_batch_ops(void)
{
htab_map_batch_ops();
htab_percpu_map_batch_ops();
}
| linux-master | tools/testing/selftests/bpf/map_tests/htab_map_batch_ops.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook */
#include <linux/compiler.h>
#include <linux/err.h>
#include <sys/resource.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <linux/btf.h>
#include <unistd.h>
#include <signal.h>
#include <errno.h>
#include <string.h>
#include <pthread.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include <test_btf.h>
#include <test_maps.h>
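/* btf_key_type_id/btf_value_type_id index into the raw BTF built by
* load_btf() below: type [1] is 'int' (the 4-byte key) and type [3] is
* 'struct val { int cnt; struct bpf_spin_lock l; }' (the 8-byte value).
*/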
static struct bpf_map_create_opts map_opts = {
.sz = sizeof(map_opts),
.btf_key_type_id = 1,
.btf_value_type_id = 3,
.btf_fd = -1,
.map_flags = BPF_F_NO_PREALLOC,
};
static unsigned int nr_sk_threads_done;
static unsigned int nr_sk_threads_err;
static unsigned int nr_sk_per_thread = 4096;
static unsigned int nr_sk_threads = 4;
static int sk_storage_map = -1;
static unsigned int stop;
static int runtime_s = 5;
static bool is_stopped(void)
{
return READ_ONCE(stop);
}
static unsigned int threads_err(void)
{
return READ_ONCE(nr_sk_threads_err);
}
static void notify_thread_err(void)
{
__sync_add_and_fetch(&nr_sk_threads_err, 1);
}
static bool wait_for_threads_err(void)
{
while (!is_stopped() && !threads_err())
usleep(500);
return !is_stopped();
}
static unsigned int threads_done(void)
{
return READ_ONCE(nr_sk_threads_done);
}
static void notify_thread_done(void)
{
__sync_add_and_fetch(&nr_sk_threads_done, 1);
}
static void notify_thread_redo(void)
{
__sync_sub_and_fetch(&nr_sk_threads_done, 1);
}
static bool wait_for_threads_done(void)
{
while (threads_done() != nr_sk_threads && !is_stopped() &&
!threads_err())
usleep(50);
return !is_stopped() && !threads_err();
}
static bool wait_for_threads_redo(void)
{
while (threads_done() && !is_stopped() && !threads_err())
usleep(50);
return !is_stopped() && !threads_err();
}
static bool wait_for_map(void)
{
while (READ_ONCE(sk_storage_map) == -1 && !is_stopped())
usleep(50);
return !is_stopped();
}
static bool wait_for_map_close(void)
{
while (READ_ONCE(sk_storage_map) != -1 && !is_stopped())
;
return !is_stopped();
}
static int load_btf(void)
{
const char btf_str_sec[] = "\0bpf_spin_lock\0val\0cnt\0l";
__u32 btf_raw_types[] = {
/* int */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
/* struct bpf_spin_lock */ /* [2] */
BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), 4),
BTF_MEMBER_ENC(15, 1, 0), /* int val; */
/* struct val */ /* [3] */
BTF_TYPE_ENC(15, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 2), 8),
BTF_MEMBER_ENC(19, 1, 0), /* int cnt; */
BTF_MEMBER_ENC(23, 2, 32),/* struct bpf_spin_lock l; */
};
struct btf_header btf_hdr = {
.magic = BTF_MAGIC,
.version = BTF_VERSION,
.hdr_len = sizeof(struct btf_header),
.type_len = sizeof(btf_raw_types),
.str_off = sizeof(btf_raw_types),
.str_len = sizeof(btf_str_sec),
};
__u8 raw_btf[sizeof(struct btf_header) + sizeof(btf_raw_types) +
sizeof(btf_str_sec)];
memcpy(raw_btf, &btf_hdr, sizeof(btf_hdr));
memcpy(raw_btf + sizeof(btf_hdr), btf_raw_types, sizeof(btf_raw_types));
memcpy(raw_btf + sizeof(btf_hdr) + sizeof(btf_raw_types),
btf_str_sec, sizeof(btf_str_sec));
return bpf_btf_load(raw_btf, sizeof(raw_btf), NULL);
}
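/* Hedged alternative sketch (not the test's method): the same three BTF
* types could plausibly be assembled with libbpf's btf__add_*() helpers
* instead of the hand-rolled raw encoding above, assuming <bpf/btf.h> is
* included. Type ids [1]..[3] match load_btf(); per-call error handling is
* elided for brevity.
*/
static inline int example_load_btf_with_helpers(void)
{
struct btf *btf = btf__new_empty();
const void *raw;
__u32 raw_sz;
int fd = -1;

if (!btf)
return -1;
btf__add_int(btf, "int", 4, BTF_INT_SIGNED); /* [1] */
btf__add_struct(btf, "bpf_spin_lock", 4); /* [2] */
btf__add_field(btf, "val", 1, 0, 0);
btf__add_struct(btf, "val", 8); /* [3] */
btf__add_field(btf, "cnt", 1, 0, 0);
btf__add_field(btf, "l", 2, 32, 0);
raw = btf__raw_data(btf, &raw_sz);
if (raw)
fd = bpf_btf_load(raw, raw_sz, NULL);
btf__free(btf);
return fd;
}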
static int create_sk_storage_map(void)
{
int btf_fd, map_fd;
btf_fd = load_btf();
CHECK(btf_fd == -1, "bpf_load_btf", "btf_fd:%d errno:%d\n",
btf_fd, errno);
map_opts.btf_fd = btf_fd;
map_fd = bpf_map_create(BPF_MAP_TYPE_SK_STORAGE, "sk_storage_map", 4, 8, 0, &map_opts);
map_opts.btf_fd = -1;
close(btf_fd);
CHECK(map_fd == -1,
"bpf_map_create()", "errno:%d\n", errno);
return map_fd;
}
static void *insert_close_thread(void *arg)
{
struct {
int cnt;
int lock;
} value = { .cnt = 0xeB9F, .lock = 0, };
int i, map_fd, err, *sk_fds;
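/* Stress the storage-free path: create sockets with storage attached,
* signal done, wait for the main loop to close (and thereby free) the map
* while the sockets are still open, then close the sockets and start over.
*/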
sk_fds = malloc(sizeof(*sk_fds) * nr_sk_per_thread);
if (!sk_fds) {
notify_thread_err();
return ERR_PTR(-ENOMEM);
}
for (i = 0; i < nr_sk_per_thread; i++)
sk_fds[i] = -1;
while (!is_stopped()) {
if (!wait_for_map())
goto close_all;
map_fd = READ_ONCE(sk_storage_map);
for (i = 0; i < nr_sk_per_thread && !is_stopped(); i++) {
sk_fds[i] = socket(AF_INET6, SOCK_STREAM, 0);
if (sk_fds[i] == -1) {
err = -errno;
fprintf(stderr, "socket(): errno:%d\n", errno);
goto errout;
}
err = bpf_map_update_elem(map_fd, &sk_fds[i], &value,
BPF_NOEXIST);
if (err) {
err = -errno;
fprintf(stderr,
"bpf_map_update_elem(): errno:%d\n",
errno);
goto errout;
}
}
notify_thread_done();
wait_for_map_close();
close_all:
for (i = 0; i < nr_sk_per_thread; i++) {
close(sk_fds[i]);
sk_fds[i] = -1;
}
notify_thread_redo();
}
free(sk_fds);
return NULL;
errout:
for (i = 0; i < nr_sk_per_thread && sk_fds[i] != -1; i++)
close(sk_fds[i]);
free(sk_fds);
notify_thread_err();
return ERR_PTR(err);
}
static int do_sk_storage_map_stress_free(void)
{
int i, map_fd = -1, err = 0, nr_threads_created = 0;
pthread_t *sk_thread_ids;
void *thread_ret;
sk_thread_ids = malloc(sizeof(pthread_t) * nr_sk_threads);
if (!sk_thread_ids) {
fprintf(stderr, "malloc(sk_threads): NULL\n");
return -ENOMEM;
}
for (i = 0; i < nr_sk_threads; i++) {
err = pthread_create(&sk_thread_ids[i], NULL,
insert_close_thread, NULL);
if (err) {
err = -errno;
goto done;
}
nr_threads_created++;
}
while (!is_stopped()) {
map_fd = create_sk_storage_map();
WRITE_ONCE(sk_storage_map, map_fd);
if (!wait_for_threads_done())
break;
WRITE_ONCE(sk_storage_map, -1);
close(map_fd);
map_fd = -1;
if (!wait_for_threads_redo())
break;
}
done:
WRITE_ONCE(stop, 1);
for (i = 0; i < nr_threads_created; i++) {
pthread_join(sk_thread_ids[i], &thread_ret);
if (IS_ERR(thread_ret) && !err) {
err = PTR_ERR(thread_ret);
fprintf(stderr, "threads#%u: err:%d\n", i, err);
}
}
free(sk_thread_ids);
if (map_fd != -1)
close(map_fd);
return err;
}
static void *update_thread(void *arg)
{
struct {
int cnt;
int lock;
} value = { .cnt = 0xeB9F, .lock = 0, };
int map_fd = READ_ONCE(sk_storage_map);
int sk_fd = *(int *)arg;
int err = 0; /* Suppress compiler false alarm */
while (!is_stopped()) {
err = bpf_map_update_elem(map_fd, &sk_fd, &value, 0);
if (err && errno != EAGAIN) {
err = -errno;
fprintf(stderr, "bpf_map_update_elem: %d %d\n",
err, errno);
break;
}
}
if (!is_stopped()) {
notify_thread_err();
return ERR_PTR(err);
}
return NULL;
}
static void *delete_thread(void *arg)
{
int map_fd = READ_ONCE(sk_storage_map);
int sk_fd = *(int *)arg;
int err = 0; /* Suppress compiler false alarm */
while (!is_stopped()) {
err = bpf_map_delete_elem(map_fd, &sk_fd);
if (err && errno != ENOENT) {
err = -errno;
fprintf(stderr, "bpf_map_delete_elem: %d %d\n",
err, errno);
break;
}
}
if (!is_stopped()) {
notify_thread_err();
return ERR_PTR(err);
}
return NULL;
}
static int do_sk_storage_map_stress_change(void)
{
int i, sk_fd, map_fd = -1, err = 0, nr_threads_created = 0;
pthread_t *sk_thread_ids;
void *thread_ret;
sk_thread_ids = malloc(sizeof(pthread_t) * nr_sk_threads);
if (!sk_thread_ids) {
fprintf(stderr, "malloc(sk_threads): NULL\n");
return -ENOMEM;
}
sk_fd = socket(AF_INET6, SOCK_STREAM, 0);
if (sk_fd == -1) {
err = -errno;
goto done;
}
map_fd = create_sk_storage_map();
WRITE_ONCE(sk_storage_map, map_fd);
for (i = 0; i < nr_sk_threads; i++) {
if (i & 0x1)
err = pthread_create(&sk_thread_ids[i], NULL,
update_thread, &sk_fd);
else
err = pthread_create(&sk_thread_ids[i], NULL,
delete_thread, &sk_fd);
if (err) {
err = -errno;
goto done;
}
nr_threads_created++;
}
wait_for_threads_err();
done:
WRITE_ONCE(stop, 1);
for (i = 0; i < nr_threads_created; i++) {
pthread_join(sk_thread_ids[i], &thread_ret);
if (IS_ERR(thread_ret) && !err) {
err = PTR_ERR(thread_ret);
fprintf(stderr, "threads#%u: err:%d\n", i, err);
}
}
free(sk_thread_ids);
if (sk_fd != -1)
close(sk_fd);
if (map_fd != -1)
close(map_fd);
return err;
}
static void stop_handler(int signum)
{
if (signum != SIGALRM)
printf("stopping...\n");
WRITE_ONCE(stop, 1);
}
#define BPF_SK_STORAGE_MAP_TEST_NR_THREADS "BPF_SK_STORAGE_MAP_TEST_NR_THREADS"
#define BPF_SK_STORAGE_MAP_TEST_SK_PER_THREAD "BPF_SK_STORAGE_MAP_TEST_SK_PER_THREAD"
#define BPF_SK_STORAGE_MAP_TEST_RUNTIME_S "BPF_SK_STORAGE_MAP_TEST_RUNTIME_S"
#define BPF_SK_STORAGE_MAP_TEST_NAME "BPF_SK_STORAGE_MAP_TEST_NAME"
static void test_sk_storage_map_stress_free(void)
{
struct rlimit rlim_old, rlim_new = {};
int err;
getrlimit(RLIMIT_NOFILE, &rlim_old);
signal(SIGTERM, stop_handler);
signal(SIGINT, stop_handler);
if (runtime_s > 0) {
signal(SIGALRM, stop_handler);
alarm(runtime_s);
}
if (rlim_old.rlim_cur < nr_sk_threads * nr_sk_per_thread) {
rlim_new.rlim_cur = nr_sk_threads * nr_sk_per_thread + 128;
rlim_new.rlim_max = rlim_new.rlim_cur + 128;
err = setrlimit(RLIMIT_NOFILE, &rlim_new);
CHECK(err, "setrlimit(RLIMIT_NOFILE)", "rlim_new:%lu errno:%d",
rlim_new.rlim_cur, errno);
}
err = do_sk_storage_map_stress_free();
signal(SIGTERM, SIG_DFL);
signal(SIGINT, SIG_DFL);
if (runtime_s > 0) {
signal(SIGALRM, SIG_DFL);
alarm(0);
}
if (rlim_new.rlim_cur)
setrlimit(RLIMIT_NOFILE, &rlim_old);
CHECK(err, "test_sk_storage_map_stress_free", "err:%d\n", err);
}
static void test_sk_storage_map_stress_change(void)
{
int err;
signal(SIGTERM, stop_handler);
signal(SIGINT, stop_handler);
if (runtime_s > 0) {
signal(SIGALRM, stop_handler);
alarm(runtime_s);
}
err = do_sk_storage_map_stress_change();
signal(SIGTERM, SIG_DFL);
signal(SIGINT, SIG_DFL);
if (runtime_s > 0) {
signal(SIGALRM, SIG_DFL);
alarm(0);
}
CHECK(err, "test_sk_storage_map_stress_change", "err:%d\n", err);
}
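/* Exercise BPF_F_LOCK against the bpf_spin_lock embedded in the value: a
* locked lookup is expected to hand back the payload with the lock field
* reading 0, no matter what the updater stored in it.
*/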
static void test_sk_storage_map_basic(void)
{
struct {
int cnt;
int lock;
} value = { .cnt = 0xeB9f, .lock = 1, }, lookup_value;
struct bpf_map_create_opts bad_xattr;
int btf_fd, map_fd, sk_fd, err;
btf_fd = load_btf();
CHECK(btf_fd == -1, "bpf_load_btf", "btf_fd:%d errno:%d\n",
btf_fd, errno);
map_opts.btf_fd = btf_fd;
sk_fd = socket(AF_INET6, SOCK_STREAM, 0);
CHECK(sk_fd == -1, "socket()", "sk_fd:%d errno:%d\n",
sk_fd, errno);
map_fd = bpf_map_create(BPF_MAP_TYPE_SK_STORAGE, "sk_storage_map", 4, 8, 0, &map_opts);
CHECK(map_fd == -1, "bpf_map_create(good_xattr)",
"map_fd:%d errno:%d\n", map_fd, errno);
/* Add new elem */
memcpy(&lookup_value, &value, sizeof(value));
err = bpf_map_update_elem(map_fd, &sk_fd, &value,
BPF_NOEXIST | BPF_F_LOCK);
CHECK(err, "bpf_map_update_elem(BPF_NOEXIST|BPF_F_LOCK)",
"err:%d errno:%d\n", err, errno);
err = bpf_map_lookup_elem_flags(map_fd, &sk_fd, &lookup_value,
BPF_F_LOCK);
CHECK(err || lookup_value.lock || lookup_value.cnt != value.cnt,
"bpf_map_lookup_elem_flags(BPF_F_LOCK)",
"err:%d errno:%d lock:%x cnt:%x(%x)\n",
err, errno, lookup_value.lock, lookup_value.cnt, value.cnt);
/* Bump the cnt and update with BPF_EXIST | BPF_F_LOCK */
value.cnt += 1;
value.lock = 2;
err = bpf_map_update_elem(map_fd, &sk_fd, &value,
BPF_EXIST | BPF_F_LOCK);
CHECK(err, "bpf_map_update_elem(BPF_EXIST|BPF_F_LOCK)",
"err:%d errno:%d\n", err, errno);
err = bpf_map_lookup_elem_flags(map_fd, &sk_fd, &lookup_value,
BPF_F_LOCK);
CHECK(err || lookup_value.lock || lookup_value.cnt != value.cnt,
"bpf_map_lookup_elem_flags(BPF_F_LOCK)",
"err:%d errno:%d lock:%x cnt:%x(%x)\n",
err, errno, lookup_value.lock, lookup_value.cnt, value.cnt);
/* Bump the cnt and update with BPF_EXIST */
value.cnt += 1;
value.lock = 2;
err = bpf_map_update_elem(map_fd, &sk_fd, &value, BPF_EXIST);
CHECK(err, "bpf_map_update_elem(BPF_EXIST)",
"err:%d errno:%d\n", err, errno);
err = bpf_map_lookup_elem_flags(map_fd, &sk_fd, &lookup_value,
BPF_F_LOCK);
CHECK(err || lookup_value.lock || lookup_value.cnt != value.cnt,
"bpf_map_lookup_elem_flags(BPF_F_LOCK)",
"err:%d errno:%d lock:%x cnt:%x(%x)\n",
err, errno, lookup_value.lock, lookup_value.cnt, value.cnt);
/* Update with BPF_NOEXIST */
value.cnt += 1;
value.lock = 2;
err = bpf_map_update_elem(map_fd, &sk_fd, &value,
BPF_NOEXIST | BPF_F_LOCK);
CHECK(!err || errno != EEXIST,
"bpf_map_update_elem(BPF_NOEXIST|BPF_F_LOCK)",
"err:%d errno:%d\n", err, errno);
err = bpf_map_update_elem(map_fd, &sk_fd, &value, BPF_NOEXIST);
CHECK(!err || errno != EEXIST, "bpf_map_update_elem(BPF_NOEXIST)",
"err:%d errno:%d\n", err, errno);
value.cnt -= 1;
err = bpf_map_lookup_elem_flags(map_fd, &sk_fd, &lookup_value,
BPF_F_LOCK);
CHECK(err || lookup_value.lock || lookup_value.cnt != value.cnt,
"bpf_map_lookup_elem_flags(BPF_F_LOCK)",
"err:%d errno:%d lock:%x cnt:%x(%x)\n",
err, errno, lookup_value.lock, lookup_value.cnt, value.cnt);
/* Bump the cnt again and update with map_flags == 0 */
value.cnt += 1;
value.lock = 2;
err = bpf_map_update_elem(map_fd, &sk_fd, &value, 0);
CHECK(err, "bpf_map_update_elem()", "err:%d errno:%d\n",
err, errno);
err = bpf_map_lookup_elem_flags(map_fd, &sk_fd, &lookup_value,
BPF_F_LOCK);
CHECK(err || lookup_value.lock || lookup_value.cnt != value.cnt,
"bpf_map_lookup_elem_flags(BPF_F_LOCK)",
"err:%d errno:%d lock:%x cnt:%x(%x)\n",
err, errno, lookup_value.lock, lookup_value.cnt, value.cnt);
/* Test delete elem */
err = bpf_map_delete_elem(map_fd, &sk_fd);
CHECK(err, "bpf_map_delete_elem()", "err:%d errno:%d\n",
err, errno);
err = bpf_map_lookup_elem_flags(map_fd, &sk_fd, &lookup_value,
BPF_F_LOCK);
CHECK(!err || errno != ENOENT,
"bpf_map_lookup_elem_flags(BPF_F_LOCK)",
"err:%d errno:%d\n", err, errno);
err = bpf_map_delete_elem(map_fd, &sk_fd);
CHECK(!err || errno != ENOENT, "bpf_map_delete_elem()",
"err:%d errno:%d\n", err, errno);
memcpy(&bad_xattr, &map_opts, sizeof(map_opts));
bad_xattr.btf_key_type_id = 0;
err = bpf_map_create(BPF_MAP_TYPE_SK_STORAGE, "sk_storage_map", 4, 8, 0, &bad_xattr);
CHECK(!err || errno != EINVAL, "bpf_map_create(bad_xattr)",
"err:%d errno:%d\n", err, errno);
memcpy(&bad_xattr, &map_opts, sizeof(map_opts));
bad_xattr.btf_key_type_id = 3;
err = bpf_map_create(BPF_MAP_TYPE_SK_STORAGE, "sk_storage_map", 4, 8, 0, &bad_xattr);
CHECK(!err || errno != EINVAL, "bpf_map_create(bad_xattr)",
"err:%d errno:%d\n", err, errno);
err = bpf_map_create(BPF_MAP_TYPE_SK_STORAGE, "sk_storage_map", 4, 8, 1, &map_opts);
CHECK(!err || errno != EINVAL, "bpf_map_create(bad_xattr)",
"err:%d errno:%d\n", err, errno);
memcpy(&bad_xattr, &map_opts, sizeof(map_opts));
bad_xattr.map_flags = 0;
err = bpf_map_create(BPF_MAP_TYPE_SK_STORAGE, "sk_storage_map", 4, 8, 0, &bad_xattr);
CHECK(!err || errno != EINVAL, "bap_create_map_xattr(bad_xattr)",
"err:%d errno:%d\n", err, errno);
map_opts.btf_fd = -1;
close(btf_fd);
close(map_fd);
close(sk_fd);
}
void test_sk_storage_map(void)
{
const char *test_name, *env_opt;
bool test_ran = false;
test_name = getenv(BPF_SK_STORAGE_MAP_TEST_NAME);
env_opt = getenv(BPF_SK_STORAGE_MAP_TEST_NR_THREADS);
if (env_opt)
nr_sk_threads = atoi(env_opt);
env_opt = getenv(BPF_SK_STORAGE_MAP_TEST_SK_PER_THREAD);
if (env_opt)
nr_sk_per_thread = atoi(env_opt);
env_opt = getenv(BPF_SK_STORAGE_MAP_TEST_RUNTIME_S);
if (env_opt)
runtime_s = atoi(env_opt);
if (!test_name || !strcmp(test_name, "basic")) {
test_sk_storage_map_basic();
test_ran = true;
}
if (!test_name || !strcmp(test_name, "stress_free")) {
test_sk_storage_map_stress_free();
test_ran = true;
}
if (!test_name || !strcmp(test_name, "stress_change")) {
test_sk_storage_map_stress_change();
test_ran = true;
}
if (test_ran)
printf("%s:PASS\n", __func__);
else
CHECK(1, "Invalid test_name", "%s\n", test_name);
}
| linux-master | tools/testing/selftests/bpf/map_tests/sk_storage_map.c |