// SPDX-License-Identifier: GPL-2.0
/*
* Test sigreturn to an unaligned address, i.e. low 2 bits set.
* Nothing bad should happen.
* This was able to trigger warnings with CONFIG_PPC_RFI_SRR_DEBUG=y.
*/
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <ucontext.h>
#include <unistd.h>
#include "utils.h"
static void sigusr1_handler(int signo, siginfo_t *info, void *ptr)
{
ucontext_t *uc = ptr;
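/* Set the low 2 bits of the NIP the handler returns to; powerpc
 * instructions are 4-byte aligned, so the kernel must mask or
 * tolerate these bits on sigreturn rather than warn or crash.
 */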
UCONTEXT_NIA(uc) |= 3;
}
static int test_sigreturn_unaligned(void)
{
struct sigaction action;
memset(&action, 0, sizeof(action));
action.sa_sigaction = sigusr1_handler;
action.sa_flags = SA_SIGINFO;
FAIL_IF(sigaction(SIGUSR1, &action, NULL) == -1);
raise(SIGUSR1);
return 0;
}
int main(void)
{
return test_harness(test_sigreturn_unaligned, "sigreturn_unaligned");
}
| linux-master | tools/testing/selftests/powerpc/signal/sigreturn_unaligned.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2018, Breno Leitao, IBM Corp.
* Licensed under GPLv2.
*
* Sigfuz(tm): A PowerPC TM-aware signal fuzzer.
*
* This is a selftest that raises SIGUSR1 signals and handles them in a set
* of different ways, trying to create different scenarios for testing
* purposes.
*
* This test works by raising a signal and calling sigreturn interleaved with
* TM operations, such as starting, suspending and terminating a transaction. The
* test depends on random numbers, and, based on them, it sets different TM
* states.
*
* Other than that, the test fills out the user context struct that is passed
* to the sigreturn system call with random data, in order to make sure that
* the signal handler syscall can handle different and invalid states
* properly.
*
* This selftest has command line parameters to control what kind of tests the
* user wants to run, for example, whether a transaction should be started prior
* to the signal being raised, or after the signal is raised and before the
* sigreturn. If no parameter is given, the default is enabling all options.
*
* This test does not check if the user context is being read and set
* properly by the kernel. Its purpose, at this time, is basically
* guaranteeing that the kernel does not crash on invalid scenarios.
*/
#include <stdio.h>
#include <limits.h>
#include <sys/wait.h>
#include <unistd.h>
#include <stdlib.h>
#include <signal.h>
#include <string.h>
#include <ucontext.h>
#include <sys/mman.h>
#include <pthread.h>
#include "utils.h"
/* Selftest defaults */
#define COUNT_MAX 600 /* Number of iterations */
#define THREADS 16 /* Number of threads */
/* Arguments options */
#define ARG_MESS_WITH_TM_AT 0x1
#define ARG_MESS_WITH_TM_BEFORE 0x2
#define ARG_MESS_WITH_MSR_AT 0x4
#define ARG_FOREVER 0x10
#define ARG_COMPLETE (ARG_MESS_WITH_TM_AT | \
ARG_MESS_WITH_TM_BEFORE | \
ARG_MESS_WITH_MSR_AT)
static int args;
static int nthread = THREADS;
static int count_max = COUNT_MAX;
/* checkpoint context */
static ucontext_t *tmp_uc;
/* Return true with 1/x probability */
static int one_in_chance(int x)
{
return rand() % x == 0;
}
/* Change TM states */
static void mess_with_tm(void)
{
/* Starts a transaction 33% of the time */
if (one_in_chance(3)) {
asm ("tbegin. ;"
"beq 8 ;");
/* And suspend half of them */
if (one_in_chance(2))
asm("tsuspend. ;");
}
/* Call 'tend' in 5% of the runs */
if (one_in_chance(20))
asm("tend. ;");
}
/* Signal handler that will be invoked with raise() */
static void trap_signal_handler(int signo, siginfo_t *si, void *uc)
{
ucontext_t *ucp = uc;
ucp->uc_link = tmp_uc;
/*
* Set uc_link in three possible ways:
* - Filling the whole chunk with a single random byte
* - Cloning ucp into uc_link
* - Allocating a new memory chunk
*/
if (one_in_chance(3)) {
memset(ucp->uc_link, rand(), sizeof(ucontext_t));
} else if (one_in_chance(2)) {
memcpy(ucp->uc_link, uc, sizeof(ucontext_t));
} else if (one_in_chance(2)) {
if (tmp_uc) {
free(tmp_uc);
tmp_uc = NULL;
}
tmp_uc = malloc(sizeof(ucontext_t));
ucp->uc_link = tmp_uc;
/* Trying to cause a major page fault at Kernel level */
madvise(ucp->uc_link, sizeof(ucontext_t), MADV_DONTNEED);
}
if (args & ARG_MESS_WITH_MSR_AT) {
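/* Background, assuming Power ISA TM semantics: MSR[TS] is a 2-bit
 * field, 0b00 = non-transactional, 0b01 = suspended, 0b10 =
 * transactional, 0b11 = reserved. ORing in MSR_TS_S and/or MSR_TS_T
 * below can therefore forge suspended, transactional, or invalid
 * (both bits set) states that the kernel must validate on sigreturn.
 */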
/* Changing the checkpointed registers */
if (one_in_chance(4)) {
ucp->uc_link->uc_mcontext.gp_regs[PT_MSR] |= MSR_TS_S;
} else {
if (one_in_chance(2)) {
ucp->uc_link->uc_mcontext.gp_regs[PT_MSR] |=
MSR_TS_T;
} else if (one_in_chance(2)) {
ucp->uc_link->uc_mcontext.gp_regs[PT_MSR] |=
MSR_TS_T | MSR_TS_S;
}
}
/* Checking the current register context */
if (one_in_chance(2)) {
ucp->uc_mcontext.gp_regs[PT_MSR] |= MSR_TS_S;
} else if (one_in_chance(2)) {
if (one_in_chance(2))
ucp->uc_mcontext.gp_regs[PT_MSR] |=
MSR_TS_T;
else if (one_in_chance(2))
ucp->uc_mcontext.gp_regs[PT_MSR] |=
MSR_TS_T | MSR_TS_S;
}
}
if (one_in_chance(20)) {
/* Nested transaction start */
if (one_in_chance(5))
mess_with_tm();
/* Return without changing any other context info */
return;
}
if (one_in_chance(10))
ucp->uc_mcontext.gp_regs[PT_MSR] = random();
if (one_in_chance(10))
ucp->uc_mcontext.gp_regs[PT_NIP] = random();
if (one_in_chance(10))
ucp->uc_link->uc_mcontext.gp_regs[PT_MSR] = random();
if (one_in_chance(10))
ucp->uc_link->uc_mcontext.gp_regs[PT_NIP] = random();
ucp->uc_mcontext.gp_regs[PT_TRAP] = random();
ucp->uc_mcontext.gp_regs[PT_DSISR] = random();
ucp->uc_mcontext.gp_regs[PT_DAR] = random();
ucp->uc_mcontext.gp_regs[PT_ORIG_R3] = random();
ucp->uc_mcontext.gp_regs[PT_XER] = random();
ucp->uc_mcontext.gp_regs[PT_RESULT] = random();
ucp->uc_mcontext.gp_regs[PT_SOFTE] = random();
ucp->uc_mcontext.gp_regs[PT_DSCR] = random();
ucp->uc_mcontext.gp_regs[PT_CTR] = random();
ucp->uc_mcontext.gp_regs[PT_LNK] = random();
ucp->uc_mcontext.gp_regs[PT_CCR] = random();
ucp->uc_mcontext.gp_regs[PT_REGS_COUNT] = random();
ucp->uc_link->uc_mcontext.gp_regs[PT_TRAP] = random();
ucp->uc_link->uc_mcontext.gp_regs[PT_DSISR] = random();
ucp->uc_link->uc_mcontext.gp_regs[PT_DAR] = random();
ucp->uc_link->uc_mcontext.gp_regs[PT_ORIG_R3] = random();
ucp->uc_link->uc_mcontext.gp_regs[PT_XER] = random();
ucp->uc_link->uc_mcontext.gp_regs[PT_RESULT] = random();
ucp->uc_link->uc_mcontext.gp_regs[PT_SOFTE] = random();
ucp->uc_link->uc_mcontext.gp_regs[PT_DSCR] = random();
ucp->uc_link->uc_mcontext.gp_regs[PT_CTR] = random();
ucp->uc_link->uc_mcontext.gp_regs[PT_LNK] = random();
ucp->uc_link->uc_mcontext.gp_regs[PT_CCR] = random();
ucp->uc_link->uc_mcontext.gp_regs[PT_REGS_COUNT] = random();
if (args & ARG_MESS_WITH_TM_BEFORE) {
if (one_in_chance(2))
mess_with_tm();
}
}
static void seg_signal_handler(int signo, siginfo_t *si, void *uc)
{
/* Clear exit for process that segfaults */
exit(0);
}
static void *sigfuz_test(void *thrid)
{
struct sigaction trap_sa, seg_sa;
int ret, i = 0;
pid_t t;
tmp_uc = malloc(sizeof(ucontext_t));
/* Main signal handler; zero first so sa_mask is not stack garbage */
memset(&trap_sa, 0, sizeof(trap_sa));
trap_sa.sa_flags = SA_SIGINFO;
trap_sa.sa_sigaction = trap_signal_handler;
/* SIGSEGV signal handler */
memset(&seg_sa, 0, sizeof(seg_sa));
seg_sa.sa_flags = SA_SIGINFO;
seg_sa.sa_sigaction = seg_signal_handler;
/* The signal handler will enable MSR_TS */
sigaction(SIGUSR1, &trap_sa, NULL);
/* If the kernel does not crash, the child may segfault; catch it so the test can continue */
sigaction(SIGSEGV, &seg_sa, NULL);
while (i < count_max) {
t = fork();
if (t == 0) {
/* One seed per process */
srand(time(NULL) + getpid());
if (args & ARG_MESS_WITH_TM_AT) {
if (one_in_chance(2))
mess_with_tm();
}
raise(SIGUSR1);
exit(0);
} else {
waitpid(t, &ret, 0);
}
if (!(args & ARG_FOREVER))
i++;
}
/* If not freed already, free now */
if (tmp_uc) {
free(tmp_uc);
tmp_uc = NULL;
}
return NULL;
}
static int signal_fuzzer(void)
{
int t, rc;
pthread_t *threads;
threads = malloc(nthread * sizeof(pthread_t));
for (t = 0; t < nthread; t++) {
rc = pthread_create(&threads[t], NULL, sigfuz_test,
(void *)&t);
if (rc)
perror("Thread creation error\n");
}
for (t = 0; t < nthread; t++) {
rc = pthread_join(threads[t], NULL);
if (rc)
perror("Thread join error\n");
}
free(threads);
return EXIT_SUCCESS;
}
static void show_help(char *name)
{
printf("%s: Sigfuzzer for powerpc\n", name);
printf("Usage:\n");
printf("\t-b\t Mess with TM before raising a SIGUSR1 signal\n");
printf("\t-a\t Mess with TM after raising a SIGUSR1 signal\n");
printf("\t-m\t Mess with MSR[TS] bits at mcontext\n");
printf("\t-x\t Mess with everything above\n");
printf("\t-f\t Run forever (Press ^C to Quit)\n");
printf("\t-i\t Amount of interactions. (Default = %d)\n", COUNT_MAX);
printf("\t-t\t Amount of threads. (Default = %d)\n", THREADS);
exit(-1);
}
int main(int argc, char **argv)
{
int opt;
while ((opt = getopt(argc, argv, "bamxt:fi:h")) != -1) {
if (opt == 'b') {
printf("Mess with TM before signal\n");
args |= ARG_MESS_WITH_TM_BEFORE;
} else if (opt == 'a') {
printf("Mess with TM at signal handler\n");
args |= ARG_MESS_WITH_TM_AT;
} else if (opt == 'm') {
printf("Mess with MSR[TS] bits in mcontext\n");
args |= ARG_MESS_WITH_MSR_AT;
} else if (opt == 'x') {
printf("Running with all options enabled\n");
args |= ARG_COMPLETE;
} else if (opt == 't') {
nthread = atoi(optarg);
printf("Threads = %d\n", nthread);
} else if (opt == 'f') {
args |= ARG_FOREVER;
printf("Press ^C to stop\n");
test_harness_set_timeout(-1);
} else if (opt == 'i') {
count_max = atoi(optarg);
printf("Running for %d interactions\n", count_max);
} else if (opt == 'h') {
show_help(argv[0]);
}
}
/* Default test suite */
if (!args)
args = ARG_COMPLETE;
test_harness(signal_fuzzer, "signal_fuzzer");
}
| linux-master | tools/testing/selftests/powerpc/signal/sigfuz.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright 2016, Cyril Bur, IBM Corp.
*
* A signal sent to oneself should always be delivered.
*/
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>
#include <altivec.h>
#include "utils.h"
#define MAX_ATTEMPT 500000
#define TIMEOUT 5
extern long signal_self(pid_t pid, int sig);
static sig_atomic_t signaled;
static sig_atomic_t fail;
static void signal_handler(int sig)
{
if (sig == SIGUSR1)
signaled = 1;
else
fail = 1;
}
static int test_signal()
{
int i;
struct sigaction act;
pid_t ppid = getpid();
pid_t pid;
act.sa_handler = signal_handler;
act.sa_flags = 0;
sigemptyset(&act.sa_mask);
if (sigaction(SIGUSR1, &act, NULL) < 0) {
perror("sigaction SIGUSR1");
exit(1);
}
if (sigaction(SIGALRM, &act, NULL) < 0) {
perror("sigaction SIGALRM");
exit(1);
}
/* Don't do this for MAX_ATTEMPT, it's simply too long */
for(i = 0; i < 1000; i++) {
pid = fork();
if (pid == -1) {
perror("fork");
exit(1);
}
if (pid == 0) {
signal_self(ppid, SIGUSR1);
exit(1);
} else {
alarm(0); /* Disable any pending */
alarm(2);
while (!signaled && !fail)
asm volatile("": : :"memory");
if (!signaled) {
fprintf(stderr, "Didn't get signal from child\n");
FAIL_IF(1); /* For the line number */
}
/* Otherwise we'll loop too fast and fork() will eventually fail */
waitpid(pid, NULL, 0);
}
}
for (i = 0; i < MAX_ATTEMPT; i++) {
long rc;
alarm(0); /* Disable any pending */
signaled = 0;
alarm(TIMEOUT);
rc = signal_self(ppid, SIGUSR1);
if (rc) {
fprintf(stderr, "(%d) Fail reason: %d rc=0x%lx",
i, fail, rc);
FAIL_IF(1); /* For the line number */
}
while (!signaled && !fail)
asm volatile("": : :"memory");
if (!signaled) {
fprintf(stderr, "(%d) Fail reason: %d rc=0x%lx",
i, fail, rc);
FAIL_IF(1); /* For the line number */
}
}
return 0;
}
int main(void)
{
test_harness_set_timeout(300);
return test_harness(test_signal, "signal");
}
| linux-master | tools/testing/selftests/powerpc/signal/signal.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Test that a syscall does not get restarted twice, handled by trap_norestart()
*
* Based on Al's description, and a test for the bug fixed in this commit:
*
* commit 9a81c16b527528ad307843be5571111aa8d35a80
* Author: Al Viro <[email protected]>
* Date: Mon Sep 20 21:48:57 2010 +0100
*
* powerpc: fix double syscall restarts
*
* Make sigreturn zero regs->trap, make do_signal() do the same on all
* paths. As it is, signal interrupting e.g. read() from fd 512 (==
* ERESTARTSYS) with another signal getting unblocked when the first
* handler finishes will lead to restart one insn earlier than it ought
* to. Same for multiple signals with in-kernel handlers interrupting
* that sucker at the same time. Same for multiple signals of any kind
* interrupting that sucker on 64bit...
*/
#define _GNU_SOURCE
#include <sys/types.h>
#include <sys/wait.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <signal.h>
#include <errno.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include "utils.h"
static void SIGUSR1_handler(int sig)
{
kill(getpid(), SIGUSR2);
/*
* SIGUSR2 is blocked until the handler exits, at which point it will
* be raised again and think there is a restart to be done because the
* pending restarted syscall has 512 (ERESTARTSYS) in r3. The second
* restart will retreat the NIP another 4 bytes, into the fail-case branch.
*/
}
static void SIGUSR2_handler(int sig)
{
}
static ssize_t raw_read(int fd, void *buf, size_t count)
{
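/* Hand-rolled read(2) built to detect a double restart: normal
 * entry takes "b 0f" straight to the sc. A legitimate restart
 * rewinds the NIP 4 bytes, back onto the sc itself. A buggy double
 * restart rewinds 4 bytes further, onto "b 1f", which loads the
 * sentinel -ENOANO instead of re-issuing the syscall.
 */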
register long nr asm("r0") = __NR_read;
register long _fd asm("r3") = fd;
register void *_buf asm("r4") = buf;
register size_t _count asm("r5") = count;
asm volatile(
" b 0f \n"
" b 1f \n"
" 0: sc 0 \n"
" bns 2f \n"
" neg %0,%0 \n"
" b 2f \n"
" 1: \n"
" li %0,%4 \n"
" 2: \n"
: "+r"(_fd), "+r"(nr), "+r"(_buf), "+r"(_count)
: "i"(-ENOANO)
: "memory", "r6", "r7", "r8", "r9", "r10", "r11", "r12", "ctr", "cr0");
if (_fd < 0) {
errno = -_fd;
_fd = -1;
}
return _fd;
}
#define DATA "test 123"
#define DLEN (strlen(DATA)+1)
int test_restart(void)
{
int pipefd[2];
pid_t pid;
char buf[512];
if (pipe(pipefd) == -1) {
perror("pipe");
exit(EXIT_FAILURE);
}
pid = fork();
if (pid == -1) {
perror("fork");
exit(EXIT_FAILURE);
}
if (pid == 0) { /* Child reads from pipe */
struct sigaction act;
int fd;
memset(&act, 0, sizeof(act));
sigaddset(&act.sa_mask, SIGUSR2);
act.sa_handler = SIGUSR1_handler;
act.sa_flags = SA_RESTART;
if (sigaction(SIGUSR1, &act, NULL) == -1) {
perror("sigaction");
exit(EXIT_FAILURE);
}
memset(&act, 0, sizeof(act));
act.sa_handler = SIGUSR2_handler;
act.sa_flags = SA_RESTART;
if (sigaction(SIGUSR2, &act, NULL) == -1) {
perror("sigaction");
exit(EXIT_FAILURE);
}
/* Let's get ERESTARTSYS into r3 */
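/* dup() until we hold fd 512: the fd lands in r3, and 512 is also
 * the value of ERESTARTSYS, so an interrupted read leaves r3
 * looking exactly like a syscall waiting to be restarted.
 */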
while ((fd = dup(pipefd[0])) != 512) {
if (fd == -1) {
perror("dup");
exit(EXIT_FAILURE);
}
}
if (raw_read(fd, buf, 512) == -1) {
if (errno == ENOANO) {
fprintf(stderr, "Double restart moved restart before sc instruction.\n");
_exit(EXIT_FAILURE);
}
perror("read");
exit(EXIT_FAILURE);
}
if (strncmp(buf, DATA, DLEN)) {
fprintf(stderr, "bad test string %s\n", buf);
exit(EXIT_FAILURE);
}
return 0;
} else {
int wstatus;
usleep(100000); /* Hack to get reader waiting */
kill(pid, SIGUSR1);
usleep(100000);
if (write(pipefd[1], DATA, DLEN) != DLEN) {
perror("write");
exit(EXIT_FAILURE);
}
close(pipefd[0]);
close(pipefd[1]);
if (wait(&wstatus) == -1) {
perror("wait");
exit(EXIT_FAILURE);
}
if (!WIFEXITED(wstatus)) {
fprintf(stderr, "child exited abnormally\n");
exit(EXIT_FAILURE);
}
FAIL_IF(WEXITSTATUS(wstatus) != EXIT_SUCCESS);
return 0;
}
}
int main(void)
{
test_harness_set_timeout(10);
return test_harness(test_restart, "sig sys restart");
}
| linux-master | tools/testing/selftests/powerpc/signal/sig_sc_double_restart.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Test that we can take signals with and without the VDSO mapped, which trigger
* different paths in the signal handling code.
*
* See handle_rt_signal64() and setup_trampoline() in signal_64.c
*/
#define _GNU_SOURCE
#include <errno.h>
#include <stdio.h>
#include <signal.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <unistd.h>
// Ensure assert() is not compiled out
#undef NDEBUG
#include <assert.h>
#include "utils.h"
static int search_proc_maps(char *needle, unsigned long *low, unsigned long *high)
{
unsigned long start, end;
static char buf[4096];
char name[128];
FILE *f;
int rc = -1;
f = fopen("/proc/self/maps", "r");
if (!f) {
perror("fopen");
return -1;
}
while (fgets(buf, sizeof(buf), f)) {
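/* A typical line, with illustrative values:
 * 7fffa8ac0000-7fffa8ad0000 r-xp 00000000 00:00 0  [vdso]
 * Anonymous mappings have no name field, match only 2 items,
 * and are skipped below.
 */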
rc = sscanf(buf, "%lx-%lx %*c%*c%*c%*c %*x %*d:%*d %*d %127s\n",
&start, &end, name);
if (rc == 2)
continue;
if (rc != 3) {
printf("sscanf errored\n");
rc = -1;
break;
}
if (strstr(name, needle)) {
*low = start;
*high = end - 1;
rc = 0;
break;
}
}
fclose(f);
return rc;
}
static volatile sig_atomic_t took_signal = 0;
static void sigusr1_handler(int sig)
{
took_signal++;
}
int test_sigreturn_vdso(void)
{
unsigned long low, high, size;
struct sigaction act;
char *p;
act.sa_handler = sigusr1_handler;
act.sa_flags = 0;
sigemptyset(&act.sa_mask);
assert(sigaction(SIGUSR1, &act, NULL) == 0);
// Confirm the VDSO is mapped, and work out where it is
assert(search_proc_maps("[vdso]", &low, &high) == 0);
size = high - low + 1;
printf("VDSO is at 0x%lx-0x%lx (%lu bytes)\n", low, high, size);
kill(getpid(), SIGUSR1);
assert(took_signal == 1);
printf("Signal delivered OK with VDSO mapped\n");
// Remap the VDSO somewhere else
p = mmap(NULL, size, PROT_READ|PROT_WRITE, MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
assert(p != MAP_FAILED);
assert(mremap((void *)low, size, size, MREMAP_MAYMOVE|MREMAP_FIXED, p) != MAP_FAILED);
assert(search_proc_maps("[vdso]", &low, &high) == 0);
size = high - low + 1;
printf("VDSO moved to 0x%lx-0x%lx (%lu bytes)\n", low, high, size);
kill(getpid(), SIGUSR1);
assert(took_signal == 2);
printf("Signal delivered OK with VDSO moved\n");
assert(munmap((void *)low, size) == 0);
printf("Unmapped VDSO\n");
// Confirm the VDSO is not mapped anymore
assert(search_proc_maps("[vdso]", &low, &high) != 0);
// Make the stack executable
assert(search_proc_maps("[stack]", &low, &high) == 0);
size = high - low + 1;
assert(mprotect((void *)low, size, PROT_READ|PROT_WRITE|PROT_EXEC) == 0);
printf("Remapped the stack executable\n");
kill(getpid(), SIGUSR1);
assert(took_signal == 3);
printf("Signal delivered OK with VDSO unmapped\n");
return 0;
}
int main(void)
{
return test_harness(test_sigreturn_vdso, "sigreturn_vdso");
}
| linux-master | tools/testing/selftests/powerpc/signal/sigreturn_vdso.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright 2020 IBM Corp.
*
* Author: Bulent Abali <[email protected]>
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <stdint.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/fcntl.h>
#include <sys/mman.h>
#include <endian.h>
#include <bits/endian.h>
#include <sys/ioctl.h>
#include <assert.h>
#include <errno.h>
#include <signal.h>
#include "vas-api.h"
#include "nx.h"
#include "copy-paste.h"
#include "nxu.h"
#include "nx_dbg.h"
#include <sys/platform/ppc.h>
#define barrier()
#define hwsync() ({ asm volatile("sync" ::: "memory"); })
#ifndef NX_NO_CPU_PRI
#define cpu_pri_default() ({ asm volatile ("or 2, 2, 2"); })
#define cpu_pri_low() ({ asm volatile ("or 31, 31, 31"); })
#else
#define cpu_pri_default()
#define cpu_pri_low()
#endif
void *nx_fault_storage_address;
struct nx_handle {
int fd;
int function;
void *paste_addr;
};
static int open_device_nodes(char *devname, int pri, struct nx_handle *handle)
{
int rc, fd;
void *addr;
struct vas_tx_win_open_attr txattr;
fd = open(devname, O_RDWR);
if (fd < 0) {
fprintf(stderr, " open device name %s\n", devname);
return -errno;
}
memset(&txattr, 0, sizeof(txattr));
txattr.version = 1;
txattr.vas_id = pri;
rc = ioctl(fd, VAS_TX_WIN_OPEN, (unsigned long)&txattr);
if (rc < 0) {
fprintf(stderr, "ioctl() n %d, error %d\n", rc, errno);
rc = -errno;
goto out;
}
addr = mmap(NULL, 4096, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0ULL);
if (addr == MAP_FAILED) {
fprintf(stderr, "mmap() failed, errno %d\n", errno);
rc = -errno;
goto out;
}
handle->fd = fd;
handle->paste_addr = (void *)((char *)addr + 0x400);
return 0;
out:
/* Error path only: on success the fd stays open, backing the
 * mapped paste window, and is closed later by nx_function_end().
 */
close(fd);
return rc;
}
void *nx_function_begin(int function, int pri)
{
int rc;
char *devname = "/dev/crypto/nx-gzip";
struct nx_handle *nxhandle;
if (function != NX_FUNC_COMP_GZIP) {
errno = EINVAL;
fprintf(stderr, " NX_FUNC_COMP_GZIP not found\n");
return NULL;
}
nxhandle = malloc(sizeof(*nxhandle));
if (!nxhandle) {
errno = ENOMEM;
fprintf(stderr, " No memory\n");
return NULL;
}
nxhandle->function = function;
rc = open_device_nodes(devname, pri, nxhandle);
if (rc < 0) {
errno = -rc;
fprintf(stderr, " open_device_nodes failed\n");
return NULL;
}
return nxhandle;
}
int nx_function_end(void *handle)
{
int rc = 0;
struct nx_handle *nxhandle = handle;
rc = munmap(nxhandle->paste_addr - 0x400, 4096);
if (rc < 0) {
fprintf(stderr, "munmap() failed, errno %d\n", errno);
return rc;
}
close(nxhandle->fd);
free(nxhandle);
return rc;
}
static int nx_wait_for_csb(struct nx_gzip_crb_cpb_t *cmdp)
{
long poll = 0;
uint64_t t;
/* Save power and let other threads use the h/w. top may show
* 100% but only because the OS doesn't know we slowed this
* h/w thread while polling. We're letting other threads have
* higher throughput on the core.
*/
cpu_pri_low();
#define CSB_MAX_POLL 200000000UL
#define USLEEP_TH 300000UL
t = __ppc_get_timebase();
while (getnn(cmdp->crb.csb, csb_v) == 0) {
++poll;
hwsync();
cpu_pri_low();
/* usleep(0) takes around 29000 ticks ~60 us.
* 300000 is spinning for about 600 us then
* start sleeping.
*/
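/* The tick figures above assume the 512 MHz POWER9 timebase:
 * 29000 ticks ~ 57 us, 300000 ticks ~ 586 us.
 */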
if ((__ppc_get_timebase() - t) > USLEEP_TH) {
cpu_pri_default();
usleep(1);
}
if (poll > CSB_MAX_POLL)
break;
/* Fault address from signal handler */
if (nx_fault_storage_address) {
cpu_pri_default();
return -EAGAIN;
}
}
cpu_pri_default();
/* hw has updated csb and output buffer */
hwsync();
/* Check CSB flags. */
if (getnn(cmdp->crb.csb, csb_v) == 0) {
fprintf(stderr, "CSB still not valid after %d polls.\n",
(int) poll);
prt_err("CSB still not valid after %d polls, giving up.\n",
(int) poll);
return -ETIMEDOUT;
}
return 0;
}
static int nxu_run_job(struct nx_gzip_crb_cpb_t *cmdp, void *handle)
{
int i, ret, retries;
struct nx_handle *nxhandle = handle;
assert(handle != NULL);
i = 0;
retries = 5000;
while (i++ < retries) {
hwsync();
vas_copy(&cmdp->crb, 0);
ret = vas_paste(nxhandle->paste_addr, 0);
hwsync();
NXPRT(fprintf(stderr, "Paste attempt %d/%d returns 0x%x\n",
i, retries, ret));
if ((ret == 2) || (ret == 3)) {
ret = nx_wait_for_csb(cmdp);
if (!ret) {
goto out;
} else if (ret == -EAGAIN) {
long x;
prt_err("Touching address %p, 0x%lx\n",
nx_fault_storage_address,
*(long *) nx_fault_storage_address);
x = *(long *) nx_fault_storage_address;
*(long *) nx_fault_storage_address = x;
nx_fault_storage_address = 0;
continue;
} else {
prt_err("wait_for_csb() returns %d\n", ret);
break;
}
} else {
if (i < 10) {
/* spin for few ticks */
#define SPIN_TH 500UL
uint64_t fail_spin;
fail_spin = __ppc_get_timebase();
while ((__ppc_get_timebase() - fail_spin) <
SPIN_TH)
;
} else {
/* sleep */
/* Static so the print rate limit below persists
 * across loop iterations.
 */
static unsigned int pr;
if (pr++ % 100 == 0) {
prt_err("Paste attempt %d/", i);
prt_err("%d, failed pid= %d\n", retries,
getpid());
}
usleep(1);
}
continue;
}
}
out:
cpu_pri_default();
return ret;
}
int nxu_submit_job(struct nx_gzip_crb_cpb_t *cmdp, void *handle)
{
int cc;
cc = nxu_run_job(cmdp, handle);
if (!cc)
cc = getnn(cmdp->crb.csb, csb_cc); /* CC Table 6-8 */
return cc;
}
void nxu_sigsegv_handler(int sig, siginfo_t *info, void *ctx)
{
fprintf(stderr, "%d: Got signal %d si_code %d, si_addr %p\n", getpid(),
sig, info->si_code, info->si_addr);
nx_fault_storage_address = info->si_addr;
}
/*
* Fault in pages prior to NX job submission. wr=1 may be required to
* touch writeable pages. System zero pages do not fault-in the page as
* intended. Typically set wr=1 for NX target pages and set wr=0 for NX
* source pages.
*/
int nxu_touch_pages(void *buf, long buf_len, long page_len, int wr)
{
char *begin = buf;
char *end = (char *) buf + buf_len - 1;
volatile char t;
assert(buf_len >= 0 && !!buf);
NXPRT(fprintf(stderr, "touch %p %p len 0x%lx wr=%d\n", buf,
(buf + buf_len), buf_len, wr));
if (buf_len <= 0 || buf == NULL)
return -1;
do {
t = *begin;
if (wr)
*begin = t;
begin = begin + page_len;
} while (begin < end);
/* When buf_sz is small or buf tail is in another page */
t = *end;
if (wr)
*end = t;
return 0;
}
| linux-master | tools/testing/selftests/powerpc/nx-gzip/gzip_vas.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* P9 gunzip sample code for demonstrating the P9 NX hardware
* interface. Not intended for production use or for performance or
* compression ratio measurements. Note also that /dev/crypto/gzip,
* VAS and skiboot support are required
*
* Copyright 2020 IBM Corp.
*
* Author: Bulent Abali <[email protected]>
*
* https://github.com/libnxz/power-gzip for zlib api and other utils
* Definitions of acronyms used here. See
* P9 NX Gzip Accelerator User's Manual for details:
* https://github.com/libnxz/power-gzip/blob/develop/doc/power_nx_gzip_um.pdf
*
* adler/crc: 32 bit checksums appended to stream tail
* ce: completion extension
* cpb: coprocessor parameter block (metadata)
* crb: coprocessor request block (command)
* csb: coprocessor status block (status)
* dht: dynamic huffman table
* dde: data descriptor element (address, length)
* ddl: list of ddes
* dh/fh: dynamic and fixed huffman types
* fc: coprocessor function code
* histlen: history/dictionary length
* history: sliding window of up to 32KB of data
* lzcount: Deflate LZ symbol counts
* rembytecnt: remaining byte count
* sfbt: source final block type; last block's type during decomp
* spbc: source processed byte count
* subc: source unprocessed bit count
* tebc: target ending bit count; valid bits in the last byte
* tpbc: target processed byte count
* vas: virtual accelerator switch; the user mode interface
*/
#define _ISOC11_SOURCE // For aligned_alloc()
#define _DEFAULT_SOURCE // For endian.h
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <stdint.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/fcntl.h>
#include <sys/mman.h>
#include <endian.h>
#include <bits/endian.h>
#include <sys/ioctl.h>
#include <assert.h>
#include <errno.h>
#include <signal.h>
#include "nxu.h"
#include "nx.h"
#include "crb.h"
int nx_dbg;
FILE *nx_gzip_log;
#define NX_MIN(X, Y) (((X) < (Y))?(X):(Y))
#define NX_MAX(X, Y) (((X) > (Y))?(X):(Y))
#define GETINPC(X) fgetc(X)
#define FNAME_MAX 1024
/* fifo queue management */
#define fifo_used_bytes(used) (used)
#define fifo_free_bytes(used, len) ((len)-(used))
/* amount of free bytes in the first and last parts */
#define fifo_free_first_bytes(cur, used, len) ((((cur)+(used)) <= (len)) \
? (len)-((cur)+(used)) : 0)
#define fifo_free_last_bytes(cur, used, len) ((((cur)+(used)) <= (len)) \
? (cur) : (len)-(used))
/* amount of used bytes in the first and last parts */
#define fifo_used_first_bytes(cur, used, len) ((((cur)+(used)) <= (len)) \
? (used) : (len)-(cur))
#define fifo_used_last_bytes(cur, used, len) ((((cur)+(used)) <= (len)) \
? 0 : ((used)+(cur))-(len))
/* first and last free parts start here */
#define fifo_free_first_offset(cur, used) ((cur)+(used))
#define fifo_free_last_offset(cur, used, len) \
fifo_used_last_bytes(cur, used, len)
/* first and last used parts start here */
#define fifo_used_first_offset(cur) (cur)
#define fifo_used_last_offset(cur) (0)
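/* Worked example with illustrative values cur=12, used=8, len=16:
 * cur+used = 20 > len, so the used region wraps: used-first is
 * 16-12 = 4 bytes at offset 12, used-last is (8+12)-16 = 4 bytes at
 * offset 0. The free region is the middle: free-first is 0 (no room
 * past the wrap) and free-last is 16-8 = 8 bytes at offset 4.
 */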
const int fifo_in_len = 1<<24;
const int fifo_out_len = 1<<24;
const int page_sz = 1<<16;
const int line_sz = 1<<7;
const int window_max = 1<<15;
/*
* Adds an (address, len) pair to the list of ddes (ddl) and updates
* the base dde. ddl[0] is the only dde in a direct dde which
* contains a single (addr,len) pair. For more pairs, ddl[0] becomes
* the indirect (base) dde that points to a list of direct ddes.
* See Section 6.4 of the NX-gzip user manual for DDE description.
* Addr=NULL, len=0 clears the ddl[0]. Returns the total number of
* bytes in ddl. Caller is responsible for allocating the array of
* nx_dde_t *ddl. If N addresses are required in the scatter-gather
* list, the ddl array must have N+1 entries minimum.
*/
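/* Layout sketch: a direct dde is a single (ddead=addr, ddebc=len)
 * pair held in ddl[0]. Once a second buffer is appended, ddl[0]
 * becomes the indirect head (dde_count=N, ddebc=total byte count,
 * ddead=&ddl[1]) and ddl[1..N] hold the individual (addr, len)
 * pairs.
 */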
static inline uint32_t nx_append_dde(struct nx_dde_t *ddl, void *addr,
uint32_t len)
{
uint32_t ddecnt;
uint32_t bytes;
if (addr == NULL && len == 0) {
clearp_dde(ddl);
return 0;
}
NXPRT(fprintf(stderr, "%d: %s addr %p len %x\n", __LINE__, addr,
__func__, len));
/* Number of ddes in the dde list; == 0 when it is a direct dde */
ddecnt = getpnn(ddl, dde_count);
bytes = getp32(ddl, ddebc);
if (ddecnt == 0 && bytes == 0) {
/* First dde is unused; make it a direct dde */
bytes = len;
putp32(ddl, ddebc, bytes);
putp64(ddl, ddead, (uint64_t) addr);
} else if (ddecnt == 0) {
/* Converting direct to indirect dde
* ddl[0] becomes head dde of ddl
* copy direct to indirect first.
*/
ddl[1] = ddl[0];
/* Add the new dde next */
clear_dde(ddl[2]);
put32(ddl[2], ddebc, len);
put64(ddl[2], ddead, (uint64_t) addr);
/* Ddl head points to 2 direct ddes */
ddecnt = 2;
putpnn(ddl, dde_count, ddecnt);
bytes = bytes + len;
putp32(ddl, ddebc, bytes);
/* Pointer to the first direct dde */
putp64(ddl, ddead, (uint64_t) &ddl[1]);
} else {
/* Append a dde to an existing indirect ddl */
++ddecnt;
clear_dde(ddl[ddecnt]);
put64(ddl[ddecnt], ddead, (uint64_t) addr);
put32(ddl[ddecnt], ddebc, len);
putpnn(ddl, dde_count, ddecnt);
bytes = bytes + len;
putp32(ddl, ddebc, bytes); /* byte sum of all dde */
}
return bytes;
}
/*
* Touch specified number of pages represented in number bytes
* beginning from the first buffer in a dde list.
* Do not touch the pages past buf_sz-th byte's page.
*
* Set buf_sz = 0 to touch all pages described by the ddep.
*/
static int nx_touch_pages_dde(struct nx_dde_t *ddep, long buf_sz, long page_sz,
int wr)
{
uint32_t indirect_count;
uint32_t buf_len;
long total;
uint64_t buf_addr;
struct nx_dde_t *dde_list;
int i;
assert(!!ddep);
indirect_count = getpnn(ddep, dde_count);
NXPRT(fprintf(stderr, "%s dde_count %d request len ", __func__,
indirect_count));
NXPRT(fprintf(stderr, "0x%lx\n", buf_sz));
if (indirect_count == 0) {
/* Direct dde */
buf_len = getp32(ddep, ddebc);
buf_addr = getp64(ddep, ddead);
NXPRT(fprintf(stderr, "touch direct ddebc 0x%x ddead %p\n",
buf_len, (void *)buf_addr));
if (buf_sz == 0)
nxu_touch_pages((void *)buf_addr, buf_len, page_sz, wr);
else
nxu_touch_pages((void *)buf_addr, NX_MIN(buf_len,
buf_sz), page_sz, wr);
return ERR_NX_OK;
}
/* Indirect dde */
if (indirect_count > MAX_DDE_COUNT)
return ERR_NX_EXCESSIVE_DDE;
/* First address of the list */
dde_list = (struct nx_dde_t *) getp64(ddep, ddead);
if (buf_sz == 0)
buf_sz = getp32(ddep, ddebc);
total = 0;
for (i = 0; i < indirect_count; i++) {
buf_len = get32(dde_list[i], ddebc);
buf_addr = get64(dde_list[i], ddead);
total += buf_len;
NXPRT(fprintf(stderr, "touch loop len 0x%x ddead %p total ",
buf_len, (void *)buf_addr));
NXPRT(fprintf(stderr, "0x%lx\n", total));
/* Touching fewer pages than encoded in the ddebc */
if (total > buf_sz) {
buf_len = NX_MIN(buf_len, total - buf_sz);
nxu_touch_pages((void *)buf_addr, buf_len, page_sz, wr);
NXPRT(fprintf(stderr, "touch loop break len 0x%x ",
buf_len));
NXPRT(fprintf(stderr, "ddead %p\n", (void *)buf_addr));
break;
}
nxu_touch_pages((void *)buf_addr, buf_len, page_sz, wr);
}
return ERR_NX_OK;
}
/*
* Src and dst buffers are supplied in scatter gather lists.
* NX function code and other parameters supplied in cmdp.
*/
static int nx_submit_job(struct nx_dde_t *src, struct nx_dde_t *dst,
struct nx_gzip_crb_cpb_t *cmdp, void *handle)
{
uint64_t csbaddr;
memset((void *)&cmdp->crb.csb, 0, sizeof(cmdp->crb.csb));
cmdp->crb.source_dde = *src;
cmdp->crb.target_dde = *dst;
/* Status, output byte count in tpbc */
csbaddr = ((uint64_t) &cmdp->crb.csb) & csb_address_mask;
put64(cmdp->crb, csb_address, csbaddr);
/* NX reports input bytes in spbc; cleared */
cmdp->cpb.out_spbc_comp_wrap = 0;
cmdp->cpb.out_spbc_comp_with_count = 0;
cmdp->cpb.out_spbc_decomp = 0;
/* Clear output */
put32(cmdp->cpb, out_crc, INIT_CRC);
put32(cmdp->cpb, out_adler, INIT_ADLER);
/* Submit the crb, the job descriptor, to the accelerator. */
return nxu_submit_job(cmdp, handle);
}
int decompress_file(int argc, char **argv, void *devhandle)
{
FILE *inpf = NULL;
FILE *outf = NULL;
int c, expect, i, cc, rc = 0;
char gzfname[FNAME_MAX];
/* Queuing, file ops, byte counting */
char *fifo_in, *fifo_out;
int used_in, cur_in, used_out, cur_out, read_sz, n;
int first_free, last_free, first_used, last_used;
int first_offset, last_offset;
int write_sz, free_space, source_sz;
int source_sz_estimate, target_sz_estimate;
uint64_t last_comp_ratio = 0; /* 1000 max */
uint64_t total_out = 0;
int is_final, is_eof;
/* nx hardware */
int sfbt, subc, spbc, tpbc, nx_ce, fc, resuming = 0;
int history_len = 0;
struct nx_gzip_crb_cpb_t cmd, *cmdp;
struct nx_dde_t *ddl_in;
struct nx_dde_t dde_in[6] __aligned(128);
struct nx_dde_t *ddl_out;
struct nx_dde_t dde_out[6] __aligned(128);
int pgfault_retries;
/* when using mmap'ed files */
off_t input_file_offset = 0;
if (argc > 2) {
fprintf(stderr, "usage: %s <fname> or stdin\n", argv[0]);
fprintf(stderr, " writes to stdout or <fname>.nx.gunzip\n");
return -1;
}
if (argc == 1) {
inpf = stdin;
outf = stdout;
} else if (argc == 2) {
char w[1024];
char *wp;
inpf = fopen(argv[1], "r");
if (inpf == NULL) {
perror(argv[1]);
return -1;
}
/* Make a new file name to write to. Ignoring '.gz' */
wp = (NULL != (wp = strrchr(argv[1], '/'))) ? (wp+1) : argv[1];
strcpy(w, wp);
strcat(w, ".nx.gunzip");
outf = fopen(w, "w");
if (outf == NULL) {
perror(w);
return -1;
}
}
/* Decode the gzip header */
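/* RFC 1952 fixed header, 10 bytes:
 * | ID1=1f | ID2=8b | CM=08 | FLG | MTIME(4) | XFL | OS |
 * optionally followed by FEXTRA, FNAME, FCOMMENT and FHCRC
 * fields depending on the FLG bits.
 */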
c = GETINPC(inpf); expect = 0x1f; /* ID1 */
if (c != expect)
goto err1;
c = GETINPC(inpf); expect = 0x8b; /* ID2 */
if (c != expect)
goto err1;
c = GETINPC(inpf); expect = 0x08; /* CM */
if (c != expect)
goto err1;
int flg = GETINPC(inpf); /* FLG */
if (flg & 0xE0 || flg & 0x4 || flg == EOF)
goto err2;
fprintf(stderr, "gzHeader FLG %x\n", flg);
/* Read 6 bytes; ignoring the MTIME, XFL, OS fields in this
* sample code.
*/
for (i = 0; i < 6; i++) {
char tmp[10];
tmp[i] = GETINPC(inpf);
if (tmp[i] == EOF)
goto err3;
fprintf(stderr, "%02x ", tmp[i]);
if (i == 5)
fprintf(stderr, "\n");
}
fprintf(stderr, "gzHeader MTIME, XFL, OS ignored\n");
/* FNAME */
if (flg & 0x8) {
int k = 0;
do {
c = GETINPC(inpf);
if (c == EOF || k >= FNAME_MAX)
goto err3;
gzfname[k++] = c;
} while (c);
fprintf(stderr, "gzHeader FNAME: %s\n", gzfname);
}
/* FHCRC */
if (flg & 0x2) {
c = GETINPC(inpf);
if (c == EOF)
goto err3;
c = GETINPC(inpf);
if (c == EOF)
goto err3;
fprintf(stderr, "gzHeader FHCRC: ignored\n");
}
used_in = cur_in = used_out = cur_out = 0;
is_final = is_eof = 0;
/* Allocate one page larger to prevent page faults due to NX
* overfetching.
* Either do this (char*)(uintptr_t)aligned_alloc or use
* -std=c11 flag to make the int-to-pointer warning go away.
*/
assert((fifo_in = (char *)(uintptr_t)aligned_alloc(line_sz,
fifo_in_len + page_sz)) != NULL);
assert((fifo_out = (char *)(uintptr_t)aligned_alloc(line_sz,
fifo_out_len + page_sz + line_sz)) != NULL);
/* Leave unused space due to history rounding rules */
fifo_out = fifo_out + line_sz;
nxu_touch_pages(fifo_out, fifo_out_len, page_sz, 1);
ddl_in = &dde_in[0];
ddl_out = &dde_out[0];
cmdp = &cmd;
memset(&cmdp->crb, 0, sizeof(cmdp->crb));
read_state:
/* Read from .gz file */
NXPRT(fprintf(stderr, "read_state:\n"));
if (is_eof != 0)
goto write_state;
/* We read into fifo_in in two steps: first, read from
 * cur_in to the end of the buffer; last, if free space wrapped
* around, read from fifo_in offset 0 to offset cur_in.
*/
/* Reset fifo head to reduce unnecessary wrap arounds */
cur_in = (used_in == 0) ? 0 : cur_in;
/* Free space total is reduced by a gap */
free_space = NX_MAX(0, fifo_free_bytes(used_in, fifo_in_len)
- line_sz);
/* Free space may wrap around as first and last */
first_free = fifo_free_first_bytes(cur_in, used_in, fifo_in_len);
last_free = fifo_free_last_bytes(cur_in, used_in, fifo_in_len);
/* Start offsets of the free memory */
first_offset = fifo_free_first_offset(cur_in, used_in);
last_offset = fifo_free_last_offset(cur_in, used_in, fifo_in_len);
/* Reduce read_sz because of the line_sz gap */
read_sz = NX_MIN(free_space, first_free);
n = 0;
if (read_sz > 0) {
/* Read in to offset cur_in + used_in */
n = fread(fifo_in + first_offset, 1, read_sz, inpf);
used_in = used_in + n;
free_space = free_space - n;
assert(n <= read_sz);
if (n != read_sz) {
/* Either EOF or error; exit the read loop */
is_eof = 1;
goto write_state;
}
}
/* If free space wrapped around */
if (last_free > 0) {
/* Reduce read_sz because of the line_sz gap */
read_sz = NX_MIN(free_space, last_free);
n = 0;
if (read_sz > 0) {
n = fread(fifo_in + last_offset, 1, read_sz, inpf);
used_in = used_in + n; /* Increase used space */
free_space = free_space - n; /* Decrease free space */
assert(n <= read_sz);
if (n != read_sz) {
/* Either EOF or error; exit the read loop */
is_eof = 1;
goto write_state;
}
}
}
/* At this point we have used_in bytes in fifo_in with the
* data head starting at cur_in and possibly wrapping around.
*/
write_state:
/* Write decompressed data to output file */
NXPRT(fprintf(stderr, "write_state:\n"));
if (used_out == 0)
goto decomp_state;
/* If fifo_out has data waiting, write it out to the file to
* make free target space for the accelerator used bytes in
* the first and last parts of fifo_out.
*/
first_used = fifo_used_first_bytes(cur_out, used_out, fifo_out_len);
last_used = fifo_used_last_bytes(cur_out, used_out, fifo_out_len);
write_sz = first_used;
n = 0;
if (write_sz > 0) {
n = fwrite(fifo_out + cur_out, 1, write_sz, outf);
used_out = used_out - n;
/* Move head of the fifo */
cur_out = (cur_out + n) % fifo_out_len;
assert(n <= write_sz);
if (n != write_sz) {
fprintf(stderr, "error: write\n");
rc = -1;
goto err5;
}
}
if (last_used > 0) { /* If more data available in the last part */
write_sz = last_used; /* Keep it here for later */
n = 0;
if (write_sz > 0) {
n = fwrite(fifo_out, 1, write_sz, outf);
used_out = used_out - n;
cur_out = (cur_out + n) % fifo_out_len;
assert(n <= write_sz);
if (n != write_sz) {
fprintf(stderr, "error: write\n");
rc = -1;
goto err5;
}
}
}
decomp_state:
/* NX decompresses input data */
NXPRT(fprintf(stderr, "decomp_state:\n"));
if (is_final)
goto finish_state;
/* Address/len lists */
clearp_dde(ddl_in);
clearp_dde(ddl_out);
/* FC, CRC, HistLen, Table 6-6 */
if (resuming) {
/* Resuming a partially decompressed input.
* The key to resume is supplying the 32KB
* dictionary (history) to NX, which is basically
* the last 32KB of output produced.
*/
fc = GZIP_FC_DECOMPRESS_RESUME;
cmdp->cpb.in_crc = cmdp->cpb.out_crc;
cmdp->cpb.in_adler = cmdp->cpb.out_adler;
/* Round up the history size to quadword. Section 2.10 */
history_len = (history_len + 15) / 16;
putnn(cmdp->cpb, in_histlen, history_len);
history_len = history_len * 16; /* bytes */
if (history_len > 0) {
/* Chain in the history buffer to the DDE list */
if (cur_out >= history_len) {
nx_append_dde(ddl_in, fifo_out
+ (cur_out - history_len),
history_len);
} else {
nx_append_dde(ddl_in, fifo_out
+ ((fifo_out_len + cur_out)
- history_len),
history_len - cur_out);
/* Up to 32KB history wraps around fifo_out */
nx_append_dde(ddl_in, fifo_out, cur_out);
}
}
} else {
/* First decompress job */
fc = GZIP_FC_DECOMPRESS;
history_len = 0;
/* Writing 0 clears out subc as well */
cmdp->cpb.in_histlen = 0;
total_out = 0;
put32(cmdp->cpb, in_crc, INIT_CRC);
put32(cmdp->cpb, in_adler, INIT_ADLER);
put32(cmdp->cpb, out_crc, INIT_CRC);
put32(cmdp->cpb, out_adler, INIT_ADLER);
/* Assuming 10% compression ratio initially; use the
* most recently measured compression ratio as a
* heuristic to estimate the input and output
* sizes. If we give too much input, the target buffer
* overflows and NX cycles are wasted, and then we
* must retry with smaller input size. 1000 is 100%.
*/
last_comp_ratio = 100UL;
}
cmdp->crb.gzip_fc = 0;
putnn(cmdp->crb, gzip_fc, fc);
/*
* NX source buffers
*/
first_used = fifo_used_first_bytes(cur_in, used_in, fifo_in_len);
last_used = fifo_used_last_bytes(cur_in, used_in, fifo_in_len);
if (first_used > 0)
nx_append_dde(ddl_in, fifo_in + cur_in, first_used);
if (last_used > 0)
nx_append_dde(ddl_in, fifo_in, last_used);
/*
* NX target buffers
*/
first_free = fifo_free_first_bytes(cur_out, used_out, fifo_out_len);
last_free = fifo_free_last_bytes(cur_out, used_out, fifo_out_len);
/* Reduce output free space amount not to overwrite the history */
int target_max = NX_MAX(0, fifo_free_bytes(used_out, fifo_out_len)
- (1<<16));
NXPRT(fprintf(stderr, "target_max %d (0x%x)\n", target_max,
target_max));
first_free = NX_MIN(target_max, first_free);
if (first_free > 0) {
first_offset = fifo_free_first_offset(cur_out, used_out);
nx_append_dde(ddl_out, fifo_out + first_offset, first_free);
}
if (last_free > 0) {
last_free = NX_MIN(target_max - first_free, last_free);
if (last_free > 0) {
last_offset = fifo_free_last_offset(cur_out, used_out,
fifo_out_len);
nx_append_dde(ddl_out, fifo_out + last_offset,
last_free);
}
}
/* Target buffer size is used to limit the source data size
* based on previous measurements of compression ratio.
*/
/* source_sz includes history */
source_sz = getp32(ddl_in, ddebc);
assert(source_sz > history_len);
source_sz = source_sz - history_len;
/* Estimating how much source is needed to 3/4 fill a
* target_max size target buffer. If we overshoot, then NX
* must repeat the job with smaller input and we waste
* bandwidth. If we undershoot then we use more NX calls than
* necessary.
*/
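/* Example with illustrative numbers: target_max = 1000000 and
 * last_comp_ratio = 100 (output expands 10x when decompressed)
 * give source_sz_estimate = 1000000 * 100 * 3 / 4000 = 75000,
 * just enough source to fill about 3/4 of the target.
 */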
source_sz_estimate = ((uint64_t)target_max * last_comp_ratio * 3UL)
/ 4000;
if (source_sz_estimate < source_sz) {
/* Target might be small, therefore limiting the
* source data.
*/
source_sz = source_sz_estimate;
target_sz_estimate = target_max;
} else {
/* Source file might be small, therefore limiting target
* touch pages to a smaller value to save processor cycles.
*/
target_sz_estimate = ((uint64_t)source_sz * 1000UL)
/ (last_comp_ratio + 1);
target_sz_estimate = NX_MIN(2 * target_sz_estimate,
target_max);
}
source_sz = source_sz + history_len;
/* Some NX condition codes require submitting the NX job again.
* Kernel doesn't handle NX page faults. Expects user code to
* touch pages.
*/
pgfault_retries = NX_MAX_FAULTS;
restart_nx:
putp32(ddl_in, ddebc, source_sz);
/* Fault in pages */
nxu_touch_pages(cmdp, sizeof(struct nx_gzip_crb_cpb_t), page_sz, 1);
nx_touch_pages_dde(ddl_in, 0, page_sz, 0);
nx_touch_pages_dde(ddl_out, target_sz_estimate, page_sz, 1);
/* Send job to NX */
cc = nx_submit_job(ddl_in, ddl_out, cmdp, devhandle);
switch (cc) {
case ERR_NX_AT_FAULT:
/* We touched the pages ahead of time. In the most common case
* we shouldn't be here. But maybe some pages were paged out.
* Kernel should have placed the faulting address to fsaddr.
*/
NXPRT(fprintf(stderr, "ERR_NX_AT_FAULT %p\n",
(void *)cmdp->crb.csb.fsaddr));
if (pgfault_retries == NX_MAX_FAULTS) {
/* Try once with exact number of pages */
--pgfault_retries;
goto restart_nx;
} else if (pgfault_retries > 0) {
/* If still faulting try fewer input pages
* assuming memory outage
*/
if (source_sz > page_sz)
source_sz = NX_MAX(source_sz / 2, page_sz);
--pgfault_retries;
goto restart_nx;
} else {
fprintf(stderr, "cannot make progress; too many ");
fprintf(stderr, "page fault retries cc= %d\n", cc);
rc = -1;
goto err5;
}
case ERR_NX_DATA_LENGTH:
NXPRT(fprintf(stderr, "ERR_NX_DATA_LENGTH; "));
NXPRT(fprintf(stderr, "stream may have trailing data\n"));
/* Not an error in the most common case; it just says
* there is trailing data that we must examine.
*
* CC=3 CE(1)=0 CE(0)=1 indicates partial completion
* Fig.6-7 and Table 6-8.
*/
nx_ce = get_csb_ce_ms3b(cmdp->crb.csb);
if (!csb_ce_termination(nx_ce) &&
csb_ce_partial_completion(nx_ce)) {
/* Check CPB for more information
* spbc and tpbc are valid
*/
sfbt = getnn(cmdp->cpb, out_sfbt); /* Table 6-4 */
subc = getnn(cmdp->cpb, out_subc); /* Table 6-4 */
spbc = get32(cmdp->cpb, out_spbc_decomp);
tpbc = get32(cmdp->crb.csb, tpbc);
assert(target_max >= tpbc);
goto ok_cc3; /* not an error */
} else {
/* History length error when CE(1)=1 CE(0)=0. */
rc = -1;
fprintf(stderr, "history length error cc= %d\n", cc);
goto err5;
}
case ERR_NX_TARGET_SPACE:
/* Target buffer not large enough; retry smaller input
* data; give at least 1 byte. SPBC/TPBC are not valid.
*/
assert(source_sz > history_len);
source_sz = ((source_sz - history_len + 2) / 2) + history_len;
NXPRT(fprintf(stderr, "ERR_NX_TARGET_SPACE; retry with "));
NXPRT(fprintf(stderr, "smaller input data src %d hist %d\n",
source_sz, history_len));
goto restart_nx;
case ERR_NX_OK:
/* This should not happen for gzip formatted data;
* we need trailing crc and isize
*/
fprintf(stderr, "ERR_NX_OK\n");
spbc = get32(cmdp->cpb, out_spbc_decomp);
tpbc = get32(cmdp->crb.csb, tpbc);
assert(target_max >= tpbc);
assert(spbc >= history_len);
source_sz = spbc - history_len;
goto offsets_state;
default:
fprintf(stderr, "error: cc= %d\n", cc);
rc = -1;
goto err5;
}
ok_cc3:
NXPRT(fprintf(stderr, "cc3: sfbt: %x\n", sfbt));
assert(spbc > history_len);
source_sz = spbc - history_len;
/* Table 6-4: Source Final Block Type (SFBT) describes the
* last processed deflate block and clues the software how to
* resume the next job. SUBC indicates how many input bits NX
* consumed but did not process. SPBC indicates how many
* bytes of source were given to the accelerator including
* history bytes.
*/
switch (sfbt) {
int dhtlen;
case 0x0: /* Deflate final EOB received */
/* Calculating the checksum start position. */
source_sz = source_sz - subc / 8;
is_final = 1;
break;
/* Resume decompression cases are below. Basically
* indicates where NX has suspended and how to resume
* the input stream.
*/
case 0x8: /* Within a literal block; use rembytecount */
case 0x9: /* Within a literal block; use rembytecount; bfinal=1 */
/* Supply the partially processed source byte again */
source_sz = source_sz - ((subc + 7) / 8);
/* SUBC LS 3bits: number of bits in the first source byte need
* to be processed.
* 000 means all 8 bits; Table 6-3
* Clear subc, histlen, sfbt, rembytecnt, dhtlen
*/
cmdp->cpb.in_subc = 0;
cmdp->cpb.in_sfbt = 0;
putnn(cmdp->cpb, in_subc, subc % 8);
putnn(cmdp->cpb, in_sfbt, sfbt);
putnn(cmdp->cpb, in_rembytecnt, getnn(cmdp->cpb,
out_rembytecnt));
break;
case 0xA: /* Within a FH block; */
case 0xB: /* Within a FH block; bfinal=1 */
source_sz = source_sz - ((subc + 7) / 8);
/* Clear subc, histlen, sfbt, rembytecnt, dhtlen */
cmdp->cpb.in_subc = 0;
cmdp->cpb.in_sfbt = 0;
putnn(cmdp->cpb, in_subc, subc % 8);
putnn(cmdp->cpb, in_sfbt, sfbt);
break;
case 0xC: /* Within a DH block; */
case 0xD: /* Within a DH block; bfinal=1 */
source_sz = source_sz - ((subc + 7) / 8);
/* Clear subc, histlen, sfbt, rembytecnt, dhtlen */
cmdp->cpb.in_subc = 0;
cmdp->cpb.in_sfbt = 0;
putnn(cmdp->cpb, in_subc, subc % 8);
putnn(cmdp->cpb, in_sfbt, sfbt);
dhtlen = getnn(cmdp->cpb, out_dhtlen);
putnn(cmdp->cpb, in_dhtlen, dhtlen);
assert(dhtlen >= 42);
/* Round up to a qword */
dhtlen = (dhtlen + 127) / 128;
while (dhtlen > 0) { /* Copy dht from cpb.out to cpb.in */
--dhtlen;
cmdp->cpb.in_dht[dhtlen] = cmdp->cpb.out_dht[dhtlen];
}
break;
case 0xE: /* Within a block header; bfinal=0; */
/* Also given if source data exactly ends (SUBC=0) with
* EOB code with BFINAL=0. Means the next byte will
* contain a block header.
*/
case 0xF: /* within a block header with BFINAL=1. */
source_sz = source_sz - ((subc + 7) / 8);
/* Clear subc, histlen, sfbt, rembytecnt, dhtlen */
cmdp->cpb.in_subc = 0;
cmdp->cpb.in_sfbt = 0;
putnn(cmdp->cpb, in_subc, subc % 8);
putnn(cmdp->cpb, in_sfbt, sfbt);
/* Engine did not process any data */
if (is_eof && (source_sz == 0))
is_final = 1;
}
offsets_state:
/* Adjust the source and target buffer offsets and lengths */
NXPRT(fprintf(stderr, "offsets_state:\n"));
/* Delete input data from fifo_in */
used_in = used_in - source_sz;
cur_in = (cur_in + source_sz) % fifo_in_len;
input_file_offset = input_file_offset + source_sz;
/* Add output data to fifo_out */
used_out = used_out + tpbc;
assert(used_out <= fifo_out_len);
total_out = total_out + tpbc;
/* Deflate history is 32KB max. No need to supply more
* than 32KB on a resume.
*/
history_len = (total_out > window_max) ? window_max : total_out;
/* To estimate expected expansion in the next NX job; 500 means 50%.
* Deflate best case is around 1 to 1000.
*/
last_comp_ratio = (1000UL * ((uint64_t)source_sz + 1))
/ ((uint64_t)tpbc + 1);
last_comp_ratio = NX_MAX(NX_MIN(1000UL, last_comp_ratio), 1);
NXPRT(fprintf(stderr, "comp_ratio %ld source_sz %d spbc %d tpbc %d\n",
last_comp_ratio, source_sz, spbc, tpbc));
resuming = 1;
finish_state:
NXPRT(fprintf(stderr, "finish_state:\n"));
if (is_final) {
if (used_out)
goto write_state; /* More data to write out */
else if (used_in < 8) {
/* Need at least 8 more bytes containing gzip crc
* and isize.
*/
rc = -1;
goto err4;
} else {
/* Compare checksums and exit */
int i;
unsigned char tail[8];
uint32_t cksum, isize;
for (i = 0; i < 8; i++)
tail[i] = fifo_in[(cur_in + i) % fifo_in_len];
fprintf(stderr, "computed checksum %08x isize %08x\n",
cmdp->cpb.out_crc, (uint32_t) (total_out
% (1ULL<<32)));
cksum = ((uint32_t) tail[0] | (uint32_t) tail[1]<<8
| (uint32_t) tail[2]<<16
| (uint32_t) tail[3]<<24);
isize = ((uint32_t) tail[4] | (uint32_t) tail[5]<<8
| (uint32_t) tail[6]<<16
| (uint32_t) tail[7]<<24);
fprintf(stderr, "stored checksum %08x isize %08x\n",
cksum, isize);
if (cksum == cmdp->cpb.out_crc && isize == (uint32_t)
(total_out % (1ULL<<32))) {
rc = 0; goto ok1;
} else {
rc = -1; goto err4;
}
}
} else
goto read_state;
return -1;
err1:
fprintf(stderr, "error: not a gzip file, expect %x, read %x\n",
expect, c);
return -1;
err2:
fprintf(stderr, "error: the FLG byte is wrong or not being handled\n");
return -1;
err3:
fprintf(stderr, "error: gzip header\n");
return -1;
err4:
fprintf(stderr, "error: checksum missing or mismatch\n");
err5:
ok1:
fprintf(stderr, "decomp is complete: fclose\n");
fclose(outf);
return rc;
}
int main(int argc, char **argv)
{
int rc;
struct sigaction act;
void *handle;
nx_dbg = 0;
nx_gzip_log = NULL;
act.sa_handler = 0;
act.sa_sigaction = nxu_sigsegv_handler;
act.sa_flags = SA_SIGINFO;
act.sa_restorer = 0;
sigemptyset(&act.sa_mask);
sigaction(SIGSEGV, &act, NULL);
handle = nx_function_begin(NX_FUNC_COMP_GZIP, 0);
if (!handle) {
fprintf(stderr, "Unable to init NX, errno %d\n", errno);
exit(-1);
}
rc = decompress_file(argc, argv, handle);
nx_function_end(handle);
return rc;
}
| linux-master | tools/testing/selftests/powerpc/nx-gzip/gunz_test.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* P9 gzip sample code for demonstrating the P9 NX hardware interface.
* Not intended for production use or for performance or compression
* ratio measurements. For simplicity of demonstration, this sample
* code compresses in to fixed Huffman blocks only (Deflate btype=1)
* and has very simple memory management. Dynamic Huffman blocks
* (Deflate btype=2) are more involved as detailed in the user guide.
* Note also that /dev/crypto/gzip, VAS and skiboot support are
* required.
*
* Copyright 2020 IBM Corp.
*
* https://github.com/libnxz/power-gzip for zlib api and other utils
*
* Author: Bulent Abali <[email protected]>
*
* Definitions of acronyms used here. See
* P9 NX Gzip Accelerator User's Manual for details:
* https://github.com/libnxz/power-gzip/blob/develop/doc/power_nx_gzip_um.pdf
*
* adler/crc: 32 bit checksums appended to stream tail
* ce: completion extension
* cpb: coprocessor parameter block (metadata)
* crb: coprocessor request block (command)
* csb: coprocessor status block (status)
* dht: dynamic huffman table
* dde: data descriptor element (address, length)
* ddl: list of ddes
* dh/fh: dynamic and fixed huffman types
* fc: coprocessor function code
* histlen: history/dictionary length
* history: sliding window of up to 32KB of data
* lzcount: Deflate LZ symbol counts
* rembytecnt: remaining byte count
* sfbt: source final block type; last block's type during decomp
* spbc: source processed byte count
* subc: source unprocessed bit count
* tebc: target ending bit count; valid bits in the last byte
* tpbc: target processed byte count
* vas: virtual accelerator switch; the user mode interface
*/
#define _ISOC11_SOURCE // For aligned_alloc()
#define _DEFAULT_SOURCE // For endian.h
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <stdint.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/fcntl.h>
#include <sys/mman.h>
#include <endian.h>
#include <bits/endian.h>
#include <sys/ioctl.h>
#include <assert.h>
#include <errno.h>
#include <signal.h>
#include "utils.h"
#include "nxu.h"
#include "nx.h"
int nx_dbg;
FILE *nx_gzip_log;
#define NX_MIN(X, Y) (((X) < (Y)) ? (X) : (Y))
#define FNAME_MAX 1024
#define FEXT ".nx.gz"
#define SYSFS_MAX_REQ_BUF_PATH "devices/vio/ibm,compression-v1/nx_gzip_caps/req_max_processed_len"
/*
* LZ counts returned in the user supplied nx_gzip_crb_cpb_t structure.
*/
static int compress_fht_sample(char *src, uint32_t srclen, char *dst,
uint32_t dstlen, int with_count,
struct nx_gzip_crb_cpb_t *cmdp, void *handle)
{
uint32_t fc;
assert(!!cmdp);
put32(cmdp->crb, gzip_fc, 0); /* clear */
fc = (with_count) ? GZIP_FC_COMPRESS_RESUME_FHT_COUNT :
GZIP_FC_COMPRESS_RESUME_FHT;
putnn(cmdp->crb, gzip_fc, fc);
putnn(cmdp->cpb, in_histlen, 0); /* resuming with no history */
memset((void *) &cmdp->crb.csb, 0, sizeof(cmdp->crb.csb));
/* Section 6.6 programming notes; spbc may be in two different
* places depending on FC.
*/
if (!with_count)
put32(cmdp->cpb, out_spbc_comp, 0);
else
put32(cmdp->cpb, out_spbc_comp_with_count, 0);
/* Figure 6-3 6-4; CSB location */
put64(cmdp->crb, csb_address, 0);
put64(cmdp->crb, csb_address,
(uint64_t) &cmdp->crb.csb & csb_address_mask);
/* Source direct dde (scatter-gather list) */
clear_dde(cmdp->crb.source_dde);
putnn(cmdp->crb.source_dde, dde_count, 0);
put32(cmdp->crb.source_dde, ddebc, srclen);
put64(cmdp->crb.source_dde, ddead, (uint64_t) src);
/* Target direct dde (scatter-gather list) */
clear_dde(cmdp->crb.target_dde);
putnn(cmdp->crb.target_dde, dde_count, 0);
put32(cmdp->crb.target_dde, ddebc, dstlen);
put64(cmdp->crb.target_dde, ddead, (uint64_t) dst);
/* Submit the crb, the job descriptor, to the accelerator */
return nxu_submit_job(cmdp, handle);
}
/*
* Prepares a blank no filename no timestamp gzip header and returns
* the number of bytes written to buf.
* Gzip specification at https://tools.ietf.org/html/rfc1952
*/
int gzip_header_blank(char *buf)
{
int i = 0;
buf[i++] = 0x1f; /* ID1 */
buf[i++] = 0x8b; /* ID2 */
buf[i++] = 0x08; /* CM */
buf[i++] = 0x00; /* FLG */
buf[i++] = 0x00; /* MTIME */
buf[i++] = 0x00; /* MTIME */
buf[i++] = 0x00; /* MTIME */
buf[i++] = 0x00; /* MTIME */
buf[i++] = 0x04; /* XFL 4=fastest */
buf[i++] = 0x03; /* OS UNIX */
return i;
}
/*
* Z_SYNC_FLUSH as described in zlib.h.
* Returns number of appended bytes
*/
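/* Worked example, tebc=3 and final=0: the partial last byte keeps
 * its 3 valid bits, BFINAL=0 and BTYPE=00 land in bits 3-5, then
 * the empty stored block's LEN/NLEN bytes 00 00 FF FF follow; one
 * existing byte is rewritten and 4 new bytes are appended, matching
 * the return value of 4.
 */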
int append_sync_flush(char *buf, int tebc, int final)
{
uint64_t flush;
int shift = (tebc & 0x7);
if (tebc > 0) {
/* Last byte is partially full */
buf = buf - 1;
*buf = *buf & (unsigned char) ((1<<tebc)-1);
} else
*buf = 0;
flush = ((0x1ULL & final) << shift) | *buf;
shift = shift + 3; /* BFINAL and BTYPE written */
shift = (shift <= 8) ? 8 : 16;
flush |= (0xFFFF0000ULL) << shift; /* Zero length block */
shift = shift + 32;
while (shift > 0) {
*buf++ = (unsigned char) (flush & 0xffULL);
flush = flush >> 8;
shift = shift - 8;
}
return(((tebc > 5) || (tebc == 0)) ? 5 : 4);
}
/*
* Final deflate block bit. This call assumes the block
* beginning is byte aligned.
*/
static void set_bfinal(void *buf, int bfinal)
{
char *b = buf;
if (bfinal)
*b = *b | (unsigned char) 0x01;
else
*b = *b & (unsigned char) 0xfe;
}
int compress_file(int argc, char **argv, void *handle)
{
char *inbuf, *outbuf, *srcbuf, *dstbuf;
char outname[FNAME_MAX];
uint32_t srclen, dstlen;
uint32_t flushlen, chunk;
size_t inlen, outlen, dsttotlen, srctotlen;
uint32_t crc, spbc, tpbc, tebc;
int lzcounts = 0;
int cc;
int num_hdr_bytes;
struct nx_gzip_crb_cpb_t *cmdp;
uint32_t pagelen = 65536;
int fault_tries = NX_MAX_FAULTS;
char buf[32];
cmdp = (void *)(uintptr_t)
aligned_alloc(sizeof(struct nx_gzip_crb_cpb_t),
sizeof(struct nx_gzip_crb_cpb_t));
assert(cmdp != NULL);
if (argc != 2) {
fprintf(stderr, "usage: %s <fname>\n", argv[0]);
exit(-1);
}
if (read_file_alloc(argv[1], &inbuf, &inlen))
exit(-1);
fprintf(stderr, "file %s read, %ld bytes\n", argv[1], inlen);
/* Generous output buffer for header/trailer */
outlen = 2 * inlen + 1024;
assert(NULL != (outbuf = (char *)malloc(outlen)));
nxu_touch_pages(outbuf, outlen, pagelen, 1);
/*
* On PowerVM, the hypervisor defines the maximum request buffer
* size, and this value is available via sysfs.
*/
if (!read_sysfs_file(SYSFS_MAX_REQ_BUF_PATH, buf, sizeof(buf))) {
chunk = atoi(buf);
} else {
/* sysfs entry is not available on PowerNV */
/* Compress piecemeal in smallish chunks */
chunk = 1<<22;
}
/* Write the gzip header to the stream */
num_hdr_bytes = gzip_header_blank(outbuf);
dstbuf = outbuf + num_hdr_bytes;
outlen = outlen - num_hdr_bytes;
dsttotlen = num_hdr_bytes;
srcbuf = inbuf;
srctotlen = 0;
/* Init the CRB, the coprocessor request block */
memset(&cmdp->crb, 0, sizeof(cmdp->crb));
/* Initial gzip crc32 */
put32(cmdp->cpb, in_crc, 0);
while (inlen > 0) {
/* Submit chunk size source data per job */
srclen = NX_MIN(chunk, inlen);
/* Supply large target in case data expands */
dstlen = NX_MIN(2*srclen, outlen);
/* Page faults are handled by the user code */
/* Fault-in pages; an improved code wouldn't touch so
* many pages but would try to estimate the
* compression ratio and adjust both the src and dst
* touch amounts.
*/
nxu_touch_pages(cmdp, sizeof(struct nx_gzip_crb_cpb_t), pagelen,
1);
nxu_touch_pages(srcbuf, srclen, pagelen, 0);
nxu_touch_pages(dstbuf, dstlen, pagelen, 1);
cc = compress_fht_sample(
srcbuf, srclen,
dstbuf, dstlen,
lzcounts, cmdp, handle);
if (cc != ERR_NX_OK && cc != ERR_NX_TPBC_GT_SPBC &&
cc != ERR_NX_AT_FAULT) {
fprintf(stderr, "nx error: cc= %d\n", cc);
exit(-1);
}
/* Page faults are handled by the user code */
if (cc == ERR_NX_AT_FAULT) {
NXPRT(fprintf(stderr, "page fault: cc= %d, ", cc));
NXPRT(fprintf(stderr, "try= %d, fsa= %08llx\n",
fault_tries,
(unsigned long long) cmdp->crb.csb.fsaddr));
fault_tries--;
if (fault_tries > 0) {
continue;
} else {
fprintf(stderr, "error: cannot progress; ");
fprintf(stderr, "too many faults\n");
exit(-1);
}
}
fault_tries = NX_MAX_FAULTS; /* Reset for the next chunk */
inlen = inlen - srclen;
srcbuf = srcbuf + srclen;
srctotlen = srctotlen + srclen;
/* Two possible locations for spbc depending on the function
* code.
*/
spbc = (!lzcounts) ? get32(cmdp->cpb, out_spbc_comp) :
get32(cmdp->cpb, out_spbc_comp_with_count);
assert(spbc == srclen);
/* Target byte count */
tpbc = get32(cmdp->crb.csb, tpbc);
/* Target ending bit count */
tebc = getnn(cmdp->cpb, out_tebc);
NXPRT(fprintf(stderr, "compressed chunk %d ", spbc));
NXPRT(fprintf(stderr, "to %d bytes, tebc= %d\n", tpbc, tebc));
if (inlen > 0) { /* More chunks to go */
set_bfinal(dstbuf, 0);
dstbuf = dstbuf + tpbc;
dsttotlen = dsttotlen + tpbc;
outlen = outlen - tpbc;
/* Round up to the next byte with a flush
		 * block; do not set the BFINAL bit.
*/
flushlen = append_sync_flush(dstbuf, tebc, 0);
dsttotlen = dsttotlen + flushlen;
outlen = outlen - flushlen;
dstbuf = dstbuf + flushlen;
NXPRT(fprintf(stderr, "added sync_flush %d bytes\n",
flushlen));
} else { /* Done */
/* Set the BFINAL bit of the last block per Deflate
* specification.
*/
set_bfinal(dstbuf, 1);
dstbuf = dstbuf + tpbc;
dsttotlen = dsttotlen + tpbc;
outlen = outlen - tpbc;
}
/* Resuming crc32 for the next chunk */
crc = get32(cmdp->cpb, out_crc);
put32(cmdp->cpb, in_crc, crc);
crc = be32toh(crc);
}
/* Append crc32 and ISIZE to the end */
memcpy(dstbuf, &crc, 4);
memcpy(dstbuf+4, &srctotlen, 4);
dsttotlen = dsttotlen + 8;
outlen = outlen - 8;
assert(FNAME_MAX > (strlen(argv[1]) + strlen(FEXT)));
strcpy(outname, argv[1]);
strcat(outname, FEXT);
if (write_file(outname, outbuf, dsttotlen)) {
fprintf(stderr, "write error: %s\n", outname);
exit(-1);
}
fprintf(stderr, "compressed %ld to %ld bytes total, ", srctotlen,
dsttotlen);
fprintf(stderr, "crc32 checksum = %08x\n", crc);
if (inbuf != NULL)
free(inbuf);
if (outbuf != NULL)
free(outbuf);
return 0;
}
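/*
 * Sanity check of the result (illustrative): assuming FEXT names the
 * output suffix, the produced file should round-trip with stock tools,
 * e.g. "gunzip -c <fname><FEXT> | cmp - <fname>".
 */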
int main(int argc, char **argv)
{
int rc;
struct sigaction act;
void *handle;
nx_dbg = 0;
nx_gzip_log = NULL;
act.sa_handler = 0;
act.sa_sigaction = nxu_sigsegv_handler;
act.sa_flags = SA_SIGINFO;
act.sa_restorer = 0;
sigemptyset(&act.sa_mask);
sigaction(SIGSEGV, &act, NULL);
handle = nx_function_begin(NX_FUNC_COMP_GZIP, 0);
if (!handle) {
fprintf(stderr, "Unable to init NX, errno %d\n", errno);
exit(-1);
}
rc = compress_file(argc, argv, handle);
nx_function_end(handle);
return rc;
}
| linux-master | tools/testing/selftests/powerpc/nx-gzip/gzfht_test.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright 2015, Michael Ellerman, IBM Corp.
*
* This test simply tests that certain syscalls are implemented. It doesn't
* actually exercise their logic in any way.
*/
#define _GNU_SOURCE
#include <errno.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include "utils.h"
#define DO_TEST(_name, _num) \
static int test_##_name(void) \
{ \
int rc; \
printf("Testing " #_name); \
errno = 0; \
rc = syscall(_num, -1, 0, 0, 0, 0, 0); \
printf("\treturned %d, errno %d\n", rc, errno); \
return errno == ENOSYS; \
}
#include "ipc.h"
#undef DO_TEST
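/*
 * How the trick above works (hedged illustration): ipc.h is expected to
 * consist of lines such as
 *
 *	DO_TEST(semop, __NR_semop)
 *
 * so the first include expands to one test_<name>() function per syscall,
 * and the re-include below expands to one FAIL_IF(test_<name>()) call per
 * syscall inside ipc_unmuxed().
 */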
static int ipc_unmuxed(void)
{
int tests_done = 0;
#define DO_TEST(_name, _num) \
FAIL_IF(test_##_name()); \
tests_done++;
#include "ipc.h"
#undef DO_TEST
/*
* If we ran no tests then it means none of the syscall numbers were
* defined, possibly because we were built against old headers. But it
* means we didn't really test anything, so instead of passing mark it
* as a skip to give the user a clue.
*/
SKIP_IF(tests_done == 0);
return 0;
}
int main(void)
{
return test_harness(ipc_unmuxed, "ipc_unmuxed");
}
| linux-master | tools/testing/selftests/powerpc/syscalls/ipc_unmuxed.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright 2005-2020 IBM Corporation.
*
* Includes code from librtas (https://github.com/ibm-power-utilities/librtas/)
*/
#include <byteswap.h>
#include <stdint.h>
#include <inttypes.h>
#include <linux/limits.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>
#include <stdarg.h>
#include <stdlib.h>
#include <fcntl.h>
#include <errno.h>
#include "utils.h"
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
#define cpu_to_be32(x) bswap_32(x)
#define be32_to_cpu(x) bswap_32(x)
#else
#define cpu_to_be32(x) (x)
#define be32_to_cpu(x) (x)
#endif
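/*
 * Example: on a little-endian host, cpu_to_be32(0x12345678) yields
 * 0x78563412; on a big-endian host both macros are identity mappings.
 */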
#define RTAS_IO_ASSERT -1098 /* Unexpected I/O Error */
#define RTAS_UNKNOWN_OP -1099 /* No Firmware Implementation of Function */
#define BLOCK_SIZE 4096
#define PAGE_SIZE 4096
#define MAX_PAGES 64
static const char *ofdt_rtas_path = "/proc/device-tree/rtas";
typedef uint32_t __be32;
struct rtas_args {
__be32 token;
__be32 nargs;
__be32 nret;
__be32 args[16];
__be32 *rets; /* Pointer to return values in args[]. */
};
struct region {
uint64_t addr;
uint32_t size;
struct region *next;
};
static int get_property(const char *prop_path, const char *prop_name,
char **prop_val, size_t *prop_len)
{
char path[PATH_MAX];
int len = snprintf(path, sizeof(path), "%s/%s", prop_path, prop_name);
if (len < 0 || len >= sizeof(path))
return -ENOMEM;
return read_file_alloc(path, prop_val, prop_len);
}
int rtas_token(const char *call_name)
{
char *prop_buf = NULL;
size_t len;
int rc;
rc = get_property(ofdt_rtas_path, call_name, &prop_buf, &len);
if (rc < 0) {
rc = RTAS_UNKNOWN_OP;
goto err;
}
rc = be32_to_cpu(*(int *)prop_buf);
err:
free(prop_buf);
return rc;
}
static int read_kregion_bounds(struct region *kregion)
{
char *buf;
int err;
err = read_file_alloc("/proc/ppc64/rtas/rmo_buffer", &buf, NULL);
if (err) {
perror("Could not open rmo_buffer file");
return RTAS_IO_ASSERT;
}
sscanf(buf, "%" SCNx64 " %x", &kregion->addr, &kregion->size);
free(buf);
if (!(kregion->size && kregion->addr) ||
(kregion->size > (PAGE_SIZE * MAX_PAGES))) {
printf("Unexpected kregion bounds\n");
return RTAS_IO_ASSERT;
}
return 0;
}
static int rtas_call(const char *name, int nargs,
int nrets, ...)
{
struct rtas_args args;
__be32 *rets[16];
int i, rc, token;
va_list ap;
va_start(ap, nrets);
token = rtas_token(name);
if (token == RTAS_UNKNOWN_OP) {
// We don't care if the call doesn't exist
printf("call '%s' not available, skipping...", name);
rc = RTAS_UNKNOWN_OP;
goto err;
}
args.token = cpu_to_be32(token);
args.nargs = cpu_to_be32(nargs);
args.nret = cpu_to_be32(nrets);
for (i = 0; i < nargs; i++)
args.args[i] = (__be32) va_arg(ap, unsigned long);
for (i = 0; i < nrets; i++)
rets[i] = (__be32 *) va_arg(ap, unsigned long);
rc = syscall(__NR_rtas, &args);
if (rc) {
rc = -errno;
goto err;
}
if (nrets) {
*(rets[0]) = be32_to_cpu(args.args[nargs]);
for (i = 1; i < nrets; i++) {
*(rets[i]) = args.args[nargs + i];
}
}
err:
va_end(ap);
return rc;
}
static int test(void)
{
struct region rmo_region;
uint32_t rmo_start;
uint32_t rmo_end;
__be32 rets[1];
int rc;
// Test a legitimate harmless call
// Expected: call succeeds
printf("Test a permitted call, no parameters... ");
rc = rtas_call("get-time-of-day", 0, 1, rets);
printf("rc: %d\n", rc);
FAIL_IF(rc != 0 && rc != RTAS_UNKNOWN_OP);
// Test a prohibited call
// Expected: call returns -EINVAL
printf("Test a prohibited call... ");
rc = rtas_call("nvram-fetch", 0, 1, rets);
printf("rc: %d\n", rc);
FAIL_IF(rc != -EINVAL && rc != RTAS_UNKNOWN_OP);
// Get RMO
rc = read_kregion_bounds(&rmo_region);
if (rc) {
printf("Couldn't read RMO region bounds, skipping remaining cases\n");
return 0;
}
rmo_start = rmo_region.addr;
rmo_end = rmo_start + rmo_region.size - 1;
printf("RMO range: %08x - %08x\n", rmo_start, rmo_end);
// Test a permitted call, user-supplied size, buffer inside RMO
// Expected: call succeeds
printf("Test a permitted call, user-supplied size, buffer inside RMO... ");
rc = rtas_call("ibm,get-system-parameter", 3, 1, 0, cpu_to_be32(rmo_start),
cpu_to_be32(rmo_end - rmo_start + 1), rets);
printf("rc: %d\n", rc);
FAIL_IF(rc != 0 && rc != RTAS_UNKNOWN_OP);
// Test a permitted call, user-supplied size, buffer start outside RMO
// Expected: call returns -EINVAL
printf("Test a permitted call, user-supplied size, buffer start outside RMO... ");
rc = rtas_call("ibm,get-system-parameter", 3, 1, 0, cpu_to_be32(rmo_end + 1),
cpu_to_be32(4000), rets);
printf("rc: %d\n", rc);
FAIL_IF(rc != -EINVAL && rc != RTAS_UNKNOWN_OP);
// Test a permitted call, user-supplied size, buffer end outside RMO
// Expected: call returns -EINVAL
printf("Test a permitted call, user-supplied size, buffer end outside RMO... ");
rc = rtas_call("ibm,get-system-parameter", 3, 1, 0, cpu_to_be32(rmo_start),
cpu_to_be32(rmo_end - rmo_start + 2), rets);
printf("rc: %d\n", rc);
FAIL_IF(rc != -EINVAL && rc != RTAS_UNKNOWN_OP);
// Test a permitted call, fixed size, buffer end outside RMO
// Expected: call returns -EINVAL
printf("Test a permitted call, fixed size, buffer end outside RMO... ");
rc = rtas_call("ibm,configure-connector", 2, 1, cpu_to_be32(rmo_end - 4000), 0, rets);
printf("rc: %d\n", rc);
FAIL_IF(rc != -EINVAL && rc != RTAS_UNKNOWN_OP);
return 0;
}
int main(int argc, char *argv[])
{
return test_harness(test, "rtas_filter");
}
| linux-master | tools/testing/selftests/powerpc/syscalls/rtas_filter.c |
// SPDX-License-Identifier: GPL-2.0
#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <linux/netlink.h>
#include <signal.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/prctl.h>
#include <sys/socket.h>
#include <sched.h>
#include <sys/eventfd.h>
#include <sys/stat.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>
#include "../kselftest_harness.h"
#define __DEV_FULL "/sys/devices/virtual/mem/full/uevent"
#define __UEVENT_BUFFER_SIZE (2048 * 2)
#define __UEVENT_HEADER "add@/devices/virtual/mem/full"
#define __UEVENT_HEADER_LEN sizeof("add@/devices/virtual/mem/full")
#define __UEVENT_LISTEN_ALL -1
ssize_t read_nointr(int fd, void *buf, size_t count)
{
ssize_t ret;
again:
ret = read(fd, buf, count);
if (ret < 0 && errno == EINTR)
goto again;
return ret;
}
ssize_t write_nointr(int fd, const void *buf, size_t count)
{
ssize_t ret;
again:
ret = write(fd, buf, count);
if (ret < 0 && errno == EINTR)
goto again;
return ret;
}
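/*
 * Both helpers above simply retry on EINTR so that an interrupting
 * signal (this test juggles SIGCHLD, SIGTERM and SIGUSR1) cannot make a
 * read() or write() fail spuriously.
 */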
int wait_for_pid(pid_t pid)
{
int status, ret;
again:
ret = waitpid(pid, &status, 0);
if (ret == -1) {
if (errno == EINTR)
goto again;
return -1;
}
if (ret != pid)
goto again;
if (!WIFEXITED(status) || WEXITSTATUS(status) != 0)
return -1;
return 0;
}
static int uevent_listener(unsigned long post_flags, bool expect_uevent,
int sync_fd)
{
int sk_fd, ret;
socklen_t sk_addr_len;
int fret = -1, rcv_buf_sz = __UEVENT_BUFFER_SIZE;
uint64_t sync_add = 1;
struct sockaddr_nl sk_addr = { 0 }, rcv_addr = { 0 };
char buf[__UEVENT_BUFFER_SIZE] = { 0 };
struct iovec iov = { buf, __UEVENT_BUFFER_SIZE };
char control[CMSG_SPACE(sizeof(struct ucred))];
struct msghdr hdr = {
&rcv_addr, sizeof(rcv_addr), &iov, 1,
control, sizeof(control), 0,
};
sk_fd = socket(AF_NETLINK, SOCK_RAW | SOCK_CLOEXEC,
NETLINK_KOBJECT_UEVENT);
if (sk_fd < 0) {
fprintf(stderr, "%s - Failed to open uevent socket\n", strerror(errno));
return -1;
}
ret = setsockopt(sk_fd, SOL_SOCKET, SO_RCVBUF, &rcv_buf_sz,
sizeof(rcv_buf_sz));
if (ret < 0) {
fprintf(stderr, "%s - Failed to set socket options\n", strerror(errno));
goto on_error;
}
sk_addr.nl_family = AF_NETLINK;
sk_addr.nl_groups = __UEVENT_LISTEN_ALL;
sk_addr_len = sizeof(sk_addr);
ret = bind(sk_fd, (struct sockaddr *)&sk_addr, sk_addr_len);
if (ret < 0) {
fprintf(stderr, "%s - Failed to bind socket\n", strerror(errno));
goto on_error;
}
ret = getsockname(sk_fd, (struct sockaddr *)&sk_addr, &sk_addr_len);
if (ret < 0) {
fprintf(stderr, "%s - Failed to retrieve socket name\n", strerror(errno));
goto on_error;
}
if ((size_t)sk_addr_len != sizeof(sk_addr)) {
fprintf(stderr, "Invalid socket address size\n");
goto on_error;
}
if (post_flags & CLONE_NEWUSER) {
ret = unshare(CLONE_NEWUSER);
if (ret < 0) {
fprintf(stderr,
"%s - Failed to unshare user namespace\n",
strerror(errno));
goto on_error;
}
}
if (post_flags & CLONE_NEWNET) {
ret = unshare(CLONE_NEWNET);
if (ret < 0) {
fprintf(stderr,
"%s - Failed to unshare network namespace\n",
strerror(errno));
goto on_error;
}
}
ret = write_nointr(sync_fd, &sync_add, sizeof(sync_add));
close(sync_fd);
if (ret != sizeof(sync_add)) {
fprintf(stderr, "Failed to synchronize with parent process\n");
goto on_error;
}
fret = 0;
for (;;) {
ssize_t r;
r = recvmsg(sk_fd, &hdr, 0);
if (r <= 0) {
fprintf(stderr, "%s - Failed to receive uevent\n", strerror(errno));
ret = -1;
break;
}
/* ignore libudev messages */
if (memcmp(buf, "libudev", 8) == 0)
continue;
/* ignore uevents we didn't trigger */
if (memcmp(buf, __UEVENT_HEADER, __UEVENT_HEADER_LEN) != 0)
continue;
if (!expect_uevent) {
fprintf(stderr, "Received unexpected uevent:\n");
ret = -1;
}
if (TH_LOG_ENABLED) {
/* If logging is enabled dump the received uevent. */
(void)write_nointr(STDERR_FILENO, buf, r);
(void)write_nointr(STDERR_FILENO, "\n", 1);
}
break;
}
on_error:
close(sk_fd);
return fret;
}
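/*
 * For reference, a raw kernel uevent received on this socket looks like
 * (illustrative):
 *
 *	add@/devices/virtual/mem/full\0ACTION=add\0DEVPATH=...\0SUBSYSTEM=mem\0
 *
 * i.e. an "<action>@<devpath>" header followed by NUL-separated
 * KEY=VALUE pairs, which is why the memcmp() calls above only compare
 * prefixes.
 */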
int trigger_uevent(unsigned int times)
{
int fd, ret;
unsigned int i;
fd = open(__DEV_FULL, O_RDWR | O_CLOEXEC);
if (fd < 0) {
if (errno != ENOENT)
return -EINVAL;
return -1;
}
for (i = 0; i < times; i++) {
ret = write_nointr(fd, "add\n", sizeof("add\n") - 1);
if (ret < 0) {
fprintf(stderr, "Failed to trigger uevent\n");
break;
}
}
close(fd);
return ret;
}
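/*
 * Writing an action name into a device's uevent attribute asks the
 * kernel to emit a synthetic uevent, equivalent to the shell idiom
 *
 *	echo add > /sys/devices/virtual/mem/full/uevent
 *
 * which is what the write_nointr() loop above does 'times' times.
 */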
int set_death_signal(void)
{
int ret;
pid_t ppid;
ret = prctl(PR_SET_PDEATHSIG, SIGKILL, 0, 0, 0);
/* Check whether we have been orphaned. */
ppid = getppid();
if (ppid == 1) {
pid_t self;
self = getpid();
ret = kill(self, SIGKILL);
}
if (ret < 0)
return -1;
return 0;
}
static int do_test(unsigned long pre_flags, unsigned long post_flags,
bool expect_uevent, int sync_fd)
{
int ret;
uint64_t wait_val;
pid_t pid;
sigset_t mask;
sigset_t orig_mask;
struct timespec timeout;
sigemptyset(&mask);
sigaddset(&mask, SIGCHLD);
ret = sigprocmask(SIG_BLOCK, &mask, &orig_mask);
if (ret < 0) {
fprintf(stderr, "%s- Failed to block SIGCHLD\n", strerror(errno));
return -1;
}
pid = fork();
if (pid < 0) {
fprintf(stderr, "%s - Failed to fork() new process\n", strerror(errno));
return -1;
}
if (pid == 0) {
/* Make sure that we go away when our parent dies. */
ret = set_death_signal();
if (ret < 0) {
fprintf(stderr, "Failed to set PR_SET_PDEATHSIG to SIGKILL\n");
_exit(EXIT_FAILURE);
}
if (pre_flags & CLONE_NEWUSER) {
ret = unshare(CLONE_NEWUSER);
if (ret < 0) {
fprintf(stderr,
"%s - Failed to unshare user namespace\n",
strerror(errno));
_exit(EXIT_FAILURE);
}
}
if (pre_flags & CLONE_NEWNET) {
ret = unshare(CLONE_NEWNET);
if (ret < 0) {
fprintf(stderr,
"%s - Failed to unshare network namespace\n",
strerror(errno));
_exit(EXIT_FAILURE);
}
}
if (uevent_listener(post_flags, expect_uevent, sync_fd) < 0)
_exit(EXIT_FAILURE);
_exit(EXIT_SUCCESS);
}
ret = read_nointr(sync_fd, &wait_val, sizeof(wait_val));
if (ret != sizeof(wait_val)) {
fprintf(stderr, "Failed to synchronize with child process\n");
_exit(EXIT_FAILURE);
}
/* Trigger 10 uevents to account for the case where the kernel might
* drop some.
*/
ret = trigger_uevent(10);
if (ret < 0)
fprintf(stderr, "Failed triggering uevents\n");
/* Wait for 2 seconds before considering this failed. This should be
* plenty of time for the kernel to deliver the uevent even under heavy
* load.
*/
timeout.tv_sec = 2;
timeout.tv_nsec = 0;
again:
ret = sigtimedwait(&mask, NULL, &timeout);
if (ret < 0) {
if (errno == EINTR)
goto again;
if (!expect_uevent)
ret = kill(pid, SIGTERM); /* success */
else
ret = kill(pid, SIGUSR1); /* error */
if (ret < 0)
return -1;
}
ret = wait_for_pid(pid);
if (ret < 0)
return -1;
return ret;
}
static void signal_handler(int sig)
{
if (sig == SIGTERM)
_exit(EXIT_SUCCESS);
_exit(EXIT_FAILURE);
}
TEST(uevent_filtering)
{
int ret, sync_fd;
struct sigaction act;
if (geteuid()) {
TH_LOG("Uevent filtering tests require root privileges. Skipping test");
_exit(KSFT_SKIP);
}
ret = access(__DEV_FULL, F_OK);
EXPECT_EQ(0, ret) {
if (errno == ENOENT) {
TH_LOG(__DEV_FULL " does not exist. Skipping test");
_exit(KSFT_SKIP);
}
_exit(KSFT_FAIL);
}
act.sa_handler = signal_handler;
act.sa_flags = 0;
sigemptyset(&act.sa_mask);
ret = sigaction(SIGTERM, &act, NULL);
ASSERT_EQ(0, ret);
sync_fd = eventfd(0, EFD_CLOEXEC);
ASSERT_GE(sync_fd, 0);
/*
* Setup:
* - Open uevent listening socket in initial network namespace owned by
* initial user namespace.
* - Trigger uevent in initial network namespace owned by initial user
* namespace.
* Expected Result:
* - uevent listening socket receives uevent
*/
ret = do_test(0, 0, true, sync_fd);
ASSERT_EQ(0, ret) {
goto do_cleanup;
}
/*
* Setup:
* - Open uevent listening socket in non-initial network namespace
* owned by initial user namespace.
* - Trigger uevent in initial network namespace owned by initial user
* namespace.
* Expected Result:
* - uevent listening socket receives uevent
*/
ret = do_test(CLONE_NEWNET, 0, true, sync_fd);
ASSERT_EQ(0, ret) {
goto do_cleanup;
}
/*
* Setup:
* - unshare user namespace
* - Open uevent listening socket in initial network namespace
* owned by initial user namespace.
* - Trigger uevent in initial network namespace owned by initial user
* namespace.
* Expected Result:
* - uevent listening socket receives uevent
*/
ret = do_test(CLONE_NEWUSER, 0, true, sync_fd);
ASSERT_EQ(0, ret) {
goto do_cleanup;
}
/*
* Setup:
* - Open uevent listening socket in non-initial network namespace
* owned by non-initial user namespace.
* - Trigger uevent in initial network namespace owned by initial user
* namespace.
* Expected Result:
* - uevent listening socket receives no uevent
*/
ret = do_test(CLONE_NEWUSER | CLONE_NEWNET, 0, false, sync_fd);
ASSERT_EQ(0, ret) {
goto do_cleanup;
}
/*
* Setup:
* - Open uevent listening socket in initial network namespace
* owned by initial user namespace.
* - unshare network namespace
* - Trigger uevent in initial network namespace owned by initial user
* namespace.
* Expected Result:
* - uevent listening socket receives uevent
*/
ret = do_test(0, CLONE_NEWNET, true, sync_fd);
ASSERT_EQ(0, ret) {
goto do_cleanup;
}
/*
* Setup:
* - Open uevent listening socket in initial network namespace
* owned by initial user namespace.
* - unshare user namespace
* - Trigger uevent in initial network namespace owned by initial user
* namespace.
* Expected Result:
* - uevent listening socket receives uevent
*/
ret = do_test(0, CLONE_NEWUSER, true, sync_fd);
ASSERT_EQ(0, ret) {
goto do_cleanup;
}
/*
* Setup:
* - Open uevent listening socket in initial network namespace
* owned by initial user namespace.
* - unshare user namespace
* - unshare network namespace
* - Trigger uevent in initial network namespace owned by initial user
* namespace.
* Expected Result:
* - uevent listening socket receives uevent
*/
ret = do_test(0, CLONE_NEWUSER | CLONE_NEWNET, true, sync_fd);
ASSERT_EQ(0, ret) {
goto do_cleanup;
}
do_cleanup:
close(sync_fd);
}
TEST_HARNESS_MAIN
| linux-master | tools/testing/selftests/uevent/uevent_filtering.c |
/* SPDX-License-Identifier: GPL-2.0
* Copyright (c) 2018 Davide Caratti, Red Hat inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
* License as published by the Free Software Foundation.
*/
#include <linux/bpf.h>
#include <linux/pkt_cls.h>
__attribute__((section("action-ok"),used)) int action_ok(struct __sk_buff *s)
{
return TC_ACT_OK;
}
__attribute__((section("action-ko"),used)) int action_ko(struct __sk_buff *s)
{
s->data = 0x0;
return TC_ACT_OK;
}
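/*
 * Note: from a TC program's point of view, __sk_buff.data is not
 * writable, so the store above is expected to be rejected by the BPF
 * verifier at load time; the "-ko" section exists to exercise exactly
 * that failure path.
 */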
char _license[] __attribute__((section("license"),used)) = "GPL";
| linux-master | tools/testing/selftests/tc-testing/action.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* vdso_test_getcpu.c: Sample code to test parse_vdso.c and vDSO getcpu()
*
* Copyright (c) 2020 Arm Ltd
*/
#include <stdint.h>
#include <elf.h>
#include <stdio.h>
#include <sys/auxv.h>
#include <sys/time.h>
#include "../kselftest.h"
#include "parse_vdso.h"
#if defined(__riscv)
const char *version = "LINUX_4.15";
#else
const char *version = "LINUX_2.6";
#endif
const char *name = "__vdso_getcpu";
struct getcpu_cache;
typedef long (*getcpu_t)(unsigned int *, unsigned int *,
struct getcpu_cache *);
int main(int argc, char **argv)
{
unsigned long sysinfo_ehdr;
unsigned int cpu, node;
getcpu_t get_cpu;
long ret;
sysinfo_ehdr = getauxval(AT_SYSINFO_EHDR);
if (!sysinfo_ehdr) {
printf("AT_SYSINFO_EHDR is not present!\n");
return KSFT_SKIP;
}
	vdso_init_from_sysinfo_ehdr(sysinfo_ehdr);
get_cpu = (getcpu_t)vdso_sym(version, name);
if (!get_cpu) {
printf("Could not find %s\n", name);
return KSFT_SKIP;
}
ret = get_cpu(&cpu, &node, 0);
if (ret == 0) {
printf("Running on CPU %u node %u\n", cpu, node);
} else {
printf("%s failed\n", name);
return KSFT_FAIL;
}
return 0;
}
| linux-master | tools/testing/selftests/vDSO/vdso_test_getcpu.c |
// SPDX-License-Identifier: GPL-2.0
/*
 * vdso_test_correctness.c - Test cases for vDSO correctness
* Copyright (c) 2011-2015 Andrew Lutomirski
*/
#define _GNU_SOURCE
#include <stdio.h>
#include <sys/time.h>
#include <time.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <dlfcn.h>
#include <string.h>
#include <errno.h>
#include <sched.h>
#include <stdbool.h>
#include <limits.h>
#include "vdso_config.h"
#include "../kselftest.h"
static const char **name;
#ifndef SYS_getcpu
# ifdef __x86_64__
# define SYS_getcpu 309
# else
# define SYS_getcpu 318
# endif
#endif
#ifndef __NR_clock_gettime64
#define __NR_clock_gettime64 403
#endif
#ifndef __kernel_timespec
struct __kernel_timespec {
long long tv_sec;
long long tv_nsec;
};
#endif
/* max length of lines in /proc/self/maps - anything longer is skipped here */
#define MAPS_LINE_LEN 128
int nerrs = 0;
typedef int (*vgettime_t)(clockid_t, struct timespec *);
vgettime_t vdso_clock_gettime;
typedef int (*vgettime64_t)(clockid_t, struct __kernel_timespec *);
vgettime64_t vdso_clock_gettime64;
typedef long (*vgtod_t)(struct timeval *tv, struct timezone *tz);
vgtod_t vdso_gettimeofday;
typedef long (*getcpu_t)(unsigned *, unsigned *, void *);
getcpu_t vgetcpu;
getcpu_t vdso_getcpu;
static void *vsyscall_getcpu(void)
{
#ifdef __x86_64__
FILE *maps;
char line[MAPS_LINE_LEN];
bool found = false;
maps = fopen("/proc/self/maps", "r");
if (!maps) /* might still be present, but ignore it here, as we test vDSO not vsyscall */
return NULL;
while (fgets(line, MAPS_LINE_LEN, maps)) {
char r, x;
void *start, *end;
char name[MAPS_LINE_LEN];
/* sscanf() is safe here as strlen(name) >= strlen(line) */
if (sscanf(line, "%p-%p %c-%cp %*x %*x:%*x %*u %s",
&start, &end, &r, &x, name) != 5)
continue;
if (strcmp(name, "[vsyscall]"))
continue;
/* assume entries are OK, as we test vDSO here not vsyscall */
found = true;
break;
}
fclose(maps);
if (!found) {
printf("Warning: failed to find vsyscall getcpu\n");
return NULL;
}
return (void *) (0xffffffffff600800);
#else
return NULL;
#endif
}
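/*
 * Background (hedged): the legacy x86-64 vsyscall page sits at a fixed
 * address, with gettimeofday(), time() and getcpu() at offsets 0x0,
 * 0x400 and 0x800 respectively -- hence the 0xffffffffff600800 constant
 * returned above.
 */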
static void fill_function_pointers()
{
void *vdso = dlopen("linux-vdso.so.1",
RTLD_LAZY | RTLD_LOCAL | RTLD_NOLOAD);
if (!vdso)
vdso = dlopen("linux-gate.so.1",
RTLD_LAZY | RTLD_LOCAL | RTLD_NOLOAD);
if (!vdso) {
printf("[WARN]\tfailed to find vDSO\n");
return;
}
vdso_getcpu = (getcpu_t)dlsym(vdso, name[4]);
if (!vdso_getcpu)
printf("Warning: failed to find getcpu in vDSO\n");
vgetcpu = (getcpu_t) vsyscall_getcpu();
vdso_clock_gettime = (vgettime_t)dlsym(vdso, name[1]);
if (!vdso_clock_gettime)
printf("Warning: failed to find clock_gettime in vDSO\n");
#if defined(VDSO_32BIT)
vdso_clock_gettime64 = (vgettime64_t)dlsym(vdso, name[5]);
if (!vdso_clock_gettime64)
printf("Warning: failed to find clock_gettime64 in vDSO\n");
#endif
vdso_gettimeofday = (vgtod_t)dlsym(vdso, name[0]);
if (!vdso_gettimeofday)
printf("Warning: failed to find gettimeofday in vDSO\n");
}
static long sys_getcpu(unsigned *cpu, unsigned *node,
		void *cache)
{
return syscall(__NR_getcpu, cpu, node, cache);
}
static inline int sys_clock_gettime(clockid_t id, struct timespec *ts)
{
return syscall(__NR_clock_gettime, id, ts);
}
static inline int sys_clock_gettime64(clockid_t id, struct __kernel_timespec *ts)
{
return syscall(__NR_clock_gettime64, id, ts);
}
static inline int sys_gettimeofday(struct timeval *tv, struct timezone *tz)
{
return syscall(__NR_gettimeofday, tv, tz);
}
static void test_getcpu(void)
{
printf("[RUN]\tTesting getcpu...\n");
for (int cpu = 0; ; cpu++) {
cpu_set_t cpuset;
CPU_ZERO(&cpuset);
CPU_SET(cpu, &cpuset);
if (sched_setaffinity(0, sizeof(cpuset), &cpuset) != 0)
return;
unsigned cpu_sys, cpu_vdso, cpu_vsys,
node_sys, node_vdso, node_vsys;
long ret_sys, ret_vdso = 1, ret_vsys = 1;
unsigned node;
ret_sys = sys_getcpu(&cpu_sys, &node_sys, 0);
if (vdso_getcpu)
ret_vdso = vdso_getcpu(&cpu_vdso, &node_vdso, 0);
if (vgetcpu)
ret_vsys = vgetcpu(&cpu_vsys, &node_vsys, 0);
if (!ret_sys)
node = node_sys;
else if (!ret_vdso)
node = node_vdso;
else if (!ret_vsys)
node = node_vsys;
bool ok = true;
if (!ret_sys && (cpu_sys != cpu || node_sys != node))
ok = false;
if (!ret_vdso && (cpu_vdso != cpu || node_vdso != node))
ok = false;
if (!ret_vsys && (cpu_vsys != cpu || node_vsys != node))
ok = false;
printf("[%s]\tCPU %u:", ok ? "OK" : "FAIL", cpu);
if (!ret_sys)
printf(" syscall: cpu %u, node %u", cpu_sys, node_sys);
if (!ret_vdso)
printf(" vdso: cpu %u, node %u", cpu_vdso, node_vdso);
if (!ret_vsys)
printf(" vsyscall: cpu %u, node %u", cpu_vsys,
node_vsys);
printf("\n");
if (!ok)
nerrs++;
}
}
static bool ts_leq(const struct timespec *a, const struct timespec *b)
{
if (a->tv_sec != b->tv_sec)
return a->tv_sec < b->tv_sec;
else
return a->tv_nsec <= b->tv_nsec;
}
static bool ts64_leq(const struct __kernel_timespec *a,
const struct __kernel_timespec *b)
{
if (a->tv_sec != b->tv_sec)
return a->tv_sec < b->tv_sec;
else
return a->tv_nsec <= b->tv_nsec;
}
static bool tv_leq(const struct timeval *a, const struct timeval *b)
{
if (a->tv_sec != b->tv_sec)
return a->tv_sec < b->tv_sec;
else
return a->tv_usec <= b->tv_usec;
}
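/*
 * The three helpers above implement the "sandwich" check used by the
 * tests below: a vDSO reading taken between two syscall readings of the
 * same clock must satisfy start <= vdso <= end, otherwise the times are
 * reported as out of sequence.
 */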
static char const * const clocknames[] = {
[0] = "CLOCK_REALTIME",
[1] = "CLOCK_MONOTONIC",
[2] = "CLOCK_PROCESS_CPUTIME_ID",
[3] = "CLOCK_THREAD_CPUTIME_ID",
[4] = "CLOCK_MONOTONIC_RAW",
[5] = "CLOCK_REALTIME_COARSE",
[6] = "CLOCK_MONOTONIC_COARSE",
[7] = "CLOCK_BOOTTIME",
[8] = "CLOCK_REALTIME_ALARM",
[9] = "CLOCK_BOOTTIME_ALARM",
[10] = "CLOCK_SGI_CYCLE",
[11] = "CLOCK_TAI",
};
static void test_one_clock_gettime(int clock, const char *name)
{
struct timespec start, vdso, end;
int vdso_ret, end_ret;
printf("[RUN]\tTesting clock_gettime for clock %s (%d)...\n", name, clock);
if (sys_clock_gettime(clock, &start) < 0) {
if (errno == EINVAL) {
vdso_ret = vdso_clock_gettime(clock, &vdso);
if (vdso_ret == -EINVAL) {
printf("[OK]\tNo such clock.\n");
} else {
printf("[FAIL]\tNo such clock, but __vdso_clock_gettime returned %d\n", vdso_ret);
nerrs++;
}
} else {
printf("[WARN]\t clock_gettime(%d) syscall returned error %d\n", clock, errno);
}
return;
}
vdso_ret = vdso_clock_gettime(clock, &vdso);
end_ret = sys_clock_gettime(clock, &end);
if (vdso_ret != 0 || end_ret != 0) {
printf("[FAIL]\tvDSO returned %d, syscall errno=%d\n",
vdso_ret, errno);
nerrs++;
return;
}
printf("\t%llu.%09ld %llu.%09ld %llu.%09ld\n",
(unsigned long long)start.tv_sec, start.tv_nsec,
(unsigned long long)vdso.tv_sec, vdso.tv_nsec,
(unsigned long long)end.tv_sec, end.tv_nsec);
if (!ts_leq(&start, &vdso) || !ts_leq(&vdso, &end)) {
printf("[FAIL]\tTimes are out of sequence\n");
nerrs++;
return;
}
printf("[OK]\tTest Passed.\n");
}
static void test_clock_gettime(void)
{
if (!vdso_clock_gettime) {
printf("[SKIP]\tNo vDSO, so skipping clock_gettime() tests\n");
return;
}
for (int clock = 0; clock < ARRAY_SIZE(clocknames); clock++)
test_one_clock_gettime(clock, clocknames[clock]);
/* Also test some invalid clock ids */
test_one_clock_gettime(-1, "invalid");
test_one_clock_gettime(INT_MIN, "invalid");
test_one_clock_gettime(INT_MAX, "invalid");
}
static void test_one_clock_gettime64(int clock, const char *name)
{
struct __kernel_timespec start, vdso, end;
int vdso_ret, end_ret;
printf("[RUN]\tTesting clock_gettime64 for clock %s (%d)...\n", name, clock);
if (sys_clock_gettime64(clock, &start) < 0) {
if (errno == EINVAL) {
vdso_ret = vdso_clock_gettime64(clock, &vdso);
if (vdso_ret == -EINVAL) {
printf("[OK]\tNo such clock.\n");
} else {
printf("[FAIL]\tNo such clock, but __vdso_clock_gettime64 returned %d\n", vdso_ret);
nerrs++;
}
} else {
printf("[WARN]\t clock_gettime64(%d) syscall returned error %d\n", clock, errno);
}
return;
}
vdso_ret = vdso_clock_gettime64(clock, &vdso);
end_ret = sys_clock_gettime64(clock, &end);
if (vdso_ret != 0 || end_ret != 0) {
printf("[FAIL]\tvDSO returned %d, syscall errno=%d\n",
vdso_ret, errno);
nerrs++;
return;
}
printf("\t%llu.%09lld %llu.%09lld %llu.%09lld\n",
(unsigned long long)start.tv_sec, start.tv_nsec,
(unsigned long long)vdso.tv_sec, vdso.tv_nsec,
(unsigned long long)end.tv_sec, end.tv_nsec);
if (!ts64_leq(&start, &vdso) || !ts64_leq(&vdso, &end)) {
printf("[FAIL]\tTimes are out of sequence\n");
nerrs++;
return;
}
printf("[OK]\tTest Passed.\n");
}
static void test_clock_gettime64(void)
{
if (!vdso_clock_gettime64) {
printf("[SKIP]\tNo vDSO, so skipping clock_gettime64() tests\n");
return;
}
for (int clock = 0; clock < ARRAY_SIZE(clocknames); clock++)
test_one_clock_gettime64(clock, clocknames[clock]);
/* Also test some invalid clock ids */
test_one_clock_gettime64(-1, "invalid");
test_one_clock_gettime64(INT_MIN, "invalid");
test_one_clock_gettime64(INT_MAX, "invalid");
}
static void test_gettimeofday(void)
{
struct timeval start, vdso, end;
struct timezone sys_tz, vdso_tz;
int vdso_ret, end_ret;
if (!vdso_gettimeofday)
return;
printf("[RUN]\tTesting gettimeofday...\n");
if (sys_gettimeofday(&start, &sys_tz) < 0) {
printf("[FAIL]\tsys_gettimeofday failed (%d)\n", errno);
nerrs++;
return;
}
vdso_ret = vdso_gettimeofday(&vdso, &vdso_tz);
end_ret = sys_gettimeofday(&end, NULL);
if (vdso_ret != 0 || end_ret != 0) {
printf("[FAIL]\tvDSO returned %d, syscall errno=%d\n",
vdso_ret, errno);
nerrs++;
return;
}
printf("\t%llu.%06ld %llu.%06ld %llu.%06ld\n",
(unsigned long long)start.tv_sec, start.tv_usec,
(unsigned long long)vdso.tv_sec, vdso.tv_usec,
(unsigned long long)end.tv_sec, end.tv_usec);
if (!tv_leq(&start, &vdso) || !tv_leq(&vdso, &end)) {
printf("[FAIL]\tTimes are out of sequence\n");
nerrs++;
}
if (sys_tz.tz_minuteswest == vdso_tz.tz_minuteswest &&
sys_tz.tz_dsttime == vdso_tz.tz_dsttime) {
printf("[OK]\ttimezones match: minuteswest=%d, dsttime=%d\n",
sys_tz.tz_minuteswest, sys_tz.tz_dsttime);
} else {
printf("[FAIL]\ttimezones do not match\n");
nerrs++;
}
/* And make sure that passing NULL for tz doesn't crash. */
vdso_gettimeofday(&vdso, NULL);
}
int main(int argc, char **argv)
{
name = (const char **)&names[VDSO_NAMES];
fill_function_pointers();
test_clock_gettime();
test_clock_gettime64();
test_gettimeofday();
/*
* Test getcpu() last so that, if something goes wrong setting affinity,
* we still run the other tests.
*/
test_getcpu();
return nerrs ? 1 : 0;
}
| linux-master | tools/testing/selftests/vDSO/vdso_test_correctness.c |
// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
/*
* vdso_clock_getres.c: Sample code to test clock_getres.
* Copyright (c) 2019 Arm Ltd.
*
* Compile with:
* gcc -std=gnu99 vdso_clock_getres.c
*
* Tested on ARM, ARM64, MIPS32, x86 (32-bit and 64-bit),
* Power (32-bit and 64-bit), S390x (32-bit and 64-bit).
* Might work on other architectures.
*/
#define _GNU_SOURCE
#include <elf.h>
#include <err.h>
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <sys/auxv.h>
#include <sys/mman.h>
#include <sys/time.h>
#include <unistd.h>
#include <sys/syscall.h>
#include "../kselftest.h"
static long syscall_clock_getres(clockid_t _clkid, struct timespec *_ts)
{
long ret;
ret = syscall(SYS_clock_getres, _clkid, _ts);
return ret;
}
const char *vdso_clock_name[12] = {
"CLOCK_REALTIME",
"CLOCK_MONOTONIC",
"CLOCK_PROCESS_CPUTIME_ID",
"CLOCK_THREAD_CPUTIME_ID",
"CLOCK_MONOTONIC_RAW",
"CLOCK_REALTIME_COARSE",
"CLOCK_MONOTONIC_COARSE",
"CLOCK_BOOTTIME",
"CLOCK_REALTIME_ALARM",
"CLOCK_BOOTTIME_ALARM",
"CLOCK_SGI_CYCLE",
"CLOCK_TAI",
};
/*
 * This function calls clock_getres via the libc/vDSO path and via a
 * direct system call, then compares the results; main() invokes it
 * with different clock_id values.
*
* Example of output:
*
* clock_id: CLOCK_REALTIME [PASS]
* clock_id: CLOCK_BOOTTIME [PASS]
* clock_id: CLOCK_TAI [PASS]
* clock_id: CLOCK_REALTIME_COARSE [PASS]
* clock_id: CLOCK_MONOTONIC [PASS]
* clock_id: CLOCK_MONOTONIC_RAW [PASS]
* clock_id: CLOCK_MONOTONIC_COARSE [PASS]
*/
static inline int vdso_test_clock(unsigned int clock_id)
{
struct timespec x, y;
printf("clock_id: %s", vdso_clock_name[clock_id]);
clock_getres(clock_id, &x);
syscall_clock_getres(clock_id, &y);
if ((x.tv_sec != y.tv_sec) || (x.tv_nsec != y.tv_nsec)) {
printf(" [FAIL]\n");
return KSFT_FAIL;
}
printf(" [PASS]\n");
return KSFT_PASS;
}
int main(int argc, char **argv)
{
int ret = 0;
#if _POSIX_TIMERS > 0
#ifdef CLOCK_REALTIME
ret += vdso_test_clock(CLOCK_REALTIME);
#endif
#ifdef CLOCK_BOOTTIME
ret += vdso_test_clock(CLOCK_BOOTTIME);
#endif
#ifdef CLOCK_TAI
ret += vdso_test_clock(CLOCK_TAI);
#endif
#ifdef CLOCK_REALTIME_COARSE
ret += vdso_test_clock(CLOCK_REALTIME_COARSE);
#endif
#ifdef CLOCK_MONOTONIC
ret += vdso_test_clock(CLOCK_MONOTONIC);
#endif
#ifdef CLOCK_MONOTONIC_RAW
ret += vdso_test_clock(CLOCK_MONOTONIC_RAW);
#endif
#ifdef CLOCK_MONOTONIC_COARSE
ret += vdso_test_clock(CLOCK_MONOTONIC_COARSE);
#endif
#endif
if (ret > 0)
return KSFT_FAIL;
return KSFT_PASS;
}
| linux-master | tools/testing/selftests/vDSO/vdso_test_clock_getres.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* vdso_test_gettimeofday.c: Sample code to test parse_vdso.c and
* vDSO gettimeofday()
* Copyright (c) 2014 Andy Lutomirski
*
* Compile with:
* gcc -std=gnu99 vdso_test_gettimeofday.c parse_vdso_gettimeofday.c
*
* Tested on x86, 32-bit and 64-bit. It may work on other architectures, too.
*/
#include <stdint.h>
#include <elf.h>
#include <stdio.h>
#include <sys/auxv.h>
#include <sys/time.h>
#include "../kselftest.h"
#include "parse_vdso.h"
/*
* ARM64's vDSO exports its gettimeofday() implementation with a different
* name and version from other architectures, so we need to handle it as
* a special case.
*/
#if defined(__aarch64__)
const char *version = "LINUX_2.6.39";
const char *name = "__kernel_gettimeofday";
#elif defined(__riscv)
const char *version = "LINUX_4.15";
const char *name = "__vdso_gettimeofday";
#else
const char *version = "LINUX_2.6";
const char *name = "__vdso_gettimeofday";
#endif
int main(int argc, char **argv)
{
unsigned long sysinfo_ehdr = getauxval(AT_SYSINFO_EHDR);
if (!sysinfo_ehdr) {
printf("AT_SYSINFO_EHDR is not present!\n");
return KSFT_SKIP;
}
	vdso_init_from_sysinfo_ehdr(sysinfo_ehdr);
/* Find gettimeofday. */
typedef long (*gtod_t)(struct timeval *tv, struct timezone *tz);
gtod_t gtod = (gtod_t)vdso_sym(version, name);
if (!gtod) {
printf("Could not find %s\n", name);
return KSFT_SKIP;
}
struct timeval tv;
long ret = gtod(&tv, 0);
if (ret == 0) {
printf("The time is %lld.%06lld\n",
(long long)tv.tv_sec, (long long)tv.tv_usec);
} else {
printf("%s failed\n", name);
return KSFT_FAIL;
}
return 0;
}
| linux-master | tools/testing/selftests/vDSO/vdso_test_gettimeofday.c |
/*
* parse_vdso.c: Linux reference vDSO parser
* Written by Andrew Lutomirski, 2011-2014.
*
* This code is meant to be linked in to various programs that run on Linux.
* As such, it is available with as few restrictions as possible. This file
* is licensed under the Creative Commons Zero License, version 1.0,
* available at http://creativecommons.org/publicdomain/zero/1.0/legalcode
*
* The vDSO is a regular ELF DSO that the kernel maps into user space when
* it starts a program. It works equally well in statically and dynamically
* linked binaries.
*
* This code is tested on x86. In principle it should work on any
* architecture that has a vDSO.
*/
#include <stdbool.h>
#include <stdint.h>
#include <string.h>
#include <limits.h>
#include <elf.h>
#include "parse_vdso.h"
/* And here's the code. */
#ifndef ELF_BITS
# if ULONG_MAX > 0xffffffffUL
# define ELF_BITS 64
# else
# define ELF_BITS 32
# endif
#endif
#define ELF_BITS_XFORM2(bits, x) Elf##bits##_##x
#define ELF_BITS_XFORM(bits, x) ELF_BITS_XFORM2(bits, x)
#define ELF(x) ELF_BITS_XFORM(ELF_BITS, x)
static struct vdso_info
{
bool valid;
/* Load information */
uintptr_t load_addr;
uintptr_t load_offset; /* load_addr - recorded vaddr */
/* Symbol table */
ELF(Sym) *symtab;
const char *symstrings;
ELF(Word) *bucket, *chain;
ELF(Word) nbucket, nchain;
/* Version table */
ELF(Versym) *versym;
ELF(Verdef) *verdef;
} vdso_info;
/* Straight from the ELF specification. */
static unsigned long elf_hash(const unsigned char *name)
{
unsigned long h = 0, g;
while (*name)
{
h = (h << 4) + *name++;
if (g = h & 0xf0000000)
h ^= g >> 24;
h &= ~g;
}
return h;
}
void vdso_init_from_sysinfo_ehdr(uintptr_t base)
{
size_t i;
bool found_vaddr = false;
vdso_info.valid = false;
vdso_info.load_addr = base;
ELF(Ehdr) *hdr = (ELF(Ehdr)*)base;
if (hdr->e_ident[EI_CLASS] !=
(ELF_BITS == 32 ? ELFCLASS32 : ELFCLASS64)) {
return; /* Wrong ELF class -- check ELF_BITS */
}
ELF(Phdr) *pt = (ELF(Phdr)*)(vdso_info.load_addr + hdr->e_phoff);
ELF(Dyn) *dyn = 0;
/*
* We need two things from the segment table: the load offset
* and the dynamic table.
*/
for (i = 0; i < hdr->e_phnum; i++)
{
if (pt[i].p_type == PT_LOAD && !found_vaddr) {
found_vaddr = true;
vdso_info.load_offset = base
+ (uintptr_t)pt[i].p_offset
- (uintptr_t)pt[i].p_vaddr;
} else if (pt[i].p_type == PT_DYNAMIC) {
dyn = (ELF(Dyn)*)(base + pt[i].p_offset);
}
}
if (!found_vaddr || !dyn)
return; /* Failed */
/*
* Fish out the useful bits of the dynamic table.
*/
ELF(Word) *hash = 0;
vdso_info.symstrings = 0;
vdso_info.symtab = 0;
vdso_info.versym = 0;
vdso_info.verdef = 0;
for (i = 0; dyn[i].d_tag != DT_NULL; i++) {
switch (dyn[i].d_tag) {
case DT_STRTAB:
vdso_info.symstrings = (const char *)
((uintptr_t)dyn[i].d_un.d_ptr
+ vdso_info.load_offset);
break;
case DT_SYMTAB:
vdso_info.symtab = (ELF(Sym) *)
((uintptr_t)dyn[i].d_un.d_ptr
+ vdso_info.load_offset);
break;
case DT_HASH:
hash = (ELF(Word) *)
((uintptr_t)dyn[i].d_un.d_ptr
+ vdso_info.load_offset);
break;
case DT_VERSYM:
vdso_info.versym = (ELF(Versym) *)
((uintptr_t)dyn[i].d_un.d_ptr
+ vdso_info.load_offset);
break;
case DT_VERDEF:
vdso_info.verdef = (ELF(Verdef) *)
((uintptr_t)dyn[i].d_un.d_ptr
+ vdso_info.load_offset);
break;
}
}
if (!vdso_info.symstrings || !vdso_info.symtab || !hash)
return; /* Failed */
if (!vdso_info.verdef)
vdso_info.versym = 0;
/* Parse the hash table header. */
vdso_info.nbucket = hash[0];
vdso_info.nchain = hash[1];
vdso_info.bucket = &hash[2];
vdso_info.chain = &hash[vdso_info.nbucket + 2];
/* That's all we need. */
vdso_info.valid = true;
}
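/*
 * DT_HASH layout recap (per the ELF specification): word 0 is nbucket,
 * word 1 is nchain, followed by nbucket bucket entries and nchain chain
 * entries.  A lookup for name starts at bucket[elf_hash(name) % nbucket]
 * and follows chain[] until STN_UNDEF, which is what vdso_sym() below
 * does.
 */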
static bool vdso_match_version(ELF(Versym) ver,
const char *name, ELF(Word) hash)
{
/*
* This is a helper function to check if the version indexed by
* ver matches name (which hashes to hash).
*
* The version definition table is a mess, and I don't know how
* to do this in better than linear time without allocating memory
* to build an index. I also don't know why the table has
* variable size entries in the first place.
*
* For added fun, I can't find a comprehensible specification of how
* to parse all the weird flags in the table.
*
* So I just parse the whole table every time.
*/
/* First step: find the version definition */
ver &= 0x7fff; /* Apparently bit 15 means "hidden" */
ELF(Verdef) *def = vdso_info.verdef;
while(true) {
if ((def->vd_flags & VER_FLG_BASE) == 0
&& (def->vd_ndx & 0x7fff) == ver)
break;
if (def->vd_next == 0)
return false; /* No definition. */
def = (ELF(Verdef) *)((char *)def + def->vd_next);
}
/* Now figure out whether it matches. */
ELF(Verdaux) *aux = (ELF(Verdaux)*)((char *)def + def->vd_aux);
return def->vd_hash == hash
&& !strcmp(name, vdso_info.symstrings + aux->vda_name);
}
void *vdso_sym(const char *version, const char *name)
{
unsigned long ver_hash;
if (!vdso_info.valid)
return 0;
ver_hash = elf_hash(version);
ELF(Word) chain = vdso_info.bucket[elf_hash(name) % vdso_info.nbucket];
for (; chain != STN_UNDEF; chain = vdso_info.chain[chain]) {
ELF(Sym) *sym = &vdso_info.symtab[chain];
/* Check for a defined global or weak function w/ right name. */
if (ELF64_ST_TYPE(sym->st_info) != STT_FUNC)
continue;
if (ELF64_ST_BIND(sym->st_info) != STB_GLOBAL &&
ELF64_ST_BIND(sym->st_info) != STB_WEAK)
continue;
if (sym->st_shndx == SHN_UNDEF)
continue;
if (strcmp(name, vdso_info.symstrings + sym->st_name))
continue;
/* Check symbol version. */
if (vdso_info.versym
&& !vdso_match_version(vdso_info.versym[chain],
version, ver_hash))
continue;
return (void *)(vdso_info.load_offset + sym->st_value);
}
return 0;
}
void vdso_init_from_auxv(void *auxv)
{
ELF(auxv_t) *elf_auxv = auxv;
for (int i = 0; elf_auxv[i].a_type != AT_NULL; i++)
{
if (elf_auxv[i].a_type == AT_SYSINFO_EHDR) {
vdso_init_from_sysinfo_ehdr(elf_auxv[i].a_un.a_val);
return;
}
}
vdso_info.valid = false;
}
| linux-master | tools/testing/selftests/vDSO/parse_vdso.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* vdso_test.c: Sample code to test parse_vdso.c on x86
* Copyright (c) 2011-2014 Andy Lutomirski
*
* You can amuse yourself by compiling with:
* gcc -std=gnu99 -nostdlib
* -Os -fno-asynchronous-unwind-tables -flto -lgcc_s
* vdso_standalone_test_x86.c parse_vdso.c
* to generate a small binary. On x86_64, you can omit -lgcc_s
* if you want the binary to be completely standalone.
*/
#include <sys/syscall.h>
#include <sys/time.h>
#include <unistd.h>
#include <stdint.h>
#include "parse_vdso.h"
/* We need a few libc functions... */
int strcmp(const char *a, const char *b)
{
/* This implementation is buggy: it never returns -1. */
while (*a || *b) {
if (*a != *b)
return 1;
if (*a == 0 || *b == 0)
return 1;
a++;
b++;
}
return 0;
}
/* ...and two syscalls. This is x86-specific. */
static inline long x86_syscall3(long nr, long a0, long a1, long a2)
{
long ret;
#ifdef __x86_64__
asm volatile ("syscall" : "=a" (ret) : "a" (nr),
"D" (a0), "S" (a1), "d" (a2) :
"cc", "memory", "rcx",
"r8", "r9", "r10", "r11" );
#else
asm volatile ("int $0x80" : "=a" (ret) : "a" (nr),
"b" (a0), "c" (a1), "d" (a2) :
"cc", "memory" );
#endif
return ret;
}
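/*
 * Example: linux_write(1, "hi\n", 3) below becomes, on x86_64,
 * rax = __NR_write, rdi = 1, rsi = buf, rdx = 3, then "syscall"; on
 * 32-bit it becomes eax/ebx/ecx/edx plus "int $0x80", matching the asm
 * constraints above.
 */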
static inline long linux_write(int fd, const void *data, size_t len)
{
return x86_syscall3(__NR_write, fd, (long)data, (long)len);
}
static inline void linux_exit(int code)
{
x86_syscall3(__NR_exit, code, 0, 0);
}
void to_base10(char *lastdig, time_t n)
{
while (n) {
*lastdig = (n % 10) + '0';
n /= 10;
lastdig--;
}
}
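/*
 * Example: given buf = "   " and to_base10(buf + 2, 42), the buffer
 * becomes " 42" -- digits are written right-to-left starting at lastdig,
 * and untouched positions keep their padding.  Note that n == 0 writes
 * nothing, which is acceptable for the timestamp use below.
 */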
__attribute__((externally_visible)) void c_main(void **stack)
{
/* Parse the stack */
long argc = (long)*stack;
stack += argc + 2;
/* Now we're pointing at the environment. Skip it. */
while(*stack)
stack++;
stack++;
/* Now we're pointing at auxv. Initialize the vDSO parser. */
vdso_init_from_auxv((void *)stack);
/* Find gettimeofday. */
typedef long (*gtod_t)(struct timeval *tv, struct timezone *tz);
gtod_t gtod = (gtod_t)vdso_sym("LINUX_2.6", "__vdso_gettimeofday");
if (!gtod)
linux_exit(1);
struct timeval tv;
long ret = gtod(&tv, 0);
if (ret == 0) {
char buf[] = "The time is .000000\n";
to_base10(buf + 31, tv.tv_sec);
to_base10(buf + 38, tv.tv_usec);
linux_write(1, buf, sizeof(buf) - 1);
} else {
linux_exit(ret);
}
linux_exit(0);
}
/*
* This is the real entry point. It passes the initial stack into
* the C entry point.
*/
asm (
".text\n"
".global _start\n"
".type _start,@function\n"
"_start:\n\t"
#ifdef __x86_64__
"mov %rsp,%rdi\n\t"
"jmp c_main"
#else
"push %esp\n\t"
"call c_main\n\t"
"int $3"
#endif
);
| linux-master | tools/testing/selftests/vDSO/vdso_standalone_test_x86.c |
// SPDX-License-Identifier: GPL-2.0
/*
* vdso_full_test.c: Sample code to test all the timers.
* Copyright (c) 2019 Arm Ltd.
*
* Compile with:
* gcc -std=gnu99 vdso_full_test.c parse_vdso.c
*
*/
#include <stdint.h>
#include <elf.h>
#include <stdio.h>
#include <time.h>
#include <sys/auxv.h>
#include <sys/time.h>
#define _GNU_SOURCE
#include <unistd.h>
#include <sys/syscall.h>
#include "../kselftest.h"
#include "vdso_config.h"
extern void *vdso_sym(const char *version, const char *name);
extern void vdso_init_from_sysinfo_ehdr(uintptr_t base);
extern void vdso_init_from_auxv(void *auxv);
static const char *version;
static const char **name;
typedef long (*vdso_gettimeofday_t)(struct timeval *tv, struct timezone *tz);
typedef long (*vdso_clock_gettime_t)(clockid_t clk_id, struct timespec *ts);
typedef long (*vdso_clock_getres_t)(clockid_t clk_id, struct timespec *ts);
typedef time_t (*vdso_time_t)(time_t *t);
#define VDSO_TEST_PASS_MSG() "\n%s(): PASS\n", __func__
#define VDSO_TEST_FAIL_MSG(x) "\n%s(): %s FAIL\n", __func__, x
#define VDSO_TEST_SKIP_MSG(x) "\n%s(): SKIP: Could not find %s\n", __func__, x
static void vdso_test_gettimeofday(void)
{
/* Find gettimeofday. */
vdso_gettimeofday_t vdso_gettimeofday =
(vdso_gettimeofday_t)vdso_sym(version, name[0]);
if (!vdso_gettimeofday) {
ksft_test_result_skip(VDSO_TEST_SKIP_MSG(name[0]));
return;
}
struct timeval tv;
long ret = vdso_gettimeofday(&tv, 0);
if (ret == 0) {
ksft_print_msg("The time is %lld.%06lld\n",
(long long)tv.tv_sec, (long long)tv.tv_usec);
ksft_test_result_pass(VDSO_TEST_PASS_MSG());
} else {
ksft_test_result_fail(VDSO_TEST_FAIL_MSG(name[0]));
}
}
static void vdso_test_clock_gettime(clockid_t clk_id)
{
/* Find clock_gettime. */
vdso_clock_gettime_t vdso_clock_gettime =
(vdso_clock_gettime_t)vdso_sym(version, name[1]);
if (!vdso_clock_gettime) {
ksft_test_result_skip(VDSO_TEST_SKIP_MSG(name[1]));
return;
}
struct timespec ts;
long ret = vdso_clock_gettime(clk_id, &ts);
if (ret == 0) {
ksft_print_msg("The time is %lld.%06lld\n",
(long long)ts.tv_sec, (long long)ts.tv_nsec);
ksft_test_result_pass(VDSO_TEST_PASS_MSG());
} else {
ksft_test_result_fail(VDSO_TEST_FAIL_MSG(name[1]));
}
}
static void vdso_test_time(void)
{
/* Find time. */
vdso_time_t vdso_time =
(vdso_time_t)vdso_sym(version, name[2]);
if (!vdso_time) {
ksft_test_result_skip(VDSO_TEST_SKIP_MSG(name[2]));
return;
}
long ret = vdso_time(NULL);
if (ret > 0) {
ksft_print_msg("The time in hours since January 1, 1970 is %lld\n",
(long long)(ret / 3600));
ksft_test_result_pass(VDSO_TEST_PASS_MSG());
} else {
ksft_test_result_fail(VDSO_TEST_FAIL_MSG(name[2]));
}
}
static void vdso_test_clock_getres(clockid_t clk_id)
{
int clock_getres_fail = 0;
/* Find clock_getres. */
vdso_clock_getres_t vdso_clock_getres =
(vdso_clock_getres_t)vdso_sym(version, name[3]);
if (!vdso_clock_getres) {
ksft_test_result_skip(VDSO_TEST_SKIP_MSG(name[3]));
return;
}
struct timespec ts, sys_ts;
long ret = vdso_clock_getres(clk_id, &ts);
if (ret == 0) {
ksft_print_msg("The vdso resolution is %lld %lld\n",
(long long)ts.tv_sec, (long long)ts.tv_nsec);
} else {
clock_getres_fail++;
}
ret = syscall(SYS_clock_getres, clk_id, &sys_ts);
ksft_print_msg("The syscall resolution is %lld %lld\n",
(long long)sys_ts.tv_sec, (long long)sys_ts.tv_nsec);
if ((sys_ts.tv_sec != ts.tv_sec) || (sys_ts.tv_nsec != ts.tv_nsec))
clock_getres_fail++;
if (clock_getres_fail > 0) {
ksft_test_result_fail(VDSO_TEST_FAIL_MSG(name[3]));
} else {
ksft_test_result_pass(VDSO_TEST_PASS_MSG());
}
}
const char *vdso_clock_name[12] = {
"CLOCK_REALTIME",
"CLOCK_MONOTONIC",
"CLOCK_PROCESS_CPUTIME_ID",
"CLOCK_THREAD_CPUTIME_ID",
"CLOCK_MONOTONIC_RAW",
"CLOCK_REALTIME_COARSE",
"CLOCK_MONOTONIC_COARSE",
"CLOCK_BOOTTIME",
"CLOCK_REALTIME_ALARM",
"CLOCK_BOOTTIME_ALARM",
"CLOCK_SGI_CYCLE",
"CLOCK_TAI",
};
/*
* This function calls vdso_test_clock_gettime and vdso_test_clock_getres
* with different values for clock_id.
*/
static inline void vdso_test_clock(clockid_t clock_id)
{
ksft_print_msg("\nclock_id: %s\n", vdso_clock_name[clock_id]);
vdso_test_clock_gettime(clock_id);
vdso_test_clock_getres(clock_id);
}
#define VDSO_TEST_PLAN 16
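/*
 * Plan arithmetic (assuming all clock ids are compiled in): 1 result for
 * gettimeofday + 7 clocks x (clock_gettime + clock_getres) + 1 result
 * for time() = 16.
 */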
int main(int argc, char **argv)
{
unsigned long sysinfo_ehdr = getauxval(AT_SYSINFO_EHDR);
ksft_print_header();
ksft_set_plan(VDSO_TEST_PLAN);
if (!sysinfo_ehdr) {
printf("AT_SYSINFO_EHDR is not present!\n");
return KSFT_SKIP;
}
version = versions[VDSO_VERSION];
name = (const char **)&names[VDSO_NAMES];
printf("[vDSO kselftest] VDSO_VERSION: %s\n", version);
	vdso_init_from_sysinfo_ehdr(sysinfo_ehdr);
vdso_test_gettimeofday();
#if _POSIX_TIMERS > 0
#ifdef CLOCK_REALTIME
vdso_test_clock(CLOCK_REALTIME);
#endif
#ifdef CLOCK_BOOTTIME
vdso_test_clock(CLOCK_BOOTTIME);
#endif
#ifdef CLOCK_TAI
vdso_test_clock(CLOCK_TAI);
#endif
#ifdef CLOCK_REALTIME_COARSE
vdso_test_clock(CLOCK_REALTIME_COARSE);
#endif
#ifdef CLOCK_MONOTONIC
vdso_test_clock(CLOCK_MONOTONIC);
#endif
#ifdef CLOCK_MONOTONIC_RAW
vdso_test_clock(CLOCK_MONOTONIC_RAW);
#endif
#ifdef CLOCK_MONOTONIC_COARSE
vdso_test_clock(CLOCK_MONOTONIC_COARSE);
#endif
#endif
vdso_test_time();
ksft_print_cnts();
return ksft_get_fail_cnt() == 0 ? KSFT_PASS : KSFT_FAIL;
}
| linux-master | tools/testing/selftests/vDSO/vdso_test_abi.c |
// SPDX-License-Identifier: GPL-2.0
/*
* This test covers the PR_SET_NAME functionality of prctl calls
*/
#include <errno.h>
#include <sys/prctl.h>
#include <string.h>
#include "../kselftest_harness.h"
#define CHANGE_NAME "changename"
#define EMPTY_NAME ""
#define TASK_COMM_LEN 16
int set_name(char *name)
{
int res;
res = prctl(PR_SET_NAME, name, NULL, NULL, NULL);
if (res < 0)
return -errno;
return res;
}
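/*
 * Note: the kernel stores the name in a TASK_COMM_LEN (16) byte buffer
 * including the trailing NUL, so PR_SET_NAME silently truncates anything
 * longer than 15 characters -- CHANGE_NAME above fits comfortably.
 */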
int check_is_name_correct(char *check_name)
{
char name[TASK_COMM_LEN];
int res;
res = prctl(PR_GET_NAME, name, NULL, NULL, NULL);
if (res < 0)
return -errno;
return !strcmp(name, check_name);
}
int check_null_pointer(char *check_name)
{
char *name = NULL;
int res;
res = prctl(PR_GET_NAME, name, NULL, NULL, NULL);
return res;
}
TEST(rename_process) {
EXPECT_GE(set_name(CHANGE_NAME), 0);
EXPECT_TRUE(check_is_name_correct(CHANGE_NAME));
EXPECT_GE(set_name(EMPTY_NAME), 0);
EXPECT_TRUE(check_is_name_correct(EMPTY_NAME));
EXPECT_GE(set_name(CHANGE_NAME), 0);
EXPECT_LT(check_null_pointer(CHANGE_NAME), 0);
}
TEST_HARNESS_MAIN
| linux-master | tools/testing/selftests/prctl/set-process-name.c |
// SPDX-License-Identifier: GPL-2.0
/*
* This test covers the anonymous VMA naming functionality through prctl calls
*/
#include <errno.h>
#include <sys/prctl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <string.h>
#include "../kselftest_harness.h"
#define AREA_SIZE 1024
#define GOOD_NAME "goodname"
#define BAD_NAME "badname\1"
#ifndef PR_SET_VMA
#define PR_SET_VMA 0x53564d41
#define PR_SET_VMA_ANON_NAME 0
#endif
int rename_vma(unsigned long addr, unsigned long size, char *name)
{
int res;
res = prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, addr, size, name);
if (res < 0)
return -errno;
return res;
}
int was_renaming_successful(char *target_name, unsigned long ptr)
{
FILE *maps_file;
char line_buf[512], name[128], mode[8];
unsigned long start_addr, end_addr, offset;
unsigned int major_id, minor_id, node_id;
char target_buf[128];
int res = 0, sscanf_res;
// The entry name in maps will be in format [anon:<target_name>]
sprintf(target_buf, "[anon:%s]", target_name);
maps_file = fopen("/proc/self/maps", "r");
if (!maps_file) {
printf("## /proc/self/maps file opening error\n");
return 0;
}
// Parse the maps file to find the entry we renamed
while (fgets(line_buf, sizeof(line_buf), maps_file)) {
		sscanf_res = sscanf(line_buf, "%lx-%lx %7s %lx %u:%u %u %127s", &start_addr,
&end_addr, mode, &offset, &major_id,
&minor_id, &node_id, name);
if (sscanf_res == EOF) {
res = 0;
printf("## EOF while parsing the maps file\n");
break;
}
if (!strcmp(name, target_buf) && start_addr == ptr) {
res = 1;
break;
}
}
fclose(maps_file);
return res;
}
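/*
 * Illustrative /proc/self/maps line this parser matches (addresses are
 * examples only):
 *
 *	7f0123456000-7f0123856000 rw-p 00000000 00:00 0    [anon:goodname]
 */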
FIXTURE(vma) {
void *ptr_anon, *ptr_not_anon;
};
FIXTURE_SETUP(vma) {
self->ptr_anon = mmap(NULL, AREA_SIZE, PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS, 0, 0);
ASSERT_NE(self->ptr_anon, NULL);
self->ptr_not_anon = mmap(NULL, AREA_SIZE, PROT_READ | PROT_WRITE,
MAP_PRIVATE, 0, 0);
ASSERT_NE(self->ptr_not_anon, NULL);
}
FIXTURE_TEARDOWN(vma) {
munmap(self->ptr_anon, AREA_SIZE);
munmap(self->ptr_not_anon, AREA_SIZE);
}
TEST_F(vma, renaming) {
TH_LOG("Try to rename the VMA with correct parameters");
EXPECT_GE(rename_vma((unsigned long)self->ptr_anon, AREA_SIZE, GOOD_NAME), 0);
EXPECT_TRUE(was_renaming_successful(GOOD_NAME, (unsigned long)self->ptr_anon));
TH_LOG("Try to pass invalid name (with non-printable character \\1) to rename the VMA");
EXPECT_EQ(rename_vma((unsigned long)self->ptr_anon, AREA_SIZE, BAD_NAME), -EINVAL);
TH_LOG("Try to rename non-anonymous VMA");
EXPECT_EQ(rename_vma((unsigned long) self->ptr_not_anon, AREA_SIZE, GOOD_NAME), -EINVAL);
}
TEST_HARNESS_MAIN
| linux-master | tools/testing/selftests/prctl/set-anon-vma-name-test.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Tests for prctl(PR_GET_TSC, ...) / prctl(PR_SET_TSC, ...)
*
* Tests if the control register is updated correctly
* at context switches
*
* Warning: this test will cause a very high load for a few seconds
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <signal.h>
#include <inttypes.h>
#include <wait.h>
#include <sys/prctl.h>
#include <linux/prctl.h>
/* Get/set the process' ability to use the timestamp counter instruction */
#ifndef PR_GET_TSC
#define PR_GET_TSC 25
#define PR_SET_TSC 26
# define PR_TSC_ENABLE 1 /* allow the use of the timestamp counter */
# define PR_TSC_SIGSEGV 2 /* throw a SIGSEGV instead of reading the TSC */
#endif
static uint64_t rdtsc(void)
{
uint32_t lo, hi;
/* We cannot use "=A", since this would use %rax on x86_64 */
__asm__ __volatile__ ("rdtsc" : "=a" (lo), "=d" (hi));
return (uint64_t)hi << 32 | lo;
}
static void sigsegv_expect(int sig)
{
	/* Expected: rdtsc() faulted while the TSC was disabled. */
}
static void segvtask(void)
{
if (prctl(PR_SET_TSC, PR_TSC_SIGSEGV) < 0)
{
perror("prctl");
exit(0);
}
signal(SIGSEGV, sigsegv_expect);
alarm(10);
rdtsc();
fprintf(stderr, "FATAL ERROR, rdtsc() succeeded while disabled\n");
exit(0);
}
static void sigsegv_fail(int sig)
{
fprintf(stderr, "FATAL ERROR, rdtsc() failed while enabled\n");
exit(0);
}
static void rdtsctask(void)
{
if (prctl(PR_SET_TSC, PR_TSC_ENABLE) < 0)
{
perror("prctl");
exit(0);
}
signal(SIGSEGV, sigsegv_fail);
alarm(10);
for(;;) rdtsc();
}
int main(void)
{
int n_tasks = 100, i;
fprintf(stderr, "[No further output means we're all right]\n");
for (i=0; i<n_tasks; i++)
if (fork() == 0)
{
if (i & 1)
segvtask();
else
rdtsctask();
}
for (i=0; i<n_tasks; i++)
wait(NULL);
exit(0);
}
| linux-master | tools/testing/selftests/prctl/disable-tsc-ctxt-sw-stress-test.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Tests for prctl(PR_GET_TSC, ...) / prctl(PR_SET_TSC, ...)
*
* Basic test to test behaviour of PR_GET_TSC and PR_SET_TSC
*/
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <signal.h>
#include <inttypes.h>
#include <sys/prctl.h>
#include <linux/prctl.h>
/* Get/set the process' ability to use the timestamp counter instruction */
#ifndef PR_GET_TSC
#define PR_GET_TSC 25
#define PR_SET_TSC 26
# define PR_TSC_ENABLE 1 /* allow the use of the timestamp counter */
# define PR_TSC_SIGSEGV 2 /* throw a SIGSEGV instead of reading the TSC */
#endif
const char *tsc_names[] =
{
[0] = "[not set]",
[PR_TSC_ENABLE] = "PR_TSC_ENABLE",
[PR_TSC_SIGSEGV] = "PR_TSC_SIGSEGV",
};
static uint64_t rdtsc(void)
{
uint32_t lo, hi;
/* We cannot use "=A", since this would use %rax on x86_64 */
__asm__ __volatile__ ("rdtsc" : "=a" (lo), "=d" (hi));
return (uint64_t)hi << 32 | lo;
}
static void sigsegv_cb(int sig)
{
int tsc_val = 0;
printf("[ SIG_SEGV ]\n");
printf("prctl(PR_GET_TSC, &tsc_val); ");
fflush(stdout);
if ( prctl(PR_GET_TSC, &tsc_val) == -1)
perror("prctl");
printf("tsc_val == %s\n", tsc_names[tsc_val]);
printf("prctl(PR_SET_TSC, PR_TSC_ENABLE)\n");
fflush(stdout);
if ( prctl(PR_SET_TSC, PR_TSC_ENABLE) == -1)
perror("prctl");
printf("rdtsc() == ");
}
int main(void)
{
int tsc_val = 0;
signal(SIGSEGV, sigsegv_cb);
printf("rdtsc() == %llu\n", (unsigned long long)rdtsc());
printf("prctl(PR_GET_TSC, &tsc_val); ");
fflush(stdout);
if ( prctl(PR_GET_TSC, &tsc_val) == -1)
perror("prctl");
printf("tsc_val == %s\n", tsc_names[tsc_val]);
printf("rdtsc() == %llu\n", (unsigned long long)rdtsc());
printf("prctl(PR_SET_TSC, PR_TSC_ENABLE)\n");
fflush(stdout);
if ( prctl(PR_SET_TSC, PR_TSC_ENABLE) == -1)
perror("prctl");
printf("rdtsc() == %llu\n", (unsigned long long)rdtsc());
printf("prctl(PR_SET_TSC, PR_TSC_SIGSEGV)\n");
fflush(stdout);
if ( prctl(PR_SET_TSC, PR_TSC_SIGSEGV) == -1)
perror("prctl");
printf("rdtsc() == ");
fflush(stdout);
printf("%llu\n", (unsigned long long)rdtsc());
fflush(stdout);
exit(EXIT_SUCCESS);
}
| linux-master | tools/testing/selftests/prctl/disable-tsc-test.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Tests for prctl(PR_GET_TSC, ...) / prctl(PR_SET_TSC, ...)
*
* Tests if the control register is updated correctly
* when set with prctl()
*
* Warning: this test will cause a very high load for a few seconds
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <signal.h>
#include <inttypes.h>
#include <sys/wait.h>
#include <sys/prctl.h>
#include <linux/prctl.h>
/* Get/set the process' ability to use the timestamp counter instruction */
#ifndef PR_GET_TSC
#define PR_GET_TSC 25
#define PR_SET_TSC 26
# define PR_TSC_ENABLE 1 /* allow the use of the timestamp counter */
# define PR_TSC_SIGSEGV 2 /* throw a SIGSEGV instead of reading the TSC */
#endif
/* snippet from wikipedia :-) */
static uint64_t rdtsc(void)
{
uint32_t lo, hi;
/* We cannot use "=A", since this would use %rax on x86_64 */
__asm__ __volatile__ ("rdtsc" : "=a" (lo), "=d" (hi));
return (uint64_t)hi << 32 | lo;
}
int should_segv = 0;
static void sigsegv_cb(int sig)
{
if (!should_segv)
{
fprintf(stderr, "FATAL ERROR, rdtsc() failed while enabled\n");
exit(0);
}
if (prctl(PR_SET_TSC, PR_TSC_ENABLE) < 0)
{
perror("prctl");
exit(0);
}
should_segv = 0;
rdtsc();
}
static void task(void)
{
signal(SIGSEGV, sigsegv_cb);
alarm(10);
for(;;)
{
rdtsc();
if (should_segv)
{
fprintf(stderr, "FATAL ERROR, rdtsc() succeeded while disabled\n");
exit(0);
}
if (prctl(PR_SET_TSC, PR_TSC_SIGSEGV) < 0)
{
perror("prctl");
exit(0);
}
should_segv = 1;
}
}
int main(void)
{
int n_tasks = 100, i;
fprintf(stderr, "[No further output means we're all right]\n");
for (i=0; i<n_tasks; i++)
if (fork() == 0)
task();
for (i=0; i<n_tasks; i++)
wait(NULL);
exit(0);
}
| linux-master | tools/testing/selftests/prctl/disable-tsc-on-off-stress-test.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* PTP 1588 clock support - User space test program
*
* Copyright (C) 2010 OMICRON electronics GmbH
*/
#define _GNU_SOURCE
#define __SANE_USERSPACE_TYPES__ /* For PPC64, to get LL64 types */
#include <errno.h>
#include <fcntl.h>
#include <inttypes.h>
#include <math.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/timex.h>
#include <sys/types.h>
#include <time.h>
#include <unistd.h>
#include <linux/ptp_clock.h>
#define DEVICE "/dev/ptp0"
#ifndef ADJ_SETOFFSET
#define ADJ_SETOFFSET 0x0100
#endif
#ifndef CLOCK_INVALID
#define CLOCK_INVALID -1
#endif
#define NSEC_PER_SEC 1000000000LL
/* clock_adjtime is not available in GLIBC < 2.14 */
#if !__GLIBC_PREREQ(2, 14)
#include <sys/syscall.h>
static int clock_adjtime(clockid_t id, struct timex *tx)
{
return syscall(__NR_clock_adjtime, id, tx);
}
#endif
static void show_flag_test(int rq_index, unsigned int flags, int err)
{
printf("PTP_EXTTS_REQUEST%c flags 0x%08x : (%d) %s\n",
rq_index ? '1' + rq_index : ' ',
flags, err, strerror(errno));
/* sigh, uClibc ... */
errno = 0;
}
static void do_flag_test(int fd, unsigned int index)
{
struct ptp_extts_request extts_request;
unsigned long request[2] = {
PTP_EXTTS_REQUEST,
PTP_EXTTS_REQUEST2,
};
unsigned int enable_flags[5] = {
PTP_ENABLE_FEATURE,
PTP_ENABLE_FEATURE | PTP_RISING_EDGE,
PTP_ENABLE_FEATURE | PTP_FALLING_EDGE,
PTP_ENABLE_FEATURE | PTP_RISING_EDGE | PTP_FALLING_EDGE,
PTP_ENABLE_FEATURE | (PTP_EXTTS_VALID_FLAGS + 1),
};
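/*
 * The last entry sets a bit above PTP_EXTTS_VALID_FLAGS so that the
 * strict PTP_EXTTS_REQUEST2 ioctl can be seen rejecting flags which
 * the legacy PTP_EXTTS_REQUEST ioctl silently ignores.
 */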
int err, i, j;
memset(&extts_request, 0, sizeof(extts_request));
extts_request.index = index;
for (i = 0; i < 2; i++) {
for (j = 0; j < 5; j++) {
extts_request.flags = enable_flags[j];
err = ioctl(fd, request[i], &extts_request);
show_flag_test(i, extts_request.flags, err);
extts_request.flags = 0;
err = ioctl(fd, request[i], &extts_request);
}
}
}
static clockid_t get_clockid(int fd)
{
#define CLOCKFD 3
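/*
 * Dynamic posix clocks are addressed via their character device: the
 * clockid encodes the bit-inverted fd in the upper bits with CLOCKFD
 * (3) in the low three bits.
 */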
return (((unsigned int) ~fd) << 3) | CLOCKFD;
}
static long ppb_to_scaled_ppm(int ppb)
{
/*
* The 'freq' field in the 'struct timex' is in parts per
* million, but with a 16 bit binary fractional field.
* Instead of calculating either one of
*
* scaled_ppm = (ppb / 1000) << 16 [1]
* scaled_ppm = (ppb << 16) / 1000 [2]
*
* we simply use double precision math, in order to avoid the
* truncation in [1] and the possible overflow in [2].
*/
return (long) (ppb * 65.536);
}
static int64_t pctns(struct ptp_clock_time *t)
{
return t->sec * NSEC_PER_SEC + t->nsec;
}
static void usage(char *progname)
{
fprintf(stderr,
"usage: %s [options]\n"
" -c query the ptp clock's capabilities\n"
" -d name device to open\n"
" -e val read 'val' external time stamp events\n"
" -f val adjust the ptp clock frequency by 'val' ppb\n"
" -g get the ptp clock time\n"
" -h prints this message\n"
" -i val index for event/trigger\n"
" -k val measure the time offset between system and phc clock\n"
" for 'val' times (Maximum 25)\n"
" -l list the current pin configuration\n"
" -L pin,val configure pin index 'pin' with function 'val'\n"
" the channel index is taken from the '-i' option\n"
" 'val' specifies the auxiliary function:\n"
" 0 - none\n"
" 1 - external time stamp\n"
" 2 - periodic output\n"
" -n val shift the ptp clock time by 'val' nanoseconds\n"
" -o val phase offset (in nanoseconds) to be provided to the PHC servo\n"
" -p val enable output with a period of 'val' nanoseconds\n"
" -H val set output phase to 'val' nanoseconds (requires -p)\n"
" -w val set output pulse width to 'val' nanoseconds (requires -p)\n"
" -P val enable or disable (val=1|0) the system clock PPS\n"
" -s set the ptp clock time from the system time\n"
" -S set the system time from the ptp clock time\n"
" -t val shift the ptp clock time by 'val' seconds\n"
" -T val set the ptp clock time to 'val' seconds\n"
" -x val get an extended ptp clock time with the desired number of samples (up to %d)\n"
" -X get a ptp clock cross timestamp\n"
" -z test combinations of rising/falling external time stamp flags\n",
progname, PTP_MAX_SAMPLES);
}
int main(int argc, char *argv[])
{
struct ptp_clock_caps caps;
struct ptp_extts_event event;
struct ptp_extts_request extts_request;
struct ptp_perout_request perout_request;
struct ptp_pin_desc desc;
struct timespec ts;
struct timex tx;
struct ptp_clock_time *pct;
struct ptp_sys_offset *sysoff;
struct ptp_sys_offset_extended *soe;
struct ptp_sys_offset_precise *xts;
char *progname;
unsigned int i;
int c, cnt, fd;
char *device = DEVICE;
clockid_t clkid;
int adjfreq = 0x7fffffff;
int adjtime = 0;
int adjns = 0;
int adjphase = 0;
int capabilities = 0;
int extts = 0;
int flagtest = 0;
int gettime = 0;
int index = 0;
int list_pins = 0;
int pct_offset = 0;
int getextended = 0;
int getcross = 0;
int n_samples = 0;
int pin_index = -1, pin_func;
int pps = -1;
int seconds = 0;
int settime = 0;
int64_t t1, t2, tp;
int64_t interval, offset;
int64_t perout_phase = -1;
int64_t pulsewidth = -1;
int64_t perout = -1;
progname = strrchr(argv[0], '/');
progname = progname ? 1+progname : argv[0];
while (EOF != (c = getopt(argc, argv, "cd:e:f:ghH:i:k:lL:n:o:p:P:sSt:T:w:x:Xz"))) {
switch (c) {
case 'c':
capabilities = 1;
break;
case 'd':
device = optarg;
break;
case 'e':
extts = atoi(optarg);
break;
case 'f':
adjfreq = atoi(optarg);
break;
case 'g':
gettime = 1;
break;
case 'H':
perout_phase = atoll(optarg);
break;
case 'i':
index = atoi(optarg);
break;
case 'k':
pct_offset = 1;
n_samples = atoi(optarg);
break;
case 'l':
list_pins = 1;
break;
case 'L':
cnt = sscanf(optarg, "%d,%d", &pin_index, &pin_func);
if (cnt != 2) {
usage(progname);
return -1;
}
break;
case 'n':
adjns = atoi(optarg);
break;
case 'o':
adjphase = atoi(optarg);
break;
case 'p':
perout = atoll(optarg);
break;
case 'P':
pps = atoi(optarg);
break;
case 's':
settime = 1;
break;
case 'S':
settime = 2;
break;
case 't':
adjtime = atoi(optarg);
break;
case 'T':
settime = 3;
seconds = atoi(optarg);
break;
case 'w':
pulsewidth = atoi(optarg);
break;
case 'x':
getextended = atoi(optarg);
if (getextended < 1 || getextended > PTP_MAX_SAMPLES) {
fprintf(stderr,
"number of extended timestamp samples must be between 1 and %d; was asked for %d\n",
PTP_MAX_SAMPLES, getextended);
return -1;
}
break;
case 'X':
getcross = 1;
break;
case 'z':
flagtest = 1;
break;
case 'h':
usage(progname);
return 0;
case '?':
default:
usage(progname);
return -1;
}
}
fd = open(device, O_RDWR);
if (fd < 0) {
fprintf(stderr, "opening %s: %s\n", device, strerror(errno));
return -1;
}
clkid = get_clockid(fd);
if (CLOCK_INVALID == clkid) {
fprintf(stderr, "failed to read clock id\n");
return -1;
}
if (capabilities) {
if (ioctl(fd, PTP_CLOCK_GETCAPS, &caps)) {
perror("PTP_CLOCK_GETCAPS");
} else {
printf("capabilities:\n"
" %d maximum frequency adjustment (ppb)\n"
" %d programmable alarms\n"
" %d external time stamp channels\n"
" %d programmable periodic signals\n"
" %d pulse per second\n"
" %d programmable pins\n"
" %d cross timestamping\n"
" %d adjust_phase\n"
" %d maximum phase adjustment (ns)\n",
caps.max_adj,
caps.n_alarm,
caps.n_ext_ts,
caps.n_per_out,
caps.pps,
caps.n_pins,
caps.cross_timestamping,
caps.adjust_phase,
caps.max_phase_adj);
}
}
if (0x7fffffff != adjfreq) {
memset(&tx, 0, sizeof(tx));
tx.modes = ADJ_FREQUENCY;
tx.freq = ppb_to_scaled_ppm(adjfreq);
if (clock_adjtime(clkid, &tx)) {
perror("clock_adjtime");
} else {
puts("frequency adjustment okay");
}
}
if (adjtime || adjns) {
memset(&tx, 0, sizeof(tx));
tx.modes = ADJ_SETOFFSET | ADJ_NANO;
tx.time.tv_sec = adjtime;
tx.time.tv_usec = adjns;
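/* With ADJ_NANO, tv_usec carries nanoseconds; normalize into [0, 1s) */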
while (tx.time.tv_usec < 0) {
tx.time.tv_sec -= 1;
tx.time.tv_usec += NSEC_PER_SEC;
}
if (clock_adjtime(clkid, &tx) < 0) {
perror("clock_adjtime");
} else {
puts("time shift okay");
}
}
if (adjphase) {
memset(&tx, 0, sizeof(tx));
tx.modes = ADJ_OFFSET | ADJ_NANO;
tx.offset = adjphase;
if (clock_adjtime(clkid, &tx) < 0) {
perror("clock_adjtime");
} else {
puts("phase adjustment okay");
}
}
if (gettime) {
if (clock_gettime(clkid, &ts)) {
perror("clock_gettime");
} else {
printf("clock time: %ld.%09ld or %s",
ts.tv_sec, ts.tv_nsec, ctime(&ts.tv_sec));
}
}
if (settime == 1) {
clock_gettime(CLOCK_REALTIME, &ts);
if (clock_settime(clkid, &ts)) {
perror("clock_settime");
} else {
puts("set time okay");
}
}
if (settime == 2) {
clock_gettime(clkid, &ts);
if (clock_settime(CLOCK_REALTIME, &ts)) {
perror("clock_settime");
} else {
puts("set time okay");
}
}
if (settime == 3) {
ts.tv_sec = seconds;
ts.tv_nsec = 0;
if (clock_settime(clkid, &ts)) {
perror("clock_settime");
} else {
puts("set time okay");
}
}
if (pin_index >= 0) {
memset(&desc, 0, sizeof(desc));
desc.index = pin_index;
desc.func = pin_func;
desc.chan = index;
if (ioctl(fd, PTP_PIN_SETFUNC, &desc)) {
perror("PTP_PIN_SETFUNC");
} else {
puts("set pin function okay");
}
}
if (extts) {
memset(&extts_request, 0, sizeof(extts_request));
extts_request.index = index;
extts_request.flags = PTP_ENABLE_FEATURE;
if (ioctl(fd, PTP_EXTTS_REQUEST, &extts_request)) {
perror("PTP_EXTTS_REQUEST");
extts = 0;
} else {
puts("external time stamp request okay");
}
for (; extts; extts--) {
cnt = read(fd, &event, sizeof(event));
if (cnt != sizeof(event)) {
perror("read");
break;
}
printf("event index %u at %lld.%09u\n", event.index,
event.t.sec, event.t.nsec);
fflush(stdout);
}
/* Disable the feature again. */
extts_request.flags = 0;
if (ioctl(fd, PTP_EXTTS_REQUEST, &extts_request)) {
perror("PTP_EXTTS_REQUEST");
}
}
if (flagtest) {
do_flag_test(fd, index);
}
if (list_pins) {
int n_pins = 0;
if (ioctl(fd, PTP_CLOCK_GETCAPS, &caps)) {
perror("PTP_CLOCK_GETCAPS");
} else {
n_pins = caps.n_pins;
}
for (i = 0; i < n_pins; i++) {
desc.index = i;
if (ioctl(fd, PTP_PIN_GETFUNC, &desc)) {
perror("PTP_PIN_GETFUNC");
break;
}
printf("name %s index %u func %u chan %u\n",
desc.name, desc.index, desc.func, desc.chan);
}
}
if (pulsewidth >= 0 && perout < 0) {
puts("-w can only be specified together with -p");
return -1;
}
if (perout_phase >= 0 && perout < 0) {
puts("-H can only be specified together with -p");
return -1;
}
if (perout >= 0) {
if (clock_gettime(clkid, &ts)) {
perror("clock_gettime");
return -1;
}
memset(&perout_request, 0, sizeof(perout_request));
perout_request.index = index;
perout_request.period.sec = perout / NSEC_PER_SEC;
perout_request.period.nsec = perout % NSEC_PER_SEC;
perout_request.flags = 0;
if (pulsewidth >= 0) {
perout_request.flags |= PTP_PEROUT_DUTY_CYCLE;
perout_request.on.sec = pulsewidth / NSEC_PER_SEC;
perout_request.on.nsec = pulsewidth % NSEC_PER_SEC;
}
if (perout_phase >= 0) {
perout_request.flags |= PTP_PEROUT_PHASE;
perout_request.phase.sec = perout_phase / NSEC_PER_SEC;
perout_request.phase.nsec = perout_phase % NSEC_PER_SEC;
} else {
perout_request.start.sec = ts.tv_sec + 2;
perout_request.start.nsec = 0;
}
if (ioctl(fd, PTP_PEROUT_REQUEST2, &perout_request)) {
perror("PTP_PEROUT_REQUEST");
} else {
puts("periodic output request okay");
}
}
if (pps != -1) {
int enable = pps ? 1 : 0;
if (ioctl(fd, PTP_ENABLE_PPS, enable)) {
perror("PTP_ENABLE_PPS");
} else {
puts("pps for system time request okay");
}
}
if (pct_offset) {
if (n_samples <= 0 || n_samples > 25) {
puts("n_samples should be between 1 and 25");
usage(progname);
return -1;
}
sysoff = calloc(1, sizeof(*sysoff));
if (!sysoff) {
perror("calloc");
return -1;
}
sysoff->n_samples = n_samples;
if (ioctl(fd, PTP_SYS_OFFSET, sysoff))
perror("PTP_SYS_OFFSET");
else
puts("system and phc clock time offset request okay");
pct = &sysoff->ts[0];
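/*
 * PTP_SYS_OFFSET returns 2 * n_samples + 1 timestamps, alternating
 * system/phc/system. The offset estimate assumes the phc read happened
 * at the midpoint of the two surrounding system clock reads.
 */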
for (i = 0; i < sysoff->n_samples; i++) {
t1 = pctns(pct+2*i);
tp = pctns(pct+2*i+1);
t2 = pctns(pct+2*i+2);
interval = t2 - t1;
offset = (t2 + t1) / 2 - tp;
printf("system time: %lld.%09u\n",
(pct+2*i)->sec, (pct+2*i)->nsec);
printf("phc time: %lld.%09u\n",
(pct+2*i+1)->sec, (pct+2*i+1)->nsec);
printf("system time: %lld.%09u\n",
(pct+2*i+2)->sec, (pct+2*i+2)->nsec);
printf("system/phc clock time offset is %" PRId64 " ns\n"
"system clock time delay is %" PRId64 " ns\n",
offset, interval);
}
free(sysoff);
}
if (getextended) {
soe = calloc(1, sizeof(*soe));
if (!soe) {
perror("calloc");
return -1;
}
soe->n_samples = getextended;
if (ioctl(fd, PTP_SYS_OFFSET_EXTENDED, soe)) {
perror("PTP_SYS_OFFSET_EXTENDED");
} else {
printf("extended timestamp request returned %d samples\n",
getextended);
for (i = 0; i < getextended; i++) {
printf("sample #%2d: system time before: %lld.%09u\n",
i, soe->ts[i][0].sec, soe->ts[i][0].nsec);
printf(" phc time: %lld.%09u\n",
soe->ts[i][1].sec, soe->ts[i][1].nsec);
printf(" system time after: %lld.%09u\n",
soe->ts[i][2].sec, soe->ts[i][2].nsec);
}
}
free(soe);
}
if (getcross) {
xts = calloc(1, sizeof(*xts));
if (!xts) {
perror("calloc");
return -1;
}
if (ioctl(fd, PTP_SYS_OFFSET_PRECISE, xts)) {
perror("PTP_SYS_OFFSET_PRECISE");
} else {
puts("system and phc crosstimestamping request okay");
printf("device time: %lld.%09u\n",
xts->device.sec, xts->device.nsec);
printf("system time: %lld.%09u\n",
xts->sys_realtime.sec, xts->sys_realtime.nsec);
printf("monoraw time: %lld.%09u\n",
xts->sys_monoraw.sec, xts->sys_monoraw.nsec);
}
free(xts);
}
close(fd);
return 0;
}
| linux-master | tools/testing/selftests/ptp/testptp.c |
// SPDX-License-Identifier: GPL-2.0
/*
* User Events Dyn Events Test Program
*
* Copyright (c) 2021 Beau Belgrave <[email protected]>
*/
#include <errno.h>
#include <linux/user_events.h>
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/stat.h>
#include <unistd.h>
#include "../kselftest_harness.h"
#include "user_events_selftests.h"
const char *abi_file = "/sys/kernel/tracing/user_events_data";
const char *enable_file = "/sys/kernel/tracing/events/user_events/__test_event/enable";
static bool wait_for_delete(void)
{
int i;
for (i = 0; i < 1000; ++i) {
int fd = open(enable_file, O_RDONLY);
if (fd == -1)
return true;
close(fd);
usleep(1000);
}
return false;
}
static int reg_event(int fd, int *check, int bit, const char *value)
{
struct user_reg reg = {0};
reg.size = sizeof(reg);
reg.name_args = (__u64)value;
reg.enable_bit = bit;
reg.enable_addr = (__u64)check;
reg.enable_size = sizeof(*check);
if (ioctl(fd, DIAG_IOCSREG, &reg) == -1)
return -1;
return 0;
}
static int unreg_event(int fd, int *check, int bit)
{
struct user_unreg unreg = {0};
unreg.size = sizeof(unreg);
unreg.disable_bit = bit;
unreg.disable_addr = (__u64)check;
return ioctl(fd, DIAG_IOCSUNREG, &unreg);
}
static int parse(int *check, const char *value)
{
int fd = open(abi_file, O_RDWR);
int ret;
if (fd == -1)
return -1;
/* Until we have persist flags via dynamic events, use the base name */
if (value[0] != 'u' || value[1] != ':') {
close(fd);
return -1;
}
ret = reg_event(fd, check, 31, value + 2);
if (ret != -1) {
if (unreg_event(fd, check, 31) == -1)
printf("WARN: Couldn't unreg event\n");
}
close(fd);
return ret;
}
static int check_match(int *check, const char *first, const char *second, bool *match)
{
int fd = open(abi_file, O_RDWR);
int ret = -1;
if (fd == -1)
return -1;
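/*
 * Matching is probed by registering the same event name twice: a
 * second register with identical fields succeeds, while differing
 * fields fail with EADDRINUSE.
 */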
if (reg_event(fd, check, 31, first) == -1)
goto cleanup;
if (reg_event(fd, check, 30, second) == -1) {
if (errno == EADDRINUSE) {
/* Name is in use, with different fields */
*match = false;
ret = 0;
}
goto cleanup;
}
*match = true;
ret = 0;
cleanup:
unreg_event(fd, check, 31);
unreg_event(fd, check, 30);
close(fd);
wait_for_delete();
return ret;
}
#define TEST_MATCH(x, y) \
do { \
bool match; \
ASSERT_NE(-1, check_match(&self->check, x, y, &match)); \
ASSERT_EQ(true, match); \
} while (0)
#define TEST_NMATCH(x, y) \
do { \
bool match; \
ASSERT_NE(-1, check_match(&self->check, x, y, &match)); \
ASSERT_EQ(false, match); \
} while (0)
#define TEST_PARSE(x) ASSERT_NE(-1, parse(&self->check, x))
#define TEST_NPARSE(x) ASSERT_EQ(-1, parse(&self->check, x))
FIXTURE(user) {
int check;
};
FIXTURE_SETUP(user) {
USER_EVENT_FIXTURE_SETUP(return);
}
FIXTURE_TEARDOWN(user) {
wait_for_delete();
}
TEST_F(user, basic_types) {
/* All should work */
TEST_PARSE("u:__test_event u64 a");
TEST_PARSE("u:__test_event u32 a");
TEST_PARSE("u:__test_event u16 a");
TEST_PARSE("u:__test_event u8 a");
TEST_PARSE("u:__test_event char a");
TEST_PARSE("u:__test_event unsigned char a");
TEST_PARSE("u:__test_event int a");
TEST_PARSE("u:__test_event unsigned int a");
TEST_PARSE("u:__test_event short a");
TEST_PARSE("u:__test_event unsigned short a");
TEST_PARSE("u:__test_event char[20] a");
TEST_PARSE("u:__test_event unsigned char[20] a");
TEST_PARSE("u:__test_event char[0x14] a");
TEST_PARSE("u:__test_event unsigned char[0x14] a");
/* Bad size format should fail */
TEST_NPARSE("u:__test_event char[aa] a");
/* Large size should fail */
TEST_NPARSE("u:__test_event char[9999] a");
/* Long size string should fail */
TEST_NPARSE("u:__test_event char[0x0000000000001] a");
}
TEST_F(user, loc_types) {
/* All should work */
TEST_PARSE("u:__test_event __data_loc char[] a");
TEST_PARSE("u:__test_event __data_loc unsigned char[] a");
TEST_PARSE("u:__test_event __rel_loc char[] a");
TEST_PARSE("u:__test_event __rel_loc unsigned char[] a");
}
TEST_F(user, size_types) {
/* Should work */
TEST_PARSE("u:__test_event struct custom a 20");
/* Size not specified on struct should fail */
TEST_NPARSE("u:__test_event struct custom a");
/* Size specified on non-struct should fail */
TEST_NPARSE("u:__test_event char a 20");
}
TEST_F(user, matching) {
/* Single name matches */
TEST_MATCH("__test_event u32 a",
"__test_event u32 a");
/* Multiple names match */
TEST_MATCH("__test_event u32 a; u32 b",
"__test_event u32 a; u32 b");
/* Multiple names match with dangling ; */
TEST_MATCH("__test_event u32 a; u32 b",
"__test_event u32 a; u32 b;");
/* Single name doesn't match */
TEST_NMATCH("__test_event u32 a",
"__test_event u32 b");
/* Multiple names don't match */
TEST_NMATCH("__test_event u32 a; u32 b",
"__test_event u32 b; u32 a");
/* Types don't match */
TEST_NMATCH("__test_event u64 a; u64 b",
"__test_event u32 a; u32 b");
/* Struct name and size matches */
TEST_MATCH("__test_event struct my_struct a 20",
"__test_event struct my_struct a 20");
/* Struct name don't match */
TEST_NMATCH("__test_event struct my_struct a 20",
"__test_event struct my_struct b 20");
/* Struct size don't match */
TEST_NMATCH("__test_event struct my_struct a 20",
"__test_event struct my_struct a 21");
}
int main(int argc, char **argv)
{
return test_harness_run(argc, argv);
}
| linux-master | tools/testing/selftests/user_events/dyn_test.c |
// SPDX-License-Identifier: GPL-2.0
/*
* User Events Perf Events Test Program
*
* Copyright (c) 2021 Beau Belgrave <[email protected]>
*/
#include <errno.h>
#include <linux/user_events.h>
#include <linux/perf_event.h>
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <unistd.h>
#include <asm/unistd.h>
#include "../kselftest_harness.h"
#include "user_events_selftests.h"
const char *data_file = "/sys/kernel/tracing/user_events_data";
const char *id_file = "/sys/kernel/tracing/events/user_events/__test_event/id";
const char *fmt_file = "/sys/kernel/tracing/events/user_events/__test_event/format";
struct event {
__u32 index;
__u32 field1;
__u32 field2;
};
static long perf_event_open(struct perf_event_attr *pe, pid_t pid,
int cpu, int group_fd, unsigned long flags)
{
return syscall(__NR_perf_event_open, pe, pid, cpu, group_fd, flags);
}
static int get_id(void)
{
FILE *fp = fopen(id_file, "r");
int ret, id = 0;
if (!fp)
return -1;
ret = fscanf(fp, "%d", &id);
fclose(fp);
if (ret != 1)
return -1;
return id;
}
static int get_offset(void)
{
FILE *fp = fopen(fmt_file, "r");
int ret, c, last = 0, offset = 0;
if (!fp)
return -1;
/* Read until empty line */
while (true) {
c = getc(fp);
if (c == EOF)
break;
if (last == '\n' && c == '\n')
break;
last = c;
}
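/* The blank line ends the common_* fields; field1 is described next */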
ret = fscanf(fp, "\tfield:u32 field1;\toffset:%d;", &offset);
fclose(fp);
if (ret != 1)
return -1;
return offset;
}
static int clear(int *check)
{
struct user_unreg unreg = {0};
unreg.size = sizeof(unreg);
unreg.disable_bit = 31;
unreg.disable_addr = (__u64)check;
int fd = open(data_file, O_RDWR);
if (fd == -1)
return -1;
if (ioctl(fd, DIAG_IOCSUNREG, &unreg) == -1)
if (errno != ENOENT)
return -1;
if (ioctl(fd, DIAG_IOCSDEL, "__test_event") == -1)
if (errno != ENOENT)
return -1;
close(fd);
return 0;
}
FIXTURE(user) {
int data_fd;
int check;
};
FIXTURE_SETUP(user) {
USER_EVENT_FIXTURE_SETUP(return);
self->data_fd = open(data_file, O_RDWR);
ASSERT_NE(-1, self->data_fd);
}
FIXTURE_TEARDOWN(user) {
close(self->data_fd);
if (clear(&self->check) != 0)
printf("WARNING: Clear didn't work!\n");
}
TEST_F(user, perf_write) {
struct perf_event_attr pe = {0};
struct user_reg reg = {0};
struct event event;
struct perf_event_mmap_page *perf_page;
int page_size = sysconf(_SC_PAGESIZE);
int id, fd, offset;
__u32 *val;
reg.size = sizeof(reg);
reg.name_args = (__u64)"__test_event u32 field1; u32 field2";
reg.enable_bit = 31;
reg.enable_addr = (__u64)&self->check;
reg.enable_size = sizeof(self->check);
/* Register should work */
ASSERT_EQ(0, ioctl(self->data_fd, DIAG_IOCSREG, &reg));
ASSERT_EQ(0, reg.write_index);
ASSERT_EQ(0, self->check);
/* Id should be there */
id = get_id();
ASSERT_NE(-1, id);
offset = get_offset();
ASSERT_NE(-1, offset);
pe.type = PERF_TYPE_TRACEPOINT;
pe.size = sizeof(pe);
pe.config = id;
pe.sample_type = PERF_SAMPLE_RAW;
pe.sample_period = 1;
pe.wakeup_events = 1;
/* Tracepoint attach should work */
fd = perf_event_open(&pe, 0, -1, -1, 0);
ASSERT_NE(-1, fd);
perf_page = mmap(NULL, page_size * 2, PROT_READ, MAP_SHARED, fd, 0);
ASSERT_NE(MAP_FAILED, perf_page);
/* Status should be updated */
ASSERT_EQ(1 << reg.enable_bit, self->check);
event.index = reg.write_index;
event.field1 = 0xc001;
event.field2 = 0xc01a;
/* Ensure write shows up at correct offset */
ASSERT_NE(-1, write(self->data_fd, &event, sizeof(event)));
val = (void *)(((char *)perf_page) + perf_page->data_offset);
ASSERT_EQ(PERF_RECORD_SAMPLE, *val);
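/*
 * Raw sample layout: struct perf_event_header (type, then misc+size)
 * takes two u32 words and the u32 raw data size is the third, hence
 * the skip of three words before applying the field offset taken
 * from the trace event format file.
 */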
/* Skip over header and size, move to offset */
val += 3;
val = (void *)((char *)val + offset);
/* Ensure correct */
ASSERT_EQ(event.field1, *val++);
ASSERT_EQ(event.field2, *val++);
munmap(perf_page, page_size * 2);
close(fd);
/* Status should be updated */
ASSERT_EQ(0, self->check);
}
TEST_F(user, perf_empty_events) {
struct perf_event_attr pe = {0};
struct user_reg reg = {0};
struct perf_event_mmap_page *perf_page;
int page_size = sysconf(_SC_PAGESIZE);
int id, fd;
__u32 *val;
reg.size = sizeof(reg);
reg.name_args = (__u64)"__test_event";
reg.enable_bit = 31;
reg.enable_addr = (__u64)&self->check;
reg.enable_size = sizeof(self->check);
/* Register should work */
ASSERT_EQ(0, ioctl(self->data_fd, DIAG_IOCSREG, &reg));
ASSERT_EQ(0, reg.write_index);
ASSERT_EQ(0, self->check);
/* Id should be there */
id = get_id();
ASSERT_NE(-1, id);
pe.type = PERF_TYPE_TRACEPOINT;
pe.size = sizeof(pe);
pe.config = id;
pe.sample_type = PERF_SAMPLE_RAW;
pe.sample_period = 1;
pe.wakeup_events = 1;
/* Tracepoint attach should work */
fd = perf_event_open(&pe, 0, -1, -1, 0);
ASSERT_NE(-1, fd);
perf_page = mmap(NULL, page_size * 2, PROT_READ, MAP_SHARED, fd, 0);
ASSERT_NE(MAP_FAILED, perf_page);
/* Status should be updated */
ASSERT_EQ(1 << reg.enable_bit, self->check);
/* Ensure write shows up at correct offset */
ASSERT_NE(-1, write(self->data_fd, &reg.write_index,
sizeof(reg.write_index)));
val = (void *)(((char *)perf_page) + perf_page->data_offset);
ASSERT_EQ(PERF_RECORD_SAMPLE, *val);
munmap(perf_page, page_size * 2);
close(fd);
/* Status should be updated */
ASSERT_EQ(0, self->check);
}
int main(int argc, char **argv)
{
return test_harness_run(argc, argv);
}
| linux-master | tools/testing/selftests/user_events/perf_test.c |
// SPDX-License-Identifier: GPL-2.0
/*
* User Events FTrace Test Program
*
* Copyright (c) 2021 Beau Belgrave <[email protected]>
*/
#include <errno.h>
#include <linux/user_events.h>
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/uio.h>
#include <unistd.h>
#include "../kselftest_harness.h"
#include "user_events_selftests.h"
const char *data_file = "/sys/kernel/tracing/user_events_data";
const char *status_file = "/sys/kernel/tracing/user_events_status";
const char *enable_file = "/sys/kernel/tracing/events/user_events/__test_event/enable";
const char *trace_file = "/sys/kernel/tracing/trace";
const char *fmt_file = "/sys/kernel/tracing/events/user_events/__test_event/format";
static int trace_bytes(void)
{
int fd = open(trace_file, O_RDONLY);
char buf[256];
int bytes = 0, got;
if (fd == -1)
return -1;
while (true) {
got = read(fd, buf, sizeof(buf));
if (got == -1)
return -1;
if (got == 0)
break;
bytes += got;
}
close(fd);
return bytes;
}
static int skip_until_empty_line(FILE *fp)
{
int c, last = 0;
while (true) {
c = getc(fp);
if (c == EOF)
break;
if (last == '\n' && c == '\n')
return 0;
last = c;
}
return -1;
}
static int get_print_fmt(char *buffer, int len)
{
FILE *fp = fopen(fmt_file, "r");
char *newline;
if (!fp)
return -1;
/* Read until empty line (Skip Common) */
if (skip_until_empty_line(fp) < 0)
goto err;
/* Read until empty line (Skip Properties) */
if (skip_until_empty_line(fp) < 0)
goto err;
/* Read in print_fmt: */
if (fgets(buffer, len, fp) == NULL)
goto err;
newline = strchr(buffer, '\n');
if (newline)
*newline = '\0';
fclose(fp);
return 0;
err:
fclose(fp);
return -1;
}
static bool wait_for_delete(void)
{
int i;
for (i = 0; i < 1000; ++i) {
int fd = open(enable_file, O_RDONLY);
if (fd == -1)
return true;
close(fd);
usleep(1000);
}
return false;
}
static int clear(int *check)
{
struct user_unreg unreg = {0};
int fd;
unreg.size = sizeof(unreg);
unreg.disable_bit = 31;
unreg.disable_addr = (__u64)check;
fd = open(data_file, O_RDWR);
if (fd == -1)
return -1;
if (ioctl(fd, DIAG_IOCSUNREG, &unreg) == -1)
if (errno != ENOENT)
goto fail;
if (ioctl(fd, DIAG_IOCSDEL, "__test_event") == -1) {
if (errno == EBUSY) {
if (!wait_for_delete())
goto fail;
} else if (errno != ENOENT)
goto fail;
}
close(fd);
return 0;
fail:
close(fd);
return -1;
}
static int check_print_fmt(const char *event, const char *expected, int *check)
{
struct user_reg reg = {0};
char print_fmt[256];
int ret;
int fd;
/* Ensure cleared */
ret = clear(check);
if (ret != 0)
return ret;
fd = open(data_file, O_RDWR);
if (fd == -1)
return fd;
reg.size = sizeof(reg);
reg.name_args = (__u64)event;
reg.enable_bit = 31;
reg.enable_addr = (__u64)check;
reg.enable_size = sizeof(*check);
/* Register should work */
ret = ioctl(fd, DIAG_IOCSREG, &reg);
if (ret != 0) {
close(fd);
printf("Reg failed in fmt\n");
return ret;
}
/* Ensure correct print_fmt */
ret = get_print_fmt(print_fmt, sizeof(print_fmt));
close(fd);
if (ret != 0)
return ret;
return strcmp(print_fmt, expected);
}
FIXTURE(user) {
int status_fd;
int data_fd;
int enable_fd;
int check;
};
FIXTURE_SETUP(user) {
USER_EVENT_FIXTURE_SETUP(return);
self->status_fd = open(status_file, O_RDONLY);
ASSERT_NE(-1, self->status_fd);
self->data_fd = open(data_file, O_RDWR);
ASSERT_NE(-1, self->data_fd);
self->enable_fd = -1;
}
FIXTURE_TEARDOWN(user) {
close(self->status_fd);
close(self->data_fd);
if (self->enable_fd != -1) {
write(self->enable_fd, "0", sizeof("0"));
close(self->enable_fd);
}
if (clear(&self->check) != 0)
printf("WARNING: Clear didn't work!\n");
}
TEST_F(user, register_events) {
struct user_reg reg = {0};
struct user_unreg unreg = {0};
reg.size = sizeof(reg);
reg.name_args = (__u64)"__test_event u32 field1; u32 field2";
reg.enable_bit = 31;
reg.enable_addr = (__u64)&self->check;
reg.enable_size = sizeof(self->check);
unreg.size = sizeof(unreg);
unreg.disable_bit = 31;
unreg.disable_addr = (__u64)&self->check;
/* Register should work */
ASSERT_EQ(0, ioctl(self->data_fd, DIAG_IOCSREG, &reg));
ASSERT_EQ(0, reg.write_index);
/* Multiple registers to the same addr + bit should fail */
ASSERT_EQ(-1, ioctl(self->data_fd, DIAG_IOCSREG, &reg));
ASSERT_EQ(EADDRINUSE, errno);
/* Multiple registers to same name should result in same index */
reg.enable_bit = 30;
ASSERT_EQ(0, ioctl(self->data_fd, DIAG_IOCSREG, &reg));
ASSERT_EQ(0, reg.write_index);
/* Multiple registers to same name but different args should fail */
reg.enable_bit = 29;
reg.name_args = (__u64)"__test_event u32 field1;";
ASSERT_EQ(-1, ioctl(self->data_fd, DIAG_IOCSREG, &reg));
ASSERT_EQ(EADDRINUSE, errno);
/* Ensure disabled */
self->enable_fd = open(enable_file, O_RDWR);
ASSERT_NE(-1, self->enable_fd);
ASSERT_NE(-1, write(self->enable_fd, "0", sizeof("0")))
/* Enable event and ensure bits updated in status */
ASSERT_NE(-1, write(self->enable_fd, "1", sizeof("1")))
ASSERT_EQ(1 << reg.enable_bit, self->check);
/* Disable event and ensure bits updated in status */
ASSERT_NE(-1, write(self->enable_fd, "0", sizeof("0")))
ASSERT_EQ(0, self->check);
/* File still open should return -EBUSY for delete */
ASSERT_EQ(-1, ioctl(self->data_fd, DIAG_IOCSDEL, "__test_event"));
ASSERT_EQ(EBUSY, errno);
/* Unregister */
ASSERT_EQ(0, ioctl(self->data_fd, DIAG_IOCSUNREG, &unreg));
unreg.disable_bit = 30;
ASSERT_EQ(0, ioctl(self->data_fd, DIAG_IOCSUNREG, &unreg));
/* Delete should have been auto-done after close and unregister */
close(self->data_fd);
ASSERT_EQ(true, wait_for_delete());
}
TEST_F(user, write_events) {
struct user_reg reg = {0};
struct iovec io[3];
__u32 field1, field2;
int before = 0, after = 0;
reg.size = sizeof(reg);
reg.name_args = (__u64)"__test_event u32 field1; u32 field2";
reg.enable_bit = 31;
reg.enable_addr = (__u64)&self->check;
reg.enable_size = sizeof(self->check);
field1 = 1;
field2 = 2;
io[0].iov_base = &reg.write_index;
io[0].iov_len = sizeof(reg.write_index);
io[1].iov_base = &field1;
io[1].iov_len = sizeof(field1);
io[2].iov_base = &field2;
io[2].iov_len = sizeof(field2);
/* Register should work */
ASSERT_EQ(0, ioctl(self->data_fd, DIAG_IOCSREG, &reg));
ASSERT_EQ(0, reg.write_index);
ASSERT_EQ(0, self->check);
/* Write should fail on invalid slot with ENOENT */
io[0].iov_base = &field2;
io[0].iov_len = sizeof(field2);
ASSERT_EQ(-1, writev(self->data_fd, (const struct iovec *)io, 3));
ASSERT_EQ(ENOENT, errno);
io[0].iov_base = &reg.write_index;
io[0].iov_len = sizeof(reg.write_index);
/* Write should return -EBADF when event is not enabled */
ASSERT_EQ(-1, writev(self->data_fd, (const struct iovec *)io, 3));
ASSERT_EQ(EBADF, errno);
/* Enable event */
self->enable_fd = open(enable_file, O_RDWR);
ASSERT_NE(-1, write(self->enable_fd, "1", sizeof("1")))
/* Event should now be enabled */
ASSERT_NE(1 << reg.enable_bit, self->check);
/* Write should make it out to ftrace buffers */
before = trace_bytes();
ASSERT_NE(-1, writev(self->data_fd, (const struct iovec *)io, 3));
after = trace_bytes();
ASSERT_GT(after, before);
/* Negative index should fail with EINVAL */
reg.write_index = -1;
ASSERT_EQ(-1, writev(self->data_fd, (const struct iovec *)io, 3));
ASSERT_EQ(EINVAL, errno);
}
TEST_F(user, write_empty_events) {
struct user_reg reg = {0};
struct iovec io[1];
int before = 0, after = 0;
reg.size = sizeof(reg);
reg.name_args = (__u64)"__test_event";
reg.enable_bit = 31;
reg.enable_addr = (__u64)&self->check;
reg.enable_size = sizeof(self->check);
io[0].iov_base = &reg.write_index;
io[0].iov_len = sizeof(reg.write_index);
/* Register should work */
ASSERT_EQ(0, ioctl(self->data_fd, DIAG_IOCSREG, &reg));
ASSERT_EQ(0, reg.write_index);
ASSERT_EQ(0, self->check);
/* Enable event */
self->enable_fd = open(enable_file, O_RDWR);
ASSERT_NE(-1, write(self->enable_fd, "1", sizeof("1")))
/* Event should now be enabled */
ASSERT_EQ(1 << reg.enable_bit, self->check);
/* Write should make it out to ftrace buffers */
before = trace_bytes();
ASSERT_NE(-1, writev(self->data_fd, (const struct iovec *)io, 1));
after = trace_bytes();
ASSERT_GT(after, before);
}
TEST_F(user, write_fault) {
struct user_reg reg = {0};
struct iovec io[2];
int l = sizeof(__u64);
void *anon;
reg.size = sizeof(reg);
reg.name_args = (__u64)"__test_event u64 anon";
reg.enable_bit = 31;
reg.enable_addr = (__u64)&self->check;
reg.enable_size = sizeof(self->check);
anon = mmap(NULL, l, PROT_READ, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
ASSERT_NE(MAP_FAILED, anon);
io[0].iov_base = &reg.write_index;
io[0].iov_len = sizeof(reg.write_index);
io[1].iov_base = anon;
io[1].iov_len = l;
/* Register should work */
ASSERT_EQ(0, ioctl(self->data_fd, DIAG_IOCSREG, &reg));
ASSERT_EQ(0, reg.write_index);
/* Enable event */
self->enable_fd = open(enable_file, O_RDWR);
ASSERT_NE(-1, write(self->enable_fd, "1", sizeof("1")))
/* Write should work normally */
ASSERT_NE(-1, writev(self->data_fd, (const struct iovec *)io, 2));
/* Faulted data should zero fill and work */
ASSERT_EQ(0, madvise(anon, l, MADV_DONTNEED));
ASSERT_NE(-1, writev(self->data_fd, (const struct iovec *)io, 2));
ASSERT_EQ(0, munmap(anon, l));
}
TEST_F(user, write_validator) {
struct user_reg reg = {0};
struct iovec io[3];
int loc, bytes;
char data[8];
int before = 0, after = 0;
reg.size = sizeof(reg);
reg.name_args = (__u64)"__test_event __rel_loc char[] data";
reg.enable_bit = 31;
reg.enable_addr = (__u64)&self->check;
reg.enable_size = sizeof(self->check);
/* Register should work */
ASSERT_EQ(0, ioctl(self->data_fd, DIAG_IOCSREG, &reg));
ASSERT_EQ(0, reg.write_index);
ASSERT_EQ(0, self->check);
io[0].iov_base = &reg.write_index;
io[0].iov_len = sizeof(reg.write_index);
io[1].iov_base = &loc;
io[1].iov_len = sizeof(loc);
io[2].iov_base = data;
bytes = snprintf(data, sizeof(data), "Test") + 1;
io[2].iov_len = bytes;
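/*
 * DYN_LOC() packs the payload length into the upper 16 bits and the
 * relative offset into the lower 16 bits; the kernel validator checks
 * both against the bytes actually written below.
 */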
/* Undersized write should fail */
ASSERT_EQ(-1, writev(self->data_fd, (const struct iovec *)io, 1));
ASSERT_EQ(EINVAL, errno);
/* Enable event */
self->enable_fd = open(enable_file, O_RDWR);
ASSERT_NE(-1, write(self->enable_fd, "1", sizeof("1")))
/* Event should now be enabled */
ASSERT_EQ(1 << reg.enable_bit, self->check);
/* Full in-bounds write should work */
before = trace_bytes();
loc = DYN_LOC(0, bytes);
ASSERT_NE(-1, writev(self->data_fd, (const struct iovec *)io, 3));
after = trace_bytes();
ASSERT_GT(after, before);
/* Out of bounds write should fault (offset way out) */
loc = DYN_LOC(1024, bytes);
ASSERT_EQ(-1, writev(self->data_fd, (const struct iovec *)io, 3));
ASSERT_EQ(EFAULT, errno);
/* Out of bounds write should fault (offset 1 byte out) */
loc = DYN_LOC(1, bytes);
ASSERT_EQ(-1, writev(self->data_fd, (const struct iovec *)io, 3));
ASSERT_EQ(EFAULT, errno);
/* Out of bounds write should fault (size way out) */
loc = DYN_LOC(0, bytes + 1024);
ASSERT_EQ(-1, writev(self->data_fd, (const struct iovec *)io, 3));
ASSERT_EQ(EFAULT, errno);
/* Out of bounds write should fault (size 1 byte out) */
loc = DYN_LOC(0, bytes + 1);
ASSERT_EQ(-1, writev(self->data_fd, (const struct iovec *)io, 3));
ASSERT_EQ(EFAULT, errno);
/* Data without a NUL terminator should fault */
memset(data, 'A', sizeof(data));
loc = DYN_LOC(0, bytes);
ASSERT_EQ(-1, writev(self->data_fd, (const struct iovec *)io, 3));
ASSERT_EQ(EFAULT, errno);
}
TEST_F(user, print_fmt) {
int ret;
ret = check_print_fmt("__test_event __rel_loc char[] data",
"print fmt: \"data=%s\", __get_rel_str(data)",
&self->check);
ASSERT_EQ(0, ret);
ret = check_print_fmt("__test_event __data_loc char[] data",
"print fmt: \"data=%s\", __get_str(data)",
&self->check);
ASSERT_EQ(0, ret);
ret = check_print_fmt("__test_event s64 data",
"print fmt: \"data=%lld\", REC->data",
&self->check);
ASSERT_EQ(0, ret);
ret = check_print_fmt("__test_event u64 data",
"print fmt: \"data=%llu\", REC->data",
&self->check);
ASSERT_EQ(0, ret);
ret = check_print_fmt("__test_event s32 data",
"print fmt: \"data=%d\", REC->data",
&self->check);
ASSERT_EQ(0, ret);
ret = check_print_fmt("__test_event u32 data",
"print fmt: \"data=%u\", REC->data",
&self->check);
ASSERT_EQ(0, ret);
ret = check_print_fmt("__test_event int data",
"print fmt: \"data=%d\", REC->data",
&self->check);
ASSERT_EQ(0, ret);
ret = check_print_fmt("__test_event unsigned int data",
"print fmt: \"data=%u\", REC->data",
&self->check);
ASSERT_EQ(0, ret);
ret = check_print_fmt("__test_event s16 data",
"print fmt: \"data=%d\", REC->data",
&self->check);
ASSERT_EQ(0, ret);
ret = check_print_fmt("__test_event u16 data",
"print fmt: \"data=%u\", REC->data",
&self->check);
ASSERT_EQ(0, ret);
ret = check_print_fmt("__test_event short data",
"print fmt: \"data=%d\", REC->data",
&self->check);
ASSERT_EQ(0, ret);
ret = check_print_fmt("__test_event unsigned short data",
"print fmt: \"data=%u\", REC->data",
&self->check);
ASSERT_EQ(0, ret);
ret = check_print_fmt("__test_event s8 data",
"print fmt: \"data=%d\", REC->data",
&self->check);
ASSERT_EQ(0, ret);
ret = check_print_fmt("__test_event u8 data",
"print fmt: \"data=%u\", REC->data",
&self->check);
ASSERT_EQ(0, ret);
ret = check_print_fmt("__test_event char data",
"print fmt: \"data=%d\", REC->data",
&self->check);
ASSERT_EQ(0, ret);
ret = check_print_fmt("__test_event unsigned char data",
"print fmt: \"data=%u\", REC->data",
&self->check);
ASSERT_EQ(0, ret);
ret = check_print_fmt("__test_event char[4] data",
"print fmt: \"data=%s\", REC->data",
&self->check);
ASSERT_EQ(0, ret);
}
int main(int argc, char **argv)
{
return test_harness_run(argc, argv);
}
| linux-master | tools/testing/selftests/user_events/ftrace_test.c |
// SPDX-License-Identifier: GPL-2.0
/*
* User Events ABI Test Program
*
* Copyright (c) 2022 Beau Belgrave <[email protected]>
*/
#define _GNU_SOURCE
#include <sched.h>
#include <errno.h>
#include <linux/user_events.h>
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/wait.h>
#include <unistd.h>
#include <asm/unistd.h>
#include "../kselftest_harness.h"
#include "user_events_selftests.h"
const char *data_file = "/sys/kernel/tracing/user_events_data";
const char *enable_file = "/sys/kernel/tracing/events/user_events/__abi_event/enable";
static int change_event(bool enable)
{
int fd = open(enable_file, O_RDWR);
int ret;
if (fd < 0)
return -1;
if (enable)
ret = write(fd, "1", 1);
else
ret = write(fd, "0", 1);
close(fd);
if (ret == 1)
ret = 0;
else
ret = -1;
return ret;
}
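/*
 * Register __abi_event and ask the kernel to mirror its enabled state
 * into *enable by setting/clearing the given bit whenever tracing is
 * toggled.
 */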
static int reg_enable(long *enable, int size, int bit)
{
struct user_reg reg = {0};
int fd = open(data_file, O_RDWR);
int ret;
if (fd < 0)
return -1;
reg.size = sizeof(reg);
reg.name_args = (__u64)"__abi_event";
reg.enable_bit = bit;
reg.enable_addr = (__u64)enable;
reg.enable_size = size;
ret = ioctl(fd, DIAG_IOCSREG, &reg);
close(fd);
return ret;
}
static int reg_disable(long *enable, int bit)
{
struct user_unreg reg = {0};
int fd = open(data_file, O_RDWR);
int ret;
if (fd < 0)
return -1;
reg.size = sizeof(reg);
reg.disable_bit = bit;
reg.disable_addr = (__u64)enable;
ret = ioctl(fd, DIAG_IOCSUNREG, &reg);
close(fd);
return ret;
}
FIXTURE(user) {
long check;
};
FIXTURE_SETUP(user) {
USER_EVENT_FIXTURE_SETUP(return);
change_event(false);
self->check = 0;
}
FIXTURE_TEARDOWN(user) {
}
TEST_F(user, enablement) {
/* Changes should be reflected immediately */
ASSERT_EQ(0, self->check);
ASSERT_EQ(0, reg_enable(&self->check, sizeof(int), 0));
ASSERT_EQ(0, change_event(true));
ASSERT_EQ(1, self->check);
ASSERT_EQ(0, change_event(false));
ASSERT_EQ(0, self->check);
/* Ensure kernel clears bit after disable */
ASSERT_EQ(0, change_event(true));
ASSERT_EQ(1, self->check);
ASSERT_EQ(0, reg_disable(&self->check, 0));
ASSERT_EQ(0, self->check);
/* Ensure doesn't change after unreg */
ASSERT_EQ(0, change_event(true));
ASSERT_EQ(0, self->check);
ASSERT_EQ(0, change_event(false));
}
TEST_F(user, bit_sizes) {
/* Allow 0-31 bits for 32-bit */
ASSERT_EQ(0, reg_enable(&self->check, sizeof(int), 0));
ASSERT_EQ(0, reg_enable(&self->check, sizeof(int), 31));
ASSERT_NE(0, reg_enable(&self->check, sizeof(int), 32));
ASSERT_EQ(0, reg_disable(&self->check, 0));
ASSERT_EQ(0, reg_disable(&self->check, 31));
#if BITS_PER_LONG == 64
/* Allow 0-63 bits for 64-bit */
ASSERT_EQ(0, reg_enable(&self->check, sizeof(long), 63));
ASSERT_NE(0, reg_enable(&self->check, sizeof(long), 64));
ASSERT_EQ(0, reg_disable(&self->check, 63));
#endif
/* Disallowed sizes (everything beside 4 and 8) */
ASSERT_NE(0, reg_enable(&self->check, 1, 0));
ASSERT_NE(0, reg_enable(&self->check, 2, 0));
ASSERT_NE(0, reg_enable(&self->check, 3, 0));
ASSERT_NE(0, reg_enable(&self->check, 5, 0));
ASSERT_NE(0, reg_enable(&self->check, 6, 0));
ASSERT_NE(0, reg_enable(&self->check, 7, 0));
ASSERT_NE(0, reg_enable(&self->check, 9, 0));
ASSERT_NE(0, reg_enable(&self->check, 128, 0));
}
TEST_F(user, forks) {
int i;
/* Ensure COW pages get updated after fork */
ASSERT_EQ(0, reg_enable(&self->check, sizeof(int), 0));
ASSERT_EQ(0, self->check);
if (fork() == 0) {
/* Force COW */
self->check = 0;
/* Up to 1 sec for enablement */
for (i = 0; i < 10; ++i) {
usleep(100000);
if (self->check)
exit(0);
}
exit(1);
}
/* Allow generous time for COW, then enable */
usleep(100000);
ASSERT_EQ(0, change_event(true));
ASSERT_NE(-1, wait(&i));
ASSERT_EQ(0, WEXITSTATUS(i));
/* Ensure child doesn't disable parent */
if (fork() == 0)
exit(reg_disable(&self->check, 0));
ASSERT_NE(-1, wait(&i));
ASSERT_EQ(0, WEXITSTATUS(i));
ASSERT_EQ(1, self->check);
ASSERT_EQ(0, change_event(false));
ASSERT_EQ(0, self->check);
}
/* Waits up to 1 sec for enablement */
static int clone_check(void *check)
{
int i;
for (i = 0; i < 10; ++i) {
usleep(100000);
if (*(long *)check)
return 0;
}
return 1;
}
TEST_F(user, clones) {
int i, stack_size = 4096;
void *stack = mmap(NULL, stack_size, PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS | MAP_STACK,
-1, 0);
ASSERT_NE(MAP_FAILED, stack);
ASSERT_EQ(0, reg_enable(&self->check, sizeof(int), 0));
ASSERT_EQ(0, self->check);
/* Shared VM should see enablements */
ASSERT_NE(-1, clone(&clone_check, stack + stack_size,
CLONE_VM | SIGCHLD, &self->check));
ASSERT_EQ(0, change_event(true));
ASSERT_NE(-1, wait(&i));
ASSERT_EQ(0, WEXITSTATUS(i));
munmap(stack, stack_size);
ASSERT_EQ(0, change_event(false));
}
int main(int argc, char **argv)
{
return test_harness_run(argc, argv);
}
| linux-master | tools/testing/selftests/user_events/abi_test.c |
// SPDX-License-Identifier: GPL-2.0
// test ir decoder
//
// Copyright (C) 2018 Sean Young <[email protected]>
// When sending LIRC_MODE_SCANCODE, the IR will be encoded. rc-loopback
// will send this IR to the receiver side, where we try to read the decoded
// IR. Decoding happens in a separate kernel thread, so we will need to
// wait until that is scheduled, hence we use poll to check for read
// readiness.
#include <linux/lirc.h>
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <string.h>
#include <unistd.h>
#include <poll.h>
#include <time.h>
#include <sys/types.h>
#include <sys/ioctl.h>
#include <dirent.h>
#include <sys/stat.h>
#include <fcntl.h>
#include "../kselftest.h"
#define TEST_SCANCODES 10
#define SYSFS_PATH_MAX 256
#define DNAME_PATH_MAX 256
/*
* Support ancient lirc.h which does not have these values. Can be removed
* once RHEL 8 is no longer a relevant testing platform.
*/
#if RC_PROTO_MAX < 26
#define RC_PROTO_RCMM12 24
#define RC_PROTO_RCMM24 25
#define RC_PROTO_RCMM32 26
#endif
static const struct {
enum rc_proto proto;
const char *name;
unsigned int mask;
const char *decoder;
} protocols[] = {
{ RC_PROTO_RC5, "rc-5", 0x1f7f, "rc-5" },
{ RC_PROTO_RC5X_20, "rc-5x-20", 0x1f7f3f, "rc-5" },
{ RC_PROTO_RC5_SZ, "rc-5-sz", 0x2fff, "rc-5-sz" },
{ RC_PROTO_JVC, "jvc", 0xffff, "jvc" },
{ RC_PROTO_SONY12, "sony-12", 0x1f007f, "sony" },
{ RC_PROTO_SONY15, "sony-15", 0xff007f, "sony" },
{ RC_PROTO_SONY20, "sony-20", 0x1fff7f, "sony" },
{ RC_PROTO_NEC, "nec", 0xffff, "nec" },
{ RC_PROTO_NECX, "nec-x", 0xffffff, "nec" },
{ RC_PROTO_NEC32, "nec-32", 0xffffffff, "nec" },
{ RC_PROTO_SANYO, "sanyo", 0x1fffff, "sanyo" },
{ RC_PROTO_RC6_0, "rc-6-0", 0xffff, "rc-6" },
{ RC_PROTO_RC6_6A_20, "rc-6-6a-20", 0xfffff, "rc-6" },
{ RC_PROTO_RC6_6A_24, "rc-6-6a-24", 0xffffff, "rc-6" },
{ RC_PROTO_RC6_6A_32, "rc-6-6a-32", 0xffffffff, "rc-6" },
{ RC_PROTO_RC6_MCE, "rc-6-mce", 0x00007fff, "rc-6" },
{ RC_PROTO_SHARP, "sharp", 0x1fff, "sharp" },
{ RC_PROTO_IMON, "imon", 0x7fffffff, "imon" },
{ RC_PROTO_RCMM12, "rcmm-12", 0x00000fff, "rc-mm" },
{ RC_PROTO_RCMM24, "rcmm-24", 0x00ffffff, "rc-mm" },
{ RC_PROTO_RCMM32, "rcmm-32", 0xffffffff, "rc-mm" },
};
int lirc_open(const char *rc)
{
struct dirent *dent;
char buf[SYSFS_PATH_MAX + DNAME_PATH_MAX];
DIR *d;
int fd;
snprintf(buf, sizeof(buf), "/sys/class/rc/%s", rc);
d = opendir(buf);
if (!d)
ksft_exit_fail_msg("cannot open %s: %m\n", buf);
while ((dent = readdir(d)) != NULL) {
if (!strncmp(dent->d_name, "lirc", 4)) {
snprintf(buf, sizeof(buf), "/dev/%s", dent->d_name);
break;
}
}
if (!dent)
ksft_exit_skip("cannot find lirc device for %s\n", rc);
closedir(d);
fd = open(buf, O_RDWR | O_NONBLOCK);
if (fd == -1)
ksft_exit_fail_msg("cannot open: %s: %m\n", buf);
return fd;
}
int main(int argc, char **argv)
{
unsigned int mode;
char buf[100];
int rlircfd, wlircfd, protocolfd, i, n;
srand(time(NULL));
if (argc != 3)
ksft_exit_fail_msg("Usage: %s <write rcN> <read rcN>\n",
argv[0]);
rlircfd = lirc_open(argv[2]);
mode = LIRC_MODE_SCANCODE;
if (ioctl(rlircfd, LIRC_SET_REC_MODE, &mode))
ksft_exit_fail_msg("failed to set scancode rec mode %s: %m\n",
argv[2]);
wlircfd = lirc_open(argv[1]);
if (ioctl(wlircfd, LIRC_SET_SEND_MODE, &mode))
ksft_exit_fail_msg("failed to set scancode send mode %s: %m\n",
argv[1]);
snprintf(buf, sizeof(buf), "/sys/class/rc/%s/protocols", argv[2]);
protocolfd = open(buf, O_WRONLY);
if (protocolfd == -1)
ksft_exit_fail_msg("failed to open %s: %m\n", buf);
printf("Sending IR on %s and receiving IR on %s.\n", argv[1], argv[2]);
for (i = 0; i < ARRAY_SIZE(protocols); i++) {
if (write(protocolfd, protocols[i].decoder,
strlen(protocols[i].decoder)) == -1)
ksft_exit_fail_msg("failed to set write decoder\n");
printf("Testing protocol %s for decoder %s (%d/%d)...\n",
protocols[i].name, protocols[i].decoder,
i + 1, (int)ARRAY_SIZE(protocols));
for (n = 0; n < TEST_SCANCODES; n++) {
unsigned int scancode = rand() & protocols[i].mask;
unsigned int rc_proto = protocols[i].proto;
if (rc_proto == RC_PROTO_RC6_MCE)
scancode |= 0x800f0000;
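/*
 * Skip scancodes that cannot round-trip: NECX/NEC32 codes whose bytes
 * mimic the checksum of a shorter NEC variant decode as that variant,
 * and some RCMM32 bit patterns decode as a different RC-MM mode.
 */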
if (rc_proto == RC_PROTO_NECX &&
(((scancode >> 16) ^ ~(scancode >> 8)) & 0xff) == 0)
continue;
if (rc_proto == RC_PROTO_NEC32 &&
(((scancode >> 8) ^ ~scancode) & 0xff) == 0)
continue;
if (rc_proto == RC_PROTO_RCMM32 &&
(scancode & 0x000c0000) != 0x000c0000 &&
scancode & 0x00008000)
continue;
struct lirc_scancode lsc = {
.rc_proto = rc_proto,
.scancode = scancode
};
printf("Testing scancode:%x\n", scancode);
while (write(wlircfd, &lsc, sizeof(lsc)) < 0) {
if (errno == EINTR)
continue;
ksft_exit_fail_msg("failed to send ir: %m\n");
}
struct pollfd pfd = { .fd = rlircfd, .events = POLLIN };
struct lirc_scancode lsc2;
poll(&pfd, 1, 1000);
bool decoded = true;
while (read(rlircfd, &lsc2, sizeof(lsc2)) < 0) {
if (errno == EINTR)
continue;
ksft_test_result_error("no scancode decoded: %m\n");
decoded = false;
break;
}
if (!decoded)
continue;
if (lsc.rc_proto != lsc2.rc_proto)
ksft_test_result_error("decoded protocol is different: %d\n",
lsc2.rc_proto);
else if (lsc.scancode != lsc2.scancode)
ksft_test_result_error("decoded scancode is different: %llx\n",
lsc2.scancode);
else
ksft_inc_pass_cnt();
}
printf("OK\n");
}
close(rlircfd);
close(wlircfd);
close(protocolfd);
if (ksft_get_fail_cnt() > 0)
ksft_exit_fail();
else
ksft_exit_pass();
return 0;
}
| linux-master | tools/testing/selftests/ir/ir_loopback.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2013 Red Hat, Inc., Frederic Weisbecker <[email protected]>
*
* Selftests for a few posix timers interface.
*
* Kernel loop code stolen from Steven Rostedt <[email protected]>
*/
#include <sys/time.h>
#include <stdio.h>
#include <stdlib.h>
#include <signal.h>
#include <unistd.h>
#include <time.h>
#include <pthread.h>
#include "../kselftest.h"
#define DELAY 2
#define USECS_PER_SEC 1000000
static volatile int done;
/* Busy loop in userspace to elapse ITIMER_VIRTUAL */
static void user_loop(void)
{
while (!done);
}
/*
* Try to spend as much time as possible in kernelspace
* to elapse ITIMER_PROF.
*/
static void kernel_loop(void)
{
void *addr = sbrk(0);
int err = 0;
while (!done && !err) {
err = brk(addr + 4096);
err |= brk(addr);
}
}
/*
* Sleep until ITIMER_REAL expiration.
*/
static void idle_loop(void)
{
pause();
}
static void sig_handler(int nr)
{
done = 1;
}
/*
* Check the expected timer expiration matches the GTOD elapsed delta since
* we armed the timer. Keep a 0.5 sec error margin due to various jitter.
*/
static int check_diff(struct timeval start, struct timeval end)
{
long long diff;
diff = end.tv_usec - start.tv_usec;
diff += (end.tv_sec - start.tv_sec) * USECS_PER_SEC;
if (llabs(diff - DELAY * USECS_PER_SEC) > USECS_PER_SEC / 2) {
printf("Diff too high: %lld..", diff);
return -1;
}
return 0;
}
static int check_itimer(int which)
{
int err;
struct timeval start, end;
struct itimerval val = {
.it_value.tv_sec = DELAY,
};
printf("Check itimer ");
if (which == ITIMER_VIRTUAL)
printf("virtual... ");
else if (which == ITIMER_PROF)
printf("prof... ");
else if (which == ITIMER_REAL)
printf("real... ");
fflush(stdout);
done = 0;
if (which == ITIMER_VIRTUAL)
signal(SIGVTALRM, sig_handler);
else if (which == ITIMER_PROF)
signal(SIGPROF, sig_handler);
else if (which == ITIMER_REAL)
signal(SIGALRM, sig_handler);
err = gettimeofday(&start, NULL);
if (err < 0) {
perror("Can't call gettimeofday()\n");
return -1;
}
err = setitimer(which, &val, NULL);
if (err < 0) {
perror("Can't set timer\n");
return -1;
}
if (which == ITIMER_VIRTUAL)
user_loop();
else if (which == ITIMER_PROF)
kernel_loop();
else if (which == ITIMER_REAL)
idle_loop();
err = gettimeofday(&end, NULL);
if (err < 0) {
perror("Can't call gettimeofday()\n");
return -1;
}
if (!check_diff(start, end))
printf("[OK]\n");
else
printf("[FAIL]\n");
return 0;
}
static int check_timer_create(int which)
{
int err;
timer_t id;
struct timeval start, end;
struct itimerspec val = {
.it_value.tv_sec = DELAY,
};
printf("Check timer_create() ");
if (which == CLOCK_THREAD_CPUTIME_ID) {
printf("per thread... ");
} else if (which == CLOCK_PROCESS_CPUTIME_ID) {
printf("per process... ");
}
fflush(stdout);
done = 0;
err = timer_create(which, NULL, &id);
if (err < 0) {
perror("Can't create timer\n");
return -1;
}
signal(SIGALRM, sig_handler);
err = gettimeofday(&start, NULL);
if (err < 0) {
perror("Can't call gettimeofday()\n");
return -1;
}
err = timer_settime(id, 0, &val, NULL);
if (err < 0) {
perror("Can't set timer\n");
return -1;
}
user_loop();
err = gettimeofday(&end, NULL);
if (err < 0) {
perror("Can't call gettimeofday()\n");
return -1;
}
if (!check_diff(start, end))
printf("[OK]\n");
else
printf("[FAIL]\n");
return 0;
}
int remain;
__thread int got_signal;
static void *distribution_thread(void *arg)
{
while (__atomic_load_n(&remain, __ATOMIC_RELAXED));
return NULL;
}
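/* The thread-local got_signal flag makes each thread decrement remain only once */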
static void distribution_handler(int nr)
{
if (!__atomic_exchange_n(&got_signal, 1, __ATOMIC_RELAXED))
__atomic_fetch_sub(&remain, 1, __ATOMIC_RELAXED);
}
/*
* Test that all running threads _eventually_ receive CLOCK_PROCESS_CPUTIME_ID
* timer signals. This primarily tests that the kernel does not favour any one.
*/
static int check_timer_distribution(void)
{
int err, i;
timer_t id;
const int nthreads = 10;
pthread_t threads[nthreads];
struct itimerspec val = {
.it_value.tv_sec = 0,
.it_value.tv_nsec = 1000 * 1000,
.it_interval.tv_sec = 0,
.it_interval.tv_nsec = 1000 * 1000,
};
printf("Check timer_create() per process signal distribution... ");
fflush(stdout);
remain = nthreads + 1; /* worker threads + this thread */
signal(SIGALRM, distribution_handler);
err = timer_create(CLOCK_PROCESS_CPUTIME_ID, NULL, &id);
if (err < 0) {
perror("Can't create timer\n");
return -1;
}
err = timer_settime(id, 0, &val, NULL);
if (err < 0) {
perror("Can't set timer\n");
return -1;
}
for (i = 0; i < nthreads; i++) {
if (pthread_create(&threads[i], NULL, distribution_thread, NULL)) {
perror("Can't create thread\n");
return -1;
}
}
/* Wait for all threads to receive the signal. */
while (__atomic_load_n(&remain, __ATOMIC_RELAXED));
for (i = 0; i < nthreads; i++) {
if (pthread_join(threads[i], NULL)) {
perror("Can't join thread\n");
return -1;
}
}
if (timer_delete(id)) {
perror("Can't delete timer\n");
return -1;
}
printf("[OK]\n");
return 0;
}
int main(int argc, char **argv)
{
printf("Testing posix timers. False negative may happen on CPU execution \n");
printf("based timers if other threads run on the CPU...\n");
if (check_itimer(ITIMER_VIRTUAL) < 0)
return ksft_exit_fail();
if (check_itimer(ITIMER_PROF) < 0)
return ksft_exit_fail();
if (check_itimer(ITIMER_REAL) < 0)
return ksft_exit_fail();
if (check_timer_create(CLOCK_THREAD_CPUTIME_ID) < 0)
return ksft_exit_fail();
/*
* It's unfortunately hard to reliably test a timer expiration
* on parallel multithread cputime. We could arm it to expire
* on DELAY * nr_threads, with nr_threads busy looping, then wait
* the normal DELAY since the time is elapsing nr_threads faster.
* But for that we need to ensure we have real physical free CPUs
* to ensure true parallelism. So test only one thread until we
* find a better solution.
*/
if (check_timer_create(CLOCK_PROCESS_CPUTIME_ID) < 0)
return ksft_exit_fail();
if (check_timer_distribution() < 0)
return ksft_exit_fail();
return ksft_exit_pass();
}
| linux-master | tools/testing/selftests/timers/posix_timers.c |
/* set_timer latency test
* John Stultz ([email protected])
* (C) Copyright Linaro 2014
* Licensed under the GPLv2
*
* This test makes sure the set_timer api is correct
*
* To build:
* $ gcc set-timer-lat.c -o set-timer-lat -lrt
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <errno.h>
#include <stdio.h>
#include <unistd.h>
#include <time.h>
#include <string.h>
#include <signal.h>
#include <stdlib.h>
#include <pthread.h>
#include "../kselftest.h"
#define CLOCK_REALTIME 0
#define CLOCK_MONOTONIC 1
#define CLOCK_PROCESS_CPUTIME_ID 2
#define CLOCK_THREAD_CPUTIME_ID 3
#define CLOCK_MONOTONIC_RAW 4
#define CLOCK_REALTIME_COARSE 5
#define CLOCK_MONOTONIC_COARSE 6
#define CLOCK_BOOTTIME 7
#define CLOCK_REALTIME_ALARM 8
#define CLOCK_BOOTTIME_ALARM 9
#define CLOCK_HWSPECIFIC 10
#define CLOCK_TAI 11
#define NR_CLOCKIDS 12
#define NSEC_PER_SEC 1000000000ULL
#define UNRESONABLE_LATENCY 40000000 /* 40ms in nanosecs */
#define TIMER_SECS 1
int alarmcount;
int clock_id;
struct timespec start_time;
long long max_latency_ns;
int timer_fired_early;
char *clockstring(int clockid)
{
switch (clockid) {
case CLOCK_REALTIME:
return "CLOCK_REALTIME";
case CLOCK_MONOTONIC:
return "CLOCK_MONOTONIC";
case CLOCK_PROCESS_CPUTIME_ID:
return "CLOCK_PROCESS_CPUTIME_ID";
case CLOCK_THREAD_CPUTIME_ID:
return "CLOCK_THREAD_CPUTIME_ID";
case CLOCK_MONOTONIC_RAW:
return "CLOCK_MONOTONIC_RAW";
case CLOCK_REALTIME_COARSE:
return "CLOCK_REALTIME_COARSE";
case CLOCK_MONOTONIC_COARSE:
return "CLOCK_MONOTONIC_COARSE";
case CLOCK_BOOTTIME:
return "CLOCK_BOOTTIME";
case CLOCK_REALTIME_ALARM:
return "CLOCK_REALTIME_ALARM";
case CLOCK_BOOTTIME_ALARM:
return "CLOCK_BOOTTIME_ALARM";
case CLOCK_TAI:
return "CLOCK_TAI";
};
return "UNKNOWN_CLOCKID";
}
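/* Returns the delta (b - a) in nanoseconds; callers pass (earlier, later). */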
long long timespec_sub(struct timespec a, struct timespec b)
{
long long ret = NSEC_PER_SEC * b.tv_sec + b.tv_nsec;
ret -= NSEC_PER_SEC * a.tv_sec + a.tv_nsec;
return ret;
}
void sigalarm(int signo)
{
long long delta_ns;
struct timespec ts;
clock_gettime(clock_id, &ts);
alarmcount++;
delta_ns = timespec_sub(start_time, ts);
delta_ns -= NSEC_PER_SEC * TIMER_SECS * alarmcount;
if (delta_ns < 0)
timer_fired_early = 1;
if (delta_ns > max_latency_ns)
max_latency_ns = delta_ns;
}
void describe_timer(int flags, int interval)
{
printf("%-22s %s %s ",
clockstring(clock_id),
flags ? "ABSTIME":"RELTIME",
interval ? "PERIODIC":"ONE-SHOT");
}
int setup_timer(int clock_id, int flags, int interval, timer_t *tm1)
{
struct sigevent se;
struct itimerspec its1, its2;
int err;
/* Set up timer: */
memset(&se, 0, sizeof(se));
se.sigev_notify = SIGEV_SIGNAL;
se.sigev_signo = SIGRTMAX;
se.sigev_value.sival_int = 0;
max_latency_ns = 0;
alarmcount = 0;
timer_fired_early = 0;
err = timer_create(clock_id, &se, tm1);
if (err) {
if ((clock_id == CLOCK_REALTIME_ALARM) ||
(clock_id == CLOCK_BOOTTIME_ALARM)) {
printf("%-22s %s missing CAP_WAKE_ALARM? : [UNSUPPORTED]\n",
clockstring(clock_id),
flags ? "ABSTIME":"RELTIME");
/* Indicate timer isn't set, so caller doesn't wait */
return 1;
}
printf("%s - timer_create() failed\n", clockstring(clock_id));
return -1;
}
clock_gettime(clock_id, &start_time);
if (flags) {
its1.it_value = start_time;
its1.it_value.tv_sec += TIMER_SECS;
} else {
its1.it_value.tv_sec = TIMER_SECS;
its1.it_value.tv_nsec = 0;
}
its1.it_interval.tv_sec = interval;
its1.it_interval.tv_nsec = 0;
err = timer_settime(*tm1, flags, &its1, &its2);
if (err) {
printf("%s - timer_settime() failed\n", clockstring(clock_id));
return -1;
}
return 0;
}
int check_timer_latency(int flags, int interval)
{
int err = 0;
describe_timer(flags, interval);
printf("timer fired early: %7d : ", timer_fired_early);
if (!timer_fired_early) {
printf("[OK]\n");
} else {
printf("[FAILED]\n");
err = -1;
}
describe_timer(flags, interval);
printf("max latency: %10lld ns : ", max_latency_ns);
if (max_latency_ns < UNRESONABLE_LATENCY) {
printf("[OK]\n");
} else {
printf("[FAILED]\n");
err = -1;
}
return err;
}
int check_alarmcount(int flags, int interval)
{
describe_timer(flags, interval);
printf("count: %19d : ", alarmcount);
if (alarmcount == 1) {
printf("[OK]\n");
return 0;
}
printf("[FAILED]\n");
return -1;
}
int do_timer(int clock_id, int flags)
{
timer_t tm1;
const int interval = TIMER_SECS;
int err;
err = setup_timer(clock_id, flags, interval, &tm1);
/* Unsupported case - return 0 to not fail the test */
if (err)
return err == 1 ? 0 : err;
while (alarmcount < 5)
sleep(1);
timer_delete(tm1);
return check_timer_latency(flags, interval);
}
int do_timer_oneshot(int clock_id, int flags)
{
timer_t tm1;
const int interval = 0;
struct timeval timeout;
int err;
err = setup_timer(clock_id, flags, interval, &tm1);
/* Unsupported case - return 0 to not fail the test */
if (err)
return err == 1 ? 0 : err;
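	/*
	 * Sleep up to 5 seconds in select(). Linux updates the remaining
	 * timeout, so restarting after the timer signal (EINTR) still
	 * bounds the total wait.
	 */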
memset(&timeout, 0, sizeof(timeout));
timeout.tv_sec = 5;
do {
err = select(0, NULL, NULL, NULL, &timeout);
} while (err == -1 && errno == EINTR);
timer_delete(tm1);
err = check_timer_latency(flags, interval);
err |= check_alarmcount(flags, interval);
return err;
}
int main(void)
{
struct sigaction act;
int signum = SIGRTMAX;
int ret = 0;
/* Set up signal handler: */
sigfillset(&act.sa_mask);
act.sa_flags = 0;
act.sa_handler = sigalarm;
sigaction(signum, &act, NULL);
printf("Setting timers for every %i seconds\n", TIMER_SECS);
for (clock_id = 0; clock_id < NR_CLOCKIDS; clock_id++) {
if ((clock_id == CLOCK_PROCESS_CPUTIME_ID) ||
(clock_id == CLOCK_THREAD_CPUTIME_ID) ||
(clock_id == CLOCK_MONOTONIC_RAW) ||
(clock_id == CLOCK_REALTIME_COARSE) ||
(clock_id == CLOCK_MONOTONIC_COARSE) ||
(clock_id == CLOCK_HWSPECIFIC))
continue;
ret |= do_timer(clock_id, TIMER_ABSTIME);
ret |= do_timer(clock_id, 0);
ret |= do_timer_oneshot(clock_id, TIMER_ABSTIME);
ret |= do_timer_oneshot(clock_id, 0);
}
if (ret)
return ksft_exit_fail();
return ksft_exit_pass();
}
| linux-master | tools/testing/selftests/timers/set-timer-lat.c |
/* ADJ_FREQ Skew consistency test
* by: john stultz ([email protected])
* (C) Copyright IBM 2012
* Licensed under the GPLv2
*
* NOTE: This is a meta-test which cranks the ADJ_FREQ knob back
* and forth and watches for consistency problems. Thus this test requires
* that the inconsistency-check tests be present in the same directory it
* is run from.
*
* To build:
* $ gcc skew_consistency.c -o skew_consistency -lrt
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/time.h>
#include <sys/timex.h>
#include <time.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <string.h>
#include <sys/wait.h>
#include "../kselftest.h"
#define NSEC_PER_SEC 1000000000LL
int main(int argc, char **argv)
{
struct timex tx;
int ret, ppm;
pid_t pid;
printf("Running Asynchronous Frequency Changing Tests...\n");
pid = fork();
if (!pid)
return system("./inconsistency-check -c 1 -t 600");
ppm = 500;
ret = 0;
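	/* Flip the frequency between +500 and -500 ppm every 0.5s while the child runs inconsistency-check. */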
while (pid != waitpid(pid, &ret, WNOHANG)) {
ppm = -ppm;
tx.modes = ADJ_FREQUENCY;
tx.freq = ppm << 16;
adjtimex(&tx);
usleep(500000);
}
/* Set things back */
	tx.modes = ADJ_FREQUENCY;
	tx.freq = 0;	/* ADJ_FREQUENCY reads tx.freq, not tx.offset */
adjtimex(&tx);
if (ret) {
printf("[FAILED]\n");
return ksft_exit_fail();
}
printf("[OK]\n");
return ksft_exit_pass();
}
| linux-master | tools/testing/selftests/timers/skew_consistency.c |
/* valid adjtimex test
* by: John Stultz <[email protected]>
* (C) Copyright Linaro 2015
* Licensed under the GPLv2
*
* This test validates adjtimex interface with valid
* and invalid test data.
*
* Usage: valid-adjtimex
*
* To build:
* $ gcc valid-adjtimex.c -o valid-adjtimex -lrt
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <sys/time.h>
#include <sys/timex.h>
#include <string.h>
#include <signal.h>
#include <unistd.h>
#include "../kselftest.h"
#define NSEC_PER_SEC 1000000000LL
#define USEC_PER_SEC 1000000LL
#define ADJ_SETOFFSET 0x0100
#include <sys/syscall.h>
int clock_adjtime(clockid_t id, struct timex *tx)
{
return syscall(__NR_clock_adjtime, id, tx);
}
/* clear NTP time_status & time_state */
int clear_time_state(void)
{
struct timex tx;
int ret;
tx.modes = ADJ_STATUS;
tx.status = 0;
ret = adjtimex(&tx);
return ret;
}
#define NUM_FREQ_VALID 32
#define NUM_FREQ_OUTOFRANGE 4
#define NUM_FREQ_INVALID 2
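/*
 * ADJ_FREQUENCY values are in units of 2^-16 ppm. The kernel limits the
 * adjustment to roughly +/-500 ppm; out-of-range requests are clamped
 * rather than rejected (see the checks below).
 */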
long valid_freq[NUM_FREQ_VALID] = {
-499<<16,
-450<<16,
-400<<16,
-350<<16,
-300<<16,
-250<<16,
-200<<16,
-150<<16,
-100<<16,
-75<<16,
-50<<16,
-25<<16,
-10<<16,
-5<<16,
-1<<16,
-1000,
1<<16,
5<<16,
10<<16,
25<<16,
50<<16,
75<<16,
100<<16,
150<<16,
200<<16,
250<<16,
300<<16,
350<<16,
400<<16,
450<<16,
499<<16,
};
long outofrange_freq[NUM_FREQ_OUTOFRANGE] = {
-1000<<16,
-550<<16,
550<<16,
1000<<16,
};
#ifndef LONG_MAX
#define LONG_MAX (~0UL>>1)
#endif
#ifndef LONG_MIN
#define LONG_MIN (-LONG_MAX - 1)
#endif
long invalid_freq[NUM_FREQ_INVALID] = {
LONG_MAX,
LONG_MIN,
};
int validate_freq(void)
{
struct timex tx;
int ret, pass = 0;
int i;
clear_time_state();
memset(&tx, 0, sizeof(struct timex));
	/* Test valid, out-of-range, and invalid frequency values */
printf("Testing ADJ_FREQ... ");
fflush(stdout);
for (i = 0; i < NUM_FREQ_VALID; i++) {
tx.modes = ADJ_FREQUENCY;
tx.freq = valid_freq[i];
ret = adjtimex(&tx);
if (ret < 0) {
printf("[FAIL]\n");
printf("Error: adjtimex(ADJ_FREQ, %ld - %ld ppm\n",
valid_freq[i], valid_freq[i]>>16);
pass = -1;
goto out;
}
tx.modes = 0;
ret = adjtimex(&tx);
if (tx.freq != valid_freq[i]) {
printf("Warning: freq value %ld not what we set it (%ld)!\n",
tx.freq, valid_freq[i]);
}
}
for (i = 0; i < NUM_FREQ_OUTOFRANGE; i++) {
tx.modes = ADJ_FREQUENCY;
tx.freq = outofrange_freq[i];
ret = adjtimex(&tx);
if (ret < 0) {
printf("[FAIL]\n");
printf("Error: adjtimex(ADJ_FREQ, %ld - %ld ppm\n",
outofrange_freq[i], outofrange_freq[i]>>16);
pass = -1;
goto out;
}
tx.modes = 0;
ret = adjtimex(&tx);
if (tx.freq == outofrange_freq[i]) {
printf("[FAIL]\n");
printf("ERROR: out of range value %ld actually set!\n",
tx.freq);
pass = -1;
goto out;
}
}
if (sizeof(long) == 8) { /* this case only applies to 64bit systems */
for (i = 0; i < NUM_FREQ_INVALID; i++) {
tx.modes = ADJ_FREQUENCY;
tx.freq = invalid_freq[i];
ret = adjtimex(&tx);
if (ret >= 0) {
printf("[FAIL]\n");
printf("Error: No failure on invalid ADJ_FREQUENCY %ld\n",
invalid_freq[i]);
pass = -1;
goto out;
}
}
}
printf("[OK]\n");
out:
/* reset freq to zero */
tx.modes = ADJ_FREQUENCY;
tx.freq = 0;
ret = adjtimex(&tx);
return pass;
}
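/*
 * ADJ_SETOFFSET applies a one-shot step to CLOCK_REALTIME. With ADJ_NANO,
 * tv_usec carries nanoseconds. Negative offsets are normalized so tv_usec
 * stays in [0, 1s).
 */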
int set_offset(long long offset, int use_nano)
{
struct timex tmx = {};
int ret;
tmx.modes = ADJ_SETOFFSET;
if (use_nano) {
tmx.modes |= ADJ_NANO;
tmx.time.tv_sec = offset / NSEC_PER_SEC;
tmx.time.tv_usec = offset % NSEC_PER_SEC;
if (offset < 0 && tmx.time.tv_usec) {
tmx.time.tv_sec -= 1;
tmx.time.tv_usec += NSEC_PER_SEC;
}
} else {
tmx.time.tv_sec = offset / USEC_PER_SEC;
tmx.time.tv_usec = offset % USEC_PER_SEC;
if (offset < 0 && tmx.time.tv_usec) {
tmx.time.tv_sec -= 1;
tmx.time.tv_usec += USEC_PER_SEC;
}
}
ret = clock_adjtime(CLOCK_REALTIME, &tmx);
if (ret < 0) {
printf("(sec: %ld usec: %ld) ", tmx.time.tv_sec, tmx.time.tv_usec);
printf("[FAIL]\n");
return -1;
}
return 0;
}
int set_bad_offset(long sec, long usec, int use_nano)
{
struct timex tmx = {};
int ret;
tmx.modes = ADJ_SETOFFSET;
if (use_nano)
tmx.modes |= ADJ_NANO;
tmx.time.tv_sec = sec;
tmx.time.tv_usec = usec;
ret = clock_adjtime(CLOCK_REALTIME, &tmx);
if (ret >= 0) {
printf("Invalid (sec: %ld usec: %ld) did not fail! ", tmx.time.tv_sec, tmx.time.tv_usec);
printf("[FAIL]\n");
return -1;
}
return 0;
}
int validate_set_offset(void)
{
printf("Testing ADJ_SETOFFSET... ");
fflush(stdout);
/* Test valid values */
if (set_offset(NSEC_PER_SEC - 1, 1))
return -1;
if (set_offset(-NSEC_PER_SEC + 1, 1))
return -1;
if (set_offset(-NSEC_PER_SEC - 1, 1))
return -1;
if (set_offset(5 * NSEC_PER_SEC, 1))
return -1;
if (set_offset(-5 * NSEC_PER_SEC, 1))
return -1;
if (set_offset(5 * NSEC_PER_SEC + NSEC_PER_SEC / 2, 1))
return -1;
if (set_offset(-5 * NSEC_PER_SEC - NSEC_PER_SEC / 2, 1))
return -1;
if (set_offset(USEC_PER_SEC - 1, 0))
return -1;
if (set_offset(-USEC_PER_SEC + 1, 0))
return -1;
if (set_offset(-USEC_PER_SEC - 1, 0))
return -1;
if (set_offset(5 * USEC_PER_SEC, 0))
return -1;
if (set_offset(-5 * USEC_PER_SEC, 0))
return -1;
if (set_offset(5 * USEC_PER_SEC + USEC_PER_SEC / 2, 0))
return -1;
if (set_offset(-5 * USEC_PER_SEC - USEC_PER_SEC / 2, 0))
return -1;
/* Test invalid values */
if (set_bad_offset(0, -1, 1))
return -1;
if (set_bad_offset(0, -1, 0))
return -1;
if (set_bad_offset(0, 2 * NSEC_PER_SEC, 1))
return -1;
if (set_bad_offset(0, 2 * USEC_PER_SEC, 0))
return -1;
if (set_bad_offset(0, NSEC_PER_SEC, 1))
return -1;
if (set_bad_offset(0, USEC_PER_SEC, 0))
return -1;
if (set_bad_offset(0, -NSEC_PER_SEC, 1))
return -1;
if (set_bad_offset(0, -USEC_PER_SEC, 0))
return -1;
printf("[OK]\n");
return 0;
}
int main(int argc, char **argv)
{
if (validate_freq())
return ksft_exit_fail();
if (validate_set_offset())
return ksft_exit_fail();
return ksft_exit_pass();
}
| linux-master | tools/testing/selftests/timers/valid-adjtimex.c |
/* Set tai offset
* by: John Stultz <[email protected]>
* (C) Copyright Linaro 2013
* Licensed under the GPLv2
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <sys/time.h>
#include <sys/timex.h>
#include <string.h>
#include <signal.h>
#include <unistd.h>
#include "../kselftest.h"
int set_tai(int offset)
{
struct timex tx;
memset(&tx, 0, sizeof(tx));
tx.modes = ADJ_TAI;
tx.constant = offset;
return adjtimex(&tx);
}
int get_tai(void)
{
struct timex tx;
memset(&tx, 0, sizeof(tx));
adjtimex(&tx);
return tx.tai;
}
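/* Walk the TAI offset from 1 to 60 and verify adjtimex() reports each value back. */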
int main(int argc, char **argv)
{
int i, ret;
ret = get_tai();
printf("tai offset started at %i\n", ret);
printf("Checking tai offsets can be properly set: ");
fflush(stdout);
for (i = 1; i <= 60; i++) {
ret = set_tai(i);
ret = get_tai();
if (ret != i) {
printf("[FAILED] expected: %i got %i\n", i, ret);
return ksft_exit_fail();
}
}
printf("[OK]\n");
return ksft_exit_pass();
}
| linux-master | tools/testing/selftests/timers/set-tai.c |
/* adjtimex() tick adjustment test
* by: John Stultz <[email protected]>
* (C) Copyright Linaro Limited 2015
* Licensed under the GPLv2
*
* To build:
* $ gcc adjtick.c -o adjtick -lrt
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <stdio.h>
#include <unistd.h>
#include <stdlib.h>
#include <sys/time.h>
#include <sys/timex.h>
#include <time.h>
#include "../kselftest.h"
#define CLOCK_MONOTONIC_RAW 4
#define NSEC_PER_SEC 1000000000LL
#define USEC_PER_SEC 1000000
#define MILLION 1000000
long systick;
long long llabs(long long val)
{
if (val < 0)
val = -val;
return val;
}
unsigned long long ts_to_nsec(struct timespec ts)
{
return ts.tv_sec * NSEC_PER_SEC + ts.tv_nsec;
}
struct timespec nsec_to_ts(long long ns)
{
struct timespec ts;
ts.tv_sec = ns/NSEC_PER_SEC;
ts.tv_nsec = ns%NSEC_PER_SEC;
return ts;
}
long long diff_timespec(struct timespec start, struct timespec end)
{
long long start_ns, end_ns;
start_ns = ts_to_nsec(start);
end_ns = ts_to_nsec(end);
return end_ns - start_ns;
}
void get_monotonic_and_raw(struct timespec *mon, struct timespec *raw)
{
struct timespec start, mid, end;
long long diff = 0, tmp;
int i;
clock_gettime(CLOCK_MONOTONIC, mon);
clock_gettime(CLOCK_MONOTONIC_RAW, raw);
/* Try to get a more tightly bound pairing */
for (i = 0; i < 3; i++) {
long long newdiff;
clock_gettime(CLOCK_MONOTONIC, &start);
clock_gettime(CLOCK_MONOTONIC_RAW, &mid);
clock_gettime(CLOCK_MONOTONIC, &end);
newdiff = diff_timespec(start, end);
if (diff == 0 || newdiff < diff) {
diff = newdiff;
*raw = mid;
tmp = (ts_to_nsec(start) + ts_to_nsec(end))/2;
*mon = nsec_to_ts(tmp);
}
}
}
long long get_ppm_drift(void)
{
struct timespec mon_start, raw_start, mon_end, raw_end;
long long delta1, delta2, eppm;
get_monotonic_and_raw(&mon_start, &raw_start);
sleep(15);
get_monotonic_and_raw(&mon_end, &raw_end);
delta1 = diff_timespec(mon_start, mon_end);
delta2 = diff_timespec(raw_start, raw_end);
eppm = (delta1*MILLION)/delta2 - MILLION;
return eppm;
}
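/*
 * Program a new tick length, measure the resulting drift against
 * CLOCK_MONOTONIC_RAW, and compare it to the ppm implied by the tick value.
 */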
int check_tick_adj(long tickval)
{
long long eppm, ppm;
struct timex tx1;
tx1.modes = ADJ_TICK;
tx1.modes |= ADJ_OFFSET;
tx1.modes |= ADJ_FREQUENCY;
tx1.modes |= ADJ_STATUS;
tx1.status = STA_PLL;
tx1.offset = 0;
tx1.freq = 0;
tx1.tick = tickval;
adjtimex(&tx1);
sleep(1);
ppm = ((long long)tickval * MILLION)/systick - MILLION;
printf("Estimating tick (act: %ld usec, %lld ppm): ", tickval, ppm);
eppm = get_ppm_drift();
printf("%lld usec, %lld ppm", systick + (systick * eppm / MILLION), eppm);
fflush(stdout);
tx1.modes = 0;
adjtimex(&tx1);
if (tx1.offset || tx1.freq || tx1.tick != tickval) {
printf(" [ERROR]\n");
printf("\tUnexpected adjtimex return values, make sure ntpd is not running.\n");
return -1;
}
/*
* Here we use 100ppm difference as an error bound.
* We likely should see better, but some coarse clocksources
* cannot match the HZ tick size accurately, so we have a
* internal correction factor that doesn't scale exactly
* with the adjustment, resulting in > 10ppm error during
* a 10% adjustment. 100ppm also gives us more breathing
* room for interruptions during the measurement.
*/
if (llabs(eppm - ppm) > 100) {
printf(" [FAILED]\n");
return -1;
}
printf(" [OK]\n");
return 0;
}
int main(int argc, char **argv)
{
struct timespec raw;
long tick, max, interval, err;
struct timex tx1;
err = 0;
setbuf(stdout, NULL);
if (clock_gettime(CLOCK_MONOTONIC_RAW, &raw)) {
printf("ERR: NO CLOCK_MONOTONIC_RAW\n");
return -1;
}
printf("Each iteration takes about 15 seconds\n");
	systick = USEC_PER_SEC/sysconf(_SC_CLK_TCK);
max = systick/10; /* +/- 10% */
interval = max/4; /* in 4 steps each side */
for (tick = (systick - max); tick < (systick + max); tick += interval) {
if (check_tick_adj(tick)) {
err = 1;
break;
}
}
/* Reset things to zero */
tx1.modes = ADJ_TICK;
tx1.modes |= ADJ_OFFSET;
tx1.modes |= ADJ_FREQUENCY;
tx1.offset = 0;
tx1.freq = 0;
tx1.tick = systick;
adjtimex(&tx1);
if (err)
return ksft_exit_fail();
return ksft_exit_pass();
}
| linux-master | tools/testing/selftests/timers/adjtick.c |
/* Measure nanosleep timer latency
* by: john stultz ([email protected])
* (C) Copyright Linaro 2013
* Licensed under the GPLv2
*
* To build:
* $ gcc nsleep-lat.c -o nsleep-lat -lrt
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <sys/time.h>
#include <sys/timex.h>
#include <string.h>
#include <signal.h>
#include "../kselftest.h"
#define NSEC_PER_SEC 1000000000ULL
#define UNRESONABLE_LATENCY 40000000 /* 40ms in nanosecs */
#define CLOCK_REALTIME 0
#define CLOCK_MONOTONIC 1
#define CLOCK_PROCESS_CPUTIME_ID 2
#define CLOCK_THREAD_CPUTIME_ID 3
#define CLOCK_MONOTONIC_RAW 4
#define CLOCK_REALTIME_COARSE 5
#define CLOCK_MONOTONIC_COARSE 6
#define CLOCK_BOOTTIME 7
#define CLOCK_REALTIME_ALARM 8
#define CLOCK_BOOTTIME_ALARM 9
#define CLOCK_HWSPECIFIC 10
#define CLOCK_TAI 11
#define NR_CLOCKIDS 12
#define UNSUPPORTED 0xf00f
char *clockstring(int clockid)
{
switch (clockid) {
case CLOCK_REALTIME:
return "CLOCK_REALTIME";
case CLOCK_MONOTONIC:
return "CLOCK_MONOTONIC";
case CLOCK_PROCESS_CPUTIME_ID:
return "CLOCK_PROCESS_CPUTIME_ID";
case CLOCK_THREAD_CPUTIME_ID:
return "CLOCK_THREAD_CPUTIME_ID";
case CLOCK_MONOTONIC_RAW:
return "CLOCK_MONOTONIC_RAW";
case CLOCK_REALTIME_COARSE:
return "CLOCK_REALTIME_COARSE";
case CLOCK_MONOTONIC_COARSE:
return "CLOCK_MONOTONIC_COARSE";
case CLOCK_BOOTTIME:
return "CLOCK_BOOTTIME";
case CLOCK_REALTIME_ALARM:
return "CLOCK_REALTIME_ALARM";
case CLOCK_BOOTTIME_ALARM:
return "CLOCK_BOOTTIME_ALARM";
case CLOCK_TAI:
return "CLOCK_TAI";
};
return "UNKNOWN_CLOCKID";
}
struct timespec timespec_add(struct timespec ts, unsigned long long ns)
{
ts.tv_nsec += ns;
while (ts.tv_nsec >= NSEC_PER_SEC) {
ts.tv_nsec -= NSEC_PER_SEC;
ts.tv_sec++;
}
return ts;
}
long long timespec_sub(struct timespec a, struct timespec b)
{
long long ret = NSEC_PER_SEC * b.tv_sec + b.tv_nsec;
ret -= NSEC_PER_SEC * a.tv_sec + a.tv_nsec;
return ret;
}
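/*
 * Measure nanosleep overshoot: relative sleeps are timed in bulk,
 * absolute sleeps individually against their target expiry.
 */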
int nanosleep_lat_test(int clockid, long long ns)
{
struct timespec start, end, target;
long long latency = 0;
int i, count;
target.tv_sec = ns/NSEC_PER_SEC;
target.tv_nsec = ns%NSEC_PER_SEC;
if (clock_gettime(clockid, &start))
return UNSUPPORTED;
if (clock_nanosleep(clockid, 0, &target, NULL))
return UNSUPPORTED;
count = 10;
/* First check relative latency */
clock_gettime(clockid, &start);
for (i = 0; i < count; i++)
clock_nanosleep(clockid, 0, &target, NULL);
clock_gettime(clockid, &end);
if (((timespec_sub(start, end)/count)-ns) > UNRESONABLE_LATENCY) {
printf("Large rel latency: %lld ns :", (timespec_sub(start, end)/count)-ns);
return -1;
}
/* Next check absolute latency */
for (i = 0; i < count; i++) {
clock_gettime(clockid, &start);
target = timespec_add(start, ns);
clock_nanosleep(clockid, TIMER_ABSTIME, &target, NULL);
clock_gettime(clockid, &end);
latency += timespec_sub(target, end);
}
if (latency/count > UNRESONABLE_LATENCY) {
printf("Large abs latency: %lld ns :", latency/count);
return -1;
}
return 0;
}
int main(int argc, char **argv)
{
long long length;
int clockid, ret;
for (clockid = CLOCK_REALTIME; clockid < NR_CLOCKIDS; clockid++) {
/* Skip cputime clockids since nanosleep won't increment cputime */
if (clockid == CLOCK_PROCESS_CPUTIME_ID ||
clockid == CLOCK_THREAD_CPUTIME_ID ||
clockid == CLOCK_HWSPECIFIC)
continue;
printf("nsleep latency %-26s ", clockstring(clockid));
fflush(stdout);
length = 10;
while (length <= (NSEC_PER_SEC * 10)) {
ret = nanosleep_lat_test(clockid, length);
if (ret)
break;
length *= 100;
}
if (ret == UNSUPPORTED) {
printf("[UNSUPPORTED]\n");
continue;
}
if (ret < 0) {
printf("[FAILED]\n");
return ksft_exit_fail();
}
printf("[OK]\n");
}
return ksft_exit_pass();
}
| linux-master | tools/testing/selftests/timers/nsleep-lat.c |
/* alarmtimer suspend test
* John Stultz ([email protected])
* (C) Copyright Linaro 2013
* Licensed under the GPLv2
*
* This test makes sure the alarmtimer & RTC wakeup code is
* functioning.
*
* To build:
* $ gcc alarmtimer-suspend.c -o alarmtimer-suspend -lrt
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <stdio.h>
#include <unistd.h>
#include <time.h>
#include <string.h>
#include <signal.h>
#include <stdlib.h>
#include <pthread.h>
#include "../kselftest.h"
#define CLOCK_REALTIME 0
#define CLOCK_MONOTONIC 1
#define CLOCK_PROCESS_CPUTIME_ID 2
#define CLOCK_THREAD_CPUTIME_ID 3
#define CLOCK_MONOTONIC_RAW 4
#define CLOCK_REALTIME_COARSE 5
#define CLOCK_MONOTONIC_COARSE 6
#define CLOCK_BOOTTIME 7
#define CLOCK_REALTIME_ALARM 8
#define CLOCK_BOOTTIME_ALARM 9
#define CLOCK_HWSPECIFIC 10
#define CLOCK_TAI 11
#define NR_CLOCKIDS 12
#define NSEC_PER_SEC 1000000000ULL
#define UNREASONABLE_LAT (NSEC_PER_SEC * 5) /* hopefully we resume in 5 secs */
#define SUSPEND_SECS 15
int alarmcount;
int alarm_clock_id;
struct timespec start_time;
char *clockstring(int clockid)
{
switch (clockid) {
case CLOCK_REALTIME:
return "CLOCK_REALTIME";
case CLOCK_MONOTONIC:
return "CLOCK_MONOTONIC";
case CLOCK_PROCESS_CPUTIME_ID:
return "CLOCK_PROCESS_CPUTIME_ID";
case CLOCK_THREAD_CPUTIME_ID:
return "CLOCK_THREAD_CPUTIME_ID";
case CLOCK_MONOTONIC_RAW:
return "CLOCK_MONOTONIC_RAW";
case CLOCK_REALTIME_COARSE:
return "CLOCK_REALTIME_COARSE";
case CLOCK_MONOTONIC_COARSE:
return "CLOCK_MONOTONIC_COARSE";
case CLOCK_BOOTTIME:
return "CLOCK_BOOTTIME";
case CLOCK_REALTIME_ALARM:
return "CLOCK_REALTIME_ALARM";
case CLOCK_BOOTTIME_ALARM:
return "CLOCK_BOOTTIME_ALARM";
case CLOCK_TAI:
return "CLOCK_TAI";
}
return "UNKNOWN_CLOCKID";
}
long long timespec_sub(struct timespec a, struct timespec b)
{
long long ret = NSEC_PER_SEC * b.tv_sec + b.tv_nsec;
ret -= NSEC_PER_SEC * a.tv_sec + a.tv_nsec;
return ret;
}
int final_ret;
void sigalarm(int signo)
{
long long delta_ns;
struct timespec ts;
clock_gettime(alarm_clock_id, &ts);
alarmcount++;
delta_ns = timespec_sub(start_time, ts);
delta_ns -= NSEC_PER_SEC * SUSPEND_SECS * alarmcount;
printf("ALARM(%i): %ld:%ld latency: %lld ns ", alarmcount, ts.tv_sec,
ts.tv_nsec, delta_ns);
if (delta_ns > UNREASONABLE_LAT) {
printf("[FAIL]\n");
final_ret = -1;
} else
printf("[OK]\n");
}
int main(void)
{
timer_t tm1;
struct itimerspec its1, its2;
struct sigevent se;
struct sigaction act;
int signum = SIGRTMAX;
/* Set up signal handler: */
sigfillset(&act.sa_mask);
act.sa_flags = 0;
act.sa_handler = sigalarm;
sigaction(signum, &act, NULL);
/* Set up timer: */
memset(&se, 0, sizeof(se));
se.sigev_notify = SIGEV_SIGNAL;
se.sigev_signo = signum;
se.sigev_value.sival_int = 0;
for (alarm_clock_id = CLOCK_REALTIME_ALARM;
alarm_clock_id <= CLOCK_BOOTTIME_ALARM;
alarm_clock_id++) {
alarmcount = 0;
if (timer_create(alarm_clock_id, &se, &tm1) == -1) {
printf("timer_create failed, %s unsupported?\n",
clockstring(alarm_clock_id));
break;
}
clock_gettime(alarm_clock_id, &start_time);
printf("Start time (%s): %ld:%ld\n", clockstring(alarm_clock_id),
start_time.tv_sec, start_time.tv_nsec);
printf("Setting alarm for every %i seconds\n", SUSPEND_SECS);
its1.it_value = start_time;
its1.it_value.tv_sec += SUSPEND_SECS;
its1.it_interval.tv_sec = SUSPEND_SECS;
its1.it_interval.tv_nsec = 0;
timer_settime(tm1, TIMER_ABSTIME, &its1, &its2);
while (alarmcount < 5)
sleep(1); /* First 5 alarms, do nothing */
printf("Starting suspend loops\n");
while (alarmcount < 10) {
int ret;
sleep(3);
ret = system("echo mem > /sys/power/state");
if (ret)
break;
}
timer_delete(tm1);
}
if (final_ret)
return ksft_exit_fail();
return ksft_exit_pass();
}
| linux-master | tools/testing/selftests/timers/alarmtimer-suspend.c |
/* CLOCK_MONOTONIC vs CLOCK_MONOTONIC_RAW skew test
* by: john stultz ([email protected])
* John Stultz <[email protected]>
* (C) Copyright IBM 2012
* (C) Copyright Linaro Limited 2015
* Licensed under the GPLv2
*
* To build:
* $ gcc raw_skew.c -o raw_skew -lrt
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <stdio.h>
#include <unistd.h>
#include <stdlib.h>
#include <sys/time.h>
#include <sys/timex.h>
#include <time.h>
#include "../kselftest.h"
#define CLOCK_MONOTONIC_RAW 4
#define NSEC_PER_SEC 1000000000LL
#define shift_right(x, s) ({ \
__typeof__(x) __x = (x); \
__typeof__(s) __s = (s); \
__x < 0 ? -(-__x >> __s) : __x >> __s; \
})
long long llabs(long long val)
{
if (val < 0)
val = -val;
return val;
}
unsigned long long ts_to_nsec(struct timespec ts)
{
return ts.tv_sec * NSEC_PER_SEC + ts.tv_nsec;
}
struct timespec nsec_to_ts(long long ns)
{
struct timespec ts;
ts.tv_sec = ns/NSEC_PER_SEC;
ts.tv_nsec = ns%NSEC_PER_SEC;
return ts;
}
long long diff_timespec(struct timespec start, struct timespec end)
{
long long start_ns, end_ns;
start_ns = ts_to_nsec(start);
end_ns = ts_to_nsec(end);
return end_ns - start_ns;
}
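/*
 * Sample CLOCK_MONOTONIC and CLOCK_MONOTONIC_RAW as close together as
 * possible, keeping the pairing with the smallest bracketing delay.
 */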
void get_monotonic_and_raw(struct timespec *mon, struct timespec *raw)
{
struct timespec start, mid, end;
long long diff = 0, tmp;
int i;
for (i = 0; i < 3; i++) {
long long newdiff;
clock_gettime(CLOCK_MONOTONIC, &start);
clock_gettime(CLOCK_MONOTONIC_RAW, &mid);
clock_gettime(CLOCK_MONOTONIC, &end);
newdiff = diff_timespec(start, end);
if (diff == 0 || newdiff < diff) {
diff = newdiff;
*raw = mid;
tmp = (ts_to_nsec(start) + ts_to_nsec(end))/2;
*mon = nsec_to_ts(tmp);
}
}
}
int main(int argc, char **argv)
{
struct timespec mon, raw, start, end;
long long delta1, delta2, interval, eppm, ppm;
struct timex tx1, tx2;
setbuf(stdout, NULL);
if (clock_gettime(CLOCK_MONOTONIC_RAW, &raw)) {
printf("ERR: NO CLOCK_MONOTONIC_RAW\n");
return -1;
}
tx1.modes = 0;
adjtimex(&tx1);
get_monotonic_and_raw(&mon, &raw);
start = mon;
delta1 = diff_timespec(mon, raw);
if (tx1.offset)
printf("WARNING: ADJ_OFFSET in progress, this will cause inaccurate results\n");
printf("Estimating clock drift: ");
fflush(stdout);
sleep(120);
get_monotonic_and_raw(&mon, &raw);
end = mon;
tx2.modes = 0;
adjtimex(&tx2);
delta2 = diff_timespec(mon, raw);
interval = diff_timespec(start, end);
/* calculate measured ppm between MONOTONIC and MONOTONIC_RAW */
eppm = ((delta2-delta1)*NSEC_PER_SEC)/interval;
eppm = -eppm;
printf("%lld.%i(est)", eppm/1000, abs((int)(eppm%1000)));
/* Avg the two actual freq samples adjtimex gave us */
ppm = (long long)(tx1.freq + tx2.freq) * 1000 / 2;
ppm = shift_right(ppm, 16);
printf(" %lld.%i(act)", ppm/1000, abs((int)(ppm%1000)));
if (llabs(eppm - ppm) > 1000) {
if (tx1.offset || tx2.offset ||
tx1.freq != tx2.freq || tx1.tick != tx2.tick) {
printf(" [SKIP]\n");
return ksft_exit_skip("The clock was adjusted externally. Shutdown NTPd or other time sync daemons\n");
}
printf(" [FAILED]\n");
return ksft_exit_fail();
}
printf(" [OK]\n");
return ksft_exit_pass();
}
| linux-master | tools/testing/selftests/timers/raw_skew.c |
/* Demo leapsecond deadlock
* by: John Stultz ([email protected])
* (C) Copyright IBM 2012
* (C) Copyright 2013, 2015 Linaro Limited
* Licensed under the GPL
*
* This test demonstrates leapsecond deadlock that is possible
* on kernels from 2.6.26 to 3.3.
*
* WARNING: THIS WILL LIKELY HARD HANG SYSTEMS AND MAY LOSE DATA
* RUN AT YOUR OWN RISK!
* To build:
* $ gcc leapcrash.c -o leapcrash -lrt
*/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <sys/time.h>
#include <sys/timex.h>
#include <string.h>
#include <signal.h>
#include "../kselftest.h"
/* clear NTP time_status & time_state */
int clear_time_state(void)
{
struct timex tx;
int ret;
/*
* We have to call adjtime twice here, as kernels
* prior to 6b1859dba01c7 (included in 3.5 and
* -stable), had an issue with the state machine
* and wouldn't clear the STA_INS/DEL flag directly.
*/
tx.modes = ADJ_STATUS;
tx.status = STA_PLL;
ret = adjtimex(&tx);
tx.modes = ADJ_STATUS;
tx.status = 0;
ret = adjtimex(&tx);
return ret;
}
/* Make sure we cleanup on ctrl-c */
void handler(int unused)
{
clear_time_state();
exit(0);
}
int main(void)
{
struct timex tx;
struct timespec ts;
time_t next_leap;
int count = 0;
setbuf(stdout, NULL);
signal(SIGINT, handler);
	signal(SIGTERM, handler);	/* SIGKILL cannot be caught */
printf("This runs for a few minutes. Press ctrl-c to stop\n");
clear_time_state();
/* Get the current time */
clock_gettime(CLOCK_REALTIME, &ts);
/* Calculate the next possible leap second 23:59:60 GMT */
next_leap = ts.tv_sec;
next_leap += 86400 - (next_leap % 86400);
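	/* Repeatedly cross the leap boundary while toggling STA_INS, trying to trigger the deadlock. */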
for (count = 0; count < 20; count++) {
struct timeval tv;
/* set the time to 2 seconds before the leap */
tv.tv_sec = next_leap - 2;
tv.tv_usec = 0;
if (settimeofday(&tv, NULL)) {
printf("Error: You're likely not running with proper (ie: root) permissions\n");
return ksft_exit_fail();
}
tx.modes = 0;
adjtimex(&tx);
/* hammer on adjtime w/ STA_INS */
while (tx.time.tv_sec < next_leap + 1) {
/* Set the leap second insert flag */
tx.modes = ADJ_STATUS;
tx.status = STA_INS;
adjtimex(&tx);
}
clear_time_state();
printf(".");
fflush(stdout);
}
printf("[OK]\n");
return ksft_exit_pass();
}
| linux-master | tools/testing/selftests/timers/leapcrash.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* This test checks the response of the system clock to frequency
* steps made with adjtimex(). The frequency error and stability of
* the CLOCK_MONOTONIC clock relative to the CLOCK_MONOTONIC_RAW clock
* is measured in two intervals following the step. The test fails if
* values from the second interval exceed specified limits.
*
* Copyright (C) Miroslav Lichvar <[email protected]> 2017
*/
#include <math.h>
#include <stdio.h>
#include <sys/timex.h>
#include <time.h>
#include <unistd.h>
#include "../kselftest.h"
#define SAMPLES 100
#define SAMPLE_READINGS 10
#define MEAN_SAMPLE_INTERVAL 0.1
#define STEP_INTERVAL 1.0
#define MAX_PRECISION 500e-9
#define MAX_FREQ_ERROR 0.02e-6
#define MAX_STDDEV 50e-9
#ifndef ADJ_SETOFFSET
#define ADJ_SETOFFSET 0x0100
#endif
struct sample {
double offset;
double time;
};
static time_t mono_raw_base;
static time_t mono_base;
static long user_hz;
static double precision;
static double mono_freq_offset;
static double diff_timespec(struct timespec *ts1, struct timespec *ts2)
{
return ts1->tv_sec - ts2->tv_sec + (ts1->tv_nsec - ts2->tv_nsec) / 1e9;
}
static double get_sample(struct sample *sample)
{
double delay, mindelay = 0.0;
struct timespec ts1, ts2, ts3;
int i;
for (i = 0; i < SAMPLE_READINGS; i++) {
clock_gettime(CLOCK_MONOTONIC_RAW, &ts1);
clock_gettime(CLOCK_MONOTONIC, &ts2);
clock_gettime(CLOCK_MONOTONIC_RAW, &ts3);
ts1.tv_sec -= mono_raw_base;
ts2.tv_sec -= mono_base;
ts3.tv_sec -= mono_raw_base;
delay = diff_timespec(&ts3, &ts1);
if (delay <= 1e-9) {
i--;
continue;
}
if (!i || delay < mindelay) {
sample->offset = diff_timespec(&ts2, &ts1);
sample->offset -= delay / 2.0;
sample->time = ts1.tv_sec + ts1.tv_nsec / 1e9;
mindelay = delay;
}
}
return mindelay;
}
static void reset_ntp_error(void)
{
struct timex txc;
txc.modes = ADJ_SETOFFSET;
txc.time.tv_sec = 0;
txc.time.tv_usec = 0;
if (adjtimex(&txc) < 0) {
perror("[FAIL] adjtimex");
ksft_exit_fail();
}
}
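/*
 * Split the requested frequency offset between ADJ_TICK (coarse, whole
 * microseconds per tick) and ADJ_FREQUENCY (fine remainder, in 2^-16
 * ppm units).
 */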
static void set_frequency(double freq)
{
struct timex txc;
int tick_offset;
tick_offset = 1e6 * freq / user_hz;
txc.modes = ADJ_TICK | ADJ_FREQUENCY;
txc.tick = 1000000 / user_hz + tick_offset;
txc.freq = (1e6 * freq - user_hz * tick_offset) * (1 << 16);
if (adjtimex(&txc) < 0) {
perror("[FAIL] adjtimex");
ksft_exit_fail();
}
}
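/*
 * Least-squares linear fit of offset vs. time; also reports the residual
 * standard deviation and maximum.
 */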
static void regress(struct sample *samples, int n, double *intercept,
double *slope, double *r_stddev, double *r_max)
{
double x, y, r, x_sum, y_sum, xy_sum, x2_sum, r2_sum;
int i;
x_sum = 0.0, y_sum = 0.0, xy_sum = 0.0, x2_sum = 0.0;
for (i = 0; i < n; i++) {
x = samples[i].time;
y = samples[i].offset;
x_sum += x;
y_sum += y;
xy_sum += x * y;
x2_sum += x * x;
}
*slope = (xy_sum - x_sum * y_sum / n) / (x2_sum - x_sum * x_sum / n);
*intercept = (y_sum - *slope * x_sum) / n;
*r_max = 0.0, r2_sum = 0.0;
for (i = 0; i < n; i++) {
x = samples[i].time;
y = samples[i].offset;
r = fabs(x * *slope + *intercept - y);
if (*r_max < r)
*r_max = r;
r2_sum += r * r;
}
*r_stddev = sqrt(r2_sum / n);
}
static int run_test(int calibration, double freq_base, double freq_step)
{
struct sample samples[SAMPLES];
double intercept, slope, stddev1, max1, stddev2, max2;
double freq_error1, freq_error2;
int i;
set_frequency(freq_base);
for (i = 0; i < 10; i++)
usleep(1e6 * MEAN_SAMPLE_INTERVAL / 10);
reset_ntp_error();
set_frequency(freq_base + freq_step);
for (i = 0; i < 10; i++)
usleep(rand() % 2000000 * STEP_INTERVAL / 10);
set_frequency(freq_base);
for (i = 0; i < SAMPLES; i++) {
usleep(rand() % 2000000 * MEAN_SAMPLE_INTERVAL);
get_sample(&samples[i]);
}
if (calibration) {
regress(samples, SAMPLES, &intercept, &slope, &stddev1, &max1);
mono_freq_offset = slope;
printf("CLOCK_MONOTONIC_RAW frequency offset: %11.3f ppm\n",
1e6 * mono_freq_offset);
return 0;
}
regress(samples, SAMPLES / 2, &intercept, &slope, &stddev1, &max1);
freq_error1 = slope * (1.0 - mono_freq_offset) - mono_freq_offset -
freq_base;
regress(samples + SAMPLES / 2, SAMPLES / 2, &intercept, &slope,
&stddev2, &max2);
freq_error2 = slope * (1.0 - mono_freq_offset) - mono_freq_offset -
freq_base;
printf("%6.0f %+10.3f %6.0f %7.0f %+10.3f %6.0f %7.0f\t",
1e6 * freq_step,
1e6 * freq_error1, 1e9 * stddev1, 1e9 * max1,
1e6 * freq_error2, 1e9 * stddev2, 1e9 * max2);
if (fabs(freq_error2) > MAX_FREQ_ERROR || stddev2 > MAX_STDDEV) {
printf("[FAIL]\n");
return 1;
}
printf("[OK]\n");
return 0;
}
static void init_test(void)
{
struct timespec ts;
struct sample sample;
if (clock_gettime(CLOCK_MONOTONIC_RAW, &ts)) {
perror("[FAIL] clock_gettime(CLOCK_MONOTONIC_RAW)");
ksft_exit_fail();
}
mono_raw_base = ts.tv_sec;
if (clock_gettime(CLOCK_MONOTONIC, &ts)) {
perror("[FAIL] clock_gettime(CLOCK_MONOTONIC)");
ksft_exit_fail();
}
mono_base = ts.tv_sec;
user_hz = sysconf(_SC_CLK_TCK);
precision = get_sample(&sample) / 2.0;
printf("CLOCK_MONOTONIC_RAW+CLOCK_MONOTONIC precision: %.0f ns\t\t",
1e9 * precision);
if (precision > MAX_PRECISION)
ksft_exit_skip("precision: %.0f ns > MAX_PRECISION: %.0f ns\n",
1e9 * precision, 1e9 * MAX_PRECISION);
printf("[OK]\n");
srand(ts.tv_sec ^ ts.tv_nsec);
run_test(1, 0.0, 0.0);
}
int main(int argc, char **argv)
{
double freq_base, freq_step;
int i, j, fails = 0;
init_test();
printf("Checking response to frequency step:\n");
printf(" Step 1st interval 2nd interval\n");
printf(" Freq Dev Max Freq Dev Max\n");
for (i = 2; i >= 0; i--) {
for (j = 0; j < 5; j++) {
freq_base = (rand() % (1 << 24) - (1 << 23)) / 65536e6;
freq_step = 10e-6 * (1 << (6 * i));
fails += run_test(0, freq_base, freq_step);
}
}
set_frequency(0.0);
if (fails)
return ksft_exit_fail();
return ksft_exit_pass();
}
| linux-master | tools/testing/selftests/timers/freq-step.c |
/* Measure mqueue timeout latency
* by: john stultz ([email protected])
* (C) Copyright Linaro 2013
*
* Inspired with permission from example test by:
* Romain Francoise <[email protected]>
* Licensed under the GPLv2
*
* To build:
* $ gcc mqueue-lat.c -o mqueue-lat -lrt
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <sys/time.h>
#include <sys/timex.h>
#include <string.h>
#include <signal.h>
#include <errno.h>
#include <mqueue.h>
#include "../kselftest.h"
#define NSEC_PER_SEC 1000000000ULL
#define TARGET_TIMEOUT 100000000 /* 100ms in nanoseconds */
#define UNRESONABLE_LATENCY 40000000 /* 40ms in nanosecs */
long long timespec_sub(struct timespec a, struct timespec b)
{
long long ret = NSEC_PER_SEC * b.tv_sec + b.tv_nsec;
ret -= NSEC_PER_SEC * a.tv_sec + a.tv_nsec;
return ret;
}
struct timespec timespec_add(struct timespec ts, unsigned long long ns)
{
ts.tv_nsec += ns;
while (ts.tv_nsec >= NSEC_PER_SEC) {
ts.tv_nsec -= NSEC_PER_SEC;
ts.tv_sec++;
}
return ts;
}
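/*
 * Issue 100 mq_timedreceive() calls with 100ms timeouts on an empty queue
 * and check the average timeout latency stays within bounds.
 */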
int mqueue_lat_test(void)
{
mqd_t q;
struct mq_attr attr;
struct timespec start, end, now, target;
int i, count, ret;
q = mq_open("/foo", O_CREAT | O_RDONLY, 0666, NULL);
if (q < 0) {
perror("mq_open");
return -1;
}
mq_getattr(q, &attr);
count = 100;
clock_gettime(CLOCK_MONOTONIC, &start);
for (i = 0; i < count; i++) {
char buf[attr.mq_msgsize];
clock_gettime(CLOCK_REALTIME, &now);
target = now;
target = timespec_add(now, TARGET_TIMEOUT); /* 100ms */
ret = mq_timedreceive(q, buf, sizeof(buf), NULL, &target);
if (ret < 0 && errno != ETIMEDOUT) {
perror("mq_timedreceive");
return -1;
}
}
clock_gettime(CLOCK_MONOTONIC, &end);
mq_close(q);
if ((timespec_sub(start, end)/count) > TARGET_TIMEOUT + UNRESONABLE_LATENCY)
return -1;
return 0;
}
int main(int argc, char **argv)
{
int ret;
printf("Mqueue latency : ");
fflush(stdout);
ret = mqueue_lat_test();
if (ret < 0) {
printf("[FAILED]\n");
return ksft_exit_fail();
}
printf("[OK]\n");
return ksft_exit_pass();
}
| linux-master | tools/testing/selftests/timers/mqueue-lat.c |
/* Time inconsistency check test
* by: john stultz ([email protected])
* (C) Copyright IBM 2003, 2004, 2005, 2012
* (C) Copyright Linaro Limited 2015
* Licensed under the GPLv2
*
* To build:
* $ gcc inconsistency-check.c -o inconsistency-check -lrt
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <stdio.h>
#include <unistd.h>
#include <stdlib.h>
#include <time.h>
#include <sys/time.h>
#include <sys/timex.h>
#include <string.h>
#include <signal.h>
#include "../kselftest.h"
#define CALLS_PER_LOOP 64
#define NSEC_PER_SEC 1000000000ULL
#define CLOCK_REALTIME 0
#define CLOCK_MONOTONIC 1
#define CLOCK_PROCESS_CPUTIME_ID 2
#define CLOCK_THREAD_CPUTIME_ID 3
#define CLOCK_MONOTONIC_RAW 4
#define CLOCK_REALTIME_COARSE 5
#define CLOCK_MONOTONIC_COARSE 6
#define CLOCK_BOOTTIME 7
#define CLOCK_REALTIME_ALARM 8
#define CLOCK_BOOTTIME_ALARM 9
#define CLOCK_HWSPECIFIC 10
#define CLOCK_TAI 11
#define NR_CLOCKIDS 12
char *clockstring(int clockid)
{
switch (clockid) {
case CLOCK_REALTIME:
return "CLOCK_REALTIME";
case CLOCK_MONOTONIC:
return "CLOCK_MONOTONIC";
case CLOCK_PROCESS_CPUTIME_ID:
return "CLOCK_PROCESS_CPUTIME_ID";
case CLOCK_THREAD_CPUTIME_ID:
return "CLOCK_THREAD_CPUTIME_ID";
case CLOCK_MONOTONIC_RAW:
return "CLOCK_MONOTONIC_RAW";
case CLOCK_REALTIME_COARSE:
return "CLOCK_REALTIME_COARSE";
case CLOCK_MONOTONIC_COARSE:
return "CLOCK_MONOTONIC_COARSE";
case CLOCK_BOOTTIME:
return "CLOCK_BOOTTIME";
case CLOCK_REALTIME_ALARM:
return "CLOCK_REALTIME_ALARM";
case CLOCK_BOOTTIME_ALARM:
return "CLOCK_BOOTTIME_ALARM";
case CLOCK_TAI:
return "CLOCK_TAI";
}
return "UNKNOWN_CLOCKID";
}
/* returns 1 if a <= b, 0 otherwise */
static inline int in_order(struct timespec a, struct timespec b)
{
/* use unsigned to avoid false positives on 2038 rollover */
if ((unsigned long)a.tv_sec < (unsigned long)b.tv_sec)
return 1;
if ((unsigned long)a.tv_sec > (unsigned long)b.tv_sec)
return 0;
if (a.tv_nsec > b.tv_nsec)
return 0;
return 1;
}
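/* Repeatedly sample the clock and verify the readings never go backwards. */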
int consistency_test(int clock_type, unsigned long seconds)
{
struct timespec list[CALLS_PER_LOOP];
int i, inconsistent;
long now, then;
time_t t;
char *start_str;
clock_gettime(clock_type, &list[0]);
now = then = list[0].tv_sec;
/* timestamp start of test */
t = time(0);
start_str = ctime(&t);
while (seconds == -1 || now - then < seconds) {
inconsistent = -1;
/* Fill list */
for (i = 0; i < CALLS_PER_LOOP; i++)
clock_gettime(clock_type, &list[i]);
/* Check for inconsistencies */
for (i = 0; i < CALLS_PER_LOOP - 1; i++)
if (!in_order(list[i], list[i+1]))
inconsistent = i;
/* display inconsistency */
if (inconsistent >= 0) {
unsigned long long delta;
ksft_print_msg("\%s\n", start_str);
for (i = 0; i < CALLS_PER_LOOP; i++) {
if (i == inconsistent)
ksft_print_msg("--------------------\n");
ksft_print_msg("%lu:%lu\n", list[i].tv_sec,
list[i].tv_nsec);
if (i == inconsistent + 1)
ksft_print_msg("--------------------\n");
}
delta = list[inconsistent].tv_sec * NSEC_PER_SEC;
delta += list[inconsistent].tv_nsec;
delta -= list[inconsistent+1].tv_sec * NSEC_PER_SEC;
delta -= list[inconsistent+1].tv_nsec;
ksft_print_msg("Delta: %llu ns\n", delta);
			fflush(NULL);
/* timestamp inconsistency*/
t = time(0);
ksft_print_msg("%s\n", ctime(&t));
return -1;
}
now = list[0].tv_sec;
}
return 0;
}
int main(int argc, char *argv[])
{
int clockid, opt;
int userclock = CLOCK_REALTIME;
int maxclocks = NR_CLOCKIDS;
int runtime = 10;
struct timespec ts;
/* Process arguments */
while ((opt = getopt(argc, argv, "t:c:")) != -1) {
switch (opt) {
case 't':
runtime = atoi(optarg);
break;
case 'c':
userclock = atoi(optarg);
maxclocks = userclock + 1;
break;
default:
printf("Usage: %s [-t <secs>] [-c <clockid>]\n", argv[0]);
printf(" -t: Number of seconds to run\n");
printf(" -c: clockid to use (default, all clockids)\n");
exit(-1);
}
}
setbuf(stdout, NULL);
ksft_print_header();
ksft_set_plan(maxclocks - userclock);
for (clockid = userclock; clockid < maxclocks; clockid++) {
if (clockid == CLOCK_HWSPECIFIC || clock_gettime(clockid, &ts)) {
ksft_test_result_skip("%-31s\n", clockstring(clockid));
continue;
}
if (consistency_test(clockid, runtime)) {
ksft_test_result_fail("%-31s\n", clockstring(clockid));
ksft_exit_fail();
} else {
ksft_test_result_pass("%-31s\n", clockstring(clockid));
}
}
ksft_exit_pass();
}
| linux-master | tools/testing/selftests/timers/inconsistency-check.c |
/* Leap second stress test
* by: John Stultz ([email protected])
* (C) Copyright IBM 2012
* (C) Copyright 2013, 2015 Linaro Limited
* Licensed under the GPLv2
*
* This test signals the kernel to insert a leap second
* every day at midnight GMT. This allows for stressing the
* kernel's leap-second behavior, as well as how well applications
* handle the leap-second discontinuity.
*
 * Usage: leap-a-day [-w] [-t] [-i <num>]
 *
 * Options:
 *	-w: Each iteration, only set the leap flag and wait for the leap
 *	    second to trigger naturally (can take up to a day). By default,
 *	    the date is set to 10 seconds before midnight GMT each
 *	    iteration, which speeds up the number of leapsecond transitions
 *	    tested, but because it calls settimeofday frequently it may
 *	    cause application disruption.
 *
 *	-t: Print TAI time rather than UTC
 *
 *	-i: Number of iterations to run (default: 10, -1 = infinite)
*
* Other notes: Disabling NTP prior to running this is advised, as the two
* may conflict in their commands to the kernel.
*
* To build:
* $ gcc leap-a-day.c -o leap-a-day -lrt
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <sys/time.h>
#include <sys/timex.h>
#include <sys/errno.h>
#include <string.h>
#include <signal.h>
#include <unistd.h>
#include "../kselftest.h"
#define NSEC_PER_SEC 1000000000ULL
#define CLOCK_TAI 11
time_t next_leap;
int error_found;
/* returns 1 if a <= b, 0 otherwise */
static inline int in_order(struct timespec a, struct timespec b)
{
if (a.tv_sec < b.tv_sec)
return 1;
if (a.tv_sec > b.tv_sec)
return 0;
if (a.tv_nsec > b.tv_nsec)
return 0;
return 1;
}
struct timespec timespec_add(struct timespec ts, unsigned long long ns)
{
ts.tv_nsec += ns;
while (ts.tv_nsec >= NSEC_PER_SEC) {
ts.tv_nsec -= NSEC_PER_SEC;
ts.tv_sec++;
}
return ts;
}
char *time_state_str(int state)
{
switch (state) {
case TIME_OK: return "TIME_OK";
case TIME_INS: return "TIME_INS";
case TIME_DEL: return "TIME_DEL";
case TIME_OOP: return "TIME_OOP";
case TIME_WAIT: return "TIME_WAIT";
case TIME_BAD: return "TIME_BAD";
}
return "ERROR";
}
/* clear NTP time_status & time_state */
int clear_time_state(void)
{
struct timex tx;
int ret;
/*
* We have to call adjtime twice here, as kernels
* prior to 6b1859dba01c7 (included in 3.5 and
* -stable), had an issue with the state machine
* and wouldn't clear the STA_INS/DEL flag directly.
*/
tx.modes = ADJ_STATUS;
tx.status = STA_PLL;
ret = adjtimex(&tx);
/* Clear maxerror, as it can cause UNSYNC to be set */
tx.modes = ADJ_MAXERROR;
tx.maxerror = 0;
ret = adjtimex(&tx);
/* Clear the status */
tx.modes = ADJ_STATUS;
tx.status = 0;
ret = adjtimex(&tx);
return ret;
}
/* Make sure we cleanup on ctrl-c */
void handler(int unused)
{
clear_time_state();
exit(0);
}
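/*
 * Timer handler: fires at the scheduled leap time and checks that the
 * timer did not expire early and that the kernel reports TIME_WAIT.
 */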
void sigalarm(int signo)
{
struct timex tx;
int ret;
tx.modes = 0;
ret = adjtimex(&tx);
if (tx.time.tv_sec < next_leap) {
printf("Error: Early timer expiration! (Should be %ld)\n", next_leap);
error_found = 1;
printf("adjtimex: %10ld sec + %6ld us (%i)\t%s\n",
tx.time.tv_sec,
tx.time.tv_usec,
tx.tai,
time_state_str(ret));
}
if (ret != TIME_WAIT) {
printf("Error: Timer seeing incorrect NTP state? (Should be TIME_WAIT)\n");
error_found = 1;
printf("adjtimex: %10ld sec + %6ld us (%i)\t%s\n",
tx.time.tv_sec,
tx.time.tv_usec,
tx.tai,
time_state_str(ret));
}
}
/* Test for known hrtimer failure */
void test_hrtimer_failure(void)
{
struct timespec now, target;
clock_gettime(CLOCK_REALTIME, &now);
target = timespec_add(now, NSEC_PER_SEC/2);
clock_nanosleep(CLOCK_REALTIME, TIMER_ABSTIME, &target, NULL);
clock_gettime(CLOCK_REALTIME, &now);
if (!in_order(target, now)) {
printf("ERROR: hrtimer early expiration failure observed.\n");
error_found = 1;
}
}
int main(int argc, char **argv)
{
timer_t tm1;
struct itimerspec its1;
struct sigevent se;
struct sigaction act;
int signum = SIGRTMAX;
int settime = 1;
int tai_time = 0;
int insert = 1;
int iterations = 10;
int opt;
/* Process arguments */
while ((opt = getopt(argc, argv, "sti:")) != -1) {
switch (opt) {
case 'w':
printf("Only setting leap-flag, not changing time. It could take up to a day for leap to trigger.\n");
settime = 0;
break;
case 'i':
iterations = atoi(optarg);
break;
case 't':
tai_time = 1;
break;
default:
printf("Usage: %s [-w] [-i <iterations>]\n", argv[0]);
printf(" -w: Set flag and wait for leap second each iteration");
printf(" (default sets time to right before leapsecond)\n");
printf(" -i: Number of iterations (-1 = infinite, default is 10)\n");
printf(" -t: Print TAI time\n");
exit(-1);
}
}
/* Make sure TAI support is present if -t was used */
if (tai_time) {
struct timespec ts;
if (clock_gettime(CLOCK_TAI, &ts)) {
printf("System doesn't support CLOCK_TAI\n");
ksft_exit_fail();
}
}
signal(SIGINT, handler);
	signal(SIGTERM, handler);	/* SIGKILL cannot be caught */
/* Set up timer signal handler: */
sigfillset(&act.sa_mask);
act.sa_flags = 0;
act.sa_handler = sigalarm;
sigaction(signum, &act, NULL);
if (iterations < 0)
printf("This runs continuously. Press ctrl-c to stop\n");
else
printf("Running for %i iterations. Press ctrl-c to stop\n", iterations);
printf("\n");
while (1) {
int ret;
struct timespec ts;
struct timex tx;
time_t now;
/* Get the current time */
clock_gettime(CLOCK_REALTIME, &ts);
/* Calculate the next possible leap second 23:59:60 GMT */
next_leap = ts.tv_sec;
next_leap += 86400 - (next_leap % 86400);
if (settime) {
struct timeval tv;
tv.tv_sec = next_leap - 10;
tv.tv_usec = 0;
settimeofday(&tv, NULL);
printf("Setting time to %s", ctime(&tv.tv_sec));
}
/* Reset NTP time state */
clear_time_state();
/* Set the leap second insert flag */
tx.modes = ADJ_STATUS;
if (insert)
tx.status = STA_INS;
else
tx.status = STA_DEL;
ret = adjtimex(&tx);
if (ret < 0) {
printf("Error: Problem setting STA_INS/STA_DEL!: %s\n",
time_state_str(ret));
return ksft_exit_fail();
}
/* Validate STA_INS was set */
tx.modes = 0;
ret = adjtimex(&tx);
if (tx.status != STA_INS && tx.status != STA_DEL) {
printf("Error: STA_INS/STA_DEL not set!: %s\n",
time_state_str(ret));
return ksft_exit_fail();
}
if (tai_time) {
printf("Using TAI time,"
" no inconsistencies should be seen!\n");
}
printf("Scheduling leap second for %s", ctime(&next_leap));
/* Set up timer */
printf("Setting timer for %ld - %s", next_leap, ctime(&next_leap));
memset(&se, 0, sizeof(se));
se.sigev_notify = SIGEV_SIGNAL;
se.sigev_signo = signum;
se.sigev_value.sival_int = 0;
if (timer_create(CLOCK_REALTIME, &se, &tm1) == -1) {
printf("Error: timer_create failed\n");
return ksft_exit_fail();
}
its1.it_value.tv_sec = next_leap;
its1.it_value.tv_nsec = 0;
its1.it_interval.tv_sec = 0;
its1.it_interval.tv_nsec = 0;
timer_settime(tm1, TIMER_ABSTIME, &its1, NULL);
/* Wake up 3 seconds before leap */
ts.tv_sec = next_leap - 3;
ts.tv_nsec = 0;
while (clock_nanosleep(CLOCK_REALTIME, TIMER_ABSTIME, &ts, NULL))
printf("Something woke us up, returning to sleep\n");
/* Validate STA_INS is still set */
tx.modes = 0;
ret = adjtimex(&tx);
if (tx.status != STA_INS && tx.status != STA_DEL) {
printf("Something cleared STA_INS/STA_DEL, setting it again.\n");
tx.modes = ADJ_STATUS;
if (insert)
tx.status = STA_INS;
else
tx.status = STA_DEL;
ret = adjtimex(&tx);
}
/* Check adjtimex output every half second */
now = tx.time.tv_sec;
while (now < next_leap + 2) {
char buf[26];
struct timespec tai;
int ret;
tx.modes = 0;
ret = adjtimex(&tx);
if (tai_time) {
clock_gettime(CLOCK_TAI, &tai);
printf("%ld sec, %9ld ns\t%s\n",
tai.tv_sec,
tai.tv_nsec,
time_state_str(ret));
} else {
ctime_r(&tx.time.tv_sec, buf);
				buf[strlen(buf)-1] = 0;	/* remove trailing \n */
printf("%s + %6ld us (%i)\t%s\n",
buf,
tx.time.tv_usec,
tx.tai,
time_state_str(ret));
}
now = tx.time.tv_sec;
/* Sleep for another half second */
ts.tv_sec = 0;
ts.tv_nsec = NSEC_PER_SEC / 2;
clock_nanosleep(CLOCK_MONOTONIC, 0, &ts, NULL);
}
/* Switch to using other mode */
insert = !insert;
/* Note if kernel has known hrtimer failure */
test_hrtimer_failure();
printf("Leap complete\n");
if (error_found) {
printf("Errors observed\n");
clear_time_state();
return ksft_exit_fail();
}
printf("\n");
if ((iterations != -1) && !(--iterations))
break;
}
clear_time_state();
return ksft_exit_pass();
}
| linux-master | tools/testing/selftests/timers/leap-a-day.c |
/* ADJ_FREQ Skew change test
* by: john stultz ([email protected])
* (C) Copyright IBM 2012
* Licensed under the GPLv2
*
* NOTE: This is a meta-test which cranks the ADJ_FREQ knob and
* then uses other tests to detect problems. Thus this test requires
* that the raw_skew, inconsistency-check and nanosleep tests be
* present in the same directory it is run from.
*
* To build:
* $ gcc change_skew.c -o change_skew -lrt
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <sys/timex.h>
#include <time.h>
#include "../kselftest.h"
#define NSEC_PER_SEC 1000000000LL
int change_skew_test(int ppm)
{
struct timex tx;
int ret;
tx.modes = ADJ_FREQUENCY;
tx.freq = ppm << 16;
ret = adjtimex(&tx);
if (ret < 0) {
printf("Error adjusting freq\n");
return ret;
}
ret = system("./raw_skew");
ret |= system("./inconsistency-check");
ret |= system("./nanosleep");
return ret;
}
int main(int argc, char **argv)
{
struct timex tx;
int i, ret;
int ppm[5] = {0, 250, 500, -250, -500};
/* Kill ntpd */
ret = system("killall -9 ntpd");
/* Make sure there's no offset adjustment going on */
tx.modes = ADJ_OFFSET;
tx.offset = 0;
ret = adjtimex(&tx);
if (ret < 0) {
printf("Maybe you're not running as root?\n");
return -1;
}
for (i = 0; i < 5; i++) {
printf("Using %i ppm adjustment\n", ppm[i]);
ret = change_skew_test(ppm[i]);
if (ret)
break;
}
/* Set things back */
	tx.modes = ADJ_FREQUENCY;
	tx.freq = 0;	/* ADJ_FREQUENCY reads tx.freq, not tx.offset */
adjtimex(&tx);
if (ret) {
printf("[FAIL]");
return ksft_exit_fail();
}
printf("[OK]");
return ksft_exit_pass();
}
| linux-master | tools/testing/selftests/timers/change_skew.c |
/* Make sure timers don't return early
* by: john stultz ([email protected])
* John Stultz ([email protected])
* (C) Copyright IBM 2012
* (C) Copyright Linaro 2013 2015
* Licensed under the GPLv2
*
* To build:
* $ gcc nanosleep.c -o nanosleep -lrt
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <sys/time.h>
#include <sys/timex.h>
#include <string.h>
#include <signal.h>
#include "../kselftest.h"
#define NSEC_PER_SEC 1000000000ULL
#define CLOCK_REALTIME 0
#define CLOCK_MONOTONIC 1
#define CLOCK_PROCESS_CPUTIME_ID 2
#define CLOCK_THREAD_CPUTIME_ID 3
#define CLOCK_MONOTONIC_RAW 4
#define CLOCK_REALTIME_COARSE 5
#define CLOCK_MONOTONIC_COARSE 6
#define CLOCK_BOOTTIME 7
#define CLOCK_REALTIME_ALARM 8
#define CLOCK_BOOTTIME_ALARM 9
#define CLOCK_HWSPECIFIC 10
#define CLOCK_TAI 11
#define NR_CLOCKIDS 12
#define UNSUPPORTED 0xf00f
char *clockstring(int clockid)
{
switch (clockid) {
case CLOCK_REALTIME:
return "CLOCK_REALTIME";
case CLOCK_MONOTONIC:
return "CLOCK_MONOTONIC";
case CLOCK_PROCESS_CPUTIME_ID:
return "CLOCK_PROCESS_CPUTIME_ID";
case CLOCK_THREAD_CPUTIME_ID:
return "CLOCK_THREAD_CPUTIME_ID";
case CLOCK_MONOTONIC_RAW:
return "CLOCK_MONOTONIC_RAW";
case CLOCK_REALTIME_COARSE:
return "CLOCK_REALTIME_COARSE";
case CLOCK_MONOTONIC_COARSE:
return "CLOCK_MONOTONIC_COARSE";
case CLOCK_BOOTTIME:
return "CLOCK_BOOTTIME";
case CLOCK_REALTIME_ALARM:
return "CLOCK_REALTIME_ALARM";
case CLOCK_BOOTTIME_ALARM:
return "CLOCK_BOOTTIME_ALARM";
case CLOCK_TAI:
return "CLOCK_TAI";
};
return "UNKNOWN_CLOCKID";
}
/* returns 1 if a <= b, 0 otherwise */
static inline int in_order(struct timespec a, struct timespec b)
{
if (a.tv_sec < b.tv_sec)
return 1;
if (a.tv_sec > b.tv_sec)
return 0;
if (a.tv_nsec > b.tv_nsec)
return 0;
return 1;
}
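/* Return 'ts' advanced by 'ns' nanoseconds, normalizing tv_nsec. */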
struct timespec timespec_add(struct timespec ts, unsigned long long ns)
{
ts.tv_nsec += ns;
while (ts.tv_nsec >= NSEC_PER_SEC) {
ts.tv_nsec -= NSEC_PER_SEC;
ts.tv_sec++;
}
return ts;
}
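/* Sleep for 'ns' nanoseconds on 'clockid', first via an absolute
 * clock_nanosleep() and then via a relative one, checking that we never
 * wake up before the target time. Returns 0 on success, -1 on an early
 * wakeup, and UNSUPPORTED if the clock cannot be slept on.
 */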
int nanosleep_test(int clockid, long long ns)
{
struct timespec now, target, rel;
/* First check abs time */
if (clock_gettime(clockid, &now))
return UNSUPPORTED;
target = timespec_add(now, ns);
if (clock_nanosleep(clockid, TIMER_ABSTIME, &target, NULL))
return UNSUPPORTED;
clock_gettime(clockid, &now);
if (!in_order(target, now))
return -1;
/* Second check reltime */
clock_gettime(clockid, &now);
rel.tv_sec = 0;
rel.tv_nsec = 0;
rel = timespec_add(rel, ns);
target = timespec_add(now, ns);
clock_nanosleep(clockid, 0, &rel, NULL);
clock_gettime(clockid, &now);
if (!in_order(target, now))
return -1;
return 0;
}
int main(int argc, char **argv)
{
long long length;
int clockid, ret;
ksft_print_header();
ksft_set_plan(NR_CLOCKIDS);
for (clockid = CLOCK_REALTIME; clockid < NR_CLOCKIDS; clockid++) {
/* Skip cputime clockids since nanosleep won't increment cputime */
if (clockid == CLOCK_PROCESS_CPUTIME_ID ||
clockid == CLOCK_THREAD_CPUTIME_ID ||
clockid == CLOCK_HWSPECIFIC) {
ksft_test_result_skip("%-31s\n", clockstring(clockid));
continue;
}
fflush(stdout);
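/* Try sleep lengths from 10ns up to ~1s, growing by 100x each pass */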
length = 10;
while (length <= (NSEC_PER_SEC * 10)) {
ret = nanosleep_test(clockid, length);
if (ret == UNSUPPORTED) {
ksft_test_result_skip("%-31s\n", clockstring(clockid));
goto next;
}
if (ret < 0) {
ksft_test_result_fail("%-31s\n", clockstring(clockid));
ksft_exit_fail();
}
length *= 100;
}
ksft_test_result_pass("%-31s\n", clockstring(clockid));
next:
ret = 0;
}
ksft_exit_pass();
}
| linux-master | tools/testing/selftests/timers/nanosleep.c |
/* threadtest.c
* by: john stultz ([email protected])
* (C) Copyright IBM 2004, 2005, 2006, 2012
* Licensed under the GPLv2
*
* To build:
* $ gcc threadtest.c -o threadtest -lrt
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <stdio.h>
#include <unistd.h>
#include <stdlib.h>
#include <sys/time.h>
#include <pthread.h>
#include "../kselftest.h"
/* serializes shared list access */
pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
/* serializes console output */
pthread_mutex_t print_lock = PTHREAD_MUTEX_INITIALIZER;
#define MAX_THREADS 128
#define LISTSIZE 128
int done = 0;
struct timespec global_list[LISTSIZE];
int listcount = 0;
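/* Scan 'list' for any timestamp that went backwards relative to its
 * predecessor; on detection, flag all threads to stop and dump the list.
 */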
void checklist(struct timespec *list, int size)
{
int i, j;
struct timespec *a, *b;
/* scan the list */
for (i = 0; i < size-1; i++) {
a = &list[i];
b = &list[i+1];
/* look for any time inconsistencies */
if ((b->tv_sec < a->tv_sec) ||
((b->tv_sec == a->tv_sec) && (b->tv_nsec < a->tv_nsec))) {
/* flag other threads */
done = 1;
/*serialize printing to avoid junky output*/
pthread_mutex_lock(&print_lock);
/* dump the list */
printf("\n");
for (j = 0; j < size; j++) {
if (j == i)
printf("---------------\n");
printf("%lu:%lu\n", list[j].tv_sec, list[j].tv_nsec);
if (j == i+1)
printf("---------------\n");
}
printf("[FAILED]\n");
pthread_mutex_unlock(&print_lock);
}
}
}
/* The shared thread shares a global list
* that each thread fills while holding the lock.
* This stresses clock synchronization across cpus.
*/
void *shared_thread(void *arg)
{
while (!done) {
/* protect the list */
pthread_mutex_lock(&list_lock);
/* see if we're ready to check the list */
if (listcount >= LISTSIZE) {
checklist(global_list, LISTSIZE);
listcount = 0;
}
clock_gettime(CLOCK_MONOTONIC, &global_list[listcount++]);
pthread_mutex_unlock(&list_lock);
}
return NULL;
}
/* Each independent thread fills in its own
* list. This stresses clock_gettime() lock contention.
*/
void *independent_thread(void *arg)
{
struct timespec my_list[LISTSIZE];
int count;
while (!done) {
/* fill the list */
for (count = 0; count < LISTSIZE; count++)
clock_gettime(CLOCK_MONOTONIC, &my_list[count]);
checklist(my_list, LISTSIZE);
}
return NULL;
}
#define DEFAULT_THREAD_COUNT 8
#define DEFAULT_RUNTIME 30
int main(int argc, char **argv)
{
int thread_count, i;
time_t start, now, runtime;
char buf[255];
pthread_t pth[MAX_THREADS];
int opt;
void *tret;
int ret = 0;
void *(*thread)(void *) = shared_thread;
thread_count = DEFAULT_THREAD_COUNT;
runtime = DEFAULT_RUNTIME;
/* Process arguments */
while ((opt = getopt(argc, argv, "t:n:i")) != -1) {
switch (opt) {
case 't':
runtime = atoi(optarg);
break;
case 'n':
thread_count = atoi(optarg);
break;
case 'i':
thread = independent_thread;
printf("using independent threads\n");
break;
default:
printf("Usage: %s [-t <secs>] [-n <numthreads>] [-i]\n", argv[0]);
printf(" -t: time to run\n");
printf(" -n: number of threads\n");
printf(" -i: use independent threads\n");
return -1;
}
}
if (thread_count > MAX_THREADS)
thread_count = MAX_THREADS;
setbuf(stdout, NULL);
start = time(0);
strftime(buf, 255, "%a, %d %b %Y %T %z", localtime(&start));
printf("%s\n", buf);
printf("Testing consistency with %i threads for %ld seconds: ", thread_count, runtime);
fflush(stdout);
/* spawn */
for (i = 0; i < thread_count; i++)
pthread_create(&pth[i], 0, thread, 0);
while (time(&now) < start + runtime) {
sleep(1);
if (done) {
ret = 1;
strftime(buf, 255, "%a, %d %b %Y %T %z", localtime(&now));
printf("%s\n", buf);
goto out;
}
}
printf("[OK]\n");
done = 1;
out:
/* wait */
for (i = 0; i < thread_count; i++)
pthread_join(pth[i], &tret);
/* die */
if (ret)
ksft_exit_fail();
return ksft_exit_pass();
}
| linux-master | tools/testing/selftests/timers/threadtest.c |
/* Set tz value
* by: John Stultz <[email protected]>
* (C) Copyright Linaro 2016
* Licensed under the GPLv2
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <sys/time.h>
#include <sys/timex.h>
#include <string.h>
#include <signal.h>
#include <unistd.h>
#include "../kselftest.h"
int set_tz(int min, int dst)
{
struct timezone tz;
tz.tz_minuteswest = min;
tz.tz_dsttime = dst;
return settimeofday(0, &tz);
}
int get_tz_min(void)
{
struct timezone tz;
struct timeval tv;
memset(&tz, 0, sizeof(tz));
gettimeofday(&tv, &tz);
return tz.tz_minuteswest;
}
int get_tz_dst(void)
{
struct timezone tz;
struct timeval tv;
memset(&tz, 0, sizeof(tz));
gettimeofday(&tv, &tz);
return tz.tz_dsttime;
}
int main(int argc, char **argv)
{
int i, ret;
int min, dst;
min = get_tz_min();
dst = get_tz_dst();
printf("tz_minuteswest started at %i, dst at %i\n", min, dst);
printf("Checking tz_minuteswest can be properly set: ");
fflush(stdout);
for (i = -15*60; i < 15*60; i += 30) {
ret = set_tz(i, dst);
ret = get_tz_min();
if (ret != i) {
printf("[FAILED] expected: %i got %i\n", i, ret);
goto err;
}
}
printf("[OK]\n");
printf("Checking invalid tz_minuteswest values are caught: ");
fflush(stdout);
if (!set_tz(-15*60-1, dst)) {
printf("[FAILED] %i didn't return failure!\n", -15*60-1);
goto err;
}
if (!set_tz(15*60+1, dst)) {
printf("[FAILED] %i didn't return failure!\n", 15*60+1);
goto err;
}
if (!set_tz(-24*60, dst)) {
printf("[FAILED] %i didn't return failure!\n", -24*60);
goto err;
}
if (!set_tz(24*60, dst)) {
printf("[FAILED] %i didn't return failure!\n", 24*60);
goto err;
}
printf("[OK]\n");
set_tz(min, dst);
return ksft_exit_pass();
err:
set_tz(min, dst);
return ksft_exit_fail();
}
| linux-master | tools/testing/selftests/timers/set-tz.c |
/* Time bounds setting test
* by: john stultz ([email protected])
* (C) Copyright IBM 2012
* Licensed under the GPLv2
*
* NOTE: This is a meta-test which sets the time to edge cases then
* uses other tests to detect problems. Thus this test requires that
* the inconsistency-check and nanosleep tests be present in the same
* directory it is run from.
*
* To build:
* $ gcc set-2038.c -o set-2038 -lrt
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <time.h>
#include <sys/time.h>
#include "../kselftest.h"
#define NSEC_PER_SEC 1000000000LL
#define KTIME_MAX ((long long)~((unsigned long long)1 << 63))
#define KTIME_SEC_MAX (KTIME_MAX / NSEC_PER_SEC)
#define YEAR_1901 (-0x7fffffffL)
#define YEAR_1970 1
#define YEAR_2038 0x7fffffffL /*overflows 32bit time_t */
#define YEAR_2262 KTIME_SEC_MAX /*overflows 64bit ktime_t */
#define YEAR_MAX ((long long)((1ULL<<63)-1)) /*overflows 64bit time_t */
int is32bits(void)
{
return (sizeof(long) == 4);
}
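/* Set CLOCK_REALTIME to 'time' (in seconds) via settimeofday() and
 * report the result.
 */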
int settime(long long time)
{
struct timeval now;
int ret;
now.tv_sec = (time_t)time;
now.tv_usec = 0;
ret = settimeofday(&now, NULL);
printf("Setting time to 0x%lx: %d\n", (long)time, ret);
return ret;
}
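/* Run the companion selftests; returns nonzero if any of them fail. */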
int do_tests(void)
{
int ret;
ret = system("date");
ret = system("./inconsistency-check -c 0 -t 20");
ret |= system("./nanosleep");
ret |= system("./nsleep-lat");
return ret;
}
int main(int argc, char *argv[])
{
int ret = 0;
int opt, dangerous = 0;
time_t start;
/* Process arguments */
while ((opt = getopt(argc, argv, "d")) != -1) {
switch (opt) {
case 'd':
dangerous = 1;
}
}
start = time(0);
/* First test that crazy values don't work */
if (!settime(YEAR_1901)) {
ret = -1;
goto out;
}
if (!settime(YEAR_MAX)) {
ret = -1;
goto out;
}
if (!is32bits() && !settime(YEAR_2262)) {
ret = -1;
goto out;
}
/* Now test behavior near edges */
settime(YEAR_1970);
ret = do_tests();
if (ret)
goto out;
settime(YEAR_2038 - 600);
ret = do_tests();
if (ret)
goto out;
/* The rest of the tests can blowup on 32bit systems */
if (is32bits() && !dangerous)
goto out;
/* Test rollover behavior 32bit edge */
settime(YEAR_2038 - 10);
ret = do_tests();
if (ret)
goto out;
settime(YEAR_2262 - 600);
ret = do_tests();
out:
/* restore clock */
settime(start);
if (ret)
return ksft_exit_fail();
return ksft_exit_pass();
}
| linux-master | tools/testing/selftests/timers/set-2038.c |
/* Clocksource change test
* by: john stultz ([email protected])
* (C) Copyright IBM 2012
* Licensed under the GPLv2
*
* NOTE: This is a meta-test which quickly changes the clocksource and
* then uses other tests to detect problems. Thus this test requires
* that the inconsistency-check and nanosleep tests be present in the
* same directory it is run from.
*
* To build:
* $ gcc clocksource-switch.c -o clocksource-switch -lrt
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/timex.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <time.h>
#include <unistd.h>
#include "../kselftest.h"
int get_clocksources(char list[][30])
{
int fd, i;
size_t size;
char buf[512];
char *head, *tmp;
fd = open("/sys/devices/system/clocksource/clocksource0/available_clocksource", O_RDONLY);
size = read(fd, buf, 512);
close(fd);
for (i = 0; i < 10; i++)
list[i][0] = '\0';
head = buf;
i = 0;
while (head - buf < size) {
/* Find the next space */
for (tmp = head; *tmp != ' '; tmp++) {
if (*tmp == '\n')
break;
if (*tmp == '\0')
break;
}
*tmp = '\0';
strcpy(list[i], head);
head = tmp + 1;
i++;
}
return i-1;
}
int get_cur_clocksource(char *buf, size_t size)
{
int fd;
fd = open("/sys/devices/system/clocksource/clocksource0/current_clocksource", O_RDONLY);
size = read(fd, buf, size);
close(fd);
return 0;
}
int change_clocksource(char *clocksource)
{
int fd;
ssize_t size;
fd = open("/sys/devices/system/clocksource/clocksource0/current_clocksource", O_WRONLY);
if (fd < 0)
return -1;
size = write(fd, clocksource, strlen(clocksource));
if (size < 0)
return -1;
close(fd);
return 0;
}
int run_tests(int secs)
{
int ret;
char buf[255];
sprintf(buf, "./inconsistency-check -t %i", secs);
ret = system(buf);
if (WIFEXITED(ret) && WEXITSTATUS(ret))
return WEXITSTATUS(ret);
ret = system("./nanosleep");
return WIFEXITED(ret) ? WEXITSTATUS(ret) : 0;
}
char clocksource_list[10][30];
int main(int argc, char **argv)
{
char orig_clk[512];
int count, i, status, opt;
int do_sanity_check = 1;
int runtime = 60;
pid_t pid;
/* Process arguments */
while ((opt = getopt(argc, argv, "st:")) != -1) {
switch (opt) {
case 's':
do_sanity_check = 0;
break;
case 't':
runtime = atoi(optarg);
break;
default:
printf("Usage: %s [-s] [-t <secs>]\n", argv[0]);
printf(" -s: skip sanity checks\n");
printf(" -t: Number of seconds to run\n");
exit(-1);
}
}
get_cur_clocksource(orig_clk, 512);
count = get_clocksources(clocksource_list);
if (change_clocksource(clocksource_list[0])) {
printf("Error: You probably need to run this as root\n");
return -1;
}
/* Check everything is sane before we start switching asynchronously */
if (do_sanity_check) {
for (i = 0; i < count; i++) {
printf("Validating clocksource %s\n",
clocksource_list[i]);
if (change_clocksource(clocksource_list[i])) {
status = -1;
goto out;
}
if (run_tests(5)) {
status = -1;
goto out;
}
}
}
printf("Running Asynchronous Switching Tests...\n");
pid = fork();
if (!pid)
return run_tests(runtime);
while (pid != waitpid(pid, &status, WNOHANG))
for (i = 0; i < count; i++)
if (change_clocksource(clocksource_list[i])) {
status = -1;
goto out;
}
out:
change_clocksource(orig_clk);
/* Print at the end to not mix output with child process */
ksft_print_header();
ksft_set_plan(1);
ksft_test_result(!status, "clocksource-switch\n");
ksft_exit(!status);
}
| linux-master | tools/testing/selftests/timers/clocksource-switch.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Real Time Clock Periodic Interrupt test program
*
* Since commit 6610e0893b8bc ("RTC: Rework RTC code to use timerqueue for
* events"), PIE are completely handled using hrtimers, without actually using
* any underlying hardware RTC.
*
*/
#include <stdio.h>
#include <linux/rtc.h>
#include <sys/ioctl.h>
#include <sys/time.h>
#include <sys/types.h>
#include <fcntl.h>
#include <unistd.h>
#include <stdlib.h>
#include <errno.h>
#include "../kselftest.h"
/*
* This expects the new RTC class driver framework, working with
* clocks that will often not be clones of what the PC-AT had.
* Use the command line to specify another RTC if you need one.
*/
static const char default_rtc[] = "/dev/rtc0";
int main(int argc, char **argv)
{
int i, fd, retval, irqcount = 0;
unsigned long tmp, data, old_pie_rate;
const char *rtc = default_rtc;
struct timeval start, end, diff;
switch (argc) {
case 2:
rtc = argv[1];
break;
case 1:
fd = open(default_rtc, O_RDONLY);
if (fd == -1) {
printf("Default RTC %s does not exist. Test Skipped!\n", default_rtc);
exit(KSFT_SKIP);
}
close(fd);
break;
default:
fprintf(stderr, "usage: rtctest [rtcdev] [d]\n");
return 1;
}
fd = open(rtc, O_RDONLY);
if (fd == -1) {
perror(rtc);
exit(errno);
}
/* Read periodic IRQ rate */
retval = ioctl(fd, RTC_IRQP_READ, &old_pie_rate);
if (retval == -1) {
/* not all RTCs support periodic IRQs */
if (errno == EINVAL) {
fprintf(stderr, "\nNo periodic IRQ support\n");
goto done;
}
perror("RTC_IRQP_READ ioctl");
exit(errno);
}
fprintf(stderr, "\nPeriodic IRQ rate is %ldHz.\n", old_pie_rate);
fprintf(stderr, "Counting 20 interrupts at:");
fflush(stderr);
/* The frequencies 128Hz, 256Hz, ... 8192Hz are only allowed for root. */
for (tmp=2; tmp<=64; tmp*=2) {
retval = ioctl(fd, RTC_IRQP_SET, tmp);
if (retval == -1) {
/* not all RTCs can change their periodic IRQ rate */
if (errno == EINVAL) {
fprintf(stderr,
"\n...Periodic IRQ rate is fixed\n");
goto done;
}
perror("RTC_IRQP_SET ioctl");
exit(errno);
}
fprintf(stderr, "\n%ldHz:\t", tmp);
fflush(stderr);
/* Enable periodic interrupts */
retval = ioctl(fd, RTC_PIE_ON, 0);
if (retval == -1) {
perror("RTC_PIE_ON ioctl");
exit(errno);
}
for (i=1; i<21; i++) {
gettimeofday(&start, NULL);
/* This blocks */
retval = read(fd, &data, sizeof(unsigned long));
if (retval == -1) {
perror("read");
exit(errno);
}
gettimeofday(&end, NULL);
timersub(&end, &start, &diff);
if (diff.tv_sec > 0 ||
diff.tv_usec > ((1000000L / tmp) * 1.10)) {
fprintf(stderr, "\nPIE delta error: %ld.%06ld should be close to 0.%06ld\n",
diff.tv_sec, diff.tv_usec,
(1000000L / tmp));
fflush(stdout);
exit(-1);
}
fprintf(stderr, " %d",i);
fflush(stderr);
irqcount++;
}
/* Disable periodic interrupts */
retval = ioctl(fd, RTC_PIE_OFF, 0);
if (retval == -1) {
perror("RTC_PIE_OFF ioctl");
exit(errno);
}
}
done:
ioctl(fd, RTC_IRQP_SET, old_pie_rate);
fprintf(stderr, "\n\n\t\t\t *** Test complete ***\n");
close(fd);
return 0;
}
| linux-master | tools/testing/selftests/timers/rtcpie.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (c) 2020 Bernd Edlinger <[email protected]>
* All rights reserved.
*
* Check whether /proc/$pid/mem can be accessed without causing deadlocks
* when de_thread is blocked with ->cred_guard_mutex held.
*/
#include "../kselftest_harness.h"
#include <stdio.h>
#include <fcntl.h>
#include <pthread.h>
#include <signal.h>
#include <unistd.h>
#include <sys/ptrace.h>
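/* Tracee-side thread: request tracing so that the leader's subsequent
 * execve() has to wait for this thread in de_thread().
 */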
static void *thread(void *arg)
{
ptrace(PTRACE_TRACEME, 0, 0L, 0L);
return NULL;
}
TEST(vmaccess)
{
int f, pid = fork();
char mm[64];
if (!pid) {
pthread_t pt;
pthread_create(&pt, NULL, thread, NULL);
pthread_join(pt, NULL);
execlp("true", "true", NULL);
}
sleep(1);
sprintf(mm, "/proc/%d/mem", pid);
f = open(mm, O_RDONLY);
ASSERT_GE(f, 0);
close(f);
f = kill(pid, SIGCONT);
ASSERT_EQ(f, 0);
}
TEST(attach)
{
int s, k, pid = fork();
if (!pid) {
pthread_t pt;
pthread_create(&pt, NULL, thread, NULL);
pthread_join(pt, NULL);
execlp("sleep", "sleep", "2", NULL);
}
sleep(1);
k = ptrace(PTRACE_ATTACH, pid, 0L, 0L);
ASSERT_EQ(errno, EAGAIN);
ASSERT_EQ(k, -1);
k = waitpid(-1, &s, WNOHANG);
ASSERT_NE(k, -1);
ASSERT_NE(k, 0);
ASSERT_NE(k, pid);
ASSERT_EQ(WIFEXITED(s), 1);
ASSERT_EQ(WEXITSTATUS(s), 0);
sleep(1);
k = ptrace(PTRACE_ATTACH, pid, 0L, 0L);
ASSERT_EQ(k, 0);
k = waitpid(-1, &s, 0);
ASSERT_EQ(k, pid);
ASSERT_EQ(WIFSTOPPED(s), 1);
ASSERT_EQ(WSTOPSIG(s), SIGSTOP);
k = ptrace(PTRACE_DETACH, pid, 0L, 0L);
ASSERT_EQ(k, 0);
k = waitpid(-1, &s, 0);
ASSERT_EQ(k, pid);
ASSERT_EQ(WIFEXITED(s), 1);
ASSERT_EQ(WEXITSTATUS(s), 0);
k = waitpid(-1, NULL, 0);
ASSERT_EQ(k, -1);
ASSERT_EQ(errno, ECHILD);
}
TEST_HARNESS_MAIN
| linux-master | tools/testing/selftests/ptrace/vmaccess.c |
// SPDX-License-Identifier: GPL-2.0
#define _GNU_SOURCE
#include "../kselftest_harness.h"
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <sys/wait.h>
#include <sys/syscall.h>
#include <sys/prctl.h>
#include "linux/ptrace.h"
static int sys_ptrace(int request, pid_t pid, void *addr, void *data)
{
return syscall(SYS_ptrace, request, pid, addr, data);
}
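/*
 * Attach to a stopped child and check that the syscall-user-dispatch
 * configuration reads back as "off" by default, and that a config we
 * set via ptrace is returned unchanged.
 */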
TEST(get_set_sud)
{
struct ptrace_sud_config config;
pid_t child;
int ret = 0;
int status;
child = fork();
ASSERT_GE(child, 0);
if (child == 0) {
ASSERT_EQ(0, sys_ptrace(PTRACE_TRACEME, 0, 0, 0)) {
TH_LOG("PTRACE_TRACEME: %m");
}
kill(getpid(), SIGSTOP);
_exit(1);
}
waitpid(child, &status, 0);
memset(&config, 0xff, sizeof(config));
config.mode = PR_SYS_DISPATCH_ON;
ret = sys_ptrace(PTRACE_GET_SYSCALL_USER_DISPATCH_CONFIG, child,
(void *)sizeof(config), &config);
ASSERT_EQ(ret, 0);
ASSERT_EQ(config.mode, PR_SYS_DISPATCH_OFF);
ASSERT_EQ(config.selector, 0);
ASSERT_EQ(config.offset, 0);
ASSERT_EQ(config.len, 0);
config.mode = PR_SYS_DISPATCH_ON;
config.selector = 0;
config.offset = 0x400000;
config.len = 0x1000;
ret = sys_ptrace(PTRACE_SET_SYSCALL_USER_DISPATCH_CONFIG, child,
(void *)sizeof(config), &config);
ASSERT_EQ(ret, 0);
memset(&config, 1, sizeof(config));
ret = sys_ptrace(PTRACE_GET_SYSCALL_USER_DISPATCH_CONFIG, child,
(void *)sizeof(config), &config);
ASSERT_EQ(ret, 0);
ASSERT_EQ(config.mode, PR_SYS_DISPATCH_ON);
ASSERT_EQ(config.selector, 0);
ASSERT_EQ(config.offset, 0x400000);
ASSERT_EQ(config.len, 0x1000);
kill(child, SIGKILL);
}
TEST_HARNESS_MAIN
| linux-master | tools/testing/selftests/ptrace/get_set_sud.c |
// SPDX-License-Identifier: GPL-2.0
#define _GNU_SOURCE
#include <stdio.h>
#include <signal.h>
#include <unistd.h>
#include <errno.h>
#include <linux/types.h>
#include <sys/wait.h>
#include <sys/syscall.h>
#include <sys/user.h>
#include <sys/mman.h>
#include "linux/ptrace.h"
static int sys_rt_sigqueueinfo(pid_t tgid, int sig, siginfo_t *uinfo)
{
return syscall(SYS_rt_sigqueueinfo, tgid, sig, uinfo);
}
static int sys_rt_tgsigqueueinfo(pid_t tgid, pid_t tid,
int sig, siginfo_t *uinfo)
{
return syscall(SYS_rt_tgsigqueueinfo, tgid, tid, sig, uinfo);
}
static int sys_ptrace(int request, pid_t pid, void *addr, void *data)
{
return syscall(SYS_ptrace, request, pid, addr, data);
}
#define SIGNR 10
#define TEST_SICODE_PRIV -1
#define TEST_SICODE_SHARE -2
#ifndef PAGE_SIZE
#define PAGE_SIZE sysconf(_SC_PAGESIZE)
#endif
#define err(fmt, ...) \
fprintf(stderr, \
"Error (%s:%d): " fmt, \
__FILE__, __LINE__, ##__VA_ARGS__)
static int check_error_paths(pid_t child)
{
struct ptrace_peeksiginfo_args arg;
int ret, exit_code = -1;
void *addr_rw, *addr_ro;
/*
* Allocate two contiguous pages. The first one is for read-write,
* another is for read-only.
*/
addr_rw = mmap(NULL, 2 * PAGE_SIZE, PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
if (addr_rw == MAP_FAILED) {
err("mmap() failed: %m\n");
return 1;
}
addr_ro = mmap(addr_rw + PAGE_SIZE, PAGE_SIZE, PROT_READ,
MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
if (addr_ro == MAP_FAILED) {
err("mmap() failed: %m\n");
goto out;
}
arg.nr = SIGNR;
arg.off = 0;
/* Unsupported flags */
arg.flags = ~0;
ret = sys_ptrace(PTRACE_PEEKSIGINFO, child, &arg, addr_rw);
if (ret != -1 || errno != EINVAL) {
err("sys_ptrace() returns %d (expected -1),"
" errno %d (expected %d): %m\n",
ret, errno, EINVAL);
goto out;
}
arg.flags = 0;
/* A part of the buffer is read-only */
ret = sys_ptrace(PTRACE_PEEKSIGINFO, child, &arg,
addr_ro - sizeof(siginfo_t) * 2);
if (ret != 2) {
err("sys_ptrace() returns %d (expected 2): %m\n", ret);
goto out;
}
/* Read-only buffer */
ret = sys_ptrace(PTRACE_PEEKSIGINFO, child, &arg, addr_ro);
if (ret != -1 || errno != EFAULT) {
err("sys_ptrace() returns %d (expected -1),"
" errno %d (expected %d): %m\n",
ret, errno, EFAULT);
goto out;
}
exit_code = 0;
out:
munmap(addr_rw, 2 * PAGE_SIZE);
return exit_code;
}
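/* Read the tracee's queued siginfos 'nr' at a time with
 * PTRACE_PEEKSIGINFO, from either the shared (process-wide) or private
 * (per-thread) queue, and verify si_code/si_int match what was queued.
 */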
int check_direct_path(pid_t child, int shared, int nr)
{
struct ptrace_peeksiginfo_args arg = {.flags = 0, .nr = nr, .off = 0};
int i, j, ret, exit_code = -1;
siginfo_t siginfo[SIGNR];
int si_code;
if (shared == 1) {
arg.flags = PTRACE_PEEKSIGINFO_SHARED;
si_code = TEST_SICODE_SHARE;
} else {
arg.flags = 0;
si_code = TEST_SICODE_PRIV;
}
for (i = 0; i < SIGNR; ) {
arg.off = i;
ret = sys_ptrace(PTRACE_PEEKSIGINFO, child, &arg, siginfo);
if (ret == -1) {
err("ptrace() failed: %m\n");
goto out;
}
if (ret == 0)
break;
for (j = 0; j < ret; j++, i++) {
if (siginfo[j].si_code == si_code &&
siginfo[j].si_int == i)
continue;
err("%d: Wrong siginfo i=%d si_code=%d si_int=%d\n",
shared, i, siginfo[j].si_code, siginfo[j].si_int);
goto out;
}
}
if (i != SIGNR) {
err("Only %d signals were read\n", i);
goto out;
}
exit_code = 0;
out:
return exit_code;
}
int main(int argc, char *argv[])
{
siginfo_t siginfo;
int i, exit_code = 1;
sigset_t blockmask;
pid_t child;
sigemptyset(&blockmask);
sigaddset(&blockmask, SIGRTMIN);
sigprocmask(SIG_BLOCK, &blockmask, NULL);
child = fork();
if (child == -1) {
err("fork() failed: %m");
return 1;
} else if (child == 0) {
pid_t ppid = getppid();
while (1) {
if (ppid != getppid())
break;
sleep(1);
}
return 1;
}
/* Send signals in process-wide and per-thread queues */
for (i = 0; i < SIGNR; i++) {
siginfo.si_code = TEST_SICODE_SHARE;
siginfo.si_int = i;
sys_rt_sigqueueinfo(child, SIGRTMIN, &siginfo);
siginfo.si_code = TEST_SICODE_PRIV;
siginfo.si_int = i;
sys_rt_tgsigqueueinfo(child, child, SIGRTMIN, &siginfo);
}
if (sys_ptrace(PTRACE_ATTACH, child, NULL, NULL) == -1)
return 1;
waitpid(child, NULL, 0);
/* Dump signals one by one */
if (check_direct_path(child, 0, 1))
goto out;
/* Dump all signals for one call */
if (check_direct_path(child, 0, SIGNR))
goto out;
/*
* Dump signals from the process-wide queue.
* The number of signals is not a multiple of the buffer size
*/
if (check_direct_path(child, 1, 3))
goto out;
if (check_error_paths(child))
goto out;
printf("PASS\n");
exit_code = 0;
out:
if (sys_ptrace(PTRACE_KILL, child, NULL, NULL) == -1)
return 1;
waitpid(child, NULL, 0);
return exit_code;
}
| linux-master | tools/testing/selftests/ptrace/peeksiginfo.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (c) 2018 Dmitry V. Levin <[email protected]>
* All rights reserved.
*
* Check whether PTRACE_GET_SYSCALL_INFO semantics implemented in the kernel
* matches userspace expectations.
*/
#include "../kselftest_harness.h"
#include <err.h>
#include <signal.h>
#include <asm/unistd.h>
#include "linux/ptrace.h"
static int
kill_tracee(pid_t pid)
{
if (!pid)
return 0;
int saved_errno = errno;
int rc = kill(pid, SIGKILL);
errno = saved_errno;
return rc;
}
static long
sys_ptrace(int request, pid_t pid, unsigned long addr, unsigned long data)
{
return syscall(__NR_ptrace, request, pid, addr, data);
}
#define LOG_KILL_TRACEE(fmt, ...) \
do { \
kill_tracee(pid); \
TH_LOG("wait #%d: " fmt, \
ptrace_stop, ##__VA_ARGS__); \
} while (0)
TEST(get_syscall_info)
{
static const unsigned long args[][7] = {
/* a sequence of architecture-agnostic syscalls */
{
__NR_chdir,
(unsigned long) "",
0xbad1fed1,
0xbad2fed2,
0xbad3fed3,
0xbad4fed4,
0xbad5fed5
},
{
__NR_gettid,
0xcaf0bea0,
0xcaf1bea1,
0xcaf2bea2,
0xcaf3bea3,
0xcaf4bea4,
0xcaf5bea5
},
{
__NR_exit_group,
0,
0xfac1c0d1,
0xfac2c0d2,
0xfac3c0d3,
0xfac4c0d4,
0xfac5c0d5
}
};
const unsigned long *exp_args;
pid_t pid = fork();
ASSERT_LE(0, pid) {
TH_LOG("fork: %m");
}
if (pid == 0) {
/* get the pid before PTRACE_TRACEME */
pid = getpid();
ASSERT_EQ(0, sys_ptrace(PTRACE_TRACEME, 0, 0, 0)) {
TH_LOG("PTRACE_TRACEME: %m");
}
ASSERT_EQ(0, kill(pid, SIGSTOP)) {
/* cannot happen */
TH_LOG("kill SIGSTOP: %m");
}
for (unsigned int i = 0; i < ARRAY_SIZE(args); ++i) {
syscall(args[i][0],
args[i][1], args[i][2], args[i][3],
args[i][4], args[i][5], args[i][6]);
}
/* unreachable */
_exit(1);
}
const struct {
unsigned int is_error;
int rval;
} *exp_param, exit_param[] = {
{ 1, -ENOENT }, /* chdir */
{ 0, pid } /* gettid */
};
unsigned int ptrace_stop;
for (ptrace_stop = 0; ; ++ptrace_stop) {
struct ptrace_syscall_info info = {
.op = 0xff /* invalid PTRACE_SYSCALL_INFO_* op */
};
const size_t size = sizeof(info);
const int expected_none_size =
(void *) &info.entry - (void *) &info;
const int expected_entry_size =
(void *) &info.entry.args[6] - (void *) &info;
const int expected_exit_size =
(void *) (&info.exit.is_error + 1) -
(void *) &info;
int status;
long rc;
ASSERT_EQ(pid, wait(&status)) {
/* cannot happen */
LOG_KILL_TRACEE("wait: %m");
}
if (WIFEXITED(status)) {
pid = 0; /* the tracee is no more */
ASSERT_EQ(0, WEXITSTATUS(status));
break;
}
ASSERT_FALSE(WIFSIGNALED(status)) {
pid = 0; /* the tracee is no more */
LOG_KILL_TRACEE("unexpected signal %u",
WTERMSIG(status));
}
ASSERT_TRUE(WIFSTOPPED(status)) {
/* cannot happen */
LOG_KILL_TRACEE("unexpected wait status %#x", status);
}
switch (WSTOPSIG(status)) {
case SIGSTOP:
ASSERT_EQ(0, ptrace_stop) {
LOG_KILL_TRACEE("unexpected signal stop");
}
ASSERT_EQ(0, sys_ptrace(PTRACE_SETOPTIONS, pid, 0,
PTRACE_O_TRACESYSGOOD)) {
LOG_KILL_TRACEE("PTRACE_SETOPTIONS: %m");
}
ASSERT_LT(0, (rc = sys_ptrace(PTRACE_GET_SYSCALL_INFO,
pid, size,
(unsigned long) &info))) {
LOG_KILL_TRACEE("PTRACE_GET_SYSCALL_INFO: %m");
}
ASSERT_EQ(expected_none_size, rc) {
LOG_KILL_TRACEE("signal stop mismatch");
}
ASSERT_EQ(PTRACE_SYSCALL_INFO_NONE, info.op) {
LOG_KILL_TRACEE("signal stop mismatch");
}
ASSERT_TRUE(info.arch) {
LOG_KILL_TRACEE("signal stop mismatch");
}
ASSERT_TRUE(info.instruction_pointer) {
LOG_KILL_TRACEE("signal stop mismatch");
}
ASSERT_TRUE(info.stack_pointer) {
LOG_KILL_TRACEE("signal stop mismatch");
}
break;
case SIGTRAP | 0x80:
ASSERT_LT(0, (rc = sys_ptrace(PTRACE_GET_SYSCALL_INFO,
pid, size,
(unsigned long) &info))) {
LOG_KILL_TRACEE("PTRACE_GET_SYSCALL_INFO: %m");
}
switch (ptrace_stop) {
case 1: /* entering chdir */
case 3: /* entering gettid */
case 5: /* entering exit_group */
exp_args = args[ptrace_stop / 2];
ASSERT_EQ(expected_entry_size, rc) {
LOG_KILL_TRACEE("entry stop mismatch");
}
ASSERT_EQ(PTRACE_SYSCALL_INFO_ENTRY, info.op) {
LOG_KILL_TRACEE("entry stop mismatch");
}
ASSERT_TRUE(info.arch) {
LOG_KILL_TRACEE("entry stop mismatch");
}
ASSERT_TRUE(info.instruction_pointer) {
LOG_KILL_TRACEE("entry stop mismatch");
}
ASSERT_TRUE(info.stack_pointer) {
LOG_KILL_TRACEE("entry stop mismatch");
}
ASSERT_EQ(exp_args[0], info.entry.nr) {
LOG_KILL_TRACEE("entry stop mismatch");
}
ASSERT_EQ(exp_args[1], info.entry.args[0]) {
LOG_KILL_TRACEE("entry stop mismatch");
}
ASSERT_EQ(exp_args[2], info.entry.args[1]) {
LOG_KILL_TRACEE("entry stop mismatch");
}
ASSERT_EQ(exp_args[3], info.entry.args[2]) {
LOG_KILL_TRACEE("entry stop mismatch");
}
ASSERT_EQ(exp_args[4], info.entry.args[3]) {
LOG_KILL_TRACEE("entry stop mismatch");
}
ASSERT_EQ(exp_args[5], info.entry.args[4]) {
LOG_KILL_TRACEE("entry stop mismatch");
}
ASSERT_EQ(exp_args[6], info.entry.args[5]) {
LOG_KILL_TRACEE("entry stop mismatch");
}
break;
case 2: /* exiting chdir */
case 4: /* exiting gettid */
exp_param = &exit_param[ptrace_stop / 2 - 1];
ASSERT_EQ(expected_exit_size, rc) {
LOG_KILL_TRACEE("exit stop mismatch");
}
ASSERT_EQ(PTRACE_SYSCALL_INFO_EXIT, info.op) {
LOG_KILL_TRACEE("exit stop mismatch");
}
ASSERT_TRUE(info.arch) {
LOG_KILL_TRACEE("exit stop mismatch");
}
ASSERT_TRUE(info.instruction_pointer) {
LOG_KILL_TRACEE("exit stop mismatch");
}
ASSERT_TRUE(info.stack_pointer) {
LOG_KILL_TRACEE("exit stop mismatch");
}
ASSERT_EQ(exp_param->is_error,
info.exit.is_error) {
LOG_KILL_TRACEE("exit stop mismatch");
}
ASSERT_EQ(exp_param->rval, info.exit.rval) {
LOG_KILL_TRACEE("exit stop mismatch");
}
break;
default:
LOG_KILL_TRACEE("unexpected syscall stop");
abort();
}
break;
default:
LOG_KILL_TRACEE("unexpected stop signal %#x",
WSTOPSIG(status));
abort();
}
ASSERT_EQ(0, sys_ptrace(PTRACE_SYSCALL, pid, 0, 0)) {
LOG_KILL_TRACEE("PTRACE_SYSCALL: %m");
}
}
ASSERT_EQ(ARRAY_SIZE(args) * 2, ptrace_stop);
}
TEST_HARNESS_MAIN
| linux-master | tools/testing/selftests/ptrace/get_syscall_info.c |
// SPDX-License-Identifier: GPL-2.0
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <errno.h>
#include <pthread.h>
#include <string.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/mount.h>
#include <sys/wait.h>
#include <sys/vfs.h>
#include <sys/statvfs.h>
#include <sys/sysinfo.h>
#include <stdlib.h>
#include <unistd.h>
#include <fcntl.h>
#include <grp.h>
#include <stdbool.h>
#include <stdarg.h>
#include <linux/mount.h>
#include "../kselftest_harness.h"
#ifndef CLONE_NEWNS
#define CLONE_NEWNS 0x00020000
#endif
#ifndef CLONE_NEWUSER
#define CLONE_NEWUSER 0x10000000
#endif
#ifndef MS_REC
#define MS_REC 16384
#endif
#ifndef MS_RELATIME
#define MS_RELATIME (1 << 21)
#endif
#ifndef MS_STRICTATIME
#define MS_STRICTATIME (1 << 24)
#endif
#ifndef MOUNT_ATTR_RDONLY
#define MOUNT_ATTR_RDONLY 0x00000001
#endif
#ifndef MOUNT_ATTR_NOSUID
#define MOUNT_ATTR_NOSUID 0x00000002
#endif
#ifndef MOUNT_ATTR_NOEXEC
#define MOUNT_ATTR_NOEXEC 0x00000008
#endif
#ifndef MOUNT_ATTR_NODIRATIME
#define MOUNT_ATTR_NODIRATIME 0x00000080
#endif
#ifndef MOUNT_ATTR__ATIME
#define MOUNT_ATTR__ATIME 0x00000070
#endif
#ifndef MOUNT_ATTR_RELATIME
#define MOUNT_ATTR_RELATIME 0x00000000
#endif
#ifndef MOUNT_ATTR_NOATIME
#define MOUNT_ATTR_NOATIME 0x00000010
#endif
#ifndef MOUNT_ATTR_STRICTATIME
#define MOUNT_ATTR_STRICTATIME 0x00000020
#endif
#ifndef AT_RECURSIVE
#define AT_RECURSIVE 0x8000
#endif
#ifndef MS_SHARED
#define MS_SHARED (1 << 20)
#endif
#define DEFAULT_THREADS 4
#define ptr_to_int(p) ((int)((intptr_t)(p)))
#define int_to_ptr(u) ((void *)((intptr_t)(u)))
#ifndef __NR_mount_setattr
#if defined __alpha__
#define __NR_mount_setattr 552
#elif defined _MIPS_SIM
#if _MIPS_SIM == _MIPS_SIM_ABI32 /* o32 */
#define __NR_mount_setattr (442 + 4000)
#endif
#if _MIPS_SIM == _MIPS_SIM_NABI32 /* n32 */
#define __NR_mount_setattr (442 + 6000)
#endif
#if _MIPS_SIM == _MIPS_SIM_ABI64 /* n64 */
#define __NR_mount_setattr (442 + 5000)
#endif
#elif defined __ia64__
#define __NR_mount_setattr (442 + 1024)
#else
#define __NR_mount_setattr 442
#endif
#endif
#ifndef __NR_open_tree
#if defined __alpha__
#define __NR_open_tree 538
#elif defined _MIPS_SIM
#if _MIPS_SIM == _MIPS_SIM_ABI32 /* o32 */
#define __NR_open_tree 4428
#endif
#if _MIPS_SIM == _MIPS_SIM_NABI32 /* n32 */
#define __NR_open_tree 6428
#endif
#if _MIPS_SIM == _MIPS_SIM_ABI64 /* n64 */
#define __NR_open_tree 5428
#endif
#elif defined __ia64__
#define __NR_open_tree (428 + 1024)
#else
#define __NR_open_tree 428
#endif
#endif
#ifndef MOUNT_ATTR_IDMAP
#define MOUNT_ATTR_IDMAP 0x00100000
#endif
#ifndef MOUNT_ATTR_NOSYMFOLLOW
#define MOUNT_ATTR_NOSYMFOLLOW 0x00200000
#endif
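/* Raw syscall wrappers: libc may not expose these syscalls yet. */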
static inline int sys_mount_setattr(int dfd, const char *path, unsigned int flags,
struct mount_attr *attr, size_t size)
{
return syscall(__NR_mount_setattr, dfd, path, flags, attr, size);
}
#ifndef OPEN_TREE_CLONE
#define OPEN_TREE_CLONE 1
#endif
#ifndef OPEN_TREE_CLOEXEC
#define OPEN_TREE_CLOEXEC O_CLOEXEC
#endif
#ifndef AT_RECURSIVE
#define AT_RECURSIVE 0x8000 /* Apply to the entire subtree */
#endif
static inline int sys_open_tree(int dfd, const char *filename, unsigned int flags)
{
return syscall(__NR_open_tree, dfd, filename, flags);
}
static ssize_t write_nointr(int fd, const void *buf, size_t count)
{
ssize_t ret;
do {
ret = write(fd, buf, count);
} while (ret < 0 && errno == EINTR);
return ret;
}
static int write_file(const char *path, const void *buf, size_t count)
{
int fd;
ssize_t ret;
fd = open(path, O_WRONLY | O_CLOEXEC | O_NOCTTY | O_NOFOLLOW);
if (fd < 0)
return -1;
ret = write_nointr(fd, buf, count);
close(fd);
if (ret < 0 || (size_t)ret != count)
return -1;
return 0;
}
static int create_and_enter_userns(void)
{
uid_t uid;
gid_t gid;
char map[100];
uid = getuid();
gid = getgid();
if (unshare(CLONE_NEWUSER))
return -1;
if (write_file("/proc/self/setgroups", "deny", sizeof("deny") - 1) &&
errno != ENOENT)
return -1;
snprintf(map, sizeof(map), "0 %d 1", uid);
if (write_file("/proc/self/uid_map", map, strlen(map)))
return -1;
snprintf(map, sizeof(map), "0 %d 1", gid);
if (write_file("/proc/self/gid_map", map, strlen(map)))
return -1;
if (setgid(0))
return -1;
if (setuid(0))
return -1;
return 0;
}
static int prepare_unpriv_mountns(void)
{
if (create_and_enter_userns())
return -1;
if (unshare(CLONE_NEWNS))
return -1;
if (mount(NULL, "/", NULL, MS_REC | MS_PRIVATE, 0))
return -1;
return 0;
}
#ifndef ST_NOSYMFOLLOW
#define ST_NOSYMFOLLOW 0x2000 /* do not follow symlinks */
#endif
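/* Translate the statvfs() f_flag bits for 'path' into MS_*-style mount
 * flags; returns -EINVAL on error or on unexpected flags.
 */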
static int read_mnt_flags(const char *path)
{
int ret;
struct statvfs stat;
unsigned int mnt_flags;
ret = statvfs(path, &stat);
if (ret != 0)
return -EINVAL;
if (stat.f_flag & ~(ST_RDONLY | ST_NOSUID | ST_NODEV | ST_NOEXEC |
ST_NOATIME | ST_NODIRATIME | ST_RELATIME |
ST_SYNCHRONOUS | ST_MANDLOCK | ST_NOSYMFOLLOW))
return -EINVAL;
mnt_flags = 0;
if (stat.f_flag & ST_RDONLY)
mnt_flags |= MS_RDONLY;
if (stat.f_flag & ST_NOSUID)
mnt_flags |= MS_NOSUID;
if (stat.f_flag & ST_NODEV)
mnt_flags |= MS_NODEV;
if (stat.f_flag & ST_NOEXEC)
mnt_flags |= MS_NOEXEC;
if (stat.f_flag & ST_NOATIME)
mnt_flags |= MS_NOATIME;
if (stat.f_flag & ST_NODIRATIME)
mnt_flags |= MS_NODIRATIME;
if (stat.f_flag & ST_RELATIME)
mnt_flags |= MS_RELATIME;
if (stat.f_flag & ST_SYNCHRONOUS)
mnt_flags |= MS_SYNCHRONOUS;
if (stat.f_flag & ST_MANDLOCK)
mnt_flags |= ST_MANDLOCK;
if (stat.f_flag & ST_NOSYMFOLLOW)
mnt_flags |= ST_NOSYMFOLLOW;
return mnt_flags;
}
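/* Skip 'nfields' whitespace-separated fields in 'src' and return a
 * pointer to the start of the next field.
 */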
static char *get_field(char *src, int nfields)
{
int i;
char *p = src;
for (i = 0; i < nfields; i++) {
while (*p && *p != ' ' && *p != '\t')
p++;
if (!*p)
break;
p++;
}
return p;
}
static void null_endofword(char *word)
{
while (*word && *word != ' ' && *word != '\t')
word++;
*word = '\0';
}
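/* Return true if 'path' shows up in /proc/self/mountinfo with a
 * "shared:" peer group tag in its optional fields.
 */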
static bool is_shared_mount(const char *path)
{
size_t len = 0;
char *line = NULL;
FILE *f = NULL;
f = fopen("/proc/self/mountinfo", "re");
if (!f)
return false;
while (getline(&line, &len, f) != -1) {
char *opts, *target;
target = get_field(line, 4);
if (!target)
continue;
opts = get_field(target, 2);
if (!opts)
continue;
null_endofword(target);
if (strcmp(target, path) != 0)
continue;
null_endofword(opts);
if (strstr(opts, "shared:"))
return true;
}
free(line);
fclose(f);
return false;
}
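/* Worker for the multi_threaded test: concurrently set rdonly|nosuid
 * and shared propagation on the whole /mnt/A subtree.
 */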
static void *mount_setattr_thread(void *data)
{
struct mount_attr attr = {
.attr_set = MOUNT_ATTR_RDONLY | MOUNT_ATTR_NOSUID,
.attr_clr = 0,
.propagation = MS_SHARED,
};
if (sys_mount_setattr(-1, "/mnt/A", AT_RECURSIVE, &attr, sizeof(attr)))
pthread_exit(int_to_ptr(-1));
pthread_exit(int_to_ptr(0));
}
/* Attempt to de-conflict with the selftests tree. */
#ifndef SKIP
#define SKIP(s, ...) XFAIL(s, ##__VA_ARGS__)
#endif
static bool mount_setattr_supported(void)
{
int ret;
ret = sys_mount_setattr(-EBADF, "", AT_EMPTY_PATH, NULL, 0);
if (ret < 0 && errno == ENOSYS)
return false;
return true;
}
FIXTURE(mount_setattr) {
};
#define NOSYMFOLLOW_TARGET "/mnt/A/AA/data"
#define NOSYMFOLLOW_SYMLINK "/mnt/A/AA/symlink"
FIXTURE_SETUP(mount_setattr)
{
int fd = -EBADF;
if (!mount_setattr_supported())
SKIP(return, "mount_setattr syscall not supported");
ASSERT_EQ(prepare_unpriv_mountns(), 0);
(void)umount2("/mnt", MNT_DETACH);
(void)umount2("/tmp", MNT_DETACH);
ASSERT_EQ(mount("testing", "/tmp", "tmpfs", MS_NOATIME | MS_NODEV,
"size=100000,mode=700"), 0);
ASSERT_EQ(mkdir("/tmp/B", 0777), 0);
ASSERT_EQ(mount("testing", "/tmp/B", "tmpfs", MS_NOATIME | MS_NODEV,
"size=100000,mode=700"), 0);
ASSERT_EQ(mkdir("/tmp/B/BB", 0777), 0);
ASSERT_EQ(mount("testing", "/tmp/B/BB", "tmpfs", MS_NOATIME | MS_NODEV,
"size=100000,mode=700"), 0);
ASSERT_EQ(mount("testing", "/mnt", "tmpfs", MS_NOATIME | MS_NODEV,
"size=100000,mode=700"), 0);
ASSERT_EQ(mkdir("/mnt/A", 0777), 0);
ASSERT_EQ(mount("testing", "/mnt/A", "tmpfs", MS_NOATIME | MS_NODEV,
"size=100000,mode=700"), 0);
ASSERT_EQ(mkdir("/mnt/A/AA", 0777), 0);
ASSERT_EQ(mount("/tmp", "/mnt/A/AA", NULL, MS_BIND | MS_REC, NULL), 0);
ASSERT_EQ(mkdir("/mnt/B", 0777), 0);
ASSERT_EQ(mount("testing", "/mnt/B", "ramfs",
MS_NOATIME | MS_NODEV | MS_NOSUID, 0), 0);
ASSERT_EQ(mkdir("/mnt/B/BB", 0777), 0);
ASSERT_EQ(mount("testing", "/tmp/B/BB", "devpts",
MS_RELATIME | MS_NOEXEC | MS_RDONLY, 0), 0);
fd = creat(NOSYMFOLLOW_TARGET, O_RDWR | O_CLOEXEC);
ASSERT_GT(fd, 0);
ASSERT_EQ(symlink(NOSYMFOLLOW_TARGET, NOSYMFOLLOW_SYMLINK), 0);
ASSERT_EQ(close(fd), 0);
}
FIXTURE_TEARDOWN(mount_setattr)
{
if (!mount_setattr_supported())
SKIP(return, "mount_setattr syscall not supported");
(void)umount2("/mnt/A", MNT_DETACH);
(void)umount2("/tmp", MNT_DETACH);
}
TEST_F(mount_setattr, invalid_attributes)
{
struct mount_attr invalid_attr = {
.attr_set = (1U << 31),
};
if (!mount_setattr_supported())
SKIP(return, "mount_setattr syscall not supported");
ASSERT_NE(sys_mount_setattr(-1, "/mnt/A", AT_RECURSIVE, &invalid_attr,
sizeof(invalid_attr)), 0);
invalid_attr.attr_set = 0;
invalid_attr.attr_clr = (1U << 31);
ASSERT_NE(sys_mount_setattr(-1, "/mnt/A", AT_RECURSIVE, &invalid_attr,
sizeof(invalid_attr)), 0);
invalid_attr.attr_clr = 0;
invalid_attr.propagation = (1U << 31);
ASSERT_NE(sys_mount_setattr(-1, "/mnt/A", AT_RECURSIVE, &invalid_attr,
sizeof(invalid_attr)), 0);
invalid_attr.attr_set = (1U << 31);
invalid_attr.attr_clr = (1U << 31);
invalid_attr.propagation = (1U << 31);
ASSERT_NE(sys_mount_setattr(-1, "/mnt/A", AT_RECURSIVE, &invalid_attr,
sizeof(invalid_attr)), 0);
ASSERT_NE(sys_mount_setattr(-1, "mnt/A", AT_RECURSIVE, &invalid_attr,
sizeof(invalid_attr)), 0);
}
TEST_F(mount_setattr, extensibility)
{
unsigned int old_flags = 0, new_flags = 0, expected_flags = 0;
char *s = "dummy";
struct mount_attr invalid_attr = {};
struct mount_attr_large {
struct mount_attr attr1;
struct mount_attr attr2;
struct mount_attr attr3;
} large_attr = {};
if (!mount_setattr_supported())
SKIP(return, "mount_setattr syscall not supported");
old_flags = read_mnt_flags("/mnt/A");
ASSERT_GT(old_flags, 0);
ASSERT_NE(sys_mount_setattr(-1, "/mnt/A", AT_RECURSIVE, NULL,
sizeof(invalid_attr)), 0);
ASSERT_EQ(errno, EFAULT);
ASSERT_NE(sys_mount_setattr(-1, "/mnt/A", AT_RECURSIVE, (void *)s,
sizeof(invalid_attr)), 0);
ASSERT_EQ(errno, EINVAL);
ASSERT_NE(sys_mount_setattr(-1, "/mnt/A", AT_RECURSIVE, &invalid_attr, 0), 0);
ASSERT_EQ(errno, EINVAL);
ASSERT_NE(sys_mount_setattr(-1, "/mnt/A", AT_RECURSIVE, &invalid_attr,
sizeof(invalid_attr) / 2), 0);
ASSERT_EQ(errno, EINVAL);
ASSERT_NE(sys_mount_setattr(-1, "/mnt/A", AT_RECURSIVE, &invalid_attr,
sizeof(invalid_attr) / 2), 0);
ASSERT_EQ(errno, EINVAL);
ASSERT_EQ(sys_mount_setattr(-1, "/mnt/A", AT_RECURSIVE,
(void *)&large_attr, sizeof(large_attr)), 0);
large_attr.attr3.attr_set = MOUNT_ATTR_RDONLY;
ASSERT_NE(sys_mount_setattr(-1, "/mnt/A", AT_RECURSIVE,
(void *)&large_attr, sizeof(large_attr)), 0);
large_attr.attr3.attr_set = 0;
large_attr.attr1.attr_set = MOUNT_ATTR_RDONLY;
ASSERT_EQ(sys_mount_setattr(-1, "/mnt/A", AT_RECURSIVE,
(void *)&large_attr, sizeof(large_attr)), 0);
expected_flags = old_flags;
expected_flags |= MS_RDONLY;
new_flags = read_mnt_flags("/mnt/A");
ASSERT_EQ(new_flags, expected_flags);
new_flags = read_mnt_flags("/mnt/A/AA");
ASSERT_EQ(new_flags, expected_flags);
new_flags = read_mnt_flags("/mnt/A/AA/B");
ASSERT_EQ(new_flags, expected_flags);
new_flags = read_mnt_flags("/mnt/A/AA/B/BB");
ASSERT_EQ(new_flags, expected_flags);
}
TEST_F(mount_setattr, basic)
{
unsigned int old_flags = 0, new_flags = 0, expected_flags = 0;
struct mount_attr attr = {
.attr_set = MOUNT_ATTR_RDONLY | MOUNT_ATTR_NOEXEC | MOUNT_ATTR_RELATIME,
.attr_clr = MOUNT_ATTR__ATIME,
};
if (!mount_setattr_supported())
SKIP(return, "mount_setattr syscall not supported");
old_flags = read_mnt_flags("/mnt/A");
ASSERT_GT(old_flags, 0);
ASSERT_EQ(sys_mount_setattr(-1, "/mnt/A", 0, &attr, sizeof(attr)), 0);
expected_flags = old_flags;
expected_flags |= MS_RDONLY;
expected_flags |= MS_NOEXEC;
expected_flags &= ~MS_NOATIME;
expected_flags |= MS_RELATIME;
new_flags = read_mnt_flags("/mnt/A");
ASSERT_EQ(new_flags, expected_flags);
new_flags = read_mnt_flags("/mnt/A/AA");
ASSERT_EQ(new_flags, old_flags);
new_flags = read_mnt_flags("/mnt/A/AA/B");
ASSERT_EQ(new_flags, old_flags);
new_flags = read_mnt_flags("/mnt/A/AA/B/BB");
ASSERT_EQ(new_flags, old_flags);
}
TEST_F(mount_setattr, basic_recursive)
{
int fd;
unsigned int old_flags = 0, new_flags = 0, expected_flags = 0;
struct mount_attr attr = {
.attr_set = MOUNT_ATTR_RDONLY | MOUNT_ATTR_NOEXEC | MOUNT_ATTR_RELATIME,
.attr_clr = MOUNT_ATTR__ATIME,
};
if (!mount_setattr_supported())
SKIP(return, "mount_setattr syscall not supported");
old_flags = read_mnt_flags("/mnt/A");
ASSERT_GT(old_flags, 0);
ASSERT_EQ(sys_mount_setattr(-1, "/mnt/A", AT_RECURSIVE, &attr, sizeof(attr)), 0);
expected_flags = old_flags;
expected_flags |= MS_RDONLY;
expected_flags |= MS_NOEXEC;
expected_flags &= ~MS_NOATIME;
expected_flags |= MS_RELATIME;
new_flags = read_mnt_flags("/mnt/A");
ASSERT_EQ(new_flags, expected_flags);
new_flags = read_mnt_flags("/mnt/A/AA");
ASSERT_EQ(new_flags, expected_flags);
new_flags = read_mnt_flags("/mnt/A/AA/B");
ASSERT_EQ(new_flags, expected_flags);
new_flags = read_mnt_flags("/mnt/A/AA/B/BB");
ASSERT_EQ(new_flags, expected_flags);
memset(&attr, 0, sizeof(attr));
attr.attr_clr = MOUNT_ATTR_RDONLY;
attr.propagation = MS_SHARED;
ASSERT_EQ(sys_mount_setattr(-1, "/mnt/A", AT_RECURSIVE, &attr, sizeof(attr)), 0);
expected_flags &= ~MS_RDONLY;
new_flags = read_mnt_flags("/mnt/A");
ASSERT_EQ(new_flags, expected_flags);
ASSERT_EQ(is_shared_mount("/mnt/A"), true);
new_flags = read_mnt_flags("/mnt/A/AA");
ASSERT_EQ(new_flags, expected_flags);
ASSERT_EQ(is_shared_mount("/mnt/A/AA"), true);
new_flags = read_mnt_flags("/mnt/A/AA/B");
ASSERT_EQ(new_flags, expected_flags);
ASSERT_EQ(is_shared_mount("/mnt/A/AA/B"), true);
new_flags = read_mnt_flags("/mnt/A/AA/B/BB");
ASSERT_EQ(new_flags, expected_flags);
ASSERT_EQ(is_shared_mount("/mnt/A/AA/B/BB"), true);
fd = open("/mnt/A/AA/B/b", O_RDWR | O_CLOEXEC | O_CREAT | O_EXCL, 0777);
ASSERT_GE(fd, 0);
/*
* We're holding a fd open for writing so this needs to fail somewhere
* in the middle and the mount options need to be unchanged.
*/
attr.attr_set = MOUNT_ATTR_RDONLY;
ASSERT_LT(sys_mount_setattr(-1, "/mnt/A", AT_RECURSIVE, &attr, sizeof(attr)), 0);
new_flags = read_mnt_flags("/mnt/A");
ASSERT_EQ(new_flags, expected_flags);
ASSERT_EQ(is_shared_mount("/mnt/A"), true);
new_flags = read_mnt_flags("/mnt/A/AA");
ASSERT_EQ(new_flags, expected_flags);
ASSERT_EQ(is_shared_mount("/mnt/A/AA"), true);
new_flags = read_mnt_flags("/mnt/A/AA/B");
ASSERT_EQ(new_flags, expected_flags);
ASSERT_EQ(is_shared_mount("/mnt/A/AA/B"), true);
new_flags = read_mnt_flags("/mnt/A/AA/B/BB");
ASSERT_EQ(new_flags, expected_flags);
ASSERT_EQ(is_shared_mount("/mnt/A/AA/B/BB"), true);
EXPECT_EQ(close(fd), 0);
}
TEST_F(mount_setattr, mount_has_writers)
{
int fd, dfd;
unsigned int old_flags = 0, new_flags = 0;
struct mount_attr attr = {
.attr_set = MOUNT_ATTR_RDONLY | MOUNT_ATTR_NOEXEC | MOUNT_ATTR_RELATIME,
.attr_clr = MOUNT_ATTR__ATIME,
.propagation = MS_SHARED,
};
if (!mount_setattr_supported())
SKIP(return, "mount_setattr syscall not supported");
old_flags = read_mnt_flags("/mnt/A");
ASSERT_GT(old_flags, 0);
fd = open("/mnt/A/AA/B/b", O_RDWR | O_CLOEXEC | O_CREAT | O_EXCL, 0777);
ASSERT_GE(fd, 0);
/*
* We're holding a fd open to a mount somewhere in the middle so this
* needs to fail somewhere in the middle. After this the mount options
* need to be unchanged.
*/
ASSERT_LT(sys_mount_setattr(-1, "/mnt/A", AT_RECURSIVE, &attr, sizeof(attr)), 0);
new_flags = read_mnt_flags("/mnt/A");
ASSERT_EQ(new_flags, old_flags);
ASSERT_EQ(is_shared_mount("/mnt/A"), false);
new_flags = read_mnt_flags("/mnt/A/AA");
ASSERT_EQ(new_flags, old_flags);
ASSERT_EQ(is_shared_mount("/mnt/A/AA"), false);
new_flags = read_mnt_flags("/mnt/A/AA/B");
ASSERT_EQ(new_flags, old_flags);
ASSERT_EQ(is_shared_mount("/mnt/A/AA/B"), false);
new_flags = read_mnt_flags("/mnt/A/AA/B/BB");
ASSERT_EQ(new_flags, old_flags);
ASSERT_EQ(is_shared_mount("/mnt/A/AA/B/BB"), false);
dfd = open("/mnt/A/AA/B", O_DIRECTORY | O_CLOEXEC);
ASSERT_GE(dfd, 0);
EXPECT_EQ(fsync(dfd), 0);
EXPECT_EQ(close(dfd), 0);
EXPECT_EQ(fsync(fd), 0);
EXPECT_EQ(close(fd), 0);
/* All writers are gone so this should succeed. */
ASSERT_EQ(sys_mount_setattr(-1, "/mnt/A", AT_RECURSIVE, &attr, sizeof(attr)), 0);
}
TEST_F(mount_setattr, mixed_mount_options)
{
unsigned int old_flags1 = 0, old_flags2 = 0, new_flags = 0, expected_flags = 0;
struct mount_attr attr = {
.attr_clr = MOUNT_ATTR_RDONLY | MOUNT_ATTR_NOSUID | MOUNT_ATTR_NOEXEC | MOUNT_ATTR__ATIME,
.attr_set = MOUNT_ATTR_RELATIME,
};
if (!mount_setattr_supported())
SKIP(return, "mount_setattr syscall not supported");
old_flags1 = read_mnt_flags("/mnt/B");
ASSERT_GT(old_flags1, 0);
old_flags2 = read_mnt_flags("/mnt/B/BB");
ASSERT_GT(old_flags2, 0);
ASSERT_EQ(sys_mount_setattr(-1, "/mnt/B", AT_RECURSIVE, &attr, sizeof(attr)), 0);
expected_flags = old_flags2;
expected_flags &= ~(MS_RDONLY | MS_NOEXEC | MS_NOATIME | MS_NOSUID);
expected_flags |= MS_RELATIME;
new_flags = read_mnt_flags("/mnt/B");
ASSERT_EQ(new_flags, expected_flags);
expected_flags = old_flags2;
expected_flags &= ~(MS_RDONLY | MS_NOEXEC | MS_NOATIME | MS_NOSUID);
expected_flags |= MS_RELATIME;
new_flags = read_mnt_flags("/mnt/B/BB");
ASSERT_EQ(new_flags, expected_flags);
}
TEST_F(mount_setattr, time_changes)
{
unsigned int old_flags = 0, new_flags = 0, expected_flags = 0;
struct mount_attr attr = {
.attr_set = MOUNT_ATTR_NODIRATIME | MOUNT_ATTR_NOATIME,
};
if (!mount_setattr_supported())
SKIP(return, "mount_setattr syscall not supported");
ASSERT_NE(sys_mount_setattr(-1, "/mnt/A", AT_RECURSIVE, &attr, sizeof(attr)), 0);
attr.attr_set = MOUNT_ATTR_STRICTATIME;
ASSERT_NE(sys_mount_setattr(-1, "/mnt/A", AT_RECURSIVE, &attr, sizeof(attr)), 0);
attr.attr_set = MOUNT_ATTR_STRICTATIME | MOUNT_ATTR_NOATIME;
ASSERT_NE(sys_mount_setattr(-1, "/mnt/A", AT_RECURSIVE, &attr, sizeof(attr)), 0);
attr.attr_set = MOUNT_ATTR_STRICTATIME | MOUNT_ATTR_NOATIME;
attr.attr_clr = MOUNT_ATTR__ATIME;
ASSERT_NE(sys_mount_setattr(-1, "/mnt/A", AT_RECURSIVE, &attr, sizeof(attr)), 0);
attr.attr_set = 0;
attr.attr_clr = MOUNT_ATTR_STRICTATIME;
ASSERT_NE(sys_mount_setattr(-1, "/mnt/A", AT_RECURSIVE, &attr, sizeof(attr)), 0);
attr.attr_clr = MOUNT_ATTR_NOATIME;
ASSERT_NE(sys_mount_setattr(-1, "/mnt/A", AT_RECURSIVE, &attr, sizeof(attr)), 0);
old_flags = read_mnt_flags("/mnt/A");
ASSERT_GT(old_flags, 0);
attr.attr_set = MOUNT_ATTR_NODIRATIME | MOUNT_ATTR_NOATIME;
attr.attr_clr = MOUNT_ATTR__ATIME;
ASSERT_EQ(sys_mount_setattr(-1, "/mnt/A", AT_RECURSIVE, &attr, sizeof(attr)), 0);
expected_flags = old_flags;
expected_flags |= MS_NOATIME;
expected_flags |= MS_NODIRATIME;
new_flags = read_mnt_flags("/mnt/A");
ASSERT_EQ(new_flags, expected_flags);
new_flags = read_mnt_flags("/mnt/A/AA");
ASSERT_EQ(new_flags, expected_flags);
new_flags = read_mnt_flags("/mnt/A/AA/B");
ASSERT_EQ(new_flags, expected_flags);
new_flags = read_mnt_flags("/mnt/A/AA/B/BB");
ASSERT_EQ(new_flags, expected_flags);
memset(&attr, 0, sizeof(attr));
attr.attr_set &= ~MOUNT_ATTR_NOATIME;
attr.attr_set |= MOUNT_ATTR_RELATIME;
attr.attr_clr |= MOUNT_ATTR__ATIME;
ASSERT_EQ(sys_mount_setattr(-1, "/mnt/A", AT_RECURSIVE, &attr, sizeof(attr)), 0);
expected_flags &= ~MS_NOATIME;
expected_flags |= MS_RELATIME;
new_flags = read_mnt_flags("/mnt/A");
ASSERT_EQ(new_flags, expected_flags);
new_flags = read_mnt_flags("/mnt/A/AA");
ASSERT_EQ(new_flags, expected_flags);
new_flags = read_mnt_flags("/mnt/A/AA/B");
ASSERT_EQ(new_flags, expected_flags);
new_flags = read_mnt_flags("/mnt/A/AA/B/BB");
ASSERT_EQ(new_flags, expected_flags);
memset(&attr, 0, sizeof(attr));
attr.attr_set &= ~MOUNT_ATTR_RELATIME;
attr.attr_set |= MOUNT_ATTR_STRICTATIME;
attr.attr_clr |= MOUNT_ATTR__ATIME;
ASSERT_EQ(sys_mount_setattr(-1, "/mnt/A", AT_RECURSIVE, &attr, sizeof(attr)), 0);
expected_flags &= ~MS_RELATIME;
new_flags = read_mnt_flags("/mnt/A");
ASSERT_EQ(new_flags, expected_flags);
new_flags = read_mnt_flags("/mnt/A/AA");
ASSERT_EQ(new_flags, expected_flags);
new_flags = read_mnt_flags("/mnt/A/AA/B");
ASSERT_EQ(new_flags, expected_flags);
new_flags = read_mnt_flags("/mnt/A/AA/B/BB");
ASSERT_EQ(new_flags, expected_flags);
memset(&attr, 0, sizeof(attr));
attr.attr_set &= ~MOUNT_ATTR_STRICTATIME;
attr.attr_set |= MOUNT_ATTR_NOATIME;
attr.attr_clr |= MOUNT_ATTR__ATIME;
ASSERT_EQ(sys_mount_setattr(-1, "/mnt/A", AT_RECURSIVE, &attr, sizeof(attr)), 0);
expected_flags |= MS_NOATIME;
new_flags = read_mnt_flags("/mnt/A");
ASSERT_EQ(new_flags, expected_flags);
new_flags = read_mnt_flags("/mnt/A/AA");
ASSERT_EQ(new_flags, expected_flags);
new_flags = read_mnt_flags("/mnt/A/AA/B");
ASSERT_EQ(new_flags, expected_flags);
new_flags = read_mnt_flags("/mnt/A/AA/B/BB");
ASSERT_EQ(new_flags, expected_flags);
memset(&attr, 0, sizeof(attr));
ASSERT_EQ(sys_mount_setattr(-1, "/mnt/A", AT_RECURSIVE, &attr, sizeof(attr)), 0);
new_flags = read_mnt_flags("/mnt/A");
ASSERT_EQ(new_flags, expected_flags);
new_flags = read_mnt_flags("/mnt/A/AA");
ASSERT_EQ(new_flags, expected_flags);
new_flags = read_mnt_flags("/mnt/A/AA/B");
ASSERT_EQ(new_flags, expected_flags);
new_flags = read_mnt_flags("/mnt/A/AA/B/BB");
ASSERT_EQ(new_flags, expected_flags);
memset(&attr, 0, sizeof(attr));
attr.attr_clr = MOUNT_ATTR_NODIRATIME;
ASSERT_EQ(sys_mount_setattr(-1, "/mnt/A", AT_RECURSIVE, &attr, sizeof(attr)), 0);
expected_flags &= ~MS_NODIRATIME;
new_flags = read_mnt_flags("/mnt/A");
ASSERT_EQ(new_flags, expected_flags);
new_flags = read_mnt_flags("/mnt/A/AA");
ASSERT_EQ(new_flags, expected_flags);
new_flags = read_mnt_flags("/mnt/A/AA/B");
ASSERT_EQ(new_flags, expected_flags);
new_flags = read_mnt_flags("/mnt/A/AA/B/BB");
ASSERT_EQ(new_flags, expected_flags);
}
TEST_F(mount_setattr, multi_threaded)
{
int i, j, nthreads, ret = 0;
unsigned int old_flags = 0, new_flags = 0, expected_flags = 0;
pthread_attr_t pattr;
pthread_t threads[DEFAULT_THREADS];
if (!mount_setattr_supported())
SKIP(return, "mount_setattr syscall not supported");
old_flags = read_mnt_flags("/mnt/A");
ASSERT_GT(old_flags, 0);
/* Try to change mount options from multiple threads. */
nthreads = get_nprocs_conf();
if (nthreads > DEFAULT_THREADS)
nthreads = DEFAULT_THREADS;
pthread_attr_init(&pattr);
for (i = 0; i < nthreads; i++)
ASSERT_EQ(pthread_create(&threads[i], &pattr, mount_setattr_thread, NULL), 0);
for (j = 0; j < i; j++) {
void *retptr = NULL;
EXPECT_EQ(pthread_join(threads[j], &retptr), 0);
ret += ptr_to_int(retptr);
EXPECT_EQ(ret, 0);
}
pthread_attr_destroy(&pattr);
ASSERT_EQ(ret, 0);
expected_flags = old_flags;
expected_flags |= MS_RDONLY;
expected_flags |= MS_NOSUID;
new_flags = read_mnt_flags("/mnt/A");
ASSERT_EQ(new_flags, expected_flags);
ASSERT_EQ(is_shared_mount("/mnt/A"), true);
new_flags = read_mnt_flags("/mnt/A/AA");
ASSERT_EQ(new_flags, expected_flags);
ASSERT_EQ(is_shared_mount("/mnt/A/AA"), true);
new_flags = read_mnt_flags("/mnt/A/AA/B");
ASSERT_EQ(new_flags, expected_flags);
ASSERT_EQ(is_shared_mount("/mnt/A/AA/B"), true);
new_flags = read_mnt_flags("/mnt/A/AA/B/BB");
ASSERT_EQ(new_flags, expected_flags);
ASSERT_EQ(is_shared_mount("/mnt/A/AA/B/BB"), true);
}
TEST_F(mount_setattr, wrong_user_namespace)
{
int ret;
struct mount_attr attr = {
.attr_set = MOUNT_ATTR_RDONLY,
};
if (!mount_setattr_supported())
SKIP(return, "mount_setattr syscall not supported");
EXPECT_EQ(create_and_enter_userns(), 0);
ret = sys_mount_setattr(-1, "/mnt/A", AT_RECURSIVE, &attr, sizeof(attr));
ASSERT_LT(ret, 0);
ASSERT_EQ(errno, EPERM);
}
TEST_F(mount_setattr, wrong_mount_namespace)
{
int fd, ret;
struct mount_attr attr = {
.attr_set = MOUNT_ATTR_RDONLY,
};
if (!mount_setattr_supported())
SKIP(return, "mount_setattr syscall not supported");
fd = open("/mnt/A", O_DIRECTORY | O_CLOEXEC);
ASSERT_GE(fd, 0);
ASSERT_EQ(unshare(CLONE_NEWNS), 0);
ret = sys_mount_setattr(fd, "", AT_EMPTY_PATH | AT_RECURSIVE, &attr, sizeof(attr));
ASSERT_LT(ret, 0);
ASSERT_EQ(errno, EINVAL);
}
FIXTURE(mount_setattr_idmapped) {
};
FIXTURE_SETUP(mount_setattr_idmapped)
{
int img_fd = -EBADF;
ASSERT_EQ(unshare(CLONE_NEWNS), 0);
ASSERT_EQ(mount(NULL, "/", NULL, MS_REC | MS_PRIVATE, 0), 0);
(void)umount2("/mnt", MNT_DETACH);
(void)umount2("/tmp", MNT_DETACH);
ASSERT_EQ(mount("testing", "/tmp", "tmpfs", MS_NOATIME | MS_NODEV,
"size=100000,mode=700"), 0);
ASSERT_EQ(mkdir("/tmp/B", 0777), 0);
ASSERT_EQ(mknodat(-EBADF, "/tmp/B/b", S_IFREG | 0644, 0), 0);
ASSERT_EQ(chown("/tmp/B/b", 0, 0), 0);
ASSERT_EQ(mount("testing", "/tmp/B", "tmpfs", MS_NOATIME | MS_NODEV,
"size=100000,mode=700"), 0);
ASSERT_EQ(mkdir("/tmp/B/BB", 0777), 0);
ASSERT_EQ(mknodat(-EBADF, "/tmp/B/BB/b", S_IFREG | 0644, 0), 0);
ASSERT_EQ(chown("/tmp/B/BB/b", 0, 0), 0);
ASSERT_EQ(mount("testing", "/tmp/B/BB", "tmpfs", MS_NOATIME | MS_NODEV,
"size=100000,mode=700"), 0);
ASSERT_EQ(mount("testing", "/mnt", "tmpfs", MS_NOATIME | MS_NODEV,
"size=100000,mode=700"), 0);
ASSERT_EQ(mkdir("/mnt/A", 0777), 0);
ASSERT_EQ(mount("testing", "/mnt/A", "tmpfs", MS_NOATIME | MS_NODEV,
"size=100000,mode=700"), 0);
ASSERT_EQ(mkdir("/mnt/A/AA", 0777), 0);
ASSERT_EQ(mount("/tmp", "/mnt/A/AA", NULL, MS_BIND | MS_REC, NULL), 0);
ASSERT_EQ(mkdir("/mnt/B", 0777), 0);
ASSERT_EQ(mount("testing", "/mnt/B", "ramfs",
MS_NOATIME | MS_NODEV | MS_NOSUID, 0), 0);
ASSERT_EQ(mkdir("/mnt/B/BB", 0777), 0);
ASSERT_EQ(mount("testing", "/tmp/B/BB", "devpts",
MS_RELATIME | MS_NOEXEC | MS_RDONLY, 0), 0);
ASSERT_EQ(mkdir("/mnt/C", 0777), 0);
ASSERT_EQ(mkdir("/mnt/D", 0777), 0);
img_fd = openat(-EBADF, "/mnt/C/ext4.img", O_CREAT | O_WRONLY, 0600);
ASSERT_GE(img_fd, 0);
ASSERT_EQ(ftruncate(img_fd, 1024 * 2048), 0);
ASSERT_EQ(system("mkfs.ext4 -q /mnt/C/ext4.img"), 0);
ASSERT_EQ(system("mount -o loop -t ext4 /mnt/C/ext4.img /mnt/D/"), 0);
ASSERT_EQ(close(img_fd), 0);
}
FIXTURE_TEARDOWN(mount_setattr_idmapped)
{
(void)umount2("/mnt/A", MNT_DETACH);
(void)umount2("/tmp", MNT_DETACH);
}
/**
* Validate that negative fd values are rejected.
*/
TEST_F(mount_setattr_idmapped, invalid_fd_negative)
{
struct mount_attr attr = {
.attr_set = MOUNT_ATTR_IDMAP,
.userns_fd = -EBADF,
};
if (!mount_setattr_supported())
SKIP(return, "mount_setattr syscall not supported");
ASSERT_NE(sys_mount_setattr(-1, "/", 0, &attr, sizeof(attr)), 0) {
TH_LOG("failure: created idmapped mount with negative fd");
}
}
/**
* Validate that excessively large fd values are rejected.
*/
TEST_F(mount_setattr_idmapped, invalid_fd_large)
{
struct mount_attr attr = {
.attr_set = MOUNT_ATTR_IDMAP,
.userns_fd = INT64_MAX,
};
if (!mount_setattr_supported())
SKIP(return, "mount_setattr syscall not supported");
ASSERT_NE(sys_mount_setattr(-1, "/", 0, &attr, sizeof(attr)), 0) {
TH_LOG("failure: created idmapped mount with too large fd value");
}
}
/**
* Validate that closed fd values are rejected.
*/
TEST_F(mount_setattr_idmapped, invalid_fd_closed)
{
int fd;
struct mount_attr attr = {
.attr_set = MOUNT_ATTR_IDMAP,
};
if (!mount_setattr_supported())
SKIP(return, "mount_setattr syscall not supported");
fd = open("/dev/null", O_RDONLY | O_CLOEXEC);
ASSERT_GE(fd, 0);
ASSERT_GE(close(fd), 0);
attr.userns_fd = fd;
ASSERT_NE(sys_mount_setattr(-1, "/", 0, &attr, sizeof(attr)), 0) {
TH_LOG("failure: created idmapped mount with closed fd");
}
}
/**
* Validate that the initial user namespace is rejected.
*/
TEST_F(mount_setattr_idmapped, invalid_fd_initial_userns)
{
int open_tree_fd = -EBADF;
struct mount_attr attr = {
.attr_set = MOUNT_ATTR_IDMAP,
};
if (!mount_setattr_supported())
SKIP(return, "mount_setattr syscall not supported");
open_tree_fd = sys_open_tree(-EBADF, "/mnt/D",
AT_NO_AUTOMOUNT |
AT_SYMLINK_NOFOLLOW |
OPEN_TREE_CLOEXEC | OPEN_TREE_CLONE);
ASSERT_GE(open_tree_fd, 0);
attr.userns_fd = open("/proc/1/ns/user", O_RDONLY | O_CLOEXEC);
ASSERT_GE(attr.userns_fd, 0);
ASSERT_NE(sys_mount_setattr(open_tree_fd, "", AT_EMPTY_PATH, &attr, sizeof(attr)), 0);
ASSERT_EQ(errno, EPERM);
ASSERT_EQ(close(attr.userns_fd), 0);
ASSERT_EQ(close(open_tree_fd), 0);
}
static int map_ids(pid_t pid, unsigned long nsid, unsigned long hostid,
unsigned long range)
{
char map[100], procfile[256];
snprintf(procfile, sizeof(procfile), "/proc/%d/uid_map", pid);
snprintf(map, sizeof(map), "%lu %lu %lu", nsid, hostid, range);
if (write_file(procfile, map, strlen(map)))
return -1;
snprintf(procfile, sizeof(procfile), "/proc/%d/gid_map", pid);
snprintf(map, sizeof(map), "%lu %lu %lu", nsid, hostid, range);
if (write_file(procfile, map, strlen(map)))
return -1;
return 0;
}
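/*
 * Illustrative example (not part of the test flow): map_ids(pid, 0, 10000,
 * 10000) writes the single line "0 10000 10000" to both /proc/<pid>/uid_map
 * and /proc/<pid>/gid_map, i.e. ids 0..9999 inside the new user namespace
 * correspond to ids 10000..19999 on the host.
 */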
#define __STACK_SIZE (8 * 1024 * 1024)
static pid_t do_clone(int (*fn)(void *), void *arg, int flags)
{
void *stack;
stack = malloc(__STACK_SIZE);
if (!stack)
return -ENOMEM;
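	/*
	 * The stack grows downwards on most architectures, so clone() is
	 * handed the top of the allocation; ia64 instead takes the base
	 * address plus a size via __clone2(). The allocation is never
	 * freed, which is acceptable for this short-lived test helper.
	 */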
#ifdef __ia64__
return __clone2(fn, stack, __STACK_SIZE, flags | SIGCHLD, arg, NULL);
#else
return clone(fn, stack + __STACK_SIZE, flags | SIGCHLD, arg, NULL);
#endif
}
static int get_userns_fd_cb(void *data)
{
return kill(getpid(), SIGSTOP);
}
static int wait_for_pid(pid_t pid)
{
int status, ret;
again:
ret = waitpid(pid, &status, 0);
if (ret == -1) {
if (errno == EINTR)
goto again;
return -1;
}
if (!WIFEXITED(status))
return -1;
return WEXITSTATUS(status);
}
static int get_userns_fd(unsigned long nsid, unsigned long hostid, unsigned long range)
{
int ret;
pid_t pid;
char path[256];
pid = do_clone(get_userns_fd_cb, NULL, CLONE_NEWUSER);
if (pid < 0)
return -errno;
ret = map_ids(pid, nsid, hostid, range);
if (ret < 0)
return ret;
snprintf(path, sizeof(path), "/proc/%d/ns/user", pid);
ret = open(path, O_RDONLY | O_CLOEXEC);
kill(pid, SIGKILL);
wait_for_pid(pid);
return ret;
}
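/*
 * Note: the fd returned by get_userns_fd() keeps the user namespace alive
 * even though the child that created it is killed and reaped before
 * returning, because an open /proc/<pid>/ns/user fd holds its own
 * reference to the namespace.
 */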
/**
* Validate that an attached mount in our mount namespace cannot be idmapped.
* (The kernel enforces that the mount's mount namespace and the caller's mount
* namespace match.)
*/
TEST_F(mount_setattr_idmapped, attached_mount_inside_current_mount_namespace)
{
int open_tree_fd = -EBADF;
struct mount_attr attr = {
.attr_set = MOUNT_ATTR_IDMAP,
};
if (!mount_setattr_supported())
SKIP(return, "mount_setattr syscall not supported");
open_tree_fd = sys_open_tree(-EBADF, "/mnt/D",
AT_EMPTY_PATH |
AT_NO_AUTOMOUNT |
AT_SYMLINK_NOFOLLOW |
OPEN_TREE_CLOEXEC);
ASSERT_GE(open_tree_fd, 0);
attr.userns_fd = get_userns_fd(0, 10000, 10000);
ASSERT_GE(attr.userns_fd, 0);
ASSERT_NE(sys_mount_setattr(open_tree_fd, "", AT_EMPTY_PATH, &attr, sizeof(attr)), 0);
ASSERT_EQ(close(attr.userns_fd), 0);
ASSERT_EQ(close(open_tree_fd), 0);
}
/**
* Validate that idmapping a mount is rejected if the mount's mount namespace
* and our mount namespace don't match.
* (The kernel enforces that the mount's mount namespace and the caller's mount
* namespace match.)
*/
TEST_F(mount_setattr_idmapped, attached_mount_outside_current_mount_namespace)
{
int open_tree_fd = -EBADF;
struct mount_attr attr = {
.attr_set = MOUNT_ATTR_IDMAP,
};
if (!mount_setattr_supported())
SKIP(return, "mount_setattr syscall not supported");
open_tree_fd = sys_open_tree(-EBADF, "/mnt/D",
AT_EMPTY_PATH |
AT_NO_AUTOMOUNT |
AT_SYMLINK_NOFOLLOW |
OPEN_TREE_CLOEXEC);
ASSERT_GE(open_tree_fd, 0);
ASSERT_EQ(unshare(CLONE_NEWNS), 0);
attr.userns_fd = get_userns_fd(0, 10000, 10000);
ASSERT_GE(attr.userns_fd, 0);
ASSERT_NE(sys_mount_setattr(open_tree_fd, "", AT_EMPTY_PATH, &attr,
sizeof(attr)), 0);
ASSERT_EQ(close(attr.userns_fd), 0);
ASSERT_EQ(close(open_tree_fd), 0);
}
/**
 * Validate that a detached mount in our mount namespace can be idmapped.
*/
TEST_F(mount_setattr_idmapped, detached_mount_inside_current_mount_namespace)
{
int open_tree_fd = -EBADF;
struct mount_attr attr = {
.attr_set = MOUNT_ATTR_IDMAP,
};
if (!mount_setattr_supported())
SKIP(return, "mount_setattr syscall not supported");
open_tree_fd = sys_open_tree(-EBADF, "/mnt/D",
AT_EMPTY_PATH |
AT_NO_AUTOMOUNT |
AT_SYMLINK_NOFOLLOW |
OPEN_TREE_CLOEXEC |
OPEN_TREE_CLONE);
ASSERT_GE(open_tree_fd, 0);
/* Changing mount properties on a detached mount. */
attr.userns_fd = get_userns_fd(0, 10000, 10000);
ASSERT_GE(attr.userns_fd, 0);
ASSERT_EQ(sys_mount_setattr(open_tree_fd, "",
AT_EMPTY_PATH, &attr, sizeof(attr)), 0);
ASSERT_EQ(close(attr.userns_fd), 0);
ASSERT_EQ(close(open_tree_fd), 0);
}
/**
* Validate that a detached mount not in our mount namespace can be idmapped.
*/
TEST_F(mount_setattr_idmapped, detached_mount_outside_current_mount_namespace)
{
int open_tree_fd = -EBADF;
struct mount_attr attr = {
.attr_set = MOUNT_ATTR_IDMAP,
};
if (!mount_setattr_supported())
SKIP(return, "mount_setattr syscall not supported");
open_tree_fd = sys_open_tree(-EBADF, "/mnt/D",
AT_EMPTY_PATH |
AT_NO_AUTOMOUNT |
AT_SYMLINK_NOFOLLOW |
OPEN_TREE_CLOEXEC |
OPEN_TREE_CLONE);
ASSERT_GE(open_tree_fd, 0);
ASSERT_EQ(unshare(CLONE_NEWNS), 0);
/* Changing mount properties on a detached mount. */
attr.userns_fd = get_userns_fd(0, 10000, 10000);
ASSERT_GE(attr.userns_fd, 0);
ASSERT_EQ(sys_mount_setattr(open_tree_fd, "",
AT_EMPTY_PATH, &attr, sizeof(attr)), 0);
ASSERT_EQ(close(attr.userns_fd), 0);
ASSERT_EQ(close(open_tree_fd), 0);
}
/**
 * Validate that changing the idmapping of an already idmapped mount currently fails.
*/
TEST_F(mount_setattr_idmapped, change_idmapping)
{
int open_tree_fd = -EBADF;
struct mount_attr attr = {
.attr_set = MOUNT_ATTR_IDMAP,
};
if (!mount_setattr_supported())
SKIP(return, "mount_setattr syscall not supported");
open_tree_fd = sys_open_tree(-EBADF, "/mnt/D",
AT_EMPTY_PATH |
AT_NO_AUTOMOUNT |
AT_SYMLINK_NOFOLLOW |
OPEN_TREE_CLOEXEC |
OPEN_TREE_CLONE);
ASSERT_GE(open_tree_fd, 0);
attr.userns_fd = get_userns_fd(0, 10000, 10000);
ASSERT_GE(attr.userns_fd, 0);
ASSERT_EQ(sys_mount_setattr(open_tree_fd, "",
AT_EMPTY_PATH, &attr, sizeof(attr)), 0);
ASSERT_EQ(close(attr.userns_fd), 0);
/* Change idmapping on a detached mount that is already idmapped. */
attr.userns_fd = get_userns_fd(0, 20000, 10000);
ASSERT_GE(attr.userns_fd, 0);
ASSERT_NE(sys_mount_setattr(open_tree_fd, "", AT_EMPTY_PATH, &attr, sizeof(attr)), 0);
ASSERT_EQ(close(attr.userns_fd), 0);
ASSERT_EQ(close(open_tree_fd), 0);
}
static bool expected_uid_gid(int dfd, const char *path, int flags,
uid_t expected_uid, gid_t expected_gid)
{
int ret;
struct stat st;
ret = fstatat(dfd, path, &st, flags);
if (ret < 0)
return false;
return st.st_uid == expected_uid && st.st_gid == expected_gid;
}
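/*
 * Illustrative sketch (hypothetical helper, not used by the tests): with a
 * mapping from get_userns_fd(0, 10000, 10000) attached, a file owned by
 * 0:0 in the underlying filesystem should appear as 10000:10000 when
 * viewed through the idmapped mount.
 */
static inline bool __attribute__((unused))
example_idmap_shift(int idmapped_fd, const char *path)
{
	return expected_uid_gid(idmapped_fd, path, 0, 10000, 10000);
}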
TEST_F(mount_setattr_idmapped, idmap_mount_tree_invalid)
{
int open_tree_fd = -EBADF;
struct mount_attr attr = {
.attr_set = MOUNT_ATTR_IDMAP,
};
if (!mount_setattr_supported())
SKIP(return, "mount_setattr syscall not supported");
ASSERT_EQ(expected_uid_gid(-EBADF, "/tmp/B/b", 0, 0, 0), 0);
ASSERT_EQ(expected_uid_gid(-EBADF, "/tmp/B/BB/b", 0, 0, 0), 0);
open_tree_fd = sys_open_tree(-EBADF, "/mnt/A",
AT_RECURSIVE |
AT_EMPTY_PATH |
AT_NO_AUTOMOUNT |
AT_SYMLINK_NOFOLLOW |
OPEN_TREE_CLOEXEC |
OPEN_TREE_CLONE);
ASSERT_GE(open_tree_fd, 0);
attr.userns_fd = get_userns_fd(0, 10000, 10000);
ASSERT_GE(attr.userns_fd, 0);
ASSERT_NE(sys_mount_setattr(open_tree_fd, "", AT_EMPTY_PATH, &attr, sizeof(attr)), 0);
ASSERT_EQ(close(attr.userns_fd), 0);
ASSERT_EQ(close(open_tree_fd), 0);
ASSERT_EQ(expected_uid_gid(-EBADF, "/tmp/B/b", 0, 0, 0), 0);
ASSERT_EQ(expected_uid_gid(-EBADF, "/tmp/B/BB/b", 0, 0, 0), 0);
ASSERT_EQ(expected_uid_gid(open_tree_fd, "B/b", 0, 0, 0), 0);
ASSERT_EQ(expected_uid_gid(open_tree_fd, "B/BB/b", 0, 0, 0), 0);
}
TEST_F(mount_setattr, mount_attr_nosymfollow)
{
int fd;
unsigned int old_flags = 0, new_flags = 0, expected_flags = 0;
struct mount_attr attr = {
.attr_set = MOUNT_ATTR_NOSYMFOLLOW,
};
if (!mount_setattr_supported())
SKIP(return, "mount_setattr syscall not supported");
fd = open(NOSYMFOLLOW_SYMLINK, O_RDWR | O_CLOEXEC);
ASSERT_GT(fd, 0);
ASSERT_EQ(close(fd), 0);
old_flags = read_mnt_flags("/mnt/A");
ASSERT_GT(old_flags, 0);
ASSERT_EQ(sys_mount_setattr(-1, "/mnt/A", AT_RECURSIVE, &attr, sizeof(attr)), 0);
expected_flags = old_flags;
expected_flags |= ST_NOSYMFOLLOW;
new_flags = read_mnt_flags("/mnt/A");
ASSERT_EQ(new_flags, expected_flags);
new_flags = read_mnt_flags("/mnt/A/AA");
ASSERT_EQ(new_flags, expected_flags);
new_flags = read_mnt_flags("/mnt/A/AA/B");
ASSERT_EQ(new_flags, expected_flags);
new_flags = read_mnt_flags("/mnt/A/AA/B/BB");
ASSERT_EQ(new_flags, expected_flags);
fd = open(NOSYMFOLLOW_SYMLINK, O_RDWR | O_CLOEXEC);
ASSERT_LT(fd, 0);
ASSERT_EQ(errno, ELOOP);
attr.attr_set &= ~MOUNT_ATTR_NOSYMFOLLOW;
attr.attr_clr |= MOUNT_ATTR_NOSYMFOLLOW;
ASSERT_EQ(sys_mount_setattr(-1, "/mnt/A", AT_RECURSIVE, &attr, sizeof(attr)), 0);
expected_flags &= ~ST_NOSYMFOLLOW;
new_flags = read_mnt_flags("/mnt/A");
ASSERT_EQ(new_flags, expected_flags);
new_flags = read_mnt_flags("/mnt/A/AA");
ASSERT_EQ(new_flags, expected_flags);
new_flags = read_mnt_flags("/mnt/A/AA/B");
ASSERT_EQ(new_flags, expected_flags);
new_flags = read_mnt_flags("/mnt/A/AA/B/BB");
ASSERT_EQ(new_flags, expected_flags);
fd = open(NOSYMFOLLOW_SYMLINK, O_RDWR | O_CLOEXEC);
ASSERT_GT(fd, 0);
ASSERT_EQ(close(fd), 0);
}
TEST_HARNESS_MAIN
| linux-master | tools/testing/selftests/mount_setattr/mount_setattr_test.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Test for perf events with SIGTRAP across all threads.
*
* Copyright (C) 2021, Google LLC.
*/
#define _GNU_SOURCE
/* We need the latest siginfo from the kernel repo. */
#include <sys/types.h>
#include <asm/siginfo.h>
#define __have_siginfo_t 1
#define __have_sigval_t 1
#define __have_sigevent_t 1
#define __siginfo_t_defined
#define __sigval_t_defined
#define __sigevent_t_defined
#define _BITS_SIGINFO_CONSTS_H 1
#define _BITS_SIGEVENT_CONSTS_H 1
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <linux/hw_breakpoint.h>
#include <linux/perf_event.h>
#include <pthread.h>
#include <signal.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>
#include "../kselftest_harness.h"
#define NUM_THREADS 5
/* Data shared between test body, threads, and signal handler. */
static struct {
int tids_want_signal; /* Which threads still want a signal. */
int signal_count; /* Sanity check number of signals received. */
volatile int iterate_on; /* Variable to set breakpoint on. */
siginfo_t first_siginfo; /* First observed siginfo_t. */
} ctx;
/* Unique value to check si_perf_data is correctly set from perf_event_attr::sig_data. */
#define TEST_SIG_DATA(addr, id) (~(unsigned long)(addr) + id)
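/*
 * For example, an event created via make_event_attr(..., &ctx.iterate_on, 42)
 * must deliver si_perf_data == ~(unsigned long)&ctx.iterate_on + 42 to the
 * signal handler; modify_and_enable_event below checks exactly that.
 */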
static struct perf_event_attr make_event_attr(bool enabled, volatile void *addr,
unsigned long id)
{
struct perf_event_attr attr = {
.type = PERF_TYPE_BREAKPOINT,
.size = sizeof(attr),
.sample_period = 1,
.disabled = !enabled,
.bp_addr = (unsigned long)addr,
.bp_type = HW_BREAKPOINT_RW,
.bp_len = HW_BREAKPOINT_LEN_1,
.inherit = 1, /* Children inherit events ... */
.inherit_thread = 1, /* ... but only cloned with CLONE_THREAD. */
.remove_on_exec = 1, /* Required by sigtrap. */
.sigtrap = 1, /* Request synchronous SIGTRAP on event. */
.sig_data = TEST_SIG_DATA(addr, id),
.exclude_kernel = 1, /* To allow */
.exclude_hv = 1, /* running as !root */
};
return attr;
}
static void sigtrap_handler(int signum, siginfo_t *info, void *ucontext)
{
if (info->si_code != TRAP_PERF) {
fprintf(stderr, "%s: unexpected si_code %d\n", __func__, info->si_code);
return;
}
/*
* The data in siginfo_t we're interested in should all be the same
* across threads.
*/
if (!__atomic_fetch_add(&ctx.signal_count, 1, __ATOMIC_RELAXED))
ctx.first_siginfo = *info;
__atomic_fetch_sub(&ctx.tids_want_signal, syscall(__NR_gettid), __ATOMIC_RELAXED);
}
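/*
 * Accounting sketch: each thread adds its tid to ctx.tids_want_signal once
 * per signal it expects, and the handler subtracts gettid() once per signal
 * delivered, so the counter returns to zero exactly when every thread has
 * received as many signals as it asked for.
 */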
static void *test_thread(void *arg)
{
pthread_barrier_t *barrier = (pthread_barrier_t *)arg;
pid_t tid = syscall(__NR_gettid);
int iter;
int i;
pthread_barrier_wait(barrier);
__atomic_fetch_add(&ctx.tids_want_signal, tid, __ATOMIC_RELAXED);
iter = ctx.iterate_on; /* read */
if (iter >= 0) {
for (i = 0; i < iter - 1; i++) {
__atomic_fetch_add(&ctx.tids_want_signal, tid, __ATOMIC_RELAXED);
ctx.iterate_on = iter; /* idempotent write */
}
} else {
while (ctx.iterate_on);
}
return NULL;
}
FIXTURE(sigtrap_threads)
{
struct sigaction oldact;
pthread_t threads[NUM_THREADS];
pthread_barrier_t barrier;
int fd;
};
FIXTURE_SETUP(sigtrap_threads)
{
struct perf_event_attr attr = make_event_attr(false, &ctx.iterate_on, 0);
struct sigaction action = {};
int i;
memset(&ctx, 0, sizeof(ctx));
/* Initialize sigtrap handler. */
action.sa_flags = SA_SIGINFO | SA_NODEFER;
action.sa_sigaction = sigtrap_handler;
sigemptyset(&action.sa_mask);
ASSERT_EQ(sigaction(SIGTRAP, &action, &self->oldact), 0);
/* Initialize perf event. */
self->fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, PERF_FLAG_FD_CLOEXEC);
ASSERT_NE(self->fd, -1);
/* Spawn threads inheriting perf event. */
pthread_barrier_init(&self->barrier, NULL, NUM_THREADS + 1);
for (i = 0; i < NUM_THREADS; i++)
ASSERT_EQ(pthread_create(&self->threads[i], NULL, test_thread, &self->barrier), 0);
}
FIXTURE_TEARDOWN(sigtrap_threads)
{
pthread_barrier_destroy(&self->barrier);
close(self->fd);
sigaction(SIGTRAP, &self->oldact, NULL);
}
static void run_test_threads(struct __test_metadata *_metadata,
FIXTURE_DATA(sigtrap_threads) *self)
{
int i;
pthread_barrier_wait(&self->barrier);
for (i = 0; i < NUM_THREADS; i++)
ASSERT_EQ(pthread_join(self->threads[i], NULL), 0);
}
TEST_F(sigtrap_threads, remain_disabled)
{
run_test_threads(_metadata, self);
EXPECT_EQ(ctx.signal_count, 0);
EXPECT_NE(ctx.tids_want_signal, 0);
}
TEST_F(sigtrap_threads, enable_event)
{
EXPECT_EQ(ioctl(self->fd, PERF_EVENT_IOC_ENABLE, 0), 0);
run_test_threads(_metadata, self);
EXPECT_EQ(ctx.signal_count, NUM_THREADS);
EXPECT_EQ(ctx.tids_want_signal, 0);
EXPECT_EQ(ctx.first_siginfo.si_addr, &ctx.iterate_on);
EXPECT_EQ(ctx.first_siginfo.si_perf_type, PERF_TYPE_BREAKPOINT);
EXPECT_EQ(ctx.first_siginfo.si_perf_data, TEST_SIG_DATA(&ctx.iterate_on, 0));
/* Check enabled for parent. */
ctx.iterate_on = 0;
EXPECT_EQ(ctx.signal_count, NUM_THREADS + 1);
}
/* Test that modification propagates to all inherited events. */
TEST_F(sigtrap_threads, modify_and_enable_event)
{
struct perf_event_attr new_attr = make_event_attr(true, &ctx.iterate_on, 42);
EXPECT_EQ(ioctl(self->fd, PERF_EVENT_IOC_MODIFY_ATTRIBUTES, &new_attr), 0);
run_test_threads(_metadata, self);
EXPECT_EQ(ctx.signal_count, NUM_THREADS);
EXPECT_EQ(ctx.tids_want_signal, 0);
EXPECT_EQ(ctx.first_siginfo.si_addr, &ctx.iterate_on);
EXPECT_EQ(ctx.first_siginfo.si_perf_type, PERF_TYPE_BREAKPOINT);
EXPECT_EQ(ctx.first_siginfo.si_perf_data, TEST_SIG_DATA(&ctx.iterate_on, 42));
/* Check enabled for parent. */
ctx.iterate_on = 0;
EXPECT_EQ(ctx.signal_count, NUM_THREADS + 1);
}
/* Stress test event + signal handling. */
TEST_F(sigtrap_threads, signal_stress)
{
ctx.iterate_on = 3000;
EXPECT_EQ(ioctl(self->fd, PERF_EVENT_IOC_ENABLE, 0), 0);
run_test_threads(_metadata, self);
EXPECT_EQ(ioctl(self->fd, PERF_EVENT_IOC_DISABLE, 0), 0);
EXPECT_EQ(ctx.signal_count, NUM_THREADS * ctx.iterate_on);
EXPECT_EQ(ctx.tids_want_signal, 0);
EXPECT_EQ(ctx.first_siginfo.si_addr, &ctx.iterate_on);
EXPECT_EQ(ctx.first_siginfo.si_perf_type, PERF_TYPE_BREAKPOINT);
EXPECT_EQ(ctx.first_siginfo.si_perf_data, TEST_SIG_DATA(&ctx.iterate_on, 0));
}
TEST_F(sigtrap_threads, signal_stress_with_disable)
{
const int target_count = NUM_THREADS * 3000;
int i;
ctx.iterate_on = -1;
EXPECT_EQ(ioctl(self->fd, PERF_EVENT_IOC_ENABLE, 0), 0);
pthread_barrier_wait(&self->barrier);
while (__atomic_load_n(&ctx.signal_count, __ATOMIC_RELAXED) < target_count) {
EXPECT_EQ(ioctl(self->fd, PERF_EVENT_IOC_DISABLE, 0), 0);
EXPECT_EQ(ioctl(self->fd, PERF_EVENT_IOC_ENABLE, 0), 0);
}
ctx.iterate_on = 0;
for (i = 0; i < NUM_THREADS; i++)
ASSERT_EQ(pthread_join(self->threads[i], NULL), 0);
EXPECT_EQ(ioctl(self->fd, PERF_EVENT_IOC_DISABLE, 0), 0);
EXPECT_EQ(ctx.first_siginfo.si_addr, &ctx.iterate_on);
EXPECT_EQ(ctx.first_siginfo.si_perf_type, PERF_TYPE_BREAKPOINT);
EXPECT_EQ(ctx.first_siginfo.si_perf_data, TEST_SIG_DATA(&ctx.iterate_on, 0));
}
TEST_HARNESS_MAIN
| linux-master | tools/testing/selftests/perf_events/sigtrap_threads.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Test for remove_on_exec.
*
* Copyright (C) 2021, Google LLC.
*/
#define _GNU_SOURCE
/* We need the latest siginfo from the kernel repo. */
#include <sys/types.h>
#include <asm/siginfo.h>
#define __have_siginfo_t 1
#define __have_sigval_t 1
#define __have_sigevent_t 1
#define __siginfo_t_defined
#define __sigval_t_defined
#define __sigevent_t_defined
#define _BITS_SIGINFO_CONSTS_H 1
#define _BITS_SIGEVENT_CONSTS_H 1
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <linux/perf_event.h>
#include <pthread.h>
#include <signal.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>
#include "../kselftest_harness.h"
static volatile int signal_count;
static struct perf_event_attr make_event_attr(void)
{
struct perf_event_attr attr = {
.type = PERF_TYPE_HARDWARE,
.size = sizeof(attr),
.config = PERF_COUNT_HW_INSTRUCTIONS,
.sample_period = 1000,
.exclude_kernel = 1,
.exclude_hv = 1,
.disabled = 1,
.inherit = 1,
/*
* Children normally retain their inherited event on exec; with
* remove_on_exec, we'll remove their event, but the parent and
* any other non-exec'd children will keep their events.
*/
.remove_on_exec = 1,
.sigtrap = 1,
};
return attr;
}
static void sigtrap_handler(int signum, siginfo_t *info, void *ucontext)
{
if (info->si_code != TRAP_PERF) {
fprintf(stderr, "%s: unexpected si_code %d\n", __func__, info->si_code);
return;
}
signal_count++;
}
FIXTURE(remove_on_exec)
{
struct sigaction oldact;
int fd;
};
FIXTURE_SETUP(remove_on_exec)
{
struct perf_event_attr attr = make_event_attr();
struct sigaction action = {};
signal_count = 0;
/* Initialize sigtrap handler. */
action.sa_flags = SA_SIGINFO | SA_NODEFER;
action.sa_sigaction = sigtrap_handler;
sigemptyset(&action.sa_mask);
ASSERT_EQ(sigaction(SIGTRAP, &action, &self->oldact), 0);
/* Initialize perf event. */
self->fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, PERF_FLAG_FD_CLOEXEC);
ASSERT_NE(self->fd, -1);
}
FIXTURE_TEARDOWN(remove_on_exec)
{
close(self->fd);
sigaction(SIGTRAP, &self->oldact, NULL);
}
/* Verify event propagates to fork'd child. */
TEST_F(remove_on_exec, fork_only)
{
int status;
pid_t pid = fork();
if (pid == 0) {
ASSERT_EQ(signal_count, 0);
ASSERT_EQ(ioctl(self->fd, PERF_EVENT_IOC_ENABLE, 0), 0);
while (!signal_count);
_exit(42);
}
while (!signal_count); /* Child enables event. */
EXPECT_EQ(waitpid(pid, &status, 0), pid);
EXPECT_EQ(WEXITSTATUS(status), 42);
}
/*
* Verify that event does _not_ propagate to fork+exec'd child; event enabled
* after fork+exec.
*/
TEST_F(remove_on_exec, fork_exec_then_enable)
{
pid_t pid_exec, pid_only_fork;
int pipefd[2];
int tmp;
/*
* Non-exec child, to ensure exec does not affect inherited events of
* other children.
*/
pid_only_fork = fork();
if (pid_only_fork == 0) {
/* Block until parent enables event. */
while (!signal_count);
_exit(42);
}
ASSERT_NE(pipe(pipefd), -1);
pid_exec = fork();
if (pid_exec == 0) {
ASSERT_NE(dup2(pipefd[1], STDOUT_FILENO), -1);
close(pipefd[0]);
execl("/proc/self/exe", "exec_child", NULL);
_exit((perror("exec failed"), 1));
}
close(pipefd[1]);
ASSERT_EQ(waitpid(pid_exec, &tmp, WNOHANG), 0); /* Child is running. */
/* Wait for exec'd child to start spinning. */
EXPECT_EQ(read(pipefd[0], &tmp, sizeof(int)), sizeof(int));
EXPECT_EQ(tmp, 42);
close(pipefd[0]);
/* Now we can enable the event, knowing the child is doing work. */
EXPECT_EQ(ioctl(self->fd, PERF_EVENT_IOC_ENABLE, 0), 0);
/* If the event propagated to the exec'd child, it will exit normally... */
usleep(100000); /* ... give time for event to trigger (in case of bug). */
EXPECT_EQ(waitpid(pid_exec, &tmp, WNOHANG), 0); /* Should still be running. */
EXPECT_EQ(kill(pid_exec, SIGKILL), 0);
/* Verify removal from child did not affect this task's event. */
tmp = signal_count;
while (signal_count == tmp); /* Should not hang! */
/* Nor should it have affected the first child. */
EXPECT_EQ(waitpid(pid_only_fork, &tmp, 0), pid_only_fork);
EXPECT_EQ(WEXITSTATUS(tmp), 42);
}
/*
* Verify that event does _not_ propagate to fork+exec'd child; event enabled
* before fork+exec.
*/
TEST_F(remove_on_exec, enable_then_fork_exec)
{
pid_t pid_exec;
int tmp;
EXPECT_EQ(ioctl(self->fd, PERF_EVENT_IOC_ENABLE, 0), 0);
pid_exec = fork();
if (pid_exec == 0) {
execl("/proc/self/exe", "exec_child", NULL);
_exit((perror("exec failed"), 1));
}
/*
	 * The child may exit abnormally at any time if the event propagated
	 * and a SIGTRAP was delivered before its handler was set up.
*/
usleep(100000); /* ... give time for event to trigger (in case of bug). */
EXPECT_EQ(waitpid(pid_exec, &tmp, WNOHANG), 0); /* Should still be running. */
EXPECT_EQ(kill(pid_exec, SIGKILL), 0);
/* Verify removal from child did not affect this task's event. */
tmp = signal_count;
while (signal_count == tmp); /* Should not hang! */
}
TEST_F(remove_on_exec, exec_stress)
{
pid_t pids[30];
int i, tmp;
for (i = 0; i < sizeof(pids) / sizeof(pids[0]); i++) {
pids[i] = fork();
if (pids[i] == 0) {
execl("/proc/self/exe", "exec_child", NULL);
_exit((perror("exec failed"), 1));
}
/* Some forked with event disabled, rest with enabled. */
if (i > 10)
EXPECT_EQ(ioctl(self->fd, PERF_EVENT_IOC_ENABLE, 0), 0);
}
usleep(100000); /* ... give time for event to trigger (in case of bug). */
for (i = 0; i < sizeof(pids) / sizeof(pids[0]); i++) {
/* All children should still be running. */
EXPECT_EQ(waitpid(pids[i], &tmp, WNOHANG), 0);
EXPECT_EQ(kill(pids[i], SIGKILL), 0);
}
/* Verify event is still alive. */
tmp = signal_count;
while (signal_count == tmp);
}
/* For exec'd child. */
static void exec_child(void)
{
struct sigaction action = {};
const int val = 42;
/* Set up sigtrap handler in case we erroneously receive a trap. */
action.sa_flags = SA_SIGINFO | SA_NODEFER;
action.sa_sigaction = sigtrap_handler;
sigemptyset(&action.sa_mask);
if (sigaction(SIGTRAP, &action, NULL))
_exit((perror("sigaction failed"), 1));
/* Signal parent that we're starting to spin. */
if (write(STDOUT_FILENO, &val, sizeof(int)) == -1)
_exit((perror("write failed"), 1));
/* Should hang here until killed. */
while (!signal_count);
}
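/*
 * The kselftest harness defines main() itself; temporarily rename it to
 * test_main so we can wrap it with our own main() below, which dispatches
 * to exec_child() when this binary is re-exec'd with argv[0] set to
 * "exec_child" by the tests above.
 */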
#define main test_main
TEST_HARNESS_MAIN
#undef main
int main(int argc, char *argv[])
{
if (!strcmp(argv[0], "exec_child")) {
exec_child();
return 1;
}
return test_main(argc, argv);
}
| linux-master | tools/testing/selftests/perf_events/remove_on_exec.c |
/*
* This application is Copyright 2012 Red Hat, Inc.
* Doug Ledford <[email protected]>
*
* mq_open_tests is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, version 3.
*
* mq_open_tests is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* For the full text of the license, see <http://www.gnu.org/licenses/>.
*
* mq_open_tests.c
* Tests the various situations that should either succeed or fail to
 *	open a POSIX message queue and then reports whether or not they
* did as they were supposed to.
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <fcntl.h>
#include <string.h>
#include <limits.h>
#include <errno.h>
#include <sys/types.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <sys/stat.h>
#include <mqueue.h>
#include <error.h>
#include "../kselftest.h"
static char *usage =
"Usage:\n"
" %s path\n"
"\n"
" path Path name of the message queue to create\n"
"\n"
" Note: this program must be run as root in order to enable all tests\n"
"\n";
char *DEF_MSGS = "/proc/sys/fs/mqueue/msg_default";
char *DEF_MSGSIZE = "/proc/sys/fs/mqueue/msgsize_default";
char *MAX_MSGS = "/proc/sys/fs/mqueue/msg_max";
char *MAX_MSGSIZE = "/proc/sys/fs/mqueue/msgsize_max";
int default_settings;
struct rlimit saved_limits, cur_limits;
int saved_def_msgs, saved_def_msgsize, saved_max_msgs, saved_max_msgsize;
int cur_def_msgs, cur_def_msgsize, cur_max_msgs, cur_max_msgsize;
FILE *def_msgs, *def_msgsize, *max_msgs, *max_msgsize;
char *queue_path;
char *default_queue_path = "/test1";
mqd_t queue = -1;
static inline void __set(FILE *stream, int value, char *err_msg);
void shutdown(int exit_val, char *err_cause, int line_no);
static inline int get(FILE *stream);
static inline void set(FILE *stream, int value);
static inline void getr(int type, struct rlimit *rlim);
static inline void setr(int type, struct rlimit *rlim);
void validate_current_settings();
static inline void test_queue(struct mq_attr *attr, struct mq_attr *result);
static inline int test_queue_fail(struct mq_attr *attr, struct mq_attr *result);
static inline void __set(FILE *stream, int value, char *err_msg)
{
rewind(stream);
if (fprintf(stream, "%d", value) < 0)
perror(err_msg);
}
void shutdown(int exit_val, char *err_cause, int line_no)
{
static int in_shutdown = 0;
/* In case we get called recursively by a set() call below */
if (in_shutdown++)
return;
if (seteuid(0) == -1)
perror("seteuid() failed");
if (queue != -1)
if (mq_close(queue))
perror("mq_close() during shutdown");
if (queue_path)
/*
* Be silent if this fails, if we cleaned up already it's
* expected to fail
*/
mq_unlink(queue_path);
if (default_settings) {
if (saved_def_msgs)
__set(def_msgs, saved_def_msgs,
"failed to restore saved_def_msgs");
if (saved_def_msgsize)
__set(def_msgsize, saved_def_msgsize,
"failed to restore saved_def_msgsize");
}
if (saved_max_msgs)
__set(max_msgs, saved_max_msgs,
"failed to restore saved_max_msgs");
if (saved_max_msgsize)
__set(max_msgsize, saved_max_msgsize,
"failed to restore saved_max_msgsize");
if (exit_val)
error(exit_val, errno, "%s at %d", err_cause, line_no);
exit(0);
}
static inline int get(FILE *stream)
{
int value;
rewind(stream);
if (fscanf(stream, "%d", &value) != 1)
shutdown(4, "Error reading /proc entry", __LINE__ - 1);
return value;
}
static inline void set(FILE *stream, int value)
{
int new_value;
rewind(stream);
if (fprintf(stream, "%d", value) < 0)
return shutdown(5, "Failed writing to /proc file",
__LINE__ - 1);
new_value = get(stream);
if (new_value != value)
return shutdown(5, "We didn't get what we wrote to /proc back",
__LINE__ - 1);
}
static inline void getr(int type, struct rlimit *rlim)
{
if (getrlimit(type, rlim))
shutdown(6, "getrlimit()", __LINE__ - 1);
}
static inline void setr(int type, struct rlimit *rlim)
{
if (setrlimit(type, rlim))
shutdown(7, "setrlimit()", __LINE__ - 1);
}
void validate_current_settings()
{
int rlim_needed;
if (cur_limits.rlim_cur < 4096) {
printf("Current rlimit value for POSIX message queue bytes is "
"unreasonably low,\nincreasing.\n\n");
cur_limits.rlim_cur = 8192;
cur_limits.rlim_max = 16384;
setr(RLIMIT_MSGQUEUE, &cur_limits);
}
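	/*
	 * The estimates below are a sketch, not an exact accounting: they
	 * pad the configured queue depth and message size by one and assume
	 * roughly two pointers of kernel bookkeeping overhead per message
	 * when guessing how much of the rlimit a queue will charge.
	 */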
if (default_settings) {
rlim_needed = (cur_def_msgs + 1) * (cur_def_msgsize + 1 +
2 * sizeof(void *));
if (rlim_needed > cur_limits.rlim_cur) {
printf("Temporarily lowering default queue parameters "
"to something that will work\n"
"with the current rlimit values.\n\n");
set(def_msgs, 10);
cur_def_msgs = 10;
set(def_msgsize, 128);
cur_def_msgsize = 128;
}
} else {
rlim_needed = (cur_max_msgs + 1) * (cur_max_msgsize + 1 +
2 * sizeof(void *));
if (rlim_needed > cur_limits.rlim_cur) {
printf("Temporarily lowering maximum queue parameters "
"to something that will work\n"
"with the current rlimit values in case this is "
"a kernel that ties the default\n"
"queue parameters to the maximum queue "
"parameters.\n\n");
set(max_msgs, 10);
cur_max_msgs = 10;
set(max_msgsize, 128);
cur_max_msgsize = 128;
}
}
}
/*
* test_queue - Test opening a queue, shutdown if we fail. This should
* only be called in situations that should never fail. We clean up
* after ourselves and return the queue attributes in *result.
*/
static inline void test_queue(struct mq_attr *attr, struct mq_attr *result)
{
int flags = O_RDWR | O_EXCL | O_CREAT;
int perms = DEFFILEMODE;
if ((queue = mq_open(queue_path, flags, perms, attr)) == -1)
shutdown(1, "mq_open()", __LINE__);
if (mq_getattr(queue, result))
shutdown(1, "mq_getattr()", __LINE__);
if (mq_close(queue))
shutdown(1, "mq_close()", __LINE__);
queue = -1;
if (mq_unlink(queue_path))
shutdown(1, "mq_unlink()", __LINE__);
}
/*
* Same as test_queue above, but failure is not fatal.
* Returns:
* 0 - Failed to create a queue
* 1 - Created a queue, attributes in *result
*/
static inline int test_queue_fail(struct mq_attr *attr, struct mq_attr *result)
{
int flags = O_RDWR | O_EXCL | O_CREAT;
int perms = DEFFILEMODE;
if ((queue = mq_open(queue_path, flags, perms, attr)) == -1)
return 0;
if (mq_getattr(queue, result))
shutdown(1, "mq_getattr()", __LINE__);
if (mq_close(queue))
shutdown(1, "mq_close()", __LINE__);
queue = -1;
if (mq_unlink(queue_path))
shutdown(1, "mq_unlink()", __LINE__);
return 1;
}
int main(int argc, char *argv[])
{
struct mq_attr attr, result;
if (argc != 2) {
printf("Using Default queue path - %s\n", default_queue_path);
queue_path = default_queue_path;
} else {
/*
* Although we can create a msg queue with a non-absolute path name,
* unlink will fail. So, if the name doesn't start with a /, add one
* when we save it.
*/
if (*argv[1] == '/')
queue_path = strdup(argv[1]);
else {
queue_path = malloc(strlen(argv[1]) + 2);
if (!queue_path) {
perror("malloc()");
exit(1);
}
queue_path[0] = '/';
queue_path[1] = 0;
strcat(queue_path, argv[1]);
}
}
if (getuid() != 0)
ksft_exit_skip("Not running as root, but almost all tests "
"require root in order to modify\nsystem settings. "
"Exiting.\n");
/* Find out what files there are for us to make tweaks in */
def_msgs = fopen(DEF_MSGS, "r+");
def_msgsize = fopen(DEF_MSGSIZE, "r+");
max_msgs = fopen(MAX_MSGS, "r+");
max_msgsize = fopen(MAX_MSGSIZE, "r+");
if (!max_msgs)
shutdown(2, "Failed to open msg_max", __LINE__);
if (!max_msgsize)
shutdown(2, "Failed to open msgsize_max", __LINE__);
if (def_msgs || def_msgsize)
default_settings = 1;
/* Load up the current system values for everything we can */
getr(RLIMIT_MSGQUEUE, &saved_limits);
cur_limits = saved_limits;
if (default_settings) {
saved_def_msgs = cur_def_msgs = get(def_msgs);
saved_def_msgsize = cur_def_msgsize = get(def_msgsize);
}
saved_max_msgs = cur_max_msgs = get(max_msgs);
saved_max_msgsize = cur_max_msgsize = get(max_msgsize);
/* Tell the user our initial state */
printf("\nInitial system state:\n");
printf("\tUsing queue path:\t\t%s\n", queue_path);
printf("\tRLIMIT_MSGQUEUE(soft):\t\t%ld\n",
(long) saved_limits.rlim_cur);
printf("\tRLIMIT_MSGQUEUE(hard):\t\t%ld\n",
(long) saved_limits.rlim_max);
printf("\tMaximum Message Size:\t\t%d\n", saved_max_msgsize);
printf("\tMaximum Queue Size:\t\t%d\n", saved_max_msgs);
if (default_settings) {
printf("\tDefault Message Size:\t\t%d\n", saved_def_msgsize);
printf("\tDefault Queue Size:\t\t%d\n", saved_def_msgs);
} else {
printf("\tDefault Message Size:\t\tNot Supported\n");
printf("\tDefault Queue Size:\t\tNot Supported\n");
}
printf("\n");
validate_current_settings();
printf("Adjusted system state for testing:\n");
printf("\tRLIMIT_MSGQUEUE(soft):\t\t%ld\n", (long) cur_limits.rlim_cur);
printf("\tRLIMIT_MSGQUEUE(hard):\t\t%ld\n", (long) cur_limits.rlim_max);
printf("\tMaximum Message Size:\t\t%d\n", cur_max_msgsize);
printf("\tMaximum Queue Size:\t\t%d\n", cur_max_msgs);
if (default_settings) {
printf("\tDefault Message Size:\t\t%d\n", cur_def_msgsize);
printf("\tDefault Queue Size:\t\t%d\n", cur_def_msgs);
}
printf("\n\nTest series 1, behavior when no attr struct "
"passed to mq_open:\n");
if (!default_settings) {
test_queue(NULL, &result);
printf("Given sane system settings, mq_open without an attr "
"struct succeeds:\tPASS\n");
if (result.mq_maxmsg != cur_max_msgs ||
result.mq_msgsize != cur_max_msgsize) {
printf("Kernel does not support setting the default "
"mq attributes,\nbut also doesn't tie the "
"defaults to the maximums:\t\t\tPASS\n");
} else {
set(max_msgs, ++cur_max_msgs);
set(max_msgsize, ++cur_max_msgsize);
test_queue(NULL, &result);
if (result.mq_maxmsg == cur_max_msgs &&
result.mq_msgsize == cur_max_msgsize)
printf("Kernel does not support setting the "
"default mq attributes and\n"
"also ties system wide defaults to "
"the system wide maximums:\t\t"
"FAIL\n");
else
printf("Kernel does not support setting the "
"default mq attributes,\n"
"but also doesn't tie the defaults to "
"the maximums:\t\t\tPASS\n");
}
} else {
printf("Kernel supports setting defaults separately from "
"maximums:\t\tPASS\n");
/*
* While we are here, go ahead and test that the kernel
* properly follows the default settings
*/
test_queue(NULL, &result);
printf("Given sane values, mq_open without an attr struct "
"succeeds:\t\tPASS\n");
if (result.mq_maxmsg != cur_def_msgs ||
result.mq_msgsize != cur_def_msgsize)
printf("Kernel supports setting defaults, but does "
"not actually honor them:\tFAIL\n\n");
else {
set(def_msgs, ++cur_def_msgs);
set(def_msgsize, ++cur_def_msgsize);
/* In case max was the same as the default */
set(max_msgs, ++cur_max_msgs);
set(max_msgsize, ++cur_max_msgsize);
test_queue(NULL, &result);
if (result.mq_maxmsg != cur_def_msgs ||
result.mq_msgsize != cur_def_msgsize)
printf("Kernel supports setting defaults, but "
"does not actually honor them:\t"
"FAIL\n");
else
printf("Kernel properly honors default setting "
"knobs:\t\t\t\tPASS\n");
}
set(def_msgs, cur_max_msgs + 1);
cur_def_msgs = cur_max_msgs + 1;
set(def_msgsize, cur_max_msgsize + 1);
cur_def_msgsize = cur_max_msgsize + 1;
if (cur_def_msgs * (cur_def_msgsize + 2 * sizeof(void *)) >=
cur_limits.rlim_cur) {
cur_limits.rlim_cur = (cur_def_msgs + 2) *
(cur_def_msgsize + 2 * sizeof(void *));
cur_limits.rlim_max = 2 * cur_limits.rlim_cur;
setr(RLIMIT_MSGQUEUE, &cur_limits);
}
if (test_queue_fail(NULL, &result)) {
if (result.mq_maxmsg == cur_max_msgs &&
result.mq_msgsize == cur_max_msgsize)
printf("Kernel properly limits default values "
"to lesser of default/max:\t\tPASS\n");
else
printf("Kernel does not properly set default "
"queue parameters when\ndefaults > "
"max:\t\t\t\t\t\t\t\tFAIL\n");
} else
printf("Kernel fails to open mq because defaults are "
"greater than maximums:\tFAIL\n");
set(def_msgs, --cur_def_msgs);
set(def_msgsize, --cur_def_msgsize);
cur_limits.rlim_cur = cur_limits.rlim_max = cur_def_msgs *
cur_def_msgsize;
setr(RLIMIT_MSGQUEUE, &cur_limits);
if (test_queue_fail(NULL, &result))
printf("Kernel creates queue even though defaults "
"would exceed\nrlimit setting:"
"\t\t\t\t\t\t\t\tFAIL\n");
else
printf("Kernel properly fails to create queue when "
"defaults would\nexceed rlimit:"
"\t\t\t\t\t\t\t\tPASS\n");
}
/*
* Test #2 - open with an attr struct that exceeds rlimit
*/
printf("\n\nTest series 2, behavior when attr struct is "
"passed to mq_open:\n");
cur_max_msgs = 32;
cur_max_msgsize = cur_limits.rlim_max >> 4;
set(max_msgs, cur_max_msgs);
set(max_msgsize, cur_max_msgsize);
attr.mq_maxmsg = cur_max_msgs;
attr.mq_msgsize = cur_max_msgsize;
if (test_queue_fail(&attr, &result))
printf("Queue open in excess of rlimit max when euid = 0 "
"succeeded:\t\tFAIL\n");
else
printf("Queue open in excess of rlimit max when euid = 0 "
"failed:\t\tPASS\n");
attr.mq_maxmsg = cur_max_msgs + 1;
attr.mq_msgsize = 10;
if (test_queue_fail(&attr, &result))
printf("Queue open with mq_maxmsg > limit when euid = 0 "
"succeeded:\t\tPASS\n");
else
printf("Queue open with mq_maxmsg > limit when euid = 0 "
"failed:\t\tFAIL\n");
attr.mq_maxmsg = 1;
attr.mq_msgsize = cur_max_msgsize + 1;
if (test_queue_fail(&attr, &result))
printf("Queue open with mq_msgsize > limit when euid = 0 "
"succeeded:\t\tPASS\n");
else
printf("Queue open with mq_msgsize > limit when euid = 0 "
"failed:\t\tFAIL\n");
attr.mq_maxmsg = 65536;
attr.mq_msgsize = 65536;
if (test_queue_fail(&attr, &result))
printf("Queue open with total size > 2GB when euid = 0 "
"succeeded:\t\tFAIL\n");
else
printf("Queue open with total size > 2GB when euid = 0 "
"failed:\t\t\tPASS\n");
if (seteuid(99) == -1) {
perror("seteuid() failed");
exit(1);
}
attr.mq_maxmsg = cur_max_msgs;
attr.mq_msgsize = cur_max_msgsize;
if (test_queue_fail(&attr, &result))
printf("Queue open in excess of rlimit max when euid = 99 "
"succeeded:\t\tFAIL\n");
else
printf("Queue open in excess of rlimit max when euid = 99 "
"failed:\t\tPASS\n");
attr.mq_maxmsg = cur_max_msgs + 1;
attr.mq_msgsize = 10;
if (test_queue_fail(&attr, &result))
printf("Queue open with mq_maxmsg > limit when euid = 99 "
"succeeded:\t\tFAIL\n");
else
printf("Queue open with mq_maxmsg > limit when euid = 99 "
"failed:\t\tPASS\n");
attr.mq_maxmsg = 1;
attr.mq_msgsize = cur_max_msgsize + 1;
if (test_queue_fail(&attr, &result))
printf("Queue open with mq_msgsize > limit when euid = 99 "
"succeeded:\t\tFAIL\n");
else
printf("Queue open with mq_msgsize > limit when euid = 99 "
"failed:\t\tPASS\n");
attr.mq_maxmsg = 65536;
attr.mq_msgsize = 65536;
if (test_queue_fail(&attr, &result))
printf("Queue open with total size > 2GB when euid = 99 "
"succeeded:\t\tFAIL\n");
else
printf("Queue open with total size > 2GB when euid = 99 "
"failed:\t\t\tPASS\n");
	shutdown(0, "", 0);
}
| linux-master | tools/testing/selftests/mqueue/mq_open_tests.c |
/*
* This application is Copyright 2012 Red Hat, Inc.
* Doug Ledford <[email protected]>
*
* mq_perf_tests is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, version 3.
*
* mq_perf_tests is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* For the full text of the license, see <http://www.gnu.org/licenses/>.
*
* mq_perf_tests.c
* Tests various types of message queue workloads, concentrating on those
 * situations that involve large message sizes, large message queue depths,
* or both, and reports back useful metrics about kernel message queue
* performance.
*
*/
#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <fcntl.h>
#include <string.h>
#include <limits.h>
#include <errno.h>
#include <signal.h>
#include <pthread.h>
#include <sched.h>
#include <sys/types.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <sys/stat.h>
#include <sys/param.h>
#include <mqueue.h>
#include <popt.h>
#include <error.h>
#include "../kselftest.h"
static char *usage =
"Usage:\n"
" %s [-c #[,#..] -f] path\n"
"\n"
" -c # Skip most tests and go straight to a high queue depth test\n"
" and then run that test continuously (useful for running at\n"
" the same time as some other workload to see how much the\n"
" cache thrashing caused by adding messages to a very deep\n"
" queue impacts the performance of other programs). The number\n"
" indicates which CPU core we should bind the process to during\n"
" the run. If you have more than one physical CPU, then you\n"
" will need one copy per physical CPU package, and you should\n"
" specify the CPU cores to pin ourself to via a comma separated\n"
" list of CPU values.\n"
" -f Only usable with continuous mode. Pin ourself to the CPUs\n"
" as requested, then instead of looping doing a high mq\n"
" workload, just busy loop. This will allow us to lock up a\n"
" single CPU just like we normally would, but without actually\n"
" thrashing the CPU cache. This is to make it easier to get\n"
" comparable numbers from some other workload running on the\n"
" other CPUs. One set of numbers with # CPUs locked up running\n"
" an mq workload, and another set of numbers with those same\n"
" CPUs locked away from the test workload, but not doing\n"
" anything to trash the cache like the mq workload might.\n"
" path Path name of the message queue to create\n"
"\n"
" Note: this program must be run as root in order to enable all tests\n"
"\n";
char *MAX_MSGS = "/proc/sys/fs/mqueue/msg_max";
char *MAX_MSGSIZE = "/proc/sys/fs/mqueue/msgsize_max";
#define MAX_CPUS 64
char *cpu_option_string;
int cpus_to_pin[MAX_CPUS];
int num_cpus_to_pin;
pthread_t cpu_threads[MAX_CPUS];
pthread_t main_thread;
cpu_set_t *cpu_set;
int cpu_set_size;
int cpus_online;
#define MSG_SIZE 16
#define TEST1_LOOPS 10000000
#define TEST2_LOOPS 100000
int continuous_mode;
int continuous_mode_fake;
struct rlimit saved_limits, cur_limits;
int saved_max_msgs, saved_max_msgsize;
int cur_max_msgs, cur_max_msgsize;
FILE *max_msgs, *max_msgsize;
int cur_nice;
char *queue_path = "/mq_perf_tests";
mqd_t queue = -1;
struct mq_attr result;
int mq_prio_max;
const struct poptOption options[] = {
{
.longName = "continuous",
.shortName = 'c',
.argInfo = POPT_ARG_STRING,
.arg = &cpu_option_string,
.val = 'c',
.descrip = "Run continuous tests at a high queue depth in "
"order to test the effects of cache thrashing on "
"other tasks on the system. This test is intended "
"to be run on one core of each physical CPU while "
"some other CPU intensive task is run on all the other "
"cores of that same physical CPU and the other task "
"is timed. It is assumed that the process of adding "
"messages to the message queue in a tight loop will "
"impact that other task to some degree. Once the "
"tests are performed in this way, you should then "
"re-run the tests using fake mode in order to check "
"the difference in time required to perform the CPU "
"intensive task",
.argDescrip = "cpu[,cpu]",
},
{
.longName = "fake",
.shortName = 'f',
.argInfo = POPT_ARG_NONE,
.arg = &continuous_mode_fake,
.val = 0,
		.descrip = "Tie up the CPUs that we would normally tie up in "
"continuous mode, but don't actually do any mq stuff, "
"just keep the CPU busy so it can't be used to process "
"system level tasks as this would free up resources on "
"the other CPU cores and skew the comparison between "
"the no-mqueue work and mqueue work tests",
.argDescrip = NULL,
},
{
.longName = "path",
.shortName = 'p',
.argInfo = POPT_ARG_STRING | POPT_ARGFLAG_SHOW_DEFAULT,
.arg = &queue_path,
.val = 'p',
.descrip = "The name of the path to use in the mqueue "
"filesystem for our tests",
.argDescrip = "pathname",
},
POPT_AUTOHELP
POPT_TABLEEND
};
static inline void __set(FILE *stream, int value, char *err_msg);
void shutdown(int exit_val, char *err_cause, int line_no);
void sig_action_SIGUSR1(int signum, siginfo_t *info, void *context);
void sig_action(int signum, siginfo_t *info, void *context);
static inline int get(FILE *stream);
static inline void set(FILE *stream, int value);
static inline int try_set(FILE *stream, int value);
static inline void getr(int type, struct rlimit *rlim);
static inline void setr(int type, struct rlimit *rlim);
static inline void open_queue(struct mq_attr *attr);
void increase_limits(void);
static inline void __set(FILE *stream, int value, char *err_msg)
{
rewind(stream);
if (fprintf(stream, "%d", value) < 0)
perror(err_msg);
}
void shutdown(int exit_val, char *err_cause, int line_no)
{
static int in_shutdown = 0;
int errno_at_shutdown = errno;
int i;
	/* In case we get called by multiple threads or from a signal handler */
if (in_shutdown++)
return;
/* Free the cpu_set allocated using CPU_ALLOC in main function */
CPU_FREE(cpu_set);
for (i = 0; i < num_cpus_to_pin; i++)
if (cpu_threads[i]) {
pthread_kill(cpu_threads[i], SIGUSR1);
pthread_join(cpu_threads[i], NULL);
}
if (queue != -1)
if (mq_close(queue))
perror("mq_close() during shutdown");
if (queue_path)
/*
* Be silent if this fails, if we cleaned up already it's
* expected to fail
*/
mq_unlink(queue_path);
if (saved_max_msgs)
__set(max_msgs, saved_max_msgs,
"failed to restore saved_max_msgs");
if (saved_max_msgsize)
__set(max_msgsize, saved_max_msgsize,
"failed to restore saved_max_msgsize");
if (exit_val)
error(exit_val, errno_at_shutdown, "%s at %d",
err_cause, line_no);
exit(0);
}
void sig_action_SIGUSR1(int signum, siginfo_t *info, void *context)
{
if (pthread_self() != main_thread)
pthread_exit(0);
else {
fprintf(stderr, "Caught signal %d in SIGUSR1 handler, "
"exiting\n", signum);
shutdown(0, "", 0);
fprintf(stderr, "\n\nReturned from shutdown?!?!\n\n");
exit(0);
}
}
void sig_action(int signum, siginfo_t *info, void *context)
{
if (pthread_self() != main_thread)
pthread_kill(main_thread, signum);
else {
fprintf(stderr, "Caught signal %d, exiting\n", signum);
shutdown(0, "", 0);
fprintf(stderr, "\n\nReturned from shutdown?!?!\n\n");
exit(0);
}
}
static inline int get(FILE *stream)
{
int value;
rewind(stream);
if (fscanf(stream, "%d", &value) != 1)
shutdown(4, "Error reading /proc entry", __LINE__);
return value;
}
static inline void set(FILE *stream, int value)
{
int new_value;
rewind(stream);
if (fprintf(stream, "%d", value) < 0)
return shutdown(5, "Failed writing to /proc file", __LINE__);
new_value = get(stream);
if (new_value != value)
return shutdown(5, "We didn't get what we wrote to /proc back",
__LINE__);
}
static inline int try_set(FILE *stream, int value)
{
int new_value;
rewind(stream);
fprintf(stream, "%d", value);
new_value = get(stream);
return new_value == value;
}
static inline void getr(int type, struct rlimit *rlim)
{
if (getrlimit(type, rlim))
shutdown(6, "getrlimit()", __LINE__);
}
static inline void setr(int type, struct rlimit *rlim)
{
if (setrlimit(type, rlim))
shutdown(7, "setrlimit()", __LINE__);
}
/**
* open_queue - open the global queue for testing
* @attr - An attr struct specifying the desired queue traits
* @result - An attr struct that lists the actual traits the queue has
*
* This open is not allowed to fail, failure will result in an orderly
* shutdown of the program. The global queue_path is used to set what
* queue to open, the queue descriptor is saved in the global queue
* variable.
*/
static inline void open_queue(struct mq_attr *attr)
{
int flags = O_RDWR | O_EXCL | O_CREAT | O_NONBLOCK;
int perms = DEFFILEMODE;
queue = mq_open(queue_path, flags, perms, attr);
if (queue == -1)
shutdown(1, "mq_open()", __LINE__);
if (mq_getattr(queue, &result))
shutdown(1, "mq_getattr()", __LINE__);
printf("\n\tQueue %s created:\n", queue_path);
printf("\t\tmq_flags:\t\t\t%s\n", result.mq_flags & O_NONBLOCK ?
"O_NONBLOCK" : "(null)");
printf("\t\tmq_maxmsg:\t\t\t%lu\n", result.mq_maxmsg);
printf("\t\tmq_msgsize:\t\t\t%lu\n", result.mq_msgsize);
printf("\t\tmq_curmsgs:\t\t\t%lu\n", result.mq_curmsgs);
}
void *fake_cont_thread(void *arg)
{
int i;
for (i = 0; i < num_cpus_to_pin; i++)
if (cpu_threads[i] == pthread_self())
break;
printf("\tStarted fake continuous mode thread %d on CPU %d\n", i,
cpus_to_pin[i]);
while (1)
;
}
void *cont_thread(void *arg)
{
char buff[MSG_SIZE];
int i, priority;
for (i = 0; i < num_cpus_to_pin; i++)
if (cpu_threads[i] == pthread_self())
break;
printf("\tStarted continuous mode thread %d on CPU %d\n", i,
cpus_to_pin[i]);
while (1) {
while (mq_send(queue, buff, sizeof(buff), 0) == 0)
;
mq_receive(queue, buff, sizeof(buff), &priority);
}
}
#define drain_queue() \
while (mq_receive(queue, buff, MSG_SIZE, &prio_in) == MSG_SIZE)
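/*
 * Both the send loop in cont_thread() above and drain_queue() rely on the
 * queue having been opened O_NONBLOCK (see open_queue()): a full queue makes
 * mq_send() fail immediately and an empty queue makes mq_receive() return
 * something other than MSG_SIZE, so the loops terminate instead of blocking.
 */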
#define do_untimed_send() \
do { \
if (mq_send(queue, buff, MSG_SIZE, prio_out)) \
shutdown(3, "Test send failure", __LINE__); \
} while (0)
#define do_send_recv() \
do { \
clock_gettime(clock, &start); \
if (mq_send(queue, buff, MSG_SIZE, prio_out)) \
shutdown(3, "Test send failure", __LINE__); \
clock_gettime(clock, &middle); \
if (mq_receive(queue, buff, MSG_SIZE, &prio_in) != MSG_SIZE) \
shutdown(3, "Test receive failure", __LINE__); \
clock_gettime(clock, &end); \
nsec = ((middle.tv_sec - start.tv_sec) * 1000000000) + \
(middle.tv_nsec - start.tv_nsec); \
send_total.tv_nsec += nsec; \
if (send_total.tv_nsec >= 1000000000) { \
send_total.tv_sec++; \
send_total.tv_nsec -= 1000000000; \
} \
nsec = ((end.tv_sec - middle.tv_sec) * 1000000000) + \
(end.tv_nsec - middle.tv_nsec); \
recv_total.tv_nsec += nsec; \
if (recv_total.tv_nsec >= 1000000000) { \
recv_total.tv_sec++; \
recv_total.tv_nsec -= 1000000000; \
} \
} while (0)
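/*
 * The carry handling in do_send_recv() moves at most one second from
 * tv_nsec to tv_sec per iteration, which is safe as long as a single send
 * or receive takes under a second -- a comfortable assumption here.
 */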
struct test {
char *desc;
void (*func)(int *);
};
void const_prio(int *prio)
{
return;
}
void inc_prio(int *prio)
{
if (++*prio == mq_prio_max)
*prio = 0;
}
void dec_prio(int *prio)
{
if (--*prio < 0)
*prio = mq_prio_max - 1;
}
void random_prio(int *prio)
{
*prio = random() % mq_prio_max;
}
struct test test2[] = {
{"\n\tTest #2a: Time send/recv message, queue full, constant prio\n",
const_prio},
{"\n\tTest #2b: Time send/recv message, queue full, increasing prio\n",
inc_prio},
{"\n\tTest #2c: Time send/recv message, queue full, decreasing prio\n",
dec_prio},
{"\n\tTest #2d: Time send/recv message, queue full, random prio\n",
random_prio},
{NULL, NULL}
};
/**
* Tests to perform (all done with MSG_SIZE messages):
*
* 1) Time to add/remove message with 0 messages on queue
* 1a) with constant prio
* 2) Time to add/remove message when queue close to capacity:
* 2a) with constant prio
* 2b) with increasing prio
* 2c) with decreasing prio
* 2d) with random prio
* 3) Test limits of priorities honored (double check _SC_MQ_PRIO_MAX)
*/
void *perf_test_thread(void *arg)
{
char buff[MSG_SIZE];
int prio_out, prio_in;
int i;
clockid_t clock;
pthread_t *t;
struct timespec res, start, middle, end, send_total, recv_total;
unsigned long long nsec;
struct test *cur_test;
t = &cpu_threads[0];
printf("\n\tStarted mqueue performance test thread on CPU %d\n",
cpus_to_pin[0]);
mq_prio_max = sysconf(_SC_MQ_PRIO_MAX);
if (mq_prio_max == -1)
shutdown(2, "sysconf(_SC_MQ_PRIO_MAX)", __LINE__);
if (pthread_getcpuclockid(cpu_threads[0], &clock) != 0)
shutdown(2, "pthread_getcpuclockid", __LINE__);
if (clock_getres(clock, &res))
shutdown(2, "clock_getres()", __LINE__);
printf("\t\tMax priorities:\t\t\t%d\n", mq_prio_max);
printf("\t\tClock resolution:\t\t%lu nsec%s\n", res.tv_nsec,
res.tv_nsec > 1 ? "s" : "");
printf("\n\tTest #1: Time send/recv message, queue empty\n");
printf("\t\t(%d iterations)\n", TEST1_LOOPS);
prio_out = 0;
send_total.tv_sec = 0;
send_total.tv_nsec = 0;
recv_total.tv_sec = 0;
recv_total.tv_nsec = 0;
for (i = 0; i < TEST1_LOOPS; i++)
do_send_recv();
printf("\t\tSend msg:\t\t\t%ld.%lus total time\n",
send_total.tv_sec, send_total.tv_nsec);
nsec = ((unsigned long long)send_total.tv_sec * 1000000000 +
send_total.tv_nsec) / TEST1_LOOPS;
printf("\t\t\t\t\t\t%lld nsec/msg\n", nsec);
printf("\t\tRecv msg:\t\t\t%ld.%lus total time\n",
recv_total.tv_sec, recv_total.tv_nsec);
nsec = ((unsigned long long)recv_total.tv_sec * 1000000000 +
recv_total.tv_nsec) / TEST1_LOOPS;
printf("\t\t\t\t\t\t%lld nsec/msg\n", nsec);
for (cur_test = test2; cur_test->desc != NULL; cur_test++) {
printf("%s:\n", cur_test->desc);
printf("\t\t(%d iterations)\n", TEST2_LOOPS);
prio_out = 0;
send_total.tv_sec = 0;
send_total.tv_nsec = 0;
recv_total.tv_sec = 0;
recv_total.tv_nsec = 0;
printf("\t\tFilling queue...");
fflush(stdout);
clock_gettime(clock, &start);
for (i = 0; i < result.mq_maxmsg - 1; i++) {
do_untimed_send();
cur_test->func(&prio_out);
}
clock_gettime(clock, &end);
nsec = ((unsigned long long)(end.tv_sec - start.tv_sec) *
1000000000) + (end.tv_nsec - start.tv_nsec);
printf("done.\t\t%lld.%llds\n", nsec / 1000000000,
nsec % 1000000000);
printf("\t\tTesting...");
fflush(stdout);
for (i = 0; i < TEST2_LOOPS; i++) {
do_send_recv();
cur_test->func(&prio_out);
}
printf("done.\n");
printf("\t\tSend msg:\t\t\t%ld.%lus total time\n",
send_total.tv_sec, send_total.tv_nsec);
nsec = ((unsigned long long)send_total.tv_sec * 1000000000 +
send_total.tv_nsec) / TEST2_LOOPS;
printf("\t\t\t\t\t\t%lld nsec/msg\n", nsec);
printf("\t\tRecv msg:\t\t\t%ld.%lus total time\n",
recv_total.tv_sec, recv_total.tv_nsec);
nsec = ((unsigned long long)recv_total.tv_sec * 1000000000 +
recv_total.tv_nsec) / TEST2_LOOPS;
printf("\t\t\t\t\t\t%lld nsec/msg\n", nsec);
printf("\t\tDraining queue...");
fflush(stdout);
clock_gettime(clock, &start);
drain_queue();
clock_gettime(clock, &end);
nsec = ((unsigned long long)(end.tv_sec - start.tv_sec) *
1000000000) + (end.tv_nsec - start.tv_nsec);
printf("done.\t\t%lld.%llds\n", nsec / 1000000000,
nsec % 1000000000);
}
return 0;
}
void increase_limits(void)
{
cur_limits.rlim_cur = RLIM_INFINITY;
cur_limits.rlim_max = RLIM_INFINITY;
setr(RLIMIT_MSGQUEUE, &cur_limits);
while (try_set(max_msgs, cur_max_msgs += 10))
;
cur_max_msgs = get(max_msgs);
while (try_set(max_msgsize, cur_max_msgsize += 1024))
;
cur_max_msgsize = get(max_msgsize);
if (setpriority(PRIO_PROCESS, 0, -20) != 0)
shutdown(2, "setpriority()", __LINE__);
cur_nice = -20;
}
int main(int argc, char *argv[])
{
struct mq_attr attr;
char *option, *next_option;
int i, cpu, rc;
struct sigaction sa;
poptContext popt_context;
void *retval;
main_thread = pthread_self();
num_cpus_to_pin = 0;
if (sysconf(_SC_NPROCESSORS_ONLN) == -1) {
perror("sysconf(_SC_NPROCESSORS_ONLN)");
exit(1);
}
if (getuid() != 0)
ksft_exit_skip("Not running as root, but almost all tests "
"require root in order to modify\nsystem settings. "
"Exiting.\n");
cpus_online = MIN(MAX_CPUS, sysconf(_SC_NPROCESSORS_ONLN));
cpu_set = CPU_ALLOC(cpus_online);
if (cpu_set == NULL) {
perror("CPU_ALLOC()");
exit(1);
}
cpu_set_size = CPU_ALLOC_SIZE(cpus_online);
CPU_ZERO_S(cpu_set_size, cpu_set);
popt_context = poptGetContext(NULL, argc, (const char **)argv,
options, 0);
while ((rc = poptGetNextOpt(popt_context)) > 0) {
switch (rc) {
case 'c':
continuous_mode = 1;
option = cpu_option_string;
do {
next_option = strchr(option, ',');
if (next_option)
*next_option = '\0';
cpu = atoi(option);
if (cpu >= cpus_online)
fprintf(stderr, "CPU %d exceeds "
"cpus online, ignoring.\n",
cpu);
else
cpus_to_pin[num_cpus_to_pin++] = cpu;
if (next_option)
option = ++next_option;
} while (next_option && num_cpus_to_pin < MAX_CPUS);
/* Double check that they didn't give us the same CPU
* more than once */
for (cpu = 0; cpu < num_cpus_to_pin; cpu++) {
if (CPU_ISSET_S(cpus_to_pin[cpu], cpu_set_size,
cpu_set)) {
fprintf(stderr, "Any given CPU may "
"only be given once.\n");
goto err_code;
} else
CPU_SET_S(cpus_to_pin[cpu],
cpu_set_size, cpu_set);
}
break;
case 'p':
/*
* Although we can create a msg queue with a
* non-absolute path name, unlink will fail. So,
* if the name doesn't start with a /, add one
* when we save it.
*/
option = queue_path;
if (*option != '/') {
queue_path = malloc(strlen(option) + 2);
if (!queue_path) {
perror("malloc()");
goto err_code;
}
queue_path[0] = '/';
queue_path[1] = 0;
strcat(queue_path, option);
free(option);
}
break;
}
}
if (continuous_mode && num_cpus_to_pin == 0) {
fprintf(stderr, "Must pass at least one CPU to continuous "
"mode.\n");
poptPrintUsage(popt_context, stderr, 0);
goto err_code;
} else if (!continuous_mode) {
num_cpus_to_pin = 1;
cpus_to_pin[0] = cpus_online - 1;
}
max_msgs = fopen(MAX_MSGS, "r+");
max_msgsize = fopen(MAX_MSGSIZE, "r+");
if (!max_msgs)
shutdown(2, "Failed to open msg_max", __LINE__);
if (!max_msgsize)
shutdown(2, "Failed to open msgsize_max", __LINE__);
/* Load up the current system values for everything we can */
getr(RLIMIT_MSGQUEUE, &saved_limits);
cur_limits = saved_limits;
saved_max_msgs = cur_max_msgs = get(max_msgs);
saved_max_msgsize = cur_max_msgsize = get(max_msgsize);
errno = 0;
cur_nice = getpriority(PRIO_PROCESS, 0);
if (errno)
shutdown(2, "getpriority()", __LINE__);
/* Tell the user our initial state */
printf("\nInitial system state:\n");
printf("\tUsing queue path:\t\t\t%s\n", queue_path);
printf("\tRLIMIT_MSGQUEUE(soft):\t\t\t%ld\n",
(long) saved_limits.rlim_cur);
printf("\tRLIMIT_MSGQUEUE(hard):\t\t\t%ld\n",
(long) saved_limits.rlim_max);
printf("\tMaximum Message Size:\t\t\t%d\n", saved_max_msgsize);
printf("\tMaximum Queue Size:\t\t\t%d\n", saved_max_msgs);
printf("\tNice value:\t\t\t\t%d\n", cur_nice);
printf("\n");
increase_limits();
printf("Adjusted system state for testing:\n");
if (cur_limits.rlim_cur == RLIM_INFINITY) {
printf("\tRLIMIT_MSGQUEUE(soft):\t\t\t(unlimited)\n");
printf("\tRLIMIT_MSGQUEUE(hard):\t\t\t(unlimited)\n");
} else {
printf("\tRLIMIT_MSGQUEUE(soft):\t\t\t%ld\n",
(long) cur_limits.rlim_cur);
printf("\tRLIMIT_MSGQUEUE(hard):\t\t\t%ld\n",
(long) cur_limits.rlim_max);
}
printf("\tMaximum Message Size:\t\t\t%d\n", cur_max_msgsize);
printf("\tMaximum Queue Size:\t\t\t%d\n", cur_max_msgs);
printf("\tNice value:\t\t\t\t%d\n", cur_nice);
printf("\tContinuous mode:\t\t\t(%s)\n", continuous_mode ?
(continuous_mode_fake ? "fake mode" : "enabled") :
"disabled");
printf("\tCPUs to pin:\t\t\t\t%d", cpus_to_pin[0]);
for (cpu = 1; cpu < num_cpus_to_pin; cpu++)
printf(",%d", cpus_to_pin[cpu]);
printf("\n");
sa.sa_sigaction = sig_action_SIGUSR1;
sigemptyset(&sa.sa_mask);
sigaddset(&sa.sa_mask, SIGHUP);
sigaddset(&sa.sa_mask, SIGINT);
sigaddset(&sa.sa_mask, SIGQUIT);
sigaddset(&sa.sa_mask, SIGTERM);
sa.sa_flags = SA_SIGINFO;
if (sigaction(SIGUSR1, &sa, NULL) == -1)
shutdown(1, "sigaction(SIGUSR1)", __LINE__);
sa.sa_sigaction = sig_action;
if (sigaction(SIGHUP, &sa, NULL) == -1)
shutdown(1, "sigaction(SIGHUP)", __LINE__);
if (sigaction(SIGINT, &sa, NULL) == -1)
shutdown(1, "sigaction(SIGINT)", __LINE__);
if (sigaction(SIGQUIT, &sa, NULL) == -1)
shutdown(1, "sigaction(SIGQUIT)", __LINE__);
if (sigaction(SIGTERM, &sa, NULL) == -1)
shutdown(1, "sigaction(SIGTERM)", __LINE__);
if (!continuous_mode_fake) {
attr.mq_flags = O_NONBLOCK;
attr.mq_maxmsg = cur_max_msgs;
attr.mq_msgsize = MSG_SIZE;
open_queue(&attr);
}
for (i = 0; i < num_cpus_to_pin; i++) {
pthread_attr_t thread_attr;
void *thread_func;
if (continuous_mode_fake)
thread_func = &fake_cont_thread;
else if (continuous_mode)
thread_func = &cont_thread;
else
thread_func = &perf_test_thread;
CPU_ZERO_S(cpu_set_size, cpu_set);
CPU_SET_S(cpus_to_pin[i], cpu_set_size, cpu_set);
pthread_attr_init(&thread_attr);
pthread_attr_setaffinity_np(&thread_attr, cpu_set_size,
cpu_set);
if (pthread_create(&cpu_threads[i], &thread_attr, thread_func,
NULL))
shutdown(1, "pthread_create()", __LINE__);
pthread_attr_destroy(&thread_attr);
}
if (!continuous_mode) {
pthread_join(cpu_threads[0], &retval);
shutdown((long)retval, "perf_test_thread()", __LINE__);
} else {
while (1)
sleep(1);
}
shutdown(0, "", 0);
err_code:
CPU_FREE(cpu_set);
exit(1);
}
| linux-master | tools/testing/selftests/mqueue/mq_perf_tests.c |
/* SPDX-License-Identifier: GPL-2.0 */
#define _GNU_SOURCE
#define _LARGEFILE64_SOURCE
/* libc-specific include files
* The program may be built in 3 ways:
* $(CC) -nostdlib -include /path/to/nolibc.h => NOLIBC already defined
* $(CC) -nostdlib -I/path/to/nolibc/sysroot => _NOLIBC_* guards are present
* $(CC) with default libc => NOLIBC* never defined
*/
#ifndef NOLIBC
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#ifndef _NOLIBC_STDIO_H
/* standard libcs need more includes */
#include <sys/auxv.h>
#include <sys/io.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/mount.h>
#include <sys/prctl.h>
#include <sys/reboot.h>
#include <sys/stat.h>
#include <sys/syscall.h>
#include <sys/sysmacros.h>
#include <sys/time.h>
#include <sys/wait.h>
#include <dirent.h>
#include <errno.h>
#include <fcntl.h>
#include <poll.h>
#include <sched.h>
#include <signal.h>
#include <stdarg.h>
#include <stddef.h>
#include <stdint.h>
#include <unistd.h>
#include <limits.h>
#endif
#endif
/* the underlying types of int_fast16_t and int_fast32_t differ between musl and glibc/nolibc */
#define SINT_MAX_OF_TYPE(type) (((type)1 << (sizeof(type) * 8 - 2)) - (type)1 + ((type)1 << (sizeof(type) * 8 - 2)))
#define SINT_MIN_OF_TYPE(type) (-SINT_MAX_OF_TYPE(type) - 1)
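/*
 * e.g. for int16_t this is ((1 << 14) - 1) + (1 << 14) = 0x3fff + 0x4000
 * = 0x7fff; summing two half-range terms avoids shifting a 1 into the
 * sign bit, which would be undefined behaviour.
 */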
/* will be used to test initialization of environ */
static char **test_envp;
/* will be used to test initialization of argv */
static char **test_argv;
/* will be used to test initialization of argc */
static int test_argc;
/* will be used by some test cases as readable file, please don't write it */
static const char *argv0;
/* definition of a series of tests */
struct test {
const char *name; /* test name */
int (*func)(int min, int max); /* handler */
};
#ifndef _NOLIBC_STDLIB_H
char *itoa(int i)
{
static char buf[12];
int ret;
ret = snprintf(buf, sizeof(buf), "%d", i);
return (ret >= 0 && ret < sizeof(buf)) ? buf : "#err";
}
#endif
#define CASE_ERR(err) \
case err: return #err
/* returns the error name (e.g. "ENOENT") for common errors, "SUCCESS" for 0,
* or the decimal value for less common ones.
*/
static const char *errorname(int err)
{
switch (err) {
case 0: return "SUCCESS";
CASE_ERR(EPERM);
CASE_ERR(ENOENT);
CASE_ERR(ESRCH);
CASE_ERR(EINTR);
CASE_ERR(EIO);
CASE_ERR(ENXIO);
CASE_ERR(E2BIG);
CASE_ERR(ENOEXEC);
CASE_ERR(EBADF);
CASE_ERR(ECHILD);
CASE_ERR(EAGAIN);
CASE_ERR(ENOMEM);
CASE_ERR(EACCES);
CASE_ERR(EFAULT);
CASE_ERR(ENOTBLK);
CASE_ERR(EBUSY);
CASE_ERR(EEXIST);
CASE_ERR(EXDEV);
CASE_ERR(ENODEV);
CASE_ERR(ENOTDIR);
CASE_ERR(EISDIR);
CASE_ERR(EINVAL);
CASE_ERR(ENFILE);
CASE_ERR(EMFILE);
CASE_ERR(ENOTTY);
CASE_ERR(ETXTBSY);
CASE_ERR(EFBIG);
CASE_ERR(ENOSPC);
CASE_ERR(ESPIPE);
CASE_ERR(EROFS);
CASE_ERR(EMLINK);
CASE_ERR(EPIPE);
CASE_ERR(EDOM);
CASE_ERR(ERANGE);
CASE_ERR(ENOSYS);
CASE_ERR(EOVERFLOW);
default:
return itoa(err);
}
}
static void putcharn(char c, size_t n)
{
	char buf[64 + 1];	/* result() may pad with up to 64 characters */
memset(buf, c, n);
buf[n] = '\0';
fputs(buf, stdout);
}
enum RESULT {
OK,
FAIL,
SKIPPED,
};
static void result(int llen, enum RESULT r)
{
const char *msg;
if (r == OK)
msg = " [OK]";
else if (r == SKIPPED)
msg = "[SKIPPED]";
else
msg = "[FAIL]";
if (llen < 64)
putcharn(' ', 64 - llen);
puts(msg);
}
/* The helpers below are intended to be used by the macros, which evaluate
* expression <expr>, print the status to stdout, and update the "ret"
* variable to count failures. The functions themselves return the number
* of failures, thus either 0 or 1.
*/
#define EXPECT_ZR(cond, expr) \
do { if (!(cond)) result(llen, SKIPPED); else ret += expect_zr(expr, llen); } while (0)
static __attribute__((unused))
int expect_zr(int expr, int llen)
{
int ret = !(expr == 0);
llen += printf(" = %d ", expr);
result(llen, ret ? FAIL : OK);
return ret;
}
#define EXPECT_NZ(cond, expr, val) \
	do { if (!(cond)) result(llen, SKIPPED); else ret += expect_nz(expr, llen); } while (0)
static __attribute__((unused))
int expect_nz(int expr, int llen)
{
int ret = !(expr != 0);
llen += printf(" = %d ", expr);
result(llen, ret ? FAIL : OK);
return ret;
}
#define EXPECT_EQ(cond, expr, val) \
do { if (!(cond)) result(llen, SKIPPED); else ret += expect_eq(expr, llen, val); } while (0)
static __attribute__((unused))
int expect_eq(uint64_t expr, int llen, uint64_t val)
{
int ret = !(expr == val);
llen += printf(" = %lld ", (long long)expr);
result(llen, ret ? FAIL : OK);
return ret;
}
#define EXPECT_NE(cond, expr, val) \
do { if (!(cond)) result(llen, SKIPPED); else ret += expect_ne(expr, llen, val); } while (0)
static __attribute__((unused))
int expect_ne(int expr, int llen, int val)
{
int ret = !(expr != val);
llen += printf(" = %d ", expr);
result(llen, ret ? FAIL : OK);
return ret;
}
#define EXPECT_GE(cond, expr, val) \
do { if (!(cond)) result(llen, SKIPPED); else ret += expect_ge(expr, llen, val); } while (0)
static __attribute__((unused))
int expect_ge(int expr, int llen, int val)
{
int ret = !(expr >= val);
llen += printf(" = %d ", expr);
result(llen, ret ? FAIL : OK);
return ret;
}
#define EXPECT_GT(cond, expr, val) \
do { if (!(cond)) result(llen, SKIPPED); else ret += expect_gt(expr, llen, val); } while (0)
static __attribute__((unused))
int expect_gt(int expr, int llen, int val)
{
int ret = !(expr > val);
llen += printf(" = %d ", expr);
result(llen, ret ? FAIL : OK);
return ret;
}
#define EXPECT_LE(cond, expr, val) \
do { if (!(cond)) result(llen, SKIPPED); else ret += expect_le(expr, llen, val); } while (0)
static __attribute__((unused))
int expect_le(int expr, int llen, int val)
{
int ret = !(expr <= val);
llen += printf(" = %d ", expr);
result(llen, ret ? FAIL : OK);
return ret;
}
#define EXPECT_LT(cond, expr, val) \
do { if (!(cond)) result(llen, SKIPPED); else ret += expect_lt(expr, llen, val); } while (0)
static __attribute__((unused))
int expect_lt(int expr, int llen, int val)
{
int ret = !(expr < val);
llen += printf(" = %d ", expr);
result(llen, ret ? FAIL : OK);
return ret;
}
#define EXPECT_SYSZR(cond, expr) \
do { if (!(cond)) result(llen, SKIPPED); else ret += expect_syszr(expr, llen); } while (0)
static __attribute__((unused))
int expect_syszr(int expr, int llen)
{
int ret = 0;
if (expr) {
ret = 1;
llen += printf(" = %d %s ", expr, errorname(errno));
result(llen, FAIL);
} else {
llen += printf(" = %d ", expr);
result(llen, OK);
}
return ret;
}
#define EXPECT_SYSEQ(cond, expr, val) \
do { if (!(cond)) result(llen, SKIPPED); else ret += expect_syseq(expr, llen, val); } while (0)
static __attribute__((unused))
int expect_syseq(int expr, int llen, int val)
{
int ret = 0;
if (expr != val) {
ret = 1;
llen += printf(" = %d %s ", expr, errorname(errno));
result(llen, FAIL);
} else {
llen += printf(" = %d ", expr);
result(llen, OK);
}
return ret;
}
#define EXPECT_SYSNE(cond, expr, val) \
do { if (!(cond)) result(llen, SKIPPED); else ret += expect_sysne(expr, llen, val); } while (0)
static __attribute__((unused))
int expect_sysne(int expr, int llen, int val)
{
int ret = 0;
if (expr == val) {
ret = 1;
llen += printf(" = %d %s ", expr, errorname(errno));
result(llen, FAIL);
} else {
llen += printf(" = %d ", expr);
result(llen, OK);
}
return ret;
}
#define EXPECT_SYSER2(cond, expr, expret, experr1, experr2) \
do { if (!(cond)) result(llen, SKIPPED); else ret += expect_syserr2(expr, expret, experr1, experr2, llen); } while (0)
#define EXPECT_SYSER(cond, expr, expret, experr) \
EXPECT_SYSER2(cond, expr, expret, experr, 0)
static __attribute__((unused))
int expect_syserr2(int expr, int expret, int experr1, int experr2, int llen)
{
int ret = 0;
int _errno = errno;
llen += printf(" = %d %s ", expr, errorname(_errno));
if (expr != expret || (_errno != experr1 && _errno != experr2)) {
ret = 1;
if (experr2 == 0)
llen += printf(" != (%d %s) ", expret, errorname(experr1));
else
llen += printf(" != (%d %s %s) ", expret, errorname(experr1), errorname(experr2));
result(llen, FAIL);
} else {
result(llen, OK);
}
return ret;
}
#define EXPECT_PTRZR(cond, expr) \
do { if (!(cond)) result(llen, SKIPPED); else ret += expect_ptrzr(expr, llen); } while (0)
static __attribute__((unused))
int expect_ptrzr(const void *expr, int llen)
{
int ret = 0;
llen += printf(" = <%p> ", expr);
if (expr) {
ret = 1;
result(llen, FAIL);
} else {
result(llen, OK);
}
return ret;
}
#define EXPECT_PTRNZ(cond, expr) \
do { if (!(cond)) result(llen, SKIPPED); else ret += expect_ptrnz(expr, llen); } while (0)
static __attribute__((unused))
int expect_ptrnz(const void *expr, int llen)
{
int ret = 0;
llen += printf(" = <%p> ", expr);
if (!expr) {
ret = 1;
result(llen, FAIL);
} else {
result(llen, OK);
}
return ret;
}
#define EXPECT_PTREQ(cond, expr, cmp) \
do { if (!(cond)) result(llen, SKIPPED); else ret += expect_ptreq(expr, llen, cmp); } while (0)
static __attribute__((unused))
int expect_ptreq(const void *expr, int llen, const void *cmp)
{
int ret = 0;
llen += printf(" = <%p> ", expr);
if (expr != cmp) {
ret = 1;
result(llen, FAIL);
} else {
result(llen, OK);
}
return ret;
}
#define EXPECT_PTRNE(cond, expr, cmp) \
do { if (!(cond)) result(llen, SKIPPED); else ret += expect_ptrne(expr, llen, cmp); } while (0)
static __attribute__((unused))
int expect_ptrne(const void *expr, int llen, const void *cmp)
{
int ret = 0;
llen += printf(" = <%p> ", expr);
if (expr == cmp) {
ret = 1;
result(llen, FAIL);
} else {
result(llen, OK);
}
return ret;
}
#define EXPECT_PTRGE(cond, expr, cmp) \
do { if (!(cond)) result(llen, SKIPPED); else ret += expect_ptrge(expr, llen, cmp); } while (0)
static __attribute__((unused))
int expect_ptrge(const void *expr, int llen, const void *cmp)
{
int ret = !(expr >= cmp);
llen += printf(" = <%p> ", expr);
result(llen, ret ? FAIL : OK);
return ret;
}
#define EXPECT_PTRGT(cond, expr, cmp) \
do { if (!(cond)) result(llen, SKIPPED); else ret += expect_ptrgt(expr, llen, cmp); } while (0)
static __attribute__((unused))
int expect_ptrgt(const void *expr, int llen, const void *cmp)
{
int ret = !(expr > cmp);
llen += printf(" = <%p> ", expr);
result(llen, ret ? FAIL : OK);
return ret;
}
#define EXPECT_PTRLE(cond, expr, cmp) \
do { if (!(cond)) result(llen, SKIPPED); else ret += expect_ptrle(expr, llen, cmp); } while (0)
static __attribute__((unused))
int expect_ptrle(const void *expr, int llen, const void *cmp)
{
int ret = !(expr <= cmp);
llen += printf(" = <%p> ", expr);
result(llen, ret ? FAIL : OK);
return ret;
}
#define EXPECT_PTRLT(cond, expr, cmp) \
do { if (!(cond)) result(llen, SKIPPED); else ret += expect_ptrlt(expr, llen, cmp); } while (0)
static __attribute__((unused))
int expect_ptrlt(const void *expr, int llen, const void *cmp)
{
int ret = !(expr < cmp);
llen += printf(" = <%p> ", expr);
result(llen, ret ? FAIL : OK);
return ret;
}
#define EXPECT_PTRER2(cond, expr, expret, experr1, experr2) \
do { if (!(cond)) result(llen, SKIPPED); else ret += expect_ptrerr2(expr, expret, experr1, experr2, llen); } while (0)
#define EXPECT_PTRER(cond, expr, expret, experr) \
EXPECT_PTRER2(cond, expr, expret, experr, 0)
static __attribute__((unused))
int expect_ptrerr2(const void *expr, const void *expret, int experr1, int experr2, int llen)
{
int ret = 0;
int _errno = errno;
llen += printf(" = <%p> %s ", expr, errorname(_errno));
if (expr != expret || (_errno != experr1 && _errno != experr2)) {
ret = 1;
if (experr2 == 0)
llen += printf(" != (<%p> %s) ", expret, errorname(experr1));
else
llen += printf(" != (<%p> %s %s) ", expret, errorname(experr1), errorname(experr2));
result(llen, FAIL);
} else {
result(llen, OK);
}
return ret;
}
#define EXPECT_STRZR(cond, expr) \
do { if (!(cond)) result(llen, SKIPPED); else ret += expect_strzr(expr, llen); } while (0)
static __attribute__((unused))
int expect_strzr(const char *expr, int llen)
{
int ret = 0;
llen += printf(" = <%s> ", expr);
if (expr) {
ret = 1;
result(llen, FAIL);
} else {
result(llen, OK);
}
return ret;
}
#define EXPECT_STRNZ(cond, expr) \
do { if (!(cond)) result(llen, SKIPPED); else ret += expect_strnz(expr, llen); } while (0)
static __attribute__((unused))
int expect_strnz(const char *expr, int llen)
{
int ret = 0;
llen += printf(" = <%s> ", expr);
if (!expr) {
ret = 1;
result(llen, FAIL);
} else {
result(llen, OK);
}
return ret;
}
#define EXPECT_STREQ(cond, expr, cmp) \
do { if (!(cond)) result(llen, SKIPPED); else ret += expect_streq(expr, llen, cmp); } while (0)
static __attribute__((unused))
int expect_streq(const char *expr, int llen, const char *cmp)
{
int ret = 0;
llen += printf(" = <%s> ", expr);
if (strcmp(expr, cmp) != 0) {
ret = 1;
result(llen, FAIL);
} else {
result(llen, OK);
}
return ret;
}
#define EXPECT_STRNE(cond, expr, cmp) \
do { if (!(cond)) result(llen, SKIPPED); else ret += expect_strne(expr, llen, cmp); } while (0)
static __attribute__((unused))
int expect_strne(const char *expr, int llen, const char *cmp)
{
int ret = 0;
llen += printf(" = <%s> ", expr);
if (strcmp(expr, cmp) == 0) {
ret = 1;
result(llen, FAIL);
} else {
result(llen, OK);
}
return ret;
}
/* declare tests based on line numbers. There must be exactly one test per line. */
#define CASE_TEST(name) \
case __LINE__: llen += printf("%d %s", test, #name);
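/*
 * Worked example of the scheme: if "switch (test + __LINE__ + 1)" sits on
 * line 100, a CASE_TEST() on line 101 expands to "case 101:" and matches
 * test == 0, line 102 matches test == 1, and so on. Blank or extra lines
 * between cases therefore show up as holes in the test numbering.
 */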
int run_startup(int min, int max)
{
int test;
int ret = 0;
/* kernel at least passes HOME and TERM, shell passes more */
int env_total = 2;
/* checking NULL for argv/argv0, environ and _auxv is not enough, let's compare with sbrk(0) or &end */
extern char end;
char *brk = sbrk(0) != (void *)-1 ? sbrk(0) : &end;
	/* unlike nolibc, neither glibc nor musl provide a global _auxv */
const unsigned long *test_auxv = (void *)-1;
#ifdef NOLIBC
test_auxv = _auxv;
#endif
for (test = min; test >= 0 && test <= max; test++) {
int llen = 0; /* line length */
		/* avoid leaving empty lines below: each one would insert a
		 * hole into the test numbering.
		 */
switch (test + __LINE__ + 1) {
CASE_TEST(argc); EXPECT_GE(1, test_argc, 1); break;
CASE_TEST(argv_addr); EXPECT_PTRGT(1, test_argv, brk); break;
CASE_TEST(argv_environ); EXPECT_PTRLT(1, test_argv, environ); break;
CASE_TEST(argv_total); EXPECT_EQ(1, environ - test_argv - 1, test_argc ?: 1); break;
CASE_TEST(argv0_addr); EXPECT_PTRGT(1, argv0, brk); break;
CASE_TEST(argv0_str); EXPECT_STRNZ(1, argv0 > brk ? argv0 : NULL); break;
CASE_TEST(argv0_len); EXPECT_GE(1, argv0 > brk ? strlen(argv0) : 0, 1); break;
CASE_TEST(environ_addr); EXPECT_PTRGT(1, environ, brk); break;
CASE_TEST(environ_envp); EXPECT_PTREQ(1, environ, test_envp); break;
CASE_TEST(environ_auxv); EXPECT_PTRLT(test_auxv != (void *)-1, environ, test_auxv); break;
CASE_TEST(environ_total); EXPECT_GE(test_auxv != (void *)-1, (void *)test_auxv - (void *)environ - 1, env_total); break;
CASE_TEST(environ_HOME); EXPECT_PTRNZ(1, getenv("HOME")); break;
CASE_TEST(auxv_addr); EXPECT_PTRGT(test_auxv != (void *)-1, test_auxv, brk); break;
CASE_TEST(auxv_AT_UID); EXPECT_EQ(1, getauxval(AT_UID), getuid()); break;
CASE_TEST(auxv_AT_PAGESZ); EXPECT_GE(1, getauxval(AT_PAGESZ), 4096); break;
case __LINE__:
return ret; /* must be last */
/* note: do not set any defaults so as to permit holes above */
}
}
return ret;
}
/* used by some syscall tests below */
int test_getdents64(const char *dir)
{
char buffer[4096];
int fd, ret;
int err;
ret = fd = open(dir, O_RDONLY | O_DIRECTORY, 0);
if (ret < 0)
return ret;
ret = getdents64(fd, (void *)buffer, sizeof(buffer));
err = errno;
close(fd);
errno = err;
return ret;
}
int test_getpagesize(void)
{
int x = getpagesize();
int c;
if (x < 0)
return x;
#if defined(__x86_64__) || defined(__i386__) || defined(__i486__) || defined(__i586__) || defined(__i686__)
/*
* x86 family is always 4K page.
*/
c = (x == 4096);
#elif defined(__aarch64__)
/*
* Linux aarch64 supports three values of page size: 4K, 16K, and 64K
* which are selected at kernel compilation time.
*/
c = (x == 4096 || x == (16 * 1024) || x == (64 * 1024));
#else
/*
* Assuming other architectures must have at least 4K page.
*/
c = (x >= 4096);
#endif
return !c;
}
int test_fork(void)
{
int status;
pid_t pid;
	/* flush stdio buffers so the child does not flush the same data again */
fflush(stdout);
fflush(stderr);
pid = fork();
switch (pid) {
case -1:
return 1;
case 0:
exit(123);
default:
pid = waitpid(pid, &status, 0);
return pid == -1 || !WIFEXITED(status) || WEXITSTATUS(status) != 123;
}
}
int test_stat_timestamps(void)
{
struct stat st;
if (sizeof(st.st_atim.tv_sec) != sizeof(st.st_atime))
return 1;
if (stat("/proc/self/", &st) && stat(argv0, &st) && stat("/", &st))
return 1;
	if (st.st_atim.tv_sec != st.st_atime || st.st_atim.tv_nsec >= 1000000000)
		return 1;
	if (st.st_mtim.tv_sec != st.st_mtime || st.st_mtim.tv_nsec >= 1000000000)
		return 1;
	if (st.st_ctim.tv_sec != st.st_ctime || st.st_ctim.tv_nsec >= 1000000000)
		return 1;
return 0;
}
int test_mmap_munmap(void)
{
int ret, fd, i, page_size;
void *mem;
size_t file_size, length;
off_t offset, pa_offset;
struct stat stat_buf;
const char * const files[] = {
"/dev/zero",
"/proc/1/exe", "/proc/self/exe",
argv0,
NULL
};
page_size = getpagesize();
if (page_size < 0)
return 1;
	/* find a suitable file to mmap: it must exist and be readable */
for (i = 0; files[i] != NULL; i++) {
ret = fd = open(files[i], O_RDONLY);
if (ret == -1)
continue;
else
break;
}
if (ret == -1)
return 1;
ret = stat(files[i], &stat_buf);
if (ret == -1)
goto end;
	/* the special file /dev/zero reports size 0, so pick one manually */
if (i == 0)
file_size = 3*page_size;
else
file_size = stat_buf.st_size;
offset = file_size - 1;
if (offset < 0)
offset = 0;
length = file_size - offset;
pa_offset = offset & ~(page_size - 1);
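	/* mmap() requires a page-aligned file offset: map from the aligned
	 * offset pa_offset and extend the length by the alignment slack.
	 */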
mem = mmap(NULL, length + offset - pa_offset, PROT_READ, MAP_SHARED, fd, pa_offset);
if (mem == MAP_FAILED) {
ret = 1;
goto end;
}
ret = munmap(mem, length + offset - pa_offset);
end:
close(fd);
return !!ret;
}
int test_pipe(void)
{
const char *const msg = "hello, nolibc";
int pipefd[2];
char buf[32];
size_t len;
if (pipe(pipefd) == -1)
return 1;
write(pipefd[1], msg, strlen(msg));
close(pipefd[1]);
len = read(pipefd[0], buf, sizeof(buf));
close(pipefd[0]);
if (len != strlen(msg))
return 1;
return !!memcmp(buf, msg, len);
}
/* Run syscall tests between IDs <min> and <max>.
* Return 0 on success, non-zero on failure.
*/
int run_syscall(int min, int max)
{
struct timeval tv;
struct timezone tz;
struct stat stat_buf;
int euid0;
int proc;
int test;
int tmp;
int ret = 0;
void *p1, *p2;
int has_gettid = 1;
/* <proc> indicates whether or not /proc is mounted */
proc = stat("/proc", &stat_buf) == 0;
/* this will be used to skip certain tests that can't be run unprivileged */
euid0 = geteuid() == 0;
/* from 2.30, glibc provides gettid() */
#if defined(__GLIBC_MINOR__) && defined(__GLIBC__)
has_gettid = __GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 30);
#endif
for (test = min; test >= 0 && test <= max; test++) {
int llen = 0; /* line length */
		/* avoid leaving empty lines below: each one would insert a
		 * hole into the test numbering.
		 */
switch (test + __LINE__ + 1) {
CASE_TEST(getpid); EXPECT_SYSNE(1, getpid(), -1); break;
CASE_TEST(getppid); EXPECT_SYSNE(1, getppid(), -1); break;
CASE_TEST(gettid); EXPECT_SYSNE(has_gettid, gettid(), -1); break;
CASE_TEST(getpgid_self); EXPECT_SYSNE(1, getpgid(0), -1); break;
CASE_TEST(getpgid_bad); EXPECT_SYSER(1, getpgid(-1), -1, ESRCH); break;
CASE_TEST(kill_0); EXPECT_SYSZR(1, kill(getpid(), 0)); break;
		CASE_TEST(kill_CONT);         EXPECT_SYSZR(1, kill(getpid(), SIGCONT)); break;
CASE_TEST(kill_BADPID); EXPECT_SYSER(1, kill(INT_MAX, 0), -1, ESRCH); break;
CASE_TEST(sbrk_0); EXPECT_PTRNE(1, sbrk(0), (void *)-1); break;
CASE_TEST(sbrk); if ((p1 = p2 = sbrk(4096)) != (void *)-1) p2 = sbrk(-4096); EXPECT_SYSZR(1, (p2 == (void *)-1) || p2 == p1); break;
CASE_TEST(brk); EXPECT_SYSZR(1, brk(sbrk(0))); break;
CASE_TEST(chdir_root); EXPECT_SYSZR(1, chdir("/")); chdir(getenv("PWD")); break;
CASE_TEST(chdir_dot); EXPECT_SYSZR(1, chdir(".")); break;
CASE_TEST(chdir_blah); EXPECT_SYSER(1, chdir("/blah"), -1, ENOENT); break;
CASE_TEST(chmod_argv0); EXPECT_SYSZR(1, chmod(argv0, 0555)); break;
CASE_TEST(chmod_self); EXPECT_SYSER(proc, chmod("/proc/self", 0555), -1, EPERM); break;
CASE_TEST(chown_self); EXPECT_SYSER(proc, chown("/proc/self", 0, 0), -1, EPERM); break;
CASE_TEST(chroot_root); EXPECT_SYSZR(euid0, chroot("/")); break;
CASE_TEST(chroot_blah); EXPECT_SYSER(1, chroot("/proc/self/blah"), -1, ENOENT); break;
CASE_TEST(chroot_exe); EXPECT_SYSER(1, chroot(argv0), -1, ENOTDIR); break;
CASE_TEST(close_m1); EXPECT_SYSER(1, close(-1), -1, EBADF); break;
CASE_TEST(close_dup); EXPECT_SYSZR(1, close(dup(0))); break;
CASE_TEST(dup_0); tmp = dup(0); EXPECT_SYSNE(1, tmp, -1); close(tmp); break;
CASE_TEST(dup_m1); tmp = dup(-1); EXPECT_SYSER(1, tmp, -1, EBADF); if (tmp != -1) close(tmp); break;
CASE_TEST(dup2_0); tmp = dup2(0, 100); EXPECT_SYSNE(1, tmp, -1); close(tmp); break;
CASE_TEST(dup2_m1); tmp = dup2(-1, 100); EXPECT_SYSER(1, tmp, -1, EBADF); if (tmp != -1) close(tmp); break;
CASE_TEST(dup3_0); tmp = dup3(0, 100, 0); EXPECT_SYSNE(1, tmp, -1); close(tmp); break;
CASE_TEST(dup3_m1); tmp = dup3(-1, 100, 0); EXPECT_SYSER(1, tmp, -1, EBADF); if (tmp != -1) close(tmp); break;
CASE_TEST(execve_root); EXPECT_SYSER(1, execve("/", (char*[]){ [0] = "/", [1] = NULL }, NULL), -1, EACCES); break;
CASE_TEST(fork); EXPECT_SYSZR(1, test_fork()); break;
CASE_TEST(getdents64_root); EXPECT_SYSNE(1, test_getdents64("/"), -1); break;
CASE_TEST(getdents64_null); EXPECT_SYSER(1, test_getdents64("/dev/null"), -1, ENOTDIR); break;
CASE_TEST(gettimeofday_tv); EXPECT_SYSZR(1, gettimeofday(&tv, NULL)); break;
CASE_TEST(gettimeofday_tv_tz);EXPECT_SYSZR(1, gettimeofday(&tv, &tz)); break;
CASE_TEST(getpagesize); EXPECT_SYSZR(1, test_getpagesize()); break;
CASE_TEST(ioctl_tiocinq); EXPECT_SYSZR(1, ioctl(0, TIOCINQ, &tmp)); break;
CASE_TEST(link_root1); EXPECT_SYSER(1, link("/", "/"), -1, EEXIST); break;
CASE_TEST(link_blah); EXPECT_SYSER(1, link("/proc/self/blah", "/blah"), -1, ENOENT); break;
CASE_TEST(link_dir); EXPECT_SYSER(euid0, link("/", "/blah"), -1, EPERM); break;
CASE_TEST(link_cross); EXPECT_SYSER(proc, link("/proc/self/cmdline", "/blah"), -1, EXDEV); break;
CASE_TEST(lseek_m1); EXPECT_SYSER(1, lseek(-1, 0, SEEK_SET), -1, EBADF); break;
CASE_TEST(lseek_0); EXPECT_SYSER(1, lseek(0, 0, SEEK_SET), -1, ESPIPE); break;
CASE_TEST(mkdir_root); EXPECT_SYSER(1, mkdir("/", 0755), -1, EEXIST); break;
CASE_TEST(mmap_bad); EXPECT_PTRER(1, mmap(NULL, 0, PROT_READ, MAP_PRIVATE, 0, 0), MAP_FAILED, EINVAL); break;
CASE_TEST(munmap_bad); EXPECT_SYSER(1, munmap((void *)1, 0), -1, EINVAL); break;
CASE_TEST(mmap_munmap_good); EXPECT_SYSZR(1, test_mmap_munmap()); break;
CASE_TEST(open_tty); EXPECT_SYSNE(1, tmp = open("/dev/null", 0), -1); if (tmp != -1) close(tmp); break;
CASE_TEST(open_blah); EXPECT_SYSER(1, tmp = open("/proc/self/blah", 0), -1, ENOENT); if (tmp != -1) close(tmp); break;
CASE_TEST(pipe); EXPECT_SYSZR(1, test_pipe()); break;
CASE_TEST(poll_null); EXPECT_SYSZR(1, poll(NULL, 0, 0)); break;
CASE_TEST(poll_stdout); EXPECT_SYSNE(1, ({ struct pollfd fds = { 1, POLLOUT, 0}; poll(&fds, 1, 0); }), -1); break;
CASE_TEST(poll_fault); EXPECT_SYSER(1, poll((void *)1, 1, 0), -1, EFAULT); break;
CASE_TEST(prctl); EXPECT_SYSER(1, prctl(PR_SET_NAME, (unsigned long)NULL, 0, 0, 0), -1, EFAULT); break;
CASE_TEST(read_badf); EXPECT_SYSER(1, read(-1, &tmp, 1), -1, EBADF); break;
CASE_TEST(rmdir_blah); EXPECT_SYSER(1, rmdir("/blah"), -1, ENOENT); break;
CASE_TEST(sched_yield); EXPECT_SYSZR(1, sched_yield()); break;
CASE_TEST(select_null); EXPECT_SYSZR(1, ({ struct timeval tv = { 0 }; select(0, NULL, NULL, NULL, &tv); })); break;
CASE_TEST(select_stdout); EXPECT_SYSNE(1, ({ fd_set fds; FD_ZERO(&fds); FD_SET(1, &fds); select(2, NULL, &fds, NULL, NULL); }), -1); break;
CASE_TEST(select_fault); EXPECT_SYSER(1, select(1, (void *)1, NULL, NULL, 0), -1, EFAULT); break;
CASE_TEST(stat_blah); EXPECT_SYSER(1, stat("/proc/self/blah", &stat_buf), -1, ENOENT); break;
CASE_TEST(stat_fault); EXPECT_SYSER(1, stat((void *)1, &stat_buf), -1, EFAULT); break;
CASE_TEST(stat_timestamps); EXPECT_SYSZR(1, test_stat_timestamps()); break;
CASE_TEST(symlink_root); EXPECT_SYSER(1, symlink("/", "/"), -1, EEXIST); break;
CASE_TEST(unlink_root); EXPECT_SYSER(1, unlink("/"), -1, EISDIR); break;
CASE_TEST(unlink_blah); EXPECT_SYSER(1, unlink("/proc/self/blah"), -1, ENOENT); break;
CASE_TEST(wait_child); EXPECT_SYSER(1, wait(&tmp), -1, ECHILD); break;
CASE_TEST(waitpid_min); EXPECT_SYSER(1, waitpid(INT_MIN, &tmp, WNOHANG), -1, ESRCH); break;
CASE_TEST(waitpid_child); EXPECT_SYSER(1, waitpid(getpid(), &tmp, WNOHANG), -1, ECHILD); break;
CASE_TEST(write_badf); EXPECT_SYSER(1, write(-1, &tmp, 1), -1, EBADF); break;
CASE_TEST(write_zero); EXPECT_SYSZR(1, write(1, &tmp, 0)); break;
CASE_TEST(syscall_noargs); EXPECT_SYSEQ(1, syscall(__NR_getpid), getpid()); break;
CASE_TEST(syscall_args); EXPECT_SYSER(1, syscall(__NR_statx, 0, NULL, 0, 0, NULL), -1, EFAULT); break;
case __LINE__:
return ret; /* must be last */
/* note: do not set any defaults so as to permit holes above */
}
}
return ret;
}
int run_stdlib(int min, int max)
{
int test;
int ret = 0;
for (test = min; test >= 0 && test <= max; test++) {
int llen = 0; /* line length */
		/* avoid leaving empty lines below: each one would insert a
		 * hole into the test numbering.
		 */
switch (test + __LINE__ + 1) {
CASE_TEST(getenv_TERM); EXPECT_STRNZ(1, getenv("TERM")); break;
CASE_TEST(getenv_blah); EXPECT_STRZR(1, getenv("blah")); break;
CASE_TEST(setcmp_blah_blah); EXPECT_EQ(1, strcmp("blah", "blah"), 0); break;
CASE_TEST(setcmp_blah_blah2); EXPECT_NE(1, strcmp("blah", "blah2"), 0); break;
CASE_TEST(setncmp_blah_blah); EXPECT_EQ(1, strncmp("blah", "blah", 10), 0); break;
CASE_TEST(setncmp_blah_blah4); EXPECT_EQ(1, strncmp("blah", "blah4", 4), 0); break;
CASE_TEST(setncmp_blah_blah5); EXPECT_NE(1, strncmp("blah", "blah5", 5), 0); break;
CASE_TEST(setncmp_blah_blah6); EXPECT_NE(1, strncmp("blah", "blah6", 6), 0); break;
CASE_TEST(strchr_foobar_o); EXPECT_STREQ(1, strchr("foobar", 'o'), "oobar"); break;
CASE_TEST(strchr_foobar_z); EXPECT_STRZR(1, strchr("foobar", 'z')); break;
CASE_TEST(strrchr_foobar_o); EXPECT_STREQ(1, strrchr("foobar", 'o'), "obar"); break;
CASE_TEST(strrchr_foobar_z); EXPECT_STRZR(1, strrchr("foobar", 'z')); break;
CASE_TEST(memcmp_20_20); EXPECT_EQ(1, memcmp("aaa\x20", "aaa\x20", 4), 0); break;
CASE_TEST(memcmp_20_60); EXPECT_LT(1, memcmp("aaa\x20", "aaa\x60", 4), 0); break;
CASE_TEST(memcmp_60_20); EXPECT_GT(1, memcmp("aaa\x60", "aaa\x20", 4), 0); break;
CASE_TEST(memcmp_20_e0); EXPECT_LT(1, memcmp("aaa\x20", "aaa\xe0", 4), 0); break;
CASE_TEST(memcmp_e0_20); EXPECT_GT(1, memcmp("aaa\xe0", "aaa\x20", 4), 0); break;
CASE_TEST(memcmp_80_e0); EXPECT_LT(1, memcmp("aaa\x80", "aaa\xe0", 4), 0); break;
CASE_TEST(memcmp_e0_80); EXPECT_GT(1, memcmp("aaa\xe0", "aaa\x80", 4), 0); break;
CASE_TEST(limit_int8_max); EXPECT_EQ(1, INT8_MAX, (int8_t) 0x7f); break;
CASE_TEST(limit_int8_min); EXPECT_EQ(1, INT8_MIN, (int8_t) 0x80); break;
CASE_TEST(limit_uint8_max); EXPECT_EQ(1, UINT8_MAX, (uint8_t) 0xff); break;
CASE_TEST(limit_int16_max); EXPECT_EQ(1, INT16_MAX, (int16_t) 0x7fff); break;
CASE_TEST(limit_int16_min); EXPECT_EQ(1, INT16_MIN, (int16_t) 0x8000); break;
CASE_TEST(limit_uint16_max); EXPECT_EQ(1, UINT16_MAX, (uint16_t) 0xffff); break;
CASE_TEST(limit_int32_max); EXPECT_EQ(1, INT32_MAX, (int32_t) 0x7fffffff); break;
CASE_TEST(limit_int32_min); EXPECT_EQ(1, INT32_MIN, (int32_t) 0x80000000); break;
CASE_TEST(limit_uint32_max); EXPECT_EQ(1, UINT32_MAX, (uint32_t) 0xffffffff); break;
CASE_TEST(limit_int64_max); EXPECT_EQ(1, INT64_MAX, (int64_t) 0x7fffffffffffffff); break;
CASE_TEST(limit_int64_min); EXPECT_EQ(1, INT64_MIN, (int64_t) 0x8000000000000000); break;
CASE_TEST(limit_uint64_max); EXPECT_EQ(1, UINT64_MAX, (uint64_t) 0xffffffffffffffff); break;
CASE_TEST(limit_int_least8_max); EXPECT_EQ(1, INT_LEAST8_MAX, (int_least8_t) 0x7f); break;
CASE_TEST(limit_int_least8_min); EXPECT_EQ(1, INT_LEAST8_MIN, (int_least8_t) 0x80); break;
CASE_TEST(limit_uint_least8_max); EXPECT_EQ(1, UINT_LEAST8_MAX, (uint_least8_t) 0xff); break;
CASE_TEST(limit_int_least16_max); EXPECT_EQ(1, INT_LEAST16_MAX, (int_least16_t) 0x7fff); break;
CASE_TEST(limit_int_least16_min); EXPECT_EQ(1, INT_LEAST16_MIN, (int_least16_t) 0x8000); break;
CASE_TEST(limit_uint_least16_max); EXPECT_EQ(1, UINT_LEAST16_MAX, (uint_least16_t) 0xffff); break;
CASE_TEST(limit_int_least32_max); EXPECT_EQ(1, INT_LEAST32_MAX, (int_least32_t) 0x7fffffff); break;
CASE_TEST(limit_int_least32_min); EXPECT_EQ(1, INT_LEAST32_MIN, (int_least32_t) 0x80000000); break;
CASE_TEST(limit_uint_least32_max); EXPECT_EQ(1, UINT_LEAST32_MAX, (uint_least32_t) 0xffffffffU); break;
CASE_TEST(limit_int_least64_min); EXPECT_EQ(1, INT_LEAST64_MIN, (int_least64_t) 0x8000000000000000LL); break;
CASE_TEST(limit_int_least64_max); EXPECT_EQ(1, INT_LEAST64_MAX, (int_least64_t) 0x7fffffffffffffffLL); break;
CASE_TEST(limit_uint_least64_max); EXPECT_EQ(1, UINT_LEAST64_MAX, (uint_least64_t) 0xffffffffffffffffULL); break;
CASE_TEST(limit_int_fast8_max); EXPECT_EQ(1, INT_FAST8_MAX, (int_fast8_t) 0x7f); break;
CASE_TEST(limit_int_fast8_min); EXPECT_EQ(1, INT_FAST8_MIN, (int_fast8_t) 0x80); break;
CASE_TEST(limit_uint_fast8_max); EXPECT_EQ(1, UINT_FAST8_MAX, (uint_fast8_t) 0xff); break;
CASE_TEST(limit_int_fast16_min); EXPECT_EQ(1, INT_FAST16_MIN, (int_fast16_t) SINT_MIN_OF_TYPE(int_fast16_t)); break;
CASE_TEST(limit_int_fast16_max); EXPECT_EQ(1, INT_FAST16_MAX, (int_fast16_t) SINT_MAX_OF_TYPE(int_fast16_t)); break;
CASE_TEST(limit_uint_fast16_max); EXPECT_EQ(1, UINT_FAST16_MAX, (uint_fast16_t) UINTPTR_MAX); break;
CASE_TEST(limit_int_fast32_min); EXPECT_EQ(1, INT_FAST32_MIN, (int_fast32_t) SINT_MIN_OF_TYPE(int_fast32_t)); break;
CASE_TEST(limit_int_fast32_max); EXPECT_EQ(1, INT_FAST32_MAX, (int_fast32_t) SINT_MAX_OF_TYPE(int_fast32_t)); break;
CASE_TEST(limit_uint_fast32_max); EXPECT_EQ(1, UINT_FAST32_MAX, (uint_fast32_t) UINTPTR_MAX); break;
CASE_TEST(limit_int_fast64_min); EXPECT_EQ(1, INT_FAST64_MIN, (int_fast64_t) INT64_MIN); break;
CASE_TEST(limit_int_fast64_max); EXPECT_EQ(1, INT_FAST64_MAX, (int_fast64_t) INT64_MAX); break;
CASE_TEST(limit_uint_fast64_max); EXPECT_EQ(1, UINT_FAST64_MAX, (uint_fast64_t) UINT64_MAX); break;
CASE_TEST(sizeof_long_sane); EXPECT_EQ(1, sizeof(long) == 8 || sizeof(long) == 4, 1); break;
CASE_TEST(limit_intptr_min); EXPECT_EQ(1, INTPTR_MIN, sizeof(long) == 8 ? (intptr_t) 0x8000000000000000LL : (intptr_t) 0x80000000); break;
CASE_TEST(limit_intptr_max); EXPECT_EQ(1, INTPTR_MAX, sizeof(long) == 8 ? (intptr_t) 0x7fffffffffffffffLL : (intptr_t) 0x7fffffff); break;
CASE_TEST(limit_uintptr_max); EXPECT_EQ(1, UINTPTR_MAX, sizeof(long) == 8 ? (uintptr_t) 0xffffffffffffffffULL : (uintptr_t) 0xffffffffU); break;
CASE_TEST(limit_ptrdiff_min); EXPECT_EQ(1, PTRDIFF_MIN, sizeof(long) == 8 ? (ptrdiff_t) 0x8000000000000000LL : (ptrdiff_t) 0x80000000); break;
CASE_TEST(limit_ptrdiff_max); EXPECT_EQ(1, PTRDIFF_MAX, sizeof(long) == 8 ? (ptrdiff_t) 0x7fffffffffffffffLL : (ptrdiff_t) 0x7fffffff); break;
CASE_TEST(limit_size_max); EXPECT_EQ(1, SIZE_MAX, sizeof(long) == 8 ? (size_t) 0xffffffffffffffffULL : (size_t) 0xffffffffU); break;
case __LINE__:
return ret; /* must be last */
/* note: do not set any defaults so as to permit holes above */
}
}
return ret;
}
#define EXPECT_VFPRINTF(c, expected, fmt, ...) \
ret += expect_vfprintf(llen, c, expected, fmt, ##__VA_ARGS__)
static int expect_vfprintf(int llen, int c, const char *expected, const char *fmt, ...)
{
int ret, fd;
ssize_t w, r;
char buf[100];
FILE *memfile;
va_list args;
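	/* O_TMPFILE creates an unnamed file under /tmp; if the kernel or
	 * the filesystem does not support it, report the test as skipped.
	 */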
fd = open("/tmp", O_TMPFILE | O_EXCL | O_RDWR, 0600);
if (fd == -1) {
result(llen, SKIPPED);
return 0;
}
memfile = fdopen(fd, "w+");
if (!memfile) {
result(llen, FAIL);
return 1;
}
va_start(args, fmt);
w = vfprintf(memfile, fmt, args);
va_end(args);
if (w != c) {
llen += printf(" written(%d) != %d", (int)w, c);
result(llen, FAIL);
return 1;
}
fflush(memfile);
lseek(fd, 0, SEEK_SET);
r = read(fd, buf, sizeof(buf) - 1);
fclose(memfile);
if (r != w) {
llen += printf(" written(%d) != read(%d)", (int)w, (int)r);
result(llen, FAIL);
return 1;
}
buf[r] = '\0';
llen += printf(" \"%s\" = \"%s\"", expected, buf);
ret = strncmp(expected, buf, c);
result(llen, ret ? FAIL : OK);
return ret;
}
static int run_vfprintf(int min, int max)
{
int test;
int ret = 0;
for (test = min; test >= 0 && test <= max; test++) {
int llen = 0; /* line length */
		/* avoid leaving empty lines below: each one would insert a
		 * hole into the test numbering.
		 */
switch (test + __LINE__ + 1) {
CASE_TEST(empty); EXPECT_VFPRINTF(0, "", ""); break;
CASE_TEST(simple); EXPECT_VFPRINTF(3, "foo", "foo"); break;
CASE_TEST(string); EXPECT_VFPRINTF(3, "foo", "%s", "foo"); break;
CASE_TEST(number); EXPECT_VFPRINTF(4, "1234", "%d", 1234); break;
CASE_TEST(negnumber); EXPECT_VFPRINTF(5, "-1234", "%d", -1234); break;
CASE_TEST(unsigned); EXPECT_VFPRINTF(5, "12345", "%u", 12345); break;
CASE_TEST(char); EXPECT_VFPRINTF(1, "c", "%c", 'c'); break;
CASE_TEST(hex); EXPECT_VFPRINTF(1, "f", "%x", 0xf); break;
CASE_TEST(pointer); EXPECT_VFPRINTF(3, "0x1", "%p", (void *) 0x1); break;
case __LINE__:
return ret; /* must be last */
/* note: do not set any defaults so as to permit holes above */
}
}
return ret;
}
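/* overflow a stack buffer on purpose so the stack protector fires */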
static int smash_stack(void)
{
char buf[100];
volatile char *ptr = buf;
size_t i;
for (i = 0; i < 200; i++)
ptr[i] = 'P';
return 1;
}
static int run_protection(int min __attribute__((unused)),
int max __attribute__((unused)))
{
pid_t pid;
int llen = 0, status;
llen += printf("0 -fstackprotector ");
#if !defined(_NOLIBC_STACKPROTECTOR)
llen += printf("not supported");
result(llen, SKIPPED);
return 0;
#endif
#if defined(_NOLIBC_STACKPROTECTOR)
if (!__stack_chk_guard) {
llen += printf("__stack_chk_guard not initialized");
result(llen, FAIL);
return 1;
}
#endif
	pid = fork();
switch (pid) {
case -1:
llen += printf("fork()");
result(llen, FAIL);
return 1;
case 0:
close(STDOUT_FILENO);
close(STDERR_FILENO);
prctl(PR_SET_DUMPABLE, 0, 0, 0, 0);
smash_stack();
return 1;
default:
pid = waitpid(pid, &status, 0);
if (pid == -1 || !WIFSIGNALED(status) || WTERMSIG(status) != SIGABRT) {
llen += printf("waitpid()");
result(llen, FAIL);
return 1;
}
result(llen, OK);
return 0;
}
}
/* prepare what needs to be prepared for pid 1 (stdio, /dev, /proc, etc) */
int prepare(void)
{
struct stat stat_buf;
/* It's possible that /dev doesn't even exist or was not mounted, so
* we'll try to create it, mount it, or create minimal entries into it.
* We want at least /dev/null and /dev/console.
*/
if (stat("/dev/.", &stat_buf) == 0 || mkdir("/dev", 0755) == 0) {
if (stat("/dev/console", &stat_buf) != 0 ||
stat("/dev/null", &stat_buf) != 0 ||
stat("/dev/zero", &stat_buf) != 0) {
/* try devtmpfs first, otherwise fall back to manual creation */
if (mount("/dev", "/dev", "devtmpfs", 0, 0) != 0) {
mknod("/dev/console", 0600 | S_IFCHR, makedev(5, 1));
mknod("/dev/null", 0666 | S_IFCHR, makedev(1, 3));
mknod("/dev/zero", 0666 | S_IFCHR, makedev(1, 5));
}
}
}
/* If no /dev/console was found before calling init, stdio is closed so
* we need to reopen it from /dev/console. If it failed above, it will
* still fail here and we cannot emit a message anyway.
*/
if (close(dup(1)) == -1) {
int fd = open("/dev/console", O_RDWR);
if (fd >= 0) {
if (fd != 0)
dup2(fd, 0);
if (fd != 1)
dup2(fd, 1);
if (fd != 2)
dup2(fd, 2);
if (fd > 2)
close(fd);
puts("\nSuccessfully reopened /dev/console.");
}
}
/* try to mount /proc if not mounted. Silently fail otherwise */
if (stat("/proc/.", &stat_buf) == 0 || mkdir("/proc", 0755) == 0) {
if (stat("/proc/self", &stat_buf) != 0) {
/* If not mountable, remove /proc completely to avoid misuse */
if (mount("none", "/proc", "proc", 0, 0) != 0)
rmdir("/proc");
}
}
/* some tests rely on a writable /tmp */
mkdir("/tmp", 0755);
return 0;
}
/* This is the definition of known test names, with their functions */
static const struct test test_names[] = {
/* add new tests here */
{ .name = "startup", .func = run_startup },
{ .name = "syscall", .func = run_syscall },
{ .name = "stdlib", .func = run_stdlib },
{ .name = "vfprintf", .func = run_vfprintf },
{ .name = "protection", .func = run_protection },
{ 0 }
};
static int is_setting_valid(char *test)
{
int idx, len, test_len, valid = 0;
char delimiter;
if (!test)
return valid;
test_len = strlen(test);
for (idx = 0; test_names[idx].name; idx++) {
len = strlen(test_names[idx].name);
if (test_len < len)
continue;
if (strncmp(test, test_names[idx].name, len) != 0)
continue;
delimiter = test[len];
if (delimiter != ':' && delimiter != ',' && delimiter != '\0')
continue;
valid = 1;
break;
}
return valid;
}
int main(int argc, char **argv, char **envp)
{
int min = 0;
int max = INT_MAX;
int ret = 0;
int err;
int idx;
char *test;
argv0 = argv[0];
test_argc = argc;
test_argv = argv;
test_envp = envp;
/* when called as init, it's possible that no console was opened, for
* example if no /dev file system was provided. We'll check that fd#1
* was opened, and if not we'll attempt to create and open /dev/console
* and /dev/null that we'll use for later tests.
*/
if (getpid() == 1)
prepare();
/* the definition of a series of tests comes from either argv[1] or the
* "NOLIBC_TEST" environment variable. It's made of a comma-delimited
* series of test names and optional ranges:
* syscall:5-15[:.*],stdlib:8-10
*/
test = argv[1];
if (!is_setting_valid(test))
test = getenv("NOLIBC_TEST");
if (is_setting_valid(test)) {
char *comma, *colon, *dash, *value;
do {
comma = strchr(test, ',');
if (comma)
*(comma++) = '\0';
colon = strchr(test, ':');
if (colon)
*(colon++) = '\0';
for (idx = 0; test_names[idx].name; idx++) {
if (strcmp(test, test_names[idx].name) == 0)
break;
}
if (test_names[idx].name) {
/* The test was named, it will be called at least
* once. We may have an optional range at <colon>
* here, which defaults to the full range.
*/
do {
min = 0; max = INT_MAX;
value = colon;
if (value && *value) {
colon = strchr(value, ':');
if (colon)
*(colon++) = '\0';
dash = strchr(value, '-');
if (dash)
*(dash++) = '\0';
/* support :val: :min-max: :min-: :-max: */
if (*value)
min = atoi(value);
if (!dash)
max = min;
else if (*dash)
max = atoi(dash);
value = colon;
}
/* now's time to call the test */
printf("Running test '%s'\n", test_names[idx].name);
err = test_names[idx].func(min, max);
ret += err;
printf("Errors during this test: %d\n\n", err);
} while (colon && *colon);
} else
printf("Ignoring unknown test name '%s'\n", test);
test = comma;
} while (test && *test);
} else {
/* no test mentioned, run everything */
for (idx = 0; test_names[idx].name; idx++) {
printf("Running test '%s'\n", test_names[idx].name);
err = test_names[idx].func(min, max);
ret += err;
printf("Errors during this test: %d\n\n", err);
}
}
printf("Total number of errors: %d\n", ret);
if (getpid() == 1) {
/* we're running as init, there's no other process on the
* system, thus likely started from a VM for a quick check.
* Exiting will provoke a kernel panic that may be reported
* as an error by Qemu or the hypervisor, while stopping
		 * cleanly will often be reported as a success. This allows the
		 * output of this program to be used for bisecting kernels.
*/
printf("Leaving init with final status: %d\n", !!ret);
if (ret == 0)
reboot(RB_POWER_OFF);
#if defined(__x86_64__)
/* QEMU started with "-device isa-debug-exit -no-reboot" will
* exit with status code 2N+1 when N is written to 0x501. We
* hard-code the syscall here as it's arch-dependent.
*/
else if (syscall(__NR_ioperm, 0x501, 1, 1) == 0)
__asm__ volatile ("outb %%al, %%dx" :: "d"(0x501), "a"(0));
/* if it does nothing, fall back to the regular panic */
#endif
}
printf("Exiting with status %d\n", !!ret);
return !!ret;
}
| linux-master | tools/testing/selftests/nolibc/nolibc-test.c |
#define _GNU_SOURCE
#include <assert.h>
#include <errno.h>
#include <fcntl.h>
#include <linux/types.h>
#include <sched.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <syscall.h>
#include <sys/wait.h>
#include "../kselftest_harness.h"
#include "../pidfd/pidfd.h"
/*
* Regression test for:
* 35f71bc0a09a ("fork: report pid reservation failure properly")
* b26ebfe12f34 ("pid: Fix error return value in some cases")
*/
TEST(regression_enomem)
{
pid_t pid;
if (geteuid())
EXPECT_EQ(0, unshare(CLONE_NEWUSER));
EXPECT_EQ(0, unshare(CLONE_NEWPID));
pid = fork();
ASSERT_GE(pid, 0);
if (pid == 0)
exit(EXIT_SUCCESS);
EXPECT_EQ(0, wait_for_pid(pid));
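	/*
	 * The first child was pid 1 in the new pid namespace; now that it
	 * has exited the namespace can host no further tasks, so the next
	 * fork() must fail and report ENOMEM rather than a bogus value.
	 */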
pid = fork();
ASSERT_LT(pid, 0);
ASSERT_EQ(errno, ENOMEM);
}
TEST_HARNESS_MAIN
| linux-master | tools/testing/selftests/pid_namespace/regression_enomem.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Use the core scheduling prctl() to test core scheduling cookies control.
*
* Copyright (c) 2021 Oracle and/or its affiliates.
* Author: Chris Hyser <[email protected]>
*
*
* This library is free software; you can redistribute it and/or modify it
* under the terms of version 2.1 of the GNU Lesser General Public License as
* published by the Free Software Foundation.
*
* This library is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
* for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this library; if not, see <http://www.gnu.org/licenses>.
*/
#define _GNU_SOURCE
#include <sys/eventfd.h>
#include <sys/wait.h>
#include <sys/types.h>
#include <sched.h>
#include <sys/prctl.h>
#include <unistd.h>
#include <time.h>
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#if __GLIBC_PREREQ(2, 30) == 0
#include <sys/syscall.h>
static pid_t gettid(void)
{
return syscall(SYS_gettid);
}
#endif
#ifndef PR_SCHED_CORE
#define PR_SCHED_CORE 62
# define PR_SCHED_CORE_GET 0
# define PR_SCHED_CORE_CREATE 1 /* create unique core_sched cookie */
# define PR_SCHED_CORE_SHARE_TO 2 /* push core_sched cookie to pid */
# define PR_SCHED_CORE_SHARE_FROM 3 /* pull core_sched cookie to pid */
# define PR_SCHED_CORE_MAX 4
#endif
#define MAX_PROCESSES 128
#define MAX_THREADS 128
static const char USAGE[] = "cs_prctl_test [options]\n"
" options:\n"
" -P : number of processes to create.\n"
" -T : number of threads per process to create.\n"
" -d : delay time to keep tasks alive.\n"
" -k : keep tasks alive until keypress.\n";
enum pid_type {PIDTYPE_PID = 0, PIDTYPE_TGID, PIDTYPE_PGID};
const int THREAD_CLONE_FLAGS = CLONE_THREAD | CLONE_SIGHAND | CLONE_FS | CLONE_VM | CLONE_FILES;
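/* clone() flags that create a thread of the calling process, sharing the
 * address space, file table and signal handlers much like pthread_create()
 */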
struct child_args {
int num_threads;
int pfd[2];
int cpid;
int thr_tids[MAX_THREADS];
};
static struct child_args procs[MAX_PROCESSES];
static int num_processes = 2;
static int need_cleanup = 0;
static int _prctl(int option, unsigned long arg2, unsigned long arg3, unsigned long arg4,
unsigned long arg5)
{
int res;
res = prctl(option, arg2, arg3, arg4, arg5);
printf("%d = prctl(%d, %ld, %ld, %ld, %lx)\n", res, option, (long)arg2, (long)arg3,
(long)arg4, arg5);
return res;
}
#define STACK_SIZE (1024 * 1024)
#define handle_error(msg) __handle_error(__FILE__, __LINE__, msg)
static void __handle_error(char *fn, int ln, char *msg)
{
int pidx;
printf("(%s:%d) - ", fn, ln);
perror(msg);
if (need_cleanup) {
for (pidx = 0; pidx < num_processes; ++pidx)
kill(procs[pidx].cpid, 15);
need_cleanup = 0;
}
exit(EXIT_FAILURE);
}
static void handle_usage(int rc, char *msg)
{
puts(USAGE);
puts(msg);
putchar('\n');
exit(rc);
}
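/* fetch the core scheduling cookie of <pid>, or -1UL if unsupported */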
static unsigned long get_cs_cookie(int pid)
{
unsigned long long cookie;
int ret;
ret = prctl(PR_SCHED_CORE, PR_SCHED_CORE_GET, pid, PIDTYPE_PID,
(unsigned long)&cookie);
if (ret) {
printf("Not a core sched system\n");
return -1UL;
}
return cookie;
}
static int child_func_thread(void __attribute__((unused))*arg)
{
while (1)
usleep(20000);
return 0;
}
static void create_threads(int num_threads, int thr_tids[])
{
void *child_stack;
pid_t tid;
int i;
for (i = 0; i < num_threads; ++i) {
child_stack = malloc(STACK_SIZE);
if (!child_stack)
handle_error("child stack allocate");
tid = clone(child_func_thread, child_stack + STACK_SIZE, THREAD_CLONE_FLAGS, NULL);
if (tid == -1)
handle_error("clone thread");
thr_tids[i] = tid;
}
}
static int child_func_process(void *arg)
{
struct child_args *ca = (struct child_args *)arg;
int ret;
close(ca->pfd[0]);
create_threads(ca->num_threads, ca->thr_tids);
ret = write(ca->pfd[1], &ca->thr_tids, sizeof(int) * ca->num_threads);
if (ret == -1)
printf("write failed on pfd[%d] - error (%s)\n",
ca->pfd[1], strerror(errno));
close(ca->pfd[1]);
while (1)
usleep(20000);
return 0;
}
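/* one static stack is enough: each child is clone()d without CLONE_VM,
 * so it runs on its own copy-on-write copy of this array
 */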
static unsigned char child_func_process_stack[STACK_SIZE];
void create_processes(int num_processes, int num_threads, struct child_args proc[])
{
pid_t cpid;
int i, ret;
for (i = 0; i < num_processes; ++i) {
proc[i].num_threads = num_threads;
if (pipe(proc[i].pfd) == -1)
handle_error("pipe() failed");
cpid = clone(child_func_process, child_func_process_stack + STACK_SIZE,
SIGCHLD, &proc[i]);
proc[i].cpid = cpid;
close(proc[i].pfd[1]);
}
for (i = 0; i < num_processes; ++i) {
ret = read(proc[i].pfd[0], &proc[i].thr_tids, sizeof(int) * proc[i].num_threads);
if (ret == -1)
printf("read failed on proc[%d].pfd[0] error (%s)\n",
i, strerror(errno));
close(proc[i].pfd[0]);
}
}
void disp_processes(int num_processes, struct child_args proc[])
{
int i, j;
printf("tid=%d, / tgid=%d / pgid=%d: %lx\n", gettid(), getpid(), getpgid(0),
get_cs_cookie(getpid()));
for (i = 0; i < num_processes; ++i) {
printf(" tid=%d, / tgid=%d / pgid=%d: %lx\n", proc[i].cpid, proc[i].cpid,
getpgid(proc[i].cpid), get_cs_cookie(proc[i].cpid));
for (j = 0; j < proc[i].num_threads; ++j) {
printf(" tid=%d, / tgid=%d / pgid=%d: %lx\n", proc[i].thr_tids[j],
proc[i].cpid, getpgid(0), get_cs_cookie(proc[i].thr_tids[j]));
}
}
puts("\n");
}
static int errors;
#define validate(v) _validate(__LINE__, v, #v)
void _validate(int line, int val, char *msg)
{
if (!val) {
++errors;
printf("(%d) FAILED: %s\n", line, msg);
} else {
printf("(%d) PASSED: %s\n", line, msg);
}
}
int main(int argc, char *argv[])
{
int keypress = 0;
int num_threads = 3;
int delay = 0;
int res = 0;
int pidx;
int pid;
int opt;
while ((opt = getopt(argc, argv, ":hkT:P:d:")) != -1) {
switch (opt) {
case 'P':
num_processes = (int)strtol(optarg, NULL, 10);
break;
case 'T':
num_threads = (int)strtoul(optarg, NULL, 10);
break;
case 'd':
delay = (int)strtol(optarg, NULL, 10);
break;
case 'k':
keypress = 1;
break;
case 'h':
printf(USAGE);
exit(EXIT_SUCCESS);
default:
handle_usage(20, "unknown option");
}
}
if (num_processes < 1 || num_processes > MAX_PROCESSES)
handle_usage(1, "Bad processes value");
if (num_threads < 1 || num_threads > MAX_THREADS)
handle_usage(2, "Bad thread value");
if (keypress)
delay = -1;
srand(time(NULL));
/* put into separate process group */
if (setpgid(0, 0) != 0)
handle_error("process group");
printf("\n## Create a thread/process/process group hiearchy\n");
create_processes(num_processes, num_threads, procs);
need_cleanup = 1;
disp_processes(num_processes, procs);
validate(get_cs_cookie(0) == 0);
printf("\n## Set a cookie on entire process group\n");
if (_prctl(PR_SCHED_CORE, PR_SCHED_CORE_CREATE, 0, PIDTYPE_PGID, 0) < 0)
handle_error("core_sched create failed -- PGID");
disp_processes(num_processes, procs);
validate(get_cs_cookie(0) != 0);
/* get a random process pid */
pidx = rand() % num_processes;
pid = procs[pidx].cpid;
validate(get_cs_cookie(0) == get_cs_cookie(pid));
validate(get_cs_cookie(0) == get_cs_cookie(procs[pidx].thr_tids[0]));
printf("\n## Set a new cookie on entire process/TGID [%d]\n", pid);
if (_prctl(PR_SCHED_CORE, PR_SCHED_CORE_CREATE, pid, PIDTYPE_TGID, 0) < 0)
handle_error("core_sched create failed -- TGID");
disp_processes(num_processes, procs);
validate(get_cs_cookie(0) != get_cs_cookie(pid));
validate(get_cs_cookie(pid) != 0);
validate(get_cs_cookie(pid) == get_cs_cookie(procs[pidx].thr_tids[0]));
printf("\n## Copy the cookie of current/PGID[%d], to pid [%d] as PIDTYPE_PID\n",
getpid(), pid);
if (_prctl(PR_SCHED_CORE, PR_SCHED_CORE_SHARE_TO, pid, PIDTYPE_PID, 0) < 0)
handle_error("core_sched share to itself failed -- PID");
disp_processes(num_processes, procs);
validate(get_cs_cookie(0) == get_cs_cookie(pid));
validate(get_cs_cookie(pid) != 0);
validate(get_cs_cookie(pid) != get_cs_cookie(procs[pidx].thr_tids[0]));
printf("\n## Copy cookie from a thread [%d] to current/PGID [%d] as PIDTYPE_PID\n",
procs[pidx].thr_tids[0], getpid());
if (_prctl(PR_SCHED_CORE, PR_SCHED_CORE_SHARE_FROM, procs[pidx].thr_tids[0],
PIDTYPE_PID, 0) < 0)
handle_error("core_sched share from thread failed -- PID");
disp_processes(num_processes, procs);
validate(get_cs_cookie(0) == get_cs_cookie(procs[pidx].thr_tids[0]));
validate(get_cs_cookie(pid) != get_cs_cookie(procs[pidx].thr_tids[0]));
printf("\n## Copy cookie from current [%d] to current as pidtype PGID\n", getpid());
if (_prctl(PR_SCHED_CORE, PR_SCHED_CORE_SHARE_TO, 0, PIDTYPE_PGID, 0) < 0)
handle_error("core_sched share to self failed -- PGID");
disp_processes(num_processes, procs);
validate(get_cs_cookie(0) == get_cs_cookie(pid));
validate(get_cs_cookie(pid) != 0);
validate(get_cs_cookie(pid) == get_cs_cookie(procs[pidx].thr_tids[0]));
validate(_prctl(PR_SCHED_CORE, PR_SCHED_CORE_MAX, 0, PIDTYPE_PGID, 0) < 0
&& errno == EINVAL);
validate(_prctl(PR_SCHED_CORE, PR_SCHED_CORE_SHARE_TO, 0, PIDTYPE_PGID, 1) < 0
&& errno == EINVAL);
if (errors) {
printf("TESTS FAILED. errors: %d\n", errors);
res = 10;
} else {
printf("SUCCESS !!!\n");
}
if (keypress)
getchar();
else
sleep(delay);
for (pidx = 0; pidx < num_processes; ++pidx)
kill(procs[pidx].cpid, 15);
return res;
}
| linux-master | tools/testing/selftests/sched/cs_prctl_test.c |
// SPDX-License-Identifier: GPL-2.0
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <signal.h>
#include <arpa/inet.h>
#include <sys/socket.h>
#define PORT 12345
#define RUNTIME 10
static struct {
unsigned int timeout;
unsigned int port;
} opts = {
.timeout = RUNTIME,
.port = PORT,
};
static void handler(int sig)
{
_exit(sig == SIGALRM ? 0 : 1);
}
static void set_timeout(void)
{
struct sigaction action = {
.sa_handler = handler,
};
sigaction(SIGALRM, &action, NULL);
alarm(opts.timeout);
}
static void do_connect(const struct sockaddr_in *dst)
{
int s = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
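	/* set non-blocking so connect() returns immediately; this loop only
	 * needs to generate connection attempts as fast as possible
	 */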
if (s >= 0)
fcntl(s, F_SETFL, O_NONBLOCK);
connect(s, (struct sockaddr *)dst, sizeof(*dst));
close(s);
}
static void do_accept(const struct sockaddr_in *src)
{
int c, one = 1, s = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
if (s < 0)
return;
setsockopt(s, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one));
setsockopt(s, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one));
bind(s, (struct sockaddr *)src, sizeof(*src));
listen(s, 16);
c = accept(s, NULL, NULL);
if (c >= 0)
close(c);
close(s);
}
static int accept_loop(void)
{
struct sockaddr_in src = {
.sin_family = AF_INET,
.sin_port = htons(opts.port),
};
inet_pton(AF_INET, "127.0.0.1", &src.sin_addr);
set_timeout();
for (;;)
do_accept(&src);
return 1;
}
static int connect_loop(void)
{
struct sockaddr_in dst = {
.sin_family = AF_INET,
.sin_port = htons(opts.port),
};
inet_pton(AF_INET, "127.0.0.1", &dst.sin_addr);
set_timeout();
for (;;)
do_connect(&dst);
return 1;
}
static void parse_opts(int argc, char **argv)
{
int c;
while ((c = getopt(argc, argv, "t:p:")) != -1) {
switch (c) {
case 't':
opts.timeout = atoi(optarg);
break;
case 'p':
opts.port = atoi(optarg);
break;
}
}
}
int main(int argc, char *argv[])
{
pid_t p;
parse_opts(argc, argv);
p = fork();
if (p < 0)
return 111;
if (p > 0)
return accept_loop();
return connect_loop();
}
| linux-master | tools/testing/selftests/netfilter/connect_close.c |
// SPDX-License-Identifier: GPL-2.0
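/*
 * Opens the kernel audit netlink socket, enables auditing and registers
 * itself as the audit pid, then prints AUDIT_NETFILTER_CFG records as
 * normalized key=value lines with volatile fields stripped.
 */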
#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <poll.h>
#include <signal.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>
#include <linux/audit.h>
#include <linux/netlink.h>
static int fd;
#define MAX_AUDIT_MESSAGE_LENGTH 8970
struct audit_message {
struct nlmsghdr nlh;
union {
struct audit_status s;
char data[MAX_AUDIT_MESSAGE_LENGTH];
} u;
};
int audit_recv(int fd, struct audit_message *rep)
{
struct sockaddr_nl addr;
socklen_t addrlen = sizeof(addr);
int ret;
do {
ret = recvfrom(fd, rep, sizeof(*rep), 0,
(struct sockaddr *)&addr, &addrlen);
} while (ret < 0 && errno == EINTR);
if (ret < 0 ||
addrlen != sizeof(addr) ||
addr.nl_pid != 0 ||
rep->nlh.nlmsg_type == NLMSG_ERROR) /* short-cut for now */
return -1;
return ret;
}
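/*
 * Build and send a single audit request. For AUDIT_SET, @key selects
 * which status field is being changed and @val carries the new value.
 */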
int audit_send(int fd, uint16_t type, uint32_t key, uint32_t val)
{
	static int seq;
struct audit_message msg = {
.nlh = {
.nlmsg_len = NLMSG_SPACE(sizeof(msg.u.s)),
.nlmsg_type = type,
.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK,
.nlmsg_seq = ++seq,
},
.u.s = {
.mask = key,
.enabled = key == AUDIT_STATUS_ENABLED ? val : 0,
.pid = key == AUDIT_STATUS_PID ? val : 0,
}
};
struct sockaddr_nl addr = {
.nl_family = AF_NETLINK,
};
int ret;
do {
ret = sendto(fd, &msg, msg.nlh.nlmsg_len, 0,
(struct sockaddr *)&addr, sizeof(addr));
} while (ret < 0 && errno == EINTR);
if (ret != (int)msg.nlh.nlmsg_len)
return -1;
return 0;
}
int audit_set(int fd, uint32_t key, uint32_t val)
{
struct audit_message rep = { 0 };
int ret;
ret = audit_send(fd, AUDIT_SET, key, val);
if (ret)
return ret;
ret = audit_recv(fd, &rep);
if (ret < 0)
return ret;
return 0;
}
int readlog(int fd)
{
struct audit_message rep = { 0 };
int ret = audit_recv(fd, &rep);
const char *sep = "";
char *k, *v;
if (ret < 0)
return ret;
if (rep.nlh.nlmsg_type != AUDIT_NETFILTER_CFG)
return 0;
/* skip the initial "audit(...): " part */
strtok(rep.u.data, " ");
while ((k = strtok(NULL, "="))) {
v = strtok(NULL, " ");
/* these vary and/or are uninteresting, ignore */
if (!strcmp(k, "pid") ||
!strcmp(k, "comm") ||
!strcmp(k, "subj"))
continue;
/* strip the varying sequence number */
if (!strcmp(k, "table"))
*strchrnul(v, ':') = '\0';
printf("%s%s=%s", sep, k, v);
sep = " ";
}
if (*sep) {
printf("\n");
fflush(stdout);
}
return 0;
}
void cleanup(int sig)
{
audit_set(fd, AUDIT_STATUS_ENABLED, 0);
close(fd);
if (sig)
exit(0);
}
int main(int argc, char **argv)
{
struct sigaction act = {
.sa_handler = cleanup,
};
fd = socket(PF_NETLINK, SOCK_RAW, NETLINK_AUDIT);
if (fd < 0) {
perror("Can't open netlink socket");
return -1;
}
if (sigaction(SIGTERM, &act, NULL) < 0 ||
sigaction(SIGINT, &act, NULL) < 0) {
perror("Can't set signal handler");
close(fd);
return -1;
}
audit_set(fd, AUDIT_STATUS_ENABLED, 1);
audit_set(fd, AUDIT_STATUS_PID, getpid());
while (1)
readlog(fd);
}
| linux-master | tools/testing/selftests/netfilter/audit_logread.c |
// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <unistd.h>
#include <string.h>
#include <time.h>
#include <arpa/inet.h>
#include <libmnl/libmnl.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_queue.h>
struct options {
bool count_packets;
bool gso_enabled;
int verbose;
unsigned int queue_num;
unsigned int timeout;
uint32_t verdict;
uint32_t delay_ms;
};
static unsigned int queue_stats[5];
static struct options opts;
static void help(const char *p)
{
printf("Usage: %s [-c|-v [-vv] ] [-t timeout] [-q queue_num] [-Qdst_queue ] [ -d ms_delay ] [-G]\n", p);
}
static int parse_attr_cb(const struct nlattr *attr, void *data)
{
const struct nlattr **tb = data;
int type = mnl_attr_get_type(attr);
/* skip unsupported attribute in user-space */
if (mnl_attr_type_valid(attr, NFQA_MAX) < 0)
return MNL_CB_OK;
switch (type) {
case NFQA_MARK:
case NFQA_IFINDEX_INDEV:
case NFQA_IFINDEX_OUTDEV:
case NFQA_IFINDEX_PHYSINDEV:
case NFQA_IFINDEX_PHYSOUTDEV:
if (mnl_attr_validate(attr, MNL_TYPE_U32) < 0) {
perror("mnl_attr_validate");
return MNL_CB_ERROR;
}
break;
case NFQA_TIMESTAMP:
if (mnl_attr_validate2(attr, MNL_TYPE_UNSPEC,
sizeof(struct nfqnl_msg_packet_timestamp)) < 0) {
perror("mnl_attr_validate2");
return MNL_CB_ERROR;
}
break;
case NFQA_HWADDR:
if (mnl_attr_validate2(attr, MNL_TYPE_UNSPEC,
sizeof(struct nfqnl_msg_packet_hw)) < 0) {
perror("mnl_attr_validate2");
return MNL_CB_ERROR;
}
break;
case NFQA_PAYLOAD:
break;
}
tb[type] = attr;
return MNL_CB_OK;
}
static int queue_cb(const struct nlmsghdr *nlh, void *data)
{
struct nlattr *tb[NFQA_MAX+1] = { 0 };
struct nfqnl_msg_packet_hdr *ph = NULL;
uint32_t id = 0;
(void)data;
mnl_attr_parse(nlh, sizeof(struct nfgenmsg), parse_attr_cb, tb);
if (tb[NFQA_PACKET_HDR]) {
ph = mnl_attr_get_payload(tb[NFQA_PACKET_HDR]);
id = ntohl(ph->packet_id);
		if (opts.verbose > 0)
			printf("packet hook=%u, hwproto 0x%x",
				ph->hook, ntohs(ph->hw_protocol));
if (ph->hook >= 5) {
fprintf(stderr, "Unknown hook %d\n", ph->hook);
return MNL_CB_ERROR;
}
if (opts.verbose > 0) {
uint32_t skbinfo = 0;
if (tb[NFQA_SKB_INFO])
skbinfo = ntohl(mnl_attr_get_u32(tb[NFQA_SKB_INFO]));
if (skbinfo & NFQA_SKB_CSUMNOTREADY)
printf(" csumnotready");
if (skbinfo & NFQA_SKB_GSO)
printf(" gso");
if (skbinfo & NFQA_SKB_CSUM_NOTVERIFIED)
printf(" csumnotverified");
puts("");
}
if (opts.count_packets)
queue_stats[ph->hook]++;
}
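	/*
	 * Return the packet id encoded on top of MNL_CB_OK; mainloop()
	 * recovers it as ret - MNL_CB_OK before sending the verdict.
	 */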
return MNL_CB_OK + id;
}
static struct nlmsghdr *
nfq_build_cfg_request(char *buf, uint8_t command, int queue_num)
{
struct nlmsghdr *nlh = mnl_nlmsg_put_header(buf);
struct nfqnl_msg_config_cmd cmd = {
.command = command,
.pf = htons(AF_INET),
};
struct nfgenmsg *nfg;
nlh->nlmsg_type = (NFNL_SUBSYS_QUEUE << 8) | NFQNL_MSG_CONFIG;
nlh->nlmsg_flags = NLM_F_REQUEST;
nfg = mnl_nlmsg_put_extra_header(nlh, sizeof(*nfg));
nfg->nfgen_family = AF_UNSPEC;
nfg->version = NFNETLINK_V0;
nfg->res_id = htons(queue_num);
mnl_attr_put(nlh, NFQA_CFG_CMD, sizeof(cmd), &cmd);
return nlh;
}
static struct nlmsghdr *
nfq_build_cfg_params(char *buf, uint8_t mode, int range, int queue_num)
{
struct nlmsghdr *nlh = mnl_nlmsg_put_header(buf);
struct nfqnl_msg_config_params params = {
.copy_range = htonl(range),
.copy_mode = mode,
};
struct nfgenmsg *nfg;
nlh->nlmsg_type = (NFNL_SUBSYS_QUEUE << 8) | NFQNL_MSG_CONFIG;
nlh->nlmsg_flags = NLM_F_REQUEST;
nfg = mnl_nlmsg_put_extra_header(nlh, sizeof(*nfg));
nfg->nfgen_family = AF_UNSPEC;
nfg->version = NFNETLINK_V0;
nfg->res_id = htons(queue_num);
mnl_attr_put(nlh, NFQA_CFG_PARAMS, sizeof(params), ¶ms);
return nlh;
}
static struct nlmsghdr *
nfq_build_verdict(char *buf, int id, int queue_num, uint32_t verd)
{
struct nfqnl_msg_verdict_hdr vh = {
.verdict = htonl(verd),
.id = htonl(id),
};
struct nlmsghdr *nlh;
struct nfgenmsg *nfg;
nlh = mnl_nlmsg_put_header(buf);
nlh->nlmsg_type = (NFNL_SUBSYS_QUEUE << 8) | NFQNL_MSG_VERDICT;
nlh->nlmsg_flags = NLM_F_REQUEST;
nfg = mnl_nlmsg_put_extra_header(nlh, sizeof(*nfg));
nfg->nfgen_family = AF_UNSPEC;
nfg->version = NFNETLINK_V0;
nfg->res_id = htons(queue_num);
mnl_attr_put(nlh, NFQA_VERDICT_HDR, sizeof(vh), &vh);
return nlh;
}
static void print_stats(void)
{
	unsigned int total = 0;
	int i;
	for (i = 0; i < 5; i++) {
		printf("hook %d packets %08u\n", i, queue_stats[i]);
		total += queue_stats[i];
	}
	printf("%u packets total\n", total);
}
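/*
 * Open the nfnetlink_queue socket: bind the configured queue, request
 * full packet copies plus UID/GID (and optionally GSO) metadata, and
 * arm an optional receive timeout.
 */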
struct mnl_socket *open_queue(void)
{
char buf[MNL_SOCKET_BUFFER_SIZE];
unsigned int queue_num;
struct mnl_socket *nl;
struct nlmsghdr *nlh;
struct timeval tv;
uint32_t flags;
nl = mnl_socket_open(NETLINK_NETFILTER);
if (nl == NULL) {
perror("mnl_socket_open");
exit(EXIT_FAILURE);
}
if (mnl_socket_bind(nl, 0, MNL_SOCKET_AUTOPID) < 0) {
perror("mnl_socket_bind");
exit(EXIT_FAILURE);
}
queue_num = opts.queue_num;
nlh = nfq_build_cfg_request(buf, NFQNL_CFG_CMD_BIND, queue_num);
if (mnl_socket_sendto(nl, nlh, nlh->nlmsg_len) < 0) {
perror("mnl_socket_sendto");
exit(EXIT_FAILURE);
}
nlh = nfq_build_cfg_params(buf, NFQNL_COPY_PACKET, 0xFFFF, queue_num);
flags = opts.gso_enabled ? NFQA_CFG_F_GSO : 0;
flags |= NFQA_CFG_F_UID_GID;
mnl_attr_put_u32(nlh, NFQA_CFG_FLAGS, htonl(flags));
mnl_attr_put_u32(nlh, NFQA_CFG_MASK, htonl(flags));
if (mnl_socket_sendto(nl, nlh, nlh->nlmsg_len) < 0) {
perror("mnl_socket_sendto");
exit(EXIT_FAILURE);
}
memset(&tv, 0, sizeof(tv));
tv.tv_sec = opts.timeout;
if (opts.timeout && setsockopt(mnl_socket_get_fd(nl),
SOL_SOCKET, SO_RCVTIMEO,
&tv, sizeof(tv))) {
perror("setsockopt(SO_RCVTIMEO)");
exit(EXIT_FAILURE);
}
return nl;
}
static void sleep_ms(uint32_t delay)
{
struct timespec ts = { .tv_sec = delay / 1000 };
delay %= 1000;
ts.tv_nsec = delay * 1000llu * 1000llu;
nanosleep(&ts, NULL);
}
static int mainloop(void)
{
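	/* Room for a full 64KB packet payload plus netlink metadata. */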
unsigned int buflen = 64 * 1024 + MNL_SOCKET_BUFFER_SIZE;
struct mnl_socket *nl;
struct nlmsghdr *nlh;
unsigned int portid;
char *buf;
int ret;
buf = malloc(buflen);
if (!buf) {
perror("malloc");
exit(EXIT_FAILURE);
}
nl = open_queue();
portid = mnl_socket_get_portid(nl);
for (;;) {
uint32_t id;
ret = mnl_socket_recvfrom(nl, buf, buflen);
if (ret == -1) {
if (errno == ENOBUFS || errno == EINTR)
continue;
if (errno == EAGAIN) {
errno = 0;
ret = 0;
break;
}
perror("mnl_socket_recvfrom");
exit(EXIT_FAILURE);
}
ret = mnl_cb_run(buf, ret, 0, portid, queue_cb, NULL);
if (ret < 0) {
perror("mnl_cb_run");
exit(EXIT_FAILURE);
}
id = ret - MNL_CB_OK;
if (opts.delay_ms)
sleep_ms(opts.delay_ms);
nlh = nfq_build_verdict(buf, id, opts.queue_num, opts.verdict);
if (mnl_socket_sendto(nl, nlh, nlh->nlmsg_len) < 0) {
perror("mnl_socket_sendto");
exit(EXIT_FAILURE);
}
}
mnl_socket_close(nl);
return ret;
}
static void parse_opts(int argc, char **argv)
{
int c;
while ((c = getopt(argc, argv, "chvt:q:Q:d:G")) != -1) {
switch (c) {
case 'c':
opts.count_packets = true;
break;
case 'h':
help(argv[0]);
exit(0);
break;
case 'q':
opts.queue_num = atoi(optarg);
if (opts.queue_num > 0xffff)
opts.queue_num = 0;
break;
case 'Q':
opts.verdict = atoi(optarg);
if (opts.verdict > 0xffff) {
fprintf(stderr, "Expected destination queue number\n");
exit(1);
}
opts.verdict <<= 16;
opts.verdict |= NF_QUEUE;
break;
case 'd':
opts.delay_ms = atoi(optarg);
if (opts.delay_ms == 0) {
fprintf(stderr, "Expected nonzero delay (in milliseconds)\n");
exit(1);
}
break;
case 't':
opts.timeout = atoi(optarg);
break;
case 'G':
opts.gso_enabled = false;
break;
case 'v':
opts.verbose++;
break;
}
}
if (opts.verdict != NF_ACCEPT && (opts.verdict >> 16 == opts.queue_num)) {
fprintf(stderr, "Cannot use same destination and source queue\n");
exit(1);
}
}
int main(int argc, char *argv[])
{
int ret;
opts.verdict = NF_ACCEPT;
opts.gso_enabled = true;
parse_opts(argc, argv);
ret = mainloop();
if (opts.count_packets)
print_stats();
return ret;
}
| linux-master | tools/testing/selftests/netfilter/nf-queue.c |
// SPDX-License-Identifier: GPL-2.0
#define _GNU_SOURCE
#include <linux/limits.h>
#include <sys/sysinfo.h>
#include <sys/wait.h>
#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <time.h>
#include "../kselftest.h"
#include "cgroup_util.h"
enum hog_clock_type {
// Count elapsed time using the CLOCK_PROCESS_CPUTIME_ID clock.
CPU_HOG_CLOCK_PROCESS,
// Count elapsed time using system wallclock time.
CPU_HOG_CLOCK_WALL,
};
struct cpu_hogger {
char *cgroup;
pid_t pid;
long usage;
};
struct cpu_hog_func_param {
int nprocs;
struct timespec ts;
enum hog_clock_type clock_type;
};
/*
* This test creates two nested cgroups with and without enabling
* the cpu controller.
*/
static int test_cpucg_subtree_control(const char *root)
{
char *parent = NULL, *child = NULL, *parent2 = NULL, *child2 = NULL;
int ret = KSFT_FAIL;
// Create two nested cgroups with the cpu controller enabled.
parent = cg_name(root, "cpucg_test_0");
if (!parent)
goto cleanup;
if (cg_create(parent))
goto cleanup;
if (cg_write(parent, "cgroup.subtree_control", "+cpu"))
goto cleanup;
child = cg_name(parent, "cpucg_test_child");
if (!child)
goto cleanup;
if (cg_create(child))
goto cleanup;
if (cg_read_strstr(child, "cgroup.controllers", "cpu"))
goto cleanup;
// Create two nested cgroups without enabling the cpu controller.
parent2 = cg_name(root, "cpucg_test_1");
if (!parent2)
goto cleanup;
if (cg_create(parent2))
goto cleanup;
child2 = cg_name(parent2, "cpucg_test_child");
if (!child2)
goto cleanup;
if (cg_create(child2))
goto cleanup;
if (!cg_read_strstr(child2, "cgroup.controllers", "cpu"))
goto cleanup;
ret = KSFT_PASS;
cleanup:
cg_destroy(child);
free(child);
cg_destroy(child2);
free(child2);
cg_destroy(parent);
free(parent);
cg_destroy(parent2);
free(parent2);
return ret;
}
static void *hog_cpu_thread_func(void *arg)
{
while (1)
;
return NULL;
}
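/*
 * Returns lhs - rhs, clamped at zero if rhs is larger than lhs.
 */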
static struct timespec
timespec_sub(const struct timespec *lhs, const struct timespec *rhs)
{
struct timespec zero = {
.tv_sec = 0,
.tv_nsec = 0,
};
struct timespec ret;
if (lhs->tv_sec < rhs->tv_sec)
return zero;
ret.tv_sec = lhs->tv_sec - rhs->tv_sec;
if (lhs->tv_nsec < rhs->tv_nsec) {
if (ret.tv_sec == 0)
return zero;
ret.tv_sec--;
ret.tv_nsec = NSEC_PER_SEC - rhs->tv_nsec + lhs->tv_nsec;
} else
ret.tv_nsec = lhs->tv_nsec - rhs->tv_nsec;
return ret;
}
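/*
 * Spawn param->nprocs busy-loop threads, then sleep until the requested
 * time has elapsed on the chosen clock (process CPU time or wall time).
 */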
static int hog_cpus_timed(const char *cgroup, void *arg)
{
const struct cpu_hog_func_param *param =
(struct cpu_hog_func_param *)arg;
struct timespec ts_run = param->ts;
struct timespec ts_remaining = ts_run;
struct timespec ts_start;
int i, ret;
ret = clock_gettime(CLOCK_MONOTONIC, &ts_start);
if (ret != 0)
return ret;
for (i = 0; i < param->nprocs; i++) {
pthread_t tid;
ret = pthread_create(&tid, NULL, &hog_cpu_thread_func, NULL);
if (ret != 0)
return ret;
}
while (ts_remaining.tv_sec > 0 || ts_remaining.tv_nsec > 0) {
struct timespec ts_total;
ret = nanosleep(&ts_remaining, NULL);
if (ret && errno != EINTR)
return ret;
if (param->clock_type == CPU_HOG_CLOCK_PROCESS) {
ret = clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &ts_total);
if (ret != 0)
return ret;
} else {
struct timespec ts_current;
ret = clock_gettime(CLOCK_MONOTONIC, &ts_current);
if (ret != 0)
return ret;
ts_total = timespec_sub(&ts_current, &ts_start);
}
ts_remaining = timespec_sub(&ts_run, &ts_total);
}
return 0;
}
/*
* Creates a cpu cgroup, burns a CPU for a few quanta, and verifies that
* cpu.stat shows the expected output.
*/
static int test_cpucg_stats(const char *root)
{
int ret = KSFT_FAIL;
long usage_usec, user_usec, system_usec;
long usage_seconds = 2;
long expected_usage_usec = usage_seconds * USEC_PER_SEC;
char *cpucg;
cpucg = cg_name(root, "cpucg_test");
if (!cpucg)
goto cleanup;
if (cg_create(cpucg))
goto cleanup;
usage_usec = cg_read_key_long(cpucg, "cpu.stat", "usage_usec");
user_usec = cg_read_key_long(cpucg, "cpu.stat", "user_usec");
system_usec = cg_read_key_long(cpucg, "cpu.stat", "system_usec");
if (usage_usec != 0 || user_usec != 0 || system_usec != 0)
goto cleanup;
struct cpu_hog_func_param param = {
.nprocs = 1,
.ts = {
.tv_sec = usage_seconds,
.tv_nsec = 0,
},
.clock_type = CPU_HOG_CLOCK_PROCESS,
};
if (cg_run(cpucg, hog_cpus_timed, (void *)¶m))
goto cleanup;
usage_usec = cg_read_key_long(cpucg, "cpu.stat", "usage_usec");
user_usec = cg_read_key_long(cpucg, "cpu.stat", "user_usec");
if (user_usec <= 0)
goto cleanup;
if (!values_close(usage_usec, expected_usage_usec, 1))
goto cleanup;
ret = KSFT_PASS;
cleanup:
cg_destroy(cpucg);
free(cpucg);
return ret;
}
static int
run_cpucg_weight_test(
const char *root,
pid_t (*spawn_child)(const struct cpu_hogger *child),
int (*validate)(const struct cpu_hogger *children, int num_children))
{
int ret = KSFT_FAIL, i;
char *parent = NULL;
struct cpu_hogger children[3] = {NULL};
parent = cg_name(root, "cpucg_test_0");
if (!parent)
goto cleanup;
if (cg_create(parent))
goto cleanup;
if (cg_write(parent, "cgroup.subtree_control", "+cpu"))
goto cleanup;
for (i = 0; i < ARRAY_SIZE(children); i++) {
children[i].cgroup = cg_name_indexed(parent, "cpucg_child", i);
if (!children[i].cgroup)
goto cleanup;
if (cg_create(children[i].cgroup))
goto cleanup;
if (cg_write_numeric(children[i].cgroup, "cpu.weight",
50 * (i + 1)))
goto cleanup;
}
for (i = 0; i < ARRAY_SIZE(children); i++) {
pid_t pid = spawn_child(&children[i]);
if (pid <= 0)
goto cleanup;
children[i].pid = pid;
}
for (i = 0; i < ARRAY_SIZE(children); i++) {
int retcode;
waitpid(children[i].pid, &retcode, 0);
if (!WIFEXITED(retcode))
goto cleanup;
if (WEXITSTATUS(retcode))
goto cleanup;
}
for (i = 0; i < ARRAY_SIZE(children); i++)
children[i].usage = cg_read_key_long(children[i].cgroup,
"cpu.stat", "usage_usec");
if (validate(children, ARRAY_SIZE(children)))
goto cleanup;
ret = KSFT_PASS;
cleanup:
for (i = 0; i < ARRAY_SIZE(children); i++) {
cg_destroy(children[i].cgroup);
free(children[i].cgroup);
}
cg_destroy(parent);
free(parent);
return ret;
}
static pid_t weight_hog_ncpus(const struct cpu_hogger *child, int ncpus)
{
long usage_seconds = 10;
struct cpu_hog_func_param param = {
.nprocs = ncpus,
.ts = {
.tv_sec = usage_seconds,
.tv_nsec = 0,
},
.clock_type = CPU_HOG_CLOCK_WALL,
};
return cg_run_nowait(child->cgroup, hog_cpus_timed, (void *)¶m);
}
static pid_t weight_hog_all_cpus(const struct cpu_hogger *child)
{
return weight_hog_ncpus(child, get_nprocs());
}
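/*
 * With weights of 50/100/150 and all CPUs contended, usage should scale
 * with weight: the delta between neighboring children should roughly
 * equal the usage of the first child.
 */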
static int
overprovision_validate(const struct cpu_hogger *children, int num_children)
{
int ret = KSFT_FAIL, i;
for (i = 0; i < num_children - 1; i++) {
long delta;
if (children[i + 1].usage <= children[i].usage)
goto cleanup;
delta = children[i + 1].usage - children[i].usage;
if (!values_close(delta, children[0].usage, 35))
goto cleanup;
}
ret = KSFT_PASS;
cleanup:
return ret;
}
/*
* First, this test creates the following hierarchy:
* A
* A/B cpu.weight = 50
* A/C cpu.weight = 100
* A/D cpu.weight = 150
*
* A separate process is then created for each child cgroup which spawns as
* many threads as there are cores, and hogs each CPU as much as possible
* for some time interval.
*
* Once all of the children have exited, we verify that each child cgroup
* was given proportional runtime as informed by their cpu.weight.
*/
static int test_cpucg_weight_overprovisioned(const char *root)
{
return run_cpucg_weight_test(root, weight_hog_all_cpus,
overprovision_validate);
}
static pid_t weight_hog_one_cpu(const struct cpu_hogger *child)
{
return weight_hog_ncpus(child, 1);
}
static int
underprovision_validate(const struct cpu_hogger *children, int num_children)
{
int ret = KSFT_FAIL, i;
for (i = 0; i < num_children - 1; i++) {
if (!values_close(children[i + 1].usage, children[0].usage, 15))
goto cleanup;
}
ret = KSFT_PASS;
cleanup:
return ret;
}
/*
* First, this test creates the following hierarchy:
* A
* A/B cpu.weight = 50
* A/C cpu.weight = 100
* A/D cpu.weight = 150
*
* A separate process is then created for each child cgroup which spawns a
 * single thread that hogs a CPU. The test is only run on systems that
 * have at least one core available for each child thread.
*
* Once all of the children have exited, we verify that each child cgroup
* had roughly the same runtime despite having different cpu.weight.
*/
static int test_cpucg_weight_underprovisioned(const char *root)
{
// Only run the test if there are enough cores to avoid overprovisioning
// the system.
if (get_nprocs() < 4)
return KSFT_SKIP;
return run_cpucg_weight_test(root, weight_hog_one_cpu,
underprovision_validate);
}
static int
run_cpucg_nested_weight_test(const char *root, bool overprovisioned)
{
int ret = KSFT_FAIL, i;
char *parent = NULL, *child = NULL;
struct cpu_hogger leaf[3] = {NULL};
long nested_leaf_usage, child_usage;
int nprocs = get_nprocs();
if (!overprovisioned) {
if (nprocs < 4)
/*
* Only run the test if there are enough cores to avoid overprovisioning
* the system.
*/
return KSFT_SKIP;
nprocs /= 4;
}
parent = cg_name(root, "cpucg_test");
child = cg_name(parent, "cpucg_child");
if (!parent || !child)
goto cleanup;
if (cg_create(parent))
goto cleanup;
if (cg_write(parent, "cgroup.subtree_control", "+cpu"))
goto cleanup;
if (cg_create(child))
goto cleanup;
if (cg_write(child, "cgroup.subtree_control", "+cpu"))
goto cleanup;
if (cg_write(child, "cpu.weight", "1000"))
goto cleanup;
for (i = 0; i < ARRAY_SIZE(leaf); i++) {
const char *ancestor;
long weight;
if (i == 0) {
ancestor = parent;
weight = 1000;
} else {
ancestor = child;
weight = 5000;
}
leaf[i].cgroup = cg_name_indexed(ancestor, "cpucg_leaf", i);
if (!leaf[i].cgroup)
goto cleanup;
if (cg_create(leaf[i].cgroup))
goto cleanup;
if (cg_write_numeric(leaf[i].cgroup, "cpu.weight", weight))
goto cleanup;
}
for (i = 0; i < ARRAY_SIZE(leaf); i++) {
pid_t pid;
struct cpu_hog_func_param param = {
.nprocs = nprocs,
.ts = {
.tv_sec = 10,
.tv_nsec = 0,
},
.clock_type = CPU_HOG_CLOCK_WALL,
};
pid = cg_run_nowait(leaf[i].cgroup, hog_cpus_timed,
(void *)¶m);
if (pid <= 0)
goto cleanup;
leaf[i].pid = pid;
}
for (i = 0; i < ARRAY_SIZE(leaf); i++) {
int retcode;
waitpid(leaf[i].pid, &retcode, 0);
if (!WIFEXITED(retcode))
goto cleanup;
if (WEXITSTATUS(retcode))
goto cleanup;
}
for (i = 0; i < ARRAY_SIZE(leaf); i++) {
leaf[i].usage = cg_read_key_long(leaf[i].cgroup,
"cpu.stat", "usage_usec");
if (leaf[i].usage <= 0)
goto cleanup;
}
nested_leaf_usage = leaf[1].usage + leaf[2].usage;
if (overprovisioned) {
if (!values_close(leaf[0].usage, nested_leaf_usage, 15))
goto cleanup;
} else if (!values_close(leaf[0].usage * 2, nested_leaf_usage, 15))
goto cleanup;
child_usage = cg_read_key_long(child, "cpu.stat", "usage_usec");
if (child_usage <= 0)
goto cleanup;
if (!values_close(child_usage, nested_leaf_usage, 1))
goto cleanup;
ret = KSFT_PASS;
cleanup:
for (i = 0; i < ARRAY_SIZE(leaf); i++) {
cg_destroy(leaf[i].cgroup);
free(leaf[i].cgroup);
}
cg_destroy(child);
free(child);
cg_destroy(parent);
free(parent);
return ret;
}
/*
* First, this test creates the following hierarchy:
* A
* A/B cpu.weight = 1000
* A/C cpu.weight = 1000
* A/C/D cpu.weight = 5000
* A/C/E cpu.weight = 5000
*
 * A separate process is then created for each leaf, which spawns nproc threads
* that burn a CPU for a few seconds.
*
* Once all of those processes have exited, we verify that each of the leaf
* cgroups have roughly the same usage from cpu.stat.
*/
static int
test_cpucg_nested_weight_overprovisioned(const char *root)
{
return run_cpucg_nested_weight_test(root, true);
}
/*
* First, this test creates the following hierarchy:
* A
* A/B cpu.weight = 1000
* A/C cpu.weight = 1000
* A/C/D cpu.weight = 5000
* A/C/E cpu.weight = 5000
*
 * A separate process is then created for each leaf, which spawns nproc / 4
 * threads that burn a CPU for a few seconds.
*
* Once all of those processes have exited, we verify that each of the leaf
* cgroups have roughly the same usage from cpu.stat.
*/
static int
test_cpucg_nested_weight_underprovisioned(const char *root)
{
return run_cpucg_nested_weight_test(root, false);
}
/*
* This test creates a cgroup with some maximum value within a period, and
* verifies that a process in the cgroup is not overscheduled.
*/
static int test_cpucg_max(const char *root)
{
int ret = KSFT_FAIL;
long usage_usec, user_usec;
long usage_seconds = 1;
long expected_usage_usec = usage_seconds * USEC_PER_SEC;
char *cpucg;
cpucg = cg_name(root, "cpucg_test");
if (!cpucg)
goto cleanup;
if (cg_create(cpucg))
goto cleanup;
if (cg_write(cpucg, "cpu.max", "1000"))
goto cleanup;
struct cpu_hog_func_param param = {
.nprocs = 1,
.ts = {
.tv_sec = usage_seconds,
.tv_nsec = 0,
},
.clock_type = CPU_HOG_CLOCK_WALL,
};
if (cg_run(cpucg, hog_cpus_timed, (void *)¶m))
goto cleanup;
usage_usec = cg_read_key_long(cpucg, "cpu.stat", "usage_usec");
user_usec = cg_read_key_long(cpucg, "cpu.stat", "user_usec");
if (user_usec <= 0)
goto cleanup;
if (user_usec >= expected_usage_usec)
goto cleanup;
if (values_close(usage_usec, expected_usage_usec, 95))
goto cleanup;
ret = KSFT_PASS;
cleanup:
cg_destroy(cpucg);
free(cpucg);
return ret;
}
/*
* This test verifies that a process inside of a nested cgroup whose parent
* group has a cpu.max value set, is properly throttled.
*/
static int test_cpucg_max_nested(const char *root)
{
int ret = KSFT_FAIL;
long usage_usec, user_usec;
long usage_seconds = 1;
long expected_usage_usec = usage_seconds * USEC_PER_SEC;
char *parent, *child;
parent = cg_name(root, "cpucg_parent");
child = cg_name(parent, "cpucg_child");
if (!parent || !child)
goto cleanup;
if (cg_create(parent))
goto cleanup;
if (cg_write(parent, "cgroup.subtree_control", "+cpu"))
goto cleanup;
if (cg_create(child))
goto cleanup;
if (cg_write(parent, "cpu.max", "1000"))
goto cleanup;
struct cpu_hog_func_param param = {
.nprocs = 1,
.ts = {
.tv_sec = usage_seconds,
.tv_nsec = 0,
},
.clock_type = CPU_HOG_CLOCK_WALL,
};
if (cg_run(child, hog_cpus_timed, (void *)¶m))
goto cleanup;
usage_usec = cg_read_key_long(child, "cpu.stat", "usage_usec");
user_usec = cg_read_key_long(child, "cpu.stat", "user_usec");
if (user_usec <= 0)
goto cleanup;
if (user_usec >= expected_usage_usec)
goto cleanup;
if (values_close(usage_usec, expected_usage_usec, 95))
goto cleanup;
ret = KSFT_PASS;
cleanup:
cg_destroy(child);
free(child);
cg_destroy(parent);
free(parent);
return ret;
}
#define T(x) { x, #x }
struct cpucg_test {
int (*fn)(const char *root);
const char *name;
} tests[] = {
T(test_cpucg_subtree_control),
T(test_cpucg_stats),
T(test_cpucg_weight_overprovisioned),
T(test_cpucg_weight_underprovisioned),
T(test_cpucg_nested_weight_overprovisioned),
T(test_cpucg_nested_weight_underprovisioned),
T(test_cpucg_max),
T(test_cpucg_max_nested),
};
#undef T
int main(int argc, char *argv[])
{
char root[PATH_MAX];
int i, ret = EXIT_SUCCESS;
if (cg_find_unified_root(root, sizeof(root)))
ksft_exit_skip("cgroup v2 isn't mounted\n");
if (cg_read_strstr(root, "cgroup.subtree_control", "cpu"))
if (cg_write(root, "cgroup.subtree_control", "+cpu"))
ksft_exit_skip("Failed to set cpu controller\n");
for (i = 0; i < ARRAY_SIZE(tests); i++) {
switch (tests[i].fn(root)) {
case KSFT_PASS:
ksft_test_result_pass("%s\n", tests[i].name);
break;
case KSFT_SKIP:
ksft_test_result_skip("%s\n", tests[i].name);
break;
default:
ret = EXIT_FAILURE;
ksft_test_result_fail("%s\n", tests[i].name);
break;
}
}
return ret;
}
| linux-master | tools/testing/selftests/cgroup/test_cpu.c |
/* SPDX-License-Identifier: GPL-2.0 */
#include <stdbool.h>
#include <linux/limits.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <unistd.h>
#include <stdio.h>
#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <sys/wait.h>
#include "../kselftest.h"
#include "cgroup_util.h"
#define DEBUG
#ifdef DEBUG
#define debug(args...) fprintf(stderr, args)
#else
#define debug(args...)
#endif
/*
* Check if the cgroup is frozen by looking at the cgroup.events::frozen value.
*/
static int cg_check_frozen(const char *cgroup, bool frozen)
{
if (frozen) {
if (cg_read_strstr(cgroup, "cgroup.events", "frozen 1") != 0) {
debug("Cgroup %s isn't frozen\n", cgroup);
return -1;
}
} else {
/*
* Check the cgroup.events::frozen value.
*/
if (cg_read_strstr(cgroup, "cgroup.events", "frozen 0") != 0) {
debug("Cgroup %s is frozen\n", cgroup);
return -1;
}
}
return 0;
}
/*
* Freeze the given cgroup.
*/
static int cg_freeze_nowait(const char *cgroup, bool freeze)
{
return cg_write(cgroup, "cgroup.freeze", freeze ? "1" : "0");
}
/*
* Attach a task to the given cgroup and wait for a cgroup frozen event.
* All transient events (e.g. populated) are ignored.
*/
static int cg_enter_and_wait_for_frozen(const char *cgroup, int pid,
bool frozen)
{
int fd, ret = -1;
int attempts;
fd = cg_prepare_for_wait(cgroup);
if (fd < 0)
return fd;
ret = cg_enter(cgroup, pid);
if (ret)
goto out;
	for (attempts = 0; attempts < 10; attempts++) {
		ret = cg_wait_for(fd);
		if (ret)
			break;
		/* Ignore transient events; stop once the desired state is seen. */
		ret = cg_check_frozen(cgroup, frozen);
		if (!ret)
			break;
	}
out:
close(fd);
return ret;
}
/*
* Freeze the given cgroup and wait for the inotify signal.
* If there are no events in 10 seconds, treat this as an error.
* Then check that the cgroup is in the desired state.
*/
static int cg_freeze_wait(const char *cgroup, bool freeze)
{
int fd, ret = -1;
fd = cg_prepare_for_wait(cgroup);
if (fd < 0)
return fd;
ret = cg_freeze_nowait(cgroup, freeze);
if (ret) {
debug("Error: cg_freeze_nowait() failed\n");
goto out;
}
ret = cg_wait_for(fd);
if (ret)
goto out;
ret = cg_check_frozen(cgroup, freeze);
out:
close(fd);
return ret;
}
/*
* A simple process running in a sleep loop until being
* re-parented.
*/
static int child_fn(const char *cgroup, void *arg)
{
int ppid = getppid();
while (getppid() == ppid)
usleep(1000);
return getppid() == ppid;
}
/*
 * A simple test for the cgroup freezer: populate the cgroup with 100
 * running processes, freeze it, then unfreeze it. Finally, kill all
 * processes and destroy the cgroup.
*/
static int test_cgfreezer_simple(const char *root)
{
int ret = KSFT_FAIL;
char *cgroup = NULL;
int i;
cgroup = cg_name(root, "cg_test_simple");
if (!cgroup)
goto cleanup;
if (cg_create(cgroup))
goto cleanup;
for (i = 0; i < 100; i++)
cg_run_nowait(cgroup, child_fn, NULL);
if (cg_wait_for_proc_count(cgroup, 100))
goto cleanup;
if (cg_check_frozen(cgroup, false))
goto cleanup;
if (cg_freeze_wait(cgroup, true))
goto cleanup;
if (cg_freeze_wait(cgroup, false))
goto cleanup;
ret = KSFT_PASS;
cleanup:
if (cgroup)
cg_destroy(cgroup);
free(cgroup);
return ret;
}
/*
* The test creates the following hierarchy:
* A
* / / \ \
* B E I K
* /\ |
* C D F
* |
* G
* |
* H
*
* with a process in C, H and 3 processes in K.
* Then it tries to freeze and unfreeze the whole tree.
*/
static int test_cgfreezer_tree(const char *root)
{
char *cgroup[10] = {0};
int ret = KSFT_FAIL;
int i;
cgroup[0] = cg_name(root, "cg_test_tree_A");
if (!cgroup[0])
goto cleanup;
cgroup[1] = cg_name(cgroup[0], "B");
if (!cgroup[1])
goto cleanup;
cgroup[2] = cg_name(cgroup[1], "C");
if (!cgroup[2])
goto cleanup;
cgroup[3] = cg_name(cgroup[1], "D");
if (!cgroup[3])
goto cleanup;
cgroup[4] = cg_name(cgroup[0], "E");
if (!cgroup[4])
goto cleanup;
cgroup[5] = cg_name(cgroup[4], "F");
if (!cgroup[5])
goto cleanup;
cgroup[6] = cg_name(cgroup[5], "G");
if (!cgroup[6])
goto cleanup;
cgroup[7] = cg_name(cgroup[6], "H");
if (!cgroup[7])
goto cleanup;
cgroup[8] = cg_name(cgroup[0], "I");
if (!cgroup[8])
goto cleanup;
cgroup[9] = cg_name(cgroup[0], "K");
if (!cgroup[9])
goto cleanup;
for (i = 0; i < 10; i++)
if (cg_create(cgroup[i]))
goto cleanup;
cg_run_nowait(cgroup[2], child_fn, NULL);
cg_run_nowait(cgroup[7], child_fn, NULL);
cg_run_nowait(cgroup[9], child_fn, NULL);
cg_run_nowait(cgroup[9], child_fn, NULL);
cg_run_nowait(cgroup[9], child_fn, NULL);
/*
* Wait until all child processes will enter
* corresponding cgroups.
*/
if (cg_wait_for_proc_count(cgroup[2], 1) ||
cg_wait_for_proc_count(cgroup[7], 1) ||
cg_wait_for_proc_count(cgroup[9], 3))
goto cleanup;
/*
* Freeze B.
*/
if (cg_freeze_wait(cgroup[1], true))
goto cleanup;
/*
* Freeze F.
*/
if (cg_freeze_wait(cgroup[5], true))
goto cleanup;
/*
* Freeze G.
*/
if (cg_freeze_wait(cgroup[6], true))
goto cleanup;
/*
* Check that A and E are not frozen.
*/
if (cg_check_frozen(cgroup[0], false))
goto cleanup;
if (cg_check_frozen(cgroup[4], false))
goto cleanup;
/*
* Freeze A. Check that A, B and E are frozen.
*/
if (cg_freeze_wait(cgroup[0], true))
goto cleanup;
if (cg_check_frozen(cgroup[1], true))
goto cleanup;
if (cg_check_frozen(cgroup[4], true))
goto cleanup;
/*
* Unfreeze B, F and G
*/
if (cg_freeze_nowait(cgroup[1], false))
goto cleanup;
if (cg_freeze_nowait(cgroup[5], false))
goto cleanup;
if (cg_freeze_nowait(cgroup[6], false))
goto cleanup;
/*
* Check that C and H are still frozen.
*/
if (cg_check_frozen(cgroup[2], true))
goto cleanup;
if (cg_check_frozen(cgroup[7], true))
goto cleanup;
/*
* Unfreeze A. Check that A, C and K are not frozen.
*/
if (cg_freeze_wait(cgroup[0], false))
goto cleanup;
if (cg_check_frozen(cgroup[2], false))
goto cleanup;
if (cg_check_frozen(cgroup[9], false))
goto cleanup;
ret = KSFT_PASS;
cleanup:
for (i = 9; i >= 0 && cgroup[i]; i--) {
cg_destroy(cgroup[i]);
free(cgroup[i]);
}
return ret;
}
/*
* A fork bomb emulator.
*/
static int forkbomb_fn(const char *cgroup, void *arg)
{
int ppid;
fork();
fork();
ppid = getppid();
while (getppid() == ppid)
usleep(1000);
return getppid() == ppid;
}
/*
* The test runs a fork bomb in a cgroup and tries to freeze it.
* Then it kills all processes and checks that cgroup isn't populated
* anymore.
*/
static int test_cgfreezer_forkbomb(const char *root)
{
int ret = KSFT_FAIL;
char *cgroup = NULL;
cgroup = cg_name(root, "cg_forkbomb_test");
if (!cgroup)
goto cleanup;
if (cg_create(cgroup))
goto cleanup;
cg_run_nowait(cgroup, forkbomb_fn, NULL);
usleep(100000);
if (cg_freeze_wait(cgroup, true))
goto cleanup;
if (cg_killall(cgroup))
goto cleanup;
if (cg_wait_for_proc_count(cgroup, 0))
goto cleanup;
ret = KSFT_PASS;
cleanup:
if (cgroup)
cg_destroy(cgroup);
free(cgroup);
return ret;
}
/*
 * The test creates a cgroup and freezes it. Then it creates a child cgroup
* and populates it with a task. After that it checks that the child cgroup
* is frozen and the parent cgroup remains frozen too.
*/
static int test_cgfreezer_mkdir(const char *root)
{
int ret = KSFT_FAIL;
char *parent, *child = NULL;
int pid;
parent = cg_name(root, "cg_test_mkdir_A");
if (!parent)
goto cleanup;
child = cg_name(parent, "cg_test_mkdir_B");
if (!child)
goto cleanup;
if (cg_create(parent))
goto cleanup;
if (cg_freeze_wait(parent, true))
goto cleanup;
if (cg_create(child))
goto cleanup;
pid = cg_run_nowait(child, child_fn, NULL);
if (pid < 0)
goto cleanup;
if (cg_wait_for_proc_count(child, 1))
goto cleanup;
if (cg_check_frozen(child, true))
goto cleanup;
if (cg_check_frozen(parent, true))
goto cleanup;
ret = KSFT_PASS;
cleanup:
if (child)
cg_destroy(child);
free(child);
if (parent)
cg_destroy(parent);
free(parent);
return ret;
}
/*
* The test creates two nested cgroups, freezes the parent
* and removes the child. Then it checks that the parent cgroup
* remains frozen and it's possible to create a new child
* without unfreezing. The new child is frozen too.
*/
static int test_cgfreezer_rmdir(const char *root)
{
int ret = KSFT_FAIL;
char *parent, *child = NULL;
parent = cg_name(root, "cg_test_rmdir_A");
if (!parent)
goto cleanup;
child = cg_name(parent, "cg_test_rmdir_B");
if (!child)
goto cleanup;
if (cg_create(parent))
goto cleanup;
if (cg_create(child))
goto cleanup;
if (cg_freeze_wait(parent, true))
goto cleanup;
if (cg_destroy(child))
goto cleanup;
if (cg_check_frozen(parent, true))
goto cleanup;
if (cg_create(child))
goto cleanup;
if (cg_check_frozen(child, true))
goto cleanup;
ret = KSFT_PASS;
cleanup:
if (child)
cg_destroy(child);
free(child);
if (parent)
cg_destroy(parent);
free(parent);
return ret;
}
/*
* The test creates two cgroups: A and B, runs a process in A
* and performs several migrations:
* 1) A (running) -> B (frozen)
* 2) B (frozen) -> A (running)
* 3) A (frozen) -> B (frozen)
*
* On each step it checks the actual state of both cgroups.
*/
static int test_cgfreezer_migrate(const char *root)
{
int ret = KSFT_FAIL;
char *cgroup[2] = {0};
int pid;
cgroup[0] = cg_name(root, "cg_test_migrate_A");
if (!cgroup[0])
goto cleanup;
cgroup[1] = cg_name(root, "cg_test_migrate_B");
if (!cgroup[1])
goto cleanup;
if (cg_create(cgroup[0]))
goto cleanup;
if (cg_create(cgroup[1]))
goto cleanup;
pid = cg_run_nowait(cgroup[0], child_fn, NULL);
if (pid < 0)
goto cleanup;
if (cg_wait_for_proc_count(cgroup[0], 1))
goto cleanup;
/*
* Migrate from A (running) to B (frozen)
*/
if (cg_freeze_wait(cgroup[1], true))
goto cleanup;
if (cg_enter_and_wait_for_frozen(cgroup[1], pid, true))
goto cleanup;
if (cg_check_frozen(cgroup[0], false))
goto cleanup;
/*
* Migrate from B (frozen) to A (running)
*/
if (cg_enter_and_wait_for_frozen(cgroup[0], pid, false))
goto cleanup;
if (cg_check_frozen(cgroup[1], true))
goto cleanup;
/*
* Migrate from A (frozen) to B (frozen)
*/
if (cg_freeze_wait(cgroup[0], true))
goto cleanup;
if (cg_enter_and_wait_for_frozen(cgroup[1], pid, true))
goto cleanup;
if (cg_check_frozen(cgroup[0], true))
goto cleanup;
ret = KSFT_PASS;
cleanup:
if (cgroup[0])
cg_destroy(cgroup[0]);
free(cgroup[0]);
if (cgroup[1])
cg_destroy(cgroup[1]);
free(cgroup[1]);
return ret;
}
/*
* The test checks that ptrace works with a tracing process in a frozen cgroup.
*/
static int test_cgfreezer_ptrace(const char *root)
{
int ret = KSFT_FAIL;
char *cgroup = NULL;
siginfo_t siginfo;
int pid;
cgroup = cg_name(root, "cg_test_ptrace");
if (!cgroup)
goto cleanup;
if (cg_create(cgroup))
goto cleanup;
pid = cg_run_nowait(cgroup, child_fn, NULL);
if (pid < 0)
goto cleanup;
if (cg_wait_for_proc_count(cgroup, 1))
goto cleanup;
if (cg_freeze_wait(cgroup, true))
goto cleanup;
if (ptrace(PTRACE_SEIZE, pid, NULL, NULL))
goto cleanup;
if (ptrace(PTRACE_INTERRUPT, pid, NULL, NULL))
goto cleanup;
waitpid(pid, NULL, 0);
/*
* Cgroup has to remain frozen, however the test task
* is in traced state.
*/
if (cg_check_frozen(cgroup, true))
goto cleanup;
if (ptrace(PTRACE_GETSIGINFO, pid, NULL, &siginfo))
goto cleanup;
if (ptrace(PTRACE_DETACH, pid, NULL, NULL))
goto cleanup;
if (cg_check_frozen(cgroup, true))
goto cleanup;
ret = KSFT_PASS;
cleanup:
if (cgroup)
cg_destroy(cgroup);
free(cgroup);
return ret;
}
/*
* Check if the process is stopped.
*/
static int proc_check_stopped(int pid)
{
char buf[PAGE_SIZE];
int len;
len = proc_read_text(pid, 0, "stat", buf, sizeof(buf));
if (len == -1) {
debug("Can't get %d stat\n", pid);
return -1;
}
if (strstr(buf, "(test_freezer) T ") == NULL) {
debug("Process %d in the unexpected state: %s\n", pid, buf);
return -1;
}
return 0;
}
/*
* Test that it's possible to freeze a cgroup with a stopped process.
*/
static int test_cgfreezer_stopped(const char *root)
{
int pid, ret = KSFT_FAIL;
char *cgroup = NULL;
cgroup = cg_name(root, "cg_test_stopped");
if (!cgroup)
goto cleanup;
if (cg_create(cgroup))
goto cleanup;
pid = cg_run_nowait(cgroup, child_fn, NULL);
if (cg_wait_for_proc_count(cgroup, 1))
goto cleanup;
if (kill(pid, SIGSTOP))
goto cleanup;
if (cg_check_frozen(cgroup, false))
goto cleanup;
if (cg_freeze_wait(cgroup, true))
goto cleanup;
if (cg_freeze_wait(cgroup, false))
goto cleanup;
if (proc_check_stopped(pid))
goto cleanup;
ret = KSFT_PASS;
cleanup:
if (cgroup)
cg_destroy(cgroup);
free(cgroup);
return ret;
}
/*
* Test that it's possible to freeze a cgroup with a ptraced process.
*/
static int test_cgfreezer_ptraced(const char *root)
{
int pid, ret = KSFT_FAIL;
char *cgroup = NULL;
siginfo_t siginfo;
cgroup = cg_name(root, "cg_test_ptraced");
if (!cgroup)
goto cleanup;
if (cg_create(cgroup))
goto cleanup;
pid = cg_run_nowait(cgroup, child_fn, NULL);
if (cg_wait_for_proc_count(cgroup, 1))
goto cleanup;
if (ptrace(PTRACE_SEIZE, pid, NULL, NULL))
goto cleanup;
if (ptrace(PTRACE_INTERRUPT, pid, NULL, NULL))
goto cleanup;
waitpid(pid, NULL, 0);
if (cg_check_frozen(cgroup, false))
goto cleanup;
if (cg_freeze_wait(cgroup, true))
goto cleanup;
/*
	 * cg_check_frozen(cgroup, true) will fail here,
	 * because the task is in the TRACED state.
*/
if (cg_freeze_wait(cgroup, false))
goto cleanup;
if (ptrace(PTRACE_GETSIGINFO, pid, NULL, &siginfo))
goto cleanup;
if (ptrace(PTRACE_DETACH, pid, NULL, NULL))
goto cleanup;
ret = KSFT_PASS;
cleanup:
if (cgroup)
cg_destroy(cgroup);
free(cgroup);
return ret;
}
static int vfork_fn(const char *cgroup, void *arg)
{
int pid = vfork();
if (pid == 0)
while (true)
sleep(1);
return pid;
}
/*
* Test that it's possible to freeze a cgroup with a process,
* which called vfork() and is waiting for a child.
*/
static int test_cgfreezer_vfork(const char *root)
{
int ret = KSFT_FAIL;
char *cgroup = NULL;
cgroup = cg_name(root, "cg_test_vfork");
if (!cgroup)
goto cleanup;
if (cg_create(cgroup))
goto cleanup;
cg_run_nowait(cgroup, vfork_fn, NULL);
if (cg_wait_for_proc_count(cgroup, 2))
goto cleanup;
if (cg_freeze_wait(cgroup, true))
goto cleanup;
ret = KSFT_PASS;
cleanup:
if (cgroup)
cg_destroy(cgroup);
free(cgroup);
return ret;
}
#define T(x) { x, #x }
struct cgfreezer_test {
int (*fn)(const char *root);
const char *name;
} tests[] = {
T(test_cgfreezer_simple),
T(test_cgfreezer_tree),
T(test_cgfreezer_forkbomb),
T(test_cgfreezer_mkdir),
T(test_cgfreezer_rmdir),
T(test_cgfreezer_migrate),
T(test_cgfreezer_ptrace),
T(test_cgfreezer_stopped),
T(test_cgfreezer_ptraced),
T(test_cgfreezer_vfork),
};
#undef T
int main(int argc, char *argv[])
{
char root[PATH_MAX];
int i, ret = EXIT_SUCCESS;
if (cg_find_unified_root(root, sizeof(root)))
ksft_exit_skip("cgroup v2 isn't mounted\n");
for (i = 0; i < ARRAY_SIZE(tests); i++) {
switch (tests[i].fn(root)) {
case KSFT_PASS:
ksft_test_result_pass("%s\n", tests[i].name);
break;
case KSFT_SKIP:
ksft_test_result_skip("%s\n", tests[i].name);
break;
default:
ret = EXIT_FAILURE;
ksft_test_result_fail("%s\n", tests[i].name);
break;
}
}
return ret;
}
| linux-master | tools/testing/selftests/cgroup/test_freezer.c |
// SPDX-License-Identifier: GPL-2.0
#define _GNU_SOURCE
#include <linux/limits.h>
#include <unistd.h>
#include <stdio.h>
#include <signal.h>
#include <sys/sysinfo.h>
#include <string.h>
#include <sys/wait.h>
#include <sys/mman.h>
#include "../kselftest.h"
#include "cgroup_util.h"
static int read_int(const char *path, size_t *value)
{
FILE *file;
int ret = 0;
file = fopen(path, "r");
if (!file)
return -1;
	if (fscanf(file, "%zu", value) != 1)
ret = -1;
fclose(file);
return ret;
}
static int set_min_free_kb(size_t value)
{
FILE *file;
int ret;
file = fopen("/proc/sys/vm/min_free_kbytes", "w");
if (!file)
return -1;
	ret = fprintf(file, "%zu\n", value);
fclose(file);
return ret;
}
static int read_min_free_kb(size_t *value)
{
return read_int("/proc/sys/vm/min_free_kbytes", value);
}
static int get_zswap_stored_pages(size_t *value)
{
return read_int("/sys/kernel/debug/zswap/stored_pages", value);
}
static int get_zswap_written_back_pages(size_t *value)
{
return read_int("/sys/kernel/debug/zswap/written_back_pages", value);
}
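/* Allocate @arg bytes and touch a byte in every page so they are faulted in. */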
static int allocate_bytes(const char *cgroup, void *arg)
{
	size_t size = (size_t)arg;
	char *mem = (char *)malloc(size);
	if (!mem)
		return -1;
	for (size_t i = 0; i < size; i += 4095)
		mem[i] = 'a';
	free(mem);
	return 0;
}
/*
* When trying to store a memcg page in zswap, if the memcg hits its memory
* limit in zswap, writeback should not be triggered.
*
 * This was fixed with commit 0bdf0efa180a ("zswap: do not shrink if cgroup may
* not zswap"). Needs to be revised when a per memcg writeback mechanism is
* implemented.
*/
static int test_no_invasive_cgroup_shrink(const char *root)
{
size_t written_back_before, written_back_after;
int ret = KSFT_FAIL;
char *test_group;
/* Set up */
test_group = cg_name(root, "no_shrink_test");
if (!test_group)
goto out;
if (cg_create(test_group))
goto out;
if (cg_write(test_group, "memory.max", "1M"))
goto out;
if (cg_write(test_group, "memory.zswap.max", "10K"))
goto out;
if (get_zswap_written_back_pages(&written_back_before))
goto out;
/* Allocate 10x memory.max to push memory into zswap */
if (cg_run(test_group, allocate_bytes, (void *)MB(10)))
goto out;
/* Verify that no writeback happened because of the memcg allocation */
if (get_zswap_written_back_pages(&written_back_after))
goto out;
if (written_back_after == written_back_before)
ret = KSFT_PASS;
out:
cg_destroy(test_group);
free(test_group);
return ret;
}
struct no_kmem_bypass_child_args {
size_t target_alloc_bytes;
size_t child_allocated;
};
static int no_kmem_bypass_child(const char *cgroup, void *arg)
{
struct no_kmem_bypass_child_args *values = arg;
void *allocation;
allocation = malloc(values->target_alloc_bytes);
if (!allocation) {
values->child_allocated = true;
return -1;
}
for (long i = 0; i < values->target_alloc_bytes; i += 4095)
((char *)allocation)[i] = 'a';
values->child_allocated = true;
pause();
free(allocation);
return 0;
}
/*
* When pages owned by a memcg are pushed to zswap by kswapd, they should be
* charged to that cgroup. This wasn't the case before commit
 * cd08d80ecdac ("mm: correctly charge compressed memory to its memcg").
*
* The test first allocates memory in a memcg, then raises min_free_kbytes to
* a very high value so that the allocation falls below low wm, then makes
* another allocation to trigger kswapd that should push the memcg-owned pages
* to zswap and verifies that the zswap pages are correctly charged.
*
* To be run on a VM with at most 4G of memory.
*/
static int test_no_kmem_bypass(const char *root)
{
size_t min_free_kb_high, min_free_kb_low, min_free_kb_original;
struct no_kmem_bypass_child_args *values;
size_t trigger_allocation_size;
int wait_child_iteration = 0;
long stored_pages_threshold;
struct sysinfo sys_info;
int ret = KSFT_FAIL;
int child_status;
char *test_group;
pid_t child_pid;
/* Read sys info and compute test values accordingly */
if (sysinfo(&sys_info) != 0)
return KSFT_FAIL;
if (sys_info.totalram > 5000000000)
return KSFT_SKIP;
values = mmap(0, sizeof(struct no_kmem_bypass_child_args), PROT_READ |
PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS, -1, 0);
if (values == MAP_FAILED)
return KSFT_FAIL;
if (read_min_free_kb(&min_free_kb_original))
return KSFT_FAIL;
min_free_kb_high = sys_info.totalram / 2000;
min_free_kb_low = sys_info.totalram / 500000;
values->target_alloc_bytes = (sys_info.totalram - min_free_kb_high * 1000) +
sys_info.totalram * 5 / 100;
stored_pages_threshold = sys_info.totalram / 5 / 4096;
trigger_allocation_size = sys_info.totalram / 20;
/* Set up test memcg */
if (cg_write(root, "cgroup.subtree_control", "+memory"))
goto out;
test_group = cg_name(root, "kmem_bypass_test");
if (!test_group)
goto out;
/* Spawn memcg child and wait for it to allocate */
set_min_free_kb(min_free_kb_low);
if (cg_create(test_group))
goto out;
values->child_allocated = false;
child_pid = cg_run_nowait(test_group, no_kmem_bypass_child, values);
if (child_pid < 0)
goto out;
while (!values->child_allocated && wait_child_iteration++ < 10000)
usleep(1000);
/* Try to wakeup kswapd and let it push child memory to zswap */
set_min_free_kb(min_free_kb_high);
for (int i = 0; i < 20; i++) {
size_t stored_pages;
char *trigger_allocation = malloc(trigger_allocation_size);
if (!trigger_allocation)
break;
for (int i = 0; i < trigger_allocation_size; i += 4095)
trigger_allocation[i] = 'b';
usleep(100000);
free(trigger_allocation);
if (get_zswap_stored_pages(&stored_pages))
break;
/* If memory was pushed to zswap, verify it belongs to memcg */
if (stored_pages > stored_pages_threshold) {
			long zswapped = cg_read_key_long(test_group, "memory.stat", "zswapped ");
			long delta = (long)(stored_pages * 4096) - zswapped;
			int result_ok = delta < (long)(stored_pages * 4096) / 4;
ret = result_ok ? KSFT_PASS : KSFT_FAIL;
break;
}
}
kill(child_pid, SIGTERM);
waitpid(child_pid, &child_status, 0);
out:
set_min_free_kb(min_free_kb_original);
cg_destroy(test_group);
free(test_group);
return ret;
}
#define T(x) { x, #x }
struct zswap_test {
int (*fn)(const char *root);
const char *name;
} tests[] = {
T(test_no_kmem_bypass),
T(test_no_invasive_cgroup_shrink),
};
#undef T
static bool zswap_configured(void)
{
return access("/sys/module/zswap", F_OK) == 0;
}
int main(int argc, char **argv)
{
char root[PATH_MAX];
int i, ret = EXIT_SUCCESS;
if (cg_find_unified_root(root, sizeof(root)))
ksft_exit_skip("cgroup v2 isn't mounted\n");
if (!zswap_configured())
ksft_exit_skip("zswap isn't configured\n");
/*
* Check that memory controller is available:
* memory is listed in cgroup.controllers
*/
if (cg_read_strstr(root, "cgroup.controllers", "memory"))
ksft_exit_skip("memory controller isn't available\n");
if (cg_read_strstr(root, "cgroup.subtree_control", "memory"))
if (cg_write(root, "cgroup.subtree_control", "+memory"))
ksft_exit_skip("Failed to set memory controller\n");
for (i = 0; i < ARRAY_SIZE(tests); i++) {
switch (tests[i].fn(root)) {
case KSFT_PASS:
ksft_test_result_pass("%s\n", tests[i].name);
break;
case KSFT_SKIP:
ksft_test_result_skip("%s\n", tests[i].name);
break;
default:
ret = EXIT_FAILURE;
ksft_test_result_fail("%s\n", tests[i].name);
break;
}
}
return ret;
}
| linux-master | tools/testing/selftests/cgroup/test_zswap.c |
/* SPDX-License-Identifier: GPL-2.0 */
#define _GNU_SOURCE
#include <linux/limits.h>
#include <linux/oom.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
#include <sys/socket.h>
#include <sys/wait.h>
#include <arpa/inet.h>
#include <netinet/in.h>
#include <netdb.h>
#include <errno.h>
#include <sys/mman.h>
#include "../kselftest.h"
#include "cgroup_util.h"
static bool has_localevents;
static bool has_recursiveprot;
/*
* This test creates two nested cgroups with and without enabling
* the memory controller.
*/
static int test_memcg_subtree_control(const char *root)
{
char *parent, *child, *parent2 = NULL, *child2 = NULL;
int ret = KSFT_FAIL;
char buf[PAGE_SIZE];
/* Create two nested cgroups with the memory controller enabled */
parent = cg_name(root, "memcg_test_0");
child = cg_name(root, "memcg_test_0/memcg_test_1");
if (!parent || !child)
goto cleanup_free;
if (cg_create(parent))
goto cleanup_free;
if (cg_write(parent, "cgroup.subtree_control", "+memory"))
goto cleanup_parent;
if (cg_create(child))
goto cleanup_parent;
if (cg_read_strstr(child, "cgroup.controllers", "memory"))
goto cleanup_child;
/* Create two nested cgroups without enabling memory controller */
parent2 = cg_name(root, "memcg_test_1");
child2 = cg_name(root, "memcg_test_1/memcg_test_1");
if (!parent2 || !child2)
goto cleanup_free2;
if (cg_create(parent2))
goto cleanup_free2;
if (cg_create(child2))
goto cleanup_parent2;
if (cg_read(child2, "cgroup.controllers", buf, sizeof(buf)))
goto cleanup_all;
if (!cg_read_strstr(child2, "cgroup.controllers", "memory"))
goto cleanup_all;
ret = KSFT_PASS;
cleanup_all:
cg_destroy(child2);
cleanup_parent2:
cg_destroy(parent2);
cleanup_free2:
free(parent2);
free(child2);
cleanup_child:
cg_destroy(child);
cleanup_parent:
cg_destroy(parent);
cleanup_free:
free(parent);
free(child);
return ret;
}
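/*
 * Allocate 50M of anonymous memory, touch every page, and check that
 * both memory.current and the "anon" entry in memory.stat account for
 * it within a few percent.
 */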
static int alloc_anon_50M_check(const char *cgroup, void *arg)
{
size_t size = MB(50);
char *buf, *ptr;
long anon, current;
int ret = -1;
buf = malloc(size);
if (buf == NULL) {
fprintf(stderr, "malloc() failed\n");
return -1;
}
for (ptr = buf; ptr < buf + size; ptr += PAGE_SIZE)
*ptr = 0;
current = cg_read_long(cgroup, "memory.current");
if (current < size)
goto cleanup;
if (!values_close(size, current, 3))
goto cleanup;
anon = cg_read_key_long(cgroup, "memory.stat", "anon ");
if (anon < 0)
goto cleanup;
if (!values_close(anon, current, 3))
goto cleanup;
ret = 0;
cleanup:
free(buf);
return ret;
}
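/*
 * Create 50M of pagecache and check that memory.current and the "file"
 * entry in memory.stat account for it.
 */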
static int alloc_pagecache_50M_check(const char *cgroup, void *arg)
{
size_t size = MB(50);
int ret = -1;
long current, file;
int fd;
fd = get_temp_fd();
if (fd < 0)
return -1;
if (alloc_pagecache(fd, size))
goto cleanup;
current = cg_read_long(cgroup, "memory.current");
if (current < size)
goto cleanup;
file = cg_read_key_long(cgroup, "memory.stat", "file ");
if (file < 0)
goto cleanup;
if (!values_close(file, current, 10))
goto cleanup;
ret = 0;
cleanup:
close(fd);
return ret;
}
/*
 * This test creates a memory cgroup, allocates
 * some anonymous memory and some pagecache,
 * and checks memory.current and some memory.stat values.
*/
static int test_memcg_current(const char *root)
{
int ret = KSFT_FAIL;
long current;
char *memcg;
memcg = cg_name(root, "memcg_test");
if (!memcg)
goto cleanup;
if (cg_create(memcg))
goto cleanup;
current = cg_read_long(memcg, "memory.current");
if (current != 0)
goto cleanup;
if (cg_run(memcg, alloc_anon_50M_check, NULL))
goto cleanup;
if (cg_run(memcg, alloc_pagecache_50M_check, NULL))
goto cleanup;
ret = KSFT_PASS;
cleanup:
cg_destroy(memcg);
free(memcg);
return ret;
}
static int alloc_pagecache_50M_noexit(const char *cgroup, void *arg)
{
int fd = (long)arg;
int ppid = getppid();
if (alloc_pagecache(fd, MB(50)))
return -1;
while (getppid() == ppid)
sleep(1);
return 0;
}
static int alloc_anon_noexit(const char *cgroup, void *arg)
{
int ppid = getppid();
size_t size = (unsigned long)arg;
char *buf, *ptr;
buf = malloc(size);
if (buf == NULL) {
fprintf(stderr, "malloc() failed\n");
return -1;
}
for (ptr = buf; ptr < buf + size; ptr += PAGE_SIZE)
*ptr = 0;
while (getppid() == ppid)
sleep(1);
free(buf);
return 0;
}
/*
* Wait until processes are killed asynchronously by the OOM killer
* If we exceed a timeout, fail.
*/
static int cg_test_proc_killed(const char *cgroup)
{
int limit;
for (limit = 10; limit > 0; limit--) {
if (cg_read_strcmp(cgroup, "cgroup.procs", "") == 0)
return 0;
usleep(100000);
}
return -1;
}
static bool reclaim_until(const char *memcg, long goal);
/*
* First, this test creates the following hierarchy:
* A memory.min = 0, memory.max = 200M
* A/B memory.min = 50M
* A/B/C memory.min = 75M, memory.current = 50M
* A/B/D memory.min = 25M, memory.current = 50M
* A/B/E memory.min = 0, memory.current = 50M
* A/B/F memory.min = 500M, memory.current = 0
*
* (or memory.low if we test soft protection)
*
 * All usage is pagecache, and the test keeps a running
* process in every leaf cgroup.
 * Then it creates A/G and generates significant
 * memory pressure in A.
*
* Then it checks actual memory usages and expects that:
* A/B memory.current ~= 50M
* A/B/C memory.current ~= 29M
* A/B/D memory.current ~= 21M
* A/B/E memory.current ~= 0
* A/B/F memory.current = 0
* (for origin of the numbers, see model in memcg_protection.m.)
*
* After that it tries to allocate more than there is
* unprotected memory in A available, and checks that:
* a) memory.min protects pagecache even in this case,
* b) memory.low allows reclaiming page cache with low events.
*
* Then we try to reclaim from A/B/C using memory.reclaim until its
* usage reaches 10M.
* This makes sure that:
* (a) We ignore the protection of the reclaim target memcg.
* (b) The previously calculated emin value (~29M) should be dismissed.
*/
static int test_memcg_protection(const char *root, bool min)
{
int ret = KSFT_FAIL, rc;
char *parent[3] = {NULL};
char *children[4] = {NULL};
const char *attribute = min ? "memory.min" : "memory.low";
long c[4];
long current;
int i, attempts;
int fd;
fd = get_temp_fd();
if (fd < 0)
goto cleanup;
parent[0] = cg_name(root, "memcg_test_0");
if (!parent[0])
goto cleanup;
parent[1] = cg_name(parent[0], "memcg_test_1");
if (!parent[1])
goto cleanup;
parent[2] = cg_name(parent[0], "memcg_test_2");
if (!parent[2])
goto cleanup;
if (cg_create(parent[0]))
goto cleanup;
if (cg_read_long(parent[0], attribute)) {
/* No memory.min on older kernels is fine */
if (min)
ret = KSFT_SKIP;
goto cleanup;
}
if (cg_write(parent[0], "cgroup.subtree_control", "+memory"))
goto cleanup;
if (cg_write(parent[0], "memory.max", "200M"))
goto cleanup;
if (cg_write(parent[0], "memory.swap.max", "0"))
goto cleanup;
if (cg_create(parent[1]))
goto cleanup;
if (cg_write(parent[1], "cgroup.subtree_control", "+memory"))
goto cleanup;
if (cg_create(parent[2]))
goto cleanup;
for (i = 0; i < ARRAY_SIZE(children); i++) {
children[i] = cg_name_indexed(parent[1], "child_memcg", i);
if (!children[i])
goto cleanup;
if (cg_create(children[i]))
goto cleanup;
if (i > 2)
continue;
cg_run_nowait(children[i], alloc_pagecache_50M_noexit,
(void *)(long)fd);
}
if (cg_write(parent[1], attribute, "50M"))
goto cleanup;
if (cg_write(children[0], attribute, "75M"))
goto cleanup;
if (cg_write(children[1], attribute, "25M"))
goto cleanup;
if (cg_write(children[2], attribute, "0"))
goto cleanup;
if (cg_write(children[3], attribute, "500M"))
goto cleanup;
attempts = 0;
while (!values_close(cg_read_long(parent[1], "memory.current"),
MB(150), 3)) {
if (attempts++ > 5)
break;
sleep(1);
}
if (cg_run(parent[2], alloc_anon, (void *)MB(148)))
goto cleanup;
if (!values_close(cg_read_long(parent[1], "memory.current"), MB(50), 3))
goto cleanup;
for (i = 0; i < ARRAY_SIZE(children); i++)
c[i] = cg_read_long(children[i], "memory.current");
if (!values_close(c[0], MB(29), 10))
goto cleanup;
if (!values_close(c[1], MB(21), 10))
goto cleanup;
if (c[3] != 0)
goto cleanup;
rc = cg_run(parent[2], alloc_anon, (void *)MB(170));
if (min && !rc)
goto cleanup;
else if (!min && rc) {
fprintf(stderr,
"memory.low prevents from allocating anon memory\n");
goto cleanup;
}
current = min ? MB(50) : MB(30);
if (!values_close(cg_read_long(parent[1], "memory.current"), current, 3))
goto cleanup;
if (!reclaim_until(children[0], MB(10)))
goto cleanup;
if (min) {
ret = KSFT_PASS;
goto cleanup;
}
for (i = 0; i < ARRAY_SIZE(children); i++) {
int no_low_events_index = 1;
long low, oom;
oom = cg_read_key_long(children[i], "memory.events", "oom ");
low = cg_read_key_long(children[i], "memory.events", "low ");
if (oom)
goto cleanup;
if (i <= no_low_events_index && low <= 0)
goto cleanup;
if (i > no_low_events_index && low)
goto cleanup;
}
ret = KSFT_PASS;
cleanup:
for (i = ARRAY_SIZE(children) - 1; i >= 0; i--) {
if (!children[i])
continue;
cg_destroy(children[i]);
free(children[i]);
}
for (i = ARRAY_SIZE(parent) - 1; i >= 0; i--) {
if (!parent[i])
continue;
cg_destroy(parent[i]);
free(parent[i]);
}
close(fd);
return ret;
}
static int test_memcg_min(const char *root)
{
return test_memcg_protection(root, true);
}
static int test_memcg_low(const char *root)
{
return test_memcg_protection(root, false);
}
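/*
 * Allocate 50M of pagecache in a cgroup whose memory.high or
 * memory.max is set to 30M, and check that usage settles close to
 * the 30M limit.
 */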
static int alloc_pagecache_max_30M(const char *cgroup, void *arg)
{
size_t size = MB(50);
int ret = -1;
long current, high, max;
int fd;
high = cg_read_long(cgroup, "memory.high");
max = cg_read_long(cgroup, "memory.max");
if (high != MB(30) && max != MB(30))
return -1;
fd = get_temp_fd();
if (fd < 0)
return -1;
if (alloc_pagecache(fd, size))
goto cleanup;
current = cg_read_long(cgroup, "memory.current");
if (!values_close(current, MB(30), 5))
goto cleanup;
ret = 0;
cleanup:
close(fd);
return ret;
}
/*
* This test checks that memory.high limits the amount of
* memory which can be consumed by either anonymous memory
* or pagecache.
*/
static int test_memcg_high(const char *root)
{
int ret = KSFT_FAIL;
char *memcg;
long high;
memcg = cg_name(root, "memcg_test");
if (!memcg)
goto cleanup;
if (cg_create(memcg))
goto cleanup;
if (cg_read_strcmp(memcg, "memory.high", "max\n"))
goto cleanup;
if (cg_write(memcg, "memory.swap.max", "0"))
goto cleanup;
if (cg_write(memcg, "memory.high", "30M"))
goto cleanup;
if (cg_run(memcg, alloc_anon, (void *)MB(31)))
goto cleanup;
if (!cg_run(memcg, alloc_pagecache_50M_check, NULL))
goto cleanup;
if (cg_run(memcg, alloc_pagecache_max_30M, NULL))
goto cleanup;
high = cg_read_key_long(memcg, "memory.events", "high ");
if (high <= 0)
goto cleanup;
ret = KSFT_PASS;
cleanup:
cg_destroy(memcg);
free(memcg);
return ret;
}
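/*
 * Allocate @arg bytes of anonymous memory and mlock() it, faulting
 * in all pages from a single kernel entry.
 */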
static int alloc_anon_mlock(const char *cgroup, void *arg)
{
size_t size = (size_t)arg;
void *buf;
buf = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON,
0, 0);
if (buf == MAP_FAILED)
return -1;
mlock(buf, size);
munmap(buf, size);
return 0;
}
/*
* This test checks that memory.high is able to throttle big single shot
* allocation i.e. large allocation within one kernel entry.
*/
static int test_memcg_high_sync(const char *root)
{
int ret = KSFT_FAIL, pid, fd = -1;
char *memcg;
long pre_high, pre_max;
long post_high, post_max;
memcg = cg_name(root, "memcg_test");
if (!memcg)
goto cleanup;
if (cg_create(memcg))
goto cleanup;
pre_high = cg_read_key_long(memcg, "memory.events", "high ");
pre_max = cg_read_key_long(memcg, "memory.events", "max ");
if (pre_high < 0 || pre_max < 0)
goto cleanup;
if (cg_write(memcg, "memory.swap.max", "0"))
goto cleanup;
if (cg_write(memcg, "memory.high", "30M"))
goto cleanup;
if (cg_write(memcg, "memory.max", "140M"))
goto cleanup;
fd = memcg_prepare_for_wait(memcg);
if (fd < 0)
goto cleanup;
pid = cg_run_nowait(memcg, alloc_anon_mlock, (void *)MB(200));
if (pid < 0)
goto cleanup;
cg_wait_for(fd);
post_high = cg_read_key_long(memcg, "memory.events", "high ");
post_max = cg_read_key_long(memcg, "memory.events", "max ");
if (post_high < 0 || post_max < 0)
goto cleanup;
if (pre_high == post_high || pre_max != post_max)
goto cleanup;
ret = KSFT_PASS;
cleanup:
if (fd >= 0)
close(fd);
cg_destroy(memcg);
free(memcg);
return ret;
}
/*
* This test checks that memory.max limits the amount of
* memory which can be consumed by either anonymous memory
* or pagecache.
*/
static int test_memcg_max(const char *root)
{
int ret = KSFT_FAIL;
char *memcg;
long current, max;
memcg = cg_name(root, "memcg_test");
if (!memcg)
goto cleanup;
if (cg_create(memcg))
goto cleanup;
if (cg_read_strcmp(memcg, "memory.max", "max\n"))
goto cleanup;
if (cg_write(memcg, "memory.swap.max", "0"))
goto cleanup;
if (cg_write(memcg, "memory.max", "30M"))
goto cleanup;
/* Should be killed by OOM killer */
if (!cg_run(memcg, alloc_anon, (void *)MB(100)))
goto cleanup;
if (cg_run(memcg, alloc_pagecache_max_30M, NULL))
goto cleanup;
current = cg_read_long(memcg, "memory.current");
if (current > MB(30) || !current)
goto cleanup;
max = cg_read_key_long(memcg, "memory.events", "max ");
if (max <= 0)
goto cleanup;
ret = KSFT_PASS;
cleanup:
cg_destroy(memcg);
free(memcg);
return ret;
}
/*
* Reclaim from @memcg until usage reaches @goal by writing to
* memory.reclaim.
*
* This function will return false if the usage is already below the
* goal.
*
* This function assumes that writing to memory.reclaim is the only
* source of change in memory.current (no concurrent allocations or
* reclaim).
*
* This function makes sure memory.reclaim is sane. It will return
* false if memory.reclaim's error codes do not make sense, even if
* the usage goal was satisfied.
*/
static bool reclaim_until(const char *memcg, long goal)
{
char buf[64];
int retries, err;
long current, to_reclaim;
bool reclaimed = false;
for (retries = 5; retries > 0; retries--) {
current = cg_read_long(memcg, "memory.current");
if (current < goal || values_close(current, goal, 3))
break;
/* Did memory.reclaim return 0 incorrectly? */
else if (reclaimed)
return false;
to_reclaim = current - goal;
snprintf(buf, sizeof(buf), "%ld", to_reclaim);
err = cg_write(memcg, "memory.reclaim", buf);
if (!err)
reclaimed = true;
else if (err != -EAGAIN)
return false;
}
return reclaimed;
}
/*
* This test checks that memory.reclaim reclaims the given
* amount of memory (from both anon and file, if possible).
*/
static int test_memcg_reclaim(const char *root)
{
int ret = KSFT_FAIL, fd, retries;
char *memcg;
long current, expected_usage;
memcg = cg_name(root, "memcg_test");
if (!memcg)
goto cleanup;
if (cg_create(memcg))
goto cleanup;
current = cg_read_long(memcg, "memory.current");
if (current != 0)
goto cleanup;
fd = get_temp_fd();
if (fd < 0)
goto cleanup;
cg_run_nowait(memcg, alloc_pagecache_50M_noexit, (void *)(long)fd);
/*
* If swap is enabled, try to reclaim from both anon and file, else try
* to reclaim from file only.
*/
if (is_swap_enabled()) {
cg_run_nowait(memcg, alloc_anon_noexit, (void *) MB(50));
expected_usage = MB(100);
} else
expected_usage = MB(50);
/*
* Wait until current usage reaches the expected usage (or we run out of
* retries).
*/
retries = 5;
while (!values_close(cg_read_long(memcg, "memory.current"),
expected_usage, 10)) {
if (retries--) {
sleep(1);
continue;
} else {
fprintf(stderr,
"failed to allocate %ld for memcg reclaim test\n",
expected_usage);
goto cleanup;
}
}
/*
* Reclaim until current reaches 30M, this makes sure we hit both anon
* and file if swap is enabled.
*/
if (!reclaim_until(memcg, MB(30)))
goto cleanup;
ret = KSFT_PASS;
cleanup:
cg_destroy(memcg);
free(memcg);
close(fd);
return ret;
}
static int alloc_anon_50M_check_swap(const char *cgroup, void *arg)
{
long mem_max = (long)arg;
size_t size = MB(50);
char *buf, *ptr;
long mem_current, swap_current;
int ret = -1;
buf = malloc(size);
if (buf == NULL) {
fprintf(stderr, "malloc() failed\n");
return -1;
}
for (ptr = buf; ptr < buf + size; ptr += PAGE_SIZE)
*ptr = 0;
mem_current = cg_read_long(cgroup, "memory.current");
if (!mem_current || !values_close(mem_current, mem_max, 3))
goto cleanup;
swap_current = cg_read_long(cgroup, "memory.swap.current");
if (!swap_current ||
!values_close(mem_current + swap_current, size, 3))
goto cleanup;
ret = 0;
cleanup:
free(buf);
return ret;
}
/*
* This test checks that memory.swap.max limits the amount of
* anonymous memory which can be swapped out.
*/
static int test_memcg_swap_max(const char *root)
{
int ret = KSFT_FAIL;
char *memcg;
long max;
if (!is_swap_enabled())
return KSFT_SKIP;
memcg = cg_name(root, "memcg_test");
if (!memcg)
goto cleanup;
if (cg_create(memcg))
goto cleanup;
if (cg_read_long(memcg, "memory.swap.current")) {
ret = KSFT_SKIP;
goto cleanup;
}
if (cg_read_strcmp(memcg, "memory.max", "max\n"))
goto cleanup;
if (cg_read_strcmp(memcg, "memory.swap.max", "max\n"))
goto cleanup;
if (cg_write(memcg, "memory.swap.max", "30M"))
goto cleanup;
if (cg_write(memcg, "memory.max", "30M"))
goto cleanup;
/* Should be killed by OOM killer */
if (!cg_run(memcg, alloc_anon, (void *)MB(100)))
goto cleanup;
if (cg_read_key_long(memcg, "memory.events", "oom ") != 1)
goto cleanup;
if (cg_read_key_long(memcg, "memory.events", "oom_kill ") != 1)
goto cleanup;
if (cg_run(memcg, alloc_anon_50M_check_swap, (void *)MB(30)))
goto cleanup;
max = cg_read_key_long(memcg, "memory.events", "max ");
if (max <= 0)
goto cleanup;
ret = KSFT_PASS;
cleanup:
cg_destroy(memcg);
free(memcg);
return ret;
}
/*
* This test disables swapping and tries to allocate anonymous memory
* up to OOM. Then it checks for oom and oom_kill events in
* memory.events.
*/
static int test_memcg_oom_events(const char *root)
{
int ret = KSFT_FAIL;
char *memcg;
memcg = cg_name(root, "memcg_test");
if (!memcg)
goto cleanup;
if (cg_create(memcg))
goto cleanup;
if (cg_write(memcg, "memory.max", "30M"))
goto cleanup;
if (cg_write(memcg, "memory.swap.max", "0"))
goto cleanup;
if (!cg_run(memcg, alloc_anon, (void *)MB(100)))
goto cleanup;
if (cg_read_strcmp(memcg, "cgroup.procs", ""))
goto cleanup;
if (cg_read_key_long(memcg, "memory.events", "oom ") != 1)
goto cleanup;
if (cg_read_key_long(memcg, "memory.events", "oom_kill ") != 1)
goto cleanup;
ret = KSFT_PASS;
cleanup:
cg_destroy(memcg);
free(memcg);
return ret;
}
struct tcp_server_args {
unsigned short port;
int ctl[2];
};
static int tcp_server(const char *cgroup, void *arg)
{
struct tcp_server_args *srv_args = arg;
struct sockaddr_in6 saddr = { 0 };
socklen_t slen = sizeof(saddr);
int sk, client_sk, ctl_fd, yes = 1, ret = -1;
close(srv_args->ctl[0]);
ctl_fd = srv_args->ctl[1];
saddr.sin6_family = AF_INET6;
saddr.sin6_addr = in6addr_any;
saddr.sin6_port = htons(srv_args->port);
sk = socket(AF_INET6, SOCK_STREAM, 0);
if (sk < 0)
return ret;
if (setsockopt(sk, SOL_SOCKET, SO_REUSEADDR, &yes, sizeof(yes)) < 0)
goto cleanup;
if (bind(sk, (struct sockaddr *)&saddr, slen)) {
write(ctl_fd, &errno, sizeof(errno));
goto cleanup;
}
if (listen(sk, 1))
goto cleanup;
ret = 0;
if (write(ctl_fd, &ret, sizeof(ret)) != sizeof(ret)) {
ret = -1;
goto cleanup;
}
client_sk = accept(sk, NULL, NULL);
if (client_sk < 0)
goto cleanup;
ret = -1;
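	/*
	 * Stream 1MB chunks to the client until it disconnects;
	 * ECONNRESET from the closed peer is the expected way out.
	 */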
for (;;) {
uint8_t buf[0x100000];
if (write(client_sk, buf, sizeof(buf)) <= 0) {
if (errno == ECONNRESET)
ret = 0;
break;
}
}
close(client_sk);
cleanup:
close(sk);
return ret;
}
static int tcp_client(const char *cgroup, unsigned short port)
{
const char server[] = "localhost";
struct addrinfo *ai;
char servport[6];
int retries = 0x10; /* nice round number */
int sk, ret;
long allocated;
allocated = cg_read_long(cgroup, "memory.current");
	snprintf(servport, sizeof(servport), "%hu", port);
ret = getaddrinfo(server, servport, NULL, &ai);
if (ret)
return ret;
sk = socket(ai->ai_family, ai->ai_socktype, ai->ai_protocol);
if (sk < 0)
goto free_ainfo;
ret = connect(sk, ai->ai_addr, ai->ai_addrlen);
if (ret < 0)
goto close_sk;
ret = KSFT_FAIL;
while (retries--) {
uint8_t buf[0x100000];
long current, sock;
if (read(sk, buf, sizeof(buf)) <= 0)
goto close_sk;
current = cg_read_long(cgroup, "memory.current");
sock = cg_read_key_long(cgroup, "memory.stat", "sock ");
if (current < 0 || sock < 0)
goto close_sk;
		/* exclude memory not related to the socket connection */
if (values_close(current - allocated, sock, 10)) {
ret = KSFT_PASS;
break;
}
}
close_sk:
close(sk);
free_ainfo:
freeaddrinfo(ai);
return ret;
}
/*
* This test checks socket memory accounting.
 * The test forks a TCP server that listens on a random port between
 * 1000 and 61000. Once it gets a client connection, it starts writing
 * to its socket.
 * The TCP client interleaves reads from the socket with checks that
 * memory.current and memory.stat.sock stay close to each other.
*/
static int test_memcg_sock(const char *root)
{
int bind_retries = 5, ret = KSFT_FAIL, pid, err;
unsigned short port;
char *memcg;
memcg = cg_name(root, "memcg_test");
if (!memcg)
goto cleanup;
if (cg_create(memcg))
goto cleanup;
while (bind_retries--) {
struct tcp_server_args args;
if (pipe(args.ctl))
goto cleanup;
port = args.port = 1000 + rand() % 60000;
pid = cg_run_nowait(memcg, tcp_server, &args);
if (pid < 0)
goto cleanup;
close(args.ctl[1]);
if (read(args.ctl[0], &err, sizeof(err)) != sizeof(err))
goto cleanup;
close(args.ctl[0]);
if (!err)
break;
if (err != EADDRINUSE)
goto cleanup;
waitpid(pid, NULL, 0);
}
if (err == EADDRINUSE) {
ret = KSFT_SKIP;
goto cleanup;
}
if (tcp_client(memcg, port) != KSFT_PASS)
goto cleanup;
waitpid(pid, &err, 0);
if (WEXITSTATUS(err))
goto cleanup;
if (cg_read_long(memcg, "memory.current") < 0)
goto cleanup;
if (cg_read_key_long(memcg, "memory.stat", "sock "))
goto cleanup;
ret = KSFT_PASS;
cleanup:
cg_destroy(memcg);
free(memcg);
return ret;
}
/*
* This test disables swapping and tries to allocate anonymous memory
* up to OOM with memory.group.oom set. Then it checks that all
* processes in the leaf were killed. It also checks that oom_events
* were propagated to the parent level.
*/
static int test_memcg_oom_group_leaf_events(const char *root)
{
int ret = KSFT_FAIL;
char *parent, *child;
long parent_oom_events;
parent = cg_name(root, "memcg_test_0");
child = cg_name(root, "memcg_test_0/memcg_test_1");
if (!parent || !child)
goto cleanup;
if (cg_create(parent))
goto cleanup;
if (cg_create(child))
goto cleanup;
if (cg_write(parent, "cgroup.subtree_control", "+memory"))
goto cleanup;
if (cg_write(child, "memory.max", "50M"))
goto cleanup;
if (cg_write(child, "memory.swap.max", "0"))
goto cleanup;
if (cg_write(child, "memory.oom.group", "1"))
goto cleanup;
cg_run_nowait(parent, alloc_anon_noexit, (void *) MB(60));
cg_run_nowait(child, alloc_anon_noexit, (void *) MB(1));
cg_run_nowait(child, alloc_anon_noexit, (void *) MB(1));
if (!cg_run(child, alloc_anon, (void *)MB(100)))
goto cleanup;
if (cg_test_proc_killed(child))
goto cleanup;
if (cg_read_key_long(child, "memory.events", "oom_kill ") <= 0)
goto cleanup;
parent_oom_events = cg_read_key_long(
parent, "memory.events", "oom_kill ");
/*
* If memory_localevents is not enabled (the default), the parent should
* count OOM events in its children groups. Otherwise, it should not
* have observed any events.
*/
if (has_localevents && parent_oom_events != 0)
goto cleanup;
else if (!has_localevents && parent_oom_events <= 0)
goto cleanup;
ret = KSFT_PASS;
cleanup:
if (child)
cg_destroy(child);
if (parent)
cg_destroy(parent);
free(child);
free(parent);
return ret;
}
/*
* This test disables swapping and tries to allocate anonymous memory
* up to OOM with memory.group.oom set. Then it checks that all
* processes in the parent and leaf were killed.
*/
static int test_memcg_oom_group_parent_events(const char *root)
{
int ret = KSFT_FAIL;
char *parent, *child;
parent = cg_name(root, "memcg_test_0");
child = cg_name(root, "memcg_test_0/memcg_test_1");
if (!parent || !child)
goto cleanup;
if (cg_create(parent))
goto cleanup;
if (cg_create(child))
goto cleanup;
if (cg_write(parent, "memory.max", "80M"))
goto cleanup;
if (cg_write(parent, "memory.swap.max", "0"))
goto cleanup;
if (cg_write(parent, "memory.oom.group", "1"))
goto cleanup;
cg_run_nowait(parent, alloc_anon_noexit, (void *) MB(60));
cg_run_nowait(child, alloc_anon_noexit, (void *) MB(1));
cg_run_nowait(child, alloc_anon_noexit, (void *) MB(1));
if (!cg_run(child, alloc_anon, (void *)MB(100)))
goto cleanup;
if (cg_test_proc_killed(child))
goto cleanup;
if (cg_test_proc_killed(parent))
goto cleanup;
ret = KSFT_PASS;
cleanup:
if (child)
cg_destroy(child);
if (parent)
cg_destroy(parent);
free(child);
free(parent);
return ret;
}
/*
* This test disables swapping and tries to allocate anonymous memory
* up to OOM with memory.group.oom set. Then it checks that all
* processes were killed except those set with OOM_SCORE_ADJ_MIN
*/
static int test_memcg_oom_group_score_events(const char *root)
{
int ret = KSFT_FAIL;
char *memcg;
int safe_pid;
memcg = cg_name(root, "memcg_test_0");
if (!memcg)
goto cleanup;
if (cg_create(memcg))
goto cleanup;
if (cg_write(memcg, "memory.max", "50M"))
goto cleanup;
if (cg_write(memcg, "memory.swap.max", "0"))
goto cleanup;
if (cg_write(memcg, "memory.oom.group", "1"))
goto cleanup;
safe_pid = cg_run_nowait(memcg, alloc_anon_noexit, (void *) MB(1));
if (set_oom_adj_score(safe_pid, OOM_SCORE_ADJ_MIN))
goto cleanup;
cg_run_nowait(memcg, alloc_anon_noexit, (void *) MB(1));
if (!cg_run(memcg, alloc_anon, (void *)MB(100)))
goto cleanup;
if (cg_read_key_long(memcg, "memory.events", "oom_kill ") != 3)
goto cleanup;
if (kill(safe_pid, SIGKILL))
goto cleanup;
ret = KSFT_PASS;
cleanup:
if (memcg)
cg_destroy(memcg);
free(memcg);
return ret;
}
#define T(x) { x, #x }
struct memcg_test {
int (*fn)(const char *root);
const char *name;
} tests[] = {
T(test_memcg_subtree_control),
T(test_memcg_current),
T(test_memcg_min),
T(test_memcg_low),
T(test_memcg_high),
T(test_memcg_high_sync),
T(test_memcg_max),
T(test_memcg_reclaim),
T(test_memcg_oom_events),
T(test_memcg_swap_max),
T(test_memcg_sock),
T(test_memcg_oom_group_leaf_events),
T(test_memcg_oom_group_parent_events),
T(test_memcg_oom_group_score_events),
};
#undef T
int main(int argc, char **argv)
{
char root[PATH_MAX];
int i, proc_status, ret = EXIT_SUCCESS;
if (cg_find_unified_root(root, sizeof(root)))
ksft_exit_skip("cgroup v2 isn't mounted\n");
/*
* Check that memory controller is available:
* memory is listed in cgroup.controllers
*/
if (cg_read_strstr(root, "cgroup.controllers", "memory"))
ksft_exit_skip("memory controller isn't available\n");
if (cg_read_strstr(root, "cgroup.subtree_control", "memory"))
if (cg_write(root, "cgroup.subtree_control", "+memory"))
ksft_exit_skip("Failed to set memory controller\n");
proc_status = proc_mount_contains("memory_recursiveprot");
if (proc_status < 0)
ksft_exit_skip("Failed to query cgroup mount option\n");
has_recursiveprot = proc_status;
proc_status = proc_mount_contains("memory_localevents");
if (proc_status < 0)
ksft_exit_skip("Failed to query cgroup mount option\n");
has_localevents = proc_status;
for (i = 0; i < ARRAY_SIZE(tests); i++) {
switch (tests[i].fn(root)) {
case KSFT_PASS:
ksft_test_result_pass("%s\n", tests[i].name);
break;
case KSFT_SKIP:
ksft_test_result_skip("%s\n", tests[i].name);
break;
default:
ret = EXIT_FAILURE;
ksft_test_result_fail("%s\n", tests[i].name);
break;
}
}
return ret;
}
| linux-master | tools/testing/selftests/cgroup/test_memcontrol.c |
// SPDX-License-Identifier: GPL-2.0
/*
 * Wait until an inotify event occurs on the given cgroup file.
*/
#include <linux/limits.h>
#include <sys/inotify.h>
#include <sys/mman.h>
#include <sys/ptrace.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <errno.h>
#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
static const char usage[] = "Usage: %s [-v] <cgroup_file>\n";
static char *file;
static int verbose;
static inline void fail_message(char *msg)
{
fprintf(stderr, msg, file);
exit(1);
}
int main(int argc, char *argv[])
{
char *cmd = argv[0];
int c, fd;
struct pollfd fds = { .events = POLLIN, };
while ((c = getopt(argc, argv, "v")) != -1) {
switch (c) {
case 'v':
verbose++;
break;
}
argv++, argc--;
}
if (argc != 2) {
fprintf(stderr, usage, cmd);
return -1;
}
file = argv[1];
fd = open(file, O_RDONLY);
if (fd < 0)
fail_message("Cgroup file %s not found!\n");
close(fd);
fd = inotify_init();
if (fd < 0)
fail_message("inotify_init() fails on %s!\n");
if (inotify_add_watch(fd, file, IN_MODIFY) < 0)
fail_message("inotify_add_watch() fails on %s!\n");
fds.fd = fd;
/*
* poll waiting loop
*/
for (;;) {
int ret = poll(&fds, 1, 10000);
if (ret < 0) {
if (errno == EINTR)
continue;
perror("poll");
exit(1);
}
if ((ret > 0) && (fds.revents & POLLIN))
break;
}
if (verbose) {
struct inotify_event events[10];
long len;
usleep(1000);
len = read(fd, events, sizeof(events));
printf("Number of events read = %ld\n",
len/sizeof(struct inotify_event));
}
close(fd);
return 0;
}
| linux-master | tools/testing/selftests/cgroup/wait_inotify.c |
/* SPDX-License-Identifier: GPL-2.0 */
#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <linux/limits.h>
#include <poll.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/inotify.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>
#include "cgroup_util.h"
#include "../clone3/clone3_selftests.h"
/* Returns read len on success, or -errno on failure. */
static ssize_t read_text(const char *path, char *buf, size_t max_len)
{
ssize_t len;
int fd;
fd = open(path, O_RDONLY);
if (fd < 0)
return -errno;
len = read(fd, buf, max_len - 1);
if (len >= 0)
buf[len] = 0;
close(fd);
return len < 0 ? -errno : len;
}
/* Returns written len on success, or -errno on failure. */
static ssize_t write_text(const char *path, char *buf, ssize_t len)
{
int fd;
fd = open(path, O_WRONLY | O_APPEND);
if (fd < 0)
return -errno;
len = write(fd, buf, len);
close(fd);
return len < 0 ? -errno : len;
}
char *cg_name(const char *root, const char *name)
{
size_t len = strlen(root) + strlen(name) + 2;
char *ret = malloc(len);
snprintf(ret, len, "%s/%s", root, name);
return ret;
}
char *cg_name_indexed(const char *root, const char *name, int index)
{
size_t len = strlen(root) + strlen(name) + 10;
char *ret = malloc(len);
snprintf(ret, len, "%s/%s_%d", root, name, index);
return ret;
}
char *cg_control(const char *cgroup, const char *control)
{
size_t len = strlen(cgroup) + strlen(control) + 2;
char *ret = malloc(len);
snprintf(ret, len, "%s/%s", cgroup, control);
return ret;
}
/* Returns 0 on success, or -errno on failure. */
int cg_read(const char *cgroup, const char *control, char *buf, size_t len)
{
char path[PATH_MAX];
ssize_t ret;
snprintf(path, sizeof(path), "%s/%s", cgroup, control);
ret = read_text(path, buf, len);
return ret >= 0 ? 0 : ret;
}
int cg_read_strcmp(const char *cgroup, const char *control,
const char *expected)
{
size_t size;
char *buf;
int ret;
/* Handle the case of comparing against empty string */
if (!expected)
return -1;
else
size = strlen(expected) + 1;
buf = malloc(size);
if (!buf)
return -1;
if (cg_read(cgroup, control, buf, size)) {
free(buf);
return -1;
}
ret = strcmp(expected, buf);
free(buf);
return ret;
}
int cg_read_strstr(const char *cgroup, const char *control, const char *needle)
{
char buf[PAGE_SIZE];
if (cg_read(cgroup, control, buf, sizeof(buf)))
return -1;
return strstr(buf, needle) ? 0 : -1;
}
long cg_read_long(const char *cgroup, const char *control)
{
char buf[128];
if (cg_read(cgroup, control, buf, sizeof(buf)))
return -1;
return atol(buf);
}
long cg_read_key_long(const char *cgroup, const char *control, const char *key)
{
char buf[PAGE_SIZE];
char *ptr;
if (cg_read(cgroup, control, buf, sizeof(buf)))
return -1;
ptr = strstr(buf, key);
if (!ptr)
return -1;
return atol(ptr + strlen(key));
}
long cg_read_lc(const char *cgroup, const char *control)
{
char buf[PAGE_SIZE];
const char delim[] = "\n";
char *line;
long cnt = 0;
if (cg_read(cgroup, control, buf, sizeof(buf)))
return -1;
for (line = strtok(buf, delim); line; line = strtok(NULL, delim))
cnt++;
return cnt;
}
/* Returns 0 on success, or -errno on failure. */
int cg_write(const char *cgroup, const char *control, char *buf)
{
char path[PATH_MAX];
ssize_t len = strlen(buf), ret;
snprintf(path, sizeof(path), "%s/%s", cgroup, control);
ret = write_text(path, buf, len);
return ret == len ? 0 : ret;
}
int cg_write_numeric(const char *cgroup, const char *control, long value)
{
char buf[64];
int ret;
	ret = sprintf(buf, "%ld", value);
if (ret < 0)
return ret;
return cg_write(cgroup, control, buf);
}
int cg_find_unified_root(char *root, size_t len)
{
char buf[10 * PAGE_SIZE];
char *fs, *mount, *type;
const char delim[] = "\n\t ";
if (read_text("/proc/self/mounts", buf, sizeof(buf)) <= 0)
return -1;
/*
* Example:
* cgroup /sys/fs/cgroup cgroup2 rw,seclabel,noexec,relatime 0 0
*/
for (fs = strtok(buf, delim); fs; fs = strtok(NULL, delim)) {
mount = strtok(NULL, delim);
type = strtok(NULL, delim);
strtok(NULL, delim);
strtok(NULL, delim);
strtok(NULL, delim);
if (strcmp(type, "cgroup2") == 0) {
strncpy(root, mount, len);
return 0;
}
}
return -1;
}
int cg_create(const char *cgroup)
{
return mkdir(cgroup, 0755);
}
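/*
 * Poll "cgroup.procs" until it lists at least @count pids, retrying
 * for roughly a second. Returns 0 on success, -1 on timeout.
 */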
int cg_wait_for_proc_count(const char *cgroup, int count)
{
char buf[10 * PAGE_SIZE] = {0};
int attempts;
char *ptr;
for (attempts = 10; attempts >= 0; attempts--) {
int nr = 0;
if (cg_read(cgroup, "cgroup.procs", buf, sizeof(buf)))
break;
for (ptr = buf; *ptr; ptr++)
if (*ptr == '\n')
nr++;
if (nr >= count)
return 0;
usleep(100000);
}
return -1;
}
int cg_killall(const char *cgroup)
{
char buf[PAGE_SIZE];
char *ptr = buf;
/* If cgroup.kill exists use it. */
if (!cg_write(cgroup, "cgroup.kill", "1"))
return 0;
if (cg_read(cgroup, "cgroup.procs", buf, sizeof(buf)))
return -1;
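	/*
	 * Fall back to parsing the newline-separated pid list and
	 * sending SIGKILL to each pid individually.
	 */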
while (ptr < buf + sizeof(buf)) {
int pid = strtol(ptr, &ptr, 10);
if (pid == 0)
break;
if (*ptr)
ptr++;
else
break;
if (kill(pid, SIGKILL))
return -1;
}
return 0;
}
int cg_destroy(const char *cgroup)
{
int ret;
if (!cgroup)
return 0;
retry:
ret = rmdir(cgroup);
if (ret && errno == EBUSY) {
cg_killall(cgroup);
usleep(100);
goto retry;
}
if (ret && errno == ENOENT)
ret = 0;
return ret;
}
int cg_enter(const char *cgroup, int pid)
{
char pidbuf[64];
snprintf(pidbuf, sizeof(pidbuf), "%d", pid);
return cg_write(cgroup, "cgroup.procs", pidbuf);
}
int cg_enter_current(const char *cgroup)
{
return cg_write(cgroup, "cgroup.procs", "0");
}
int cg_enter_current_thread(const char *cgroup)
{
return cg_write(cgroup, "cgroup.threads", "0");
}
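/*
 * Fork a child, attach it to @cgroup, run @fn there and return the
 * child's exit status (or -1 if it didn't exit normally).
 */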
int cg_run(const char *cgroup,
int (*fn)(const char *cgroup, void *arg),
void *arg)
{
int pid, retcode;
pid = fork();
if (pid < 0) {
return pid;
} else if (pid == 0) {
char buf[64];
snprintf(buf, sizeof(buf), "%d", getpid());
if (cg_write(cgroup, "cgroup.procs", buf))
exit(EXIT_FAILURE);
exit(fn(cgroup, arg));
} else {
waitpid(pid, &retcode, 0);
if (WIFEXITED(retcode))
return WEXITSTATUS(retcode);
else
return -1;
}
}
pid_t clone_into_cgroup(int cgroup_fd)
{
#ifdef CLONE_ARGS_SIZE_VER2
pid_t pid;
struct __clone_args args = {
.flags = CLONE_INTO_CGROUP,
.exit_signal = SIGCHLD,
.cgroup = cgroup_fd,
};
pid = sys_clone3(&args, sizeof(struct __clone_args));
/*
* Verify that this is a genuine test failure:
* ENOSYS -> clone3() not available
* E2BIG -> CLONE_INTO_CGROUP not available
*/
if (pid < 0 && (errno == ENOSYS || errno == E2BIG))
goto pretend_enosys;
return pid;
pretend_enosys:
#endif
errno = ENOSYS;
return -ENOSYS;
}
int clone_reap(pid_t pid, int options)
{
int ret;
siginfo_t info = {
.si_signo = 0,
};
again:
ret = waitid(P_PID, pid, &info, options | __WALL | __WNOTHREAD);
if (ret < 0) {
if (errno == EINTR)
goto again;
return -1;
}
if (options & WEXITED) {
if (WIFEXITED(info.si_status))
return WEXITSTATUS(info.si_status);
}
if (options & WSTOPPED) {
if (WIFSTOPPED(info.si_status))
return WSTOPSIG(info.si_status);
}
if (options & WCONTINUED) {
if (WIFCONTINUED(info.si_status))
return 0;
}
return -1;
}
int dirfd_open_opath(const char *dir)
{
return open(dir, O_DIRECTORY | O_CLOEXEC | O_NOFOLLOW | O_PATH);
}
#define close_prot_errno(fd) \
if (fd >= 0) { \
int _e_ = errno; \
close(fd); \
errno = _e_; \
}
static int clone_into_cgroup_run_nowait(const char *cgroup,
int (*fn)(const char *cgroup, void *arg),
void *arg)
{
int cgroup_fd;
pid_t pid;
cgroup_fd = dirfd_open_opath(cgroup);
if (cgroup_fd < 0)
return -1;
pid = clone_into_cgroup(cgroup_fd);
close_prot_errno(cgroup_fd);
if (pid == 0)
exit(fn(cgroup, arg));
return pid;
}
int cg_run_nowait(const char *cgroup,
int (*fn)(const char *cgroup, void *arg),
void *arg)
{
int pid;
pid = clone_into_cgroup_run_nowait(cgroup, fn, arg);
if (pid > 0)
return pid;
/* Genuine test failure. */
if (pid < 0 && errno != ENOSYS)
return -1;
pid = fork();
if (pid == 0) {
char buf[64];
snprintf(buf, sizeof(buf), "%d", getpid());
if (cg_write(cgroup, "cgroup.procs", buf))
exit(EXIT_FAILURE);
exit(fn(cgroup, arg));
}
return pid;
}
int get_temp_fd(void)
{
return open(".", O_TMPFILE | O_RDWR | O_EXCL);
}
int alloc_pagecache(int fd, size_t size)
{
char buf[PAGE_SIZE];
struct stat st;
int i;
if (fstat(fd, &st))
goto cleanup;
size += st.st_size;
if (ftruncate(fd, size))
goto cleanup;
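	/* Read the just-extended file back to populate the page cache. */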
for (i = 0; i < size; i += sizeof(buf))
read(fd, buf, sizeof(buf));
return 0;
cleanup:
return -1;
}
int alloc_anon(const char *cgroup, void *arg)
{
size_t size = (unsigned long)arg;
char *buf, *ptr;
buf = malloc(size);
for (ptr = buf; ptr < buf + size; ptr += PAGE_SIZE)
*ptr = 0;
free(buf);
return 0;
}
int is_swap_enabled(void)
{
char buf[PAGE_SIZE];
const char delim[] = "\n";
int cnt = 0;
char *line;
if (read_text("/proc/swaps", buf, sizeof(buf)) <= 0)
return -1;
for (line = strtok(buf, delim); line; line = strtok(NULL, delim))
cnt++;
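	/*
	 * The first line of /proc/swaps is a header, so more than one
	 * line means at least one active swap device.
	 */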
return cnt > 1;
}
int set_oom_adj_score(int pid, int score)
{
char path[PATH_MAX];
int fd, len;
sprintf(path, "/proc/%d/oom_score_adj", pid);
fd = open(path, O_WRONLY | O_APPEND);
if (fd < 0)
return fd;
len = dprintf(fd, "%d", score);
if (len < 0) {
close(fd);
return len;
}
close(fd);
return 0;
}
int proc_mount_contains(const char *option)
{
char buf[4 * PAGE_SIZE];
ssize_t read;
read = read_text("/proc/mounts", buf, sizeof(buf));
if (read < 0)
return read;
return strstr(buf, option) != NULL;
}
ssize_t proc_read_text(int pid, bool thread, const char *item, char *buf, size_t size)
{
char path[PATH_MAX];
ssize_t ret;
if (!pid)
snprintf(path, sizeof(path), "/proc/%s/%s",
thread ? "thread-self" : "self", item);
else
snprintf(path, sizeof(path), "/proc/%d/%s", pid, item);
ret = read_text(path, buf, size);
return ret < 0 ? -1 : ret;
}
int proc_read_strstr(int pid, bool thread, const char *item, const char *needle)
{
char buf[PAGE_SIZE];
if (proc_read_text(pid, thread, item, buf, sizeof(buf)) < 0)
return -1;
return strstr(buf, needle) ? 0 : -1;
}
int clone_into_cgroup_run_wait(const char *cgroup)
{
int cgroup_fd;
pid_t pid;
cgroup_fd = dirfd_open_opath(cgroup);
if (cgroup_fd < 0)
return -1;
pid = clone_into_cgroup(cgroup_fd);
close_prot_errno(cgroup_fd);
if (pid < 0)
return -1;
if (pid == 0)
exit(EXIT_SUCCESS);
/*
* We don't care whether this fails. We only care whether the initial
* clone succeeded.
*/
(void)clone_reap(pid, WEXITED);
return 0;
}
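/*
 * Return an inotify fd watching @filename in @cgroup for modify
 * events, or -1 on error.
 */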
static int __prepare_for_wait(const char *cgroup, const char *filename)
{
int fd, ret = -1;
fd = inotify_init1(0);
if (fd == -1)
return fd;
ret = inotify_add_watch(fd, cg_control(cgroup, filename), IN_MODIFY);
if (ret == -1) {
close(fd);
fd = -1;
}
return fd;
}
int cg_prepare_for_wait(const char *cgroup)
{
return __prepare_for_wait(cgroup, "cgroup.events");
}
int memcg_prepare_for_wait(const char *cgroup)
{
return __prepare_for_wait(cgroup, "memory.events");
}
int cg_wait_for(int fd)
{
int ret = -1;
struct pollfd fds = {
.fd = fd,
.events = POLLIN,
};
while (true) {
ret = poll(&fds, 1, 10000);
if (ret == -1) {
if (errno == EINTR)
continue;
break;
}
if (ret > 0 && fds.revents & POLLIN) {
ret = 0;
break;
}
}
return ret;
}
| linux-master | tools/testing/selftests/cgroup/cgroup_util.c |
// SPDX-License-Identifier: GPL-2.0
#define _GNU_SOURCE
#include <linux/limits.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
#include <sys/wait.h>
#include <errno.h>
#include <sys/sysinfo.h>
#include <pthread.h>
#include "../kselftest.h"
#include "cgroup_util.h"
/*
 * Memory cgroup charging is performed using per-cpu batches of 64
 * pages (see MEMCG_CHARGE_BATCH), whereas memory.stat is exact. So
* the maximum discrepancy between charge and vmstat entries is number
* of cpus multiplied by 64 pages.
*/
#define MAX_VMSTAT_ERROR (4096 * 64 * get_nprocs())
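/*
 * stat() a series of unique, non-existent paths with long names so
 * that the kernel creates a negative dentry (slab memory) for each.
 */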
static int alloc_dcache(const char *cgroup, void *arg)
{
unsigned long i;
struct stat st;
char buf[128];
for (i = 0; i < (unsigned long)arg; i++) {
snprintf(buf, sizeof(buf),
"/something-non-existent-with-a-long-name-%64lu-%d",
i, getpid());
stat(buf, &st);
}
return 0;
}
/*
* This test allocates 100000 of negative dentries with long names.
* Then it checks that "slab" in memory.stat is larger than 1M.
* Then it sets memory.high to 1M and checks that at least 1/2
* of slab memory has been reclaimed.
*/
static int test_kmem_basic(const char *root)
{
int ret = KSFT_FAIL;
char *cg = NULL;
long slab0, slab1, current;
cg = cg_name(root, "kmem_basic_test");
if (!cg)
goto cleanup;
if (cg_create(cg))
goto cleanup;
if (cg_run(cg, alloc_dcache, (void *)100000))
goto cleanup;
slab0 = cg_read_key_long(cg, "memory.stat", "slab ");
if (slab0 < (1 << 20))
goto cleanup;
cg_write(cg, "memory.high", "1M");
/* wait for RCU freeing */
sleep(1);
slab1 = cg_read_key_long(cg, "memory.stat", "slab ");
if (slab1 < 0)
goto cleanup;
current = cg_read_long(cg, "memory.current");
if (current < 0)
goto cleanup;
if (slab1 < slab0 / 2 && current < slab0 / 2)
ret = KSFT_PASS;
cleanup:
cg_destroy(cg);
free(cg);
return ret;
}
static void *alloc_kmem_fn(void *arg)
{
alloc_dcache(NULL, (void *)100);
return NULL;
}
static int alloc_kmem_smp(const char *cgroup, void *arg)
{
int nr_threads = 2 * get_nprocs();
pthread_t *tinfo;
unsigned long i;
int ret = -1;
tinfo = calloc(nr_threads, sizeof(pthread_t));
if (tinfo == NULL)
return -1;
for (i = 0; i < nr_threads; i++) {
if (pthread_create(&tinfo[i], NULL, &alloc_kmem_fn,
(void *)i)) {
free(tinfo);
return -1;
}
}
for (i = 0; i < nr_threads; i++) {
ret = pthread_join(tinfo[i], NULL);
if (ret)
break;
}
free(tinfo);
return ret;
}
static int cg_run_in_subcgroups(const char *parent,
int (*fn)(const char *cgroup, void *arg),
void *arg, int times)
{
char *child;
int i;
for (i = 0; i < times; i++) {
child = cg_name_indexed(parent, "child", i);
if (!child)
return -1;
if (cg_create(child)) {
cg_destroy(child);
free(child);
return -1;
}
if (cg_run(child, fn, NULL)) {
cg_destroy(child);
free(child);
return -1;
}
cg_destroy(child);
free(child);
}
return 0;
}
/*
* The test creates and destroys a large number of cgroups. In each cgroup it
* allocates some slab memory (mostly negative dentries) using 2 * NR_CPUS
* threads. Then it checks the sanity of numbers on the parent level:
* the total size of the cgroups should be roughly equal to
* anon + file + kernel + sock.
*/
static int test_kmem_memcg_deletion(const char *root)
{
long current, anon, file, kernel, sock, sum;
int ret = KSFT_FAIL;
char *parent;
parent = cg_name(root, "kmem_memcg_deletion_test");
if (!parent)
goto cleanup;
if (cg_create(parent))
goto cleanup;
if (cg_write(parent, "cgroup.subtree_control", "+memory"))
goto cleanup;
if (cg_run_in_subcgroups(parent, alloc_kmem_smp, NULL, 100))
goto cleanup;
current = cg_read_long(parent, "memory.current");
anon = cg_read_key_long(parent, "memory.stat", "anon ");
file = cg_read_key_long(parent, "memory.stat", "file ");
kernel = cg_read_key_long(parent, "memory.stat", "kernel ");
sock = cg_read_key_long(parent, "memory.stat", "sock ");
if (current < 0 || anon < 0 || file < 0 || kernel < 0 || sock < 0)
goto cleanup;
sum = anon + file + kernel + sock;
if (abs(sum - current) < MAX_VMSTAT_ERROR) {
ret = KSFT_PASS;
} else {
printf("memory.current = %ld\n", current);
printf("anon + file + kernel + sock = %ld\n", sum);
printf("anon = %ld\n", anon);
printf("file = %ld\n", file);
printf("kernel = %ld\n", kernel);
printf("sock = %ld\n", sock);
}
cleanup:
cg_destroy(parent);
free(parent);
return ret;
}
/*
 * The test reads the entire /proc/kpagecgroup. If the read completes
 * successfully (and the kernel didn't panic), the test is treated as passed.
*/
static int test_kmem_proc_kpagecgroup(const char *root)
{
unsigned long buf[128];
int ret = KSFT_FAIL;
ssize_t len;
int fd;
fd = open("/proc/kpagecgroup", O_RDONLY);
if (fd < 0)
return ret;
do {
len = read(fd, buf, sizeof(buf));
} while (len > 0);
if (len == 0)
ret = KSFT_PASS;
close(fd);
return ret;
}
static void *pthread_wait_fn(void *arg)
{
sleep(100);
return NULL;
}
static int spawn_1000_threads(const char *cgroup, void *arg)
{
int nr_threads = 1000;
pthread_t *tinfo;
unsigned long i;
long stack;
int ret = -1;
tinfo = calloc(nr_threads, sizeof(pthread_t));
if (tinfo == NULL)
return -1;
for (i = 0; i < nr_threads; i++) {
if (pthread_create(&tinfo[i], NULL, &pthread_wait_fn,
(void *)i)) {
free(tinfo);
			return -1;
}
}
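	/*
	 * Each thread needs a kernel stack of at least one page, so
	 * 1000 threads should account for >= 4096 * 1000 bytes of
	 * kernel_stack.
	 */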
stack = cg_read_key_long(cgroup, "memory.stat", "kernel_stack ");
if (stack >= 4096 * 1000)
ret = 0;
free(tinfo);
return ret;
}
/*
* The test spawns a process, which spawns 1000 threads. Then it checks
* that memory.stat's kernel_stack is at least 1000 pages large.
*/
static int test_kmem_kernel_stacks(const char *root)
{
int ret = KSFT_FAIL;
char *cg = NULL;
cg = cg_name(root, "kmem_kernel_stacks_test");
if (!cg)
goto cleanup;
if (cg_create(cg))
goto cleanup;
if (cg_run(cg, spawn_1000_threads, NULL))
goto cleanup;
ret = KSFT_PASS;
cleanup:
cg_destroy(cg);
free(cg);
return ret;
}
/*
 * This test sequentially creates 30 child cgroups, allocates some
* kernel memory in each of them, and deletes them. Then it checks
* that the number of dying cgroups on the parent level is 0.
*/
static int test_kmem_dead_cgroups(const char *root)
{
int ret = KSFT_FAIL;
char *parent;
long dead;
int i;
parent = cg_name(root, "kmem_dead_cgroups_test");
if (!parent)
goto cleanup;
if (cg_create(parent))
goto cleanup;
if (cg_write(parent, "cgroup.subtree_control", "+memory"))
goto cleanup;
if (cg_run_in_subcgroups(parent, alloc_dcache, (void *)100, 30))
goto cleanup;
for (i = 0; i < 5; i++) {
dead = cg_read_key_long(parent, "cgroup.stat",
"nr_dying_descendants ");
if (dead == 0) {
ret = KSFT_PASS;
break;
}
/*
* Reclaiming cgroups might take some time,
* let's wait a bit and repeat.
*/
sleep(1);
}
cleanup:
cg_destroy(parent);
free(parent);
return ret;
}
/*
* This test creates a sub-tree with 1000 memory cgroups.
* Then it checks that the memory.current on the parent level
 * is greater than 0 and approximately matches the percpu value
* from memory.stat.
*/
static int test_percpu_basic(const char *root)
{
int ret = KSFT_FAIL;
char *parent, *child;
long current, percpu;
int i;
parent = cg_name(root, "percpu_basic_test");
if (!parent)
goto cleanup;
if (cg_create(parent))
goto cleanup;
if (cg_write(parent, "cgroup.subtree_control", "+memory"))
goto cleanup;
for (i = 0; i < 1000; i++) {
child = cg_name_indexed(parent, "child", i);
if (!child)
return -1;
if (cg_create(child))
goto cleanup_children;
free(child);
}
current = cg_read_long(parent, "memory.current");
percpu = cg_read_key_long(parent, "memory.stat", "percpu ");
if (current > 0 && percpu > 0 && abs(current - percpu) <
MAX_VMSTAT_ERROR)
ret = KSFT_PASS;
else
printf("memory.current %ld\npercpu %ld\n",
current, percpu);
cleanup_children:
for (i = 0; i < 1000; i++) {
child = cg_name_indexed(parent, "child", i);
cg_destroy(child);
free(child);
}
cleanup:
cg_destroy(parent);
free(parent);
return ret;
}
#define T(x) { x, #x }
struct kmem_test {
int (*fn)(const char *root);
const char *name;
} tests[] = {
T(test_kmem_basic),
T(test_kmem_memcg_deletion),
T(test_kmem_proc_kpagecgroup),
T(test_kmem_kernel_stacks),
T(test_kmem_dead_cgroups),
T(test_percpu_basic),
};
#undef T
int main(int argc, char **argv)
{
char root[PATH_MAX];
int i, ret = EXIT_SUCCESS;
if (cg_find_unified_root(root, sizeof(root)))
ksft_exit_skip("cgroup v2 isn't mounted\n");
/*
* Check that memory controller is available:
* memory is listed in cgroup.controllers
*/
if (cg_read_strstr(root, "cgroup.controllers", "memory"))
ksft_exit_skip("memory controller isn't available\n");
if (cg_read_strstr(root, "cgroup.subtree_control", "memory"))
if (cg_write(root, "cgroup.subtree_control", "+memory"))
ksft_exit_skip("Failed to set memory controller\n");
for (i = 0; i < ARRAY_SIZE(tests); i++) {
switch (tests[i].fn(root)) {
case KSFT_PASS:
ksft_test_result_pass("%s\n", tests[i].name);
break;
case KSFT_SKIP:
ksft_test_result_skip("%s\n", tests[i].name);
break;
default:
ret = EXIT_FAILURE;
ksft_test_result_fail("%s\n", tests[i].name);
break;
}
}
return ret;
}
| linux-master | tools/testing/selftests/cgroup/test_kmem.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/limits.h>
#include <signal.h>
#include "../kselftest.h"
#include "cgroup_util.h"
static int idle_process_fn(const char *cgroup, void *arg)
{
(void)pause();
return 0;
}
static int do_migration_fn(const char *cgroup, void *arg)
{
int object_pid = (int)(size_t)arg;
if (setuid(TEST_UID))
return EXIT_FAILURE;
// XXX checking /proc/$pid/cgroup would be quicker than wait
if (cg_enter(cgroup, object_pid) ||
cg_wait_for_proc_count(cgroup, 1))
return EXIT_FAILURE;
return EXIT_SUCCESS;
}
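/*
 * As an unprivileged user, toggle the cpuset controller on the parent
 * and verify that the child's "cgroup.controllers" follows along.
 */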
static int do_controller_fn(const char *cgroup, void *arg)
{
const char *child = cgroup;
const char *parent = arg;
if (setuid(TEST_UID))
return EXIT_FAILURE;
if (!cg_read_strstr(child, "cgroup.controllers", "cpuset"))
return EXIT_FAILURE;
if (cg_write(parent, "cgroup.subtree_control", "+cpuset"))
return EXIT_FAILURE;
if (cg_read_strstr(child, "cgroup.controllers", "cpuset"))
return EXIT_FAILURE;
if (cg_write(parent, "cgroup.subtree_control", "-cpuset"))
return EXIT_FAILURE;
if (!cg_read_strstr(child, "cgroup.controllers", "cpuset"))
return EXIT_FAILURE;
return EXIT_SUCCESS;
}
/*
* Migrate a process between two sibling cgroups.
* The success should only depend on the parent cgroup permissions and not the
* migrated process itself (cpuset controller is in place because it uses
* security_task_setscheduler() in cgroup v1).
*
 * Deliberately don't set cpuset.cpus in children to avoid defining migration
* permissions between two different cpusets.
*/
static int test_cpuset_perms_object(const char *root, bool allow)
{
char *parent = NULL, *child_src = NULL, *child_dst = NULL;
char *parent_procs = NULL, *child_src_procs = NULL, *child_dst_procs = NULL;
const uid_t test_euid = TEST_UID;
int object_pid = 0;
int ret = KSFT_FAIL;
parent = cg_name(root, "cpuset_test_0");
if (!parent)
goto cleanup;
parent_procs = cg_name(parent, "cgroup.procs");
if (!parent_procs)
goto cleanup;
if (cg_create(parent))
goto cleanup;
child_src = cg_name(parent, "cpuset_test_1");
if (!child_src)
goto cleanup;
child_src_procs = cg_name(child_src, "cgroup.procs");
if (!child_src_procs)
goto cleanup;
if (cg_create(child_src))
goto cleanup;
child_dst = cg_name(parent, "cpuset_test_2");
if (!child_dst)
goto cleanup;
child_dst_procs = cg_name(child_dst, "cgroup.procs");
if (!child_dst_procs)
goto cleanup;
if (cg_create(child_dst))
goto cleanup;
if (cg_write(parent, "cgroup.subtree_control", "+cpuset"))
goto cleanup;
if (cg_read_strstr(child_src, "cgroup.controllers", "cpuset") ||
cg_read_strstr(child_dst, "cgroup.controllers", "cpuset"))
goto cleanup;
/* Enable permissions along src->dst tree path */
if (chown(child_src_procs, test_euid, -1) ||
chown(child_dst_procs, test_euid, -1))
goto cleanup;
if (allow && chown(parent_procs, test_euid, -1))
goto cleanup;
/* Fork a privileged child as a test object */
object_pid = cg_run_nowait(child_src, idle_process_fn, NULL);
if (object_pid < 0)
goto cleanup;
	/* Carry out the migration in a child process that can drop all
	 * privileges (including capabilities); the main process must remain
	 * privileged for cleanup.
	 * The child process's cgroup is irrelevant, but we place it into
	 * child_dst as a hacky way to pass the migration target to the child.
*/
if (allow ^ (cg_run(child_dst, do_migration_fn, (void *)(size_t)object_pid) == EXIT_SUCCESS))
goto cleanup;
ret = KSFT_PASS;
cleanup:
if (object_pid > 0) {
(void)kill(object_pid, SIGTERM);
(void)clone_reap(object_pid, WEXITED);
}
cg_destroy(child_dst);
free(child_dst_procs);
free(child_dst);
cg_destroy(child_src);
free(child_src_procs);
free(child_src);
cg_destroy(parent);
free(parent_procs);
free(parent);
return ret;
}
static int test_cpuset_perms_object_allow(const char *root)
{
return test_cpuset_perms_object(root, true);
}
static int test_cpuset_perms_object_deny(const char *root)
{
return test_cpuset_perms_object(root, false);
}
/*
 * Migrate a process between parent and child implicitly.
 * Implicit migration happens when a controller is enabled/disabled.
*/
static int test_cpuset_perms_subtree(const char *root)
{
char *parent = NULL, *child = NULL;
char *parent_procs = NULL, *parent_subctl = NULL, *child_procs = NULL;
const uid_t test_euid = TEST_UID;
int object_pid = 0;
int ret = KSFT_FAIL;
parent = cg_name(root, "cpuset_test_0");
if (!parent)
goto cleanup;
parent_procs = cg_name(parent, "cgroup.procs");
if (!parent_procs)
goto cleanup;
parent_subctl = cg_name(parent, "cgroup.subtree_control");
if (!parent_subctl)
goto cleanup;
if (cg_create(parent))
goto cleanup;
child = cg_name(parent, "cpuset_test_1");
if (!child)
goto cleanup;
child_procs = cg_name(child, "cgroup.procs");
if (!child_procs)
goto cleanup;
if (cg_create(child))
goto cleanup;
/* Enable permissions as in a delegated subtree */
if (chown(parent_procs, test_euid, -1) ||
chown(parent_subctl, test_euid, -1) ||
chown(child_procs, test_euid, -1))
goto cleanup;
	/* Put a privileged child in the subtree and modify the controller
	 * state from an unprivileged process; the main process remains
	 * privileged for cleanup.
	 * The unprivileged child runs in the subtree too, to avoid violating
	 * the parent/internal-node constraint.
*/
object_pid = cg_run_nowait(child, idle_process_fn, NULL);
if (object_pid < 0)
goto cleanup;
if (cg_run(child, do_controller_fn, parent) != EXIT_SUCCESS)
goto cleanup;
ret = KSFT_PASS;
cleanup:
if (object_pid > 0) {
(void)kill(object_pid, SIGTERM);
(void)clone_reap(object_pid, WEXITED);
}
cg_destroy(child);
free(child_procs);
free(child);
cg_destroy(parent);
free(parent_subctl);
free(parent_procs);
free(parent);
return ret;
}
#define T(x) { x, #x }
struct cpuset_test {
int (*fn)(const char *root);
const char *name;
} tests[] = {
T(test_cpuset_perms_object_allow),
T(test_cpuset_perms_object_deny),
T(test_cpuset_perms_subtree),
};
#undef T
int main(int argc, char *argv[])
{
char root[PATH_MAX];
int i, ret = EXIT_SUCCESS;
if (cg_find_unified_root(root, sizeof(root)))
ksft_exit_skip("cgroup v2 isn't mounted\n");
if (cg_read_strstr(root, "cgroup.subtree_control", "cpuset"))
if (cg_write(root, "cgroup.subtree_control", "+cpuset"))
ksft_exit_skip("Failed to set cpuset controller\n");
for (i = 0; i < ARRAY_SIZE(tests); i++) {
switch (tests[i].fn(root)) {
case KSFT_PASS:
ksft_test_result_pass("%s\n", tests[i].name);
break;
case KSFT_SKIP:
ksft_test_result_skip("%s\n", tests[i].name);
break;
default:
ret = EXIT_FAILURE;
ksft_test_result_fail("%s\n", tests[i].name);
break;
}
}
return ret;
}
| linux-master | tools/testing/selftests/cgroup/test_cpuset.c |
/* SPDX-License-Identifier: GPL-2.0 */
#define _GNU_SOURCE
#include <linux/limits.h>
#include <linux/sched.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <unistd.h>
#include <fcntl.h>
#include <sched.h>
#include <stdio.h>
#include <errno.h>
#include <signal.h>
#include <string.h>
#include <pthread.h>
#include "../kselftest.h"
#include "cgroup_util.h"
static int touch_anon(char *buf, size_t size)
{
int fd;
char *pos = buf;
fd = open("/dev/urandom", O_RDONLY);
if (fd < 0)
return -1;
while (size > 0) {
ssize_t ret = read(fd, pos, size);
if (ret < 0) {
if (errno != EINTR) {
close(fd);
return -1;
}
} else {
pos += ret;
size -= ret;
}
}
close(fd);
return 0;
}
static int alloc_and_touch_anon_noexit(const char *cgroup, void *arg)
{
int ppid = getppid();
size_t size = (size_t)arg;
void *buf;
buf = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON,
0, 0);
if (buf == MAP_FAILED)
return -1;
if (touch_anon((char *)buf, size)) {
munmap(buf, size);
return -1;
}
while (getppid() == ppid)
sleep(1);
munmap(buf, size);
return 0;
}
/*
* Create a child process that allocates and touches 100MB, then waits to be
* killed. Wait until the child is attached to the cgroup, kill all processes
* in that cgroup and wait until "cgroup.procs" is empty. At this point try to
* destroy the empty cgroup. The test helps detect race conditions between
* dying processes leaving the cgroup and cgroup destruction path.
*/
static int test_cgcore_destroy(const char *root)
{
int ret = KSFT_FAIL;
char *cg_test = NULL;
int child_pid;
char buf[PAGE_SIZE];
cg_test = cg_name(root, "cg_test");
if (!cg_test)
goto cleanup;
for (int i = 0; i < 10; i++) {
if (cg_create(cg_test))
goto cleanup;
child_pid = cg_run_nowait(cg_test, alloc_and_touch_anon_noexit,
(void *) MB(100));
if (child_pid < 0)
goto cleanup;
/* wait for the child to enter cgroup */
if (cg_wait_for_proc_count(cg_test, 1))
goto cleanup;
if (cg_killall(cg_test))
goto cleanup;
/* wait for cgroup to be empty */
while (1) {
if (cg_read(cg_test, "cgroup.procs", buf, sizeof(buf)))
goto cleanup;
if (buf[0] == '\0')
break;
usleep(1000);
}
if (rmdir(cg_test))
goto cleanup;
if (waitpid(child_pid, NULL, 0) < 0)
goto cleanup;
}
ret = KSFT_PASS;
cleanup:
if (cg_test)
cg_destroy(cg_test);
free(cg_test);
return ret;
}
/*
* A(0) - B(0) - C(1)
 *        \ D(0)
*
* A, B and C's "populated" fields would be 1 while D's 0.
 * Test that after the one process in C is moved to root,
 * A, B and C's "populated" fields flip to "0" and file
 * modified events are generated on the "cgroup.events"
 * files of all three cgroups.
*/
static int test_cgcore_populated(const char *root)
{
int ret = KSFT_FAIL;
int err;
char *cg_test_a = NULL, *cg_test_b = NULL;
char *cg_test_c = NULL, *cg_test_d = NULL;
int cgroup_fd = -EBADF;
pid_t pid;
cg_test_a = cg_name(root, "cg_test_a");
cg_test_b = cg_name(root, "cg_test_a/cg_test_b");
cg_test_c = cg_name(root, "cg_test_a/cg_test_b/cg_test_c");
cg_test_d = cg_name(root, "cg_test_a/cg_test_b/cg_test_d");
if (!cg_test_a || !cg_test_b || !cg_test_c || !cg_test_d)
goto cleanup;
if (cg_create(cg_test_a))
goto cleanup;
if (cg_create(cg_test_b))
goto cleanup;
if (cg_create(cg_test_c))
goto cleanup;
if (cg_create(cg_test_d))
goto cleanup;
if (cg_enter_current(cg_test_c))
goto cleanup;
if (cg_read_strcmp(cg_test_a, "cgroup.events", "populated 1\n"))
goto cleanup;
if (cg_read_strcmp(cg_test_b, "cgroup.events", "populated 1\n"))
goto cleanup;
if (cg_read_strcmp(cg_test_c, "cgroup.events", "populated 1\n"))
goto cleanup;
if (cg_read_strcmp(cg_test_d, "cgroup.events", "populated 0\n"))
goto cleanup;
if (cg_enter_current(root))
goto cleanup;
if (cg_read_strcmp(cg_test_a, "cgroup.events", "populated 0\n"))
goto cleanup;
if (cg_read_strcmp(cg_test_b, "cgroup.events", "populated 0\n"))
goto cleanup;
if (cg_read_strcmp(cg_test_c, "cgroup.events", "populated 0\n"))
goto cleanup;
if (cg_read_strcmp(cg_test_d, "cgroup.events", "populated 0\n"))
goto cleanup;
/* Test that we can directly clone into a new cgroup. */
cgroup_fd = dirfd_open_opath(cg_test_d);
if (cgroup_fd < 0)
goto cleanup;
pid = clone_into_cgroup(cgroup_fd);
if (pid < 0) {
if (errno == ENOSYS)
goto cleanup_pass;
goto cleanup;
}
if (pid == 0) {
if (raise(SIGSTOP))
exit(EXIT_FAILURE);
exit(EXIT_SUCCESS);
}
err = cg_read_strcmp(cg_test_d, "cgroup.events", "populated 1\n");
(void)clone_reap(pid, WSTOPPED);
(void)kill(pid, SIGCONT);
(void)clone_reap(pid, WEXITED);
if (err)
goto cleanup;
if (cg_read_strcmp(cg_test_d, "cgroup.events", "populated 0\n"))
goto cleanup;
/* Remove cgroup. */
if (cg_test_d) {
cg_destroy(cg_test_d);
free(cg_test_d);
cg_test_d = NULL;
}
pid = clone_into_cgroup(cgroup_fd);
if (pid < 0)
goto cleanup_pass;
if (pid == 0)
exit(EXIT_SUCCESS);
(void)clone_reap(pid, WEXITED);
goto cleanup;
cleanup_pass:
ret = KSFT_PASS;
cleanup:
if (cg_test_d)
cg_destroy(cg_test_d);
if (cg_test_c)
cg_destroy(cg_test_c);
if (cg_test_b)
cg_destroy(cg_test_b);
if (cg_test_a)
cg_destroy(cg_test_a);
free(cg_test_d);
free(cg_test_c);
free(cg_test_b);
free(cg_test_a);
if (cgroup_fd >= 0)
close(cgroup_fd);
return ret;
}
/*
* A (domain threaded) - B (threaded) - C (domain)
*
 * Test that C can't be used until it is turned into a
 * threaded cgroup. Its "cgroup.type" file will report "domain invalid" in
* these cases. Operations which fail due to invalid topology use
* EOPNOTSUPP as the errno.
*/
static int test_cgcore_invalid_domain(const char *root)
{
int ret = KSFT_FAIL;
char *grandparent = NULL, *parent = NULL, *child = NULL;
grandparent = cg_name(root, "cg_test_grandparent");
parent = cg_name(root, "cg_test_grandparent/cg_test_parent");
child = cg_name(root, "cg_test_grandparent/cg_test_parent/cg_test_child");
if (!parent || !child || !grandparent)
goto cleanup;
if (cg_create(grandparent))
goto cleanup;
if (cg_create(parent))
goto cleanup;
if (cg_create(child))
goto cleanup;
if (cg_write(parent, "cgroup.type", "threaded"))
goto cleanup;
if (cg_read_strcmp(child, "cgroup.type", "domain invalid\n"))
goto cleanup;
if (!cg_enter_current(child))
goto cleanup;
if (errno != EOPNOTSUPP)
goto cleanup;
if (!clone_into_cgroup_run_wait(child))
goto cleanup;
if (errno == ENOSYS)
goto cleanup_pass;
if (errno != EOPNOTSUPP)
goto cleanup;
cleanup_pass:
ret = KSFT_PASS;
cleanup:
cg_enter_current(root);
if (child)
cg_destroy(child);
if (parent)
cg_destroy(parent);
if (grandparent)
cg_destroy(grandparent);
free(child);
free(parent);
free(grandparent);
return ret;
}
/*
* Test that when a child becomes threaded
* the parent type becomes domain threaded.
*/
static int test_cgcore_parent_becomes_threaded(const char *root)
{
int ret = KSFT_FAIL;
char *parent = NULL, *child = NULL;
parent = cg_name(root, "cg_test_parent");
child = cg_name(root, "cg_test_parent/cg_test_child");
if (!parent || !child)
goto cleanup;
if (cg_create(parent))
goto cleanup;
if (cg_create(child))
goto cleanup;
if (cg_write(child, "cgroup.type", "threaded"))
goto cleanup;
if (cg_read_strcmp(parent, "cgroup.type", "domain threaded\n"))
goto cleanup;
ret = KSFT_PASS;
cleanup:
if (child)
cg_destroy(child);
if (parent)
cg_destroy(parent);
free(child);
free(parent);
return ret;
}
/*
* Test that there's no internal process constrain on threaded cgroups.
* You can add threads/processes on a parent with a controller enabled.
*/
static int test_cgcore_no_internal_process_constraint_on_threads(const char *root)
{
int ret = KSFT_FAIL;
char *parent = NULL, *child = NULL;
if (cg_read_strstr(root, "cgroup.controllers", "cpu") ||
cg_write(root, "cgroup.subtree_control", "+cpu")) {
ret = KSFT_SKIP;
goto cleanup;
}
parent = cg_name(root, "cg_test_parent");
child = cg_name(root, "cg_test_parent/cg_test_child");
if (!parent || !child)
goto cleanup;
if (cg_create(parent))
goto cleanup;
if (cg_create(child))
goto cleanup;
if (cg_write(parent, "cgroup.type", "threaded"))
goto cleanup;
if (cg_write(child, "cgroup.type", "threaded"))
goto cleanup;
if (cg_write(parent, "cgroup.subtree_control", "+cpu"))
goto cleanup;
if (cg_enter_current(parent))
goto cleanup;
ret = KSFT_PASS;
cleanup:
cg_enter_current(root);
cg_enter_current(root);
if (child)
cg_destroy(child);
if (parent)
cg_destroy(parent);
free(child);
free(parent);
return ret;
}
/*
* Test that you can't enable a controller on a child if it's not enabled
* on the parent.
*/
static int test_cgcore_top_down_constraint_enable(const char *root)
{
int ret = KSFT_FAIL;
char *parent = NULL, *child = NULL;
parent = cg_name(root, "cg_test_parent");
child = cg_name(root, "cg_test_parent/cg_test_child");
if (!parent || !child)
goto cleanup;
if (cg_create(parent))
goto cleanup;
if (cg_create(child))
goto cleanup;
if (!cg_write(child, "cgroup.subtree_control", "+memory"))
goto cleanup;
ret = KSFT_PASS;
cleanup:
if (child)
cg_destroy(child);
if (parent)
cg_destroy(parent);
free(child);
free(parent);
return ret;
}
/*
* Test that you can't disable a controller on a parent
* if it's enabled in a child.
*/
static int test_cgcore_top_down_constraint_disable(const char *root)
{
int ret = KSFT_FAIL;
char *parent = NULL, *child = NULL;
parent = cg_name(root, "cg_test_parent");
child = cg_name(root, "cg_test_parent/cg_test_child");
if (!parent || !child)
goto cleanup;
if (cg_create(parent))
goto cleanup;
if (cg_create(child))
goto cleanup;
if (cg_write(parent, "cgroup.subtree_control", "+memory"))
goto cleanup;
if (cg_write(child, "cgroup.subtree_control", "+memory"))
goto cleanup;
if (!cg_write(parent, "cgroup.subtree_control", "-memory"))
goto cleanup;
ret = KSFT_PASS;
cleanup:
if (child)
cg_destroy(child);
if (parent)
cg_destroy(parent);
free(child);
free(parent);
return ret;
}
/*
* Test internal process constraint.
* You can't add a pid to a domain parent if a controller is enabled.
*/
static int test_cgcore_internal_process_constraint(const char *root)
{
int ret = KSFT_FAIL;
char *parent = NULL, *child = NULL;
parent = cg_name(root, "cg_test_parent");
child = cg_name(root, "cg_test_parent/cg_test_child");
if (!parent || !child)
goto cleanup;
if (cg_create(parent))
goto cleanup;
if (cg_create(child))
goto cleanup;
if (cg_write(parent, "cgroup.subtree_control", "+memory"))
goto cleanup;
if (!cg_enter_current(parent))
goto cleanup;
if (!clone_into_cgroup_run_wait(parent))
goto cleanup;
ret = KSFT_PASS;
cleanup:
if (child)
cg_destroy(child);
if (parent)
cg_destroy(parent);
free(child);
free(parent);
return ret;
}
static void *dummy_thread_fn(void *arg)
{
return (void *)(size_t)pause();
}
/*
* Test threadgroup migration.
* All threads of a process are migrated together.
*/
static int test_cgcore_proc_migration(const char *root)
{
int ret = KSFT_FAIL;
int t, c_threads = 0, n_threads = 13;
char *src = NULL, *dst = NULL;
pthread_t threads[n_threads];
src = cg_name(root, "cg_src");
dst = cg_name(root, "cg_dst");
if (!src || !dst)
goto cleanup;
if (cg_create(src))
goto cleanup;
if (cg_create(dst))
goto cleanup;
if (cg_enter_current(src))
goto cleanup;
for (c_threads = 0; c_threads < n_threads; ++c_threads) {
if (pthread_create(&threads[c_threads], NULL, dummy_thread_fn, NULL))
goto cleanup;
}
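/* Migrating the group leader moves every thread: expect the 13 workers plus the main thread in dst. */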
cg_enter_current(dst);
if (cg_read_lc(dst, "cgroup.threads") != n_threads + 1)
goto cleanup;
ret = KSFT_PASS;
cleanup:
for (t = 0; t < c_threads; ++t) {
pthread_cancel(threads[t]);
}
for (t = 0; t < c_threads; ++t) {
pthread_join(threads[t], NULL);
}
cg_enter_current(root);
if (dst)
cg_destroy(dst);
if (src)
cg_destroy(src);
free(dst);
free(src);
return ret;
}
static void *migrating_thread_fn(void *arg)
{
int g, i, n_iterations = 1000;
char **grps = arg;
char lines[3][PATH_MAX];
for (g = 1; g < 3; ++g)
snprintf(lines[g], sizeof(lines[g]), "0::%s", grps[g] + strlen(grps[0]));
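/* Bounce this thread between the two threaded groups, verifying its cgroup entry in procfs after every move. */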
for (i = 0; i < n_iterations; ++i) {
cg_enter_current_thread(grps[(i % 2) + 1]);
if (proc_read_strstr(0, 1, "cgroup", lines[(i % 2) + 1]))
return (void *)-1;
}
return NULL;
}
/*
* Test single thread migration.
* Threaded cgroups allow successful migration of a thread.
*/
static int test_cgcore_thread_migration(const char *root)
{
int ret = KSFT_FAIL;
char *dom = NULL;
char line[PATH_MAX];
char *grps[3] = { (char *)root, NULL, NULL };
pthread_t thr;
void *retval;
dom = cg_name(root, "cg_dom");
grps[1] = cg_name(root, "cg_dom/cg_src");
grps[2] = cg_name(root, "cg_dom/cg_dst");
if (!grps[1] || !grps[2] || !dom)
goto cleanup;
if (cg_create(dom))
goto cleanup;
if (cg_create(grps[1]))
goto cleanup;
if (cg_create(grps[2]))
goto cleanup;
if (cg_write(grps[1], "cgroup.type", "threaded"))
goto cleanup;
if (cg_write(grps[2], "cgroup.type", "threaded"))
goto cleanup;
if (cg_enter_current(grps[1]))
goto cleanup;
if (pthread_create(&thr, NULL, migrating_thread_fn, grps))
goto cleanup;
if (pthread_join(thr, &retval))
goto cleanup;
if (retval)
goto cleanup;
snprintf(line, sizeof(line), "0::%s", grps[1] + strlen(grps[0]));
if (proc_read_strstr(0, 1, "cgroup", line))
goto cleanup;
ret = KSFT_PASS;
cleanup:
cg_enter_current(root);
if (grps[2])
cg_destroy(grps[2]);
if (grps[1])
cg_destroy(grps[1]);
if (dom)
cg_destroy(dom);
free(grps[2]);
free(grps[1]);
free(dom);
return ret;
}
/*
* cgroup migration permission check should be performed based on the
* credentials at the time of open instead of write.
*/
static int test_cgcore_lesser_euid_open(const char *root)
{
const uid_t test_euid = TEST_UID;
int ret = KSFT_FAIL;
char *cg_test_a = NULL, *cg_test_b = NULL;
char *cg_test_a_procs = NULL, *cg_test_b_procs = NULL;
int cg_test_b_procs_fd = -1;
uid_t saved_uid;
cg_test_a = cg_name(root, "cg_test_a");
cg_test_b = cg_name(root, "cg_test_b");
if (!cg_test_a || !cg_test_b)
goto cleanup;
cg_test_a_procs = cg_name(cg_test_a, "cgroup.procs");
cg_test_b_procs = cg_name(cg_test_b, "cgroup.procs");
if (!cg_test_a_procs || !cg_test_b_procs)
goto cleanup;
if (cg_create(cg_test_a) || cg_create(cg_test_b))
goto cleanup;
if (cg_enter_current(cg_test_a))
goto cleanup;
if (chown(cg_test_a_procs, test_euid, -1) ||
chown(cg_test_b_procs, test_euid, -1))
goto cleanup;
saved_uid = geteuid();
if (seteuid(test_euid))
goto cleanup;
cg_test_b_procs_fd = open(cg_test_b_procs, O_RDWR);
if (seteuid(saved_uid))
goto cleanup;
if (cg_test_b_procs_fd < 0)
goto cleanup;
if (write(cg_test_b_procs_fd, "0", 1) >= 0 || errno != EACCES)
goto cleanup;
ret = KSFT_PASS;
cleanup:
cg_enter_current(root);
if (cg_test_b_procs_fd >= 0)
close(cg_test_b_procs_fd);
if (cg_test_b)
cg_destroy(cg_test_b);
if (cg_test_a)
cg_destroy(cg_test_a);
free(cg_test_b_procs);
free(cg_test_a_procs);
free(cg_test_b);
free(cg_test_a);
return ret;
}
struct lesser_ns_open_thread_arg {
const char *path;
int fd;
int err;
};
static int lesser_ns_open_thread_fn(void *arg)
{
struct lesser_ns_open_thread_arg *targ = arg;
targ->fd = open(targ->path, O_RDWR);
targ->err = errno;
return 0;
}
/*
* cgroup migration permission check should be performed based on the cgroup
* namespace at the time of open instead of write.
*/
static int test_cgcore_lesser_ns_open(const char *root)
{
static char stack[65536];
const uid_t test_euid = 65534; /* usually nobody, any !root is fine */
int ret = KSFT_FAIL;
char *cg_test_a = NULL, *cg_test_b = NULL;
char *cg_test_a_procs = NULL, *cg_test_b_procs = NULL;
int cg_test_b_procs_fd = -1;
struct lesser_ns_open_thread_arg targ = { .fd = -1 };
pid_t pid;
int status;
cg_test_a = cg_name(root, "cg_test_a");
cg_test_b = cg_name(root, "cg_test_b");
if (!cg_test_a || !cg_test_b)
goto cleanup;
cg_test_a_procs = cg_name(cg_test_a, "cgroup.procs");
cg_test_b_procs = cg_name(cg_test_b, "cgroup.procs");
if (!cg_test_a_procs || !cg_test_b_procs)
goto cleanup;
if (cg_create(cg_test_a) || cg_create(cg_test_b))
goto cleanup;
if (cg_enter_current(cg_test_b))
goto cleanup;
if (chown(cg_test_a_procs, test_euid, -1) ||
chown(cg_test_b_procs, test_euid, -1))
goto cleanup;
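/* Open B's cgroup.procs from inside a fresh cgroup namespace; CLONE_FILES shares the resulting fd back with us. */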
targ.path = cg_test_b_procs;
pid = clone(lesser_ns_open_thread_fn, stack + sizeof(stack),
CLONE_NEWCGROUP | CLONE_FILES | CLONE_VM | SIGCHLD,
&targ);
if (pid < 0)
goto cleanup;
if (waitpid(pid, &status, 0) < 0)
goto cleanup;
if (!WIFEXITED(status))
goto cleanup;
cg_test_b_procs_fd = targ.fd;
if (cg_test_b_procs_fd < 0)
goto cleanup;
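/*
* After moving to cg_test_a, writing through the fd opened inside the
* child's cgroup namespace must fail with ENOENT: the namespace captured
* at open time can no longer resolve the migration.
*/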
if (cg_enter_current(cg_test_a))
goto cleanup;
if ((status = write(cg_test_b_procs_fd, "0", 1)) >= 0 || errno != ENOENT)
goto cleanup;
ret = KSFT_PASS;
cleanup:
cg_enter_current(root);
if (cg_test_b_procs_fd >= 0)
close(cg_test_b_procs_fd);
if (cg_test_b)
cg_destroy(cg_test_b);
if (cg_test_a)
cg_destroy(cg_test_a);
free(cg_test_b_procs);
free(cg_test_a_procs);
free(cg_test_b);
free(cg_test_a);
return ret;
}
#define T(x) { x, #x }
struct corecg_test {
int (*fn)(const char *root);
const char *name;
} tests[] = {
T(test_cgcore_internal_process_constraint),
T(test_cgcore_top_down_constraint_enable),
T(test_cgcore_top_down_constraint_disable),
T(test_cgcore_no_internal_process_constraint_on_threads),
T(test_cgcore_parent_becomes_threaded),
T(test_cgcore_invalid_domain),
T(test_cgcore_populated),
T(test_cgcore_proc_migration),
T(test_cgcore_thread_migration),
T(test_cgcore_destroy),
T(test_cgcore_lesser_euid_open),
T(test_cgcore_lesser_ns_open),
};
#undef T
int main(int argc, char *argv[])
{
char root[PATH_MAX];
int i, ret = EXIT_SUCCESS;
if (cg_find_unified_root(root, sizeof(root)))
ksft_exit_skip("cgroup v2 isn't mounted\n");
if (cg_read_strstr(root, "cgroup.subtree_control", "memory"))
if (cg_write(root, "cgroup.subtree_control", "+memory"))
ksft_exit_skip("Failed to set memory controller\n");
for (i = 0; i < ARRAY_SIZE(tests); i++) {
switch (tests[i].fn(root)) {
case KSFT_PASS:
ksft_test_result_pass("%s\n", tests[i].name);
break;
case KSFT_SKIP:
ksft_test_result_skip("%s\n", tests[i].name);
break;
default:
ret = EXIT_FAILURE;
ksft_test_result_fail("%s\n", tests[i].name);
break;
}
}
return ret;
}
| linux-master | tools/testing/selftests/cgroup/test_core.c |
/* SPDX-License-Identifier: GPL-2.0 */
#include <errno.h>
#include <linux/limits.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <unistd.h>
#include "../kselftest.h"
#include "../pidfd/pidfd.h"
#include "cgroup_util.h"
/*
* Kill the given cgroup and wait for the inotify signal.
* If there are no events in 10 seconds, treat this as an error.
* Then check that the cgroup is in the desired state.
*/
static int cg_kill_wait(const char *cgroup)
{
int fd, ret = -1;
fd = cg_prepare_for_wait(cgroup);
if (fd < 0)
return fd;
ret = cg_write(cgroup, "cgroup.kill", "1");
if (ret)
goto out;
ret = cg_wait_for(fd);
if (ret)
goto out;
out:
close(fd);
return ret;
}
/*
* A simple process running in a sleep loop until being
* re-parented.
*/
static int child_fn(const char *cgroup, void *arg)
{
int ppid = getppid();
while (getppid() == ppid)
usleep(1000);
return getppid() == ppid;
}
static int test_cgkill_simple(const char *root)
{
pid_t pids[100];
int ret = KSFT_FAIL;
char *cgroup = NULL;
int i;
cgroup = cg_name(root, "cg_test_simple");
if (!cgroup)
goto cleanup;
if (cg_create(cgroup))
goto cleanup;
for (i = 0; i < 100; i++)
pids[i] = cg_run_nowait(cgroup, child_fn, NULL);
if (cg_wait_for_proc_count(cgroup, 100))
goto cleanup;
if (cg_read_strcmp(cgroup, "cgroup.events", "populated 1\n"))
goto cleanup;
if (cg_kill_wait(cgroup))
goto cleanup;
ret = KSFT_PASS;
cleanup:
for (i = 0; i < 100; i++)
wait_for_pid(pids[i]);
if (ret == KSFT_PASS &&
cg_read_strcmp(cgroup, "cgroup.events", "populated 0\n"))
ret = KSFT_FAIL;
if (cgroup)
cg_destroy(cgroup);
free(cgroup);
return ret;
}
/*
* The test creates the following hierarchy:
* A
* / / \ \
* B E I K
* /\ |
* C D F
* |
* G
* |
* H
*
* with a process in C, H and 3 processes in K.
* Then it tries to kill the whole tree.
*/
static int test_cgkill_tree(const char *root)
{
pid_t pids[5];
char *cgroup[10] = {0};
int ret = KSFT_FAIL;
int i;
cgroup[0] = cg_name(root, "cg_test_tree_A");
if (!cgroup[0])
goto cleanup;
cgroup[1] = cg_name(cgroup[0], "B");
if (!cgroup[1])
goto cleanup;
cgroup[2] = cg_name(cgroup[1], "C");
if (!cgroup[2])
goto cleanup;
cgroup[3] = cg_name(cgroup[1], "D");
if (!cgroup[3])
goto cleanup;
cgroup[4] = cg_name(cgroup[0], "E");
if (!cgroup[4])
goto cleanup;
cgroup[5] = cg_name(cgroup[4], "F");
if (!cgroup[5])
goto cleanup;
cgroup[6] = cg_name(cgroup[5], "G");
if (!cgroup[6])
goto cleanup;
cgroup[7] = cg_name(cgroup[6], "H");
if (!cgroup[7])
goto cleanup;
cgroup[8] = cg_name(cgroup[0], "I");
if (!cgroup[8])
goto cleanup;
cgroup[9] = cg_name(cgroup[0], "K");
if (!cgroup[9])
goto cleanup;
for (i = 0; i < 10; i++)
if (cg_create(cgroup[i]))
goto cleanup;
pids[0] = cg_run_nowait(cgroup[2], child_fn, NULL);
pids[1] = cg_run_nowait(cgroup[7], child_fn, NULL);
pids[2] = cg_run_nowait(cgroup[9], child_fn, NULL);
pids[3] = cg_run_nowait(cgroup[9], child_fn, NULL);
pids[4] = cg_run_nowait(cgroup[9], child_fn, NULL);
/*
* Wait until all child processes have entered
* their corresponding cgroups.
*/
if (cg_wait_for_proc_count(cgroup[2], 1) ||
cg_wait_for_proc_count(cgroup[7], 1) ||
cg_wait_for_proc_count(cgroup[9], 3))
goto cleanup;
/*
* Kill A and check that we get an empty notification.
*/
if (cg_kill_wait(cgroup[0]))
goto cleanup;
ret = KSFT_PASS;
cleanup:
for (i = 0; i < 5; i++)
wait_for_pid(pids[i]);
if (ret == KSFT_PASS &&
cg_read_strcmp(cgroup[0], "cgroup.events", "populated 0\n"))
ret = KSFT_FAIL;
for (i = 9; i >= 0 && cgroup[i]; i--) {
cg_destroy(cgroup[i]);
free(cgroup[i]);
}
return ret;
}
static int forkbomb_fn(const char *cgroup, void *arg)
{
int ppid;
fork();
fork();
ppid = getppid();
while (getppid() == ppid)
usleep(1000);
return getppid() == ppid;
}
/*
* The test runs a fork bomb in a cgroup and tries to kill it.
*/
static int test_cgkill_forkbomb(const char *root)
{
int ret = KSFT_FAIL;
char *cgroup = NULL;
pid_t pid = -ESRCH;
cgroup = cg_name(root, "cg_forkbomb_test");
if (!cgroup)
goto cleanup;
if (cg_create(cgroup))
goto cleanup;
pid = cg_run_nowait(cgroup, forkbomb_fn, NULL);
if (pid < 0)
goto cleanup;
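/* Give the fork bomb a moment to spawn a few generations. */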
usleep(100000);
if (cg_kill_wait(cgroup))
goto cleanup;
if (cg_wait_for_proc_count(cgroup, 0))
goto cleanup;
ret = KSFT_PASS;
cleanup:
if (pid > 0)
wait_for_pid(pid);
if (ret == KSFT_PASS &&
cg_read_strcmp(cgroup, "cgroup.events", "populated 0\n"))
ret = KSFT_FAIL;
if (cgroup)
cg_destroy(cgroup);
free(cgroup);
return ret;
}
#define T(x) { x, #x }
struct cgkill_test {
int (*fn)(const char *root);
const char *name;
} tests[] = {
T(test_cgkill_simple),
T(test_cgkill_tree),
T(test_cgkill_forkbomb),
};
#undef T
int main(int argc, char *argv[])
{
char root[PATH_MAX];
int i, ret = EXIT_SUCCESS;
if (cg_find_unified_root(root, sizeof(root)))
ksft_exit_skip("cgroup v2 isn't mounted\n");
for (i = 0; i < ARRAY_SIZE(tests); i++) {
switch (tests[i].fn(root)) {
case KSFT_PASS:
ksft_test_result_pass("%s\n", tests[i].name);
break;
case KSFT_SKIP:
ksft_test_result_skip("%s\n", tests[i].name);
break;
default:
ret = EXIT_FAILURE;
ksft_test_result_fail("%s\n", tests[i].name);
break;
}
}
return ret;
}
| linux-master | tools/testing/selftests/cgroup/test_kill.c |
// SPDX-License-Identifier: GPL-2.0
#define _GNU_SOURCE
#include <fcntl.h>
#include <assert.h>
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include "../kselftest.h"
static int lock_set(int fd, struct flock *fl)
{
int ret;
fl->l_pid = 0; // needed for OFD locks
fl->l_whence = SEEK_SET;
ret = fcntl(fd, F_OFD_SETLK, fl);
if (ret)
perror("fcntl()");
return ret;
}
static int lock_get(int fd, struct flock *fl)
{
int ret;
fl->l_pid = 0; // needed for OFD locks
fl->l_whence = SEEK_SET;
ret = fcntl(fd, F_OFD_GETLK, fl);
if (ret)
perror("fcntl()");
return ret;
}
int main(void)
{
int rc;
struct flock fl, fl2;
int fd = open("/tmp/aa", O_RDWR | O_CREAT | O_EXCL, 0600);
int fd2 = open("/tmp/aa", O_RDONLY);
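/* Unlink the name immediately; both open fds keep the inode alive and nothing is left behind in /tmp. */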
unlink("/tmp/aa");
assert(fd != -1);
assert(fd2 != -1);
ksft_print_msg("[INFO] opened fds %i %i\n", fd, fd2);
/* Set some read lock */
fl.l_type = F_RDLCK;
fl.l_start = 5;
fl.l_len = 3;
rc = lock_set(fd, &fl);
if (rc == 0) {
ksft_print_msg
("[SUCCESS] set OFD read lock on first fd\n");
} else {
ksft_print_msg("[FAIL] to set OFD read lock on first fd\n");
return -1;
}
/* Make sure read locks do not conflict on different fds. */
fl.l_type = F_RDLCK;
fl.l_start = 5;
fl.l_len = 1;
rc = lock_get(fd2, &fl);
if (rc != 0)
return -1;
if (fl.l_type != F_UNLCK) {
ksft_print_msg("[FAIL] read locks conflicted\n");
return -1;
}
/* Make sure read/write locks do conflict on different fds. */
fl.l_type = F_WRLCK;
fl.l_start = 5;
fl.l_len = 1;
rc = lock_get(fd2, &fl);
if (rc != 0)
return -1;
if (fl.l_type != F_UNLCK) {
ksft_print_msg
("[SUCCESS] read and write locks conflicted\n");
} else {
ksft_print_msg
("[FAIL] read and write locks did not conflict\n");
return -1;
}
/* Get info about the lock on first fd. */
fl.l_type = F_UNLCK;
fl.l_start = 5;
fl.l_len = 1;
rc = lock_get(fd, &fl);
if (rc != 0) {
ksft_print_msg
("[FAIL] F_OFD_GETLK with F_UNLCK not supported\n");
return -1;
}
if (fl.l_type != F_UNLCK) {
ksft_print_msg
("[SUCCESS] F_UNLCK test returns: locked, type %i pid %i len %zi\n",
fl.l_type, fl.l_pid, fl.l_len);
} else {
ksft_print_msg
("[FAIL] F_OFD_GETLK with F_UNLCK did not return lock info\n");
return -1;
}
/* Try the same but by locking everything by len==0. */
fl2.l_type = F_UNLCK;
fl2.l_start = 0;
fl2.l_len = 0;
rc = lock_get(fd, &fl2);
if (rc != 0) {
ksft_print_msg
("[FAIL] F_OFD_GETLK with F_UNLCK not supported\n");
return -1;
}
if (memcmp(&fl, &fl2, sizeof(fl))) {
ksft_print_msg
("[FAIL] F_UNLCK with len==0 returned different lock info: type %i pid %i len %zi\n",
fl2.l_type, fl2.l_pid, fl2.l_len);
return -1;
}
ksft_print_msg("[SUCCESS] F_UNLCK with len==0 returned the same\n");
/* Get info about the lock on second fd - no locks on it. */
fl.l_type = F_UNLCK;
fl.l_start = 0;
fl.l_len = 0;
lock_get(fd2, &fl);
if (fl.l_type != F_UNLCK) {
ksft_print_msg
("[FAIL] F_OFD_GETLK with F_UNLCK return lock info from another fd\n");
return -1;
}
return 0;
}
| linux-master | tools/testing/selftests/filelock/ofdlocks.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* kselftest suite for mincore().
*
* Copyright (C) 2020 Collabora, Ltd.
*/
#define _GNU_SOURCE
#include <stdio.h>
#include <errno.h>
#include <unistd.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <string.h>
#include <fcntl.h>
#include "../kselftest.h"
#include "../kselftest_harness.h"
/* Default test file size: 4MB */
#define MB (1UL << 20)
#define FILE_SIZE (4 * MB)
/*
* Tests the user interface. This test triggers most of the documented
* error conditions in mincore().
*/
TEST(basic_interface)
{
int retval;
int page_size;
unsigned char vec[1];
char *addr;
page_size = sysconf(_SC_PAGESIZE);
/* Query a 0 byte sized range */
retval = mincore(0, 0, vec);
EXPECT_EQ(0, retval);
/* Addresses in the specified range are invalid or unmapped */
errno = 0;
retval = mincore(NULL, page_size, vec);
EXPECT_EQ(-1, retval);
EXPECT_EQ(ENOMEM, errno);
errno = 0;
addr = mmap(NULL, page_size, PROT_READ | PROT_WRITE,
MAP_SHARED | MAP_ANONYMOUS, -1, 0);
ASSERT_NE(MAP_FAILED, addr) {
TH_LOG("mmap error: %s", strerror(errno));
}
/* <addr> argument is not page-aligned */
errno = 0;
retval = mincore(addr + 1, page_size, vec);
EXPECT_EQ(-1, retval);
EXPECT_EQ(EINVAL, errno);
/* <length> argument is too large */
errno = 0;
retval = mincore(addr, -1, vec);
EXPECT_EQ(-1, retval);
EXPECT_EQ(ENOMEM, errno);
/* <vec> argument points to an illegal address */
errno = 0;
retval = mincore(addr, page_size, NULL);
EXPECT_EQ(-1, retval);
EXPECT_EQ(EFAULT, errno);
munmap(addr, page_size);
}
/*
* Test mincore() behavior on a private anonymous page mapping.
* Check that the page is not loaded into memory right after the mapping
* but after accessing it (on-demand allocation).
* Then free the page and check that it's not memory-resident.
*/
TEST(check_anonymous_locked_pages)
{
unsigned char vec[1];
char *addr;
int retval;
int page_size;
page_size = sysconf(_SC_PAGESIZE);
/* Map one page and check it's not memory-resident */
errno = 0;
addr = mmap(NULL, page_size, PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
ASSERT_NE(MAP_FAILED, addr) {
TH_LOG("mmap error: %s", strerror(errno));
}
retval = mincore(addr, page_size, vec);
ASSERT_EQ(0, retval);
ASSERT_EQ(0, vec[0]) {
TH_LOG("Page found in memory before use");
}
/* Touch the page and check again. It should now be in memory */
addr[0] = 1;
mlock(addr, page_size);
retval = mincore(addr, page_size, vec);
ASSERT_EQ(0, retval);
ASSERT_EQ(1, vec[0]) {
TH_LOG("Page not found in memory after use");
}
/*
* It shouldn't be memory-resident after unlocking it and
* marking it as unneeded.
*/
munlock(addr, page_size);
madvise(addr, page_size, MADV_DONTNEED);
retval = mincore(addr, page_size, vec);
ASSERT_EQ(0, retval);
ASSERT_EQ(0, vec[0]) {
TH_LOG("Page in memory after being zapped");
}
munmap(addr, page_size);
}
/*
* Check mincore() behavior on huge pages.
* This test will be skipped if the mapping fails (ie. if there are no
* huge pages available).
*
* Make sure the system has at least one free huge page, check
* "HugePages_Free" in /proc/meminfo.
* Increment /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages if
* needed.
*/
TEST(check_huge_pages)
{
unsigned char vec[1];
char *addr;
int retval;
int page_size;
page_size = sysconf(_SC_PAGESIZE);
errno = 0;
addr = mmap(NULL, page_size, PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB,
-1, 0);
if (addr == MAP_FAILED) {
if (errno == ENOMEM || errno == EINVAL)
SKIP(return, "No huge pages available or CONFIG_HUGETLB_PAGE disabled.");
/* Any other mmap() failure is fatal; don't fall through with MAP_FAILED. */
ASSERT_NE(MAP_FAILED, addr) {
TH_LOG("mmap error: %s", strerror(errno));
}
}
retval = mincore(addr, page_size, vec);
ASSERT_EQ(0, retval);
ASSERT_EQ(0, vec[0]) {
TH_LOG("Page found in memory before use");
}
addr[0] = 1;
mlock(addr, page_size);
retval = mincore(addr, page_size, vec);
ASSERT_EQ(0, retval);
ASSERT_EQ(1, vec[0]) {
TH_LOG("Page not found in memory after use");
}
munlock(addr, page_size);
munmap(addr, page_size);
}
/*
* Test mincore() behavior on a file-backed page.
* No pages should be loaded into memory right after the mapping. Then,
* accessing any address in the mapping range should load the page
* containing the address and a number of subsequent pages (readahead).
*
* The actual readahead settings depend on the test environment, so we
* can't make a lot of assumptions about that. This test covers the most
* general cases.
*/
TEST(check_file_mmap)
{
unsigned char *vec;
int vec_size;
char *addr;
int retval;
int page_size;
int fd;
int i;
int ra_pages = 0;
page_size = sysconf(_SC_PAGESIZE);
vec_size = FILE_SIZE / page_size;
if (FILE_SIZE % page_size)
vec_size++;
vec = calloc(vec_size, sizeof(unsigned char));
ASSERT_NE(NULL, vec) {
TH_LOG("Can't allocate array");
}
errno = 0;
fd = open(".", O_TMPFILE | O_RDWR, 0600);
if (fd < 0) {
ASSERT_EQ(errno, EOPNOTSUPP) {
TH_LOG("Can't create temporary file: %s",
strerror(errno));
}
SKIP(goto out_free, "O_TMPFILE not supported by filesystem.");
}
errno = 0;
retval = fallocate(fd, 0, 0, FILE_SIZE);
if (retval) {
ASSERT_EQ(errno, EOPNOTSUPP) {
TH_LOG("Error allocating space for the temporary file: %s",
strerror(errno));
}
SKIP(goto out_close, "fallocate not supported by filesystem.");
}
/*
* Map the whole file, the pages shouldn't be fetched yet.
*/
errno = 0;
addr = mmap(NULL, FILE_SIZE, PROT_READ | PROT_WRITE,
MAP_SHARED, fd, 0);
ASSERT_NE(MAP_FAILED, addr) {
TH_LOG("mmap error: %s", strerror(errno));
}
retval = mincore(addr, FILE_SIZE, vec);
ASSERT_EQ(0, retval);
for (i = 0; i < vec_size; i++) {
ASSERT_EQ(0, vec[i]) {
TH_LOG("Unexpected page in memory");
}
}
/*
* Touch a page in the middle of the mapping. We expect the next
* few pages (the readahead window) to be populated too.
*/
addr[FILE_SIZE / 2] = 1;
retval = mincore(addr, FILE_SIZE, vec);
ASSERT_EQ(0, retval);
ASSERT_EQ(1, vec[FILE_SIZE / 2 / page_size]) {
TH_LOG("Page not found in memory after use");
}
i = FILE_SIZE / 2 / page_size + 1;
while (i < vec_size && vec[i]) {
ra_pages++;
i++;
}
EXPECT_GT(ra_pages, 0) {
TH_LOG("No read-ahead pages found in memory");
}
EXPECT_LT(i, vec_size) {
TH_LOG("Read-ahead pages reached the end of the file");
}
/*
* End of the readahead window. The rest of the pages shouldn't
* be in memory.
*/
if (i < vec_size) {
while (i < vec_size && !vec[i])
i++;
EXPECT_EQ(vec_size, i) {
TH_LOG("Unexpected page in memory beyond readahead window");
}
}
munmap(addr, FILE_SIZE);
out_close:
close(fd);
out_free:
free(vec);
}
/*
* Test mincore() behavior on a page backed by a tmpfs file. This test
* performs the same steps as the previous one. However, we don't expect
* any readahead in this case.
*/
TEST(check_tmpfs_mmap)
{
unsigned char *vec;
int vec_size;
char *addr;
int retval;
int page_size;
int fd;
int i;
int ra_pages = 0;
page_size = sysconf(_SC_PAGESIZE);
vec_size = FILE_SIZE / page_size;
if (FILE_SIZE % page_size)
vec_size++;
vec = calloc(vec_size, sizeof(unsigned char));
ASSERT_NE(NULL, vec) {
TH_LOG("Can't allocate array");
}
errno = 0;
fd = open("/dev/shm", O_TMPFILE | O_RDWR, 0600);
ASSERT_NE(-1, fd) {
TH_LOG("Can't create temporary file: %s",
strerror(errno));
}
errno = 0;
retval = fallocate(fd, 0, 0, FILE_SIZE);
ASSERT_EQ(0, retval) {
TH_LOG("Error allocating space for the temporary file: %s",
strerror(errno));
}
/*
* Map the whole file, the pages shouldn't be fetched yet.
*/
errno = 0;
addr = mmap(NULL, FILE_SIZE, PROT_READ | PROT_WRITE,
MAP_SHARED, fd, 0);
ASSERT_NE(MAP_FAILED, addr) {
TH_LOG("mmap error: %s", strerror(errno));
}
retval = mincore(addr, FILE_SIZE, vec);
ASSERT_EQ(0, retval);
for (i = 0; i < vec_size; i++) {
ASSERT_EQ(0, vec[i]) {
TH_LOG("Unexpected page in memory");
}
}
/*
* Touch a page in the middle of the mapping. We expect only
* that page to be fetched into memory.
*/
addr[FILE_SIZE / 2] = 1;
retval = mincore(addr, FILE_SIZE, vec);
ASSERT_EQ(0, retval);
ASSERT_EQ(1, vec[FILE_SIZE / 2 / page_size]) {
TH_LOG("Page not found in memory after use");
}
i = FILE_SIZE / 2 / page_size + 1;
while (i < vec_size && vec[i]) {
ra_pages++;
i++;
}
ASSERT_EQ(ra_pages, 0) {
TH_LOG("Read-ahead pages found in memory");
}
munmap(addr, FILE_SIZE);
close(fd);
free(vec);
}
TEST_HARNESS_MAIN
| linux-master | tools/testing/selftests/mincore/mincore_selftest.c |
// SPDX-License-Identifier: GPL-2.0
#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <signal.h>
#include <limits.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>
#include <fcntl.h>
#include <linux/unistd.h>
#include <linux/kcmp.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/wait.h>
#include <sys/epoll.h>
#include "../kselftest.h"
static long sys_kcmp(int pid1, int pid2, int type, unsigned long fd1, unsigned long fd2)
{
return syscall(__NR_kcmp, pid1, pid2, type, fd1, fd2);
}
static const unsigned int duped_num = 64;
int main(int argc, char **argv)
{
const char kpath[] = "kcmp-test-file";
struct kcmp_epoll_slot epoll_slot;
struct epoll_event ev;
int pid1, pid2;
int pipefd[2];
int fd1, fd2;
int epollfd;
int status;
int fddup;
fd1 = open(kpath, O_RDWR | O_CREAT | O_TRUNC, 0644);
pid1 = getpid();
if (fd1 < 0) {
perror("Can't create file");
ksft_exit_fail();
}
if (pipe(pipefd)) {
perror("Can't create pipe");
ksft_exit_fail();
}
epollfd = epoll_create1(0);
if (epollfd < 0) {
perror("epoll_create1 failed");
ksft_exit_fail();
}
memset(&ev, 0xff, sizeof(ev));
ev.events = EPOLLIN | EPOLLOUT;
if (epoll_ctl(epollfd, EPOLL_CTL_ADD, pipefd[0], &ev)) {
perror("epoll_ctl failed");
ksft_exit_fail();
}
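/* Pin the pipe's write end at a fixed descriptor number so it can be named as the KCMP_EPOLL_TFD target below. */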
fddup = dup2(pipefd[1], duped_num);
if (fddup < 0) {
perror("dup2 failed");
ksft_exit_fail();
}
if (epoll_ctl(epollfd, EPOLL_CTL_ADD, fddup, &ev)) {
perror("epoll_ctl failed");
ksft_exit_fail();
}
close(fddup);
pid2 = fork();
if (pid2 < 0) {
perror("fork failed");
ksft_exit_fail();
}
if (!pid2) {
int pid2 = getpid();
int ret;
ksft_print_header();
ksft_set_plan(3);
fd2 = open(kpath, O_RDWR, 0644);
if (fd2 < 0) {
perror("Can't open file");
ksft_exit_fail();
}
/* An example of output and arguments */
printf("pid1: %6d pid2: %6d FD: %2ld FILES: %2ld VM: %2ld "
"FS: %2ld SIGHAND: %2ld IO: %2ld SYSVSEM: %2ld "
"INV: %2ld\n",
pid1, pid2,
sys_kcmp(pid1, pid2, KCMP_FILE, fd1, fd2),
sys_kcmp(pid1, pid2, KCMP_FILES, 0, 0),
sys_kcmp(pid1, pid2, KCMP_VM, 0, 0),
sys_kcmp(pid1, pid2, KCMP_FS, 0, 0),
sys_kcmp(pid1, pid2, KCMP_SIGHAND, 0, 0),
sys_kcmp(pid1, pid2, KCMP_IO, 0, 0),
sys_kcmp(pid1, pid2, KCMP_SYSVSEM, 0, 0),
/* This one should fail */
sys_kcmp(pid1, pid2, KCMP_TYPES + 1, 0, 0));
/* This one should return same fd */
ret = sys_kcmp(pid1, pid2, KCMP_FILE, fd1, fd1);
if (ret) {
printf("FAIL: 0 expected but %d returned (%s)\n",
ret, strerror(errno));
ksft_inc_fail_cnt();
ret = -1;
} else {
printf("PASS: 0 returned as expected\n");
ksft_inc_pass_cnt();
}
/* Compare with self */
ret = sys_kcmp(pid1, pid1, KCMP_VM, 0, 0);
if (ret) {
printf("FAIL: 0 expected but %d returned (%s)\n",
ret, strerror(errno));
ksft_inc_fail_cnt();
ret = -1;
} else {
printf("PASS: 0 returned as expected\n");
ksft_inc_pass_cnt();
}
/* Compare epoll target */
epoll_slot = (struct kcmp_epoll_slot) {
.efd = epollfd,
.tfd = duped_num,
.toff = 0,
};
ret = sys_kcmp(pid1, pid1, KCMP_EPOLL_TFD, pipefd[1],
(unsigned long)(void *)&epoll_slot);
if (ret) {
printf("FAIL: 0 expected but %d returned (%s)\n",
ret, strerror(errno));
ksft_inc_fail_cnt();
ret = -1;
} else {
printf("PASS: 0 returned as expected\n");
ksft_inc_pass_cnt();
}
if (ret)
ksft_exit_fail();
else
ksft_exit_pass();
}
waitpid(pid2, &status, 0);
return 0;
}
| linux-master | tools/testing/selftests/kcmp/kcmp_test.c |
// SPDX-License-Identifier: GPL-2.0-or-later
#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <syscall.h>
#include <unistd.h>
#include "../kselftest.h"
int sys_fchmodat2(int dfd, const char *filename, mode_t mode, int flags)
{
int ret = syscall(__NR_fchmodat2, dfd, filename, mode, flags);
return ret >= 0 ? ret : -errno;
}
int setup_testdir(void)
{
int dfd, ret;
char dirname[] = "/tmp/ksft-fchmodat2.XXXXXX";
/* Make the top-level directory. */
if (!mkdtemp(dirname))
ksft_exit_fail_msg("%s: failed to create tmpdir\n", __func__);
dfd = open(dirname, O_PATH | O_DIRECTORY);
if (dfd < 0)
ksft_exit_fail_msg("%s: failed to open tmpdir\n", __func__);
ret = openat(dfd, "regfile", O_CREAT | O_WRONLY | O_TRUNC, 0644);
if (ret < 0)
ksft_exit_fail_msg("%s: failed to create file in tmpdir\n",
__func__);
close(ret);
ret = symlinkat("regfile", dfd, "symlink");
if (ret < 0)
ksft_exit_fail_msg("%s: failed to create symlink in tmpdir\n",
__func__);
return dfd;
}
int expect_mode(int dfd, const char *filename, mode_t expect_mode)
{
struct stat st;
int ret = fstatat(dfd, filename, &st, AT_SYMLINK_NOFOLLOW);
if (ret)
ksft_exit_fail_msg("%s: %s: fstatat failed\n",
__func__, filename);
return (st.st_mode == expect_mode);
}
void test_regfile(void)
{
int dfd, ret;
dfd = setup_testdir();
ret = sys_fchmodat2(dfd, "regfile", 0640, 0);
if (ret < 0)
ksft_exit_fail_msg("%s: fchmodat2(noflag) failed\n", __func__);
if (!expect_mode(dfd, "regfile", 0100640))
ksft_exit_fail_msg("%s: wrong file mode bits after fchmodat2\n",
__func__);
ret = sys_fchmodat2(dfd, "regfile", 0600, AT_SYMLINK_NOFOLLOW);
if (ret < 0)
ksft_exit_fail_msg("%s: fchmodat2(AT_SYMLINK_NOFOLLOW) failed\n",
__func__);
if (!expect_mode(dfd, "regfile", 0100600))
ksft_exit_fail_msg("%s: wrong file mode bits after fchmodat2 with nofollow\n",
__func__);
ksft_test_result_pass("fchmodat2(regfile)\n");
}
void test_symlink(void)
{
int dfd, ret;
dfd = setup_testdir();
ret = sys_fchmodat2(dfd, "symlink", 0640, 0);
if (ret < 0)
ksft_exit_fail_msg("%s: fchmodat2(noflag) failed\n", __func__);
if (!expect_mode(dfd, "regfile", 0100640))
ksft_exit_fail_msg("%s: wrong file mode bits after fchmodat2\n",
__func__);
if (!expect_mode(dfd, "symlink", 0120777))
ksft_exit_fail_msg("%s: wrong symlink mode bits after fchmodat2\n",
__func__);
ret = sys_fchmodat2(dfd, "symlink", 0600, AT_SYMLINK_NOFOLLOW);
/*
* On certain filesystems (xfs or btrfs), the chmod operation fails on
* symlinks. So we first check the symlink target, but if the operation
* fails we mark the test as skipped.
*
* https://sourceware.org/legacy-ml/libc-alpha/2020-02/msg00467.html
*/
if (ret == 0 && !expect_mode(dfd, "symlink", 0120600))
ksft_exit_fail_msg("%s: wrong symlink mode bits after fchmodat2 with nofollow\n",
__func__);
if (!expect_mode(dfd, "regfile", 0100640))
ksft_exit_fail_msg("%s: wrong file mode bits after fchmodat2 with nofollow\n",
__func__);
if (ret != 0)
ksft_test_result_skip("fchmodat2(symlink)\n");
else
ksft_test_result_pass("fchmodat2(symlink)\n");
}
#define NUM_TESTS 2
int main(int argc, char **argv)
{
ksft_print_header();
ksft_set_plan(NUM_TESTS);
test_regfile();
test_symlink();
if (ksft_get_fail_cnt() + ksft_get_error_cnt() > 0)
ksft_exit_fail();
else
ksft_exit_pass();
}
| linux-master | tools/testing/selftests/fchmodat2/fchmodat2_test.c |
// SPDX-License-Identifier: GPL-2.0
#define _GNU_SOURCE
#include <sched.h>
#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>
#include <signal.h>
#include <errno.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/prctl.h>
#include <sys/wait.h>
#define pr_err(fmt, ...) \
({ \
fprintf(stderr, "%s:%d:" fmt ": %m\n", \
__func__, __LINE__, ##__VA_ARGS__); \
1; \
})
#define NSIO 0xb7
#define NS_GET_USERNS _IO(NSIO, 0x1)
#define NS_GET_PARENT _IO(NSIO, 0x2)
#define __stack_aligned__ __attribute__((aligned(16)))
struct cr_clone_arg {
char stack[128] __stack_aligned__;
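/* Flexible array marking the top of the buffer: the stack passed to clone() grows down on the architectures this test targets. */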
char stack_ptr[];
};
static int child(void *args)
{
prctl(PR_SET_PDEATHSIG, SIGKILL);
while (1)
sleep(1);
exit(0);
}
int main(int argc, char *argv[])
{
char *ns_strs[] = {"pid", "user"};
char path[] = "/proc/0123456789/ns/user"; /* sized for the longest ns name used below */
struct cr_clone_arg ca;
struct stat st1, st2;
int ns, pns, i;
pid_t pid;
pid = clone(child, ca.stack_ptr, CLONE_NEWUSER | CLONE_NEWPID | SIGCHLD, NULL);
if (pid < 0)
return pr_err("clone");
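/* For each namespace type, NS_GET_PARENT on the child's ns must return our own namespace. */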
for (i = 0; i < 2; i++) {
snprintf(path, sizeof(path), "/proc/%d/ns/%s", pid, ns_strs[i]);
ns = open(path, O_RDONLY);
if (ns < 0)
return pr_err("Unable to open %s", path);
pns = ioctl(ns, NS_GET_PARENT);
if (pns < 0)
return pr_err("Unable to get a parent pidns");
snprintf(path, sizeof(path), "/proc/self/ns/%s", ns_strs[i]);
if (stat(path, &st2))
return pr_err("Unable to stat %s", path);
if (fstat(pns, &st1))
return pr_err("Unable to stat the parent pidns");
if (st1.st_ino != st2.st_ino)
return pr_err("NS_GET_PARENT returned a wrong namespace");
if (ioctl(pns, NS_GET_PARENT) >= 0 || errno != EPERM)
return pr_err("Don't get EPERM");
}
kill(pid, SIGKILL);
wait(NULL);
return 0;
}
| linux-master | tools/testing/selftests/nsfs/pidns.c |
// SPDX-License-Identifier: GPL-2.0
#define _GNU_SOURCE
#include <sched.h>
#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>
#include <signal.h>
#include <errno.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/prctl.h>
#include <sys/wait.h>
#define NSIO 0xb7
#define NS_GET_USERNS _IO(NSIO, 0x1)
#define pr_err(fmt, ...) \
({ \
fprintf(stderr, "%s:%d:" fmt ": %m\n", \
__func__, __LINE__, ##__VA_ARGS__); \
1; \
})
int main(int argc, char *argv[])
{
int pfd[2], ns, uns, init_uns;
struct stat st1, st2;
char path[128];
pid_t pid;
char c;
if (pipe(pfd))
return 1;
pid = fork();
if (pid < 0)
return pr_err("fork");
if (pid == 0) {
prctl(PR_SET_PDEATHSIG, SIGKILL);
if (unshare(CLONE_NEWUTS | CLONE_NEWUSER))
return pr_err("unshare");
close(pfd[0]);
close(pfd[1]);
while (1)
sleep(1);
return 0;
}
close(pfd[1]);
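/* Wait for EOF: the child closes its pipe ends only once unshare() has succeeded. */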
if (read(pfd[0], &c, 1) != 0)
return pr_err("Unable to read from pipe");
close(pfd[0]);
snprintf(path, sizeof(path), "/proc/%d/ns/uts", pid);
ns = open(path, O_RDONLY);
if (ns < 0)
return pr_err("Unable to open %s", path);
uns = ioctl(ns, NS_GET_USERNS);
if (uns < 0)
return pr_err("Unable to get an owning user namespace");
if (fstat(uns, &st1))
return pr_err("fstat");
snprintf(path, sizeof(path), "/proc/%d/ns/user", pid);
if (stat(path, &st2))
return pr_err("stat");
if (st1.st_ino != st2.st_ino)
return pr_err("NS_GET_USERNS returned a wrong namespace");
init_uns = ioctl(uns, NS_GET_USERNS);
if (init_uns < 0)
return pr_err("Unable to get an owning user namespace");
if (ioctl(init_uns, NS_GET_USERNS) >= 0 || errno != EPERM)
return pr_err("Don't get EPERM");
if (unshare(CLONE_NEWUSER))
return pr_err("unshare");
if (ioctl(ns, NS_GET_USERNS) >= 0 || errno != EPERM)
return pr_err("Don't get EPERM");
if (ioctl(init_uns, NS_GET_USERNS) >= 0 || errno != EPERM)
return pr_err("Don't get EPERM");
kill(pid, SIGKILL);
wait(NULL);
return 0;
}
| linux-master | tools/testing/selftests/nsfs/owner.c |
/* SPDX-License-Identifier: GPL-2.0 */
#define _GNU_SOURCE
#include <errno.h>
#include <sched.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <sys/syscall.h>
#include <sys/wait.h>
#include "../kselftest.h"
#include "clone3_selftests.h"
#ifndef CLONE_CLEAR_SIGHAND
#define CLONE_CLEAR_SIGHAND 0x100000000ULL
#endif
static void nop_handler(int signo)
{
}
static int wait_for_pid(pid_t pid)
{
int status, ret;
again:
ret = waitpid(pid, &status, 0);
if (ret == -1) {
if (errno == EINTR)
goto again;
return -1;
}
if (!WIFEXITED(status))
return -1;
return WEXITSTATUS(status);
}
static void test_clone3_clear_sighand(void)
{
int ret;
pid_t pid;
struct __clone_args args = {};
struct sigaction act;
/*
* Check that CLONE_CLEAR_SIGHAND and CLONE_SIGHAND are mutually
* exclusive.
*/
args.flags |= CLONE_CLEAR_SIGHAND | CLONE_SIGHAND;
args.exit_signal = SIGCHLD;
pid = sys_clone3(&args, sizeof(args));
if (pid > 0)
ksft_exit_fail_msg(
"clone3(CLONE_CLEAR_SIGHAND | CLONE_SIGHAND) succeeded\n");
act.sa_handler = nop_handler;
ret = sigemptyset(&act.sa_mask);
if (ret < 0)
ksft_exit_fail_msg("%s - sigemptyset() failed\n",
strerror(errno));
act.sa_flags = 0;
/* Register signal handler for SIGUSR1 */
ret = sigaction(SIGUSR1, &act, NULL);
if (ret < 0)
ksft_exit_fail_msg(
"%s - sigaction(SIGUSR1, &act, NULL) failed\n",
strerror(errno));
/* Register signal handler for SIGUSR2 */
ret = sigaction(SIGUSR2, &act, NULL);
if (ret < 0)
ksft_exit_fail_msg(
"%s - sigaction(SIGUSR2, &act, NULL) failed\n",
strerror(errno));
/* Check that CLONE_CLEAR_SIGHAND works. */
args.flags = CLONE_CLEAR_SIGHAND;
pid = sys_clone3(&args, sizeof(args));
if (pid < 0)
ksft_exit_fail_msg("%s - clone3(CLONE_CLEAR_SIGHAND) failed\n",
strerror(errno));
if (pid == 0) {
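/* In the child, CLONE_CLEAR_SIGHAND must have reset both handlers to SIG_DFL. */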
ret = sigaction(SIGUSR1, NULL, &act);
if (ret < 0)
exit(EXIT_FAILURE);
if (act.sa_handler != SIG_DFL)
exit(EXIT_FAILURE);
ret = sigaction(SIGUSR2, NULL, &act);
if (ret < 0)
exit(EXIT_FAILURE);
if (act.sa_handler != SIG_DFL)
exit(EXIT_FAILURE);
exit(EXIT_SUCCESS);
}
ret = wait_for_pid(pid);
if (ret)
ksft_exit_fail_msg(
"Failed to clear signal handler for child process\n");
ksft_test_result_pass("Cleared signal handlers for child process\n");
}
int main(int argc, char **argv)
{
ksft_print_header();
ksft_set_plan(1);
test_clone3_supported();
test_clone3_clear_sighand();
return ksft_exit_pass();
}
| linux-master | tools/testing/selftests/clone3/clone3_clear_sighand.c |
// SPDX-License-Identifier: GPL-2.0
/* Based on Christian Brauner's clone3() example */
#define _GNU_SOURCE
#include <errno.h>
#include <inttypes.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <sys/un.h>
#include <sys/wait.h>
#include <unistd.h>
#include <sched.h>
#include "../kselftest.h"
#include "clone3_selftests.h"
enum test_mode {
CLONE3_ARGS_NO_TEST,
CLONE3_ARGS_ALL_0,
CLONE3_ARGS_INVAL_EXIT_SIGNAL_BIG,
CLONE3_ARGS_INVAL_EXIT_SIGNAL_NEG,
CLONE3_ARGS_INVAL_EXIT_SIGNAL_CSIG,
CLONE3_ARGS_INVAL_EXIT_SIGNAL_NSIG,
};
static int call_clone3(uint64_t flags, size_t size, enum test_mode test_mode)
{
struct __clone_args args = {
.flags = flags,
.exit_signal = SIGCHLD,
};
struct clone_args_extended {
struct __clone_args args;
__aligned_u64 excess_space[2];
} args_ext;
pid_t pid = -1;
int status;
memset(&args_ext, 0, sizeof(args_ext));
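/*
* Plant a non-zero word 8 bytes past the real struct: any size up to
* sizeof(struct __clone_args) + 8 hides it from the kernel, while larger
* sizes expose it and must make clone3() fail with E2BIG.
*/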
if (size > sizeof(struct __clone_args))
args_ext.excess_space[1] = 1;
if (size == 0)
size = sizeof(struct __clone_args);
switch (test_mode) {
case CLONE3_ARGS_NO_TEST:
/*
* Uses default 'flags' and 'SIGCHLD'
* assignment.
*/
break;
case CLONE3_ARGS_ALL_0:
args.flags = 0;
args.exit_signal = 0;
break;
case CLONE3_ARGS_INVAL_EXIT_SIGNAL_BIG:
args.exit_signal = 0xbadc0ded00000000ULL;
break;
case CLONE3_ARGS_INVAL_EXIT_SIGNAL_NEG:
args.exit_signal = 0x0000000080000000ULL;
break;
case CLONE3_ARGS_INVAL_EXIT_SIGNAL_CSIG:
args.exit_signal = 0x0000000000000100ULL;
break;
case CLONE3_ARGS_INVAL_EXIT_SIGNAL_NSIG:
args.exit_signal = 0x00000000000000f0ULL;
break;
}
memcpy(&args_ext.args, &args, sizeof(struct __clone_args));
pid = sys_clone3((struct __clone_args *)&args_ext, size);
if (pid < 0) {
ksft_print_msg("%s - Failed to create new process\n",
strerror(errno));
return -errno;
}
if (pid == 0) {
ksft_print_msg("I am the child, my PID is %d\n", getpid());
_exit(EXIT_SUCCESS);
}
ksft_print_msg("I am the parent (%d). My child's pid is %d\n",
getpid(), pid);
if (waitpid(-1, &status, __WALL) < 0) {
ksft_print_msg("Child returned %s\n", strerror(errno));
return -errno;
}
if (WEXITSTATUS(status))
return WEXITSTATUS(status);
return 0;
}
static void test_clone3(uint64_t flags, size_t size, int expected,
enum test_mode test_mode)
{
int ret;
ksft_print_msg(
"[%d] Trying clone3() with flags %#" PRIx64 " (size %zu)\n",
getpid(), flags, size);
ret = call_clone3(flags, size, test_mode);
ksft_print_msg("[%d] clone3() with flags says: %d expected %d\n",
getpid(), ret, expected);
if (ret != expected)
ksft_test_result_fail(
"[%d] Result (%d) is different than expected (%d)\n",
getpid(), ret, expected);
else
ksft_test_result_pass(
"[%d] Result (%d) matches expectation (%d)\n",
getpid(), ret, expected);
}
int main(int argc, char *argv[])
{
uid_t uid = getuid();
ksft_print_header();
ksft_set_plan(19);
test_clone3_supported();
/* Just a simple clone3() should return 0.*/
test_clone3(0, 0, 0, CLONE3_ARGS_NO_TEST);
/* Do a clone3() in a new PID NS.*/
if (uid == 0)
test_clone3(CLONE_NEWPID, 0, 0, CLONE3_ARGS_NO_TEST);
else
ksft_test_result_skip("Skipping clone3() with CLONE_NEWPID\n");
/* Do a clone3() with CLONE_ARGS_SIZE_VER0. */
test_clone3(0, CLONE_ARGS_SIZE_VER0, 0, CLONE3_ARGS_NO_TEST);
/* Do a clone3() with CLONE_ARGS_SIZE_VER0 - 8 */
test_clone3(0, CLONE_ARGS_SIZE_VER0 - 8, -EINVAL, CLONE3_ARGS_NO_TEST);
/* Do a clone3() with sizeof(struct clone_args) + 8 */
test_clone3(0, sizeof(struct __clone_args) + 8, 0, CLONE3_ARGS_NO_TEST);
/* Do a clone3() with exit_signal having highest 32 bits non-zero */
test_clone3(0, 0, -EINVAL, CLONE3_ARGS_INVAL_EXIT_SIGNAL_BIG);
/* Do a clone3() with negative 32-bit exit_signal */
test_clone3(0, 0, -EINVAL, CLONE3_ARGS_INVAL_EXIT_SIGNAL_NEG);
/* Do a clone3() with exit_signal not fitting into CSIGNAL mask */
test_clone3(0, 0, -EINVAL, CLONE3_ARGS_INVAL_EXIT_SIGNAL_CSIG);
/* Do a clone3() with NSIG < exit_signal < CSIG */
test_clone3(0, 0, -EINVAL, CLONE3_ARGS_INVAL_EXIT_SIGNAL_NSIG);
test_clone3(0, sizeof(struct __clone_args) + 8, 0, CLONE3_ARGS_ALL_0);
test_clone3(0, sizeof(struct __clone_args) + 16, -E2BIG,
CLONE3_ARGS_ALL_0);
test_clone3(0, sizeof(struct __clone_args) * 2, -E2BIG,
CLONE3_ARGS_ALL_0);
/* Do a clone3() with > page size */
test_clone3(0, getpagesize() + 8, -E2BIG, CLONE3_ARGS_NO_TEST);
/* Do a clone3() with CLONE_ARGS_SIZE_VER0 in a new PID NS. */
if (uid == 0)
test_clone3(CLONE_NEWPID, CLONE_ARGS_SIZE_VER0, 0,
CLONE3_ARGS_NO_TEST);
else
ksft_test_result_skip("Skipping clone3() with CLONE_NEWPID\n");
/* Do a clone3() with CLONE_ARGS_SIZE_VER0 - 8 in a new PID NS */
test_clone3(CLONE_NEWPID, CLONE_ARGS_SIZE_VER0 - 8, -EINVAL,
CLONE3_ARGS_NO_TEST);
/* Do a clone3() with sizeof(struct clone_args) + 8 in a new PID NS */
if (uid == 0)
test_clone3(CLONE_NEWPID, sizeof(struct __clone_args) + 8, 0,
CLONE3_ARGS_NO_TEST);
else
ksft_test_result_skip("Skipping clone3() with CLONE_NEWPID\n");
/* Do a clone3() with > page size in a new PID NS */
test_clone3(CLONE_NEWPID, getpagesize() + 8, -E2BIG,
CLONE3_ARGS_NO_TEST);
/* Do a clone3() in a new time namespace */
test_clone3(CLONE_NEWTIME, 0, 0, CLONE3_ARGS_NO_TEST);
/* Do a clone3() with exit signal (SIGCHLD) in flags */
test_clone3(SIGCHLD, 0, -EINVAL, CLONE3_ARGS_NO_TEST);
ksft_finished();
}
| linux-master | tools/testing/selftests/clone3/clone3.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Based on Christian Brauner's clone3() example.
* These tests assume that they are running in the host's
* PID namespace.
*/
/* capabilities related code based on selftests/bpf/test_verifier.c */
#define _GNU_SOURCE
#include <errno.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <sys/capability.h>
#include <sys/prctl.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <sys/un.h>
#include <sys/wait.h>
#include <unistd.h>
#include <sched.h>
#include "../kselftest_harness.h"
#include "clone3_selftests.h"
#ifndef MAX_PID_NS_LEVEL
#define MAX_PID_NS_LEVEL 32
#endif
static void child_exit(int ret)
{
fflush(stdout);
fflush(stderr);
_exit(ret);
}
static int call_clone3_set_tid(struct __test_metadata *_metadata,
pid_t *set_tid, size_t set_tid_size)
{
int status;
pid_t pid = -1;
struct __clone_args args = {
.exit_signal = SIGCHLD,
.set_tid = ptr_to_u64(set_tid),
.set_tid_size = set_tid_size,
};
pid = sys_clone3(&args, sizeof(args));
if (pid < 0) {
TH_LOG("%s - Failed to create new process", strerror(errno));
return -errno;
}
if (pid == 0) {
TH_LOG("I am the child, my PID is %d (expected %d)", getpid(), set_tid[0]);
if (set_tid[0] != getpid())
child_exit(EXIT_FAILURE);
child_exit(EXIT_SUCCESS);
}
TH_LOG("I am the parent (%d). My child's pid is %d", getpid(), pid);
if (waitpid(pid, &status, 0) < 0) {
TH_LOG("Child returned %s", strerror(errno));
return -errno;
}
if (!WIFEXITED(status))
return -1;
return WEXITSTATUS(status);
}
static int test_clone3_set_tid(struct __test_metadata *_metadata,
pid_t *set_tid, size_t set_tid_size)
{
int ret;
TH_LOG("[%d] Trying clone3() with CLONE_SET_TID to %d", getpid(), set_tid[0]);
ret = call_clone3_set_tid(_metadata, set_tid, set_tid_size);
TH_LOG("[%d] clone3() with CLONE_SET_TID %d says:%d", getpid(), set_tid[0], ret);
return ret;
}
struct libcap {
struct __user_cap_header_struct hdr;
struct __user_cap_data_struct data[2];
};
static int set_capability(void)
{
cap_value_t cap_values[] = { CAP_SETUID, CAP_SETGID };
struct libcap *cap;
int ret = -1;
cap_t caps;
caps = cap_get_proc();
if (!caps) {
perror("cap_get_proc");
return -1;
}
/* Drop all capabilities */
if (cap_clear(caps)) {
perror("cap_clear");
goto out;
}
cap_set_flag(caps, CAP_EFFECTIVE, 2, cap_values, CAP_SET);
cap_set_flag(caps, CAP_PERMITTED, 2, cap_values, CAP_SET);
cap = (struct libcap *) caps;
/* 40 -> CAP_CHECKPOINT_RESTORE */
cap->data[1].effective |= 1 << (40 - 32);
cap->data[1].permitted |= 1 << (40 - 32);
if (cap_set_proc(caps)) {
perror("cap_set_proc");
goto out;
}
ret = 0;
out:
if (cap_free(caps))
perror("cap_free");
return ret;
}
TEST(clone3_cap_checkpoint_restore)
{
pid_t pid;
int status;
int ret = 0;
pid_t set_tid[1];
test_clone3_supported();
EXPECT_EQ(getuid(), 0)
SKIP(return, "Skipping all tests as non-root");
memset(&set_tid, 0, sizeof(set_tid));
/* Find the current active PID */
pid = fork();
if (pid == 0) {
TH_LOG("Child has PID %d", getpid());
child_exit(EXIT_SUCCESS);
}
ASSERT_GT(waitpid(pid, &status, 0), 0)
TH_LOG("Waiting for child %d failed", pid);
/* After the child has finished, its PID should be free. */
set_tid[0] = pid;
ASSERT_EQ(set_capability(), 0)
TH_LOG("Could not set CAP_CHECKPOINT_RESTORE");
ASSERT_EQ(prctl(PR_SET_KEEPCAPS, 1, 0, 0, 0), 0);
EXPECT_EQ(setgid(65534), 0)
TH_LOG("Failed to setgid(65534)");
ASSERT_EQ(setuid(65534), 0);
set_tid[0] = pid;
/* This would fail without CAP_CHECKPOINT_RESTORE */
ASSERT_EQ(test_clone3_set_tid(_metadata, set_tid, 1), -EPERM);
ASSERT_EQ(set_capability(), 0)
TH_LOG("Could not set CAP_CHECKPOINT_RESTORE");
/* This should work as we have CAP_CHECKPOINT_RESTORE as non-root */
ASSERT_EQ(test_clone3_set_tid(_metadata, set_tid, 1), 0);
}
TEST_HARNESS_MAIN
| linux-master | tools/testing/selftests/clone3/clone3_cap_checkpoint_restore.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Based on Christian Brauner's clone3() example.
* These tests assume that they are running in the host's
* PID namespace.
*/
#define _GNU_SOURCE
#include <errno.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <sys/un.h>
#include <sys/wait.h>
#include <unistd.h>
#include <sched.h>
#include "../kselftest.h"
#include "clone3_selftests.h"
#ifndef MAX_PID_NS_LEVEL
#define MAX_PID_NS_LEVEL 32
#endif
static int pipe_1[2];
static int pipe_2[2];
static void child_exit(int ret)
{
fflush(stdout);
fflush(stderr);
_exit(ret);
}
static int call_clone3_set_tid(pid_t *set_tid,
size_t set_tid_size,
int flags,
int expected_pid,
bool wait_for_it)
{
int status;
pid_t pid = -1;
struct __clone_args args = {
.flags = flags,
.exit_signal = SIGCHLD,
.set_tid = ptr_to_u64(set_tid),
.set_tid_size = set_tid_size,
};
pid = sys_clone3(&args, sizeof(args));
if (pid < 0) {
ksft_print_msg("%s - Failed to create new process\n",
strerror(errno));
return -errno;
}
if (pid == 0) {
int ret;
char tmp = 0;
int exit_code = EXIT_SUCCESS;
ksft_print_msg("I am the child, my PID is %d (expected %d)\n",
getpid(), set_tid[0]);
if (wait_for_it) {
ksft_print_msg("[%d] Child is ready and waiting\n",
getpid());
/* Signal the parent that the child is ready */
close(pipe_1[0]);
ret = write(pipe_1[1], &tmp, 1);
if (ret != 1) {
ksft_print_msg(
"Writing to pipe returned %d", ret);
exit_code = EXIT_FAILURE;
}
close(pipe_1[1]);
close(pipe_2[1]);
ret = read(pipe_2[0], &tmp, 1);
if (ret != 1) {
ksft_print_msg(
"Reading from pipe returned %d", ret);
exit_code = EXIT_FAILURE;
}
close(pipe_2[0]);
}
if (set_tid[0] != getpid())
child_exit(EXIT_FAILURE);
child_exit(exit_code);
}
if (expected_pid == 0 || expected_pid == pid) {
ksft_print_msg("I am the parent (%d). My child's pid is %d\n",
getpid(), pid);
} else {
ksft_print_msg(
"Expected child pid %d does not match actual pid %d\n",
expected_pid, pid);
return -1;
}
if (waitpid(pid, &status, 0) < 0) {
ksft_print_msg("Child returned %s\n", strerror(errno));
return -errno;
}
if (!WIFEXITED(status))
return -1;
return WEXITSTATUS(status);
}
static void test_clone3_set_tid(pid_t *set_tid,
size_t set_tid_size,
int flags,
int expected,
int expected_pid,
bool wait_for_it)
{
int ret;
ksft_print_msg(
"[%d] Trying clone3() with CLONE_SET_TID to %d and 0x%x\n",
getpid(), set_tid[0], flags);
ret = call_clone3_set_tid(set_tid, set_tid_size, flags, expected_pid,
wait_for_it);
ksft_print_msg(
"[%d] clone3() with CLONE_SET_TID %d says :%d - expected %d\n",
getpid(), set_tid[0], ret, expected);
if (ret != expected)
ksft_test_result_fail(
"[%d] Result (%d) is different than expected (%d)\n",
getpid(), ret, expected);
else
ksft_test_result_pass(
"[%d] Result (%d) matches expectation (%d)\n",
getpid(), ret, expected);
}
int main(int argc, char *argv[])
{
FILE *f;
char buf;
char *line = NULL; /* getline() requires NULL or a malloc'd buffer */
int status;
int ret = -1;
size_t len = 0;
int pid_max = 0;
uid_t uid = getuid();
char proc_path[100] = {0};
pid_t pid, ns1, ns2, ns3, ns_pid;
pid_t set_tid[MAX_PID_NS_LEVEL * 2];
ksft_print_header();
ksft_set_plan(29);
test_clone3_supported();
if (pipe(pipe_1) < 0 || pipe(pipe_2) < 0)
ksft_exit_fail_msg("pipe() failed\n");
f = fopen("/proc/sys/kernel/pid_max", "r");
if (f == NULL)
ksft_exit_fail_msg(
"%s - Could not open /proc/sys/kernel/pid_max\n",
strerror(errno));
if (fscanf(f, "%d", &pid_max) != 1)
ksft_exit_fail_msg("Could not read /proc/sys/kernel/pid_max\n");
fclose(f);
ksft_print_msg("/proc/sys/kernel/pid_max %d\n", pid_max);
/* Try invalid settings */
memset(&set_tid, 0, sizeof(set_tid));
test_clone3_set_tid(set_tid, MAX_PID_NS_LEVEL + 1, 0, -EINVAL, 0, 0);
test_clone3_set_tid(set_tid, MAX_PID_NS_LEVEL * 2, 0, -EINVAL, 0, 0);
test_clone3_set_tid(set_tid, MAX_PID_NS_LEVEL * 2 + 1, 0,
-EINVAL, 0, 0);
test_clone3_set_tid(set_tid, MAX_PID_NS_LEVEL * 42, 0, -EINVAL, 0, 0);
/*
* This can actually work if this test is running in a PID namespace
* nested MAX_PID_NS_LEVEL - 1 levels deep.
*/
test_clone3_set_tid(set_tid, MAX_PID_NS_LEVEL - 1, 0, -EINVAL, 0, 0);
memset(&set_tid, 0xff, sizeof(set_tid));
test_clone3_set_tid(set_tid, MAX_PID_NS_LEVEL + 1, 0, -EINVAL, 0, 0);
test_clone3_set_tid(set_tid, MAX_PID_NS_LEVEL * 2, 0, -EINVAL, 0, 0);
test_clone3_set_tid(set_tid, MAX_PID_NS_LEVEL * 2 + 1, 0,
-EINVAL, 0, 0);
test_clone3_set_tid(set_tid, MAX_PID_NS_LEVEL * 42, 0, -EINVAL, 0, 0);
/*
* This can actually work if this test is running in a PID namespace
* nested MAX_PID_NS_LEVEL - 1 levels deep.
*/
test_clone3_set_tid(set_tid, MAX_PID_NS_LEVEL - 1, 0, -EINVAL, 0, 0);
memset(&set_tid, 0, sizeof(set_tid));
/* Try with an invalid PID */
set_tid[0] = 0;
test_clone3_set_tid(set_tid, 1, 0, -EINVAL, 0, 0);
set_tid[0] = -1;
test_clone3_set_tid(set_tid, 1, 0, -EINVAL, 0, 0);
/* Claim that the set_tid array actually contains 2 elements. */
test_clone3_set_tid(set_tid, 2, 0, -EINVAL, 0, 0);
/* Try it in a new PID namespace */
if (uid == 0)
test_clone3_set_tid(set_tid, 1, CLONE_NEWPID, -EINVAL, 0, 0);
else
ksft_test_result_skip("Clone3() with set_tid requires root\n");
/* Try with a valid PID (1) this should return -EEXIST. */
set_tid[0] = 1;
if (uid == 0)
test_clone3_set_tid(set_tid, 1, 0, -EEXIST, 0, 0);
else
ksft_test_result_skip("Clone3() with set_tid requires root\n");
/* Try it in a new PID namespace */
if (uid == 0)
test_clone3_set_tid(set_tid, 1, CLONE_NEWPID, 0, 0, 0);
else
ksft_test_result_skip("Clone3() with set_tid requires root\n");
/* pid_max should fail everywhere */
set_tid[0] = pid_max;
test_clone3_set_tid(set_tid, 1, 0, -EINVAL, 0, 0);
if (uid == 0)
test_clone3_set_tid(set_tid, 1, CLONE_NEWPID, -EINVAL, 0, 0);
else
ksft_test_result_skip("Clone3() with set_tid requires root\n");
if (uid != 0) {
/*
* All remaining tests require root. Tell the framework
* that all those tests are skipped as non-root.
*/
ksft_cnt.ksft_xskip += ksft_plan - ksft_test_num();
goto out;
}
/* Find the current active PID */
pid = fork();
if (pid == 0) {
ksft_print_msg("Child has PID %d\n", getpid());
child_exit(EXIT_SUCCESS);
}
if (waitpid(pid, &status, 0) < 0)
ksft_exit_fail_msg("Waiting for child %d failed", pid);
/* After the child has finished, its PID should be free. */
set_tid[0] = pid;
test_clone3_set_tid(set_tid, 1, 0, 0, 0, 0);
/* This should fail as there is no PID 1 in that namespace */
test_clone3_set_tid(set_tid, 1, CLONE_NEWPID, -EINVAL, 0, 0);
/*
* Creating a process with PID 1 in the newly created most nested
* PID namespace and PID 'pid' in the parent PID namespace. This
* needs to work.
*/
set_tid[0] = 1;
set_tid[1] = pid;
test_clone3_set_tid(set_tid, 2, CLONE_NEWPID, 0, pid, 0);
ksft_print_msg("unshare PID namespace\n");
if (unshare(CLONE_NEWPID) == -1)
ksft_exit_fail_msg("unshare(CLONE_NEWPID) failed: %s\n",
strerror(errno));
set_tid[0] = pid;
/* This should fail as there is no PID 1 in that namespace */
test_clone3_set_tid(set_tid, 1, 0, -EINVAL, 0, 0);
/* Let's create a PID 1 */
ns_pid = fork();
if (ns_pid == 0) {
/*
* This and the next test case check that all PIDs are
* released on error paths.
*/
set_tid[0] = 43;
set_tid[1] = -1;
test_clone3_set_tid(set_tid, 2, 0, -EINVAL, 0, 0);
set_tid[0] = 43;
set_tid[1] = pid;
test_clone3_set_tid(set_tid, 2, 0, 0, 43, 0);
ksft_print_msg("Child in PID namespace has PID %d\n", getpid());
set_tid[0] = 2;
test_clone3_set_tid(set_tid, 1, 0, 0, 2, 0);
set_tid[0] = 1;
set_tid[1] = -1;
set_tid[2] = pid;
/* This should fail as there is invalid PID at level '1'. */
test_clone3_set_tid(set_tid, 3, CLONE_NEWPID, -EINVAL, 0, 0);
set_tid[0] = 1;
set_tid[1] = 42;
set_tid[2] = pid;
/*
* This should fail as there are not enough active PID
* namespaces. Again assuming this is running in the host's
* PID namespace. Not yet nested.
*/
test_clone3_set_tid(set_tid, 4, CLONE_NEWPID, -EINVAL, 0, 0);
/*
* This should work and from the parent we should see
* something like 'NSpid: pid 42 1'.
*/
test_clone3_set_tid(set_tid, 3, CLONE_NEWPID, 0, 42, true);
child_exit(ksft_cnt.ksft_fail);
}
close(pipe_1[1]);
close(pipe_2[0]);
if (read(pipe_1[0], &buf, 1) > 0)
ksft_print_msg("[%d] Child is ready and waiting\n", getpid());
snprintf(proc_path, sizeof(proc_path), "/proc/%d/status", pid);
f = fopen(proc_path, "r");
if (f == NULL)
ksft_exit_fail_msg(
"%s - Could not open %s\n",
strerror(errno), proc_path);
while (getline(&line, &len, f) != -1) {
if (strstr(line, "NSpid")) {
int i;
/* Verify that all generated PIDs are as expected. */
i = sscanf(line, "NSpid:\t%d\t%d\t%d",
&ns3, &ns2, &ns1);
if (i != 3) {
ksft_print_msg(
"Unexpected 'NSPid:' entry: %s",
line);
ns1 = ns2 = ns3 = 0;
}
break;
}
}
fclose(f);
free(line);
close(pipe_2[0]);
/* Tell the clone3()'d child to finish. */
write(pipe_2[1], &buf, 1);
close(pipe_2[1]);
if (waitpid(ns_pid, &status, 0) < 0) {
ksft_print_msg("Child returned %s\n", strerror(errno));
ret = -errno;
goto out;
}
if (!WIFEXITED(status))
ksft_test_result_fail("Child error\n");
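/* The child exits with its ksft failure count; fold its six sub-tests into our pass/fail totals. */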
ksft_cnt.ksft_pass += 6 - (ksft_cnt.ksft_fail - WEXITSTATUS(status));
ksft_cnt.ksft_fail = WEXITSTATUS(status);
if (ns3 == pid && ns2 == 42 && ns1 == 1)
ksft_test_result_pass(
"PIDs in all namespaces as expected (%d,%d,%d)\n",
ns3, ns2, ns1);
else
ksft_test_result_fail(
"PIDs in all namespaces not as expected (%d,%d,%d)\n",
ns3, ns2, ns1);
out:
ret = 0;
return !ret ? ksft_exit_pass() : ksft_exit_fail();
}
| linux-master | tools/testing/selftests/clone3/clone3_set_tid.c |
// SPDX-License-Identifier: GPL-2.0
#define _GNU_SOURCE
#include <stdio.h>
#include <stdbool.h>
#include <linux/kernel.h>
#include <linux/magic.h>
#include <linux/mman.h>
#include <sys/mman.h>
#include <sys/shm.h>
#include <sys/syscall.h>
#include <sys/vfs.h>
#include <unistd.h>
#include <string.h>
#include <fcntl.h>
#include <errno.h>
#include "../kselftest.h"
#define NR_TESTS 9
static const char * const dev_files[] = {
"/dev/zero", "/dev/null", "/dev/urandom",
"/proc/version", "/proc"
};
void print_cachestat(struct cachestat *cs)
{
ksft_print_msg(
"Using cachestat: Cached: %lu, Dirty: %lu, Writeback: %lu, Evicted: %lu, Recently Evicted: %lu\n",
cs->nr_cache, cs->nr_dirty, cs->nr_writeback,
cs->nr_evicted, cs->nr_recently_evicted);
}
bool write_exactly(int fd, size_t filesize)
{
int random_fd = open("/dev/urandom", O_RDONLY);
char *cursor, *data;
int remained;
bool ret;
if (random_fd < 0) {
ksft_print_msg("Unable to access urandom.\n");
ret = false;
goto out;
}
data = malloc(filesize);
if (!data) {
ksft_print_msg("Unable to allocate data.\n");
ret = false;
goto close_random_fd;
}
remained = filesize;
cursor = data;
while (remained) {
ssize_t read_len = read(random_fd, cursor, remained);
if (read_len <= 0) {
ksft_print_msg("Unable to read from urandom.\n");
ret = false;
goto out_free_data;
}
remained -= read_len;
cursor += read_len;
}
/* write random data to fd */
remained = filesize;
cursor = data;
while (remained) {
ssize_t write_len = write(fd, cursor, remained);
if (write_len <= 0) {
ksft_print_msg("Unable write random data to file.\n");
ret = false;
goto out_free_data;
}
remained -= write_len;
cursor += write_len;
}
ret = true;
out_free_data:
free(data);
close_random_fd:
close(random_fd);
out:
return ret;
}
/*
* fsync() is implemented via noop_fsync() on tmpfs. This makes the fsync()
* test fail below, so we need to check for test file living on a tmpfs.
*/
static bool is_on_tmpfs(int fd)
{
struct statfs statfs_buf;
if (fstatfs(fd, &statfs_buf))
return false;
return statfs_buf.f_type == TMPFS_MAGIC;
}
/*
* Open/create the file at filename, (optionally) write random data to it
* (exactly num_pages), then test the cachestat syscall on this file.
*
* If test_fsync == true, fsync the file, then check the number of dirty
* pages.
*/
static int test_cachestat(const char *filename, bool write_random, bool create,
bool test_fsync, unsigned long num_pages,
int open_flags, mode_t open_mode)
{
size_t PS = sysconf(_SC_PAGESIZE);
int filesize = num_pages * PS;
int ret = KSFT_PASS;
long syscall_ret;
struct cachestat cs;
struct cachestat_range cs_range = { 0, filesize };
int fd = open(filename, open_flags, open_mode);
if (fd == -1) {
ksft_print_msg("Unable to create/open file.\n");
ret = KSFT_FAIL;
goto out;
} else {
ksft_print_msg("Create/open %s\n", filename);
}
if (write_random) {
if (!write_exactly(fd, filesize)) {
ksft_print_msg("Unable to access urandom.\n");
ret = KSFT_FAIL;
goto out1;
}
}
syscall_ret = syscall(__NR_cachestat, fd, &cs_range, &cs, 0);
ksft_print_msg("Cachestat call returned %ld\n", syscall_ret);
if (syscall_ret) {
ksft_print_msg("Cachestat returned non-zero.\n");
ret = KSFT_FAIL;
goto out1;
} else {
print_cachestat(&cs);
if (write_random) {
if (cs.nr_cache + cs.nr_evicted != num_pages) {
ksft_print_msg(
"Total number of cached and evicted pages is off.\n");
ret = KSFT_FAIL;
}
}
}
if (test_fsync) {
if (is_on_tmpfs(fd)) {
ret = KSFT_SKIP;
} else if (fsync(fd)) {
ksft_print_msg("fsync fails.\n");
ret = KSFT_FAIL;
} else {
syscall_ret = syscall(__NR_cachestat, fd, &cs_range, &cs, 0);
ksft_print_msg("Cachestat call (after fsync) returned %ld\n",
syscall_ret);
if (!syscall_ret) {
print_cachestat(&cs);
if (cs.nr_dirty) {
ret = KSFT_FAIL;
ksft_print_msg(
"Number of dirty should be zero after fsync.\n");
}
} else {
ksft_print_msg("Cachestat (after fsync) returned non-zero.\n");
ret = KSFT_FAIL;
goto out1;
}
}
}
out1:
close(fd);
if (create)
remove(filename);
out:
return ret;
}
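/*
 * Test cachestat on a shmem file. Note that the queried range starts at a
 * one-page offset (PS) and covers only half of the file, so num_pages is
 * computed from compute_len rather than from filesize.
 */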
bool test_cachestat_shmem(void)
{
size_t PS = sysconf(_SC_PAGESIZE);
size_t filesize = PS * 512 * 2; /* 2 2MB huge pages */
int syscall_ret;
size_t compute_len = PS * 512;
struct cachestat_range cs_range = { PS, compute_len };
char *filename = "tmpshmcstat";
struct cachestat cs;
bool ret = true;
unsigned long num_pages = compute_len / PS;
int fd = shm_open(filename, O_CREAT | O_RDWR, 0600);
if (fd < 0) {
ksft_print_msg("Unable to create shmem file.\n");
ret = false;
goto out;
}
if (ftruncate(fd, filesize)) {
ksft_print_msg("Unable to truncate shmem file.\n");
ret = false;
goto close_fd;
}
if (!write_exactly(fd, filesize)) {
ksft_print_msg("Unable to write to shmem file.\n");
ret = false;
goto close_fd;
}
syscall_ret = syscall(__NR_cachestat, fd, &cs_range, &cs, 0);
if (syscall_ret) {
ksft_print_msg("Cachestat returned non-zero.\n");
ret = false;
goto close_fd;
} else {
print_cachestat(&cs);
if (cs.nr_cache + cs.nr_evicted != num_pages) {
ksft_print_msg(
"Total number of cached and evicted pages is off.\n");
ret = false;
}
}
close_fd:
shm_unlink(filename);
out:
return ret;
}
int main(void)
{
int ret;
ksft_print_header();
ret = syscall(__NR_cachestat, -1, NULL, NULL, 0);
if (ret == -1 && errno == ENOSYS)
ksft_exit_skip("cachestat syscall not available\n");
ksft_set_plan(NR_TESTS);
if (ret == -1 && errno == EBADF) {
ksft_test_result_pass("bad file descriptor recognized\n");
ret = 0;
} else {
ksft_test_result_fail("bad file descriptor ignored\n");
ret = 1;
}
for (int i = 0; i < ARRAY_SIZE(dev_files); i++) {
const char *dev_filename = dev_files[i];
if (test_cachestat(dev_filename, false, false, false,
4, O_RDONLY, 0400) == KSFT_PASS)
ksft_test_result_pass("cachestat works with %s\n", dev_filename);
else {
ksft_test_result_fail("cachestat fails with %s\n", dev_filename);
ret = 1;
}
}
if (test_cachestat("tmpfilecachestat", true, true,
false, 4, O_CREAT | O_RDWR, 0600) == KSFT_PASS)
ksft_test_result_pass("cachestat works with a normal file\n");
else {
ksft_test_result_fail("cachestat fails with normal file\n");
ret = 1;
}
switch (test_cachestat("tmpfilecachestat", true, true,
true, 4, O_CREAT | O_RDWR, 0600)) {
case KSFT_FAIL:
ksft_test_result_fail("cachestat fsync fails with normal file\n");
ret = KSFT_FAIL;
break;
case KSFT_PASS:
ksft_test_result_pass("cachestat fsync works with a normal file\n");
break;
case KSFT_SKIP:
ksft_test_result_skip("tmpfilecachestat is on tmpfs\n");
break;
}
if (test_cachestat_shmem())
ksft_test_result_pass("cachestat works with a shmem file\n");
else {
ksft_test_result_fail("cachestat fails with a shmem file\n");
ret = 1;
}
return ret;
}
| linux-master | tools/testing/selftests/cachestat/test_cachestat.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Watchdog Driver Test Program
* - Tests all ioctls
* - Tests Magic Close - CONFIG_WATCHDOG_NOWAYOUT
* - Could be tested against softdog driver on systems that
* don't have watchdog hardware.
* - TODO:
* - Enhance test to add coverage for WDIOC_GETTEMP.
*
* Reference: Documentation/watchdog/watchdog-api.rst
*/
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>
#include <signal.h>
#include <getopt.h>
#include <sys/ioctl.h>
#include <linux/types.h>
#include <linux/watchdog.h>
#define DEFAULT_PING_RATE 1
int fd;
const char v = 'V';
static const char sopts[] = "bdehp:st:Tn:NLf:i";
static const struct option lopts[] = {
{"bootstatus", no_argument, NULL, 'b'},
{"disable", no_argument, NULL, 'd'},
{"enable", no_argument, NULL, 'e'},
{"help", no_argument, NULL, 'h'},
{"pingrate", required_argument, NULL, 'p'},
{"status", no_argument, NULL, 's'},
{"timeout", required_argument, NULL, 't'},
{"gettimeout", no_argument, NULL, 'T'},
{"pretimeout", required_argument, NULL, 'n'},
{"getpretimeout", no_argument, NULL, 'N'},
{"gettimeleft", no_argument, NULL, 'L'},
{"file", required_argument, NULL, 'f'},
{"info", no_argument, NULL, 'i'},
{NULL, no_argument, NULL, 0x0}
};
/*
* This function simply sends an IOCTL to the driver, which in turn ticks
* the PC Watchdog card to reset its internal timer so it doesn't trigger
* a computer reset.
*/
static void keep_alive(void)
{
int dummy;
int ret;
ret = ioctl(fd, WDIOC_KEEPALIVE, &dummy);
if (!ret)
printf(".");
}
/*
* SIGINT handler: send the magic close character and stop the watchdog
* before exiting, so the timer does not fire after the program dies.
*/
static void term(int sig)
{
int ret = write(fd, &v, 1);
close(fd);
if (ret < 0)
printf("\nStopping watchdog ticks failed (%d)...\n", errno);
else
printf("\nStopping watchdog ticks...\n");
exit(0);
}
static void usage(char *progname)
{
printf("Usage: %s [options]\n", progname);
printf(" -f, --file\t\tOpen watchdog device file\n");
printf("\t\t\tDefault is /dev/watchdog\n");
printf(" -i, --info\t\tShow watchdog_info\n");
printf(" -s, --status\t\tGet status & supported features\n");
printf(" -b, --bootstatus\tGet last boot status (Watchdog/POR)\n");
printf(" -d, --disable\t\tTurn off the watchdog timer\n");
printf(" -e, --enable\t\tTurn on the watchdog timer\n");
printf(" -h, --help\t\tPrint the help message\n");
printf(" -p, --pingrate=P\tSet ping rate to P seconds (default %d)\n",
DEFAULT_PING_RATE);
printf(" -t, --timeout=T\tSet timeout to T seconds\n");
printf(" -T, --gettimeout\tGet the timeout\n");
printf(" -n, --pretimeout=T\tSet the pretimeout to T seconds\n");
printf(" -N, --getpretimeout\tGet the pretimeout\n");
printf(" -L, --gettimeleft\tGet the time left until timer expires\n");
printf("\n");
printf("Parameters are parsed left-to-right in real-time.\n");
printf("Example: %s -d -t 10 -p 5 -e\n", progname);
printf("Example: %s -t 12 -T -n 7 -N\n", progname);
}
struct wdiof_status {
int flag;
const char *status_str;
};
#define WDIOF_NUM_STATUS 8
static const struct wdiof_status wdiof_status[WDIOF_NUM_STATUS] = {
{WDIOF_SETTIMEOUT, "Set timeout (in seconds)"},
{WDIOF_MAGICCLOSE, "Supports magic close char"},
{WDIOF_PRETIMEOUT, "Pretimeout (in seconds), get/set"},
{WDIOF_ALARMONLY, "Watchdog triggers a management or other external alarm not a reboot"},
{WDIOF_KEEPALIVEPING, "Keep alive ping reply"},
{WDIOS_DISABLECARD, "Turn off the watchdog timer"},
{WDIOS_ENABLECARD, "Turn on the watchdog timer"},
{WDIOS_TEMPPANIC, "Kernel panic on temperature trip"},
};
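/*
 * Note: the table above mixes WDIOF_* capability bits with WDIOS_* option
 * bits, since print_status() is used both for WDIOC_GETSTATUS results and
 * for the capability mask in watchdog_info.options.
 */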
static void print_status(int flags)
{
int wdiof = 0;
if (flags == WDIOS_UNKNOWN) {
printf("Unknown status error from WDIOC_GETSTATUS\n");
return;
}
for (wdiof = 0; wdiof < WDIOF_NUM_STATUS; wdiof++) {
if (flags & wdiof_status[wdiof].flag)
printf("Support/Status: %s\n",
wdiof_status[wdiof].status_str);
}
}
#define WDIOF_NUM_BOOTSTATUS 7
static const struct wdiof_status wdiof_bootstatus[WDIOF_NUM_BOOTSTATUS] = {
{WDIOF_OVERHEAT, "Reset due to CPU overheat"},
{WDIOF_FANFAULT, "Fan failed"},
{WDIOF_EXTERN1, "External relay 1"},
{WDIOF_EXTERN2, "External relay 2"},
{WDIOF_POWERUNDER, "Power bad/power fault"},
{WDIOF_CARDRESET, "Card previously reset the CPU"},
{WDIOF_POWEROVER, "Power over voltage"},
};
static void print_boot_status(int flags)
{
int wdiof = 0;
if (flags == WDIOF_UNKNOWN) {
printf("Unknown flag error from WDIOC_GETBOOTSTATUS\n");
return;
}
if (flags == 0) {
printf("Last boot is caused by: Power-On-Reset\n");
return;
}
for (wdiof = 0; wdiof < WDIOF_NUM_BOOTSTATUS; wdiof++) {
if (flags & wdiof_bootstatus[wdiof].flag)
printf("Last boot is caused by: %s\n",
wdiof_bootstatus[wdiof].status_str);
}
}
int main(int argc, char *argv[])
{
int flags;
unsigned int ping_rate = DEFAULT_PING_RATE;
int ret;
int c;
int oneshot = 0;
char *file = "/dev/watchdog";
struct watchdog_info info;
int temperature;
setbuf(stdout, NULL);
while ((c = getopt_long(argc, argv, sopts, lopts, NULL)) != -1) {
if (c == 'f')
file = optarg;
}
fd = open(file, O_WRONLY);
if (fd == -1) {
if (errno == ENOENT)
printf("Watchdog device (%s) not found.\n", file);
else if (errno == EACCES)
printf("Run watchdog as root.\n");
else
printf("Watchdog device open failed %s\n",
strerror(errno));
exit(-1);
}
/*
* Validate that `file` is a watchdog device
*/
ret = ioctl(fd, WDIOC_GETSUPPORT, &info);
if (ret) {
printf("WDIOC_GETSUPPORT error '%s'\n", strerror(errno));
close(fd);
exit(ret);
}
optind = 0;
while ((c = getopt_long(argc, argv, sopts, lopts, NULL)) != -1) {
switch (c) {
case 'b':
flags = 0;
oneshot = 1;
ret = ioctl(fd, WDIOC_GETBOOTSTATUS, &flags);
if (!ret)
print_boot_status(flags);
else
printf("WDIOC_GETBOOTSTATUS error '%s'\n", strerror(errno));
break;
case 'd':
flags = WDIOS_DISABLECARD;
ret = ioctl(fd, WDIOC_SETOPTIONS, &flags);
if (!ret)
printf("Watchdog card disabled.\n");
else {
printf("WDIOS_DISABLECARD error '%s'\n", strerror(errno));
oneshot = 1;
}
break;
case 'e':
flags = WDIOS_ENABLECARD;
ret = ioctl(fd, WDIOC_SETOPTIONS, &flags);
if (!ret)
printf("Watchdog card enabled.\n");
else {
printf("WDIOS_ENABLECARD error '%s'\n", strerror(errno));
oneshot = 1;
}
break;
case 'p':
ping_rate = strtoul(optarg, NULL, 0);
if (!ping_rate)
ping_rate = DEFAULT_PING_RATE;
printf("Watchdog ping rate set to %u seconds.\n", ping_rate);
break;
case 's':
flags = 0;
oneshot = 1;
ret = ioctl(fd, WDIOC_GETSTATUS, &flags);
if (!ret)
print_status(flags);
else
printf("WDIOC_GETSTATUS error '%s'\n", strerror(errno));
ret = ioctl(fd, WDIOC_GETTEMP, &temperature);
if (ret)
printf("WDIOC_GETTEMP: '%s'\n", strerror(errno));
else
printf("Temperature %d\n", temperature);
break;
case 't':
flags = strtoul(optarg, NULL, 0);
ret = ioctl(fd, WDIOC_SETTIMEOUT, &flags);
if (!ret)
printf("Watchdog timeout set to %u seconds.\n", flags);
else {
printf("WDIOC_SETTIMEOUT error '%s'\n", strerror(errno));
oneshot = 1;
}
break;
case 'T':
oneshot = 1;
ret = ioctl(fd, WDIOC_GETTIMEOUT, &flags);
if (!ret)
printf("WDIOC_GETTIMEOUT returns %u seconds.\n", flags);
else
printf("WDIOC_GETTIMEOUT error '%s'\n", strerror(errno));
break;
case 'n':
flags = strtoul(optarg, NULL, 0);
ret = ioctl(fd, WDIOC_SETPRETIMEOUT, &flags);
if (!ret)
printf("Watchdog pretimeout set to %u seconds.\n", flags);
else {
printf("WDIOC_SETPRETIMEOUT error '%s'\n", strerror(errno));
oneshot = 1;
}
break;
case 'N':
oneshot = 1;
ret = ioctl(fd, WDIOC_GETPRETIMEOUT, &flags);
if (!ret)
printf("WDIOC_GETPRETIMEOUT returns %u seconds.\n", flags);
else
printf("WDIOC_GETPRETIMEOUT error '%s'\n", strerror(errno));
break;
case 'L':
oneshot = 1;
ret = ioctl(fd, WDIOC_GETTIMELEFT, &flags);
if (!ret)
printf("WDIOC_GETTIMELEFT returns %u seconds.\n", flags);
else
printf("WDIOC_GETTIMELEFT error '%s'\n", strerror(errno));
break;
case 'f':
/* Handled above */
break;
case 'i':
/*
* watchdog_info was obtained as part of file open
* validation. So we just show it here.
*/
oneshot = 1;
printf("watchdog_info:\n");
printf(" identity:\t\t%s\n", info.identity);
printf(" firmware_version:\t%u\n",
info.firmware_version);
print_status(info.options);
break;
default:
usage(argv[0]);
goto end;
}
}
if (oneshot)
goto end;
printf("Watchdog Ticking Away!\n");
signal(SIGINT, term);
while (1) {
keep_alive();
sleep(ping_rate);
}
end:
/*
* Send specific magic character 'V' just in case Magic Close is
* enabled to ensure watchdog gets disabled on close.
*/
ret = write(fd, &v, 1);
if (ret < 0)
printf("Stopping watchdog ticks failed (%d)...\n", errno);
close(fd);
return 0;
}
| linux-master | tools/testing/selftests/watchdog/watchdog-test.c |
// SPDX-License-Identifier: GPL-2.0
#define _GNU_SOURCE
#include <linux/membarrier.h>
#include <syscall.h>
#include <stdio.h>
#include <errno.h>
#include <string.h>
#include <pthread.h>
#include "membarrier_test_impl.h"
static int thread_ready, thread_quit;
static pthread_mutex_t test_membarrier_thread_mutex =
PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t test_membarrier_thread_cond =
PTHREAD_COND_INITIALIZER;
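/*
 * Worker thread: announce readiness under the mutex, then block on the
 * condition variable until the main thread sets thread_quit.
 */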
void *test_membarrier_thread(void *arg)
{
pthread_mutex_lock(&test_membarrier_thread_mutex);
thread_ready = 1;
pthread_cond_broadcast(&test_membarrier_thread_cond);
pthread_mutex_unlock(&test_membarrier_thread_mutex);
pthread_mutex_lock(&test_membarrier_thread_mutex);
while (!thread_quit)
pthread_cond_wait(&test_membarrier_thread_cond,
&test_membarrier_thread_mutex);
pthread_mutex_unlock(&test_membarrier_thread_mutex);
return NULL;
}
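/*
 * Re-run the failure and success membarrier tests while a second thread
 * is alive, so that the multi-threaded kernel paths get exercised.
 */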
static int test_mt_membarrier(void)
{
int i;
pthread_t test_thread;
pthread_create(&test_thread, NULL,
test_membarrier_thread, NULL);
pthread_mutex_lock(&test_membarrier_thread_mutex);
while (!thread_ready)
pthread_cond_wait(&test_membarrier_thread_cond,
&test_membarrier_thread_mutex);
pthread_mutex_unlock(&test_membarrier_thread_mutex);
test_membarrier_fail();
test_membarrier_success();
pthread_mutex_lock(&test_membarrier_thread_mutex);
thread_quit = 1;
pthread_cond_broadcast(&test_membarrier_thread_cond);
pthread_mutex_unlock(&test_membarrier_thread_mutex);
pthread_join(test_thread, NULL);
return 0;
}
int main(int argc, char **argv)
{
ksft_print_header();
ksft_set_plan(16);
test_membarrier_query();
/* Multi-threaded */
test_mt_membarrier();
return ksft_exit_pass();
}
| linux-master | tools/testing/selftests/membarrier/membarrier_test_multi_thread.c |
// SPDX-License-Identifier: GPL-2.0
#define _GNU_SOURCE
#include <linux/membarrier.h>
#include <syscall.h>
#include <stdio.h>
#include <errno.h>
#include <string.h>
#include <pthread.h>
#include "membarrier_test_impl.h"
int main(int argc, char **argv)
{
ksft_print_header();
ksft_set_plan(18);
test_membarrier_get_registrations(/*cmd=*/0);
test_membarrier_query();
test_membarrier_fail();
test_membarrier_success();
test_membarrier_get_registrations(/*cmd=*/0);
return ksft_exit_pass();
}
| linux-master | tools/testing/selftests/membarrier/membarrier_test_single_thread.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Author: Aleksa Sarai <[email protected]>
* Copyright (C) 2018-2019 SUSE LLC.
*/
#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <sched.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/mount.h>
#include <sys/mman.h>
#include <sys/prctl.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <string.h>
#include <syscall.h>
#include <limits.h>
#include <unistd.h>
#include "../kselftest.h"
#include "helpers.h"
/* Construct a test directory with the following structure:
*
* root/
* |-- a/
* | `-- c/
* `-- b/
*/
int setup_testdir(void)
{
int dfd;
char dirname[] = "/tmp/ksft-openat2-rename-attack.XXXXXX";
/* Make the top-level directory. */
if (!mkdtemp(dirname))
ksft_exit_fail_msg("setup_testdir: failed to create tmpdir\n");
dfd = open(dirname, O_PATH | O_DIRECTORY);
if (dfd < 0)
ksft_exit_fail_msg("setup_testdir: failed to open tmpdir\n");
E_mkdirat(dfd, "a", 0755);
E_mkdirat(dfd, "b", 0755);
E_mkdirat(dfd, "a/c", 0755);
return dfd;
}
/* Swap @dirfd/@a and @dirfd/@b constantly. Parent must kill this process. */
pid_t spawn_attack(int dirfd, char *a, char *b)
{
pid_t child = fork();
if (child != 0)
return child;
/* If the parent (the test process) dies, kill ourselves too. */
E_prctl(PR_SET_PDEATHSIG, SIGKILL);
/* Swap @a and @b. */
for (;;)
renameat2(dirfd, a, dirfd, b, RENAME_EXCHANGE);
exit(1);
}
#define NUM_RENAME_TESTS 2
#define ROUNDS 400000
const char *flagname(int resolve)
{
switch (resolve) {
case RESOLVE_IN_ROOT:
return "RESOLVE_IN_ROOT";
case RESOLVE_BENEATH:
return "RESOLVE_BENEATH";
}
return "(unknown)";
}
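/*
 * Repeatedly open a path that chains many "c/../.." components starting
 * inside a/, giving the concurrent RENAME_EXCHANGE attacker plenty of
 * chances to swap a/c and b mid-walk. With RESOLVE_BENEATH or
 * RESOLVE_IN_ROOT the lookup must never escape afd, no matter when the
 * swap lands.
 */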
void test_rename_attack(int resolve)
{
int dfd, afd;
pid_t child;
void (*resultfn)(const char *msg, ...) = ksft_test_result_pass;
int escapes = 0, other_errs = 0, exdevs = 0, eagains = 0, successes = 0;
struct open_how how = {
.flags = O_PATH,
.resolve = resolve,
};
if (!openat2_supported) {
how.resolve = 0;
ksft_print_msg("openat2(2) unsupported -- using openat(2) instead\n");
}
dfd = setup_testdir();
afd = openat(dfd, "a", O_PATH);
if (afd < 0)
ksft_exit_fail_msg("test_rename_attack: failed to open 'a'\n");
child = spawn_attack(dfd, "a/c", "b");
for (int i = 0; i < ROUNDS; i++) {
int fd;
char *victim_path = "c/../../c/../../c/../../c/../../c/../../c/../../c/../../c/../../c/../../c/../../c/../../c/../../c/../../c/../../c/../../c/../../c/../../c/../../c/../..";
if (openat2_supported)
fd = sys_openat2(afd, victim_path, &how);
else
fd = sys_openat(afd, victim_path, &how);
if (fd < 0) {
if (fd == -EAGAIN)
eagains++;
else if (fd == -EXDEV)
exdevs++;
else if (fd == -ENOENT)
escapes++; /* escaped outside and got ENOENT... */
else
other_errs++; /* unexpected error */
} else {
if (fdequal(fd, afd, NULL))
successes++;
else
escapes++; /* we got an unexpected fd */
}
if (fd >= 0)
close(fd);
}
if (escapes > 0)
resultfn = ksft_test_result_fail;
ksft_print_msg("non-escapes: EAGAIN=%d EXDEV=%d E<other>=%d success=%d\n",
eagains, exdevs, other_errs, successes);
resultfn("rename attack with %s (%d runs, got %d escapes)\n",
flagname(resolve), ROUNDS, escapes);
/* Should be killed anyway, but might as well make sure. */
E_kill(child, SIGKILL);
}
#define NUM_TESTS NUM_RENAME_TESTS
int main(int argc, char **argv)
{
ksft_print_header();
ksft_set_plan(NUM_TESTS);
test_rename_attack(RESOLVE_BENEATH);
test_rename_attack(RESOLVE_IN_ROOT);
if (ksft_get_fail_cnt() + ksft_get_error_cnt() > 0)
ksft_exit_fail();
else
ksft_exit_pass();
}
| linux-master | tools/testing/selftests/openat2/rename_attack_test.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Author: Aleksa Sarai <[email protected]>
* Copyright (C) 2018-2019 SUSE LLC.
*/
#define _GNU_SOURCE
#include <fcntl.h>
#include <sched.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/mount.h>
#include <stdlib.h>
#include <stdbool.h>
#include <string.h>
#include "../kselftest.h"
#include "helpers.h"
/*
* Construct a test directory with the following structure:
*
* root/
* |-- procexe -> /proc/self/exe
* |-- procroot -> /proc/self/root
* |-- root/
* |-- mnt/ [mountpoint]
* | |-- self -> ../mnt/
* | `-- absself -> /mnt/
* |-- etc/
* | `-- passwd
* |-- creatlink -> /newfile3
* |-- reletc -> etc/
* |-- relsym -> etc/passwd
* |-- absetc -> /etc/
* |-- abssym -> /etc/passwd
* |-- abscheeky -> /cheeky
* `-- cheeky/
* |-- absself -> /
* |-- self -> ../../root/
* |-- garbageself -> /../../root/
* |-- passwd -> ../cheeky/../cheeky/../etc/../etc/passwd
* |-- abspasswd -> /../cheeky/../cheeky/../etc/../etc/passwd
* |-- dotdotlink -> ../../../../../../../../../../../../../../etc/passwd
* `-- garbagelink -> /../../../../../../../../../../../../../../etc/passwd
*/
int setup_testdir(void)
{
int dfd, tmpfd;
char dirname[] = "/tmp/ksft-openat2-testdir.XXXXXX";
/* Unshare and make /tmp a new directory. */
E_unshare(CLONE_NEWNS);
E_mount("", "/tmp", "", MS_PRIVATE, "");
/* Make the top-level directory. */
if (!mkdtemp(dirname))
ksft_exit_fail_msg("setup_testdir: failed to create tmpdir\n");
dfd = open(dirname, O_PATH | O_DIRECTORY);
if (dfd < 0)
ksft_exit_fail_msg("setup_testdir: failed to open tmpdir\n");
/* A sub-directory which is actually used for tests. */
E_mkdirat(dfd, "root", 0755);
tmpfd = openat(dfd, "root", O_PATH | O_DIRECTORY);
if (tmpfd < 0)
ksft_exit_fail_msg("setup_testdir: failed to open tmpdir\n");
close(dfd);
dfd = tmpfd;
E_symlinkat("/proc/self/exe", dfd, "procexe");
E_symlinkat("/proc/self/root", dfd, "procroot");
E_mkdirat(dfd, "root", 0755);
/* There is no mountat(2), so use chdir. */
E_mkdirat(dfd, "mnt", 0755);
E_fchdir(dfd);
E_mount("tmpfs", "./mnt", "tmpfs", MS_NOSUID | MS_NODEV, "");
E_symlinkat("../mnt/", dfd, "mnt/self");
E_symlinkat("/mnt/", dfd, "mnt/absself");
E_mkdirat(dfd, "etc", 0755);
E_touchat(dfd, "etc/passwd");
E_symlinkat("/newfile3", dfd, "creatlink");
E_symlinkat("etc/", dfd, "reletc");
E_symlinkat("etc/passwd", dfd, "relsym");
E_symlinkat("/etc/", dfd, "absetc");
E_symlinkat("/etc/passwd", dfd, "abssym");
E_symlinkat("/cheeky", dfd, "abscheeky");
E_mkdirat(dfd, "cheeky", 0755);
E_symlinkat("/", dfd, "cheeky/absself");
E_symlinkat("../../root/", dfd, "cheeky/self");
E_symlinkat("/../../root/", dfd, "cheeky/garbageself");
E_symlinkat("../cheeky/../etc/../etc/passwd", dfd, "cheeky/passwd");
E_symlinkat("/../cheeky/../etc/../etc/passwd", dfd, "cheeky/abspasswd");
E_symlinkat("../../../../../../../../../../../../../../etc/passwd",
dfd, "cheeky/dotdotlink");
E_symlinkat("/../../../../../../../../../../../../../../etc/passwd",
dfd, "cheeky/garbagelink");
return dfd;
}
struct basic_test {
const char *name;
const char *dir;
const char *path;
struct open_how how;
bool pass;
union {
int err;
const char *path;
} out;
};
#define NUM_OPENAT2_OPATH_TESTS 88
void test_openat2_opath_tests(void)
{
int rootfd, hardcoded_fd;
char *procselfexe, *hardcoded_fdpath;
E_asprintf(&procselfexe, "/proc/%d/exe", getpid());
rootfd = setup_testdir();
hardcoded_fd = open("/dev/null", O_RDONLY);
E_assert(hardcoded_fd >= 0, "open fd to hardcode");
E_asprintf(&hardcoded_fdpath, "self/fd/%d", hardcoded_fd);
struct basic_test tests[] = {
/** RESOLVE_BENEATH **/
/* Attempts to cross dirfd should be blocked. */
{ .name = "[beneath] jump to /",
.path = "/", .how.resolve = RESOLVE_BENEATH,
.out.err = -EXDEV, .pass = false },
{ .name = "[beneath] absolute link to $root",
.path = "cheeky/absself", .how.resolve = RESOLVE_BENEATH,
.out.err = -EXDEV, .pass = false },
{ .name = "[beneath] chained absolute links to $root",
.path = "abscheeky/absself", .how.resolve = RESOLVE_BENEATH,
.out.err = -EXDEV, .pass = false },
{ .name = "[beneath] jump outside $root",
.path = "..", .how.resolve = RESOLVE_BENEATH,
.out.err = -EXDEV, .pass = false },
{ .name = "[beneath] temporary jump outside $root",
.path = "../root/", .how.resolve = RESOLVE_BENEATH,
.out.err = -EXDEV, .pass = false },
{ .name = "[beneath] symlink temporary jump outside $root",
.path = "cheeky/self", .how.resolve = RESOLVE_BENEATH,
.out.err = -EXDEV, .pass = false },
{ .name = "[beneath] chained symlink temporary jump outside $root",
.path = "abscheeky/self", .how.resolve = RESOLVE_BENEATH,
.out.err = -EXDEV, .pass = false },
{ .name = "[beneath] garbage links to $root",
.path = "cheeky/garbageself", .how.resolve = RESOLVE_BENEATH,
.out.err = -EXDEV, .pass = false },
{ .name = "[beneath] chained garbage links to $root",
.path = "abscheeky/garbageself", .how.resolve = RESOLVE_BENEATH,
.out.err = -EXDEV, .pass = false },
/* Only relative paths that stay inside dirfd should work. */
{ .name = "[beneath] ordinary path to 'root'",
.path = "root", .how.resolve = RESOLVE_BENEATH,
.out.path = "root", .pass = true },
{ .name = "[beneath] ordinary path to 'etc'",
.path = "etc", .how.resolve = RESOLVE_BENEATH,
.out.path = "etc", .pass = true },
{ .name = "[beneath] ordinary path to 'etc/passwd'",
.path = "etc/passwd", .how.resolve = RESOLVE_BENEATH,
.out.path = "etc/passwd", .pass = true },
{ .name = "[beneath] relative symlink inside $root",
.path = "relsym", .how.resolve = RESOLVE_BENEATH,
.out.path = "etc/passwd", .pass = true },
{ .name = "[beneath] chained-'..' relative symlink inside $root",
.path = "cheeky/passwd", .how.resolve = RESOLVE_BENEATH,
.out.path = "etc/passwd", .pass = true },
{ .name = "[beneath] absolute symlink component outside $root",
.path = "abscheeky/passwd", .how.resolve = RESOLVE_BENEATH,
.out.err = -EXDEV, .pass = false },
{ .name = "[beneath] absolute symlink target outside $root",
.path = "abssym", .how.resolve = RESOLVE_BENEATH,
.out.err = -EXDEV, .pass = false },
{ .name = "[beneath] absolute path outside $root",
.path = "/etc/passwd", .how.resolve = RESOLVE_BENEATH,
.out.err = -EXDEV, .pass = false },
{ .name = "[beneath] cheeky absolute path outside $root",
.path = "cheeky/abspasswd", .how.resolve = RESOLVE_BENEATH,
.out.err = -EXDEV, .pass = false },
{ .name = "[beneath] chained cheeky absolute path outside $root",
.path = "abscheeky/abspasswd", .how.resolve = RESOLVE_BENEATH,
.out.err = -EXDEV, .pass = false },
/* Tricky paths should fail. */
{ .name = "[beneath] tricky '..'-chained symlink outside $root",
.path = "cheeky/dotdotlink", .how.resolve = RESOLVE_BENEATH,
.out.err = -EXDEV, .pass = false },
{ .name = "[beneath] tricky absolute + '..'-chained symlink outside $root",
.path = "abscheeky/dotdotlink", .how.resolve = RESOLVE_BENEATH,
.out.err = -EXDEV, .pass = false },
{ .name = "[beneath] tricky garbage link outside $root",
.path = "cheeky/garbagelink", .how.resolve = RESOLVE_BENEATH,
.out.err = -EXDEV, .pass = false },
{ .name = "[beneath] tricky absolute + garbage link outside $root",
.path = "abscheeky/garbagelink", .how.resolve = RESOLVE_BENEATH,
.out.err = -EXDEV, .pass = false },
/** RESOLVE_IN_ROOT **/
/* All attempts to cross the dirfd will be scoped-to-root. */
{ .name = "[in_root] jump to /",
.path = "/", .how.resolve = RESOLVE_IN_ROOT,
.out.path = NULL, .pass = true },
{ .name = "[in_root] absolute symlink to /root",
.path = "cheeky/absself", .how.resolve = RESOLVE_IN_ROOT,
.out.path = NULL, .pass = true },
{ .name = "[in_root] chained absolute symlinks to /root",
.path = "abscheeky/absself", .how.resolve = RESOLVE_IN_ROOT,
.out.path = NULL, .pass = true },
{ .name = "[in_root] '..' at root",
.path = "..", .how.resolve = RESOLVE_IN_ROOT,
.out.path = NULL, .pass = true },
{ .name = "[in_root] '../root' at root",
.path = "../root/", .how.resolve = RESOLVE_IN_ROOT,
.out.path = "root", .pass = true },
{ .name = "[in_root] relative symlink containing '..' above root",
.path = "cheeky/self", .how.resolve = RESOLVE_IN_ROOT,
.out.path = "root", .pass = true },
{ .name = "[in_root] garbage link to /root",
.path = "cheeky/garbageself", .how.resolve = RESOLVE_IN_ROOT,
.out.path = "root", .pass = true },
{ .name = "[in_root] chained garbage links to /root",
.path = "abscheeky/garbageself", .how.resolve = RESOLVE_IN_ROOT,
.out.path = "root", .pass = true },
{ .name = "[in_root] relative path to 'root'",
.path = "root", .how.resolve = RESOLVE_IN_ROOT,
.out.path = "root", .pass = true },
{ .name = "[in_root] relative path to 'etc'",
.path = "etc", .how.resolve = RESOLVE_IN_ROOT,
.out.path = "etc", .pass = true },
{ .name = "[in_root] relative path to 'etc/passwd'",
.path = "etc/passwd", .how.resolve = RESOLVE_IN_ROOT,
.out.path = "etc/passwd", .pass = true },
{ .name = "[in_root] relative symlink to 'etc/passwd'",
.path = "relsym", .how.resolve = RESOLVE_IN_ROOT,
.out.path = "etc/passwd", .pass = true },
{ .name = "[in_root] chained-'..' relative symlink to 'etc/passwd'",
.path = "cheeky/passwd", .how.resolve = RESOLVE_IN_ROOT,
.out.path = "etc/passwd", .pass = true },
{ .name = "[in_root] chained-'..' absolute + relative symlink to 'etc/passwd'",
.path = "abscheeky/passwd", .how.resolve = RESOLVE_IN_ROOT,
.out.path = "etc/passwd", .pass = true },
{ .name = "[in_root] absolute symlink to 'etc/passwd'",
.path = "abssym", .how.resolve = RESOLVE_IN_ROOT,
.out.path = "etc/passwd", .pass = true },
{ .name = "[in_root] absolute path 'etc/passwd'",
.path = "/etc/passwd", .how.resolve = RESOLVE_IN_ROOT,
.out.path = "etc/passwd", .pass = true },
{ .name = "[in_root] cheeky absolute path 'etc/passwd'",
.path = "cheeky/abspasswd", .how.resolve = RESOLVE_IN_ROOT,
.out.path = "etc/passwd", .pass = true },
{ .name = "[in_root] chained cheeky absolute path 'etc/passwd'",
.path = "abscheeky/abspasswd", .how.resolve = RESOLVE_IN_ROOT,
.out.path = "etc/passwd", .pass = true },
{ .name = "[in_root] tricky '..'-chained symlink outside $root",
.path = "cheeky/dotdotlink", .how.resolve = RESOLVE_IN_ROOT,
.out.path = "etc/passwd", .pass = true },
{ .name = "[in_root] tricky absolute + '..'-chained symlink outside $root",
.path = "abscheeky/dotdotlink", .how.resolve = RESOLVE_IN_ROOT,
.out.path = "etc/passwd", .pass = true },
{ .name = "[in_root] tricky absolute path + absolute + '..'-chained symlink outside $root",
.path = "/../../../../abscheeky/dotdotlink", .how.resolve = RESOLVE_IN_ROOT,
.out.path = "etc/passwd", .pass = true },
{ .name = "[in_root] tricky garbage link outside $root",
.path = "cheeky/garbagelink", .how.resolve = RESOLVE_IN_ROOT,
.out.path = "etc/passwd", .pass = true },
{ .name = "[in_root] tricky absolute + garbage link outside $root",
.path = "abscheeky/garbagelink", .how.resolve = RESOLVE_IN_ROOT,
.out.path = "etc/passwd", .pass = true },
{ .name = "[in_root] tricky absolute path + absolute + garbage link outside $root",
.path = "/../../../../abscheeky/garbagelink", .how.resolve = RESOLVE_IN_ROOT,
.out.path = "etc/passwd", .pass = true },
/* O_CREAT should handle trailing symlinks correctly. */
{ .name = "[in_root] O_CREAT of relative path inside $root",
.path = "newfile1", .how.flags = O_CREAT,
.how.mode = 0700,
.how.resolve = RESOLVE_IN_ROOT,
.out.path = "newfile1", .pass = true },
{ .name = "[in_root] O_CREAT of absolute path",
.path = "/newfile2", .how.flags = O_CREAT,
.how.mode = 0700,
.how.resolve = RESOLVE_IN_ROOT,
.out.path = "newfile2", .pass = true },
{ .name = "[in_root] O_CREAT of tricky symlink outside root",
.path = "/creatlink", .how.flags = O_CREAT,
.how.mode = 0700,
.how.resolve = RESOLVE_IN_ROOT,
.out.path = "newfile3", .pass = true },
/** RESOLVE_NO_XDEV **/
/* Crossing *down* into a mountpoint is disallowed. */
{ .name = "[no_xdev] cross into $mnt",
.path = "mnt", .how.resolve = RESOLVE_NO_XDEV,
.out.err = -EXDEV, .pass = false },
{ .name = "[no_xdev] cross into $mnt/",
.path = "mnt/", .how.resolve = RESOLVE_NO_XDEV,
.out.err = -EXDEV, .pass = false },
{ .name = "[no_xdev] cross into $mnt/.",
.path = "mnt/.", .how.resolve = RESOLVE_NO_XDEV,
.out.err = -EXDEV, .pass = false },
/* Crossing *up* out of a mountpoint is disallowed. */
{ .name = "[no_xdev] goto mountpoint root",
.dir = "mnt", .path = ".", .how.resolve = RESOLVE_NO_XDEV,
.out.path = "mnt", .pass = true },
{ .name = "[no_xdev] cross up through '..'",
.dir = "mnt", .path = "..", .how.resolve = RESOLVE_NO_XDEV,
.out.err = -EXDEV, .pass = false },
{ .name = "[no_xdev] temporary cross up through '..'",
.dir = "mnt", .path = "../mnt", .how.resolve = RESOLVE_NO_XDEV,
.out.err = -EXDEV, .pass = false },
{ .name = "[no_xdev] temporary relative symlink cross up",
.dir = "mnt", .path = "self", .how.resolve = RESOLVE_NO_XDEV,
.out.err = -EXDEV, .pass = false },
{ .name = "[no_xdev] temporary absolute symlink cross up",
.dir = "mnt", .path = "absself", .how.resolve = RESOLVE_NO_XDEV,
.out.err = -EXDEV, .pass = false },
/* Jumping to "/" is ok, but later components cannot cross. */
{ .name = "[no_xdev] jump to / directly",
.dir = "mnt", .path = "/", .how.resolve = RESOLVE_NO_XDEV,
.out.path = "/", .pass = true },
{ .name = "[no_xdev] jump to / (from /) directly",
.dir = "/", .path = "/", .how.resolve = RESOLVE_NO_XDEV,
.out.path = "/", .pass = true },
{ .name = "[no_xdev] jump to / then proc",
.path = "/proc/1", .how.resolve = RESOLVE_NO_XDEV,
.out.err = -EXDEV, .pass = false },
{ .name = "[no_xdev] jump to / then tmp",
.path = "/tmp", .how.resolve = RESOLVE_NO_XDEV,
.out.err = -EXDEV, .pass = false },
/* Magic-links are blocked since they can switch vfsmounts. */
{ .name = "[no_xdev] cross through magic-link to self/root",
.dir = "/proc", .path = "self/root", .how.resolve = RESOLVE_NO_XDEV,
.out.err = -EXDEV, .pass = false },
{ .name = "[no_xdev] cross through magic-link to self/cwd",
.dir = "/proc", .path = "self/cwd", .how.resolve = RESOLVE_NO_XDEV,
.out.err = -EXDEV, .pass = false },
/* Except magic-link jumps inside the same vfsmount. */
{ .name = "[no_xdev] jump through magic-link to same procfs",
.dir = "/proc", .path = hardcoded_fdpath, .how.resolve = RESOLVE_NO_XDEV,
.out.path = "/proc", .pass = true, },
/** RESOLVE_NO_MAGICLINKS **/
/* Regular symlinks should work. */
{ .name = "[no_magiclinks] ordinary relative symlink",
.path = "relsym", .how.resolve = RESOLVE_NO_MAGICLINKS,
.out.path = "etc/passwd", .pass = true },
/* Magic-links should not work. */
{ .name = "[no_magiclinks] symlink to magic-link",
.path = "procexe", .how.resolve = RESOLVE_NO_MAGICLINKS,
.out.err = -ELOOP, .pass = false },
{ .name = "[no_magiclinks] normal path to magic-link",
.path = "/proc/self/exe", .how.resolve = RESOLVE_NO_MAGICLINKS,
.out.err = -ELOOP, .pass = false },
{ .name = "[no_magiclinks] normal path to magic-link with O_NOFOLLOW",
.path = "/proc/self/exe", .how.flags = O_NOFOLLOW,
.how.resolve = RESOLVE_NO_MAGICLINKS,
.out.path = procselfexe, .pass = true },
{ .name = "[no_magiclinks] symlink to magic-link path component",
.path = "procroot/etc", .how.resolve = RESOLVE_NO_MAGICLINKS,
.out.err = -ELOOP, .pass = false },
{ .name = "[no_magiclinks] magic-link path component",
.path = "/proc/self/root/etc", .how.resolve = RESOLVE_NO_MAGICLINKS,
.out.err = -ELOOP, .pass = false },
{ .name = "[no_magiclinks] magic-link path component with O_NOFOLLOW",
.path = "/proc/self/root/etc", .how.flags = O_NOFOLLOW,
.how.resolve = RESOLVE_NO_MAGICLINKS,
.out.err = -ELOOP, .pass = false },
/** RESOLVE_NO_SYMLINKS **/
/* Normal paths should work. */
{ .name = "[no_symlinks] ordinary path to '.'",
.path = ".", .how.resolve = RESOLVE_NO_SYMLINKS,
.out.path = NULL, .pass = true },
{ .name = "[no_symlinks] ordinary path to 'root'",
.path = "root", .how.resolve = RESOLVE_NO_SYMLINKS,
.out.path = "root", .pass = true },
{ .name = "[no_symlinks] ordinary path to 'etc'",
.path = "etc", .how.resolve = RESOLVE_NO_SYMLINKS,
.out.path = "etc", .pass = true },
{ .name = "[no_symlinks] ordinary path to 'etc/passwd'",
.path = "etc/passwd", .how.resolve = RESOLVE_NO_SYMLINKS,
.out.path = "etc/passwd", .pass = true },
/* Regular symlinks are blocked. */
{ .name = "[no_symlinks] relative symlink target",
.path = "relsym", .how.resolve = RESOLVE_NO_SYMLINKS,
.out.err = -ELOOP, .pass = false },
{ .name = "[no_symlinks] relative symlink component",
.path = "reletc/passwd", .how.resolve = RESOLVE_NO_SYMLINKS,
.out.err = -ELOOP, .pass = false },
{ .name = "[no_symlinks] absolute symlink target",
.path = "abssym", .how.resolve = RESOLVE_NO_SYMLINKS,
.out.err = -ELOOP, .pass = false },
{ .name = "[no_symlinks] absolute symlink component",
.path = "absetc/passwd", .how.resolve = RESOLVE_NO_SYMLINKS,
.out.err = -ELOOP, .pass = false },
{ .name = "[no_symlinks] cheeky garbage link",
.path = "cheeky/garbagelink", .how.resolve = RESOLVE_NO_SYMLINKS,
.out.err = -ELOOP, .pass = false },
{ .name = "[no_symlinks] cheeky absolute + garbage link",
.path = "abscheeky/garbagelink", .how.resolve = RESOLVE_NO_SYMLINKS,
.out.err = -ELOOP, .pass = false },
{ .name = "[no_symlinks] cheeky absolute + absolute symlink",
.path = "abscheeky/absself", .how.resolve = RESOLVE_NO_SYMLINKS,
.out.err = -ELOOP, .pass = false },
/* Trailing symlinks with NO_FOLLOW. */
{ .name = "[no_symlinks] relative symlink with O_NOFOLLOW",
.path = "relsym", .how.flags = O_NOFOLLOW,
.how.resolve = RESOLVE_NO_SYMLINKS,
.out.path = "relsym", .pass = true },
{ .name = "[no_symlinks] absolute symlink with O_NOFOLLOW",
.path = "abssym", .how.flags = O_NOFOLLOW,
.how.resolve = RESOLVE_NO_SYMLINKS,
.out.path = "abssym", .pass = true },
{ .name = "[no_symlinks] trailing symlink with O_NOFOLLOW",
.path = "cheeky/garbagelink", .how.flags = O_NOFOLLOW,
.how.resolve = RESOLVE_NO_SYMLINKS,
.out.path = "cheeky/garbagelink", .pass = true },
{ .name = "[no_symlinks] multiple symlink components with O_NOFOLLOW",
.path = "abscheeky/absself", .how.flags = O_NOFOLLOW,
.how.resolve = RESOLVE_NO_SYMLINKS,
.out.err = -ELOOP, .pass = false },
{ .name = "[no_symlinks] multiple symlink (and garbage link) components with O_NOFOLLOW",
.path = "abscheeky/garbagelink", .how.flags = O_NOFOLLOW,
.how.resolve = RESOLVE_NO_SYMLINKS,
.out.err = -ELOOP, .pass = false },
};
BUILD_BUG_ON(ARRAY_LEN(tests) != NUM_OPENAT2_OPATH_TESTS);
for (int i = 0; i < ARRAY_LEN(tests); i++) {
int dfd, fd;
char *fdpath = NULL;
bool failed;
void (*resultfn)(const char *msg, ...) = ksft_test_result_pass;
struct basic_test *test = &tests[i];
if (!openat2_supported) {
ksft_print_msg("openat2(2) unsupported\n");
resultfn = ksft_test_result_skip;
goto skip;
}
/* Auto-set O_PATH. */
if (!(test->how.flags & O_CREAT))
test->how.flags |= O_PATH;
if (test->dir)
dfd = openat(rootfd, test->dir, O_PATH | O_DIRECTORY);
else
dfd = dup(rootfd);
E_assert(dfd >= 0, "failed to openat root '%s': %m", test->dir);
E_dup2(dfd, hardcoded_fd);
fd = sys_openat2(dfd, test->path, &test->how);
if (test->pass)
failed = (fd < 0 || !fdequal(fd, rootfd, test->out.path));
else
failed = (fd != test->out.err);
if (fd >= 0) {
fdpath = fdreadlink(fd);
close(fd);
}
close(dfd);
if (failed) {
resultfn = ksft_test_result_fail;
ksft_print_msg("openat2 unexpectedly returned ");
if (fdpath)
ksft_print_msg("%d['%s']\n", fd, fdpath);
else
ksft_print_msg("%d (%s)\n", fd, strerror(-fd));
}
skip:
if (test->pass)
resultfn("%s gives path '%s'\n", test->name,
test->out.path ?: ".");
else
resultfn("%s fails with %d (%s)\n", test->name,
test->out.err, strerror(-test->out.err));
fflush(stdout);
free(fdpath);
}
free(procselfexe);
close(rootfd);
free(hardcoded_fdpath);
close(hardcoded_fd);
}
#define NUM_TESTS NUM_OPENAT2_OPATH_TESTS
int main(int argc, char **argv)
{
ksft_print_header();
ksft_set_plan(NUM_TESTS);
/* NOTE: We should be checking for CAP_SYS_ADMIN here... */
if (geteuid() != 0)
ksft_exit_skip("all tests require euid == 0\n");
test_openat2_opath_tests();
if (ksft_get_fail_cnt() + ksft_get_error_cnt() > 0)
ksft_exit_fail();
else
ksft_exit_pass();
}
| linux-master | tools/testing/selftests/openat2/resolve_test.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Author: Aleksa Sarai <[email protected]>
* Copyright (C) 2018-2019 SUSE LLC.
*/
#define _GNU_SOURCE
#include <fcntl.h>
#include <sched.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/mount.h>
#include <stdlib.h>
#include <stdbool.h>
#include <string.h>
#include "../kselftest.h"
#include "helpers.h"
/*
* O_LARGEFILE is set to 0 by glibc.
* XXX: This is wrong on {mips, parisc, powerpc, sparc}.
*/
#undef O_LARGEFILE
#ifdef __aarch64__
#define O_LARGEFILE 0x20000
#else
#define O_LARGEFILE 0x8000
#endif
struct open_how_ext {
struct open_how inner;
uint32_t extra1;
char pad1[128];
uint32_t extra2;
char pad2[128];
uint32_t extra3;
};
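/*
 * open_how_ext mimics a hypothetical future, larger struct open_how: the
 * extra1..extra3 fields sit past the current struct, letting the tests
 * below verify that all-zero trailing bytes are accepted while non-zero
 * trailing bytes are rejected with -E2BIG.
 */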
struct struct_test {
const char *name;
struct open_how_ext arg;
size_t size;
int err;
};
#define NUM_OPENAT2_STRUCT_TESTS 7
#define NUM_OPENAT2_STRUCT_VARIATIONS 13
void test_openat2_struct(void)
{
int misalignments[] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 17, 87 };
struct struct_test tests[] = {
/* Normal struct. */
{ .name = "normal struct",
.arg.inner.flags = O_RDONLY,
.size = sizeof(struct open_how) },
/* Bigger struct, with zeroed out end. */
{ .name = "bigger struct (zeroed out)",
.arg.inner.flags = O_RDONLY,
.size = sizeof(struct open_how_ext) },
/* TODO: Once expanded, check zero-padding. */
/* Smaller than version-0 struct. */
{ .name = "zero-sized 'struct'",
.arg.inner.flags = O_RDONLY, .size = 0, .err = -EINVAL },
{ .name = "smaller-than-v0 struct",
.arg.inner.flags = O_RDONLY,
.size = OPEN_HOW_SIZE_VER0 - 1, .err = -EINVAL },
/* Bigger struct, with non-zero trailing bytes. */
{ .name = "bigger struct (non-zero data in first 'future field')",
.arg.inner.flags = O_RDONLY, .arg.extra1 = 0xdeadbeef,
.size = sizeof(struct open_how_ext), .err = -E2BIG },
{ .name = "bigger struct (non-zero data in middle of 'future fields')",
.arg.inner.flags = O_RDONLY, .arg.extra2 = 0xfeedcafe,
.size = sizeof(struct open_how_ext), .err = -E2BIG },
{ .name = "bigger struct (non-zero data at end of 'future fields')",
.arg.inner.flags = O_RDONLY, .arg.extra3 = 0xabad1dea,
.size = sizeof(struct open_how_ext), .err = -E2BIG },
};
BUILD_BUG_ON(ARRAY_LEN(misalignments) != NUM_OPENAT2_STRUCT_VARIATIONS);
BUILD_BUG_ON(ARRAY_LEN(tests) != NUM_OPENAT2_STRUCT_TESTS);
for (int i = 0; i < ARRAY_LEN(tests); i++) {
struct struct_test *test = &tests[i];
struct open_how_ext how_ext = test->arg;
for (int j = 0; j < ARRAY_LEN(misalignments); j++) {
int fd, misalign = misalignments[j];
char *fdpath = NULL;
bool failed;
void (*resultfn)(const char *msg, ...) = ksft_test_result_pass;
void *copy = NULL, *how_copy = &how_ext;
if (!openat2_supported) {
ksft_print_msg("openat2(2) unsupported\n");
resultfn = ksft_test_result_skip;
goto skip;
}
if (misalign) {
/*
* Explicitly misalign the structure copying it with the given
* (mis)alignment offset. The other data is set to be non-zero to
* make sure that non-zero bytes outside the struct aren't checked
*
* This is effectively to check that is_zeroed_user() works.
*/
copy = malloc(misalign + sizeof(how_ext));
how_copy = copy + misalign;
memset(copy, 0xff, misalign);
memcpy(how_copy, &how_ext, sizeof(how_ext));
}
fd = raw_openat2(AT_FDCWD, ".", how_copy, test->size);
if (test->err >= 0)
failed = (fd < 0);
else
failed = (fd != test->err);
if (fd >= 0) {
fdpath = fdreadlink(fd);
close(fd);
}
if (failed) {
resultfn = ksft_test_result_fail;
ksft_print_msg("openat2 unexpectedly returned ");
if (fdpath)
ksft_print_msg("%d['%s']\n", fd, fdpath);
else
ksft_print_msg("%d (%s)\n", fd, strerror(-fd));
}
skip:
if (test->err >= 0)
resultfn("openat2 with %s argument [misalign=%d] succeeds\n",
test->name, misalign);
else
resultfn("openat2 with %s argument [misalign=%d] fails with %d (%s)\n",
test->name, misalign, test->err,
strerror(-test->err));
free(copy);
free(fdpath);
fflush(stdout);
}
}
}
struct flag_test {
const char *name;
struct open_how how;
int err;
};
#define NUM_OPENAT2_FLAG_TESTS 25
void test_openat2_flags(void)
{
struct flag_test tests[] = {
/* O_TMPFILE is incompatible with O_PATH and O_CREAT. */
{ .name = "incompatible flags (O_TMPFILE | O_PATH)",
.how.flags = O_TMPFILE | O_PATH | O_RDWR, .err = -EINVAL },
{ .name = "incompatible flags (O_TMPFILE | O_CREAT)",
.how.flags = O_TMPFILE | O_CREAT | O_RDWR, .err = -EINVAL },
/* O_PATH only permits certain other flags to be set ... */
{ .name = "compatible flags (O_PATH | O_CLOEXEC)",
.how.flags = O_PATH | O_CLOEXEC },
{ .name = "compatible flags (O_PATH | O_DIRECTORY)",
.how.flags = O_PATH | O_DIRECTORY },
{ .name = "compatible flags (O_PATH | O_NOFOLLOW)",
.how.flags = O_PATH | O_NOFOLLOW },
/* ... and others are absolutely not permitted. */
{ .name = "incompatible flags (O_PATH | O_RDWR)",
.how.flags = O_PATH | O_RDWR, .err = -EINVAL },
{ .name = "incompatible flags (O_PATH | O_CREAT)",
.how.flags = O_PATH | O_CREAT, .err = -EINVAL },
{ .name = "incompatible flags (O_PATH | O_EXCL)",
.how.flags = O_PATH | O_EXCL, .err = -EINVAL },
{ .name = "incompatible flags (O_PATH | O_NOCTTY)",
.how.flags = O_PATH | O_NOCTTY, .err = -EINVAL },
{ .name = "incompatible flags (O_PATH | O_DIRECT)",
.how.flags = O_PATH | O_DIRECT, .err = -EINVAL },
{ .name = "incompatible flags (O_PATH | O_LARGEFILE)",
.how.flags = O_PATH | O_LARGEFILE, .err = -EINVAL },
/* ->mode must only be set with O_{CREAT,TMPFILE}. */
{ .name = "non-zero how.mode and O_RDONLY",
.how.flags = O_RDONLY, .how.mode = 0600, .err = -EINVAL },
{ .name = "non-zero how.mode and O_PATH",
.how.flags = O_PATH, .how.mode = 0600, .err = -EINVAL },
{ .name = "valid how.mode and O_CREAT",
.how.flags = O_CREAT, .how.mode = 0600 },
{ .name = "valid how.mode and O_TMPFILE",
.how.flags = O_TMPFILE | O_RDWR, .how.mode = 0600 },
/* ->mode must only contain 0777 bits. */
{ .name = "invalid how.mode and O_CREAT",
.how.flags = O_CREAT,
.how.mode = 0xFFFF, .err = -EINVAL },
{ .name = "invalid (very large) how.mode and O_CREAT",
.how.flags = O_CREAT,
.how.mode = 0xC000000000000000ULL, .err = -EINVAL },
{ .name = "invalid how.mode and O_TMPFILE",
.how.flags = O_TMPFILE | O_RDWR,
.how.mode = 0x1337, .err = -EINVAL },
{ .name = "invalid (very large) how.mode and O_TMPFILE",
.how.flags = O_TMPFILE | O_RDWR,
.how.mode = 0x0000A00000000000ULL, .err = -EINVAL },
/* ->resolve flags must not conflict. */
{ .name = "incompatible resolve flags (BENEATH | IN_ROOT)",
.how.flags = O_RDONLY,
.how.resolve = RESOLVE_BENEATH | RESOLVE_IN_ROOT,
.err = -EINVAL },
/* ->resolve must only contain RESOLVE_* flags. */
{ .name = "invalid how.resolve and O_RDONLY",
.how.flags = O_RDONLY,
.how.resolve = 0x1337, .err = -EINVAL },
{ .name = "invalid how.resolve and O_CREAT",
.how.flags = O_CREAT,
.how.resolve = 0x1337, .err = -EINVAL },
{ .name = "invalid how.resolve and O_TMPFILE",
.how.flags = O_TMPFILE | O_RDWR,
.how.resolve = 0x1337, .err = -EINVAL },
{ .name = "invalid how.resolve and O_PATH",
.how.flags = O_PATH,
.how.resolve = 0x1337, .err = -EINVAL },
/* currently unknown upper 32 bit rejected. */
{ .name = "currently unknown bit (1 << 63)",
.how.flags = O_RDONLY | (1ULL << 63),
.how.resolve = 0, .err = -EINVAL },
};
BUILD_BUG_ON(ARRAY_LEN(tests) != NUM_OPENAT2_FLAG_TESTS);
for (int i = 0; i < ARRAY_LEN(tests); i++) {
int fd, fdflags = -1;
char *path, *fdpath = NULL;
bool failed = false;
struct flag_test *test = &tests[i];
void (*resultfn)(const char *msg, ...) = ksft_test_result_pass;
if (!openat2_supported) {
ksft_print_msg("openat2(2) unsupported\n");
resultfn = ksft_test_result_skip;
goto skip;
}
path = (test->how.flags & O_CREAT) ? "/tmp/ksft.openat2_tmpfile" : ".";
unlink(path);
fd = sys_openat2(AT_FDCWD, path, &test->how);
if (fd == -EOPNOTSUPP) {
/*
* Skip the testcase if it failed because not supported
* by FS. (e.g. a valid O_TMPFILE combination on NFS)
*/
ksft_test_result_skip("openat2 with %s fails with %d (%s)\n",
test->name, fd, strerror(-fd));
goto next;
}
if (test->err >= 0)
failed = (fd < 0);
else
failed = (fd != test->err);
if (fd >= 0) {
int otherflags;
fdpath = fdreadlink(fd);
fdflags = fcntl(fd, F_GETFL);
otherflags = fcntl(fd, F_GETFD);
close(fd);
E_assert(fdflags >= 0, "fcntl F_GETFL of new fd");
E_assert(otherflags >= 0, "fcntl F_GETFD of new fd");
/* O_CLOEXEC isn't shown in F_GETFL. */
if (otherflags & FD_CLOEXEC)
fdflags |= O_CLOEXEC;
/* O_CREAT is hidden from F_GETFL. */
if (test->how.flags & O_CREAT)
fdflags |= O_CREAT;
if (!(test->how.flags & O_LARGEFILE))
fdflags &= ~O_LARGEFILE;
failed |= (fdflags != test->how.flags);
}
if (failed) {
resultfn = ksft_test_result_fail;
ksft_print_msg("openat2 unexpectedly returned ");
if (fdpath)
ksft_print_msg("%d['%s'] with %X (!= %X)\n",
fd, fdpath, fdflags,
test->how.flags);
else
ksft_print_msg("%d (%s)\n", fd, strerror(-fd));
}
skip:
if (test->err >= 0)
resultfn("openat2 with %s succeeds\n", test->name);
else
resultfn("openat2 with %s fails with %d (%s)\n",
test->name, test->err, strerror(-test->err));
next:
free(fdpath);
fflush(stdout);
}
}
#define NUM_TESTS (NUM_OPENAT2_STRUCT_VARIATIONS * NUM_OPENAT2_STRUCT_TESTS + \
NUM_OPENAT2_FLAG_TESTS)
int main(int argc, char **argv)
{
ksft_print_header();
ksft_set_plan(NUM_TESTS);
test_openat2_struct();
test_openat2_flags();
if (ksft_get_fail_cnt() + ksft_get_error_cnt() > 0)
ksft_exit_fail();
else
ksft_exit_pass();
}
| linux-master | tools/testing/selftests/openat2/openat2_test.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Author: Aleksa Sarai <[email protected]>
* Copyright (C) 2018-2019 SUSE LLC.
*/
#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <stdbool.h>
#include <string.h>
#include <syscall.h>
#include <limits.h>
#include "helpers.h"
bool needs_openat2(const struct open_how *how)
{
return how->resolve != 0;
}
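/*
 * Thin wrappers around the raw syscalls. All of them return the new fd on
 * success or a negative errno value on failure, which is the convention
 * the test expectations are written against.
 */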
int raw_openat2(int dfd, const char *path, void *how, size_t size)
{
int ret = syscall(__NR_openat2, dfd, path, how, size);
return ret >= 0 ? ret : -errno;
}
int sys_openat2(int dfd, const char *path, struct open_how *how)
{
return raw_openat2(dfd, path, how, sizeof(*how));
}
int sys_openat(int dfd, const char *path, struct open_how *how)
{
int ret = openat(dfd, path, how->flags, how->mode);
return ret >= 0 ? ret : -errno;
}
int sys_renameat2(int olddirfd, const char *oldpath,
int newdirfd, const char *newpath, unsigned int flags)
{
int ret = syscall(__NR_renameat2, olddirfd, oldpath,
newdirfd, newpath, flags);
return ret >= 0 ? ret : -errno;
}
int touchat(int dfd, const char *path)
{
int fd = openat(dfd, path, O_CREAT, 0700);
if (fd >= 0)
close(fd);
return fd;
}
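/*
 * Resolve an fd back to a path by reading its /proc/self/fd/<fd>
 * magic-link. The caller owns (and must free) the returned buffer.
 */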
char *fdreadlink(int fd)
{
char *target, *tmp;
E_asprintf(&tmp, "/proc/self/fd/%d", fd);
target = malloc(PATH_MAX);
if (!target)
ksft_exit_fail_msg("fdreadlink: malloc failed\n");
memset(target, 0, PATH_MAX);
E_readlink(tmp, target, PATH_MAX);
free(tmp);
return target;
}
bool fdequal(int fd, int dfd, const char *path)
{
char *fdpath, *dfdpath, *other;
bool cmp;
fdpath = fdreadlink(fd);
dfdpath = fdreadlink(dfd);
if (!path)
E_asprintf(&other, "%s", dfdpath);
else if (*path == '/')
E_asprintf(&other, "%s", path);
else
E_asprintf(&other, "%s/%s", dfdpath, path);
cmp = !strcmp(fdpath, other);
free(fdpath);
free(dfdpath);
free(other);
return cmp;
}
bool openat2_supported = false;
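/*
 * Constructor: runs before main() in every test binary that links
 * helpers.c, probing once whether the running kernel implements
 * openat2(2).
 */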
void __attribute__((constructor)) init(void)
{
struct open_how how = {};
int fd;
BUILD_BUG_ON(sizeof(struct open_how) != OPEN_HOW_SIZE_VER0);
/* Check openat2(2) support. */
fd = sys_openat2(AT_FDCWD, ".", &how);
openat2_supported = (fd >= 0);
if (fd >= 0)
close(fd);
}
| linux-master | tools/testing/selftests/openat2/helpers.c |
// SPDX-License-Identifier: GPL-2.0
#define _GNU_SOURCE /* for program_invocation_short_name */
#include <fcntl.h>
#include <pthread.h>
#include <sched.h>
#include <semaphore.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/compiler.h>
#include <test_util.h>
#include <kvm_util.h>
#include <processor.h>
/*
* s390x needs at least 1MB alignment, and the x86_64 MOVE/DELETE tests need a
* 2MB sized and aligned region so that the initial region corresponds to
* exactly one large page.
*/
#define MEM_REGION_SIZE 0x200000
#ifdef __x86_64__
/*
* Somewhat arbitrary location and slot, intended to not overlap anything.
*/
#define MEM_REGION_GPA 0xc0000000
#define MEM_REGION_SLOT 10
static const uint64_t MMIO_VAL = 0xbeefull;
extern const uint64_t final_rip_start;
extern const uint64_t final_rip_end;
static sem_t vcpu_ready;
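/*
 * Spin, re-reading the first quadword of the test memslot, until the
 * observed value differs from @spin_val; sync with the host before
 * returning the new value.
 */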
static inline uint64_t guest_spin_on_val(uint64_t spin_val)
{
uint64_t val;
do {
val = READ_ONCE(*((uint64_t *)MEM_REGION_GPA));
} while (val == spin_val);
GUEST_SYNC(0);
return val;
}
static void *vcpu_worker(void *data)
{
struct kvm_vcpu *vcpu = data;
struct kvm_run *run = vcpu->run;
struct ucall uc;
uint64_t cmd;
/*
* Loop until the guest is done. Re-enter the guest on all MMIO exits,
* which will occur if the guest attempts to access a memslot after it
* has been deleted or while it is being moved.
*/
while (1) {
vcpu_run(vcpu);
if (run->exit_reason == KVM_EXIT_IO) {
cmd = get_ucall(vcpu, &uc);
if (cmd != UCALL_SYNC)
break;
sem_post(&vcpu_ready);
continue;
}
if (run->exit_reason != KVM_EXIT_MMIO)
break;
TEST_ASSERT(!run->mmio.is_write, "Unexpected exit mmio write");
TEST_ASSERT(run->mmio.len == 8,
"Unexpected exit mmio size = %u", run->mmio.len);
TEST_ASSERT(run->mmio.phys_addr == MEM_REGION_GPA,
"Unexpected exit mmio address = 0x%llx",
run->mmio.phys_addr);
memcpy(run->mmio.data, &MMIO_VAL, 8);
}
if (run->exit_reason == KVM_EXIT_IO && cmd == UCALL_ABORT)
REPORT_GUEST_ASSERT(uc);
return NULL;
}
static void wait_for_vcpu(void)
{
struct timespec ts;
TEST_ASSERT(!clock_gettime(CLOCK_REALTIME, &ts),
"clock_gettime() failed: %d\n", errno);
ts.tv_sec += 2;
TEST_ASSERT(!sem_timedwait(&vcpu_ready, &ts),
"sem_timedwait() failed: %d\n", errno);
/* Wait for the vCPU thread to reenter the guest. */
usleep(100000);
}
static struct kvm_vm *spawn_vm(struct kvm_vcpu **vcpu, pthread_t *vcpu_thread,
void *guest_code)
{
struct kvm_vm *vm;
uint64_t *hva;
uint64_t gpa;
vm = vm_create_with_one_vcpu(vcpu, guest_code);
vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS_THP,
MEM_REGION_GPA, MEM_REGION_SLOT,
MEM_REGION_SIZE / getpagesize(), 0);
/*
* Allocate and map two pages so that the GPA accessed by guest_code()
* stays valid across the memslot move.
*/
gpa = vm_phy_pages_alloc(vm, 2, MEM_REGION_GPA, MEM_REGION_SLOT);
TEST_ASSERT(gpa == MEM_REGION_GPA, "Failed vm_phy_pages_alloc\n");
virt_map(vm, MEM_REGION_GPA, MEM_REGION_GPA, 2);
/* Ditto for the host mapping so that both pages can be zeroed. */
hva = addr_gpa2hva(vm, MEM_REGION_GPA);
memset(hva, 0, 2 * 4096);
pthread_create(vcpu_thread, NULL, vcpu_worker, *vcpu);
/* Ensure the guest thread is spun up. */
wait_for_vcpu();
return vm;
}
static void guest_code_move_memory_region(void)
{
uint64_t val;
GUEST_SYNC(0);
/*
* Spin until the memory region starts getting moved to a
* misaligned address.
* Every region move may or may not trigger MMIO, as the
* window where the memslot is invalid is usually quite small.
*/
val = guest_spin_on_val(0);
__GUEST_ASSERT(val == 1 || val == MMIO_VAL,
"Expected '1' or MMIO ('%llx'), got '%llx'", MMIO_VAL, val);
/* Spin until the misaligning memory region move completes. */
val = guest_spin_on_val(MMIO_VAL);
__GUEST_ASSERT(val == 1 || val == 0,
"Expected '0' or '1' (no MMIO), got '%llx'", val);
/* Spin until the memory region starts to get re-aligned. */
val = guest_spin_on_val(0);
__GUEST_ASSERT(val == 1 || val == MMIO_VAL,
"Expected '1' or MMIO ('%llx'), got '%llx'", MMIO_VAL, val);
/* Spin until the re-aligning memory region move completes. */
val = guest_spin_on_val(MMIO_VAL);
GUEST_ASSERT_EQ(val, 1);
GUEST_DONE();
}
static void test_move_memory_region(void)
{
pthread_t vcpu_thread;
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
uint64_t *hva;
vm = spawn_vm(&vcpu, &vcpu_thread, guest_code_move_memory_region);
hva = addr_gpa2hva(vm, MEM_REGION_GPA);
/*
* Shift the region's base GPA. The guest should not see "2" as the
* hva->gpa translation is misaligned, i.e. the guest is accessing a
* different host pfn.
*/
vm_mem_region_move(vm, MEM_REGION_SLOT, MEM_REGION_GPA - 4096);
WRITE_ONCE(*hva, 2);
/*
* The guest _might_ see an invalid memslot and trigger MMIO, but it's
* a tiny window. Spin and defer the sync until the memslot is
* restored and guest behavior is once again deterministic.
*/
usleep(100000);
/*
* Note, value in memory needs to be changed *before* restoring the
* memslot, else the guest could race the update and see "2".
*/
WRITE_ONCE(*hva, 1);
/* Restore the original base, the guest should see "1". */
vm_mem_region_move(vm, MEM_REGION_SLOT, MEM_REGION_GPA);
wait_for_vcpu();
/* Defered sync from when the memslot was misaligned (above). */
wait_for_vcpu();
pthread_join(vcpu_thread, NULL);
kvm_vm_free(vm);
}
static void guest_code_delete_memory_region(void)
{
uint64_t val;
GUEST_SYNC(0);
/* Spin until the memory region is deleted. */
val = guest_spin_on_val(0);
GUEST_ASSERT_EQ(val, MMIO_VAL);
/* Spin until the memory region is recreated. */
val = guest_spin_on_val(MMIO_VAL);
GUEST_ASSERT_EQ(val, 0);
/* Spin until the memory region is deleted. */
val = guest_spin_on_val(0);
GUEST_ASSERT_EQ(val, MMIO_VAL);
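/*
 * Record the guest RIP range of the final spin loop in .rodata so the
 * host, on KVM_EXIT_INTERNAL_ERROR, can verify the vCPU was executing
 * this loop when the code memslot disappeared.
 */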
asm("1:\n\t"
".pushsection .rodata\n\t"
".global final_rip_start\n\t"
"final_rip_start: .quad 1b\n\t"
".popsection");
/* Spin indefinitely (until the code memslot is deleted). */
guest_spin_on_val(MMIO_VAL);
asm("1:\n\t"
".pushsection .rodata\n\t"
".global final_rip_end\n\t"
"final_rip_end: .quad 1b\n\t"
".popsection");
GUEST_ASSERT(0);
}
static void test_delete_memory_region(void)
{
pthread_t vcpu_thread;
struct kvm_vcpu *vcpu;
struct kvm_regs regs;
struct kvm_run *run;
struct kvm_vm *vm;
vm = spawn_vm(&vcpu, &vcpu_thread, guest_code_delete_memory_region);
/* Delete the memory region, the guest should not die. */
vm_mem_region_delete(vm, MEM_REGION_SLOT);
wait_for_vcpu();
/* Recreate the memory region. The guest should see "0". */
vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS_THP,
MEM_REGION_GPA, MEM_REGION_SLOT,
MEM_REGION_SIZE / getpagesize(), 0);
wait_for_vcpu();
/* Delete the region again so that there's only one memslot left. */
vm_mem_region_delete(vm, MEM_REGION_SLOT);
wait_for_vcpu();
/*
* Delete the primary memslot. This should cause an emulation error or
* shutdown due to the page tables getting nuked.
*/
vm_mem_region_delete(vm, 0);
pthread_join(vcpu_thread, NULL);
run = vcpu->run;
TEST_ASSERT(run->exit_reason == KVM_EXIT_SHUTDOWN ||
run->exit_reason == KVM_EXIT_INTERNAL_ERROR,
"Unexpected exit reason = %d", run->exit_reason);
vcpu_regs_get(vcpu, &regs);
/*
* On AMD, after KVM_EXIT_SHUTDOWN the VMCB has been reinitialized already,
* so the instruction pointer would point to the reset vector.
*/
if (run->exit_reason == KVM_EXIT_INTERNAL_ERROR)
TEST_ASSERT(regs.rip >= final_rip_start &&
regs.rip < final_rip_end,
"Bad rip, expected 0x%lx - 0x%lx, got 0x%llx\n",
final_rip_start, final_rip_end, regs.rip);
kvm_vm_free(vm);
}
static void test_zero_memory_regions(void)
{
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
pr_info("Testing KVM_RUN with zero added memory regions\n");
vm = vm_create_barebones();
vcpu = __vm_vcpu_add(vm, 0);
vm_ioctl(vm, KVM_SET_NR_MMU_PAGES, (void *)64ul);
vcpu_run(vcpu);
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_INTERNAL_ERROR);
kvm_vm_free(vm);
}
#endif /* __x86_64__ */
/*
* Test that memory slots can be added up to KVM_CAP_NR_MEMSLOTS, and that
* any attempt to add further slots fails.
*/
static void test_add_max_memory_regions(void)
{
int ret;
struct kvm_vm *vm;
uint32_t max_mem_slots;
uint32_t slot;
void *mem, *mem_aligned, *mem_extra;
size_t alignment;
#ifdef __s390x__
/* On s390x, the host address must be aligned to 1M (due to PGSTEs) */
alignment = 0x100000;
#else
alignment = 1;
#endif
max_mem_slots = kvm_check_cap(KVM_CAP_NR_MEMSLOTS);
TEST_ASSERT(max_mem_slots > 0,
"KVM_CAP_NR_MEMSLOTS should be greater than 0");
pr_info("Allowed number of memory slots: %i\n", max_mem_slots);
vm = vm_create_barebones();
/* Check that memory slots can be added up to the maximum allowed */
pr_info("Adding slots 0..%i, each memory region with %dK size\n",
(max_mem_slots - 1), MEM_REGION_SIZE >> 10);
mem = mmap(NULL, (size_t)max_mem_slots * MEM_REGION_SIZE + alignment,
PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
TEST_ASSERT(mem != MAP_FAILED, "Failed to mmap() host");
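/* Round the mmap()'d address up to the next 'alignment' boundary. */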
mem_aligned = (void *)(((size_t) mem + alignment - 1) & ~(alignment - 1));
for (slot = 0; slot < max_mem_slots; slot++)
vm_set_user_memory_region(vm, slot, 0,
((uint64_t)slot * MEM_REGION_SIZE),
MEM_REGION_SIZE,
mem_aligned + (uint64_t)slot * MEM_REGION_SIZE);
/* Check that no memory slot can be added beyond the limit */
mem_extra = mmap(NULL, MEM_REGION_SIZE, PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
TEST_ASSERT(mem_extra != MAP_FAILED, "Failed to mmap() host");
ret = __vm_set_user_memory_region(vm, max_mem_slots, 0,
(uint64_t)max_mem_slots * MEM_REGION_SIZE,
MEM_REGION_SIZE, mem_extra);
TEST_ASSERT(ret == -1 && errno == EINVAL,
"Adding one more memory slot should fail with EINVAL");
munmap(mem, (size_t)max_mem_slots * MEM_REGION_SIZE + alignment);
munmap(mem_extra, MEM_REGION_SIZE);
kvm_vm_free(vm);
}
int main(int argc, char *argv[])
{
#ifdef __x86_64__
int i, loops;
#endif
#ifdef __x86_64__
/*
* FIXME: the zero-memslot test fails on aarch64 and s390x because
* KVM_RUN fails with ENOEXEC or EFAULT.
*/
test_zero_memory_regions();
#endif
test_add_max_memory_regions();
#ifdef __x86_64__
if (argc > 1)
loops = atoi_positive("Number of iterations", argv[1]);
else
loops = 10;
pr_info("Testing MOVE of in-use region, %d loops\n", loops);
for (i = 0; i < loops; i++)
test_move_memory_region();
pr_info("Testing DELETE of in-use region, %d loops\n", loops);
for (i = 0; i < loops; i++)
test_delete_memory_region();
#endif
return 0;
}
| linux-master | tools/testing/selftests/kvm/set_memory_region_test.c |
// SPDX-License-Identifier: GPL-2.0
/*
* A test for GUEST_PRINTF
*
* Copyright 2022, Google, Inc. and/or its affiliates.
*/
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"
struct guest_vals {
uint64_t a;
uint64_t b;
uint64_t type;
};
static struct guest_vals vals;
/* GUEST_PRINTF()/GUEST_ASSERT_FMT() do not support float or double. */
#define TYPE_LIST \
TYPE(test_type_i64, I64, "%ld", int64_t) \
TYPE(test_type_u64, U64u, "%lu", uint64_t) \
TYPE(test_type_x64, U64x, "0x%lx", uint64_t) \
TYPE(test_type_X64, U64X, "0x%lX", uint64_t) \
TYPE(test_type_u32, U32u, "%u", uint32_t) \
TYPE(test_type_x32, U32x, "0x%x", uint32_t) \
TYPE(test_type_X32, U32X, "0x%X", uint32_t) \
TYPE(test_type_int, INT, "%d", int) \
TYPE(test_type_char, CHAR, "%c", char) \
TYPE(test_type_str, STR, "'%s'", const char *) \
TYPE(test_type_ptr, PTR, "%p", uintptr_t)
enum args_type {
#define TYPE(fn, ext, fmt_t, T) TYPE_##ext,
TYPE_LIST
#undef TYPE
};
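/*
 * Illustrative expansion (not part of the test): for the I64 entry above,
 * BUILD_TYPE_STRINGS_AND_HELPER() below generates roughly:
 *
 *	const char *PRINTF_FMT_I64 = "Got params a = %ld and b = %ld";
 *	const char *ASSERT_FMT_I64 = "Expected %ld, got %ld instead";
 *	static void test_type_i64(struct kvm_vcpu *vcpu, int64_t a, int64_t b);
 */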
static void run_test(struct kvm_vcpu *vcpu, const char *expected_printf,
const char *expected_assert);
#define BUILD_TYPE_STRINGS_AND_HELPER(fn, ext, fmt_t, T) \
const char *PRINTF_FMT_##ext = "Got params a = " fmt_t " and b = " fmt_t; \
const char *ASSERT_FMT_##ext = "Expected " fmt_t ", got " fmt_t " instead"; \
static void fn(struct kvm_vcpu *vcpu, T a, T b) \
{ \
char expected_printf[UCALL_BUFFER_LEN]; \
char expected_assert[UCALL_BUFFER_LEN]; \
\
snprintf(expected_printf, UCALL_BUFFER_LEN, PRINTF_FMT_##ext, a, b); \
snprintf(expected_assert, UCALL_BUFFER_LEN, ASSERT_FMT_##ext, a, b); \
vals = (struct guest_vals){ (uint64_t)a, (uint64_t)b, TYPE_##ext }; \
sync_global_to_guest(vcpu->vm, vals); \
run_test(vcpu, expected_printf, expected_assert); \
}
#define TYPE(fn, ext, fmt_t, T) \
BUILD_TYPE_STRINGS_AND_HELPER(fn, ext, fmt_t, T)
TYPE_LIST
#undef TYPE
static void guest_code(void)
{
while (1) {
switch (vals.type) {
#define TYPE(fn, ext, fmt_t, T) \
case TYPE_##ext: \
GUEST_PRINTF(PRINTF_FMT_##ext, vals.a, vals.b); \
__GUEST_ASSERT(vals.a == vals.b, \
ASSERT_FMT_##ext, vals.a, vals.b); \
break;
TYPE_LIST
#undef TYPE
default:
GUEST_SYNC(vals.type);
}
GUEST_DONE();
}
}
/*
* Unfortunately this gets a little messy because 'assert_msg' doesn't
* just contain the matching string, it also contains additional assert
* info. Fortunately the part that matches should be at the very end of
* 'assert_msg'.
*/
static void ucall_abort(const char *assert_msg, const char *expected_assert_msg)
{
int len_str = strlen(assert_msg);
int len_substr = strlen(expected_assert_msg);
int offset = len_str - len_substr;
TEST_ASSERT(len_substr <= len_str,
"Expected '%s' to be a substring of '%s'\n",
expected_assert_msg, assert_msg);
TEST_ASSERT(strcmp(&assert_msg[offset], expected_assert_msg) == 0,
"Unexpected mismatch. Expected: '%s', got: '%s'",
expected_assert_msg, &assert_msg[offset]);
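/*
 * Example: if assert_msg ends with "...\nExpected -1, got 1 instead" and
 * expected_assert_msg is "Expected -1, got 1 instead", then 'offset'
 * points at the start of that trailing substring and the strcmp() above
 * succeeds.
 */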
}
static void run_test(struct kvm_vcpu *vcpu, const char *expected_printf,
const char *expected_assert)
{
struct kvm_run *run = vcpu->run;
struct ucall uc;
while (1) {
vcpu_run(vcpu);
TEST_ASSERT(run->exit_reason == UCALL_EXIT_REASON,
"Unexpected exit reason: %u (%s),\n",
run->exit_reason, exit_reason_str(run->exit_reason));
switch (get_ucall(vcpu, &uc)) {
case UCALL_SYNC:
TEST_FAIL("Unknown 'args_type' = %lu", uc.args[1]);
break;
case UCALL_PRINTF:
TEST_ASSERT(strcmp(uc.buffer, expected_printf) == 0,
"Unexpected mismatch. Expected: '%s', got: '%s'",
expected_printf, uc.buffer);
break;
case UCALL_ABORT:
ucall_abort(uc.buffer, expected_assert);
break;
case UCALL_DONE:
return;
default:
TEST_FAIL("Unknown ucall %lu", uc.cmd);
}
}
}
static void guest_code_limits(void)
{
char test_str[UCALL_BUFFER_LEN + 10];
memset(test_str, 'a', sizeof(test_str));
test_str[sizeof(test_str) - 1] = 0;
GUEST_PRINTF("%s", test_str);
}
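/*
 * The string above is deliberately longer than UCALL_BUFFER_LEN, so the
 * oversized GUEST_PRINTF() is expected to trip an assert in the ucall
 * layer; the host below verifies this by checking for UCALL_ABORT.
 */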
static void test_limits(void)
{
struct kvm_vcpu *vcpu;
struct kvm_run *run;
struct kvm_vm *vm;
struct ucall uc;
vm = vm_create_with_one_vcpu(&vcpu, guest_code_limits);
run = vcpu->run;
vcpu_run(vcpu);
TEST_ASSERT(run->exit_reason == UCALL_EXIT_REASON,
"Unexpected exit reason: %u (%s),\n",
run->exit_reason, exit_reason_str(run->exit_reason));
TEST_ASSERT(get_ucall(vcpu, &uc) == UCALL_ABORT,
"Unexpected ucall command: %lu, Expected: %u (UCALL_ABORT)\n",
uc.cmd, UCALL_ABORT);
kvm_vm_free(vm);
}
int main(int argc, char *argv[])
{
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
vm = vm_create_with_one_vcpu(&vcpu, guest_code);
test_type_i64(vcpu, -1, -1);
test_type_i64(vcpu, -1, 1);
test_type_i64(vcpu, 0x1234567890abcdef, 0x1234567890abcdef);
test_type_i64(vcpu, 0x1234567890abcdef, 0x1234567890abcdee);
test_type_u64(vcpu, 0x1234567890abcdef, 0x1234567890abcdef);
test_type_u64(vcpu, 0x1234567890abcdef, 0x1234567890abcdee);
test_type_x64(vcpu, 0x1234567890abcdef, 0x1234567890abcdef);
test_type_x64(vcpu, 0x1234567890abcdef, 0x1234567890abcdee);
test_type_X64(vcpu, 0x1234567890abcdef, 0x1234567890abcdef);
test_type_X64(vcpu, 0x1234567890abcdef, 0x1234567890abcdee);
test_type_u32(vcpu, 0x90abcdef, 0x90abcdef);
test_type_u32(vcpu, 0x90abcdef, 0x90abcdee);
test_type_x32(vcpu, 0x90abcdef, 0x90abcdef);
test_type_x32(vcpu, 0x90abcdef, 0x90abcdee);
test_type_X32(vcpu, 0x90abcdef, 0x90abcdef);
test_type_X32(vcpu, 0x90abcdef, 0x90abcdee);
test_type_int(vcpu, -1, -1);
test_type_int(vcpu, -1, 1);
test_type_int(vcpu, 1, 1);
test_type_char(vcpu, 'a', 'a');
test_type_char(vcpu, 'a', 'A');
test_type_char(vcpu, 'a', 'b');
test_type_str(vcpu, "foo", "foo");
test_type_str(vcpu, "foo", "bar");
test_type_ptr(vcpu, 0x1234567890abcdef, 0x1234567890abcdef);
test_type_ptr(vcpu, 0x1234567890abcdef, 0x1234567890abcdee);
kvm_vm_free(vm);
test_limits();
return 0;
}
| linux-master | tools/testing/selftests/kvm/guest_print_test.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* This test is intended to reproduce a crash that happens when
* kvm_arch_hardware_disable is called and it attempts to unregister the user
* return notifiers.
*/
#define _GNU_SOURCE
#include <fcntl.h>
#include <pthread.h>
#include <semaphore.h>
#include <stdint.h>
#include <stdlib.h>
#include <time.h>
#include <unistd.h>
#include <sys/wait.h>
#include <test_util.h>
#include "kvm_util.h"
#define VCPU_NUM 4
#define SLEEPING_THREAD_NUM (1 << 4)
#define FORK_NUM (1ULL << 9)
#define DELAY_US_MAX 2000
#define GUEST_CODE_PIO_PORT 4
sem_t *sem;
static void guest_code(void)
{
for (;;)
; /* Some busy work */
printf("Should not be reached.\n");
}
static void *run_vcpu(void *arg)
{
struct kvm_vcpu *vcpu = arg;
struct kvm_run *run = vcpu->run;
vcpu_run(vcpu);
TEST_ASSERT(false, "%s: exited with reason %d: %s\n",
__func__, run->exit_reason,
exit_reason_str(run->exit_reason));
pthread_exit(NULL);
}
static void *sleeping_thread(void *arg)
{
int fd;
while (true) {
fd = open("/dev/null", O_RDWR);
close(fd);
}
TEST_ASSERT(false, "%s: exited\n", __func__);
pthread_exit(NULL);
}
static inline void check_create_thread(pthread_t *thread, pthread_attr_t *attr,
void *(*f)(void *), void *arg)
{
int r;
r = pthread_create(thread, attr, f, arg);
TEST_ASSERT(r == 0, "%s: failed to create thread", __func__);
}
static inline void check_set_affinity(pthread_t thread, cpu_set_t *cpu_set)
{
int r;
r = pthread_setaffinity_np(thread, sizeof(cpu_set_t), cpu_set);
TEST_ASSERT(r == 0, "%s: failed set affinity", __func__);
}
static inline void check_join(pthread_t thread, void **retval)
{
int r;
r = pthread_join(thread, retval);
TEST_ASSERT(r == 0, "%s: failed to join thread", __func__);
}
static void run_test(uint32_t run)
{
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
cpu_set_t cpu_set;
pthread_t threads[VCPU_NUM];
pthread_t throw_away;
void *b;
uint32_t i, j;
CPU_ZERO(&cpu_set);
for (i = 0; i < VCPU_NUM; i++)
CPU_SET(i, &cpu_set);
vm = vm_create(VCPU_NUM);
pr_debug("%s: [%d] start vcpus\n", __func__, run);
for (i = 0; i < VCPU_NUM; ++i) {
vcpu = vm_vcpu_add(vm, i, guest_code);
check_create_thread(&threads[i], NULL, run_vcpu, vcpu);
check_set_affinity(threads[i], &cpu_set);
for (j = 0; j < SLEEPING_THREAD_NUM; ++j) {
check_create_thread(&throw_away, NULL, sleeping_thread,
(void *)NULL);
check_set_affinity(throw_away, &cpu_set);
}
}
pr_debug("%s: [%d] all threads launched\n", __func__, run);
sem_post(sem);
for (i = 0; i < VCPU_NUM; ++i)
check_join(threads[i], &b);
/* Should not be reached */
TEST_ASSERT(false, "%s: [%d] child escaped the ninja\n", __func__, run);
}
void wait_for_child_setup(pid_t pid)
{
/*
* Wait for the child to post to the semaphore, but wake up periodically
* to check if the child exited prematurely.
*/
for (;;) {
struct timespec wait_period;
int status;
/*
 * sem_timedwait() takes an absolute CLOCK_REALTIME deadline, not a
 * relative interval, so compute "now + 1 second".
 */
clock_gettime(CLOCK_REALTIME, &wait_period);
wait_period.tv_sec++;
if (!sem_timedwait(sem, &wait_period))
return;
/* Child is still running, keep waiting. */
if (pid != waitpid(pid, &status, WNOHANG))
continue;
/*
* Child is no longer running, which is not expected.
*
* If it exited with a non-zero status, we explicitly forward
* the child's status in case it exited with KSFT_SKIP.
*/
if (WIFEXITED(status))
exit(WEXITSTATUS(status));
else
TEST_ASSERT(false, "Child exited unexpectedly");
}
}
int main(int argc, char **argv)
{
uint32_t i;
int s, r;
pid_t pid;
sem = sem_open("vm_sem", O_CREAT | O_EXCL, 0644, 0);
sem_unlink("vm_sem");
for (i = 0; i < FORK_NUM; ++i) {
pid = fork();
TEST_ASSERT(pid >= 0, "%s: unable to fork", __func__);
if (pid == 0)
run_test(i); /* This function always exits */
pr_debug("%s: [%d] waiting semaphore\n", __func__, i);
wait_for_child_setup(pid);
r = (rand() % DELAY_US_MAX) + 1;
pr_debug("%s: [%d] waiting %dus\n", __func__, i, r);
usleep(r);
r = waitpid(pid, &s, WNOHANG);
TEST_ASSERT(r != pid,
"%s: [%d] child exited unexpectedly status: [%d]",
__func__, i, s);
pr_debug("%s: [%d] killing child\n", __func__, i);
kill(pid, SIGKILL);
}
sem_close(sem);
exit(0);
}
| linux-master | tools/testing/selftests/kvm/hardware_disable_test.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* kvm_create_max_vcpus
*
* Copyright (C) 2019, Google LLC.
*
* Test for KVM_CAP_MAX_VCPUS and KVM_CAP_MAX_VCPU_ID.
*/
#define _GNU_SOURCE /* for program_invocation_short_name */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/resource.h>
#include "test_util.h"
#include "kvm_util.h"
#include "asm/kvm.h"
#include "linux/kvm.h"
void test_vcpu_creation(int first_vcpu_id, int num_vcpus)
{
struct kvm_vm *vm;
int i;
pr_info("Testing creating %d vCPUs, with IDs %d...%d.\n",
num_vcpus, first_vcpu_id, first_vcpu_id + num_vcpus - 1);
vm = vm_create_barebones();
for (i = first_vcpu_id; i < first_vcpu_id + num_vcpus; i++)
/* This asserts that the vCPU was created. */
__vm_vcpu_add(vm, i);
kvm_vm_free(vm);
}
int main(int argc, char *argv[])
{
int kvm_max_vcpu_id = kvm_check_cap(KVM_CAP_MAX_VCPU_ID);
int kvm_max_vcpus = kvm_check_cap(KVM_CAP_MAX_VCPUS);
/*
* Number of file descriptors required, KVM_CAP_MAX_VCPUS for vCPU fds +
* an arbitrary number for everything else.
*/
int nr_fds_wanted = kvm_max_vcpus + 100;
struct rlimit rl;
pr_info("KVM_CAP_MAX_VCPU_ID: %d\n", kvm_max_vcpu_id);
pr_info("KVM_CAP_MAX_VCPUS: %d\n", kvm_max_vcpus);
/*
* Check that we're allowed to open nr_fds_wanted file descriptors and
* try raising the limits if needed.
*/
TEST_ASSERT(!getrlimit(RLIMIT_NOFILE, &rl), "getrlimit() failed!");
if (rl.rlim_cur < nr_fds_wanted) {
rl.rlim_cur = nr_fds_wanted;
if (rl.rlim_max < nr_fds_wanted) {
int old_rlim_max = rl.rlim_max;
rl.rlim_max = nr_fds_wanted;
int r = setrlimit(RLIMIT_NOFILE, &rl);
__TEST_REQUIRE(r >= 0,
"RLIMIT_NOFILE hard limit is too low (%d, wanted %d)\n",
old_rlim_max, nr_fds_wanted);
} else {
TEST_ASSERT(!setrlimit(RLIMIT_NOFILE, &rl), "setrlimit() failed!");
}
}
/*
* Upstream KVM prior to 4.8 does not support KVM_CAP_MAX_VCPU_ID.
* Userspace is supposed to use KVM_CAP_MAX_VCPUS as the maximum ID
* in this case.
*/
if (!kvm_max_vcpu_id)
kvm_max_vcpu_id = kvm_max_vcpus;
TEST_ASSERT(kvm_max_vcpu_id >= kvm_max_vcpus,
"KVM_MAX_VCPU_IDS (%d) must be at least as large as KVM_MAX_VCPUS (%d).",
kvm_max_vcpu_id, kvm_max_vcpus);
test_vcpu_creation(0, kvm_max_vcpus);
if (kvm_max_vcpu_id > kvm_max_vcpus)
test_vcpu_creation(
kvm_max_vcpu_id - kvm_max_vcpus, kvm_max_vcpus);
return 0;
}
| linux-master | tools/testing/selftests/kvm/kvm_create_max_vcpus.c |
// SPDX-License-Identifier: GPL-2.0
/*
* KVM page table test
*
* Copyright (C) 2021, Huawei, Inc.
*
* Make sure that THP has been enabled or enough HUGETLB pages with specific
* page size have been pre-allocated on your system, if you are planning to
* use hugepages to back the guest memory for testing.
*/
#define _GNU_SOURCE /* for program_invocation_name */
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <pthread.h>
#include <semaphore.h>
#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"
#include "guest_modes.h"
#define TEST_MEM_SLOT_INDEX 1
/* Default size (1GB) of the memory for testing */
#define DEFAULT_TEST_MEM_SIZE (1 << 30)
/* Default guest test virtual memory offset */
#define DEFAULT_GUEST_TEST_MEM 0xc0000000
/* Different guest memory accessing stages */
enum test_stage {
KVM_BEFORE_MAPPINGS,
KVM_CREATE_MAPPINGS,
KVM_UPDATE_MAPPINGS,
KVM_ADJUST_MAPPINGS,
NUM_TEST_STAGES,
};
static const char * const test_stage_string[] = {
"KVM_BEFORE_MAPPINGS",
"KVM_CREATE_MAPPINGS",
"KVM_UPDATE_MAPPINGS",
"KVM_ADJUST_MAPPINGS",
};
struct test_args {
struct kvm_vm *vm;
uint64_t guest_test_virt_mem;
uint64_t host_page_size;
uint64_t host_num_pages;
uint64_t large_page_size;
uint64_t large_num_pages;
uint64_t host_pages_per_lpage;
enum vm_mem_backing_src_type src_type;
struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
};
/*
* Guest variables. Use addr_gva2hva() if these variables need
* to be changed in host.
*/
static enum test_stage guest_test_stage;
/* Host variables */
static uint32_t nr_vcpus = 1;
static struct test_args test_args;
static enum test_stage *current_stage;
static bool host_quit;
/* Whether the test stage is updated, or completed */
static sem_t test_stage_updated;
static sem_t test_stage_completed;
/*
* Guest physical memory offset of the testing memory slot.
* This will be set to the topmost valid physical address minus
* the test memory size.
*/
static uint64_t guest_test_phys_mem;
/*
* Guest virtual memory offset of the testing memory slot.
* Must not conflict with identity mapped test code.
*/
static uint64_t guest_test_virt_mem = DEFAULT_GUEST_TEST_MEM;
static void guest_code(bool do_write)
{
struct test_args *p = &test_args;
enum test_stage *current_stage = &guest_test_stage;
uint64_t addr;
int i, j;
while (true) {
addr = p->guest_test_virt_mem;
switch (READ_ONCE(*current_stage)) {
/*
* All vCPU threads will be started in this stage,
* where guest code of each vCPU will do nothing.
*/
case KVM_BEFORE_MAPPINGS:
break;
/*
* Before dirty logging, vCPUs concurrently access the first
* 8 bytes of each page (host page/large page) within the same
* memory region with different accessing types (read/write).
* Then KVM will create normal page mappings or huge block
* mappings for them.
*/
case KVM_CREATE_MAPPINGS:
for (i = 0; i < p->large_num_pages; i++) {
if (do_write)
*(uint64_t *)addr = 0x0123456789ABCDEF;
else
READ_ONCE(*(uint64_t *)addr);
addr += p->large_page_size;
}
break;
/*
* During dirty logging, KVM will only update attributes of the
* normal page mappings from RO to RW if memory backing src type
* is anonymous. In other cases, KVM will split the huge block
* mappings into normal page mappings if memory backing src type
* is THP or HUGETLB.
*/
case KVM_UPDATE_MAPPINGS:
if (p->src_type == VM_MEM_SRC_ANONYMOUS) {
for (i = 0; i < p->host_num_pages; i++) {
*(uint64_t *)addr = 0x0123456789ABCDEF;
addr += p->host_page_size;
}
break;
}
for (i = 0; i < p->large_num_pages; i++) {
/*
* Write to the first host page in each large
* page region, and trigger breaking of the large pages.
*/
*(uint64_t *)addr = 0x0123456789ABCDEF;
/*
* Access the middle host pages in each large
* page region. Since dirty logging is enabled,
* this will create new mappings at the smallest
* granularity.
*/
addr += p->large_page_size / 2;
for (j = 0; j < p->host_pages_per_lpage / 2; j++) {
READ_ONCE(*(uint64_t *)addr);
addr += p->host_page_size;
}
}
break;
/*
* After dirty logging is stopped, vCPUs concurrently read
* from every single host page. Then KVM will coalesce the
* split page mappings back to block mappings. And a TLB
* conflict abort could occur here if TLB entries of the
* page mappings are not fully invalidated.
*/
case KVM_ADJUST_MAPPINGS:
for (i = 0; i < p->host_num_pages; i++) {
READ_ONCE(*(uint64_t *)addr);
addr += p->host_page_size;
}
break;
default:
GUEST_ASSERT(0);
}
GUEST_SYNC(1);
}
}
static void *vcpu_worker(void *data)
{
struct kvm_vcpu *vcpu = data;
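/* Even vCPU IDs write to memory, odd vCPU IDs read from it. */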
bool do_write = !(vcpu->id % 2);
struct timespec start;
struct timespec ts_diff;
enum test_stage stage;
int ret;
vcpu_args_set(vcpu, 1, do_write);
while (!READ_ONCE(host_quit)) {
ret = sem_wait(&test_stage_updated);
TEST_ASSERT(ret == 0, "Error in sem_wait");
if (READ_ONCE(host_quit))
return NULL;
clock_gettime(CLOCK_MONOTONIC, &start);
ret = _vcpu_run(vcpu);
ts_diff = timespec_elapsed(start);
TEST_ASSERT(ret == 0, "vcpu_run failed: %d\n", ret);
TEST_ASSERT(get_ucall(vcpu, NULL) == UCALL_SYNC,
"Invalid guest sync status: exit_reason=%s\n",
exit_reason_str(vcpu->run->exit_reason));
pr_debug("Got sync event from vCPU %d\n", vcpu->id);
stage = READ_ONCE(*current_stage);
/*
* Here we can know the execution time of every
* single vcpu running in different test stages.
*/
pr_debug("vCPU %d has completed stage %s\n"
"execution time is: %ld.%.9lds\n\n",
vcpu->id, test_stage_string[stage],
ts_diff.tv_sec, ts_diff.tv_nsec);
ret = sem_post(&test_stage_completed);
TEST_ASSERT(ret == 0, "Error in sem_post");
}
return NULL;
}
struct test_params {
uint64_t phys_offset;
uint64_t test_mem_size;
enum vm_mem_backing_src_type src_type;
};
static struct kvm_vm *pre_init_before_test(enum vm_guest_mode mode, void *arg)
{
int ret;
struct test_params *p = arg;
enum vm_mem_backing_src_type src_type = p->src_type;
uint64_t large_page_size = get_backing_src_pagesz(src_type);
uint64_t guest_page_size = vm_guest_mode_params[mode].page_size;
uint64_t host_page_size = getpagesize();
uint64_t test_mem_size = p->test_mem_size;
uint64_t guest_num_pages;
uint64_t alignment;
void *host_test_mem;
struct kvm_vm *vm;
/* Align up the test memory size */
alignment = max(large_page_size, guest_page_size);
test_mem_size = (test_mem_size + alignment - 1) & ~(alignment - 1);
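/* e.g. with a 2M THP backing, a request of (1G + 1) rounds up to 1G + 2M. */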
/* Create a VM with enough guest pages */
guest_num_pages = test_mem_size / guest_page_size;
vm = __vm_create_with_vcpus(mode, nr_vcpus, guest_num_pages,
guest_code, test_args.vcpus);
/* Align down GPA of the testing memslot */
if (!p->phys_offset)
guest_test_phys_mem = (vm->max_gfn - guest_num_pages) *
guest_page_size;
else
guest_test_phys_mem = p->phys_offset;
#ifdef __s390x__
alignment = max(0x100000UL, alignment);
#endif
guest_test_phys_mem = align_down(guest_test_phys_mem, alignment);
/* Set up the shared data structure test_args */
test_args.vm = vm;
test_args.guest_test_virt_mem = guest_test_virt_mem;
test_args.host_page_size = host_page_size;
test_args.host_num_pages = test_mem_size / host_page_size;
test_args.large_page_size = large_page_size;
test_args.large_num_pages = test_mem_size / large_page_size;
test_args.host_pages_per_lpage = large_page_size / host_page_size;
test_args.src_type = src_type;
/* Add an extra memory slot with specified backing src type */
vm_userspace_mem_region_add(vm, src_type, guest_test_phys_mem,
TEST_MEM_SLOT_INDEX, guest_num_pages, 0);
/* Do mapping(GVA->GPA) for the testing memory slot */
virt_map(vm, guest_test_virt_mem, guest_test_phys_mem, guest_num_pages);
/* Cache the HVA pointer of the region */
host_test_mem = addr_gpa2hva(vm, (vm_paddr_t)guest_test_phys_mem);
/* Export shared structure test_args to guest */
sync_global_to_guest(vm, test_args);
ret = sem_init(&test_stage_updated, 0, 0);
TEST_ASSERT(ret == 0, "Error in sem_init");
ret = sem_init(&test_stage_completed, 0, 0);
TEST_ASSERT(ret == 0, "Error in sem_init");
current_stage = addr_gva2hva(vm, (vm_vaddr_t)(&guest_test_stage));
*current_stage = NUM_TEST_STAGES;
pr_info("Testing guest mode: %s\n", vm_guest_mode_string(mode));
pr_info("Testing memory backing src type: %s\n",
vm_mem_backing_src_alias(src_type)->name);
pr_info("Testing memory backing src granularity: 0x%lx\n",
large_page_size);
pr_info("Testing memory size(aligned): 0x%lx\n", test_mem_size);
pr_info("Guest physical test memory offset: 0x%lx\n",
guest_test_phys_mem);
pr_info("Host virtual test memory offset: 0x%lx\n",
(uint64_t)host_test_mem);
pr_info("Number of testing vCPUs: %d\n", nr_vcpus);
return vm;
}
static void vcpus_complete_new_stage(enum test_stage stage)
{
int ret;
int vcpus;
/* Wake up all the vcpus to run new test stage */
for (vcpus = 0; vcpus < nr_vcpus; vcpus++) {
ret = sem_post(&test_stage_updated);
TEST_ASSERT(ret == 0, "Error in sem_post");
}
pr_debug("All vcpus have been notified to continue\n");
/* Wait for all the vcpus to complete new test stage */
for (vcpus = 0; vcpus < nr_vcpus; vcpus++) {
ret = sem_wait(&test_stage_completed);
TEST_ASSERT(ret == 0, "Error in sem_wait");
pr_debug("%d vcpus have completed stage %s\n",
vcpus + 1, test_stage_string[stage]);
}
pr_debug("All vcpus have completed stage %s\n",
test_stage_string[stage]);
}
static void run_test(enum vm_guest_mode mode, void *arg)
{
pthread_t *vcpu_threads;
struct kvm_vm *vm;
struct timespec start;
struct timespec ts_diff;
int ret, i;
/* Create VM with vCPUs and make some pre-initialization */
vm = pre_init_before_test(mode, arg);
vcpu_threads = malloc(nr_vcpus * sizeof(*vcpu_threads));
TEST_ASSERT(vcpu_threads, "Memory allocation failed");
host_quit = false;
*current_stage = KVM_BEFORE_MAPPINGS;
for (i = 0; i < nr_vcpus; i++)
pthread_create(&vcpu_threads[i], NULL, vcpu_worker,
test_args.vcpus[i]);
vcpus_complete_new_stage(*current_stage);
pr_info("Started all vCPUs successfully\n");
/* Test the stage of KVM creating mappings */
*current_stage = KVM_CREATE_MAPPINGS;
clock_gettime(CLOCK_MONOTONIC, &start);
vcpus_complete_new_stage(*current_stage);
ts_diff = timespec_elapsed(start);
pr_info("KVM_CREATE_MAPPINGS: total execution time: %ld.%.9lds\n\n",
ts_diff.tv_sec, ts_diff.tv_nsec);
/* Test the stage of KVM updating mappings */
vm_mem_region_set_flags(vm, TEST_MEM_SLOT_INDEX,
KVM_MEM_LOG_DIRTY_PAGES);
*current_stage = KVM_UPDATE_MAPPINGS;
clock_gettime(CLOCK_MONOTONIC, &start);
vcpus_complete_new_stage(*current_stage);
ts_diff = timespec_elapsed(start);
pr_info("KVM_UPDATE_MAPPINGS: total execution time: %ld.%.9lds\n\n",
ts_diff.tv_sec, ts_diff.tv_nsec);
/* Test the stage of KVM adjusting mappings */
vm_mem_region_set_flags(vm, TEST_MEM_SLOT_INDEX, 0);
*current_stage = KVM_ADJUST_MAPPINGS;
clock_gettime(CLOCK_MONOTONIC, &start);
vcpus_complete_new_stage(*current_stage);
ts_diff = timespec_elapsed(start);
pr_info("KVM_ADJUST_MAPPINGS: total execution time: %ld.%.9lds\n\n",
ts_diff.tv_sec, ts_diff.tv_nsec);
/* Tell the vcpu thread to quit */
host_quit = true;
for (i = 0; i < nr_vcpus; i++) {
ret = sem_post(&test_stage_updated);
TEST_ASSERT(ret == 0, "Error in sem_post");
}
for (i = 0; i < nr_vcpus; i++)
pthread_join(vcpu_threads[i], NULL);
ret = sem_destroy(&test_stage_updated);
TEST_ASSERT(ret == 0, "Error in sem_destroy");
ret = sem_destroy(&test_stage_completed);
TEST_ASSERT(ret == 0, "Error in sem_destroy");
free(vcpu_threads);
kvm_vm_free(vm);
}
static void help(char *name)
{
puts("");
printf("usage: %s [-h] [-p offset] [-m mode] "
"[-b mem-size] [-v vcpus] [-s mem-type]\n", name);
puts("");
printf(" -p: specify guest physical test memory offset\n"
" Warning: a low offset can conflict with the loaded test code.\n");
guest_modes_help();
printf(" -b: specify size of the memory region for testing. e.g. 10M or 3G.\n"
" (default: 1G)\n");
printf(" -v: specify the number of vCPUs to run\n"
" (default: 1)\n");
backing_src_help("-s");
puts("");
}
int main(int argc, char *argv[])
{
int max_vcpus = kvm_check_cap(KVM_CAP_MAX_VCPUS);
struct test_params p = {
.test_mem_size = DEFAULT_TEST_MEM_SIZE,
.src_type = DEFAULT_VM_MEM_SRC,
};
int opt;
guest_modes_append_default();
while ((opt = getopt(argc, argv, "hp:m:b:v:s:")) != -1) {
switch (opt) {
case 'p':
p.phys_offset = strtoull(optarg, NULL, 0);
break;
case 'm':
guest_modes_cmdline(optarg);
break;
case 'b':
p.test_mem_size = parse_size(optarg);
break;
case 'v':
nr_vcpus = atoi_positive("Number of vCPUs", optarg);
TEST_ASSERT(nr_vcpus <= max_vcpus,
"Invalid number of vcpus, must be between 1 and %d", max_vcpus);
break;
case 's':
p.src_type = parse_backing_src_type(optarg);
break;
case 'h':
default:
help(argv[0]);
exit(0);
}
}
for_each_guest_mode(run_test, &p);
return 0;
}
| linux-master | tools/testing/selftests/kvm/kvm_page_table_test.c |
// SPDX-License-Identifier: GPL-2.0
/*
* KVM dirty page logging performance test
*
* Based on dirty_log_test.c
*
* Copyright (C) 2018, Red Hat, Inc.
* Copyright (C) 2020, Google, Inc.
*/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <pthread.h>
#include <linux/bitmap.h>
#include "kvm_util.h"
#include "test_util.h"
#include "memstress.h"
#include "guest_modes.h"
#ifdef __aarch64__
#include "aarch64/vgic.h"
#define GICD_BASE_GPA 0x8000000ULL
#define GICR_BASE_GPA 0x80A0000ULL
static int gic_fd;
static void arch_setup_vm(struct kvm_vm *vm, unsigned int nr_vcpus)
{
/*
* The test can still run even if hardware does not support GICv3, as it
* is only an optimization to reduce guest exits.
*/
gic_fd = vgic_v3_setup(vm, nr_vcpus, 64, GICD_BASE_GPA, GICR_BASE_GPA);
}
static void arch_cleanup_vm(struct kvm_vm *vm)
{
if (gic_fd > 0)
close(gic_fd);
}
#else /* __aarch64__ */
static void arch_setup_vm(struct kvm_vm *vm, unsigned int nr_vcpus)
{
}
static void arch_cleanup_vm(struct kvm_vm *vm)
{
}
#endif
/* How many host loops to run by default (one KVM_GET_DIRTY_LOG for each loop)*/
#define TEST_HOST_LOOP_N 2UL
static int nr_vcpus = 1;
static uint64_t guest_percpu_mem_size = DEFAULT_PER_VCPU_MEM_SIZE;
static bool run_vcpus_while_disabling_dirty_logging;
/* Host variables */
static u64 dirty_log_manual_caps;
static bool host_quit;
static int iteration;
static int vcpu_last_completed_iteration[KVM_MAX_VCPUS];
static void vcpu_worker(struct memstress_vcpu_args *vcpu_args)
{
struct kvm_vcpu *vcpu = vcpu_args->vcpu;
int vcpu_idx = vcpu_args->vcpu_idx;
uint64_t pages_count = 0;
struct kvm_run *run;
struct timespec start;
struct timespec ts_diff;
struct timespec total = (struct timespec){0};
struct timespec avg;
int ret;
run = vcpu->run;
while (!READ_ONCE(host_quit)) {
int current_iteration = READ_ONCE(iteration);
clock_gettime(CLOCK_MONOTONIC, &start);
ret = _vcpu_run(vcpu);
ts_diff = timespec_elapsed(start);
TEST_ASSERT(ret == 0, "vcpu_run failed: %d\n", ret);
TEST_ASSERT(get_ucall(vcpu, NULL) == UCALL_SYNC,
"Invalid guest sync status: exit_reason=%s\n",
exit_reason_str(run->exit_reason));
pr_debug("Got sync event from vCPU %d\n", vcpu_idx);
vcpu_last_completed_iteration[vcpu_idx] = current_iteration;
pr_debug("vCPU %d updated last completed iteration to %d\n",
vcpu_idx, vcpu_last_completed_iteration[vcpu_idx]);
if (current_iteration) {
pages_count += vcpu_args->pages;
total = timespec_add(total, ts_diff);
pr_debug("vCPU %d iteration %d dirty memory time: %ld.%.9lds\n",
vcpu_idx, current_iteration, ts_diff.tv_sec,
ts_diff.tv_nsec);
} else {
pr_debug("vCPU %d iteration %d populate memory time: %ld.%.9lds\n",
vcpu_idx, current_iteration, ts_diff.tv_sec,
ts_diff.tv_nsec);
}
/*
* Keep running the guest while dirty logging is being disabled
* (iteration is negative) so that vCPUs are accessing memory
* for the entire duration of zapping collapsible SPTEs.
*/
while (current_iteration == READ_ONCE(iteration) &&
READ_ONCE(iteration) >= 0 && !READ_ONCE(host_quit)) {}
}
avg = timespec_div(total, vcpu_last_completed_iteration[vcpu_idx]);
pr_debug("\nvCPU %d dirtied 0x%lx pages over %d iterations in %ld.%.9lds. (Avg %ld.%.9lds/iteration)\n",
vcpu_idx, pages_count, vcpu_last_completed_iteration[vcpu_idx],
total.tv_sec, total.tv_nsec, avg.tv_sec, avg.tv_nsec);
}
struct test_params {
unsigned long iterations;
uint64_t phys_offset;
bool partition_vcpu_memory_access;
enum vm_mem_backing_src_type backing_src;
int slots;
uint32_t write_percent;
uint32_t random_seed;
bool random_access;
};
static void run_test(enum vm_guest_mode mode, void *arg)
{
struct test_params *p = arg;
struct kvm_vm *vm;
unsigned long **bitmaps;
uint64_t guest_num_pages;
uint64_t host_num_pages;
uint64_t pages_per_slot;
struct timespec start;
struct timespec ts_diff;
struct timespec get_dirty_log_total = (struct timespec){0};
struct timespec vcpu_dirty_total = (struct timespec){0};
struct timespec avg;
struct timespec clear_dirty_log_total = (struct timespec){0};
int i;
vm = memstress_create_vm(mode, nr_vcpus, guest_percpu_mem_size,
p->slots, p->backing_src,
p->partition_vcpu_memory_access);
pr_info("Random seed: %u\n", p->random_seed);
memstress_set_random_seed(vm, p->random_seed);
memstress_set_write_percent(vm, p->write_percent);
guest_num_pages = (nr_vcpus * guest_percpu_mem_size) >> vm->page_shift;
guest_num_pages = vm_adjust_num_guest_pages(mode, guest_num_pages);
host_num_pages = vm_num_host_pages(mode, guest_num_pages);
pages_per_slot = host_num_pages / p->slots;
bitmaps = memstress_alloc_bitmaps(p->slots, pages_per_slot);
if (dirty_log_manual_caps)
vm_enable_cap(vm, KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2,
dirty_log_manual_caps);
arch_setup_vm(vm, nr_vcpus);
/* Start the iterations */
iteration = 0;
host_quit = false;
clock_gettime(CLOCK_MONOTONIC, &start);
for (i = 0; i < nr_vcpus; i++)
vcpu_last_completed_iteration[i] = -1;
/*
* Use 100% writes during the population phase to ensure all
* memory is actually populated and not just mapped to the zero
* page. This prevents expensive copy-on-write faults from
* occurring during the dirty memory iterations below, which
* would pollute the performance results.
*/
memstress_set_write_percent(vm, 100);
memstress_set_random_access(vm, false);
memstress_start_vcpu_threads(nr_vcpus, vcpu_worker);
/* Allow the vCPUs to populate memory */
pr_debug("Starting iteration %d - Populating\n", iteration);
for (i = 0; i < nr_vcpus; i++) {
while (READ_ONCE(vcpu_last_completed_iteration[i]) !=
iteration)
;
}
ts_diff = timespec_elapsed(start);
pr_info("Populate memory time: %ld.%.9lds\n",
ts_diff.tv_sec, ts_diff.tv_nsec);
/* Enable dirty logging */
clock_gettime(CLOCK_MONOTONIC, &start);
memstress_enable_dirty_logging(vm, p->slots);
ts_diff = timespec_elapsed(start);
pr_info("Enabling dirty logging time: %ld.%.9lds\n\n",
ts_diff.tv_sec, ts_diff.tv_nsec);
memstress_set_write_percent(vm, p->write_percent);
memstress_set_random_access(vm, p->random_access);
while (iteration < p->iterations) {
/*
* Incrementing the iteration number will start the vCPUs
* dirtying memory again.
*/
clock_gettime(CLOCK_MONOTONIC, &start);
iteration++;
pr_debug("Starting iteration %d\n", iteration);
for (i = 0; i < nr_vcpus; i++) {
while (READ_ONCE(vcpu_last_completed_iteration[i])
!= iteration)
;
}
ts_diff = timespec_elapsed(start);
vcpu_dirty_total = timespec_add(vcpu_dirty_total, ts_diff);
pr_info("Iteration %d dirty memory time: %ld.%.9lds\n",
iteration, ts_diff.tv_sec, ts_diff.tv_nsec);
clock_gettime(CLOCK_MONOTONIC, &start);
memstress_get_dirty_log(vm, bitmaps, p->slots);
ts_diff = timespec_elapsed(start);
get_dirty_log_total = timespec_add(get_dirty_log_total,
ts_diff);
pr_info("Iteration %d get dirty log time: %ld.%.9lds\n",
iteration, ts_diff.tv_sec, ts_diff.tv_nsec);
if (dirty_log_manual_caps) {
clock_gettime(CLOCK_MONOTONIC, &start);
memstress_clear_dirty_log(vm, bitmaps, p->slots,
pages_per_slot);
ts_diff = timespec_elapsed(start);
clear_dirty_log_total = timespec_add(clear_dirty_log_total,
ts_diff);
pr_info("Iteration %d clear dirty log time: %ld.%.9lds\n",
iteration, ts_diff.tv_sec, ts_diff.tv_nsec);
}
}
/*
* Run vCPUs while dirty logging is being disabled to stress disabling
* in terms of both performance and correctness. Opt-in via command
* line as this significantly increases time to disable dirty logging.
*/
if (run_vcpus_while_disabling_dirty_logging)
WRITE_ONCE(iteration, -1);
/* Disable dirty logging */
clock_gettime(CLOCK_MONOTONIC, &start);
memstress_disable_dirty_logging(vm, p->slots);
ts_diff = timespec_elapsed(start);
pr_info("Disabling dirty logging time: %ld.%.9lds\n",
ts_diff.tv_sec, ts_diff.tv_nsec);
/*
* Tell the vCPU threads to quit. No need to manually check that vCPUs
* have stopped running after disabling dirty logging, the join will
* wait for them to exit.
*/
host_quit = true;
memstress_join_vcpu_threads(nr_vcpus);
avg = timespec_div(get_dirty_log_total, p->iterations);
pr_info("Get dirty log over %lu iterations took %ld.%.9lds. (Avg %ld.%.9lds/iteration)\n",
p->iterations, get_dirty_log_total.tv_sec,
get_dirty_log_total.tv_nsec, avg.tv_sec, avg.tv_nsec);
if (dirty_log_manual_caps) {
avg = timespec_div(clear_dirty_log_total, p->iterations);
pr_info("Clear dirty log over %lu iterations took %ld.%.9lds. (Avg %ld.%.9lds/iteration)\n",
p->iterations, clear_dirty_log_total.tv_sec,
clear_dirty_log_total.tv_nsec, avg.tv_sec, avg.tv_nsec);
}
memstress_free_bitmaps(bitmaps, p->slots);
arch_cleanup_vm(vm);
memstress_destroy_vm(vm);
}
static void help(char *name)
{
puts("");
printf("usage: %s [-h] [-a] [-i iterations] [-p offset] [-g] "
"[-m mode] [-n] [-b vcpu bytes] [-v vcpus] [-o] [-r random seed ] [-s mem type]"
"[-x memslots] [-w percentage] [-c physical cpus to run test on]\n", name);
puts("");
printf(" -a: access memory randomly rather than in order.\n");
printf(" -i: specify iteration counts (default: %"PRIu64")\n",
TEST_HOST_LOOP_N);
printf(" -g: Do not enable KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2. This\n"
" makes KVM_GET_DIRTY_LOG clear the dirty log (i.e.\n"
" KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE is not enabled)\n"
" and writes will be tracked as soon as dirty logging is\n"
" enabled on the memslot (i.e. KVM_DIRTY_LOG_INITIALLY_SET\n"
" is not enabled).\n");
printf(" -p: specify guest physical test memory offset\n"
" Warning: a low offset can conflict with the loaded test code.\n");
guest_modes_help();
printf(" -n: Run the vCPUs in nested mode (L2)\n");
printf(" -e: Run vCPUs while dirty logging is being disabled. This\n"
" can significantly increase runtime, especially if there\n"
" isn't a dedicated pCPU for the main thread.\n");
printf(" -b: specify the size of the memory region which should be\n"
" dirtied by each vCPU. e.g. 10M or 3G.\n"
" (default: 1G)\n");
printf(" -v: specify the number of vCPUs to run.\n");
printf(" -o: Overlap guest memory accesses instead of partitioning\n"
" them into a separate region of memory for each vCPU.\n");
printf(" -r: specify the starting random seed.\n");
backing_src_help("-s");
printf(" -x: Split the memory region into this number of memslots.\n"
" (default: 1)\n");
printf(" -w: specify the percentage of pages which should be written to\n"
" as an integer from 0-100 inclusive. This is probabilistic,\n"
" so -w X means each page has an X%% chance of writing\n"
" and a (100-X)%% chance of reading.\n"
" (default: 100 i.e. all pages are written to.)\n");
kvm_print_vcpu_pinning_help();
puts("");
exit(0);
}
int main(int argc, char *argv[])
{
int max_vcpus = kvm_check_cap(KVM_CAP_MAX_VCPUS);
const char *pcpu_list = NULL;
struct test_params p = {
.iterations = TEST_HOST_LOOP_N,
.partition_vcpu_memory_access = true,
.backing_src = DEFAULT_VM_MEM_SRC,
.slots = 1,
.random_seed = 1,
.write_percent = 100,
};
int opt;
dirty_log_manual_caps =
kvm_check_cap(KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2);
dirty_log_manual_caps &= (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE |
KVM_DIRTY_LOG_INITIALLY_SET);
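/*
 * A nonzero mask means the kernel supports manually clearing the dirty
 * log (KVM_CLEAR_DIRTY_LOG); zero falls back to the legacy behavior in
 * which KVM_GET_DIRTY_LOG itself clears the log (see the -g option).
 */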
guest_modes_append_default();
while ((opt = getopt(argc, argv, "ab:c:eghi:m:nop:r:s:v:x:w:")) != -1) {
switch (opt) {
case 'a':
p.random_access = true;
break;
case 'b':
guest_percpu_mem_size = parse_size(optarg);
break;
case 'c':
pcpu_list = optarg;
break;
case 'e':
/* 'e' is for evil. */
run_vcpus_while_disabling_dirty_logging = true;
break;
case 'g':
dirty_log_manual_caps = 0;
break;
case 'h':
help(argv[0]);
break;
case 'i':
p.iterations = atoi_positive("Number of iterations", optarg);
break;
case 'm':
guest_modes_cmdline(optarg);
break;
case 'n':
memstress_args.nested = true;
break;
case 'o':
p.partition_vcpu_memory_access = false;
break;
case 'p':
p.phys_offset = strtoull(optarg, NULL, 0);
break;
case 'r':
p.random_seed = atoi_positive("Random seed", optarg);
break;
case 's':
p.backing_src = parse_backing_src_type(optarg);
break;
case 'v':
nr_vcpus = atoi_positive("Number of vCPUs", optarg);
TEST_ASSERT(nr_vcpus <= max_vcpus,
"Invalid number of vcpus, must be between 1 and %d", max_vcpus);
break;
case 'w':
p.write_percent = atoi_non_negative("Write percentage", optarg);
TEST_ASSERT(p.write_percent <= 100,
"Write percentage must be between 0 and 100");
break;
case 'x':
p.slots = atoi_positive("Number of slots", optarg);
break;
default:
help(argv[0]);
break;
}
}
if (pcpu_list) {
kvm_parse_vcpu_pinning(pcpu_list, memstress_args.vcpu_to_pcpu,
nr_vcpus);
memstress_args.pin_vcpus = true;
}
TEST_ASSERT(p.iterations >= 2, "The test should have at least two iterations");
pr_info("Test iterations: %"PRIu64"\n", p.iterations);
for_each_guest_mode(run_test, &p);
return 0;
}
| linux-master | tools/testing/selftests/kvm/dirty_log_perf_test.c |