python_code | repo_name | file_path
---|---|---
// SPDX-License-Identifier: GPL-2.0-only
/*
* tsc_scaling_sync
*
* Copyright © 2021 Amazon.com, Inc. or its affiliates.
*
* Test that vCPUs created while VM-wide TSC scaling is in effect keep their TSCs in sync
*/
#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"
#include <stdint.h>
#include <time.h>
#include <sched.h>
#include <signal.h>
#include <pthread.h>
#define NR_TEST_VCPUS 20
static struct kvm_vm *vm;
pthread_spinlock_t create_lock;
#define TEST_TSC_KHZ 2345678UL
#define TEST_TSC_OFFSET 200000000
uint64_t tsc_sync;
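/*
 * Lock-free cross-vCPU TSC sanity check: each vCPU repeatedly reads the value
 * last published in tsc_sync, samples its own TSC and publishes it. If a
 * freshly read TSC is ever behind a value that was already observed, the
 * vCPUs' TSCs are out of sync and the guest reports the offending pair via
 * GUEST_SYNC_ARGS. The loop runs for roughly five seconds of guest TSC time.
 */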
static void guest_code(void)
{
uint64_t start_tsc, local_tsc, tmp;
start_tsc = rdtsc();
do {
tmp = READ_ONCE(tsc_sync);
local_tsc = rdtsc();
WRITE_ONCE(tsc_sync, local_tsc);
if (unlikely(local_tsc < tmp))
GUEST_SYNC_ARGS(0, local_tsc, tmp, 0, 0);
} while (local_tsc - start_tsc < 5000 * TEST_TSC_KHZ);
GUEST_DONE();
}
static void *run_vcpu(void *_cpu_nr)
{
unsigned long vcpu_id = (unsigned long)_cpu_nr;
unsigned long failures = 0;
static bool first_cpu_done;
struct kvm_vcpu *vcpu;
/* The kernel is fine, but vm_vcpu_add() needs locking */
pthread_spin_lock(&create_lock);
vcpu = vm_vcpu_add(vm, vcpu_id, guest_code);
if (!first_cpu_done) {
first_cpu_done = true;
vcpu_set_msr(vcpu, MSR_IA32_TSC, TEST_TSC_OFFSET);
}
pthread_spin_unlock(&create_lock);
for (;;) {
struct ucall uc;
vcpu_run(vcpu);
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
switch (get_ucall(vcpu, &uc)) {
case UCALL_DONE:
goto out;
case UCALL_SYNC:
printf("Guest %d sync %lx %lx %ld\n", vcpu->id,
uc.args[2], uc.args[3], uc.args[2] - uc.args[3]);
failures++;
break;
default:
TEST_FAIL("Unknown ucall %lu", uc.cmd);
}
}
out:
return (void *)failures;
}
int main(int argc, char *argv[])
{
TEST_REQUIRE(kvm_has_cap(KVM_CAP_VM_TSC_CONTROL));
vm = vm_create(NR_TEST_VCPUS);
vm_ioctl(vm, KVM_SET_TSC_KHZ, (void *) TEST_TSC_KHZ);
pthread_spin_init(&create_lock, PTHREAD_PROCESS_PRIVATE);
pthread_t cpu_threads[NR_TEST_VCPUS];
unsigned long cpu;
for (cpu = 0; cpu < NR_TEST_VCPUS; cpu++)
pthread_create(&cpu_threads[cpu], NULL, run_vcpu, (void *)cpu);
unsigned long failures = 0;
for (cpu = 0; cpu < NR_TEST_VCPUS; cpu++) {
void *this_cpu_failures;
pthread_join(cpu_threads[cpu], &this_cpu_failures);
failures += (unsigned long)this_cpu_failures;
}
TEST_ASSERT(!failures, "TSC sync failed");
pthread_spin_destroy(&create_lock);
kvm_vm_free(vm);
return 0;
}
| linux-master | tools/testing/selftests/kvm/x86_64/tsc_scaling_sync.c |
// SPDX-License-Identifier: GPL-2.0
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include "kvm_util.h"
#include "processor.h"
#define CPUID_MWAIT (1u << 3)
enum monitor_mwait_testcases {
MWAIT_QUIRK_DISABLED = BIT(0),
MISC_ENABLES_QUIRK_DISABLED = BIT(1),
MWAIT_DISABLED = BIT(2),
};
/*
* If both MWAIT and its quirk are disabled, MONITOR/MWAIT should #UD; in all
* other scenarios KVM should emulate them as nops.
*/
#define GUEST_ASSERT_MONITOR_MWAIT(insn, testcase, vector) \
do { \
bool fault_wanted = ((testcase) & MWAIT_QUIRK_DISABLED) && \
((testcase) & MWAIT_DISABLED); \
\
if (fault_wanted) \
__GUEST_ASSERT((vector) == UD_VECTOR, \
"Expected #UD on " insn " for testcase '0x%x', got '0x%x'", vector); \
else \
__GUEST_ASSERT(!(vector), \
"Expected success on " insn " for testcase '0x%x', got '0x%x'", vector); \
} while (0)
static void guest_monitor_wait(int testcase)
{
u8 vector;
GUEST_SYNC(testcase);
/*
* Arbitrarily MONITOR this function, SVM performs fault checks before
* intercept checks, so the inputs for MONITOR and MWAIT must be valid.
*/
vector = kvm_asm_safe("monitor", "a"(guest_monitor_wait), "c"(0), "d"(0));
GUEST_ASSERT_MONITOR_MWAIT("MONITOR", testcase, vector);
vector = kvm_asm_safe("mwait", "a"(guest_monitor_wait), "c"(0), "d"(0));
GUEST_ASSERT_MONITOR_MWAIT("MWAIT", testcase, vector);
}
static void guest_code(void)
{
guest_monitor_wait(MWAIT_DISABLED);
guest_monitor_wait(MWAIT_QUIRK_DISABLED | MWAIT_DISABLED);
guest_monitor_wait(MISC_ENABLES_QUIRK_DISABLED | MWAIT_DISABLED);
guest_monitor_wait(MISC_ENABLES_QUIRK_DISABLED);
guest_monitor_wait(MISC_ENABLES_QUIRK_DISABLED | MWAIT_QUIRK_DISABLED | MWAIT_DISABLED);
guest_monitor_wait(MISC_ENABLES_QUIRK_DISABLED | MWAIT_QUIRK_DISABLED);
GUEST_DONE();
}
int main(int argc, char *argv[])
{
uint64_t disabled_quirks;
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
struct ucall uc;
int testcase;
TEST_REQUIRE(kvm_has_cap(KVM_CAP_DISABLE_QUIRKS2));
vm = vm_create_with_one_vcpu(&vcpu, guest_code);
vcpu_clear_cpuid_feature(vcpu, X86_FEATURE_MWAIT);
vm_init_descriptor_tables(vm);
vcpu_init_descriptor_tables(vcpu);
while (1) {
vcpu_run(vcpu);
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
switch (get_ucall(vcpu, &uc)) {
case UCALL_SYNC:
testcase = uc.args[1];
break;
case UCALL_ABORT:
REPORT_GUEST_ASSERT(uc);
goto done;
case UCALL_DONE:
goto done;
default:
TEST_FAIL("Unknown ucall %lu", uc.cmd);
goto done;
}
disabled_quirks = 0;
if (testcase & MWAIT_QUIRK_DISABLED)
disabled_quirks |= KVM_X86_QUIRK_MWAIT_NEVER_UD_FAULTS;
if (testcase & MISC_ENABLES_QUIRK_DISABLED)
disabled_quirks |= KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT;
vm_enable_cap(vm, KVM_CAP_DISABLE_QUIRKS2, disabled_quirks);
/*
* If the MISC_ENABLES quirk (KVM neglects to update CPUID to
* enable/disable MWAIT) is disabled, toggle the ENABLE_MWAIT
* bit in MISC_ENABLES accordingly. If the quirk is enabled,
* the only valid configuration is MWAIT disabled, as CPUID
* can't be manually changed after running the vCPU.
*/
if (!(testcase & MISC_ENABLES_QUIRK_DISABLED)) {
TEST_ASSERT(testcase & MWAIT_DISABLED,
"Can't toggle CPUID features after running vCPU");
continue;
}
vcpu_set_msr(vcpu, MSR_IA32_MISC_ENABLE,
(testcase & MWAIT_DISABLED) ? 0 : MSR_IA32_MISC_ENABLE_MWAIT);
}
done:
kvm_vm_free(vm);
return 0;
}
| linux-master | tools/testing/selftests/kvm/x86_64/monitor_mwait_test.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Test for x86 KVM_CAP_MSR_PLATFORM_INFO
*
* Copyright (C) 2018, Google LLC.
*
* This work is licensed under the terms of the GNU GPL, version 2.
*
* Verifies expected behavior of controlling guest access to
* MSR_PLATFORM_INFO.
*/
#define _GNU_SOURCE /* for program_invocation_short_name */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"
#define MSR_PLATFORM_INFO_MAX_TURBO_RATIO 0xff00
static void guest_code(void)
{
uint64_t msr_platform_info;
for (;;) {
msr_platform_info = rdmsr(MSR_PLATFORM_INFO);
GUEST_SYNC(msr_platform_info);
asm volatile ("inc %r11");
}
}
static void test_msr_platform_info_enabled(struct kvm_vcpu *vcpu)
{
struct ucall uc;
vm_enable_cap(vcpu->vm, KVM_CAP_MSR_PLATFORM_INFO, true);
vcpu_run(vcpu);
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
get_ucall(vcpu, &uc);
TEST_ASSERT(uc.cmd == UCALL_SYNC,
"Received ucall other than UCALL_SYNC: %lu\n", uc.cmd);
TEST_ASSERT((uc.args[1] & MSR_PLATFORM_INFO_MAX_TURBO_RATIO) ==
MSR_PLATFORM_INFO_MAX_TURBO_RATIO,
"Expected MSR_PLATFORM_INFO to have max turbo ratio mask: %i.",
MSR_PLATFORM_INFO_MAX_TURBO_RATIO);
}
static void test_msr_platform_info_disabled(struct kvm_vcpu *vcpu)
{
vm_enable_cap(vcpu->vm, KVM_CAP_MSR_PLATFORM_INFO, false);
vcpu_run(vcpu);
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_SHUTDOWN);
}
int main(int argc, char *argv[])
{
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
uint64_t msr_platform_info;
TEST_REQUIRE(kvm_has_cap(KVM_CAP_MSR_PLATFORM_INFO));
vm = vm_create_with_one_vcpu(&vcpu, guest_code);
msr_platform_info = vcpu_get_msr(vcpu, MSR_PLATFORM_INFO);
vcpu_set_msr(vcpu, MSR_PLATFORM_INFO,
msr_platform_info | MSR_PLATFORM_INFO_MAX_TURBO_RATIO);
test_msr_platform_info_enabled(vcpu);
test_msr_platform_info_disabled(vcpu);
vcpu_set_msr(vcpu, MSR_PLATFORM_INFO, msr_platform_info);
kvm_vm_free(vm);
return 0;
}
| linux-master | tools/testing/selftests/kvm/x86_64/platform_info_test.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* amx tests
*
* Copyright (C) 2021, Intel, Inc.
*
* Tests for amx #NM exception and save/restore.
*/
#define _GNU_SOURCE /* for program_invocation_short_name */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"
#include "vmx.h"
#ifndef __x86_64__
# error This test is 64-bit only
#endif
#define NUM_TILES 8
#define TILE_SIZE 1024
#define XSAVE_SIZE ((NUM_TILES * TILE_SIZE) + PAGE_SIZE)
/* Tile configuration associated: */
#define PALETTE_TABLE_INDEX 1
#define MAX_TILES 16
#define RESERVED_BYTES 14
#define XSAVE_HDR_OFFSET 512
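/*
 * Mirrors the 64-byte tile configuration memory operand consumed by
 * LDTILECFG: byte 0 is the palette id, byte 1 the start row, bytes 2-15 are
 * reserved, bytes 16-47 hold a 16-bit bytes-per-row value for each of up to
 * 16 tiles, and bytes 48-63 hold the row count for each tile.
 */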
struct tile_config {
u8 palette_id;
u8 start_row;
u8 reserved[RESERVED_BYTES];
u16 colsb[MAX_TILES];
u8 rows[MAX_TILES];
};
struct tile_data {
u8 data[NUM_TILES * TILE_SIZE];
};
struct xtile_info {
u16 bytes_per_tile;
u16 bytes_per_row;
u16 max_names;
u16 max_rows;
u32 xsave_offset;
u32 xsave_size;
};
static struct xtile_info xtile;
static inline void __ldtilecfg(void *cfg)
{
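/* Raw opcode bytes for "ldtilecfg [rax]"; avoids requiring AMX assembler support. */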
asm volatile(".byte 0xc4,0xe2,0x78,0x49,0x00"
: : "a"(cfg));
}
static inline void __tileloadd(void *tile)
{
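/* Raw opcode bytes for "tileloadd tmm0, [rax+rdx]"; RDX is zeroed, so the load comes from RAX. */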
asm volatile(".byte 0xc4,0xe2,0x7b,0x4b,0x04,0x10"
: : "a"(tile), "d"(0));
}
static inline void __tilerelease(void)
{
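/* Raw opcode bytes for "tilerelease", which returns all tile registers to their INIT state. */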
asm volatile(".byte 0xc4, 0xe2, 0x78, 0x49, 0xc0" ::);
}
static inline void __xsavec(struct xstate *xstate, uint64_t rfbm)
{
uint32_t rfbm_lo = rfbm;
uint32_t rfbm_hi = rfbm >> 32;
asm volatile("xsavec (%%rdi)"
: : "D" (xstate), "a" (rfbm_lo), "d" (rfbm_hi)
: "memory");
}
static void check_xtile_info(void)
{
GUEST_ASSERT(this_cpu_has_p(X86_PROPERTY_XSTATE_MAX_SIZE_XCR0));
GUEST_ASSERT(this_cpu_property(X86_PROPERTY_XSTATE_MAX_SIZE_XCR0) <= XSAVE_SIZE);
xtile.xsave_offset = this_cpu_property(X86_PROPERTY_XSTATE_TILE_OFFSET);
GUEST_ASSERT(xtile.xsave_offset == 2816);
xtile.xsave_size = this_cpu_property(X86_PROPERTY_XSTATE_TILE_SIZE);
GUEST_ASSERT(xtile.xsave_size == 8192);
GUEST_ASSERT(sizeof(struct tile_data) >= xtile.xsave_size);
GUEST_ASSERT(this_cpu_has_p(X86_PROPERTY_AMX_MAX_PALETTE_TABLES));
GUEST_ASSERT(this_cpu_property(X86_PROPERTY_AMX_MAX_PALETTE_TABLES) >=
PALETTE_TABLE_INDEX);
GUEST_ASSERT(this_cpu_has_p(X86_PROPERTY_AMX_NR_TILE_REGS));
xtile.max_names = this_cpu_property(X86_PROPERTY_AMX_NR_TILE_REGS);
GUEST_ASSERT(xtile.max_names == 8);
xtile.bytes_per_tile = this_cpu_property(X86_PROPERTY_AMX_BYTES_PER_TILE);
GUEST_ASSERT(xtile.bytes_per_tile == 1024);
xtile.bytes_per_row = this_cpu_property(X86_PROPERTY_AMX_BYTES_PER_ROW);
GUEST_ASSERT(xtile.bytes_per_row == 64);
xtile.max_rows = this_cpu_property(X86_PROPERTY_AMX_MAX_ROWS);
GUEST_ASSERT(xtile.max_rows == 16);
}
static void set_tilecfg(struct tile_config *cfg)
{
int i;
/* Only palette id 1 */
cfg->palette_id = 1;
for (i = 0; i < xtile.max_names; i++) {
cfg->colsb[i] = xtile.bytes_per_row;
cfg->rows[i] = xtile.max_rows;
}
}
static void init_regs(void)
{
uint64_t cr4, xcr0;
GUEST_ASSERT(this_cpu_has(X86_FEATURE_XSAVE));
/* turn on CR4.OSXSAVE */
cr4 = get_cr4();
cr4 |= X86_CR4_OSXSAVE;
set_cr4(cr4);
GUEST_ASSERT(this_cpu_has(X86_FEATURE_OSXSAVE));
xcr0 = xgetbv(0);
xcr0 |= XFEATURE_MASK_XTILE;
xsetbv(0x0, xcr0);
GUEST_ASSERT((xgetbv(0) & XFEATURE_MASK_XTILE) == XFEATURE_MASK_XTILE);
}
static void __attribute__((__flatten__)) guest_code(struct tile_config *amx_cfg,
struct tile_data *tiledata,
struct xstate *xstate)
{
init_regs();
check_xtile_info();
GUEST_SYNC(1);
/* xfd=0, enable amx */
wrmsr(MSR_IA32_XFD, 0);
GUEST_SYNC(2);
GUEST_ASSERT(rdmsr(MSR_IA32_XFD) == 0);
set_tilecfg(amx_cfg);
__ldtilecfg(amx_cfg);
GUEST_SYNC(3);
/* Check save/restore when trap to userspace */
__tileloadd(tiledata);
GUEST_SYNC(4);
__tilerelease();
GUEST_SYNC(5);
/*
* After XSAVEC, XTILEDATA is cleared in the xstate_bv but is set in
* the xcomp_bv.
*/
xstate->header.xstate_bv = XFEATURE_MASK_XTILE_DATA;
__xsavec(xstate, XFEATURE_MASK_XTILE_DATA);
GUEST_ASSERT(!(xstate->header.xstate_bv & XFEATURE_MASK_XTILE_DATA));
GUEST_ASSERT(xstate->header.xcomp_bv & XFEATURE_MASK_XTILE_DATA);
/* xfd=0x40000, disable amx tiledata */
wrmsr(MSR_IA32_XFD, XFEATURE_MASK_XTILE_DATA);
/*
* XTILEDATA is cleared in xstate_bv but set in xcomp_bv, this property
* remains the same even when amx tiledata is disabled by IA32_XFD.
*/
xstate->header.xstate_bv = XFEATURE_MASK_XTILE_DATA;
__xsavec(xstate, XFEATURE_MASK_XTILE_DATA);
GUEST_ASSERT(!(xstate->header.xstate_bv & XFEATURE_MASK_XTILE_DATA));
GUEST_ASSERT((xstate->header.xcomp_bv & XFEATURE_MASK_XTILE_DATA));
GUEST_SYNC(6);
GUEST_ASSERT(rdmsr(MSR_IA32_XFD) == XFEATURE_MASK_XTILE_DATA);
set_tilecfg(amx_cfg);
__ldtilecfg(amx_cfg);
/* Trigger #NM exception */
__tileloadd(tiledata);
GUEST_SYNC(10);
GUEST_DONE();
}
void guest_nm_handler(struct ex_regs *regs)
{
/* Check if #NM is triggered by XFEATURE_MASK_XTILE_DATA */
GUEST_SYNC(7);
GUEST_ASSERT(!(get_cr0() & X86_CR0_TS));
GUEST_ASSERT(rdmsr(MSR_IA32_XFD_ERR) == XFEATURE_MASK_XTILE_DATA);
GUEST_ASSERT(rdmsr(MSR_IA32_XFD) == XFEATURE_MASK_XTILE_DATA);
GUEST_SYNC(8);
GUEST_ASSERT(rdmsr(MSR_IA32_XFD_ERR) == XFEATURE_MASK_XTILE_DATA);
GUEST_ASSERT(rdmsr(MSR_IA32_XFD) == XFEATURE_MASK_XTILE_DATA);
/* Clear xfd_err */
wrmsr(MSR_IA32_XFD_ERR, 0);
/* xfd=0, enable amx */
wrmsr(MSR_IA32_XFD, 0);
GUEST_SYNC(9);
}
int main(int argc, char *argv[])
{
struct kvm_regs regs1, regs2;
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
struct kvm_x86_state *state;
int xsave_restore_size;
vm_vaddr_t amx_cfg, tiledata, xstate;
struct ucall uc;
u32 amx_offset;
int stage, ret;
/*
* Note, all off-by-default features must be enabled before anything
* caches KVM_GET_SUPPORTED_CPUID, e.g. before using kvm_cpu_has().
*/
vm_xsave_require_permission(XFEATURE_MASK_XTILE_DATA);
TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_XFD));
TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_XSAVE));
TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_AMX_TILE));
TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_XTILECFG));
TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_XTILEDATA));
TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_XTILEDATA_XFD));
/* Create VM */
vm = vm_create_with_one_vcpu(&vcpu, guest_code);
TEST_ASSERT(kvm_cpu_has_p(X86_PROPERTY_XSTATE_MAX_SIZE),
"KVM should enumerate max XSAVE size when XSAVE is supported");
xsave_restore_size = kvm_cpu_property(X86_PROPERTY_XSTATE_MAX_SIZE);
vcpu_regs_get(vcpu, &regs1);
/* Register #NM handler */
vm_init_descriptor_tables(vm);
vcpu_init_descriptor_tables(vcpu);
vm_install_exception_handler(vm, NM_VECTOR, guest_nm_handler);
/* amx cfg for guest_code */
amx_cfg = vm_vaddr_alloc_page(vm);
memset(addr_gva2hva(vm, amx_cfg), 0x0, getpagesize());
/* amx tiledata for guest_code */
tiledata = vm_vaddr_alloc_pages(vm, 2);
memset(addr_gva2hva(vm, tiledata), rand() | 1, 2 * getpagesize());
/* XSAVE state for guest_code */
xstate = vm_vaddr_alloc_pages(vm, DIV_ROUND_UP(XSAVE_SIZE, PAGE_SIZE));
memset(addr_gva2hva(vm, xstate), 0, PAGE_SIZE * DIV_ROUND_UP(XSAVE_SIZE, PAGE_SIZE));
vcpu_args_set(vcpu, 3, amx_cfg, tiledata, xstate);
for (stage = 1; ; stage++) {
vcpu_run(vcpu);
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
switch (get_ucall(vcpu, &uc)) {
case UCALL_ABORT:
REPORT_GUEST_ASSERT(uc);
/* NOT REACHED */
case UCALL_SYNC:
switch (uc.args[1]) {
case 1:
case 2:
case 3:
case 5:
case 6:
case 7:
case 8:
fprintf(stderr, "GUEST_SYNC(%ld)\n", uc.args[1]);
break;
case 4:
case 10:
fprintf(stderr,
"GUEST_SYNC(%ld), check save/restore status\n", uc.args[1]);
/*
 * Compacted mode: the AMX offset is the total XSAVE area
 * size minus the 8K of AMX tile data at the end.
 */
amx_offset = xsave_restore_size - NUM_TILES*TILE_SIZE;
state = vcpu_save_state(vcpu);
void *amx_start = (void *)state->xsave + amx_offset;
void *tiles_data = (void *)addr_gva2hva(vm, tiledata);
/* Only check TMM0 register, 1 tile */
ret = memcmp(amx_start, tiles_data, TILE_SIZE);
TEST_ASSERT(ret == 0, "memcmp failed, ret=%d\n", ret);
kvm_x86_state_cleanup(state);
break;
case 9:
fprintf(stderr,
"GUEST_SYNC(%ld), #NM exception and enable amx\n", uc.args[1]);
break;
}
break;
case UCALL_DONE:
fprintf(stderr, "UCALL_DONE\n");
goto done;
default:
TEST_FAIL("Unknown ucall %lu", uc.cmd);
}
state = vcpu_save_state(vcpu);
memset(&regs1, 0, sizeof(regs1));
vcpu_regs_get(vcpu, &regs1);
kvm_vm_release(vm);
/* Restore state in a new VM. */
vcpu = vm_recreate_with_one_vcpu(vm);
vcpu_load_state(vcpu, state);
kvm_x86_state_cleanup(state);
memset(&regs2, 0, sizeof(regs2));
vcpu_regs_get(vcpu, &regs2);
TEST_ASSERT(!memcmp(&regs1, &regs2, sizeof(regs2)),
"Unexpected register values after vcpu_load_state; rdi: %lx rsi: %lx",
(ulong) regs2.rdi, (ulong) regs2.rsi);
}
done:
kvm_vm_free(vm);
}
| linux-master | tools/testing/selftests/kvm/x86_64/amx_test.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2020, Google LLC.
*
* Tests for KVM paravirtual feature disablement
*/
#include <asm/kvm_para.h>
#include <linux/kvm_para.h>
#include <stdint.h>
#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"
struct msr_data {
uint32_t idx;
const char *name;
};
#define TEST_MSR(msr) { .idx = msr, .name = #msr }
#define UCALL_PR_MSR 0xdeadbeef
#define PR_MSR(msr) ucall(UCALL_PR_MSR, 1, msr)
/*
* KVM paravirtual msrs to test. Expect a #GP if any of these msrs are read or
* written, as the KVM_CPUID_FEATURES leaf is cleared.
*/
static struct msr_data msrs_to_test[] = {
TEST_MSR(MSR_KVM_SYSTEM_TIME),
TEST_MSR(MSR_KVM_SYSTEM_TIME_NEW),
TEST_MSR(MSR_KVM_WALL_CLOCK),
TEST_MSR(MSR_KVM_WALL_CLOCK_NEW),
TEST_MSR(MSR_KVM_ASYNC_PF_EN),
TEST_MSR(MSR_KVM_STEAL_TIME),
TEST_MSR(MSR_KVM_PV_EOI_EN),
TEST_MSR(MSR_KVM_POLL_CONTROL),
TEST_MSR(MSR_KVM_ASYNC_PF_INT),
TEST_MSR(MSR_KVM_ASYNC_PF_ACK),
};
static void test_msr(struct msr_data *msr)
{
uint64_t ignored;
uint8_t vector;
PR_MSR(msr);
vector = rdmsr_safe(msr->idx, &ignored);
GUEST_ASSERT_EQ(vector, GP_VECTOR);
vector = wrmsr_safe(msr->idx, 0);
GUEST_ASSERT_EQ(vector, GP_VECTOR);
}
struct hcall_data {
uint64_t nr;
const char *name;
};
#define TEST_HCALL(hc) { .nr = hc, .name = #hc }
#define UCALL_PR_HCALL 0xdeadc0de
#define PR_HCALL(hc) ucall(UCALL_PR_HCALL, 1, hc)
/*
* KVM hypercalls to test. Expect -KVM_ENOSYS when called, as the corresponding
* features have been cleared in KVM_CPUID_FEATURES.
*/
static struct hcall_data hcalls_to_test[] = {
TEST_HCALL(KVM_HC_KICK_CPU),
TEST_HCALL(KVM_HC_SEND_IPI),
TEST_HCALL(KVM_HC_SCHED_YIELD),
};
static void test_hcall(struct hcall_data *hc)
{
uint64_t r;
PR_HCALL(hc);
r = kvm_hypercall(hc->nr, 0, 0, 0, 0);
GUEST_ASSERT_EQ(r, -KVM_ENOSYS);
}
static void guest_main(void)
{
int i;
for (i = 0; i < ARRAY_SIZE(msrs_to_test); i++) {
test_msr(&msrs_to_test[i]);
}
for (i = 0; i < ARRAY_SIZE(hcalls_to_test); i++) {
test_hcall(&hcalls_to_test[i]);
}
GUEST_DONE();
}
static void pr_msr(struct ucall *uc)
{
struct msr_data *msr = (struct msr_data *)uc->args[0];
pr_info("testing msr: %s (%#x)\n", msr->name, msr->idx);
}
static void pr_hcall(struct ucall *uc)
{
struct hcall_data *hc = (struct hcall_data *)uc->args[0];
pr_info("testing hcall: %s (%lu)\n", hc->name, hc->nr);
}
static void enter_guest(struct kvm_vcpu *vcpu)
{
struct ucall uc;
while (true) {
vcpu_run(vcpu);
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
switch (get_ucall(vcpu, &uc)) {
case UCALL_PR_MSR:
pr_msr(&uc);
break;
case UCALL_PR_HCALL:
pr_hcall(&uc);
break;
case UCALL_ABORT:
REPORT_GUEST_ASSERT(uc);
return;
case UCALL_DONE:
return;
}
}
}
int main(void)
{
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
TEST_REQUIRE(kvm_has_cap(KVM_CAP_ENFORCE_PV_FEATURE_CPUID));
vm = vm_create_with_one_vcpu(&vcpu, guest_main);
vcpu_enable_cap(vcpu, KVM_CAP_ENFORCE_PV_FEATURE_CPUID, 1);
vcpu_clear_cpuid_entry(vcpu, KVM_CPUID_FEATURES);
vm_init_descriptor_tables(vm);
vcpu_init_descriptor_tables(vcpu);
enter_guest(vcpu);
kvm_vm_free(vm);
}
| linux-master | tools/testing/selftests/kvm/x86_64/kvm_pv_test.c |
// SPDX-License-Identifier: GPL-2.0
/*
* XCR0 cpuid test
*
* Copyright (C) 2022, Google LLC.
*/
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"
/*
* Assert that architectural dependency rules are satisfied, e.g. that AVX is
* supported if and only if SSE is supported.
*/
#define ASSERT_XFEATURE_DEPENDENCIES(supported_xcr0, xfeatures, dependencies) \
do { \
uint64_t __supported = (supported_xcr0) & ((xfeatures) | (dependencies)); \
\
__GUEST_ASSERT((__supported & (xfeatures)) != (xfeatures) || \
__supported == ((xfeatures) | (dependencies)), \
"supported = 0x%llx, xfeatures = 0x%llx, dependencies = 0x%llx", \
__supported, (xfeatures), (dependencies)); \
} while (0)
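/*
 * Worked example: for the AVX check below, xfeatures = YMM and dependencies =
 * SSE. With supported_xcr0 = FP|SSE|YMM, __supported = SSE|YMM, which equals
 * (xfeatures | dependencies), so the assert passes; if SSE were missing,
 * __supported would be YMM alone and the assert would fire.
 */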
/*
* Assert that KVM reports a sane, usable as-is XCR0. Architecturally, a CPU
* isn't strictly required to _support_ all XFeatures related to a feature, but
* at the same time XSETBV will #GP if bundled XFeatures aren't enabled and
* disabled coherently. E.g. a CPU can technically enumerate support for
* XTILE_CFG but not XTILE_DATA, but attempting to enable XTILE_CFG without
* XTILE_DATA will #GP.
*/
#define ASSERT_ALL_OR_NONE_XFEATURE(supported_xcr0, xfeatures) \
do { \
uint64_t __supported = (supported_xcr0) & (xfeatures); \
\
__GUEST_ASSERT(!__supported || __supported == (xfeatures), \
"supported = 0x%llx, xfeatures = 0x%llx", \
__supported, (xfeatures)); \
} while (0)
static void guest_code(void)
{
uint64_t xcr0_reset;
uint64_t supported_xcr0;
int i, vector;
set_cr4(get_cr4() | X86_CR4_OSXSAVE);
xcr0_reset = xgetbv(0);
supported_xcr0 = this_cpu_supported_xcr0();
GUEST_ASSERT(xcr0_reset == XFEATURE_MASK_FP);
/* Check AVX */
ASSERT_XFEATURE_DEPENDENCIES(supported_xcr0,
XFEATURE_MASK_YMM,
XFEATURE_MASK_SSE);
/* Check MPX */
ASSERT_ALL_OR_NONE_XFEATURE(supported_xcr0,
XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR);
/* Check AVX-512 */
ASSERT_XFEATURE_DEPENDENCIES(supported_xcr0,
XFEATURE_MASK_AVX512,
XFEATURE_MASK_SSE | XFEATURE_MASK_YMM);
ASSERT_ALL_OR_NONE_XFEATURE(supported_xcr0,
XFEATURE_MASK_AVX512);
/* Check AMX */
ASSERT_ALL_OR_NONE_XFEATURE(supported_xcr0,
XFEATURE_MASK_XTILE);
vector = xsetbv_safe(0, supported_xcr0);
__GUEST_ASSERT(!vector,
"Expected success on XSETBV(0x%llx), got vector '0x%x'",
supported_xcr0, vector);
for (i = 0; i < 64; i++) {
if (supported_xcr0 & BIT_ULL(i))
continue;
vector = xsetbv_safe(0, supported_xcr0 | BIT_ULL(i));
__GUEST_ASSERT(vector == GP_VECTOR,
"Expected #GP on XSETBV(0x%llx), supported XCR0 = %llx, got vector '0x%x'",
BIT_ULL(i), supported_xcr0, vector);
}
GUEST_DONE();
}
int main(int argc, char *argv[])
{
struct kvm_vcpu *vcpu;
struct kvm_run *run;
struct kvm_vm *vm;
struct ucall uc;
TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_XSAVE));
vm = vm_create_with_one_vcpu(&vcpu, guest_code);
run = vcpu->run;
vm_init_descriptor_tables(vm);
vcpu_init_descriptor_tables(vcpu);
while (1) {
vcpu_run(vcpu);
TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
"Unexpected exit reason: %u (%s),\n",
run->exit_reason,
exit_reason_str(run->exit_reason));
switch (get_ucall(vcpu, &uc)) {
case UCALL_ABORT:
REPORT_GUEST_ASSERT(uc);
break;
case UCALL_DONE:
goto done;
default:
TEST_FAIL("Unknown ucall %lu", uc.cmd);
}
}
done:
kvm_vm_free(vm);
return 0;
}
| linux-master | tools/testing/selftests/kvm/x86_64/xcr0_cpuid_test.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2021, Google LLC.
*
* Tests for adjusting the KVM clock from userspace
*/
#include <asm/kvm_para.h>
#include <asm/pvclock.h>
#include <asm/pvclock-abi.h>
#include <stdint.h>
#include <string.h>
#include <sys/stat.h>
#include <time.h>
#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"
struct test_case {
uint64_t kvmclock_base;
int64_t realtime_offset;
};
static struct test_case test_cases[] = {
{ .kvmclock_base = 0 },
{ .kvmclock_base = 180 * NSEC_PER_SEC },
{ .kvmclock_base = 0, .realtime_offset = -180 * NSEC_PER_SEC },
{ .kvmclock_base = 0, .realtime_offset = 180 * NSEC_PER_SEC },
};
#define GUEST_SYNC_CLOCK(__stage, __val) \
GUEST_SYNC_ARGS(__stage, __val, 0, 0, 0)
static void guest_main(vm_paddr_t pvti_pa, struct pvclock_vcpu_time_info *pvti)
{
int i;
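/*
 * Writing MSR_KVM_SYSTEM_TIME_NEW registers the pvclock area: bit 0
 * (KVM_MSR_ENABLED) turns it on and the remaining bits give the guest
 * physical address of the pvclock_vcpu_time_info structure, which
 * __pvclock_read_cycles() then combines with the current TSC to compute
 * the kvm-clock value reported back to the host.
 */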
wrmsr(MSR_KVM_SYSTEM_TIME_NEW, pvti_pa | KVM_MSR_ENABLED);
for (i = 0; i < ARRAY_SIZE(test_cases); i++)
GUEST_SYNC_CLOCK(i, __pvclock_read_cycles(pvti, rdtsc()));
}
#define EXPECTED_FLAGS (KVM_CLOCK_REALTIME | KVM_CLOCK_HOST_TSC)
static inline void assert_flags(struct kvm_clock_data *data)
{
TEST_ASSERT((data->flags & EXPECTED_FLAGS) == EXPECTED_FLAGS,
"unexpected clock data flags: %x (want set: %x)",
data->flags, EXPECTED_FLAGS);
}
static void handle_sync(struct ucall *uc, struct kvm_clock_data *start,
struct kvm_clock_data *end)
{
uint64_t obs, exp_lo, exp_hi;
obs = uc->args[2];
exp_lo = start->clock;
exp_hi = end->clock;
assert_flags(start);
assert_flags(end);
TEST_ASSERT(exp_lo <= obs && obs <= exp_hi,
"unexpected kvm-clock value: %"PRIu64" expected range: [%"PRIu64", %"PRIu64"]",
obs, exp_lo, exp_hi);
pr_info("kvm-clock value: %"PRIu64" expected range [%"PRIu64", %"PRIu64"]\n",
obs, exp_lo, exp_hi);
}
static void handle_abort(struct ucall *uc)
{
REPORT_GUEST_ASSERT(*uc);
}
static void setup_clock(struct kvm_vm *vm, struct test_case *test_case)
{
struct kvm_clock_data data;
memset(&data, 0, sizeof(data));
data.clock = test_case->kvmclock_base;
if (test_case->realtime_offset) {
struct timespec ts;
int r;
data.flags |= KVM_CLOCK_REALTIME;
do {
r = clock_gettime(CLOCK_REALTIME, &ts);
if (!r)
break;
} while (errno == EINTR);
TEST_ASSERT(!r, "clock_gettime() failed: %d\n", r);
data.realtime = ts.tv_sec * NSEC_PER_SEC;
data.realtime += ts.tv_nsec;
data.realtime += test_case->realtime_offset;
}
vm_ioctl(vm, KVM_SET_CLOCK, &data);
}
static void enter_guest(struct kvm_vcpu *vcpu)
{
struct kvm_clock_data start, end;
struct kvm_vm *vm = vcpu->vm;
struct ucall uc;
int i;
for (i = 0; i < ARRAY_SIZE(test_cases); i++) {
setup_clock(vm, &test_cases[i]);
vm_ioctl(vm, KVM_GET_CLOCK, &start);
vcpu_run(vcpu);
vm_ioctl(vm, KVM_GET_CLOCK, &end);
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
switch (get_ucall(vcpu, &uc)) {
case UCALL_SYNC:
handle_sync(&uc, &start, &end);
break;
case UCALL_ABORT:
handle_abort(&uc);
return;
default:
TEST_ASSERT(0, "unhandled ucall: %ld\n", uc.cmd);
}
}
}
#define CLOCKSOURCE_PATH "/sys/devices/system/clocksource/clocksource0/current_clocksource"
static void check_clocksource(void)
{
char *clk_name;
struct stat st;
FILE *fp;
fp = fopen(CLOCKSOURCE_PATH, "r");
if (!fp) {
pr_info("failed to open clocksource file: %d; assuming TSC.\n",
errno);
return;
}
if (fstat(fileno(fp), &st)) {
pr_info("failed to stat clocksource file: %d; assuming TSC.\n",
errno);
goto out;
}
clk_name = malloc(st.st_size);
TEST_ASSERT(clk_name, "failed to allocate buffer to read file\n");
if (!fgets(clk_name, st.st_size, fp)) {
pr_info("failed to read clocksource file: %d; assuming TSC.\n",
ferror(fp));
goto out;
}
TEST_ASSERT(!strncmp(clk_name, "tsc\n", st.st_size),
"clocksource not supported: %s", clk_name);
out:
fclose(fp);
}
int main(void)
{
struct kvm_vcpu *vcpu;
vm_vaddr_t pvti_gva;
vm_paddr_t pvti_gpa;
struct kvm_vm *vm;
int flags;
flags = kvm_check_cap(KVM_CAP_ADJUST_CLOCK);
TEST_REQUIRE(flags & KVM_CLOCK_REALTIME);
check_clocksource();
vm = vm_create_with_one_vcpu(&vcpu, guest_main);
pvti_gva = vm_vaddr_alloc(vm, getpagesize(), 0x10000);
pvti_gpa = addr_gva2gpa(vm, pvti_gva);
vcpu_args_set(vcpu, 2, pvti_gpa, pvti_gva);
enter_guest(vcpu);
kvm_vm_free(vm);
}
| linux-master | tools/testing/selftests/kvm/x86_64/kvm_clock_test.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* hyperv_svm_test
*
* Copyright (C) 2022, Red Hat, Inc.
*
* Tests for Hyper-V extensions to SVM.
*/
#define _GNU_SOURCE /* for program_invocation_short_name */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/bitmap.h>
#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"
#include "svm_util.h"
#include "hyperv.h"
#define L2_GUEST_STACK_SIZE 256
/* Exit to L1 from L2 with RDMSR instruction */
static inline void rdmsr_from_l2(uint32_t msr)
{
/* Currently, L1 doesn't preserve GPRs during vmexits. */
__asm__ __volatile__ ("rdmsr" : : "c"(msr) :
"rax", "rbx", "rdx", "rsi", "rdi", "r8", "r9",
"r10", "r11", "r12", "r13", "r14", "r15");
}
void l2_guest_code(void)
{
u64 unused;
GUEST_SYNC(3);
/* Exit to L1 */
vmmcall();
/* MSR-Bitmap tests */
rdmsr_from_l2(MSR_FS_BASE); /* intercepted */
rdmsr_from_l2(MSR_FS_BASE); /* intercepted */
rdmsr_from_l2(MSR_GS_BASE); /* not intercepted */
vmmcall();
rdmsr_from_l2(MSR_GS_BASE); /* intercepted */
GUEST_SYNC(5);
/* L2 TLB flush tests */
hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE |
HV_HYPERCALL_FAST_BIT, 0x0,
HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES |
HV_FLUSH_ALL_PROCESSORS);
rdmsr_from_l2(MSR_FS_BASE);
/*
* Note: hypercall status (RAX) is not preserved correctly by L1 after
* synthetic vmexit, use unchecked version.
*/
__hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE |
HV_HYPERCALL_FAST_BIT, 0x0,
HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES |
HV_FLUSH_ALL_PROCESSORS, &unused);
/* Done, exit to L1 and never come back. */
vmmcall();
}
static void __attribute__((__flatten__)) guest_code(struct svm_test_data *svm,
struct hyperv_test_pages *hv_pages,
vm_vaddr_t pgs_gpa)
{
unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
struct vmcb *vmcb = svm->vmcb;
struct hv_vmcb_enlightenments *hve = &vmcb->control.hv_enlightenments;
GUEST_SYNC(1);
wrmsr(HV_X64_MSR_GUEST_OS_ID, HYPERV_LINUX_OS_ID);
wrmsr(HV_X64_MSR_HYPERCALL, pgs_gpa);
enable_vp_assist(hv_pages->vp_assist_gpa, hv_pages->vp_assist);
GUEST_ASSERT(svm->vmcb_gpa);
/* Prepare for L2 execution. */
generic_svm_setup(svm, l2_guest_code,
&l2_guest_stack[L2_GUEST_STACK_SIZE]);
/* L2 TLB flush setup */
hve->partition_assist_page = hv_pages->partition_assist_gpa;
hve->hv_enlightenments_control.nested_flush_hypercall = 1;
hve->hv_vm_id = 1;
hve->hv_vp_id = 1;
current_vp_assist->nested_control.features.directhypercall = 1;
*(u32 *)(hv_pages->partition_assist) = 0;
GUEST_SYNC(2);
run_guest(vmcb, svm->vmcb_gpa);
GUEST_ASSERT(vmcb->control.exit_code == SVM_EXIT_VMMCALL);
GUEST_SYNC(4);
vmcb->save.rip += 3;
/* Intercept RDMSR 0xc0000100 */
vmcb->control.intercept |= 1ULL << INTERCEPT_MSR_PROT;
__set_bit(2 * (MSR_FS_BASE & 0x1fff), svm->msr + 0x800);
run_guest(vmcb, svm->vmcb_gpa);
GUEST_ASSERT(vmcb->control.exit_code == SVM_EXIT_MSR);
vmcb->save.rip += 2; /* rdmsr */
/* Enable enlightened MSR bitmap */
hve->hv_enlightenments_control.msr_bitmap = 1;
run_guest(vmcb, svm->vmcb_gpa);
GUEST_ASSERT(vmcb->control.exit_code == SVM_EXIT_MSR);
vmcb->save.rip += 2; /* rdmsr */
/* Intercept RDMSR 0xc0000101 without telling KVM about it */
__set_bit(2 * (MSR_GS_BASE & 0x1fff), svm->msr + 0x800);
/* Make sure HV_VMX_ENLIGHTENED_CLEAN_FIELD_MSR_BITMAP is set */
vmcb->control.clean |= HV_VMCB_NESTED_ENLIGHTENMENTS;
run_guest(vmcb, svm->vmcb_gpa);
/* Make sure we don't see SVM_EXIT_MSR here so eMSR bitmap works */
GUEST_ASSERT(vmcb->control.exit_code == SVM_EXIT_VMMCALL);
vmcb->save.rip += 3; /* vmcall */
/* Now tell KVM we've changed MSR-Bitmap */
vmcb->control.clean &= ~HV_VMCB_NESTED_ENLIGHTENMENTS;
run_guest(vmcb, svm->vmcb_gpa);
GUEST_ASSERT(vmcb->control.exit_code == SVM_EXIT_MSR);
vmcb->save.rip += 2; /* rdmsr */
/*
* L2 TLB flush test. First VMCALL should be handled directly by L0,
* no VMCALL exit expected.
*/
run_guest(vmcb, svm->vmcb_gpa);
GUEST_ASSERT(vmcb->control.exit_code == SVM_EXIT_MSR);
vmcb->save.rip += 2; /* rdmsr */
/* Enable synthetic vmexit */
*(u32 *)(hv_pages->partition_assist) = 1;
run_guest(vmcb, svm->vmcb_gpa);
GUEST_ASSERT(vmcb->control.exit_code == HV_SVM_EXITCODE_ENL);
GUEST_ASSERT(vmcb->control.exit_info_1 == HV_SVM_ENL_EXITCODE_TRAP_AFTER_FLUSH);
run_guest(vmcb, svm->vmcb_gpa);
GUEST_ASSERT(vmcb->control.exit_code == SVM_EXIT_VMMCALL);
GUEST_SYNC(6);
GUEST_DONE();
}
int main(int argc, char *argv[])
{
vm_vaddr_t nested_gva = 0, hv_pages_gva = 0;
vm_vaddr_t hcall_page;
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
struct ucall uc;
int stage;
TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_SVM));
/* Create VM */
vm = vm_create_with_one_vcpu(&vcpu, guest_code);
vcpu_set_hv_cpuid(vcpu);
vcpu_alloc_svm(vm, &nested_gva);
vcpu_alloc_hyperv_test_pages(vm, &hv_pages_gva);
hcall_page = vm_vaddr_alloc_pages(vm, 1);
memset(addr_gva2hva(vm, hcall_page), 0x0, getpagesize());
vcpu_args_set(vcpu, 3, nested_gva, hv_pages_gva, addr_gva2gpa(vm, hcall_page));
vcpu_set_msr(vcpu, HV_X64_MSR_VP_INDEX, vcpu->id);
for (stage = 1;; stage++) {
vcpu_run(vcpu);
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
switch (get_ucall(vcpu, &uc)) {
case UCALL_ABORT:
REPORT_GUEST_ASSERT(uc);
/* NOT REACHED */
case UCALL_SYNC:
break;
case UCALL_DONE:
goto done;
default:
TEST_FAIL("Unknown ucall %lu", uc.cmd);
}
/* UCALL_SYNC is handled here. */
TEST_ASSERT(!strcmp((const char *)uc.args[0], "hello") &&
uc.args[1] == stage, "Stage %d: Unexpected register values vmexit, got %lx",
stage, (ulong)uc.args[1]);
}
done:
kvm_vm_free(vm);
}
| linux-master | tools/testing/selftests/kvm/x86_64/hyperv_svm_test.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* KVM_GET/SET_* tests
*
* Copyright (C) 2018, Red Hat, Inc.
*
* Tests for vCPU state save/restore, including nested guest state.
*/
#define _GNU_SOURCE /* for program_invocation_short_name */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"
#include "vmx.h"
#include "svm_util.h"
#define L2_GUEST_STACK_SIZE 256
void svm_l2_guest_code(void)
{
GUEST_SYNC(4);
/* Exit to L1 */
vmcall();
GUEST_SYNC(6);
/* Done, exit to L1 and never come back. */
vmcall();
}
static void svm_l1_guest_code(struct svm_test_data *svm)
{
unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
struct vmcb *vmcb = svm->vmcb;
GUEST_ASSERT(svm->vmcb_gpa);
/* Prepare for L2 execution. */
generic_svm_setup(svm, svm_l2_guest_code,
&l2_guest_stack[L2_GUEST_STACK_SIZE]);
GUEST_SYNC(3);
run_guest(vmcb, svm->vmcb_gpa);
GUEST_ASSERT(vmcb->control.exit_code == SVM_EXIT_VMMCALL);
GUEST_SYNC(5);
vmcb->save.rip += 3;
run_guest(vmcb, svm->vmcb_gpa);
GUEST_ASSERT(vmcb->control.exit_code == SVM_EXIT_VMMCALL);
GUEST_SYNC(7);
}
void vmx_l2_guest_code(void)
{
GUEST_SYNC(6);
/* Exit to L1 */
vmcall();
/* L1 has now set up a shadow VMCS for us. */
GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0ffee);
GUEST_SYNC(10);
GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0ffee);
GUEST_ASSERT(!vmwrite(GUEST_RIP, 0xc0fffee));
GUEST_SYNC(11);
GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0fffee);
GUEST_ASSERT(!vmwrite(GUEST_RIP, 0xc0ffffee));
GUEST_SYNC(12);
/* Done, exit to L1 and never come back. */
vmcall();
}
static void vmx_l1_guest_code(struct vmx_pages *vmx_pages)
{
unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
GUEST_ASSERT(vmx_pages->vmcs_gpa);
GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
GUEST_SYNC(3);
GUEST_ASSERT(load_vmcs(vmx_pages));
GUEST_ASSERT(vmptrstz() == vmx_pages->vmcs_gpa);
GUEST_SYNC(4);
GUEST_ASSERT(vmptrstz() == vmx_pages->vmcs_gpa);
prepare_vmcs(vmx_pages, vmx_l2_guest_code,
&l2_guest_stack[L2_GUEST_STACK_SIZE]);
GUEST_SYNC(5);
GUEST_ASSERT(vmptrstz() == vmx_pages->vmcs_gpa);
GUEST_ASSERT(!vmlaunch());
GUEST_ASSERT(vmptrstz() == vmx_pages->vmcs_gpa);
GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);
/* Check that the launched state is preserved. */
GUEST_ASSERT(vmlaunch());
GUEST_ASSERT(!vmresume());
GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);
GUEST_SYNC(7);
GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);
GUEST_ASSERT(!vmresume());
GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);
vmwrite(GUEST_RIP, vmreadz(GUEST_RIP) + 3);
vmwrite(SECONDARY_VM_EXEC_CONTROL, SECONDARY_EXEC_SHADOW_VMCS);
vmwrite(VMCS_LINK_POINTER, vmx_pages->shadow_vmcs_gpa);
GUEST_ASSERT(!vmptrld(vmx_pages->shadow_vmcs_gpa));
GUEST_ASSERT(vmlaunch());
GUEST_SYNC(8);
GUEST_ASSERT(vmlaunch());
GUEST_ASSERT(vmresume());
vmwrite(GUEST_RIP, 0xc0ffee);
GUEST_SYNC(9);
GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0ffee);
GUEST_ASSERT(!vmptrld(vmx_pages->vmcs_gpa));
GUEST_ASSERT(!vmresume());
GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);
GUEST_ASSERT(!vmptrld(vmx_pages->shadow_vmcs_gpa));
GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0ffffee);
GUEST_ASSERT(vmlaunch());
GUEST_ASSERT(vmresume());
GUEST_SYNC(13);
GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0ffffee);
GUEST_ASSERT(vmlaunch());
GUEST_ASSERT(vmresume());
}
static void __attribute__((__flatten__)) guest_code(void *arg)
{
GUEST_SYNC(1);
GUEST_SYNC(2);
if (arg) {
if (this_cpu_has(X86_FEATURE_SVM))
svm_l1_guest_code(arg);
else
vmx_l1_guest_code(arg);
}
GUEST_DONE();
}
int main(int argc, char *argv[])
{
vm_vaddr_t nested_gva = 0;
struct kvm_regs regs1, regs2;
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
struct kvm_x86_state *state;
struct ucall uc;
int stage;
/* Create VM */
vm = vm_create_with_one_vcpu(&vcpu, guest_code);
vcpu_regs_get(vcpu, &regs1);
if (kvm_has_cap(KVM_CAP_NESTED_STATE)) {
if (kvm_cpu_has(X86_FEATURE_SVM))
vcpu_alloc_svm(vm, &nested_gva);
else if (kvm_cpu_has(X86_FEATURE_VMX))
vcpu_alloc_vmx(vm, &nested_gva);
}
if (!nested_gva)
pr_info("will skip nested state checks\n");
vcpu_args_set(vcpu, 1, nested_gva);
for (stage = 1;; stage++) {
vcpu_run(vcpu);
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
switch (get_ucall(vcpu, &uc)) {
case UCALL_ABORT:
REPORT_GUEST_ASSERT(uc);
/* NOT REACHED */
case UCALL_SYNC:
break;
case UCALL_DONE:
goto done;
default:
TEST_FAIL("Unknown ucall %lu", uc.cmd);
}
/* UCALL_SYNC is handled here. */
TEST_ASSERT(!strcmp((const char *)uc.args[0], "hello") &&
uc.args[1] == stage, "Stage %d: Unexpected register values vmexit, got %lx",
stage, (ulong)uc.args[1]);
state = vcpu_save_state(vcpu);
memset(&regs1, 0, sizeof(regs1));
vcpu_regs_get(vcpu, &regs1);
kvm_vm_release(vm);
/* Restore state in a new VM. */
vcpu = vm_recreate_with_one_vcpu(vm);
vcpu_load_state(vcpu, state);
kvm_x86_state_cleanup(state);
memset(&regs2, 0, sizeof(regs2));
vcpu_regs_get(vcpu, &regs2);
TEST_ASSERT(!memcmp(&regs1, &regs2, sizeof(regs2)),
"Unexpected register values after vcpu_load_state; rdi: %lx rsi: %lx",
(ulong) regs2.rdi, (ulong) regs2.rsi);
}
done:
kvm_vm_free(vm);
}
| linux-master | tools/testing/selftests/kvm/x86_64/state_test.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* svm_vmcall_test
*
* Copyright (C) 2020, Red Hat, Inc.
*
* Nested SVM testing: VMCALL
*/
#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"
#include "svm_util.h"
static void l2_guest_code(struct svm_test_data *svm)
{
__asm__ __volatile__("vmcall");
}
static void l1_guest_code(struct svm_test_data *svm)
{
#define L2_GUEST_STACK_SIZE 64
unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
struct vmcb *vmcb = svm->vmcb;
/* Prepare for L2 execution. */
generic_svm_setup(svm, l2_guest_code,
&l2_guest_stack[L2_GUEST_STACK_SIZE]);
run_guest(vmcb, svm->vmcb_gpa);
GUEST_ASSERT(vmcb->control.exit_code == SVM_EXIT_VMMCALL);
GUEST_DONE();
}
int main(int argc, char *argv[])
{
struct kvm_vcpu *vcpu;
vm_vaddr_t svm_gva;
struct kvm_vm *vm;
TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_SVM));
vm = vm_create_with_one_vcpu(&vcpu, l1_guest_code);
vcpu_alloc_svm(vm, &svm_gva);
vcpu_args_set(vcpu, 1, svm_gva);
for (;;) {
struct ucall uc;
vcpu_run(vcpu);
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
switch (get_ucall(vcpu, &uc)) {
case UCALL_ABORT:
REPORT_GUEST_ASSERT(uc);
/* NOT REACHED */
case UCALL_SYNC:
break;
case UCALL_DONE:
goto done;
default:
TEST_FAIL("Unknown ucall 0x%lx.", uc.cmd);
}
}
done:
kvm_vm_free(vm);
return 0;
}
| linux-master | tools/testing/selftests/kvm/x86_64/svm_vmcall_test.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* vmx_nested_tsc_scaling_test
*
* Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* This test case verifies that nested TSC scaling behaves as expected when
* both L1 and L2 are scaled using different ratios. For this test we scale
* L1 down and scale L2 up.
*/
#include <time.h>
#include "kvm_util.h"
#include "vmx.h"
#include "kselftest.h"
/* L2 is scaled up (from L1's perspective) by this factor */
#define L2_SCALE_FACTOR 4ULL
#define TSC_OFFSET_L2 ((uint64_t) -33125236320908)
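/*
 * The VMX TSC multiplier is a fixed-point value with 48 fractional bits, so
 * shifting the integer scale factor left by 48 yields an exact multiplier:
 * with L2_SCALE_FACTOR == 4, L2's TSC advances four times faster than L1's.
 */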
#define TSC_MULTIPLIER_L2 (L2_SCALE_FACTOR << 48)
#define L2_GUEST_STACK_SIZE 64
enum { USLEEP, UCHECK_L1, UCHECK_L2 };
#define GUEST_SLEEP(sec) ucall(UCALL_SYNC, 2, USLEEP, sec)
#define GUEST_CHECK(level, freq) ucall(UCALL_SYNC, 2, level, freq)
/*
* This function checks whether the "actual" TSC frequency of a guest matches
* its expected frequency. In order to account for delays in taking the TSC
* measurements, a difference of 1% between the actual and the expected value
* is tolerated.
*/
static void compare_tsc_freq(uint64_t actual, uint64_t expected)
{
uint64_t tolerance, thresh_low, thresh_high;
tolerance = expected / 100;
thresh_low = expected - tolerance;
thresh_high = expected + tolerance;
TEST_ASSERT(thresh_low < actual,
"TSC freq is expected to be between %"PRIu64" and %"PRIu64
" but it actually is %"PRIu64,
thresh_low, thresh_high, actual);
TEST_ASSERT(thresh_high > actual,
"TSC freq is expected to be between %"PRIu64" and %"PRIu64
" but it actually is %"PRIu64,
thresh_low, thresh_high, actual);
}
static void check_tsc_freq(int level)
{
uint64_t tsc_start, tsc_end, tsc_freq;
/*
* Reading the TSC twice with about a second's difference should give
* us an approximation of the TSC frequency from the guest's
* perspective. Now, this won't be completely accurate, but it should
* be good enough for the purposes of this test.
*/
tsc_start = rdmsr(MSR_IA32_TSC);
GUEST_SLEEP(1);
tsc_end = rdmsr(MSR_IA32_TSC);
tsc_freq = tsc_end - tsc_start;
GUEST_CHECK(level, tsc_freq);
}
static void l2_guest_code(void)
{
check_tsc_freq(UCHECK_L2);
/* exit to L1 */
__asm__ __volatile__("vmcall");
}
static void l1_guest_code(struct vmx_pages *vmx_pages)
{
unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
uint32_t control;
/* check that L1's frequency looks alright before launching L2 */
check_tsc_freq(UCHECK_L1);
GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
GUEST_ASSERT(load_vmcs(vmx_pages));
/* prepare the VMCS for L2 execution */
prepare_vmcs(vmx_pages, l2_guest_code, &l2_guest_stack[L2_GUEST_STACK_SIZE]);
/* enable TSC offsetting and TSC scaling for L2 */
control = vmreadz(CPU_BASED_VM_EXEC_CONTROL);
control |= CPU_BASED_USE_MSR_BITMAPS | CPU_BASED_USE_TSC_OFFSETTING;
vmwrite(CPU_BASED_VM_EXEC_CONTROL, control);
control = vmreadz(SECONDARY_VM_EXEC_CONTROL);
control |= SECONDARY_EXEC_TSC_SCALING;
vmwrite(SECONDARY_VM_EXEC_CONTROL, control);
vmwrite(TSC_OFFSET, TSC_OFFSET_L2);
vmwrite(TSC_MULTIPLIER, TSC_MULTIPLIER_L2);
vmwrite(TSC_MULTIPLIER_HIGH, TSC_MULTIPLIER_L2 >> 32);
/* launch L2 */
GUEST_ASSERT(!vmlaunch());
GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);
/* check that L1's frequency still looks good */
check_tsc_freq(UCHECK_L1);
GUEST_DONE();
}
static bool system_has_stable_tsc(void)
{
bool tsc_is_stable;
FILE *fp;
char buf[4];
fp = fopen("/sys/devices/system/clocksource/clocksource0/current_clocksource", "r");
if (fp == NULL)
return false;
tsc_is_stable = fgets(buf, sizeof(buf), fp) &&
!strncmp(buf, "tsc", sizeof(buf));
fclose(fp);
return tsc_is_stable;
}
int main(int argc, char *argv[])
{
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
vm_vaddr_t vmx_pages_gva;
uint64_t tsc_start, tsc_end;
uint64_t tsc_khz;
uint64_t l1_scale_factor;
uint64_t l0_tsc_freq = 0;
uint64_t l1_tsc_freq = 0;
uint64_t l2_tsc_freq = 0;
TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_VMX));
TEST_REQUIRE(kvm_has_cap(KVM_CAP_TSC_CONTROL));
TEST_REQUIRE(system_has_stable_tsc());
/*
* We set L1's scale factor to be a random number from 2 to 10.
* Ideally we would do the same for L2's factor but that one is
* referenced by both main() and l1_guest_code() and using a global
* variable does not work.
*/
srand(time(NULL));
l1_scale_factor = (rand() % 9) + 2;
printf("L1's scale down factor is: %"PRIu64"\n", l1_scale_factor);
printf("L2's scale up factor is: %llu\n", L2_SCALE_FACTOR);
tsc_start = rdtsc();
sleep(1);
tsc_end = rdtsc();
l0_tsc_freq = tsc_end - tsc_start;
printf("real TSC frequency is around: %"PRIu64"\n", l0_tsc_freq);
vm = vm_create_with_one_vcpu(&vcpu, l1_guest_code);
vcpu_alloc_vmx(vm, &vmx_pages_gva);
vcpu_args_set(vcpu, 1, vmx_pages_gva);
tsc_khz = __vcpu_ioctl(vcpu, KVM_GET_TSC_KHZ, NULL);
TEST_ASSERT(tsc_khz != -1, "vcpu ioctl KVM_GET_TSC_KHZ failed");
/* scale down L1's TSC frequency */
vcpu_ioctl(vcpu, KVM_SET_TSC_KHZ, (void *) (tsc_khz / l1_scale_factor));
for (;;) {
struct ucall uc;
vcpu_run(vcpu);
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
switch (get_ucall(vcpu, &uc)) {
case UCALL_ABORT:
REPORT_GUEST_ASSERT(uc);
case UCALL_SYNC:
switch (uc.args[0]) {
case USLEEP:
sleep(uc.args[1]);
break;
case UCHECK_L1:
l1_tsc_freq = uc.args[1];
printf("L1's TSC frequency is around: %"PRIu64
"\n", l1_tsc_freq);
compare_tsc_freq(l1_tsc_freq,
l0_tsc_freq / l1_scale_factor);
break;
case UCHECK_L2:
l2_tsc_freq = uc.args[1];
printf("L2's TSC frequency is around: %"PRIu64
"\n", l2_tsc_freq);
compare_tsc_freq(l2_tsc_freq,
l1_tsc_freq * L2_SCALE_FACTOR);
break;
}
break;
case UCALL_DONE:
goto done;
default:
TEST_FAIL("Unknown ucall %lu", uc.cmd);
}
}
done:
kvm_vm_free(vm);
return 0;
}
| linux-master | tools/testing/selftests/kvm/x86_64/vmx_nested_tsc_scaling_test.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2020, Google LLC.
*
* Tests for exiting into userspace on registered MSRs
*/
#define _GNU_SOURCE /* for program_invocation_short_name */
#include <sys/ioctl.h>
#include "test_util.h"
#include "kvm_util.h"
#include "vmx.h"
/* Forced emulation prefix, used to invoke the emulator unconditionally. */
#define KVM_FEP "ud2; .byte 'k', 'v', 'm';"
#define KVM_FEP_LENGTH 5
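/*
 * With kvm.force_emulation_prefix enabled, KVM treats "ud2" followed by the
 * bytes 'k', 'v', 'm' as a request to emulate the next instruction instead of
 * injecting #UD. KVM_FEP_LENGTH (2 bytes of ud2 + 3 marker bytes) lets the
 * guest's #UD handler skip the prefix when the module param is off.
 */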
static int fep_available = 1;
#define MSR_NON_EXISTENT 0x474f4f00
static u64 deny_bits = 0;
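/*
 * In an MSR filter bitmap a set bit allows the access and a clear bit does
 * not; deny_bits == 0 therefore blocks the single MSR covered by each range
 * below, so those accesses exit to userspace with KVM_MSR_EXIT_REASON_FILTER.
 */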
struct kvm_msr_filter filter_allow = {
.flags = KVM_MSR_FILTER_DEFAULT_ALLOW,
.ranges = {
{
.flags = KVM_MSR_FILTER_READ |
KVM_MSR_FILTER_WRITE,
.nmsrs = 1,
/* Test an MSR the kernel knows about. */
.base = MSR_IA32_XSS,
.bitmap = (uint8_t*)&deny_bits,
}, {
.flags = KVM_MSR_FILTER_READ |
KVM_MSR_FILTER_WRITE,
.nmsrs = 1,
/* Test an MSR the kernel doesn't know about. */
.base = MSR_IA32_FLUSH_CMD,
.bitmap = (uint8_t*)&deny_bits,
}, {
.flags = KVM_MSR_FILTER_READ |
KVM_MSR_FILTER_WRITE,
.nmsrs = 1,
/* Test a fabricated MSR that no one knows about. */
.base = MSR_NON_EXISTENT,
.bitmap = (uint8_t*)&deny_bits,
},
},
};
struct kvm_msr_filter filter_fs = {
.flags = KVM_MSR_FILTER_DEFAULT_ALLOW,
.ranges = {
{
.flags = KVM_MSR_FILTER_READ,
.nmsrs = 1,
.base = MSR_FS_BASE,
.bitmap = (uint8_t*)&deny_bits,
},
},
};
struct kvm_msr_filter filter_gs = {
.flags = KVM_MSR_FILTER_DEFAULT_ALLOW,
.ranges = {
{
.flags = KVM_MSR_FILTER_READ,
.nmsrs = 1,
.base = MSR_GS_BASE,
.bitmap = (uint8_t*)&deny_bits,
},
},
};
static uint64_t msr_non_existent_data;
static int guest_exception_count;
static u32 msr_reads, msr_writes;
static u8 bitmap_00000000[KVM_MSR_FILTER_MAX_BITMAP_SIZE];
static u8 bitmap_00000000_write[KVM_MSR_FILTER_MAX_BITMAP_SIZE];
static u8 bitmap_40000000[KVM_MSR_FILTER_MAX_BITMAP_SIZE];
static u8 bitmap_c0000000[KVM_MSR_FILTER_MAX_BITMAP_SIZE];
static u8 bitmap_c0000000_read[KVM_MSR_FILTER_MAX_BITMAP_SIZE];
static u8 bitmap_deadbeef[1] = { 0x1 };
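/*
 * Bitmaps for the default-deny filter: each range's bitmap is first filled
 * with ones (allow everything in the range) and deny_msr() then clears the
 * bit for an individual MSR. In the filter ABI, bit i of a range's bitmap
 * corresponds to MSR (base + i).
 */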
static void deny_msr(uint8_t *bitmap, u32 msr)
{
u32 idx = msr & (KVM_MSR_FILTER_MAX_BITMAP_SIZE - 1);
bitmap[idx / 8] &= ~(1 << (idx % 8));
}
static void prepare_bitmaps(void)
{
memset(bitmap_00000000, 0xff, sizeof(bitmap_00000000));
memset(bitmap_00000000_write, 0xff, sizeof(bitmap_00000000_write));
memset(bitmap_40000000, 0xff, sizeof(bitmap_40000000));
memset(bitmap_c0000000, 0xff, sizeof(bitmap_c0000000));
memset(bitmap_c0000000_read, 0xff, sizeof(bitmap_c0000000_read));
deny_msr(bitmap_00000000_write, MSR_IA32_POWER_CTL);
deny_msr(bitmap_c0000000_read, MSR_SYSCALL_MASK);
deny_msr(bitmap_c0000000_read, MSR_GS_BASE);
}
struct kvm_msr_filter filter_deny = {
.flags = KVM_MSR_FILTER_DEFAULT_DENY,
.ranges = {
{
.flags = KVM_MSR_FILTER_READ,
.base = 0x00000000,
.nmsrs = KVM_MSR_FILTER_MAX_BITMAP_SIZE * BITS_PER_BYTE,
.bitmap = bitmap_00000000,
}, {
.flags = KVM_MSR_FILTER_WRITE,
.base = 0x00000000,
.nmsrs = KVM_MSR_FILTER_MAX_BITMAP_SIZE * BITS_PER_BYTE,
.bitmap = bitmap_00000000_write,
}, {
.flags = KVM_MSR_FILTER_READ | KVM_MSR_FILTER_WRITE,
.base = 0x40000000,
.nmsrs = KVM_MSR_FILTER_MAX_BITMAP_SIZE * BITS_PER_BYTE,
.bitmap = bitmap_40000000,
}, {
.flags = KVM_MSR_FILTER_READ,
.base = 0xc0000000,
.nmsrs = KVM_MSR_FILTER_MAX_BITMAP_SIZE * BITS_PER_BYTE,
.bitmap = bitmap_c0000000_read,
}, {
.flags = KVM_MSR_FILTER_WRITE,
.base = 0xc0000000,
.nmsrs = KVM_MSR_FILTER_MAX_BITMAP_SIZE * BITS_PER_BYTE,
.bitmap = bitmap_c0000000,
}, {
.flags = KVM_MSR_FILTER_WRITE | KVM_MSR_FILTER_READ,
.base = 0xdeadbeef,
.nmsrs = 1,
.bitmap = bitmap_deadbeef,
},
},
};
struct kvm_msr_filter no_filter_deny = {
.flags = KVM_MSR_FILTER_DEFAULT_ALLOW,
};
/*
* Note: Force test_rdmsr() to not be inlined to prevent the labels,
* rdmsr_start and rdmsr_end, from being defined multiple times.
*/
static noinline uint64_t test_rdmsr(uint32_t msr)
{
uint32_t a, d;
guest_exception_count = 0;
__asm__ __volatile__("rdmsr_start: rdmsr; rdmsr_end:" :
"=a"(a), "=d"(d) : "c"(msr) : "memory");
return a | ((uint64_t) d << 32);
}
/*
* Note: Force test_wrmsr() to not be inlined to prevent the labels,
* wrmsr_start and wrmsr_end, from being defined multiple times.
*/
static noinline void test_wrmsr(uint32_t msr, uint64_t value)
{
uint32_t a = value;
uint32_t d = value >> 32;
guest_exception_count = 0;
__asm__ __volatile__("wrmsr_start: wrmsr; wrmsr_end:" ::
"a"(a), "d"(d), "c"(msr) : "memory");
}
extern char rdmsr_start, rdmsr_end;
extern char wrmsr_start, wrmsr_end;
/*
* Note: Force test_em_rdmsr() to not be inlined to prevent the labels,
* em_rdmsr_start and em_rdmsr_end, from being defined multiple times.
*/
static noinline uint64_t test_em_rdmsr(uint32_t msr)
{
uint32_t a, d;
guest_exception_count = 0;
__asm__ __volatile__(KVM_FEP "em_rdmsr_start: rdmsr; em_rdmsr_end:" :
"=a"(a), "=d"(d) : "c"(msr) : "memory");
return a | ((uint64_t) d << 32);
}
/*
* Note: Force test_em_wrmsr() to not be inlined to prevent the labels,
* em_wrmsr_start and em_wrmsr_end, from being defined multiple times.
*/
static noinline void test_em_wrmsr(uint32_t msr, uint64_t value)
{
uint32_t a = value;
uint32_t d = value >> 32;
guest_exception_count = 0;
__asm__ __volatile__(KVM_FEP "em_wrmsr_start: wrmsr; em_wrmsr_end:" ::
"a"(a), "d"(d), "c"(msr) : "memory");
}
extern char em_rdmsr_start, em_rdmsr_end;
extern char em_wrmsr_start, em_wrmsr_end;
static void guest_code_filter_allow(void)
{
uint64_t data;
/*
* Test userspace intercepting rdmsr / wrmsr for MSR_IA32_XSS.
*
* A GP is thrown if anything other than 0 is written to
* MSR_IA32_XSS.
*/
data = test_rdmsr(MSR_IA32_XSS);
GUEST_ASSERT(data == 0);
GUEST_ASSERT(guest_exception_count == 0);
test_wrmsr(MSR_IA32_XSS, 0);
GUEST_ASSERT(guest_exception_count == 0);
test_wrmsr(MSR_IA32_XSS, 1);
GUEST_ASSERT(guest_exception_count == 1);
/*
* Test userspace intercepting rdmsr / wrmsr for MSR_IA32_FLUSH_CMD.
*
* A GP is thrown if MSR_IA32_FLUSH_CMD is read
* from or if a value other than 1 is written to it.
*/
test_rdmsr(MSR_IA32_FLUSH_CMD);
GUEST_ASSERT(guest_exception_count == 1);
test_wrmsr(MSR_IA32_FLUSH_CMD, 0);
GUEST_ASSERT(guest_exception_count == 1);
test_wrmsr(MSR_IA32_FLUSH_CMD, 1);
GUEST_ASSERT(guest_exception_count == 0);
/*
* Test userspace intercepting rdmsr / wrmsr for MSR_NON_EXISTENT.
*
* Test that a fabricated MSR can pass through the kernel
* and be handled in userspace.
*/
test_wrmsr(MSR_NON_EXISTENT, 2);
GUEST_ASSERT(guest_exception_count == 0);
data = test_rdmsr(MSR_NON_EXISTENT);
GUEST_ASSERT(data == 2);
GUEST_ASSERT(guest_exception_count == 0);
/*
* Test to see if the instruction emulator is available (ie: the module
* parameter 'kvm.force_emulation_prefix=1' is set). This instruction
* will #UD if it isn't available.
*/
__asm__ __volatile__(KVM_FEP "nop");
if (fep_available) {
/* Let userspace know we aren't done. */
GUEST_SYNC(0);
/*
* Now run the same tests with the instruction emulator.
*/
data = test_em_rdmsr(MSR_IA32_XSS);
GUEST_ASSERT(data == 0);
GUEST_ASSERT(guest_exception_count == 0);
test_em_wrmsr(MSR_IA32_XSS, 0);
GUEST_ASSERT(guest_exception_count == 0);
test_em_wrmsr(MSR_IA32_XSS, 1);
GUEST_ASSERT(guest_exception_count == 1);
test_em_rdmsr(MSR_IA32_FLUSH_CMD);
GUEST_ASSERT(guest_exception_count == 1);
test_em_wrmsr(MSR_IA32_FLUSH_CMD, 0);
GUEST_ASSERT(guest_exception_count == 1);
test_em_wrmsr(MSR_IA32_FLUSH_CMD, 1);
GUEST_ASSERT(guest_exception_count == 0);
test_em_wrmsr(MSR_NON_EXISTENT, 2);
GUEST_ASSERT(guest_exception_count == 0);
data = test_em_rdmsr(MSR_NON_EXISTENT);
GUEST_ASSERT(data == 2);
GUEST_ASSERT(guest_exception_count == 0);
}
GUEST_DONE();
}
static void guest_msr_calls(bool trapped)
{
/* This goes into the in-kernel emulation */
wrmsr(MSR_SYSCALL_MASK, 0);
if (trapped) {
/* This goes into user space emulation */
GUEST_ASSERT(rdmsr(MSR_SYSCALL_MASK) == MSR_SYSCALL_MASK);
GUEST_ASSERT(rdmsr(MSR_GS_BASE) == MSR_GS_BASE);
} else {
GUEST_ASSERT(rdmsr(MSR_SYSCALL_MASK) != MSR_SYSCALL_MASK);
GUEST_ASSERT(rdmsr(MSR_GS_BASE) != MSR_GS_BASE);
}
/* If trapped == true, this goes into user space emulation */
wrmsr(MSR_IA32_POWER_CTL, 0x1234);
/* This goes into the in-kernel emulation */
rdmsr(MSR_IA32_POWER_CTL);
/* Invalid MSR, should always be handled by user space exit */
GUEST_ASSERT(rdmsr(0xdeadbeef) == 0xdeadbeef);
wrmsr(0xdeadbeef, 0x1234);
}
static void guest_code_filter_deny(void)
{
guest_msr_calls(true);
/*
* Disable msr filtering, so that the kernel
* handles everything in the next round
*/
GUEST_SYNC(0);
guest_msr_calls(false);
GUEST_DONE();
}
static void guest_code_permission_bitmap(void)
{
uint64_t data;
data = test_rdmsr(MSR_FS_BASE);
GUEST_ASSERT(data == MSR_FS_BASE);
data = test_rdmsr(MSR_GS_BASE);
GUEST_ASSERT(data != MSR_GS_BASE);
/* Let userspace know to switch the filter */
GUEST_SYNC(0);
data = test_rdmsr(MSR_FS_BASE);
GUEST_ASSERT(data != MSR_FS_BASE);
data = test_rdmsr(MSR_GS_BASE);
GUEST_ASSERT(data == MSR_GS_BASE);
GUEST_DONE();
}
static void __guest_gp_handler(struct ex_regs *regs,
char *r_start, char *r_end,
char *w_start, char *w_end)
{
if (regs->rip == (uintptr_t)r_start) {
regs->rip = (uintptr_t)r_end;
regs->rax = 0;
regs->rdx = 0;
} else if (regs->rip == (uintptr_t)w_start) {
regs->rip = (uintptr_t)w_end;
} else {
GUEST_ASSERT(!"RIP is at an unknown location!");
}
++guest_exception_count;
}
static void guest_gp_handler(struct ex_regs *regs)
{
__guest_gp_handler(regs, &rdmsr_start, &rdmsr_end,
&wrmsr_start, &wrmsr_end);
}
static void guest_fep_gp_handler(struct ex_regs *regs)
{
__guest_gp_handler(regs, &em_rdmsr_start, &em_rdmsr_end,
&em_wrmsr_start, &em_wrmsr_end);
}
static void guest_ud_handler(struct ex_regs *regs)
{
fep_available = 0;
regs->rip += KVM_FEP_LENGTH;
}
static void check_for_guest_assert(struct kvm_vcpu *vcpu)
{
struct ucall uc;
if (vcpu->run->exit_reason == KVM_EXIT_IO &&
get_ucall(vcpu, &uc) == UCALL_ABORT) {
REPORT_GUEST_ASSERT(uc);
}
}
static void process_rdmsr(struct kvm_vcpu *vcpu, uint32_t msr_index)
{
struct kvm_run *run = vcpu->run;
check_for_guest_assert(vcpu);
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_X86_RDMSR);
TEST_ASSERT(run->msr.index == msr_index,
"Unexpected msr (0x%04x), expected 0x%04x",
run->msr.index, msr_index);
switch (run->msr.index) {
case MSR_IA32_XSS:
run->msr.data = 0;
break;
case MSR_IA32_FLUSH_CMD:
run->msr.error = 1;
break;
case MSR_NON_EXISTENT:
run->msr.data = msr_non_existent_data;
break;
case MSR_FS_BASE:
run->msr.data = MSR_FS_BASE;
break;
case MSR_GS_BASE:
run->msr.data = MSR_GS_BASE;
break;
default:
TEST_ASSERT(false, "Unexpected MSR: 0x%04x", run->msr.index);
}
}
static void process_wrmsr(struct kvm_vcpu *vcpu, uint32_t msr_index)
{
struct kvm_run *run = vcpu->run;
check_for_guest_assert(vcpu);
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_X86_WRMSR);
TEST_ASSERT(run->msr.index == msr_index,
"Unexpected msr (0x%04x), expected 0x%04x",
run->msr.index, msr_index);
switch (run->msr.index) {
case MSR_IA32_XSS:
if (run->msr.data != 0)
run->msr.error = 1;
break;
case MSR_IA32_FLUSH_CMD:
if (run->msr.data != 1)
run->msr.error = 1;
break;
case MSR_NON_EXISTENT:
msr_non_existent_data = run->msr.data;
break;
default:
TEST_ASSERT(false, "Unexpected MSR: 0x%04x", run->msr.index);
}
}
static void process_ucall_done(struct kvm_vcpu *vcpu)
{
struct ucall uc;
check_for_guest_assert(vcpu);
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
TEST_ASSERT(get_ucall(vcpu, &uc) == UCALL_DONE,
"Unexpected ucall command: %lu, expected UCALL_DONE (%d)",
uc.cmd, UCALL_DONE);
}
static uint64_t process_ucall(struct kvm_vcpu *vcpu)
{
struct ucall uc = {};
check_for_guest_assert(vcpu);
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
switch (get_ucall(vcpu, &uc)) {
case UCALL_SYNC:
break;
case UCALL_ABORT:
check_for_guest_assert(vcpu);
break;
case UCALL_DONE:
process_ucall_done(vcpu);
break;
default:
TEST_ASSERT(false, "Unexpected ucall");
}
return uc.cmd;
}
static void run_guest_then_process_rdmsr(struct kvm_vcpu *vcpu,
uint32_t msr_index)
{
vcpu_run(vcpu);
process_rdmsr(vcpu, msr_index);
}
static void run_guest_then_process_wrmsr(struct kvm_vcpu *vcpu,
uint32_t msr_index)
{
vcpu_run(vcpu);
process_wrmsr(vcpu, msr_index);
}
static uint64_t run_guest_then_process_ucall(struct kvm_vcpu *vcpu)
{
vcpu_run(vcpu);
return process_ucall(vcpu);
}
static void run_guest_then_process_ucall_done(struct kvm_vcpu *vcpu)
{
vcpu_run(vcpu);
process_ucall_done(vcpu);
}
static void test_msr_filter_allow(void)
{
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
int rc;
vm = vm_create_with_one_vcpu(&vcpu, guest_code_filter_allow);
rc = kvm_check_cap(KVM_CAP_X86_USER_SPACE_MSR);
TEST_ASSERT(rc, "KVM_CAP_X86_USER_SPACE_MSR is available");
vm_enable_cap(vm, KVM_CAP_X86_USER_SPACE_MSR, KVM_MSR_EXIT_REASON_FILTER);
rc = kvm_check_cap(KVM_CAP_X86_MSR_FILTER);
TEST_ASSERT(rc, "KVM_CAP_X86_MSR_FILTER is available");
vm_ioctl(vm, KVM_X86_SET_MSR_FILTER, &filter_allow);
vm_init_descriptor_tables(vm);
vcpu_init_descriptor_tables(vcpu);
vm_install_exception_handler(vm, GP_VECTOR, guest_gp_handler);
/* Process guest code userspace exits. */
run_guest_then_process_rdmsr(vcpu, MSR_IA32_XSS);
run_guest_then_process_wrmsr(vcpu, MSR_IA32_XSS);
run_guest_then_process_wrmsr(vcpu, MSR_IA32_XSS);
run_guest_then_process_rdmsr(vcpu, MSR_IA32_FLUSH_CMD);
run_guest_then_process_wrmsr(vcpu, MSR_IA32_FLUSH_CMD);
run_guest_then_process_wrmsr(vcpu, MSR_IA32_FLUSH_CMD);
run_guest_then_process_wrmsr(vcpu, MSR_NON_EXISTENT);
run_guest_then_process_rdmsr(vcpu, MSR_NON_EXISTENT);
vm_install_exception_handler(vm, UD_VECTOR, guest_ud_handler);
vcpu_run(vcpu);
vm_install_exception_handler(vm, UD_VECTOR, NULL);
if (process_ucall(vcpu) != UCALL_DONE) {
vm_install_exception_handler(vm, GP_VECTOR, guest_fep_gp_handler);
/* Process emulated rdmsr and wrmsr instructions. */
run_guest_then_process_rdmsr(vcpu, MSR_IA32_XSS);
run_guest_then_process_wrmsr(vcpu, MSR_IA32_XSS);
run_guest_then_process_wrmsr(vcpu, MSR_IA32_XSS);
run_guest_then_process_rdmsr(vcpu, MSR_IA32_FLUSH_CMD);
run_guest_then_process_wrmsr(vcpu, MSR_IA32_FLUSH_CMD);
run_guest_then_process_wrmsr(vcpu, MSR_IA32_FLUSH_CMD);
run_guest_then_process_wrmsr(vcpu, MSR_NON_EXISTENT);
run_guest_then_process_rdmsr(vcpu, MSR_NON_EXISTENT);
/* Confirm the guest completed without issues. */
run_guest_then_process_ucall_done(vcpu);
} else {
printf("To run the instruction emulated tests set the module parameter 'kvm.force_emulation_prefix=1'\n");
}
kvm_vm_free(vm);
}
static int handle_ucall(struct kvm_vcpu *vcpu)
{
struct ucall uc;
switch (get_ucall(vcpu, &uc)) {
case UCALL_ABORT:
REPORT_GUEST_ASSERT(uc);
break;
case UCALL_SYNC:
vm_ioctl(vcpu->vm, KVM_X86_SET_MSR_FILTER, &no_filter_deny);
break;
case UCALL_DONE:
return 1;
default:
TEST_FAIL("Unknown ucall %lu", uc.cmd);
}
return 0;
}
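/*
 * Userspace RDMSR handler for the deny test: reflect the MSR index back as
 * the read value and check that the exit reason matches how the access was
 * trapped (filter hit vs. unknown MSR).
 */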
static void handle_rdmsr(struct kvm_run *run)
{
run->msr.data = run->msr.index;
msr_reads++;
if (run->msr.index == MSR_SYSCALL_MASK ||
run->msr.index == MSR_GS_BASE) {
TEST_ASSERT(run->msr.reason == KVM_MSR_EXIT_REASON_FILTER,
"MSR read trap w/o access fault");
}
if (run->msr.index == 0xdeadbeef) {
TEST_ASSERT(run->msr.reason == KVM_MSR_EXIT_REASON_UNKNOWN,
"MSR deadbeef read trap w/o inval fault");
}
}
static void handle_wrmsr(struct kvm_run *run)
{
/* ignore */
msr_writes++;
if (run->msr.index == MSR_IA32_POWER_CTL) {
TEST_ASSERT(run->msr.data == 0x1234,
"MSR data for MSR_IA32_POWER_CTL incorrect");
TEST_ASSERT(run->msr.reason == KVM_MSR_EXIT_REASON_FILTER,
"MSR_IA32_POWER_CTL trap w/o access fault");
}
if (run->msr.index == 0xdeadbeef) {
TEST_ASSERT(run->msr.data == 0x1234,
"MSR data for deadbeef incorrect");
TEST_ASSERT(run->msr.reason == KVM_MSR_EXIT_REASON_UNKNOWN,
"deadbeef trap w/o inval fault");
}
}
static void test_msr_filter_deny(void)
{
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
struct kvm_run *run;
int rc;
vm = vm_create_with_one_vcpu(&vcpu, guest_code_filter_deny);
run = vcpu->run;
rc = kvm_check_cap(KVM_CAP_X86_USER_SPACE_MSR);
TEST_ASSERT(rc, "KVM_CAP_X86_USER_SPACE_MSR is available");
vm_enable_cap(vm, KVM_CAP_X86_USER_SPACE_MSR, KVM_MSR_EXIT_REASON_INVAL |
KVM_MSR_EXIT_REASON_UNKNOWN |
KVM_MSR_EXIT_REASON_FILTER);
rc = kvm_check_cap(KVM_CAP_X86_MSR_FILTER);
TEST_ASSERT(rc, "KVM_CAP_X86_MSR_FILTER is available");
prepare_bitmaps();
vm_ioctl(vm, KVM_X86_SET_MSR_FILTER, &filter_deny);
while (1) {
vcpu_run(vcpu);
switch (run->exit_reason) {
case KVM_EXIT_X86_RDMSR:
handle_rdmsr(run);
break;
case KVM_EXIT_X86_WRMSR:
handle_wrmsr(run);
break;
case KVM_EXIT_IO:
if (handle_ucall(vcpu))
goto done;
break;
}
}
done:
TEST_ASSERT(msr_reads == 4, "Handled 4 rdmsr in user space");
TEST_ASSERT(msr_writes == 3, "Handled 3 wrmsr in user space");
kvm_vm_free(vm);
}
static void test_msr_permission_bitmap(void)
{
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
int rc;
vm = vm_create_with_one_vcpu(&vcpu, guest_code_permission_bitmap);
rc = kvm_check_cap(KVM_CAP_X86_USER_SPACE_MSR);
TEST_ASSERT(rc, "KVM_CAP_X86_USER_SPACE_MSR is available");
vm_enable_cap(vm, KVM_CAP_X86_USER_SPACE_MSR, KVM_MSR_EXIT_REASON_FILTER);
rc = kvm_check_cap(KVM_CAP_X86_MSR_FILTER);
TEST_ASSERT(rc, "KVM_CAP_X86_MSR_FILTER is available");
vm_ioctl(vm, KVM_X86_SET_MSR_FILTER, &filter_fs);
run_guest_then_process_rdmsr(vcpu, MSR_FS_BASE);
TEST_ASSERT(run_guest_then_process_ucall(vcpu) == UCALL_SYNC,
"Expected ucall state to be UCALL_SYNC.");
vm_ioctl(vm, KVM_X86_SET_MSR_FILTER, &filter_gs);
run_guest_then_process_rdmsr(vcpu, MSR_GS_BASE);
run_guest_then_process_ucall_done(vcpu);
kvm_vm_free(vm);
}
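/*
 * Issue the ioctl; expect success when the tested flag is within the valid
 * mask, and failure with EINVAL otherwise.
 */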
#define test_user_exit_msr_ioctl(vm, cmd, arg, flag, valid_mask) \
({ \
int r = __vm_ioctl(vm, cmd, arg); \
\
if (flag & valid_mask) \
TEST_ASSERT(!r, __KVM_IOCTL_ERROR(#cmd, r)); \
else \
TEST_ASSERT(r == -1 && errno == EINVAL, \
"Wanted EINVAL for %s with flag = 0x%llx, got rc: %i errno: %i (%s)", \
#cmd, flag, r, errno, strerror(errno)); \
})
static void run_user_space_msr_flag_test(struct kvm_vm *vm)
{
struct kvm_enable_cap cap = { .cap = KVM_CAP_X86_USER_SPACE_MSR };
int nflags = sizeof(cap.args[0]) * BITS_PER_BYTE;
int rc;
int i;
rc = kvm_check_cap(KVM_CAP_X86_USER_SPACE_MSR);
TEST_ASSERT(rc, "KVM_CAP_X86_USER_SPACE_MSR is available");
for (i = 0; i < nflags; i++) {
cap.args[0] = BIT_ULL(i);
test_user_exit_msr_ioctl(vm, KVM_ENABLE_CAP, &cap,
BIT_ULL(i), KVM_MSR_EXIT_REASON_VALID_MASK);
}
}
static void run_msr_filter_flag_test(struct kvm_vm *vm)
{
u64 deny_bits = 0;
struct kvm_msr_filter filter = {
.flags = KVM_MSR_FILTER_DEFAULT_ALLOW,
.ranges = {
{
.flags = KVM_MSR_FILTER_READ,
.nmsrs = 1,
.base = 0,
.bitmap = (uint8_t *)&deny_bits,
},
},
};
int nflags;
int rc;
int i;
rc = kvm_check_cap(KVM_CAP_X86_MSR_FILTER);
TEST_ASSERT(rc, "KVM_CAP_X86_MSR_FILTER is available");
nflags = sizeof(filter.flags) * BITS_PER_BYTE;
for (i = 0; i < nflags; i++) {
filter.flags = BIT_ULL(i);
test_user_exit_msr_ioctl(vm, KVM_X86_SET_MSR_FILTER, &filter,
BIT_ULL(i), KVM_MSR_FILTER_VALID_MASK);
}
filter.flags = KVM_MSR_FILTER_DEFAULT_ALLOW;
nflags = sizeof(filter.ranges[0].flags) * BITS_PER_BYTE;
for (i = 0; i < nflags; i++) {
filter.ranges[0].flags = BIT_ULL(i);
test_user_exit_msr_ioctl(vm, KVM_X86_SET_MSR_FILTER, &filter,
BIT_ULL(i), KVM_MSR_FILTER_RANGE_VALID_MASK);
}
}
/* Test that attempts to write to the unused bits in a flag fails. */
static void test_user_exit_msr_flags(void)
{
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
vm = vm_create_with_one_vcpu(&vcpu, NULL);
/* Test flags for KVM_CAP_X86_USER_SPACE_MSR. */
run_user_space_msr_flag_test(vm);
/* Test flags and range flags for KVM_X86_SET_MSR_FILTER. */
run_msr_filter_flag_test(vm);
kvm_vm_free(vm);
}
int main(int argc, char *argv[])
{
test_msr_filter_allow();
test_msr_filter_deny();
test_msr_permission_bitmap();
test_user_exit_msr_flags();
return 0;
}
| linux-master | tools/testing/selftests/kvm/x86_64/userspace_msr_exit_test.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Tests for MSR_IA32_TSC and MSR_IA32_TSC_ADJUST.
*
* Copyright (C) 2020, Red Hat, Inc.
*/
#include <stdio.h>
#include <string.h>
#include "kvm_util.h"
#include "processor.h"
#define UNITY (1ull << 30)
#define HOST_ADJUST (UNITY * 64)
#define GUEST_STEP (UNITY * 4)
#define ROUND(x) (((x) + UNITY / 2) & -UNITY)
#define rounded_rdmsr(x) ROUND(rdmsr(x))
#define rounded_host_rdmsr(x) ROUND(vcpu_get_msr(vcpu, x))
static void guest_code(void)
{
u64 val = 0;
GUEST_ASSERT_EQ(rounded_rdmsr(MSR_IA32_TSC), val);
GUEST_ASSERT_EQ(rounded_rdmsr(MSR_IA32_TSC_ADJUST), val);
/* Guest: writes to MSR_IA32_TSC affect both MSRs. */
val = 1ull * GUEST_STEP;
wrmsr(MSR_IA32_TSC, val);
GUEST_ASSERT_EQ(rounded_rdmsr(MSR_IA32_TSC), val);
GUEST_ASSERT_EQ(rounded_rdmsr(MSR_IA32_TSC_ADJUST), val);
/* Guest: writes to MSR_IA32_TSC_ADJUST affect both MSRs. */
GUEST_SYNC(2);
val = 2ull * GUEST_STEP;
wrmsr(MSR_IA32_TSC_ADJUST, val);
GUEST_ASSERT_EQ(rounded_rdmsr(MSR_IA32_TSC), val);
GUEST_ASSERT_EQ(rounded_rdmsr(MSR_IA32_TSC_ADJUST), val);
/* Host: setting the TSC offset. */
GUEST_SYNC(3);
GUEST_ASSERT_EQ(rounded_rdmsr(MSR_IA32_TSC), HOST_ADJUST + val);
GUEST_ASSERT_EQ(rounded_rdmsr(MSR_IA32_TSC_ADJUST), val);
/*
* Guest: writes to MSR_IA32_TSC_ADJUST do not destroy the
* host-side offset and affect both MSRs.
*/
GUEST_SYNC(4);
val = 3ull * GUEST_STEP;
wrmsr(MSR_IA32_TSC_ADJUST, val);
GUEST_ASSERT_EQ(rounded_rdmsr(MSR_IA32_TSC), HOST_ADJUST + val);
GUEST_ASSERT_EQ(rounded_rdmsr(MSR_IA32_TSC_ADJUST), val);
/*
* Guest: writes to MSR_IA32_TSC affect both MSRs, so the host-side
* offset is now visible in MSR_IA32_TSC_ADJUST.
*/
GUEST_SYNC(5);
val = 4ull * GUEST_STEP;
wrmsr(MSR_IA32_TSC, val);
GUEST_ASSERT_EQ(rounded_rdmsr(MSR_IA32_TSC), val);
GUEST_ASSERT_EQ(rounded_rdmsr(MSR_IA32_TSC_ADJUST), val - HOST_ADJUST);
GUEST_DONE();
}
static void run_vcpu(struct kvm_vcpu *vcpu, int stage)
{
struct ucall uc;
vcpu_run(vcpu);
switch (get_ucall(vcpu, &uc)) {
case UCALL_SYNC:
if (!strcmp((const char *)uc.args[0], "hello") &&
uc.args[1] == stage + 1)
ksft_test_result_pass("stage %d passed\n", stage + 1);
else
ksft_test_result_fail(
"stage %d: Unexpected register values vmexit, got %lx",
stage + 1, (ulong)uc.args[1]);
return;
case UCALL_DONE:
ksft_test_result_pass("stage %d passed\n", stage + 1);
return;
case UCALL_ABORT:
REPORT_GUEST_ASSERT(uc);
default:
TEST_ASSERT(false, "Unexpected exit: %s",
exit_reason_str(vcpu->run->exit_reason));
}
}
int main(void)
{
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
uint64_t val;
ksft_print_header();
ksft_set_plan(5);
vm = vm_create_with_one_vcpu(&vcpu, guest_code);
val = 0;
TEST_ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), val);
TEST_ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC_ADJUST), val);
/* Guest: writes to MSR_IA32_TSC affect both MSRs. */
run_vcpu(vcpu, 1);
val = 1ull * GUEST_STEP;
TEST_ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), val);
TEST_ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC_ADJUST), val);
/* Guest: writes to MSR_IA32_TSC_ADJUST affect both MSRs. */
run_vcpu(vcpu, 2);
val = 2ull * GUEST_STEP;
TEST_ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), val);
TEST_ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC_ADJUST), val);
/*
* Host: writes to MSR_IA32_TSC set the host-side offset
* and therefore do not change MSR_IA32_TSC_ADJUST.
*/
vcpu_set_msr(vcpu, MSR_IA32_TSC, HOST_ADJUST + val);
TEST_ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), HOST_ADJUST + val);
TEST_ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC_ADJUST), val);
run_vcpu(vcpu, 3);
/* Host: writes to MSR_IA32_TSC_ADJUST do not modify the TSC. */
vcpu_set_msr(vcpu, MSR_IA32_TSC_ADJUST, UNITY * 123456);
TEST_ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), HOST_ADJUST + val);
TEST_ASSERT_EQ(vcpu_get_msr(vcpu, MSR_IA32_TSC_ADJUST), UNITY * 123456);
/* Restore previous value. */
vcpu_set_msr(vcpu, MSR_IA32_TSC_ADJUST, val);
TEST_ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), HOST_ADJUST + val);
TEST_ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC_ADJUST), val);
/*
* Guest: writes to MSR_IA32_TSC_ADJUST do not destroy the
* host-side offset and affect both MSRs.
*/
run_vcpu(vcpu, 4);
val = 3ull * GUEST_STEP;
TEST_ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), HOST_ADJUST + val);
TEST_ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC_ADJUST), val);
/*
* Guest: writes to MSR_IA32_TSC affect both MSRs, so the host-side
* offset is now visible in MSR_IA32_TSC_ADJUST.
*/
run_vcpu(vcpu, 5);
val = 4ull * GUEST_STEP;
TEST_ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), val);
TEST_ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC_ADJUST), val - HOST_ADJUST);
kvm_vm_free(vm);
ksft_finished(); /* Print results and exit() accordingly */
}
| linux-master | tools/testing/selftests/kvm/x86_64/tsc_msrs_test.c |
// SPDX-License-Identifier: GPL-2.0
/*
* KVM dirty logging page splitting test
*
* Based on dirty_log_perf.c
*
* Copyright (C) 2018, Red Hat, Inc.
* Copyright (C) 2023, Google, Inc.
*/
#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>
#include <linux/bitmap.h>
#include "kvm_util.h"
#include "test_util.h"
#include "memstress.h"
#include "guest_modes.h"
#define VCPUS 2
#define SLOTS 2
#define ITERATIONS 2
static uint64_t guest_percpu_mem_size = DEFAULT_PER_VCPU_MEM_SIZE;
static enum vm_mem_backing_src_type backing_src = VM_MEM_SRC_ANONYMOUS_HUGETLB;
static u64 dirty_log_manual_caps;
static bool host_quit;
static int iteration;
static int vcpu_last_completed_iteration[KVM_MAX_VCPUS];
struct kvm_page_stats {
uint64_t pages_4k;
uint64_t pages_2m;
uint64_t pages_1g;
uint64_t hugepages;
};
static void get_page_stats(struct kvm_vm *vm, struct kvm_page_stats *stats, const char *stage)
{
stats->pages_4k = vm_get_stat(vm, "pages_4k");
stats->pages_2m = vm_get_stat(vm, "pages_2m");
stats->pages_1g = vm_get_stat(vm, "pages_1g");
stats->hugepages = stats->pages_2m + stats->pages_1g;
pr_debug("\nPage stats after %s: 4K: %ld 2M: %ld 1G: %ld huge: %ld\n",
stage, stats->pages_4k, stats->pages_2m, stats->pages_1g,
stats->hugepages);
}
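/*
 * Advance the global iteration counter and wait for every vCPU to dirty its
 * memory and report that it finished the iteration.
 */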
static void run_vcpu_iteration(struct kvm_vm *vm)
{
int i;
iteration++;
for (i = 0; i < VCPUS; i++) {
while (READ_ONCE(vcpu_last_completed_iteration[i]) !=
iteration)
;
}
}
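/*
 * vCPU worker: run the guest until it syncs, record the iteration that was
 * just completed, then spin until the host starts the next iteration or
 * asks the workers to quit.
 */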
static void vcpu_worker(struct memstress_vcpu_args *vcpu_args)
{
struct kvm_vcpu *vcpu = vcpu_args->vcpu;
int vcpu_idx = vcpu_args->vcpu_idx;
while (!READ_ONCE(host_quit)) {
int current_iteration = READ_ONCE(iteration);
vcpu_run(vcpu);
TEST_ASSERT_EQ(get_ucall(vcpu, NULL), UCALL_SYNC);
vcpu_last_completed_iteration[vcpu_idx] = current_iteration;
/* Wait for the start of the next iteration to be signaled. */
while (current_iteration == READ_ONCE(iteration) &&
READ_ONCE(iteration) >= 0 &&
!READ_ONCE(host_quit))
;
}
}
static void run_test(enum vm_guest_mode mode, void *unused)
{
struct kvm_vm *vm;
unsigned long **bitmaps;
uint64_t guest_num_pages;
uint64_t host_num_pages;
uint64_t pages_per_slot;
int i;
uint64_t total_4k_pages;
struct kvm_page_stats stats_populated;
struct kvm_page_stats stats_dirty_logging_enabled;
struct kvm_page_stats stats_dirty_pass[ITERATIONS];
struct kvm_page_stats stats_clear_pass[ITERATIONS];
struct kvm_page_stats stats_dirty_logging_disabled;
struct kvm_page_stats stats_repopulated;
vm = memstress_create_vm(mode, VCPUS, guest_percpu_mem_size,
SLOTS, backing_src, false);
guest_num_pages = (VCPUS * guest_percpu_mem_size) >> vm->page_shift;
guest_num_pages = vm_adjust_num_guest_pages(mode, guest_num_pages);
host_num_pages = vm_num_host_pages(mode, guest_num_pages);
pages_per_slot = host_num_pages / SLOTS;
bitmaps = memstress_alloc_bitmaps(SLOTS, pages_per_slot);
if (dirty_log_manual_caps)
vm_enable_cap(vm, KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2,
dirty_log_manual_caps);
/* Start the iterations */
iteration = -1;
host_quit = false;
for (i = 0; i < VCPUS; i++)
vcpu_last_completed_iteration[i] = -1;
memstress_start_vcpu_threads(VCPUS, vcpu_worker);
run_vcpu_iteration(vm);
get_page_stats(vm, &stats_populated, "populating memory");
/* Enable dirty logging */
memstress_enable_dirty_logging(vm, SLOTS);
get_page_stats(vm, &stats_dirty_logging_enabled, "enabling dirty logging");
while (iteration < ITERATIONS) {
run_vcpu_iteration(vm);
get_page_stats(vm, &stats_dirty_pass[iteration - 1],
"dirtying memory");
memstress_get_dirty_log(vm, bitmaps, SLOTS);
if (dirty_log_manual_caps) {
memstress_clear_dirty_log(vm, bitmaps, SLOTS, pages_per_slot);
get_page_stats(vm, &stats_clear_pass[iteration - 1], "clearing dirty log");
}
}
/* Disable dirty logging */
memstress_disable_dirty_logging(vm, SLOTS);
get_page_stats(vm, &stats_dirty_logging_disabled, "disabling dirty logging");
/* Run vCPUs again to fault pages back in. */
run_vcpu_iteration(vm);
get_page_stats(vm, &stats_repopulated, "repopulating memory");
/*
* Tell the vCPU threads to quit. No need to manually check that vCPUs
* have stopped running after disabling dirty logging, the join will
* wait for them to exit.
*/
host_quit = true;
memstress_join_vcpu_threads(VCPUS);
memstress_free_bitmaps(bitmaps, SLOTS);
memstress_destroy_vm(vm);
/* Make assertions about the page counts. */
total_4k_pages = stats_populated.pages_4k;
total_4k_pages += stats_populated.pages_2m * 512;
total_4k_pages += stats_populated.pages_1g * 512 * 512;
/*
* Check that all huge pages were split. Since large pages can only
* exist in the data slot, and the vCPUs should have dirtied all pages
* in the data slot, there should be no huge pages left after splitting.
* Splitting happens at dirty log enable time without
* KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 and after the first clear pass
* with that capability.
*/
if (dirty_log_manual_caps) {
TEST_ASSERT_EQ(stats_clear_pass[0].hugepages, 0);
TEST_ASSERT_EQ(stats_clear_pass[0].pages_4k, total_4k_pages);
TEST_ASSERT_EQ(stats_dirty_logging_enabled.hugepages, stats_populated.hugepages);
} else {
TEST_ASSERT_EQ(stats_dirty_logging_enabled.hugepages, 0);
TEST_ASSERT_EQ(stats_dirty_logging_enabled.pages_4k, total_4k_pages);
}
/*
* Once dirty logging is disabled and the vCPUs have touched all their
* memory again, the page counts should be the same as they were
* right after initial population of memory.
*/
TEST_ASSERT_EQ(stats_populated.pages_4k, stats_repopulated.pages_4k);
TEST_ASSERT_EQ(stats_populated.pages_2m, stats_repopulated.pages_2m);
TEST_ASSERT_EQ(stats_populated.pages_1g, stats_repopulated.pages_1g);
}
static void help(char *name)
{
puts("");
printf("usage: %s [-h] [-b vcpu bytes] [-s mem type]\n",
name);
puts("");
printf(" -b: specify the size of the memory region which should be\n"
" dirtied by each vCPU. e.g. 10M or 3G.\n"
" (default: 1G)\n");
backing_src_help("-s");
puts("");
}
int main(int argc, char *argv[])
{
int opt;
TEST_REQUIRE(get_kvm_param_bool("eager_page_split"));
TEST_REQUIRE(get_kvm_param_bool("tdp_mmu"));
while ((opt = getopt(argc, argv, "b:hs:")) != -1) {
switch (opt) {
case 'b':
guest_percpu_mem_size = parse_size(optarg);
break;
case 'h':
help(argv[0]);
exit(0);
case 's':
backing_src = parse_backing_src_type(optarg);
break;
default:
help(argv[0]);
exit(1);
}
}
if (!is_backing_src_hugetlb(backing_src)) {
pr_info("This test will only work reliably with HugeTLB memory. "
"It can work with THP, but that is best effort.\n");
}
guest_modes_append_default();
dirty_log_manual_caps = 0;
for_each_guest_mode(run_test, NULL);
dirty_log_manual_caps =
kvm_check_cap(KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2);
if (dirty_log_manual_caps) {
dirty_log_manual_caps &= (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE |
KVM_DIRTY_LOG_INITIALLY_SET);
for_each_guest_mode(run_test, NULL);
} else {
pr_info("Skipping testing with MANUAL_PROTECT as it is not supported");
}
return 0;
}
| linux-master | tools/testing/selftests/kvm/x86_64/dirty_log_page_splitting_test.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* VMX-preemption timer test
*
* Copyright (C) 2020, Google, LLC.
*
 * Test to ensure that VM-Enter after migration doesn't
 * incorrectly restart the timer with the full timer
 * value instead of the partially decayed timer value
*
*/
#define _GNU_SOURCE /* for program_invocation_short_name */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"
#include "vmx.h"
#define PREEMPTION_TIMER_VALUE 100000000ull
#define PREEMPTION_TIMER_VALUE_THRESHOLD1 80000000ull
u32 vmx_pt_rate;
bool l2_save_restore_done;
static u64 l2_vmx_pt_start;
volatile u64 l2_vmx_pt_finish;
union vmx_basic basic;
union vmx_ctrl_msr ctrl_pin_rev;
union vmx_ctrl_msr ctrl_exit_rev;
void l2_guest_code(void)
{
u64 vmx_pt_delta;
vmcall();
l2_vmx_pt_start = (rdtsc() >> vmx_pt_rate) << vmx_pt_rate;
/*
* Wait until the 1st threshold has passed
*/
do {
l2_vmx_pt_finish = rdtsc();
vmx_pt_delta = (l2_vmx_pt_finish - l2_vmx_pt_start) >>
vmx_pt_rate;
} while (vmx_pt_delta < PREEMPTION_TIMER_VALUE_THRESHOLD1);
/*
* Force L2 through Save and Restore cycle
*/
GUEST_SYNC(1);
l2_save_restore_done = 1;
/*
* Now wait for the preemption timer to fire and
* exit to L1
*/
while ((l2_vmx_pt_finish = rdtsc()))
;
}
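/*
 * L1 launches L2 with the VMX preemption timer armed and verifies that the
 * eventual exit back to L1 is caused by timer expiry, and only after L2 has
 * been through a save/restore cycle.
 */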
void l1_guest_code(struct vmx_pages *vmx_pages)
{
#define L2_GUEST_STACK_SIZE 64
unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
u64 l1_vmx_pt_start;
u64 l1_vmx_pt_finish;
u64 l1_tsc_deadline, l2_tsc_deadline;
GUEST_ASSERT(vmx_pages->vmcs_gpa);
GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
GUEST_ASSERT(load_vmcs(vmx_pages));
GUEST_ASSERT(vmptrstz() == vmx_pages->vmcs_gpa);
prepare_vmcs(vmx_pages, l2_guest_code,
&l2_guest_stack[L2_GUEST_STACK_SIZE]);
/*
* Check for Preemption timer support
*/
basic.val = rdmsr(MSR_IA32_VMX_BASIC);
ctrl_pin_rev.val = rdmsr(basic.ctrl ? MSR_IA32_VMX_TRUE_PINBASED_CTLS
: MSR_IA32_VMX_PINBASED_CTLS);
ctrl_exit_rev.val = rdmsr(basic.ctrl ? MSR_IA32_VMX_TRUE_EXIT_CTLS
: MSR_IA32_VMX_EXIT_CTLS);
if (!(ctrl_pin_rev.clr & PIN_BASED_VMX_PREEMPTION_TIMER) ||
!(ctrl_exit_rev.clr & VM_EXIT_SAVE_VMX_PREEMPTION_TIMER))
return;
GUEST_ASSERT(!vmlaunch());
GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);
vmwrite(GUEST_RIP, vmreadz(GUEST_RIP) + vmreadz(VM_EXIT_INSTRUCTION_LEN));
/*
* Turn on PIN control and resume the guest
*/
GUEST_ASSERT(!vmwrite(PIN_BASED_VM_EXEC_CONTROL,
vmreadz(PIN_BASED_VM_EXEC_CONTROL) |
PIN_BASED_VMX_PREEMPTION_TIMER));
GUEST_ASSERT(!vmwrite(VMX_PREEMPTION_TIMER_VALUE,
PREEMPTION_TIMER_VALUE));
vmx_pt_rate = rdmsr(MSR_IA32_VMX_MISC) & 0x1F;
l2_save_restore_done = 0;
l1_vmx_pt_start = (rdtsc() >> vmx_pt_rate) << vmx_pt_rate;
GUEST_ASSERT(!vmresume());
l1_vmx_pt_finish = rdtsc();
/*
* Ensure exit from L2 happens after L2 goes through
* save and restore
*/
GUEST_ASSERT(l2_save_restore_done);
/*
* Ensure the exit from L2 is due to preemption timer expiry
*/
GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_PREEMPTION_TIMER);
l1_tsc_deadline = l1_vmx_pt_start +
(PREEMPTION_TIMER_VALUE << vmx_pt_rate);
l2_tsc_deadline = l2_vmx_pt_start +
(PREEMPTION_TIMER_VALUE << vmx_pt_rate);
/*
* Sync with the host and pass the l1|l2 pt_expiry_finish times and
* tsc deadlines so that host can verify they are as expected
*/
GUEST_SYNC_ARGS(2, l1_vmx_pt_finish, l1_tsc_deadline,
l2_vmx_pt_finish, l2_tsc_deadline);
}
void guest_code(struct vmx_pages *vmx_pages)
{
if (vmx_pages)
l1_guest_code(vmx_pages);
GUEST_DONE();
}
int main(int argc, char *argv[])
{
vm_vaddr_t vmx_pages_gva = 0;
struct kvm_regs regs1, regs2;
struct kvm_vm *vm;
struct kvm_vcpu *vcpu;
struct kvm_x86_state *state;
struct ucall uc;
int stage;
/*
* AMD currently does not implement any VMX features, so for now we
* just early out.
*/
TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_VMX));
TEST_REQUIRE(kvm_has_cap(KVM_CAP_NESTED_STATE));
/* Create VM */
vm = vm_create_with_one_vcpu(&vcpu, guest_code);
vcpu_regs_get(vcpu, ®s1);
vcpu_alloc_vmx(vm, &vmx_pages_gva);
vcpu_args_set(vcpu, 1, vmx_pages_gva);
for (stage = 1;; stage++) {
vcpu_run(vcpu);
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
switch (get_ucall(vcpu, &uc)) {
case UCALL_ABORT:
REPORT_GUEST_ASSERT(uc);
/* NOT REACHED */
case UCALL_SYNC:
break;
case UCALL_DONE:
goto done;
default:
TEST_FAIL("Unknown ucall %lu", uc.cmd);
}
/* UCALL_SYNC is handled here. */
TEST_ASSERT(!strcmp((const char *)uc.args[0], "hello") &&
uc.args[1] == stage, "Stage %d: Unexpected register values vmexit, got %lx",
stage, (ulong)uc.args[1]);
/*
		 * If this is stage 2 then we should verify that the VMX
		 * preemption timer expiry is as expected.
* From L1's perspective verify Preemption timer hasn't
* expired too early.
* From L2's perspective verify Preemption timer hasn't
* expired too late.
*/
if (stage == 2) {
pr_info("Stage %d: L1 PT expiry TSC (%lu) , L1 TSC deadline (%lu)\n",
stage, uc.args[2], uc.args[3]);
pr_info("Stage %d: L2 PT expiry TSC (%lu) , L2 TSC deadline (%lu)\n",
stage, uc.args[4], uc.args[5]);
TEST_ASSERT(uc.args[2] >= uc.args[3],
"Stage %d: L1 PT expiry TSC (%lu) < L1 TSC deadline (%lu)",
stage, uc.args[2], uc.args[3]);
TEST_ASSERT(uc.args[4] < uc.args[5],
"Stage %d: L2 PT expiry TSC (%lu) > L2 TSC deadline (%lu)",
stage, uc.args[4], uc.args[5]);
}
state = vcpu_save_state(vcpu);
memset(®s1, 0, sizeof(regs1));
vcpu_regs_get(vcpu, ®s1);
kvm_vm_release(vm);
/* Restore state in a new VM. */
vcpu = vm_recreate_with_one_vcpu(vm);
vcpu_load_state(vcpu, state);
kvm_x86_state_cleanup(state);
memset(®s2, 0, sizeof(regs2));
vcpu_regs_get(vcpu, ®s2);
TEST_ASSERT(!memcmp(®s1, ®s2, sizeof(regs2)),
"Unexpected register values after vcpu_load_state; rdi: %lx rsi: %lx",
(ulong) regs2.rdi, (ulong) regs2.rsi);
}
done:
kvm_vm_free(vm);
}
| linux-master | tools/testing/selftests/kvm/x86_64/vmx_preemption_timer_test.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Test that KVM_SET_BOOT_CPU_ID works as intended
*
* Copyright (C) 2020, Red Hat, Inc.
*/
#define _GNU_SOURCE /* for program_invocation_name */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"
#include "apic.h"
static void guest_bsp_vcpu(void *arg)
{
GUEST_SYNC(1);
GUEST_ASSERT_NE(get_bsp_flag(), 0);
GUEST_DONE();
}
static void guest_not_bsp_vcpu(void *arg)
{
GUEST_SYNC(1);
GUEST_ASSERT_EQ(get_bsp_flag(), 0);
GUEST_DONE();
}
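/* KVM_SET_BOOT_CPU_ID is expected to fail with EBUSY once vCPUs have been created. */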
static void test_set_bsp_busy(struct kvm_vcpu *vcpu, const char *msg)
{
int r = __vm_ioctl(vcpu->vm, KVM_SET_BOOT_CPU_ID,
(void *)(unsigned long)vcpu->id);
TEST_ASSERT(r == -1 && errno == EBUSY, "KVM_SET_BOOT_CPU_ID set %s", msg);
}
static void run_vcpu(struct kvm_vcpu *vcpu)
{
struct ucall uc;
int stage;
for (stage = 0; stage < 2; stage++) {
vcpu_run(vcpu);
switch (get_ucall(vcpu, &uc)) {
case UCALL_SYNC:
TEST_ASSERT(!strcmp((const char *)uc.args[0], "hello") &&
uc.args[1] == stage + 1,
"Stage %d: Unexpected register values vmexit, got %lx",
stage + 1, (ulong)uc.args[1]);
test_set_bsp_busy(vcpu, "while running vm");
break;
case UCALL_DONE:
TEST_ASSERT(stage == 1,
"Expected GUEST_DONE in stage 2, got stage %d",
stage);
break;
case UCALL_ABORT:
REPORT_GUEST_ASSERT(uc);
default:
TEST_ASSERT(false, "Unexpected exit: %s",
exit_reason_str(vcpu->run->exit_reason));
}
}
}
static struct kvm_vm *create_vm(uint32_t nr_vcpus, uint32_t bsp_vcpu_id,
struct kvm_vcpu *vcpus[])
{
struct kvm_vm *vm;
uint32_t i;
vm = vm_create(nr_vcpus);
vm_ioctl(vm, KVM_SET_BOOT_CPU_ID, (void *)(unsigned long)bsp_vcpu_id);
for (i = 0; i < nr_vcpus; i++)
vcpus[i] = vm_vcpu_add(vm, i, i == bsp_vcpu_id ? guest_bsp_vcpu :
guest_not_bsp_vcpu);
return vm;
}
static void run_vm_bsp(uint32_t bsp_vcpu_id)
{
struct kvm_vcpu *vcpus[2];
struct kvm_vm *vm;
vm = create_vm(ARRAY_SIZE(vcpus), bsp_vcpu_id, vcpus);
run_vcpu(vcpus[0]);
run_vcpu(vcpus[1]);
kvm_vm_free(vm);
}
static void check_set_bsp_busy(void)
{
struct kvm_vcpu *vcpus[2];
struct kvm_vm *vm;
vm = create_vm(ARRAY_SIZE(vcpus), 0, vcpus);
test_set_bsp_busy(vcpus[1], "after adding vcpu");
run_vcpu(vcpus[0]);
run_vcpu(vcpus[1]);
test_set_bsp_busy(vcpus[1], "to a terminated vcpu");
kvm_vm_free(vm);
}
int main(int argc, char *argv[])
{
TEST_REQUIRE(kvm_has_cap(KVM_CAP_SET_BOOT_CPU_ID));
run_vm_bsp(0);
run_vm_bsp(1);
run_vm_bsp(0);
check_set_bsp_busy();
}
| linux-master | tools/testing/selftests/kvm/x86_64/set_boot_cpu_id.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* svm_vmcall_test
*
* Copyright © 2021 Amazon.com, Inc. or its affiliates.
*
* Xen shared_info / pvclock testing
*/
#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"
#include <stdint.h>
#include <time.h>
#include <sched.h>
#include <signal.h>
#include <pthread.h>
#include <sys/eventfd.h>
#define SHINFO_REGION_GVA 0xc0000000ULL
#define SHINFO_REGION_GPA 0xc0000000ULL
#define SHINFO_REGION_SLOT 10
#define DUMMY_REGION_GPA (SHINFO_REGION_GPA + (3 * PAGE_SIZE))
#define DUMMY_REGION_SLOT 11
#define DUMMY_REGION_GPA_2 (SHINFO_REGION_GPA + (4 * PAGE_SIZE))
#define DUMMY_REGION_SLOT_2 12
#define SHINFO_ADDR (SHINFO_REGION_GPA)
#define VCPU_INFO_ADDR (SHINFO_REGION_GPA + 0x40)
#define PVTIME_ADDR (SHINFO_REGION_GPA + PAGE_SIZE)
#define RUNSTATE_ADDR (SHINFO_REGION_GPA + PAGE_SIZE + PAGE_SIZE - 15)
#define SHINFO_VADDR (SHINFO_REGION_GVA)
#define VCPU_INFO_VADDR (SHINFO_REGION_GVA + 0x40)
#define RUNSTATE_VADDR (SHINFO_REGION_GVA + PAGE_SIZE + PAGE_SIZE - 15)
#define EVTCHN_VECTOR 0x10
#define EVTCHN_TEST1 15
#define EVTCHN_TEST2 66
#define EVTCHN_TIMER 13
enum {
TEST_INJECT_VECTOR = 0,
TEST_RUNSTATE_runnable,
TEST_RUNSTATE_blocked,
TEST_RUNSTATE_offline,
TEST_RUNSTATE_ADJUST,
TEST_RUNSTATE_DATA,
TEST_STEAL_TIME,
TEST_EVTCHN_MASKED,
TEST_EVTCHN_UNMASKED,
TEST_EVTCHN_SLOWPATH,
TEST_EVTCHN_SEND_IOCTL,
TEST_EVTCHN_HCALL,
TEST_EVTCHN_HCALL_SLOWPATH,
TEST_EVTCHN_HCALL_EVENTFD,
TEST_TIMER_SETUP,
TEST_TIMER_WAIT,
TEST_TIMER_RESTORE,
TEST_POLL_READY,
TEST_POLL_TIMEOUT,
TEST_POLL_MASKED,
TEST_POLL_WAKE,
TEST_TIMER_PAST,
TEST_LOCKING_SEND_RACE,
TEST_LOCKING_POLL_RACE,
TEST_LOCKING_POLL_TIMEOUT,
TEST_DONE,
TEST_GUEST_SAW_IRQ,
};
#define XEN_HYPERCALL_MSR 0x40000000
#define MIN_STEAL_TIME 50000
#define SHINFO_RACE_TIMEOUT 2 /* seconds */
#define __HYPERVISOR_set_timer_op 15
#define __HYPERVISOR_sched_op 29
#define __HYPERVISOR_event_channel_op 32
#define SCHEDOP_poll 3
#define EVTCHNOP_send 4
#define EVTCHNSTAT_interdomain 2
struct evtchn_send {
u32 port;
};
struct sched_poll {
u32 *ports;
unsigned int nr_ports;
u64 timeout;
};
struct pvclock_vcpu_time_info {
u32 version;
u32 pad0;
u64 tsc_timestamp;
u64 system_time;
u32 tsc_to_system_mul;
s8 tsc_shift;
u8 flags;
u8 pad[2];
} __attribute__((__packed__)); /* 32 bytes */
struct pvclock_wall_clock {
u32 version;
u32 sec;
u32 nsec;
} __attribute__((__packed__));
struct vcpu_runstate_info {
uint32_t state;
uint64_t state_entry_time;
uint64_t time[5]; /* Extra field for overrun check */
};
struct compat_vcpu_runstate_info {
uint32_t state;
uint64_t state_entry_time;
uint64_t time[5];
} __attribute__((__packed__));
struct arch_vcpu_info {
unsigned long cr2;
unsigned long pad; /* sizeof(vcpu_info_t) == 64 */
};
struct vcpu_info {
uint8_t evtchn_upcall_pending;
uint8_t evtchn_upcall_mask;
unsigned long evtchn_pending_sel;
struct arch_vcpu_info arch;
struct pvclock_vcpu_time_info time;
}; /* 64 bytes (x86) */
struct shared_info {
struct vcpu_info vcpu_info[32];
unsigned long evtchn_pending[64];
unsigned long evtchn_mask[64];
struct pvclock_wall_clock wc;
uint32_t wc_sec_hi;
/* arch_shared_info here */
};
#define RUNSTATE_running 0
#define RUNSTATE_runnable 1
#define RUNSTATE_blocked 2
#define RUNSTATE_offline 3
static const char *runstate_names[] = {
"running",
"runnable",
"blocked",
"offline"
};
struct {
struct kvm_irq_routing info;
struct kvm_irq_routing_entry entries[2];
} irq_routes;
static volatile bool guest_saw_irq;
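/*
 * Upcall vector handler: acknowledge the pending upcall in the vcpu_info and
 * let both the guest and the host know that the IRQ was delivered.
 */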
static void evtchn_handler(struct ex_regs *regs)
{
struct vcpu_info *vi = (void *)VCPU_INFO_VADDR;
vi->evtchn_upcall_pending = 0;
vi->evtchn_pending_sel = 0;
guest_saw_irq = true;
GUEST_SYNC(TEST_GUEST_SAW_IRQ);
}
static void guest_wait_for_irq(void)
{
while (!guest_saw_irq)
__asm__ __volatile__ ("rep nop" : : : "memory");
guest_saw_irq = false;
}
static void guest_code(void)
{
struct vcpu_runstate_info *rs = (void *)RUNSTATE_VADDR;
int i;
__asm__ __volatile__(
"sti\n"
"nop\n"
);
/* Trigger an interrupt injection */
GUEST_SYNC(TEST_INJECT_VECTOR);
guest_wait_for_irq();
/* Test having the host set runstates manually */
GUEST_SYNC(TEST_RUNSTATE_runnable);
GUEST_ASSERT(rs->time[RUNSTATE_runnable] != 0);
GUEST_ASSERT(rs->state == 0);
GUEST_SYNC(TEST_RUNSTATE_blocked);
GUEST_ASSERT(rs->time[RUNSTATE_blocked] != 0);
GUEST_ASSERT(rs->state == 0);
GUEST_SYNC(TEST_RUNSTATE_offline);
GUEST_ASSERT(rs->time[RUNSTATE_offline] != 0);
GUEST_ASSERT(rs->state == 0);
/* Test runstate time adjust */
GUEST_SYNC(TEST_RUNSTATE_ADJUST);
GUEST_ASSERT(rs->time[RUNSTATE_blocked] == 0x5a);
GUEST_ASSERT(rs->time[RUNSTATE_offline] == 0x6b6b);
/* Test runstate time set */
GUEST_SYNC(TEST_RUNSTATE_DATA);
GUEST_ASSERT(rs->state_entry_time >= 0x8000);
GUEST_ASSERT(rs->time[RUNSTATE_runnable] == 0);
GUEST_ASSERT(rs->time[RUNSTATE_blocked] == 0x6b6b);
GUEST_ASSERT(rs->time[RUNSTATE_offline] == 0x5a);
/* sched_yield() should result in some 'runnable' time */
GUEST_SYNC(TEST_STEAL_TIME);
GUEST_ASSERT(rs->time[RUNSTATE_runnable] >= MIN_STEAL_TIME);
/* Attempt to deliver a *masked* interrupt */
GUEST_SYNC(TEST_EVTCHN_MASKED);
/* Wait until we see the bit set */
struct shared_info *si = (void *)SHINFO_VADDR;
while (!si->evtchn_pending[0])
__asm__ __volatile__ ("rep nop" : : : "memory");
/* Now deliver an *unmasked* interrupt */
GUEST_SYNC(TEST_EVTCHN_UNMASKED);
guest_wait_for_irq();
/* Change memslots and deliver an interrupt */
GUEST_SYNC(TEST_EVTCHN_SLOWPATH);
guest_wait_for_irq();
/* Deliver event channel with KVM_XEN_HVM_EVTCHN_SEND */
GUEST_SYNC(TEST_EVTCHN_SEND_IOCTL);
guest_wait_for_irq();
GUEST_SYNC(TEST_EVTCHN_HCALL);
/* Our turn. Deliver event channel (to ourselves) with
* EVTCHNOP_send hypercall. */
struct evtchn_send s = { .port = 127 };
xen_hypercall(__HYPERVISOR_event_channel_op, EVTCHNOP_send, &s);
guest_wait_for_irq();
GUEST_SYNC(TEST_EVTCHN_HCALL_SLOWPATH);
/*
* Same again, but this time the host has messed with memslots so it
* should take the slow path in kvm_xen_set_evtchn().
*/
xen_hypercall(__HYPERVISOR_event_channel_op, EVTCHNOP_send, &s);
guest_wait_for_irq();
GUEST_SYNC(TEST_EVTCHN_HCALL_EVENTFD);
/* Deliver "outbound" event channel to an eventfd which
* happens to be one of our own irqfds. */
s.port = 197;
xen_hypercall(__HYPERVISOR_event_channel_op, EVTCHNOP_send, &s);
guest_wait_for_irq();
GUEST_SYNC(TEST_TIMER_SETUP);
/* Set a timer 100ms in the future. */
xen_hypercall(__HYPERVISOR_set_timer_op,
rs->state_entry_time + 100000000, NULL);
GUEST_SYNC(TEST_TIMER_WAIT);
/* Now wait for the timer */
guest_wait_for_irq();
GUEST_SYNC(TEST_TIMER_RESTORE);
/* The host has 'restored' the timer. Just wait for it. */
guest_wait_for_irq();
GUEST_SYNC(TEST_POLL_READY);
/* Poll for an event channel port which is already set */
u32 ports[1] = { EVTCHN_TIMER };
struct sched_poll p = {
.ports = ports,
.nr_ports = 1,
.timeout = 0,
};
xen_hypercall(__HYPERVISOR_sched_op, SCHEDOP_poll, &p);
GUEST_SYNC(TEST_POLL_TIMEOUT);
/* Poll for an unset port and wait for the timeout. */
p.timeout = 100000000;
xen_hypercall(__HYPERVISOR_sched_op, SCHEDOP_poll, &p);
GUEST_SYNC(TEST_POLL_MASKED);
/* A timer will wake the masked port we're waiting on, while we poll */
p.timeout = 0;
xen_hypercall(__HYPERVISOR_sched_op, SCHEDOP_poll, &p);
GUEST_SYNC(TEST_POLL_WAKE);
	/* A timer will wake an *unmasked* port which should wake us with an
	 * actual interrupt, while we're polling on a different port. */
ports[0]++;
p.timeout = 0;
xen_hypercall(__HYPERVISOR_sched_op, SCHEDOP_poll, &p);
guest_wait_for_irq();
GUEST_SYNC(TEST_TIMER_PAST);
/* Timer should have fired already */
guest_wait_for_irq();
GUEST_SYNC(TEST_LOCKING_SEND_RACE);
/* Racing host ioctls */
guest_wait_for_irq();
GUEST_SYNC(TEST_LOCKING_POLL_RACE);
/* Racing vmcall against host ioctl */
ports[0] = 0;
p = (struct sched_poll) {
.ports = ports,
.nr_ports = 1,
.timeout = 0
};
wait_for_timer:
/*
* Poll for a timer wake event while the worker thread is mucking with
* the shared info. KVM XEN drops timer IRQs if the shared info is
* invalid when the timer expires. Arbitrarily poll 100 times before
* giving up and asking the VMM to re-arm the timer. 100 polls should
* consume enough time to beat on KVM without taking too long if the
* timer IRQ is dropped due to an invalid event channel.
*/
for (i = 0; i < 100 && !guest_saw_irq; i++)
__xen_hypercall(__HYPERVISOR_sched_op, SCHEDOP_poll, &p);
/*
* Re-send the timer IRQ if it was (likely) dropped due to the timer
* expiring while the event channel was invalid.
*/
if (!guest_saw_irq) {
GUEST_SYNC(TEST_LOCKING_POLL_TIMEOUT);
goto wait_for_timer;
}
guest_saw_irq = false;
GUEST_SYNC(TEST_DONE);
}
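/* Compare two timespecs: returns -1, 0 or 1 as a is before, equal to or after b. */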
static int cmp_timespec(struct timespec *a, struct timespec *b)
{
if (a->tv_sec > b->tv_sec)
return 1;
else if (a->tv_sec < b->tv_sec)
return -1;
else if (a->tv_nsec > b->tv_nsec)
return 1;
else if (a->tv_nsec < b->tv_nsec)
return -1;
else
return 0;
}
static struct vcpu_info *vinfo;
static struct kvm_vcpu *vcpu;
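/*
 * SIGALRM handler: fires when an expected event channel IRQ has not arrived
 * in time; dump vCPU state and fail the test.
 */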
static void handle_alrm(int sig)
{
if (vinfo)
printf("evtchn_upcall_pending 0x%x\n", vinfo->evtchn_upcall_pending);
vcpu_dump(stdout, vcpu, 0);
TEST_FAIL("IRQ delivery timed out");
}
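/*
 * Worker thread which repeatedly activates and deactivates the shared_info
 * mapping, racing against event channel delivery and timer expiry in the
 * locking tests.
 */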
static void *juggle_shinfo_state(void *arg)
{
struct kvm_vm *vm = (struct kvm_vm *)arg;
struct kvm_xen_hvm_attr cache_activate = {
.type = KVM_XEN_ATTR_TYPE_SHARED_INFO,
.u.shared_info.gfn = SHINFO_REGION_GPA / PAGE_SIZE
};
struct kvm_xen_hvm_attr cache_deactivate = {
.type = KVM_XEN_ATTR_TYPE_SHARED_INFO,
.u.shared_info.gfn = KVM_XEN_INVALID_GFN
};
for (;;) {
__vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &cache_activate);
__vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &cache_deactivate);
pthread_testcancel();
}
return NULL;
}
int main(int argc, char *argv[])
{
struct timespec min_ts, max_ts, vm_ts;
struct kvm_xen_hvm_attr evt_reset;
struct kvm_vm *vm;
pthread_t thread;
bool verbose;
int ret;
verbose = argc > 1 && (!strncmp(argv[1], "-v", 3) ||
!strncmp(argv[1], "--verbose", 10));
int xen_caps = kvm_check_cap(KVM_CAP_XEN_HVM);
TEST_REQUIRE(xen_caps & KVM_XEN_HVM_CONFIG_SHARED_INFO);
bool do_runstate_tests = !!(xen_caps & KVM_XEN_HVM_CONFIG_RUNSTATE);
bool do_runstate_flag = !!(xen_caps & KVM_XEN_HVM_CONFIG_RUNSTATE_UPDATE_FLAG);
bool do_eventfd_tests = !!(xen_caps & KVM_XEN_HVM_CONFIG_EVTCHN_2LEVEL);
bool do_evtchn_tests = do_eventfd_tests && !!(xen_caps & KVM_XEN_HVM_CONFIG_EVTCHN_SEND);
clock_gettime(CLOCK_REALTIME, &min_ts);
vm = vm_create_with_one_vcpu(&vcpu, guest_code);
/* Map a region for the shared_info page */
vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
SHINFO_REGION_GPA, SHINFO_REGION_SLOT, 3, 0);
virt_map(vm, SHINFO_REGION_GVA, SHINFO_REGION_GPA, 3);
struct shared_info *shinfo = addr_gpa2hva(vm, SHINFO_VADDR);
int zero_fd = open("/dev/zero", O_RDONLY);
TEST_ASSERT(zero_fd != -1, "Failed to open /dev/zero");
struct kvm_xen_hvm_config hvmc = {
.flags = KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL,
.msr = XEN_HYPERCALL_MSR,
};
/* Let the kernel know that we *will* use it for sending all
* event channels, which lets it intercept SCHEDOP_poll */
if (do_evtchn_tests)
hvmc.flags |= KVM_XEN_HVM_CONFIG_EVTCHN_SEND;
vm_ioctl(vm, KVM_XEN_HVM_CONFIG, &hvmc);
struct kvm_xen_hvm_attr lm = {
.type = KVM_XEN_ATTR_TYPE_LONG_MODE,
.u.long_mode = 1,
};
vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &lm);
if (do_runstate_flag) {
struct kvm_xen_hvm_attr ruf = {
.type = KVM_XEN_ATTR_TYPE_RUNSTATE_UPDATE_FLAG,
.u.runstate_update_flag = 1,
};
vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &ruf);
ruf.u.runstate_update_flag = 0;
vm_ioctl(vm, KVM_XEN_HVM_GET_ATTR, &ruf);
TEST_ASSERT(ruf.u.runstate_update_flag == 1,
"Failed to read back RUNSTATE_UPDATE_FLAG attr");
}
struct kvm_xen_hvm_attr ha = {
.type = KVM_XEN_ATTR_TYPE_SHARED_INFO,
.u.shared_info.gfn = SHINFO_REGION_GPA / PAGE_SIZE,
};
vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &ha);
/*
* Test what happens when the HVA of the shinfo page is remapped after
* the kernel has a reference to it. But make sure we copy the clock
* info over since that's only set at setup time, and we test it later.
*/
struct pvclock_wall_clock wc_copy = shinfo->wc;
void *m = mmap(shinfo, PAGE_SIZE, PROT_READ|PROT_WRITE, MAP_FIXED|MAP_PRIVATE, zero_fd, 0);
TEST_ASSERT(m == shinfo, "Failed to map /dev/zero over shared info");
shinfo->wc = wc_copy;
struct kvm_xen_vcpu_attr vi = {
.type = KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO,
.u.gpa = VCPU_INFO_ADDR,
};
vcpu_ioctl(vcpu, KVM_XEN_VCPU_SET_ATTR, &vi);
struct kvm_xen_vcpu_attr pvclock = {
.type = KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO,
.u.gpa = PVTIME_ADDR,
};
vcpu_ioctl(vcpu, KVM_XEN_VCPU_SET_ATTR, &pvclock);
struct kvm_xen_hvm_attr vec = {
.type = KVM_XEN_ATTR_TYPE_UPCALL_VECTOR,
.u.vector = EVTCHN_VECTOR,
};
vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &vec);
vm_init_descriptor_tables(vm);
vcpu_init_descriptor_tables(vcpu);
vm_install_exception_handler(vm, EVTCHN_VECTOR, evtchn_handler);
if (do_runstate_tests) {
struct kvm_xen_vcpu_attr st = {
.type = KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADDR,
.u.gpa = RUNSTATE_ADDR,
};
vcpu_ioctl(vcpu, KVM_XEN_VCPU_SET_ATTR, &st);
}
int irq_fd[2] = { -1, -1 };
if (do_eventfd_tests) {
irq_fd[0] = eventfd(0, 0);
irq_fd[1] = eventfd(0, 0);
/* Unexpected, but not a KVM failure */
if (irq_fd[0] == -1 || irq_fd[1] == -1)
do_evtchn_tests = do_eventfd_tests = false;
}
if (do_eventfd_tests) {
irq_routes.info.nr = 2;
irq_routes.entries[0].gsi = 32;
irq_routes.entries[0].type = KVM_IRQ_ROUTING_XEN_EVTCHN;
irq_routes.entries[0].u.xen_evtchn.port = EVTCHN_TEST1;
irq_routes.entries[0].u.xen_evtchn.vcpu = vcpu->id;
irq_routes.entries[0].u.xen_evtchn.priority = KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL;
irq_routes.entries[1].gsi = 33;
irq_routes.entries[1].type = KVM_IRQ_ROUTING_XEN_EVTCHN;
irq_routes.entries[1].u.xen_evtchn.port = EVTCHN_TEST2;
irq_routes.entries[1].u.xen_evtchn.vcpu = vcpu->id;
irq_routes.entries[1].u.xen_evtchn.priority = KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL;
vm_ioctl(vm, KVM_SET_GSI_ROUTING, &irq_routes.info);
struct kvm_irqfd ifd = { };
ifd.fd = irq_fd[0];
ifd.gsi = 32;
vm_ioctl(vm, KVM_IRQFD, &ifd);
ifd.fd = irq_fd[1];
ifd.gsi = 33;
vm_ioctl(vm, KVM_IRQFD, &ifd);
struct sigaction sa = { };
sa.sa_handler = handle_alrm;
sigaction(SIGALRM, &sa, NULL);
}
struct kvm_xen_vcpu_attr tmr = {
.type = KVM_XEN_VCPU_ATTR_TYPE_TIMER,
.u.timer.port = EVTCHN_TIMER,
.u.timer.priority = KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL,
.u.timer.expires_ns = 0
};
if (do_evtchn_tests) {
struct kvm_xen_hvm_attr inj = {
.type = KVM_XEN_ATTR_TYPE_EVTCHN,
.u.evtchn.send_port = 127,
.u.evtchn.type = EVTCHNSTAT_interdomain,
.u.evtchn.flags = 0,
.u.evtchn.deliver.port.port = EVTCHN_TEST1,
.u.evtchn.deliver.port.vcpu = vcpu->id + 1,
.u.evtchn.deliver.port.priority = KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL,
};
vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &inj);
/* Test migration to a different vCPU */
inj.u.evtchn.flags = KVM_XEN_EVTCHN_UPDATE;
inj.u.evtchn.deliver.port.vcpu = vcpu->id;
vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &inj);
inj.u.evtchn.send_port = 197;
inj.u.evtchn.deliver.eventfd.port = 0;
inj.u.evtchn.deliver.eventfd.fd = irq_fd[1];
inj.u.evtchn.flags = 0;
vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &inj);
vcpu_ioctl(vcpu, KVM_XEN_VCPU_SET_ATTR, &tmr);
}
vinfo = addr_gpa2hva(vm, VCPU_INFO_VADDR);
vinfo->evtchn_upcall_pending = 0;
struct vcpu_runstate_info *rs = addr_gpa2hva(vm, RUNSTATE_ADDR);
rs->state = 0x5a;
bool evtchn_irq_expected = false;
for (;;) {
struct ucall uc;
vcpu_run(vcpu);
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
switch (get_ucall(vcpu, &uc)) {
case UCALL_ABORT:
REPORT_GUEST_ASSERT(uc);
/* NOT REACHED */
case UCALL_SYNC: {
struct kvm_xen_vcpu_attr rst;
long rundelay;
if (do_runstate_tests)
TEST_ASSERT(rs->state_entry_time == rs->time[0] +
rs->time[1] + rs->time[2] + rs->time[3],
"runstate times don't add up");
switch (uc.args[1]) {
case TEST_INJECT_VECTOR:
if (verbose)
printf("Delivering evtchn upcall\n");
evtchn_irq_expected = true;
vinfo->evtchn_upcall_pending = 1;
break;
case TEST_RUNSTATE_runnable...TEST_RUNSTATE_offline:
TEST_ASSERT(!evtchn_irq_expected, "Event channel IRQ not seen");
if (!do_runstate_tests)
goto done;
if (verbose)
printf("Testing runstate %s\n", runstate_names[uc.args[1]]);
rst.type = KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_CURRENT;
rst.u.runstate.state = uc.args[1] + RUNSTATE_runnable -
TEST_RUNSTATE_runnable;
vcpu_ioctl(vcpu, KVM_XEN_VCPU_SET_ATTR, &rst);
break;
case TEST_RUNSTATE_ADJUST:
if (verbose)
printf("Testing RUNSTATE_ADJUST\n");
rst.type = KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADJUST;
memset(&rst.u, 0, sizeof(rst.u));
rst.u.runstate.state = (uint64_t)-1;
rst.u.runstate.time_blocked =
0x5a - rs->time[RUNSTATE_blocked];
rst.u.runstate.time_offline =
0x6b6b - rs->time[RUNSTATE_offline];
rst.u.runstate.time_runnable = -rst.u.runstate.time_blocked -
rst.u.runstate.time_offline;
vcpu_ioctl(vcpu, KVM_XEN_VCPU_SET_ATTR, &rst);
break;
case TEST_RUNSTATE_DATA:
if (verbose)
printf("Testing RUNSTATE_DATA\n");
rst.type = KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_DATA;
memset(&rst.u, 0, sizeof(rst.u));
rst.u.runstate.state = RUNSTATE_running;
rst.u.runstate.state_entry_time = 0x6b6b + 0x5a;
rst.u.runstate.time_blocked = 0x6b6b;
rst.u.runstate.time_offline = 0x5a;
vcpu_ioctl(vcpu, KVM_XEN_VCPU_SET_ATTR, &rst);
break;
case TEST_STEAL_TIME:
if (verbose)
printf("Testing steal time\n");
/* Yield until scheduler delay exceeds target */
rundelay = get_run_delay() + MIN_STEAL_TIME;
do {
sched_yield();
} while (get_run_delay() < rundelay);
break;
case TEST_EVTCHN_MASKED:
if (!do_eventfd_tests)
goto done;
if (verbose)
printf("Testing masked event channel\n");
shinfo->evtchn_mask[0] = 1UL << EVTCHN_TEST1;
eventfd_write(irq_fd[0], 1UL);
alarm(1);
break;
case TEST_EVTCHN_UNMASKED:
if (verbose)
printf("Testing unmasked event channel\n");
/* Unmask that, but deliver the other one */
shinfo->evtchn_pending[0] = 0;
shinfo->evtchn_mask[0] = 0;
eventfd_write(irq_fd[1], 1UL);
evtchn_irq_expected = true;
alarm(1);
break;
case TEST_EVTCHN_SLOWPATH:
TEST_ASSERT(!evtchn_irq_expected,
"Expected event channel IRQ but it didn't happen");
shinfo->evtchn_pending[1] = 0;
if (verbose)
printf("Testing event channel after memslot change\n");
vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
DUMMY_REGION_GPA, DUMMY_REGION_SLOT, 1, 0);
eventfd_write(irq_fd[0], 1UL);
evtchn_irq_expected = true;
alarm(1);
break;
case TEST_EVTCHN_SEND_IOCTL:
TEST_ASSERT(!evtchn_irq_expected,
"Expected event channel IRQ but it didn't happen");
if (!do_evtchn_tests)
goto done;
shinfo->evtchn_pending[0] = 0;
if (verbose)
printf("Testing injection with KVM_XEN_HVM_EVTCHN_SEND\n");
struct kvm_irq_routing_xen_evtchn e;
e.port = EVTCHN_TEST2;
e.vcpu = vcpu->id;
e.priority = KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL;
vm_ioctl(vm, KVM_XEN_HVM_EVTCHN_SEND, &e);
evtchn_irq_expected = true;
alarm(1);
break;
case TEST_EVTCHN_HCALL:
TEST_ASSERT(!evtchn_irq_expected,
"Expected event channel IRQ but it didn't happen");
shinfo->evtchn_pending[1] = 0;
if (verbose)
printf("Testing guest EVTCHNOP_send direct to evtchn\n");
evtchn_irq_expected = true;
alarm(1);
break;
case TEST_EVTCHN_HCALL_SLOWPATH:
TEST_ASSERT(!evtchn_irq_expected,
"Expected event channel IRQ but it didn't happen");
shinfo->evtchn_pending[0] = 0;
if (verbose)
printf("Testing guest EVTCHNOP_send direct to evtchn after memslot change\n");
vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
DUMMY_REGION_GPA_2, DUMMY_REGION_SLOT_2, 1, 0);
evtchn_irq_expected = true;
alarm(1);
break;
case TEST_EVTCHN_HCALL_EVENTFD:
TEST_ASSERT(!evtchn_irq_expected,
"Expected event channel IRQ but it didn't happen");
shinfo->evtchn_pending[0] = 0;
if (verbose)
printf("Testing guest EVTCHNOP_send to eventfd\n");
evtchn_irq_expected = true;
alarm(1);
break;
case TEST_TIMER_SETUP:
TEST_ASSERT(!evtchn_irq_expected,
"Expected event channel IRQ but it didn't happen");
shinfo->evtchn_pending[1] = 0;
if (verbose)
printf("Testing guest oneshot timer\n");
break;
case TEST_TIMER_WAIT:
memset(&tmr, 0, sizeof(tmr));
tmr.type = KVM_XEN_VCPU_ATTR_TYPE_TIMER;
vcpu_ioctl(vcpu, KVM_XEN_VCPU_GET_ATTR, &tmr);
TEST_ASSERT(tmr.u.timer.port == EVTCHN_TIMER,
"Timer port not returned");
TEST_ASSERT(tmr.u.timer.priority == KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL,
"Timer priority not returned");
TEST_ASSERT(tmr.u.timer.expires_ns > rs->state_entry_time,
"Timer expiry not returned");
evtchn_irq_expected = true;
alarm(1);
break;
case TEST_TIMER_RESTORE:
TEST_ASSERT(!evtchn_irq_expected,
"Expected event channel IRQ but it didn't happen");
shinfo->evtchn_pending[0] = 0;
if (verbose)
printf("Testing restored oneshot timer\n");
tmr.u.timer.expires_ns = rs->state_entry_time + 100000000;
vcpu_ioctl(vcpu, KVM_XEN_VCPU_SET_ATTR, &tmr);
evtchn_irq_expected = true;
alarm(1);
break;
case TEST_POLL_READY:
TEST_ASSERT(!evtchn_irq_expected,
"Expected event channel IRQ but it didn't happen");
if (verbose)
printf("Testing SCHEDOP_poll with already pending event\n");
shinfo->evtchn_pending[0] = shinfo->evtchn_mask[0] = 1UL << EVTCHN_TIMER;
alarm(1);
break;
case TEST_POLL_TIMEOUT:
if (verbose)
printf("Testing SCHEDOP_poll timeout\n");
shinfo->evtchn_pending[0] = 0;
alarm(1);
break;
case TEST_POLL_MASKED:
if (verbose)
printf("Testing SCHEDOP_poll wake on masked event\n");
tmr.u.timer.expires_ns = rs->state_entry_time + 100000000;
vcpu_ioctl(vcpu, KVM_XEN_VCPU_SET_ATTR, &tmr);
alarm(1);
break;
case TEST_POLL_WAKE:
shinfo->evtchn_pending[0] = shinfo->evtchn_mask[0] = 0;
if (verbose)
printf("Testing SCHEDOP_poll wake on unmasked event\n");
evtchn_irq_expected = true;
tmr.u.timer.expires_ns = rs->state_entry_time + 100000000;
vcpu_ioctl(vcpu, KVM_XEN_VCPU_SET_ATTR, &tmr);
/* Read it back and check the pending time is reported correctly */
tmr.u.timer.expires_ns = 0;
vcpu_ioctl(vcpu, KVM_XEN_VCPU_GET_ATTR, &tmr);
TEST_ASSERT(tmr.u.timer.expires_ns == rs->state_entry_time + 100000000,
"Timer not reported pending");
alarm(1);
break;
case TEST_TIMER_PAST:
TEST_ASSERT(!evtchn_irq_expected,
"Expected event channel IRQ but it didn't happen");
/* Read timer and check it is no longer pending */
vcpu_ioctl(vcpu, KVM_XEN_VCPU_GET_ATTR, &tmr);
TEST_ASSERT(!tmr.u.timer.expires_ns, "Timer still reported pending");
shinfo->evtchn_pending[0] = 0;
if (verbose)
printf("Testing timer in the past\n");
evtchn_irq_expected = true;
tmr.u.timer.expires_ns = rs->state_entry_time - 100000000ULL;
vcpu_ioctl(vcpu, KVM_XEN_VCPU_SET_ATTR, &tmr);
alarm(1);
break;
case TEST_LOCKING_SEND_RACE:
TEST_ASSERT(!evtchn_irq_expected,
"Expected event channel IRQ but it didn't happen");
alarm(0);
if (verbose)
printf("Testing shinfo lock corruption (KVM_XEN_HVM_EVTCHN_SEND)\n");
ret = pthread_create(&thread, NULL, &juggle_shinfo_state, (void *)vm);
TEST_ASSERT(ret == 0, "pthread_create() failed: %s", strerror(ret));
struct kvm_irq_routing_xen_evtchn uxe = {
.port = 1,
.vcpu = vcpu->id,
.priority = KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL
};
evtchn_irq_expected = true;
for (time_t t = time(NULL) + SHINFO_RACE_TIMEOUT; time(NULL) < t;)
__vm_ioctl(vm, KVM_XEN_HVM_EVTCHN_SEND, &uxe);
break;
case TEST_LOCKING_POLL_RACE:
TEST_ASSERT(!evtchn_irq_expected,
"Expected event channel IRQ but it didn't happen");
if (verbose)
printf("Testing shinfo lock corruption (SCHEDOP_poll)\n");
shinfo->evtchn_pending[0] = 1;
evtchn_irq_expected = true;
tmr.u.timer.expires_ns = rs->state_entry_time +
SHINFO_RACE_TIMEOUT * 1000000000ULL;
vcpu_ioctl(vcpu, KVM_XEN_VCPU_SET_ATTR, &tmr);
break;
case TEST_LOCKING_POLL_TIMEOUT:
/*
* Optional and possibly repeated sync point.
* Injecting the timer IRQ may fail if the
* shinfo is invalid when the timer expires.
* If the timer has expired but the IRQ hasn't
* been delivered, rearm the timer and retry.
*/
vcpu_ioctl(vcpu, KVM_XEN_VCPU_GET_ATTR, &tmr);
/* Resume the guest if the timer is still pending. */
if (tmr.u.timer.expires_ns)
break;
/* All done if the IRQ was delivered. */
if (!evtchn_irq_expected)
break;
tmr.u.timer.expires_ns = rs->state_entry_time +
SHINFO_RACE_TIMEOUT * 1000000000ULL;
vcpu_ioctl(vcpu, KVM_XEN_VCPU_SET_ATTR, &tmr);
break;
case TEST_DONE:
TEST_ASSERT(!evtchn_irq_expected,
"Expected event channel IRQ but it didn't happen");
ret = pthread_cancel(thread);
TEST_ASSERT(ret == 0, "pthread_cancel() failed: %s", strerror(ret));
ret = pthread_join(thread, 0);
TEST_ASSERT(ret == 0, "pthread_join() failed: %s", strerror(ret));
goto done;
case TEST_GUEST_SAW_IRQ:
TEST_ASSERT(evtchn_irq_expected, "Unexpected event channel IRQ");
evtchn_irq_expected = false;
break;
}
break;
}
case UCALL_DONE:
goto done;
default:
TEST_FAIL("Unknown ucall 0x%lx.", uc.cmd);
}
}
done:
evt_reset.type = KVM_XEN_ATTR_TYPE_EVTCHN;
evt_reset.u.evtchn.flags = KVM_XEN_EVTCHN_RESET;
vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &evt_reset);
alarm(0);
clock_gettime(CLOCK_REALTIME, &max_ts);
/*
* Just a *really* basic check that things are being put in the
* right place. The actual calculations are much the same for
* Xen as they are for the KVM variants, so no need to check.
*/
struct pvclock_wall_clock *wc;
struct pvclock_vcpu_time_info *ti, *ti2;
wc = addr_gpa2hva(vm, SHINFO_REGION_GPA + 0xc00);
ti = addr_gpa2hva(vm, SHINFO_REGION_GPA + 0x40 + 0x20);
ti2 = addr_gpa2hva(vm, PVTIME_ADDR);
if (verbose) {
printf("Wall clock (v %d) %d.%09d\n", wc->version, wc->sec, wc->nsec);
printf("Time info 1: v %u tsc %" PRIu64 " time %" PRIu64 " mul %u shift %u flags %x\n",
ti->version, ti->tsc_timestamp, ti->system_time, ti->tsc_to_system_mul,
ti->tsc_shift, ti->flags);
printf("Time info 2: v %u tsc %" PRIu64 " time %" PRIu64 " mul %u shift %u flags %x\n",
ti2->version, ti2->tsc_timestamp, ti2->system_time, ti2->tsc_to_system_mul,
ti2->tsc_shift, ti2->flags);
}
vm_ts.tv_sec = wc->sec;
vm_ts.tv_nsec = wc->nsec;
TEST_ASSERT(wc->version && !(wc->version & 1),
"Bad wallclock version %x", wc->version);
TEST_ASSERT(cmp_timespec(&min_ts, &vm_ts) <= 0, "VM time too old");
TEST_ASSERT(cmp_timespec(&max_ts, &vm_ts) >= 0, "VM time too new");
TEST_ASSERT(ti->version && !(ti->version & 1),
"Bad time_info version %x", ti->version);
TEST_ASSERT(ti2->version && !(ti2->version & 1),
"Bad time_info version %x", ti->version);
if (do_runstate_tests) {
/*
* Fetch runstate and check sanity. Strictly speaking in the
* general case we might not expect the numbers to be identical
* but in this case we know we aren't running the vCPU any more.
*/
struct kvm_xen_vcpu_attr rst = {
.type = KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_DATA,
};
vcpu_ioctl(vcpu, KVM_XEN_VCPU_GET_ATTR, &rst);
if (verbose) {
printf("Runstate: %s(%d), entry %" PRIu64 " ns\n",
rs->state <= RUNSTATE_offline ? runstate_names[rs->state] : "unknown",
rs->state, rs->state_entry_time);
for (int i = RUNSTATE_running; i <= RUNSTATE_offline; i++) {
printf("State %s: %" PRIu64 " ns\n",
runstate_names[i], rs->time[i]);
}
}
/*
* Exercise runstate info at all points across the page boundary, in
* 32-bit and 64-bit mode. In particular, test the case where it is
* configured in 32-bit mode and then switched to 64-bit mode while
* active, which takes it onto the second page.
*/
unsigned long runstate_addr;
struct compat_vcpu_runstate_info *crs;
for (runstate_addr = SHINFO_REGION_GPA + PAGE_SIZE + PAGE_SIZE - sizeof(*rs) - 4;
runstate_addr < SHINFO_REGION_GPA + PAGE_SIZE + PAGE_SIZE + 4; runstate_addr++) {
rs = addr_gpa2hva(vm, runstate_addr);
crs = (void *)rs;
memset(rs, 0xa5, sizeof(*rs));
/* Set to compatibility mode */
lm.u.long_mode = 0;
vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &lm);
/* Set runstate to new address (kernel will write it) */
struct kvm_xen_vcpu_attr st = {
.type = KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADDR,
.u.gpa = runstate_addr,
};
vcpu_ioctl(vcpu, KVM_XEN_VCPU_SET_ATTR, &st);
if (verbose)
printf("Compatibility runstate at %08lx\n", runstate_addr);
TEST_ASSERT(crs->state == rst.u.runstate.state, "Runstate mismatch");
TEST_ASSERT(crs->state_entry_time == rst.u.runstate.state_entry_time,
"State entry time mismatch");
TEST_ASSERT(crs->time[RUNSTATE_running] == rst.u.runstate.time_running,
"Running time mismatch");
TEST_ASSERT(crs->time[RUNSTATE_runnable] == rst.u.runstate.time_runnable,
"Runnable time mismatch");
TEST_ASSERT(crs->time[RUNSTATE_blocked] == rst.u.runstate.time_blocked,
"Blocked time mismatch");
TEST_ASSERT(crs->time[RUNSTATE_offline] == rst.u.runstate.time_offline,
"Offline time mismatch");
TEST_ASSERT(crs->time[RUNSTATE_offline + 1] == 0xa5a5a5a5a5a5a5a5ULL,
"Structure overrun");
TEST_ASSERT(crs->state_entry_time == crs->time[0] +
crs->time[1] + crs->time[2] + crs->time[3],
"runstate times don't add up");
/* Now switch to 64-bit mode */
lm.u.long_mode = 1;
vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &lm);
memset(rs, 0xa5, sizeof(*rs));
/* Don't change the address, just trigger a write */
struct kvm_xen_vcpu_attr adj = {
.type = KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADJUST,
.u.runstate.state = (uint64_t)-1
};
vcpu_ioctl(vcpu, KVM_XEN_VCPU_SET_ATTR, &adj);
if (verbose)
printf("64-bit runstate at %08lx\n", runstate_addr);
TEST_ASSERT(rs->state == rst.u.runstate.state, "Runstate mismatch");
TEST_ASSERT(rs->state_entry_time == rst.u.runstate.state_entry_time,
"State entry time mismatch");
TEST_ASSERT(rs->time[RUNSTATE_running] == rst.u.runstate.time_running,
"Running time mismatch");
TEST_ASSERT(rs->time[RUNSTATE_runnable] == rst.u.runstate.time_runnable,
"Runnable time mismatch");
TEST_ASSERT(rs->time[RUNSTATE_blocked] == rst.u.runstate.time_blocked,
"Blocked time mismatch");
TEST_ASSERT(rs->time[RUNSTATE_offline] == rst.u.runstate.time_offline,
"Offline time mismatch");
TEST_ASSERT(rs->time[RUNSTATE_offline + 1] == 0xa5a5a5a5a5a5a5a5ULL,
"Structure overrun");
TEST_ASSERT(rs->state_entry_time == rs->time[0] +
rs->time[1] + rs->time[2] + rs->time[3],
"runstate times don't add up");
}
}
kvm_vm_free(vm);
return 0;
}
| linux-master | tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c |
// SPDX-License-Identifier: GPL-2.0
/*
* KVM guest debug register tests
*
* Copyright (C) 2020, Red Hat, Inc.
*/
#include <stdio.h>
#include <string.h>
#include "kvm_util.h"
#include "processor.h"
#include "apic.h"
#define DR6_BD (1 << 13)
#define DR7_GD (1 << 13)
#define IRQ_VECTOR 0xAA
/* For testing data access debug BP */
uint32_t guest_value;
extern unsigned char sw_bp, hw_bp, write_data, ss_start, bd_start;
static void guest_code(void)
{
/* Create a pending interrupt on current vCPU */
x2apic_enable();
x2apic_write_reg(APIC_ICR, APIC_DEST_SELF | APIC_INT_ASSERT |
APIC_DM_FIXED | IRQ_VECTOR);
/*
* Software BP tests.
*
	 * NOTE: the sw_bp label needs to be placed before the int3 here, because
	 * int3 is an exception rather than a normal trap for KVM_SET_GUEST_DEBUG
	 * (we capture it using the vcpu exception bitmap).
*/
asm volatile("sw_bp: int3");
/* Hardware instruction BP test */
asm volatile("hw_bp: nop");
/* Hardware data BP test */
asm volatile("mov $1234,%%rax;\n\t"
"mov %%rax,%0;\n\t write_data:"
: "=m" (guest_value) : : "rax");
/*
	 * Single step test: covers both ordinary instructions and instructions
	 * that KVM emulates (cpuid, rdmsr).
	 *
	 * Enable interrupts during the single stepping to see that the
	 * pending interrupt we raised is not handled due to KVM_GUESTDBG_BLOCKIRQ.
*/
asm volatile("ss_start: "
"sti\n\t"
"xor %%eax,%%eax\n\t"
"cpuid\n\t"
"movl $0x1a0,%%ecx\n\t"
"rdmsr\n\t"
"cli\n\t"
: : : "eax", "ebx", "ecx", "edx");
/* DR6.BD test */
asm volatile("bd_start: mov %%dr0, %%rax" : : : "rax");
GUEST_DONE();
}
#define CAST_TO_RIP(v) ((unsigned long long)&(v))
static void vcpu_skip_insn(struct kvm_vcpu *vcpu, int insn_len)
{
struct kvm_regs regs;
vcpu_regs_get(vcpu, ®s);
regs.rip += insn_len;
vcpu_regs_set(vcpu, ®s);
}
int main(void)
{
struct kvm_guest_debug debug;
unsigned long long target_dr6, target_rip;
struct kvm_vcpu *vcpu;
struct kvm_run *run;
struct kvm_vm *vm;
struct ucall uc;
uint64_t cmd;
int i;
/* Instruction lengths starting at ss_start */
int ss_size[6] = {
1, /* sti*/
2, /* xor */
2, /* cpuid */
5, /* mov */
2, /* rdmsr */
1, /* cli */
};
TEST_REQUIRE(kvm_has_cap(KVM_CAP_SET_GUEST_DEBUG));
vm = vm_create_with_one_vcpu(&vcpu, guest_code);
run = vcpu->run;
/* Test software BPs - int3 */
memset(&debug, 0, sizeof(debug));
debug.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP;
vcpu_guest_debug_set(vcpu, &debug);
vcpu_run(vcpu);
TEST_ASSERT(run->exit_reason == KVM_EXIT_DEBUG &&
run->debug.arch.exception == BP_VECTOR &&
run->debug.arch.pc == CAST_TO_RIP(sw_bp),
"INT3: exit %d exception %d rip 0x%llx (should be 0x%llx)",
run->exit_reason, run->debug.arch.exception,
run->debug.arch.pc, CAST_TO_RIP(sw_bp));
vcpu_skip_insn(vcpu, 1);
/* Test instruction HW BP over DR[0-3] */
for (i = 0; i < 4; i++) {
memset(&debug, 0, sizeof(debug));
debug.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
debug.arch.debugreg[i] = CAST_TO_RIP(hw_bp);
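		/*
		 * DR7: bit 10 (0x400) is reserved and must be set; bit 2*i+1 is
		 * the global-enable bit for DR[i]. The R/W and LEN fields are
		 * left at 0, i.e. break on instruction execution.
		 */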
debug.arch.debugreg[7] = 0x400 | (1UL << (2*i+1));
vcpu_guest_debug_set(vcpu, &debug);
vcpu_run(vcpu);
target_dr6 = 0xffff0ff0 | (1UL << i);
TEST_ASSERT(run->exit_reason == KVM_EXIT_DEBUG &&
run->debug.arch.exception == DB_VECTOR &&
run->debug.arch.pc == CAST_TO_RIP(hw_bp) &&
run->debug.arch.dr6 == target_dr6,
"INS_HW_BP (DR%d): exit %d exception %d rip 0x%llx "
"(should be 0x%llx) dr6 0x%llx (should be 0x%llx)",
i, run->exit_reason, run->debug.arch.exception,
run->debug.arch.pc, CAST_TO_RIP(hw_bp),
run->debug.arch.dr6, target_dr6);
}
/* Skip "nop" */
vcpu_skip_insn(vcpu, 1);
/* Test data access HW BP over DR[0-3] */
for (i = 0; i < 4; i++) {
memset(&debug, 0, sizeof(debug));
debug.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
debug.arch.debugreg[i] = CAST_TO_RIP(guest_value);
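		/*
		 * DR7: global-enable bit for DR[i] plus, in its per-register
		 * control field (bits 16 + 4*i), R/W=01 (break on data writes)
		 * and LEN=11 (4-byte wide) to match the uint32_t guest_value.
		 */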
debug.arch.debugreg[7] = 0x00000400 | (1UL << (2*i+1)) |
(0x000d0000UL << (4*i));
vcpu_guest_debug_set(vcpu, &debug);
vcpu_run(vcpu);
target_dr6 = 0xffff0ff0 | (1UL << i);
TEST_ASSERT(run->exit_reason == KVM_EXIT_DEBUG &&
run->debug.arch.exception == DB_VECTOR &&
run->debug.arch.pc == CAST_TO_RIP(write_data) &&
run->debug.arch.dr6 == target_dr6,
"DATA_HW_BP (DR%d): exit %d exception %d rip 0x%llx "
"(should be 0x%llx) dr6 0x%llx (should be 0x%llx)",
i, run->exit_reason, run->debug.arch.exception,
run->debug.arch.pc, CAST_TO_RIP(write_data),
run->debug.arch.dr6, target_dr6);
/* Rollback the 4-bytes "mov" */
vcpu_skip_insn(vcpu, -7);
}
/* Skip the 4-bytes "mov" */
vcpu_skip_insn(vcpu, 7);
/* Test single step */
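	/*
	 * After each step the reported RIP should have advanced by the size of
	 * the just-executed instruction, and DR6 should carry BS (bit 14) on
	 * top of the architectural reserved-1 bits (0xffff0ff0).
	 */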
target_rip = CAST_TO_RIP(ss_start);
target_dr6 = 0xffff4ff0ULL;
for (i = 0; i < (sizeof(ss_size) / sizeof(ss_size[0])); i++) {
target_rip += ss_size[i];
memset(&debug, 0, sizeof(debug));
debug.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP |
KVM_GUESTDBG_BLOCKIRQ;
debug.arch.debugreg[7] = 0x00000400;
vcpu_guest_debug_set(vcpu, &debug);
vcpu_run(vcpu);
TEST_ASSERT(run->exit_reason == KVM_EXIT_DEBUG &&
run->debug.arch.exception == DB_VECTOR &&
run->debug.arch.pc == target_rip &&
run->debug.arch.dr6 == target_dr6,
"SINGLE_STEP[%d]: exit %d exception %d rip 0x%llx "
"(should be 0x%llx) dr6 0x%llx (should be 0x%llx)",
i, run->exit_reason, run->debug.arch.exception,
run->debug.arch.pc, target_rip, run->debug.arch.dr6,
target_dr6);
}
/* Finally test global disable */
memset(&debug, 0, sizeof(debug));
debug.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
debug.arch.debugreg[7] = 0x400 | DR7_GD;
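	/*
	 * With DR7.GD set, the guest's "mov %dr0, %rax" at bd_start triggers a
	 * debug-register-access #DB with DR6.BD (bit 13) set.
	 */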
vcpu_guest_debug_set(vcpu, &debug);
vcpu_run(vcpu);
target_dr6 = 0xffff0ff0 | DR6_BD;
TEST_ASSERT(run->exit_reason == KVM_EXIT_DEBUG &&
run->debug.arch.exception == DB_VECTOR &&
run->debug.arch.pc == CAST_TO_RIP(bd_start) &&
run->debug.arch.dr6 == target_dr6,
"DR7.GD: exit %d exception %d rip 0x%llx "
"(should be 0x%llx) dr6 0x%llx (should be 0x%llx)",
run->exit_reason, run->debug.arch.exception,
		    run->debug.arch.pc, CAST_TO_RIP(bd_start), run->debug.arch.dr6,
target_dr6);
/* Disable all debug controls, run to the end */
memset(&debug, 0, sizeof(debug));
vcpu_guest_debug_set(vcpu, &debug);
vcpu_run(vcpu);
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
cmd = get_ucall(vcpu, &uc);
TEST_ASSERT(cmd == UCALL_DONE, "UCALL_DONE");
kvm_vm_free(vm);
return 0;
}
| linux-master | tools/testing/selftests/kvm/x86_64/debug_regs.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* vmx_tsc_adjust_test
*
* Copyright (C) 2018, Google LLC.
*
* IA32_TSC_ADJUST test
*
* According to the SDM, "if an execution of WRMSR to the
* IA32_TIME_STAMP_COUNTER MSR adds (or subtracts) value X from the TSC,
* the logical processor also adds (or subtracts) value X from the
 * IA32_TSC_ADJUST MSR".
*
* Note that when L1 doesn't intercept writes to IA32_TSC, a
* WRMSR(IA32_TSC) from L2 sets L1's TSC value, not L2's perceived TSC
* value.
*
* This test verifies that this unusual case is handled correctly.
*/
#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"
#include "vmx.h"
#include <string.h>
#include <sys/ioctl.h>
#include "kselftest.h"
#ifndef MSR_IA32_TSC_ADJUST
#define MSR_IA32_TSC_ADJUST 0x3b
#endif
#define TSC_ADJUST_VALUE (1ll << 32)
#define TSC_OFFSET_VALUE -(1ll << 48)
enum {
PORT_ABORT = 0x1000,
PORT_REPORT,
PORT_DONE,
};
enum {
VMXON_PAGE = 0,
VMCS_PAGE,
MSR_BITMAP_PAGE,
NUM_VMX_PAGES,
};
/* The virtual machine object. */
static struct kvm_vm *vm;
static void check_ia32_tsc_adjust(int64_t max)
{
int64_t adjust;
adjust = rdmsr(MSR_IA32_TSC_ADJUST);
GUEST_SYNC(adjust);
GUEST_ASSERT(adjust <= max);
}
static void l2_guest_code(void)
{
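	/*
	 * L2 runs with a TSC offset, so recover L1's TSC value first. Since L1
	 * does not intercept WRMSR(IA32_TSC), the write below modifies L1's TSC
	 * and therefore pulls IA32_TSC_ADJUST down by another TSC_ADJUST_VALUE.
	 */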
uint64_t l1_tsc = rdtsc() - TSC_OFFSET_VALUE;
wrmsr(MSR_IA32_TSC, l1_tsc - TSC_ADJUST_VALUE);
check_ia32_tsc_adjust(-2 * TSC_ADJUST_VALUE);
/* Exit to L1 */
__asm__ __volatile__("vmcall");
}
static void l1_guest_code(struct vmx_pages *vmx_pages)
{
#define L2_GUEST_STACK_SIZE 64
unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
uint32_t control;
uintptr_t save_cr3;
GUEST_ASSERT(rdtsc() < TSC_ADJUST_VALUE);
wrmsr(MSR_IA32_TSC, rdtsc() - TSC_ADJUST_VALUE);
check_ia32_tsc_adjust(-1 * TSC_ADJUST_VALUE);
GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
GUEST_ASSERT(load_vmcs(vmx_pages));
/* Prepare the VMCS for L2 execution. */
prepare_vmcs(vmx_pages, l2_guest_code,
&l2_guest_stack[L2_GUEST_STACK_SIZE]);
control = vmreadz(CPU_BASED_VM_EXEC_CONTROL);
control |= CPU_BASED_USE_MSR_BITMAPS | CPU_BASED_USE_TSC_OFFSETTING;
vmwrite(CPU_BASED_VM_EXEC_CONTROL, control);
vmwrite(TSC_OFFSET, TSC_OFFSET_VALUE);
/* Jump into L2. First, test failure to load guest CR3. */
save_cr3 = vmreadz(GUEST_CR3);
vmwrite(GUEST_CR3, -1ull);
GUEST_ASSERT(!vmlaunch());
GUEST_ASSERT(vmreadz(VM_EXIT_REASON) ==
(EXIT_REASON_FAILED_VMENTRY | EXIT_REASON_INVALID_STATE));
check_ia32_tsc_adjust(-1 * TSC_ADJUST_VALUE);
vmwrite(GUEST_CR3, save_cr3);
GUEST_ASSERT(!vmlaunch());
GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);
check_ia32_tsc_adjust(-2 * TSC_ADJUST_VALUE);
GUEST_DONE();
}
static void report(int64_t val)
{
pr_info("IA32_TSC_ADJUST is %ld (%lld * TSC_ADJUST_VALUE + %lld).\n",
val, val / TSC_ADJUST_VALUE, val % TSC_ADJUST_VALUE);
}
int main(int argc, char *argv[])
{
vm_vaddr_t vmx_pages_gva;
struct kvm_vcpu *vcpu;
TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_VMX));
vm = vm_create_with_one_vcpu(&vcpu, (void *) l1_guest_code);
/* Allocate VMX pages and shared descriptors (vmx_pages). */
vcpu_alloc_vmx(vm, &vmx_pages_gva);
vcpu_args_set(vcpu, 1, vmx_pages_gva);
for (;;) {
struct ucall uc;
vcpu_run(vcpu);
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
switch (get_ucall(vcpu, &uc)) {
case UCALL_ABORT:
REPORT_GUEST_ASSERT(uc);
/* NOT REACHED */
case UCALL_SYNC:
report(uc.args[1]);
break;
case UCALL_DONE:
goto done;
default:
TEST_FAIL("Unknown ucall %lu", uc.cmd);
}
}
done:
kvm_vm_free(vm);
return 0;
}
| linux-master | tools/testing/selftests/kvm/x86_64/vmx_tsc_adjust_test.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2021, Red Hat, Inc.
*
* Tests for Hyper-V features enablement
*/
#include <asm/kvm_para.h>
#include <linux/kvm_para.h>
#include <stdint.h>
#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"
#include "hyperv.h"
/*
* HYPERV_CPUID_ENLIGHTMENT_INFO.EBX is not a 'feature' CPUID leaf
* but to activate the feature it is sufficient to set it to a non-zero
* value. Use BIT(0) for that.
*/
#define HV_PV_SPINLOCKS_TEST \
KVM_X86_CPU_FEATURE(HYPERV_CPUID_ENLIGHTMENT_INFO, 0, EBX, 0)
struct msr_data {
uint32_t idx;
bool fault_expected;
bool write;
u64 write_val;
};
struct hcall_data {
uint64_t control;
uint64_t expect;
bool ud_expected;
};
static bool is_write_only_msr(uint32_t msr)
{
return msr == HV_X64_MSR_EOI;
}
static void guest_msr(struct msr_data *msr)
{
uint8_t vector = 0;
uint64_t msr_val = 0;
GUEST_ASSERT(msr->idx);
if (msr->write)
vector = wrmsr_safe(msr->idx, msr->write_val);
if (!vector && (!msr->write || !is_write_only_msr(msr->idx)))
vector = rdmsr_safe(msr->idx, &msr_val);
if (msr->fault_expected)
__GUEST_ASSERT(vector == GP_VECTOR,
"Expected #GP on %sMSR(0x%x), got vector '0x%x'",
			       msr->write ? "WR" : "RD", msr->idx, vector);
else
__GUEST_ASSERT(!vector,
"Expected success on %sMSR(0x%x), got vector '0x%x'",
			       msr->write ? "WR" : "RD", msr->idx, vector);
if (vector || is_write_only_msr(msr->idx))
goto done;
if (msr->write)
		__GUEST_ASSERT(msr_val == msr->write_val,
"WRMSR(0x%x) to '0x%llx', RDMSR read '0x%llx'",
msr->idx, msr->write_val, msr_val);
/* Invariant TSC bit appears when TSC invariant control MSR is written to */
if (msr->idx == HV_X64_MSR_TSC_INVARIANT_CONTROL) {
if (!this_cpu_has(HV_ACCESS_TSC_INVARIANT))
GUEST_ASSERT(this_cpu_has(X86_FEATURE_INVTSC));
else
GUEST_ASSERT(this_cpu_has(X86_FEATURE_INVTSC) ==
!!(msr_val & HV_INVARIANT_TSC_EXPOSED));
}
done:
GUEST_DONE();
}
static void guest_hcall(vm_vaddr_t pgs_gpa, struct hcall_data *hcall)
{
u64 res, input, output;
uint8_t vector;
GUEST_ASSERT_NE(hcall->control, 0);
wrmsr(HV_X64_MSR_GUEST_OS_ID, HYPERV_LINUX_OS_ID);
wrmsr(HV_X64_MSR_HYPERCALL, pgs_gpa);
if (!(hcall->control & HV_HYPERCALL_FAST_BIT)) {
input = pgs_gpa;
output = pgs_gpa + 4096;
} else {
input = output = 0;
}
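	/* Fast hypercalls pass their parameters in registers/XMM, so no input/output GPAs are needed. */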
vector = __hyperv_hypercall(hcall->control, input, output, &res);
if (hcall->ud_expected) {
__GUEST_ASSERT(vector == UD_VECTOR,
"Expected #UD for control '%u', got vector '0x%x'",
hcall->control, vector);
} else {
__GUEST_ASSERT(!vector,
"Expected no exception for control '%u', got vector '0x%x'",
hcall->control, vector);
GUEST_ASSERT_EQ(res, hcall->expect);
}
GUEST_DONE();
}
static void vcpu_reset_hv_cpuid(struct kvm_vcpu *vcpu)
{
/*
* Enable all supported Hyper-V features, then clear the leafs holding
* the features that will be tested one by one.
*/
vcpu_set_hv_cpuid(vcpu);
vcpu_clear_cpuid_entry(vcpu, HYPERV_CPUID_FEATURES);
vcpu_clear_cpuid_entry(vcpu, HYPERV_CPUID_ENLIGHTMENT_INFO);
vcpu_clear_cpuid_entry(vcpu, HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES);
}
static void guest_test_msrs_access(void)
{
struct kvm_cpuid2 *prev_cpuid = NULL;
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
struct ucall uc;
int stage = 0;
vm_vaddr_t msr_gva;
struct msr_data *msr;
bool has_invtsc = kvm_cpu_has(X86_FEATURE_INVTSC);
while (true) {
vm = vm_create_with_one_vcpu(&vcpu, guest_msr);
msr_gva = vm_vaddr_alloc_page(vm);
memset(addr_gva2hva(vm, msr_gva), 0x0, getpagesize());
msr = addr_gva2hva(vm, msr_gva);
vcpu_args_set(vcpu, 1, msr_gva);
vcpu_enable_cap(vcpu, KVM_CAP_HYPERV_ENFORCE_CPUID, 1);
if (!prev_cpuid) {
vcpu_reset_hv_cpuid(vcpu);
prev_cpuid = allocate_kvm_cpuid2(vcpu->cpuid->nent);
} else {
vcpu_init_cpuid(vcpu, prev_cpuid);
}
vm_init_descriptor_tables(vm);
vcpu_init_descriptor_tables(vcpu);
/* TODO: Make this entire test easier to maintain. */
if (stage >= 21)
vcpu_enable_cap(vcpu, KVM_CAP_HYPERV_SYNIC2, 0);
switch (stage) {
case 0:
/*
* Only available when Hyper-V identification is set
*/
msr->idx = HV_X64_MSR_GUEST_OS_ID;
msr->write = false;
msr->fault_expected = true;
break;
case 1:
msr->idx = HV_X64_MSR_HYPERCALL;
msr->write = false;
msr->fault_expected = true;
break;
case 2:
vcpu_set_cpuid_feature(vcpu, HV_MSR_HYPERCALL_AVAILABLE);
/*
* HV_X64_MSR_GUEST_OS_ID has to be written first to make
* HV_X64_MSR_HYPERCALL available.
*/
msr->idx = HV_X64_MSR_GUEST_OS_ID;
msr->write = true;
msr->write_val = HYPERV_LINUX_OS_ID;
msr->fault_expected = false;
break;
case 3:
msr->idx = HV_X64_MSR_GUEST_OS_ID;
msr->write = false;
msr->fault_expected = false;
break;
case 4:
msr->idx = HV_X64_MSR_HYPERCALL;
msr->write = false;
msr->fault_expected = false;
break;
case 5:
msr->idx = HV_X64_MSR_VP_RUNTIME;
msr->write = false;
msr->fault_expected = true;
break;
case 6:
vcpu_set_cpuid_feature(vcpu, HV_MSR_VP_RUNTIME_AVAILABLE);
msr->idx = HV_X64_MSR_VP_RUNTIME;
msr->write = false;
msr->fault_expected = false;
break;
case 7:
/* Read only */
msr->idx = HV_X64_MSR_VP_RUNTIME;
msr->write = true;
msr->write_val = 1;
msr->fault_expected = true;
break;
case 8:
msr->idx = HV_X64_MSR_TIME_REF_COUNT;
msr->write = false;
msr->fault_expected = true;
break;
case 9:
vcpu_set_cpuid_feature(vcpu, HV_MSR_TIME_REF_COUNT_AVAILABLE);
msr->idx = HV_X64_MSR_TIME_REF_COUNT;
msr->write = false;
msr->fault_expected = false;
break;
case 10:
/* Read only */
msr->idx = HV_X64_MSR_TIME_REF_COUNT;
msr->write = true;
msr->write_val = 1;
msr->fault_expected = true;
break;
case 11:
msr->idx = HV_X64_MSR_VP_INDEX;
msr->write = false;
msr->fault_expected = true;
break;
case 12:
vcpu_set_cpuid_feature(vcpu, HV_MSR_VP_INDEX_AVAILABLE);
msr->idx = HV_X64_MSR_VP_INDEX;
msr->write = false;
msr->fault_expected = false;
break;
case 13:
/* Read only */
msr->idx = HV_X64_MSR_VP_INDEX;
msr->write = true;
msr->write_val = 1;
msr->fault_expected = true;
break;
case 14:
msr->idx = HV_X64_MSR_RESET;
msr->write = false;
msr->fault_expected = true;
break;
case 15:
vcpu_set_cpuid_feature(vcpu, HV_MSR_RESET_AVAILABLE);
msr->idx = HV_X64_MSR_RESET;
msr->write = false;
msr->fault_expected = false;
break;
case 16:
msr->idx = HV_X64_MSR_RESET;
msr->write = true;
/*
* TODO: the test only writes '0' to HV_X64_MSR_RESET
* at the moment, writing some other value there will
* trigger real vCPU reset and the code is not prepared
* to handle it yet.
*/
msr->write_val = 0;
msr->fault_expected = false;
break;
case 17:
msr->idx = HV_X64_MSR_REFERENCE_TSC;
msr->write = false;
msr->fault_expected = true;
break;
case 18:
vcpu_set_cpuid_feature(vcpu, HV_MSR_REFERENCE_TSC_AVAILABLE);
msr->idx = HV_X64_MSR_REFERENCE_TSC;
msr->write = false;
msr->fault_expected = false;
break;
case 19:
msr->idx = HV_X64_MSR_REFERENCE_TSC;
msr->write = true;
msr->write_val = 0;
msr->fault_expected = false;
break;
case 20:
msr->idx = HV_X64_MSR_EOM;
msr->write = false;
msr->fault_expected = true;
break;
case 21:
/*
* Remains unavailable even with KVM_CAP_HYPERV_SYNIC2
* capability enabled and guest visible CPUID bit unset.
*/
msr->idx = HV_X64_MSR_EOM;
msr->write = false;
msr->fault_expected = true;
break;
case 22:
vcpu_set_cpuid_feature(vcpu, HV_MSR_SYNIC_AVAILABLE);
msr->idx = HV_X64_MSR_EOM;
msr->write = false;
msr->fault_expected = false;
break;
case 23:
msr->idx = HV_X64_MSR_EOM;
msr->write = true;
msr->write_val = 0;
msr->fault_expected = false;
break;
case 24:
msr->idx = HV_X64_MSR_STIMER0_CONFIG;
msr->write = false;
msr->fault_expected = true;
break;
case 25:
vcpu_set_cpuid_feature(vcpu, HV_MSR_SYNTIMER_AVAILABLE);
msr->idx = HV_X64_MSR_STIMER0_CONFIG;
msr->write = false;
msr->fault_expected = false;
break;
case 26:
msr->idx = HV_X64_MSR_STIMER0_CONFIG;
msr->write = true;
msr->write_val = 0;
msr->fault_expected = false;
break;
case 27:
/* Direct mode test */
msr->idx = HV_X64_MSR_STIMER0_CONFIG;
msr->write = true;
msr->write_val = 1 << 12;
msr->fault_expected = true;
break;
case 28:
vcpu_set_cpuid_feature(vcpu, HV_STIMER_DIRECT_MODE_AVAILABLE);
msr->idx = HV_X64_MSR_STIMER0_CONFIG;
msr->write = true;
msr->write_val = 1 << 12;
msr->fault_expected = false;
break;
case 29:
msr->idx = HV_X64_MSR_EOI;
msr->write = false;
msr->fault_expected = true;
break;
case 30:
vcpu_set_cpuid_feature(vcpu, HV_MSR_APIC_ACCESS_AVAILABLE);
msr->idx = HV_X64_MSR_EOI;
msr->write = true;
msr->write_val = 1;
msr->fault_expected = false;
break;
case 31:
msr->idx = HV_X64_MSR_TSC_FREQUENCY;
msr->write = false;
msr->fault_expected = true;
break;
case 32:
vcpu_set_cpuid_feature(vcpu, HV_ACCESS_FREQUENCY_MSRS);
msr->idx = HV_X64_MSR_TSC_FREQUENCY;
msr->write = false;
msr->fault_expected = false;
break;
case 33:
/* Read only */
msr->idx = HV_X64_MSR_TSC_FREQUENCY;
msr->write = true;
msr->write_val = 1;
msr->fault_expected = true;
break;
case 34:
msr->idx = HV_X64_MSR_REENLIGHTENMENT_CONTROL;
msr->write = false;
msr->fault_expected = true;
break;
case 35:
vcpu_set_cpuid_feature(vcpu, HV_ACCESS_REENLIGHTENMENT);
msr->idx = HV_X64_MSR_REENLIGHTENMENT_CONTROL;
msr->write = false;
msr->fault_expected = false;
break;
case 36:
msr->idx = HV_X64_MSR_REENLIGHTENMENT_CONTROL;
msr->write = true;
msr->write_val = 1;
msr->fault_expected = false;
break;
case 37:
/* Can only write '0' */
msr->idx = HV_X64_MSR_TSC_EMULATION_STATUS;
msr->write = true;
msr->write_val = 1;
msr->fault_expected = true;
break;
case 38:
msr->idx = HV_X64_MSR_CRASH_P0;
msr->write = false;
msr->fault_expected = true;
break;
case 39:
vcpu_set_cpuid_feature(vcpu, HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE);
msr->idx = HV_X64_MSR_CRASH_P0;
msr->write = false;
msr->fault_expected = false;
break;
case 40:
msr->idx = HV_X64_MSR_CRASH_P0;
msr->write = true;
msr->write_val = 1;
msr->fault_expected = false;
break;
case 41:
msr->idx = HV_X64_MSR_SYNDBG_STATUS;
msr->write = false;
msr->fault_expected = true;
break;
case 42:
vcpu_set_cpuid_feature(vcpu, HV_FEATURE_DEBUG_MSRS_AVAILABLE);
vcpu_set_cpuid_feature(vcpu, HV_X64_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING);
msr->idx = HV_X64_MSR_SYNDBG_STATUS;
msr->write = false;
msr->fault_expected = false;
break;
case 43:
msr->idx = HV_X64_MSR_SYNDBG_STATUS;
msr->write = true;
msr->write_val = 0;
msr->fault_expected = false;
break;
case 44:
/* MSR is not available when CPUID feature bit is unset */
if (!has_invtsc)
continue;
msr->idx = HV_X64_MSR_TSC_INVARIANT_CONTROL;
msr->write = false;
msr->fault_expected = true;
break;
case 45:
		/* MSR is available when CPUID feature bit is set */
if (!has_invtsc)
continue;
vcpu_set_cpuid_feature(vcpu, HV_ACCESS_TSC_INVARIANT);
msr->idx = HV_X64_MSR_TSC_INVARIANT_CONTROL;
msr->write = false;
msr->fault_expected = false;
break;
case 46:
/* Writing bits other than 0 is forbidden */
if (!has_invtsc)
continue;
msr->idx = HV_X64_MSR_TSC_INVARIANT_CONTROL;
msr->write = true;
msr->write_val = 0xdeadbeef;
msr->fault_expected = true;
break;
case 47:
/* Setting bit 0 enables the feature */
if (!has_invtsc)
continue;
msr->idx = HV_X64_MSR_TSC_INVARIANT_CONTROL;
msr->write = true;
msr->write_val = 1;
msr->fault_expected = false;
break;
default:
kvm_vm_free(vm);
return;
}
vcpu_set_cpuid(vcpu);
memcpy(prev_cpuid, vcpu->cpuid, kvm_cpuid2_size(vcpu->cpuid->nent));
pr_debug("Stage %d: testing msr: 0x%x for %s\n", stage,
msr->idx, msr->write ? "write" : "read");
vcpu_run(vcpu);
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
switch (get_ucall(vcpu, &uc)) {
case UCALL_ABORT:
REPORT_GUEST_ASSERT(uc);
return;
case UCALL_DONE:
break;
default:
TEST_FAIL("Unhandled ucall: %ld", uc.cmd);
return;
}
stage++;
kvm_vm_free(vm);
}
}
static void guest_test_hcalls_access(void)
{
struct kvm_cpuid2 *prev_cpuid = NULL;
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
struct ucall uc;
int stage = 0;
vm_vaddr_t hcall_page, hcall_params;
struct hcall_data *hcall;
while (true) {
vm = vm_create_with_one_vcpu(&vcpu, guest_hcall);
vm_init_descriptor_tables(vm);
vcpu_init_descriptor_tables(vcpu);
/* Hypercall input/output */
hcall_page = vm_vaddr_alloc_pages(vm, 2);
memset(addr_gva2hva(vm, hcall_page), 0x0, 2 * getpagesize());
hcall_params = vm_vaddr_alloc_page(vm);
memset(addr_gva2hva(vm, hcall_params), 0x0, getpagesize());
hcall = addr_gva2hva(vm, hcall_params);
vcpu_args_set(vcpu, 2, addr_gva2gpa(vm, hcall_page), hcall_params);
vcpu_enable_cap(vcpu, KVM_CAP_HYPERV_ENFORCE_CPUID, 1);
if (!prev_cpuid) {
vcpu_reset_hv_cpuid(vcpu);
prev_cpuid = allocate_kvm_cpuid2(vcpu->cpuid->nent);
} else {
vcpu_init_cpuid(vcpu, prev_cpuid);
}
switch (stage) {
case 0:
vcpu_set_cpuid_feature(vcpu, HV_MSR_HYPERCALL_AVAILABLE);
hcall->control = 0xbeef;
hcall->expect = HV_STATUS_INVALID_HYPERCALL_CODE;
break;
case 1:
hcall->control = HVCALL_POST_MESSAGE;
hcall->expect = HV_STATUS_ACCESS_DENIED;
break;
case 2:
vcpu_set_cpuid_feature(vcpu, HV_POST_MESSAGES);
hcall->control = HVCALL_POST_MESSAGE;
hcall->expect = HV_STATUS_INVALID_HYPERCALL_INPUT;
break;
case 3:
hcall->control = HVCALL_SIGNAL_EVENT;
hcall->expect = HV_STATUS_ACCESS_DENIED;
break;
case 4:
vcpu_set_cpuid_feature(vcpu, HV_SIGNAL_EVENTS);
hcall->control = HVCALL_SIGNAL_EVENT;
hcall->expect = HV_STATUS_INVALID_HYPERCALL_INPUT;
break;
case 5:
hcall->control = HVCALL_RESET_DEBUG_SESSION;
hcall->expect = HV_STATUS_INVALID_HYPERCALL_CODE;
break;
case 6:
vcpu_set_cpuid_feature(vcpu, HV_X64_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING);
hcall->control = HVCALL_RESET_DEBUG_SESSION;
hcall->expect = HV_STATUS_ACCESS_DENIED;
break;
case 7:
vcpu_set_cpuid_feature(vcpu, HV_DEBUGGING);
hcall->control = HVCALL_RESET_DEBUG_SESSION;
hcall->expect = HV_STATUS_OPERATION_DENIED;
break;
case 8:
hcall->control = HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE;
hcall->expect = HV_STATUS_ACCESS_DENIED;
break;
case 9:
vcpu_set_cpuid_feature(vcpu, HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED);
hcall->control = HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE;
hcall->expect = HV_STATUS_SUCCESS;
break;
case 10:
hcall->control = HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX;
hcall->expect = HV_STATUS_ACCESS_DENIED;
break;
case 11:
vcpu_set_cpuid_feature(vcpu, HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED);
hcall->control = HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX;
hcall->expect = HV_STATUS_SUCCESS;
break;
case 12:
hcall->control = HVCALL_SEND_IPI;
hcall->expect = HV_STATUS_ACCESS_DENIED;
break;
case 13:
vcpu_set_cpuid_feature(vcpu, HV_X64_CLUSTER_IPI_RECOMMENDED);
hcall->control = HVCALL_SEND_IPI;
hcall->expect = HV_STATUS_INVALID_HYPERCALL_INPUT;
break;
case 14:
/* Nothing in 'sparse banks' -> success */
hcall->control = HVCALL_SEND_IPI_EX;
hcall->expect = HV_STATUS_SUCCESS;
break;
case 15:
hcall->control = HVCALL_NOTIFY_LONG_SPIN_WAIT;
hcall->expect = HV_STATUS_ACCESS_DENIED;
break;
case 16:
vcpu_set_cpuid_feature(vcpu, HV_PV_SPINLOCKS_TEST);
hcall->control = HVCALL_NOTIFY_LONG_SPIN_WAIT;
hcall->expect = HV_STATUS_SUCCESS;
break;
case 17:
/* XMM fast hypercall */
hcall->control = HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE | HV_HYPERCALL_FAST_BIT;
hcall->ud_expected = true;
break;
case 18:
vcpu_set_cpuid_feature(vcpu, HV_X64_HYPERCALL_XMM_INPUT_AVAILABLE);
hcall->control = HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE | HV_HYPERCALL_FAST_BIT;
hcall->ud_expected = false;
hcall->expect = HV_STATUS_SUCCESS;
break;
case 19:
hcall->control = HV_EXT_CALL_QUERY_CAPABILITIES;
hcall->expect = HV_STATUS_ACCESS_DENIED;
break;
case 20:
vcpu_set_cpuid_feature(vcpu, HV_ENABLE_EXTENDED_HYPERCALLS);
hcall->control = HV_EXT_CALL_QUERY_CAPABILITIES | HV_HYPERCALL_FAST_BIT;
hcall->expect = HV_STATUS_INVALID_PARAMETER;
break;
case 21:
kvm_vm_free(vm);
return;
}
vcpu_set_cpuid(vcpu);
memcpy(prev_cpuid, vcpu->cpuid, kvm_cpuid2_size(vcpu->cpuid->nent));
pr_debug("Stage %d: testing hcall: 0x%lx\n", stage, hcall->control);
vcpu_run(vcpu);
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
switch (get_ucall(vcpu, &uc)) {
case UCALL_ABORT:
REPORT_GUEST_ASSERT(uc);
return;
case UCALL_DONE:
break;
default:
TEST_FAIL("Unhandled ucall: %ld", uc.cmd);
return;
}
stage++;
kvm_vm_free(vm);
}
}
int main(void)
{
pr_info("Testing access to Hyper-V specific MSRs\n");
guest_test_msrs_access();
pr_info("Testing access to Hyper-V hypercalls\n");
guest_test_hcalls_access();
}
| linux-master | tools/testing/selftests/kvm/x86_64/hyperv_features.c |
// SPDX-License-Identifier: GPL-2.0
#define _GNU_SOURCE
#define __EXPORTED_HEADERS__
#include <stdio.h>
#include <stdlib.h>
#include <linux/fcntl.h>
#include <linux/memfd.h>
#include <unistd.h>
#include <sys/syscall.h>
#include "common.h"
int hugetlbfs_test = 0;
/*
* Copied from mlock2-tests.c
*/
unsigned long default_huge_page_size(void)
{
unsigned long hps = 0;
char *line = NULL;
size_t linelen = 0;
FILE *f = fopen("/proc/meminfo", "r");
if (!f)
return 0;
while (getline(&line, &linelen, f) > 0) {
if (sscanf(line, "Hugepagesize: %lu kB", &hps) == 1) {
hps <<= 10;
break;
}
}
free(line);
fclose(f);
return hps;
}
int sys_memfd_create(const char *name, unsigned int flags)
{
if (hugetlbfs_test)
flags |= MFD_HUGETLB;
return syscall(__NR_memfd_create, name, flags);
}
| linux-master | tools/testing/selftests/memfd/common.c |
// SPDX-License-Identifier: GPL-2.0
#define _GNU_SOURCE
#define __EXPORTED_HEADERS__
#include <errno.h>
#include <inttypes.h>
#include <limits.h>
#include <linux/falloc.h>
#include <fcntl.h>
#include <linux/memfd.h>
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>
#include <signal.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/syscall.h>
#include <sys/wait.h>
#include <unistd.h>
#include <ctype.h>
#include "common.h"
#define MEMFD_STR "memfd:"
#define MEMFD_HUGE_STR "memfd-hugetlb:"
#define SHARED_FT_STR "(shared file-table)"
#define MFD_DEF_SIZE 8192
#define STACK_SIZE 65536
#define F_SEAL_EXEC 0x0020
#define F_WX_SEALS (F_SEAL_SHRINK | \
F_SEAL_GROW | \
F_SEAL_WRITE | \
F_SEAL_FUTURE_WRITE | \
F_SEAL_EXEC)
#define MFD_NOEXEC_SEAL 0x0008U
/*
* Default is not to test hugetlbfs
*/
static size_t mfd_def_size = MFD_DEF_SIZE;
static const char *memfd_str = MEMFD_STR;
static int newpid_thread_fn2(void *arg);
static void join_newpid_thread(pid_t pid);
static ssize_t fd2name(int fd, char *buf, size_t bufsize)
{
char buf1[PATH_MAX];
int size;
ssize_t nbytes;
size = snprintf(buf1, PATH_MAX, "/proc/self/fd/%d", fd);
if (size < 0) {
printf("snprintf(%d) failed on %m\n", fd);
abort();
}
/*
	 * reserve one byte for string termination.
*/
nbytes = readlink(buf1, buf, bufsize-1);
if (nbytes == -1) {
printf("readlink(%s) failed %m\n", buf1);
abort();
}
buf[nbytes] = '\0';
return nbytes;
}
static int mfd_assert_new(const char *name, loff_t sz, unsigned int flags)
{
int r, fd;
fd = sys_memfd_create(name, flags);
if (fd < 0) {
printf("memfd_create(\"%s\", %u) failed: %m\n",
name, flags);
abort();
}
r = ftruncate(fd, sz);
if (r < 0) {
printf("ftruncate(%llu) failed: %m\n", (unsigned long long)sz);
abort();
}
return fd;
}
static void sysctl_assert_write(const char *val)
{
int fd = open("/proc/sys/vm/memfd_noexec", O_WRONLY | O_CLOEXEC);
if (fd < 0) {
printf("open sysctl failed: %m\n");
abort();
}
if (write(fd, val, strlen(val)) < 0) {
printf("write sysctl %s failed: %m\n", val);
abort();
}
}
static void sysctl_fail_write(const char *val)
{
int fd = open("/proc/sys/vm/memfd_noexec", O_WRONLY | O_CLOEXEC);
if (fd < 0) {
printf("open sysctl failed: %m\n");
abort();
}
if (write(fd, val, strlen(val)) >= 0) {
printf("write sysctl %s succeeded, but failure expected\n",
val);
abort();
}
}
static void sysctl_assert_equal(const char *val)
{
char *p, buf[128] = {};
int fd = open("/proc/sys/vm/memfd_noexec", O_RDONLY | O_CLOEXEC);
if (fd < 0) {
printf("open sysctl failed: %m\n");
abort();
}
if (read(fd, buf, sizeof(buf)) < 0) {
printf("read sysctl failed: %m\n");
abort();
}
/* Strip trailing whitespace. */
p = buf;
while (!isspace(*p))
p++;
*p = '\0';
if (strcmp(buf, val) != 0) {
printf("unexpected sysctl value: expected %s, got %s\n", val, buf);
abort();
}
}
static int mfd_assert_reopen_fd(int fd_in)
{
int fd;
char path[100];
sprintf(path, "/proc/self/fd/%d", fd_in);
fd = open(path, O_RDWR);
if (fd < 0) {
printf("re-open of existing fd %d failed\n", fd_in);
abort();
}
return fd;
}
static void mfd_fail_new(const char *name, unsigned int flags)
{
int r;
r = sys_memfd_create(name, flags);
if (r >= 0) {
printf("memfd_create(\"%s\", %u) succeeded, but failure expected\n",
name, flags);
close(r);
abort();
}
}
static unsigned int mfd_assert_get_seals(int fd)
{
int r;
r = fcntl(fd, F_GET_SEALS);
if (r < 0) {
printf("GET_SEALS(%d) failed: %m\n", fd);
abort();
}
return (unsigned int)r;
}
static void mfd_assert_has_seals(int fd, unsigned int seals)
{
char buf[PATH_MAX];
int nbytes;
unsigned int s;
fd2name(fd, buf, PATH_MAX);
s = mfd_assert_get_seals(fd);
if (s != seals) {
printf("%u != %u = GET_SEALS(%s)\n", seals, s, buf);
abort();
}
}
static void mfd_assert_add_seals(int fd, unsigned int seals)
{
int r;
unsigned int s;
s = mfd_assert_get_seals(fd);
r = fcntl(fd, F_ADD_SEALS, seals);
if (r < 0) {
printf("ADD_SEALS(%d, %u -> %u) failed: %m\n", fd, s, seals);
abort();
}
}
static void mfd_fail_add_seals(int fd, unsigned int seals)
{
int r;
unsigned int s;
r = fcntl(fd, F_GET_SEALS);
if (r < 0)
s = 0;
else
s = (unsigned int)r;
r = fcntl(fd, F_ADD_SEALS, seals);
if (r >= 0) {
printf("ADD_SEALS(%d, %u -> %u) didn't fail as expected\n",
fd, s, seals);
abort();
}
}
static void mfd_assert_size(int fd, size_t size)
{
struct stat st;
int r;
r = fstat(fd, &st);
if (r < 0) {
printf("fstat(%d) failed: %m\n", fd);
abort();
} else if (st.st_size != size) {
printf("wrong file size %lld, but expected %lld\n",
(long long)st.st_size, (long long)size);
abort();
}
}
static int mfd_assert_dup(int fd)
{
int r;
r = dup(fd);
if (r < 0) {
printf("dup(%d) failed: %m\n", fd);
abort();
}
return r;
}
static void *mfd_assert_mmap_shared(int fd)
{
void *p;
p = mmap(NULL,
mfd_def_size,
PROT_READ | PROT_WRITE,
MAP_SHARED,
fd,
0);
if (p == MAP_FAILED) {
printf("mmap() failed: %m\n");
abort();
}
return p;
}
static void *mfd_assert_mmap_private(int fd)
{
void *p;
p = mmap(NULL,
mfd_def_size,
PROT_READ,
MAP_PRIVATE,
fd,
0);
if (p == MAP_FAILED) {
printf("mmap() failed: %m\n");
abort();
}
return p;
}
static int mfd_assert_open(int fd, int flags, mode_t mode)
{
char buf[512];
int r;
sprintf(buf, "/proc/self/fd/%d", fd);
r = open(buf, flags, mode);
if (r < 0) {
printf("open(%s) failed: %m\n", buf);
abort();
}
return r;
}
static void mfd_fail_open(int fd, int flags, mode_t mode)
{
char buf[512];
int r;
sprintf(buf, "/proc/self/fd/%d", fd);
r = open(buf, flags, mode);
if (r >= 0) {
printf("open(%s) didn't fail as expected\n", buf);
abort();
}
}
static void mfd_assert_read(int fd)
{
char buf[16];
void *p;
ssize_t l;
l = read(fd, buf, sizeof(buf));
if (l != sizeof(buf)) {
printf("read() failed: %m\n");
abort();
}
/* verify PROT_READ *is* allowed */
p = mmap(NULL,
mfd_def_size,
PROT_READ,
MAP_PRIVATE,
fd,
0);
if (p == MAP_FAILED) {
printf("mmap() failed: %m\n");
abort();
}
munmap(p, mfd_def_size);
/* verify MAP_PRIVATE is *always* allowed (even writable) */
p = mmap(NULL,
mfd_def_size,
PROT_READ | PROT_WRITE,
MAP_PRIVATE,
fd,
0);
if (p == MAP_FAILED) {
printf("mmap() failed: %m\n");
abort();
}
munmap(p, mfd_def_size);
}
/* Test that PROT_READ + MAP_SHARED mappings work. */
static void mfd_assert_read_shared(int fd)
{
void *p;
/* verify PROT_READ and MAP_SHARED *is* allowed */
p = mmap(NULL,
mfd_def_size,
PROT_READ,
MAP_SHARED,
fd,
0);
if (p == MAP_FAILED) {
printf("mmap() failed: %m\n");
abort();
}
munmap(p, mfd_def_size);
}
static void mfd_assert_fork_private_write(int fd)
{
int *p;
pid_t pid;
p = mmap(NULL,
mfd_def_size,
PROT_READ | PROT_WRITE,
MAP_PRIVATE,
fd,
0);
if (p == MAP_FAILED) {
printf("mmap() failed: %m\n");
abort();
}
p[0] = 22;
pid = fork();
if (pid == 0) {
p[0] = 33;
exit(0);
} else {
waitpid(pid, NULL, 0);
if (p[0] != 22) {
printf("MAP_PRIVATE copy-on-write failed: %m\n");
abort();
}
}
munmap(p, mfd_def_size);
}
static void mfd_assert_write(int fd)
{
ssize_t l;
void *p;
int r;
/*
	 * hugetlbfs does not support write, but we want to
* verify everything else here.
*/
if (!hugetlbfs_test) {
/* verify write() succeeds */
l = write(fd, "\0\0\0\0", 4);
if (l != 4) {
printf("write() failed: %m\n");
abort();
}
}
/* verify PROT_READ | PROT_WRITE is allowed */
p = mmap(NULL,
mfd_def_size,
PROT_READ | PROT_WRITE,
MAP_SHARED,
fd,
0);
if (p == MAP_FAILED) {
printf("mmap() failed: %m\n");
abort();
}
*(char *)p = 0;
munmap(p, mfd_def_size);
/* verify PROT_WRITE is allowed */
p = mmap(NULL,
mfd_def_size,
PROT_WRITE,
MAP_SHARED,
fd,
0);
if (p == MAP_FAILED) {
printf("mmap() failed: %m\n");
abort();
}
*(char *)p = 0;
munmap(p, mfd_def_size);
/* verify PROT_READ with MAP_SHARED is allowed and a following
* mprotect(PROT_WRITE) allows writing */
p = mmap(NULL,
mfd_def_size,
PROT_READ,
MAP_SHARED,
fd,
0);
if (p == MAP_FAILED) {
printf("mmap() failed: %m\n");
abort();
}
r = mprotect(p, mfd_def_size, PROT_READ | PROT_WRITE);
if (r < 0) {
printf("mprotect() failed: %m\n");
abort();
}
*(char *)p = 0;
munmap(p, mfd_def_size);
/* verify PUNCH_HOLE works */
r = fallocate(fd,
FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
0,
mfd_def_size);
if (r < 0) {
printf("fallocate(PUNCH_HOLE) failed: %m\n");
abort();
}
}
static void mfd_fail_write(int fd)
{
ssize_t l;
void *p;
int r;
/* verify write() fails */
l = write(fd, "data", 4);
if (l != -EPERM) {
printf("expected EPERM on write(), but got %d: %m\n", (int)l);
abort();
}
/* verify PROT_READ | PROT_WRITE is not allowed */
p = mmap(NULL,
mfd_def_size,
PROT_READ | PROT_WRITE,
MAP_SHARED,
fd,
0);
if (p != MAP_FAILED) {
printf("mmap() didn't fail as expected\n");
abort();
}
/* verify PROT_WRITE is not allowed */
p = mmap(NULL,
mfd_def_size,
PROT_WRITE,
MAP_SHARED,
fd,
0);
if (p != MAP_FAILED) {
printf("mmap() didn't fail as expected\n");
abort();
}
	/* Verify that a PROT_READ + MAP_SHARED mapping followed by
	 * mprotect(PROT_WRITE) is not allowed. Note that for r/w the kernel
	 * already prevents the mmap. */
p = mmap(NULL,
mfd_def_size,
PROT_READ,
MAP_SHARED,
fd,
0);
if (p != MAP_FAILED) {
r = mprotect(p, mfd_def_size, PROT_READ | PROT_WRITE);
if (r >= 0) {
printf("mmap()+mprotect() didn't fail as expected\n");
abort();
}
munmap(p, mfd_def_size);
}
/* verify PUNCH_HOLE fails */
r = fallocate(fd,
FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
0,
mfd_def_size);
if (r >= 0) {
printf("fallocate(PUNCH_HOLE) didn't fail as expected\n");
abort();
}
}
static void mfd_assert_shrink(int fd)
{
int r, fd2;
r = ftruncate(fd, mfd_def_size / 2);
if (r < 0) {
printf("ftruncate(SHRINK) failed: %m\n");
abort();
}
mfd_assert_size(fd, mfd_def_size / 2);
fd2 = mfd_assert_open(fd,
O_RDWR | O_CREAT | O_TRUNC,
S_IRUSR | S_IWUSR);
close(fd2);
mfd_assert_size(fd, 0);
}
static void mfd_fail_shrink(int fd)
{
int r;
r = ftruncate(fd, mfd_def_size / 2);
if (r >= 0) {
printf("ftruncate(SHRINK) didn't fail as expected\n");
abort();
}
mfd_fail_open(fd,
O_RDWR | O_CREAT | O_TRUNC,
S_IRUSR | S_IWUSR);
}
static void mfd_assert_grow(int fd)
{
int r;
r = ftruncate(fd, mfd_def_size * 2);
if (r < 0) {
printf("ftruncate(GROW) failed: %m\n");
abort();
}
mfd_assert_size(fd, mfd_def_size * 2);
r = fallocate(fd,
0,
0,
mfd_def_size * 4);
if (r < 0) {
printf("fallocate(ALLOC) failed: %m\n");
abort();
}
mfd_assert_size(fd, mfd_def_size * 4);
}
static void mfd_fail_grow(int fd)
{
int r;
r = ftruncate(fd, mfd_def_size * 2);
if (r >= 0) {
printf("ftruncate(GROW) didn't fail as expected\n");
abort();
}
r = fallocate(fd,
0,
0,
mfd_def_size * 4);
if (r >= 0) {
printf("fallocate(ALLOC) didn't fail as expected\n");
abort();
}
}
static void mfd_assert_grow_write(int fd)
{
static char *buf;
ssize_t l;
/* hugetlbfs does not support write */
if (hugetlbfs_test)
return;
buf = malloc(mfd_def_size * 8);
if (!buf) {
printf("malloc(%zu) failed: %m\n", mfd_def_size * 8);
abort();
}
l = pwrite(fd, buf, mfd_def_size * 8, 0);
if (l != (mfd_def_size * 8)) {
printf("pwrite() failed: %m\n");
abort();
}
mfd_assert_size(fd, mfd_def_size * 8);
}
static void mfd_fail_grow_write(int fd)
{
static char *buf;
ssize_t l;
/* hugetlbfs does not support write */
if (hugetlbfs_test)
return;
buf = malloc(mfd_def_size * 8);
if (!buf) {
printf("malloc(%zu) failed: %m\n", mfd_def_size * 8);
abort();
}
l = pwrite(fd, buf, mfd_def_size * 8, 0);
if (l == (mfd_def_size * 8)) {
printf("pwrite() didn't fail as expected\n");
abort();
}
}
static void mfd_assert_mode(int fd, int mode)
{
struct stat st;
char buf[PATH_MAX];
int nbytes;
fd2name(fd, buf, PATH_MAX);
if (fstat(fd, &st) < 0) {
printf("fstat(%s) failed: %m\n", buf);
abort();
}
if ((st.st_mode & 07777) != mode) {
printf("fstat(%s) wrong file mode 0%04o, but expected 0%04o\n",
buf, (int)st.st_mode & 07777, mode);
abort();
}
}
static void mfd_assert_chmod(int fd, int mode)
{
char buf[PATH_MAX];
int nbytes;
fd2name(fd, buf, PATH_MAX);
if (fchmod(fd, mode) < 0) {
printf("fchmod(%s, 0%04o) failed: %m\n", buf, mode);
abort();
}
mfd_assert_mode(fd, mode);
}
static void mfd_fail_chmod(int fd, int mode)
{
struct stat st;
char buf[PATH_MAX];
int nbytes;
fd2name(fd, buf, PATH_MAX);
if (fstat(fd, &st) < 0) {
printf("fstat(%s) failed: %m\n", buf);
abort();
}
if (fchmod(fd, mode) == 0) {
printf("fchmod(%s, 0%04o) didn't fail as expected\n",
buf, mode);
abort();
}
/* verify that file mode bits did not change */
mfd_assert_mode(fd, st.st_mode & 07777);
}
static int idle_thread_fn(void *arg)
{
sigset_t set;
int sig;
/* dummy waiter; SIGTERM terminates us anyway */
sigemptyset(&set);
sigaddset(&set, SIGTERM);
sigwait(&set, &sig);
return 0;
}
static pid_t spawn_thread(unsigned int flags, int (*fn)(void *), void *arg)
{
uint8_t *stack;
pid_t pid;
stack = malloc(STACK_SIZE);
if (!stack) {
printf("malloc(STACK_SIZE) failed: %m\n");
abort();
}
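	/*
	 * clone() takes the address of the top of the child's stack; the stack
	 * grows down on the architectures this test runs on.
	 */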
pid = clone(fn, stack + STACK_SIZE, SIGCHLD | flags, arg);
if (pid < 0) {
printf("clone() failed: %m\n");
abort();
}
return pid;
}
static void join_thread(pid_t pid)
{
int wstatus;
if (waitpid(pid, &wstatus, 0) < 0) {
printf("newpid thread: waitpid() failed: %m\n");
abort();
}
if (WIFEXITED(wstatus) && WEXITSTATUS(wstatus) != 0) {
printf("newpid thread: exited with non-zero error code %d\n",
WEXITSTATUS(wstatus));
abort();
}
if (WIFSIGNALED(wstatus)) {
printf("newpid thread: killed by signal %d\n",
WTERMSIG(wstatus));
abort();
}
}
static pid_t spawn_idle_thread(unsigned int flags)
{
return spawn_thread(flags, idle_thread_fn, NULL);
}
static void join_idle_thread(pid_t pid)
{
kill(pid, SIGTERM);
waitpid(pid, NULL, 0);
}
/*
* Test memfd_create() syscall
* Verify syscall-argument validation, including name checks, flag validation
* and more.
*/
static void test_create(void)
{
char buf[2048];
int fd;
printf("%s CREATE\n", memfd_str);
/* test NULL name */
mfd_fail_new(NULL, 0);
/* test over-long name (not zero-terminated) */
memset(buf, 0xff, sizeof(buf));
mfd_fail_new(buf, 0);
/* test over-long zero-terminated name */
memset(buf, 0xff, sizeof(buf));
buf[sizeof(buf) - 1] = 0;
mfd_fail_new(buf, 0);
/* verify "" is a valid name */
fd = mfd_assert_new("", 0, 0);
close(fd);
/* verify invalid O_* open flags */
mfd_fail_new("", 0x0100);
mfd_fail_new("", ~MFD_CLOEXEC);
mfd_fail_new("", ~MFD_ALLOW_SEALING);
mfd_fail_new("", ~0);
mfd_fail_new("", 0x80000000U);
/* verify EXEC and NOEXEC_SEAL can't both be set */
mfd_fail_new("", MFD_EXEC | MFD_NOEXEC_SEAL);
/* verify MFD_CLOEXEC is allowed */
fd = mfd_assert_new("", 0, MFD_CLOEXEC);
close(fd);
/* verify MFD_ALLOW_SEALING is allowed */
fd = mfd_assert_new("", 0, MFD_ALLOW_SEALING);
close(fd);
/* verify MFD_ALLOW_SEALING | MFD_CLOEXEC is allowed */
fd = mfd_assert_new("", 0, MFD_ALLOW_SEALING | MFD_CLOEXEC);
close(fd);
}
/*
* Test basic sealing
* A very basic sealing test to see whether setting/retrieving seals works.
*/
static void test_basic(void)
{
int fd;
printf("%s BASIC\n", memfd_str);
fd = mfd_assert_new("kern_memfd_basic",
mfd_def_size,
MFD_CLOEXEC | MFD_ALLOW_SEALING);
/* add basic seals */
mfd_assert_has_seals(fd, 0);
mfd_assert_add_seals(fd, F_SEAL_SHRINK |
F_SEAL_WRITE);
mfd_assert_has_seals(fd, F_SEAL_SHRINK |
F_SEAL_WRITE);
/* add them again */
mfd_assert_add_seals(fd, F_SEAL_SHRINK |
F_SEAL_WRITE);
mfd_assert_has_seals(fd, F_SEAL_SHRINK |
F_SEAL_WRITE);
/* add more seals and seal against sealing */
mfd_assert_add_seals(fd, F_SEAL_GROW | F_SEAL_SEAL);
mfd_assert_has_seals(fd, F_SEAL_SHRINK |
F_SEAL_GROW |
F_SEAL_WRITE |
F_SEAL_SEAL);
/* verify that sealing no longer works */
mfd_fail_add_seals(fd, F_SEAL_GROW);
mfd_fail_add_seals(fd, 0);
close(fd);
/* verify sealing does not work without MFD_ALLOW_SEALING */
fd = mfd_assert_new("kern_memfd_basic",
mfd_def_size,
MFD_CLOEXEC);
mfd_assert_has_seals(fd, F_SEAL_SEAL);
mfd_fail_add_seals(fd, F_SEAL_SHRINK |
F_SEAL_GROW |
F_SEAL_WRITE);
mfd_assert_has_seals(fd, F_SEAL_SEAL);
close(fd);
}
/*
* Test SEAL_WRITE
* Test whether SEAL_WRITE actually prevents modifications.
*/
static void test_seal_write(void)
{
int fd;
printf("%s SEAL-WRITE\n", memfd_str);
fd = mfd_assert_new("kern_memfd_seal_write",
mfd_def_size,
MFD_CLOEXEC | MFD_ALLOW_SEALING);
mfd_assert_has_seals(fd, 0);
mfd_assert_add_seals(fd, F_SEAL_WRITE);
mfd_assert_has_seals(fd, F_SEAL_WRITE);
mfd_assert_read(fd);
mfd_fail_write(fd);
mfd_assert_shrink(fd);
mfd_assert_grow(fd);
mfd_fail_grow_write(fd);
close(fd);
}
/*
* Test SEAL_FUTURE_WRITE
* Test whether SEAL_FUTURE_WRITE actually prevents modifications.
*/
static void test_seal_future_write(void)
{
int fd, fd2;
void *p;
printf("%s SEAL-FUTURE-WRITE\n", memfd_str);
fd = mfd_assert_new("kern_memfd_seal_future_write",
mfd_def_size,
MFD_CLOEXEC | MFD_ALLOW_SEALING);
p = mfd_assert_mmap_shared(fd);
mfd_assert_has_seals(fd, 0);
mfd_assert_add_seals(fd, F_SEAL_FUTURE_WRITE);
mfd_assert_has_seals(fd, F_SEAL_FUTURE_WRITE);
/* read should pass, writes should fail */
mfd_assert_read(fd);
mfd_assert_read_shared(fd);
mfd_fail_write(fd);
fd2 = mfd_assert_reopen_fd(fd);
/* read should pass, writes should still fail */
mfd_assert_read(fd2);
mfd_assert_read_shared(fd2);
mfd_fail_write(fd2);
mfd_assert_fork_private_write(fd);
munmap(p, mfd_def_size);
close(fd2);
close(fd);
}
/*
* Test SEAL_SHRINK
* Test whether SEAL_SHRINK actually prevents shrinking
*/
static void test_seal_shrink(void)
{
int fd;
printf("%s SEAL-SHRINK\n", memfd_str);
fd = mfd_assert_new("kern_memfd_seal_shrink",
mfd_def_size,
MFD_CLOEXEC | MFD_ALLOW_SEALING);
mfd_assert_has_seals(fd, 0);
mfd_assert_add_seals(fd, F_SEAL_SHRINK);
mfd_assert_has_seals(fd, F_SEAL_SHRINK);
mfd_assert_read(fd);
mfd_assert_write(fd);
mfd_fail_shrink(fd);
mfd_assert_grow(fd);
mfd_assert_grow_write(fd);
close(fd);
}
/*
* Test SEAL_GROW
* Test whether SEAL_GROW actually prevents growing
*/
static void test_seal_grow(void)
{
int fd;
printf("%s SEAL-GROW\n", memfd_str);
fd = mfd_assert_new("kern_memfd_seal_grow",
mfd_def_size,
MFD_CLOEXEC | MFD_ALLOW_SEALING);
mfd_assert_has_seals(fd, 0);
mfd_assert_add_seals(fd, F_SEAL_GROW);
mfd_assert_has_seals(fd, F_SEAL_GROW);
mfd_assert_read(fd);
mfd_assert_write(fd);
mfd_assert_shrink(fd);
mfd_fail_grow(fd);
mfd_fail_grow_write(fd);
close(fd);
}
/*
* Test SEAL_SHRINK | SEAL_GROW
* Test whether SEAL_SHRINK | SEAL_GROW actually prevents resizing
*/
static void test_seal_resize(void)
{
int fd;
printf("%s SEAL-RESIZE\n", memfd_str);
fd = mfd_assert_new("kern_memfd_seal_resize",
mfd_def_size,
MFD_CLOEXEC | MFD_ALLOW_SEALING);
mfd_assert_has_seals(fd, 0);
mfd_assert_add_seals(fd, F_SEAL_SHRINK | F_SEAL_GROW);
mfd_assert_has_seals(fd, F_SEAL_SHRINK | F_SEAL_GROW);
mfd_assert_read(fd);
mfd_assert_write(fd);
mfd_fail_shrink(fd);
mfd_fail_grow(fd);
mfd_fail_grow_write(fd);
close(fd);
}
/*
* Test SEAL_EXEC
* Test fd is created with exec and allow sealing.
* chmod() cannot change x bits after sealing.
*/
static void test_exec_seal(void)
{
int fd;
printf("%s SEAL-EXEC\n", memfd_str);
printf("%s Apply SEAL_EXEC\n", memfd_str);
fd = mfd_assert_new("kern_memfd_seal_exec",
mfd_def_size,
MFD_CLOEXEC | MFD_ALLOW_SEALING | MFD_EXEC);
mfd_assert_mode(fd, 0777);
mfd_assert_chmod(fd, 0644);
mfd_assert_has_seals(fd, 0);
mfd_assert_add_seals(fd, F_SEAL_EXEC);
mfd_assert_has_seals(fd, F_SEAL_EXEC);
mfd_assert_chmod(fd, 0600);
mfd_fail_chmod(fd, 0777);
mfd_fail_chmod(fd, 0670);
mfd_fail_chmod(fd, 0605);
mfd_fail_chmod(fd, 0700);
mfd_fail_chmod(fd, 0100);
mfd_assert_chmod(fd, 0666);
mfd_assert_write(fd);
close(fd);
printf("%s Apply ALL_SEALS\n", memfd_str);
fd = mfd_assert_new("kern_memfd_seal_exec",
mfd_def_size,
MFD_CLOEXEC | MFD_ALLOW_SEALING | MFD_EXEC);
mfd_assert_mode(fd, 0777);
mfd_assert_chmod(fd, 0700);
mfd_assert_has_seals(fd, 0);
mfd_assert_add_seals(fd, F_SEAL_EXEC);
mfd_assert_has_seals(fd, F_WX_SEALS);
mfd_fail_chmod(fd, 0711);
mfd_fail_chmod(fd, 0600);
mfd_fail_write(fd);
close(fd);
}
/*
* Test EXEC_NO_SEAL
* Test fd is created with exec and not allow sealing.
*/
static void test_exec_no_seal(void)
{
int fd;
printf("%s EXEC_NO_SEAL\n", memfd_str);
/* Create with EXEC but without ALLOW_SEALING */
fd = mfd_assert_new("kern_memfd_exec_no_sealing",
mfd_def_size,
MFD_CLOEXEC | MFD_EXEC);
mfd_assert_mode(fd, 0777);
mfd_assert_has_seals(fd, F_SEAL_SEAL);
mfd_assert_chmod(fd, 0666);
close(fd);
}
/*
* Test memfd_create with MFD_NOEXEC flag
*/
static void test_noexec_seal(void)
{
int fd;
printf("%s NOEXEC_SEAL\n", memfd_str);
/* Create with NOEXEC and ALLOW_SEALING */
fd = mfd_assert_new("kern_memfd_noexec",
mfd_def_size,
MFD_CLOEXEC | MFD_ALLOW_SEALING | MFD_NOEXEC_SEAL);
mfd_assert_mode(fd, 0666);
mfd_assert_has_seals(fd, F_SEAL_EXEC);
mfd_fail_chmod(fd, 0777);
close(fd);
/* Create with NOEXEC but without ALLOW_SEALING */
fd = mfd_assert_new("kern_memfd_noexec",
mfd_def_size,
MFD_CLOEXEC | MFD_NOEXEC_SEAL);
mfd_assert_mode(fd, 0666);
mfd_assert_has_seals(fd, F_SEAL_EXEC);
mfd_fail_chmod(fd, 0777);
close(fd);
}
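/*
 * The tests below exercise the vm.memfd_noexec pid-namespace sysctl. As
 * verified by the assertions: 0 leaves memfd_create() defaulting to an
 * executable memfd (MFD_EXEC behaviour), 1 defaults new memfds to
 * MFD_NOEXEC_SEAL, and 2 additionally rejects MFD_EXEC outright.
 */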
static void test_sysctl_sysctl0(void)
{
int fd;
sysctl_assert_equal("0");
fd = mfd_assert_new("kern_memfd_sysctl_0_dfl",
mfd_def_size,
MFD_CLOEXEC | MFD_ALLOW_SEALING);
mfd_assert_mode(fd, 0777);
mfd_assert_has_seals(fd, 0);
mfd_assert_chmod(fd, 0644);
close(fd);
}
static void test_sysctl_set_sysctl0(void)
{
sysctl_assert_write("0");
test_sysctl_sysctl0();
}
static void test_sysctl_sysctl1(void)
{
int fd;
sysctl_assert_equal("1");
fd = mfd_assert_new("kern_memfd_sysctl_1_dfl",
mfd_def_size,
MFD_CLOEXEC | MFD_ALLOW_SEALING);
mfd_assert_mode(fd, 0666);
mfd_assert_has_seals(fd, F_SEAL_EXEC);
mfd_fail_chmod(fd, 0777);
close(fd);
fd = mfd_assert_new("kern_memfd_sysctl_1_exec",
mfd_def_size,
MFD_CLOEXEC | MFD_EXEC | MFD_ALLOW_SEALING);
mfd_assert_mode(fd, 0777);
mfd_assert_has_seals(fd, 0);
mfd_assert_chmod(fd, 0644);
close(fd);
fd = mfd_assert_new("kern_memfd_sysctl_1_noexec",
mfd_def_size,
MFD_CLOEXEC | MFD_NOEXEC_SEAL | MFD_ALLOW_SEALING);
mfd_assert_mode(fd, 0666);
mfd_assert_has_seals(fd, F_SEAL_EXEC);
mfd_fail_chmod(fd, 0777);
close(fd);
}
static void test_sysctl_set_sysctl1(void)
{
sysctl_assert_write("1");
test_sysctl_sysctl1();
}
static void test_sysctl_sysctl2(void)
{
int fd;
sysctl_assert_equal("2");
fd = mfd_assert_new("kern_memfd_sysctl_2_dfl",
mfd_def_size,
MFD_CLOEXEC | MFD_ALLOW_SEALING);
mfd_assert_mode(fd, 0666);
mfd_assert_has_seals(fd, F_SEAL_EXEC);
mfd_fail_chmod(fd, 0777);
close(fd);
mfd_fail_new("kern_memfd_sysctl_2_exec",
MFD_CLOEXEC | MFD_EXEC | MFD_ALLOW_SEALING);
fd = mfd_assert_new("kern_memfd_sysctl_2_noexec",
mfd_def_size,
MFD_CLOEXEC | MFD_NOEXEC_SEAL | MFD_ALLOW_SEALING);
mfd_assert_mode(fd, 0666);
mfd_assert_has_seals(fd, F_SEAL_EXEC);
mfd_fail_chmod(fd, 0777);
close(fd);
}
static void test_sysctl_set_sysctl2(void)
{
sysctl_assert_write("2");
test_sysctl_sysctl2();
}
static int sysctl_simple_child(void *arg)
{
int fd;
int pid;
printf("%s sysctl 0\n", memfd_str);
test_sysctl_set_sysctl0();
printf("%s sysctl 1\n", memfd_str);
test_sysctl_set_sysctl1();
printf("%s sysctl 0\n", memfd_str);
test_sysctl_set_sysctl0();
printf("%s sysctl 2\n", memfd_str);
test_sysctl_set_sysctl2();
printf("%s sysctl 1\n", memfd_str);
test_sysctl_set_sysctl1();
printf("%s sysctl 0\n", memfd_str);
test_sysctl_set_sysctl0();
return 0;
}
/*
* Test sysctl
* A very basic test to make sure the core sysctl semantics work.
*/
static void test_sysctl_simple(void)
{
int pid = spawn_thread(CLONE_NEWPID, sysctl_simple_child, NULL);
join_thread(pid);
}
static int sysctl_nested(void *arg)
{
void (*fn)(void) = arg;
fn();
return 0;
}
static int sysctl_nested_wait(void *arg)
{
/* Wait for a SIGCONT. */
kill(getpid(), SIGSTOP);
return sysctl_nested(arg);
}
static void test_sysctl_sysctl1_failset(void)
{
sysctl_fail_write("0");
test_sysctl_sysctl1();
}
static void test_sysctl_sysctl2_failset(void)
{
sysctl_fail_write("1");
test_sysctl_sysctl2();
sysctl_fail_write("0");
test_sysctl_sysctl2();
}
static int sysctl_nested_child(void *arg)
{
int fd;
int pid;
printf("%s nested sysctl 0\n", memfd_str);
sysctl_assert_write("0");
/* A further nested pidns works the same. */
pid = spawn_thread(CLONE_NEWPID, sysctl_simple_child, NULL);
join_thread(pid);
printf("%s nested sysctl 1\n", memfd_str);
sysctl_assert_write("1");
/* Child inherits our setting. */
pid = spawn_thread(CLONE_NEWPID, sysctl_nested, test_sysctl_sysctl1);
join_thread(pid);
/* Child cannot raise the setting. */
pid = spawn_thread(CLONE_NEWPID, sysctl_nested,
test_sysctl_sysctl1_failset);
join_thread(pid);
/* Child can lower the setting. */
pid = spawn_thread(CLONE_NEWPID, sysctl_nested,
test_sysctl_set_sysctl2);
join_thread(pid);
/* Child lowering the setting has no effect on our setting. */
test_sysctl_sysctl1();
printf("%s nested sysctl 2\n", memfd_str);
sysctl_assert_write("2");
/* Child inherits our setting. */
pid = spawn_thread(CLONE_NEWPID, sysctl_nested, test_sysctl_sysctl2);
join_thread(pid);
/* Child cannot raise the setting. */
pid = spawn_thread(CLONE_NEWPID, sysctl_nested,
test_sysctl_sysctl2_failset);
join_thread(pid);
/* Verify that the rules are actually inherited after fork. */
printf("%s nested sysctl 0 -> 1 after fork\n", memfd_str);
sysctl_assert_write("0");
pid = spawn_thread(CLONE_NEWPID, sysctl_nested_wait,
test_sysctl_sysctl1_failset);
sysctl_assert_write("1");
kill(pid, SIGCONT);
join_thread(pid);
printf("%s nested sysctl 0 -> 2 after fork\n", memfd_str);
sysctl_assert_write("0");
pid = spawn_thread(CLONE_NEWPID, sysctl_nested_wait,
test_sysctl_sysctl2_failset);
sysctl_assert_write("2");
kill(pid, SIGCONT);
join_thread(pid);
/*
* Verify that the current effective setting is saved on fork, meaning
* that the parent lowering the sysctl doesn't affect already-forked
* children.
*/
printf("%s nested sysctl 2 -> 1 after fork\n", memfd_str);
sysctl_assert_write("2");
pid = spawn_thread(CLONE_NEWPID, sysctl_nested_wait,
test_sysctl_sysctl2);
sysctl_assert_write("1");
kill(pid, SIGCONT);
join_thread(pid);
printf("%s nested sysctl 2 -> 0 after fork\n", memfd_str);
sysctl_assert_write("2");
pid = spawn_thread(CLONE_NEWPID, sysctl_nested_wait,
test_sysctl_sysctl2);
sysctl_assert_write("0");
kill(pid, SIGCONT);
join_thread(pid);
printf("%s nested sysctl 1 -> 0 after fork\n", memfd_str);
sysctl_assert_write("1");
pid = spawn_thread(CLONE_NEWPID, sysctl_nested_wait,
test_sysctl_sysctl1);
sysctl_assert_write("0");
kill(pid, SIGCONT);
join_thread(pid);
return 0;
}
/*
* Test sysctl with nested pid namespaces
* Make sure that the sysctl nesting semantics work correctly.
*/
static void test_sysctl_nested(void)
{
int pid = spawn_thread(CLONE_NEWPID, sysctl_nested_child, NULL);
join_thread(pid);
}
/*
* Test sharing via dup()
* Test that seals are shared between dupped FDs and they're all equal.
*/
static void test_share_dup(char *banner, char *b_suffix)
{
int fd, fd2;
printf("%s %s %s\n", memfd_str, banner, b_suffix);
fd = mfd_assert_new("kern_memfd_share_dup",
mfd_def_size,
MFD_CLOEXEC | MFD_ALLOW_SEALING);
mfd_assert_has_seals(fd, 0);
fd2 = mfd_assert_dup(fd);
mfd_assert_has_seals(fd2, 0);
mfd_assert_add_seals(fd, F_SEAL_WRITE);
mfd_assert_has_seals(fd, F_SEAL_WRITE);
mfd_assert_has_seals(fd2, F_SEAL_WRITE);
mfd_assert_add_seals(fd2, F_SEAL_SHRINK);
mfd_assert_has_seals(fd, F_SEAL_WRITE | F_SEAL_SHRINK);
mfd_assert_has_seals(fd2, F_SEAL_WRITE | F_SEAL_SHRINK);
mfd_assert_add_seals(fd, F_SEAL_SEAL);
mfd_assert_has_seals(fd, F_SEAL_WRITE | F_SEAL_SHRINK | F_SEAL_SEAL);
mfd_assert_has_seals(fd2, F_SEAL_WRITE | F_SEAL_SHRINK | F_SEAL_SEAL);
mfd_fail_add_seals(fd, F_SEAL_GROW);
mfd_fail_add_seals(fd2, F_SEAL_GROW);
mfd_fail_add_seals(fd, F_SEAL_SEAL);
mfd_fail_add_seals(fd2, F_SEAL_SEAL);
close(fd2);
mfd_fail_add_seals(fd, F_SEAL_GROW);
close(fd);
}
/*
* Test sealing with active mmap()s
* Modifying seals is only allowed if no other mmap() refs exist.
*/
static void test_share_mmap(char *banner, char *b_suffix)
{
int fd;
void *p;
printf("%s %s %s\n", memfd_str, banner, b_suffix);
fd = mfd_assert_new("kern_memfd_share_mmap",
mfd_def_size,
MFD_CLOEXEC | MFD_ALLOW_SEALING);
mfd_assert_has_seals(fd, 0);
/* shared/writable ref prevents sealing WRITE, but allows others */
p = mfd_assert_mmap_shared(fd);
mfd_fail_add_seals(fd, F_SEAL_WRITE);
mfd_assert_has_seals(fd, 0);
mfd_assert_add_seals(fd, F_SEAL_SHRINK);
mfd_assert_has_seals(fd, F_SEAL_SHRINK);
munmap(p, mfd_def_size);
/* readable ref allows sealing */
p = mfd_assert_mmap_private(fd);
mfd_assert_add_seals(fd, F_SEAL_WRITE);
mfd_assert_has_seals(fd, F_SEAL_WRITE | F_SEAL_SHRINK);
munmap(p, mfd_def_size);
close(fd);
}
/*
* Test sealing with open(/proc/self/fd/%d)
* Via /proc we can get access to a separate file-context for the same memfd.
* This is *not* like dup(), but like a real separate open(). Make sure the
* semantics are as expected and we correctly check for RDONLY / WRONLY / RDWR.
*/
static void test_share_open(char *banner, char *b_suffix)
{
int fd, fd2;
printf("%s %s %s\n", memfd_str, banner, b_suffix);
fd = mfd_assert_new("kern_memfd_share_open",
mfd_def_size,
MFD_CLOEXEC | MFD_ALLOW_SEALING);
mfd_assert_has_seals(fd, 0);
fd2 = mfd_assert_open(fd, O_RDWR, 0);
mfd_assert_add_seals(fd, F_SEAL_WRITE);
mfd_assert_has_seals(fd, F_SEAL_WRITE);
mfd_assert_has_seals(fd2, F_SEAL_WRITE);
mfd_assert_add_seals(fd2, F_SEAL_SHRINK);
mfd_assert_has_seals(fd, F_SEAL_WRITE | F_SEAL_SHRINK);
mfd_assert_has_seals(fd2, F_SEAL_WRITE | F_SEAL_SHRINK);
close(fd);
fd = mfd_assert_open(fd2, O_RDONLY, 0);
mfd_fail_add_seals(fd, F_SEAL_SEAL);
mfd_assert_has_seals(fd, F_SEAL_WRITE | F_SEAL_SHRINK);
mfd_assert_has_seals(fd2, F_SEAL_WRITE | F_SEAL_SHRINK);
close(fd2);
fd2 = mfd_assert_open(fd, O_RDWR, 0);
mfd_assert_add_seals(fd2, F_SEAL_SEAL);
mfd_assert_has_seals(fd, F_SEAL_WRITE | F_SEAL_SHRINK | F_SEAL_SEAL);
mfd_assert_has_seals(fd2, F_SEAL_WRITE | F_SEAL_SHRINK | F_SEAL_SEAL);
close(fd2);
close(fd);
}
/*
* Test sharing via fork()
 * Test whether seal modifications work as expected with forked children.
*/
static void test_share_fork(char *banner, char *b_suffix)
{
int fd;
pid_t pid;
printf("%s %s %s\n", memfd_str, banner, b_suffix);
fd = mfd_assert_new("kern_memfd_share_fork",
mfd_def_size,
MFD_CLOEXEC | MFD_ALLOW_SEALING);
mfd_assert_has_seals(fd, 0);
pid = spawn_idle_thread(0);
mfd_assert_add_seals(fd, F_SEAL_SEAL);
mfd_assert_has_seals(fd, F_SEAL_SEAL);
mfd_fail_add_seals(fd, F_SEAL_WRITE);
mfd_assert_has_seals(fd, F_SEAL_SEAL);
join_idle_thread(pid);
mfd_fail_add_seals(fd, F_SEAL_WRITE);
mfd_assert_has_seals(fd, F_SEAL_SEAL);
close(fd);
}
int main(int argc, char **argv)
{
pid_t pid;
if (argc == 2) {
if (!strcmp(argv[1], "hugetlbfs")) {
unsigned long hpage_size = default_huge_page_size();
if (!hpage_size) {
printf("Unable to determine huge page size\n");
abort();
}
hugetlbfs_test = 1;
memfd_str = MEMFD_HUGE_STR;
mfd_def_size = hpage_size * 2;
} else {
printf("Unknown option: %s\n", argv[1]);
abort();
}
}
test_create();
test_basic();
test_exec_seal();
test_exec_no_seal();
test_noexec_seal();
test_seal_write();
test_seal_future_write();
test_seal_shrink();
test_seal_grow();
test_seal_resize();
test_sysctl_simple();
test_sysctl_nested();
test_share_dup("SHARE-DUP", "");
test_share_mmap("SHARE-MMAP", "");
test_share_open("SHARE-OPEN", "");
test_share_fork("SHARE-FORK", "");
/* Run test-suite in a multi-threaded environment with a shared
* file-table. */
pid = spawn_idle_thread(CLONE_FILES | CLONE_FS | CLONE_VM);
test_share_dup("SHARE-DUP", SHARED_FT_STR);
test_share_mmap("SHARE-MMAP", SHARED_FT_STR);
test_share_open("SHARE-OPEN", SHARED_FT_STR);
test_share_fork("SHARE-FORK", SHARED_FT_STR);
join_idle_thread(pid);
printf("memfd: DONE\n");
return 0;
}
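/*
 * Illustrative invocation (a sketch only, not part of the upstream file; the
 * binary name is whatever the selftests Makefile produces for this source):
 *   ./memfd_test             - exercise tmpfs-backed memfds (default path)
 *   ./memfd_test hugetlbfs   - exercise hugetlbfs-backed memfds, matching the
 *                              argv[1] handling in main() above
 */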
| linux-master | tools/testing/selftests/memfd/memfd_test.c |
// SPDX-License-Identifier: GPL-2.0
/*
* memfd test file-system
* This file uses FUSE to create a dummy file-system with only one file /memfd.
* This file is read-only and takes 1s per read.
*
* This file-system is used by the memfd test-cases to force the kernel to pin
* pages during reads(). Due to the 1s delay of this file-system, this is a
* nice way to test race-conditions against get_user_pages() in the kernel.
*
* We use direct_io==1 to force the kernel to use direct-IO for this
* file-system.
*/
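/*
 * Illustrative usage (a sketch only; the mount-point path below is made up):
 *   ./fuse_mnt ./mnt-memfd      - mount the dummy FS, exposing ./mnt-memfd/memfd
 *   cat ./mnt-memfd/memfd       - each read blocks ~1s and returns
 *                                 "memfd-example-content"
 *   fusermount -u ./mnt-memfd   - unmount when done
 */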
#define FUSE_USE_VERSION 26
#include <fuse.h>
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>
static const char memfd_content[] = "memfd-example-content";
static const char memfd_path[] = "/memfd";
static int memfd_getattr(const char *path, struct stat *st)
{
memset(st, 0, sizeof(*st));
if (!strcmp(path, "/")) {
st->st_mode = S_IFDIR | 0755;
st->st_nlink = 2;
} else if (!strcmp(path, memfd_path)) {
st->st_mode = S_IFREG | 0444;
st->st_nlink = 1;
st->st_size = strlen(memfd_content);
} else {
return -ENOENT;
}
return 0;
}
static int memfd_readdir(const char *path,
void *buf,
fuse_fill_dir_t filler,
off_t offset,
struct fuse_file_info *fi)
{
if (strcmp(path, "/"))
return -ENOENT;
filler(buf, ".", NULL, 0);
filler(buf, "..", NULL, 0);
filler(buf, memfd_path + 1, NULL, 0);
return 0;
}
static int memfd_open(const char *path, struct fuse_file_info *fi)
{
if (strcmp(path, memfd_path))
return -ENOENT;
if ((fi->flags & 3) != O_RDONLY)
return -EACCES;
/* force direct-IO */
fi->direct_io = 1;
return 0;
}
static int memfd_read(const char *path,
char *buf,
size_t size,
off_t offset,
struct fuse_file_info *fi)
{
size_t len;
if (strcmp(path, memfd_path) != 0)
return -ENOENT;
sleep(1);
len = strlen(memfd_content);
if (offset < len) {
if (offset + size > len)
size = len - offset;
memcpy(buf, memfd_content + offset, size);
} else {
size = 0;
}
return size;
}
static struct fuse_operations memfd_ops = {
.getattr = memfd_getattr,
.readdir = memfd_readdir,
.open = memfd_open,
.read = memfd_read,
};
int main(int argc, char *argv[])
{
return fuse_main(argc, argv, &memfd_ops, NULL);
}
| linux-master | tools/testing/selftests/memfd/fuse_mnt.c |
// SPDX-License-Identifier: GPL-2.0
/*
* memfd GUP test-case
* This tests memfd interactions with get_user_pages(). We require the
* fuse_mnt.c program to provide a fake direct-IO FUSE mount-point for us. This
* file-system delays _all_ reads by 1s and forces direct-IO. This means, any
* read() on files in that file-system will pin the receive-buffer pages for at
* least 1s via get_user_pages().
*
* We use this trick to race ADD_SEALS against a write on a memfd object. The
* ADD_SEALS must fail if the memfd pages are still pinned. Note that we use
* the read() syscall with our memory-mapped memfd object as receive buffer to
* force the kernel to write into our memfd object.
*/
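/*
 * Rough timeline of the race this test arranges (the times are the explicit
 * delays used below, not guarantees; sketch only):
 *   t=0      parent: read(fuse-fd, mmap'ed memfd, ...)      - FUSE delays 1s, so
 *                    the memfd pages stay pinned via get_user_pages()
 *   t=200ms  child:  fcntl(memfd, F_ADD_SEALS, F_SEAL_WRITE) - expected EBUSY
 *   t>1s     child:  fcntl(memfd, F_ADD_SEALS, F_SEAL_WRITE) - expected success
 */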
#define _GNU_SOURCE
#define __EXPORTED_HEADERS__
#include <errno.h>
#include <inttypes.h>
#include <limits.h>
#include <linux/falloc.h>
#include <fcntl.h>
#include <linux/memfd.h>
#include <linux/types.h>
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>
#include <signal.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/syscall.h>
#include <sys/wait.h>
#include <unistd.h>
#include "common.h"
#define MFD_DEF_SIZE 8192
#define STACK_SIZE 65536
static size_t mfd_def_size = MFD_DEF_SIZE;
static int mfd_assert_new(const char *name, loff_t sz, unsigned int flags)
{
int r, fd;
fd = sys_memfd_create(name, flags);
if (fd < 0) {
printf("memfd_create(\"%s\", %u) failed: %m\n",
name, flags);
abort();
}
r = ftruncate(fd, sz);
if (r < 0) {
printf("ftruncate(%llu) failed: %m\n", (unsigned long long)sz);
abort();
}
return fd;
}
static __u64 mfd_assert_get_seals(int fd)
{
long r;
r = fcntl(fd, F_GET_SEALS);
if (r < 0) {
printf("GET_SEALS(%d) failed: %m\n", fd);
abort();
}
return r;
}
static void mfd_assert_has_seals(int fd, __u64 seals)
{
__u64 s;
s = mfd_assert_get_seals(fd);
if (s != seals) {
printf("%llu != %llu = GET_SEALS(%d)\n",
(unsigned long long)seals, (unsigned long long)s, fd);
abort();
}
}
static void mfd_assert_add_seals(int fd, __u64 seals)
{
long r;
__u64 s;
s = mfd_assert_get_seals(fd);
r = fcntl(fd, F_ADD_SEALS, seals);
if (r < 0) {
printf("ADD_SEALS(%d, %llu -> %llu) failed: %m\n",
fd, (unsigned long long)s, (unsigned long long)seals);
abort();
}
}
static int mfd_busy_add_seals(int fd, __u64 seals)
{
long r;
__u64 s;
r = fcntl(fd, F_GET_SEALS);
if (r < 0)
s = 0;
else
s = r;
r = fcntl(fd, F_ADD_SEALS, seals);
if (r < 0 && errno != EBUSY) {
printf("ADD_SEALS(%d, %llu -> %llu) didn't fail as expected with EBUSY: %m\n",
fd, (unsigned long long)s, (unsigned long long)seals);
abort();
}
return r;
}
static void *mfd_assert_mmap_shared(int fd)
{
void *p;
p = mmap(NULL,
mfd_def_size,
PROT_READ | PROT_WRITE,
MAP_SHARED,
fd,
0);
if (p == MAP_FAILED) {
printf("mmap() failed: %m\n");
abort();
}
return p;
}
static void *mfd_assert_mmap_private(int fd)
{
void *p;
p = mmap(NULL,
mfd_def_size,
PROT_READ | PROT_WRITE,
MAP_PRIVATE,
fd,
0);
if (p == MAP_FAILED) {
printf("mmap() failed: %m\n");
abort();
}
return p;
}
static int global_mfd = -1;
static void *global_p = NULL;
static int sealing_thread_fn(void *arg)
{
int sig, r;
/*
* This thread first waits 200ms so any pending operation in the parent
* is correctly started. After that, it tries to seal @global_mfd as
* SEAL_WRITE. This _must_ fail as the parent thread has a read() into
* that memory mapped object still ongoing.
* We then wait one more second and try sealing again. This time it
* must succeed as there shouldn't be anyone else pinning the pages.
*/
/* wait 200ms for FUSE-request to be active */
usleep(200000);
	/* unmap the mapping before sealing to avoid i_mmap_writable failures */
munmap(global_p, mfd_def_size);
/* Try sealing the global file; expect EBUSY or success. Current
* kernels will never succeed, but in the future, kernels might
* implement page-replacements or other fancy ways to avoid racing
* writes. */
r = mfd_busy_add_seals(global_mfd, F_SEAL_WRITE);
if (r >= 0) {
printf("HURRAY! This kernel fixed GUP races!\n");
} else {
/* wait 1s more so the FUSE-request is done */
sleep(1);
/* try sealing the global file again */
mfd_assert_add_seals(global_mfd, F_SEAL_WRITE);
}
return 0;
}
static pid_t spawn_sealing_thread(void)
{
uint8_t *stack;
pid_t pid;
stack = malloc(STACK_SIZE);
if (!stack) {
printf("malloc(STACK_SIZE) failed: %m\n");
abort();
}
pid = clone(sealing_thread_fn,
stack + STACK_SIZE,
SIGCHLD | CLONE_FILES | CLONE_FS | CLONE_VM,
NULL);
if (pid < 0) {
printf("clone() failed: %m\n");
abort();
}
return pid;
}
static void join_sealing_thread(pid_t pid)
{
waitpid(pid, NULL, 0);
}
int main(int argc, char **argv)
{
char *zero;
int fd, mfd, r;
void *p;
int was_sealed;
pid_t pid;
if (argc < 2) {
printf("error: please pass path to file in fuse_mnt mount-point\n");
abort();
}
if (argc >= 3) {
if (!strcmp(argv[2], "hugetlbfs")) {
unsigned long hpage_size = default_huge_page_size();
if (!hpage_size) {
printf("Unable to determine huge page size\n");
abort();
}
hugetlbfs_test = 1;
mfd_def_size = hpage_size * 2;
} else {
printf("Unknown option: %s\n", argv[2]);
abort();
}
}
zero = calloc(sizeof(*zero), mfd_def_size);
/* open FUSE memfd file for GUP testing */
printf("opening: %s\n", argv[1]);
fd = open(argv[1], O_RDONLY | O_CLOEXEC);
if (fd < 0) {
printf("cannot open(\"%s\"): %m\n", argv[1]);
abort();
}
/* create new memfd-object */
mfd = mfd_assert_new("kern_memfd_fuse",
mfd_def_size,
MFD_CLOEXEC | MFD_ALLOW_SEALING);
/* mmap memfd-object for writing */
p = mfd_assert_mmap_shared(mfd);
/* pass mfd+mapping to a separate sealing-thread which tries to seal
* the memfd objects with SEAL_WRITE while we write into it */
global_mfd = mfd;
global_p = p;
pid = spawn_sealing_thread();
/* Use read() on the FUSE file to read into our memory-mapped memfd
* object. This races the other thread which tries to seal the
* memfd-object.
* If @fd is on the memfd-fake-FUSE-FS, the read() is delayed by 1s.
* This guarantees that the receive-buffer is pinned for 1s until the
* data is written into it. The racing ADD_SEALS should thus fail as
* the pages are still pinned. */
r = read(fd, p, mfd_def_size);
if (r < 0) {
printf("read() failed: %m\n");
abort();
} else if (!r) {
printf("unexpected EOF on read()\n");
abort();
}
was_sealed = mfd_assert_get_seals(mfd) & F_SEAL_WRITE;
/* Wait for sealing-thread to finish and verify that it
* successfully sealed the file after the second try. */
join_sealing_thread(pid);
mfd_assert_has_seals(mfd, F_SEAL_WRITE);
/* *IF* the memfd-object was sealed at the time our read() returned,
* then the kernel did a page-replacement or canceled the read() (or
* whatever magic it did..). In that case, the memfd object is still
* all zero.
	 * In case the memfd-object was *not* sealed, the read() was successful
	 * and the memfd object must *not* be all zero.
	 * Note that in real scenarios, there might be a mixture of both, but
	 * in this test-case, we have explicit 200ms delays which should be
* enough to avoid any in-flight writes. */
p = mfd_assert_mmap_private(mfd);
if (was_sealed && memcmp(p, zero, mfd_def_size)) {
printf("memfd sealed during read() but data not discarded\n");
abort();
} else if (!was_sealed && !memcmp(p, zero, mfd_def_size)) {
printf("memfd sealed after read() but data discarded\n");
abort();
}
close(mfd);
close(fd);
printf("fuse: DONE\n");
free(zero);
return 0;
}
| linux-master | tools/testing/selftests/memfd/fuse_test.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES
*
* These tests are "kernel integrity" tests. They are looking for kernel
 * WARN/OOPS/KASAN/etc splats triggered by kernel sanitizers & debugging
* features. It does not attempt to verify that the system calls are doing what
* they are supposed to do.
*
* The basic philosophy is to run a sequence of calls that will succeed and then
* sweep every failure injection point on that call chain to look for
* interesting things in error handling.
*
* This test is best run with:
* echo 1 > /proc/sys/kernel/panic_on_warn
 * if something is actually going wrong.
*/
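/*
 * Reminder of the fail-nth interface this test drives (see
 * Documentation/fault-injection/fault-injection.rst; sketch only):
 *   echo N > /proc/self/task/<tid>/fail-nth   - arm a failure for the N-th
 *                                               fault-able call in that task
 *   cat /proc/self/task/<tid>/fail-nth        - reads back "0" once the armed
 *                                               failure has actually fired
 */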
#include <fcntl.h>
#include <dirent.h>
#define __EXPORTED_HEADERS__
#include <linux/vfio.h>
#include "iommufd_utils.h"
static bool have_fault_injection;
static int writeat(int dfd, const char *fn, const char *val)
{
size_t val_len = strlen(val);
ssize_t res;
int fd;
fd = openat(dfd, fn, O_WRONLY);
if (fd == -1)
return -1;
res = write(fd, val, val_len);
assert(res == val_len);
close(fd);
return 0;
}
static __attribute__((constructor)) void setup_buffer(void)
{
PAGE_SIZE = sysconf(_SC_PAGE_SIZE);
BUFFER_SIZE = 2*1024*1024;
buffer = mmap(0, BUFFER_SIZE, PROT_READ | PROT_WRITE,
MAP_SHARED | MAP_ANONYMOUS, -1, 0);
}
/*
 * This sets up fault injection in a way that is useful for this test.
* It does not attempt to restore things back to how they were.
*/
static __attribute__((constructor)) void setup_fault_injection(void)
{
DIR *debugfs = opendir("/sys/kernel/debug/");
struct dirent *dent;
if (!debugfs)
return;
/* Allow any allocation call to be fault injected */
if (writeat(dirfd(debugfs), "failslab/ignore-gfp-wait", "N"))
return;
writeat(dirfd(debugfs), "fail_page_alloc/ignore-gfp-wait", "N");
writeat(dirfd(debugfs), "fail_page_alloc/ignore-gfp-highmem", "N");
while ((dent = readdir(debugfs))) {
char fn[300];
if (strncmp(dent->d_name, "fail", 4) != 0)
continue;
/* We are looking for kernel splats, quiet down the log */
snprintf(fn, sizeof(fn), "%s/verbose", dent->d_name);
writeat(dirfd(debugfs), fn, "0");
}
closedir(debugfs);
have_fault_injection = true;
}
struct fail_nth_state {
int proc_fd;
unsigned int iteration;
};
static void fail_nth_first(struct __test_metadata *_metadata,
struct fail_nth_state *nth_state)
{
char buf[300];
snprintf(buf, sizeof(buf), "/proc/self/task/%u/fail-nth", getpid());
nth_state->proc_fd = open(buf, O_RDWR);
ASSERT_NE(-1, nth_state->proc_fd);
}
static bool fail_nth_next(struct __test_metadata *_metadata,
struct fail_nth_state *nth_state,
int test_result)
{
static const char disable_nth[] = "0";
char buf[300];
/*
* This is just an arbitrary limit based on the current kernel
	 * situation. Changes in the kernel can dramatically change the number of
* required fault injection sites, so if this hits it doesn't
* necessarily mean a test failure, just that the limit has to be made
* bigger.
*/
ASSERT_GT(400, nth_state->iteration);
if (nth_state->iteration != 0) {
ssize_t res;
ssize_t res2;
buf[0] = 0;
/*
* Annoyingly disabling the nth can also fail. This means
* the test passed without triggering failure
*/
res = pread(nth_state->proc_fd, buf, sizeof(buf), 0);
if (res == -1 && errno == EFAULT) {
buf[0] = '1';
buf[1] = '\n';
res = 2;
}
res2 = pwrite(nth_state->proc_fd, disable_nth,
ARRAY_SIZE(disable_nth) - 1, 0);
if (res2 == -1 && errno == EFAULT) {
res2 = pwrite(nth_state->proc_fd, disable_nth,
ARRAY_SIZE(disable_nth) - 1, 0);
buf[0] = '1';
buf[1] = '\n';
}
ASSERT_EQ(ARRAY_SIZE(disable_nth) - 1, res2);
/* printf(" nth %u result=%d nth=%u\n", nth_state->iteration,
test_result, atoi(buf)); */
fflush(stdout);
ASSERT_LT(1, res);
if (res != 2 || buf[0] != '0' || buf[1] != '\n')
return false;
} else {
/* printf(" nth %u result=%d\n", nth_state->iteration,
test_result); */
}
nth_state->iteration++;
return true;
}
/*
* This is called during the test to start failure injection. It allows the test
* to do some setup that has already been swept and thus reduce the required
* iterations.
*/
void __fail_nth_enable(struct __test_metadata *_metadata,
struct fail_nth_state *nth_state)
{
char buf[300];
size_t len;
if (!nth_state->iteration)
return;
len = snprintf(buf, sizeof(buf), "%u", nth_state->iteration);
ASSERT_EQ(len, pwrite(nth_state->proc_fd, buf, len, 0));
}
#define fail_nth_enable() __fail_nth_enable(_metadata, _nth_state)
#define TEST_FAIL_NTH(fixture_name, name) \
static int test_nth_##name(struct __test_metadata *_metadata, \
FIXTURE_DATA(fixture_name) *self, \
const FIXTURE_VARIANT(fixture_name) \
*variant, \
struct fail_nth_state *_nth_state); \
TEST_F(fixture_name, name) \
{ \
struct fail_nth_state nth_state = {}; \
int test_result = 0; \
\
if (!have_fault_injection) \
SKIP(return, \
"fault injection is not enabled in the kernel"); \
fail_nth_first(_metadata, &nth_state); \
ASSERT_EQ(0, test_nth_##name(_metadata, self, variant, \
&nth_state)); \
while (fail_nth_next(_metadata, &nth_state, test_result)) { \
fixture_name##_teardown(_metadata, self, variant); \
fixture_name##_setup(_metadata, self, variant); \
test_result = test_nth_##name(_metadata, self, \
variant, &nth_state); \
}; \
ASSERT_EQ(0, test_result); \
} \
static int test_nth_##name( \
struct __test_metadata __attribute__((unused)) *_metadata, \
FIXTURE_DATA(fixture_name) __attribute__((unused)) *self, \
const FIXTURE_VARIANT(fixture_name) __attribute__((unused)) \
*variant, \
struct fail_nth_state *_nth_state)
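/*
 * Rough shape of what a TEST_FAIL_NTH() test body expands to (an illustrative
 * simplification of the macro above, not a literal expansion):
 *
 *	fail_nth_first(...);
 *	ASSERT_EQ(0, test_nth_name(...));         // clean pass, nothing armed yet
 *	while (fail_nth_next(...)) {              // arm failure N = 1, 2, 3, ...
 *		teardown(); setup();
 *		test_result = test_nth_name(...); // errors are fine, splats are the bug
 *	}
 *	ASSERT_EQ(0, test_result);
 */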
FIXTURE(basic_fail_nth)
{
int fd;
uint32_t access_id;
};
FIXTURE_SETUP(basic_fail_nth)
{
self->fd = -1;
self->access_id = 0;
}
FIXTURE_TEARDOWN(basic_fail_nth)
{
int rc;
if (self->access_id) {
/* The access FD holds the iommufd open until it closes */
rc = _test_cmd_destroy_access(self->access_id);
assert(rc == 0);
}
teardown_iommufd(self->fd, _metadata);
}
/* Cover ioas.c */
TEST_FAIL_NTH(basic_fail_nth, basic)
{
struct iommu_iova_range ranges[10];
uint32_t ioas_id;
__u64 iova;
fail_nth_enable();
self->fd = open("/dev/iommu", O_RDWR);
if (self->fd == -1)
return -1;
if (_test_ioctl_ioas_alloc(self->fd, &ioas_id))
return -1;
{
struct iommu_ioas_iova_ranges ranges_cmd = {
.size = sizeof(ranges_cmd),
.num_iovas = ARRAY_SIZE(ranges),
.ioas_id = ioas_id,
.allowed_iovas = (uintptr_t)ranges,
};
if (ioctl(self->fd, IOMMU_IOAS_IOVA_RANGES, &ranges_cmd))
return -1;
}
{
struct iommu_ioas_allow_iovas allow_cmd = {
.size = sizeof(allow_cmd),
.ioas_id = ioas_id,
.num_iovas = 1,
.allowed_iovas = (uintptr_t)ranges,
};
ranges[0].start = 16*1024;
ranges[0].last = BUFFER_SIZE + 16 * 1024 * 600 - 1;
if (ioctl(self->fd, IOMMU_IOAS_ALLOW_IOVAS, &allow_cmd))
return -1;
}
if (_test_ioctl_ioas_map(self->fd, ioas_id, buffer, BUFFER_SIZE, &iova,
IOMMU_IOAS_MAP_WRITEABLE |
IOMMU_IOAS_MAP_READABLE))
return -1;
{
struct iommu_ioas_copy copy_cmd = {
.size = sizeof(copy_cmd),
.flags = IOMMU_IOAS_MAP_WRITEABLE |
IOMMU_IOAS_MAP_READABLE,
.dst_ioas_id = ioas_id,
.src_ioas_id = ioas_id,
.src_iova = iova,
.length = sizeof(ranges),
};
		if (ioctl(self->fd, IOMMU_IOAS_COPY, &copy_cmd))
return -1;
}
if (_test_ioctl_ioas_unmap(self->fd, ioas_id, iova, BUFFER_SIZE,
NULL))
return -1;
/* Failure path of no IOVA to unmap */
_test_ioctl_ioas_unmap(self->fd, ioas_id, iova, BUFFER_SIZE, NULL);
return 0;
}
/* iopt_area_fill_domains() and iopt_area_fill_domain() */
TEST_FAIL_NTH(basic_fail_nth, map_domain)
{
uint32_t ioas_id;
__u32 stdev_id;
__u32 hwpt_id;
__u64 iova;
self->fd = open("/dev/iommu", O_RDWR);
if (self->fd == -1)
return -1;
if (_test_ioctl_ioas_alloc(self->fd, &ioas_id))
return -1;
if (_test_ioctl_set_temp_memory_limit(self->fd, 32))
return -1;
fail_nth_enable();
if (_test_cmd_mock_domain(self->fd, ioas_id, &stdev_id, &hwpt_id, NULL))
return -1;
if (_test_ioctl_ioas_map(self->fd, ioas_id, buffer, 262144, &iova,
IOMMU_IOAS_MAP_WRITEABLE |
IOMMU_IOAS_MAP_READABLE))
return -1;
if (_test_ioctl_destroy(self->fd, stdev_id))
return -1;
if (_test_cmd_mock_domain(self->fd, ioas_id, &stdev_id, &hwpt_id, NULL))
return -1;
return 0;
}
TEST_FAIL_NTH(basic_fail_nth, map_two_domains)
{
uint32_t ioas_id;
__u32 stdev_id2;
__u32 stdev_id;
__u32 hwpt_id2;
__u32 hwpt_id;
__u64 iova;
self->fd = open("/dev/iommu", O_RDWR);
if (self->fd == -1)
return -1;
if (_test_ioctl_ioas_alloc(self->fd, &ioas_id))
return -1;
if (_test_ioctl_set_temp_memory_limit(self->fd, 32))
return -1;
if (_test_cmd_mock_domain(self->fd, ioas_id, &stdev_id, &hwpt_id, NULL))
return -1;
fail_nth_enable();
if (_test_cmd_mock_domain(self->fd, ioas_id, &stdev_id2, &hwpt_id2,
NULL))
return -1;
if (_test_ioctl_ioas_map(self->fd, ioas_id, buffer, 262144, &iova,
IOMMU_IOAS_MAP_WRITEABLE |
IOMMU_IOAS_MAP_READABLE))
return -1;
if (_test_ioctl_destroy(self->fd, stdev_id))
return -1;
if (_test_ioctl_destroy(self->fd, stdev_id2))
return -1;
if (_test_cmd_mock_domain(self->fd, ioas_id, &stdev_id, &hwpt_id, NULL))
return -1;
if (_test_cmd_mock_domain(self->fd, ioas_id, &stdev_id2, &hwpt_id2,
NULL))
return -1;
return 0;
}
TEST_FAIL_NTH(basic_fail_nth, access_rw)
{
uint64_t tmp_big[4096];
uint32_t ioas_id;
uint16_t tmp[32];
__u64 iova;
self->fd = open("/dev/iommu", O_RDWR);
if (self->fd == -1)
return -1;
if (_test_ioctl_ioas_alloc(self->fd, &ioas_id))
return -1;
if (_test_ioctl_set_temp_memory_limit(self->fd, 32))
return -1;
if (_test_ioctl_ioas_map(self->fd, ioas_id, buffer, 262144, &iova,
IOMMU_IOAS_MAP_WRITEABLE |
IOMMU_IOAS_MAP_READABLE))
return -1;
fail_nth_enable();
if (_test_cmd_create_access(self->fd, ioas_id, &self->access_id, 0))
return -1;
{
struct iommu_test_cmd access_cmd = {
.size = sizeof(access_cmd),
.op = IOMMU_TEST_OP_ACCESS_RW,
.id = self->access_id,
.access_rw = { .iova = iova,
.length = sizeof(tmp),
.uptr = (uintptr_t)tmp },
};
// READ
if (ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_RW),
&access_cmd))
return -1;
access_cmd.access_rw.flags = MOCK_ACCESS_RW_WRITE;
if (ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_RW),
&access_cmd))
return -1;
access_cmd.access_rw.flags = MOCK_ACCESS_RW_SLOW_PATH;
if (ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_RW),
&access_cmd))
return -1;
access_cmd.access_rw.flags = MOCK_ACCESS_RW_SLOW_PATH |
MOCK_ACCESS_RW_WRITE;
if (ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_RW),
&access_cmd))
return -1;
}
{
struct iommu_test_cmd access_cmd = {
.size = sizeof(access_cmd),
.op = IOMMU_TEST_OP_ACCESS_RW,
.id = self->access_id,
.access_rw = { .iova = iova,
.flags = MOCK_ACCESS_RW_SLOW_PATH,
.length = sizeof(tmp_big),
.uptr = (uintptr_t)tmp_big },
};
if (ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_RW),
&access_cmd))
return -1;
}
if (_test_cmd_destroy_access(self->access_id))
return -1;
self->access_id = 0;
return 0;
}
/* pages.c access functions */
TEST_FAIL_NTH(basic_fail_nth, access_pin)
{
uint32_t access_pages_id;
uint32_t ioas_id;
__u64 iova;
self->fd = open("/dev/iommu", O_RDWR);
if (self->fd == -1)
return -1;
if (_test_ioctl_ioas_alloc(self->fd, &ioas_id))
return -1;
if (_test_ioctl_set_temp_memory_limit(self->fd, 32))
return -1;
if (_test_ioctl_ioas_map(self->fd, ioas_id, buffer, BUFFER_SIZE, &iova,
IOMMU_IOAS_MAP_WRITEABLE |
IOMMU_IOAS_MAP_READABLE))
return -1;
if (_test_cmd_create_access(self->fd, ioas_id, &self->access_id,
MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES))
return -1;
fail_nth_enable();
{
struct iommu_test_cmd access_cmd = {
.size = sizeof(access_cmd),
.op = IOMMU_TEST_OP_ACCESS_PAGES,
.id = self->access_id,
.access_pages = { .iova = iova,
.length = BUFFER_SIZE,
.uptr = (uintptr_t)buffer },
};
if (ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_RW),
&access_cmd))
return -1;
access_pages_id = access_cmd.access_pages.out_access_pages_id;
}
if (_test_cmd_destroy_access_pages(self->fd, self->access_id,
access_pages_id))
return -1;
if (_test_cmd_destroy_access(self->access_id))
return -1;
self->access_id = 0;
return 0;
}
/* iopt_pages_fill_xarray() */
TEST_FAIL_NTH(basic_fail_nth, access_pin_domain)
{
uint32_t access_pages_id;
uint32_t ioas_id;
__u32 stdev_id;
__u32 hwpt_id;
__u64 iova;
self->fd = open("/dev/iommu", O_RDWR);
if (self->fd == -1)
return -1;
if (_test_ioctl_ioas_alloc(self->fd, &ioas_id))
return -1;
if (_test_ioctl_set_temp_memory_limit(self->fd, 32))
return -1;
if (_test_cmd_mock_domain(self->fd, ioas_id, &stdev_id, &hwpt_id, NULL))
return -1;
if (_test_ioctl_ioas_map(self->fd, ioas_id, buffer, BUFFER_SIZE, &iova,
IOMMU_IOAS_MAP_WRITEABLE |
IOMMU_IOAS_MAP_READABLE))
return -1;
if (_test_cmd_create_access(self->fd, ioas_id, &self->access_id,
MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES))
return -1;
fail_nth_enable();
{
struct iommu_test_cmd access_cmd = {
.size = sizeof(access_cmd),
.op = IOMMU_TEST_OP_ACCESS_PAGES,
.id = self->access_id,
.access_pages = { .iova = iova,
.length = BUFFER_SIZE,
.uptr = (uintptr_t)buffer },
};
if (ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_RW),
&access_cmd))
return -1;
access_pages_id = access_cmd.access_pages.out_access_pages_id;
}
if (_test_cmd_destroy_access_pages(self->fd, self->access_id,
access_pages_id))
return -1;
if (_test_cmd_destroy_access(self->access_id))
return -1;
self->access_id = 0;
if (_test_ioctl_destroy(self->fd, stdev_id))
return -1;
return 0;
}
/* device.c */
TEST_FAIL_NTH(basic_fail_nth, device)
{
struct iommu_test_hw_info info;
uint32_t ioas_id;
uint32_t ioas_id2;
uint32_t stdev_id;
uint32_t idev_id;
uint32_t hwpt_id;
__u64 iova;
self->fd = open("/dev/iommu", O_RDWR);
if (self->fd == -1)
return -1;
if (_test_ioctl_ioas_alloc(self->fd, &ioas_id))
return -1;
if (_test_ioctl_ioas_alloc(self->fd, &ioas_id2))
return -1;
iova = MOCK_APERTURE_START;
if (_test_ioctl_ioas_map(self->fd, ioas_id, buffer, PAGE_SIZE, &iova,
IOMMU_IOAS_MAP_FIXED_IOVA |
IOMMU_IOAS_MAP_WRITEABLE |
IOMMU_IOAS_MAP_READABLE))
return -1;
if (_test_ioctl_ioas_map(self->fd, ioas_id2, buffer, PAGE_SIZE, &iova,
IOMMU_IOAS_MAP_FIXED_IOVA |
IOMMU_IOAS_MAP_WRITEABLE |
IOMMU_IOAS_MAP_READABLE))
return -1;
fail_nth_enable();
if (_test_cmd_mock_domain(self->fd, ioas_id, &stdev_id, NULL,
&idev_id))
return -1;
if (_test_cmd_get_hw_info(self->fd, idev_id, &info, sizeof(info)))
return -1;
if (_test_cmd_hwpt_alloc(self->fd, idev_id, ioas_id, &hwpt_id))
return -1;
if (_test_cmd_mock_domain_replace(self->fd, stdev_id, ioas_id2, NULL))
return -1;
if (_test_cmd_mock_domain_replace(self->fd, stdev_id, hwpt_id, NULL))
return -1;
return 0;
}
TEST_HARNESS_MAIN
| linux-master | tools/testing/selftests/iommu/iommufd_fail_nth.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES */
#include <stdlib.h>
#include <sys/mman.h>
#include <sys/eventfd.h>
#define __EXPORTED_HEADERS__
#include <linux/vfio.h>
#include "iommufd_utils.h"
static unsigned long HUGEPAGE_SIZE;
#define MOCK_PAGE_SIZE (PAGE_SIZE / 2)
static unsigned long get_huge_page_size(void)
{
char buf[80];
int ret;
int fd;
fd = open("/sys/kernel/mm/transparent_hugepage/hpage_pmd_size",
O_RDONLY);
if (fd < 0)
return 2 * 1024 * 1024;
ret = read(fd, buf, sizeof(buf));
close(fd);
if (ret <= 0 || ret == sizeof(buf))
return 2 * 1024 * 1024;
buf[ret] = 0;
return strtoul(buf, NULL, 10);
}
static __attribute__((constructor)) void setup_sizes(void)
{
void *vrc;
int rc;
PAGE_SIZE = sysconf(_SC_PAGE_SIZE);
HUGEPAGE_SIZE = get_huge_page_size();
BUFFER_SIZE = PAGE_SIZE * 16;
rc = posix_memalign(&buffer, HUGEPAGE_SIZE, BUFFER_SIZE);
assert(!rc);
assert(buffer);
assert((uintptr_t)buffer % HUGEPAGE_SIZE == 0);
vrc = mmap(buffer, BUFFER_SIZE, PROT_READ | PROT_WRITE,
MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
assert(vrc == buffer);
}
FIXTURE(iommufd)
{
int fd;
};
FIXTURE_SETUP(iommufd)
{
self->fd = open("/dev/iommu", O_RDWR);
ASSERT_NE(-1, self->fd);
}
FIXTURE_TEARDOWN(iommufd)
{
teardown_iommufd(self->fd, _metadata);
}
TEST_F(iommufd, simple_close)
{
}
TEST_F(iommufd, cmd_fail)
{
struct iommu_destroy cmd = { .size = sizeof(cmd), .id = 0 };
/* object id is invalid */
EXPECT_ERRNO(ENOENT, _test_ioctl_destroy(self->fd, 0));
/* Bad pointer */
EXPECT_ERRNO(EFAULT, ioctl(self->fd, IOMMU_DESTROY, NULL));
/* Unknown ioctl */
EXPECT_ERRNO(ENOTTY,
ioctl(self->fd, _IO(IOMMUFD_TYPE, IOMMUFD_CMD_BASE - 1),
&cmd));
}
TEST_F(iommufd, cmd_length)
{
#define TEST_LENGTH(_struct, _ioctl) \
{ \
struct { \
struct _struct cmd; \
uint8_t extra; \
} cmd = { .cmd = { .size = sizeof(struct _struct) - 1 }, \
.extra = UINT8_MAX }; \
int old_errno; \
int rc; \
\
EXPECT_ERRNO(EINVAL, ioctl(self->fd, _ioctl, &cmd)); \
cmd.cmd.size = sizeof(struct _struct) + 1; \
EXPECT_ERRNO(E2BIG, ioctl(self->fd, _ioctl, &cmd)); \
cmd.cmd.size = sizeof(struct _struct); \
rc = ioctl(self->fd, _ioctl, &cmd); \
old_errno = errno; \
cmd.cmd.size = sizeof(struct _struct) + 1; \
cmd.extra = 0; \
if (rc) { \
EXPECT_ERRNO(old_errno, \
ioctl(self->fd, _ioctl, &cmd)); \
} else { \
ASSERT_EQ(0, ioctl(self->fd, _ioctl, &cmd)); \
} \
}
TEST_LENGTH(iommu_destroy, IOMMU_DESTROY);
TEST_LENGTH(iommu_hw_info, IOMMU_GET_HW_INFO);
TEST_LENGTH(iommu_ioas_alloc, IOMMU_IOAS_ALLOC);
TEST_LENGTH(iommu_ioas_iova_ranges, IOMMU_IOAS_IOVA_RANGES);
TEST_LENGTH(iommu_ioas_allow_iovas, IOMMU_IOAS_ALLOW_IOVAS);
TEST_LENGTH(iommu_ioas_map, IOMMU_IOAS_MAP);
TEST_LENGTH(iommu_ioas_copy, IOMMU_IOAS_COPY);
TEST_LENGTH(iommu_ioas_unmap, IOMMU_IOAS_UNMAP);
TEST_LENGTH(iommu_option, IOMMU_OPTION);
TEST_LENGTH(iommu_vfio_ioas, IOMMU_VFIO_IOAS);
#undef TEST_LENGTH
}
TEST_F(iommufd, cmd_ex_fail)
{
struct {
struct iommu_destroy cmd;
__u64 future;
} cmd = { .cmd = { .size = sizeof(cmd), .id = 0 } };
/* object id is invalid and command is longer */
EXPECT_ERRNO(ENOENT, ioctl(self->fd, IOMMU_DESTROY, &cmd));
/* future area is non-zero */
cmd.future = 1;
EXPECT_ERRNO(E2BIG, ioctl(self->fd, IOMMU_DESTROY, &cmd));
/* Original command "works" */
cmd.cmd.size = sizeof(cmd.cmd);
EXPECT_ERRNO(ENOENT, ioctl(self->fd, IOMMU_DESTROY, &cmd));
/* Short command fails */
cmd.cmd.size = sizeof(cmd.cmd) - 1;
EXPECT_ERRNO(EINVAL, ioctl(self->fd, IOMMU_DESTROY, &cmd));
}
TEST_F(iommufd, global_options)
{
struct iommu_option cmd = {
.size = sizeof(cmd),
.option_id = IOMMU_OPTION_RLIMIT_MODE,
.op = IOMMU_OPTION_OP_GET,
.val64 = 1,
};
cmd.option_id = IOMMU_OPTION_RLIMIT_MODE;
ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
ASSERT_EQ(0, cmd.val64);
/* This requires root */
cmd.op = IOMMU_OPTION_OP_SET;
cmd.val64 = 1;
ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
cmd.val64 = 2;
EXPECT_ERRNO(EINVAL, ioctl(self->fd, IOMMU_OPTION, &cmd));
cmd.op = IOMMU_OPTION_OP_GET;
ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
ASSERT_EQ(1, cmd.val64);
cmd.op = IOMMU_OPTION_OP_SET;
cmd.val64 = 0;
ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
cmd.op = IOMMU_OPTION_OP_GET;
cmd.option_id = IOMMU_OPTION_HUGE_PAGES;
EXPECT_ERRNO(ENOENT, ioctl(self->fd, IOMMU_OPTION, &cmd));
cmd.op = IOMMU_OPTION_OP_SET;
EXPECT_ERRNO(ENOENT, ioctl(self->fd, IOMMU_OPTION, &cmd));
}
FIXTURE(iommufd_ioas)
{
int fd;
uint32_t ioas_id;
uint32_t stdev_id;
uint32_t hwpt_id;
uint32_t device_id;
uint64_t base_iova;
};
FIXTURE_VARIANT(iommufd_ioas)
{
unsigned int mock_domains;
unsigned int memory_limit;
};
FIXTURE_SETUP(iommufd_ioas)
{
unsigned int i;
self->fd = open("/dev/iommu", O_RDWR);
ASSERT_NE(-1, self->fd);
test_ioctl_ioas_alloc(&self->ioas_id);
if (!variant->memory_limit) {
test_ioctl_set_default_memory_limit();
} else {
test_ioctl_set_temp_memory_limit(variant->memory_limit);
}
for (i = 0; i != variant->mock_domains; i++) {
test_cmd_mock_domain(self->ioas_id, &self->stdev_id,
&self->hwpt_id, &self->device_id);
self->base_iova = MOCK_APERTURE_START;
}
}
FIXTURE_TEARDOWN(iommufd_ioas)
{
test_ioctl_set_default_memory_limit();
teardown_iommufd(self->fd, _metadata);
}
FIXTURE_VARIANT_ADD(iommufd_ioas, no_domain)
{
};
FIXTURE_VARIANT_ADD(iommufd_ioas, mock_domain)
{
.mock_domains = 1,
};
FIXTURE_VARIANT_ADD(iommufd_ioas, two_mock_domain)
{
.mock_domains = 2,
};
FIXTURE_VARIANT_ADD(iommufd_ioas, mock_domain_limit)
{
.mock_domains = 1,
.memory_limit = 16,
};
TEST_F(iommufd_ioas, ioas_auto_destroy)
{
}
TEST_F(iommufd_ioas, ioas_destroy)
{
if (self->stdev_id) {
/* IOAS cannot be freed while a device has a HWPT using it */
EXPECT_ERRNO(EBUSY,
_test_ioctl_destroy(self->fd, self->ioas_id));
} else {
/* Can allocate and manually free an IOAS table */
test_ioctl_destroy(self->ioas_id);
}
}
TEST_F(iommufd_ioas, hwpt_attach)
{
/* Create a device attached directly to a hwpt */
if (self->stdev_id) {
test_cmd_mock_domain(self->hwpt_id, NULL, NULL, NULL);
} else {
test_err_mock_domain(ENOENT, self->hwpt_id, NULL, NULL);
}
}
TEST_F(iommufd_ioas, ioas_area_destroy)
{
/* Adding an area does not change ability to destroy */
test_ioctl_ioas_map_fixed(buffer, PAGE_SIZE, self->base_iova);
if (self->stdev_id)
EXPECT_ERRNO(EBUSY,
_test_ioctl_destroy(self->fd, self->ioas_id));
else
test_ioctl_destroy(self->ioas_id);
}
TEST_F(iommufd_ioas, ioas_area_auto_destroy)
{
int i;
/* Can allocate and automatically free an IOAS table with many areas */
for (i = 0; i != 10; i++) {
test_ioctl_ioas_map_fixed(buffer, PAGE_SIZE,
self->base_iova + i * PAGE_SIZE);
}
}
TEST_F(iommufd_ioas, get_hw_info)
{
struct iommu_test_hw_info buffer_exact;
struct iommu_test_hw_info_buffer_larger {
struct iommu_test_hw_info info;
uint64_t trailing_bytes;
} buffer_larger;
struct iommu_test_hw_info_buffer_smaller {
__u32 flags;
} buffer_smaller;
if (self->device_id) {
/* Provide a zero-size user_buffer */
test_cmd_get_hw_info(self->device_id, NULL, 0);
/* Provide a user_buffer with exact size */
test_cmd_get_hw_info(self->device_id, &buffer_exact, sizeof(buffer_exact));
/*
		 * Provide a user_buffer with size larger than the exact size to check
		 * that the kernel zeroes the trailing bytes.
*/
test_cmd_get_hw_info(self->device_id, &buffer_larger, sizeof(buffer_larger));
/*
		 * Provide a user_buffer with size smaller than the exact size to check
		 * that the fields within the size range still get updated.
*/
test_cmd_get_hw_info(self->device_id, &buffer_smaller, sizeof(buffer_smaller));
} else {
test_err_get_hw_info(ENOENT, self->device_id,
&buffer_exact, sizeof(buffer_exact));
test_err_get_hw_info(ENOENT, self->device_id,
&buffer_larger, sizeof(buffer_larger));
}
}
TEST_F(iommufd_ioas, area)
{
int i;
/* Unmap fails if nothing is mapped */
for (i = 0; i != 10; i++)
test_err_ioctl_ioas_unmap(ENOENT, i * PAGE_SIZE, PAGE_SIZE);
/* Unmap works */
for (i = 0; i != 10; i++)
test_ioctl_ioas_map_fixed(buffer, PAGE_SIZE,
self->base_iova + i * PAGE_SIZE);
for (i = 0; i != 10; i++)
test_ioctl_ioas_unmap(self->base_iova + i * PAGE_SIZE,
PAGE_SIZE);
/* Split fails */
test_ioctl_ioas_map_fixed(buffer, PAGE_SIZE * 2,
self->base_iova + 16 * PAGE_SIZE);
test_err_ioctl_ioas_unmap(ENOENT, self->base_iova + 16 * PAGE_SIZE,
PAGE_SIZE);
test_err_ioctl_ioas_unmap(ENOENT, self->base_iova + 17 * PAGE_SIZE,
PAGE_SIZE);
/* Over map fails */
test_err_ioctl_ioas_map_fixed(EEXIST, buffer, PAGE_SIZE * 2,
self->base_iova + 16 * PAGE_SIZE);
test_err_ioctl_ioas_map_fixed(EEXIST, buffer, PAGE_SIZE,
self->base_iova + 16 * PAGE_SIZE);
test_err_ioctl_ioas_map_fixed(EEXIST, buffer, PAGE_SIZE,
self->base_iova + 17 * PAGE_SIZE);
test_err_ioctl_ioas_map_fixed(EEXIST, buffer, PAGE_SIZE * 2,
self->base_iova + 15 * PAGE_SIZE);
test_err_ioctl_ioas_map_fixed(EEXIST, buffer, PAGE_SIZE * 3,
self->base_iova + 15 * PAGE_SIZE);
/* unmap all works */
test_ioctl_ioas_unmap(0, UINT64_MAX);
/* Unmap all succeeds on an empty IOAS */
test_ioctl_ioas_unmap(0, UINT64_MAX);
}
TEST_F(iommufd_ioas, unmap_fully_contained_areas)
{
uint64_t unmap_len;
int i;
/* Give no_domain some space to rewind base_iova */
self->base_iova += 4 * PAGE_SIZE;
for (i = 0; i != 4; i++)
test_ioctl_ioas_map_fixed(buffer, 8 * PAGE_SIZE,
self->base_iova + i * 16 * PAGE_SIZE);
/* Unmap not fully contained area doesn't work */
test_err_ioctl_ioas_unmap(ENOENT, self->base_iova - 4 * PAGE_SIZE,
8 * PAGE_SIZE);
test_err_ioctl_ioas_unmap(ENOENT,
self->base_iova + 3 * 16 * PAGE_SIZE +
8 * PAGE_SIZE - 4 * PAGE_SIZE,
8 * PAGE_SIZE);
/* Unmap fully contained areas works */
ASSERT_EQ(0, _test_ioctl_ioas_unmap(self->fd, self->ioas_id,
self->base_iova - 4 * PAGE_SIZE,
3 * 16 * PAGE_SIZE + 8 * PAGE_SIZE +
4 * PAGE_SIZE,
&unmap_len));
ASSERT_EQ(32 * PAGE_SIZE, unmap_len);
}
TEST_F(iommufd_ioas, area_auto_iova)
{
struct iommu_test_cmd test_cmd = {
.size = sizeof(test_cmd),
.op = IOMMU_TEST_OP_ADD_RESERVED,
.id = self->ioas_id,
.add_reserved = { .start = PAGE_SIZE * 4,
.length = PAGE_SIZE * 100 },
};
struct iommu_iova_range ranges[1] = {};
struct iommu_ioas_allow_iovas allow_cmd = {
.size = sizeof(allow_cmd),
.ioas_id = self->ioas_id,
.num_iovas = 1,
.allowed_iovas = (uintptr_t)ranges,
};
__u64 iovas[10];
int i;
/* Simple 4k pages */
for (i = 0; i != 10; i++)
test_ioctl_ioas_map(buffer, PAGE_SIZE, &iovas[i]);
for (i = 0; i != 10; i++)
test_ioctl_ioas_unmap(iovas[i], PAGE_SIZE);
/* Kernel automatically aligns IOVAs properly */
for (i = 0; i != 10; i++) {
size_t length = PAGE_SIZE * (i + 1);
if (self->stdev_id) {
test_ioctl_ioas_map(buffer, length, &iovas[i]);
} else {
test_ioctl_ioas_map((void *)(1UL << 31), length,
&iovas[i]);
}
EXPECT_EQ(0, iovas[i] % (1UL << (ffs(length) - 1)));
}
for (i = 0; i != 10; i++)
test_ioctl_ioas_unmap(iovas[i], PAGE_SIZE * (i + 1));
/* Avoids a reserved region */
ASSERT_EQ(0,
ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ADD_RESERVED),
&test_cmd));
for (i = 0; i != 10; i++) {
size_t length = PAGE_SIZE * (i + 1);
test_ioctl_ioas_map(buffer, length, &iovas[i]);
EXPECT_EQ(0, iovas[i] % (1UL << (ffs(length) - 1)));
EXPECT_EQ(false,
iovas[i] > test_cmd.add_reserved.start &&
iovas[i] <
test_cmd.add_reserved.start +
test_cmd.add_reserved.length);
}
for (i = 0; i != 10; i++)
test_ioctl_ioas_unmap(iovas[i], PAGE_SIZE * (i + 1));
/* Allowed region intersects with a reserved region */
ranges[0].start = PAGE_SIZE;
ranges[0].last = PAGE_SIZE * 600;
EXPECT_ERRNO(EADDRINUSE,
ioctl(self->fd, IOMMU_IOAS_ALLOW_IOVAS, &allow_cmd));
/* Allocate from an allowed region */
if (self->stdev_id) {
ranges[0].start = MOCK_APERTURE_START + PAGE_SIZE;
ranges[0].last = MOCK_APERTURE_START + PAGE_SIZE * 600 - 1;
} else {
ranges[0].start = PAGE_SIZE * 200;
ranges[0].last = PAGE_SIZE * 600 - 1;
}
ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_ALLOW_IOVAS, &allow_cmd));
for (i = 0; i != 10; i++) {
size_t length = PAGE_SIZE * (i + 1);
test_ioctl_ioas_map(buffer, length, &iovas[i]);
EXPECT_EQ(0, iovas[i] % (1UL << (ffs(length) - 1)));
EXPECT_EQ(true, iovas[i] >= ranges[0].start);
EXPECT_EQ(true, iovas[i] <= ranges[0].last);
EXPECT_EQ(true, iovas[i] + length > ranges[0].start);
EXPECT_EQ(true, iovas[i] + length <= ranges[0].last + 1);
}
for (i = 0; i != 10; i++)
test_ioctl_ioas_unmap(iovas[i], PAGE_SIZE * (i + 1));
}
TEST_F(iommufd_ioas, area_allowed)
{
struct iommu_test_cmd test_cmd = {
.size = sizeof(test_cmd),
.op = IOMMU_TEST_OP_ADD_RESERVED,
.id = self->ioas_id,
.add_reserved = { .start = PAGE_SIZE * 4,
.length = PAGE_SIZE * 100 },
};
struct iommu_iova_range ranges[1] = {};
struct iommu_ioas_allow_iovas allow_cmd = {
.size = sizeof(allow_cmd),
.ioas_id = self->ioas_id,
.num_iovas = 1,
.allowed_iovas = (uintptr_t)ranges,
};
/* Reserved intersects an allowed */
allow_cmd.num_iovas = 1;
ranges[0].start = self->base_iova;
ranges[0].last = ranges[0].start + PAGE_SIZE * 600;
ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_ALLOW_IOVAS, &allow_cmd));
test_cmd.add_reserved.start = ranges[0].start + PAGE_SIZE;
test_cmd.add_reserved.length = PAGE_SIZE;
EXPECT_ERRNO(EADDRINUSE,
ioctl(self->fd,
_IOMMU_TEST_CMD(IOMMU_TEST_OP_ADD_RESERVED),
&test_cmd));
allow_cmd.num_iovas = 0;
ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_ALLOW_IOVAS, &allow_cmd));
/* Allowed intersects a reserved */
ASSERT_EQ(0,
ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ADD_RESERVED),
&test_cmd));
allow_cmd.num_iovas = 1;
ranges[0].start = self->base_iova;
ranges[0].last = ranges[0].start + PAGE_SIZE * 600;
EXPECT_ERRNO(EADDRINUSE,
ioctl(self->fd, IOMMU_IOAS_ALLOW_IOVAS, &allow_cmd));
}
TEST_F(iommufd_ioas, copy_area)
{
struct iommu_ioas_copy copy_cmd = {
.size = sizeof(copy_cmd),
.flags = IOMMU_IOAS_MAP_FIXED_IOVA,
.dst_ioas_id = self->ioas_id,
.src_ioas_id = self->ioas_id,
.length = PAGE_SIZE,
};
test_ioctl_ioas_map_fixed(buffer, PAGE_SIZE, self->base_iova);
/* Copy inside a single IOAS */
copy_cmd.src_iova = self->base_iova;
copy_cmd.dst_iova = self->base_iova + PAGE_SIZE;
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_COPY, &copy_cmd));
/* Copy between IOAS's */
copy_cmd.src_iova = self->base_iova;
copy_cmd.dst_iova = 0;
	test_ioctl_ioas_alloc(&copy_cmd.dst_ioas_id);
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_COPY, &copy_cmd));
}
TEST_F(iommufd_ioas, iova_ranges)
{
struct iommu_test_cmd test_cmd = {
.size = sizeof(test_cmd),
.op = IOMMU_TEST_OP_ADD_RESERVED,
.id = self->ioas_id,
.add_reserved = { .start = PAGE_SIZE, .length = PAGE_SIZE },
};
struct iommu_iova_range *ranges = buffer;
struct iommu_ioas_iova_ranges ranges_cmd = {
.size = sizeof(ranges_cmd),
.ioas_id = self->ioas_id,
.num_iovas = BUFFER_SIZE / sizeof(*ranges),
.allowed_iovas = (uintptr_t)ranges,
};
/* Range can be read */
ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_IOVA_RANGES, &ranges_cmd));
EXPECT_EQ(1, ranges_cmd.num_iovas);
if (!self->stdev_id) {
EXPECT_EQ(0, ranges[0].start);
EXPECT_EQ(SIZE_MAX, ranges[0].last);
EXPECT_EQ(1, ranges_cmd.out_iova_alignment);
} else {
EXPECT_EQ(MOCK_APERTURE_START, ranges[0].start);
EXPECT_EQ(MOCK_APERTURE_LAST, ranges[0].last);
EXPECT_EQ(MOCK_PAGE_SIZE, ranges_cmd.out_iova_alignment);
}
/* Buffer too small */
memset(ranges, 0, BUFFER_SIZE);
ranges_cmd.num_iovas = 0;
EXPECT_ERRNO(EMSGSIZE,
ioctl(self->fd, IOMMU_IOAS_IOVA_RANGES, &ranges_cmd));
EXPECT_EQ(1, ranges_cmd.num_iovas);
EXPECT_EQ(0, ranges[0].start);
EXPECT_EQ(0, ranges[0].last);
/* 2 ranges */
ASSERT_EQ(0,
ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ADD_RESERVED),
&test_cmd));
ranges_cmd.num_iovas = BUFFER_SIZE / sizeof(*ranges);
ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_IOVA_RANGES, &ranges_cmd));
if (!self->stdev_id) {
EXPECT_EQ(2, ranges_cmd.num_iovas);
EXPECT_EQ(0, ranges[0].start);
EXPECT_EQ(PAGE_SIZE - 1, ranges[0].last);
EXPECT_EQ(PAGE_SIZE * 2, ranges[1].start);
EXPECT_EQ(SIZE_MAX, ranges[1].last);
} else {
EXPECT_EQ(1, ranges_cmd.num_iovas);
EXPECT_EQ(MOCK_APERTURE_START, ranges[0].start);
EXPECT_EQ(MOCK_APERTURE_LAST, ranges[0].last);
}
/* Buffer too small */
memset(ranges, 0, BUFFER_SIZE);
ranges_cmd.num_iovas = 1;
if (!self->stdev_id) {
EXPECT_ERRNO(EMSGSIZE, ioctl(self->fd, IOMMU_IOAS_IOVA_RANGES,
&ranges_cmd));
EXPECT_EQ(2, ranges_cmd.num_iovas);
EXPECT_EQ(0, ranges[0].start);
EXPECT_EQ(PAGE_SIZE - 1, ranges[0].last);
} else {
ASSERT_EQ(0,
ioctl(self->fd, IOMMU_IOAS_IOVA_RANGES, &ranges_cmd));
EXPECT_EQ(1, ranges_cmd.num_iovas);
EXPECT_EQ(MOCK_APERTURE_START, ranges[0].start);
EXPECT_EQ(MOCK_APERTURE_LAST, ranges[0].last);
}
EXPECT_EQ(0, ranges[1].start);
EXPECT_EQ(0, ranges[1].last);
}
TEST_F(iommufd_ioas, access_domain_destory)
{
struct iommu_test_cmd access_cmd = {
.size = sizeof(access_cmd),
.op = IOMMU_TEST_OP_ACCESS_PAGES,
.access_pages = { .iova = self->base_iova + PAGE_SIZE,
.length = PAGE_SIZE},
};
size_t buf_size = 2 * HUGEPAGE_SIZE;
uint8_t *buf;
buf = mmap(0, buf_size, PROT_READ | PROT_WRITE,
MAP_SHARED | MAP_ANONYMOUS | MAP_HUGETLB | MAP_POPULATE, -1,
0);
ASSERT_NE(MAP_FAILED, buf);
test_ioctl_ioas_map_fixed(buf, buf_size, self->base_iova);
test_cmd_create_access(self->ioas_id, &access_cmd.id,
MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES);
access_cmd.access_pages.uptr = (uintptr_t)buf + PAGE_SIZE;
ASSERT_EQ(0,
ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
&access_cmd));
/* Causes a complicated unpin across a huge page boundary */
if (self->stdev_id)
test_ioctl_destroy(self->stdev_id);
test_cmd_destroy_access_pages(
access_cmd.id, access_cmd.access_pages.out_access_pages_id);
test_cmd_destroy_access(access_cmd.id);
ASSERT_EQ(0, munmap(buf, buf_size));
}
TEST_F(iommufd_ioas, access_pin)
{
struct iommu_test_cmd access_cmd = {
.size = sizeof(access_cmd),
.op = IOMMU_TEST_OP_ACCESS_PAGES,
.access_pages = { .iova = MOCK_APERTURE_START,
.length = BUFFER_SIZE,
.uptr = (uintptr_t)buffer },
};
struct iommu_test_cmd check_map_cmd = {
.size = sizeof(check_map_cmd),
.op = IOMMU_TEST_OP_MD_CHECK_MAP,
.check_map = { .iova = MOCK_APERTURE_START,
.length = BUFFER_SIZE,
.uptr = (uintptr_t)buffer },
};
uint32_t access_pages_id;
unsigned int npages;
test_cmd_create_access(self->ioas_id, &access_cmd.id,
MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES);
for (npages = 1; npages < BUFFER_SIZE / PAGE_SIZE; npages++) {
uint32_t mock_stdev_id;
uint32_t mock_hwpt_id;
access_cmd.access_pages.length = npages * PAGE_SIZE;
/* Single map/unmap */
test_ioctl_ioas_map_fixed(buffer, BUFFER_SIZE,
MOCK_APERTURE_START);
ASSERT_EQ(0, ioctl(self->fd,
_IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
&access_cmd));
test_cmd_destroy_access_pages(
access_cmd.id,
access_cmd.access_pages.out_access_pages_id);
/* Double user */
ASSERT_EQ(0, ioctl(self->fd,
_IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
&access_cmd));
access_pages_id = access_cmd.access_pages.out_access_pages_id;
ASSERT_EQ(0, ioctl(self->fd,
_IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
&access_cmd));
test_cmd_destroy_access_pages(
access_cmd.id,
access_cmd.access_pages.out_access_pages_id);
test_cmd_destroy_access_pages(access_cmd.id, access_pages_id);
/* Add/remove a domain with a user */
ASSERT_EQ(0, ioctl(self->fd,
_IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
&access_cmd));
test_cmd_mock_domain(self->ioas_id, &mock_stdev_id,
&mock_hwpt_id, NULL);
check_map_cmd.id = mock_hwpt_id;
ASSERT_EQ(0, ioctl(self->fd,
_IOMMU_TEST_CMD(IOMMU_TEST_OP_MD_CHECK_MAP),
&check_map_cmd));
test_ioctl_destroy(mock_stdev_id);
test_cmd_destroy_access_pages(
access_cmd.id,
access_cmd.access_pages.out_access_pages_id);
test_ioctl_ioas_unmap(MOCK_APERTURE_START, BUFFER_SIZE);
}
test_cmd_destroy_access(access_cmd.id);
}
TEST_F(iommufd_ioas, access_pin_unmap)
{
struct iommu_test_cmd access_pages_cmd = {
.size = sizeof(access_pages_cmd),
.op = IOMMU_TEST_OP_ACCESS_PAGES,
.access_pages = { .iova = MOCK_APERTURE_START,
.length = BUFFER_SIZE,
.uptr = (uintptr_t)buffer },
};
test_cmd_create_access(self->ioas_id, &access_pages_cmd.id,
MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES);
test_ioctl_ioas_map_fixed(buffer, BUFFER_SIZE, MOCK_APERTURE_START);
ASSERT_EQ(0,
ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
&access_pages_cmd));
/* Trigger the unmap op */
test_ioctl_ioas_unmap(MOCK_APERTURE_START, BUFFER_SIZE);
/* kernel removed the item for us */
test_err_destroy_access_pages(
ENOENT, access_pages_cmd.id,
access_pages_cmd.access_pages.out_access_pages_id);
}
static void check_access_rw(struct __test_metadata *_metadata, int fd,
unsigned int access_id, uint64_t iova,
unsigned int def_flags)
{
uint16_t tmp[32];
struct iommu_test_cmd access_cmd = {
.size = sizeof(access_cmd),
.op = IOMMU_TEST_OP_ACCESS_RW,
.id = access_id,
.access_rw = { .uptr = (uintptr_t)tmp },
};
uint16_t *buffer16 = buffer;
unsigned int i;
void *tmp2;
for (i = 0; i != BUFFER_SIZE / sizeof(*buffer16); i++)
buffer16[i] = rand();
for (access_cmd.access_rw.iova = iova + PAGE_SIZE - 50;
access_cmd.access_rw.iova < iova + PAGE_SIZE + 50;
access_cmd.access_rw.iova++) {
for (access_cmd.access_rw.length = 1;
access_cmd.access_rw.length < sizeof(tmp);
access_cmd.access_rw.length++) {
access_cmd.access_rw.flags = def_flags;
ASSERT_EQ(0, ioctl(fd,
_IOMMU_TEST_CMD(
IOMMU_TEST_OP_ACCESS_RW),
&access_cmd));
ASSERT_EQ(0,
memcmp(buffer + (access_cmd.access_rw.iova -
iova),
tmp, access_cmd.access_rw.length));
for (i = 0; i != ARRAY_SIZE(tmp); i++)
tmp[i] = rand();
access_cmd.access_rw.flags = def_flags |
MOCK_ACCESS_RW_WRITE;
ASSERT_EQ(0, ioctl(fd,
_IOMMU_TEST_CMD(
IOMMU_TEST_OP_ACCESS_RW),
&access_cmd));
ASSERT_EQ(0,
memcmp(buffer + (access_cmd.access_rw.iova -
iova),
tmp, access_cmd.access_rw.length));
}
}
/* Multi-page test */
tmp2 = malloc(BUFFER_SIZE);
ASSERT_NE(NULL, tmp2);
access_cmd.access_rw.iova = iova;
access_cmd.access_rw.length = BUFFER_SIZE;
access_cmd.access_rw.flags = def_flags;
access_cmd.access_rw.uptr = (uintptr_t)tmp2;
ASSERT_EQ(0, ioctl(fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_RW),
&access_cmd));
ASSERT_EQ(0, memcmp(buffer, tmp2, access_cmd.access_rw.length));
free(tmp2);
}
TEST_F(iommufd_ioas, access_rw)
{
__u32 access_id;
__u64 iova;
test_cmd_create_access(self->ioas_id, &access_id, 0);
test_ioctl_ioas_map(buffer, BUFFER_SIZE, &iova);
check_access_rw(_metadata, self->fd, access_id, iova, 0);
check_access_rw(_metadata, self->fd, access_id, iova,
MOCK_ACCESS_RW_SLOW_PATH);
test_ioctl_ioas_unmap(iova, BUFFER_SIZE);
test_cmd_destroy_access(access_id);
}
TEST_F(iommufd_ioas, access_rw_unaligned)
{
__u32 access_id;
__u64 iova;
test_cmd_create_access(self->ioas_id, &access_id, 0);
/* Unaligned pages */
iova = self->base_iova + MOCK_PAGE_SIZE;
test_ioctl_ioas_map_fixed(buffer, BUFFER_SIZE, iova);
check_access_rw(_metadata, self->fd, access_id, iova, 0);
test_ioctl_ioas_unmap(iova, BUFFER_SIZE);
test_cmd_destroy_access(access_id);
}
TEST_F(iommufd_ioas, fork_gone)
{
__u32 access_id;
pid_t child;
test_cmd_create_access(self->ioas_id, &access_id, 0);
/* Create a mapping with a different mm */
child = fork();
if (!child) {
test_ioctl_ioas_map_fixed(buffer, BUFFER_SIZE,
MOCK_APERTURE_START);
exit(0);
}
ASSERT_NE(-1, child);
ASSERT_EQ(child, waitpid(child, NULL, 0));
if (self->stdev_id) {
/*
* If a domain already existed then everything was pinned within
* the fork, so this copies from one domain to another.
*/
test_cmd_mock_domain(self->ioas_id, NULL, NULL, NULL);
check_access_rw(_metadata, self->fd, access_id,
MOCK_APERTURE_START, 0);
} else {
/*
* Otherwise we need to actually pin pages which can't happen
* since the fork is gone.
*/
test_err_mock_domain(EFAULT, self->ioas_id, NULL, NULL);
}
test_cmd_destroy_access(access_id);
}
TEST_F(iommufd_ioas, fork_present)
{
__u32 access_id;
int pipefds[2];
uint64_t tmp;
pid_t child;
int efd;
test_cmd_create_access(self->ioas_id, &access_id, 0);
ASSERT_EQ(0, pipe2(pipefds, O_CLOEXEC));
efd = eventfd(0, EFD_CLOEXEC);
ASSERT_NE(-1, efd);
/* Create a mapping with a different mm */
child = fork();
if (!child) {
__u64 iova;
uint64_t one = 1;
close(pipefds[1]);
test_ioctl_ioas_map_fixed(buffer, BUFFER_SIZE,
MOCK_APERTURE_START);
if (write(efd, &one, sizeof(one)) != sizeof(one))
exit(100);
if (read(pipefds[0], &iova, 1) != 1)
exit(100);
exit(0);
}
close(pipefds[0]);
ASSERT_NE(-1, child);
ASSERT_EQ(8, read(efd, &tmp, sizeof(tmp)));
/* Read pages from the remote process */
test_cmd_mock_domain(self->ioas_id, NULL, NULL, NULL);
check_access_rw(_metadata, self->fd, access_id, MOCK_APERTURE_START, 0);
ASSERT_EQ(0, close(pipefds[1]));
ASSERT_EQ(child, waitpid(child, NULL, 0));
test_cmd_destroy_access(access_id);
}
TEST_F(iommufd_ioas, ioas_option_huge_pages)
{
struct iommu_option cmd = {
.size = sizeof(cmd),
.option_id = IOMMU_OPTION_HUGE_PAGES,
.op = IOMMU_OPTION_OP_GET,
.val64 = 3,
.object_id = self->ioas_id,
};
ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
ASSERT_EQ(1, cmd.val64);
cmd.op = IOMMU_OPTION_OP_SET;
cmd.val64 = 0;
ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
cmd.op = IOMMU_OPTION_OP_GET;
cmd.val64 = 3;
ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
ASSERT_EQ(0, cmd.val64);
cmd.op = IOMMU_OPTION_OP_SET;
cmd.val64 = 2;
EXPECT_ERRNO(EINVAL, ioctl(self->fd, IOMMU_OPTION, &cmd));
cmd.op = IOMMU_OPTION_OP_SET;
cmd.val64 = 1;
ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
}
TEST_F(iommufd_ioas, ioas_iova_alloc)
{
unsigned int length;
__u64 iova;
for (length = 1; length != PAGE_SIZE * 2; length++) {
if (variant->mock_domains && (length % MOCK_PAGE_SIZE)) {
test_err_ioctl_ioas_map(EINVAL, buffer, length, &iova);
} else {
test_ioctl_ioas_map(buffer, length, &iova);
test_ioctl_ioas_unmap(iova, length);
}
}
}
TEST_F(iommufd_ioas, ioas_align_change)
{
struct iommu_option cmd = {
.size = sizeof(cmd),
.option_id = IOMMU_OPTION_HUGE_PAGES,
.op = IOMMU_OPTION_OP_SET,
.object_id = self->ioas_id,
/* 0 means everything must be aligned to PAGE_SIZE */
.val64 = 0,
};
/*
* We cannot upgrade the alignment using OPTION_HUGE_PAGES when a domain
* and map are present.
*/
if (variant->mock_domains)
return;
/*
* We can upgrade to PAGE_SIZE alignment when things are aligned right
*/
test_ioctl_ioas_map_fixed(buffer, PAGE_SIZE, MOCK_APERTURE_START);
ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
/* Misalignment is rejected at map time */
test_err_ioctl_ioas_map_fixed(EINVAL, buffer + MOCK_PAGE_SIZE,
PAGE_SIZE,
MOCK_APERTURE_START + PAGE_SIZE);
ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
/* Reduce alignment */
cmd.val64 = 1;
ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
/* Confirm misalignment is rejected during alignment upgrade */
test_ioctl_ioas_map_fixed(buffer + MOCK_PAGE_SIZE, PAGE_SIZE,
MOCK_APERTURE_START + PAGE_SIZE);
cmd.val64 = 0;
EXPECT_ERRNO(EADDRINUSE, ioctl(self->fd, IOMMU_OPTION, &cmd));
test_ioctl_ioas_unmap(MOCK_APERTURE_START + PAGE_SIZE, PAGE_SIZE);
test_ioctl_ioas_unmap(MOCK_APERTURE_START, PAGE_SIZE);
}
TEST_F(iommufd_ioas, copy_sweep)
{
struct iommu_ioas_copy copy_cmd = {
.size = sizeof(copy_cmd),
.flags = IOMMU_IOAS_MAP_FIXED_IOVA,
.src_ioas_id = self->ioas_id,
.dst_iova = MOCK_APERTURE_START,
.length = MOCK_PAGE_SIZE,
};
unsigned int dst_ioas_id;
uint64_t last_iova;
uint64_t iova;
test_ioctl_ioas_alloc(&dst_ioas_id);
copy_cmd.dst_ioas_id = dst_ioas_id;
if (variant->mock_domains)
last_iova = MOCK_APERTURE_START + BUFFER_SIZE - 1;
else
last_iova = MOCK_APERTURE_START + BUFFER_SIZE - 2;
test_ioctl_ioas_map_fixed(buffer, last_iova - MOCK_APERTURE_START + 1,
MOCK_APERTURE_START);
for (iova = MOCK_APERTURE_START - PAGE_SIZE; iova <= last_iova;
iova += 511) {
copy_cmd.src_iova = iova;
if (iova < MOCK_APERTURE_START ||
iova + copy_cmd.length - 1 > last_iova) {
			EXPECT_ERRNO(ENOENT, ioctl(self->fd, IOMMU_IOAS_COPY,
						   &copy_cmd));
} else {
ASSERT_EQ(0,
				  ioctl(self->fd, IOMMU_IOAS_COPY, &copy_cmd));
test_ioctl_ioas_unmap_id(dst_ioas_id, copy_cmd.dst_iova,
copy_cmd.length);
}
}
test_ioctl_destroy(dst_ioas_id);
}
FIXTURE(iommufd_mock_domain)
{
int fd;
uint32_t ioas_id;
uint32_t hwpt_id;
uint32_t hwpt_ids[2];
uint32_t stdev_ids[2];
uint32_t idev_ids[2];
int mmap_flags;
size_t mmap_buf_size;
};
FIXTURE_VARIANT(iommufd_mock_domain)
{
unsigned int mock_domains;
bool hugepages;
};
FIXTURE_SETUP(iommufd_mock_domain)
{
unsigned int i;
self->fd = open("/dev/iommu", O_RDWR);
ASSERT_NE(-1, self->fd);
test_ioctl_ioas_alloc(&self->ioas_id);
ASSERT_GE(ARRAY_SIZE(self->hwpt_ids), variant->mock_domains);
for (i = 0; i != variant->mock_domains; i++)
test_cmd_mock_domain(self->ioas_id, &self->stdev_ids[i],
&self->hwpt_ids[i], &self->idev_ids[i]);
self->hwpt_id = self->hwpt_ids[0];
self->mmap_flags = MAP_SHARED | MAP_ANONYMOUS;
self->mmap_buf_size = PAGE_SIZE * 8;
if (variant->hugepages) {
/*
* MAP_POPULATE will cause the kernel to fail mmap if THPs are
* not available.
*/
self->mmap_flags |= MAP_HUGETLB | MAP_POPULATE;
self->mmap_buf_size = HUGEPAGE_SIZE * 2;
}
}
FIXTURE_TEARDOWN(iommufd_mock_domain)
{
teardown_iommufd(self->fd, _metadata);
}
FIXTURE_VARIANT_ADD(iommufd_mock_domain, one_domain)
{
.mock_domains = 1,
.hugepages = false,
};
FIXTURE_VARIANT_ADD(iommufd_mock_domain, two_domains)
{
.mock_domains = 2,
.hugepages = false,
};
FIXTURE_VARIANT_ADD(iommufd_mock_domain, one_domain_hugepage)
{
.mock_domains = 1,
.hugepages = true,
};
FIXTURE_VARIANT_ADD(iommufd_mock_domain, two_domains_hugepage)
{
.mock_domains = 2,
.hugepages = true,
};
/* Have the kernel check that the user pages made it to the iommu_domain */
#define check_mock_iova(_ptr, _iova, _length) \
({ \
struct iommu_test_cmd check_map_cmd = { \
.size = sizeof(check_map_cmd), \
.op = IOMMU_TEST_OP_MD_CHECK_MAP, \
.id = self->hwpt_id, \
.check_map = { .iova = _iova, \
.length = _length, \
.uptr = (uintptr_t)(_ptr) }, \
}; \
ASSERT_EQ(0, \
ioctl(self->fd, \
_IOMMU_TEST_CMD(IOMMU_TEST_OP_MD_CHECK_MAP), \
&check_map_cmd)); \
if (self->hwpt_ids[1]) { \
check_map_cmd.id = self->hwpt_ids[1]; \
ASSERT_EQ(0, \
ioctl(self->fd, \
_IOMMU_TEST_CMD( \
IOMMU_TEST_OP_MD_CHECK_MAP), \
&check_map_cmd)); \
} \
})
TEST_F(iommufd_mock_domain, basic)
{
size_t buf_size = self->mmap_buf_size;
uint8_t *buf;
__u64 iova;
/* Simple one page map */
test_ioctl_ioas_map(buffer, PAGE_SIZE, &iova);
check_mock_iova(buffer, iova, PAGE_SIZE);
buf = mmap(0, buf_size, PROT_READ | PROT_WRITE, self->mmap_flags, -1,
0);
ASSERT_NE(MAP_FAILED, buf);
/* EFAULT half way through mapping */
ASSERT_EQ(0, munmap(buf + buf_size / 2, buf_size / 2));
test_err_ioctl_ioas_map(EFAULT, buf, buf_size, &iova);
/* EFAULT on first page */
ASSERT_EQ(0, munmap(buf, buf_size / 2));
test_err_ioctl_ioas_map(EFAULT, buf, buf_size, &iova);
}
TEST_F(iommufd_mock_domain, ro_unshare)
{
uint8_t *buf;
__u64 iova;
int fd;
fd = open("/proc/self/exe", O_RDONLY);
ASSERT_NE(-1, fd);
buf = mmap(0, PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
ASSERT_NE(MAP_FAILED, buf);
close(fd);
/*
* There have been lots of changes to the "unshare" mechanism in
* get_user_pages(), make sure it works right. The write to the page
* after we map it for reading should not change the assigned PFN.
*/
ASSERT_EQ(0,
_test_ioctl_ioas_map(self->fd, self->ioas_id, buf, PAGE_SIZE,
&iova, IOMMU_IOAS_MAP_READABLE));
check_mock_iova(buf, iova, PAGE_SIZE);
memset(buf, 1, PAGE_SIZE);
check_mock_iova(buf, iova, PAGE_SIZE);
ASSERT_EQ(0, munmap(buf, PAGE_SIZE));
}
TEST_F(iommufd_mock_domain, all_aligns)
{
size_t test_step = variant->hugepages ? (self->mmap_buf_size / 16) :
MOCK_PAGE_SIZE;
size_t buf_size = self->mmap_buf_size;
unsigned int start;
unsigned int end;
uint8_t *buf;
buf = mmap(0, buf_size, PROT_READ | PROT_WRITE, self->mmap_flags, -1,
0);
ASSERT_NE(MAP_FAILED, buf);
check_refs(buf, buf_size, 0);
/*
* Map every combination of page size and alignment within a big region,
* less for hugepage case as it takes so long to finish.
*/
for (start = 0; start < buf_size; start += test_step) {
if (variant->hugepages)
end = buf_size;
else
end = start + MOCK_PAGE_SIZE;
for (; end < buf_size; end += MOCK_PAGE_SIZE) {
size_t length = end - start;
__u64 iova;
test_ioctl_ioas_map(buf + start, length, &iova);
check_mock_iova(buf + start, iova, length);
check_refs(buf + start / PAGE_SIZE * PAGE_SIZE,
end / PAGE_SIZE * PAGE_SIZE -
start / PAGE_SIZE * PAGE_SIZE,
1);
test_ioctl_ioas_unmap(iova, length);
}
}
check_refs(buf, buf_size, 0);
ASSERT_EQ(0, munmap(buf, buf_size));
}
TEST_F(iommufd_mock_domain, all_aligns_copy)
{
size_t test_step = variant->hugepages ? self->mmap_buf_size / 16 :
MOCK_PAGE_SIZE;
size_t buf_size = self->mmap_buf_size;
unsigned int start;
unsigned int end;
uint8_t *buf;
buf = mmap(0, buf_size, PROT_READ | PROT_WRITE, self->mmap_flags, -1,
0);
ASSERT_NE(MAP_FAILED, buf);
check_refs(buf, buf_size, 0);
/*
* Map every combination of page size and alignment within a big region,
	 * with fewer combinations in the hugepage case as it takes so long to finish.
*/
for (start = 0; start < buf_size; start += test_step) {
if (variant->hugepages)
end = buf_size;
else
end = start + MOCK_PAGE_SIZE;
for (; end < buf_size; end += MOCK_PAGE_SIZE) {
size_t length = end - start;
unsigned int old_id;
uint32_t mock_stdev_id;
__u64 iova;
test_ioctl_ioas_map(buf + start, length, &iova);
/* Add and destroy a domain while the area exists */
old_id = self->hwpt_ids[1];
test_cmd_mock_domain(self->ioas_id, &mock_stdev_id,
&self->hwpt_ids[1], NULL);
check_mock_iova(buf + start, iova, length);
check_refs(buf + start / PAGE_SIZE * PAGE_SIZE,
end / PAGE_SIZE * PAGE_SIZE -
start / PAGE_SIZE * PAGE_SIZE,
1);
test_ioctl_destroy(mock_stdev_id);
self->hwpt_ids[1] = old_id;
test_ioctl_ioas_unmap(iova, length);
}
}
check_refs(buf, buf_size, 0);
ASSERT_EQ(0, munmap(buf, buf_size));
}
TEST_F(iommufd_mock_domain, user_copy)
{
struct iommu_test_cmd access_cmd = {
.size = sizeof(access_cmd),
.op = IOMMU_TEST_OP_ACCESS_PAGES,
.access_pages = { .length = BUFFER_SIZE,
.uptr = (uintptr_t)buffer },
};
struct iommu_ioas_copy copy_cmd = {
.size = sizeof(copy_cmd),
.flags = IOMMU_IOAS_MAP_FIXED_IOVA,
.dst_ioas_id = self->ioas_id,
.dst_iova = MOCK_APERTURE_START,
.length = BUFFER_SIZE,
};
struct iommu_ioas_unmap unmap_cmd = {
.size = sizeof(unmap_cmd),
.ioas_id = self->ioas_id,
.iova = MOCK_APERTURE_START,
.length = BUFFER_SIZE,
};
unsigned int new_ioas_id, ioas_id;
/* Pin the pages in an IOAS with no domains then copy to an IOAS with domains */
test_ioctl_ioas_alloc(&ioas_id);
test_ioctl_ioas_map_id(ioas_id, buffer, BUFFER_SIZE,
©_cmd.src_iova);
test_cmd_create_access(ioas_id, &access_cmd.id,
MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES);
access_cmd.access_pages.iova = copy_cmd.src_iova;
ASSERT_EQ(0,
ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
&access_cmd));
copy_cmd.src_ioas_id = ioas_id;
ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_COPY, ©_cmd));
check_mock_iova(buffer, MOCK_APERTURE_START, BUFFER_SIZE);
/* Now replace the ioas with a new one */
test_ioctl_ioas_alloc(&new_ioas_id);
test_ioctl_ioas_map_id(new_ioas_id, buffer, BUFFER_SIZE,
©_cmd.src_iova);
test_cmd_access_replace_ioas(access_cmd.id, new_ioas_id);
/* Destroy the old ioas and cleanup copied mapping */
ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_UNMAP, &unmap_cmd));
test_ioctl_destroy(ioas_id);
/* Then run the same test again with the new ioas */
access_cmd.access_pages.iova = copy_cmd.src_iova;
ASSERT_EQ(0,
ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
&access_cmd));
copy_cmd.src_ioas_id = new_ioas_id;
ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_COPY, ©_cmd));
check_mock_iova(buffer, MOCK_APERTURE_START, BUFFER_SIZE);
test_cmd_destroy_access_pages(
access_cmd.id, access_cmd.access_pages.out_access_pages_id);
test_cmd_destroy_access(access_cmd.id);
test_ioctl_destroy(new_ioas_id);
}
TEST_F(iommufd_mock_domain, replace)
{
uint32_t ioas_id;
test_ioctl_ioas_alloc(&ioas_id);
test_cmd_mock_domain_replace(self->stdev_ids[0], ioas_id);
/*
	 * Replacing the IOAS causes the prior HWPT to be deallocated, so we
	 * should get ENOENT when we try to use it.
*/
if (variant->mock_domains == 1)
test_err_mock_domain_replace(ENOENT, self->stdev_ids[0],
self->hwpt_ids[0]);
test_cmd_mock_domain_replace(self->stdev_ids[0], ioas_id);
if (variant->mock_domains >= 2) {
test_cmd_mock_domain_replace(self->stdev_ids[0],
self->hwpt_ids[1]);
test_cmd_mock_domain_replace(self->stdev_ids[0],
self->hwpt_ids[1]);
test_cmd_mock_domain_replace(self->stdev_ids[0],
self->hwpt_ids[0]);
}
test_cmd_mock_domain_replace(self->stdev_ids[0], self->ioas_id);
test_ioctl_destroy(ioas_id);
}
TEST_F(iommufd_mock_domain, alloc_hwpt)
{
int i;
for (i = 0; i != variant->mock_domains; i++) {
uint32_t stddev_id;
uint32_t hwpt_id;
test_cmd_hwpt_alloc(self->idev_ids[0], self->ioas_id, &hwpt_id);
test_cmd_mock_domain(hwpt_id, &stddev_id, NULL, NULL);
test_ioctl_destroy(stddev_id);
test_ioctl_destroy(hwpt_id);
}
}
/* VFIO compatibility IOCTLs */
TEST_F(iommufd, simple_ioctls)
{
ASSERT_EQ(VFIO_API_VERSION, ioctl(self->fd, VFIO_GET_API_VERSION));
ASSERT_EQ(1, ioctl(self->fd, VFIO_CHECK_EXTENSION, VFIO_TYPE1v2_IOMMU));
}
TEST_F(iommufd, unmap_cmd)
{
struct vfio_iommu_type1_dma_unmap unmap_cmd = {
.iova = MOCK_APERTURE_START,
.size = PAGE_SIZE,
};
unmap_cmd.argsz = 1;
EXPECT_ERRNO(EINVAL, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd));
unmap_cmd.argsz = sizeof(unmap_cmd);
unmap_cmd.flags = 1 << 31;
EXPECT_ERRNO(EINVAL, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd));
unmap_cmd.flags = 0;
EXPECT_ERRNO(ENODEV, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd));
}
TEST_F(iommufd, map_cmd)
{
struct vfio_iommu_type1_dma_map map_cmd = {
.iova = MOCK_APERTURE_START,
.size = PAGE_SIZE,
.vaddr = (__u64)buffer,
};
map_cmd.argsz = 1;
EXPECT_ERRNO(EINVAL, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd));
map_cmd.argsz = sizeof(map_cmd);
map_cmd.flags = 1 << 31;
EXPECT_ERRNO(EINVAL, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd));
/* Requires a domain to be attached */
map_cmd.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE;
EXPECT_ERRNO(ENODEV, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd));
}
TEST_F(iommufd, info_cmd)
{
struct vfio_iommu_type1_info info_cmd = {};
/* Invalid argsz */
info_cmd.argsz = 1;
EXPECT_ERRNO(EINVAL, ioctl(self->fd, VFIO_IOMMU_GET_INFO, &info_cmd));
info_cmd.argsz = sizeof(info_cmd);
EXPECT_ERRNO(ENODEV, ioctl(self->fd, VFIO_IOMMU_GET_INFO, &info_cmd));
}
TEST_F(iommufd, set_iommu_cmd)
{
/* Requires a domain to be attached */
EXPECT_ERRNO(ENODEV,
ioctl(self->fd, VFIO_SET_IOMMU, VFIO_TYPE1v2_IOMMU));
EXPECT_ERRNO(ENODEV, ioctl(self->fd, VFIO_SET_IOMMU, VFIO_TYPE1_IOMMU));
}
TEST_F(iommufd, vfio_ioas)
{
struct iommu_vfio_ioas vfio_ioas_cmd = {
.size = sizeof(vfio_ioas_cmd),
.op = IOMMU_VFIO_IOAS_GET,
};
__u32 ioas_id;
/* ENODEV if there is no compat ioas */
EXPECT_ERRNO(ENODEV, ioctl(self->fd, IOMMU_VFIO_IOAS, &vfio_ioas_cmd));
/* Invalid id for set */
vfio_ioas_cmd.op = IOMMU_VFIO_IOAS_SET;
EXPECT_ERRNO(ENOENT, ioctl(self->fd, IOMMU_VFIO_IOAS, &vfio_ioas_cmd));
	/* Valid id for set */
test_ioctl_ioas_alloc(&ioas_id);
vfio_ioas_cmd.ioas_id = ioas_id;
ASSERT_EQ(0, ioctl(self->fd, IOMMU_VFIO_IOAS, &vfio_ioas_cmd));
/* Same id comes back from get */
vfio_ioas_cmd.op = IOMMU_VFIO_IOAS_GET;
ASSERT_EQ(0, ioctl(self->fd, IOMMU_VFIO_IOAS, &vfio_ioas_cmd));
ASSERT_EQ(ioas_id, vfio_ioas_cmd.ioas_id);
/* Clear works */
vfio_ioas_cmd.op = IOMMU_VFIO_IOAS_CLEAR;
ASSERT_EQ(0, ioctl(self->fd, IOMMU_VFIO_IOAS, &vfio_ioas_cmd));
vfio_ioas_cmd.op = IOMMU_VFIO_IOAS_GET;
EXPECT_ERRNO(ENODEV, ioctl(self->fd, IOMMU_VFIO_IOAS, &vfio_ioas_cmd));
}
FIXTURE(vfio_compat_mock_domain)
{
int fd;
uint32_t ioas_id;
};
FIXTURE_VARIANT(vfio_compat_mock_domain)
{
unsigned int version;
};
FIXTURE_SETUP(vfio_compat_mock_domain)
{
struct iommu_vfio_ioas vfio_ioas_cmd = {
.size = sizeof(vfio_ioas_cmd),
.op = IOMMU_VFIO_IOAS_SET,
};
self->fd = open("/dev/iommu", O_RDWR);
ASSERT_NE(-1, self->fd);
/* Create what VFIO would consider a group */
test_ioctl_ioas_alloc(&self->ioas_id);
test_cmd_mock_domain(self->ioas_id, NULL, NULL, NULL);
/* Attach it to the vfio compat */
vfio_ioas_cmd.ioas_id = self->ioas_id;
ASSERT_EQ(0, ioctl(self->fd, IOMMU_VFIO_IOAS, &vfio_ioas_cmd));
ASSERT_EQ(0, ioctl(self->fd, VFIO_SET_IOMMU, variant->version));
}
FIXTURE_TEARDOWN(vfio_compat_mock_domain)
{
teardown_iommufd(self->fd, _metadata);
}
FIXTURE_VARIANT_ADD(vfio_compat_mock_domain, Ver1v2)
{
.version = VFIO_TYPE1v2_IOMMU,
};
FIXTURE_VARIANT_ADD(vfio_compat_mock_domain, Ver1v0)
{
.version = VFIO_TYPE1_IOMMU,
};
TEST_F(vfio_compat_mock_domain, simple_close)
{
}
TEST_F(vfio_compat_mock_domain, option_huge_pages)
{
struct iommu_option cmd = {
.size = sizeof(cmd),
.option_id = IOMMU_OPTION_HUGE_PAGES,
.op = IOMMU_OPTION_OP_GET,
.val64 = 3,
.object_id = self->ioas_id,
};
ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
if (variant->version == VFIO_TYPE1_IOMMU) {
ASSERT_EQ(0, cmd.val64);
} else {
ASSERT_EQ(1, cmd.val64);
}
}
/*
* Execute an ioctl command stored in buffer and check that the result does not
* overflow memory.
*/
static bool is_filled(const void *buf, uint8_t c, size_t len)
{
const uint8_t *cbuf = buf;
for (; len; cbuf++, len--)
if (*cbuf != c)
return false;
return true;
}
#define ioctl_check_buf(fd, cmd) \
({ \
size_t _cmd_len = *(__u32 *)buffer; \
\
memset(buffer + _cmd_len, 0xAA, BUFFER_SIZE - _cmd_len); \
ASSERT_EQ(0, ioctl(fd, cmd, buffer)); \
ASSERT_EQ(true, is_filled(buffer + _cmd_len, 0xAA, \
BUFFER_SIZE - _cmd_len)); \
})
static void check_vfio_info_cap_chain(struct __test_metadata *_metadata,
struct vfio_iommu_type1_info *info_cmd)
{
const struct vfio_info_cap_header *cap;
ASSERT_GE(info_cmd->argsz, info_cmd->cap_offset + sizeof(*cap));
cap = buffer + info_cmd->cap_offset;
while (true) {
size_t cap_size;
if (cap->next)
cap_size = (buffer + cap->next) - (void *)cap;
else
cap_size = (buffer + info_cmd->argsz) - (void *)cap;
switch (cap->id) {
case VFIO_IOMMU_TYPE1_INFO_CAP_IOVA_RANGE: {
struct vfio_iommu_type1_info_cap_iova_range *data =
(void *)cap;
ASSERT_EQ(1, data->header.version);
ASSERT_EQ(1, data->nr_iovas);
EXPECT_EQ(MOCK_APERTURE_START,
data->iova_ranges[0].start);
EXPECT_EQ(MOCK_APERTURE_LAST, data->iova_ranges[0].end);
break;
}
case VFIO_IOMMU_TYPE1_INFO_DMA_AVAIL: {
struct vfio_iommu_type1_info_dma_avail *data =
(void *)cap;
ASSERT_EQ(1, data->header.version);
ASSERT_EQ(sizeof(*data), cap_size);
break;
}
default:
ASSERT_EQ(false, true);
break;
}
if (!cap->next)
break;
ASSERT_GE(info_cmd->argsz, cap->next + sizeof(*cap));
ASSERT_GE(buffer + cap->next, (void *)cap);
cap = buffer + cap->next;
}
}
TEST_F(vfio_compat_mock_domain, get_info)
{
struct vfio_iommu_type1_info *info_cmd = buffer;
unsigned int i;
size_t caplen;
/* Pre-cap ABI */
*info_cmd = (struct vfio_iommu_type1_info){
.argsz = offsetof(struct vfio_iommu_type1_info, cap_offset),
};
ioctl_check_buf(self->fd, VFIO_IOMMU_GET_INFO);
ASSERT_NE(0, info_cmd->iova_pgsizes);
ASSERT_EQ(VFIO_IOMMU_INFO_PGSIZES | VFIO_IOMMU_INFO_CAPS,
info_cmd->flags);
/* Read the cap chain size */
*info_cmd = (struct vfio_iommu_type1_info){
.argsz = sizeof(*info_cmd),
};
ioctl_check_buf(self->fd, VFIO_IOMMU_GET_INFO);
ASSERT_NE(0, info_cmd->iova_pgsizes);
ASSERT_EQ(VFIO_IOMMU_INFO_PGSIZES | VFIO_IOMMU_INFO_CAPS,
info_cmd->flags);
ASSERT_EQ(0, info_cmd->cap_offset);
ASSERT_LT(sizeof(*info_cmd), info_cmd->argsz);
	/* Read the caps; the kernel should never create corrupted caps */
caplen = info_cmd->argsz;
for (i = sizeof(*info_cmd); i < caplen; i++) {
*info_cmd = (struct vfio_iommu_type1_info){
.argsz = i,
};
ioctl_check_buf(self->fd, VFIO_IOMMU_GET_INFO);
ASSERT_EQ(VFIO_IOMMU_INFO_PGSIZES | VFIO_IOMMU_INFO_CAPS,
info_cmd->flags);
if (!info_cmd->cap_offset)
continue;
check_vfio_info_cap_chain(_metadata, info_cmd);
}
}
static void shuffle_array(unsigned long *array, size_t nelms)
{
unsigned int i;
/* Shuffle */
for (i = 0; i != nelms; i++) {
unsigned long tmp = array[i];
unsigned int other = rand() % (nelms - i);
array[i] = array[other];
array[other] = tmp;
}
}
TEST_F(vfio_compat_mock_domain, map)
{
struct vfio_iommu_type1_dma_map map_cmd = {
.argsz = sizeof(map_cmd),
.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
.vaddr = (uintptr_t)buffer,
.size = BUFFER_SIZE,
.iova = MOCK_APERTURE_START,
};
struct vfio_iommu_type1_dma_unmap unmap_cmd = {
.argsz = sizeof(unmap_cmd),
.size = BUFFER_SIZE,
.iova = MOCK_APERTURE_START,
};
unsigned long pages_iova[BUFFER_SIZE / PAGE_SIZE];
unsigned int i;
/* Simple map/unmap */
ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd));
ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd));
ASSERT_EQ(BUFFER_SIZE, unmap_cmd.size);
	/* UNMAP_FLAG_ALL requires 0 iova/size */
ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd));
unmap_cmd.flags = VFIO_DMA_UNMAP_FLAG_ALL;
EXPECT_ERRNO(EINVAL, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd));
unmap_cmd.iova = 0;
unmap_cmd.size = 0;
ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd));
ASSERT_EQ(BUFFER_SIZE, unmap_cmd.size);
/* Small pages */
for (i = 0; i != ARRAY_SIZE(pages_iova); i++) {
map_cmd.iova = pages_iova[i] =
MOCK_APERTURE_START + i * PAGE_SIZE;
map_cmd.vaddr = (uintptr_t)buffer + i * PAGE_SIZE;
map_cmd.size = PAGE_SIZE;
ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd));
}
shuffle_array(pages_iova, ARRAY_SIZE(pages_iova));
unmap_cmd.flags = 0;
unmap_cmd.size = PAGE_SIZE;
for (i = 0; i != ARRAY_SIZE(pages_iova); i++) {
unmap_cmd.iova = pages_iova[i];
ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd));
}
}
TEST_F(vfio_compat_mock_domain, huge_map)
{
size_t buf_size = HUGEPAGE_SIZE * 2;
struct vfio_iommu_type1_dma_map map_cmd = {
.argsz = sizeof(map_cmd),
.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
.size = buf_size,
.iova = MOCK_APERTURE_START,
};
struct vfio_iommu_type1_dma_unmap unmap_cmd = {
.argsz = sizeof(unmap_cmd),
};
unsigned long pages_iova[16];
unsigned int i;
void *buf;
/* Test huge pages and splitting */
buf = mmap(0, buf_size, PROT_READ | PROT_WRITE,
MAP_SHARED | MAP_ANONYMOUS | MAP_HUGETLB | MAP_POPULATE, -1,
0);
ASSERT_NE(MAP_FAILED, buf);
map_cmd.vaddr = (uintptr_t)buf;
ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd));
unmap_cmd.size = buf_size / ARRAY_SIZE(pages_iova);
for (i = 0; i != ARRAY_SIZE(pages_iova); i++)
pages_iova[i] = MOCK_APERTURE_START + (i * unmap_cmd.size);
shuffle_array(pages_iova, ARRAY_SIZE(pages_iova));
/* type1 mode can cut up larger mappings, type1v2 always fails */
for (i = 0; i != ARRAY_SIZE(pages_iova); i++) {
unmap_cmd.iova = pages_iova[i];
unmap_cmd.size = buf_size / ARRAY_SIZE(pages_iova);
if (variant->version == VFIO_TYPE1_IOMMU) {
ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA,
&unmap_cmd));
} else {
EXPECT_ERRNO(ENOENT,
ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA,
&unmap_cmd));
}
}
}
TEST_HARNESS_MAIN
| linux-master | tools/testing/selftests/iommu/iommufd.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* sigreturn.c - tests for x86 sigreturn(2) and exit-to-userspace
* Copyright (c) 2014-2015 Andrew Lutomirski
*
* This is a series of tests that exercises the sigreturn(2) syscall and
* the IRET / SYSRET paths in the kernel.
*
* For now, this focuses on the effects of unusual CS and SS values,
* and it has a bunch of tests to make sure that ESP/RSP is restored
* properly.
*
* The basic idea behind these tests is to raise(SIGUSR1) to create a
* sigcontext frame, plug in the values to be tested, and then return,
* which implicitly invokes sigreturn(2) and programs the user context
* as desired.
*
* For tests for which we expect sigreturn and the subsequent return to
* user mode to succeed, we return to a short trampoline that generates
* SIGTRAP so that the meat of the tests can be ordinary C code in a
* SIGTRAP handler.
*
 * The inner workings of each test are documented below.
*
* Do not run on outdated, unpatched kernels at risk of nasty crashes.
*/
#define _GNU_SOURCE
#include <sys/time.h>
#include <time.h>
#include <stdlib.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>
#include <sys/mman.h>
#include <sys/signal.h>
#include <sys/ucontext.h>
#include <asm/ldt.h>
#include <err.h>
#include <setjmp.h>
#include <stddef.h>
#include <stdbool.h>
#include <sys/ptrace.h>
#include <sys/user.h>
/* Pull in AR_xyz defines. */
typedef unsigned int u32;
typedef unsigned short u16;
#include "../../../../arch/x86/include/asm/desc_defs.h"
/*
* Copied from asm/ucontext.h, as asm/ucontext.h conflicts badly with the glibc
* headers.
*/
#ifdef __x86_64__
/*
* UC_SIGCONTEXT_SS will be set when delivering 64-bit or x32 signals on
* kernels that save SS in the sigcontext. All kernels that set
* UC_SIGCONTEXT_SS will correctly restore at least the low 32 bits of esp
* regardless of SS (i.e. they implement espfix).
*
* Kernels that set UC_SIGCONTEXT_SS will also set UC_STRICT_RESTORE_SS
* when delivering a signal that came from 64-bit code.
*
* Sigreturn restores SS as follows:
*
* if (saved SS is valid || UC_STRICT_RESTORE_SS is set ||
* saved CS is not 64-bit)
* new SS = saved SS (will fail IRET and signal if invalid)
* else
* new SS = a flat 32-bit data segment
*/
#define UC_SIGCONTEXT_SS 0x2
#define UC_STRICT_RESTORE_SS 0x4
#endif
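/* See validate_signal_ss() below for how these flags are consumed. */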
/*
* In principle, this test can run on Linux emulation layers (e.g.
* Illumos "LX branded zones"). Solaris-based kernels reserve LDT
* entries 0-5 for their own internal purposes, so start our LDT
* allocations above that reservation. (The tests don't pass on LX
* branded zones, but at least this lets them run.)
*/
#define LDT_OFFSET 6
/* An aligned stack accessible through some of our segments. */
static unsigned char stack16[65536] __attribute__((aligned(4096)));
/*
* An aligned int3 instruction used as a trampoline. Some of the tests
 * want to fish out their ss values, so this trampoline copies ss to ecx
* before the int3.
*/
asm (".pushsection .text\n\t"
".type int3, @function\n\t"
".align 4096\n\t"
"int3:\n\t"
"mov %ss,%ecx\n\t"
"int3\n\t"
".size int3, . - int3\n\t"
".align 4096, 0xcc\n\t"
".popsection");
extern char int3[4096];
/*
 * At startup, we prepare:
*
* - ldt_nonexistent_sel: An LDT entry that doesn't exist (all-zero
* descriptor or out of bounds).
* - code16_sel: A 16-bit LDT code segment pointing to int3.
* - data16_sel: A 16-bit LDT data segment pointing to stack16.
* - npcode32_sel: A 32-bit not-present LDT code segment pointing to int3.
* - npdata32_sel: A 32-bit not-present LDT data segment pointing to stack16.
* - gdt_data16_idx: A 16-bit GDT data segment pointing to stack16.
* - gdt_npdata32_idx: A 32-bit not-present GDT data segment pointing to
* stack16.
*
* For no particularly good reason, xyz_sel is a selector value with the
* RPL and LDT bits filled in, whereas xyz_idx is just an index into the
* descriptor table. These variables will be zero if their respective
* segments could not be allocated.
*/
static unsigned short ldt_nonexistent_sel;
static unsigned short code16_sel, data16_sel, npcode32_sel, npdata32_sel;
static unsigned short gdt_data16_idx, gdt_npdata32_idx;
static unsigned short GDT3(int idx)
{
return (idx << 3) | 3;
}
static unsigned short LDT3(int idx)
{
return (idx << 3) | 7;
}
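/*
 * Selector encoding refresher: bits 3 and up hold the descriptor index,
 * bit 2 is the table indicator (1 = LDT, 0 = GDT), and bits 0-1 are the
 * RPL.  For example, GDT3(2) == 0x13 and LDT3(6) == 0x37.
 */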
static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *),
int flags)
{
struct sigaction sa;
memset(&sa, 0, sizeof(sa));
sa.sa_sigaction = handler;
sa.sa_flags = SA_SIGINFO | flags;
sigemptyset(&sa.sa_mask);
if (sigaction(sig, &sa, 0))
err(1, "sigaction");
}
static void clearhandler(int sig)
{
struct sigaction sa;
memset(&sa, 0, sizeof(sa));
sa.sa_handler = SIG_DFL;
sigemptyset(&sa.sa_mask);
if (sigaction(sig, &sa, 0))
err(1, "sigaction");
}
static void add_ldt(const struct user_desc *desc, unsigned short *var,
const char *name)
{
if (syscall(SYS_modify_ldt, 1, desc, sizeof(*desc)) == 0) {
*var = LDT3(desc->entry_number);
} else {
printf("[NOTE]\tFailed to create %s segment\n", name);
*var = 0;
}
}
static void setup_ldt(void)
{
if ((unsigned long)stack16 > (1ULL << 32) - sizeof(stack16))
errx(1, "stack16 is too high\n");
if ((unsigned long)int3 > (1ULL << 32) - sizeof(int3))
errx(1, "int3 is too high\n");
ldt_nonexistent_sel = LDT3(LDT_OFFSET + 2);
const struct user_desc code16_desc = {
.entry_number = LDT_OFFSET + 0,
.base_addr = (unsigned long)int3,
.limit = 4095,
.seg_32bit = 0,
.contents = 2, /* Code, not conforming */
.read_exec_only = 0,
.limit_in_pages = 0,
.seg_not_present = 0,
.useable = 0
};
add_ldt(&code16_desc, &code16_sel, "code16");
const struct user_desc data16_desc = {
.entry_number = LDT_OFFSET + 1,
.base_addr = (unsigned long)stack16,
.limit = 0xffff,
.seg_32bit = 0,
.contents = 0, /* Data, grow-up */
.read_exec_only = 0,
.limit_in_pages = 0,
.seg_not_present = 0,
.useable = 0
};
add_ldt(&data16_desc, &data16_sel, "data16");
const struct user_desc npcode32_desc = {
.entry_number = LDT_OFFSET + 3,
.base_addr = (unsigned long)int3,
.limit = 4095,
.seg_32bit = 1,
.contents = 2, /* Code, not conforming */
.read_exec_only = 0,
.limit_in_pages = 0,
.seg_not_present = 1,
.useable = 0
};
add_ldt(&npcode32_desc, &npcode32_sel, "npcode32");
const struct user_desc npdata32_desc = {
.entry_number = LDT_OFFSET + 4,
.base_addr = (unsigned long)stack16,
.limit = 0xffff,
.seg_32bit = 1,
.contents = 0, /* Data, grow-up */
.read_exec_only = 0,
.limit_in_pages = 0,
.seg_not_present = 1,
.useable = 0
};
add_ldt(&npdata32_desc, &npdata32_sel, "npdata32");
struct user_desc gdt_data16_desc = {
.entry_number = -1,
.base_addr = (unsigned long)stack16,
.limit = 0xffff,
.seg_32bit = 0,
.contents = 0, /* Data, grow-up */
.read_exec_only = 0,
.limit_in_pages = 0,
.seg_not_present = 0,
.useable = 0
};
if (syscall(SYS_set_thread_area, &gdt_data16_desc) == 0) {
/*
* This probably indicates vulnerability to CVE-2014-8133.
* Merely getting here isn't definitive, though, and we'll
* diagnose the problem for real later on.
*/
printf("[WARN]\tset_thread_area allocated data16 at index %d\n",
gdt_data16_desc.entry_number);
gdt_data16_idx = gdt_data16_desc.entry_number;
} else {
printf("[OK]\tset_thread_area refused 16-bit data\n");
}
struct user_desc gdt_npdata32_desc = {
.entry_number = -1,
.base_addr = (unsigned long)stack16,
.limit = 0xffff,
.seg_32bit = 1,
.contents = 0, /* Data, grow-up */
.read_exec_only = 0,
.limit_in_pages = 0,
.seg_not_present = 1,
.useable = 0
};
if (syscall(SYS_set_thread_area, &gdt_npdata32_desc) == 0) {
/*
* As a hardening measure, newer kernels don't allow this.
*/
printf("[WARN]\tset_thread_area allocated npdata32 at index %d\n",
gdt_npdata32_desc.entry_number);
gdt_npdata32_idx = gdt_npdata32_desc.entry_number;
} else {
printf("[OK]\tset_thread_area refused 16-bit data\n");
}
}
/* State used by our signal handlers. */
static gregset_t initial_regs, requested_regs, resulting_regs;
/* Instructions for the SIGUSR1 handler. */
static volatile unsigned short sig_cs, sig_ss;
static volatile sig_atomic_t sig_trapped, sig_err, sig_trapno;
#ifdef __x86_64__
static volatile sig_atomic_t sig_corrupt_final_ss;
#endif
/* Abstractions for some 32-bit vs 64-bit differences. */
#ifdef __x86_64__
# define REG_IP REG_RIP
# define REG_SP REG_RSP
# define REG_CX REG_RCX
struct selectors {
unsigned short cs, gs, fs, ss;
};
static unsigned short *ssptr(ucontext_t *ctx)
{
struct selectors *sels = (void *)&ctx->uc_mcontext.gregs[REG_CSGSFS];
return &sels->ss;
}
static unsigned short *csptr(ucontext_t *ctx)
{
struct selectors *sels = (void *)&ctx->uc_mcontext.gregs[REG_CSGSFS];
return &sels->cs;
}
#else
# define REG_IP REG_EIP
# define REG_SP REG_ESP
# define REG_CX REG_ECX
static greg_t *ssptr(ucontext_t *ctx)
{
return &ctx->uc_mcontext.gregs[REG_SS];
}
static greg_t *csptr(ucontext_t *ctx)
{
return &ctx->uc_mcontext.gregs[REG_CS];
}
#endif
/*
* Checks a given selector for its code bitness or returns -1 if it's not
* a usable code segment selector.
*/
int cs_bitness(unsigned short cs)
{
uint32_t valid = 0, ar;
asm ("lar %[cs], %[ar]\n\t"
"jnz 1f\n\t"
"mov $1, %[valid]\n\t"
"1:"
: [ar] "=r" (ar), [valid] "+rm" (valid)
: [cs] "r" (cs));
if (!valid)
return -1;
bool db = (ar & (1 << 22));
bool l = (ar & (1 << 21));
if (!(ar & (1<<11)))
return -1; /* Not code. */
if (l && !db)
return 64;
else if (!l && db)
return 32;
else if (!l && !db)
return 16;
else
return -1; /* Unknown bitness. */
}
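/*
 * For example, the standard Linux 64-bit user code selector (0x33) has
 * L=1 and D/B=0, so cs_bitness() returns 64 for it, while the 32-bit
 * user code selector (0x23) has L=0 and D/B=1 and yields 32.
 */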
/*
 * Checks whether a given selector is usable as a stack segment, i.e.
 * whether it refers to a present, writable data segment.
*/
bool is_valid_ss(unsigned short cs)
{
uint32_t valid = 0, ar;
asm ("lar %[cs], %[ar]\n\t"
"jnz 1f\n\t"
"mov $1, %[valid]\n\t"
"1:"
: [ar] "=r" (ar), [valid] "+rm" (valid)
: [cs] "r" (cs));
if (!valid)
return false;
if ((ar & AR_TYPE_MASK) != AR_TYPE_RWDATA &&
(ar & AR_TYPE_MASK) != AR_TYPE_RWDATA_EXPDOWN)
return false;
return (ar & AR_P);
}
/* Number of errors in the current test case. */
static volatile sig_atomic_t nerrs;
static void validate_signal_ss(int sig, ucontext_t *ctx)
{
#ifdef __x86_64__
bool was_64bit = (cs_bitness(*csptr(ctx)) == 64);
if (!(ctx->uc_flags & UC_SIGCONTEXT_SS)) {
printf("[FAIL]\tUC_SIGCONTEXT_SS was not set\n");
nerrs++;
/*
* This happens on Linux 4.1. The rest will fail, too, so
* return now to reduce the noise.
*/
return;
}
/* UC_STRICT_RESTORE_SS is set iff we came from 64-bit mode. */
if (!!(ctx->uc_flags & UC_STRICT_RESTORE_SS) != was_64bit) {
printf("[FAIL]\tUC_STRICT_RESTORE_SS was wrong in signal %d\n",
sig);
nerrs++;
}
if (is_valid_ss(*ssptr(ctx))) {
/*
* DOSEMU was written before 64-bit sigcontext had SS, and
* it tries to figure out the signal source SS by looking at
* the physical register. Make sure that keeps working.
*/
unsigned short hw_ss;
asm ("mov %%ss, %0" : "=rm" (hw_ss));
if (hw_ss != *ssptr(ctx)) {
printf("[FAIL]\tHW SS didn't match saved SS\n");
nerrs++;
}
}
#endif
}
/*
* SIGUSR1 handler. Sets CS and SS as requested and points IP to the
* int3 trampoline. Sets SP to a large known value so that we can see
* whether the value round-trips back to user mode correctly.
*/
static void sigusr1(int sig, siginfo_t *info, void *ctx_void)
{
ucontext_t *ctx = (ucontext_t*)ctx_void;
validate_signal_ss(sig, ctx);
memcpy(&initial_regs, &ctx->uc_mcontext.gregs, sizeof(gregset_t));
*csptr(ctx) = sig_cs;
*ssptr(ctx) = sig_ss;
ctx->uc_mcontext.gregs[REG_IP] =
sig_cs == code16_sel ? 0 : (unsigned long)&int3;
ctx->uc_mcontext.gregs[REG_SP] = (unsigned long)0x8badf00d5aadc0deULL;
ctx->uc_mcontext.gregs[REG_CX] = 0;
#ifdef __i386__
/*
* Make sure the kernel doesn't inadvertently use DS or ES-relative
* accesses in a region where user DS or ES is loaded.
*
* Skip this for 64-bit builds because long mode doesn't care about
* DS and ES and skipping it increases test coverage a little bit,
* since 64-bit kernels can still run the 32-bit build.
*/
ctx->uc_mcontext.gregs[REG_DS] = 0;
ctx->uc_mcontext.gregs[REG_ES] = 0;
#endif
memcpy(&requested_regs, &ctx->uc_mcontext.gregs, sizeof(gregset_t));
requested_regs[REG_CX] = *ssptr(ctx); /* The asm code does this. */
return;
}
/*
* Called after a successful sigreturn (via int3) or from a failed
* sigreturn (directly by kernel). Restores our state so that the
* original raise(SIGUSR1) returns.
*/
static void sigtrap(int sig, siginfo_t *info, void *ctx_void)
{
ucontext_t *ctx = (ucontext_t*)ctx_void;
validate_signal_ss(sig, ctx);
sig_err = ctx->uc_mcontext.gregs[REG_ERR];
sig_trapno = ctx->uc_mcontext.gregs[REG_TRAPNO];
unsigned short ss;
asm ("mov %%ss,%0" : "=r" (ss));
greg_t asm_ss = ctx->uc_mcontext.gregs[REG_CX];
if (asm_ss != sig_ss && sig == SIGTRAP) {
/* Sanity check failure. */
printf("[FAIL]\tSIGTRAP: ss = %hx, frame ss = %hx, ax = %llx\n",
ss, *ssptr(ctx), (unsigned long long)asm_ss);
nerrs++;
}
memcpy(&resulting_regs, &ctx->uc_mcontext.gregs, sizeof(gregset_t));
memcpy(&ctx->uc_mcontext.gregs, &initial_regs, sizeof(gregset_t));
#ifdef __x86_64__
if (sig_corrupt_final_ss) {
if (ctx->uc_flags & UC_STRICT_RESTORE_SS) {
printf("[FAIL]\tUC_STRICT_RESTORE_SS was set inappropriately\n");
nerrs++;
} else {
/*
* DOSEMU transitions from 32-bit to 64-bit mode by
* adjusting sigcontext, and it requires that this work
* even if the saved SS is bogus.
*/
printf("\tCorrupting SS on return to 64-bit mode\n");
*ssptr(ctx) = 0;
}
}
#endif
sig_trapped = sig;
}
#ifdef __x86_64__
/* Tests recovery if !UC_STRICT_RESTORE_SS */
static void sigusr2(int sig, siginfo_t *info, void *ctx_void)
{
ucontext_t *ctx = (ucontext_t*)ctx_void;
if (!(ctx->uc_flags & UC_STRICT_RESTORE_SS)) {
printf("[FAIL]\traise(2) didn't set UC_STRICT_RESTORE_SS\n");
nerrs++;
return; /* We can't do the rest. */
}
ctx->uc_flags &= ~UC_STRICT_RESTORE_SS;
*ssptr(ctx) = 0;
/* Return. The kernel should recover without sending another signal. */
}
static int test_nonstrict_ss(void)
{
clearhandler(SIGUSR1);
clearhandler(SIGTRAP);
clearhandler(SIGSEGV);
clearhandler(SIGILL);
sethandler(SIGUSR2, sigusr2, 0);
nerrs = 0;
printf("[RUN]\tClear UC_STRICT_RESTORE_SS and corrupt SS\n");
raise(SIGUSR2);
if (!nerrs)
printf("[OK]\tIt worked\n");
return nerrs;
}
#endif
/* Finds a usable code segment of the requested bitness. */
int find_cs(int bitness)
{
unsigned short my_cs;
asm ("mov %%cs,%0" : "=r" (my_cs));
if (cs_bitness(my_cs) == bitness)
return my_cs;
if (cs_bitness(my_cs + (2 << 3)) == bitness)
return my_cs + (2 << 3);
if (my_cs > (2<<3) && cs_bitness(my_cs - (2 << 3)) == bitness)
return my_cs - (2 << 3);
if (cs_bitness(code16_sel) == bitness)
return code16_sel;
printf("[WARN]\tCould not find %d-bit CS\n", bitness);
return -1;
}
static int test_valid_sigreturn(int cs_bits, bool use_16bit_ss, int force_ss)
{
int cs = find_cs(cs_bits);
if (cs == -1) {
printf("[SKIP]\tCode segment unavailable for %d-bit CS, %d-bit SS\n",
cs_bits, use_16bit_ss ? 16 : 32);
return 0;
}
if (force_ss != -1) {
sig_ss = force_ss;
} else {
if (use_16bit_ss) {
if (!data16_sel) {
printf("[SKIP]\tData segment unavailable for %d-bit CS, 16-bit SS\n",
cs_bits);
return 0;
}
sig_ss = data16_sel;
} else {
asm volatile ("mov %%ss,%0" : "=r" (sig_ss));
}
}
sig_cs = cs;
printf("[RUN]\tValid sigreturn: %d-bit CS (%hx), %d-bit SS (%hx%s)\n",
cs_bits, sig_cs, use_16bit_ss ? 16 : 32, sig_ss,
(sig_ss & 4) ? "" : ", GDT");
raise(SIGUSR1);
nerrs = 0;
/*
* Check that each register had an acceptable value when the
* int3 trampoline was invoked.
*/
for (int i = 0; i < NGREG; i++) {
greg_t req = requested_regs[i], res = resulting_regs[i];
if (i == REG_TRAPNO || i == REG_IP)
continue; /* don't care */
if (i == REG_SP) {
/*
* If we were using a 16-bit stack segment, then
* the kernel is a bit stuck: IRET only restores
* the low 16 bits of ESP/RSP if SS is 16-bit.
* The kernel uses a hack to restore bits 31:16,
* but that hack doesn't help with bits 63:32.
* On Intel CPUs, bits 63:32 end up zeroed, and, on
* AMD CPUs, they leak the high bits of the kernel
* espfix64 stack pointer. There's very little that
* the kernel can do about it.
*
* Similarly, if we are returning to a 32-bit context,
* the CPU will often lose the high 32 bits of RSP.
*/
if (res == req)
continue;
if (cs_bits != 64 && ((res ^ req) & 0xFFFFFFFF) == 0) {
printf("[NOTE]\tSP: %llx -> %llx\n",
(unsigned long long)req,
(unsigned long long)res);
continue;
}
printf("[FAIL]\tSP mismatch: requested 0x%llx; got 0x%llx\n",
(unsigned long long)requested_regs[i],
(unsigned long long)resulting_regs[i]);
nerrs++;
continue;
}
bool ignore_reg = false;
#if __i386__
if (i == REG_UESP)
ignore_reg = true;
#else
if (i == REG_CSGSFS) {
struct selectors *req_sels =
(void *)&requested_regs[REG_CSGSFS];
struct selectors *res_sels =
(void *)&resulting_regs[REG_CSGSFS];
if (req_sels->cs != res_sels->cs) {
printf("[FAIL]\tCS mismatch: requested 0x%hx; got 0x%hx\n",
req_sels->cs, res_sels->cs);
nerrs++;
}
if (req_sels->ss != res_sels->ss) {
printf("[FAIL]\tSS mismatch: requested 0x%hx; got 0x%hx\n",
req_sels->ss, res_sels->ss);
nerrs++;
}
continue;
}
#endif
/* Sanity check on the kernel */
if (i == REG_CX && req != res) {
printf("[FAIL]\tCX (saved SP) mismatch: requested 0x%llx; got 0x%llx\n",
(unsigned long long)req,
(unsigned long long)res);
nerrs++;
continue;
}
if (req != res && !ignore_reg) {
printf("[FAIL]\tReg %d mismatch: requested 0x%llx; got 0x%llx\n",
i, (unsigned long long)req,
(unsigned long long)res);
nerrs++;
}
}
if (nerrs == 0)
printf("[OK]\tall registers okay\n");
return nerrs;
}
static int test_bad_iret(int cs_bits, unsigned short ss, int force_cs)
{
int cs = force_cs == -1 ? find_cs(cs_bits) : force_cs;
if (cs == -1)
return 0;
sig_cs = cs;
sig_ss = ss;
printf("[RUN]\t%d-bit CS (%hx), bogus SS (%hx)\n",
cs_bits, sig_cs, sig_ss);
sig_trapped = 0;
raise(SIGUSR1);
if (sig_trapped) {
char errdesc[32] = "";
if (sig_err) {
const char *src = (sig_err & 1) ? " EXT" : "";
const char *table;
if ((sig_err & 0x6) == 0x0)
table = "GDT";
else if ((sig_err & 0x6) == 0x4)
table = "LDT";
else if ((sig_err & 0x6) == 0x2)
table = "IDT";
else
table = "???";
sprintf(errdesc, "%s%s index %d, ",
table, src, sig_err >> 3);
}
char trapname[32];
if (sig_trapno == 13)
strcpy(trapname, "GP");
else if (sig_trapno == 11)
strcpy(trapname, "NP");
else if (sig_trapno == 12)
strcpy(trapname, "SS");
else if (sig_trapno == 32)
strcpy(trapname, "IRET"); /* X86_TRAP_IRET */
else
sprintf(trapname, "%d", sig_trapno);
printf("[OK]\tGot #%s(0x%lx) (i.e. %s%s)\n",
trapname, (unsigned long)sig_err,
errdesc, strsignal(sig_trapped));
return 0;
} else {
/*
* This also implicitly tests UC_STRICT_RESTORE_SS:
* We check that these signals set UC_STRICT_RESTORE_SS and,
* if UC_STRICT_RESTORE_SS doesn't cause strict behavior,
* then we won't get SIGSEGV.
*/
printf("[FAIL]\tDid not get SIGSEGV\n");
return 1;
}
}
int main()
{
int total_nerrs = 0;
unsigned short my_cs, my_ss;
asm volatile ("mov %%cs,%0" : "=r" (my_cs));
asm volatile ("mov %%ss,%0" : "=r" (my_ss));
setup_ldt();
stack_t stack = {
/* Our sigaltstack scratch space. */
.ss_sp = malloc(sizeof(char) * SIGSTKSZ),
.ss_size = SIGSTKSZ,
};
if (sigaltstack(&stack, NULL) != 0)
err(1, "sigaltstack");
sethandler(SIGUSR1, sigusr1, 0);
sethandler(SIGTRAP, sigtrap, SA_ONSTACK);
/* Easy cases: return to a 32-bit SS in each possible CS bitness. */
total_nerrs += test_valid_sigreturn(64, false, -1);
total_nerrs += test_valid_sigreturn(32, false, -1);
total_nerrs += test_valid_sigreturn(16, false, -1);
/*
* Test easy espfix cases: return to a 16-bit LDT SS in each possible
* CS bitness. NB: with a long mode CS, the SS bitness is irrelevant.
*
* This catches the original missing-espfix-on-64-bit-kernels issue
* as well as CVE-2014-8134.
*/
total_nerrs += test_valid_sigreturn(64, true, -1);
total_nerrs += test_valid_sigreturn(32, true, -1);
total_nerrs += test_valid_sigreturn(16, true, -1);
if (gdt_data16_idx) {
/*
* For performance reasons, Linux skips espfix if SS points
* to the GDT. If we were able to allocate a 16-bit SS in
* the GDT, see if it leaks parts of the kernel stack pointer.
*
* This tests for CVE-2014-8133.
*/
total_nerrs += test_valid_sigreturn(64, true,
GDT3(gdt_data16_idx));
total_nerrs += test_valid_sigreturn(32, true,
GDT3(gdt_data16_idx));
total_nerrs += test_valid_sigreturn(16, true,
GDT3(gdt_data16_idx));
}
#ifdef __x86_64__
/* Nasty ABI case: check SS corruption handling. */
sig_corrupt_final_ss = 1;
total_nerrs += test_valid_sigreturn(32, false, -1);
total_nerrs += test_valid_sigreturn(32, true, -1);
sig_corrupt_final_ss = 0;
#endif
/*
* We're done testing valid sigreturn cases. Now we test states
* for which sigreturn itself will succeed but the subsequent
* entry to user mode will fail.
*
* Depending on the failure mode and the kernel bitness, these
* entry failures can generate SIGSEGV, SIGBUS, or SIGILL.
*/
clearhandler(SIGTRAP);
sethandler(SIGSEGV, sigtrap, SA_ONSTACK);
sethandler(SIGBUS, sigtrap, SA_ONSTACK);
sethandler(SIGILL, sigtrap, SA_ONSTACK); /* 32-bit kernels do this */
/* Easy failures: invalid SS, resulting in #GP(0) */
test_bad_iret(64, ldt_nonexistent_sel, -1);
test_bad_iret(32, ldt_nonexistent_sel, -1);
test_bad_iret(16, ldt_nonexistent_sel, -1);
/* These fail because SS isn't a data segment, resulting in #GP(SS) */
test_bad_iret(64, my_cs, -1);
test_bad_iret(32, my_cs, -1);
test_bad_iret(16, my_cs, -1);
	/* Try to return to a not-present code segment, triggering #NP(CS). */
test_bad_iret(32, my_ss, npcode32_sel);
/*
* Try to return to a not-present but otherwise valid data segment.
* This will cause IRET to fail with #SS on the espfix stack. This
* exercises CVE-2014-9322.
*
* Note that, if espfix is enabled, 64-bit Linux will lose track
* of the actual cause of failure and report #GP(0) instead.
* This would be very difficult for Linux to avoid, because
* espfix64 causes IRET failures to be promoted to #DF, so the
* original exception frame is never pushed onto the stack.
*/
test_bad_iret(32, npdata32_sel, -1);
/*
* Try to return to a not-present but otherwise valid data
* segment without invoking espfix. Newer kernels don't allow
* this to happen in the first place. On older kernels, though,
* this can trigger CVE-2014-9322.
*/
if (gdt_npdata32_idx)
test_bad_iret(32, GDT3(gdt_npdata32_idx), -1);
#ifdef __x86_64__
total_nerrs += test_nonstrict_ss();
#endif
free(stack.ss_sp);
return total_nerrs ? 1 : 0;
}
| linux-master | tools/testing/selftests/x86/sigreturn.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* check_initial_reg_state.c - check that execve sets the correct state
* Copyright (c) 2014-2016 Andrew Lutomirski
*/
#define _GNU_SOURCE
#include <stdio.h>
unsigned long ax, bx, cx, dx, si, di, bp, sp, flags;
unsigned long r8, r9, r10, r11, r12, r13, r14, r15;
asm (
".pushsection .text\n\t"
".type real_start, @function\n\t"
".global real_start\n\t"
"real_start:\n\t"
#ifdef __x86_64__
"mov %rax, ax\n\t"
"mov %rbx, bx\n\t"
"mov %rcx, cx\n\t"
"mov %rdx, dx\n\t"
"mov %rsi, si\n\t"
"mov %rdi, di\n\t"
"mov %rbp, bp\n\t"
"mov %rsp, sp\n\t"
"mov %r8, r8\n\t"
"mov %r9, r9\n\t"
"mov %r10, r10\n\t"
"mov %r11, r11\n\t"
"mov %r12, r12\n\t"
"mov %r13, r13\n\t"
"mov %r14, r14\n\t"
"mov %r15, r15\n\t"
"pushfq\n\t"
"popq flags\n\t"
#else
"mov %eax, ax\n\t"
"mov %ebx, bx\n\t"
"mov %ecx, cx\n\t"
"mov %edx, dx\n\t"
"mov %esi, si\n\t"
"mov %edi, di\n\t"
"mov %ebp, bp\n\t"
"mov %esp, sp\n\t"
"pushfl\n\t"
"popl flags\n\t"
#endif
"jmp _start\n\t"
".size real_start, . - real_start\n\t"
".popsection");
int main()
{
int nerrs = 0;
if (sp == 0) {
printf("[FAIL]\tTest was built incorrectly\n");
return 1;
}
if (ax || bx || cx || dx || si || di || bp
#ifdef __x86_64__
|| r8 || r9 || r10 || r11 || r12 || r13 || r14 || r15
#endif
) {
printf("[FAIL]\tAll GPRs except SP should be 0\n");
#define SHOW(x) printf("\t" #x " = 0x%lx\n", x);
SHOW(ax);
SHOW(bx);
SHOW(cx);
SHOW(dx);
SHOW(si);
SHOW(di);
SHOW(bp);
SHOW(sp);
#ifdef __x86_64__
SHOW(r8);
SHOW(r9);
SHOW(r10);
SHOW(r11);
SHOW(r12);
SHOW(r13);
SHOW(r14);
SHOW(r15);
#endif
nerrs++;
} else {
printf("[OK]\tAll GPRs except SP are 0\n");
}
if (flags != 0x202) {
printf("[FAIL]\tFLAGS is 0x%lx, but it should be 0x202\n", flags);
nerrs++;
} else {
printf("[OK]\tFLAGS is 0x202\n");
}
return nerrs ? 1 : 0;
}
| linux-master | tools/testing/selftests/x86/check_initial_reg_state.c |
// SPDX-License-Identifier: GPL-2.0
#undef _GNU_SOURCE
#define _GNU_SOURCE 1
#undef __USE_GNU
#define __USE_GNU 1
#include <unistd.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <signal.h>
#include <sys/types.h>
#include <sys/select.h>
#include <sys/time.h>
#include <sys/wait.h>
#include <fenv.h>
enum {
CF = 1 << 0,
PF = 1 << 2,
ZF = 1 << 6,
ARITH = CF | PF | ZF,
};
long res_fcomi_pi_1;
long res_fcomi_1_pi;
long res_fcomi_1_1;
long res_fcomi_nan_1;
/* sNaN is s|111 1111 1|1xx xxxx xxxx xxxx xxxx xxxx */
/* qNaN is s|111 1111 1|0xx xxxx xxxx xxxx xxxx xxxx (some x must be nonzero) */
int snan = 0x7fc11111;
int qnan = 0x7f811111;
unsigned short snan1[5];
/* sNaN80 is s|111 1111 1111 1111 |10xx xx...xx (some x must be nonzero) */
unsigned short snan80[5] = { 0x1111, 0x1111, 0x1111, 0x8111, 0x7fff };
int test(long flags)
{
feclearexcept(FE_DIVBYZERO|FE_INEXACT|FE_INVALID|FE_OVERFLOW|FE_UNDERFLOW);
asm ("\n"
" push %0""\n"
" popf""\n"
" fld1""\n"
" fldpi""\n"
" fcomi %%st(1), %%st" "\n"
" ffree %%st(0)" "\n"
" ffree %%st(1)" "\n"
" pushf""\n"
" pop res_fcomi_1_pi""\n"
" push %0""\n"
" popf""\n"
" fldpi""\n"
" fld1""\n"
" fcomi %%st(1), %%st" "\n"
" ffree %%st(0)" "\n"
" ffree %%st(1)" "\n"
" pushf""\n"
" pop res_fcomi_pi_1""\n"
" push %0""\n"
" popf""\n"
" fld1""\n"
" fld1""\n"
" fcomi %%st(1), %%st" "\n"
" ffree %%st(0)" "\n"
" ffree %%st(1)" "\n"
" pushf""\n"
" pop res_fcomi_1_1""\n"
:
: "r" (flags)
);
if ((res_fcomi_1_pi & ARITH) != (0)) {
printf("[BAD]\tfcomi_1_pi with flags:%lx\n", flags);
return 1;
}
if ((res_fcomi_pi_1 & ARITH) != (CF)) {
printf("[BAD]\tfcomi_pi_1 with flags:%lx->%lx\n", flags, res_fcomi_pi_1 & ARITH);
return 1;
}
if ((res_fcomi_1_1 & ARITH) != (ZF)) {
printf("[BAD]\tfcomi_1_1 with flags:%lx\n", flags);
return 1;
}
if (fetestexcept(FE_INVALID) != 0) {
printf("[BAD]\tFE_INVALID is set in %s\n", __func__);
return 1;
}
return 0;
}
int test_qnan(long flags)
{
feclearexcept(FE_DIVBYZERO|FE_INEXACT|FE_INVALID|FE_OVERFLOW|FE_UNDERFLOW);
asm ("\n"
" push %0""\n"
" popf""\n"
" flds qnan""\n"
" fld1""\n"
" fnclex""\n" // fld of a qnan raised FE_INVALID, clear it
" fcomi %%st(1), %%st" "\n"
" ffree %%st(0)" "\n"
" ffree %%st(1)" "\n"
" pushf""\n"
" pop res_fcomi_nan_1""\n"
:
: "r" (flags)
);
if ((res_fcomi_nan_1 & ARITH) != (ZF|CF|PF)) {
printf("[BAD]\tfcomi_qnan_1 with flags:%lx\n", flags);
return 1;
}
if (fetestexcept(FE_INVALID) != FE_INVALID) {
printf("[BAD]\tFE_INVALID is not set in %s\n", __func__);
return 1;
}
return 0;
}
int testu_qnan(long flags)
{
feclearexcept(FE_DIVBYZERO|FE_INEXACT|FE_INVALID|FE_OVERFLOW|FE_UNDERFLOW);
asm ("\n"
" push %0""\n"
" popf""\n"
" flds qnan""\n"
" fld1""\n"
" fnclex""\n" // fld of a qnan raised FE_INVALID, clear it
" fucomi %%st(1), %%st" "\n"
" ffree %%st(0)" "\n"
" ffree %%st(1)" "\n"
" pushf""\n"
" pop res_fcomi_nan_1""\n"
:
: "r" (flags)
);
if ((res_fcomi_nan_1 & ARITH) != (ZF|CF|PF)) {
printf("[BAD]\tfcomi_qnan_1 with flags:%lx\n", flags);
return 1;
}
if (fetestexcept(FE_INVALID) != 0) {
printf("[BAD]\tFE_INVALID is set in %s\n", __func__);
return 1;
}
return 0;
}
int testu_snan(long flags)
{
feclearexcept(FE_DIVBYZERO|FE_INEXACT|FE_INVALID|FE_OVERFLOW|FE_UNDERFLOW);
asm ("\n"
" push %0""\n"
" popf""\n"
// " flds snan""\n" // WRONG, this will convert 32-bit fp snan to a *qnan* in 80-bit fp register!
// " fstpt snan1""\n" // if uncommented, it prints "snan1:7fff c111 1100 0000 0000" - c111, not 8111!
// " fnclex""\n" // flds of a snan raised FE_INVALID, clear it
" fldt snan80""\n" // fldt never raise FE_INVALID
" fld1""\n"
" fucomi %%st(1), %%st" "\n"
" ffree %%st(0)" "\n"
" ffree %%st(1)" "\n"
" pushf""\n"
" pop res_fcomi_nan_1""\n"
:
: "r" (flags)
);
if ((res_fcomi_nan_1 & ARITH) != (ZF|CF|PF)) {
printf("[BAD]\tfcomi_qnan_1 with flags:%lx\n", flags);
return 1;
}
// printf("snan:%x snan1:%04x %04x %04x %04x %04x\n", snan, snan1[4], snan1[3], snan1[2], snan1[1], snan1[0]);
if (fetestexcept(FE_INVALID) != FE_INVALID) {
printf("[BAD]\tFE_INVALID is not set in %s\n", __func__);
return 1;
}
return 0;
}
int testp(long flags)
{
feclearexcept(FE_DIVBYZERO|FE_INEXACT|FE_INVALID|FE_OVERFLOW|FE_UNDERFLOW);
asm ("\n"
" push %0""\n"
" popf""\n"
" fld1""\n"
" fldpi""\n"
" fcomip %%st(1), %%st" "\n"
" ffree %%st(0)" "\n"
" pushf""\n"
" pop res_fcomi_1_pi""\n"
" push %0""\n"
" popf""\n"
" fldpi""\n"
" fld1""\n"
" fcomip %%st(1), %%st" "\n"
" ffree %%st(0)" "\n"
" pushf""\n"
" pop res_fcomi_pi_1""\n"
" push %0""\n"
" popf""\n"
" fld1""\n"
" fld1""\n"
" fcomip %%st(1), %%st" "\n"
" ffree %%st(0)" "\n"
" pushf""\n"
" pop res_fcomi_1_1""\n"
:
: "r" (flags)
);
if ((res_fcomi_1_pi & ARITH) != (0)) {
printf("[BAD]\tfcomi_1_pi with flags:%lx\n", flags);
return 1;
}
if ((res_fcomi_pi_1 & ARITH) != (CF)) {
printf("[BAD]\tfcomi_pi_1 with flags:%lx->%lx\n", flags, res_fcomi_pi_1 & ARITH);
return 1;
}
if ((res_fcomi_1_1 & ARITH) != (ZF)) {
printf("[BAD]\tfcomi_1_1 with flags:%lx\n", flags);
return 1;
}
if (fetestexcept(FE_INVALID) != 0) {
printf("[BAD]\tFE_INVALID is set in %s\n", __func__);
return 1;
}
return 0;
}
int testp_qnan(long flags)
{
feclearexcept(FE_DIVBYZERO|FE_INEXACT|FE_INVALID|FE_OVERFLOW|FE_UNDERFLOW);
asm ("\n"
" push %0""\n"
" popf""\n"
" flds qnan""\n"
" fld1""\n"
" fnclex""\n" // fld of a qnan raised FE_INVALID, clear it
" fcomip %%st(1), %%st" "\n"
" ffree %%st(0)" "\n"
" pushf""\n"
" pop res_fcomi_nan_1""\n"
:
: "r" (flags)
);
if ((res_fcomi_nan_1 & ARITH) != (ZF|CF|PF)) {
printf("[BAD]\tfcomi_qnan_1 with flags:%lx\n", flags);
return 1;
}
if (fetestexcept(FE_INVALID) != FE_INVALID) {
printf("[BAD]\tFE_INVALID is not set in %s\n", __func__);
return 1;
}
return 0;
}
int testup_qnan(long flags)
{
feclearexcept(FE_DIVBYZERO|FE_INEXACT|FE_INVALID|FE_OVERFLOW|FE_UNDERFLOW);
asm ("\n"
" push %0""\n"
" popf""\n"
" flds qnan""\n"
" fld1""\n"
" fnclex""\n" // fld of a qnan raised FE_INVALID, clear it
" fucomip %%st(1), %%st" "\n"
" ffree %%st(0)" "\n"
" pushf""\n"
" pop res_fcomi_nan_1""\n"
:
: "r" (flags)
);
if ((res_fcomi_nan_1 & ARITH) != (ZF|CF|PF)) {
printf("[BAD]\tfcomi_qnan_1 with flags:%lx\n", flags);
return 1;
}
if (fetestexcept(FE_INVALID) != 0) {
printf("[BAD]\tFE_INVALID is set in %s\n", __func__);
return 1;
}
return 0;
}
void sighandler(int sig)
{
printf("[FAIL]\tGot signal %d, exiting\n", sig);
exit(1);
}
int main(int argc, char **argv, char **envp)
{
int err = 0;
/* SIGILL triggers on 32-bit kernels w/o fcomi emulation
* when run with "no387 nofxsr". Other signals are caught
* just in case.
*/
signal(SIGILL, sighandler);
signal(SIGFPE, sighandler);
signal(SIGSEGV, sighandler);
printf("[RUN]\tTesting f[u]comi[p] instructions\n");
err |= test(0);
err |= test_qnan(0);
err |= testu_qnan(0);
err |= testu_snan(0);
err |= test(CF|ZF|PF);
err |= test_qnan(CF|ZF|PF);
err |= testu_qnan(CF|ZF|PF);
err |= testu_snan(CF|ZF|PF);
err |= testp(0);
err |= testp_qnan(0);
err |= testup_qnan(0);
err |= testp(CF|ZF|PF);
err |= testp_qnan(CF|ZF|PF);
err |= testup_qnan(CF|ZF|PF);
if (!err)
printf("[OK]\tf[u]comi[p]\n");
else
printf("[FAIL]\tf[u]comi[p] errors: %d\n", err);
return err;
}
| linux-master | tools/testing/selftests/x86/test_FCOMI.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* 32-bit test to check vDSO mremap.
*
* Copyright (c) 2016 Dmitry Safonov
* Suggested-by: Andrew Lutomirski
*/
/*
* Can be built statically:
* gcc -Os -Wall -static -m32 test_mremap_vdso.c
*/
#define _GNU_SOURCE
#include <stdio.h>
#include <errno.h>
#include <unistd.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/auxv.h>
#include <sys/syscall.h>
#include <sys/wait.h>
#define PAGE_SIZE 4096
static int try_to_remap(void *vdso_addr, unsigned long size)
{
void *dest_addr, *new_addr;
	/* Search for a memory location to remap the vDSO to */
dest_addr = mmap(0, size, PROT_NONE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
if (dest_addr == MAP_FAILED) {
printf("[WARN]\tmmap failed (%d): %m\n", errno);
return 0;
}
printf("[NOTE]\tMoving vDSO: [%p, %#lx] -> [%p, %#lx]\n",
vdso_addr, (unsigned long)vdso_addr + size,
dest_addr, (unsigned long)dest_addr + size);
fflush(stdout);
new_addr = mremap(vdso_addr, size, size,
MREMAP_FIXED|MREMAP_MAYMOVE, dest_addr);
if ((unsigned long)new_addr == (unsigned long)-1) {
munmap(dest_addr, size);
if (errno == EINVAL) {
printf("[NOTE]\tvDSO partial move failed, will try with bigger size\n");
return -1; /* Retry with larger */
}
printf("[FAIL]\tmremap failed (%d): %m\n", errno);
return 1;
}
return 0;
}
int main(int argc, char **argv, char **envp)
{
pid_t child;
child = fork();
if (child == -1) {
printf("[WARN]\tfailed to fork (%d): %m\n", errno);
return 1;
}
if (child == 0) {
unsigned long vdso_size = PAGE_SIZE;
unsigned long auxval;
int ret = -1;
auxval = getauxval(AT_SYSINFO_EHDR);
printf("\tAT_SYSINFO_EHDR is %#lx\n", auxval);
if (!auxval || auxval == -ENOENT) {
printf("[WARN]\tgetauxval failed\n");
return 0;
}
/* Simpler than parsing ELF header */
while (ret < 0) {
ret = try_to_remap((void *)auxval, vdso_size);
vdso_size += PAGE_SIZE;
}
#ifdef __i386__
/* Glibc is likely to explode now - exit with raw syscall */
asm volatile ("int $0x80" : : "a" (__NR_exit), "b" (!!ret));
#else /* __x86_64__ */
syscall(SYS_exit, ret);
#endif
} else {
int status;
if (waitpid(child, &status, 0) != child ||
!WIFEXITED(status)) {
printf("[FAIL]\tmremap() of the vDSO does not work on this kernel!\n");
return 1;
} else if (WEXITSTATUS(status) != 0) {
printf("[FAIL]\tChild failed with %d\n",
WEXITSTATUS(status));
return 1;
}
printf("[OK]\n");
}
return 0;
}
| linux-master | tools/testing/selftests/x86/test_mremap_vdso.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* vdso_restorer.c - tests vDSO-based signal restore
* Copyright (c) 2015 Andrew Lutomirski
*
* This makes sure that sa_restorer == NULL keeps working on 32-bit
* configurations. Modern glibc doesn't use it under any circumstances,
* so it's easy to overlook breakage.
*
* 64-bit userspace has never supported sa_restorer == NULL, so this is
* 32-bit only.
*/
#define _GNU_SOURCE
#include <err.h>
#include <stdio.h>
#include <dlfcn.h>
#include <string.h>
#include <signal.h>
#include <unistd.h>
#include <syscall.h>
#include <sys/syscall.h>
/* Open-code this -- the headers are too messy to easily use them. */
struct real_sigaction {
void *handler;
unsigned long flags;
void *restorer;
unsigned int mask[2];
};
static volatile sig_atomic_t handler_called;
static void handler_with_siginfo(int sig, siginfo_t *info, void *ctx_void)
{
handler_called = 1;
}
static void handler_without_siginfo(int sig)
{
handler_called = 1;
}
int main()
{
int nerrs = 0;
struct real_sigaction sa;
void *vdso = dlopen("linux-vdso.so.1",
RTLD_LAZY | RTLD_LOCAL | RTLD_NOLOAD);
if (!vdso)
vdso = dlopen("linux-gate.so.1",
RTLD_LAZY | RTLD_LOCAL | RTLD_NOLOAD);
if (!vdso) {
printf("[SKIP]\tFailed to find vDSO. Tests are not expected to work.\n");
return 0;
}
memset(&sa, 0, sizeof(sa));
sa.handler = handler_with_siginfo;
sa.flags = SA_SIGINFO;
sa.restorer = NULL; /* request kernel-provided restorer */
printf("[RUN]\tRaise a signal, SA_SIGINFO, sa.restorer == NULL\n");
if (syscall(SYS_rt_sigaction, SIGUSR1, &sa, NULL, 8) != 0)
err(1, "raw rt_sigaction syscall");
raise(SIGUSR1);
if (handler_called) {
printf("[OK]\tSA_SIGINFO handler returned successfully\n");
} else {
printf("[FAIL]\tSA_SIGINFO handler was not called\n");
nerrs++;
}
printf("[RUN]\tRaise a signal, !SA_SIGINFO, sa.restorer == NULL\n");
sa.flags = 0;
sa.handler = handler_without_siginfo;
if (syscall(SYS_sigaction, SIGUSR1, &sa, 0) != 0)
err(1, "raw sigaction syscall");
handler_called = 0;
raise(SIGUSR1);
if (handler_called) {
printf("[OK]\t!SA_SIGINFO handler returned successfully\n");
} else {
printf("[FAIL]\t!SA_SIGINFO handler was not called\n");
nerrs++;
	}
	return nerrs ? 1 : 0;
}
| linux-master | tools/testing/selftests/x86/vdso_restorer.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* sysret_ss_attrs.c - test that syscalls return valid hidden SS attributes
* Copyright (c) 2015 Andrew Lutomirski
*
 * On AMD CPUs, SYSRET can return with a valid SS descriptor with
* the hidden attributes set to an unusable state. Make sure the kernel
* doesn't let this happen.
*/
#define _GNU_SOURCE
#include <stdlib.h>
#include <unistd.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <err.h>
#include <stddef.h>
#include <stdbool.h>
#include <pthread.h>
static void *threadproc(void *ctx)
{
/*
* Do our best to cause sleeps on this CPU to exit the kernel and
* re-enter with SS = 0.
*/
while (true)
;
return NULL;
}
#ifdef __x86_64__
extern unsigned long call32_from_64(void *stack, void (*function)(void));
asm (".pushsection .text\n\t"
".code32\n\t"
"test_ss:\n\t"
"pushl $0\n\t"
"popl %eax\n\t"
"ret\n\t"
".code64");
extern void test_ss(void);
#endif
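/*
 * call32_from_64() is not defined here; it is expected to come from a
 * small assembly helper built with the 64-bit test (thunks.S in this
 * directory) that switches to a 32-bit code segment, calls the function,
 * and switches back.
 */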
int main()
{
/*
* Start a busy-looping thread on the same CPU we're on.
* For simplicity, just stick everything to CPU 0. This will
* fail in some containers, but that's probably okay.
*/
cpu_set_t cpuset;
CPU_ZERO(&cpuset);
CPU_SET(0, &cpuset);
if (sched_setaffinity(0, sizeof(cpuset), &cpuset) != 0)
printf("[WARN]\tsched_setaffinity failed\n");
pthread_t thread;
if (pthread_create(&thread, 0, threadproc, 0) != 0)
err(1, "pthread_create");
#ifdef __x86_64__
unsigned char *stack32 = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
MAP_32BIT | MAP_ANONYMOUS | MAP_PRIVATE,
-1, 0);
if (stack32 == MAP_FAILED)
err(1, "mmap");
#endif
printf("[RUN]\tSyscalls followed by SS validation\n");
for (int i = 0; i < 1000; i++) {
/*
* Go to sleep and return using sysret (if we're 64-bit
* or we're 32-bit on AMD on a 64-bit kernel). On AMD CPUs,
* SYSRET doesn't fix up the cached SS descriptor, so the
* kernel needs some kind of workaround to make sure that we
* end the system call with a valid stack segment. This
* can be a confusing failure because the SS *selector*
* is the same regardless.
*/
usleep(2);
#ifdef __x86_64__
/*
* On 32-bit, just doing a syscall through glibc is enough
* to cause a crash if our cached SS descriptor is invalid.
* On 64-bit, it's not, so try extra hard.
*/
call32_from_64(stack32 + 4088, test_ss);
#endif
}
printf("[OK]\tWe survived\n");
#ifdef __x86_64__
munmap(stack32, 4096);
#endif
return 0;
}
| linux-master | tools/testing/selftests/x86/sysret_ss_attrs.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
 * syscall_arg_fault.c - tests faults in 32-bit fast syscall stack args
* Copyright (c) 2015 Andrew Lutomirski
*/
#define _GNU_SOURCE
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <sys/signal.h>
#include <sys/ucontext.h>
#include <err.h>
#include <setjmp.h>
#include <errno.h>
#include "helpers.h"
static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *),
int flags)
{
struct sigaction sa;
memset(&sa, 0, sizeof(sa));
sa.sa_sigaction = handler;
sa.sa_flags = SA_SIGINFO | flags;
sigemptyset(&sa.sa_mask);
if (sigaction(sig, &sa, 0))
err(1, "sigaction");
}
static volatile sig_atomic_t sig_traps;
static sigjmp_buf jmpbuf;
static volatile sig_atomic_t n_errs;
#ifdef __x86_64__
#define REG_AX REG_RAX
#define REG_IP REG_RIP
#else
#define REG_AX REG_EAX
#define REG_IP REG_EIP
#endif
static void sigsegv_or_sigbus(int sig, siginfo_t *info, void *ctx_void)
{
ucontext_t *ctx = (ucontext_t*)ctx_void;
long ax = (long)ctx->uc_mcontext.gregs[REG_AX];
if (ax != -EFAULT && ax != -ENOSYS) {
printf("[FAIL]\tAX had the wrong value: 0x%lx\n",
(unsigned long)ax);
printf("\tIP = 0x%lx\n", (unsigned long)ctx->uc_mcontext.gregs[REG_IP]);
n_errs++;
} else {
printf("[OK]\tSeems okay\n");
}
siglongjmp(jmpbuf, 1);
}
static volatile sig_atomic_t sigtrap_consecutive_syscalls;
static void sigtrap(int sig, siginfo_t *info, void *ctx_void)
{
/*
* KVM has some bugs that can cause us to stop making progress.
	 * Detect them and complain, but don't loop forever or fail the
	 * test.
*/
ucontext_t *ctx = (ucontext_t*)ctx_void;
unsigned short *ip = (unsigned short *)ctx->uc_mcontext.gregs[REG_IP];
if (*ip == 0x340f || *ip == 0x050f) {
/* The trap was on SYSCALL or SYSENTER */
sigtrap_consecutive_syscalls++;
if (sigtrap_consecutive_syscalls > 3) {
printf("[WARN]\tGot stuck single-stepping -- you probably have a KVM bug\n");
siglongjmp(jmpbuf, 1);
}
} else {
sigtrap_consecutive_syscalls = 0;
}
}
static void sigill(int sig, siginfo_t *info, void *ctx_void)
{
ucontext_t *ctx = (ucontext_t*)ctx_void;
unsigned short *ip = (unsigned short *)ctx->uc_mcontext.gregs[REG_IP];
if (*ip == 0x0b0f) {
/* one of the ud2 instructions faulted */
printf("[OK]\tSYSCALL returned normally\n");
} else {
printf("[SKIP]\tIllegal instruction\n");
}
siglongjmp(jmpbuf, 1);
}
int main()
{
stack_t stack = {
/* Our sigaltstack scratch space. */
.ss_sp = malloc(sizeof(char) * SIGSTKSZ),
.ss_size = SIGSTKSZ,
};
if (sigaltstack(&stack, NULL) != 0)
err(1, "sigaltstack");
sethandler(SIGSEGV, sigsegv_or_sigbus, SA_ONSTACK);
/*
* The actual exception can vary. On Atom CPUs, we get #SS
	 * instead of #PF when the vDSO fails to access the stack because
	 * ESP is too close to 2^32, and #SS causes SIGBUS.
*/
sethandler(SIGBUS, sigsegv_or_sigbus, SA_ONSTACK);
sethandler(SIGILL, sigill, SA_ONSTACK);
/*
* Exercise another nasty special case. The 32-bit SYSCALL
* and SYSENTER instructions (even in compat mode) each
* clobber one register. A Linux system call has a syscall
* number and six arguments, and the user stack pointer
* needs to live in some register on return. That means
* that we need eight registers, but SYSCALL and SYSENTER
* only preserve seven registers. As a result, one argument
* ends up on the stack. The stack is user memory, which
* means that the kernel can fail to read it.
*
* The 32-bit fast system calls don't have a defined ABI:
* we're supposed to invoke them through the vDSO. So we'll
* fudge it: we set all regs to invalid pointer values and
* invoke the entry instruction. The return will fail no
* matter what, and we completely lose our program state,
* but we can fix it up with a signal handler.
*/
printf("[RUN]\tSYSENTER with invalid state\n");
if (sigsetjmp(jmpbuf, 1) == 0) {
asm volatile (
"movl $-1, %%eax\n\t"
"movl $-1, %%ebx\n\t"
"movl $-1, %%ecx\n\t"
"movl $-1, %%edx\n\t"
"movl $-1, %%esi\n\t"
"movl $-1, %%edi\n\t"
"movl $-1, %%ebp\n\t"
"movl $-1, %%esp\n\t"
"sysenter"
: : : "memory", "flags");
}
printf("[RUN]\tSYSCALL with invalid state\n");
if (sigsetjmp(jmpbuf, 1) == 0) {
asm volatile (
"movl $-1, %%eax\n\t"
"movl $-1, %%ebx\n\t"
"movl $-1, %%ecx\n\t"
"movl $-1, %%edx\n\t"
"movl $-1, %%esi\n\t"
"movl $-1, %%edi\n\t"
"movl $-1, %%ebp\n\t"
"movl $-1, %%esp\n\t"
"syscall\n\t"
"ud2" /* make sure we recover cleanly */
: : : "memory", "flags");
}
printf("[RUN]\tSYSENTER with TF and invalid state\n");
sethandler(SIGTRAP, sigtrap, SA_ONSTACK);
if (sigsetjmp(jmpbuf, 1) == 0) {
sigtrap_consecutive_syscalls = 0;
set_eflags(get_eflags() | X86_EFLAGS_TF);
asm volatile (
"movl $-1, %%eax\n\t"
"movl $-1, %%ebx\n\t"
"movl $-1, %%ecx\n\t"
"movl $-1, %%edx\n\t"
"movl $-1, %%esi\n\t"
"movl $-1, %%edi\n\t"
"movl $-1, %%ebp\n\t"
"movl $-1, %%esp\n\t"
"sysenter"
: : : "memory", "flags");
}
set_eflags(get_eflags() & ~X86_EFLAGS_TF);
printf("[RUN]\tSYSCALL with TF and invalid state\n");
if (sigsetjmp(jmpbuf, 1) == 0) {
sigtrap_consecutive_syscalls = 0;
set_eflags(get_eflags() | X86_EFLAGS_TF);
asm volatile (
"movl $-1, %%eax\n\t"
"movl $-1, %%ebx\n\t"
"movl $-1, %%ecx\n\t"
"movl $-1, %%edx\n\t"
"movl $-1, %%esi\n\t"
"movl $-1, %%edi\n\t"
"movl $-1, %%ebp\n\t"
"movl $-1, %%esp\n\t"
"syscall\n\t"
"ud2" /* make sure we recover cleanly */
: : : "memory", "flags");
}
set_eflags(get_eflags() & ~X86_EFLAGS_TF);
#ifdef __x86_64__
printf("[RUN]\tSYSENTER with TF, invalid state, and GSBASE < 0\n");
if (sigsetjmp(jmpbuf, 1) == 0) {
sigtrap_consecutive_syscalls = 0;
asm volatile ("wrgsbase %%rax\n\t"
:: "a" (0xffffffffffff0000UL));
set_eflags(get_eflags() | X86_EFLAGS_TF);
asm volatile (
"movl $-1, %%eax\n\t"
"movl $-1, %%ebx\n\t"
"movl $-1, %%ecx\n\t"
"movl $-1, %%edx\n\t"
"movl $-1, %%esi\n\t"
"movl $-1, %%edi\n\t"
"movl $-1, %%ebp\n\t"
"movl $-1, %%esp\n\t"
"sysenter"
: : : "memory", "flags");
}
set_eflags(get_eflags() & ~X86_EFLAGS_TF);
#endif
free(stack.ss_sp);
return 0;
}
| linux-master | tools/testing/selftests/x86/syscall_arg_fault.c |
/* SPDX-License-Identifier: GPL-2.0 */
#define _GNU_SOURCE
#include <stdio.h>
#include <sys/time.h>
#include <time.h>
#include <stdlib.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <dlfcn.h>
#include <string.h>
#include <inttypes.h>
#include <signal.h>
#include <sys/ucontext.h>
#include <errno.h>
#include <err.h>
#include <sched.h>
#include <stdbool.h>
#include <setjmp.h>
#include <sys/uio.h>
#include "helpers.h"
#ifdef __x86_64__
# define VSYS(x) (x)
#else
# define VSYS(x) 0
#endif
#ifndef SYS_getcpu
# ifdef __x86_64__
# define SYS_getcpu 309
# else
# define SYS_getcpu 318
# endif
#endif
/* max length of lines in /proc/self/maps - anything longer is skipped here */
#define MAPS_LINE_LEN 128
static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *),
int flags)
{
struct sigaction sa;
memset(&sa, 0, sizeof(sa));
sa.sa_sigaction = handler;
sa.sa_flags = SA_SIGINFO | flags;
sigemptyset(&sa.sa_mask);
if (sigaction(sig, &sa, 0))
err(1, "sigaction");
}
/* vsyscalls and vDSO */
bool vsyscall_map_r = false, vsyscall_map_x = false;
typedef long (*gtod_t)(struct timeval *tv, struct timezone *tz);
const gtod_t vgtod = (gtod_t)VSYS(0xffffffffff600000);
gtod_t vdso_gtod;
typedef int (*vgettime_t)(clockid_t, struct timespec *);
vgettime_t vdso_gettime;
typedef long (*time_func_t)(time_t *t);
const time_func_t vtime = (time_func_t)VSYS(0xffffffffff600400);
time_func_t vdso_time;
typedef long (*getcpu_t)(unsigned *, unsigned *, void *);
const getcpu_t vgetcpu = (getcpu_t)VSYS(0xffffffffff600800);
getcpu_t vdso_getcpu;
static void init_vdso(void)
{
void *vdso = dlopen("linux-vdso.so.1", RTLD_LAZY | RTLD_LOCAL | RTLD_NOLOAD);
if (!vdso)
vdso = dlopen("linux-gate.so.1", RTLD_LAZY | RTLD_LOCAL | RTLD_NOLOAD);
if (!vdso) {
printf("[WARN]\tfailed to find vDSO\n");
return;
}
vdso_gtod = (gtod_t)dlsym(vdso, "__vdso_gettimeofday");
if (!vdso_gtod)
printf("[WARN]\tfailed to find gettimeofday in vDSO\n");
vdso_gettime = (vgettime_t)dlsym(vdso, "__vdso_clock_gettime");
if (!vdso_gettime)
printf("[WARN]\tfailed to find clock_gettime in vDSO\n");
vdso_time = (time_func_t)dlsym(vdso, "__vdso_time");
if (!vdso_time)
printf("[WARN]\tfailed to find time in vDSO\n");
vdso_getcpu = (getcpu_t)dlsym(vdso, "__vdso_getcpu");
if (!vdso_getcpu)
printf("[WARN]\tfailed to find getcpu in vDSO\n");
}
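/*
* Scan /proc/self/maps for the [vsyscall] mapping and record whether it
* is readable and/or executable; those flags select which vsyscall
* tests below are expected to succeed.
*/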
static int init_vsys(void)
{
#ifdef __x86_64__
int nerrs = 0;
FILE *maps;
char line[MAPS_LINE_LEN];
bool found = false;
maps = fopen("/proc/self/maps", "r");
if (!maps) {
printf("[WARN]\tCould not open /proc/self/maps -- assuming vsyscall is r-x\n");
vsyscall_map_r = true;
return 0;
}
while (fgets(line, MAPS_LINE_LEN, maps)) {
char r, x;
void *start, *end;
char name[MAPS_LINE_LEN];
/* sscanf() is safe here: name[] is as large as line[], so %s cannot overflow */
if (sscanf(line, "%p-%p %c-%cp %*x %*x:%*x %*u %s",
&start, &end, &r, &x, name) != 5)
continue;
if (strcmp(name, "[vsyscall]"))
continue;
printf("\tvsyscall map: %s", line);
if (start != (void *)0xffffffffff600000 ||
end != (void *)0xffffffffff601000) {
printf("[FAIL]\taddress range is nonsense\n");
nerrs++;
}
printf("\tvsyscall permissions are %c-%c\n", r, x);
vsyscall_map_r = (r == 'r');
vsyscall_map_x = (x == 'x');
found = true;
break;
}
fclose(maps);
if (!found) {
printf("\tno vsyscall map in /proc/self/maps\n");
vsyscall_map_r = false;
vsyscall_map_x = false;
}
return nerrs;
#else
return 0;
#endif
}
/* syscalls */
static inline long sys_gtod(struct timeval *tv, struct timezone *tz)
{
return syscall(SYS_gettimeofday, tv, tz);
}
static inline int sys_clock_gettime(clockid_t id, struct timespec *ts)
{
return syscall(SYS_clock_gettime, id, ts);
}
static inline long sys_time(time_t *t)
{
return syscall(SYS_time, t);
}
static inline long sys_getcpu(unsigned * cpu, unsigned * node,
void* cache)
{
return syscall(SYS_getcpu, cpu, node, cache);
}
static jmp_buf jmpbuf;
static volatile unsigned long segv_err;
static void sigsegv(int sig, siginfo_t *info, void *ctx_void)
{
ucontext_t *ctx = (ucontext_t *)ctx_void;
segv_err = ctx->uc_mcontext.gregs[REG_ERR];
siglongjmp(jmpbuf, 1);
}
static double tv_diff(const struct timeval *a, const struct timeval *b)
{
return (double)(a->tv_sec - b->tv_sec) +
(double)((int)a->tv_usec - (int)b->tv_usec) * 1e-6;
}
static int check_gtod(const struct timeval *tv_sys1,
const struct timeval *tv_sys2,
const struct timezone *tz_sys,
const char *which,
const struct timeval *tv_other,
const struct timezone *tz_other)
{
int nerrs = 0;
double d1, d2;
if (tz_other && (tz_sys->tz_minuteswest != tz_other->tz_minuteswest || tz_sys->tz_dsttime != tz_other->tz_dsttime)) {
printf("[FAIL] %s tz mismatch\n", which);
nerrs++;
}
d1 = tv_diff(tv_other, tv_sys1);
d2 = tv_diff(tv_sys2, tv_other);
printf("\t%s time offsets: %lf %lf\n", which, d1, d2);
if (d1 < 0 || d2 < 0) {
printf("[FAIL]\t%s time was inconsistent with the syscall\n", which);
nerrs++;
} else {
printf("[OK]\t%s gettimeofday()'s timeval was okay\n", which);
}
return nerrs;
}
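/*
* Compare the vDSO and vsyscall gettimeofday() results against two
* bracketing syscalls: each alternative reading must fall between
* tv_sys1 and tv_sys2, which check_gtod() verifies above.
*/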
static int test_gtod(void)
{
struct timeval tv_sys1, tv_sys2, tv_vdso, tv_vsys;
struct timezone tz_sys, tz_vdso, tz_vsys;
long ret_vdso = -1;
long ret_vsys = -1;
int nerrs = 0;
printf("[RUN]\ttest gettimeofday()\n");
if (sys_gtod(&tv_sys1, &tz_sys) != 0)
err(1, "syscall gettimeofday");
if (vdso_gtod)
ret_vdso = vdso_gtod(&tv_vdso, &tz_vdso);
if (vsyscall_map_x)
ret_vsys = vgtod(&tv_vsys, &tz_vsys);
if (sys_gtod(&tv_sys2, &tz_sys) != 0)
err(1, "syscall gettimeofday");
if (vdso_gtod) {
if (ret_vdso == 0) {
nerrs += check_gtod(&tv_sys1, &tv_sys2, &tz_sys, "vDSO", &tv_vdso, &tz_vdso);
} else {
printf("[FAIL]\tvDSO gettimeofday() failed: %ld\n", ret_vdso);
nerrs++;
}
}
if (vsyscall_map_x) {
if (ret_vsys == 0) {
nerrs += check_gtod(&tv_sys1, &tv_sys2, &tz_sys, "vsyscall", &tv_vsys, &tz_vsys);
} else {
printf("[FAIL]\tvsys gettimeofday() failed: %ld\n", ret_vsys);
nerrs++;
}
}
return nerrs;
}
static int test_time(void)
{
int nerrs = 0;
printf("[RUN]\ttest time()\n");
long t_sys1, t_sys2, t_vdso = 0, t_vsys = 0;
long t2_sys1 = -1, t2_sys2 = -1, t2_vdso = -1, t2_vsys = -1;
t_sys1 = sys_time(&t2_sys1);
if (vdso_time)
t_vdso = vdso_time(&t2_vdso);
if (vsyscall_map_x)
t_vsys = vtime(&t2_vsys);
t_sys2 = sys_time(&t2_sys2);
if (t_sys1 < 0 || t_sys1 != t2_sys1 || t_sys2 < 0 || t_sys2 != t2_sys2) {
printf("[FAIL]\tsyscall failed (ret1:%ld output1:%ld ret2:%ld output2:%ld)\n", t_sys1, t2_sys1, t_sys2, t2_sys2);
nerrs++;
return nerrs;
}
if (vdso_time) {
if (t_vdso < 0 || t_vdso != t2_vdso) {
printf("[FAIL]\tvDSO failed (ret:%ld output:%ld)\n", t_vdso, t2_vdso);
nerrs++;
} else if (t_vdso < t_sys1 || t_vdso > t_sys2) {
printf("[FAIL]\tvDSO returned the wrong time (%ld %ld %ld)\n", t_sys1, t_vdso, t_sys2);
nerrs++;
} else {
printf("[OK]\tvDSO time() is okay\n");
}
}
if (vsyscall_map_x) {
if (t_vsys < 0 || t_vsys != t2_vsys) {
printf("[FAIL]\tvsyscall failed (ret:%ld output:%ld)\n", t_vsys, t2_vsys);
nerrs++;
} else if (t_vsys < t_sys1 || t_vsys > t_sys2) {
printf("[FAIL]\tvsyscall returned the wrong time (%ld %ld %ld)\n", t_sys1, t_vsys, t_sys2);
nerrs++;
} else {
printf("[OK]\tvsyscall time() is okay\n");
}
}
return nerrs;
}
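/*
* Pin the task to the requested CPU with sched_setaffinity() and
* cross-check the CPU (and node, when any implementation reports one)
* returned by the getcpu() syscall, the vDSO, and the vsyscall.
*/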
static int test_getcpu(int cpu)
{
int nerrs = 0;
long ret_sys, ret_vdso = -1, ret_vsys = -1;
printf("[RUN]\tgetcpu() on CPU %d\n", cpu);
cpu_set_t cpuset;
CPU_ZERO(&cpuset);
CPU_SET(cpu, &cpuset);
if (sched_setaffinity(0, sizeof(cpuset), &cpuset) != 0) {
printf("[SKIP]\tfailed to force CPU %d\n", cpu);
return nerrs;
}
unsigned cpu_sys, cpu_vdso, cpu_vsys, node_sys, node_vdso, node_vsys;
unsigned node = 0;
bool have_node = false;
ret_sys = sys_getcpu(&cpu_sys, &node_sys, 0);
if (vdso_getcpu)
ret_vdso = vdso_getcpu(&cpu_vdso, &node_vdso, 0);
if (vsyscall_map_x)
ret_vsys = vgetcpu(&cpu_vsys, &node_vsys, 0);
if (ret_sys == 0) {
if (cpu_sys != cpu) {
printf("[FAIL]\tsyscall reported CPU %hu but should be %d\n", cpu_sys, cpu);
nerrs++;
}
have_node = true;
node = node_sys;
}
if (vdso_getcpu) {
if (ret_vdso) {
printf("[FAIL]\tvDSO getcpu() failed\n");
nerrs++;
} else {
if (!have_node) {
have_node = true;
node = node_vdso;
}
if (cpu_vdso != cpu) {
printf("[FAIL]\tvDSO reported CPU %hu but should be %d\n", cpu_vdso, cpu);
nerrs++;
} else {
printf("[OK]\tvDSO reported correct CPU\n");
}
if (node_vdso != node) {
printf("[FAIL]\tvDSO reported node %hu but should be %hu\n", node_vdso, node);
nerrs++;
} else {
printf("[OK]\tvDSO reported correct node\n");
}
}
}
if (vsyscall_map_x) {
if (ret_vsys) {
printf("[FAIL]\tvsyscall getcpu() failed\n");
nerrs++;
} else {
if (!have_node) {
have_node = true;
node = node_vsys;
}
if (cpu_vsys != cpu) {
printf("[FAIL]\tvsyscall reported CPU %hu but should be %d\n", cpu_vsys, cpu);
nerrs++;
} else {
printf("[OK]\tvsyscall reported correct CPU\n");
}
if (node_vsys != node) {
printf("[FAIL]\tvsyscall reported node %hu but should be %hu\n", node_vsys, node);
nerrs++;
} else {
printf("[OK]\tvsyscall reported correct node\n");
}
}
}
return nerrs;
}
static int test_vsys_r(void)
{
#ifdef __x86_64__
printf("[RUN]\tChecking read access to the vsyscall page\n");
bool can_read;
if (sigsetjmp(jmpbuf, 1) == 0) {
*(volatile int *)0xffffffffff600000;
can_read = true;
} else {
can_read = false;
}
if (can_read && !vsyscall_map_r) {
printf("[FAIL]\tWe have read access, but we shouldn't\n");
return 1;
} else if (!can_read && vsyscall_map_r) {
printf("[FAIL]\tWe don't have read access, but we should\n");
return 1;
} else if (can_read) {
printf("[OK]\tWe have read access\n");
} else {
printf("[OK]\tWe do not have read access: #PF(0x%lx)\n",
segv_err);
}
#endif
return 0;
}
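/*
* If the vsyscall page is not executable, calling into it must raise a
* page fault with the instruction-fetch bit (bit 4) set in the error
* code captured by the SIGSEGV handler.
*/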
static int test_vsys_x(void)
{
#ifdef __x86_64__
if (vsyscall_map_x) {
/* We already tested this adequately. */
return 0;
}
printf("[RUN]\tMake sure that vsyscalls really page fault\n");
bool can_exec;
if (sigsetjmp(jmpbuf, 1) == 0) {
vgtod(NULL, NULL);
can_exec = true;
} else {
can_exec = false;
}
if (can_exec) {
printf("[FAIL]\tExecuting the vsyscall did not page fault\n");
return 1;
} else if (segv_err & (1 << 4)) { /* INSTR */
printf("[OK]\tExecuting the vsyscall page failed: #PF(0x%lx)\n",
segv_err);
} else {
printf("[FAIL]\tExecution failed with the wrong error: #PF(0x%lx)\n",
segv_err);
return 1;
}
#endif
return 0;
}
/*
* Debuggers expect ptrace() to be able to peek at the vsyscall page.
* Use process_vm_readv() as a proxy for ptrace() to test this. We
* want it to work in the vsyscall=emulate case and to fail in the
* vsyscall=xonly case.
*
* It's worth noting that this ABI is a bit nutty. write(2) can't
* read from the vsyscall page on any kernel version or mode. The
* fact that ptrace() ever worked was a nice courtesy of old kernels,
* but the code to support it is fairly gross.
*/
static int test_process_vm_readv(void)
{
#ifdef __x86_64__
char buf[4096];
struct iovec local, remote;
int ret;
printf("[RUN]\tprocess_vm_readv() from vsyscall page\n");
local.iov_base = buf;
local.iov_len = 4096;
remote.iov_base = (void *)0xffffffffff600000;
remote.iov_len = 4096;
ret = process_vm_readv(getpid(), &local, 1, &remote, 1, 0);
if (ret != 4096) {
/*
* We expect process_vm_readv() to work if and only if the
* vsyscall page is readable.
*/
printf("[%s]\tprocess_vm_readv() failed (ret = %d, errno = %d)\n", vsyscall_map_r ? "FAIL" : "OK", ret, errno);
return vsyscall_map_r ? 1 : 0;
}
if (vsyscall_map_r) {
if (!memcmp(buf, remote.iov_base, sizeof(buf))) {
printf("[OK]\tIt worked and read correct data\n");
} else {
printf("[FAIL]\tIt worked but returned incorrect data\n");
return 1;
}
} else {
printf("[FAIL]\tprocess_rm_readv() succeeded, but it should have failed in this configuration\n");
return 1;
}
#endif
return 0;
}
#ifdef __x86_64__
static volatile sig_atomic_t num_vsyscall_traps;
static void sigtrap(int sig, siginfo_t *info, void *ctx_void)
{
ucontext_t *ctx = (ucontext_t *)ctx_void;
unsigned long ip = ctx->uc_mcontext.gregs[REG_RIP];
if (((ip ^ 0xffffffffff600000UL) & ~0xfffUL) == 0)
num_vsyscall_traps++;
}
static int test_emulation(void)
{
time_t tmp;
bool is_native;
if (!vsyscall_map_x)
return 0;
printf("[RUN]\tchecking that vsyscalls are emulated\n");
sethandler(SIGTRAP, sigtrap, 0);
set_eflags(get_eflags() | X86_EFLAGS_TF);
vtime(&tmp);
set_eflags(get_eflags() & ~X86_EFLAGS_TF);
/*
* If vsyscalls are emulated, we expect a single trap in the
* vsyscall page -- the call instruction will trap with RIP
* pointing to the entry point before emulation takes over.
* In native mode, we expect two traps, since whatever code
* the vsyscall page contains will be more than just a ret
* instruction.
*/
is_native = (num_vsyscall_traps > 1);
printf("[%s]\tvsyscalls are %s (%d instructions in vsyscall page)\n",
(is_native ? "FAIL" : "OK"),
(is_native ? "native" : "emulated"),
(int)num_vsyscall_traps);
return is_native;
}
#endif
int main(int argc, char **argv)
{
int nerrs = 0;
init_vdso();
nerrs += init_vsys();
nerrs += test_gtod();
nerrs += test_time();
nerrs += test_getcpu(0);
nerrs += test_getcpu(1);
sethandler(SIGSEGV, sigsegv, 0);
nerrs += test_vsys_r();
nerrs += test_vsys_x();
nerrs += test_process_vm_readv();
#ifdef __x86_64__
nerrs += test_emulation();
#endif
return nerrs ? 1 : 0;
}
| linux-master | tools/testing/selftests/x86/test_vsyscall.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* fsgsbase.c, an fsgsbase test
* Copyright (c) 2014-2016 Andy Lutomirski
*/
#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <err.h>
#include <sys/user.h>
#include <asm/prctl.h>
#include <sys/prctl.h>
#include <signal.h>
#include <limits.h>
#include <sys/ucontext.h>
#include <sched.h>
#include <linux/futex.h>
#include <pthread.h>
#include <asm/ldt.h>
#include <sys/mman.h>
#include <stddef.h>
#include <sys/ptrace.h>
#include <sys/wait.h>
#include <setjmp.h>
#ifndef __x86_64__
# error This test is 64-bit only
#endif
static volatile sig_atomic_t want_segv;
static volatile unsigned long segv_addr;
static unsigned short *shared_scratch;
static int nerrs;
static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *),
int flags)
{
struct sigaction sa;
memset(&sa, 0, sizeof(sa));
sa.sa_sigaction = handler;
sa.sa_flags = SA_SIGINFO | flags;
sigemptyset(&sa.sa_mask);
if (sigaction(sig, &sa, 0))
err(1, "sigaction");
}
static void clearhandler(int sig)
{
struct sigaction sa;
memset(&sa, 0, sizeof(sa));
sa.sa_handler = SIG_DFL;
sigemptyset(&sa.sa_mask);
if (sigaction(sig, &sa, 0))
err(1, "sigaction");
}
static void sigsegv(int sig, siginfo_t *si, void *ctx_void)
{
ucontext_t *ctx = (ucontext_t*)ctx_void;
if (!want_segv) {
clearhandler(SIGSEGV);
return; /* Crash cleanly. */
}
want_segv = false;
segv_addr = (unsigned long)si->si_addr;
ctx->uc_mcontext.gregs[REG_RIP] += 4; /* Skip the faulting mov */
}
static jmp_buf jmpbuf;
static void sigill(int sig, siginfo_t *si, void *ctx_void)
{
siglongjmp(jmpbuf, 1);
}
static bool have_fsgsbase;
static inline unsigned long rdgsbase(void)
{
unsigned long gsbase;
asm volatile("rdgsbase %0" : "=r" (gsbase) :: "memory");
return gsbase;
}
static inline unsigned long rdfsbase(void)
{
unsigned long fsbase;
asm volatile("rdfsbase %0" : "=r" (fsbase) :: "memory");
return fsbase;
}
static inline void wrgsbase(unsigned long gsbase)
{
asm volatile("wrgsbase %0" :: "r" (gsbase) : "memory");
}
static inline void wrfsbase(unsigned long fsbase)
{
asm volatile("wrfsbase %0" :: "r" (fsbase) : "memory");
}
enum which_base { FS, GS };
static unsigned long read_base(enum which_base which)
{
unsigned long offset;
/*
* Unless we have FSGSBASE, there's no direct way to do this from
* user mode. We can get at it indirectly using signals, though.
*/
want_segv = true;
offset = 0;
if (which == FS) {
/* Use a constant-length instruction here. */
asm volatile ("mov %%fs:(%%rcx), %%rax" : : "c" (offset) : "rax");
} else {
asm volatile ("mov %%gs:(%%rcx), %%rax" : : "c" (offset) : "rax");
}
if (!want_segv)
return segv_addr + offset;
/*
* If that didn't segfault, try the other end of the address space.
* Unless we get really unlucky and run into the vsyscall page, this
* is guaranteed to segfault.
*/
offset = (ULONG_MAX >> 1) + 1;
if (which == FS) {
asm volatile ("mov %%fs:(%%rcx), %%rax"
: : "c" (offset) : "rax");
} else {
asm volatile ("mov %%gs:(%%rcx), %%rax"
: : "c" (offset) : "rax");
}
if (!want_segv)
return segv_addr + offset;
abort();
}
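/*
* Set GSBASE with ARCH_SET_GS, then read it back two ways: indirectly,
* via the faulting GS-relative access in read_base(), and directly via
* ARCH_GET_GS. Both must report the requested value.
*/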
static void check_gs_value(unsigned long value)
{
unsigned long base;
unsigned short sel;
printf("[RUN]\tARCH_SET_GS to 0x%lx\n", value);
if (syscall(SYS_arch_prctl, ARCH_SET_GS, value) != 0)
err(1, "ARCH_SET_GS");
asm volatile ("mov %%gs, %0" : "=rm" (sel));
base = read_base(GS);
if (base == value) {
printf("[OK]\tGSBASE was set as expected (selector 0x%hx)\n",
sel);
} else {
nerrs++;
printf("[FAIL]\tGSBASE was not as expected: got 0x%lx (selector 0x%hx)\n",
base, sel);
}
if (syscall(SYS_arch_prctl, ARCH_GET_GS, &base) != 0)
err(1, "ARCH_GET_GS");
if (base == value) {
printf("[OK]\tARCH_GET_GS worked as expected (selector 0x%hx)\n",
sel);
} else {
nerrs++;
printf("[FAIL]\tARCH_GET_GS was not as expected: got 0x%lx (selector 0x%hx)\n",
base, sel);
}
}
static void mov_0_gs(unsigned long initial_base, bool schedule)
{
unsigned long base, arch_base;
printf("[RUN]\tARCH_SET_GS to 0x%lx then mov 0 to %%gs%s\n", initial_base, schedule ? " and schedule " : "");
if (syscall(SYS_arch_prctl, ARCH_SET_GS, initial_base) != 0)
err(1, "ARCH_SET_GS");
if (schedule)
usleep(10);
asm volatile ("mov %0, %%gs" : : "rm" (0));
base = read_base(GS);
if (syscall(SYS_arch_prctl, ARCH_GET_GS, &arch_base) != 0)
err(1, "ARCH_GET_GS");
if (base == arch_base) {
printf("[OK]\tGSBASE is 0x%lx\n", base);
} else {
nerrs++;
printf("[FAIL]\tGSBASE changed to 0x%lx but kernel reports 0x%lx\n", base, arch_base);
}
}
static volatile unsigned long remote_base;
static volatile bool remote_hard_zero;
static volatile unsigned int ftx;
/*
* ARCH_SET_FS/GS(0) may or may not program a selector of zero. HARD_ZERO
* means to force the selector to zero to improve test coverage.
*/
#define HARD_ZERO 0xa1fa5f343cb85fa4
static void do_remote_base()
{
unsigned long to_set = remote_base;
bool hard_zero = false;
if (to_set == HARD_ZERO) {
to_set = 0;
hard_zero = true;
}
if (syscall(SYS_arch_prctl, ARCH_SET_GS, to_set) != 0)
err(1, "ARCH_SET_GS");
if (hard_zero)
asm volatile ("mov %0, %%gs" : : "rm" ((unsigned short)0));
unsigned short sel;
asm volatile ("mov %%gs, %0" : "=rm" (sel));
printf("\tother thread: ARCH_SET_GS(0x%lx)%s -- sel is 0x%hx\n",
to_set, hard_zero ? " and clear gs" : "", sel);
}
static __thread int set_thread_area_entry_number = -1;
static unsigned short load_gs(void)
{
/*
* Sets GS != 0 and GSBASE != 0 but arranges for the kernel to think
* that GSBASE == 0 (i.e. thread.gsbase == 0).
*/
/* Step 1: tell the kernel that we have GSBASE == 0. */
if (syscall(SYS_arch_prctl, ARCH_SET_GS, 0) != 0)
err(1, "ARCH_SET_GS");
/* Step 2: change GSBASE without telling the kernel. */
struct user_desc desc = {
.entry_number = 0,
.base_addr = 0xBAADF00D,
.limit = 0xfffff,
.seg_32bit = 1,
.contents = 0, /* Data, grow-up */
.read_exec_only = 0,
.limit_in_pages = 1,
.seg_not_present = 0,
.useable = 0
};
if (syscall(SYS_modify_ldt, 1, &desc, sizeof(desc)) == 0) {
printf("\tusing LDT slot 0\n");
asm volatile ("mov %0, %%gs" : : "rm" ((unsigned short)0x7));
return 0x7;
} else {
/* No modify_ldt for us (configured out, perhaps) */
struct user_desc *low_desc = mmap(
NULL, sizeof(desc),
PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS | MAP_32BIT, -1, 0);
memcpy(low_desc, &desc, sizeof(desc));
low_desc->entry_number = set_thread_area_entry_number;
/* 32-bit set_thread_area */
long ret;
asm volatile ("int $0x80"
: "=a" (ret), "+m" (*low_desc)
: "a" (243), "b" (low_desc)
: "r8", "r9", "r10", "r11");
memcpy(&desc, low_desc, sizeof(desc));
munmap(low_desc, sizeof(desc));
if (ret != 0) {
printf("[NOTE]\tcould not create a segment -- test won't do anything\n");
return 0;
}
printf("\tusing GDT slot %d\n", desc.entry_number);
set_thread_area_entry_number = desc.entry_number;
unsigned short gs = (unsigned short)((desc.entry_number << 3) | 0x3);
asm volatile ("mov %0, %%gs" : : "rm" (gs));
return gs;
}
}
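/*
* Requires FSGSBASE: program GS and GSBASE directly with WRGSBASE, make
* the helper thread run (forcing a context switch on the shared CPU),
* and verify that both the selector and the base are preserved.
*/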
void test_wrbase(unsigned short index, unsigned long base)
{
unsigned short newindex;
unsigned long newbase;
printf("[RUN]\tGS = 0x%hx, GSBASE = 0x%lx\n", index, base);
asm volatile ("mov %0, %%gs" : : "rm" (index));
wrgsbase(base);
remote_base = 0;
ftx = 1;
syscall(SYS_futex, &ftx, FUTEX_WAKE, 0, NULL, NULL, 0);
while (ftx != 0)
syscall(SYS_futex, &ftx, FUTEX_WAIT, 1, NULL, NULL, 0);
asm volatile ("mov %%gs, %0" : "=rm" (newindex));
newbase = rdgsbase();
if (newindex == index && newbase == base) {
printf("[OK]\tIndex and base were preserved\n");
} else {
printf("[FAIL]\tAfter switch, GS = 0x%hx and GSBASE = 0x%lx\n",
newindex, newbase);
nerrs++;
}
}
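/*
* Helper thread, driven by the ftx futex: 1 = apply remote_base via
* do_remote_base(), 2 = dirty GS/GSBASE behind the kernel's back with
* load_gs(), 3 = exit. It wakes the requester after each command.
*/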
static void *threadproc(void *ctx)
{
while (1) {
while (ftx == 0)
syscall(SYS_futex, &ftx, FUTEX_WAIT, 0, NULL, NULL, 0);
if (ftx == 3)
return NULL;
if (ftx == 1) {
do_remote_base();
} else if (ftx == 2) {
/*
* On AMD chips, this causes GSBASE != 0, GS == 0, and
* thread.gsbase == 0.
*/
load_gs();
asm volatile ("mov %0, %%gs" : : "rm" ((unsigned short)0));
} else {
errx(1, "helper thread got bad command");
}
ftx = 0;
syscall(SYS_futex, &ftx, FUTEX_WAKE, 0, NULL, NULL, 0);
}
}
static void set_gs_and_switch_to(unsigned long local,
unsigned short force_sel,
unsigned long remote)
{
unsigned long base;
unsigned short sel_pre_sched, sel_post_sched;
bool hard_zero = false;
if (local == HARD_ZERO) {
hard_zero = true;
local = 0;
}
printf("[RUN]\tARCH_SET_GS(0x%lx)%s, then schedule to 0x%lx\n",
local, hard_zero ? " and clear gs" : "", remote);
if (force_sel)
printf("\tBefore schedule, set selector to 0x%hx\n", force_sel);
if (syscall(SYS_arch_prctl, ARCH_SET_GS, local) != 0)
err(1, "ARCH_SET_GS");
if (hard_zero)
asm volatile ("mov %0, %%gs" : : "rm" ((unsigned short)0));
if (read_base(GS) != local) {
nerrs++;
printf("[FAIL]\tGSBASE wasn't set as expected\n");
}
if (force_sel) {
asm volatile ("mov %0, %%gs" : : "rm" (force_sel));
sel_pre_sched = force_sel;
local = read_base(GS);
/*
* Signal delivery is quite likely to change a selector
* of 1, 2, or 3 back to 0 due to IRET being defective.
*/
asm volatile ("mov %0, %%gs" : : "rm" (force_sel));
} else {
asm volatile ("mov %%gs, %0" : "=rm" (sel_pre_sched));
}
remote_base = remote;
ftx = 1;
syscall(SYS_futex, &ftx, FUTEX_WAKE, 0, NULL, NULL, 0);
while (ftx != 0)
syscall(SYS_futex, &ftx, FUTEX_WAIT, 1, NULL, NULL, 0);
asm volatile ("mov %%gs, %0" : "=rm" (sel_post_sched));
base = read_base(GS);
if (base == local && sel_pre_sched == sel_post_sched) {
printf("[OK]\tGS/BASE remained 0x%hx/0x%lx\n",
sel_pre_sched, local);
} else if (base == local && sel_pre_sched >= 1 && sel_pre_sched <= 3 &&
sel_post_sched == 0) {
/*
* IRET is misdesigned and will squash selectors 1, 2, or 3
* to zero. Don't fail the test just because this happened.
*/
printf("[OK]\tGS/BASE changed from 0x%hx/0x%lx to 0x%hx/0x%lx because IRET is defective\n",
sel_pre_sched, local, sel_post_sched, base);
} else {
nerrs++;
printf("[FAIL]\tGS/BASE changed from 0x%hx/0x%lx to 0x%hx/0x%lx\n",
sel_pre_sched, local, sel_post_sched, base);
}
}
static void test_unexpected_base(void)
{
unsigned long base;
printf("[RUN]\tARCH_SET_GS(0), clear gs, then manipulate GSBASE in a different thread\n");
if (syscall(SYS_arch_prctl, ARCH_SET_GS, 0) != 0)
err(1, "ARCH_SET_GS");
asm volatile ("mov %0, %%gs" : : "rm" ((unsigned short)0));
ftx = 2;
syscall(SYS_futex, &ftx, FUTEX_WAKE, 0, NULL, NULL, 0);
while (ftx != 0)
syscall(SYS_futex, &ftx, FUTEX_WAIT, 1, NULL, NULL, 0);
base = read_base(GS);
if (base == 0) {
printf("[OK]\tGSBASE remained 0\n");
} else {
nerrs++;
printf("[FAIL]\tGSBASE changed to 0x%lx\n", base);
}
}
#define USER_REGS_OFFSET(r) offsetof(struct user_regs_struct, r)
static void test_ptrace_write_gs_read_base(void)
{
int status;
pid_t child = fork();
if (child < 0)
err(1, "fork");
if (child == 0) {
printf("[RUN]\tPTRACE_POKE GS, read GSBASE back\n");
printf("[RUN]\tARCH_SET_GS to 1\n");
if (syscall(SYS_arch_prctl, ARCH_SET_GS, 1) != 0)
err(1, "ARCH_SET_GS");
if (ptrace(PTRACE_TRACEME, 0, NULL, NULL) != 0)
err(1, "PTRACE_TRACEME");
raise(SIGTRAP);
_exit(0);
}
wait(&status);
if (WSTOPSIG(status) == SIGTRAP) {
unsigned long base;
unsigned long gs_offset = USER_REGS_OFFSET(gs);
unsigned long base_offset = USER_REGS_OFFSET(gs_base);
/* Read the initial base. It should be 1. */
base = ptrace(PTRACE_PEEKUSER, child, base_offset, NULL);
if (base == 1) {
printf("[OK]\tGSBASE started at 1\n");
} else {
nerrs++;
printf("[FAIL]\tGSBASE started at 0x%lx\n", base);
}
printf("[RUN]\tSet GS = 0x7, read GSBASE\n");
/* Poke an LDT selector into GS. */
if (ptrace(PTRACE_POKEUSER, child, gs_offset, 0x7) != 0)
err(1, "PTRACE_POKEUSER");
/* And read the base. */
base = ptrace(PTRACE_PEEKUSER, child, base_offset, NULL);
if (base == 0 || base == 1) {
printf("[OK]\tGSBASE reads as 0x%lx with invalid GS\n", base);
} else {
nerrs++;
printf("[FAIL]\tGSBASE=0x%lx (should be 0 or 1)\n", base);
}
}
ptrace(PTRACE_CONT, child, NULL, NULL);
wait(&status);
if (!WIFEXITED(status))
printf("[WARN]\tChild didn't exit cleanly.\n");
}
static void test_ptrace_write_gsbase(void)
{
int status;
pid_t child = fork();
if (child < 0)
err(1, "fork");
if (child == 0) {
printf("[RUN]\tPTRACE_POKE(), write GSBASE from ptracer\n");
*shared_scratch = load_gs();
if (ptrace(PTRACE_TRACEME, 0, NULL, NULL) != 0)
err(1, "PTRACE_TRACEME");
raise(SIGTRAP);
_exit(0);
}
wait(&status);
if (WSTOPSIG(status) == SIGTRAP) {
unsigned long gs, base;
unsigned long gs_offset = USER_REGS_OFFSET(gs);
unsigned long base_offset = USER_REGS_OFFSET(gs_base);
gs = ptrace(PTRACE_PEEKUSER, child, gs_offset, NULL);
if (gs != *shared_scratch) {
nerrs++;
printf("[FAIL]\tGS is not prepared with nonzero\n");
goto END;
}
if (ptrace(PTRACE_POKEUSER, child, base_offset, 0xFF) != 0)
err(1, "PTRACE_POKEUSER");
gs = ptrace(PTRACE_PEEKUSER, child, gs_offset, NULL);
base = ptrace(PTRACE_PEEKUSER, child, base_offset, NULL);
/*
* In a non-FSGSBASE system, the nonzero selector will load
* GSBASE (again). But what is tested here is whether the
* selector value is changed or not by the GSBASE write in
* a ptracer.
*/
if (gs != *shared_scratch) {
nerrs++;
printf("[FAIL]\tGS changed to %lx\n", gs);
/*
* On older kernels, poking a nonzero value into the
* base would zero the selector. On newer kernels,
* this behavior has changed -- poking the base
* changes only the base and, if FSGSBASE is not
* available, this may have no effect once the tracee
* is resumed.
*/
if (gs == 0)
printf("\tNote: this is expected behavior on older kernels.\n");
} else if (have_fsgsbase && (base != 0xFF)) {
nerrs++;
printf("[FAIL]\tGSBASE changed to %lx\n", base);
} else {
printf("[OK]\tGS remained 0x%hx", *shared_scratch);
if (have_fsgsbase)
printf(" and GSBASE changed to 0xFF");
printf("\n");
}
}
END:
ptrace(PTRACE_CONT, child, NULL, NULL);
wait(&status);
if (!WIFEXITED(status))
printf("[WARN]\tChild didn't exit cleanly.\n");
}
int main()
{
pthread_t thread;
shared_scratch = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
MAP_ANONYMOUS | MAP_SHARED, -1, 0);
/* Do these tests before we have an LDT. */
test_ptrace_write_gs_read_base();
/* Probe FSGSBASE */
sethandler(SIGILL, sigill, 0);
if (sigsetjmp(jmpbuf, 1) == 0) {
rdfsbase();
have_fsgsbase = true;
printf("\tFSGSBASE instructions are enabled\n");
} else {
printf("\tFSGSBASE instructions are disabled\n");
}
clearhandler(SIGILL);
sethandler(SIGSEGV, sigsegv, 0);
check_gs_value(0);
check_gs_value(1);
check_gs_value(0x200000000);
check_gs_value(0);
check_gs_value(0x200000000);
check_gs_value(1);
for (int sched = 0; sched < 2; sched++) {
mov_0_gs(0, !!sched);
mov_0_gs(1, !!sched);
mov_0_gs(0x200000000, !!sched);
}
/* Set up for multithreading. */
cpu_set_t cpuset;
CPU_ZERO(&cpuset);
CPU_SET(0, &cpuset);
if (sched_setaffinity(0, sizeof(cpuset), &cpuset) != 0)
err(1, "sched_setaffinity to CPU 0"); /* should never fail */
if (pthread_create(&thread, 0, threadproc, 0) != 0)
err(1, "pthread_create");
static unsigned long bases_with_hard_zero[] = {
0, HARD_ZERO, 1, 0x200000000,
};
for (int local = 0; local < 4; local++) {
for (int remote = 0; remote < 4; remote++) {
for (unsigned short s = 0; s < 5; s++) {
unsigned short sel = s;
if (s == 4)
asm ("mov %%ss, %0" : "=rm" (sel));
set_gs_and_switch_to(
bases_with_hard_zero[local],
sel,
bases_with_hard_zero[remote]);
}
}
}
test_unexpected_base();
if (have_fsgsbase) {
unsigned short ss;
asm volatile ("mov %%ss, %0" : "=rm" (ss));
test_wrbase(0, 0);
test_wrbase(0, 1);
test_wrbase(0, 0x200000000);
test_wrbase(0, 0xffffffffffffffff);
test_wrbase(ss, 0);
test_wrbase(ss, 1);
test_wrbase(ss, 0x200000000);
test_wrbase(ss, 0xffffffffffffffff);
}
ftx = 3; /* Kill the thread. */
syscall(SYS_futex, &ftx, FUTEX_WAKE, 0, NULL, NULL, 0);
if (pthread_join(thread, NULL) != 0)
err(1, "pthread_join");
test_ptrace_write_gsbase();
return nerrs == 0 ? 0 : 1;
}
| linux-master | tools/testing/selftests/x86/fsgsbase.c |
// SPDX-License-Identifier: GPL-2.0
#define _GNU_SOURCE
#include <err.h>
#include <errno.h>
#include <pthread.h>
#include <setjmp.h>
#include <stdio.h>
#include <string.h>
#include <stdbool.h>
#include <unistd.h>
#include <x86intrin.h>
#include <sys/auxv.h>
#include <sys/mman.h>
#include <sys/shm.h>
#include <sys/ptrace.h>
#include <sys/syscall.h>
#include <sys/wait.h>
#include <sys/uio.h>
#include "../kselftest.h" /* For __cpuid_count() */
#ifndef __x86_64__
# error This test is 64-bit only
#endif
#define XSAVE_HDR_OFFSET 512
#define XSAVE_HDR_SIZE 64
struct xsave_buffer {
union {
struct {
char legacy[XSAVE_HDR_OFFSET];
char header[XSAVE_HDR_SIZE];
char extended[0];
};
char bytes[0];
};
};
static inline uint64_t xgetbv(uint32_t index)
{
uint32_t eax, edx;
asm volatile("xgetbv;"
: "=a" (eax), "=d" (edx)
: "c" (index));
return eax + ((uint64_t)edx << 32);
}
static inline void xsave(struct xsave_buffer *xbuf, uint64_t rfbm)
{
uint32_t rfbm_lo = rfbm;
uint32_t rfbm_hi = rfbm >> 32;
asm volatile("xsave (%%rdi)"
: : "D" (xbuf), "a" (rfbm_lo), "d" (rfbm_hi)
: "memory");
}
static inline void xrstor(struct xsave_buffer *xbuf, uint64_t rfbm)
{
uint32_t rfbm_lo = rfbm;
uint32_t rfbm_hi = rfbm >> 32;
asm volatile("xrstor (%%rdi)"
: : "D" (xbuf), "a" (rfbm_lo), "d" (rfbm_hi));
}
/* err() exits and will not return */
#define fatal_error(msg, ...) err(1, "[FAIL]\t" msg, ##__VA_ARGS__)
static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *),
int flags)
{
struct sigaction sa;
memset(&sa, 0, sizeof(sa));
sa.sa_sigaction = handler;
sa.sa_flags = SA_SIGINFO | flags;
sigemptyset(&sa.sa_mask);
if (sigaction(sig, &sa, 0))
fatal_error("sigaction");
}
static void clearhandler(int sig)
{
struct sigaction sa;
memset(&sa, 0, sizeof(sa));
sa.sa_handler = SIG_DFL;
sigemptyset(&sa.sa_mask);
if (sigaction(sig, &sa, 0))
fatal_error("sigaction");
}
#define XFEATURE_XTILECFG 17
#define XFEATURE_XTILEDATA 18
#define XFEATURE_MASK_XTILECFG (1 << XFEATURE_XTILECFG)
#define XFEATURE_MASK_XTILEDATA (1 << XFEATURE_XTILEDATA)
#define XFEATURE_MASK_XTILE (XFEATURE_MASK_XTILECFG | XFEATURE_MASK_XTILEDATA)
#define CPUID_LEAF1_ECX_XSAVE_MASK (1 << 26)
#define CPUID_LEAF1_ECX_OSXSAVE_MASK (1 << 27)
static inline void check_cpuid_xsave(void)
{
uint32_t eax, ebx, ecx, edx;
/*
* CPUID.1:ECX.XSAVE[bit 26] enumerates general
* support for the XSAVE feature set, including
* XGETBV.
*/
__cpuid_count(1, 0, eax, ebx, ecx, edx);
if (!(ecx & CPUID_LEAF1_ECX_XSAVE_MASK))
fatal_error("cpuid: no CPU xsave support");
if (!(ecx & CPUID_LEAF1_ECX_OSXSAVE_MASK))
fatal_error("cpuid: no OS xsave support");
}
static uint32_t xbuf_size;
static struct {
uint32_t xbuf_offset;
uint32_t size;
} xtiledata;
#define CPUID_LEAF_XSTATE 0xd
#define CPUID_SUBLEAF_XSTATE_USER 0x0
#define TILE_CPUID 0x1d
#define TILE_PALETTE_ID 0x1
static void check_cpuid_xtiledata(void)
{
uint32_t eax, ebx, ecx, edx;
__cpuid_count(CPUID_LEAF_XSTATE, CPUID_SUBLEAF_XSTATE_USER,
eax, ebx, ecx, edx);
/*
* EBX enumerates the size (in bytes) required by the XSAVE
* instruction for an XSAVE area containing all the user state
* components corresponding to bits currently set in XCR0.
*
* Stash that off so it can be used to allocate buffers later.
*/
xbuf_size = ebx;
__cpuid_count(CPUID_LEAF_XSTATE, XFEATURE_XTILEDATA,
eax, ebx, ecx, edx);
/*
* eax: XTILEDATA state component size
* ebx: XTILEDATA state component offset in user buffer
*/
if (!eax || !ebx)
fatal_error("xstate cpuid: invalid tile data size/offset: %d/%d",
eax, ebx);
xtiledata.size = eax;
xtiledata.xbuf_offset = ebx;
}
/* The helpers for managing XSAVE buffer and tile states: */
struct xsave_buffer *alloc_xbuf(void)
{
struct xsave_buffer *xbuf;
/* XSAVE buffer should be 64B-aligned. */
xbuf = aligned_alloc(64, xbuf_size);
if (!xbuf)
fatal_error("aligned_alloc()");
return xbuf;
}
static inline void clear_xstate_header(struct xsave_buffer *buffer)
{
memset(&buffer->header, 0, sizeof(buffer->header));
}
static inline uint64_t get_xstatebv(struct xsave_buffer *buffer)
{
/* XSTATE_BV is at the beginning of the header: */
return *(uint64_t *)&buffer->header;
}
static inline void set_xstatebv(struct xsave_buffer *buffer, uint64_t bv)
{
/* XSTATE_BV is at the beginning of the header: */
*(uint64_t *)(&buffer->header) = bv;
}
static void set_rand_tiledata(struct xsave_buffer *xbuf)
{
int *ptr = (int *)&xbuf->bytes[xtiledata.xbuf_offset];
int data;
int i;
/*
* Ensure that 'data' is never 0. This ensures that
* the registers are never in their initial configuration
* and thus never tracked as being in the init state.
*/
data = rand() | 1;
for (i = 0; i < xtiledata.size / sizeof(int); i++, ptr++)
*ptr = data;
}
struct xsave_buffer *stashed_xsave;
static void init_stashed_xsave(void)
{
stashed_xsave = alloc_xbuf();
if (!stashed_xsave)
fatal_error("failed to allocate stashed_xsave\n");
clear_xstate_header(stashed_xsave);
}
static void free_stashed_xsave(void)
{
free(stashed_xsave);
}
/* See 'struct _fpx_sw_bytes' at sigcontext.h */
#define SW_BYTES_OFFSET 464
/* N.B. The struct's field name varies so read from the offset. */
#define SW_BYTES_BV_OFFSET (SW_BYTES_OFFSET + 8)
static inline struct _fpx_sw_bytes *get_fpx_sw_bytes(void *buffer)
{
return (struct _fpx_sw_bytes *)(buffer + SW_BYTES_OFFSET);
}
static inline uint64_t get_fpx_sw_bytes_features(void *buffer)
{
return *(uint64_t *)(buffer + SW_BYTES_BV_OFFSET);
}
/* Work around printf() being unsafe in signals: */
#define SIGNAL_BUF_LEN 1000
char signal_message_buffer[SIGNAL_BUF_LEN];
void sig_print(char *msg)
{
int left = SIGNAL_BUF_LEN - strlen(signal_message_buffer) - 1;
strncat(signal_message_buffer, msg, left);
}
static volatile bool noperm_signaled;
static int noperm_errs;
/*
* Signal handler for when AMX is used but
* permission has not been obtained.
*/
static void handle_noperm(int sig, siginfo_t *si, void *ctx_void)
{
ucontext_t *ctx = (ucontext_t *)ctx_void;
void *xbuf = ctx->uc_mcontext.fpregs;
struct _fpx_sw_bytes *sw_bytes;
uint64_t features;
/* Reset the signal message buffer: */
signal_message_buffer[0] = '\0';
sig_print("\tAt SIGILL handler,\n");
if (si->si_code != ILL_ILLOPC) {
noperm_errs++;
sig_print("[FAIL]\tInvalid signal code.\n");
} else {
sig_print("[OK]\tValid signal code (ILL_ILLOPC).\n");
}
sw_bytes = get_fpx_sw_bytes(xbuf);
/*
* Without permission, the signal XSAVE buffer should not
* have room for AMX register state (aka. xtiledata).
* Check that the size does not overlap with where xtiledata
* will reside.
*
* This also implies that no state components *PAST*
* XTILEDATA (features >=19) can be present in the buffer.
*/
if (sw_bytes->xstate_size <= xtiledata.xbuf_offset) {
sig_print("[OK]\tValid xstate size\n");
} else {
noperm_errs++;
sig_print("[FAIL]\tInvalid xstate size\n");
}
features = get_fpx_sw_bytes_features(xbuf);
/*
* Without permission, the XTILEDATA feature
* bit should not be set.
*/
if ((features & XFEATURE_MASK_XTILEDATA) == 0) {
sig_print("[OK]\tValid xstate mask\n");
} else {
noperm_errs++;
sig_print("[FAIL]\tInvalid xstate mask\n");
}
noperm_signaled = true;
ctx->uc_mcontext.gregs[REG_RIP] += 3; /* Skip the faulting XRSTOR */
}
/* Return true if XRSTOR is successful; otherwise, false. */
static inline bool xrstor_safe(struct xsave_buffer *xbuf, uint64_t mask)
{
noperm_signaled = false;
xrstor(xbuf, mask);
/* Print any messages produced by the signal code: */
printf("%s", signal_message_buffer);
/*
* Reset the buffer to make sure any future printing
* only outputs new messages:
*/
signal_message_buffer[0] = '\0';
if (noperm_errs)
fatal_error("saw %d errors in noperm signal handler\n", noperm_errs);
return !noperm_signaled;
}
/*
* Use XRSTOR to populate the XTILEDATA registers with
* random data.
*
* Return true if successful; otherwise, false.
*/
static inline bool load_rand_tiledata(struct xsave_buffer *xbuf)
{
clear_xstate_header(xbuf);
set_xstatebv(xbuf, XFEATURE_MASK_XTILEDATA);
set_rand_tiledata(xbuf);
return xrstor_safe(xbuf, XFEATURE_MASK_XTILEDATA);
}
/* Return XTILEDATA to its initial configuration. */
static inline void init_xtiledata(void)
{
clear_xstate_header(stashed_xsave);
xrstor_safe(stashed_xsave, XFEATURE_MASK_XTILEDATA);
}
enum expected_result { FAIL_EXPECTED, SUCCESS_EXPECTED };
/* arch_prctl() and sigaltstack() test */
#define ARCH_GET_XCOMP_PERM 0x1022
#define ARCH_REQ_XCOMP_PERM 0x1023
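/*
* XTILEDATA is a dynamically enabled XSTATE component: user space must
* ask the kernel for permission with arch_prctl(ARCH_REQ_XCOMP_PERM)
* before the AMX tile registers may be used.
*/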
static void req_xtiledata_perm(void)
{
syscall(SYS_arch_prctl, ARCH_REQ_XCOMP_PERM, XFEATURE_XTILEDATA);
}
static void validate_req_xcomp_perm(enum expected_result exp)
{
unsigned long bitmask, expected_bitmask;
long rc;
rc = syscall(SYS_arch_prctl, ARCH_GET_XCOMP_PERM, &bitmask);
if (rc) {
fatal_error("prctl(ARCH_GET_XCOMP_PERM) error: %ld", rc);
} else if (!(bitmask & XFEATURE_MASK_XTILECFG)) {
fatal_error("ARCH_GET_XCOMP_PERM returns XFEATURE_XTILECFG off.");
}
rc = syscall(SYS_arch_prctl, ARCH_REQ_XCOMP_PERM, XFEATURE_XTILEDATA);
if (exp == FAIL_EXPECTED) {
if (rc) {
printf("[OK]\tARCH_REQ_XCOMP_PERM saw expected failure..\n");
return;
}
fatal_error("ARCH_REQ_XCOMP_PERM saw unexpected success.\n");
} else if (rc) {
fatal_error("ARCH_REQ_XCOMP_PERM saw unexpected failure.\n");
}
expected_bitmask = bitmask | XFEATURE_MASK_XTILEDATA;
rc = syscall(SYS_arch_prctl, ARCH_GET_XCOMP_PERM, &bitmask);
if (rc) {
fatal_error("prctl(ARCH_GET_XCOMP_PERM) error: %ld", rc);
} else if (bitmask != expected_bitmask) {
fatal_error("ARCH_REQ_XCOMP_PERM set a wrong bitmask: %lx, expected: %lx.\n",
bitmask, expected_bitmask);
} else {
printf("\tARCH_REQ_XCOMP_PERM is successful.\n");
}
}
static void validate_xcomp_perm(enum expected_result exp)
{
bool load_success = load_rand_tiledata(stashed_xsave);
if (exp == FAIL_EXPECTED) {
if (load_success) {
noperm_errs++;
printf("[FAIL]\tLoad tiledata succeeded.\n");
} else {
printf("[OK]\tLoad tiledata failed.\n");
}
} else if (exp == SUCCESS_EXPECTED) {
if (load_success) {
printf("[OK]\tLoad tiledata succeeded.\n");
} else {
noperm_errs++;
printf("[FAIL]\tLoad tiledata failed.\n");
}
}
}
#ifndef AT_MINSIGSTKSZ
# define AT_MINSIGSTKSZ 51
#endif
static void *alloc_altstack(unsigned int size)
{
void *altstack;
altstack = mmap(NULL, size, PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS | MAP_STACK, -1, 0);
if (altstack == MAP_FAILED)
fatal_error("mmap() for altstack");
return altstack;
}
static void setup_altstack(void *addr, unsigned long size, enum expected_result exp)
{
stack_t ss;
int rc;
memset(&ss, 0, sizeof(ss));
ss.ss_size = size;
ss.ss_sp = addr;
rc = sigaltstack(&ss, NULL);
if (exp == FAIL_EXPECTED) {
if (rc) {
printf("[OK]\tsigaltstack() failed.\n");
} else {
fatal_error("sigaltstack() succeeded unexpectedly.\n");
}
} else if (rc) {
fatal_error("sigaltstack()");
}
}
static void test_dynamic_sigaltstack(void)
{
unsigned int small_size, enough_size;
unsigned long minsigstksz;
void *altstack;
minsigstksz = getauxval(AT_MINSIGSTKSZ);
printf("\tAT_MINSIGSTKSZ = %lu\n", minsigstksz);
/*
* getauxval() itself can return 0 for failure or
* success. But, in this case, AT_MINSIGSTKSZ
* will always return a value > 0 if implemented.
* Just check for 0.
*/
if (minsigstksz == 0) {
printf("no support for AT_MINSIGSTKSZ, skipping sigaltstack tests\n");
return;
}
enough_size = minsigstksz * 2;
altstack = alloc_altstack(enough_size);
printf("\tAllocate memory for altstack (%u bytes).\n", enough_size);
/*
* Try setup_altstack() with a size which can not fit
* XTILEDATA. ARCH_REQ_XCOMP_PERM should fail.
*/
small_size = minsigstksz - xtiledata.size;
printf("\tAfter sigaltstack() with small size (%u bytes).\n", small_size);
setup_altstack(altstack, small_size, SUCCESS_EXPECTED);
validate_req_xcomp_perm(FAIL_EXPECTED);
/*
* Try setup_altstack() with a size derived from
* AT_MINSIGSTKSZ. It should be more than large enough
* and thus ARCH_REQ_XCOMP_PERM should succeed.
*/
printf("\tAfter sigaltstack() with enough size (%u bytes).\n", enough_size);
setup_altstack(altstack, enough_size, SUCCESS_EXPECTED);
validate_req_xcomp_perm(SUCCESS_EXPECTED);
/*
* Try to coerce setup_altstack() to again accept a
* too-small altstack. This ensures that big-enough
* sigaltstacks can not shrink to a too-small value
* once XTILEDATA permission is established.
*/
printf("\tThen, sigaltstack() with small size (%u bytes).\n", small_size);
setup_altstack(altstack, small_size, FAIL_EXPECTED);
}
static void test_dynamic_state(void)
{
pid_t parent, child, grandchild;
parent = fork();
if (parent < 0) {
/* fork() failed */
fatal_error("fork");
} else if (parent > 0) {
int status;
/* fork() succeeded. Now in the parent. */
wait(&status);
if (!WIFEXITED(status) || WEXITSTATUS(status))
fatal_error("arch_prctl test parent exit");
return;
}
/* fork() succeeded. Now in the child . */
printf("[RUN]\tCheck ARCH_REQ_XCOMP_PERM around process fork() and sigaltack() test.\n");
printf("\tFork a child.\n");
child = fork();
if (child < 0) {
fatal_error("fork");
} else if (child > 0) {
int status;
wait(&status);
if (!WIFEXITED(status) || WEXITSTATUS(status))
fatal_error("arch_prctl test child exit");
_exit(0);
}
/*
* The permission request should fail without an
* XTILEDATA-compatible signal stack
*/
printf("\tTest XCOMP_PERM at child.\n");
validate_xcomp_perm(FAIL_EXPECTED);
/*
* Set up an XTILEDATA-compatible signal stack and
* also obtain permission to populate XTILEDATA.
*/
printf("\tTest dynamic sigaltstack at child:\n");
test_dynamic_sigaltstack();
/* Ensure that XTILEDATA can be populated. */
printf("\tTest XCOMP_PERM again at child.\n");
validate_xcomp_perm(SUCCESS_EXPECTED);
printf("\tFork a grandchild.\n");
grandchild = fork();
if (grandchild < 0) {
/* fork() failed */
fatal_error("fork");
} else if (!grandchild) {
/* fork() succeeded. Now in the (grand)child. */
printf("\tTest XCOMP_PERM at grandchild.\n");
/*
* Ensure that the grandchild inherited
* permission and a compatible sigaltstack:
*/
validate_xcomp_perm(SUCCESS_EXPECTED);
} else {
int status;
/* fork() succeeded. Now in the parent. */
wait(&status);
if (!WIFEXITED(status) || WEXITSTATUS(status))
fatal_error("fork test grandchild");
}
_exit(0);
}
static inline int __compare_tiledata_state(struct xsave_buffer *xbuf1, struct xsave_buffer *xbuf2)
{
return memcmp(&xbuf1->bytes[xtiledata.xbuf_offset],
&xbuf2->bytes[xtiledata.xbuf_offset],
xtiledata.size);
}
/*
* Save current register state and compare it to @xbuf1.
*
* Returns false if @xbuf1 matches the registers.
* Returns true if @xbuf1 differs from the registers.
*/
static inline bool __validate_tiledata_regs(struct xsave_buffer *xbuf1)
{
struct xsave_buffer *xbuf2;
int ret;
xbuf2 = alloc_xbuf();
if (!xbuf2)
fatal_error("failed to allocate XSAVE buffer\n");
xsave(xbuf2, XFEATURE_MASK_XTILEDATA);
ret = __compare_tiledata_state(xbuf1, xbuf2);
free(xbuf2);
if (ret == 0)
return false;
return true;
}
static inline void validate_tiledata_regs_same(struct xsave_buffer *xbuf)
{
int ret = __validate_tiledata_regs(xbuf);
if (ret != 0)
fatal_error("TILEDATA registers changed");
}
static inline void validate_tiledata_regs_changed(struct xsave_buffer *xbuf)
{
int ret = __validate_tiledata_regs(xbuf);
if (ret == 0)
fatal_error("TILEDATA registers did not change");
}
/* tiledata inheritance test */
static void test_fork(void)
{
pid_t child, grandchild;
child = fork();
if (child < 0) {
/* fork() failed */
fatal_error("fork");
} else if (child > 0) {
/* fork() succeeded. Now in the parent. */
int status;
wait(&status);
if (!WIFEXITED(status) || WEXITSTATUS(status))
fatal_error("fork test child");
return;
}
/* fork() succeeded. Now in the child. */
printf("[RUN]\tCheck tile data inheritance.\n\tBefore fork(), load tiledata\n");
load_rand_tiledata(stashed_xsave);
grandchild = fork();
if (grandchild < 0) {
/* fork() failed */
fatal_error("fork");
} else if (grandchild > 0) {
/* fork() succeeded. Still in the first child. */
int status;
wait(&status);
if (!WIFEXITED(status) || WEXITSTATUS(status))
fatal_error("fork test grand child");
_exit(0);
}
/* fork() succeeded. Now in the (grand)child. */
/*
* TILEDATA registers are not preserved across fork().
* Ensure that their value has changed:
*/
validate_tiledata_regs_changed(stashed_xsave);
_exit(0);
}
/* Context switching test */
static struct _ctxtswtest_cfg {
unsigned int iterations;
unsigned int num_threads;
} ctxtswtest_config;
struct futex_info {
pthread_t thread;
int nr;
pthread_mutex_t mutex;
struct futex_info *next;
};
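/*
* Worker for the context-switch test: load random tile data, then, for
* each iteration, wait on this thread's mutex, check that the live tile
* registers still match the private copy, load fresh random data, and
* unlock the next thread in the ring.
*/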
static void *check_tiledata(void *info)
{
struct futex_info *finfo = (struct futex_info *)info;
struct xsave_buffer *xbuf;
int i;
xbuf = alloc_xbuf();
if (!xbuf)
fatal_error("unable to allocate XSAVE buffer");
/*
* Load random data into 'xbuf' and then restore
* it to the tile registers themselves.
*/
load_rand_tiledata(xbuf);
for (i = 0; i < ctxtswtest_config.iterations; i++) {
pthread_mutex_lock(&finfo->mutex);
/*
* Ensure the register values have not
* diverged from those recorded in 'xbuf'.
*/
validate_tiledata_regs_same(xbuf);
/* Load new, random values into xbuf and registers */
load_rand_tiledata(xbuf);
/*
* The last thread's last unlock will be for
* thread 0's mutex. However, thread 0 will
* have already exited the loop and the mutex
* will already be unlocked.
*
* Because this is not an ERRORCHECK mutex,
* that inconsistency will be silently ignored.
*/
pthread_mutex_unlock(&finfo->next->mutex);
}
free(xbuf);
/*
* Return this thread's finfo, which is
* a unique value for this thread.
*/
return finfo;
}
static int create_threads(int num, struct futex_info *finfo)
{
int i;
for (i = 0; i < num; i++) {
int next_nr;
finfo[i].nr = i;
/*
* Thread 'i' will wait on this mutex to
* be unlocked. Lock it immediately after
* initialization:
*/
pthread_mutex_init(&finfo[i].mutex, NULL);
pthread_mutex_lock(&finfo[i].mutex);
next_nr = (i + 1) % num;
finfo[i].next = &finfo[next_nr];
if (pthread_create(&finfo[i].thread, NULL, check_tiledata, &finfo[i]))
fatal_error("pthread_create()");
}
return 0;
}
static void affinitize_cpu0(void)
{
cpu_set_t cpuset;
CPU_ZERO(&cpuset);
CPU_SET(0, &cpuset);
if (sched_setaffinity(0, sizeof(cpuset), &cpuset) != 0)
fatal_error("sched_setaffinity to CPU 0");
}
static void test_context_switch(void)
{
struct futex_info *finfo;
int i;
/* Affinitize to one CPU to force context switches */
affinitize_cpu0();
req_xtiledata_perm();
printf("[RUN]\tCheck tiledata context switches, %d iterations, %d threads.\n",
ctxtswtest_config.iterations,
ctxtswtest_config.num_threads);
finfo = malloc(sizeof(*finfo) * ctxtswtest_config.num_threads);
if (!finfo)
fatal_error("malloc()");
create_threads(ctxtswtest_config.num_threads, finfo);
/*
* This thread wakes up thread 0
* Thread 0 will wake up 1
* Thread 1 will wake up 2
* ...
* the last thread will wake up 0
*
* ... this will repeat for the configured
* number of iterations.
*/
pthread_mutex_unlock(&finfo[0].mutex);
/* Wait for all the threads to finish: */
for (i = 0; i < ctxtswtest_config.num_threads; i++) {
void *thread_retval;
int rc;
rc = pthread_join(finfo[i].thread, &thread_retval);
if (rc)
fatal_error("pthread_join() failed for thread %d err: %d\n",
i, rc);
if (thread_retval != &finfo[i])
fatal_error("unexpected thread retval for thread %d: %p\n",
i, thread_retval);
}
printf("[OK]\tNo incorrect case was found.\n");
free(finfo);
}
/* Ptrace test */
/*
* Make sure the ptracee has the expanded kernel buffer on the first
* use. Then, initialize the state before performing the state
* injection from the ptracer.
*/
static inline void ptracee_firstuse_tiledata(void)
{
load_rand_tiledata(stashed_xsave);
init_xtiledata();
}
/*
* Ptracer injects the randomized tile data state. It also reads
* before and after that, which will execute the kernel's state copy
* functions. So, the tester is advised to double-check any emitted
* kernel messages.
*/
static void ptracer_inject_tiledata(pid_t target)
{
struct xsave_buffer *xbuf;
struct iovec iov;
xbuf = alloc_xbuf();
if (!xbuf)
fatal_error("unable to allocate XSAVE buffer");
printf("\tRead the init'ed tiledata via ptrace().\n");
iov.iov_base = xbuf;
iov.iov_len = xbuf_size;
memset(stashed_xsave, 0, xbuf_size);
if (ptrace(PTRACE_GETREGSET, target, (uint32_t)NT_X86_XSTATE, &iov))
fatal_error("PTRACE_GETREGSET");
if (!__compare_tiledata_state(stashed_xsave, xbuf))
printf("[OK]\tThe init'ed tiledata was read from ptracee.\n");
else
printf("[FAIL]\tThe init'ed tiledata was not read from ptracee.\n");
printf("\tInject tiledata via ptrace().\n");
load_rand_tiledata(xbuf);
memcpy(&stashed_xsave->bytes[xtiledata.xbuf_offset],
&xbuf->bytes[xtiledata.xbuf_offset],
xtiledata.size);
if (ptrace(PTRACE_SETREGSET, target, (uint32_t)NT_X86_XSTATE, &iov))
fatal_error("PTRACE_SETREGSET");
if (ptrace(PTRACE_GETREGSET, target, (uint32_t)NT_X86_XSTATE, &iov))
fatal_error("PTRACE_GETREGSET");
if (!__compare_tiledata_state(stashed_xsave, xbuf))
printf("[OK]\tTiledata was correctly written to ptracee.\n");
else
printf("[FAIL]\tTiledata was not correctly written to ptracee.\n");
}
static void test_ptrace(void)
{
pid_t child;
int status;
child = fork();
if (child < 0) {
err(1, "fork");
} else if (!child) {
if (ptrace(PTRACE_TRACEME, 0, NULL, NULL))
err(1, "PTRACE_TRACEME");
ptracee_firstuse_tiledata();
raise(SIGTRAP);
_exit(0);
}
do {
wait(&status);
} while (WSTOPSIG(status) != SIGTRAP);
ptracer_inject_tiledata(child);
ptrace(PTRACE_DETACH, child, NULL, NULL);
wait(&status);
if (!WIFEXITED(status) || WEXITSTATUS(status))
err(1, "ptrace test");
}
int main(void)
{
/* Check hardware availability at first */
check_cpuid_xsave();
check_cpuid_xtiledata();
init_stashed_xsave();
sethandler(SIGILL, handle_noperm, 0);
test_dynamic_state();
/* Request permission for the following tests */
req_xtiledata_perm();
test_fork();
ctxtswtest_config.iterations = 10;
ctxtswtest_config.num_threads = 5;
test_context_switch();
test_ptrace();
clearhandler(SIGILL);
free_stashed_xsave();
return 0;
}
| linux-master | tools/testing/selftests/x86/amx.c |
// SPDX-License-Identifier: GPL-2.0
#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/syscall.h>
#include <time.h>
#include <signal.h>
#include <setjmp.h>
#include <sys/mman.h>
#include <sys/utsname.h>
#include <sys/wait.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <inttypes.h>
#include <sched.h>
#include <sys/uio.h>
#include <linux/io_uring.h>
#include "../kselftest.h"
#ifndef __x86_64__
# error This test is 64-bit only
#endif
/* LAM modes, these definitions were copied from kernel code */
#define LAM_NONE 0
#define LAM_U57_BITS 6
#define LAM_U57_MASK (0x3fULL << 57)
/* arch prctl for LAM */
#define ARCH_GET_UNTAG_MASK 0x4001
#define ARCH_ENABLE_TAGGED_ADDR 0x4002
#define ARCH_GET_MAX_TAG_BITS 0x4003
#define ARCH_FORCE_TAGGED_SVA 0x4004
/* Specified test function bits */
#define FUNC_MALLOC 0x1
#define FUNC_BITS 0x2
#define FUNC_MMAP 0x4
#define FUNC_SYSCALL 0x8
#define FUNC_URING 0x10
#define FUNC_INHERITE 0x20
#define FUNC_PASID 0x40
#define TEST_MASK 0x7f
#define LOW_ADDR (0x1UL << 30)
#define HIGH_ADDR (0x3UL << 48)
#define MALLOC_LEN 32
#define PAGE_SIZE (4 << 10)
#define STACK_SIZE 65536
#define barrier() ({ \
__asm__ __volatile__("" : : : "memory"); \
})
#define URING_QUEUE_SZ 1
#define URING_BLOCK_SZ 2048
/* Pasid test define */
#define LAM_CMD_BIT 0x1
#define PAS_CMD_BIT 0x2
#define SVA_CMD_BIT 0x4
#define PAS_CMD(cmd1, cmd2, cmd3) (((cmd3) << 8) | ((cmd2) << 4) | ((cmd1) << 0))
struct testcases {
unsigned int later;
int expected; /* 2: SIGSEGV Error; 1: other errors */
unsigned long lam;
uint64_t addr;
uint64_t cmd;
int (*test_func)(struct testcases *test);
const char *msg;
};
/* Used by CQ of uring, source file handler and file's size */
struct file_io {
int file_fd;
off_t file_sz;
struct iovec iovecs[];
};
struct io_uring_queue {
unsigned int *head;
unsigned int *tail;
unsigned int *ring_mask;
unsigned int *ring_entries;
unsigned int *flags;
unsigned int *array;
union {
struct io_uring_cqe *cqes;
struct io_uring_sqe *sqes;
} queue;
size_t ring_sz;
};
struct io_ring {
int ring_fd;
struct io_uring_queue sq_ring;
struct io_uring_queue cq_ring;
};
int tests_cnt;
jmp_buf segv_env;
static void segv_handler(int sig)
{
ksft_print_msg("Get segmentation fault(%d).", sig);
siglongjmp(segv_env, 1);
}
static inline int cpu_has_lam(void)
{
unsigned int cpuinfo[4];
__cpuid_count(0x7, 1, cpuinfo[0], cpuinfo[1], cpuinfo[2], cpuinfo[3]);
return (cpuinfo[0] & (1 << 26));
}
/* Check 5-level page table feature in CPUID.(EAX=07H, ECX=00H):ECX.[bit 16] */
static inline int cpu_has_la57(void)
{
unsigned int cpuinfo[4];
__cpuid_count(0x7, 0, cpuinfo[0], cpuinfo[1], cpuinfo[2], cpuinfo[3]);
return (cpuinfo[2] & (1 << 16));
}
/*
* Set tagged address and read back untag mask.
* Check if the untag mask is the expected one.
*
* @return:
* 0: Set LAM mode successfully
* others: failed to set LAM
*/
static int set_lam(unsigned long lam)
{
int ret = 0;
uint64_t ptr = 0;
if (lam != LAM_U57_BITS && lam != LAM_NONE)
return -1;
/* Skip check return */
syscall(SYS_arch_prctl, ARCH_ENABLE_TAGGED_ADDR, lam);
/* Get untagged mask */
syscall(SYS_arch_prctl, ARCH_GET_UNTAG_MASK, &ptr);
/* Check mask returned is expected */
if (lam == LAM_U57_BITS)
ret = (ptr != ~(LAM_U57_MASK));
else if (lam == LAM_NONE)
ret = (ptr != -1ULL);
return ret;
}
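/*
* Probe the default tag width in a throw-away child process so the
* parent's LAM state is left untouched (enabling LAM is expected to be
* a one-way, per-process switch).
*/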
static unsigned long get_default_tag_bits(void)
{
pid_t pid;
int lam = LAM_NONE;
int ret = 0;
pid = fork();
if (pid < 0) {
perror("Fork failed.");
} else if (pid == 0) {
/* Set LAM mode in child process */
if (set_lam(LAM_U57_BITS) == 0)
lam = LAM_U57_BITS;
else
lam = LAM_NONE;
exit(lam);
} else {
wait(&ret);
lam = WEXITSTATUS(ret);
}
return lam;
}
/*
* Read back the untag mask and map it to the corresponding LAM mode.
*/
static int get_lam(void)
{
uint64_t ptr = 0;
int ret = -1;
/* Get untagged mask */
if (syscall(SYS_arch_prctl, ARCH_GET_UNTAG_MASK, &ptr) == -1)
return -1;
/* Check mask returned is expected */
if (ptr == ~(LAM_U57_MASK))
ret = LAM_U57_BITS;
else if (ptr == -1ULL)
ret = LAM_NONE;
return ret;
}
/* According to LAM mode, set metadata in high bits */
static uint64_t set_metadata(uint64_t src, unsigned long lam)
{
uint64_t metadata;
srand(time(NULL));
switch (lam) {
case LAM_U57_BITS: /* Set metadata in bits 62:57 */
/* Get a random non-zero value as metadata */
metadata = (rand() % ((1UL << LAM_U57_BITS) - 1) + 1) << 57;
metadata |= (src & ~(LAM_U57_MASK));
break;
default:
metadata = src;
break;
}
return metadata;
}
/*
* Set metadata in a user pointer and dereference it; with LAM enabled the
* tagged pointer and the original pointer should reach the same memory.
*
* @return:
* 0: data written through the tagged pointer reads back the same via the original
* 1: the data does not match
*/
static int handle_lam_test(void *src, unsigned int lam)
{
char *ptr;
strcpy((char *)src, "USER POINTER");
ptr = (char *)set_metadata((uint64_t)src, lam);
if (src == ptr)
return 0;
/* Copy a string into the pointer with metadata */
strcpy((char *)ptr, "METADATA POINTER");
return (!!strcmp((char *)src, (char *)ptr));
}
int handle_max_bits(struct testcases *test)
{
unsigned long exp_bits = get_default_tag_bits();
unsigned long bits = 0;
if (exp_bits != LAM_NONE)
exp_bits = LAM_U57_BITS;
/* Get LAM max tag bits */
if (syscall(SYS_arch_prctl, ARCH_GET_MAX_TAG_BITS, &bits) == -1)
return 1;
return (exp_bits != bits);
}
/*
* Test the LAM feature by dereferencing a tagged pointer obtained from malloc().
* @return 0: test passed; 1: failure during the test; 2: caught SIGSEGV
*/
static int handle_malloc(struct testcases *test)
{
char *ptr = NULL;
int ret = 0;
if (test->later == 0 && test->lam != 0)
if (set_lam(test->lam) == -1)
return 1;
ptr = (char *)malloc(MALLOC_LEN);
if (ptr == NULL) {
perror("malloc() failure\n");
return 1;
}
/* Set signal handler */
if (sigsetjmp(segv_env, 1) == 0) {
signal(SIGSEGV, segv_handler);
ret = handle_lam_test(ptr, test->lam);
} else {
ret = 2;
}
if (test->later != 0 && test->lam != 0)
if (set_lam(test->lam) == -1 && ret == 0)
ret = 1;
free(ptr);
return ret;
}
static int handle_mmap(struct testcases *test)
{
void *ptr;
unsigned int flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED;
int ret = 0;
if (test->later == 0 && test->lam != 0)
if (set_lam(test->lam) != 0)
return 1;
ptr = mmap((void *)test->addr, PAGE_SIZE, PROT_READ | PROT_WRITE,
flags, -1, 0);
if (ptr == MAP_FAILED) {
if (test->addr == HIGH_ADDR)
if (!cpu_has_la57())
return 3; /* LA57 not supported */
return 1;
}
if (test->later != 0 && test->lam != 0)
if (set_lam(test->lam) != 0)
ret = 1;
if (ret == 0) {
if (sigsetjmp(segv_env, 1) == 0) {
signal(SIGSEGV, segv_handler);
ret = handle_lam_test(ptr, test->lam);
} else {
ret = 2;
}
}
munmap(ptr, PAGE_SIZE);
return ret;
}
static int handle_syscall(struct testcases *test)
{
struct utsname unme, *pu;
int ret = 0;
if (test->later == 0 && test->lam != 0)
if (set_lam(test->lam) != 0)
return 1;
if (sigsetjmp(segv_env, 1) == 0) {
signal(SIGSEGV, segv_handler);
pu = (struct utsname *)set_metadata((uint64_t)&unme, test->lam);
ret = uname(pu);
if (ret < 0)
ret = 1;
} else {
ret = 2;
}
if (test->later != 0 && test->lam != 0)
if (set_lam(test->lam) != -1 && ret == 0)
ret = 1;
return ret;
}
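/* Thin wrappers around the raw io_uring syscalls; no liburing dependency is assumed. */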
int sys_uring_setup(unsigned int entries, struct io_uring_params *p)
{
return (int)syscall(__NR_io_uring_setup, entries, p);
}
int sys_uring_enter(int fd, unsigned int to, unsigned int min, unsigned int flags)
{
return (int)syscall(__NR_io_uring_enter, fd, to, min, flags, NULL, 0);
}
/* Init submission queue and completion queue */
int mmap_io_uring(struct io_uring_params p, struct io_ring *s)
{
struct io_uring_queue *sring = &s->sq_ring;
struct io_uring_queue *cring = &s->cq_ring;
sring->ring_sz = p.sq_off.array + p.sq_entries * sizeof(unsigned int);
cring->ring_sz = p.cq_off.cqes + p.cq_entries * sizeof(struct io_uring_cqe);
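/*
 * With IORING_FEAT_SINGLE_MMAP the kernel exposes both rings through a
 * single mapping, so size it to the larger of the two.
 */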
if (p.features & IORING_FEAT_SINGLE_MMAP) {
if (cring->ring_sz > sring->ring_sz)
sring->ring_sz = cring->ring_sz;
cring->ring_sz = sring->ring_sz;
}
void *sq_ptr = mmap(0, sring->ring_sz, PROT_READ | PROT_WRITE,
MAP_SHARED | MAP_POPULATE, s->ring_fd,
IORING_OFF_SQ_RING);
if (sq_ptr == MAP_FAILED) {
perror("sub-queue!");
return 1;
}
void *cq_ptr = sq_ptr;
if (!(p.features & IORING_FEAT_SINGLE_MMAP)) {
cq_ptr = mmap(0, cring->ring_sz, PROT_READ | PROT_WRITE,
MAP_SHARED | MAP_POPULATE, s->ring_fd,
IORING_OFF_CQ_RING);
if (cq_ptr == MAP_FAILED) {
perror("cpl-queue!");
munmap(sq_ptr, sring->ring_sz);
return 1;
}
}
sring->head = sq_ptr + p.sq_off.head;
sring->tail = sq_ptr + p.sq_off.tail;
sring->ring_mask = sq_ptr + p.sq_off.ring_mask;
sring->ring_entries = sq_ptr + p.sq_off.ring_entries;
sring->flags = sq_ptr + p.sq_off.flags;
sring->array = sq_ptr + p.sq_off.array;
/* Map the submission queue entries (SQE) array */
s->sq_ring.queue.sqes = mmap(0, p.sq_entries * sizeof(struct io_uring_sqe),
PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
s->ring_fd, IORING_OFF_SQES);
if (s->sq_ring.queue.sqes == MAP_FAILED) {
ksft_print_msg("failed to mmap uring queue!");
munmap(sq_ptr, sring->ring_sz);
if (cq_ptr != sq_ptr)
munmap(cq_ptr, cring->ring_sz);
return 1;
}
cring->head = cq_ptr + p.cq_off.head;
cring->tail = cq_ptr + p.cq_off.tail;
cring->ring_mask = cq_ptr + p.cq_off.ring_mask;
cring->ring_entries = cq_ptr + p.cq_off.ring_entries;
cring->queue.cqes = cq_ptr + p.cq_off.cqes;
return 0;
}
/* Init io_uring queues */
int setup_io_uring(struct io_ring *s)
{
struct io_uring_params para;
memset(¶, 0, sizeof(para));
s->ring_fd = sys_uring_setup(URING_QUEUE_SZ, ¶);
if (s->ring_fd < 0)
return 1;
return mmap_io_uring(para, s);
}
/*
* Drain the completion queue and check that the total length read matches
* the file size.
* Return 0 on success; anything else indicates an error.
*/
int handle_uring_cq(struct io_ring *s)
{
struct file_io *fi = NULL;
struct io_uring_queue *cring = &s->cq_ring;
struct io_uring_cqe *cqe;
unsigned int head;
off_t len = 0;
head = *cring->head;
do {
barrier();
if (head == *cring->tail)
break;
/* Get the entry */
cqe = &cring->queue.cqes[head & *s->cq_ring.ring_mask];
fi = (struct file_io *)cqe->user_data;
if (cqe->res < 0)
break;
int blocks = (int)(fi->file_sz + URING_BLOCK_SZ - 1) / URING_BLOCK_SZ;
for (int i = 0; i < blocks; i++)
len += fi->iovecs[i].iov_len;
head++;
} while (1);
*cring->head = head;
barrier();
return (fi == NULL || len != fi->file_sz);
}
/*
* Fill and submit the submission queue with an IORING_OP_READV request.
* The iovec buffers are tagged with metadata according to the LAM mode.
*/
int handle_uring_sq(struct io_ring *ring, struct file_io *fi, unsigned long lam)
{
int file_fd = fi->file_fd;
struct io_uring_queue *sring = &ring->sq_ring;
unsigned int index = 0, cur_block = 0, tail = 0, next_tail = 0;
struct io_uring_sqe *sqe;
off_t remain = fi->file_sz;
int blocks = (int)(remain + URING_BLOCK_SZ - 1) / URING_BLOCK_SZ;
while (remain) {
off_t bytes = remain;
void *buf;
if (bytes > URING_BLOCK_SZ)
bytes = URING_BLOCK_SZ;
fi->iovecs[cur_block].iov_len = bytes;
if (posix_memalign(&buf, URING_BLOCK_SZ, URING_BLOCK_SZ))
return 1;
fi->iovecs[cur_block].iov_base = (void *)set_metadata((uint64_t)buf, lam);
remain -= bytes;
cur_block++;
}
next_tail = *sring->tail;
tail = next_tail;
next_tail++;
barrier();
index = tail & *ring->sq_ring.ring_mask;
sqe = &ring->sq_ring.queue.sqes[index];
sqe->fd = file_fd;
sqe->flags = 0;
sqe->opcode = IORING_OP_READV;
sqe->addr = (unsigned long)fi->iovecs;
sqe->len = blocks;
sqe->off = 0;
sqe->user_data = (uint64_t)fi;
sring->array[index] = index;
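/* Publish the SQE: advance the tail so the kernel can see the new entry */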
tail = next_tail;
if (*sring->tail != tail) {
*sring->tail = tail;
barrier();
}
if (sys_uring_enter(ring->ring_fd, 1, 1, IORING_ENTER_GETEVENTS) < 0)
return 1;
return 0;
}
/*
* Test LAM with io_uring async I/O: read the current binary through io_uring
* with metadata set in the iovec buffer pointers.
*/
int do_uring(unsigned long lam)
{
struct io_ring *ring;
struct file_io *fi;
struct stat st;
int ret = 1;
char path[PATH_MAX] = {0};
/* get current process path */
if (readlink("/proc/self/exe", path, PATH_MAX) <= 0)
return 1;
int file_fd = open(path, O_RDONLY);
if (file_fd < 0)
return 1;
if (fstat(file_fd, &st) < 0)
return 1;
off_t file_sz = st.st_size;
int blocks = (int)(file_sz + URING_BLOCK_SZ - 1) / URING_BLOCK_SZ;
fi = malloc(sizeof(*fi) + sizeof(struct iovec) * blocks);
if (!fi)
return 1;
fi->file_sz = file_sz;
fi->file_fd = file_fd;
ring = malloc(sizeof(*ring));
if (!ring)
return 1;
memset(ring, 0, sizeof(struct io_ring));
if (setup_io_uring(ring))
goto out;
if (handle_uring_sq(ring, fi, lam))
goto out;
ret = handle_uring_cq(ring);
out:
free(ring);
for (int i = 0; i < blocks; i++) {
if (fi->iovecs[i].iov_base) {
uint64_t addr = ((uint64_t)fi->iovecs[i].iov_base);
switch (lam) {
case LAM_U57_BITS: /* Clear bits 62:57 */
addr = (addr & ~(LAM_U57_MASK));
break;
}
free((void *)addr);
fi->iovecs[i].iov_base = NULL;
}
}
free(fi);
return ret;
}
int handle_uring(struct testcases *test)
{
int ret = 0;
if (test->later == 0 && test->lam != 0)
if (set_lam(test->lam) != 0)
return 1;
if (sigsetjmp(segv_env, 1) == 0) {
signal(SIGSEGV, segv_handler);
ret = do_uring(test->lam);
} else {
ret = 2;
}
return ret;
}
static int fork_test(struct testcases *test)
{
int ret, child_ret;
pid_t pid;
pid = fork();
if (pid < 0) {
perror("Fork failed.");
ret = 1;
} else if (pid == 0) {
ret = test->test_func(test);
exit(ret);
} else {
wait(&child_ret);
ret = WEXITSTATUS(child_ret);
}
return ret;
}
static int handle_execve(struct testcases *test)
{
int ret, child_ret;
int lam = test->lam;
pid_t pid;
pid = fork();
if (pid < 0) {
perror("Fork failed.");
ret = 1;
} else if (pid == 0) {
char path[PATH_MAX];
/* Set LAM mode in the child before exec */
if (set_lam(lam) != 0)
return 1;
/* Get the current binary's path so it can be re-run via execve */
if (readlink("/proc/self/exe", path, PATH_MAX) <= 0)
exit(-1);
/* Re-exec the binary; it reports its LAM mode back to the parent via the exit status */
if (execlp(path, path, "-t 0x0", NULL) < 0) {
perror("error on exec");
exit(-1);
}
} else {
wait(&child_ret);
ret = WEXITSTATUS(child_ret);
if (ret != LAM_NONE)
return 1;
}
return 0;
}
static int handle_inheritance(struct testcases *test)
{
int ret, child_ret;
int lam = test->lam;
pid_t pid;
/* Set LAM mode in parent process */
if (set_lam(lam) != 0)
return 1;
pid = fork();
if (pid < 0) {
perror("Fork failed.");
return 1;
} else if (pid == 0) {
/* Read the inherited LAM mode in the child */
int child_lam = get_lam();
exit(child_lam);
} else {
wait(&child_ret);
ret = WEXITSTATUS(child_ret);
if (lam != ret)
return 1;
}
return 0;
}
static int thread_fn_get_lam(void *arg)
{
return get_lam();
}
static int thread_fn_set_lam(void *arg)
{
struct testcases *test = arg;
return set_lam(test->lam);
}
static int handle_thread(struct testcases *test)
{
char stack[STACK_SIZE];
int ret, child_ret;
int lam = 0;
pid_t pid;
/* Set LAM mode in parent process */
if (!test->later) {
lam = test->lam;
if (set_lam(lam) != 0)
return 1;
}
pid = clone(thread_fn_get_lam, stack + STACK_SIZE,
SIGCHLD | CLONE_FILES | CLONE_FS | CLONE_VM, NULL);
if (pid < 0) {
perror("Clone failed.");
return 1;
}
waitpid(pid, &child_ret, 0);
ret = WEXITSTATUS(child_ret);
if (lam != ret)
return 1;
if (test->later) {
if (set_lam(test->lam) != 0)
return 1;
}
return 0;
}
static int handle_thread_enable(struct testcases *test)
{
char stack[STACK_SIZE];
int ret, child_ret;
int lam = test->lam;
pid_t pid;
pid = clone(thread_fn_set_lam, stack + STACK_SIZE,
SIGCHLD | CLONE_FILES | CLONE_FS | CLONE_VM, test);
if (pid < 0) {
perror("Clone failed.");
return 1;
}
waitpid(pid, &child_ret, 0);
ret = WEXITSTATUS(child_ret);
if (lam != ret)
return 1;
return 0;
}
static void run_test(struct testcases *test, int count)
{
int i, ret = 0;
for (i = 0; i < count; i++) {
struct testcases *t = test + i;
/* fork a process to run test case */
tests_cnt++;
ret = fork_test(t);
/* A return value of 3 means LA57 is not supported; skip the case */
if (ret == 3) {
ksft_test_result_skip(t->msg);
continue;
}
if (ret != 0)
ret = (t->expected == ret);
else
ret = !(t->expected);
ksft_test_result(ret, t->msg);
}
}
static struct testcases uring_cases[] = {
{
.later = 0,
.lam = LAM_U57_BITS,
.test_func = handle_uring,
.msg = "URING: LAM_U57. Dereferencing pointer with metadata\n",
},
{
.later = 1,
.expected = 1,
.lam = LAM_U57_BITS,
.test_func = handle_uring,
.msg = "URING:[Negative] Disable LAM. Dereferencing pointer with metadata.\n",
},
};
static struct testcases malloc_cases[] = {
{
.later = 0,
.lam = LAM_U57_BITS,
.test_func = handle_malloc,
.msg = "MALLOC: LAM_U57. Dereferencing pointer with metadata\n",
},
{
.later = 1,
.expected = 2,
.lam = LAM_U57_BITS,
.test_func = handle_malloc,
.msg = "MALLOC:[Negative] Disable LAM. Dereferencing pointer with metadata.\n",
},
};
static struct testcases bits_cases[] = {
{
.test_func = handle_max_bits,
.msg = "BITS: Check default tag bits\n",
},
};
static struct testcases syscall_cases[] = {
{
.later = 0,
.lam = LAM_U57_BITS,
.test_func = handle_syscall,
.msg = "SYSCALL: LAM_U57. syscall with metadata\n",
},
{
.later = 1,
.expected = 1,
.lam = LAM_U57_BITS,
.test_func = handle_syscall,
.msg = "SYSCALL:[Negative] Disable LAM. Dereferencing pointer with metadata.\n",
},
};
static struct testcases mmap_cases[] = {
{
.later = 1,
.expected = 0,
.lam = LAM_U57_BITS,
.addr = HIGH_ADDR,
.test_func = handle_mmap,
.msg = "MMAP: First mmap high address, then set LAM_U57.\n",
},
{
.later = 0,
.expected = 0,
.lam = LAM_U57_BITS,
.addr = HIGH_ADDR,
.test_func = handle_mmap,
.msg = "MMAP: First LAM_U57, then High address.\n",
},
{
.later = 0,
.expected = 0,
.lam = LAM_U57_BITS,
.addr = LOW_ADDR,
.test_func = handle_mmap,
.msg = "MMAP: First LAM_U57, then Low address.\n",
},
};
static struct testcases inheritance_cases[] = {
{
.expected = 0,
.lam = LAM_U57_BITS,
.test_func = handle_inheritance,
.msg = "FORK: LAM_U57, child process should get LAM mode same as parent\n",
},
{
.expected = 0,
.lam = LAM_U57_BITS,
.test_func = handle_thread,
.msg = "THREAD: LAM_U57, child thread should get LAM mode same as parent\n",
},
{
.expected = 1,
.lam = LAM_U57_BITS,
.test_func = handle_thread_enable,
.msg = "THREAD: [NEGATIVE] Enable LAM in child.\n",
},
{
.expected = 1,
.later = 1,
.lam = LAM_U57_BITS,
.test_func = handle_thread,
.msg = "THREAD: [NEGATIVE] Enable LAM in parent after thread created.\n",
},
{
.expected = 0,
.lam = LAM_U57_BITS,
.test_func = handle_execve,
.msg = "EXECVE: LAM_U57, child process should get disabled LAM mode\n",
},
};
static void cmd_help(void)
{
printf("usage: lam [-h] [-t test list]\n");
printf("\t-t test list: run tests specified in the test list, default:0x%x\n", TEST_MASK);
printf("\t\t0x1:malloc; 0x2:max_bits; 0x4:mmap; 0x8:syscall; 0x10:io_uring; 0x20:inherit;\n");
printf("\t-h: help\n");
}
/* Check for file existence */
uint8_t file_Exists(const char *fileName)
{
struct stat buffer;
uint8_t ret = (stat(fileName, &buffer) == 0);
return ret;
}
/* Sysfs idxd files */
const char *dsa_configs[] = {
"echo 1 > /sys/bus/dsa/devices/dsa0/wq0.1/group_id",
"echo shared > /sys/bus/dsa/devices/dsa0/wq0.1/mode",
"echo 10 > /sys/bus/dsa/devices/dsa0/wq0.1/priority",
"echo 16 > /sys/bus/dsa/devices/dsa0/wq0.1/size",
"echo 15 > /sys/bus/dsa/devices/dsa0/wq0.1/threshold",
"echo user > /sys/bus/dsa/devices/dsa0/wq0.1/type",
"echo MyApp1 > /sys/bus/dsa/devices/dsa0/wq0.1/name",
"echo 1 > /sys/bus/dsa/devices/dsa0/engine0.1/group_id",
"echo dsa0 > /sys/bus/dsa/drivers/idxd/bind",
/* Bind the device and work queue; this generates a device file in /dev */
"echo wq0.1 > /sys/bus/dsa/drivers/user/bind",
};
/* DSA device file */
const char *dsaDeviceFile = "/dev/dsa/wq0.1";
/* Sysfs file that reports whether PASID support is enabled */
const char *dsaPasidEnable = "/sys/bus/dsa/devices/dsa0/pasid_enabled";
/*
* DSA depends on the kernel cmdline "intel_iommu=on,sm_on".
* Returns the value of pasid_enabled (0: disabled, 1: enabled).
*/
int Check_DSA_Kernel_Setting(void)
{
char command[256] = "";
char buf[256] = "";
char *ptr;
int rv = -1;
snprintf(command, sizeof(command) - 1, "cat %s", dsaPasidEnable);
FILE *cmd = popen(command, "r");
if (cmd) {
while (fgets(buf, sizeof(buf) - 1, cmd) != NULL);
pclose(cmd);
rv = strtol(buf, &ptr, 16);
}
return rv;
}
/*
* Configure DSA's sysfs files to create a shared work queue.
* On success the device file /dev/dsa/wq0.1 is generated.
* Return: 0 OK; 1 failed; 3 skip (SVA disabled).
*/
int Dsa_Init_Sysfs(void)
{
uint len = ARRAY_SIZE(dsa_configs);
const char **p = dsa_configs;
if (file_Exists(dsaDeviceFile) == 1)
return 0;
/* check the idxd driver */
if (file_Exists(dsaPasidEnable) != 1) {
printf("Please make sure idxd driver was loaded\n");
return 3;
}
/* Check SVA feature */
if (Check_DSA_Kernel_Setting() != 1) {
printf("Please enable SVA.(Add intel_iommu=on,sm_on in kernel cmdline)\n");
return 3;
}
/* Run the sysfs configuration commands */
for (int i = 0; i < len; i++) {
if (system(p[i]))
return 1;
}
/* After config, /dev/dsa/wq0.1 should be generated */
return (file_Exists(dsaDeviceFile) != 1);
}
/*
* Open the DSA device file; this triggers the kernel API iommu_sva_alloc_pasid().
*/
void *allocate_dsa_pasid(void)
{
int fd;
void *wq;
fd = open(dsaDeviceFile, O_RDWR);
if (fd < 0) {
perror("open");
return MAP_FAILED;
}
wq = mmap(NULL, 0x1000, PROT_WRITE,
MAP_SHARED | MAP_POPULATE, fd, 0);
if (wq == MAP_FAILED)
perror("mmap");
return wq;
}
int set_force_svm(void)
{
int ret = 0;
ret = syscall(SYS_arch_prctl, ARCH_FORCE_TAGGED_SVA);
return ret;
}
int handle_pasid(struct testcases *test)
{
uint tmp = test->cmd;
uint runed = 0x0;
int ret = 0;
void *wq = NULL;
ret = Dsa_Init_Sysfs();
if (ret != 0)
return ret;
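/*
 * test->cmd packs three 4-bit steps (see PAS_CMD); each iteration consumes
 * the low nibble and runs the matching step: enable LAM, allocate a PASID
 * via the DSA device, or force tagged SVA.
 */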
for (int i = 0; i < 3; i++) {
int err = 0;
if (tmp & 0x1) {
/* run set lam mode */
if ((runed & 0x1) == 0) {
err = set_lam(LAM_U57_BITS);
runed = runed | 0x1;
} else
err = 1;
} else if (tmp & 0x4) {
/* run force svm */
if ((runed & 0x4) == 0) {
err = set_force_svm();
runed = runed | 0x4;
} else
err = 1;
} else if (tmp & 0x2) {
/* run allocate pasid */
if ((runed & 0x2) == 0) {
runed = runed | 0x2;
wq = allocate_dsa_pasid();
if (wq == MAP_FAILED)
err = 1;
} else
err = 1;
}
ret = ret + err;
if (ret > 0)
break;
tmp = tmp >> 4;
}
if (wq != MAP_FAILED && wq != NULL)
if (munmap(wq, 0x1000))
printf("munmap failed %d\n", errno);
if (runed != 0x7)
ret = 1;
return (ret != 0);
}
/*
* Pasid test depends on idxd and SVA, kernel should enable iommu and sm.
* command line(intel_iommu=on,sm_on)
*/
static struct testcases pasid_cases[] = {
{
.expected = 1,
.cmd = PAS_CMD(LAM_CMD_BIT, PAS_CMD_BIT, SVA_CMD_BIT),
.test_func = handle_pasid,
.msg = "PASID: [Negative] Execute LAM, PASID, SVA in sequence\n",
},
{
.expected = 0,
.cmd = PAS_CMD(LAM_CMD_BIT, SVA_CMD_BIT, PAS_CMD_BIT),
.test_func = handle_pasid,
.msg = "PASID: Execute LAM, SVA, PASID in sequence\n",
},
{
.expected = 1,
.cmd = PAS_CMD(PAS_CMD_BIT, LAM_CMD_BIT, SVA_CMD_BIT),
.test_func = handle_pasid,
.msg = "PASID: [Negative] Execute PASID, LAM, SVA in sequence\n",
},
{
.expected = 0,
.cmd = PAS_CMD(PAS_CMD_BIT, SVA_CMD_BIT, LAM_CMD_BIT),
.test_func = handle_pasid,
.msg = "PASID: Execute PASID, SVA, LAM in sequence\n",
},
{
.expected = 0,
.cmd = PAS_CMD(SVA_CMD_BIT, LAM_CMD_BIT, PAS_CMD_BIT),
.test_func = handle_pasid,
.msg = "PASID: Execute SVA, LAM, PASID in sequence\n",
},
{
.expected = 0,
.cmd = PAS_CMD(SVA_CMD_BIT, PAS_CMD_BIT, LAM_CMD_BIT),
.test_func = handle_pasid,
.msg = "PASID: Execute SVA, PASID, LAM in sequence\n",
},
};
int main(int argc, char **argv)
{
int c = 0;
unsigned int tests = TEST_MASK;
tests_cnt = 0;
if (!cpu_has_lam()) {
ksft_print_msg("Unsupported LAM feature!\n");
return -1;
}
while ((c = getopt(argc, argv, "ht:")) != -1) {
switch (c) {
case 't':
tests = strtoul(optarg, NULL, 16);
if (tests && !(tests & TEST_MASK)) {
ksft_print_msg("Invalid argument!\n");
return -1;
}
break;
case 'h':
cmd_help();
return 0;
default:
ksft_print_msg("Invalid argument\n");
return -1;
}
}
/*
* A test mask of 0 is not a real test case; it is used by the execve
* test case: the re-exec'ed process reads back its LAM mode and returns
* it so the parent can compare it against its own.
*/
if (!tests)
return (get_lam());
/* Run test cases */
if (tests & FUNC_MALLOC)
run_test(malloc_cases, ARRAY_SIZE(malloc_cases));
if (tests & FUNC_BITS)
run_test(bits_cases, ARRAY_SIZE(bits_cases));
if (tests & FUNC_MMAP)
run_test(mmap_cases, ARRAY_SIZE(mmap_cases));
if (tests & FUNC_SYSCALL)
run_test(syscall_cases, ARRAY_SIZE(syscall_cases));
if (tests & FUNC_URING)
run_test(uring_cases, ARRAY_SIZE(uring_cases));
if (tests & FUNC_INHERITE)
run_test(inheritance_cases, ARRAY_SIZE(inheritance_cases));
if (tests & FUNC_PASID)
run_test(pasid_cases, ARRAY_SIZE(pasid_cases));
ksft_set_plan(tests_cnt);
return ksft_exit_pass();
}
| linux-master | tools/testing/selftests/x86/lam.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* entry_from_vm86.c - tests kernel entries from vm86 mode
* Copyright (c) 2014-2015 Andrew Lutomirski
*
* This exercises a few paths that need to special-case vm86 mode.
*/
#define _GNU_SOURCE
#include <assert.h>
#include <stdlib.h>
#include <sys/syscall.h>
#include <sys/signal.h>
#include <sys/ucontext.h>
#include <unistd.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>
#include <sys/mman.h>
#include <err.h>
#include <stddef.h>
#include <stdbool.h>
#include <errno.h>
#include <sys/vm86.h>
static unsigned long load_addr = 0x10000;
static int nerrs = 0;
static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *),
int flags)
{
struct sigaction sa;
memset(&sa, 0, sizeof(sa));
sa.sa_sigaction = handler;
sa.sa_flags = SA_SIGINFO | flags;
sigemptyset(&sa.sa_mask);
if (sigaction(sig, &sa, 0))
err(1, "sigaction");
}
static void clearhandler(int sig)
{
struct sigaction sa;
memset(&sa, 0, sizeof(sa));
sa.sa_handler = SIG_DFL;
sigemptyset(&sa.sa_mask);
if (sigaction(sig, &sa, 0))
err(1, "sigaction");
}
static sig_atomic_t got_signal;
static void sighandler(int sig, siginfo_t *info, void *ctx_void)
{
ucontext_t *ctx = (ucontext_t*)ctx_void;
if (ctx->uc_mcontext.gregs[REG_EFL] & X86_EFLAGS_VM ||
(ctx->uc_mcontext.gregs[REG_CS] & 3) != 3) {
printf("[FAIL]\tSignal frame should not reflect vm86 mode\n");
nerrs++;
}
const char *signame;
if (sig == SIGSEGV)
signame = "SIGSEGV";
else if (sig == SIGILL)
signame = "SIGILL";
else
signame = "unexpected signal";
printf("[INFO]\t%s: FLAGS = 0x%lx, CS = 0x%hx\n", signame,
(unsigned long)ctx->uc_mcontext.gregs[REG_EFL],
(unsigned short)ctx->uc_mcontext.gregs[REG_CS]);
got_signal = 1;
}
asm (
".pushsection .rodata\n\t"
".type vmcode_bound, @object\n\t"
"vmcode:\n\t"
"vmcode_bound:\n\t"
".code16\n\t"
"bound %ax, (2048)\n\t"
"int3\n\t"
"vmcode_sysenter:\n\t"
"sysenter\n\t"
"vmcode_syscall:\n\t"
"syscall\n\t"
"vmcode_sti:\n\t"
"sti\n\t"
"vmcode_int3:\n\t"
"int3\n\t"
"vmcode_int80:\n\t"
"int $0x80\n\t"
"vmcode_popf_hlt:\n\t"
"push %ax\n\t"
"popf\n\t"
"hlt\n\t"
"vmcode_umip:\n\t"
/* addressing via displacements */
"smsw (2052)\n\t"
"sidt (2054)\n\t"
"sgdt (2060)\n\t"
/* addressing via registers */
"mov $2066, %bx\n\t"
"smsw (%bx)\n\t"
"mov $2068, %bx\n\t"
"sidt (%bx)\n\t"
"mov $2074, %bx\n\t"
"sgdt (%bx)\n\t"
/* register operands, only for smsw */
"smsw %ax\n\t"
"mov %ax, (2080)\n\t"
"int3\n\t"
"vmcode_umip_str:\n\t"
"str %eax\n\t"
"vmcode_umip_sldt:\n\t"
"sldt %eax\n\t"
"int3\n\t"
".size vmcode, . - vmcode\n\t"
"end_vmcode:\n\t"
".code32\n\t"
".popsection"
);
extern unsigned char vmcode[], end_vmcode[];
extern unsigned char vmcode_bound[], vmcode_sysenter[], vmcode_syscall[],
vmcode_sti[], vmcode_int3[], vmcode_int80[], vmcode_popf_hlt[],
vmcode_umip[], vmcode_umip_str[], vmcode_umip_sldt[];
/* Returns false if the test was skipped. */
static bool do_test(struct vm86plus_struct *v86, unsigned long eip,
unsigned int rettype, unsigned int retarg,
const char *text)
{
long ret;
printf("[RUN]\t%s from vm86 mode\n", text);
v86->regs.eip = eip;
ret = vm86(VM86_ENTER, v86);
if (ret == -1 && (errno == ENOSYS || errno == EPERM)) {
printf("[SKIP]\tvm86 %s\n",
errno == ENOSYS ? "not supported" : "not allowed");
return false;
}
if (VM86_TYPE(ret) == VM86_INTx) {
char trapname[32];
int trapno = VM86_ARG(ret);
if (trapno == 13)
strcpy(trapname, "GP");
else if (trapno == 5)
strcpy(trapname, "BR");
else if (trapno == 14)
strcpy(trapname, "PF");
else
sprintf(trapname, "%d", trapno);
printf("[INFO]\tExited vm86 mode due to #%s\n", trapname);
} else if (VM86_TYPE(ret) == VM86_UNKNOWN) {
printf("[INFO]\tExited vm86 mode due to unhandled GP fault\n");
} else if (VM86_TYPE(ret) == VM86_TRAP) {
printf("[INFO]\tExited vm86 mode due to a trap (arg=%ld)\n",
VM86_ARG(ret));
} else if (VM86_TYPE(ret) == VM86_SIGNAL) {
printf("[INFO]\tExited vm86 mode due to a signal\n");
} else if (VM86_TYPE(ret) == VM86_STI) {
printf("[INFO]\tExited vm86 mode due to STI\n");
} else {
printf("[INFO]\tExited vm86 mode due to type %ld, arg %ld\n",
VM86_TYPE(ret), VM86_ARG(ret));
}
if (rettype == -1 ||
(VM86_TYPE(ret) == rettype && VM86_ARG(ret) == retarg)) {
printf("[OK]\tReturned correctly\n");
} else {
printf("[FAIL]\tIncorrect return reason (started at eip = 0x%lx, ended at eip = 0x%lx)\n", eip, v86->regs.eip);
nerrs++;
}
return true;
}
void do_umip_tests(struct vm86plus_struct *vm86, unsigned char *test_mem)
{
struct table_desc {
unsigned short limit;
unsigned long base;
} __attribute__((packed));
/* Initialize variables with arbitrary values */
struct table_desc gdt1 = { .base = 0x3c3c3c3c, .limit = 0x9999 };
struct table_desc gdt2 = { .base = 0x1a1a1a1a, .limit = 0xaeae };
struct table_desc idt1 = { .base = 0x7b7b7b7b, .limit = 0xf1f1 };
struct table_desc idt2 = { .base = 0x89898989, .limit = 0x1313 };
unsigned short msw1 = 0x1414, msw2 = 0x2525, msw3 = 3737;
/* UMIP -- should exit via INT3, provided the kernel emulated the #GP-faulting instructions */
do_test(vm86, vmcode_umip - vmcode, VM86_TRAP, 3, "UMIP tests");
/* Results from displacement-only addressing */
msw1 = *(unsigned short *)(test_mem + 2052);
memcpy(&idt1, test_mem + 2054, sizeof(idt1));
memcpy(&gdt1, test_mem + 2060, sizeof(gdt1));
/* Results from register-indirect addressing */
msw2 = *(unsigned short *)(test_mem + 2066);
memcpy(&idt2, test_mem + 2068, sizeof(idt2));
memcpy(&gdt2, test_mem + 2074, sizeof(gdt2));
/* Results when using register operands */
msw3 = *(unsigned short *)(test_mem + 2080);
printf("[INFO]\tResult from SMSW:[0x%04x]\n", msw1);
printf("[INFO]\tResult from SIDT: limit[0x%04x]base[0x%08lx]\n",
idt1.limit, idt1.base);
printf("[INFO]\tResult from SGDT: limit[0x%04x]base[0x%08lx]\n",
gdt1.limit, gdt1.base);
if (msw1 != msw2 || msw1 != msw3)
printf("[FAIL]\tAll the results of SMSW should be the same.\n");
else
printf("[PASS]\tAll the results from SMSW are identical.\n");
if (memcmp(&gdt1, &gdt2, sizeof(gdt1)))
printf("[FAIL]\tAll the results of SGDT should be the same.\n");
else
printf("[PASS]\tAll the results from SGDT are identical.\n");
if (memcmp(&idt1, &idt2, sizeof(idt1)))
printf("[FAIL]\tAll the results of SIDT should be the same.\n");
else
printf("[PASS]\tAll the results from SIDT are identical.\n");
sethandler(SIGILL, sighandler, 0);
do_test(vm86, vmcode_umip_str - vmcode, VM86_SIGNAL, 0,
"STR instruction");
clearhandler(SIGILL);
sethandler(SIGILL, sighandler, 0);
do_test(vm86, vmcode_umip_sldt - vmcode, VM86_SIGNAL, 0,
"SLDT instruction");
clearhandler(SIGILL);
}
int main(void)
{
struct vm86plus_struct v86;
unsigned char *addr = mmap((void *)load_addr, 4096,
PROT_READ | PROT_WRITE | PROT_EXEC,
MAP_ANONYMOUS | MAP_PRIVATE, -1,0);
if (addr != (unsigned char *)load_addr)
err(1, "mmap");
memcpy(addr, vmcode, end_vmcode - vmcode);
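/* Bounds [2, 3] for the "bound %ax, (2048)" test; ax is 0 at test time, so the check raises #BR */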
addr[2048] = 2;
addr[2050] = 3;
memset(&v86, 0, sizeof(v86));
v86.regs.cs = load_addr / 16;
v86.regs.ss = load_addr / 16;
v86.regs.ds = load_addr / 16;
v86.regs.es = load_addr / 16;
/* Use the end of the page as our stack. */
v86.regs.esp = 4096;
assert((v86.regs.cs & 3) == 0); /* Looks like RPL = 0 */
/* #BR -- should exit vm86 mode via VM86_INTx with trap 5 */
do_test(&v86, vmcode_bound - vmcode, VM86_INTx, 5, "#BR");
/*
* SYSENTER -- should cause #GP or #UD depending on CPU.
* Expected return type -1 means that we shouldn't validate
* the vm86 return value. This will avoid problems on non-SEP
* CPUs.
*/
sethandler(SIGILL, sighandler, 0);
do_test(&v86, vmcode_sysenter - vmcode, -1, 0, "SYSENTER");
clearhandler(SIGILL);
/*
* SYSCALL would be a disaster in VM86 mode. Fortunately,
* there is no kernel that both enables SYSCALL and sets
* EFER.SCE, so it's #UD on all systems. But vm86 is
* buggy (or has a "feature"), so the SIGILL will actually
* be delivered.
*/
sethandler(SIGILL, sighandler, 0);
do_test(&v86, vmcode_syscall - vmcode, VM86_SIGNAL, 0, "SYSCALL");
clearhandler(SIGILL);
/* STI with VIP set */
v86.regs.eflags |= X86_EFLAGS_VIP;
v86.regs.eflags &= ~X86_EFLAGS_IF;
do_test(&v86, vmcode_sti - vmcode, VM86_STI, 0, "STI with VIP set");
/* POPF with VIP set but IF clear: should not trap */
v86.regs.eflags = X86_EFLAGS_VIP;
v86.regs.eax = 0;
do_test(&v86, vmcode_popf_hlt - vmcode, VM86_UNKNOWN, 0, "POPF with VIP set and IF clear");
/* POPF with VIP set and IF set: should trap */
v86.regs.eflags = X86_EFLAGS_VIP;
v86.regs.eax = X86_EFLAGS_IF;
do_test(&v86, vmcode_popf_hlt - vmcode, VM86_STI, 0, "POPF with VIP and IF set");
/* POPF with VIP clear and IF set: should not trap */
v86.regs.eflags = 0;
v86.regs.eax = X86_EFLAGS_IF;
do_test(&v86, vmcode_popf_hlt - vmcode, VM86_UNKNOWN, 0, "POPF with VIP clear and IF set");
v86.regs.eflags = 0;
/* INT3 -- should cause #BP */
do_test(&v86, vmcode_int3 - vmcode, VM86_TRAP, 3, "INT3");
/* INT80 -- should exit with "INTx 0x80" */
v86.regs.eax = (unsigned int)-1;
do_test(&v86, vmcode_int80 - vmcode, VM86_INTx, 0x80, "int80");
/* UMIP -- the tests in do_umip_tests() should exit via INT3 unless kernel emulation is missing */
do_umip_tests(&v86, addr);
/* Execute a null pointer */
v86.regs.cs = 0;
v86.regs.ss = 0;
sethandler(SIGSEGV, sighandler, 0);
got_signal = 0;
if (do_test(&v86, 0, VM86_SIGNAL, 0, "Execute null pointer") &&
!got_signal) {
printf("[FAIL]\tDid not receive SIGSEGV\n");
nerrs++;
}
clearhandler(SIGSEGV);
/* Make sure nothing explodes if we fork. */
if (fork() == 0)
return 0;
return (nerrs == 0 ? 0 : 1);
}
| linux-master | tools/testing/selftests/x86/entry_from_vm86.c |
// SPDX-License-Identifier: GPL-2.0
/*
* iopl.c - Test case for a Linux on Xen 64-bit bug
* Copyright (c) 2015 Andrew Lutomirski
*/
#define _GNU_SOURCE
#include <err.h>
#include <stdio.h>
#include <stdint.h>
#include <signal.h>
#include <setjmp.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <stdbool.h>
#include <sched.h>
#include <sys/io.h>
static int nerrs = 0;
static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *),
int flags)
{
struct sigaction sa;
memset(&sa, 0, sizeof(sa));
sa.sa_sigaction = handler;
sa.sa_flags = SA_SIGINFO | flags;
sigemptyset(&sa.sa_mask);
if (sigaction(sig, &sa, 0))
err(1, "sigaction");
}
static void clearhandler(int sig)
{
struct sigaction sa;
memset(&sa, 0, sizeof(sa));
sa.sa_handler = SIG_DFL;
sigemptyset(&sa.sa_mask);
if (sigaction(sig, &sa, 0))
err(1, "sigaction");
}
static jmp_buf jmpbuf;
static void sigsegv(int sig, siginfo_t *si, void *ctx_void)
{
siglongjmp(jmpbuf, 1);
}
static bool try_outb(unsigned short port)
{
bool ret;
sethandler(SIGSEGV, sigsegv, SA_RESETHAND);
if (sigsetjmp(jmpbuf, 1) != 0) {
ret = false;
} else {
asm volatile ("outb %%al, %w[port]"
: : [port] "Nd" (port), "a" (0));
ret = true;
}
clearhandler(SIGSEGV);
return ret;
}
static void expect_ok_outb(unsigned short port)
{
if (!try_outb(port)) {
printf("[FAIL]\toutb to 0x%02hx failed\n", port);
exit(1);
}
printf("[OK]\toutb to 0x%02hx worked\n", port);
}
static void expect_gp_outb(unsigned short port)
{
if (try_outb(port)) {
printf("[FAIL]\toutb to 0x%02hx worked\n", port);
nerrs++;
}
printf("[OK]\toutb to 0x%02hx failed\n", port);
}
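/* Outcomes of try_cli()/try_sti(): the instruction faulted, actually took effect, or was silently emulated as a NOP. */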
#define RET_FAULTED 0
#define RET_FAIL 1
#define RET_EMUL 2
static int try_cli(void)
{
unsigned long flags;
int ret;
sethandler(SIGSEGV, sigsegv, SA_RESETHAND);
if (sigsetjmp(jmpbuf, 1) != 0) {
ret = RET_FAULTED;
} else {
asm volatile("cli; pushf; pop %[flags]"
: [flags] "=rm" (flags));
/* X86_EFLAGS_IF */
if (!(flags & (1 << 9)))
ret = RET_FAIL;
else
ret = RET_EMUL;
}
clearhandler(SIGSEGV);
return ret;
}
static int try_sti(bool irqs_off)
{
unsigned long flags;
int ret;
sethandler(SIGSEGV, sigsegv, SA_RESETHAND);
if (sigsetjmp(jmpbuf, 1) != 0) {
ret = RET_FAULTED;
} else {
asm volatile("sti; pushf; pop %[flags]"
: [flags] "=rm" (flags));
/* X86_EFLAGS_IF */
if (irqs_off && (flags & (1 << 9)))
ret = RET_FAIL;
else
ret = RET_EMUL;
}
clearhandler(SIGSEGV);
return ret;
}
static void expect_gp_sti(bool irqs_off)
{
int ret = try_sti(irqs_off);
switch (ret) {
case RET_FAULTED:
printf("[OK]\tSTI faulted\n");
break;
case RET_EMUL:
printf("[OK]\tSTI NOPped\n");
break;
default:
printf("[FAIL]\tSTI worked\n");
nerrs++;
}
}
/*
* Returns whether it managed to disable interrupts.
*/
static bool test_cli(void)
{
int ret = try_cli();
switch (ret) {
case RET_FAULTED:
printf("[OK]\tCLI faulted\n");
break;
case RET_EMUL:
printf("[OK]\tCLI NOPped\n");
break;
default:
printf("[FAIL]\tCLI worked\n");
nerrs++;
return true;
}
return false;
}
int main(void)
{
cpu_set_t cpuset;
CPU_ZERO(&cpuset);
CPU_SET(0, &cpuset);
if (sched_setaffinity(0, sizeof(cpuset), &cpuset) != 0)
err(1, "sched_setaffinity to CPU 0");
/* Probe for iopl support. Note that iopl(0) works even as nonroot. */
switch(iopl(3)) {
case 0:
break;
case -ENOSYS:
printf("[OK]\tiopl() nor supported\n");
return 0;
default:
printf("[OK]\tiopl(3) failed (%d) -- try running as root\n",
errno);
return 0;
}
/* Make sure that CLI/STI are blocked even with IOPL level 3 */
expect_gp_sti(test_cli());
expect_ok_outb(0x80);
/* Establish an I/O bitmap to test the restore */
if (ioperm(0x80, 1, 1) != 0)
err(1, "ioperm(0x80, 1, 1) failed\n");
/* Restore our original state prior to starting the fork test. */
if (iopl(0) != 0)
err(1, "iopl(0)");
/*
* Verify that IOPL emulation is disabled and the I/O bitmap still
* works.
*/
expect_ok_outb(0x80);
expect_gp_outb(0xed);
/* Drop the I/O bitmap */
if (ioperm(0x80, 1, 0) != 0)
err(1, "ioperm(0x80, 1, 0) failed\n");
pid_t child = fork();
if (child == -1)
err(1, "fork");
if (child == 0) {
printf("\tchild: set IOPL to 3\n");
if (iopl(3) != 0)
err(1, "iopl");
printf("[RUN]\tchild: write to 0x80\n");
asm volatile ("outb %%al, $0x80" : : "a" (0));
return 0;
} else {
int status;
if (waitpid(child, &status, 0) != child ||
!WIFEXITED(status)) {
printf("[FAIL]\tChild died\n");
nerrs++;
} else if (WEXITSTATUS(status) != 0) {
printf("[FAIL]\tChild failed\n");
nerrs++;
} else {
printf("[OK]\tChild succeeded\n");
}
}
printf("[RUN]\tparent: write to 0x80 (should fail)\n");
expect_gp_outb(0x80);
expect_gp_sti(test_cli());
/* Test the capability checks. */
printf("\tiopl(3)\n");
if (iopl(3) != 0)
err(1, "iopl(3)");
printf("\tDrop privileges\n");
if (setresuid(1, 1, 1) != 0) {
printf("[WARN]\tDropping privileges failed\n");
goto done;
}
printf("[RUN]\tiopl(3) unprivileged but with IOPL==3\n");
if (iopl(3) != 0) {
printf("[FAIL]\tiopl(3) should work if iopl is already 3 even if unprivileged\n");
nerrs++;
}
printf("[RUN]\tiopl(0) unprivileged\n");
if (iopl(0) != 0) {
printf("[FAIL]\tiopl(0) should work if iopl is already 3 even if unprivileged\n");
nerrs++;
}
printf("[RUN]\tiopl(3) unprivileged\n");
if (iopl(3) == 0) {
printf("[FAIL]\tiopl(3) should fail if when unprivileged if iopl==0\n");
nerrs++;
} else {
printf("[OK]\tFailed as expected\n");
}
done:
return nerrs ? 1 : 0;
}
| linux-master | tools/testing/selftests/x86/iopl.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Trivial program to check that we have a valid 32-bit build environment.
* Copyright (c) 2015 Andy Lutomirski
*/
#ifndef __i386__
# error wrong architecture
#endif
#include <stdio.h>
int main()
{
printf("\n");
return 0;
}
| linux-master | tools/testing/selftests/x86/trivial_32bit_program.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* unwind_vdso.c - tests unwind info for AT_SYSINFO in the vDSO
* Copyright (c) 2014-2015 Andrew Lutomirski
*
* This tests __kernel_vsyscall's unwind info.
*/
#define _GNU_SOURCE
#include <features.h>
#include <stdio.h>
#include "helpers.h"
#if defined(__GLIBC__) && __GLIBC__ == 2 && __GLIBC_MINOR__ < 16
int main()
{
/* We need getauxval(). */
printf("[SKIP]\tGLIBC before 2.16 cannot compile this test\n");
return 0;
}
#else
#include <sys/time.h>
#include <stdlib.h>
#include <syscall.h>
#include <unistd.h>
#include <string.h>
#include <inttypes.h>
#include <sys/mman.h>
#include <signal.h>
#include <sys/ucontext.h>
#include <err.h>
#include <stddef.h>
#include <stdbool.h>
#include <sys/ptrace.h>
#include <sys/user.h>
#include <link.h>
#include <sys/auxv.h>
#include <dlfcn.h>
#include <unwind.h>
static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *),
int flags)
{
struct sigaction sa;
memset(&sa, 0, sizeof(sa));
sa.sa_sigaction = handler;
sa.sa_flags = SA_SIGINFO | flags;
sigemptyset(&sa.sa_mask);
if (sigaction(sig, &sa, 0))
err(1, "sigaction");
}
static volatile sig_atomic_t nerrs;
static unsigned long sysinfo;
static bool got_sysinfo = false;
static unsigned long return_address;
struct unwind_state {
unsigned long ip; /* trap source */
int depth; /* -1 until we hit the trap source */
};
_Unwind_Reason_Code trace_fn(struct _Unwind_Context * ctx, void *opaque)
{
struct unwind_state *state = opaque;
unsigned long ip = _Unwind_GetIP(ctx);
if (state->depth == -1) {
if (ip == state->ip)
state->depth = 0;
else
return _URC_NO_REASON; /* Not there yet */
}
printf("\t 0x%lx\n", ip);
if (ip == return_address) {
/* Here we are. */
unsigned long eax = _Unwind_GetGR(ctx, 0);
unsigned long ecx = _Unwind_GetGR(ctx, 1);
unsigned long edx = _Unwind_GetGR(ctx, 2);
unsigned long ebx = _Unwind_GetGR(ctx, 3);
unsigned long ebp = _Unwind_GetGR(ctx, 5);
unsigned long esi = _Unwind_GetGR(ctx, 6);
unsigned long edi = _Unwind_GetGR(ctx, 7);
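/* These should match syscall(SYS_getpid, 1, 2, 3, 4, 5, 6) issued from main() */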
bool ok = (eax == SYS_getpid || eax == getpid()) &&
ebx == 1 && ecx == 2 && edx == 3 &&
esi == 4 && edi == 5 && ebp == 6;
if (!ok)
nerrs++;
printf("[%s]\t NR = %ld, args = %ld, %ld, %ld, %ld, %ld, %ld\n",
(ok ? "OK" : "FAIL"),
eax, ebx, ecx, edx, esi, edi, ebp);
return _URC_NORMAL_STOP;
} else {
state->depth++;
return _URC_NO_REASON;
}
}
static void sigtrap(int sig, siginfo_t *info, void *ctx_void)
{
ucontext_t *ctx = (ucontext_t *)ctx_void;
struct unwind_state state;
unsigned long ip = ctx->uc_mcontext.gregs[REG_EIP];
if (!got_sysinfo && ip == sysinfo) {
got_sysinfo = true;
/* Find the return address. */
return_address = *(unsigned long *)(unsigned long)ctx->uc_mcontext.gregs[REG_ESP];
printf("\tIn vsyscall at 0x%lx, returning to 0x%lx\n",
ip, return_address);
}
if (!got_sysinfo)
return; /* Not there yet */
if (ip == return_address) {
ctx->uc_mcontext.gregs[REG_EFL] &= ~X86_EFLAGS_TF;
printf("\tVsyscall is done\n");
return;
}
printf("\tSIGTRAP at 0x%lx\n", ip);
state.ip = ip;
state.depth = -1;
_Unwind_Backtrace(trace_fn, &state);
}
int main()
{
sysinfo = getauxval(AT_SYSINFO);
printf("\tAT_SYSINFO is 0x%lx\n", sysinfo);
Dl_info info;
if (!dladdr((void *)sysinfo, &info)) {
printf("[WARN]\tdladdr failed on AT_SYSINFO\n");
} else {
printf("[OK]\tAT_SYSINFO maps to %s, loaded at 0x%p\n",
info.dli_fname, info.dli_fbase);
}
sethandler(SIGTRAP, sigtrap, 0);
syscall(SYS_getpid); /* Force symbol binding without TF set. */
printf("[RUN]\tSet TF and check a fast syscall\n");
set_eflags(get_eflags() | X86_EFLAGS_TF);
syscall(SYS_getpid, 1, 2, 3, 4, 5, 6);
if (!got_sysinfo) {
set_eflags(get_eflags() & ~X86_EFLAGS_TF);
/*
* The most likely cause of this is that you're on Debian or
* a Debian-based distro, you're missing libc6-i686, and you're
* affected by libc/19006 (https://sourceware.org/PR19006).
*/
printf("[WARN]\tsyscall(2) didn't enter AT_SYSINFO\n");
}
if (get_eflags() & X86_EFLAGS_TF) {
printf("[FAIL]\tTF is still set\n");
nerrs++;
}
if (nerrs) {
printf("[FAIL]\tThere were errors\n");
return 1;
} else {
printf("[OK]\tAll is well\n");
return 0;
}
}
#endif /* New enough libc */
| linux-master | tools/testing/selftests/x86/unwind_vdso.c |
// SPDX-License-Identifier: GPL-2.0
/*
* ldt_gdt.c - Test cases for LDT and GDT access
* Copyright (c) 2015 Andrew Lutomirski
*/
#define _GNU_SOURCE
#include <err.h>
#include <stdio.h>
#include <stdint.h>
#include <signal.h>
#include <setjmp.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <asm/ldt.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <stdbool.h>
#include <pthread.h>
#include <sched.h>
#include <linux/futex.h>
#include <sys/mman.h>
#include <asm/prctl.h>
#include <sys/prctl.h>
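/* Segment access-rights bits in the format returned by the LAR instruction */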
#define AR_ACCESSED (1<<8)
#define AR_TYPE_RODATA (0 * (1<<9))
#define AR_TYPE_RWDATA (1 * (1<<9))
#define AR_TYPE_RODATA_EXPDOWN (2 * (1<<9))
#define AR_TYPE_RWDATA_EXPDOWN (3 * (1<<9))
#define AR_TYPE_XOCODE (4 * (1<<9))
#define AR_TYPE_XRCODE (5 * (1<<9))
#define AR_TYPE_XOCODE_CONF (6 * (1<<9))
#define AR_TYPE_XRCODE_CONF (7 * (1<<9))
#define AR_DPL3 (3 * (1<<13))
#define AR_S (1 << 12)
#define AR_P (1 << 15)
#define AR_AVL (1 << 20)
#define AR_L (1 << 21)
#define AR_DB (1 << 22)
#define AR_G (1 << 23)
#ifdef __x86_64__
# define INT80_CLOBBERS "r8", "r9", "r10", "r11"
#else
# define INT80_CLOBBERS
#endif
static int nerrs;
/* Points to an array of 1024 ints, each holding its own index. */
static const unsigned int *counter_page;
static struct user_desc *low_user_desc;
static struct user_desc *low_user_desc_clear; /* Use to delete GDT entry */
static int gdt_entry_num;
static void check_invalid_segment(uint16_t index, int ldt)
{
uint32_t has_limit = 0, has_ar = 0, limit, ar;
uint32_t selector = (index << 3) | (ldt << 2) | 3;
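/* LSL/LAR set ZF when the selector refers to an accessible descriptor; otherwise the jnz leaves the has_* flag at 0 */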
asm ("lsl %[selector], %[limit]\n\t"
"jnz 1f\n\t"
"movl $1, %[has_limit]\n\t"
"1:"
: [limit] "=r" (limit), [has_limit] "+rm" (has_limit)
: [selector] "r" (selector));
asm ("larl %[selector], %[ar]\n\t"
"jnz 1f\n\t"
"movl $1, %[has_ar]\n\t"
"1:"
: [ar] "=r" (ar), [has_ar] "+rm" (has_ar)
: [selector] "r" (selector));
if (has_limit || has_ar) {
printf("[FAIL]\t%s entry %hu is valid but should be invalid\n",
(ldt ? "LDT" : "GDT"), index);
nerrs++;
} else {
printf("[OK]\t%s entry %hu is invalid\n",
(ldt ? "LDT" : "GDT"), index);
}
}
static void check_valid_segment(uint16_t index, int ldt,
uint32_t expected_ar, uint32_t expected_limit,
bool verbose)
{
uint32_t has_limit = 0, has_ar = 0, limit, ar;
uint32_t selector = (index << 3) | (ldt << 2) | 3;
asm ("lsl %[selector], %[limit]\n\t"
"jnz 1f\n\t"
"movl $1, %[has_limit]\n\t"
"1:"
: [limit] "=r" (limit), [has_limit] "+rm" (has_limit)
: [selector] "r" (selector));
asm ("larl %[selector], %[ar]\n\t"
"jnz 1f\n\t"
"movl $1, %[has_ar]\n\t"
"1:"
: [ar] "=r" (ar), [has_ar] "+rm" (has_ar)
: [selector] "r" (selector));
if (!has_limit || !has_ar) {
printf("[FAIL]\t%s entry %hu is invalid but should be valid\n",
(ldt ? "LDT" : "GDT"), index);
nerrs++;
return;
}
/* The SDM says "bits 19:16 are undefined". Thanks. */
ar &= ~0xF0000;
/*
* NB: Different Linux versions do different things with the
* accessed bit in set_thread_area().
*/
if (ar != expected_ar && ar != (expected_ar | AR_ACCESSED)) {
printf("[FAIL]\t%s entry %hu has AR 0x%08X but expected 0x%08X\n",
(ldt ? "LDT" : "GDT"), index, ar, expected_ar);
nerrs++;
} else if (limit != expected_limit) {
printf("[FAIL]\t%s entry %hu has limit 0x%08X but expected 0x%08X\n",
(ldt ? "LDT" : "GDT"), index, limit, expected_limit);
nerrs++;
} else if (verbose) {
printf("[OK]\t%s entry %hu has AR 0x%08X and limit 0x%08X\n",
(ldt ? "LDT" : "GDT"), index, ar, limit);
}
}
static bool install_valid_mode(const struct user_desc *d, uint32_t ar,
bool oldmode, bool ldt)
{
struct user_desc desc = *d;
int ret;
if (!ldt) {
#ifndef __i386__
/* No point testing set_thread_area in a 64-bit build */
return false;
#endif
if (!gdt_entry_num)
return false;
desc.entry_number = gdt_entry_num;
ret = syscall(SYS_set_thread_area, &desc);
} else {
ret = syscall(SYS_modify_ldt, oldmode ? 1 : 0x11,
&desc, sizeof(desc));
if (ret < -1)
errno = -ret;
if (ret != 0 && errno == ENOSYS) {
printf("[OK]\tmodify_ldt returned -ENOSYS\n");
return false;
}
}
if (ret == 0) {
uint32_t limit = desc.limit;
if (desc.limit_in_pages)
limit = (limit << 12) + 4095;
check_valid_segment(desc.entry_number, ldt, ar, limit, true);
return true;
} else {
if (desc.seg_32bit) {
printf("[FAIL]\tUnexpected %s failure %d\n",
ldt ? "modify_ldt" : "set_thread_area",
errno);
nerrs++;
return false;
} else {
printf("[OK]\t%s rejected 16 bit segment\n",
ldt ? "modify_ldt" : "set_thread_area");
return false;
}
}
}
static bool install_valid(const struct user_desc *desc, uint32_t ar)
{
bool ret = install_valid_mode(desc, ar, false, true);
if (desc->contents <= 1 && desc->seg_32bit &&
!desc->seg_not_present) {
/* Should work in the GDT, too. */
install_valid_mode(desc, ar, false, false);
}
return ret;
}
static void install_invalid(const struct user_desc *desc, bool oldmode)
{
int ret = syscall(SYS_modify_ldt, oldmode ? 1 : 0x11,
desc, sizeof(*desc));
if (ret < -1)
errno = -ret;
if (ret == 0) {
check_invalid_segment(desc->entry_number, 1);
} else if (errno == ENOSYS) {
printf("[OK]\tmodify_ldt returned -ENOSYS\n");
} else {
if (desc->seg_32bit) {
printf("[FAIL]\tUnexpected modify_ldt failure %d\n",
errno);
nerrs++;
} else {
printf("[OK]\tmodify_ldt rejected 16 bit segment\n");
}
}
}
static int safe_modify_ldt(int func, struct user_desc *ptr,
unsigned long bytecount)
{
int ret = syscall(SYS_modify_ldt, func, ptr, bytecount);
if (ret < -1)
errno = -ret;
return ret;
}
static void fail_install(struct user_desc *desc)
{
if (safe_modify_ldt(0x11, desc, sizeof(*desc)) == 0) {
printf("[FAIL]\tmodify_ldt accepted a bad descriptor\n");
nerrs++;
} else if (errno == ENOSYS) {
printf("[OK]\tmodify_ldt returned -ENOSYS\n");
} else {
printf("[OK]\tmodify_ldt failure %d\n", errno);
}
}
static void do_simple_tests(void)
{
struct user_desc desc = {
.entry_number = 0,
.base_addr = 0,
.limit = 10,
.seg_32bit = 1,
.contents = 2, /* Code, not conforming */
.read_exec_only = 0,
.limit_in_pages = 0,
.seg_not_present = 0,
.useable = 0
};
install_valid(&desc, AR_DPL3 | AR_TYPE_XRCODE | AR_S | AR_P | AR_DB);
desc.limit_in_pages = 1;
install_valid(&desc, AR_DPL3 | AR_TYPE_XRCODE |
AR_S | AR_P | AR_DB | AR_G);
check_invalid_segment(1, 1);
desc.entry_number = 2;
install_valid(&desc, AR_DPL3 | AR_TYPE_XRCODE |
AR_S | AR_P | AR_DB | AR_G);
check_invalid_segment(1, 1);
desc.base_addr = 0xf0000000;
install_valid(&desc, AR_DPL3 | AR_TYPE_XRCODE |
AR_S | AR_P | AR_DB | AR_G);
desc.useable = 1;
install_valid(&desc, AR_DPL3 | AR_TYPE_XRCODE |
AR_S | AR_P | AR_DB | AR_G | AR_AVL);
desc.seg_not_present = 1;
install_valid(&desc, AR_DPL3 | AR_TYPE_XRCODE |
AR_S | AR_DB | AR_G | AR_AVL);
desc.seg_32bit = 0;
install_valid(&desc, AR_DPL3 | AR_TYPE_XRCODE |
AR_S | AR_G | AR_AVL);
desc.seg_32bit = 1;
desc.contents = 0;
install_valid(&desc, AR_DPL3 | AR_TYPE_RWDATA |
AR_S | AR_DB | AR_G | AR_AVL);
desc.read_exec_only = 1;
install_valid(&desc, AR_DPL3 | AR_TYPE_RODATA |
AR_S | AR_DB | AR_G | AR_AVL);
desc.contents = 1;
install_valid(&desc, AR_DPL3 | AR_TYPE_RODATA_EXPDOWN |
AR_S | AR_DB | AR_G | AR_AVL);
desc.read_exec_only = 0;
desc.limit_in_pages = 0;
install_valid(&desc, AR_DPL3 | AR_TYPE_RWDATA_EXPDOWN |
AR_S | AR_DB | AR_AVL);
desc.contents = 3;
install_valid(&desc, AR_DPL3 | AR_TYPE_XRCODE_CONF |
AR_S | AR_DB | AR_AVL);
desc.read_exec_only = 1;
install_valid(&desc, AR_DPL3 | AR_TYPE_XOCODE_CONF |
AR_S | AR_DB | AR_AVL);
desc.read_exec_only = 0;
desc.contents = 2;
install_valid(&desc, AR_DPL3 | AR_TYPE_XRCODE |
AR_S | AR_DB | AR_AVL);
desc.read_exec_only = 1;
#ifdef __x86_64__
desc.lm = 1;
install_valid(&desc, AR_DPL3 | AR_TYPE_XOCODE |
AR_S | AR_DB | AR_AVL);
desc.lm = 0;
#endif
bool entry1_okay = install_valid(&desc, AR_DPL3 | AR_TYPE_XOCODE |
AR_S | AR_DB | AR_AVL);
if (entry1_okay) {
printf("[RUN]\tTest fork\n");
pid_t child = fork();
if (child == 0) {
nerrs = 0;
check_valid_segment(desc.entry_number, 1,
AR_DPL3 | AR_TYPE_XOCODE |
AR_S | AR_DB | AR_AVL, desc.limit,
true);
check_invalid_segment(1, 1);
exit(nerrs ? 1 : 0);
} else {
int status;
if (waitpid(child, &status, 0) != child ||
!WIFEXITED(status)) {
printf("[FAIL]\tChild died\n");
nerrs++;
} else if (WEXITSTATUS(status) != 0) {
printf("[FAIL]\tChild failed\n");
nerrs++;
} else {
printf("[OK]\tChild succeeded\n");
}
}
printf("[RUN]\tTest size\n");
int i;
for (i = 0; i < 8192; i++) {
desc.entry_number = i;
desc.limit = i;
if (safe_modify_ldt(0x11, &desc, sizeof(desc)) != 0) {
printf("[FAIL]\tFailed to install entry %d\n", i);
nerrs++;
break;
}
}
for (int j = 0; j < i; j++) {
check_valid_segment(j, 1, AR_DPL3 | AR_TYPE_XOCODE |
AR_S | AR_DB | AR_AVL, j, false);
}
printf("[DONE]\tSize test\n");
} else {
printf("[SKIP]\tSkipping fork and size tests because we have no LDT\n");
}
/* Test entry_number too high. */
desc.entry_number = 8192;
fail_install(&desc);
/* Test deletion and actions mistakeable for deletion. */
memset(&desc, 0, sizeof(desc));
install_valid(&desc, AR_DPL3 | AR_TYPE_RWDATA | AR_S | AR_P);
desc.seg_not_present = 1;
install_valid(&desc, AR_DPL3 | AR_TYPE_RWDATA | AR_S);
desc.seg_not_present = 0;
desc.read_exec_only = 1;
install_valid(&desc, AR_DPL3 | AR_TYPE_RODATA | AR_S | AR_P);
desc.read_exec_only = 0;
desc.seg_not_present = 1;
install_valid(&desc, AR_DPL3 | AR_TYPE_RWDATA | AR_S);
desc.read_exec_only = 1;
desc.limit = 1;
install_valid(&desc, AR_DPL3 | AR_TYPE_RODATA | AR_S);
desc.limit = 0;
desc.base_addr = 1;
install_valid(&desc, AR_DPL3 | AR_TYPE_RODATA | AR_S);
desc.base_addr = 0;
install_invalid(&desc, false);
desc.seg_not_present = 0;
desc.seg_32bit = 1;
desc.read_exec_only = 0;
desc.limit = 0xfffff;
install_valid(&desc, AR_DPL3 | AR_TYPE_RWDATA | AR_S | AR_P | AR_DB);
desc.limit_in_pages = 1;
install_valid(&desc, AR_DPL3 | AR_TYPE_RWDATA | AR_S | AR_P | AR_DB | AR_G);
desc.read_exec_only = 1;
install_valid(&desc, AR_DPL3 | AR_TYPE_RODATA | AR_S | AR_P | AR_DB | AR_G);
desc.contents = 1;
desc.read_exec_only = 0;
install_valid(&desc, AR_DPL3 | AR_TYPE_RWDATA_EXPDOWN | AR_S | AR_P | AR_DB | AR_G);
desc.read_exec_only = 1;
install_valid(&desc, AR_DPL3 | AR_TYPE_RODATA_EXPDOWN | AR_S | AR_P | AR_DB | AR_G);
desc.limit = 0;
install_invalid(&desc, true);
}
/*
* 0: thread is idle
* 1: thread armed
* 2: thread should clear LDT entry 0
* 3: thread should exit
*/
static volatile unsigned int ftx;
static void *threadproc(void *ctx)
{
cpu_set_t cpuset;
CPU_ZERO(&cpuset);
CPU_SET(1, &cpuset);
if (sched_setaffinity(0, sizeof(cpuset), &cpuset) != 0)
err(1, "sched_setaffinity to CPU 1"); /* should never fail */
while (1) {
syscall(SYS_futex, &ftx, FUTEX_WAIT, 0, NULL, NULL, 0);
while (ftx != 2) {
if (ftx >= 3)
return NULL;
}
/* clear LDT entry 0 */
const struct user_desc desc = {};
if (syscall(SYS_modify_ldt, 1, &desc, sizeof(desc)) != 0)
err(1, "modify_ldt");
/* If ftx == 2, set it to zero. If ftx == 100, quit. */
unsigned int x = -2;
asm volatile ("lock xaddl %[x], %[ftx]" :
[x] "+r" (x), [ftx] "+m" (ftx));
if (x != 2)
return NULL;
}
}
#ifdef __i386__
#ifndef SA_RESTORER
#define SA_RESTORER 0x04000000
#endif
/*
* The UAPI header calls this 'struct sigaction', which conflicts with
* glibc. Sigh.
*/
struct fake_ksigaction {
void *handler; /* the real type is nasty */
unsigned long sa_flags;
void (*sa_restorer)(void);
unsigned char sigset[8];
};
static void fix_sa_restorer(int sig)
{
struct fake_ksigaction ksa;
if (syscall(SYS_rt_sigaction, sig, NULL, &ksa, 8) == 0) {
/*
* glibc has a nasty bug: it sometimes writes garbage to
* sa_restorer. This interacts quite badly with anything
* that fiddles with SS because it can trigger legacy
* stack switching. Patch it up. See:
*
* https://sourceware.org/bugzilla/show_bug.cgi?id=21269
*/
if (!(ksa.sa_flags & SA_RESTORER) && ksa.sa_restorer) {
ksa.sa_restorer = NULL;
if (syscall(SYS_rt_sigaction, sig, &ksa, NULL,
sizeof(ksa.sigset)) != 0)
err(1, "rt_sigaction");
}
}
}
#else
static void fix_sa_restorer(int sig)
{
/* 64-bit glibc works fine. */
}
#endif
static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *),
int flags)
{
struct sigaction sa;
memset(&sa, 0, sizeof(sa));
sa.sa_sigaction = handler;
sa.sa_flags = SA_SIGINFO | flags;
sigemptyset(&sa.sa_mask);
if (sigaction(sig, &sa, 0))
err(1, "sigaction");
fix_sa_restorer(sig);
}
static jmp_buf jmpbuf;
static void sigsegv(int sig, siginfo_t *info, void *ctx_void)
{
siglongjmp(jmpbuf, 1);
}
static void do_multicpu_tests(void)
{
cpu_set_t cpuset;
pthread_t thread;
int failures = 0, iters = 5, i;
unsigned short orig_ss;
CPU_ZERO(&cpuset);
CPU_SET(1, &cpuset);
if (sched_setaffinity(0, sizeof(cpuset), &cpuset) != 0) {
printf("[SKIP]\tCannot set affinity to CPU 1\n");
return;
}
CPU_ZERO(&cpuset);
CPU_SET(0, &cpuset);
if (sched_setaffinity(0, sizeof(cpuset), &cpuset) != 0) {
printf("[SKIP]\tCannot set affinity to CPU 0\n");
return;
}
sethandler(SIGSEGV, sigsegv, 0);
#ifdef __i386__
/* True 32-bit kernels send SIGILL instead of SIGSEGV on IRET faults. */
sethandler(SIGILL, sigsegv, 0);
#endif
printf("[RUN]\tCross-CPU LDT invalidation\n");
if (pthread_create(&thread, 0, threadproc, 0) != 0)
err(1, "pthread_create");
asm volatile ("mov %%ss, %0" : "=rm" (orig_ss));
for (i = 0; i < 5; i++) {
if (sigsetjmp(jmpbuf, 1) != 0)
continue;
/* Make sure the thread is ready after the last test. */
while (ftx != 0)
;
struct user_desc desc = {
.entry_number = 0,
.base_addr = 0,
.limit = 0xfffff,
.seg_32bit = 1,
.contents = 0, /* Data */
.read_exec_only = 0,
.limit_in_pages = 1,
.seg_not_present = 0,
.useable = 0
};
if (safe_modify_ldt(0x11, &desc, sizeof(desc)) != 0) {
if (errno != ENOSYS)
err(1, "modify_ldt");
printf("[SKIP]\tmodify_ldt unavailable\n");
break;
}
/* Arm the thread. */
ftx = 1;
syscall(SYS_futex, &ftx, FUTEX_WAKE, 0, NULL, NULL, 0);
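/* Load SS with selector 0x7: LDT index 0, TI bit set, RPL 3 */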
asm volatile ("mov %0, %%ss" : : "r" (0x7));
/* Go! */
ftx = 2;
while (ftx != 0)
;
/*
* On success, modify_ldt will segfault us synchronously,
* and we'll escape via siglongjmp.
*/
failures++;
asm volatile ("mov %0, %%ss" : : "rm" (orig_ss));
}
ftx = 100; /* Kill the thread. */
syscall(SYS_futex, &ftx, FUTEX_WAKE, 0, NULL, NULL, 0);
if (pthread_join(thread, NULL) != 0)
err(1, "pthread_join");
if (failures) {
printf("[FAIL]\t%d of %d iterations failed\n", failures, iters);
nerrs++;
} else {
printf("[OK]\tAll %d iterations succeeded\n", iters);
}
}
static int finish_exec_test(void)
{
/*
* Older kernel versions did inherit the LDT on exec() which is
* wrong because exec() starts from a clean state.
*/
check_invalid_segment(0, 1);
return nerrs ? 1 : 0;
}
static void do_exec_test(void)
{
printf("[RUN]\tTest exec\n");
struct user_desc desc = {
.entry_number = 0,
.base_addr = 0,
.limit = 42,
.seg_32bit = 1,
.contents = 2, /* Code, not conforming */
.read_exec_only = 0,
.limit_in_pages = 0,
.seg_not_present = 0,
.useable = 0
};
install_valid(&desc, AR_DPL3 | AR_TYPE_XRCODE | AR_S | AR_P | AR_DB);
pid_t child = fork();
if (child == 0) {
execl("/proc/self/exe", "ldt_gdt_test_exec", NULL);
printf("[FAIL]\tCould not exec self\n");
exit(1); /* exec failed */
} else {
int status;
if (waitpid(child, &status, 0) != child ||
!WIFEXITED(status)) {
printf("[FAIL]\tChild died\n");
nerrs++;
} else if (WEXITSTATUS(status) != 0) {
printf("[FAIL]\tChild failed\n");
nerrs++;
} else {
printf("[OK]\tChild succeeded\n");
}
}
}
static void setup_counter_page(void)
{
unsigned int *page = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
MAP_ANONYMOUS | MAP_PRIVATE | MAP_32BIT, -1, 0);
if (page == MAP_FAILED)
err(1, "mmap");
for (int i = 0; i < 1024; i++)
page[i] = i;
counter_page = page;
}
static int invoke_set_thread_area(void)
{
int ret;
asm volatile ("int $0x80"
: "=a" (ret), "+m" (low_user_desc) :
"a" (243), "b" (low_user_desc)
: INT80_CLOBBERS);
return ret;
}
static void setup_low_user_desc(void)
{
low_user_desc = mmap(NULL, 2 * sizeof(struct user_desc),
PROT_READ | PROT_WRITE,
MAP_ANONYMOUS | MAP_PRIVATE | MAP_32BIT, -1, 0);
if (low_user_desc == MAP_FAILED)
err(1, "mmap");
low_user_desc->entry_number = -1;
low_user_desc->base_addr = (unsigned long)&counter_page[1];
low_user_desc->limit = 0xfffff;
low_user_desc->seg_32bit = 1;
low_user_desc->contents = 0; /* Data, grow-up*/
low_user_desc->read_exec_only = 0;
low_user_desc->limit_in_pages = 1;
low_user_desc->seg_not_present = 0;
low_user_desc->useable = 0;
if (invoke_set_thread_area() == 0) {
gdt_entry_num = low_user_desc->entry_number;
printf("[NOTE]\tset_thread_area is available; will use GDT index %d\n", gdt_entry_num);
} else {
printf("[NOTE]\tset_thread_area is unavailable\n");
}
low_user_desc_clear = low_user_desc + 1;
low_user_desc_clear->entry_number = gdt_entry_num;
low_user_desc_clear->read_exec_only = 1;
low_user_desc_clear->seg_not_present = 1;
}
static void test_gdt_invalidation(void)
{
if (!gdt_entry_num)
return; /* 64-bit only system -- we can't use set_thread_area */
unsigned short prev_sel;
unsigned short sel;
unsigned int eax;
const char *result;
#ifdef __x86_64__
unsigned long saved_base;
unsigned long new_base;
#endif
/* Test DS */
invoke_set_thread_area();
eax = 243;
sel = (gdt_entry_num << 3) | 3;
asm volatile ("movw %%ds, %[prev_sel]\n\t"
"movw %[sel], %%ds\n\t"
#ifdef __i386__
"pushl %%ebx\n\t"
#endif
"movl %[arg1], %%ebx\n\t"
"int $0x80\n\t" /* Should invalidate ds */
#ifdef __i386__
"popl %%ebx\n\t"
#endif
"movw %%ds, %[sel]\n\t"
"movw %[prev_sel], %%ds"
: [prev_sel] "=&r" (prev_sel), [sel] "+r" (sel),
"+a" (eax)
: "m" (low_user_desc_clear),
[arg1] "r" ((unsigned int)(unsigned long)low_user_desc_clear)
: INT80_CLOBBERS);
if (sel != 0) {
result = "FAIL";
nerrs++;
} else {
result = "OK";
}
printf("[%s]\tInvalidate DS with set_thread_area: new DS = 0x%hx\n",
result, sel);
/* Test ES */
invoke_set_thread_area();
eax = 243;
sel = (gdt_entry_num << 3) | 3;
asm volatile ("movw %%es, %[prev_sel]\n\t"
"movw %[sel], %%es\n\t"
#ifdef __i386__
"pushl %%ebx\n\t"
#endif
"movl %[arg1], %%ebx\n\t"
"int $0x80\n\t" /* Should invalidate es */
#ifdef __i386__
"popl %%ebx\n\t"
#endif
"movw %%es, %[sel]\n\t"
"movw %[prev_sel], %%es"
: [prev_sel] "=&r" (prev_sel), [sel] "+r" (sel),
"+a" (eax)
: "m" (low_user_desc_clear),
[arg1] "r" ((unsigned int)(unsigned long)low_user_desc_clear)
: INT80_CLOBBERS);
if (sel != 0) {
result = "FAIL";
nerrs++;
} else {
result = "OK";
}
printf("[%s]\tInvalidate ES with set_thread_area: new ES = 0x%hx\n",
result, sel);
/* Test FS */
invoke_set_thread_area();
eax = 243;
sel = (gdt_entry_num << 3) | 3;
#ifdef __x86_64__
syscall(SYS_arch_prctl, ARCH_GET_FS, &saved_base);
#endif
asm volatile ("movw %%fs, %[prev_sel]\n\t"
"movw %[sel], %%fs\n\t"
#ifdef __i386__
"pushl %%ebx\n\t"
#endif
"movl %[arg1], %%ebx\n\t"
"int $0x80\n\t" /* Should invalidate fs */
#ifdef __i386__
"popl %%ebx\n\t"
#endif
"movw %%fs, %[sel]\n\t"
: [prev_sel] "=&r" (prev_sel), [sel] "+r" (sel),
"+a" (eax)
: "m" (low_user_desc_clear),
[arg1] "r" ((unsigned int)(unsigned long)low_user_desc_clear)
: INT80_CLOBBERS);
#ifdef __x86_64__
syscall(SYS_arch_prctl, ARCH_GET_FS, &new_base);
#endif
/* Restore FS/BASE for glibc */
asm volatile ("movw %[prev_sel], %%fs" : : [prev_sel] "rm" (prev_sel));
#ifdef __x86_64__
if (saved_base)
syscall(SYS_arch_prctl, ARCH_SET_FS, saved_base);
#endif
if (sel != 0) {
result = "FAIL";
nerrs++;
} else {
result = "OK";
}
printf("[%s]\tInvalidate FS with set_thread_area: new FS = 0x%hx\n",
result, sel);
#ifdef __x86_64__
if (sel == 0 && new_base != 0) {
nerrs++;
printf("[FAIL]\tNew FSBASE was 0x%lx\n", new_base);
} else {
printf("[OK]\tNew FSBASE was zero\n");
}
#endif
/* Test GS */
invoke_set_thread_area();
eax = 243;
sel = (gdt_entry_num << 3) | 3;
#ifdef __x86_64__
syscall(SYS_arch_prctl, ARCH_GET_GS, &saved_base);
#endif
asm volatile ("movw %%gs, %[prev_sel]\n\t"
"movw %[sel], %%gs\n\t"
#ifdef __i386__
"pushl %%ebx\n\t"
#endif
"movl %[arg1], %%ebx\n\t"
"int $0x80\n\t" /* Should invalidate gs */
#ifdef __i386__
"popl %%ebx\n\t"
#endif
"movw %%gs, %[sel]\n\t"
: [prev_sel] "=&r" (prev_sel), [sel] "+r" (sel),
"+a" (eax)
: "m" (low_user_desc_clear),
[arg1] "r" ((unsigned int)(unsigned long)low_user_desc_clear)
: INT80_CLOBBERS);
#ifdef __x86_64__
syscall(SYS_arch_prctl, ARCH_GET_GS, &new_base);
#endif
/* Restore GS/BASE for glibc */
asm volatile ("movw %[prev_sel], %%gs" : : [prev_sel] "rm" (prev_sel));
#ifdef __x86_64__
if (saved_base)
syscall(SYS_arch_prctl, ARCH_SET_GS, saved_base);
#endif
if (sel != 0) {
result = "FAIL";
nerrs++;
} else {
result = "OK";
}
printf("[%s]\tInvalidate GS with set_thread_area: new GS = 0x%hx\n",
result, sel);
#ifdef __x86_64__
if (sel == 0 && new_base != 0) {
nerrs++;
printf("[FAIL]\tNew GSBASE was 0x%lx\n", new_base);
} else {
printf("[OK]\tNew GSBASE was zero\n");
}
#endif
}
int main(int argc, char **argv)
{
if (argc == 1 && !strcmp(argv[0], "ldt_gdt_test_exec"))
return finish_exec_test();
setup_counter_page();
setup_low_user_desc();
do_simple_tests();
do_multicpu_tests();
do_exec_test();
test_gdt_invalidation();
return nerrs ? 1 : 0;
}
| linux-master | tools/testing/selftests/x86/ldt_gdt.c |
// SPDX-License-Identifier: GPL-2.0
/*
* ioperm.c - Test case for ioperm(2)
* Copyright (c) 2015 Andrew Lutomirski
*/
#define _GNU_SOURCE
#include <err.h>
#include <stdio.h>
#include <stdint.h>
#include <signal.h>
#include <setjmp.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <stdbool.h>
#include <sched.h>
#include <sys/io.h>
static int nerrs = 0;
static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *),
int flags)
{
struct sigaction sa;
memset(&sa, 0, sizeof(sa));
sa.sa_sigaction = handler;
sa.sa_flags = SA_SIGINFO | flags;
sigemptyset(&sa.sa_mask);
if (sigaction(sig, &sa, 0))
err(1, "sigaction");
}
static void clearhandler(int sig)
{
struct sigaction sa;
memset(&sa, 0, sizeof(sa));
sa.sa_handler = SIG_DFL;
sigemptyset(&sa.sa_mask);
if (sigaction(sig, &sa, 0))
err(1, "sigaction");
}
static jmp_buf jmpbuf;
static void sigsegv(int sig, siginfo_t *si, void *ctx_void)
{
siglongjmp(jmpbuf, 1);
}
static bool try_outb(unsigned short port)
{
bool ret;
sethandler(SIGSEGV, sigsegv, SA_RESETHAND);
if (sigsetjmp(jmpbuf, 1) != 0) {
ret = false;
} else {
asm volatile ("outb %%al, %w[port]"
: : [port] "Nd" (port), "a" (0));
ret = true;
}
clearhandler(SIGSEGV);
return ret;
}
static void expect_ok(unsigned short port)
{
if (!try_outb(port)) {
printf("[FAIL]\toutb to 0x%02hx failed\n", port);
exit(1);
}
printf("[OK]\toutb to 0x%02hx worked\n", port);
}
static void expect_gp(unsigned short port)
{
if (try_outb(port)) {
printf("[FAIL]\toutb to 0x%02hx worked\n", port);
exit(1);
}
printf("[OK]\toutb to 0x%02hx failed\n", port);
}
int main(void)
{
cpu_set_t cpuset;
CPU_ZERO(&cpuset);
CPU_SET(0, &cpuset);
if (sched_setaffinity(0, sizeof(cpuset), &cpuset) != 0)
err(1, "sched_setaffinity to CPU 0");
expect_gp(0x80);
expect_gp(0xed);
/*
* Probe for ioperm support. Note that clearing ioperm bits
* works even as nonroot.
*/
printf("[RUN]\tenable 0x80\n");
if (ioperm(0x80, 1, 1) != 0) {
printf("[OK]\tioperm(0x80, 1, 1) failed (%d) -- try running as root\n",
errno);
return 0;
}
expect_ok(0x80);
expect_gp(0xed);
printf("[RUN]\tdisable 0x80\n");
if (ioperm(0x80, 1, 0) != 0) {
printf("[FAIL]\tioperm(0x80, 1, 0) failed (%d)", errno);
return 1;
}
expect_gp(0x80);
expect_gp(0xed);
/* Make sure that fork() preserves ioperm. */
if (ioperm(0x80, 1, 1) != 0) {
printf("[FAIL]\tioperm(0x80, 1, 0) failed (%d)", errno);
return 1;
}
pid_t child = fork();
if (child == -1)
err(1, "fork");
if (child == 0) {
printf("[RUN]\tchild: check that we inherited permissions\n");
expect_ok(0x80);
expect_gp(0xed);
printf("[RUN]\tchild: Extend permissions to 0x81\n");
if (ioperm(0x81, 1, 1) != 0) {
printf("[FAIL]\tioperm(0x81, 1, 1) failed (%d)", errno);
return 1;
}
printf("[RUN]\tchild: Drop permissions to 0x80\n");
if (ioperm(0x80, 1, 0) != 0) {
printf("[FAIL]\tioperm(0x80, 1, 0) failed (%d)", errno);
return 1;
}
expect_gp(0x80);
return 0;
} else {
int status;
if (waitpid(child, &status, 0) != child ||
!WIFEXITED(status)) {
printf("[FAIL]\tChild died\n");
nerrs++;
} else if (WEXITSTATUS(status) != 0) {
printf("[FAIL]\tChild failed\n");
nerrs++;
} else {
printf("[OK]\tChild succeeded\n");
}
}
/* Verify that the child dropping 0x80 did not affect the parent */
printf("\tVerify that unsharing the bitmap worked\n");
expect_ok(0x80);
/* Test the capability checks. */
printf("\tDrop privileges\n");
if (setresuid(1, 1, 1) != 0) {
printf("[WARN]\tDropping privileges failed\n");
return 0;
}
printf("[RUN]\tdisable 0x80\n");
if (ioperm(0x80, 1, 0) != 0) {
printf("[FAIL]\tioperm(0x80, 1, 0) failed (%d)", errno);
return 1;
}
printf("[OK]\tit worked\n");
printf("[RUN]\tenable 0x80 again\n");
if (ioperm(0x80, 1, 1) == 0) {
printf("[FAIL]\tit succeeded but should have failed.\n");
return 1;
}
printf("[OK]\tit failed\n");
return 0;
}
| linux-master | tools/testing/selftests/x86/ioperm.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* 32-bit syscall ABI conformance test.
*
* Copyright (c) 2015 Denys Vlasenko
*/
/*
* Can be built statically:
* gcc -Os -Wall -static -m32 test_syscall_vdso.c thunks_32.S
*/
#undef _GNU_SOURCE
#define _GNU_SOURCE 1
#undef __USE_GNU
#define __USE_GNU 1
#include <unistd.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <signal.h>
#include <sys/types.h>
#include <sys/select.h>
#include <sys/time.h>
#include <elf.h>
#include <sys/ptrace.h>
#include <sys/wait.h>
#if !defined(__i386__)
int main(int argc, char **argv, char **envp)
{
printf("[SKIP]\tNot a 32-bit x86 userspace\n");
return 0;
}
#else
long syscall_addr;
long get_syscall(char **envp)
{
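/*
* The ELF auxiliary vector follows the environment block; AT_SYSINFO
* holds the address of the vDSO's __kernel_vsyscall entry point.
*/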
Elf32_auxv_t *auxv;
while (*envp++ != NULL)
continue;
for (auxv = (void *)envp; auxv->a_type != AT_NULL; auxv++)
if (auxv->a_type == AT_SYSINFO)
return auxv->a_un.a_val;
printf("[WARN]\tAT_SYSINFO not supplied\n");
return 0;
}
asm (
" .pushsection .text\n"
" .global int80\n"
"int80:\n"
" int $0x80\n"
" ret\n"
" .popsection\n"
);
extern char int80;
struct regs64 {
uint64_t rax, rbx, rcx, rdx;
uint64_t rsi, rdi, rbp, rsp;
uint64_t r8, r9, r10, r11;
uint64_t r12, r13, r14, r15;
};
struct regs64 regs64;
int kernel_is_64bit;
asm (
" .pushsection .text\n"
" .code64\n"
"get_regs64:\n"
" push %rax\n"
" mov $regs64, %eax\n"
" pop 0*8(%rax)\n"
" movq %rbx, 1*8(%rax)\n"
" movq %rcx, 2*8(%rax)\n"
" movq %rdx, 3*8(%rax)\n"
" movq %rsi, 4*8(%rax)\n"
" movq %rdi, 5*8(%rax)\n"
" movq %rbp, 6*8(%rax)\n"
" movq %rsp, 7*8(%rax)\n"
" movq %r8, 8*8(%rax)\n"
" movq %r9, 9*8(%rax)\n"
" movq %r10, 10*8(%rax)\n"
" movq %r11, 11*8(%rax)\n"
" movq %r12, 12*8(%rax)\n"
" movq %r13, 13*8(%rax)\n"
" movq %r14, 14*8(%rax)\n"
" movq %r15, 15*8(%rax)\n"
" ret\n"
"poison_regs64:\n"
" movq $0x7f7f7f7f, %r8\n"
" shl $32, %r8\n"
" orq $0x7f7f7f7f, %r8\n"
" movq %r8, %r9\n"
" incq %r9\n"
" movq %r9, %r10\n"
" incq %r10\n"
" movq %r10, %r11\n"
" incq %r11\n"
" movq %r11, %r12\n"
" incq %r12\n"
" movq %r12, %r13\n"
" incq %r13\n"
" movq %r13, %r14\n"
" incq %r14\n"
" movq %r14, %r15\n"
" incq %r15\n"
" ret\n"
" .code32\n"
" .popsection\n"
);
extern void get_regs64(void);
extern void poison_regs64(void);
extern unsigned long call64_from_32(void (*function)(void));
void print_regs64(void)
{
if (!kernel_is_64bit)
return;
printf("ax:%016llx bx:%016llx cx:%016llx dx:%016llx\n", regs64.rax, regs64.rbx, regs64.rcx, regs64.rdx);
printf("si:%016llx di:%016llx bp:%016llx sp:%016llx\n", regs64.rsi, regs64.rdi, regs64.rbp, regs64.rsp);
printf(" 8:%016llx 9:%016llx 10:%016llx 11:%016llx\n", regs64.r8 , regs64.r9 , regs64.r10, regs64.r11);
printf("12:%016llx 13:%016llx 14:%016llx 15:%016llx\n", regs64.r12, regs64.r13, regs64.r14, regs64.r15);
}
int check_regs64(void)
{
int err = 0;
int num = 8;
uint64_t *r64 = ®s64.r8;
uint64_t expected = 0x7f7f7f7f7f7f7f7fULL;
if (!kernel_is_64bit)
return 0;
do {
if (*r64 == expected++)
continue; /* register did not change */
if (syscall_addr != (long)&int80) {
/*
* Non-INT80 syscall entrypoints are allowed to clobber R8+ regs:
* either clear them to 0, or for R11, load EFLAGS.
*/
if (*r64 == 0)
continue;
if (num == 11) {
printf("[NOTE]\tR11 has changed:%016llx - assuming clobbered by SYSRET insn\n", *r64);
continue;
}
} else {
/*
* INT80 syscall entrypoint can be used by
* 64-bit programs too, unlike SYSCALL/SYSENTER.
* Therefore it must preserve R12+
* (they are callee-saved registers in 64-bit C ABI).
*
* Starting in Linux 4.17 (and any kernel that
* backports the change), R8..11 are preserved.
* Historically (and probably unintentionally), they
* were clobbered or zeroed.
*/
}
printf("[FAIL]\tR%d has changed:%016llx\n", num, *r64);
err++;
} while (r64++, ++num < 16);
if (!err)
printf("[OK]\tR8..R15 did not leak kernel data\n");
return err;
}
int nfds;
fd_set rfds;
fd_set wfds;
fd_set efds;
struct timespec timeout;
sigset_t sigmask;
struct {
sigset_t *sp;
int sz;
} sigmask_desc;
void prep_args()
{
nfds = 42;
FD_ZERO(&rfds);
FD_ZERO(&wfds);
FD_ZERO(&efds);
FD_SET(0, &rfds);
FD_SET(1, &wfds);
FD_SET(2, &efds);
timeout.tv_sec = 0;
timeout.tv_nsec = 123;
sigemptyset(&sigmask);
sigaddset(&sigmask, SIGINT);
sigaddset(&sigmask, SIGUSR2);
sigaddset(&sigmask, SIGRTMAX);
sigmask_desc.sp = &sigmask;
sigmask_desc.sz = 8; /* bytes */
}
static void print_flags(const char *name, unsigned long r)
{
static const char *bitarray[] = {
"\n" ,"c\n" ,/* Carry Flag */
"0 " ,"1 " ,/* Bit 1 - always on */
"" ,"p " ,/* Parity Flag */
"0 " ,"3? " ,
"" ,"a " ,/* Auxiliary carry Flag */
"0 " ,"5? " ,
"" ,"z " ,/* Zero Flag */
"" ,"s " ,/* Sign Flag */
"" ,"t " ,/* Trap Flag */
"" ,"i " ,/* Interrupt Flag */
"" ,"d " ,/* Direction Flag */
"" ,"o " ,/* Overflow Flag */
"0 " ,"1 " ,/* I/O Privilege Level (2 bits) */
"0" ,"1" ,/* I/O Privilege Level (2 bits) */
"" ,"n " ,/* Nested Task */
"0 " ,"15? ",
"" ,"r " ,/* Resume Flag */
"" ,"v " ,/* Virtual Mode */
"" ,"ac " ,/* Alignment Check/Access Control */
"" ,"vif ",/* Virtual Interrupt Flag */
"" ,"vip ",/* Virtual Interrupt Pending */
"" ,"id " ,/* CPUID detection */
NULL
};
const char **bitstr;
int bit;
printf("%s=%016lx ", name, r);
bitstr = bitarray + 42;
bit = 21;
if ((r >> 22) != 0)
printf("(extra bits are set) ");
do {
if (bitstr[(r >> bit) & 1][0])
fputs(bitstr[(r >> bit) & 1], stdout);
bitstr -= 2;
bit--;
} while (bit >= 0);
}
int run_syscall(void)
{
long flags, bad_arg;
prep_args();
if (kernel_is_64bit)
call64_from_32(poison_regs64);
/*print_regs64();*/
asm("\n"
/* Try 6-arg syscall: pselect. It should return quickly */
" push %%ebp\n"
" mov $308, %%eax\n" /* PSELECT */
" mov nfds, %%ebx\n" /* ebx arg1 */
" mov $rfds, %%ecx\n" /* ecx arg2 */
" mov $wfds, %%edx\n" /* edx arg3 */
" mov $efds, %%esi\n" /* esi arg4 */
" mov $timeout, %%edi\n" /* edi arg5 */
" mov $sigmask_desc, %%ebp\n" /* %ebp arg6 */
" push $0x200ed7\n" /* set almost all flags */
" popf\n" /* except TF, IOPL, NT, RF, VM, AC, VIF, VIP */
" call *syscall_addr\n"
/* Check that registers are not clobbered */
" pushf\n"
" pop %%eax\n"
" cld\n"
" cmp nfds, %%ebx\n" /* ebx arg1 */
" mov $1, %%ebx\n"
" jne 1f\n"
" cmp $rfds, %%ecx\n" /* ecx arg2 */
" mov $2, %%ebx\n"
" jne 1f\n"
" cmp $wfds, %%edx\n" /* edx arg3 */
" mov $3, %%ebx\n"
" jne 1f\n"
" cmp $efds, %%esi\n" /* esi arg4 */
" mov $4, %%ebx\n"
" jne 1f\n"
" cmp $timeout, %%edi\n" /* edi arg5 */
" mov $5, %%ebx\n"
" jne 1f\n"
" cmpl $sigmask_desc, %%ebp\n" /* %ebp arg6 */
" mov $6, %%ebx\n"
" jne 1f\n"
" mov $0, %%ebx\n"
"1:\n"
" pop %%ebp\n"
: "=a" (flags), "=b" (bad_arg)
:
: "cx", "dx", "si", "di"
);
if (kernel_is_64bit) {
memset(®s64, 0x77, sizeof(regs64));
call64_from_32(get_regs64);
/*print_regs64();*/
}
/*
* On paravirt kernels, flags are not preserved across syscalls.
* Thus, we do not consider it a bug if some are changed.
* We just show ones which do.
*/
if ((0x200ed7 ^ flags) != 0) {
print_flags("[WARN]\tFlags before", 0x200ed7);
print_flags("[WARN]\tFlags after", flags);
print_flags("[WARN]\tFlags change", (0x200ed7 ^ flags));
}
if (bad_arg) {
printf("[FAIL]\targ#%ld clobbered\n", bad_arg);
return 1;
}
printf("[OK]\tArguments are preserved across syscall\n");
return check_regs64();
}
int run_syscall_twice()
{
int exitcode = 0;
long sv;
if (syscall_addr) {
printf("[RUN]\tExecuting 6-argument 32-bit syscall via VDSO\n");
exitcode = run_syscall();
}
sv = syscall_addr;
syscall_addr = (long)&int80;
printf("[RUN]\tExecuting 6-argument 32-bit syscall via INT 80\n");
exitcode += run_syscall();
syscall_addr = sv;
return exitcode;
}
void ptrace_me()
{
pid_t pid;
fflush(NULL);
pid = fork();
if (pid < 0)
exit(1);
if (pid == 0) {
/* child */
if (ptrace(PTRACE_TRACEME, 0L, 0L, 0L) != 0)
exit(0);
raise(SIGSTOP);
return;
}
/* parent */
printf("[RUN]\tRunning tests under ptrace\n");
while (1) {
int status;
pid = waitpid(-1, &status, __WALL);
if (WIFEXITED(status))
exit(WEXITSTATUS(status));
if (WIFSIGNALED(status))
exit(WTERMSIG(status));
if (pid <= 0 || !WIFSTOPPED(status)) /* paranoia */
exit(255);
/*
* Note: we do not inject sig = WSTOPSIG(status).
* We probably should, but careful: do not inject SIGTRAP
* generated by syscall entry/exit stops.
* That kills the child.
*/
ptrace(PTRACE_SYSCALL, pid, 0L, 0L /*sig*/);
}
}
int main(int argc, char **argv, char **envp)
{
int exitcode = 0;
int cs;
asm("\n"
" movl %%cs, %%eax\n"
: "=a" (cs)
);
kernel_is_64bit = (cs == 0x23);
if (!kernel_is_64bit)
printf("[NOTE]\tNot a 64-bit kernel, won't test R8..R15 leaks\n");
/* This only works for non-static builds:
* syscall_addr = dlsym(dlopen("linux-gate.so.1", RTLD_NOW), "__kernel_vsyscall");
*/
syscall_addr = get_syscall(envp);
exitcode += run_syscall_twice();
ptrace_me();
exitcode += run_syscall_twice();
return exitcode;
}
#endif
| linux-master | tools/testing/selftests/x86/test_syscall_vdso.c |
// SPDX-License-Identifier: GPL-2.0
#define _GNU_SOURCE
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <sys/syscall.h>
#include <sys/user.h>
#include <unistd.h>
#include <errno.h>
#include <stddef.h>
#include <stdio.h>
#include <err.h>
#include <string.h>
#include <asm/ptrace-abi.h>
#include <sys/auxv.h>
/* Bitness-agnostic defines for user_regs_struct fields. */
#ifdef __x86_64__
# define user_syscall_nr orig_rax
# define user_arg0 rdi
# define user_arg1 rsi
# define user_arg2 rdx
# define user_arg3 r10
# define user_arg4 r8
# define user_arg5 r9
# define user_ip rip
# define user_ax rax
#else
# define user_syscall_nr orig_eax
# define user_arg0 ebx
# define user_arg1 ecx
# define user_arg2 edx
# define user_arg3 esi
# define user_arg4 edi
# define user_arg5 ebp
# define user_ip eip
# define user_ax eax
#endif
static int nerrs = 0;
struct syscall_args32 {
uint32_t nr, arg0, arg1, arg2, arg3, arg4, arg5;
};
#ifdef __i386__
extern void sys32_helper(struct syscall_args32 *, void *);
extern void int80_and_ret(void);
#endif
/*
* Helper to invoke int80 with controlled regs and capture the final regs.
*/
static void do_full_int80(struct syscall_args32 *args)
{
#ifdef __x86_64__
register unsigned long bp asm("bp") = args->arg5;
asm volatile ("int $0x80"
: "+a" (args->nr),
"+b" (args->arg0), "+c" (args->arg1), "+d" (args->arg2),
"+S" (args->arg3), "+D" (args->arg4), "+r" (bp)
: : "r8", "r9", "r10", "r11");
args->arg5 = bp;
#else
sys32_helper(args, int80_and_ret);
#endif
}
#ifdef __i386__
static void (*vsyscall32)(void);
/*
* Nasty helper to invoke AT_SYSINFO (i.e. __kernel_vsyscall) with
* controlled regs and capture the final regs. This is so nasty that it
* crashes my copy of gdb :)
*/
static void do_full_vsyscall32(struct syscall_args32 *args)
{
sys32_helper(args, vsyscall32);
}
#endif
static siginfo_t wait_trap(pid_t chld)
{
siginfo_t si;
if (waitid(P_PID, chld, &si, WEXITED|WSTOPPED) != 0)
err(1, "waitid");
if (si.si_pid != chld)
errx(1, "got unexpected pid in event\n");
if (si.si_code != CLD_TRAPPED)
errx(1, "got unexpected event type %d\n", si.si_code);
return si;
}
static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *),
int flags)
{
struct sigaction sa;
memset(&sa, 0, sizeof(sa));
sa.sa_sigaction = handler;
sa.sa_flags = SA_SIGINFO | flags;
sigemptyset(&sa.sa_mask);
if (sigaction(sig, &sa, 0))
err(1, "sigaction");
}
static void setsigign(int sig, int flags)
{
struct sigaction sa;
memset(&sa, 0, sizeof(sa));
sa.sa_sigaction = (void *)SIG_IGN;
sa.sa_flags = flags;
sigemptyset(&sa.sa_mask);
if (sigaction(sig, &sa, 0))
err(1, "sigaction");
}
static void clearhandler(int sig)
{
struct sigaction sa;
memset(&sa, 0, sizeof(sa));
sa.sa_handler = SIG_DFL;
sigemptyset(&sa.sa_mask);
if (sigaction(sig, &sa, 0))
err(1, "sigaction");
}
#ifdef __x86_64__
# define REG_BP REG_RBP
#else
# define REG_BP REG_EBP
#endif
static void empty_handler(int sig, siginfo_t *si, void *ctx_void)
{
}
static void test_sys32_regs(void (*do_syscall)(struct syscall_args32 *))
{
struct syscall_args32 args = {
.nr = 224, /* gettid */
.arg0 = 10, .arg1 = 11, .arg2 = 12,
.arg3 = 13, .arg4 = 14, .arg5 = 15,
};
do_syscall(&args);
if (args.nr != getpid() ||
args.arg0 != 10 || args.arg1 != 11 || args.arg2 != 12 ||
args.arg3 != 13 || args.arg4 != 14 || args.arg5 != 15) {
printf("[FAIL]\tgetpid() failed to preserve regs\n");
nerrs++;
} else {
printf("[OK]\tgetpid() preserves regs\n");
}
sethandler(SIGUSR1, empty_handler, 0);
args.nr = 37; /* kill */
args.arg0 = getpid();
args.arg1 = SIGUSR1;
do_syscall(&args);
if (args.nr != 0 ||
args.arg0 != getpid() || args.arg1 != SIGUSR1 || args.arg2 != 12 ||
args.arg3 != 13 || args.arg4 != 14 || args.arg5 != 15) {
printf("[FAIL]\tkill(getpid(), SIGUSR1) failed to preserve regs\n");
nerrs++;
} else {
printf("[OK]\tkill(getpid(), SIGUSR1) preserves regs\n");
}
clearhandler(SIGUSR1);
}
static void test_ptrace_syscall_restart(void)
{
printf("[RUN]\tptrace-induced syscall restart\n");
pid_t chld = fork();
if (chld < 0)
err(1, "fork");
if (chld == 0) {
if (ptrace(PTRACE_TRACEME, 0, 0, 0) != 0)
err(1, "PTRACE_TRACEME");
pid_t pid = getpid(), tid = syscall(SYS_gettid);
printf("\tChild will make one syscall\n");
syscall(SYS_tgkill, pid, tid, SIGSTOP);
syscall(SYS_gettid, 10, 11, 12, 13, 14, 15);
_exit(0);
}
int status;
/* Wait for SIGSTOP. */
if (waitpid(chld, &status, 0) != chld || !WIFSTOPPED(status))
err(1, "waitpid");
struct user_regs_struct regs;
printf("[RUN]\tSYSEMU\n");
if (ptrace(PTRACE_SYSEMU, chld, 0, 0) != 0)
err(1, "PTRACE_SYSEMU");
wait_trap(chld);
if (ptrace(PTRACE_GETREGS, chld, 0, ®s) != 0)
err(1, "PTRACE_GETREGS");
if (regs.user_syscall_nr != SYS_gettid ||
regs.user_arg0 != 10 || regs.user_arg1 != 11 ||
regs.user_arg2 != 12 || regs.user_arg3 != 13 ||
regs.user_arg4 != 14 || regs.user_arg5 != 15) {
printf("[FAIL]\tInitial args are wrong (nr=%lu, args=%lu %lu %lu %lu %lu %lu)\n", (unsigned long)regs.user_syscall_nr, (unsigned long)regs.user_arg0, (unsigned long)regs.user_arg1, (unsigned long)regs.user_arg2, (unsigned long)regs.user_arg3, (unsigned long)regs.user_arg4, (unsigned long)regs.user_arg5);
nerrs++;
} else {
printf("[OK]\tInitial nr and args are correct\n");
}
printf("[RUN]\tRestart the syscall (ip = 0x%lx)\n",
(unsigned long)regs.user_ip);
/*
* This does exactly what it appears to do if syscall is int80 or
* SYSCALL64. For SYSCALL32 or SYSENTER, though, this is highly
* magical. It needs to work so that ptrace and syscall restart
* work as expected.
*/
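/* Reload AX with the syscall nr and back IP up over the 2-byte syscall instruction (int $0x80, syscall and sysenter are all 2 bytes). */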
regs.user_ax = regs.user_syscall_nr;
regs.user_ip -= 2;
if (ptrace(PTRACE_SETREGS, chld, 0, ®s) != 0)
err(1, "PTRACE_SETREGS");
if (ptrace(PTRACE_SYSEMU, chld, 0, 0) != 0)
err(1, "PTRACE_SYSEMU");
wait_trap(chld);
if (ptrace(PTRACE_GETREGS, chld, 0, ®s) != 0)
err(1, "PTRACE_GETREGS");
if (regs.user_syscall_nr != SYS_gettid ||
regs.user_arg0 != 10 || regs.user_arg1 != 11 ||
regs.user_arg2 != 12 || regs.user_arg3 != 13 ||
regs.user_arg4 != 14 || regs.user_arg5 != 15) {
printf("[FAIL]\tRestart nr or args are wrong (nr=%lu, args=%lu %lu %lu %lu %lu %lu)\n", (unsigned long)regs.user_syscall_nr, (unsigned long)regs.user_arg0, (unsigned long)regs.user_arg1, (unsigned long)regs.user_arg2, (unsigned long)regs.user_arg3, (unsigned long)regs.user_arg4, (unsigned long)regs.user_arg5);
nerrs++;
} else {
printf("[OK]\tRestarted nr and args are correct\n");
}
printf("[RUN]\tChange nr and args and restart the syscall (ip = 0x%lx)\n",
(unsigned long)regs.user_ip);
regs.user_ax = SYS_getpid;
regs.user_arg0 = 20;
regs.user_arg1 = 21;
regs.user_arg2 = 22;
regs.user_arg3 = 23;
regs.user_arg4 = 24;
regs.user_arg5 = 25;
regs.user_ip -= 2;
if (ptrace(PTRACE_SETREGS, chld, 0, ®s) != 0)
err(1, "PTRACE_SETREGS");
if (ptrace(PTRACE_SYSEMU, chld, 0, 0) != 0)
err(1, "PTRACE_SYSEMU");
wait_trap(chld);
if (ptrace(PTRACE_GETREGS, chld, 0, ®s) != 0)
err(1, "PTRACE_GETREGS");
if (regs.user_syscall_nr != SYS_getpid ||
regs.user_arg0 != 20 || regs.user_arg1 != 21 || regs.user_arg2 != 22 ||
regs.user_arg3 != 23 || regs.user_arg4 != 24 || regs.user_arg5 != 25) {
printf("[FAIL]\tRestart nr or args are wrong (nr=%lu, args=%lu %lu %lu %lu %lu %lu)\n", (unsigned long)regs.user_syscall_nr, (unsigned long)regs.user_arg0, (unsigned long)regs.user_arg1, (unsigned long)regs.user_arg2, (unsigned long)regs.user_arg3, (unsigned long)regs.user_arg4, (unsigned long)regs.user_arg5);
nerrs++;
} else {
printf("[OK]\tReplacement nr and args are correct\n");
}
if (ptrace(PTRACE_CONT, chld, 0, 0) != 0)
err(1, "PTRACE_CONT");
if (waitpid(chld, &status, 0) != chld)
err(1, "waitpid");
if (!WIFEXITED(status) || WEXITSTATUS(status) != 0) {
printf("[FAIL]\tChild failed\n");
nerrs++;
} else {
printf("[OK]\tChild exited cleanly\n");
}
}
static void test_restart_under_ptrace(void)
{
printf("[RUN]\tkernel syscall restart under ptrace\n");
pid_t chld = fork();
if (chld < 0)
err(1, "fork");
if (chld == 0) {
if (ptrace(PTRACE_TRACEME, 0, 0, 0) != 0)
err(1, "PTRACE_TRACEME");
pid_t pid = getpid(), tid = syscall(SYS_gettid);
printf("\tChild will take a nap until signaled\n");
setsigign(SIGUSR1, SA_RESTART);
syscall(SYS_tgkill, pid, tid, SIGSTOP);
syscall(SYS_pause, 0, 0, 0, 0, 0, 0);
_exit(0);
}
int status;
/* Wait for SIGSTOP. */
if (waitpid(chld, &status, 0) != chld || !WIFSTOPPED(status))
err(1, "waitpid");
struct user_regs_struct regs;
printf("[RUN]\tSYSCALL\n");
if (ptrace(PTRACE_SYSCALL, chld, 0, 0) != 0)
err(1, "PTRACE_SYSCALL");
wait_trap(chld);
/* We should be stopped at pause(2) entry. */
if (ptrace(PTRACE_GETREGS, chld, 0, ®s) != 0)
err(1, "PTRACE_GETREGS");
if (regs.user_syscall_nr != SYS_pause ||
regs.user_arg0 != 0 || regs.user_arg1 != 0 ||
regs.user_arg2 != 0 || regs.user_arg3 != 0 ||
regs.user_arg4 != 0 || regs.user_arg5 != 0) {
printf("[FAIL]\tInitial args are wrong (nr=%lu, args=%lu %lu %lu %lu %lu %lu)\n", (unsigned long)regs.user_syscall_nr, (unsigned long)regs.user_arg0, (unsigned long)regs.user_arg1, (unsigned long)regs.user_arg2, (unsigned long)regs.user_arg3, (unsigned long)regs.user_arg4, (unsigned long)regs.user_arg5);
nerrs++;
} else {
printf("[OK]\tInitial nr and args are correct\n");
}
/* Interrupt it. */
kill(chld, SIGUSR1);
/* Advance. We should be stopped at exit. */
printf("[RUN]\tSYSCALL\n");
if (ptrace(PTRACE_SYSCALL, chld, 0, 0) != 0)
err(1, "PTRACE_SYSCALL");
wait_trap(chld);
if (ptrace(PTRACE_GETREGS, chld, 0, ®s) != 0)
err(1, "PTRACE_GETREGS");
if (regs.user_syscall_nr != SYS_pause ||
regs.user_arg0 != 0 || regs.user_arg1 != 0 ||
regs.user_arg2 != 0 || regs.user_arg3 != 0 ||
regs.user_arg4 != 0 || regs.user_arg5 != 0) {
printf("[FAIL]\tArgs after SIGUSR1 are wrong (nr=%lu, args=%lu %lu %lu %lu %lu %lu)\n", (unsigned long)regs.user_syscall_nr, (unsigned long)regs.user_arg0, (unsigned long)regs.user_arg1, (unsigned long)regs.user_arg2, (unsigned long)regs.user_arg3, (unsigned long)regs.user_arg4, (unsigned long)regs.user_arg5);
nerrs++;
} else {
printf("[OK]\tArgs after SIGUSR1 are correct (ax = %ld)\n",
(long)regs.user_ax);
}
/* Poke the regs back in. This must not break anything. */
if (ptrace(PTRACE_SETREGS, chld, 0, ®s) != 0)
err(1, "PTRACE_SETREGS");
/* Catch the (ignored) SIGUSR1. */
if (ptrace(PTRACE_CONT, chld, 0, 0) != 0)
err(1, "PTRACE_CONT");
if (waitpid(chld, &status, 0) != chld)
err(1, "waitpid");
if (!WIFSTOPPED(status)) {
printf("[FAIL]\tChild was stopped for SIGUSR1 (status = 0x%x)\n", status);
nerrs++;
} else {
printf("[OK]\tChild got SIGUSR1\n");
}
/* The next event should be pause(2) again. */
printf("[RUN]\tStep again\n");
if (ptrace(PTRACE_SYSCALL, chld, 0, 0) != 0)
err(1, "PTRACE_SYSCALL");
wait_trap(chld);
/* We should be stopped at pause(2) entry. */
if (ptrace(PTRACE_GETREGS, chld, 0, ®s) != 0)
err(1, "PTRACE_GETREGS");
if (regs.user_syscall_nr != SYS_pause ||
regs.user_arg0 != 0 || regs.user_arg1 != 0 ||
regs.user_arg2 != 0 || regs.user_arg3 != 0 ||
regs.user_arg4 != 0 || regs.user_arg5 != 0) {
printf("[FAIL]\tpause did not restart (nr=%lu, args=%lu %lu %lu %lu %lu %lu)\n", (unsigned long)regs.user_syscall_nr, (unsigned long)regs.user_arg0, (unsigned long)regs.user_arg1, (unsigned long)regs.user_arg2, (unsigned long)regs.user_arg3, (unsigned long)regs.user_arg4, (unsigned long)regs.user_arg5);
nerrs++;
} else {
printf("[OK]\tpause(2) restarted correctly\n");
}
/* Kill it. */
kill(chld, SIGKILL);
if (waitpid(chld, &status, 0) != chld)
err(1, "waitpid");
}
int main()
{
printf("[RUN]\tCheck int80 return regs\n");
test_sys32_regs(do_full_int80);
#if defined(__i386__) && (!defined(__GLIBC__) || __GLIBC__ > 2 || __GLIBC_MINOR__ >= 16)
vsyscall32 = (void *)getauxval(AT_SYSINFO);
if (vsyscall32) {
printf("[RUN]\tCheck AT_SYSINFO return regs\n");
test_sys32_regs(do_full_vsyscall32);
} else {
printf("[SKIP]\tAT_SYSINFO is not available\n");
}
#endif
test_ptrace_syscall_restart();
test_restart_under_ptrace();
return 0;
}
| linux-master | tools/testing/selftests/x86/ptrace_syscall.c |
// SPDX-License-Identifier: GPL-2.0-only
#define _GNU_SOURCE
#include <signal.h>
#include <stdio.h>
#include <stdbool.h>
#include <string.h>
#include <err.h>
#include <errno.h>
#include <limits.h>
#include <sys/mman.h>
#include <sys/auxv.h>
#include <sys/prctl.h>
#include <sys/resource.h>
#include <setjmp.h>
/* sigaltstack()-enforced minimum stack */
#define ENFORCED_MINSIGSTKSZ 2048
#ifndef AT_MINSIGSTKSZ
# define AT_MINSIGSTKSZ 51
#endif
static int nerrs;
static bool sigalrm_expected;
static unsigned long at_minstack_size;
static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *),
int flags)
{
struct sigaction sa;
memset(&sa, 0, sizeof(sa));
sa.sa_sigaction = handler;
sa.sa_flags = SA_SIGINFO | flags;
sigemptyset(&sa.sa_mask);
if (sigaction(sig, &sa, 0))
err(1, "sigaction");
}
static void clearhandler(int sig)
{
struct sigaction sa;
memset(&sa, 0, sizeof(sa));
sa.sa_handler = SIG_DFL;
sigemptyset(&sa.sa_mask);
if (sigaction(sig, &sa, 0))
err(1, "sigaction");
}
static int setup_altstack(void *start, unsigned long size)
{
stack_t ss;
memset(&ss, 0, sizeof(ss));
ss.ss_size = size;
ss.ss_sp = start;
return sigaltstack(&ss, NULL);
}
static jmp_buf jmpbuf;
static void sigsegv(int sig, siginfo_t *info, void *ctx_void)
{
if (sigalrm_expected) {
printf("[FAIL]\tWrong signal delivered: SIGSEGV (expected SIGALRM).");
nerrs++;
} else {
printf("[OK]\tSIGSEGV signal delivered.\n");
}
siglongjmp(jmpbuf, 1);
}
static void sigalrm(int sig, siginfo_t *info, void *ctx_void)
{
if (!sigalrm_expected) {
printf("[FAIL]\tWrong signal delivered: SIGALRM (expected SIGSEGV).");
nerrs++;
} else {
printf("[OK]\tSIGALRM signal delivered.\n");
}
}
static void test_sigaltstack(void *altstack, unsigned long size)
{
if (setup_altstack(altstack, size))
err(1, "sigaltstack()");
sigalrm_expected = (size > at_minstack_size) ? true : false;
sethandler(SIGSEGV, sigsegv, 0);
sethandler(SIGALRM, sigalrm, SA_ONSTACK);
if (!sigsetjmp(jmpbuf, 1)) {
printf("[RUN]\tTest an alternate signal stack of %ssufficient size.\n",
sigalrm_expected ? "" : "in");
printf("\tRaise SIGALRM. %s is expected to be delivered.\n",
sigalrm_expected ? "It" : "SIGSEGV");
raise(SIGALRM);
}
clearhandler(SIGALRM);
clearhandler(SIGSEGV);
}
int main(void)
{
void *altstack;
at_minstack_size = getauxval(AT_MINSIGSTKSZ);
altstack = mmap(NULL, at_minstack_size + SIGSTKSZ, PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS | MAP_STACK, -1, 0);
if (altstack == MAP_FAILED)
err(1, "mmap()");
if ((ENFORCED_MINSIGSTKSZ + 1) < at_minstack_size)
test_sigaltstack(altstack, ENFORCED_MINSIGSTKSZ + 1);
test_sigaltstack(altstack, at_minstack_size + SIGSTKSZ);
return nerrs == 0 ? 0 : 1;
}
| linux-master | tools/testing/selftests/x86/sigaltstack.c |
// SPDX-License-Identifier: GPL-2.0
#undef _GNU_SOURCE
#define _GNU_SOURCE 1
#undef __USE_GNU
#define __USE_GNU 1
#include <unistd.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <signal.h>
#include <sys/types.h>
#include <sys/select.h>
#include <sys/time.h>
#include <sys/wait.h>
#include <fenv.h>
unsigned long long res64 = -1;
unsigned int res32 = -1;
unsigned short res16 = -1;
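/*
* fisttp (SSE3) stores ST(0) as an integer using truncation toward zero
* regardless of the FPU rounding mode, then pops the x87 stack.
*/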
int test(void)
{
int ex;
feclearexcept(FE_DIVBYZERO|FE_INEXACT|FE_INVALID|FE_OVERFLOW|FE_UNDERFLOW);
asm volatile ("\n"
" fld1""\n"
" fisttp res16""\n"
" fld1""\n"
" fisttpl res32""\n"
" fld1""\n"
" fisttpll res64""\n"
: : : "memory"
);
if (res16 != 1 || res32 != 1 || res64 != 1) {
printf("[BAD]\tfisttp 1\n");
return 1;
}
ex = fetestexcept(FE_DIVBYZERO|FE_INEXACT|FE_INVALID|FE_OVERFLOW|FE_UNDERFLOW);
if (ex != 0) {
printf("[BAD]\tfisttp 1: wrong exception state\n");
return 1;
}
feclearexcept(FE_DIVBYZERO|FE_INEXACT|FE_INVALID|FE_OVERFLOW|FE_UNDERFLOW);
asm volatile ("\n"
" fldpi""\n"
" fisttp res16""\n"
" fldpi""\n"
" fisttpl res32""\n"
" fldpi""\n"
" fisttpll res64""\n"
: : : "memory"
);
if (res16 != 3 || res32 != 3 || res64 != 3) {
printf("[BAD]\tfisttp pi\n");
return 1;
}
ex = fetestexcept(FE_DIVBYZERO|FE_INEXACT|FE_INVALID|FE_OVERFLOW|FE_UNDERFLOW);
if (ex != FE_INEXACT) {
printf("[BAD]\tfisttp pi: wrong exception state\n");
return 1;
}
feclearexcept(FE_DIVBYZERO|FE_INEXACT|FE_INVALID|FE_OVERFLOW|FE_UNDERFLOW);
asm volatile ("\n"
" fldpi""\n"
" fchs""\n"
" fisttp res16""\n"
" fldpi""\n"
" fchs""\n"
" fisttpl res32""\n"
" fldpi""\n"
" fchs""\n"
" fisttpll res64""\n"
: : : "memory"
);
if (res16 != 0xfffd || res32 != 0xfffffffd || res64 != 0xfffffffffffffffdULL) {
printf("[BAD]\tfisttp -pi\n");
return 1;
}
ex = fetestexcept(FE_DIVBYZERO|FE_INEXACT|FE_INVALID|FE_OVERFLOW|FE_UNDERFLOW);
if (ex != FE_INEXACT) {
printf("[BAD]\tfisttp -pi: wrong exception state\n");
return 1;
}
feclearexcept(FE_DIVBYZERO|FE_INEXACT|FE_INVALID|FE_OVERFLOW|FE_UNDERFLOW);
asm volatile ("\n"
" fldln2""\n"
" fisttp res16""\n"
" fldln2""\n"
" fisttpl res32""\n"
" fldln2""\n"
" fisttpll res64""\n"
: : : "memory"
);
/* Test truncation to zero (round-to-nearest would give 1 here) */
if (res16 != 0 || res32 != 0 || res64 != 0) {
printf("[BAD]\tfisttp ln2\n");
return 1;
}
ex = fetestexcept(FE_DIVBYZERO|FE_INEXACT|FE_INVALID|FE_OVERFLOW|FE_UNDERFLOW);
if (ex != FE_INEXACT) {
printf("[BAD]\tfisttp ln2: wrong exception state\n");
return 1;
}
return 0;
}
void sighandler(int sig)
{
printf("[FAIL]\tGot signal %d, exiting\n", sig);
exit(1);
}
int main(int argc, char **argv, char **envp)
{
int err = 0;
/* SIGILL triggers on 32-bit kernels w/o fisttp emulation
* when run with "no387 nofxsr". Other signals are caught
* just in case.
*/
signal(SIGILL, sighandler);
signal(SIGFPE, sighandler);
signal(SIGSEGV, sighandler);
printf("[RUN]\tTesting fisttp instructions\n");
err |= test();
if (!err)
printf("[OK]\tfisttp\n");
else
printf("[FAIL]\tfisttp errors: %d\n", err);
return err;
}
| linux-master | tools/testing/selftests/x86/test_FISTTP.c |
// SPDX-License-Identifier: GPL-2.0
#undef _GNU_SOURCE
#define _GNU_SOURCE 1
#undef __USE_GNU
#define __USE_GNU 1
#include <unistd.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <signal.h>
#include <sys/types.h>
#include <sys/select.h>
#include <sys/time.h>
#include <sys/wait.h>
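/*
* Each generated helper loads pi, then 1.0, and executes
* "fcmovCC %st(1), %st" under the requested EFLAGS: if the condition
* holds, ST(0) becomes pi; otherwise it stays 1.0.
*/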
#define TEST(insn) \
long double __attribute__((noinline)) insn(long flags) \
{ \
long double out; \
asm ("\n" \
" push %1""\n" \
" popf""\n" \
" fldpi""\n" \
" fld1""\n" \
" " #insn " %%st(1), %%st" "\n" \
" ffree %%st(1)" "\n" \
: "=t" (out) \
: "r" (flags) \
); \
return out; \
}
TEST(fcmovb)
TEST(fcmove)
TEST(fcmovbe)
TEST(fcmovu)
TEST(fcmovnb)
TEST(fcmovne)
TEST(fcmovnbe)
TEST(fcmovnu)
enum {
CF = 1 << 0,
PF = 1 << 2,
ZF = 1 << 6,
};
void sighandler(int sig)
{
printf("[FAIL]\tGot signal %d, exiting\n", sig);
exit(1);
}
int main(int argc, char **argv, char **envp)
{
int err = 0;
/* SIGILL triggers on 32-bit kernels w/o fcomi emulation
* when run with "no387 nofxsr". Other signals are caught
* just in case.
*/
signal(SIGILL, sighandler);
signal(SIGFPE, sighandler);
signal(SIGSEGV, sighandler);
printf("[RUN]\tTesting fcmovCC instructions\n");
/* If fcmovCC() returns 1.0, the move wasn't done */
err |= !(fcmovb(0) == 1.0); err |= !(fcmovnb(0) != 1.0);
err |= !(fcmove(0) == 1.0); err |= !(fcmovne(0) != 1.0);
err |= !(fcmovbe(0) == 1.0); err |= !(fcmovnbe(0) != 1.0);
err |= !(fcmovu(0) == 1.0); err |= !(fcmovnu(0) != 1.0);
err |= !(fcmovb(CF) != 1.0); err |= !(fcmovnb(CF) == 1.0);
err |= !(fcmove(CF) == 1.0); err |= !(fcmovne(CF) != 1.0);
err |= !(fcmovbe(CF) != 1.0); err |= !(fcmovnbe(CF) == 1.0);
err |= !(fcmovu(CF) == 1.0); err |= !(fcmovnu(CF) != 1.0);
err |= !(fcmovb(ZF) == 1.0); err |= !(fcmovnb(ZF) != 1.0);
err |= !(fcmove(ZF) != 1.0); err |= !(fcmovne(ZF) == 1.0);
err |= !(fcmovbe(ZF) != 1.0); err |= !(fcmovnbe(ZF) == 1.0);
err |= !(fcmovu(ZF) == 1.0); err |= !(fcmovnu(ZF) != 1.0);
err |= !(fcmovb(PF) == 1.0); err |= !(fcmovnb(PF) != 1.0);
err |= !(fcmove(PF) == 1.0); err |= !(fcmovne(PF) != 1.0);
err |= !(fcmovbe(PF) == 1.0); err |= !(fcmovnbe(PF) != 1.0);
err |= !(fcmovu(PF) != 1.0); err |= !(fcmovnu(PF) == 1.0);
if (!err)
printf("[OK]\tfcmovCC\n");
else
printf("[FAIL]\tfcmovCC errors: %d\n", err);
return err;
}
| linux-master | tools/testing/selftests/x86/test_FCMOV.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Trivial program to check that we have a valid 64-bit build environment.
* Copyright (c) 2015 Andy Lutomirski
*/
#ifndef __x86_64__
# error wrong architecture
#endif
#include <stdio.h>
int main()
{
printf("\n");
return 0;
}
| linux-master | tools/testing/selftests/x86/trivial_64bit_program.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* syscall_nt.c - checks syscalls with NT set
* Copyright (c) 2014-2015 Andrew Lutomirski
*
* Some obscure user-space code requires the ability to make system calls
* with FLAGS.NT set. Make sure it works.
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <signal.h>
#include <err.h>
#include <sys/syscall.h>
#include "helpers.h"
static unsigned int nerrs;
static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *),
int flags)
{
struct sigaction sa;
memset(&sa, 0, sizeof(sa));
sa.sa_sigaction = handler;
sa.sa_flags = SA_SIGINFO | flags;
sigemptyset(&sa.sa_mask);
if (sigaction(sig, &sa, 0))
err(1, "sigaction");
}
static void sigtrap(int sig, siginfo_t *si, void *ctx_void)
{
}
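/*
* Set the requested EFLAGS bits, make a syscall, and check that the
* bits survived the round trip before restoring sane flags.
*/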
static void do_it(unsigned long extraflags)
{
unsigned long flags;
set_eflags(get_eflags() | extraflags);
syscall(SYS_getpid);
flags = get_eflags();
set_eflags(X86_EFLAGS_IF | X86_EFLAGS_FIXED);
if ((flags & extraflags) == extraflags) {
printf("[OK]\tThe syscall worked and flags are still set\n");
} else {
printf("[FAIL]\tThe syscall worked but flags were cleared (flags = 0x%lx but expected 0x%lx set)\n",
flags, extraflags);
nerrs++;
}
}
int main(void)
{
printf("[RUN]\tSet NT and issue a syscall\n");
do_it(X86_EFLAGS_NT);
printf("[RUN]\tSet AC and issue a syscall\n");
do_it(X86_EFLAGS_AC);
printf("[RUN]\tSet NT|AC and issue a syscall\n");
do_it(X86_EFLAGS_NT | X86_EFLAGS_AC);
/*
* Now try it again with TF set -- TF forces returns via IRET in all
* cases except non-ptregs-using 64-bit full fast path syscalls.
*/
sethandler(SIGTRAP, sigtrap, 0);
printf("[RUN]\tSet TF and issue a syscall\n");
do_it(X86_EFLAGS_TF);
printf("[RUN]\tSet NT|TF and issue a syscall\n");
do_it(X86_EFLAGS_NT | X86_EFLAGS_TF);
printf("[RUN]\tSet AC|TF and issue a syscall\n");
do_it(X86_EFLAGS_AC | X86_EFLAGS_TF);
printf("[RUN]\tSet NT|AC|TF and issue a syscall\n");
do_it(X86_EFLAGS_NT | X86_EFLAGS_AC | X86_EFLAGS_TF);
/*
* Now try DF. This is evil and it's plausible that we will crash
* glibc, but glibc would have to do something rather surprising
* for this to happen.
*/
printf("[RUN]\tSet DF and issue a syscall\n");
do_it(X86_EFLAGS_DF);
printf("[RUN]\tSet TF|DF and issue a syscall\n");
do_it(X86_EFLAGS_TF | X86_EFLAGS_DF);
return nerrs == 0 ? 0 : 1;
}
| linux-master | tools/testing/selftests/x86/syscall_nt.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* single_step_syscall.c - single-steps various x86 syscalls
* Copyright (c) 2014-2015 Andrew Lutomirski
*
* This is a very simple series of tests that makes system calls with
* the TF flag set. This exercises some nasty kernel code in the
* SYSENTER case: SYSENTER does not clear TF, so SYSENTER with TF set
* immediately issues #DB from CPL 0. This requires special handling in
* the kernel.
*/
#define _GNU_SOURCE
#include <sys/time.h>
#include <time.h>
#include <stdlib.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>
#include <sys/mman.h>
#include <sys/signal.h>
#include <sys/ucontext.h>
#include <asm/ldt.h>
#include <err.h>
#include <setjmp.h>
#include <stddef.h>
#include <stdbool.h>
#include <sys/ptrace.h>
#include <sys/user.h>
#include "helpers.h"
static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *),
int flags)
{
struct sigaction sa;
memset(&sa, 0, sizeof(sa));
sa.sa_sigaction = handler;
sa.sa_flags = SA_SIGINFO | flags;
sigemptyset(&sa.sa_mask);
if (sigaction(sig, &sa, 0))
err(1, "sigaction");
}
static void clearhandler(int sig)
{
struct sigaction sa;
memset(&sa, 0, sizeof(sa));
sa.sa_handler = SIG_DFL;
sigemptyset(&sa.sa_mask);
if (sigaction(sig, &sa, 0))
err(1, "sigaction");
}
static volatile sig_atomic_t sig_traps, sig_eflags;
sigjmp_buf jmpbuf;
#ifdef __x86_64__
# define REG_IP REG_RIP
# define WIDTH "q"
# define INT80_CLOBBERS "r8", "r9", "r10", "r11"
#else
# define REG_IP REG_EIP
# define WIDTH "l"
# define INT80_CLOBBERS
#endif
static void sigtrap(int sig, siginfo_t *info, void *ctx_void)
{
ucontext_t *ctx = (ucontext_t*)ctx_void;
if (get_eflags() & X86_EFLAGS_TF) {
set_eflags(get_eflags() & ~X86_EFLAGS_TF);
printf("[WARN]\tSIGTRAP handler had TF set\n");
_exit(1);
}
sig_traps++;
if (sig_traps == 10000 || sig_traps == 10001) {
printf("[WARN]\tHit %d SIGTRAPs with si_addr 0x%lx, ip 0x%lx\n",
(int)sig_traps,
(unsigned long)info->si_addr,
(unsigned long)ctx->uc_mcontext.gregs[REG_IP]);
}
}
static char const * const signames[] = {
[SIGSEGV] = "SIGSEGV",
[SIGBUS] = "SIBGUS",
[SIGTRAP] = "SIGTRAP",
[SIGILL] = "SIGILL",
};
static void print_and_longjmp(int sig, siginfo_t *si, void *ctx_void)
{
ucontext_t *ctx = ctx_void;
printf("\tGot %s with RIP=%lx, TF=%ld\n", signames[sig],
(unsigned long)ctx->uc_mcontext.gregs[REG_IP],
(unsigned long)ctx->uc_mcontext.gregs[REG_EFL] & X86_EFLAGS_TF);
sig_eflags = (unsigned long)ctx->uc_mcontext.gregs[REG_EFL];
siglongjmp(jmpbuf, 1);
}
static void check_result(void)
{
unsigned long new_eflags = get_eflags();
set_eflags(new_eflags & ~X86_EFLAGS_TF);
if (!sig_traps) {
printf("[FAIL]\tNo SIGTRAP\n");
exit(1);
}
if (!(new_eflags & X86_EFLAGS_TF)) {
printf("[FAIL]\tTF was cleared\n");
exit(1);
}
printf("[OK]\tSurvived with TF set and %d traps\n", (int)sig_traps);
sig_traps = 0;
}
static void fast_syscall_no_tf(void)
{
sig_traps = 0;
printf("[RUN]\tFast syscall with TF cleared\n");
fflush(stdout); /* Force a syscall */
if (get_eflags() & X86_EFLAGS_TF) {
printf("[FAIL]\tTF is now set\n");
exit(1);
}
if (sig_traps) {
printf("[FAIL]\tGot SIGTRAP\n");
exit(1);
}
printf("[OK]\tNothing unexpected happened\n");
}
int main()
{
#ifdef CAN_BUILD_32
int tmp;
#endif
sethandler(SIGTRAP, sigtrap, 0);
printf("[RUN]\tSet TF and check nop\n");
set_eflags(get_eflags() | X86_EFLAGS_TF);
asm volatile ("nop");
check_result();
#ifdef __x86_64__
printf("[RUN]\tSet TF and check syscall-less opportunistic sysret\n");
set_eflags(get_eflags() | X86_EFLAGS_TF);
extern unsigned char post_nop[];
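/*
* Arrange for the #DB trap after the NOP to arrive with RCX == RIP
* (post_nop) and R11 == RFLAGS, the register pattern the opportunistic
* SYSRET path looks for, and make sure the return from the trap still
* behaves.
*/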
asm volatile ("pushf" WIDTH "\n\t"
"pop" WIDTH " %%r11\n\t"
"nop\n\t"
"post_nop:"
: : "c" (post_nop) : "r11");
check_result();
#endif
#ifdef CAN_BUILD_32
printf("[RUN]\tSet TF and check int80\n");
set_eflags(get_eflags() | X86_EFLAGS_TF);
asm volatile ("int $0x80" : "=a" (tmp) : "a" (SYS_getpid)
: INT80_CLOBBERS);
check_result();
#endif
/*
* This test is particularly interesting if fast syscalls use
* SYSENTER: it triggers a nasty design flaw in SYSENTER.
* Specifically, SYSENTER does not clear TF, so either SYSENTER
* or the next instruction traps at CPL0. (Of course, Intel
* mostly forgot to document exactly what happens here.) So we
* get a CPL0 fault with usergs (on 64-bit kernels) and possibly
* no stack. The only sane way the kernel can possibly handle
* it is to clear TF on return from the #DB handler, but this
* happens way too early to set TF in the saved pt_regs, so the
* kernel has to do something clever to avoid losing track of
* the TF bit.
*
* Needless to say, we've had bugs in this area.
*/
syscall(SYS_getpid); /* Force symbol binding without TF set. */
printf("[RUN]\tSet TF and check a fast syscall\n");
set_eflags(get_eflags() | X86_EFLAGS_TF);
syscall(SYS_getpid);
check_result();
/* Now make sure that another fast syscall doesn't set TF again. */
fast_syscall_no_tf();
/*
* And do a forced SYSENTER to make sure that this works even if
* fast syscalls don't use SYSENTER.
*
* Invoking SYSENTER directly breaks all the rules. Just handle
* the SIGSEGV.
*/
if (sigsetjmp(jmpbuf, 1) == 0) {
unsigned long nr = SYS_getpid;
printf("[RUN]\tSet TF and check SYSENTER\n");
stack_t stack = {
.ss_sp = malloc(sizeof(char) * SIGSTKSZ),
.ss_size = SIGSTKSZ,
};
if (sigaltstack(&stack, NULL) != 0)
err(1, "sigaltstack");
sethandler(SIGSEGV, print_and_longjmp,
SA_RESETHAND | SA_ONSTACK);
sethandler(SIGILL, print_and_longjmp, SA_RESETHAND);
set_eflags(get_eflags() | X86_EFLAGS_TF);
free(stack.ss_sp);
/* Clear EBP first to make sure we segfault cleanly. */
asm volatile ("xorl %%ebp, %%ebp; SYSENTER" : "+a" (nr) :: "flags", "rcx"
#ifdef __x86_64__
, "r11"
#endif
);
/* We're unreachable here. SYSENTER forgets RIP. */
}
clearhandler(SIGSEGV);
clearhandler(SIGILL);
if (!(sig_eflags & X86_EFLAGS_TF)) {
printf("[FAIL]\tTF was cleared\n");
exit(1);
}
/* Now make sure that another fast syscall doesn't set TF again. */
fast_syscall_no_tf();
return 0;
}
| linux-master | tools/testing/selftests/x86/single_step_syscall.c |
/* SPDX-License-Identifier: GPL-2.0 */
/*
* mov_ss_trap.c: Exercise the bizarre side effects of a watchpoint on MOV SS
*
* This does MOV SS from a watchpointed address followed by various
* types of kernel entries. A MOV SS that hits a watchpoint will queue
* up a #DB trap but will not actually deliver that trap. The trap
* will be delivered after the next instruction instead. The CPU's logic
* seems to be:
*
* - Any fault: drop the pending #DB trap.
* - INT $N, INT3, INTO, SYSCALL, SYSENTER: enter the kernel and then
* deliver #DB.
* - ICEBP: enter the kernel but do not deliver the watchpoint trap
* - breakpoint: only one #DB is delivered (phew!)
*
* There are plenty of ways for a kernel to handle this incorrectly. This
* test tries to exercise all the cases.
*
* This should mostly cover CVE-2018-1087 and CVE-2018-8897.
*/
#define _GNU_SOURCE
#include <stdlib.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <sys/user.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <errno.h>
#include <stddef.h>
#include <stdio.h>
#include <err.h>
#include <string.h>
#include <setjmp.h>
#include <sys/prctl.h>
#define X86_EFLAGS_RF (1UL << 16)
#if __x86_64__
# define REG_IP REG_RIP
#else
# define REG_IP REG_EIP
#endif
unsigned short ss;
extern unsigned char breakpoint_insn[];
sigjmp_buf jmpbuf;
static void enable_watchpoint(void)
{
pid_t parent = getpid();
int status;
pid_t child = fork();
if (child < 0)
err(1, "fork");
if (child) {
if (waitpid(child, &status, 0) != child)
err(1, "waitpid for child");
} else {
unsigned long dr0, dr1, dr7;
dr0 = (unsigned long)&ss;
dr1 = (unsigned long)breakpoint_insn;
dr7 = ((1UL << 1) | /* G0 */
(3UL << 16) | /* RW0 = read or write */
(1UL << 18) | /* LEN0 = 2 bytes */
(1UL << 3)); /* G1, RW1 = insn */
if (ptrace(PTRACE_ATTACH, parent, NULL, NULL) != 0)
err(1, "PTRACE_ATTACH");
if (waitpid(parent, &status, 0) != parent)
err(1, "waitpid for child");
if (ptrace(PTRACE_POKEUSER, parent, (void *)offsetof(struct user, u_debugreg[0]), dr0) != 0)
err(1, "PTRACE_POKEUSER DR0");
if (ptrace(PTRACE_POKEUSER, parent, (void *)offsetof(struct user, u_debugreg[1]), dr1) != 0)
err(1, "PTRACE_POKEUSER DR1");
if (ptrace(PTRACE_POKEUSER, parent, (void *)offsetof(struct user, u_debugreg[7]), dr7) != 0)
err(1, "PTRACE_POKEUSER DR7");
printf("\tDR0 = %lx, DR1 = %lx, DR7 = %lx\n", dr0, dr1, dr7);
if (ptrace(PTRACE_DETACH, parent, NULL, NULL) != 0)
err(1, "PTRACE_DETACH");
exit(0);
}
}
static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *),
int flags)
{
struct sigaction sa;
memset(&sa, 0, sizeof(sa));
sa.sa_sigaction = handler;
sa.sa_flags = SA_SIGINFO | flags;
sigemptyset(&sa.sa_mask);
if (sigaction(sig, &sa, 0))
err(1, "sigaction");
}
static char const * const signames[] = {
[SIGSEGV] = "SIGSEGV",
[SIGBUS] = "SIBGUS",
[SIGTRAP] = "SIGTRAP",
[SIGILL] = "SIGILL",
};
static void sigtrap(int sig, siginfo_t *si, void *ctx_void)
{
ucontext_t *ctx = ctx_void;
printf("\tGot SIGTRAP with RIP=%lx, EFLAGS.RF=%d\n",
(unsigned long)ctx->uc_mcontext.gregs[REG_IP],
!!(ctx->uc_mcontext.gregs[REG_EFL] & X86_EFLAGS_RF));
}
static void handle_and_return(int sig, siginfo_t *si, void *ctx_void)
{
ucontext_t *ctx = ctx_void;
printf("\tGot %s with RIP=%lx\n", signames[sig],
(unsigned long)ctx->uc_mcontext.gregs[REG_IP]);
}
static void handle_and_longjmp(int sig, siginfo_t *si, void *ctx_void)
{
ucontext_t *ctx = ctx_void;
printf("\tGot %s with RIP=%lx\n", signames[sig],
(unsigned long)ctx->uc_mcontext.gregs[REG_IP]);
siglongjmp(jmpbuf, 1);
}
int main()
{
unsigned long nr;
asm volatile ("mov %%ss, %[ss]" : [ss] "=m" (ss));
printf("\tSS = 0x%hx, &SS = 0x%p\n", ss, &ss);
if (prctl(PR_SET_PTRACER, PR_SET_PTRACER_ANY, 0, 0, 0) == 0)
printf("\tPR_SET_PTRACER_ANY succeeded\n");
printf("\tSet up a watchpoint\n");
sethandler(SIGTRAP, sigtrap, 0);
enable_watchpoint();
printf("[RUN]\tRead from watched memory (should get SIGTRAP)\n");
asm volatile ("mov %[ss], %[tmp]" : [tmp] "=r" (nr) : [ss] "m" (ss));
printf("[RUN]\tMOV SS; INT3\n");
asm volatile ("mov %[ss], %%ss; int3" :: [ss] "m" (ss));
printf("[RUN]\tMOV SS; INT 3\n");
asm volatile ("mov %[ss], %%ss; .byte 0xcd, 0x3" :: [ss] "m" (ss));
printf("[RUN]\tMOV SS; CS CS INT3\n");
asm volatile ("mov %[ss], %%ss; .byte 0x2e, 0x2e; int3" :: [ss] "m" (ss));
printf("[RUN]\tMOV SS; CSx14 INT3\n");
asm volatile ("mov %[ss], %%ss; .fill 14,1,0x2e; int3" :: [ss] "m" (ss));
printf("[RUN]\tMOV SS; INT 4\n");
sethandler(SIGSEGV, handle_and_return, SA_RESETHAND);
asm volatile ("mov %[ss], %%ss; int $4" :: [ss] "m" (ss));
#ifdef __i386__
printf("[RUN]\tMOV SS; INTO\n");
sethandler(SIGSEGV, handle_and_return, SA_RESETHAND);
nr = -1;
asm volatile ("add $1, %[tmp]; mov %[ss], %%ss; into"
: [tmp] "+r" (nr) : [ss] "m" (ss));
#endif
if (sigsetjmp(jmpbuf, 1) == 0) {
printf("[RUN]\tMOV SS; ICEBP\n");
/* Some emulators (e.g. QEMU TCG) don't emulate ICEBP. */
sethandler(SIGILL, handle_and_longjmp, SA_RESETHAND);
asm volatile ("mov %[ss], %%ss; .byte 0xf1" :: [ss] "m" (ss));
}
if (sigsetjmp(jmpbuf, 1) == 0) {
printf("[RUN]\tMOV SS; CLI\n");
sethandler(SIGSEGV, handle_and_longjmp, SA_RESETHAND);
asm volatile ("mov %[ss], %%ss; cli" :: [ss] "m" (ss));
}
if (sigsetjmp(jmpbuf, 1) == 0) {
printf("[RUN]\tMOV SS; #PF\n");
sethandler(SIGSEGV, handle_and_longjmp, SA_RESETHAND);
asm volatile ("mov %[ss], %%ss; mov (-1), %[tmp]"
: [tmp] "=r" (nr) : [ss] "m" (ss));
}
/*
* INT $1: if #DB has DPL=3 and there isn't special handling,
* then the kernel will die.
*/
if (sigsetjmp(jmpbuf, 1) == 0) {
printf("[RUN]\tMOV SS; INT 1\n");
sethandler(SIGSEGV, handle_and_longjmp, SA_RESETHAND);
asm volatile ("mov %[ss], %%ss; int $1" :: [ss] "m" (ss));
}
#ifdef __x86_64__
/*
* In principle, we should test 32-bit SYSCALL as well, but
* the calling convention is so unpredictable that it's
* not obviously worth the effort.
*/
if (sigsetjmp(jmpbuf, 1) == 0) {
printf("[RUN]\tMOV SS; SYSCALL\n");
sethandler(SIGILL, handle_and_longjmp, SA_RESETHAND);
nr = SYS_getpid;
/*
* Toggle the high bit of RSP to make it noncanonical to
* strengthen this test on non-SMAP systems.
*/
asm volatile ("btc $63, %%rsp\n\t"
"mov %[ss], %%ss; syscall\n\t"
"btc $63, %%rsp"
: "+a" (nr) : [ss] "m" (ss)
: "rcx"
#ifdef __x86_64__
, "r11"
#endif
);
}
#endif
printf("[RUN]\tMOV SS; breakpointed NOP\n");
asm volatile ("mov %[ss], %%ss; breakpoint_insn: nop" :: [ss] "m" (ss));
/*
* Invoking SYSENTER directly breaks all the rules. Just handle
* the SIGSEGV.
*/
if (sigsetjmp(jmpbuf, 1) == 0) {
printf("[RUN]\tMOV SS; SYSENTER\n");
stack_t stack = {
.ss_sp = malloc(sizeof(char) * SIGSTKSZ),
.ss_size = SIGSTKSZ,
};
if (sigaltstack(&stack, NULL) != 0)
err(1, "sigaltstack");
sethandler(SIGSEGV, handle_and_longjmp, SA_RESETHAND | SA_ONSTACK);
nr = SYS_getpid;
free(stack.ss_sp);
/* Clear EBP first to make sure we segfault cleanly. */
asm volatile ("xorl %%ebp, %%ebp; mov %[ss], %%ss; SYSENTER" : "+a" (nr)
: [ss] "m" (ss) : "flags", "rcx"
#ifdef __x86_64__
, "r11"
#endif
);
/* We're unreachable here. SYSENTER forgets RIP. */
}
if (sigsetjmp(jmpbuf, 1) == 0) {
printf("[RUN]\tMOV SS; INT $0x80\n");
sethandler(SIGSEGV, handle_and_longjmp, SA_RESETHAND);
nr = 20; /* compat getpid */
asm volatile ("mov %[ss], %%ss; int $0x80"
: "+a" (nr) : [ss] "m" (ss)
: "flags"
#ifdef __x86_64__
, "r8", "r9", "r10", "r11"
#endif
);
}
printf("[OK]\tI aten't dead\n");
return 0;
}
| linux-master | tools/testing/selftests/x86/mov_ss_trap.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* sigreturn.c - tests that x86 avoids Intel SYSRET pitfalls
* Copyright (c) 2014-2016 Andrew Lutomirski
*/
#define _GNU_SOURCE
#include <stdlib.h>
#include <unistd.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>
#include <sys/signal.h>
#include <sys/ucontext.h>
#include <sys/syscall.h>
#include <err.h>
#include <stddef.h>
#include <stdbool.h>
#include <setjmp.h>
#include <sys/user.h>
#include <sys/mman.h>
#include <assert.h>
asm (
".pushsection \".text\", \"ax\"\n\t"
".balign 4096\n\t"
"test_page: .globl test_page\n\t"
".fill 4094,1,0xcc\n\t"
"test_syscall_insn:\n\t"
"syscall\n\t"
".ifne . - test_page - 4096\n\t"
".error \"test page is not one page long\"\n\t"
".endif\n\t"
".popsection"
);
extern const char test_page[];
static void const *current_test_page_addr = test_page;
static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *),
int flags)
{
struct sigaction sa;
memset(&sa, 0, sizeof(sa));
sa.sa_sigaction = handler;
sa.sa_flags = SA_SIGINFO | flags;
sigemptyset(&sa.sa_mask);
if (sigaction(sig, &sa, 0))
err(1, "sigaction");
}
static void clearhandler(int sig)
{
struct sigaction sa;
memset(&sa, 0, sizeof(sa));
sa.sa_handler = SIG_DFL;
sigemptyset(&sa.sa_mask);
if (sigaction(sig, &sa, 0))
err(1, "sigaction");
}
/* State used by our signal handlers. */
static gregset_t initial_regs;
static volatile unsigned long rip;
static void sigsegv_for_sigreturn_test(int sig, siginfo_t *info, void *ctx_void)
{
ucontext_t *ctx = (ucontext_t*)ctx_void;
if (rip != ctx->uc_mcontext.gregs[REG_RIP]) {
printf("[FAIL]\tRequested RIP=0x%lx but got RIP=0x%lx\n",
rip, (unsigned long)ctx->uc_mcontext.gregs[REG_RIP]);
fflush(stdout);
_exit(1);
}
memcpy(&ctx->uc_mcontext.gregs, &initial_regs, sizeof(gregset_t));
printf("[OK]\tGot SIGSEGV at RIP=0x%lx\n", rip);
}
static void sigusr1(int sig, siginfo_t *info, void *ctx_void)
{
ucontext_t *ctx = (ucontext_t*)ctx_void;
memcpy(&initial_regs, &ctx->uc_mcontext.gregs, sizeof(gregset_t));
/* Set IP and CX to match so that SYSRET can happen. */
ctx->uc_mcontext.gregs[REG_RIP] = rip;
ctx->uc_mcontext.gregs[REG_RCX] = rip;
/* R11 and EFLAGS should already match. */
assert(ctx->uc_mcontext.gregs[REG_EFL] ==
ctx->uc_mcontext.gregs[REG_R11]);
sethandler(SIGSEGV, sigsegv_for_sigreturn_test, SA_RESETHAND);
return;
}
static void test_sigreturn_to(unsigned long ip)
{
rip = ip;
printf("[RUN]\tsigreturn to 0x%lx\n", ip);
raise(SIGUSR1);
}
static jmp_buf jmpbuf;
static void sigsegv_for_fallthrough(int sig, siginfo_t *info, void *ctx_void)
{
ucontext_t *ctx = (ucontext_t*)ctx_void;
if (rip != ctx->uc_mcontext.gregs[REG_RIP]) {
printf("[FAIL]\tExpected SIGSEGV at 0x%lx but got RIP=0x%lx\n",
rip, (unsigned long)ctx->uc_mcontext.gregs[REG_RIP]);
fflush(stdout);
_exit(1);
}
siglongjmp(jmpbuf, 1);
}
static void test_syscall_fallthrough_to(unsigned long ip)
{
void *new_address = (void *)(ip - 4096);
void *ret;
printf("[RUN]\tTrying a SYSCALL that falls through to 0x%lx\n", ip);
ret = mremap((void *)current_test_page_addr, 4096, 4096,
MREMAP_MAYMOVE | MREMAP_FIXED, new_address);
if (ret == MAP_FAILED) {
if (ip <= (1UL << 47) - PAGE_SIZE) {
err(1, "mremap to %p", new_address);
} else {
printf("[OK]\tmremap to %p failed\n", new_address);
return;
}
}
if (ret != new_address)
errx(1, "mremap malfunctioned: asked for %p but got %p\n",
new_address, ret);
current_test_page_addr = new_address;
rip = ip;
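/* The 2-byte SYSCALL insn occupies the last bytes of the test page, so calling (ip - 2) makes execution fall through to ip. */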
if (sigsetjmp(jmpbuf, 1) == 0) {
asm volatile ("call *%[syscall_insn]" :: "a" (SYS_getpid),
[syscall_insn] "rm" (ip - 2));
errx(1, "[FAIL]\tSyscall trampoline returned");
}
printf("[OK]\tWe survived\n");
}
int main()
{
/*
* When the kernel returns from a slow-path syscall, it will
* detect whether SYSRET is appropriate. If it incorrectly
* thinks that SYSRET is appropriate when RIP is noncanonical,
* it'll crash on Intel CPUs.
*/
sethandler(SIGUSR1, sigusr1, 0);
for (int i = 47; i < 64; i++)
test_sigreturn_to(1UL<<i);
clearhandler(SIGUSR1);
sethandler(SIGSEGV, sigsegv_for_fallthrough, 0);
/* One extra test to check that we didn't screw up the mremap logic. */
test_syscall_fallthrough_to((1UL << 47) - 2*PAGE_SIZE);
/* These are the interesting cases. */
for (int i = 47; i < 64; i++) {
test_syscall_fallthrough_to((1UL<<i) - PAGE_SIZE);
test_syscall_fallthrough_to(1UL<<i);
}
return 0;
}
| linux-master | tools/testing/selftests/x86/sysret_rip.c |
// SPDX-License-Identifier: GPL-2.0
/*
* This program tests basic kernel shadow stack support. It enables shadow
* stack manually via arch_prctl(), instead of relying on glibc. Its
* Makefile doesn't compile with shadow stack support, so it doesn't rely on
* any particular glibc. As a result it can't do any operations that require
* special glibc shadow stack support (longjmp(), swapcontext(), etc). Just
* stick to the basics and hope the compiler doesn't do anything strange.
*/
#define _GNU_SOURCE
#include <sys/syscall.h>
#include <asm/mman.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/wait.h>
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>
#include <string.h>
#include <errno.h>
#include <stdbool.h>
#include <x86intrin.h>
#include <asm/prctl.h>
#include <sys/prctl.h>
#include <stdint.h>
#include <signal.h>
#include <pthread.h>
#include <sys/ioctl.h>
#include <linux/userfaultfd.h>
#include <setjmp.h>
#include <sys/ptrace.h>
#include <sys/signal.h>
#include <linux/elf.h>
/*
 * Define the ABI macros if needed, so people can run the tests
 * without building the kernel headers.
*/
#ifndef __NR_map_shadow_stack
#define __NR_map_shadow_stack 453
#define SHADOW_STACK_SET_TOKEN (1ULL << 0)
#define ARCH_SHSTK_ENABLE 0x5001
#define ARCH_SHSTK_DISABLE 0x5002
#define ARCH_SHSTK_LOCK 0x5003
#define ARCH_SHSTK_UNLOCK 0x5004
#define ARCH_SHSTK_STATUS 0x5005
#define ARCH_SHSTK_SHSTK (1ULL << 0)
#define ARCH_SHSTK_WRSS (1ULL << 1)
#define NT_X86_SHSTK 0x204
#endif
#define SS_SIZE 0x200000
#define PAGE_SIZE 0x1000
#if (__GNUC__ < 8) || (__GNUC__ == 8 && __GNUC_MINOR__ < 5)
int main(int argc, char *argv[])
{
printf("[SKIP]\tCompiler does not support CET.\n");
return 0;
}
#else
void write_shstk(unsigned long *addr, unsigned long val)
{
asm volatile("wrssq %[val], (%[addr])\n"
: "=m" (addr)
: [addr] "r" (addr), [val] "r" (val));
}
static inline unsigned long __attribute__((always_inline)) get_ssp(void)
{
unsigned long ret = 0;
asm volatile("xor %0, %0; rdsspq %0" : "=r" (ret));
return ret;
}
/*
* For use in inline enablement of shadow stack.
*
* The program can't return from the point where shadow stack gets enabled
* because there will be no address on the shadow stack. So it can't use
* syscall() for enablement, since it is a function.
*
* Based on code from nolibc.h. Keep a copy here because this can't pull in all
* of nolibc.h.
*/
#define ARCH_PRCTL(arg1, arg2) \
({ \
long _ret; \
register long _num asm("eax") = __NR_arch_prctl; \
register long _arg1 asm("rdi") = (long)(arg1); \
register long _arg2 asm("rsi") = (long)(arg2); \
\
asm volatile ( \
"syscall\n" \
: "=a"(_ret) \
: "r"(_arg1), "r"(_arg2), \
"0"(_num) \
: "rcx", "r11", "memory", "cc" \
); \
_ret; \
})
void *create_shstk(void *addr)
{
return (void *)syscall(__NR_map_shadow_stack, addr, SS_SIZE, SHADOW_STACK_SET_TOKEN);
}
void *create_normal_mem(void *addr)
{
return mmap(addr, SS_SIZE, PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS, 0, 0);
}
void free_shstk(void *shstk)
{
munmap(shstk, SS_SIZE);
}
int reset_shstk(void *shstk)
{
return madvise(shstk, SS_SIZE, MADV_DONTNEED);
}
void try_shstk(unsigned long new_ssp)
{
unsigned long ssp;
printf("[INFO]\tnew_ssp = %lx, *new_ssp = %lx\n",
new_ssp, *((unsigned long *)new_ssp));
ssp = get_ssp();
printf("[INFO]\tchanging ssp from %lx to %lx\n", ssp, new_ssp);
asm volatile("rstorssp (%0)\n":: "r" (new_ssp));
asm volatile("saveprevssp");
printf("[INFO]\tssp is now %lx\n", get_ssp());
/* Switch back to original shadow stack */
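	/* saveprevssp should have left a restore token 8 bytes below the old SSP. */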
ssp -= 8;
asm volatile("rstorssp (%0)\n":: "r" (ssp));
asm volatile("saveprevssp");
}
int test_shstk_pivot(void)
{
void *shstk = create_shstk(0);
if (shstk == MAP_FAILED) {
printf("[FAIL]\tError creating shadow stack: %d\n", errno);
return 1;
}
try_shstk((unsigned long)shstk + SS_SIZE - 8);
free_shstk(shstk);
printf("[OK]\tShadow stack pivot\n");
return 0;
}
int test_shstk_faults(void)
{
unsigned long *shstk = create_shstk(0);
	/* Read the shadow stack; compare against zero so the read doesn't get optimized out. */
if (*shstk != 0)
goto err;
/* Wrss memory that was already read. */
write_shstk(shstk, 1);
if (*shstk != 1)
goto err;
/* Page out memory, so we can wrss it again. */
if (reset_shstk((void *)shstk))
goto err;
write_shstk(shstk, 1);
if (*shstk != 1)
goto err;
printf("[OK]\tShadow stack faults\n");
return 0;
err:
return 1;
}
unsigned long saved_ssp;
unsigned long saved_ssp_val;
volatile bool segv_triggered;
void __attribute__((noinline)) violate_ss(void)
{
saved_ssp = get_ssp();
saved_ssp_val = *(unsigned long *)saved_ssp;
/* Corrupt shadow stack */
printf("[INFO]\tCorrupting shadow stack\n");
write_shstk((void *)saved_ssp, 0);
}
void segv_handler(int signum, siginfo_t *si, void *uc)
{
printf("[INFO]\tGenerated shadow stack violation successfully\n");
segv_triggered = true;
/* Fix shadow stack */
write_shstk((void *)saved_ssp, saved_ssp_val);
}
int test_shstk_violation(void)
{
struct sigaction sa = {};
sa.sa_sigaction = segv_handler;
sa.sa_flags = SA_SIGINFO;
if (sigaction(SIGSEGV, &sa, NULL))
return 1;
segv_triggered = false;
/* Make sure segv_triggered is set before violate_ss() */
asm volatile("" : : : "memory");
violate_ss();
signal(SIGSEGV, SIG_DFL);
printf("[OK]\tShadow stack violation test\n");
return !segv_triggered;
}
/* Gup test state */
#define MAGIC_VAL 0x12345678
bool is_shstk_access;
void *shstk_ptr;
int fd;
void reset_test_shstk(void *addr)
{
if (shstk_ptr)
free_shstk(shstk_ptr);
shstk_ptr = create_shstk(addr);
}
void test_access_fix_handler(int signum, siginfo_t *si, void *uc)
{
printf("[INFO]\tViolation from %s\n", is_shstk_access ? "shstk access" : "normal write");
segv_triggered = true;
/* Fix shadow stack */
if (is_shstk_access) {
reset_test_shstk(shstk_ptr);
return;
}
free_shstk(shstk_ptr);
create_normal_mem(shstk_ptr);
}
bool test_shstk_access(void *ptr)
{
is_shstk_access = true;
segv_triggered = false;
write_shstk(ptr, MAGIC_VAL);
asm volatile("" : : : "memory");
return segv_triggered;
}
bool test_write_access(void *ptr)
{
is_shstk_access = false;
segv_triggered = false;
*(unsigned long *)ptr = MAGIC_VAL;
asm volatile("" : : : "memory");
return segv_triggered;
}
bool gup_write(void *ptr)
{
unsigned long val;
lseek(fd, (unsigned long)ptr, SEEK_SET);
if (write(fd, &val, sizeof(val)) < 0)
return 1;
return 0;
}
bool gup_read(void *ptr)
{
unsigned long val;
lseek(fd, (unsigned long)ptr, SEEK_SET);
if (read(fd, &val, sizeof(val)) < 0)
return 1;
return 0;
}
int test_gup(void)
{
struct sigaction sa = {};
int status;
pid_t pid;
sa.sa_sigaction = test_access_fix_handler;
sa.sa_flags = SA_SIGINFO;
if (sigaction(SIGSEGV, &sa, NULL))
return 1;
segv_triggered = false;
fd = open("/proc/self/mem", O_RDWR);
if (fd == -1)
return 1;
reset_test_shstk(0);
if (gup_read(shstk_ptr))
return 1;
if (test_shstk_access(shstk_ptr))
return 1;
printf("[INFO]\tGup read -> shstk access success\n");
reset_test_shstk(0);
if (gup_write(shstk_ptr))
return 1;
if (test_shstk_access(shstk_ptr))
return 1;
printf("[INFO]\tGup write -> shstk access success\n");
reset_test_shstk(0);
if (gup_read(shstk_ptr))
return 1;
if (!test_write_access(shstk_ptr))
return 1;
printf("[INFO]\tGup read -> write access success\n");
reset_test_shstk(0);
if (gup_write(shstk_ptr))
return 1;
if (!test_write_access(shstk_ptr))
return 1;
printf("[INFO]\tGup write -> write access success\n");
close(fd);
/* COW/gup test */
reset_test_shstk(0);
pid = fork();
if (!pid) {
fd = open("/proc/self/mem", O_RDWR);
if (fd == -1)
exit(1);
if (gup_write(shstk_ptr)) {
close(fd);
exit(1);
}
close(fd);
exit(0);
}
waitpid(pid, &status, 0);
if (WEXITSTATUS(status)) {
printf("[FAIL]\tWrite in child failed\n");
return 1;
}
if (*(unsigned long *)shstk_ptr == MAGIC_VAL) {
printf("[FAIL]\tWrite in child wrote through to shared memory\n");
return 1;
}
printf("[INFO]\tCow gup write -> write access success\n");
free_shstk(shstk_ptr);
signal(SIGSEGV, SIG_DFL);
printf("[OK]\tShadow gup test\n");
return 0;
}
int test_mprotect(void)
{
struct sigaction sa = {};
sa.sa_sigaction = test_access_fix_handler;
sa.sa_flags = SA_SIGINFO;
if (sigaction(SIGSEGV, &sa, NULL))
return 1;
segv_triggered = false;
/* mprotect a shadow stack as read only */
reset_test_shstk(0);
if (mprotect(shstk_ptr, SS_SIZE, PROT_READ) < 0) {
printf("[FAIL]\tmprotect(PROT_READ) failed\n");
return 1;
}
/* try to wrss it and fail */
if (!test_shstk_access(shstk_ptr)) {
printf("[FAIL]\tShadow stack access to read-only memory succeeded\n");
return 1;
}
/*
* The shadow stack was reset above to resolve the fault, make the new one
* read-only.
*/
if (mprotect(shstk_ptr, SS_SIZE, PROT_READ) < 0) {
printf("[FAIL]\tmprotect(PROT_READ) failed\n");
return 1;
}
/* then back to writable */
if (mprotect(shstk_ptr, SS_SIZE, PROT_WRITE | PROT_READ) < 0) {
printf("[FAIL]\tmprotect(PROT_WRITE) failed\n");
return 1;
}
/* then wrss to it and succeed */
if (test_shstk_access(shstk_ptr)) {
printf("[FAIL]\tShadow stack access to mprotect() writable memory failed\n");
return 1;
}
free_shstk(shstk_ptr);
signal(SIGSEGV, SIG_DFL);
printf("[OK]\tmprotect() test\n");
return 0;
}
char zero[4096];
static void *uffd_thread(void *arg)
{
struct uffdio_copy req;
int uffd = *(int *)arg;
struct uffd_msg msg;
int ret;
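	/* The uffd was opened O_NONBLOCK, so spin until the first fault message arrives. */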
while (1) {
ret = read(uffd, &msg, sizeof(msg));
if (ret > 0)
break;
else if (errno == EAGAIN)
continue;
return (void *)1;
}
req.dst = msg.arg.pagefault.address;
req.src = (__u64)zero;
req.len = 4096;
req.mode = 0;
if (ioctl(uffd, UFFDIO_COPY, &req))
return (void *)1;
return (void *)0;
}
int test_userfaultfd(void)
{
struct uffdio_register uffdio_register;
struct uffdio_api uffdio_api;
struct sigaction sa = {};
pthread_t thread;
void *res;
int uffd;
sa.sa_sigaction = test_access_fix_handler;
sa.sa_flags = SA_SIGINFO;
if (sigaction(SIGSEGV, &sa, NULL))
return 1;
uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
if (uffd < 0) {
printf("[SKIP]\tUserfaultfd unavailable.\n");
return 0;
}
reset_test_shstk(0);
uffdio_api.api = UFFD_API;
uffdio_api.features = 0;
if (ioctl(uffd, UFFDIO_API, &uffdio_api))
goto err;
uffdio_register.range.start = (__u64)shstk_ptr;
uffdio_register.range.len = 4096;
uffdio_register.mode = UFFDIO_REGISTER_MODE_MISSING;
if (ioctl(uffd, UFFDIO_REGISTER, &uffdio_register))
goto err;
if (pthread_create(&thread, NULL, &uffd_thread, &uffd))
goto err;
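	/* Zap the page so the shadow stack access below faults and is resolved by the uffd thread. */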
reset_shstk(shstk_ptr);
test_shstk_access(shstk_ptr);
if (pthread_join(thread, &res))
goto err;
if (test_shstk_access(shstk_ptr))
goto err;
free_shstk(shstk_ptr);
signal(SIGSEGV, SIG_DFL);
if (!res)
printf("[OK]\tUserfaultfd test\n");
return !!res;
err:
free_shstk(shstk_ptr);
close(uffd);
signal(SIGSEGV, SIG_DFL);
return 1;
}
/* Simple linked list for keeping track of mappings in test_guard_gap() */
struct node {
struct node *next;
void *mapping;
};
/*
* This tests whether mmap will place other mappings in a shadow stack's guard
* gap. The steps are:
 * 1. Find an empty place by mapping and unmapping something.
 * 2. Map a shadow stack in the middle of the known empty area.
 * 3. Map a bunch of PAGE_SIZE mappings. These will use the search down
 *    direction, filling any gaps until they encounter the shadow stack's
 *    guard gap.
 * 4. When a mapping lands below the shadow stack from step 2, all of the
 *    above gaps are filled. The search down algorithm will have looked at
 *    the shadow stack's guard gap.
* 5. See if it landed in the gap.
*/
int test_guard_gap(void)
{
void *free_area, *shstk, *test_map = (void *)0xFFFFFFFFFFFFFFFF;
struct node *head = NULL, *cur;
free_area = mmap(0, SS_SIZE * 3, PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
munmap(free_area, SS_SIZE * 3);
shstk = create_shstk(free_area + SS_SIZE);
if (shstk == MAP_FAILED)
return 1;
while (test_map > shstk) {
test_map = mmap(0, PAGE_SIZE, PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
if (test_map == MAP_FAILED)
return 1;
cur = malloc(sizeof(*cur));
cur->mapping = test_map;
cur->next = head;
head = cur;
}
while (head) {
cur = head;
head = cur->next;
munmap(cur->mapping, PAGE_SIZE);
free(cur);
}
free_shstk(shstk);
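	/*
	 * The closest anonymous mapping should have landed one guard page
	 * below the shadow stack, i.e. two pages below its start address.
	 */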
if (shstk - test_map - PAGE_SIZE != PAGE_SIZE)
return 1;
printf("[OK]\tGuard gap test\n");
return 0;
}
/*
* Too complicated to pull it out of the 32 bit header, but also get the
* 64 bit one needed above. Just define a copy here.
*/
#define __NR_compat_sigaction 67
/*
 * Register the signal handler via the 32 bit sigaction syscall to get the
 * 32 bit signal ABI. Make sure to preserve the registers that will get clobbered.
*/
int sigaction32(int signum, const struct sigaction *restrict act,
struct sigaction *restrict oldact)
{
register long syscall_reg asm("eax") = __NR_compat_sigaction;
register long signum_reg asm("ebx") = signum;
register long act_reg asm("ecx") = (long)act;
register long oldact_reg asm("edx") = (long)oldact;
int ret = 0;
asm volatile ("int $0x80;"
: "=a"(ret), "=m"(oldact)
: "r"(syscall_reg), "r"(signum_reg), "r"(act_reg),
"r"(oldact_reg)
: "r8", "r9", "r10", "r11"
);
return ret;
}
sigjmp_buf jmp_buffer;
void segv_gp_handler(int signum, siginfo_t *si, void *uc)
{
segv_triggered = true;
/*
* To work with old glibc, this can't rely on siglongjmp working with
* shadow stack enabled, so disable shadow stack before siglongjmp().
*/
ARCH_PRCTL(ARCH_SHSTK_DISABLE, ARCH_SHSTK_SHSTK);
siglongjmp(jmp_buffer, -1);
}
/*
* Transition to 32 bit mode and check that a #GP triggers a segfault.
*/
int test_32bit(void)
{
struct sigaction sa = {};
struct sigaction *sa32;
/* Create sigaction in 32 bit address range */
sa32 = mmap(0, 4096, PROT_READ | PROT_WRITE,
MAP_32BIT | MAP_PRIVATE | MAP_ANONYMOUS, 0, 0);
sa32->sa_flags = SA_SIGINFO;
sa.sa_sigaction = segv_gp_handler;
sa.sa_flags = SA_SIGINFO;
if (sigaction(SIGSEGV, &sa, NULL))
return 1;
segv_triggered = false;
/* Make sure segv_triggered is set before triggering the #GP */
asm volatile("" : : : "memory");
/*
* Set handler to somewhere in 32 bit address space
*/
sa32->sa_handler = (void *)sa32;
if (sigaction32(SIGUSR1, sa32, NULL))
return 1;
if (!sigsetjmp(jmp_buffer, 1))
raise(SIGUSR1);
if (segv_triggered)
printf("[OK]\t32 bit test\n");
return !segv_triggered;
}
void segv_handler_ptrace(int signum, siginfo_t *si, void *uc)
{
/* The SSP adjustment caused a segfault. */
exit(0);
}
int test_ptrace(void)
{
unsigned long saved_ssp, ssp = 0;
struct sigaction sa= {};
struct iovec iov;
int status;
int pid;
iov.iov_base = &ssp;
iov.iov_len = sizeof(ssp);
pid = fork();
if (!pid) {
ssp = get_ssp();
sa.sa_sigaction = segv_handler_ptrace;
sa.sa_flags = SA_SIGINFO;
if (sigaction(SIGSEGV, &sa, NULL))
return 1;
ptrace(PTRACE_TRACEME, NULL, NULL, NULL);
/*
		 * The parent will tweak the SSP so that the return from this
		 * function will #CP.
*/
raise(SIGTRAP);
exit(1);
}
while (waitpid(pid, &status, 0) != -1 && WSTOPSIG(status) != SIGTRAP);
if (ptrace(PTRACE_GETREGSET, pid, NT_X86_SHSTK, &iov)) {
printf("[INFO]\tFailed to PTRACE_GETREGS\n");
goto out_kill;
}
if (!ssp) {
printf("[INFO]\tPtrace child SSP was 0\n");
goto out_kill;
}
saved_ssp = ssp;
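	/* The following SETREGSET attempts use invalid sizes or SSP values and must all be rejected. */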
iov.iov_len = 0;
if (!ptrace(PTRACE_SETREGSET, pid, NT_X86_SHSTK, &iov)) {
printf("[INFO]\tToo small size accepted via PTRACE_SETREGS\n");
goto out_kill;
}
iov.iov_len = sizeof(ssp) + 1;
if (!ptrace(PTRACE_SETREGSET, pid, NT_X86_SHSTK, &iov)) {
printf("[INFO]\tToo large size accepted via PTRACE_SETREGS\n");
goto out_kill;
}
ssp += 1;
if (!ptrace(PTRACE_SETREGSET, pid, NT_X86_SHSTK, &iov)) {
printf("[INFO]\tUnaligned SSP written via PTRACE_SETREGS\n");
goto out_kill;
}
ssp = 0xFFFFFFFFFFFF0000;
if (!ptrace(PTRACE_SETREGSET, pid, NT_X86_SHSTK, &iov)) {
printf("[INFO]\tKernel range SSP written via PTRACE_SETREGS\n");
goto out_kill;
}
/*
	 * Tweak the SSP so the child will #CP when it resumes and returns
	 * from raise().
*/
ssp = saved_ssp + 8;
iov.iov_len = sizeof(ssp);
if (ptrace(PTRACE_SETREGSET, pid, NT_X86_SHSTK, &iov)) {
printf("[INFO]\tFailed to PTRACE_SETREGS\n");
goto out_kill;
}
if (ptrace(PTRACE_DETACH, pid, NULL, NULL)) {
printf("[INFO]\tFailed to PTRACE_DETACH\n");
goto out_kill;
}
waitpid(pid, &status, 0);
if (WEXITSTATUS(status))
return 1;
printf("[OK]\tPtrace test\n");
return 0;
out_kill:
kill(pid, SIGKILL);
return 1;
}
int main(int argc, char *argv[])
{
int ret = 0;
if (ARCH_PRCTL(ARCH_SHSTK_ENABLE, ARCH_SHSTK_SHSTK)) {
printf("[SKIP]\tCould not enable Shadow stack\n");
return 1;
}
if (ARCH_PRCTL(ARCH_SHSTK_DISABLE, ARCH_SHSTK_SHSTK)) {
ret = 1;
printf("[FAIL]\tDisabling shadow stack failed\n");
}
if (ARCH_PRCTL(ARCH_SHSTK_ENABLE, ARCH_SHSTK_SHSTK)) {
printf("[SKIP]\tCould not re-enable Shadow stack\n");
return 1;
}
if (ARCH_PRCTL(ARCH_SHSTK_ENABLE, ARCH_SHSTK_WRSS)) {
printf("[SKIP]\tCould not enable WRSS\n");
ret = 1;
goto out;
}
/* Should have succeeded if here, but this is a test, so double check. */
if (!get_ssp()) {
printf("[FAIL]\tShadow stack disabled\n");
return 1;
}
if (test_shstk_pivot()) {
ret = 1;
printf("[FAIL]\tShadow stack pivot\n");
goto out;
}
if (test_shstk_faults()) {
ret = 1;
printf("[FAIL]\tShadow stack fault test\n");
goto out;
}
if (test_shstk_violation()) {
ret = 1;
printf("[FAIL]\tShadow stack violation test\n");
goto out;
}
if (test_gup()) {
ret = 1;
		printf("[FAIL]\tShadow stack gup test\n");
goto out;
}
if (test_mprotect()) {
ret = 1;
		printf("[FAIL]\tShadow stack mprotect test\n");
goto out;
}
if (test_userfaultfd()) {
ret = 1;
printf("[FAIL]\tUserfaultfd test\n");
goto out;
}
if (test_guard_gap()) {
ret = 1;
printf("[FAIL]\tGuard gap test\n");
goto out;
}
if (test_ptrace()) {
ret = 1;
printf("[FAIL]\tptrace test\n");
}
if (test_32bit()) {
ret = 1;
printf("[FAIL]\t32 bit test\n");
goto out;
}
return ret;
out:
/*
* Disable shadow stack before the function returns, or there will be a
* shadow stack violation.
*/
if (ARCH_PRCTL(ARCH_SHSTK_DISABLE, ARCH_SHSTK_SHSTK)) {
ret = 1;
printf("[FAIL]\tDisabling shadow stack failed\n");
}
return ret;
}
#endif
| linux-master | tools/testing/selftests/x86/test_shadow_stack.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Corrupt the XSTATE header in a signal frame
*
* Based on analysis and a test case from Thomas Gleixner.
*/
#define _GNU_SOURCE
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <sched.h>
#include <signal.h>
#include <err.h>
#include <unistd.h>
#include <stdint.h>
#include <sys/wait.h>
#include "../kselftest.h" /* For __cpuid_count() */
static inline int xsave_enabled(void)
{
unsigned int eax, ebx, ecx, edx;
__cpuid_count(0x1, 0x0, eax, ebx, ecx, edx);
/* Is CR4.OSXSAVE enabled ? */
return ecx & (1U << 27);
}
static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *),
int flags)
{
struct sigaction sa;
memset(&sa, 0, sizeof(sa));
sa.sa_sigaction = handler;
sa.sa_flags = SA_SIGINFO | flags;
sigemptyset(&sa.sa_mask);
if (sigaction(sig, &sa, 0))
err(1, "sigaction");
}
static void sigusr1(int sig, siginfo_t *info, void *uc_void)
{
ucontext_t *uc = uc_void;
uint8_t *fpstate = (uint8_t *)uc->uc_mcontext.fpregs;
uint64_t *xfeatures = (uint64_t *)(fpstate + 512);
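	/* The 64-byte XSAVE header follows the 512-byte legacy FXSAVE area. */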
printf("\tWreck XSTATE header\n");
/* Wreck the first reserved bytes in the header */
*(xfeatures + 2) = 0xfffffff;
}
static void sigsegv(int sig, siginfo_t *info, void *uc_void)
{
printf("\tGot SIGSEGV\n");
}
int main(void)
{
cpu_set_t set;
sethandler(SIGUSR1, sigusr1, 0);
sethandler(SIGSEGV, sigsegv, 0);
if (!xsave_enabled()) {
printf("[SKIP] CR4.OSXSAVE disabled.\n");
return 0;
}
CPU_ZERO(&set);
CPU_SET(0, &set);
/*
* Enforce that the child runs on the same CPU
* which in turn forces a schedule.
*/
sched_setaffinity(getpid(), sizeof(set), &set);
printf("[RUN]\tSend ourselves a signal\n");
raise(SIGUSR1);
printf("[OK]\tBack from the signal. Now schedule.\n");
pid_t child = fork();
if (child < 0)
err(1, "fork");
if (child == 0)
return 0;
if (child)
waitpid(child, NULL, 0);
printf("[OK]\tBack in the main thread.\n");
/*
* We could try to confirm that extended state is still preserved
* when we schedule. For now, the only indication of failure is
* a warning in the kernel logs.
*/
return 0;
}
| linux-master | tools/testing/selftests/x86/corrupt_xstate_header.c |
/* Trivial program to check that compilation with certain flags is working. */
#include <stdio.h>
int
main(void)
{
puts("");
return 0;
}
| linux-master | tools/testing/selftests/x86/trivial_program.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* fsgsbase_restore.c, test ptrace vs fsgsbase
* Copyright (c) 2020 Andy Lutomirski
*
* This test case simulates a tracer redirecting tracee execution to
* a function and then restoring tracee state using PTRACE_GETREGS and
* PTRACE_SETREGS. This is similar to what gdb does when doing
* 'p func()'. The catch is that this test has the called function
* modify a segment register. This makes sure that ptrace correctly
* restores segment state when using PTRACE_SETREGS.
*
* This is not part of fsgsbase.c, because that test is 64-bit only.
*/
#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <err.h>
#include <sys/user.h>
#include <asm/prctl.h>
#include <sys/prctl.h>
#include <asm/ldt.h>
#include <sys/mman.h>
#include <stddef.h>
#include <sys/ptrace.h>
#include <sys/wait.h>
#include <stdint.h>
#define EXPECTED_VALUE 0x1337f00d
#ifdef __x86_64__
# define SEG "%gs"
#else
# define SEG "%fs"
#endif
static unsigned int dereference_seg_base(void)
{
int ret;
asm volatile ("mov %" SEG ":(0), %0" : "=rm" (ret));
return ret;
}
static void init_seg(void)
{
unsigned int *target = mmap(
NULL, sizeof(unsigned int),
PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS | MAP_32BIT, -1, 0);
if (target == MAP_FAILED)
err(1, "mmap");
*target = EXPECTED_VALUE;
printf("\tsegment base address = 0x%lx\n", (unsigned long)target);
struct user_desc desc = {
.entry_number = 0,
.base_addr = (unsigned int)(uintptr_t)target,
.limit = sizeof(unsigned int) - 1,
.seg_32bit = 1,
.contents = 0, /* Data, grow-up */
.read_exec_only = 0,
.limit_in_pages = 0,
.seg_not_present = 0,
.useable = 0
};
if (syscall(SYS_modify_ldt, 1, &desc, sizeof(desc)) == 0) {
printf("\tusing LDT slot 0\n");
asm volatile ("mov %0, %" SEG :: "rm" ((unsigned short)0x7));
} else {
/* No modify_ldt for us (configured out, perhaps) */
struct user_desc *low_desc = mmap(
NULL, sizeof(desc),
PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS | MAP_32BIT, -1, 0);
memcpy(low_desc, &desc, sizeof(desc));
low_desc->entry_number = -1;
/* 32-bit set_thread_area */
long ret;
asm volatile ("int $0x80"
: "=a" (ret), "+m" (*low_desc)
: "a" (243), "b" (low_desc)
#ifdef __x86_64__
: "r8", "r9", "r10", "r11"
#endif
);
memcpy(&desc, low_desc, sizeof(desc));
munmap(low_desc, sizeof(desc));
if (ret != 0) {
printf("[NOTE]\tcould not create a segment -- can't test anything\n");
exit(0);
}
printf("\tusing GDT slot %d\n", desc.entry_number);
unsigned short sel = (unsigned short)((desc.entry_number << 3) | 0x3);
asm volatile ("mov %0, %" SEG :: "rm" (sel));
}
}
static void tracee_zap_segment(void)
{
/*
* The tracer will redirect execution here. This is meant to
* work like gdb's 'p func()' feature. The tricky bit is that
* we modify a segment register in order to make sure that ptrace
* can correctly restore segment registers.
*/
printf("\tTracee: in tracee_zap_segment()\n");
/*
* Write a nonzero selector with base zero to the segment register.
* Using a null selector would defeat the test on AMD pre-Zen2
* CPUs, as such CPUs don't clear the base when loading a null
* selector.
*/
unsigned short sel;
asm volatile ("mov %%ss, %0\n\t"
"mov %0, %" SEG
: "=rm" (sel));
pid_t pid = getpid(), tid = syscall(SYS_gettid);
printf("\tTracee is going back to sleep\n");
syscall(SYS_tgkill, pid, tid, SIGSTOP);
/* Should not get here. */
while (true) {
printf("[FAIL]\tTracee hit unreachable code\n");
pause();
}
}
int main()
{
printf("\tSetting up a segment\n");
init_seg();
unsigned int val = dereference_seg_base();
if (val != EXPECTED_VALUE) {
printf("[FAIL]\tseg[0] == %x; should be %x\n", val, EXPECTED_VALUE);
return 1;
}
printf("[OK]\tThe segment points to the right place.\n");
pid_t chld = fork();
if (chld < 0)
err(1, "fork");
if (chld == 0) {
prctl(PR_SET_PDEATHSIG, SIGKILL, 0, 0, 0, 0);
if (ptrace(PTRACE_TRACEME, 0, 0, 0) != 0)
err(1, "PTRACE_TRACEME");
pid_t pid = getpid(), tid = syscall(SYS_gettid);
printf("\tTracee will take a nap until signaled\n");
syscall(SYS_tgkill, pid, tid, SIGSTOP);
printf("\tTracee was resumed. Will re-check segment.\n");
val = dereference_seg_base();
if (val != EXPECTED_VALUE) {
printf("[FAIL]\tseg[0] == %x; should be %x\n", val, EXPECTED_VALUE);
exit(1);
}
printf("[OK]\tThe segment points to the right place.\n");
exit(0);
}
int status;
/* Wait for SIGSTOP. */
if (waitpid(chld, &status, 0) != chld || !WIFSTOPPED(status))
err(1, "waitpid");
struct user_regs_struct regs;
if (ptrace(PTRACE_GETREGS, chld, NULL, ®s) != 0)
err(1, "PTRACE_GETREGS");
#ifdef __x86_64__
printf("\tChild GS=0x%lx, GSBASE=0x%lx\n", (unsigned long)regs.gs, (unsigned long)regs.gs_base);
#else
printf("\tChild FS=0x%lx\n", (unsigned long)regs.xfs);
#endif
struct user_regs_struct regs2 = regs;
#ifdef __x86_64__
regs2.rip = (unsigned long)tracee_zap_segment;
regs2.rsp -= 128; /* Don't clobber the redzone. */
#else
regs2.eip = (unsigned long)tracee_zap_segment;
#endif
printf("\tTracer: redirecting tracee to tracee_zap_segment()\n");
	if (ptrace(PTRACE_SETREGS, chld, NULL, &regs2) != 0)
		err(1, "PTRACE_SETREGS");
	if (ptrace(PTRACE_CONT, chld, NULL, NULL) != 0)
		err(1, "PTRACE_CONT");
/* Wait for SIGSTOP. */
if (waitpid(chld, &status, 0) != chld || !WIFSTOPPED(status))
err(1, "waitpid");
printf("\tTracer: restoring tracee state\n");
	if (ptrace(PTRACE_SETREGS, chld, NULL, &regs) != 0)
		err(1, "PTRACE_SETREGS");
	if (ptrace(PTRACE_DETACH, chld, NULL, NULL) != 0)
		err(1, "PTRACE_DETACH");
	/* Wait for the child to exit. */
if (waitpid(chld, &status, 0) != chld)
err(1, "waitpid");
if (WIFSIGNALED(status)) {
printf("[FAIL]\tTracee crashed\n");
return 1;
}
if (!WIFEXITED(status)) {
printf("[FAIL]\tTracee stopped for an unexpected reason: %d\n", status);
return 1;
}
int exitcode = WEXITSTATUS(status);
if (exitcode != 0) {
printf("[FAIL]\tTracee reported failure\n");
return 1;
}
printf("[OK]\tAll is well.\n");
return 0;
}
| linux-master | tools/testing/selftests/x86/fsgsbase_restore.c |
/* SPDX-License-Identifier: GPL-2.0 */
/*
* syscall_numbering.c - test calling the x86-64 kernel with various
* valid and invalid system call numbers.
*
* Copyright (c) 2018 Andrew Lutomirski
*/
#define _GNU_SOURCE
#include <stdlib.h>
#include <stdio.h>
#include <stdbool.h>
#include <errno.h>
#include <unistd.h>
#include <string.h>
#include <fcntl.h>
#include <limits.h>
#include <signal.h>
#include <sysexits.h>
#include <sys/ptrace.h>
#include <sys/user.h>
#include <sys/wait.h>
#include <sys/mman.h>
#include <linux/ptrace.h>
/* Common system call numbers */
#define SYS_READ 0
#define SYS_WRITE 1
#define SYS_GETPID 39
/* x64-only system call numbers */
#define X64_IOCTL 16
#define X64_READV 19
#define X64_WRITEV 20
/* x32-only system call numbers (without X32_BIT) */
#define X32_IOCTL 514
#define X32_READV 515
#define X32_WRITEV 516
#define X32_BIT 0x40000000
static int nullfd = -1; /* File descriptor for /dev/null */
static bool with_x32; /* x32 supported on this kernel? */
enum ptrace_pass {
PTP_NOTHING,
PTP_GETREGS,
PTP_WRITEBACK,
PTP_FUZZRET,
PTP_FUZZHIGH,
PTP_INTNUM,
PTP_DONE
};
static const char * const ptrace_pass_name[] =
{
[PTP_NOTHING] = "just stop, no data read",
[PTP_GETREGS] = "only getregs",
[PTP_WRITEBACK] = "getregs, unmodified setregs",
[PTP_FUZZRET] = "modifying the default return",
[PTP_FUZZHIGH] = "clobbering the top 32 bits",
[PTP_INTNUM] = "sign-extending the syscall number",
};
/*
* Shared memory block between tracer and test
*/
struct shared {
unsigned int nerr; /* Total error count */
unsigned int indent; /* Message indentation level */
enum ptrace_pass ptrace_pass;
bool probing_syscall; /* In probe_syscall() */
};
static volatile struct shared *sh;
static inline unsigned int offset(void)
{
unsigned int level = sh ? sh->indent : 0;
return 8 + level * 4;
}
#define msg(lvl, fmt, ...) printf("%-*s" fmt, offset(), "[" #lvl "]", \
## __VA_ARGS__)
#define run(fmt, ...) msg(RUN, fmt, ## __VA_ARGS__)
#define info(fmt, ...) msg(INFO, fmt, ## __VA_ARGS__)
#define ok(fmt, ...) msg(OK, fmt, ## __VA_ARGS__)
#define fail(fmt, ...) \
do { \
msg(FAIL, fmt, ## __VA_ARGS__); \
sh->nerr++; \
} while (0)
#define crit(fmt, ...) \
do { \
sh->indent = 0; \
msg(FAIL, fmt, ## __VA_ARGS__); \
msg(SKIP, "Unable to run test\n"); \
exit(EX_OSERR); \
} while (0)
/* Sentinel for ptrace-modified return value */
#define MODIFIED_BY_PTRACE -9999
/*
* Directly invokes the given syscall with nullfd as the first argument
* and the rest zero. Avoids involving glibc wrappers in case they ever
* end up intercepting some system calls for some reason, or modify
* the system call number itself.
*/
static long long probe_syscall(int msb, int lsb)
{
register long long arg1 asm("rdi") = nullfd;
register long long arg2 asm("rsi") = 0;
register long long arg3 asm("rdx") = 0;
register long long arg4 asm("r10") = 0;
register long long arg5 asm("r8") = 0;
register long long arg6 asm("r9") = 0;
long long nr = ((long long)msb << 32) | (unsigned int)lsb;
long long ret;
/*
* We pass in an extra copy of the extended system call number
* in %rbx, so we can examine it from the ptrace handler without
* worrying about it being possibly modified. This is to test
* the validity of struct user regs.orig_rax a.k.a.
* struct pt_regs.orig_ax.
*/
sh->probing_syscall = true;
asm volatile("syscall"
: "=a" (ret)
: "a" (nr), "b" (nr),
"r" (arg1), "r" (arg2), "r" (arg3),
"r" (arg4), "r" (arg5), "r" (arg6)
: "rcx", "r11", "memory", "cc");
sh->probing_syscall = false;
return ret;
}
static const char *syscall_str(int msb, int start, int end)
{
static char buf[64];
const char * const type = (start & X32_BIT) ? "x32" : "x64";
int lsb = start;
/*
* Improve readability by stripping the x32 bit, but round
* toward zero so we don't display -1 as -1073741825.
*/
if (lsb < 0)
lsb |= X32_BIT;
else
lsb &= ~X32_BIT;
if (start == end)
snprintf(buf, sizeof buf, "%s syscall %d:%d",
type, msb, lsb);
else
snprintf(buf, sizeof buf, "%s syscalls %d:%d..%d",
type, msb, lsb, lsb + (end-start));
return buf;
}
static unsigned int _check_for(int msb, int start, int end, long long expect,
const char *expect_str)
{
unsigned int err = 0;
sh->indent++;
if (start != end)
sh->indent++;
for (int nr = start; nr <= end; nr++) {
long long ret = probe_syscall(msb, nr);
if (ret != expect) {
fail("%s returned %lld, but it should have returned %s\n",
syscall_str(msb, nr, nr),
ret, expect_str);
err++;
}
}
if (start != end)
sh->indent--;
if (err) {
if (start != end)
fail("%s had %u failure%s\n",
syscall_str(msb, start, end),
			     err, err != 1 ? "s" : "");
} else {
ok("%s returned %s as expected\n",
syscall_str(msb, start, end), expect_str);
}
sh->indent--;
return err;
}
#define check_for(msb,start,end,expect) \
_check_for(msb,start,end,expect,#expect)
static bool check_zero(int msb, int nr)
{
return check_for(msb, nr, nr, 0);
}
static bool check_enosys(int msb, int nr)
{
return check_for(msb, nr, nr, -ENOSYS);
}
/*
* Anyone diagnosing a failure will want to know whether the kernel
* supports x32. Tell them. This can also be used to conditionalize
* tests based on existence or nonexistence of x32.
*/
static bool test_x32(void)
{
long long ret;
pid_t mypid = getpid();
run("Checking for x32 by calling x32 getpid()\n");
ret = probe_syscall(0, SYS_GETPID | X32_BIT);
sh->indent++;
if (ret == mypid) {
info("x32 is supported\n");
with_x32 = true;
} else if (ret == -ENOSYS) {
info("x32 is not supported\n");
with_x32 = false;
} else {
fail("x32 getpid() returned %lld, but it should have returned either %lld or -ENOSYS\n", ret, (long long)mypid);
with_x32 = false;
}
sh->indent--;
return with_x32;
}
static void test_syscalls_common(int msb)
{
enum ptrace_pass pass = sh->ptrace_pass;
run("Checking some common syscalls as 64 bit\n");
check_zero(msb, SYS_READ);
check_zero(msb, SYS_WRITE);
run("Checking some 64-bit only syscalls as 64 bit\n");
check_zero(msb, X64_READV);
check_zero(msb, X64_WRITEV);
run("Checking out of range system calls\n");
check_for(msb, -64, -2, -ENOSYS);
if (pass >= PTP_FUZZRET)
check_for(msb, -1, -1, MODIFIED_BY_PTRACE);
else
check_for(msb, -1, -1, -ENOSYS);
check_for(msb, X32_BIT-64, X32_BIT-1, -ENOSYS);
check_for(msb, -64-X32_BIT, -1-X32_BIT, -ENOSYS);
check_for(msb, INT_MAX-64, INT_MAX-1, -ENOSYS);
}
static void test_syscalls_with_x32(int msb)
{
/*
* Syscalls 512-547 are "x32" syscalls. They are
* intended to be called with the x32 (0x40000000) bit
* set. Calling them without the x32 bit set is
* nonsense and should not work.
*/
run("Checking x32 syscalls as 64 bit\n");
check_for(msb, 512, 547, -ENOSYS);
run("Checking some common syscalls as x32\n");
check_zero(msb, SYS_READ | X32_BIT);
check_zero(msb, SYS_WRITE | X32_BIT);
run("Checking some x32 syscalls as x32\n");
check_zero(msb, X32_READV | X32_BIT);
check_zero(msb, X32_WRITEV | X32_BIT);
run("Checking some 64-bit syscalls as x32\n");
check_enosys(msb, X64_IOCTL | X32_BIT);
check_enosys(msb, X64_READV | X32_BIT);
check_enosys(msb, X64_WRITEV | X32_BIT);
}
static void test_syscalls_without_x32(int msb)
{
run("Checking for absence of x32 system calls\n");
check_for(msb, 0 | X32_BIT, 999 | X32_BIT, -ENOSYS);
}
static void test_syscall_numbering(void)
{
static const int msbs[] = {
0, 1, -1, X32_BIT-1, X32_BIT, X32_BIT-1, -X32_BIT, INT_MAX,
INT_MIN, INT_MIN+1
};
sh->indent++;
/*
* The MSB is supposed to be ignored, so we loop over a few
* to test that out.
*/
for (size_t i = 0; i < sizeof(msbs)/sizeof(msbs[0]); i++) {
int msb = msbs[i];
run("Checking system calls with msb = %d (0x%x)\n",
msb, msb);
sh->indent++;
test_syscalls_common(msb);
if (with_x32)
test_syscalls_with_x32(msb);
else
test_syscalls_without_x32(msb);
sh->indent--;
}
sh->indent--;
}
static void syscall_numbering_tracee(void)
{
enum ptrace_pass pass;
if (ptrace(PTRACE_TRACEME, 0, 0, 0)) {
crit("Failed to request tracing\n");
return;
}
raise(SIGSTOP);
for (sh->ptrace_pass = pass = PTP_NOTHING; pass < PTP_DONE;
sh->ptrace_pass = ++pass) {
run("Running tests under ptrace: %s\n", ptrace_pass_name[pass]);
test_syscall_numbering();
}
}
static void mess_with_syscall(pid_t testpid, enum ptrace_pass pass)
{
struct user_regs_struct regs;
sh->probing_syscall = false; /* Do this on entry only */
/* For these, don't even getregs */
if (pass == PTP_NOTHING || pass == PTP_DONE)
return;
ptrace(PTRACE_GETREGS, testpid, NULL, ®s);
if (regs.orig_rax != regs.rbx) {
fail("orig_rax %#llx doesn't match syscall number %#llx\n",
(unsigned long long)regs.orig_rax,
(unsigned long long)regs.rbx);
}
switch (pass) {
case PTP_GETREGS:
/* Just read, no writeback */
return;
case PTP_WRITEBACK:
/* Write back the same register state verbatim */
break;
case PTP_FUZZRET:
regs.rax = MODIFIED_BY_PTRACE;
break;
case PTP_FUZZHIGH:
regs.rax = MODIFIED_BY_PTRACE;
regs.orig_rax = regs.orig_rax | 0xffffffff00000000ULL;
break;
case PTP_INTNUM:
regs.rax = MODIFIED_BY_PTRACE;
regs.orig_rax = (int)regs.orig_rax;
break;
default:
crit("invalid ptrace_pass\n");
break;
}
ptrace(PTRACE_SETREGS, testpid, NULL, ®s);
}
static void syscall_numbering_tracer(pid_t testpid)
{
int wstatus;
do {
pid_t wpid = waitpid(testpid, &wstatus, 0);
if (wpid < 0 && errno != EINTR)
break;
if (wpid != testpid)
continue;
if (!WIFSTOPPED(wstatus))
break; /* Thread exited? */
if (sh->probing_syscall && WSTOPSIG(wstatus) == SIGTRAP)
mess_with_syscall(testpid, sh->ptrace_pass);
} while (sh->ptrace_pass != PTP_DONE &&
!ptrace(PTRACE_SYSCALL, testpid, NULL, NULL));
ptrace(PTRACE_DETACH, testpid, NULL, NULL);
/* Wait for the child process to terminate */
while (waitpid(testpid, &wstatus, 0) != testpid || !WIFEXITED(wstatus))
/* wait some more */;
}
static void test_traced_syscall_numbering(void)
{
pid_t testpid;
/* Launch the test thread; this thread continues as the tracer thread */
testpid = fork();
if (testpid < 0) {
crit("Unable to launch tracer process\n");
} else if (testpid == 0) {
syscall_numbering_tracee();
_exit(0);
} else {
syscall_numbering_tracer(testpid);
}
}
int main(void)
{
unsigned int nerr;
/*
* It is quite likely to get a segfault on a failure, so make
* sure the message gets out by setting stdout to nonbuffered.
*/
setvbuf(stdout, NULL, _IONBF, 0);
/*
* Harmless file descriptor to work on...
*/
nullfd = open("/dev/null", O_RDWR);
if (nullfd < 0) {
crit("Unable to open /dev/null: %s\n", strerror(errno));
}
/*
* Set up a block of shared memory...
*/
sh = mmap(NULL, sysconf(_SC_PAGE_SIZE), PROT_READ|PROT_WRITE,
MAP_ANONYMOUS|MAP_SHARED, 0, 0);
if (sh == MAP_FAILED) {
		crit("Unable to allocate shared memory block: %s\n",
strerror(errno));
}
with_x32 = test_x32();
run("Running tests without ptrace...\n");
test_syscall_numbering();
test_traced_syscall_numbering();
nerr = sh->nerr;
if (!nerr) {
ok("All system calls succeeded or failed as expected\n");
return 0;
} else {
fail("A total of %u system call%s had incorrect behavior\n",
nerr, nerr != 1 ? "s" : "");
return 1;
}
}
| linux-master | tools/testing/selftests/x86/syscall_numbering.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2016-20 Intel Corporation. */
#define _GNU_SOURCE
#include <assert.h>
#include <getopt.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
#include <openssl/err.h>
#include <openssl/pem.h>
#include "defines.h"
#include "main.h"
/*
* FIXME: OpenSSL 3.0 has deprecated some functions. For now just ignore
* the warnings.
*/
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
struct q1q2_ctx {
BN_CTX *bn_ctx;
BIGNUM *m;
BIGNUM *s;
BIGNUM *q1;
BIGNUM *qr;
BIGNUM *q2;
};
static void free_q1q2_ctx(struct q1q2_ctx *ctx)
{
BN_CTX_free(ctx->bn_ctx);
BN_free(ctx->m);
BN_free(ctx->s);
BN_free(ctx->q1);
BN_free(ctx->qr);
BN_free(ctx->q2);
}
static bool alloc_q1q2_ctx(const uint8_t *s, const uint8_t *m,
struct q1q2_ctx *ctx)
{
ctx->bn_ctx = BN_CTX_new();
ctx->s = BN_bin2bn(s, SGX_MODULUS_SIZE, NULL);
ctx->m = BN_bin2bn(m, SGX_MODULUS_SIZE, NULL);
ctx->q1 = BN_new();
ctx->qr = BN_new();
ctx->q2 = BN_new();
if (!ctx->bn_ctx || !ctx->s || !ctx->m || !ctx->q1 || !ctx->qr ||
!ctx->q2) {
free_q1q2_ctx(ctx);
return false;
}
return true;
}
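/*
 * OpenSSL's BN_bn2bin() emits big-endian data, while SIGSTRUCT stores its
 * numbers little-endian, so byte-swap buffers in place.
 */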
static void reverse_bytes(void *data, int length)
{
int i = 0;
int j = length - 1;
uint8_t temp;
uint8_t *ptr = data;
while (i < j) {
temp = ptr[i];
ptr[i] = ptr[j];
ptr[j] = temp;
i++;
j--;
}
}
static bool calc_q1q2(const uint8_t *s, const uint8_t *m, uint8_t *q1,
uint8_t *q2)
{
struct q1q2_ctx ctx;
int len;
if (!alloc_q1q2_ctx(s, m, &ctx)) {
fprintf(stderr, "Not enough memory for Q1Q2 calculation\n");
return false;
}
if (!BN_mul(ctx.q1, ctx.s, ctx.s, ctx.bn_ctx))
goto out;
if (!BN_div(ctx.q1, ctx.qr, ctx.q1, ctx.m, ctx.bn_ctx))
goto out;
if (BN_num_bytes(ctx.q1) > SGX_MODULUS_SIZE) {
fprintf(stderr, "Too large Q1 %d bytes\n",
BN_num_bytes(ctx.q1));
goto out;
}
if (!BN_mul(ctx.q2, ctx.s, ctx.qr, ctx.bn_ctx))
goto out;
if (!BN_div(ctx.q2, NULL, ctx.q2, ctx.m, ctx.bn_ctx))
goto out;
if (BN_num_bytes(ctx.q2) > SGX_MODULUS_SIZE) {
fprintf(stderr, "Too large Q2 %d bytes\n",
BN_num_bytes(ctx.q2));
goto out;
}
len = BN_bn2bin(ctx.q1, q1);
reverse_bytes(q1, len);
len = BN_bn2bin(ctx.q2, q2);
reverse_bytes(q2, len);
free_q1q2_ctx(&ctx);
return true;
out:
free_q1q2_ctx(&ctx);
return false;
}
struct sgx_sigstruct_payload {
struct sgx_sigstruct_header header;
struct sgx_sigstruct_body body;
};
static bool check_crypto_errors(void)
{
int err;
bool had_errors = false;
const char *filename;
int line;
char str[256];
for ( ; ; ) {
if (ERR_peek_error() == 0)
break;
had_errors = true;
err = ERR_get_error_line(&filename, &line);
ERR_error_string_n(err, str, sizeof(str));
fprintf(stderr, "crypto: %s: %s:%d\n", str, filename, line);
}
return had_errors;
}
static inline const BIGNUM *get_modulus(RSA *key)
{
const BIGNUM *n;
RSA_get0_key(key, &n, NULL, NULL);
return n;
}
static RSA *gen_sign_key(void)
{
unsigned long sign_key_length;
BIO *bio;
RSA *key;
sign_key_length = (unsigned long)&sign_key_end -
(unsigned long)&sign_key;
bio = BIO_new_mem_buf(&sign_key, sign_key_length);
if (!bio)
return NULL;
key = PEM_read_bio_RSAPrivateKey(bio, NULL, NULL, NULL);
BIO_free(bio);
return key;
}
enum mrtags {
MRECREATE = 0x0045544145524345,
MREADD = 0x0000000044444145,
MREEXTEND = 0x00444E4554584545,
};
static bool mrenclave_update(EVP_MD_CTX *ctx, const void *data)
{
if (!EVP_DigestUpdate(ctx, data, 64)) {
fprintf(stderr, "digest update failed\n");
return false;
}
return true;
}
static bool mrenclave_commit(EVP_MD_CTX *ctx, uint8_t *mrenclave)
{
unsigned int size;
if (!EVP_DigestFinal_ex(ctx, (unsigned char *)mrenclave, &size)) {
fprintf(stderr, "digest commit failed\n");
return false;
}
if (size != 32) {
fprintf(stderr, "invalid digest size = %u\n", size);
return false;
}
return true;
}
struct mrecreate {
uint64_t tag;
uint32_t ssaframesize;
uint64_t size;
uint8_t reserved[44];
} __attribute__((__packed__));
static bool mrenclave_ecreate(EVP_MD_CTX *ctx, uint64_t blob_size)
{
struct mrecreate mrecreate;
uint64_t encl_size;
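	/* SGX requires the enclave size to be a power of two; round the blob size up. */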
for (encl_size = 0x1000; encl_size < blob_size; )
encl_size <<= 1;
memset(&mrecreate, 0, sizeof(mrecreate));
mrecreate.tag = MRECREATE;
mrecreate.ssaframesize = 1;
mrecreate.size = encl_size;
if (!EVP_DigestInit_ex(ctx, EVP_sha256(), NULL))
return false;
return mrenclave_update(ctx, &mrecreate);
}
struct mreadd {
uint64_t tag;
uint64_t offset;
uint64_t flags; /* SECINFO flags */
uint8_t reserved[40];
} __attribute__((__packed__));
static bool mrenclave_eadd(EVP_MD_CTX *ctx, uint64_t offset, uint64_t flags)
{
struct mreadd mreadd;
memset(&mreadd, 0, sizeof(mreadd));
mreadd.tag = MREADD;
mreadd.offset = offset;
mreadd.flags = flags;
return mrenclave_update(ctx, &mreadd);
}
struct mreextend {
uint64_t tag;
uint64_t offset;
uint8_t reserved[48];
} __attribute__((__packed__));
static bool mrenclave_eextend(EVP_MD_CTX *ctx, uint64_t offset,
const uint8_t *data)
{
struct mreextend mreextend;
int i;
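	/* EEXTEND measures 256 bytes at a time: an MREEXTEND record followed by four 64-byte chunks. */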
for (i = 0; i < 0x1000; i += 0x100) {
memset(&mreextend, 0, sizeof(mreextend));
mreextend.tag = MREEXTEND;
mreextend.offset = offset + i;
if (!mrenclave_update(ctx, &mreextend))
return false;
if (!mrenclave_update(ctx, &data[i + 0x00]))
return false;
if (!mrenclave_update(ctx, &data[i + 0x40]))
return false;
if (!mrenclave_update(ctx, &data[i + 0x80]))
return false;
if (!mrenclave_update(ctx, &data[i + 0xC0]))
return false;
}
return true;
}
static bool mrenclave_segment(EVP_MD_CTX *ctx, struct encl *encl,
struct encl_segment *seg)
{
uint64_t end = seg->size;
uint64_t offset;
for (offset = 0; offset < end; offset += PAGE_SIZE) {
if (!mrenclave_eadd(ctx, seg->offset + offset, seg->flags))
return false;
if (seg->measure) {
if (!mrenclave_eextend(ctx, seg->offset + offset, seg->src + offset))
return false;
}
}
return true;
}
bool encl_measure(struct encl *encl)
{
uint64_t header1[2] = {0x000000E100000006, 0x0000000000010000};
uint64_t header2[2] = {0x0000006000000101, 0x0000000100000060};
struct sgx_sigstruct *sigstruct = &encl->sigstruct;
struct sgx_sigstruct_payload payload;
uint8_t digest[SHA256_DIGEST_LENGTH];
unsigned int siglen;
RSA *key = NULL;
EVP_MD_CTX *ctx;
int i;
memset(sigstruct, 0, sizeof(*sigstruct));
sigstruct->header.header1[0] = header1[0];
sigstruct->header.header1[1] = header1[1];
sigstruct->header.header2[0] = header2[0];
sigstruct->header.header2[1] = header2[1];
sigstruct->exponent = 3;
sigstruct->body.attributes = SGX_ATTR_MODE64BIT;
sigstruct->body.xfrm = 3;
/* sanity check */
if (check_crypto_errors())
goto err;
key = gen_sign_key();
if (!key) {
ERR_print_errors_fp(stdout);
goto err;
}
BN_bn2bin(get_modulus(key), sigstruct->modulus);
ctx = EVP_MD_CTX_create();
if (!ctx)
goto err;
if (!mrenclave_ecreate(ctx, encl->src_size))
goto err;
for (i = 0; i < encl->nr_segments; i++) {
struct encl_segment *seg = &encl->segment_tbl[i];
if (!mrenclave_segment(ctx, encl, seg))
goto err;
}
if (!mrenclave_commit(ctx, sigstruct->body.mrenclave))
goto err;
memcpy(&payload.header, &sigstruct->header, sizeof(sigstruct->header));
memcpy(&payload.body, &sigstruct->body, sizeof(sigstruct->body));
SHA256((unsigned char *)&payload, sizeof(payload), digest);
if (!RSA_sign(NID_sha256, digest, SHA256_DIGEST_LENGTH,
sigstruct->signature, &siglen, key))
goto err;
if (!calc_q1q2(sigstruct->signature, sigstruct->modulus, sigstruct->q1,
sigstruct->q2))
goto err;
/* BE -> LE */
reverse_bytes(sigstruct->signature, SGX_MODULUS_SIZE);
reverse_bytes(sigstruct->modulus, SGX_MODULUS_SIZE);
EVP_MD_CTX_destroy(ctx);
RSA_free(key);
return true;
err:
EVP_MD_CTX_destroy(ctx);
RSA_free(key);
return false;
}
| linux-master | tools/testing/selftests/sgx/sigstruct.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2016-20 Intel Corporation. */
#include <stddef.h>
#include "defines.h"
/*
 * Data buffer spanning two pages that will be placed first in the .data
 * segment. Even if not used internally, the second page is needed by an
 * external test that manipulates page permissions.
*/
static uint8_t encl_buffer[8192] = { 1 };
enum sgx_enclu_function {
EACCEPT = 0x5,
EMODPE = 0x6,
};
static void do_encl_emodpe(void *_op)
{
struct sgx_secinfo secinfo __aligned(sizeof(struct sgx_secinfo)) = {0};
struct encl_op_emodpe *op = _op;
secinfo.flags = op->flags;
asm volatile(".byte 0x0f, 0x01, 0xd7"
:
: "a" (EMODPE),
"b" (&secinfo),
"c" (op->epc_addr));
}
static void do_encl_eaccept(void *_op)
{
struct sgx_secinfo secinfo __aligned(sizeof(struct sgx_secinfo)) = {0};
struct encl_op_eaccept *op = _op;
int rax;
secinfo.flags = op->flags;
asm volatile(".byte 0x0f, 0x01, 0xd7"
: "=a" (rax)
: "a" (EACCEPT),
"b" (&secinfo),
"c" (op->epc_addr));
op->ret = rax;
}
static void *memcpy(void *dest, const void *src, size_t n)
{
size_t i;
for (i = 0; i < n; i++)
((char *)dest)[i] = ((char *)src)[i];
return dest;
}
static void *memset(void *dest, int c, size_t n)
{
size_t i;
for (i = 0; i < n; i++)
((char *)dest)[i] = c;
return dest;
}
static void do_encl_init_tcs_page(void *_op)
{
struct encl_op_init_tcs_page *op = _op;
void *tcs = (void *)op->tcs_page;
uint32_t val_32;
memset(tcs, 0, 16); /* STATE and FLAGS */
memcpy(tcs + 16, &op->ssa, 8); /* OSSA */
memset(tcs + 24, 0, 4); /* CSSA */
val_32 = 1;
memcpy(tcs + 28, &val_32, 4); /* NSSA */
memcpy(tcs + 32, &op->entry, 8); /* OENTRY */
memset(tcs + 40, 0, 24); /* AEP, OFSBASE, OGSBASE */
val_32 = 0xFFFFFFFF;
memcpy(tcs + 64, &val_32, 4); /* FSLIMIT */
memcpy(tcs + 68, &val_32, 4); /* GSLIMIT */
memset(tcs + 72, 0, 4024); /* Reserved */
}
static void do_encl_op_put_to_buf(void *op)
{
struct encl_op_put_to_buf *op2 = op;
memcpy(&encl_buffer[0], &op2->value, 8);
}
static void do_encl_op_get_from_buf(void *op)
{
struct encl_op_get_from_buf *op2 = op;
memcpy(&op2->value, &encl_buffer[0], 8);
}
static void do_encl_op_put_to_addr(void *_op)
{
struct encl_op_put_to_addr *op = _op;
memcpy((void *)op->addr, &op->value, 8);
}
static void do_encl_op_get_from_addr(void *_op)
{
struct encl_op_get_from_addr *op = _op;
memcpy(&op->value, (void *)op->addr, 8);
}
static void do_encl_op_nop(void *_op)
{
}
void encl_body(void *rdi, void *rsi)
{
const void (*encl_op_array[ENCL_OP_MAX])(void *) = {
do_encl_op_put_to_buf,
do_encl_op_get_from_buf,
do_encl_op_put_to_addr,
do_encl_op_get_from_addr,
do_encl_op_nop,
do_encl_eaccept,
do_encl_emodpe,
do_encl_init_tcs_page,
};
struct encl_op_header *op = (struct encl_op_header *)rdi;
if (op->type < ENCL_OP_MAX)
(*encl_op_array[op->type])(op);
}
| linux-master | tools/testing/selftests/sgx/test_encl.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2016-20 Intel Corporation. */
#include <cpuid.h>
#include <elf.h>
#include <errno.h>
#include <fcntl.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/auxv.h>
#include "defines.h"
#include "../kselftest_harness.h"
#include "main.h"
static const uint64_t MAGIC = 0x1122334455667788ULL;
static const uint64_t MAGIC2 = 0x8877665544332211ULL;
vdso_sgx_enter_enclave_t vdso_sgx_enter_enclave;
/*
* Security Information (SECINFO) data structure needed by a few SGX
* instructions (eg. ENCLU[EACCEPT] and ENCLU[EMODPE]) holds meta-data
* about an enclave page. &enum sgx_secinfo_page_state specifies the
* secinfo flags used for page state.
*/
enum sgx_secinfo_page_state {
SGX_SECINFO_PENDING = (1 << 3),
SGX_SECINFO_MODIFIED = (1 << 4),
SGX_SECINFO_PR = (1 << 5),
};
struct vdso_symtab {
Elf64_Sym *elf_symtab;
const char *elf_symstrtab;
Elf64_Word *elf_hashtab;
};
static Elf64_Dyn *vdso_get_dyntab(void *addr)
{
Elf64_Ehdr *ehdr = addr;
Elf64_Phdr *phdrtab = addr + ehdr->e_phoff;
int i;
for (i = 0; i < ehdr->e_phnum; i++)
if (phdrtab[i].p_type == PT_DYNAMIC)
return addr + phdrtab[i].p_offset;
return NULL;
}
static void *vdso_get_dyn(void *addr, Elf64_Dyn *dyntab, Elf64_Sxword tag)
{
int i;
for (i = 0; dyntab[i].d_tag != DT_NULL; i++)
if (dyntab[i].d_tag == tag)
return addr + dyntab[i].d_un.d_ptr;
return NULL;
}
static bool vdso_get_symtab(void *addr, struct vdso_symtab *symtab)
{
Elf64_Dyn *dyntab = vdso_get_dyntab(addr);
symtab->elf_symtab = vdso_get_dyn(addr, dyntab, DT_SYMTAB);
if (!symtab->elf_symtab)
return false;
symtab->elf_symstrtab = vdso_get_dyn(addr, dyntab, DT_STRTAB);
if (!symtab->elf_symstrtab)
return false;
symtab->elf_hashtab = vdso_get_dyn(addr, dyntab, DT_HASH);
if (!symtab->elf_hashtab)
return false;
return true;
}
static inline int sgx2_supported(void)
{
unsigned int eax, ebx, ecx, edx;
__cpuid_count(SGX_CPUID, 0x0, eax, ebx, ecx, edx);
return eax & 0x2;
}
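/* Classic System V ELF hash, used to look up symbols via the vDSO's DT_HASH table. */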
static unsigned long elf_sym_hash(const char *name)
{
unsigned long h = 0, high;
while (*name) {
h = (h << 4) + *name++;
high = h & 0xf0000000;
if (high)
h ^= high >> 24;
h &= ~high;
}
return h;
}
static Elf64_Sym *vdso_symtab_get(struct vdso_symtab *symtab, const char *name)
{
Elf64_Word bucketnum = symtab->elf_hashtab[0];
Elf64_Word *buckettab = &symtab->elf_hashtab[2];
Elf64_Word *chaintab = &symtab->elf_hashtab[2 + bucketnum];
Elf64_Sym *sym;
Elf64_Word i;
for (i = buckettab[elf_sym_hash(name) % bucketnum]; i != STN_UNDEF;
i = chaintab[i]) {
sym = &symtab->elf_symtab[i];
if (!strcmp(name, &symtab->elf_symstrtab[sym->st_name]))
return sym;
}
return NULL;
}
/*
* Return the offset in the enclave where the TCS segment can be found.
* The first RW segment loaded is the TCS.
*/
static off_t encl_get_tcs_offset(struct encl *encl)
{
int i;
for (i = 0; i < encl->nr_segments; i++) {
struct encl_segment *seg = &encl->segment_tbl[i];
if (i == 0 && seg->prot == (PROT_READ | PROT_WRITE))
return seg->offset;
}
return -1;
}
/*
* Return the offset in the enclave where the data segment can be found.
* The first RW segment loaded is the TCS, skip that to get info on the
* data segment.
*/
static off_t encl_get_data_offset(struct encl *encl)
{
int i;
for (i = 1; i < encl->nr_segments; i++) {
struct encl_segment *seg = &encl->segment_tbl[i];
if (seg->prot == (PROT_READ | PROT_WRITE))
return seg->offset;
}
return -1;
}
FIXTURE(enclave) {
struct encl encl;
struct sgx_enclave_run run;
};
static bool setup_test_encl(unsigned long heap_size, struct encl *encl,
struct __test_metadata *_metadata)
{
Elf64_Sym *sgx_enter_enclave_sym = NULL;
struct vdso_symtab symtab;
struct encl_segment *seg;
char maps_line[256];
FILE *maps_file;
unsigned int i;
void *addr;
if (!encl_load("test_encl.elf", encl, heap_size)) {
encl_delete(encl);
TH_LOG("Failed to load the test enclave.");
return false;
}
if (!encl_measure(encl))
goto err;
if (!encl_build(encl))
goto err;
/*
	 * This is all that an enclave consumer must do.
*/
for (i = 0; i < encl->nr_segments; i++) {
struct encl_segment *seg = &encl->segment_tbl[i];
addr = mmap((void *)encl->encl_base + seg->offset, seg->size,
seg->prot, MAP_SHARED | MAP_FIXED, encl->fd, 0);
EXPECT_NE(addr, MAP_FAILED);
if (addr == MAP_FAILED)
goto err;
}
/* Get vDSO base address */
addr = (void *)getauxval(AT_SYSINFO_EHDR);
if (!addr)
goto err;
if (!vdso_get_symtab(addr, &symtab))
goto err;
sgx_enter_enclave_sym = vdso_symtab_get(&symtab, "__vdso_sgx_enter_enclave");
if (!sgx_enter_enclave_sym)
goto err;
vdso_sgx_enter_enclave = addr + sgx_enter_enclave_sym->st_value;
return true;
err:
for (i = 0; i < encl->nr_segments; i++) {
seg = &encl->segment_tbl[i];
TH_LOG("0x%016lx 0x%016lx 0x%02x", seg->offset, seg->size, seg->prot);
}
maps_file = fopen("/proc/self/maps", "r");
if (maps_file != NULL) {
while (fgets(maps_line, sizeof(maps_line), maps_file) != NULL) {
maps_line[strlen(maps_line) - 1] = '\0';
if (strstr(maps_line, "/dev/sgx_enclave"))
TH_LOG("%s", maps_line);
}
fclose(maps_file);
}
TH_LOG("Failed to initialize the test enclave.");
encl_delete(encl);
return false;
}
FIXTURE_SETUP(enclave)
{
}
FIXTURE_TEARDOWN(enclave)
{
encl_delete(&self->encl);
}
#define ENCL_CALL(op, run, clobbered) \
({ \
int ret; \
if ((clobbered)) \
ret = vdso_sgx_enter_enclave((unsigned long)(op), 0, 0, \
EENTER, 0, 0, (run)); \
else \
ret = sgx_enter_enclave((void *)(op), NULL, 0, EENTER, NULL, NULL, \
(run)); \
ret; \
})
#define EXPECT_EEXIT(run) \
do { \
EXPECT_EQ((run)->function, EEXIT); \
if ((run)->function != EEXIT) \
TH_LOG("0x%02x 0x%02x 0x%016llx", (run)->exception_vector, \
(run)->exception_error_code, (run)->exception_addr); \
} while (0)
TEST_F(enclave, unclobbered_vdso)
{
struct encl_op_get_from_buf get_op;
struct encl_op_put_to_buf put_op;
ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata));
memset(&self->run, 0, sizeof(self->run));
self->run.tcs = self->encl.encl_base;
put_op.header.type = ENCL_OP_PUT_TO_BUFFER;
put_op.value = MAGIC;
EXPECT_EQ(ENCL_CALL(&put_op, &self->run, false), 0);
EXPECT_EEXIT(&self->run);
EXPECT_EQ(self->run.user_data, 0);
get_op.header.type = ENCL_OP_GET_FROM_BUFFER;
get_op.value = 0;
EXPECT_EQ(ENCL_CALL(&get_op, &self->run, false), 0);
EXPECT_EQ(get_op.value, MAGIC);
EXPECT_EEXIT(&self->run);
EXPECT_EQ(self->run.user_data, 0);
}
/*
* A section metric is concatenated in a way that @low bits 12-31 define the
* bits 12-31 of the metric and @high bits 0-19 define the bits 32-51 of the
* metric.
*/
static unsigned long sgx_calc_section_metric(unsigned int low,
unsigned int high)
{
return (low & GENMASK_ULL(31, 12)) +
((high & GENMASK_ULL(19, 0)) << 32);
}
/*
* Sum total available physical SGX memory across all EPC sections
*
* Return: total available physical SGX memory available on system
*/
static unsigned long get_total_epc_mem(void)
{
unsigned int eax, ebx, ecx, edx;
unsigned long total_size = 0;
unsigned int type;
int section = 0;
while (true) {
__cpuid_count(SGX_CPUID, section + SGX_CPUID_EPC, eax, ebx, ecx, edx);
type = eax & SGX_CPUID_EPC_MASK;
if (type == SGX_CPUID_EPC_INVALID)
break;
if (type != SGX_CPUID_EPC_SECTION)
break;
total_size += sgx_calc_section_metric(ecx, edx);
section++;
}
return total_size;
}
TEST_F(enclave, unclobbered_vdso_oversubscribed)
{
struct encl_op_get_from_buf get_op;
struct encl_op_put_to_buf put_op;
unsigned long total_mem;
total_mem = get_total_epc_mem();
ASSERT_NE(total_mem, 0);
ASSERT_TRUE(setup_test_encl(total_mem, &self->encl, _metadata));
memset(&self->run, 0, sizeof(self->run));
self->run.tcs = self->encl.encl_base;
put_op.header.type = ENCL_OP_PUT_TO_BUFFER;
put_op.value = MAGIC;
EXPECT_EQ(ENCL_CALL(&put_op, &self->run, false), 0);
EXPECT_EEXIT(&self->run);
EXPECT_EQ(self->run.user_data, 0);
get_op.header.type = ENCL_OP_GET_FROM_BUFFER;
get_op.value = 0;
EXPECT_EQ(ENCL_CALL(&get_op, &self->run, false), 0);
EXPECT_EQ(get_op.value, MAGIC);
EXPECT_EEXIT(&self->run);
EXPECT_EQ(self->run.user_data, 0);
}
TEST_F_TIMEOUT(enclave, unclobbered_vdso_oversubscribed_remove, 900)
{
struct sgx_enclave_remove_pages remove_ioc;
struct sgx_enclave_modify_types modt_ioc;
struct encl_op_get_from_buf get_op;
struct encl_op_eaccept eaccept_op;
struct encl_op_put_to_buf put_op;
struct encl_segment *heap;
unsigned long total_mem;
int ret, errno_save;
unsigned long addr;
unsigned long i;
/*
* Create enclave with additional heap that is as big as all
* available physical SGX memory.
*/
total_mem = get_total_epc_mem();
ASSERT_NE(total_mem, 0);
TH_LOG("Creating an enclave with %lu bytes heap may take a while ...",
total_mem);
ASSERT_TRUE(setup_test_encl(total_mem, &self->encl, _metadata));
/*
* Hardware (SGX2) and kernel support is needed for this test. Start
* with check that test has a chance of succeeding.
*/
memset(&modt_ioc, 0, sizeof(modt_ioc));
ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_MODIFY_TYPES, &modt_ioc);
if (ret == -1) {
if (errno == ENOTTY)
SKIP(return,
"Kernel does not support SGX_IOC_ENCLAVE_MODIFY_TYPES ioctl()");
else if (errno == ENODEV)
SKIP(return, "System does not support SGX2");
}
/*
* Invalid parameters were provided during sanity check,
* expect command to fail.
*/
EXPECT_EQ(ret, -1);
/* SGX2 is supported by kernel and hardware, test can proceed. */
memset(&self->run, 0, sizeof(self->run));
self->run.tcs = self->encl.encl_base;
heap = &self->encl.segment_tbl[self->encl.nr_segments - 1];
put_op.header.type = ENCL_OP_PUT_TO_BUFFER;
put_op.value = MAGIC;
EXPECT_EQ(ENCL_CALL(&put_op, &self->run, false), 0);
EXPECT_EEXIT(&self->run);
EXPECT_EQ(self->run.user_data, 0);
get_op.header.type = ENCL_OP_GET_FROM_BUFFER;
get_op.value = 0;
EXPECT_EQ(ENCL_CALL(&get_op, &self->run, false), 0);
EXPECT_EQ(get_op.value, MAGIC);
EXPECT_EEXIT(&self->run);
EXPECT_EQ(self->run.user_data, 0);
/* Trim entire heap. */
memset(&modt_ioc, 0, sizeof(modt_ioc));
modt_ioc.offset = heap->offset;
modt_ioc.length = heap->size;
modt_ioc.page_type = SGX_PAGE_TYPE_TRIM;
TH_LOG("Changing type of %zd bytes to trimmed may take a while ...",
heap->size);
ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_MODIFY_TYPES, &modt_ioc);
errno_save = ret == -1 ? errno : 0;
EXPECT_EQ(ret, 0);
EXPECT_EQ(errno_save, 0);
EXPECT_EQ(modt_ioc.result, 0);
EXPECT_EQ(modt_ioc.count, heap->size);
/* EACCEPT all removed pages. */
addr = self->encl.encl_base + heap->offset;
eaccept_op.flags = SGX_SECINFO_TRIM | SGX_SECINFO_MODIFIED;
eaccept_op.header.type = ENCL_OP_EACCEPT;
TH_LOG("Entering enclave to run EACCEPT for each page of %zd bytes may take a while ...",
heap->size);
for (i = 0; i < heap->size; i += 4096) {
eaccept_op.epc_addr = addr + i;
eaccept_op.ret = 0;
EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0);
EXPECT_EQ(self->run.exception_vector, 0);
EXPECT_EQ(self->run.exception_error_code, 0);
EXPECT_EQ(self->run.exception_addr, 0);
ASSERT_EQ(eaccept_op.ret, 0);
ASSERT_EQ(self->run.function, EEXIT);
}
/* Complete page removal. */
memset(&remove_ioc, 0, sizeof(remove_ioc));
remove_ioc.offset = heap->offset;
remove_ioc.length = heap->size;
TH_LOG("Removing %zd bytes from enclave may take a while ...",
heap->size);
ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_REMOVE_PAGES, &remove_ioc);
errno_save = ret == -1 ? errno : 0;
EXPECT_EQ(ret, 0);
EXPECT_EQ(errno_save, 0);
EXPECT_EQ(remove_ioc.count, heap->size);
}
TEST_F(enclave, clobbered_vdso)
{
struct encl_op_get_from_buf get_op;
struct encl_op_put_to_buf put_op;
ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata));
memset(&self->run, 0, sizeof(self->run));
self->run.tcs = self->encl.encl_base;
put_op.header.type = ENCL_OP_PUT_TO_BUFFER;
put_op.value = MAGIC;
EXPECT_EQ(ENCL_CALL(&put_op, &self->run, true), 0);
EXPECT_EEXIT(&self->run);
EXPECT_EQ(self->run.user_data, 0);
get_op.header.type = ENCL_OP_GET_FROM_BUFFER;
get_op.value = 0;
EXPECT_EQ(ENCL_CALL(&get_op, &self->run, true), 0);
EXPECT_EQ(get_op.value, MAGIC);
EXPECT_EEXIT(&self->run);
EXPECT_EQ(self->run.user_data, 0);
}
static int test_handler(long rdi, long rsi, long rdx, long ursp, long r8, long r9,
struct sgx_enclave_run *run)
{
run->user_data = 0;
return 0;
}
TEST_F(enclave, clobbered_vdso_and_user_function)
{
struct encl_op_get_from_buf get_op;
struct encl_op_put_to_buf put_op;
ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata));
memset(&self->run, 0, sizeof(self->run));
self->run.tcs = self->encl.encl_base;
self->run.user_handler = (__u64)test_handler;
self->run.user_data = 0xdeadbeef;
put_op.header.type = ENCL_OP_PUT_TO_BUFFER;
put_op.value = MAGIC;
EXPECT_EQ(ENCL_CALL(&put_op, &self->run, true), 0);
EXPECT_EEXIT(&self->run);
EXPECT_EQ(self->run.user_data, 0);
get_op.header.type = ENCL_OP_GET_FROM_BUFFER;
get_op.value = 0;
EXPECT_EQ(ENCL_CALL(&get_op, &self->run, true), 0);
EXPECT_EQ(get_op.value, MAGIC);
EXPECT_EEXIT(&self->run);
EXPECT_EQ(self->run.user_data, 0);
}
/*
* Sanity check that it is possible to enter either of the two hardcoded TCSs.
*/
TEST_F(enclave, tcs_entry)
{
struct encl_op_header op;
ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata));
memset(&self->run, 0, sizeof(self->run));
self->run.tcs = self->encl.encl_base;
op.type = ENCL_OP_NOP;
EXPECT_EQ(ENCL_CALL(&op, &self->run, true), 0);
EXPECT_EEXIT(&self->run);
EXPECT_EQ(self->run.exception_vector, 0);
EXPECT_EQ(self->run.exception_error_code, 0);
EXPECT_EQ(self->run.exception_addr, 0);
/* Move to the next TCS. */
self->run.tcs = self->encl.encl_base + PAGE_SIZE;
EXPECT_EQ(ENCL_CALL(&op, &self->run, true), 0);
EXPECT_EEXIT(&self->run);
EXPECT_EQ(self->run.exception_vector, 0);
EXPECT_EQ(self->run.exception_error_code, 0);
EXPECT_EQ(self->run.exception_addr, 0);
}
/*
* Second page of .data segment is used to test changing PTE permissions.
* This spans the local encl_buffer within the test enclave.
*
* 1) Start with a sanity check: a value is written to the target page within
* the enclave and read back to ensure target page can be written to.
* 2) Change PTE permissions (RW -> RO) of target page within enclave.
* 3) Repeat (1) - this time expecting a regular #PF communicated via the
* vDSO.
* 4) Change PTE permissions of target page within enclave back to be RW.
* 5) Repeat (1) by resuming enclave, now expected to be possible to write to
* and read from target page within enclave.
*/
TEST_F(enclave, pte_permissions)
{
struct encl_op_get_from_addr get_addr_op;
struct encl_op_put_to_addr put_addr_op;
unsigned long data_start;
int ret;
ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata));
memset(&self->run, 0, sizeof(self->run));
self->run.tcs = self->encl.encl_base;
data_start = self->encl.encl_base +
encl_get_data_offset(&self->encl) +
PAGE_SIZE;
/*
* Sanity check to ensure it is possible to write to page that will
* have its permissions manipulated.
*/
/* Write MAGIC to page */
put_addr_op.value = MAGIC;
put_addr_op.addr = data_start;
put_addr_op.header.type = ENCL_OP_PUT_TO_ADDRESS;
EXPECT_EQ(ENCL_CALL(&put_addr_op, &self->run, true), 0);
EXPECT_EEXIT(&self->run);
EXPECT_EQ(self->run.exception_vector, 0);
EXPECT_EQ(self->run.exception_error_code, 0);
EXPECT_EQ(self->run.exception_addr, 0);
/*
* Read memory that was just written to, confirming that it is the
* value previously written (MAGIC).
*/
get_addr_op.value = 0;
get_addr_op.addr = data_start;
get_addr_op.header.type = ENCL_OP_GET_FROM_ADDRESS;
EXPECT_EQ(ENCL_CALL(&get_addr_op, &self->run, true), 0);
EXPECT_EQ(get_addr_op.value, MAGIC);
EXPECT_EEXIT(&self->run);
EXPECT_EQ(self->run.exception_vector, 0);
EXPECT_EQ(self->run.exception_error_code, 0);
EXPECT_EQ(self->run.exception_addr, 0);
/* Change PTE permissions of target page within the enclave */
ret = mprotect((void *)data_start, PAGE_SIZE, PROT_READ);
if (ret)
perror("mprotect");
/*
* PTE permissions of target page changed to read-only, EPCM
* permissions unchanged (EPCM permissions are RW), attempt to
* write to the page, expecting a regular #PF.
*/
put_addr_op.value = MAGIC2;
EXPECT_EQ(ENCL_CALL(&put_addr_op, &self->run, true), 0);
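/*
* #PF error code 0x7 = present | write | user-mode access; the SGX bit
* (bit 15) is clear because the fault comes from the page tables, not the
* EPCM.
*/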
EXPECT_EQ(self->run.exception_vector, 14);
EXPECT_EQ(self->run.exception_error_code, 0x7);
EXPECT_EQ(self->run.exception_addr, data_start);
self->run.exception_vector = 0;
self->run.exception_error_code = 0;
self->run.exception_addr = 0;
/*
* Change PTE permissions back to enable enclave to write to the
* target page and resume enclave - do not expect any exceptions this
* time.
*/
ret = mprotect((void *)data_start, PAGE_SIZE, PROT_READ | PROT_WRITE);
if (ret)
perror("mprotect");
EXPECT_EQ(vdso_sgx_enter_enclave((unsigned long)&put_addr_op, 0,
0, ERESUME, 0, 0, &self->run),
0);
EXPECT_EEXIT(&self->run);
EXPECT_EQ(self->run.exception_vector, 0);
EXPECT_EQ(self->run.exception_error_code, 0);
EXPECT_EQ(self->run.exception_addr, 0);
get_addr_op.value = 0;
EXPECT_EQ(ENCL_CALL(&get_addr_op, &self->run, true), 0);
EXPECT_EQ(get_addr_op.value, MAGIC2);
EXPECT_EEXIT(&self->run);
EXPECT_EQ(self->run.exception_vector, 0);
EXPECT_EQ(self->run.exception_error_code, 0);
EXPECT_EQ(self->run.exception_addr, 0);
}
/*
* Modifying permissions of TCS page should not be possible.
*/
TEST_F(enclave, tcs_permissions)
{
struct sgx_enclave_restrict_permissions ioc;
int ret, errno_save;
ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata));
memset(&self->run, 0, sizeof(self->run));
self->run.tcs = self->encl.encl_base;
memset(&ioc, 0, sizeof(ioc));
/*
* Ensure kernel supports needed ioctl() and system supports needed
* commands.
*/
ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_RESTRICT_PERMISSIONS, &ioc);
errno_save = ret == -1 ? errno : 0;
/*
* Invalid parameters were provided during sanity check,
* expect command to fail.
*/
ASSERT_EQ(ret, -1);
/* ret == -1 */
if (errno_save == ENOTTY)
SKIP(return,
"Kernel does not support SGX_IOC_ENCLAVE_RESTRICT_PERMISSIONS ioctl()");
else if (errno_save == ENODEV)
SKIP(return, "System does not support SGX2");
/*
* Attempt to make TCS page read-only. This is not allowed and
* should be prevented by the kernel.
*/
ioc.offset = encl_get_tcs_offset(&self->encl);
ioc.length = PAGE_SIZE;
ioc.permissions = SGX_SECINFO_R;
ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_RESTRICT_PERMISSIONS, &ioc);
errno_save = ret == -1 ? errno : 0;
EXPECT_EQ(ret, -1);
EXPECT_EQ(errno_save, EINVAL);
EXPECT_EQ(ioc.result, 0);
EXPECT_EQ(ioc.count, 0);
}
/*
* Enclave page permission test.
*
* Modify and restore enclave page's EPCM (enclave) permissions from
* outside enclave (ENCLS[EMODPR] via kernel) as well as from within
* enclave (via ENCLU[EMODPE]). Check for page fault if
* VMA allows access but EPCM permissions do not.
*/
TEST_F(enclave, epcm_permissions)
{
struct sgx_enclave_restrict_permissions restrict_ioc;
struct encl_op_get_from_addr get_addr_op;
struct encl_op_put_to_addr put_addr_op;
struct encl_op_eaccept eaccept_op;
struct encl_op_emodpe emodpe_op;
unsigned long data_start;
int ret, errno_save;
ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata));
memset(&self->run, 0, sizeof(self->run));
self->run.tcs = self->encl.encl_base;
/*
* Ensure kernel supports needed ioctl() and system supports needed
* commands.
*/
memset(&restrict_ioc, 0, sizeof(restrict_ioc));
ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_RESTRICT_PERMISSIONS,
&restrict_ioc);
errno_save = ret == -1 ? errno : 0;
/*
* Invalid parameters were provided during sanity check,
* expect command to fail.
*/
ASSERT_EQ(ret, -1);
/* ret == -1 */
if (errno_save == ENOTTY)
SKIP(return,
"Kernel does not support SGX_IOC_ENCLAVE_RESTRICT_PERMISSIONS ioctl()");
else if (errno_save == ENODEV)
SKIP(return, "System does not support SGX2");
/*
* Page that will have its permissions changed is the second data
* page in the .data segment. This forms part of the local encl_buffer
* within the enclave.
*
* At start of test @data_start should have EPCM as well as PTE and
* VMA permissions of RW.
*/
data_start = self->encl.encl_base +
encl_get_data_offset(&self->encl) + PAGE_SIZE;
/*
* Sanity check that page at @data_start is writable before making
* any changes to page permissions.
*
* Start by writing MAGIC to test page.
*/
put_addr_op.value = MAGIC;
put_addr_op.addr = data_start;
put_addr_op.header.type = ENCL_OP_PUT_TO_ADDRESS;
EXPECT_EQ(ENCL_CALL(&put_addr_op, &self->run, true), 0);
EXPECT_EEXIT(&self->run);
EXPECT_EQ(self->run.exception_vector, 0);
EXPECT_EQ(self->run.exception_error_code, 0);
EXPECT_EQ(self->run.exception_addr, 0);
/*
* Read memory that was just written to, confirming that
* page is writable.
*/
get_addr_op.value = 0;
get_addr_op.addr = data_start;
get_addr_op.header.type = ENCL_OP_GET_FROM_ADDRESS;
EXPECT_EQ(ENCL_CALL(&get_addr_op, &self->run, true), 0);
EXPECT_EQ(get_addr_op.value, MAGIC);
EXPECT_EEXIT(&self->run);
EXPECT_EQ(self->run.exception_vector, 0);
EXPECT_EQ(self->run.exception_error_code, 0);
EXPECT_EQ(self->run.exception_addr, 0);
/*
* Change EPCM permissions to read-only. Kernel still considers
* the page writable.
*/
memset(&restrict_ioc, 0, sizeof(restrict_ioc));
restrict_ioc.offset = encl_get_data_offset(&self->encl) + PAGE_SIZE;
restrict_ioc.length = PAGE_SIZE;
restrict_ioc.permissions = SGX_SECINFO_R;
ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_RESTRICT_PERMISSIONS,
&restrict_ioc);
errno_save = ret == -1 ? errno : 0;
EXPECT_EQ(ret, 0);
EXPECT_EQ(errno_save, 0);
EXPECT_EQ(restrict_ioc.result, 0);
EXPECT_EQ(restrict_ioc.count, 4096);
/*
* EPCM permissions changed from kernel, need to EACCEPT from enclave.
*/
eaccept_op.epc_addr = data_start;
eaccept_op.flags = SGX_SECINFO_R | SGX_SECINFO_REG | SGX_SECINFO_PR;
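/* SGX_SECINFO_PR: accept a pending permission restriction (EMODPR) on the page. */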
eaccept_op.ret = 0;
eaccept_op.header.type = ENCL_OP_EACCEPT;
EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0);
EXPECT_EEXIT(&self->run);
EXPECT_EQ(self->run.exception_vector, 0);
EXPECT_EQ(self->run.exception_error_code, 0);
EXPECT_EQ(self->run.exception_addr, 0);
EXPECT_EQ(eaccept_op.ret, 0);
/*
* EPCM permissions of page is now read-only, expect #PF
* on EPCM when attempting to write to page from within enclave.
*/
put_addr_op.value = MAGIC2;
EXPECT_EQ(ENCL_CALL(&put_addr_op, &self->run, true), 0);
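/*
* Error code 0x8007 is present | write | user-mode with the SGX bit
* (bit 15) set, i.e. the fault was raised by the EPCM rather than the
* page tables.
*/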
EXPECT_EQ(self->run.function, ERESUME);
EXPECT_EQ(self->run.exception_vector, 14);
EXPECT_EQ(self->run.exception_error_code, 0x8007);
EXPECT_EQ(self->run.exception_addr, data_start);
self->run.exception_vector = 0;
self->run.exception_error_code = 0;
self->run.exception_addr = 0;
/*
* An AEX was received, but the enclave cannot be re-entered at the same
* entrypoint; a different TCS is needed from which the EPCM permissions
* can be made writable again.
*/
self->run.tcs = self->encl.encl_base + PAGE_SIZE;
/*
* Enter enclave at new TCS to change EPCM permissions to be
* writable again and thus fix the page fault that triggered the
* AEX.
*/
emodpe_op.epc_addr = data_start;
emodpe_op.flags = SGX_SECINFO_R | SGX_SECINFO_W;
emodpe_op.header.type = ENCL_OP_EMODPE;
EXPECT_EQ(ENCL_CALL(&emodpe_op, &self->run, true), 0);
EXPECT_EEXIT(&self->run);
EXPECT_EQ(self->run.exception_vector, 0);
EXPECT_EQ(self->run.exception_error_code, 0);
EXPECT_EQ(self->run.exception_addr, 0);
/*
* The wrong EPCM permissions that caused the original fault have now been
* fixed via EMODPE. Return to the main TCS and resume execution at the
* faulting instruction to re-attempt the memory access; the PTE continues
* to allow writing to the page.
*/
self->run.tcs = self->encl.encl_base;
EXPECT_EQ(vdso_sgx_enter_enclave((unsigned long)&put_addr_op, 0, 0,
ERESUME, 0, 0,
&self->run),
0);
EXPECT_EEXIT(&self->run);
EXPECT_EQ(self->run.exception_vector, 0);
EXPECT_EQ(self->run.exception_error_code, 0);
EXPECT_EQ(self->run.exception_addr, 0);
get_addr_op.value = 0;
EXPECT_EQ(ENCL_CALL(&get_addr_op, &self->run, true), 0);
EXPECT_EQ(get_addr_op.value, MAGIC2);
EXPECT_EEXIT(&self->run);
EXPECT_EQ(self->run.user_data, 0);
EXPECT_EQ(self->run.exception_vector, 0);
EXPECT_EQ(self->run.exception_error_code, 0);
EXPECT_EQ(self->run.exception_addr, 0);
}
/*
* Test the addition of pages to an initialized enclave via writing to
* a page that belongs to the enclave's address space but was not added
* during enclave creation.
*/
TEST_F(enclave, augment)
{
struct encl_op_get_from_addr get_addr_op;
struct encl_op_put_to_addr put_addr_op;
struct encl_op_eaccept eaccept_op;
size_t total_size = 0;
void *addr;
int i;
if (!sgx2_supported())
SKIP(return, "SGX2 not supported");
ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata));
memset(&self->run, 0, sizeof(self->run));
self->run.tcs = self->encl.encl_base;
for (i = 0; i < self->encl.nr_segments; i++) {
struct encl_segment *seg = &self->encl.segment_tbl[i];
total_size += seg->size;
}
/*
* Actual enclave size is expected to be larger than the loaded
* test enclave since enclave size must be a power of 2 in bytes
* and test_encl does not consume it all.
*/
EXPECT_LT(total_size + PAGE_SIZE, self->encl.encl_size);
/*
* Create memory mapping for the page that will be added. New
* memory mapping is for one page right after all existing
* mappings.
* Kernel will allow new mapping using any permissions if it
* falls into the enclave's address range but is not backed
* by existing enclave pages.
*/
addr = mmap((void *)self->encl.encl_base + total_size, PAGE_SIZE,
PROT_READ | PROT_WRITE | PROT_EXEC,
MAP_SHARED | MAP_FIXED, self->encl.fd, 0);
EXPECT_NE(addr, MAP_FAILED);
self->run.exception_vector = 0;
self->run.exception_error_code = 0;
self->run.exception_addr = 0;
/*
* Attempt to write to the new page from within enclave.
* Expected to fail since page is not (yet) part of the enclave.
* The first #PF will trigger the addition of the page to the
* enclave, but since the new page needs an EACCEPT from within the
* enclave before it can be used it would not be possible
* to successfully return to the failing instruction. This is the
* cause of the second #PF captured here having the SGX bit set:
* the hardware is preventing the page from being used.
*/
put_addr_op.value = MAGIC;
put_addr_op.addr = (unsigned long)addr;
put_addr_op.header.type = ENCL_OP_PUT_TO_ADDRESS;
EXPECT_EQ(ENCL_CALL(&put_addr_op, &self->run, true), 0);
EXPECT_EQ(self->run.function, ERESUME);
EXPECT_EQ(self->run.exception_vector, 14);
EXPECT_EQ(self->run.exception_addr, (unsigned long)addr);
if (self->run.exception_error_code == 0x6) {
munmap(addr, PAGE_SIZE);
SKIP(return, "Kernel does not support adding pages to initialized enclave");
}
EXPECT_EQ(self->run.exception_error_code, 0x8007);
self->run.exception_vector = 0;
self->run.exception_error_code = 0;
self->run.exception_addr = 0;
/* Handle AEX by running EACCEPT from new entry point. */
self->run.tcs = self->encl.encl_base + PAGE_SIZE;
eaccept_op.epc_addr = self->encl.encl_base + total_size;
eaccept_op.flags = SGX_SECINFO_R | SGX_SECINFO_W | SGX_SECINFO_REG | SGX_SECINFO_PENDING;
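/* SGX_SECINFO_PENDING matches the EPCM state of a page freshly added via EAUG. */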
eaccept_op.ret = 0;
eaccept_op.header.type = ENCL_OP_EACCEPT;
EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0);
EXPECT_EEXIT(&self->run);
EXPECT_EQ(self->run.exception_vector, 0);
EXPECT_EQ(self->run.exception_error_code, 0);
EXPECT_EQ(self->run.exception_addr, 0);
EXPECT_EQ(eaccept_op.ret, 0);
/* Can now return to main TCS to resume execution. */
self->run.tcs = self->encl.encl_base;
EXPECT_EQ(vdso_sgx_enter_enclave((unsigned long)&put_addr_op, 0, 0,
ERESUME, 0, 0,
&self->run),
0);
EXPECT_EEXIT(&self->run);
EXPECT_EQ(self->run.exception_vector, 0);
EXPECT_EQ(self->run.exception_error_code, 0);
EXPECT_EQ(self->run.exception_addr, 0);
/*
* Read memory from newly added page that was just written to,
* confirming that data previously written (MAGIC) is present.
*/
get_addr_op.value = 0;
get_addr_op.addr = (unsigned long)addr;
get_addr_op.header.type = ENCL_OP_GET_FROM_ADDRESS;
EXPECT_EQ(ENCL_CALL(&get_addr_op, &self->run, true), 0);
EXPECT_EQ(get_addr_op.value, MAGIC);
EXPECT_EEXIT(&self->run);
EXPECT_EQ(self->run.exception_vector, 0);
EXPECT_EQ(self->run.exception_error_code, 0);
EXPECT_EQ(self->run.exception_addr, 0);
munmap(addr, PAGE_SIZE);
}
/*
* Test for the addition of pages to an initialized enclave via a
* pre-emptive run of EACCEPT on page to be added.
*/
TEST_F(enclave, augment_via_eaccept)
{
struct encl_op_get_from_addr get_addr_op;
struct encl_op_put_to_addr put_addr_op;
struct encl_op_eaccept eaccept_op;
size_t total_size = 0;
void *addr;
int i;
if (!sgx2_supported())
SKIP(return, "SGX2 not supported");
ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata));
memset(&self->run, 0, sizeof(self->run));
self->run.tcs = self->encl.encl_base;
for (i = 0; i < self->encl.nr_segments; i++) {
struct encl_segment *seg = &self->encl.segment_tbl[i];
total_size += seg->size;
}
/*
* Actual enclave size is expected to be larger than the loaded
* test enclave since enclave size must be a power of 2 in bytes while
* test_encl does not consume it all.
*/
EXPECT_LT(total_size + PAGE_SIZE, self->encl.encl_size);
/*
* mmap() a page at end of existing enclave to be used for dynamic
* EPC page.
*
* Kernel will allow new mapping using any permissions if it
* falls into the enclave's address range but is not backed
* by existing enclave pages.
*/
addr = mmap((void *)self->encl.encl_base + total_size, PAGE_SIZE,
PROT_READ | PROT_WRITE | PROT_EXEC, MAP_SHARED | MAP_FIXED,
self->encl.fd, 0);
EXPECT_NE(addr, MAP_FAILED);
self->run.exception_vector = 0;
self->run.exception_error_code = 0;
self->run.exception_addr = 0;
/*
* Run EACCEPT on the new page to trigger the #PF->EAUG->EACCEPT (again,
* without a #PF) flow. All of this should be transparent to userspace.
*/
eaccept_op.epc_addr = self->encl.encl_base + total_size;
eaccept_op.flags = SGX_SECINFO_R | SGX_SECINFO_W | SGX_SECINFO_REG | SGX_SECINFO_PENDING;
eaccept_op.ret = 0;
eaccept_op.header.type = ENCL_OP_EACCEPT;
EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0);
if (self->run.exception_vector == 14 &&
self->run.exception_error_code == 4 &&
self->run.exception_addr == self->encl.encl_base + total_size) {
munmap(addr, PAGE_SIZE);
SKIP(return, "Kernel does not support adding pages to initialized enclave");
}
EXPECT_EEXIT(&self->run);
EXPECT_EQ(self->run.exception_vector, 0);
EXPECT_EQ(self->run.exception_error_code, 0);
EXPECT_EQ(self->run.exception_addr, 0);
EXPECT_EQ(eaccept_op.ret, 0);
/*
* New page should be accessible from within enclave - attempt to
* write to it.
*/
put_addr_op.value = MAGIC;
put_addr_op.addr = (unsigned long)addr;
put_addr_op.header.type = ENCL_OP_PUT_TO_ADDRESS;
EXPECT_EQ(ENCL_CALL(&put_addr_op, &self->run, true), 0);
EXPECT_EEXIT(&self->run);
EXPECT_EQ(self->run.exception_vector, 0);
EXPECT_EQ(self->run.exception_error_code, 0);
EXPECT_EQ(self->run.exception_addr, 0);
/*
* Read memory from newly added page that was just written to,
* confirming that data previously written (MAGIC) is present.
*/
get_addr_op.value = 0;
get_addr_op.addr = (unsigned long)addr;
get_addr_op.header.type = ENCL_OP_GET_FROM_ADDRESS;
EXPECT_EQ(ENCL_CALL(&get_addr_op, &self->run, true), 0);
EXPECT_EQ(get_addr_op.value, MAGIC);
EXPECT_EEXIT(&self->run);
EXPECT_EQ(self->run.exception_vector, 0);
EXPECT_EQ(self->run.exception_error_code, 0);
EXPECT_EQ(self->run.exception_addr, 0);
munmap(addr, PAGE_SIZE);
}
/*
* SGX2 page type modification test in two phases:
* Phase 1:
* Create a new TCS, consisting of three new pages (stack page with regular
* page type, SSA page with regular page type, and TCS page with TCS page
* type) in an initialized enclave and run a simple workload within it.
* Phase 2:
* Remove the three pages added in phase 1, add a new regular page at the
* same address that previously hosted the TCS page and verify that it can
* be modified.
*/
TEST_F(enclave, tcs_create)
{
struct encl_op_init_tcs_page init_tcs_page_op;
struct sgx_enclave_remove_pages remove_ioc;
struct encl_op_get_from_addr get_addr_op;
struct sgx_enclave_modify_types modt_ioc;
struct encl_op_put_to_addr put_addr_op;
struct encl_op_get_from_buf get_buf_op;
struct encl_op_put_to_buf put_buf_op;
void *addr, *tcs, *stack_end, *ssa;
struct encl_op_eaccept eaccept_op;
size_t total_size = 0;
uint64_t val_64;
int errno_save;
int ret, i;
ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl,
_metadata));
memset(&self->run, 0, sizeof(self->run));
self->run.tcs = self->encl.encl_base;
/*
* Hardware (SGX2) and kernel support is needed for this test. Start
* with a check that the test has a chance of succeeding.
*/
memset(&modt_ioc, 0, sizeof(modt_ioc));
ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_MODIFY_TYPES, &modt_ioc);
if (ret == -1) {
if (errno == ENOTTY)
SKIP(return,
"Kernel does not support SGX_IOC_ENCLAVE_MODIFY_TYPES ioctl()");
else if (errno == ENODEV)
SKIP(return, "System does not support SGX2");
}
/*
* Invalid parameters were provided during sanity check,
* expect command to fail.
*/
EXPECT_EQ(ret, -1);
/*
* Add three regular pages via EAUG: one will be the TCS stack, one
* will be the TCS SSA, and one will be the new TCS. The stack and
* SSA will remain regular pages; the TCS page will need its type
* changed after it has been populated with the needed data.
*/
for (i = 0; i < self->encl.nr_segments; i++) {
struct encl_segment *seg = &self->encl.segment_tbl[i];
total_size += seg->size;
}
/*
* Actual enclave size is expected to be larger than the loaded
* test enclave since enclave size must be a power of 2 in bytes while
* test_encl does not consume it all.
*/
EXPECT_LT(total_size + 3 * PAGE_SIZE, self->encl.encl_size);
/*
* mmap() three pages at end of existing enclave to be used for the
* three new pages.
*/
addr = mmap((void *)self->encl.encl_base + total_size, 3 * PAGE_SIZE,
PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED,
self->encl.fd, 0);
EXPECT_NE(addr, MAP_FAILED);
self->run.exception_vector = 0;
self->run.exception_error_code = 0;
self->run.exception_addr = 0;
stack_end = (void *)self->encl.encl_base + total_size;
tcs = (void *)self->encl.encl_base + total_size + PAGE_SIZE;
ssa = (void *)self->encl.encl_base + total_size + 2 * PAGE_SIZE;
/*
* Run EACCEPT on each new page to trigger the
* EACCEPT->(#PF)->EAUG->EACCEPT(again without a #PF) flow.
*/
eaccept_op.epc_addr = (unsigned long)stack_end;
eaccept_op.flags = SGX_SECINFO_R | SGX_SECINFO_W | SGX_SECINFO_REG | SGX_SECINFO_PENDING;
eaccept_op.ret = 0;
eaccept_op.header.type = ENCL_OP_EACCEPT;
EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0);
if (self->run.exception_vector == 14 &&
self->run.exception_error_code == 4 &&
self->run.exception_addr == (unsigned long)stack_end) {
munmap(addr, 3 * PAGE_SIZE);
SKIP(return, "Kernel does not support adding pages to initialized enclave");
}
EXPECT_EEXIT(&self->run);
EXPECT_EQ(self->run.exception_vector, 0);
EXPECT_EQ(self->run.exception_error_code, 0);
EXPECT_EQ(self->run.exception_addr, 0);
EXPECT_EQ(eaccept_op.ret, 0);
eaccept_op.epc_addr = (unsigned long)ssa;
EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0);
EXPECT_EEXIT(&self->run);
EXPECT_EQ(self->run.exception_vector, 0);
EXPECT_EQ(self->run.exception_error_code, 0);
EXPECT_EQ(self->run.exception_addr, 0);
EXPECT_EQ(eaccept_op.ret, 0);
eaccept_op.epc_addr = (unsigned long)tcs;
EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0);
EXPECT_EEXIT(&self->run);
EXPECT_EQ(self->run.exception_vector, 0);
EXPECT_EQ(self->run.exception_error_code, 0);
EXPECT_EQ(self->run.exception_addr, 0);
EXPECT_EQ(eaccept_op.ret, 0);
/*
* Three new pages have been added to the enclave. Now populate the TCS
* page with the needed data. This must be done from within the enclave,
* which provides an operation that performs the actual population.
*/
/*
* New TCS will use the "encl_dyn_entry" entrypoint that expects
* stack to begin in page before TCS page.
*/
val_64 = encl_get_entry(&self->encl, "encl_dyn_entry");
EXPECT_NE(val_64, 0);
init_tcs_page_op.tcs_page = (unsigned long)tcs;
init_tcs_page_op.ssa = (unsigned long)total_size + 2 * PAGE_SIZE;
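/* The TCS OSSA field is an offset from the enclave base, so no encl_base is added here. */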
init_tcs_page_op.entry = val_64;
init_tcs_page_op.header.type = ENCL_OP_INIT_TCS_PAGE;
EXPECT_EQ(ENCL_CALL(&init_tcs_page_op, &self->run, true), 0);
EXPECT_EEXIT(&self->run);
EXPECT_EQ(self->run.exception_vector, 0);
EXPECT_EQ(self->run.exception_error_code, 0);
EXPECT_EQ(self->run.exception_addr, 0);
/* Change TCS page type to TCS. */
memset(&modt_ioc, 0, sizeof(modt_ioc));
modt_ioc.offset = total_size + PAGE_SIZE;
modt_ioc.length = PAGE_SIZE;
modt_ioc.page_type = SGX_PAGE_TYPE_TCS;
ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_MODIFY_TYPES, &modt_ioc);
errno_save = ret == -1 ? errno : 0;
EXPECT_EQ(ret, 0);
EXPECT_EQ(errno_save, 0);
EXPECT_EQ(modt_ioc.result, 0);
EXPECT_EQ(modt_ioc.count, 4096);
/* EACCEPT new TCS page from enclave. */
eaccept_op.epc_addr = (unsigned long)tcs;
eaccept_op.flags = SGX_SECINFO_TCS | SGX_SECINFO_MODIFIED;
eaccept_op.ret = 0;
eaccept_op.header.type = ENCL_OP_EACCEPT;
EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0);
EXPECT_EEXIT(&self->run);
EXPECT_EQ(self->run.exception_vector, 0);
EXPECT_EQ(self->run.exception_error_code, 0);
EXPECT_EQ(self->run.exception_addr, 0);
EXPECT_EQ(eaccept_op.ret, 0);
/* Run workload from new TCS. */
self->run.tcs = (unsigned long)tcs;
/*
* Simple workload to write to data buffer and read value back.
*/
put_buf_op.header.type = ENCL_OP_PUT_TO_BUFFER;
put_buf_op.value = MAGIC;
EXPECT_EQ(ENCL_CALL(&put_buf_op, &self->run, true), 0);
EXPECT_EEXIT(&self->run);
EXPECT_EQ(self->run.exception_vector, 0);
EXPECT_EQ(self->run.exception_error_code, 0);
EXPECT_EQ(self->run.exception_addr, 0);
get_buf_op.header.type = ENCL_OP_GET_FROM_BUFFER;
get_buf_op.value = 0;
EXPECT_EQ(ENCL_CALL(&get_buf_op, &self->run, true), 0);
EXPECT_EQ(get_buf_op.value, MAGIC);
EXPECT_EEXIT(&self->run);
EXPECT_EQ(self->run.exception_vector, 0);
EXPECT_EQ(self->run.exception_error_code, 0);
EXPECT_EQ(self->run.exception_addr, 0);
/*
* Phase 2 of test:
* Remove pages associated with new TCS, create a regular page
* where TCS page used to be and verify it can be used as a regular
* page.
*/
/* Start page removal by requesting change of page type to PT_TRIM. */
memset(&modt_ioc, 0, sizeof(modt_ioc));
modt_ioc.offset = total_size;
modt_ioc.length = 3 * PAGE_SIZE;
modt_ioc.page_type = SGX_PAGE_TYPE_TRIM;
ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_MODIFY_TYPES, &modt_ioc);
errno_save = ret == -1 ? errno : 0;
EXPECT_EQ(ret, 0);
EXPECT_EQ(errno_save, 0);
EXPECT_EQ(modt_ioc.result, 0);
EXPECT_EQ(modt_ioc.count, 3 * PAGE_SIZE);
/*
* Enter enclave via TCS #1 and approve page removal by sending
* EACCEPT for each of three removed pages.
*/
self->run.tcs = self->encl.encl_base;
eaccept_op.epc_addr = (unsigned long)stack_end;
eaccept_op.flags = SGX_SECINFO_TRIM | SGX_SECINFO_MODIFIED;
eaccept_op.ret = 0;
eaccept_op.header.type = ENCL_OP_EACCEPT;
EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0);
EXPECT_EEXIT(&self->run);
EXPECT_EQ(self->run.exception_vector, 0);
EXPECT_EQ(self->run.exception_error_code, 0);
EXPECT_EQ(self->run.exception_addr, 0);
EXPECT_EQ(eaccept_op.ret, 0);
eaccept_op.epc_addr = (unsigned long)tcs;
eaccept_op.ret = 0;
EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0);
EXPECT_EEXIT(&self->run);
EXPECT_EQ(self->run.exception_vector, 0);
EXPECT_EQ(self->run.exception_error_code, 0);
EXPECT_EQ(self->run.exception_addr, 0);
EXPECT_EQ(eaccept_op.ret, 0);
eaccept_op.epc_addr = (unsigned long)ssa;
eaccept_op.ret = 0;
EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0);
EXPECT_EEXIT(&self->run);
EXPECT_EQ(self->run.exception_vector, 0);
EXPECT_EQ(self->run.exception_error_code, 0);
EXPECT_EQ(self->run.exception_addr, 0);
EXPECT_EQ(eaccept_op.ret, 0);
/* Send final ioctl() to complete page removal. */
memset(&remove_ioc, 0, sizeof(remove_ioc));
remove_ioc.offset = total_size;
remove_ioc.length = 3 * PAGE_SIZE;
ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_REMOVE_PAGES, &remove_ioc);
errno_save = ret == -1 ? errno : 0;
EXPECT_EQ(ret, 0);
EXPECT_EQ(errno_save, 0);
EXPECT_EQ(remove_ioc.count, 3 * PAGE_SIZE);
/*
* Enter enclave via TCS #1 and access location where TCS #3 was to
* trigger dynamic add of regular page at that location.
*/
eaccept_op.epc_addr = (unsigned long)tcs;
eaccept_op.flags = SGX_SECINFO_R | SGX_SECINFO_W | SGX_SECINFO_REG | SGX_SECINFO_PENDING;
eaccept_op.ret = 0;
eaccept_op.header.type = ENCL_OP_EACCEPT;
EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0);
EXPECT_EEXIT(&self->run);
EXPECT_EQ(self->run.exception_vector, 0);
EXPECT_EQ(self->run.exception_error_code, 0);
EXPECT_EQ(self->run.exception_addr, 0);
EXPECT_EQ(eaccept_op.ret, 0);
/*
* New page should be accessible from within enclave - write to it.
*/
put_addr_op.value = MAGIC;
put_addr_op.addr = (unsigned long)tcs;
put_addr_op.header.type = ENCL_OP_PUT_TO_ADDRESS;
EXPECT_EQ(ENCL_CALL(&put_addr_op, &self->run, true), 0);
EXPECT_EEXIT(&self->run);
EXPECT_EQ(self->run.exception_vector, 0);
EXPECT_EQ(self->run.exception_error_code, 0);
EXPECT_EQ(self->run.exception_addr, 0);
/*
* Read memory from newly added page that was just written to,
* confirming that data previously written (MAGIC) is present.
*/
get_addr_op.value = 0;
get_addr_op.addr = (unsigned long)tcs;
get_addr_op.header.type = ENCL_OP_GET_FROM_ADDRESS;
EXPECT_EQ(ENCL_CALL(&get_addr_op, &self->run, true), 0);
EXPECT_EQ(get_addr_op.value, MAGIC);
EXPECT_EEXIT(&self->run);
EXPECT_EQ(self->run.exception_vector, 0);
EXPECT_EQ(self->run.exception_error_code, 0);
EXPECT_EQ(self->run.exception_addr, 0);
munmap(addr, 3 * PAGE_SIZE);
}
/*
* Ensure sane behavior if user requests page removal, does not run
* EACCEPT from within enclave but still attempts to finalize page removal
* with the SGX_IOC_ENCLAVE_REMOVE_PAGES ioctl(). The latter should fail
* because the removal was not EACCEPTed from within the enclave.
*/
TEST_F(enclave, remove_added_page_no_eaccept)
{
struct sgx_enclave_remove_pages remove_ioc;
struct encl_op_get_from_addr get_addr_op;
struct sgx_enclave_modify_types modt_ioc;
struct encl_op_put_to_addr put_addr_op;
unsigned long data_start;
int ret, errno_save;
ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata));
memset(&self->run, 0, sizeof(self->run));
self->run.tcs = self->encl.encl_base;
/*
* Hardware (SGX2) and kernel support is needed for this test. Start
* with a check that the test has a chance of succeeding.
*/
memset(&modt_ioc, 0, sizeof(modt_ioc));
ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_MODIFY_TYPES, &modt_ioc);
if (ret == -1) {
if (errno == ENOTTY)
SKIP(return,
"Kernel does not support SGX_IOC_ENCLAVE_MODIFY_TYPES ioctl()");
else if (errno == ENODEV)
SKIP(return, "System does not support SGX2");
}
/*
* Invalid parameters were provided during sanity check,
* expect command to fail.
*/
EXPECT_EQ(ret, -1);
/*
* Page that will be removed is the second data page in the .data
* segment. This forms part of the local encl_buffer within the
* enclave.
*/
data_start = self->encl.encl_base +
encl_get_data_offset(&self->encl) + PAGE_SIZE;
/*
* Sanity check that page at @data_start is writable before
* removing it.
*
* Start by writing MAGIC to test page.
*/
put_addr_op.value = MAGIC;
put_addr_op.addr = data_start;
put_addr_op.header.type = ENCL_OP_PUT_TO_ADDRESS;
EXPECT_EQ(ENCL_CALL(&put_addr_op, &self->run, true), 0);
EXPECT_EEXIT(&self->run);
EXPECT_EQ(self->run.exception_vector, 0);
EXPECT_EQ(self->run.exception_error_code, 0);
EXPECT_EQ(self->run.exception_addr, 0);
/*
* Read memory that was just written to, confirming that data
* previously written (MAGIC) is present.
*/
get_addr_op.value = 0;
get_addr_op.addr = data_start;
get_addr_op.header.type = ENCL_OP_GET_FROM_ADDRESS;
EXPECT_EQ(ENCL_CALL(&get_addr_op, &self->run, true), 0);
EXPECT_EQ(get_addr_op.value, MAGIC);
EXPECT_EEXIT(&self->run);
EXPECT_EQ(self->run.exception_vector, 0);
EXPECT_EQ(self->run.exception_error_code, 0);
EXPECT_EQ(self->run.exception_addr, 0);
/* Start page removal by requesting change of page type to PT_TRIM */
memset(&modt_ioc, 0, sizeof(modt_ioc));
modt_ioc.offset = encl_get_data_offset(&self->encl) + PAGE_SIZE;
modt_ioc.length = PAGE_SIZE;
modt_ioc.page_type = SGX_PAGE_TYPE_TRIM;
ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_MODIFY_TYPES, &modt_ioc);
errno_save = ret == -1 ? errno : 0;
EXPECT_EQ(ret, 0);
EXPECT_EQ(errno_save, 0);
EXPECT_EQ(modt_ioc.result, 0);
EXPECT_EQ(modt_ioc.count, 4096);
/* Skip EACCEPT */
/* Send final ioctl() to complete page removal */
memset(&remove_ioc, 0, sizeof(remove_ioc));
remove_ioc.offset = encl_get_data_offset(&self->encl) + PAGE_SIZE;
remove_ioc.length = PAGE_SIZE;
ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_REMOVE_PAGES, &remove_ioc);
errno_save = ret == -1 ? errno : 0;
/* Operation not permitted since EACCEPT was omitted. */
EXPECT_EQ(ret, -1);
EXPECT_EQ(errno_save, EPERM);
EXPECT_EQ(remove_ioc.count, 0);
}
/*
* Request enclave page removal but instead of correctly following with
* EACCEPT a read attempt to page is made from within the enclave.
*/
TEST_F(enclave, remove_added_page_invalid_access)
{
struct encl_op_get_from_addr get_addr_op;
struct encl_op_put_to_addr put_addr_op;
struct sgx_enclave_modify_types ioc;
unsigned long data_start;
int ret, errno_save;
ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata));
memset(&self->run, 0, sizeof(self->run));
self->run.tcs = self->encl.encl_base;
/*
* Hardware (SGX2) and kernel support is needed for this test. Start
* with a check that the test has a chance of succeeding.
*/
memset(&ioc, 0, sizeof(ioc));
ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_MODIFY_TYPES, &ioc);
if (ret == -1) {
if (errno == ENOTTY)
SKIP(return,
"Kernel does not support SGX_IOC_ENCLAVE_MODIFY_TYPES ioctl()");
else if (errno == ENODEV)
SKIP(return, "System does not support SGX2");
}
/*
* Invalid parameters were provided during sanity check,
* expect command to fail.
*/
EXPECT_EQ(ret, -1);
/*
* Page that will be removed is the second data page in the .data
* segment. This forms part of the local encl_buffer within the
* enclave.
*/
data_start = self->encl.encl_base +
encl_get_data_offset(&self->encl) + PAGE_SIZE;
/*
* Sanity check that page at @data_start is writable before
* removing it.
*
* Start by writing MAGIC to test page.
*/
put_addr_op.value = MAGIC;
put_addr_op.addr = data_start;
put_addr_op.header.type = ENCL_OP_PUT_TO_ADDRESS;
EXPECT_EQ(ENCL_CALL(&put_addr_op, &self->run, true), 0);
EXPECT_EEXIT(&self->run);
EXPECT_EQ(self->run.exception_vector, 0);
EXPECT_EQ(self->run.exception_error_code, 0);
EXPECT_EQ(self->run.exception_addr, 0);
/*
* Read memory that was just written to, confirming that data
* previously written (MAGIC) is present.
*/
get_addr_op.value = 0;
get_addr_op.addr = data_start;
get_addr_op.header.type = ENCL_OP_GET_FROM_ADDRESS;
EXPECT_EQ(ENCL_CALL(&get_addr_op, &self->run, true), 0);
EXPECT_EQ(get_addr_op.value, MAGIC);
EXPECT_EEXIT(&self->run);
EXPECT_EQ(self->run.exception_vector, 0);
EXPECT_EQ(self->run.exception_error_code, 0);
EXPECT_EQ(self->run.exception_addr, 0);
/* Start page removal by requesting change of page type to PT_TRIM. */
memset(&ioc, 0, sizeof(ioc));
ioc.offset = encl_get_data_offset(&self->encl) + PAGE_SIZE;
ioc.length = PAGE_SIZE;
ioc.page_type = SGX_PAGE_TYPE_TRIM;
ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_MODIFY_TYPES, &ioc);
errno_save = ret == -1 ? errno : 0;
EXPECT_EQ(ret, 0);
EXPECT_EQ(errno_save, 0);
EXPECT_EQ(ioc.result, 0);
EXPECT_EQ(ioc.count, 4096);
/*
* Read from page that was just removed.
*/
get_addr_op.value = 0;
EXPECT_EQ(ENCL_CALL(&get_addr_op, &self->run, true), 0);
/*
* From the kernel's perspective the page is present, but according to SGX
* the page should not be accessible, so a #PF with the SGX bit set is
* expected.
*/
EXPECT_EQ(self->run.function, ERESUME);
EXPECT_EQ(self->run.exception_vector, 14);
EXPECT_EQ(self->run.exception_error_code, 0x8005);
EXPECT_EQ(self->run.exception_addr, data_start);
}
/*
* Request enclave page removal and correctly follow with
* EACCEPT but do not follow with removal ioctl() but instead a read attempt
* to removed page is made from within the enclave.
*/
TEST_F(enclave, remove_added_page_invalid_access_after_eaccept)
{
struct encl_op_get_from_addr get_addr_op;
struct encl_op_put_to_addr put_addr_op;
struct sgx_enclave_modify_types ioc;
struct encl_op_eaccept eaccept_op;
unsigned long data_start;
int ret, errno_save;
ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata));
memset(&self->run, 0, sizeof(self->run));
self->run.tcs = self->encl.encl_base;
/*
* Hardware (SGX2) and kernel support is needed for this test. Start
* with a check that the test has a chance of succeeding.
*/
memset(&ioc, 0, sizeof(ioc));
ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_MODIFY_TYPES, &ioc);
if (ret == -1) {
if (errno == ENOTTY)
SKIP(return,
"Kernel does not support SGX_IOC_ENCLAVE_MODIFY_TYPES ioctl()");
else if (errno == ENODEV)
SKIP(return, "System does not support SGX2");
}
/*
* Invalid parameters were provided during sanity check,
* expect command to fail.
*/
EXPECT_EQ(ret, -1);
/*
* Page that will be removed is the second data page in the .data
* segment. This forms part of the local encl_buffer within the
* enclave.
*/
data_start = self->encl.encl_base +
encl_get_data_offset(&self->encl) + PAGE_SIZE;
/*
* Sanity check that page at @data_start is writable before
* removing it.
*
* Start by writing MAGIC to test page.
*/
put_addr_op.value = MAGIC;
put_addr_op.addr = data_start;
put_addr_op.header.type = ENCL_OP_PUT_TO_ADDRESS;
EXPECT_EQ(ENCL_CALL(&put_addr_op, &self->run, true), 0);
EXPECT_EEXIT(&self->run);
EXPECT_EQ(self->run.exception_vector, 0);
EXPECT_EQ(self->run.exception_error_code, 0);
EXPECT_EQ(self->run.exception_addr, 0);
/*
* Read memory that was just written to, confirming that data
* previously written (MAGIC) is present.
*/
get_addr_op.value = 0;
get_addr_op.addr = data_start;
get_addr_op.header.type = ENCL_OP_GET_FROM_ADDRESS;
EXPECT_EQ(ENCL_CALL(&get_addr_op, &self->run, true), 0);
EXPECT_EQ(get_addr_op.value, MAGIC);
EXPECT_EEXIT(&self->run);
EXPECT_EQ(self->run.exception_vector, 0);
EXPECT_EQ(self->run.exception_error_code, 0);
EXPECT_EQ(self->run.exception_addr, 0);
/* Start page removal by requesting change of page type to PT_TRIM. */
memset(&ioc, 0, sizeof(ioc));
ioc.offset = encl_get_data_offset(&self->encl) + PAGE_SIZE;
ioc.length = PAGE_SIZE;
ioc.page_type = SGX_PAGE_TYPE_TRIM;
ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_MODIFY_TYPES, &ioc);
errno_save = ret == -1 ? errno : 0;
EXPECT_EQ(ret, 0);
EXPECT_EQ(errno_save, 0);
EXPECT_EQ(ioc.result, 0);
EXPECT_EQ(ioc.count, 4096);
eaccept_op.epc_addr = (unsigned long)data_start;
eaccept_op.ret = 0;
eaccept_op.flags = SGX_SECINFO_TRIM | SGX_SECINFO_MODIFIED;
eaccept_op.header.type = ENCL_OP_EACCEPT;
EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0);
EXPECT_EEXIT(&self->run);
EXPECT_EQ(self->run.exception_vector, 0);
EXPECT_EQ(self->run.exception_error_code, 0);
EXPECT_EQ(self->run.exception_addr, 0);
EXPECT_EQ(eaccept_op.ret, 0);
/* Skip ioctl() to remove page. */
/*
* Read from page that was just removed.
*/
get_addr_op.value = 0;
EXPECT_EQ(ENCL_CALL(&get_addr_op, &self->run, true), 0);
/*
* From the kernel's perspective the page is present, but according to SGX
* the page should not be accessible, so a #PF with the SGX bit set is
* expected.
*/
EXPECT_EQ(self->run.function, ERESUME);
EXPECT_EQ(self->run.exception_vector, 14);
EXPECT_EQ(self->run.exception_error_code, 0x8005);
EXPECT_EQ(self->run.exception_addr, data_start);
}
TEST_F(enclave, remove_untouched_page)
{
struct sgx_enclave_remove_pages remove_ioc;
struct sgx_enclave_modify_types modt_ioc;
struct encl_op_eaccept eaccept_op;
unsigned long data_start;
int ret, errno_save;
ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata));
/*
* Hardware (SGX2) and kernel support is needed for this test. Start
* with a check that the test has a chance of succeeding.
*/
memset(&modt_ioc, 0, sizeof(modt_ioc));
ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_MODIFY_TYPES, &modt_ioc);
if (ret == -1) {
if (errno == ENOTTY)
SKIP(return,
"Kernel does not support SGX_IOC_ENCLAVE_MODIFY_TYPES ioctl()");
else if (errno == ENODEV)
SKIP(return, "System does not support SGX2");
}
/*
* Invalid parameters were provided during sanity check,
* expect command to fail.
*/
EXPECT_EQ(ret, -1);
/* SGX2 is supported by kernel and hardware, test can proceed. */
memset(&self->run, 0, sizeof(self->run));
self->run.tcs = self->encl.encl_base;
data_start = self->encl.encl_base +
encl_get_data_offset(&self->encl) + PAGE_SIZE;
memset(&modt_ioc, 0, sizeof(modt_ioc));
modt_ioc.offset = encl_get_data_offset(&self->encl) + PAGE_SIZE;
modt_ioc.length = PAGE_SIZE;
modt_ioc.page_type = SGX_PAGE_TYPE_TRIM;
ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_MODIFY_TYPES, &modt_ioc);
errno_save = ret == -1 ? errno : 0;
EXPECT_EQ(ret, 0);
EXPECT_EQ(errno_save, 0);
EXPECT_EQ(modt_ioc.result, 0);
EXPECT_EQ(modt_ioc.count, 4096);
/*
* Enter enclave via TCS #1 and approve page removal by sending
* EACCEPT for removed page.
*/
eaccept_op.epc_addr = data_start;
eaccept_op.flags = SGX_SECINFO_TRIM | SGX_SECINFO_MODIFIED;
eaccept_op.ret = 0;
eaccept_op.header.type = ENCL_OP_EACCEPT;
EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0);
EXPECT_EEXIT(&self->run);
EXPECT_EQ(self->run.exception_vector, 0);
EXPECT_EQ(self->run.exception_error_code, 0);
EXPECT_EQ(self->run.exception_addr, 0);
EXPECT_EQ(eaccept_op.ret, 0);
memset(&remove_ioc, 0, sizeof(remove_ioc));
remove_ioc.offset = encl_get_data_offset(&self->encl) + PAGE_SIZE;
remove_ioc.length = PAGE_SIZE;
ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_REMOVE_PAGES, &remove_ioc);
errno_save = ret == -1 ? errno : 0;
EXPECT_EQ(ret, 0);
EXPECT_EQ(errno_save, 0);
EXPECT_EQ(remove_ioc.count, 4096);
}
TEST_HARNESS_MAIN
| linux-master | tools/testing/selftests/sgx/main.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2016-20 Intel Corporation. */
#include <assert.h>
#include <elf.h>
#include <errno.h>
#include <fcntl.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/types.h>
#include "defines.h"
#include "main.h"
void encl_delete(struct encl *encl)
{
struct encl_segment *heap_seg;
if (encl->encl_base)
munmap((void *)encl->encl_base, encl->encl_size);
if (encl->bin)
munmap(encl->bin, encl->bin_size);
if (encl->fd)
close(encl->fd);
if (encl->segment_tbl) {
heap_seg = &encl->segment_tbl[encl->nr_segments - 1];
munmap(heap_seg->src, heap_seg->size);
free(encl->segment_tbl);
}
memset(encl, 0, sizeof(*encl));
}
static bool encl_map_bin(const char *path, struct encl *encl)
{
struct stat sb;
void *bin;
int ret;
int fd;
fd = open(path, O_RDONLY);
if (fd == -1) {
perror("enclave executable open()");
return false;
}
ret = stat(path, &sb);
if (ret) {
perror("enclave executable stat()");
goto err;
}
bin = mmap(NULL, sb.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
if (bin == MAP_FAILED) {
perror("enclave executable mmap()");
goto err;
}
encl->bin = bin;
encl->bin_size = sb.st_size;
close(fd);
return true;
err:
close(fd);
return false;
}
static bool encl_ioc_create(struct encl *encl)
{
struct sgx_secs *secs = &encl->secs;
struct sgx_enclave_create ioc;
int rc;
assert(encl->encl_base != 0);
memset(secs, 0, sizeof(*secs));
secs->ssa_frame_size = 1;
secs->attributes = SGX_ATTR_MODE64BIT;
secs->xfrm = 3;
secs->base = encl->encl_base;
secs->size = encl->encl_size;
ioc.src = (unsigned long)secs;
rc = ioctl(encl->fd, SGX_IOC_ENCLAVE_CREATE, &ioc);
if (rc) {
perror("SGX_IOC_ENCLAVE_CREATE failed");
munmap((void *)secs->base, encl->encl_size);
return false;
}
return true;
}
static bool encl_ioc_add_pages(struct encl *encl, struct encl_segment *seg)
{
struct sgx_enclave_add_pages ioc;
struct sgx_secinfo secinfo;
int rc;
memset(&secinfo, 0, sizeof(secinfo));
secinfo.flags = seg->flags;
ioc.src = (uint64_t)seg->src;
ioc.offset = seg->offset;
ioc.length = seg->size;
ioc.secinfo = (unsigned long)&secinfo;
if (seg->measure)
ioc.flags = SGX_PAGE_MEASURE;
else
ioc.flags = 0;
rc = ioctl(encl->fd, SGX_IOC_ENCLAVE_ADD_PAGES, &ioc);
if (rc < 0) {
perror("SGX_IOC_ENCLAVE_ADD_PAGES failed");
return false;
}
return true;
}
/*
* Parse the enclave code's symbol table to locate and return the address
* of the provided symbol.
*/
uint64_t encl_get_entry(struct encl *encl, const char *symbol)
{
Elf64_Shdr *sections;
Elf64_Sym *symtab;
Elf64_Ehdr *ehdr;
char *sym_names;
int num_sym;
int i;
ehdr = encl->bin;
sections = encl->bin + ehdr->e_shoff;
for (i = 0; i < ehdr->e_shnum; i++) {
if (sections[i].sh_type == SHT_SYMTAB) {
symtab = (Elf64_Sym *)((char *)encl->bin + sections[i].sh_offset);
num_sym = sections[i].sh_size / sections[i].sh_entsize;
break;
}
}
for (i = 0; i < ehdr->e_shnum; i++) {
if (sections[i].sh_type == SHT_STRTAB) {
sym_names = (char *)encl->bin + sections[i].sh_offset;
break;
}
}
for (i = 0; i < num_sym; i++) {
Elf64_Sym *sym = &symtab[i];
if (!strcmp(symbol, sym_names + sym->st_name))
return (uint64_t)sym->st_value;
}
return 0;
}
bool encl_load(const char *path, struct encl *encl, unsigned long heap_size)
{
const char device_path[] = "/dev/sgx_enclave";
struct encl_segment *seg;
Elf64_Phdr *phdr_tbl;
off_t src_offset;
Elf64_Ehdr *ehdr;
struct stat sb;
void *ptr;
int i, j;
int ret;
int fd = -1;
memset(encl, 0, sizeof(*encl));
fd = open(device_path, O_RDWR);
if (fd < 0) {
perror("Unable to open /dev/sgx_enclave");
goto err;
}
ret = stat(device_path, &sb);
if (ret) {
perror("device file stat()");
goto err;
}
ptr = mmap(NULL, PAGE_SIZE, PROT_READ, MAP_SHARED, fd, 0);
if (ptr == (void *)-1) {
perror("mmap for read");
goto err;
}
munmap(ptr, PAGE_SIZE);
#define ERR_MSG \
"mmap() succeeded for PROT_READ, but failed for PROT_EXEC.\n" \
" Check that /dev does not have noexec set:\n" \
" \tmount | grep \"/dev .*noexec\"\n" \
" If so, remount it executable: mount -o remount,exec /dev\n\n"
ptr = mmap(NULL, PAGE_SIZE, PROT_EXEC, MAP_SHARED, fd, 0);
if (ptr == (void *)-1) {
fprintf(stderr, ERR_MSG);
goto err;
}
munmap(ptr, PAGE_SIZE);
encl->fd = fd;
if (!encl_map_bin(path, encl))
goto err;
ehdr = encl->bin;
phdr_tbl = encl->bin + ehdr->e_phoff;
encl->nr_segments = 1; /* one for the heap */
for (i = 0; i < ehdr->e_phnum; i++) {
Elf64_Phdr *phdr = &phdr_tbl[i];
if (phdr->p_type == PT_LOAD)
encl->nr_segments++;
}
encl->segment_tbl = calloc(encl->nr_segments,
sizeof(struct encl_segment));
if (!encl->segment_tbl)
goto err;
for (i = 0, j = 0; i < ehdr->e_phnum; i++) {
Elf64_Phdr *phdr = &phdr_tbl[i];
unsigned int flags = phdr->p_flags;
if (phdr->p_type != PT_LOAD)
continue;
seg = &encl->segment_tbl[j];
if (!!(flags & ~(PF_R | PF_W | PF_X))) {
fprintf(stderr,
"%d has invalid segment flags 0x%02x.\n", i,
phdr->p_flags);
goto err;
}
if (j == 0 && flags != (PF_R | PF_W)) {
fprintf(stderr,
"TCS has invalid segment flags 0x%02x.\n",
phdr->p_flags);
goto err;
}
if (j == 0) {
src_offset = phdr->p_offset & PAGE_MASK;
encl->src = encl->bin + src_offset;
seg->prot = PROT_READ | PROT_WRITE;
seg->flags = SGX_PAGE_TYPE_TCS << 8;
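/* The secinfo page type occupies bits 15:8 of the flags; TCS pages carry no R/W/X bits. */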
} else {
seg->prot = (phdr->p_flags & PF_R) ? PROT_READ : 0;
seg->prot |= (phdr->p_flags & PF_W) ? PROT_WRITE : 0;
seg->prot |= (phdr->p_flags & PF_X) ? PROT_EXEC : 0;
seg->flags = (SGX_PAGE_TYPE_REG << 8) | seg->prot;
}
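/* Page-align the segment: offset relative to the first mapped page, size rounded up to whole pages. */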
seg->offset = (phdr->p_offset & PAGE_MASK) - src_offset;
seg->size = (phdr->p_filesz + PAGE_SIZE - 1) & PAGE_MASK;
seg->src = encl->src + seg->offset;
seg->measure = true;
j++;
}
assert(j == encl->nr_segments - 1);
seg = &encl->segment_tbl[j];
seg->offset = encl->segment_tbl[j - 1].offset + encl->segment_tbl[j - 1].size;
seg->size = heap_size;
seg->src = mmap(NULL, heap_size, PROT_READ | PROT_WRITE,
MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
seg->prot = PROT_READ | PROT_WRITE;
seg->flags = (SGX_PAGE_TYPE_REG << 8) | seg->prot;
seg->measure = false;
if (seg->src == MAP_FAILED)
goto err;
encl->src_size = encl->segment_tbl[j].offset + encl->segment_tbl[j].size;
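/* SECS.SIZE must be a power of two; round the loaded size up accordingly, starting from one page. */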
for (encl->encl_size = 4096; encl->encl_size < encl->src_size; )
encl->encl_size <<= 1;
return true;
err:
if (fd != -1)
close(fd);
encl_delete(encl);
return false;
}
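/*
* Reserve twice the enclave size so that a base aligned to the enclave size
* (as SGX requires) can be carved out of the reservation; the unused head
* and tail of the reservation are then unmapped.
*/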
static bool encl_map_area(struct encl *encl)
{
size_t encl_size = encl->encl_size;
void *area;
area = mmap(NULL, encl_size * 2, PROT_NONE,
MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
if (area == MAP_FAILED) {
perror("reservation mmap()");
return false;
}
encl->encl_base = ((uint64_t)area + encl_size - 1) & ~(encl_size - 1);
munmap(area, encl->encl_base - (uint64_t)area);
munmap((void *)(encl->encl_base + encl_size),
(uint64_t)area + encl_size - encl->encl_base);
return true;
}
bool encl_build(struct encl *encl)
{
struct sgx_enclave_init ioc;
int ret;
int i;
if (!encl_map_area(encl))
return false;
if (!encl_ioc_create(encl))
return false;
/*
* Pages must be added before mapping VMAs because their permissions
* cap the VMA permissions.
*/
for (i = 0; i < encl->nr_segments; i++) {
struct encl_segment *seg = &encl->segment_tbl[i];
if (!encl_ioc_add_pages(encl, seg))
return false;
}
ioc.sigstruct = (uint64_t)&encl->sigstruct;
ret = ioctl(encl->fd, SGX_IOC_ENCLAVE_INIT, &ioc);
if (ret) {
perror("SGX_IOC_ENCLAVE_INIT failed");
return false;
}
return true;
}
| linux-master | tools/testing/selftests/sgx/load.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* Real Time Clock Driver Test
* by: Benjamin Gaignard ([email protected])
*
* To build
* gcc rtctest_setdate.c -o rtctest_setdate
*/
#include <stdio.h>
#include <linux/rtc.h>
#include <sys/ioctl.h>
#include <sys/time.h>
#include <sys/types.h>
#include <fcntl.h>
#include <unistd.h>
#include <stdlib.h>
#include <errno.h>
static const char default_time[] = "00:00:00";
int main(int argc, char **argv)
{
int fd, retval;
struct rtc_time new, current;
const char *rtc, *date;
const char *time = default_time;
switch (argc) {
case 4:
time = argv[3];
/* FALLTHROUGH */
case 3:
date = argv[2];
rtc = argv[1];
break;
default:
fprintf(stderr, "usage: rtctest_setdate <rtcdev> <DD-MM-YYYY> [HH:MM:SS]\n");
return 1;
}
fd = open(rtc, O_RDONLY);
if (fd == -1) {
perror(rtc);
exit(errno);
}
sscanf(date, "%d-%d-%d", &new.tm_mday, &new.tm_mon, &new.tm_year);
new.tm_mon -= 1;
new.tm_year -= 1900;
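/* struct rtc_time months run 0-11 and years count from 1900, matching struct tm. */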
sscanf(time, "%d:%d:%d", &new.tm_hour, &new.tm_min, &new.tm_sec);
fprintf(stderr, "Test will set RTC date/time to %d-%d-%d, %02d:%02d:%02d.\n",
new.tm_mday, new.tm_mon + 1, new.tm_year + 1900,
new.tm_hour, new.tm_min, new.tm_sec);
/* Write the new date in RTC */
retval = ioctl(fd, RTC_SET_TIME, &new);
if (retval == -1) {
perror("RTC_SET_TIME ioctl");
close(fd);
exit(errno);
}
/* Read back */
retval = ioctl(fd, RTC_RD_TIME, &current);
if (retval == -1) {
perror("RTC_RD_TIME ioctl");
exit(errno);
}
fprintf(stderr, "\n\nCurrent RTC date/time is %d-%d-%d, %02d:%02d:%02d.\n",
current.tm_mday, current.tm_mon + 1, current.tm_year + 1900,
current.tm_hour, current.tm_min, current.tm_sec);
close(fd);
return 0;
}
| linux-master | tools/testing/selftests/rtc/setdate.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Real Time Clock Driver Test Program
*
* Copyright (c) 2018 Alexandre Belloni <[email protected]>
*/
#include <errno.h>
#include <fcntl.h>
#include <linux/rtc.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <sys/time.h>
#include <sys/types.h>
#include <time.h>
#include <unistd.h>
#include "../kselftest_harness.h"
#define NUM_UIE 3
#define ALARM_DELTA 3
#define READ_LOOP_DURATION_SEC 30
#define READ_LOOP_SLEEP_MS 11
static char *rtc_file = "/dev/rtc0";
FIXTURE(rtc) {
int fd;
};
FIXTURE_SETUP(rtc) {
self->fd = open(rtc_file, O_RDONLY);
}
FIXTURE_TEARDOWN(rtc) {
close(self->fd);
}
TEST_F(rtc, date_read) {
int rc;
struct rtc_time rtc_tm;
if (self->fd == -1 && errno == ENOENT)
SKIP(return, "Skipping test since %s does not exist", rtc_file);
ASSERT_NE(-1, self->fd);
/* Read the RTC time/date */
rc = ioctl(self->fd, RTC_RD_TIME, &rtc_tm);
ASSERT_NE(-1, rc);
TH_LOG("Current RTC date/time is %02d/%02d/%02d %02d:%02d:%02d.",
rtc_tm.tm_mday, rtc_tm.tm_mon + 1, rtc_tm.tm_year + 1900,
rtc_tm.tm_hour, rtc_tm.tm_min, rtc_tm.tm_sec);
}
static time_t rtc_time_to_timestamp(struct rtc_time *rtc_time)
{
struct tm tm_time = {
.tm_sec = rtc_time->tm_sec,
.tm_min = rtc_time->tm_min,
.tm_hour = rtc_time->tm_hour,
.tm_mday = rtc_time->tm_mday,
.tm_mon = rtc_time->tm_mon,
.tm_year = rtc_time->tm_year,
};
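/*
* mktime() interprets the fields as local time; that is acceptable here
* because the result is only compared against values converted the same way.
*/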
return mktime(&tm_time);
}
static void nanosleep_with_retries(long ns)
{
struct timespec req = {
.tv_sec = 0,
.tv_nsec = ns,
};
struct timespec rem;
while (nanosleep(&req, &rem) != 0) {
req.tv_sec = rem.tv_sec;
req.tv_nsec = rem.tv_nsec;
}
}
TEST_F_TIMEOUT(rtc, date_read_loop, READ_LOOP_DURATION_SEC + 2) {
int rc;
long iter_count = 0;
struct rtc_time rtc_tm;
time_t start_rtc_read, prev_rtc_read;
if (self->fd == -1 && errno == ENOENT)
SKIP(return, "Skipping test since %s does not exist", rtc_file);
ASSERT_NE(-1, self->fd);
TH_LOG("Continuously reading RTC time for %ds (with %dms breaks after every read).",
READ_LOOP_DURATION_SEC, READ_LOOP_SLEEP_MS);
rc = ioctl(self->fd, RTC_RD_TIME, &rtc_tm);
ASSERT_NE(-1, rc);
start_rtc_read = rtc_time_to_timestamp(&rtc_tm);
prev_rtc_read = start_rtc_read;
do {
time_t rtc_read;
rc = ioctl(self->fd, RTC_RD_TIME, &rtc_tm);
ASSERT_NE(-1, rc);
rtc_read = rtc_time_to_timestamp(&rtc_tm);
/* Time should not go backwards */
ASSERT_LE(prev_rtc_read, rtc_read);
/* Time should not increase by more than 1s at a time */
ASSERT_GE(prev_rtc_read + 1, rtc_read);
/* Sleep 11ms to avoid killing / overheating the RTC */
nanosleep_with_retries(READ_LOOP_SLEEP_MS * 1000000);
prev_rtc_read = rtc_read;
iter_count++;
} while (prev_rtc_read <= start_rtc_read + READ_LOOP_DURATION_SEC);
TH_LOG("Performed %ld RTC time reads.", iter_count);
}
TEST_F_TIMEOUT(rtc, uie_read, NUM_UIE + 2) {
int i, rc, irq = 0;
unsigned long data;
if (self->fd == -1 && errno == ENOENT)
SKIP(return, "Skipping test since %s does not exist", rtc_file);
ASSERT_NE(-1, self->fd);
/* Turn on update interrupts */
rc = ioctl(self->fd, RTC_UIE_ON, 0);
if (rc == -1) {
ASSERT_EQ(EINVAL, errno);
TH_LOG("skip update IRQs not supported.");
return;
}
for (i = 0; i < NUM_UIE; i++) {
/* This read will block */
rc = read(self->fd, &data, sizeof(data));
ASSERT_NE(-1, rc);
irq++;
}
EXPECT_EQ(NUM_UIE, irq);
rc = ioctl(self->fd, RTC_UIE_OFF, 0);
ASSERT_NE(-1, rc);
}
TEST_F(rtc, uie_select) {
int i, rc, irq = 0;
unsigned long data;
if (self->fd == -1 && errno == ENOENT)
SKIP(return, "Skipping test since %s does not exist", rtc_file);
ASSERT_NE(-1, self->fd);
/* Turn on update interrupts */
rc = ioctl(self->fd, RTC_UIE_ON, 0);
if (rc == -1) {
ASSERT_EQ(EINVAL, errno);
TH_LOG("skip update IRQs not supported.");
return;
}
for (i = 0; i < NUM_UIE; i++) {
struct timeval tv = { .tv_sec = 2 };
fd_set readfds;
FD_ZERO(&readfds);
FD_SET(self->fd, &readfds);
/* The select will wait until an RTC interrupt happens. */
rc = select(self->fd + 1, &readfds, NULL, NULL, &tv);
ASSERT_NE(-1, rc);
ASSERT_NE(0, rc);
/* This read won't block */
rc = read(self->fd, &data, sizeof(unsigned long));
ASSERT_NE(-1, rc);
irq++;
}
EXPECT_EQ(NUM_UIE, irq);
rc = ioctl(self->fd, RTC_UIE_OFF, 0);
ASSERT_NE(-1, rc);
}
TEST_F(rtc, alarm_alm_set) {
struct timeval tv = { .tv_sec = ALARM_DELTA + 2 };
unsigned long data;
struct rtc_time tm;
fd_set readfds;
time_t secs, new;
int rc;
if (self->fd == -1 && errno == ENOENT)
SKIP(return, "Skipping test since %s does not exist", rtc_file);
ASSERT_NE(-1, self->fd);
rc = ioctl(self->fd, RTC_RD_TIME, &tm);
ASSERT_NE(-1, rc);
secs = timegm((struct tm *)&tm) + ALARM_DELTA;
gmtime_r(&secs, (struct tm *)&tm);
rc = ioctl(self->fd, RTC_ALM_SET, &tm);
if (rc == -1) {
ASSERT_EQ(EINVAL, errno);
TH_LOG("skip alarms are not supported.");
return;
}
rc = ioctl(self->fd, RTC_ALM_READ, &tm);
ASSERT_NE(-1, rc);
TH_LOG("Alarm time now set to %02d:%02d:%02d.",
tm.tm_hour, tm.tm_min, tm.tm_sec);
/* Enable alarm interrupts */
rc = ioctl(self->fd, RTC_AIE_ON, 0);
ASSERT_NE(-1, rc);
FD_ZERO(&readfds);
FD_SET(self->fd, &readfds);
rc = select(self->fd + 1, &readfds, NULL, NULL, &tv);
ASSERT_NE(-1, rc);
ASSERT_NE(0, rc);
/* Disable alarm interrupts */
rc = ioctl(self->fd, RTC_AIE_OFF, 0);
ASSERT_NE(-1, rc);
rc = read(self->fd, &data, sizeof(unsigned long));
ASSERT_NE(-1, rc);
TH_LOG("data: %lx", data);
rc = ioctl(self->fd, RTC_RD_TIME, &tm);
ASSERT_NE(-1, rc);
new = timegm((struct tm *)&tm);
ASSERT_EQ(new, secs);
}
TEST_F(rtc, alarm_wkalm_set) {
struct timeval tv = { .tv_sec = ALARM_DELTA + 2 };
struct rtc_wkalrm alarm = { 0 };
struct rtc_time tm;
unsigned long data;
fd_set readfds;
time_t secs, new;
int rc;
if (self->fd == -1 && errno == ENOENT)
SKIP(return, "Skipping test since %s does not exist", rtc_file);
ASSERT_NE(-1, self->fd);
rc = ioctl(self->fd, RTC_RD_TIME, &alarm.time);
ASSERT_NE(-1, rc);
secs = timegm((struct tm *)&alarm.time) + ALARM_DELTA;
gmtime_r(&secs, (struct tm *)&alarm.time);
alarm.enabled = 1;
rc = ioctl(self->fd, RTC_WKALM_SET, &alarm);
if (rc == -1) {
ASSERT_EQ(EINVAL, errno);
TH_LOG("skip alarms are not supported.");
return;
}
rc = ioctl(self->fd, RTC_WKALM_RD, &alarm);
ASSERT_NE(-1, rc);
TH_LOG("Alarm time now set to %02d/%02d/%02d %02d:%02d:%02d.",
alarm.time.tm_mday, alarm.time.tm_mon + 1,
alarm.time.tm_year + 1900, alarm.time.tm_hour,
alarm.time.tm_min, alarm.time.tm_sec);
FD_ZERO(&readfds);
FD_SET(self->fd, &readfds);
rc = select(self->fd + 1, &readfds, NULL, NULL, &tv);
ASSERT_NE(-1, rc);
ASSERT_NE(0, rc);
rc = read(self->fd, &data, sizeof(unsigned long));
ASSERT_NE(-1, rc);
rc = ioctl(self->fd, RTC_RD_TIME, &tm);
ASSERT_NE(-1, rc);
new = timegm((struct tm *)&tm);
ASSERT_EQ(new, secs);
}
TEST_F_TIMEOUT(rtc, alarm_alm_set_minute, 65) {
struct timeval tv = { .tv_sec = 62 };
unsigned long data;
struct rtc_time tm;
fd_set readfds;
time_t secs, new;
int rc;
if (self->fd == -1 && errno == ENOENT)
SKIP(return, "Skipping test since %s does not exist", rtc_file);
ASSERT_NE(-1, self->fd);
rc = ioctl(self->fd, RTC_RD_TIME, &tm);
ASSERT_NE(-1, rc);
secs = timegm((struct tm *)&tm) + 60 - tm.tm_sec;
gmtime_r(&secs, (struct tm *)&tm);
rc = ioctl(self->fd, RTC_ALM_SET, &tm);
if (rc == -1) {
ASSERT_EQ(EINVAL, errno);
TH_LOG("skip alarms are not supported.");
return;
}
rc = ioctl(self->fd, RTC_ALM_READ, &tm);
ASSERT_NE(-1, rc);
TH_LOG("Alarm time now set to %02d:%02d:%02d.",
tm.tm_hour, tm.tm_min, tm.tm_sec);
/* Enable alarm interrupts */
rc = ioctl(self->fd, RTC_AIE_ON, 0);
ASSERT_NE(-1, rc);
FD_ZERO(&readfds);
FD_SET(self->fd, &readfds);
rc = select(self->fd + 1, &readfds, NULL, NULL, &tv);
ASSERT_NE(-1, rc);
ASSERT_NE(0, rc);
/* Disable alarm interrupts */
rc = ioctl(self->fd, RTC_AIE_OFF, 0);
ASSERT_NE(-1, rc);
rc = read(self->fd, &data, sizeof(unsigned long));
ASSERT_NE(-1, rc);
TH_LOG("data: %lx", data);
rc = ioctl(self->fd, RTC_RD_TIME, &tm);
ASSERT_NE(-1, rc);
new = timegm((struct tm *)&tm);
ASSERT_EQ(new, secs);
}
TEST_F_TIMEOUT(rtc, alarm_wkalm_set_minute, 65) {
struct timeval tv = { .tv_sec = 62 };
struct rtc_wkalrm alarm = { 0 };
struct rtc_time tm;
unsigned long data;
fd_set readfds;
time_t secs, new;
int rc;
if (self->fd == -1 && errno == ENOENT)
SKIP(return, "Skipping test since %s does not exist", rtc_file);
ASSERT_NE(-1, self->fd);
rc = ioctl(self->fd, RTC_RD_TIME, &alarm.time);
ASSERT_NE(-1, rc);
secs = timegm((struct tm *)&alarm.time) + 60 - alarm.time.tm_sec;
gmtime_r(&secs, (struct tm *)&alarm.time);
alarm.enabled = 1;
rc = ioctl(self->fd, RTC_WKALM_SET, &alarm);
if (rc == -1) {
ASSERT_EQ(EINVAL, errno);
TH_LOG("skip alarms are not supported.");
return;
}
rc = ioctl(self->fd, RTC_WKALM_RD, &alarm);
ASSERT_NE(-1, rc);
TH_LOG("Alarm time now set to %02d/%02d/%02d %02d:%02d:%02d.",
alarm.time.tm_mday, alarm.time.tm_mon + 1,
alarm.time.tm_year + 1900, alarm.time.tm_hour,
alarm.time.tm_min, alarm.time.tm_sec);
FD_ZERO(&readfds);
FD_SET(self->fd, &readfds);
rc = select(self->fd + 1, &readfds, NULL, NULL, &tv);
ASSERT_NE(-1, rc);
ASSERT_NE(0, rc);
rc = read(self->fd, &data, sizeof(unsigned long));
ASSERT_NE(-1, rc);
rc = ioctl(self->fd, RTC_RD_TIME, &tm);
ASSERT_NE(-1, rc);
new = timegm((struct tm *)&tm);
ASSERT_EQ(new, secs);
}
static void __attribute__((constructor))
__constructor_order_last(void)
{
if (!__constructor_order)
__constructor_order = _CONSTRUCTOR_ORDER_BACKWARD;
}
int main(int argc, char **argv)
{
switch (argc) {
case 2:
rtc_file = argv[1];
/* FALLTHROUGH */
case 1:
break;
default:
fprintf(stderr, "usage: %s [rtcdev]\n", argv[0]);
return 1;
}
return test_harness_run(argc, argv);
}
| linux-master | tools/testing/selftests/rtc/rtctest.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2021 Samsung Electronics
* Bongsu Jeon <[email protected]>
*
* Test code for nci
*/
#include <stdlib.h>
#include <errno.h>
#include <string.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <pthread.h>
#include <linux/genetlink.h>
#include <sys/socket.h>
#include <linux/nfc.h>
#include "../kselftest_harness.h"
#define GENLMSG_DATA(glh) ((void *)(NLMSG_DATA(glh) + GENL_HDRLEN))
#define GENLMSG_PAYLOAD(glh) (NLMSG_PAYLOAD(glh, 0) - GENL_HDRLEN)
#define NLA_DATA(na) ((void *)((char *)(na) + NLA_HDRLEN))
#define NLA_PAYLOAD(len) ((len) - NLA_HDRLEN)
#define MAX_MSG_SIZE 1024
#define IOCTL_GET_NCIDEV_IDX 0
#define VIRTUAL_NFC_PROTOCOLS (NFC_PROTO_JEWEL_MASK | \
NFC_PROTO_MIFARE_MASK | \
NFC_PROTO_FELICA_MASK | \
NFC_PROTO_ISO14443_MASK | \
NFC_PROTO_ISO14443_B_MASK | \
NFC_PROTO_ISO15693_MASK)
const __u8 nci_reset_cmd[] = {0x20, 0x00, 0x01, 0x01};
const __u8 nci_init_cmd[] = {0x20, 0x01, 0x00};
const __u8 nci_rf_discovery_cmd[] = {0x21, 0x03, 0x09, 0x04, 0x00, 0x01,
0x01, 0x01, 0x02, 0x01, 0x06, 0x01};
const __u8 nci_init_cmd_v2[] = {0x20, 0x01, 0x02, 0x00, 0x00};
const __u8 nci_rf_disc_map_cmd[] = {0x21, 0x00, 0x07, 0x02, 0x04, 0x03,
0x02, 0x05, 0x03, 0x03};
const __u8 nci_rf_deact_cmd[] = {0x21, 0x06, 0x01, 0x00};
const __u8 nci_reset_rsp[] = {0x40, 0x00, 0x03, 0x00, 0x10, 0x01};
const __u8 nci_reset_rsp_v2[] = {0x40, 0x00, 0x01, 0x00};
const __u8 nci_reset_ntf[] = {0x60, 0x00, 0x09, 0x02, 0x01, 0x20, 0x0e,
0x04, 0x61, 0x00, 0x04, 0x02};
const __u8 nci_init_rsp[] = {0x40, 0x01, 0x14, 0x00, 0x02, 0x0e, 0x02,
0x00, 0x03, 0x01, 0x02, 0x03, 0x02, 0xc8,
0x00, 0xff, 0x10, 0x00, 0x0e, 0x12, 0x00,
0x00, 0x04};
const __u8 nci_init_rsp_v2[] = {0x40, 0x01, 0x1c, 0x00, 0x1a, 0x7e, 0x06,
0x00, 0x02, 0x92, 0x04, 0xff, 0xff, 0x01,
0x00, 0x40, 0x06, 0x00, 0x00, 0x01, 0x01,
0x00, 0x02, 0x00, 0x03, 0x01, 0x01, 0x06,
0x00, 0x80, 0x00};
const __u8 nci_rf_disc_map_rsp[] = {0x41, 0x00, 0x01, 0x00};
const __u8 nci_rf_disc_rsp[] = {0x41, 0x03, 0x01, 0x00};
const __u8 nci_rf_deact_rsp[] = {0x41, 0x06, 0x01, 0x00};
const __u8 nci_rf_deact_ntf[] = {0x61, 0x06, 0x02, 0x00, 0x00};
const __u8 nci_rf_activate_ntf[] = {0x61, 0x05, 0x1D, 0x01, 0x02, 0x04, 0x00,
0xFF, 0xFF, 0x0C, 0x44, 0x03, 0x07, 0x04,
0x62, 0x26, 0x11, 0x80, 0x1D, 0x80, 0x01,
0x20, 0x00, 0x00, 0x00, 0x06, 0x05, 0x75,
0x77, 0x81, 0x02, 0x80};
const __u8 nci_t4t_select_cmd[] = {0x00, 0x00, 0x0C, 0x00, 0xA4, 0x04, 0x00,
0x07, 0xD2, 0x76, 0x00, 0x00, 0x85, 0x01, 0x01};
const __u8 nci_t4t_select_cmd2[] = {0x00, 0x00, 0x07, 0x00, 0xA4, 0x00, 0x0C, 0x02,
0xE1, 0x03};
const __u8 nci_t4t_select_cmd3[] = {0x00, 0x00, 0x07, 0x00, 0xA4, 0x00, 0x0C, 0x02,
0xE1, 0x04};
const __u8 nci_t4t_read_cmd[] = {0x00, 0x00, 0x05, 0x00, 0xB0, 0x00, 0x00, 0x0F};
const __u8 nci_t4t_read_rsp[] = {0x00, 0x00, 0x11, 0x00, 0x0F, 0x20, 0x00, 0x3B,
0x00, 0x34, 0x04, 0x06, 0xE1, 0x04, 0x08, 0x00,
0x00, 0x00, 0x90, 0x00};
const __u8 nci_t4t_read_cmd2[] = {0x00, 0x00, 0x05, 0x00, 0xB0, 0x00, 0x00, 0x02};
const __u8 nci_t4t_read_rsp2[] = {0x00, 0x00, 0x04, 0x00, 0x0F, 0x90, 0x00};
const __u8 nci_t4t_read_cmd3[] = {0x00, 0x00, 0x05, 0x00, 0xB0, 0x00, 0x02, 0x0F};
const __u8 nci_t4t_read_rsp3[] = {0x00, 0x00, 0x11, 0xD1, 0x01, 0x0B, 0x54, 0x02,
0x65, 0x6E, 0x4E, 0x46, 0x43, 0x20, 0x54, 0x45,
0x53, 0x54, 0x90, 0x00};
const __u8 nci_t4t_rsp_ok[] = {0x00, 0x00, 0x02, 0x90, 0x00};
struct msgtemplate {
struct nlmsghdr n;
struct genlmsghdr g;
char buf[MAX_MSG_SIZE];
};
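/* Open and bind a generic netlink socket for talking to the NFC subsystem */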
static int create_nl_socket(void)
{
int fd;
struct sockaddr_nl local;
fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_GENERIC);
if (fd < 0)
return -1;
memset(&local, 0, sizeof(local));
local.nl_family = AF_NETLINK;
if (bind(fd, (struct sockaddr *)&local, sizeof(local)) < 0)
goto error;
return fd;
error:
close(fd);
return -1;
}
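/*
 * Build a generic netlink message carrying the given attributes and send
 * it, retrying partial sends.
 */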
static int send_cmd_mt_nla(int sd, __u16 nlmsg_type, __u32 nlmsg_pid,
__u8 genl_cmd, int nla_num, __u16 nla_type[],
void *nla_data[], int nla_len[], __u16 flags)
{
struct sockaddr_nl nladdr;
struct msgtemplate msg;
struct nlattr *na;
int cnt, prv_len;
int r, buflen;
char *buf;
msg.n.nlmsg_len = NLMSG_LENGTH(GENL_HDRLEN);
msg.n.nlmsg_type = nlmsg_type;
msg.n.nlmsg_flags = flags;
msg.n.nlmsg_seq = 0;
msg.n.nlmsg_pid = nlmsg_pid;
msg.g.cmd = genl_cmd;
msg.g.version = 0x1;
prv_len = 0;
for (cnt = 0; cnt < nla_num; cnt++) {
na = (struct nlattr *)(GENLMSG_DATA(&msg) + prv_len);
na->nla_type = nla_type[cnt];
na->nla_len = nla_len[cnt] + NLA_HDRLEN;
if (nla_len[cnt] > 0)
memcpy(NLA_DATA(na), nla_data[cnt], nla_len[cnt]);
prv_len = NLA_ALIGN(nla_len[cnt]) + NLA_HDRLEN;
msg.n.nlmsg_len += prv_len;
}
buf = (char *)&msg;
buflen = msg.n.nlmsg_len;
memset(&nladdr, 0, sizeof(nladdr));
nladdr.nl_family = AF_NETLINK;
while ((r = sendto(sd, buf, buflen, 0, (struct sockaddr *)&nladdr,
sizeof(nladdr))) < buflen) {
if (r > 0) {
buf += r;
buflen -= r;
} else if (errno != EAGAIN) {
return -1;
}
}
return 0;
}
static int send_get_nfc_family(int sd, __u32 pid)
{
__u16 nla_get_family_type = CTRL_ATTR_FAMILY_NAME;
void *nla_get_family_data;
int nla_get_family_len;
char family_name[100];
nla_get_family_len = strlen(NFC_GENL_NAME) + 1;
strcpy(family_name, NFC_GENL_NAME);
nla_get_family_data = family_name;
return send_cmd_mt_nla(sd, GENL_ID_CTRL, pid, CTRL_CMD_GETFAMILY,
1, &nla_get_family_type, &nla_get_family_data,
&nla_get_family_len, NLM_F_REQUEST);
}
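/* Resolve the NFC generic netlink family id and its event multicast group id */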
static int get_family_id(int sd, __u32 pid, __u32 *event_group)
{
struct {
struct nlmsghdr n;
struct genlmsghdr g;
char buf[512];
} ans;
struct nlattr *na;
int resp_len;
__u16 id;
int len;
int rc;
rc = send_get_nfc_family(sd, pid);
if (rc < 0)
return 0;
resp_len = recv(sd, &ans, sizeof(ans), 0);
if (ans.n.nlmsg_type == NLMSG_ERROR || resp_len < 0 ||
!NLMSG_OK(&ans.n, resp_len))
return 0;
len = 0;
resp_len = GENLMSG_PAYLOAD(&ans.n);
na = (struct nlattr *)GENLMSG_DATA(&ans);
while (len < resp_len) {
len += NLA_ALIGN(na->nla_len);
if (na->nla_type == CTRL_ATTR_FAMILY_ID) {
id = *(__u16 *)NLA_DATA(na);
} else if (na->nla_type == CTRL_ATTR_MCAST_GROUPS) {
struct nlattr *nested_na;
struct nlattr *group_na;
int group_attr_len;
int group_attr;
nested_na = (struct nlattr *)((char *)na + NLA_HDRLEN);
group_na = (struct nlattr *)((char *)nested_na + NLA_HDRLEN);
group_attr_len = 0;
for (group_attr = CTRL_ATTR_MCAST_GRP_UNSPEC;
group_attr < CTRL_ATTR_MCAST_GRP_MAX; group_attr++) {
if (group_na->nla_type == CTRL_ATTR_MCAST_GRP_ID) {
*event_group = *(__u32 *)((char *)group_na +
NLA_HDRLEN);
break;
}
group_attr_len += NLA_ALIGN(group_na->nla_len) +
NLA_HDRLEN;
if (group_attr_len >= nested_na->nla_len)
break;
group_na = (struct nlattr *)((char *)group_na +
NLA_ALIGN(group_na->nla_len));
}
}
na = (struct nlattr *)(GENLMSG_DATA(&ans) + len);
}
return id;
}
static int send_cmd_with_idx(int sd, __u16 nlmsg_type, __u32 nlmsg_pid,
__u8 genl_cmd, int dev_id)
{
__u16 nla_type = NFC_ATTR_DEVICE_INDEX;
void *nla_data = &dev_id;
int nla_len = 4;
return send_cmd_mt_nla(sd, nlmsg_type, nlmsg_pid, genl_cmd, 1,
&nla_type, &nla_data, &nla_len, NLM_F_REQUEST);
}
static int get_nci_devid(int sd, __u16 fid, __u32 pid, int dev_id, struct msgtemplate *msg)
{
int rc, resp_len;
rc = send_cmd_with_idx(sd, fid, pid, NFC_CMD_GET_DEVICE, dev_id);
if (rc < 0) {
rc = -1;
goto error;
}
resp_len = recv(sd, msg, sizeof(*msg), 0);
if (resp_len < 0) {
rc = -2;
goto error;
}
if (msg->n.nlmsg_type == NLMSG_ERROR ||
!NLMSG_OK(&msg->n, resp_len)) {
rc = -3;
goto error;
}
return 0;
error:
return rc;
}
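/* Parse a NFC_CMD_GET_DEVICE reply and return the NFC_ATTR_DEVICE_POWERED state */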
static __u8 get_dev_enable_state(struct msgtemplate *msg)
{
struct nlattr *na;
int resp_len;
int len;
resp_len = GENLMSG_PAYLOAD(&msg->n);
na = (struct nlattr *)GENLMSG_DATA(msg);
len = 0;
while (len < resp_len) {
len += NLA_ALIGN(na->nla_len);
if (na->nla_type == NFC_ATTR_DEVICE_POWERED)
return *(char *)NLA_DATA(na);
na = (struct nlattr *)(GENLMSG_DATA(msg) + len);
}
return resp_len;
}
FIXTURE(NCI) {
int virtual_nci_fd;
bool open_state;
int dev_idex;
bool isNCI2;
int proto;
__u32 pid;
__u16 fid;
int sd;
};
FIXTURE_VARIANT(NCI) {
bool isNCI2;
};
FIXTURE_VARIANT_ADD(NCI, NCI1_0) {
.isNCI2 = false,
};
FIXTURE_VARIANT_ADD(NCI, NCI2_0) {
.isNCI2 = true,
};
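/*
 * Device-side thread emulating an NCI 1.x controller: answers the reset,
 * init and RF discover-map commands issued while the device is brought up.
 */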
static void *virtual_dev_open(void *data)
{
char buf[258];
int dev_fd;
int len;
dev_fd = *(int *)data;
len = read(dev_fd, buf, 258);
if (len <= 0)
goto error;
if (len != sizeof(nci_reset_cmd))
goto error;
if (memcmp(nci_reset_cmd, buf, len))
goto error;
write(dev_fd, nci_reset_rsp, sizeof(nci_reset_rsp));
len = read(dev_fd, buf, 258);
if (len <= 0)
goto error;
if (len != sizeof(nci_init_cmd))
goto error;
if (memcmp(nci_init_cmd, buf, len))
goto error;
write(dev_fd, nci_init_rsp, sizeof(nci_init_rsp));
len = read(dev_fd, buf, 258);
if (len <= 0)
goto error;
if (len != sizeof(nci_rf_disc_map_cmd))
goto error;
if (memcmp(nci_rf_disc_map_cmd, buf, len))
goto error;
write(dev_fd, nci_rf_disc_map_rsp, sizeof(nci_rf_disc_map_rsp));
return (void *)0;
error:
return (void *)-1;
}
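/* Same as virtual_dev_open() but speaking NCI 2.0 (v2 reset/init plus reset notification) */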
static void *virtual_dev_open_v2(void *data)
{
char buf[258];
int dev_fd;
int len;
dev_fd = *(int *)data;
len = read(dev_fd, buf, 258);
if (len <= 0)
goto error;
if (len != sizeof(nci_reset_cmd))
goto error;
if (memcmp(nci_reset_cmd, buf, len))
goto error;
write(dev_fd, nci_reset_rsp_v2, sizeof(nci_reset_rsp_v2));
write(dev_fd, nci_reset_ntf, sizeof(nci_reset_ntf));
len = read(dev_fd, buf, 258);
if (len <= 0)
goto error;
if (len != sizeof(nci_init_cmd_v2))
goto error;
if (memcmp(nci_init_cmd_v2, buf, len))
goto error;
write(dev_fd, nci_init_rsp_v2, sizeof(nci_init_rsp_v2));
len = read(dev_fd, buf, 258);
if (len <= 0)
goto error;
if (len != sizeof(nci_rf_disc_map_cmd))
goto error;
if (memcmp(nci_rf_disc_map_cmd, buf, len))
goto error;
write(dev_fd, nci_rf_disc_map_rsp, sizeof(nci_rf_disc_map_rsp));
return (void *)0;
error:
return (void *)-1;
}
FIXTURE_SETUP(NCI)
{
struct msgtemplate msg;
pthread_t thread_t;
__u32 event_group;
int status;
int rc;
self->open_state = false;
self->proto = VIRTUAL_NFC_PROTOCOLS;
self->isNCI2 = variant->isNCI2;
self->sd = create_nl_socket();
ASSERT_NE(self->sd, -1);
self->pid = getpid();
self->fid = get_family_id(self->sd, self->pid, &event_group);
ASSERT_NE(self->fid, -1);
self->virtual_nci_fd = open("/dev/virtual_nci", O_RDWR);
ASSERT_GT(self->virtual_nci_fd, -1);
rc = setsockopt(self->sd, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP, &event_group,
sizeof(event_group));
ASSERT_NE(rc, -1);
rc = ioctl(self->virtual_nci_fd, IOCTL_GET_NCIDEV_IDX, &self->dev_idex);
ASSERT_EQ(rc, 0);
rc = get_nci_devid(self->sd, self->fid, self->pid, self->dev_idex, &msg);
ASSERT_EQ(rc, 0);
EXPECT_EQ(get_dev_enable_state(&msg), 0);
if (self->isNCI2)
rc = pthread_create(&thread_t, NULL, virtual_dev_open_v2,
(void *)&self->virtual_nci_fd);
else
rc = pthread_create(&thread_t, NULL, virtual_dev_open,
(void *)&self->virtual_nci_fd);
ASSERT_GT(rc, -1);
rc = send_cmd_with_idx(self->sd, self->fid, self->pid,
NFC_CMD_DEV_UP, self->dev_idex);
EXPECT_EQ(rc, 0);
pthread_join(thread_t, (void **)&status);
ASSERT_EQ(status, 0);
self->open_state = true;
}
static void *virtual_deinit(void *data)
{
char buf[258];
int dev_fd;
int len;
dev_fd = *(int *)data;
len = read(dev_fd, buf, 258);
if (len <= 0)
goto error;
if (len != sizeof(nci_reset_cmd))
goto error;
if (memcmp(nci_reset_cmd, buf, len))
goto error;
write(dev_fd, nci_reset_rsp, sizeof(nci_reset_rsp));
return (void *)0;
error:
return (void *)-1;
}
static void *virtual_deinit_v2(void *data)
{
char buf[258];
int dev_fd;
int len;
dev_fd = *(int *)data;
len = read(dev_fd, buf, 258);
if (len <= 0)
goto error;
if (len != sizeof(nci_reset_cmd))
goto error;
if (memcmp(nci_reset_cmd, buf, len))
goto error;
write(dev_fd, nci_reset_rsp_v2, sizeof(nci_reset_rsp_v2));
write(dev_fd, nci_reset_ntf, sizeof(nci_reset_ntf));
return (void *)0;
error:
return (void *)-1;
}
FIXTURE_TEARDOWN(NCI)
{
pthread_t thread_t;
int status;
int rc;
if (self->open_state) {
if (self->isNCI2)
rc = pthread_create(&thread_t, NULL,
virtual_deinit_v2,
(void *)&self->virtual_nci_fd);
else
rc = pthread_create(&thread_t, NULL, virtual_deinit,
(void *)&self->virtual_nci_fd);
ASSERT_GT(rc, -1);
rc = send_cmd_with_idx(self->sd, self->fid, self->pid,
NFC_CMD_DEV_DOWN, self->dev_idex);
EXPECT_EQ(rc, 0);
pthread_join(thread_t, (void **)&status);
ASSERT_EQ(status, 0);
}
close(self->sd);
close(self->virtual_nci_fd);
self->open_state = false;
}
TEST_F(NCI, init)
{
struct msgtemplate msg;
int rc;
rc = get_nci_devid(self->sd, self->fid, self->pid, self->dev_idex,
&msg);
ASSERT_EQ(rc, 0);
EXPECT_EQ(get_dev_enable_state(&msg), 1);
}
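/* Device-side thread: acknowledge the RF discovery command sent when polling starts */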
static void *virtual_poll_start(void *data)
{
char buf[258];
int dev_fd;
int len;
dev_fd = *(int *)data;
len = read(dev_fd, buf, 258);
if (len <= 0)
goto error;
if (len != sizeof(nci_rf_discovery_cmd))
goto error;
if (memcmp(nci_rf_discovery_cmd, buf, len))
goto error;
write(dev_fd, nci_rf_disc_rsp, sizeof(nci_rf_disc_rsp));
return (void *)0;
error:
return (void *)-1;
}
static void *virtual_poll_stop(void *data)
{
char buf[258];
int dev_fd;
int len;
dev_fd = *(int *)data;
len = read(dev_fd, buf, 258);
if (len <= 0)
goto error;
if (len != sizeof(nci_rf_deact_cmd))
goto error;
if (memcmp(nci_rf_deact_cmd, buf, len))
goto error;
write(dev_fd, nci_rf_deact_rsp, sizeof(nci_rf_deact_rsp));
return (void *)0;
error:
return (void *)-1;
}
int start_polling(int dev_idx, int proto, int virtual_fd, int sd, int fid, int pid)
{
__u16 nla_start_poll_type[2] = {NFC_ATTR_DEVICE_INDEX,
NFC_ATTR_PROTOCOLS};
void *nla_start_poll_data[2] = {&dev_idx, &proto};
int nla_start_poll_len[2] = {4, 4};
pthread_t thread_t;
int status;
int rc;
rc = pthread_create(&thread_t, NULL, virtual_poll_start,
(void *)&virtual_fd);
if (rc < 0)
return rc;
rc = send_cmd_mt_nla(sd, fid, pid, NFC_CMD_START_POLL, 2, nla_start_poll_type,
nla_start_poll_data, nla_start_poll_len, NLM_F_REQUEST);
if (rc != 0)
return rc;
pthread_join(thread_t, (void **)&status);
return status;
}
int stop_polling(int dev_idx, int virtual_fd, int sd, int fid, int pid)
{
pthread_t thread_t;
int status;
int rc;
rc = pthread_create(&thread_t, NULL, virtual_poll_stop,
(void *)&virtual_fd);
if (rc < 0)
return rc;
rc = send_cmd_with_idx(sd, fid, pid,
NFC_CMD_STOP_POLL, dev_idx);
if (rc != 0)
return rc;
pthread_join(thread_t, (void **)&status);
return status;
}
TEST_F(NCI, start_poll)
{
int status;
status = start_polling(self->dev_idex, self->proto, self->virtual_nci_fd,
self->sd, self->fid, self->pid);
EXPECT_EQ(status, 0);
status = stop_polling(self->dev_idex, self->virtual_nci_fd, self->sd,
self->fid, self->pid);
EXPECT_EQ(status, 0);
}
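/* Dump targets via NFC_CMD_GET_TARGET and return the index of the expected ISO14443 target */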
int get_taginfo(int dev_idx, int sd, int fid, int pid)
{
struct {
struct nlmsghdr n;
struct genlmsghdr g;
char buf[512];
} ans;
struct nlattr *na;
__u32 protocol;
int targetidx;
__u8 sel_res;
int resp_len;
int len;
__u16 tagid_type;
void *tagid_type_data;
int tagid_len;
tagid_type = NFC_ATTR_DEVICE_INDEX;
tagid_type_data = &dev_idx;
tagid_len = 4;
send_cmd_mt_nla(sd, fid, pid, NFC_CMD_GET_TARGET, 1, &tagid_type,
&tagid_type_data, &tagid_len, NLM_F_REQUEST | NLM_F_DUMP);
resp_len = recv(sd, &ans, sizeof(ans), 0);
if (ans.n.nlmsg_type == NLMSG_ERROR || resp_len < 0 ||
!NLMSG_OK(&ans.n, resp_len))
return -1;
resp_len = GENLMSG_PAYLOAD(&ans.n);
na = (struct nlattr *)GENLMSG_DATA(&ans);
len = 0;
targetidx = -1;
protocol = -1;
sel_res = -1;
while (len < resp_len) {
len += NLA_ALIGN(na->nla_len);
if (na->nla_type == NFC_ATTR_TARGET_INDEX)
targetidx = *(int *)((char *)na + NLA_HDRLEN);
else if (na->nla_type == NFC_ATTR_TARGET_SEL_RES)
sel_res = *(__u8 *)((char *)na + NLA_HDRLEN);
else if (na->nla_type == NFC_ATTR_PROTOCOLS)
protocol = *(__u32 *)((char *)na + NLA_HDRLEN);
na = (struct nlattr *)(GENLMSG_DATA(&ans) + len);
}
if (targetidx == -1 || sel_res != 0x20 || protocol != NFC_PROTO_ISO14443_MASK)
return -1;
return targetidx;
}
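/* Open a raw AF_NFC socket and connect it to the given target */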
int connect_socket(int dev_idx, int target_idx)
{
struct sockaddr_nfc addr;
int sock;
int err = 0;
sock = socket(AF_NFC, SOCK_SEQPACKET, NFC_SOCKPROTO_RAW);
if (sock == -1)
return -1;
addr.sa_family = AF_NFC;
addr.dev_idx = dev_idx;
addr.target_idx = target_idx;
addr.nfc_protocol = NFC_PROTO_ISO14443;
err = connect(sock, (struct sockaddr *)&addr, sizeof(addr));
if (err) {
close(sock);
return -1;
}
return sock;
}
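/*
 * Inject an RF activation notification, wait for the resulting netlink
 * event and connect a raw socket to the activated target.
 */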
int connect_tag(int dev_idx, int virtual_fd, int sd, int fid, int pid)
{
struct genlmsghdr *genlhdr;
struct nlattr *na;
char evt_data[255];
int target_idx;
int resp_len;
int evt_dev;
write(virtual_fd, nci_rf_activate_ntf, sizeof(nci_rf_activate_ntf));
resp_len = recv(sd, evt_data, sizeof(evt_data), 0);
if (resp_len < 0)
return -1;
genlhdr = (struct genlmsghdr *)((struct nlmsghdr *)evt_data + 1);
na = (struct nlattr *)(genlhdr + 1);
evt_dev = *(int *)((char *)na + NLA_HDRLEN);
if (dev_idx != evt_dev)
return -1;
target_idx = get_taginfo(dev_idx, sd, fid, pid);
if (target_idx == -1)
return -1;
return connect_socket(dev_idx, target_idx);
}
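/*
 * Send one tag command over the raw NFC socket (without the 3-byte NCI data
 * header), feed the canned response through the virtual device and check
 * that it arrives back on the socket.
 */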
int read_write_nci_cmd(int nfc_sock, int virtual_fd, const __u8 *cmd, __u32 cmd_len,
const __u8 *rsp, __u32 rsp_len)
{
char buf[256];
int len;
send(nfc_sock, &cmd[3], cmd_len - 3, 0);
len = read(virtual_fd, buf, cmd_len);
if (len < 0 || memcmp(buf, cmd, cmd_len))
return -1;
write(virtual_fd, rsp, rsp_len);
len = recv(nfc_sock, buf, rsp_len - 2, 0);
if (len < 0 || memcmp(&buf[1], &rsp[3], rsp_len - 3))
return -1;
return 0;
}
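/* Read a type-4 tag: select the NDEF application, then the CC and NDEF files, reading each */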
int read_tag(int nfc_sock, int virtual_fd)
{
if (read_write_nci_cmd(nfc_sock, virtual_fd, nci_t4t_select_cmd,
sizeof(nci_t4t_select_cmd), nci_t4t_rsp_ok,
sizeof(nci_t4t_rsp_ok)))
return -1;
if (read_write_nci_cmd(nfc_sock, virtual_fd, nci_t4t_select_cmd2,
sizeof(nci_t4t_select_cmd2), nci_t4t_rsp_ok,
sizeof(nci_t4t_rsp_ok)))
return -1;
if (read_write_nci_cmd(nfc_sock, virtual_fd, nci_t4t_read_cmd,
sizeof(nci_t4t_read_cmd), nci_t4t_read_rsp,
sizeof(nci_t4t_read_rsp)))
return -1;
if (read_write_nci_cmd(nfc_sock, virtual_fd, nci_t4t_select_cmd3,
sizeof(nci_t4t_select_cmd3), nci_t4t_rsp_ok,
sizeof(nci_t4t_rsp_ok)))
return -1;
if (read_write_nci_cmd(nfc_sock, virtual_fd, nci_t4t_read_cmd2,
sizeof(nci_t4t_read_cmd2), nci_t4t_read_rsp2,
sizeof(nci_t4t_read_rsp2)))
return -1;
return read_write_nci_cmd(nfc_sock, virtual_fd, nci_t4t_read_cmd3,
sizeof(nci_t4t_read_cmd3), nci_t4t_read_rsp3,
sizeof(nci_t4t_read_rsp3));
}
static void *virtual_deactivate_proc(void *data)
{
int virtual_fd;
char buf[256];
int deactcmd_len;
int len;
virtual_fd = *(int *)data;
deactcmd_len = sizeof(nci_rf_deact_cmd);
len = read(virtual_fd, buf, deactcmd_len);
if (len != deactcmd_len || memcmp(buf, nci_rf_deact_cmd, deactcmd_len))
return (void *)-1;
write(virtual_fd, nci_rf_deact_rsp, sizeof(nci_rf_deact_rsp));
write(virtual_fd, nci_rf_deact_ntf, sizeof(nci_rf_deact_ntf));
return (void *)0;
}
int disconnect_tag(int nfc_sock, int virtual_fd)
{
pthread_t thread_t;
char buf[256];
int status;
int len;
send(nfc_sock, &nci_t4t_select_cmd3[3], sizeof(nci_t4t_select_cmd3) - 3, 0);
len = read(virtual_fd, buf, sizeof(nci_t4t_select_cmd3));
if (len < 0 || memcmp(buf, nci_t4t_select_cmd3, sizeof(nci_t4t_select_cmd3)))
return -1;
len = recv(nfc_sock, buf, sizeof(nci_t4t_rsp_ok), 0);
if (len != -1)
return -1;
status = pthread_create(&thread_t, NULL, virtual_deactivate_proc,
(void *)&virtual_fd);
close(nfc_sock);
pthread_join(thread_t, (void **)&status);
return status;
}
TEST_F(NCI, t4t_tag_read)
{
int nfc_sock;
int status;
status = start_polling(self->dev_idex, self->proto, self->virtual_nci_fd,
self->sd, self->fid, self->pid);
EXPECT_EQ(status, 0);
nfc_sock = connect_tag(self->dev_idex, self->virtual_nci_fd, self->sd,
self->fid, self->pid);
ASSERT_GT(nfc_sock, -1);
status = read_tag(nfc_sock, self->virtual_nci_fd);
ASSERT_EQ(status, 0);
status = disconnect_tag(nfc_sock, self->virtual_nci_fd);
EXPECT_EQ(status, 0);
}
TEST_F(NCI, deinit)
{
struct msgtemplate msg;
pthread_t thread_t;
int status;
int rc;
rc = get_nci_devid(self->sd, self->fid, self->pid, self->dev_idex,
&msg);
ASSERT_EQ(rc, 0);
EXPECT_EQ(get_dev_enable_state(&msg), 1);
if (self->isNCI2)
rc = pthread_create(&thread_t, NULL, virtual_deinit_v2,
(void *)&self->virtual_nci_fd);
else
rc = pthread_create(&thread_t, NULL, virtual_deinit,
(void *)&self->virtual_nci_fd);
ASSERT_GT(rc, -1);
rc = send_cmd_with_idx(self->sd, self->fid, self->pid,
NFC_CMD_DEV_DOWN, self->dev_idex);
EXPECT_EQ(rc, 0);
pthread_join(thread_t, (void **)&status);
self->open_state = 0;
ASSERT_EQ(status, 0);
rc = get_nci_devid(self->sd, self->fid, self->pid, self->dev_idex,
&msg);
ASSERT_EQ(rc, 0);
EXPECT_EQ(get_dev_enable_state(&msg), 0);
/* Test that operations that normally send packets to the driver
* don't cause issues when the device is already closed.
* Note: the send of NFC_CMD_DEV_UP itself still succeeds, it's just
* that the device won't actually be up.
*/
close(self->virtual_nci_fd);
self->virtual_nci_fd = -1;
rc = send_cmd_with_idx(self->sd, self->fid, self->pid,
NFC_CMD_DEV_UP, self->dev_idex);
EXPECT_EQ(rc, 0);
}
TEST_HARNESS_MAIN
| linux-master | tools/testing/selftests/nci/nci_dev.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
*
* Test code for seccomp bpf.
*/
#define _GNU_SOURCE
#include <sys/types.h>
/*
* glibc 2.26 and later have SIGSYS in siginfo_t. Before that,
* we need to use the kernel's siginfo.h file and trick glibc
* into accepting it.
*/
#if !__GLIBC_PREREQ(2, 26)
# include <asm/siginfo.h>
# define __have_siginfo_t 1
# define __have_sigval_t 1
# define __have_sigevent_t 1
#endif
#include <errno.h>
#include <linux/filter.h>
#include <sys/prctl.h>
#include <sys/ptrace.h>
#include <sys/user.h>
#include <linux/prctl.h>
#include <linux/ptrace.h>
#include <linux/seccomp.h>
#include <pthread.h>
#include <semaphore.h>
#include <signal.h>
#include <stddef.h>
#include <stdbool.h>
#include <string.h>
#include <time.h>
#include <limits.h>
#include <linux/elf.h>
#include <sys/uio.h>
#include <sys/utsname.h>
#include <sys/fcntl.h>
#include <sys/mman.h>
#include <sys/times.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <linux/kcmp.h>
#include <sys/resource.h>
#include <sys/capability.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <poll.h>
#include "../kselftest_harness.h"
#include "../clone3/clone3_selftests.h"
/* Attempt to de-conflict with the selftests tree. */
#ifndef SKIP
#define SKIP(s, ...) XFAIL(s, ##__VA_ARGS__)
#endif
#define MIN(X, Y) ((X) < (Y) ? (X) : (Y))
#ifndef PR_SET_PTRACER
# define PR_SET_PTRACER 0x59616d61
#endif
#ifndef PR_SET_NO_NEW_PRIVS
#define PR_SET_NO_NEW_PRIVS 38
#define PR_GET_NO_NEW_PRIVS 39
#endif
#ifndef PR_SECCOMP_EXT
#define PR_SECCOMP_EXT 43
#endif
#ifndef SECCOMP_EXT_ACT
#define SECCOMP_EXT_ACT 1
#endif
#ifndef SECCOMP_EXT_ACT_TSYNC
#define SECCOMP_EXT_ACT_TSYNC 1
#endif
#ifndef SECCOMP_MODE_STRICT
#define SECCOMP_MODE_STRICT 1
#endif
#ifndef SECCOMP_MODE_FILTER
#define SECCOMP_MODE_FILTER 2
#endif
#ifndef SECCOMP_RET_ALLOW
struct seccomp_data {
int nr;
__u32 arch;
__u64 instruction_pointer;
__u64 args[6];
};
#endif
#ifndef SECCOMP_RET_KILL_PROCESS
#define SECCOMP_RET_KILL_PROCESS 0x80000000U /* kill the process */
#define SECCOMP_RET_KILL_THREAD 0x00000000U /* kill the thread */
#endif
#ifndef SECCOMP_RET_KILL
#define SECCOMP_RET_KILL SECCOMP_RET_KILL_THREAD
#define SECCOMP_RET_TRAP 0x00030000U /* disallow and force a SIGSYS */
#define SECCOMP_RET_ERRNO 0x00050000U /* returns an errno */
#define SECCOMP_RET_TRACE 0x7ff00000U /* pass to a tracer or disallow */
#define SECCOMP_RET_ALLOW 0x7fff0000U /* allow */
#endif
#ifndef SECCOMP_RET_LOG
#define SECCOMP_RET_LOG 0x7ffc0000U /* allow after logging */
#endif
#ifndef __NR_seccomp
# if defined(__i386__)
# define __NR_seccomp 354
# elif defined(__x86_64__)
# define __NR_seccomp 317
# elif defined(__arm__)
# define __NR_seccomp 383
# elif defined(__aarch64__)
# define __NR_seccomp 277
# elif defined(__riscv)
# define __NR_seccomp 277
# elif defined(__csky__)
# define __NR_seccomp 277
# elif defined(__loongarch__)
# define __NR_seccomp 277
# elif defined(__hppa__)
# define __NR_seccomp 338
# elif defined(__powerpc__)
# define __NR_seccomp 358
# elif defined(__s390__)
# define __NR_seccomp 348
# elif defined(__xtensa__)
# define __NR_seccomp 337
# elif defined(__sh__)
# define __NR_seccomp 372
# elif defined(__mc68000__)
# define __NR_seccomp 380
# else
# warning "seccomp syscall number unknown for this architecture"
# define __NR_seccomp 0xffff
# endif
#endif
#ifndef SECCOMP_SET_MODE_STRICT
#define SECCOMP_SET_MODE_STRICT 0
#endif
#ifndef SECCOMP_SET_MODE_FILTER
#define SECCOMP_SET_MODE_FILTER 1
#endif
#ifndef SECCOMP_GET_ACTION_AVAIL
#define SECCOMP_GET_ACTION_AVAIL 2
#endif
#ifndef SECCOMP_GET_NOTIF_SIZES
#define SECCOMP_GET_NOTIF_SIZES 3
#endif
#ifndef SECCOMP_FILTER_FLAG_TSYNC
#define SECCOMP_FILTER_FLAG_TSYNC (1UL << 0)
#endif
#ifndef SECCOMP_FILTER_FLAG_LOG
#define SECCOMP_FILTER_FLAG_LOG (1UL << 1)
#endif
#ifndef SECCOMP_FILTER_FLAG_SPEC_ALLOW
#define SECCOMP_FILTER_FLAG_SPEC_ALLOW (1UL << 2)
#endif
#ifndef PTRACE_SECCOMP_GET_METADATA
#define PTRACE_SECCOMP_GET_METADATA 0x420d
struct seccomp_metadata {
__u64 filter_off; /* Input: which filter */
__u64 flags; /* Output: filter's flags */
};
#endif
#ifndef SECCOMP_FILTER_FLAG_NEW_LISTENER
#define SECCOMP_FILTER_FLAG_NEW_LISTENER (1UL << 3)
#endif
#ifndef SECCOMP_RET_USER_NOTIF
#define SECCOMP_RET_USER_NOTIF 0x7fc00000U
#define SECCOMP_IOC_MAGIC '!'
#define SECCOMP_IO(nr) _IO(SECCOMP_IOC_MAGIC, nr)
#define SECCOMP_IOR(nr, type) _IOR(SECCOMP_IOC_MAGIC, nr, type)
#define SECCOMP_IOW(nr, type) _IOW(SECCOMP_IOC_MAGIC, nr, type)
#define SECCOMP_IOWR(nr, type) _IOWR(SECCOMP_IOC_MAGIC, nr, type)
/* Flags for seccomp notification fd ioctl. */
#define SECCOMP_IOCTL_NOTIF_RECV SECCOMP_IOWR(0, struct seccomp_notif)
#define SECCOMP_IOCTL_NOTIF_SEND SECCOMP_IOWR(1, \
struct seccomp_notif_resp)
#define SECCOMP_IOCTL_NOTIF_ID_VALID SECCOMP_IOW(2, __u64)
struct seccomp_notif {
__u64 id;
__u32 pid;
__u32 flags;
struct seccomp_data data;
};
struct seccomp_notif_resp {
__u64 id;
__s64 val;
__s32 error;
__u32 flags;
};
struct seccomp_notif_sizes {
__u16 seccomp_notif;
__u16 seccomp_notif_resp;
__u16 seccomp_data;
};
#endif
#ifndef SECCOMP_IOCTL_NOTIF_ADDFD
/* On success, the return value is the remote process's added fd number */
#define SECCOMP_IOCTL_NOTIF_ADDFD SECCOMP_IOW(3, \
struct seccomp_notif_addfd)
/* valid flags for seccomp_notif_addfd */
#define SECCOMP_ADDFD_FLAG_SETFD (1UL << 0) /* Specify remote fd */
struct seccomp_notif_addfd {
__u64 id;
__u32 flags;
__u32 srcfd;
__u32 newfd;
__u32 newfd_flags;
};
#endif
#ifndef SECCOMP_ADDFD_FLAG_SEND
#define SECCOMP_ADDFD_FLAG_SEND (1UL << 1) /* Addfd and return it, atomically */
#endif
struct seccomp_notif_addfd_small {
__u64 id;
char weird[4];
};
#define SECCOMP_IOCTL_NOTIF_ADDFD_SMALL \
SECCOMP_IOW(3, struct seccomp_notif_addfd_small)
struct seccomp_notif_addfd_big {
union {
struct seccomp_notif_addfd addfd;
char buf[sizeof(struct seccomp_notif_addfd) + 8];
};
};
#define SECCOMP_IOCTL_NOTIF_ADDFD_BIG \
SECCOMP_IOWR(3, struct seccomp_notif_addfd_big)
#ifndef PTRACE_EVENTMSG_SYSCALL_ENTRY
#define PTRACE_EVENTMSG_SYSCALL_ENTRY 1
#define PTRACE_EVENTMSG_SYSCALL_EXIT 2
#endif
#ifndef SECCOMP_USER_NOTIF_FLAG_CONTINUE
#define SECCOMP_USER_NOTIF_FLAG_CONTINUE 0x00000001
#endif
#ifndef SECCOMP_FILTER_FLAG_TSYNC_ESRCH
#define SECCOMP_FILTER_FLAG_TSYNC_ESRCH (1UL << 4)
#endif
#ifndef SECCOMP_FILTER_FLAG_WAIT_KILLABLE_RECV
#define SECCOMP_FILTER_FLAG_WAIT_KILLABLE_RECV (1UL << 5)
#endif
#ifndef seccomp
int seccomp(unsigned int op, unsigned int flags, void *args)
{
errno = 0;
return syscall(__NR_seccomp, op, flags, args);
}
#endif
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
#define syscall_arg(_n) (offsetof(struct seccomp_data, args[_n]))
#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
#define syscall_arg(_n) (offsetof(struct seccomp_data, args[_n]) + sizeof(__u32))
#else
#error "wut? Unknown __BYTE_ORDER__?!"
#endif
#define SIBLING_EXIT_UNKILLED 0xbadbeef
#define SIBLING_EXIT_FAILURE 0xbadface
#define SIBLING_EXIT_NEWPRIVS 0xbadfeed
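/* Compare two fds, possibly across processes, with kcmp(); 0 means the same open file */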
static int __filecmp(pid_t pid1, pid_t pid2, int fd1, int fd2)
{
#ifdef __NR_kcmp
errno = 0;
return syscall(__NR_kcmp, pid1, pid2, KCMP_FILE, fd1, fd2);
#else
errno = ENOSYS;
return -1;
#endif
}
/* Have TH_LOG report actual location filecmp() is used. */
#define filecmp(pid1, pid2, fd1, fd2) ({ \
int _ret; \
\
_ret = __filecmp(pid1, pid2, fd1, fd2); \
if (_ret != 0) { \
if (_ret < 0 && errno == ENOSYS) { \
TH_LOG("kcmp() syscall missing (test is less accurate)");\
_ret = 0; \
} \
} \
_ret; })
TEST(kcmp)
{
int ret;
ret = __filecmp(getpid(), getpid(), 1, 1);
EXPECT_EQ(ret, 0);
if (ret != 0 && errno == ENOSYS)
SKIP(return, "Kernel does not support kcmp() (missing CONFIG_KCMP?)");
}
TEST(mode_strict_support)
{
long ret;
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_STRICT, NULL, NULL, NULL);
ASSERT_EQ(0, ret) {
TH_LOG("Kernel does not support CONFIG_SECCOMP");
}
syscall(__NR_exit, 0);
}
TEST_SIGNAL(mode_strict_cannot_call_prctl, SIGKILL)
{
long ret;
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_STRICT, NULL, NULL, NULL);
ASSERT_EQ(0, ret) {
TH_LOG("Kernel does not support CONFIG_SECCOMP");
}
syscall(__NR_prctl, PR_SET_SECCOMP, SECCOMP_MODE_FILTER,
NULL, NULL, NULL);
EXPECT_FALSE(true) {
TH_LOG("Unreachable!");
}
}
/* Note! This doesn't test no new privs behavior */
TEST(no_new_privs_support)
{
long ret;
ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
EXPECT_EQ(0, ret) {
TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
}
}
/* Tests kernel support by checking for a copy_from_user() fault on NULL. */
TEST(mode_filter_support)
{
long ret;
ret = prctl(PR_SET_NO_NEW_PRIVS, 1, NULL, 0, 0);
ASSERT_EQ(0, ret) {
TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
}
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, NULL, NULL, NULL);
EXPECT_EQ(-1, ret);
EXPECT_EQ(EFAULT, errno) {
TH_LOG("Kernel does not support CONFIG_SECCOMP_FILTER!");
}
}
TEST(mode_filter_without_nnp)
{
struct sock_filter filter[] = {
BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
};
struct sock_fprog prog = {
.len = (unsigned short)ARRAY_SIZE(filter),
.filter = filter,
};
long ret;
cap_t cap = cap_get_proc();
cap_flag_value_t is_cap_sys_admin = 0;
ret = prctl(PR_GET_NO_NEW_PRIVS, 0, NULL, 0, 0);
ASSERT_LE(0, ret) {
TH_LOG("Expected 0 or unsupported for NO_NEW_PRIVS");
}
errno = 0;
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0);
/* Succeeds with CAP_SYS_ADMIN, fails without */
cap_get_flag(cap, CAP_SYS_ADMIN, CAP_EFFECTIVE, &is_cap_sys_admin);
if (!is_cap_sys_admin) {
EXPECT_EQ(-1, ret);
EXPECT_EQ(EACCES, errno);
} else {
EXPECT_EQ(0, ret);
}
}
#define MAX_INSNS_PER_PATH 32768
TEST(filter_size_limits)
{
int i;
int count = BPF_MAXINSNS + 1;
struct sock_filter allow[] = {
BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
};
struct sock_filter *filter;
struct sock_fprog prog = { };
long ret;
filter = calloc(count, sizeof(*filter));
ASSERT_NE(NULL, filter);
for (i = 0; i < count; i++)
filter[i] = allow[0];
ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
ASSERT_EQ(0, ret);
prog.filter = filter;
prog.len = count;
/* Too many filter instructions in a single filter. */
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0);
ASSERT_NE(0, ret) {
TH_LOG("Installing %d insn filter was allowed", prog.len);
}
/* One less is okay, though. */
prog.len -= 1;
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0);
ASSERT_EQ(0, ret) {
TH_LOG("Installing %d insn filter wasn't allowed", prog.len);
}
}
TEST(filter_chain_limits)
{
int i;
int count = BPF_MAXINSNS;
struct sock_filter allow[] = {
BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
};
struct sock_filter *filter;
struct sock_fprog prog = { };
long ret;
filter = calloc(count, sizeof(*filter));
ASSERT_NE(NULL, filter);
for (i = 0; i < count; i++)
filter[i] = allow[0];
ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
ASSERT_EQ(0, ret);
prog.filter = filter;
prog.len = 1;
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0);
ASSERT_EQ(0, ret);
prog.len = count;
/* Too many total filter instructions. */
for (i = 0; i < MAX_INSNS_PER_PATH; i++) {
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0);
if (ret != 0)
break;
}
ASSERT_NE(0, ret) {
TH_LOG("Allowed %d %d-insn filters (total with penalties:%d)",
i, count, i * (count + 4));
}
}
TEST(mode_filter_cannot_move_to_strict)
{
struct sock_filter filter[] = {
BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
};
struct sock_fprog prog = {
.len = (unsigned short)ARRAY_SIZE(filter),
.filter = filter,
};
long ret;
ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
ASSERT_EQ(0, ret);
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0);
ASSERT_EQ(0, ret);
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_STRICT, NULL, 0, 0);
EXPECT_EQ(-1, ret);
EXPECT_EQ(EINVAL, errno);
}
TEST(mode_filter_get_seccomp)
{
struct sock_filter filter[] = {
BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
};
struct sock_fprog prog = {
.len = (unsigned short)ARRAY_SIZE(filter),
.filter = filter,
};
long ret;
ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
ASSERT_EQ(0, ret);
ret = prctl(PR_GET_SECCOMP, 0, 0, 0, 0);
EXPECT_EQ(0, ret);
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0);
ASSERT_EQ(0, ret);
ret = prctl(PR_GET_SECCOMP, 0, 0, 0, 0);
EXPECT_EQ(2, ret);
}
TEST(ALLOW_all)
{
struct sock_filter filter[] = {
BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
};
struct sock_fprog prog = {
.len = (unsigned short)ARRAY_SIZE(filter),
.filter = filter,
};
long ret;
ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
ASSERT_EQ(0, ret);
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
ASSERT_EQ(0, ret);
}
TEST(empty_prog)
{
struct sock_filter filter[] = {
};
struct sock_fprog prog = {
.len = (unsigned short)ARRAY_SIZE(filter),
.filter = filter,
};
long ret;
ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
ASSERT_EQ(0, ret);
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
EXPECT_EQ(-1, ret);
EXPECT_EQ(EINVAL, errno);
}
TEST(log_all)
{
struct sock_filter filter[] = {
BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_LOG),
};
struct sock_fprog prog = {
.len = (unsigned short)ARRAY_SIZE(filter),
.filter = filter,
};
long ret;
pid_t parent = getppid();
ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
ASSERT_EQ(0, ret);
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
ASSERT_EQ(0, ret);
/* getppid() should succeed and be logged (no check for logging) */
EXPECT_EQ(parent, syscall(__NR_getppid));
}
TEST_SIGNAL(unknown_ret_is_kill_inside, SIGSYS)
{
struct sock_filter filter[] = {
BPF_STMT(BPF_RET|BPF_K, 0x10000000U),
};
struct sock_fprog prog = {
.len = (unsigned short)ARRAY_SIZE(filter),
.filter = filter,
};
long ret;
ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
ASSERT_EQ(0, ret);
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
ASSERT_EQ(0, ret);
EXPECT_EQ(0, syscall(__NR_getpid)) {
TH_LOG("getpid() shouldn't ever return");
}
}
/* return code >= 0x80000000 is unused. */
TEST_SIGNAL(unknown_ret_is_kill_above_allow, SIGSYS)
{
struct sock_filter filter[] = {
BPF_STMT(BPF_RET|BPF_K, 0x90000000U),
};
struct sock_fprog prog = {
.len = (unsigned short)ARRAY_SIZE(filter),
.filter = filter,
};
long ret;
ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
ASSERT_EQ(0, ret);
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
ASSERT_EQ(0, ret);
EXPECT_EQ(0, syscall(__NR_getpid)) {
TH_LOG("getpid() shouldn't ever return");
}
}
TEST_SIGNAL(KILL_all, SIGSYS)
{
struct sock_filter filter[] = {
BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL),
};
struct sock_fprog prog = {
.len = (unsigned short)ARRAY_SIZE(filter),
.filter = filter,
};
long ret;
ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
ASSERT_EQ(0, ret);
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
ASSERT_EQ(0, ret);
}
TEST_SIGNAL(KILL_one, SIGSYS)
{
struct sock_filter filter[] = {
BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
offsetof(struct seccomp_data, nr)),
BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getpid, 0, 1),
BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL),
BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
};
struct sock_fprog prog = {
.len = (unsigned short)ARRAY_SIZE(filter),
.filter = filter,
};
long ret;
pid_t parent = getppid();
ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
ASSERT_EQ(0, ret);
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
ASSERT_EQ(0, ret);
EXPECT_EQ(parent, syscall(__NR_getppid));
/* getpid() should never return. */
EXPECT_EQ(0, syscall(__NR_getpid));
}
TEST_SIGNAL(KILL_one_arg_one, SIGSYS)
{
void *fatal_address;
struct sock_filter filter[] = {
BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
offsetof(struct seccomp_data, nr)),
BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_times, 1, 0),
BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
/* Only bother with the lower 32 bits for now. */
BPF_STMT(BPF_LD|BPF_W|BPF_ABS, syscall_arg(0)),
BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K,
(unsigned long)&fatal_address, 0, 1),
BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL),
BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
};
struct sock_fprog prog = {
.len = (unsigned short)ARRAY_SIZE(filter),
.filter = filter,
};
long ret;
pid_t parent = getppid();
struct tms timebuf;
clock_t clock = times(&timebuf);
ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
ASSERT_EQ(0, ret);
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
ASSERT_EQ(0, ret);
EXPECT_EQ(parent, syscall(__NR_getppid));
EXPECT_LE(clock, syscall(__NR_times, &timebuf));
/* times() should never return. */
EXPECT_EQ(0, syscall(__NR_times, &fatal_address));
}
TEST_SIGNAL(KILL_one_arg_six, SIGSYS)
{
#ifndef __NR_mmap2
int sysno = __NR_mmap;
#else
int sysno = __NR_mmap2;
#endif
struct sock_filter filter[] = {
BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
offsetof(struct seccomp_data, nr)),
BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, sysno, 1, 0),
BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
/* Only bother with the lower 32 bits for now. */
BPF_STMT(BPF_LD|BPF_W|BPF_ABS, syscall_arg(5)),
BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, 0x0C0FFEE, 0, 1),
BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL),
BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
};
struct sock_fprog prog = {
.len = (unsigned short)ARRAY_SIZE(filter),
.filter = filter,
};
long ret;
pid_t parent = getppid();
int fd;
void *map1, *map2;
int page_size = sysconf(_SC_PAGESIZE);
ASSERT_LT(0, page_size);
ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
ASSERT_EQ(0, ret);
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
ASSERT_EQ(0, ret);
fd = open("/dev/zero", O_RDONLY);
ASSERT_NE(-1, fd);
EXPECT_EQ(parent, syscall(__NR_getppid));
map1 = (void *)syscall(sysno,
NULL, page_size, PROT_READ, MAP_PRIVATE, fd, page_size);
EXPECT_NE(MAP_FAILED, map1);
/* mmap2() should never return. */
map2 = (void *)syscall(sysno,
NULL, page_size, PROT_READ, MAP_PRIVATE, fd, 0x0C0FFEE);
EXPECT_EQ(MAP_FAILED, map2);
/* The test failed, so clean up the resources. */
munmap(map1, page_size);
munmap(map2, page_size);
close(fd);
}
/* Thread function that dies via a seccomp filter violation when asked to. */
void *kill_thread(void *data)
{
bool die = (bool)data;
if (die) {
prctl(PR_GET_SECCOMP, 0, 0, 0, 0);
return (void *)SIBLING_EXIT_FAILURE;
}
return (void *)SIBLING_EXIT_UNKILLED;
}
enum kill_t {
KILL_THREAD,
KILL_PROCESS,
RET_UNKNOWN
};
/* Prepare a thread that will kill itself or both of us. */
void kill_thread_or_group(struct __test_metadata *_metadata,
enum kill_t kill_how)
{
pthread_t thread;
void *status;
/* Kill only when calling __NR_prctl. */
struct sock_filter filter_thread[] = {
BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
offsetof(struct seccomp_data, nr)),
BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_prctl, 0, 1),
BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL_THREAD),
BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
};
struct sock_fprog prog_thread = {
.len = (unsigned short)ARRAY_SIZE(filter_thread),
.filter = filter_thread,
};
int kill = kill_how == KILL_PROCESS ? SECCOMP_RET_KILL_PROCESS : 0xAAAAAAAA;
struct sock_filter filter_process[] = {
BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
offsetof(struct seccomp_data, nr)),
BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_prctl, 0, 1),
BPF_STMT(BPF_RET|BPF_K, kill),
BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
};
struct sock_fprog prog_process = {
.len = (unsigned short)ARRAY_SIZE(filter_process),
.filter = filter_process,
};
ASSERT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) {
TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
}
ASSERT_EQ(0, seccomp(SECCOMP_SET_MODE_FILTER, 0,
kill_how == KILL_THREAD ? &prog_thread
: &prog_process));
/*
* Add the KILL_THREAD rule again to make sure that the KILL_PROCESS
* flag cannot be downgraded by a new filter.
*/
if (kill_how == KILL_PROCESS)
ASSERT_EQ(0, seccomp(SECCOMP_SET_MODE_FILTER, 0, &prog_thread));
/* Start a thread that will exit immediately. */
ASSERT_EQ(0, pthread_create(&thread, NULL, kill_thread, (void *)false));
ASSERT_EQ(0, pthread_join(thread, &status));
ASSERT_EQ(SIBLING_EXIT_UNKILLED, (unsigned long)status);
/* Start a thread that will die immediately. */
ASSERT_EQ(0, pthread_create(&thread, NULL, kill_thread, (void *)true));
ASSERT_EQ(0, pthread_join(thread, &status));
ASSERT_NE(SIBLING_EXIT_FAILURE, (unsigned long)status);
/*
* If we get here, only the spawned thread died. Let the parent know
* the whole process didn't die (i.e. this thread, the spawner,
* stayed running).
*/
exit(42);
}
TEST(KILL_thread)
{
int status;
pid_t child_pid;
child_pid = fork();
ASSERT_LE(0, child_pid);
if (child_pid == 0) {
kill_thread_or_group(_metadata, KILL_THREAD);
_exit(38);
}
ASSERT_EQ(child_pid, waitpid(child_pid, &status, 0));
/* If only the thread was killed, we'll see exit 42. */
ASSERT_TRUE(WIFEXITED(status));
ASSERT_EQ(42, WEXITSTATUS(status));
}
TEST(KILL_process)
{
int status;
pid_t child_pid;
child_pid = fork();
ASSERT_LE(0, child_pid);
if (child_pid == 0) {
kill_thread_or_group(_metadata, KILL_PROCESS);
_exit(38);
}
ASSERT_EQ(child_pid, waitpid(child_pid, &status, 0));
/* If the entire process was killed, we'll see SIGSYS. */
ASSERT_TRUE(WIFSIGNALED(status));
ASSERT_EQ(SIGSYS, WTERMSIG(status));
}
TEST(KILL_unknown)
{
int status;
pid_t child_pid;
child_pid = fork();
ASSERT_LE(0, child_pid);
if (child_pid == 0) {
kill_thread_or_group(_metadata, RET_UNKNOWN);
_exit(38);
}
ASSERT_EQ(child_pid, waitpid(child_pid, &status, 0));
/* If the entire process was killed, we'll see SIGSYS. */
EXPECT_TRUE(WIFSIGNALED(status)) {
TH_LOG("Unknown SECCOMP_RET is only killing the thread?");
}
ASSERT_EQ(SIGSYS, WTERMSIG(status));
}
/* TODO(wad) add 64-bit versus 32-bit arg tests. */
TEST(arg_out_of_range)
{
struct sock_filter filter[] = {
BPF_STMT(BPF_LD|BPF_W|BPF_ABS, syscall_arg(6)),
BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
};
struct sock_fprog prog = {
.len = (unsigned short)ARRAY_SIZE(filter),
.filter = filter,
};
long ret;
ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
ASSERT_EQ(0, ret);
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
EXPECT_EQ(-1, ret);
EXPECT_EQ(EINVAL, errno);
}
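/*
 * Build a filter (prog_<name>) that fails read() with the given errno and
 * allows every other syscall.
 */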
#define ERRNO_FILTER(name, errno) \
struct sock_filter _read_filter_##name[] = { \
BPF_STMT(BPF_LD|BPF_W|BPF_ABS, \
offsetof(struct seccomp_data, nr)), \
BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_read, 0, 1), \
BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ERRNO | errno), \
BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), \
}; \
struct sock_fprog prog_##name = { \
.len = (unsigned short)ARRAY_SIZE(_read_filter_##name), \
.filter = _read_filter_##name, \
}
/* Make sure basic errno values are correctly passed through a filter. */
TEST(ERRNO_valid)
{
ERRNO_FILTER(valid, E2BIG);
long ret;
pid_t parent = getppid();
ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
ASSERT_EQ(0, ret);
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog_valid);
ASSERT_EQ(0, ret);
EXPECT_EQ(parent, syscall(__NR_getppid));
EXPECT_EQ(-1, read(-1, NULL, 0));
EXPECT_EQ(E2BIG, errno);
}
/* Make sure an errno of zero is correctly handled by the arch code. */
TEST(ERRNO_zero)
{
ERRNO_FILTER(zero, 0);
long ret;
pid_t parent = getppid();
ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
ASSERT_EQ(0, ret);
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog_zero);
ASSERT_EQ(0, ret);
EXPECT_EQ(parent, syscall(__NR_getppid));
/* "errno" of 0 is ok. */
EXPECT_EQ(0, read(-1, NULL, 0));
}
/*
* The SECCOMP_RET_DATA mask is 16 bits wide, but errno is smaller.
* This tests that the errno value gets capped correctly, fixed by
* 580c57f10768 ("seccomp: cap SECCOMP_RET_ERRNO data to MAX_ERRNO").
*/
TEST(ERRNO_capped)
{
ERRNO_FILTER(capped, 4096);
long ret;
pid_t parent = getppid();
ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
ASSERT_EQ(0, ret);
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog_capped);
ASSERT_EQ(0, ret);
EXPECT_EQ(parent, syscall(__NR_getppid));
EXPECT_EQ(-1, read(-1, NULL, 0));
EXPECT_EQ(4095, errno);
}
/*
* Filters are processed in reverse order: last applied is executed first.
* Since only the SECCOMP_RET_ACTION mask is tested for return values, the
* SECCOMP_RET_DATA mask results will follow the most recently applied
* matching filter return (and not the lowest or highest value).
*/
TEST(ERRNO_order)
{
ERRNO_FILTER(first, 11);
ERRNO_FILTER(second, 13);
ERRNO_FILTER(third, 12);
long ret;
pid_t parent = getppid();
ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
ASSERT_EQ(0, ret);
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog_first);
ASSERT_EQ(0, ret);
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog_second);
ASSERT_EQ(0, ret);
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog_third);
ASSERT_EQ(0, ret);
EXPECT_EQ(parent, syscall(__NR_getppid));
EXPECT_EQ(-1, read(-1, NULL, 0));
EXPECT_EQ(12, errno);
}
FIXTURE(TRAP) {
struct sock_fprog prog;
};
FIXTURE_SETUP(TRAP)
{
struct sock_filter filter[] = {
BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
offsetof(struct seccomp_data, nr)),
BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getpid, 0, 1),
BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRAP),
BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
};
memset(&self->prog, 0, sizeof(self->prog));
self->prog.filter = malloc(sizeof(filter));
ASSERT_NE(NULL, self->prog.filter);
memcpy(self->prog.filter, filter, sizeof(filter));
self->prog.len = (unsigned short)ARRAY_SIZE(filter);
}
FIXTURE_TEARDOWN(TRAP)
{
if (self->prog.filter)
free(self->prog.filter);
}
TEST_F_SIGNAL(TRAP, dfl, SIGSYS)
{
long ret;
ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
ASSERT_EQ(0, ret);
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->prog);
ASSERT_EQ(0, ret);
syscall(__NR_getpid);
}
/* Ensure that SIGSYS overrides SIG_IGN */
TEST_F_SIGNAL(TRAP, ign, SIGSYS)
{
long ret;
ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
ASSERT_EQ(0, ret);
signal(SIGSYS, SIG_IGN);
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->prog);
ASSERT_EQ(0, ret);
syscall(__NR_getpid);
}
static siginfo_t TRAP_info;
static volatile int TRAP_nr;
static void TRAP_action(int nr, siginfo_t *info, void *void_context)
{
memcpy(&TRAP_info, info, sizeof(TRAP_info));
TRAP_nr = nr;
}
TEST_F(TRAP, handler)
{
int ret, test;
struct sigaction act;
sigset_t mask;
memset(&act, 0, sizeof(act));
sigemptyset(&mask);
sigaddset(&mask, SIGSYS);
act.sa_sigaction = &TRAP_action;
act.sa_flags = SA_SIGINFO;
ret = sigaction(SIGSYS, &act, NULL);
ASSERT_EQ(0, ret) {
TH_LOG("sigaction failed");
}
ret = sigprocmask(SIG_UNBLOCK, &mask, NULL);
ASSERT_EQ(0, ret) {
TH_LOG("sigprocmask failed");
}
ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
ASSERT_EQ(0, ret);
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->prog);
ASSERT_EQ(0, ret);
TRAP_nr = 0;
memset(&TRAP_info, 0, sizeof(TRAP_info));
/* Expect the registers to be rolled back. (nr = error) may vary
 * based on arch. */
ret = syscall(__NR_getpid);
/* Silence gcc warning about volatile. */
test = TRAP_nr;
EXPECT_EQ(SIGSYS, test);
struct local_sigsys {
void *_call_addr; /* calling user insn */
int _syscall; /* triggering system call number */
unsigned int _arch; /* AUDIT_ARCH_* of syscall */
} *sigsys = (struct local_sigsys *)
#ifdef si_syscall
&(TRAP_info.si_call_addr);
#else
&TRAP_info.si_pid;
#endif
EXPECT_EQ(__NR_getpid, sigsys->_syscall);
/* Make sure arch is non-zero. */
EXPECT_NE(0, sigsys->_arch);
EXPECT_NE(0, (unsigned long)sigsys->_call_addr);
}
FIXTURE(precedence) {
struct sock_fprog allow;
struct sock_fprog log;
struct sock_fprog trace;
struct sock_fprog error;
struct sock_fprog trap;
struct sock_fprog kill;
};
FIXTURE_SETUP(precedence)
{
struct sock_filter allow_insns[] = {
BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
};
struct sock_filter log_insns[] = {
BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
offsetof(struct seccomp_data, nr)),
BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getpid, 1, 0),
BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_LOG),
};
struct sock_filter trace_insns[] = {
BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
offsetof(struct seccomp_data, nr)),
BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getpid, 1, 0),
BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE),
};
struct sock_filter error_insns[] = {
BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
offsetof(struct seccomp_data, nr)),
BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getpid, 1, 0),
BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ERRNO),
};
struct sock_filter trap_insns[] = {
BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
offsetof(struct seccomp_data, nr)),
BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getpid, 1, 0),
BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRAP),
};
struct sock_filter kill_insns[] = {
BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
offsetof(struct seccomp_data, nr)),
BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getpid, 1, 0),
BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL),
};
memset(self, 0, sizeof(*self));
#define FILTER_ALLOC(_x) \
self->_x.filter = malloc(sizeof(_x##_insns)); \
ASSERT_NE(NULL, self->_x.filter); \
memcpy(self->_x.filter, &_x##_insns, sizeof(_x##_insns)); \
self->_x.len = (unsigned short)ARRAY_SIZE(_x##_insns)
FILTER_ALLOC(allow);
FILTER_ALLOC(log);
FILTER_ALLOC(trace);
FILTER_ALLOC(error);
FILTER_ALLOC(trap);
FILTER_ALLOC(kill);
}
FIXTURE_TEARDOWN(precedence)
{
#define FILTER_FREE(_x) if (self->_x.filter) free(self->_x.filter)
FILTER_FREE(allow);
FILTER_FREE(log);
FILTER_FREE(trace);
FILTER_FREE(error);
FILTER_FREE(trap);
FILTER_FREE(kill);
}
TEST_F(precedence, allow_ok)
{
pid_t parent, res = 0;
long ret;
parent = getppid();
ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
ASSERT_EQ(0, ret);
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow);
ASSERT_EQ(0, ret);
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->log);
ASSERT_EQ(0, ret);
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace);
ASSERT_EQ(0, ret);
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->error);
ASSERT_EQ(0, ret);
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trap);
ASSERT_EQ(0, ret);
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->kill);
ASSERT_EQ(0, ret);
/* Should work just fine. */
res = syscall(__NR_getppid);
EXPECT_EQ(parent, res);
}
TEST_F_SIGNAL(precedence, kill_is_highest, SIGSYS)
{
pid_t parent, res = 0;
long ret;
parent = getppid();
ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
ASSERT_EQ(0, ret);
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow);
ASSERT_EQ(0, ret);
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->log);
ASSERT_EQ(0, ret);
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace);
ASSERT_EQ(0, ret);
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->error);
ASSERT_EQ(0, ret);
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trap);
ASSERT_EQ(0, ret);
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->kill);
ASSERT_EQ(0, ret);
/* Should work just fine. */
res = syscall(__NR_getppid);
EXPECT_EQ(parent, res);
/* getpid() should never return. */
res = syscall(__NR_getpid);
EXPECT_EQ(0, res);
}
TEST_F_SIGNAL(precedence, kill_is_highest_in_any_order, SIGSYS)
{
pid_t parent;
long ret;
parent = getppid();
ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
ASSERT_EQ(0, ret);
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow);
ASSERT_EQ(0, ret);
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->kill);
ASSERT_EQ(0, ret);
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->error);
ASSERT_EQ(0, ret);
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->log);
ASSERT_EQ(0, ret);
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace);
ASSERT_EQ(0, ret);
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trap);
ASSERT_EQ(0, ret);
/* Should work just fine. */
EXPECT_EQ(parent, syscall(__NR_getppid));
/* getpid() should never return. */
EXPECT_EQ(0, syscall(__NR_getpid));
}
TEST_F_SIGNAL(precedence, trap_is_second, SIGSYS)
{
pid_t parent;
long ret;
parent = getppid();
ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
ASSERT_EQ(0, ret);
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow);
ASSERT_EQ(0, ret);
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->log);
ASSERT_EQ(0, ret);
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace);
ASSERT_EQ(0, ret);
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->error);
ASSERT_EQ(0, ret);
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trap);
ASSERT_EQ(0, ret);
/* Should work just fine. */
EXPECT_EQ(parent, syscall(__NR_getppid));
/* getpid() should never return. */
EXPECT_EQ(0, syscall(__NR_getpid));
}
TEST_F_SIGNAL(precedence, trap_is_second_in_any_order, SIGSYS)
{
pid_t parent;
long ret;
parent = getppid();
ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
ASSERT_EQ(0, ret);
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow);
ASSERT_EQ(0, ret);
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trap);
ASSERT_EQ(0, ret);
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->log);
ASSERT_EQ(0, ret);
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace);
ASSERT_EQ(0, ret);
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->error);
ASSERT_EQ(0, ret);
/* Should work just fine. */
EXPECT_EQ(parent, syscall(__NR_getppid));
/* getpid() should never return. */
EXPECT_EQ(0, syscall(__NR_getpid));
}
TEST_F(precedence, errno_is_third)
{
pid_t parent;
long ret;
parent = getppid();
ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
ASSERT_EQ(0, ret);
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow);
ASSERT_EQ(0, ret);
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->log);
ASSERT_EQ(0, ret);
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace);
ASSERT_EQ(0, ret);
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->error);
ASSERT_EQ(0, ret);
/* Should work just fine. */
EXPECT_EQ(parent, syscall(__NR_getppid));
EXPECT_EQ(0, syscall(__NR_getpid));
}
TEST_F(precedence, errno_is_third_in_any_order)
{
pid_t parent;
long ret;
parent = getppid();
ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
ASSERT_EQ(0, ret);
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->log);
ASSERT_EQ(0, ret);
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->error);
ASSERT_EQ(0, ret);
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace);
ASSERT_EQ(0, ret);
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow);
ASSERT_EQ(0, ret);
/* Should work just fine. */
EXPECT_EQ(parent, syscall(__NR_getppid));
EXPECT_EQ(0, syscall(__NR_getpid));
}
TEST_F(precedence, trace_is_fourth)
{
pid_t parent;
long ret;
parent = getppid();
ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
ASSERT_EQ(0, ret);
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow);
ASSERT_EQ(0, ret);
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->log);
ASSERT_EQ(0, ret);
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace);
ASSERT_EQ(0, ret);
/* Should work just fine. */
EXPECT_EQ(parent, syscall(__NR_getppid));
/* No ptracer */
EXPECT_EQ(-1, syscall(__NR_getpid));
}
TEST_F(precedence, trace_is_fourth_in_any_order)
{
pid_t parent;
long ret;
parent = getppid();
ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
ASSERT_EQ(0, ret);
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace);
ASSERT_EQ(0, ret);
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow);
ASSERT_EQ(0, ret);
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->log);
ASSERT_EQ(0, ret);
/* Should work just fine. */
EXPECT_EQ(parent, syscall(__NR_getppid));
/* No ptracer */
EXPECT_EQ(-1, syscall(__NR_getpid));
}
TEST_F(precedence, log_is_fifth)
{
pid_t mypid, parent;
long ret;
mypid = getpid();
parent = getppid();
ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
ASSERT_EQ(0, ret);
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow);
ASSERT_EQ(0, ret);
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->log);
ASSERT_EQ(0, ret);
/* Should work just fine. */
EXPECT_EQ(parent, syscall(__NR_getppid));
/* Should also work just fine */
EXPECT_EQ(mypid, syscall(__NR_getpid));
}
TEST_F(precedence, log_is_fifth_in_any_order)
{
pid_t mypid, parent;
long ret;
mypid = getpid();
parent = getppid();
ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
ASSERT_EQ(0, ret);
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->log);
ASSERT_EQ(0, ret);
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow);
ASSERT_EQ(0, ret);
/* Should work just fine. */
EXPECT_EQ(parent, syscall(__NR_getppid));
/* Should also work just fine */
EXPECT_EQ(mypid, syscall(__NR_getpid));
}
#ifndef PTRACE_O_TRACESECCOMP
#define PTRACE_O_TRACESECCOMP 0x00000080
#endif
/* Catch the Ubuntu 12.04 value error. */
#if PTRACE_EVENT_SECCOMP != 7
#undef PTRACE_EVENT_SECCOMP
#endif
#ifndef PTRACE_EVENT_SECCOMP
#define PTRACE_EVENT_SECCOMP 7
#endif
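/* ptrace reports the event that caused a stop in the upper 16 bits of the wait status. */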
#define PTRACE_EVENT_MASK(status) ((status) >> 16)
bool tracer_running;
void tracer_stop(int sig)
{
tracer_running = false;
}
typedef void tracer_func_t(struct __test_metadata *_metadata,
pid_t tracee, int status, void *args);
void start_tracer(struct __test_metadata *_metadata, int fd, pid_t tracee,
tracer_func_t tracer_func, void *args, bool ptrace_syscall)
{
int ret = -1;
struct sigaction action = {
.sa_handler = tracer_stop,
};
/* Allow external shutdown. */
tracer_running = true;
ASSERT_EQ(0, sigaction(SIGUSR1, &action, NULL));
errno = 0;
while (ret == -1 && errno != EINVAL)
ret = ptrace(PTRACE_ATTACH, tracee, NULL, 0);
ASSERT_EQ(0, ret) {
kill(tracee, SIGKILL);
}
/* Wait for attach stop */
wait(NULL);
ret = ptrace(PTRACE_SETOPTIONS, tracee, NULL, ptrace_syscall ?
PTRACE_O_TRACESYSGOOD :
PTRACE_O_TRACESECCOMP);
ASSERT_EQ(0, ret) {
TH_LOG("Failed to set PTRACE_O_TRACESECCOMP");
kill(tracee, SIGKILL);
}
ret = ptrace(ptrace_syscall ? PTRACE_SYSCALL : PTRACE_CONT,
tracee, NULL, 0);
ASSERT_EQ(0, ret);
/* Unblock the tracee */
ASSERT_EQ(1, write(fd, "A", 1));
ASSERT_EQ(0, close(fd));
/* Run until we're shut down. Must assert to stop execution. */
while (tracer_running) {
int status;
if (wait(&status) != tracee)
continue;
if (WIFSIGNALED(status)) {
/* Child caught a fatal signal. */
return;
}
if (WIFEXITED(status)) {
/* Child exited with code. */
return;
}
/* Check if we got an expected event. */
ASSERT_EQ(WIFCONTINUED(status), false);
ASSERT_EQ(WIFSTOPPED(status), true);
ASSERT_EQ(WSTOPSIG(status) & SIGTRAP, SIGTRAP) {
TH_LOG("Unexpected WSTOPSIG: %d", WSTOPSIG(status));
}
tracer_func(_metadata, tracee, status, args);
ret = ptrace(ptrace_syscall ? PTRACE_SYSCALL : PTRACE_CONT,
tracee, NULL, 0);
ASSERT_EQ(0, ret);
}
/* Directly report the status of our test harness results. */
syscall(__NR_exit, _metadata->passed ? EXIT_SUCCESS : EXIT_FAILURE);
}
/* Common tracer setup/teardown functions. */
void cont_handler(int num)
{ }
pid_t setup_trace_fixture(struct __test_metadata *_metadata,
tracer_func_t func, void *args, bool ptrace_syscall)
{
char sync;
int pipefd[2];
pid_t tracer_pid;
pid_t tracee = getpid();
/* Setup a pipe for clean synchronization. */
ASSERT_EQ(0, pipe(pipefd));
/* Fork a child which we'll promote to tracer */
tracer_pid = fork();
ASSERT_LE(0, tracer_pid);
signal(SIGALRM, cont_handler);
if (tracer_pid == 0) {
close(pipefd[0]);
start_tracer(_metadata, pipefd[1], tracee, func, args,
ptrace_syscall);
syscall(__NR_exit, 0);
}
close(pipefd[1]);
prctl(PR_SET_PTRACER, tracer_pid, 0, 0, 0);
read(pipefd[0], &sync, 1);
close(pipefd[0]);
return tracer_pid;
}
void teardown_trace_fixture(struct __test_metadata *_metadata,
pid_t tracer)
{
if (tracer) {
int status;
/*
* Extract the exit code from the other process and
* adopt it for ourselves in case its asserts failed.
*/
ASSERT_EQ(0, kill(tracer, SIGUSR1));
ASSERT_EQ(tracer, waitpid(tracer, &status, 0));
if (WEXITSTATUS(status))
_metadata->passed = 0;
}
}
/* "poke" tracer arguments and function. */
struct tracer_args_poke_t {
unsigned long poke_addr;
};
void tracer_poke(struct __test_metadata *_metadata, pid_t tracee, int status,
void *args)
{
int ret;
unsigned long msg;
struct tracer_args_poke_t *info = (struct tracer_args_poke_t *)args;
ret = ptrace(PTRACE_GETEVENTMSG, tracee, NULL, &msg);
EXPECT_EQ(0, ret);
/* If this fails, don't try to recover. */
ASSERT_EQ(0x1001, msg) {
kill(tracee, SIGKILL);
}
/*
* Poke in the message.
* Registers are not touched to try to keep this relatively arch
* agnostic.
*/
ret = ptrace(PTRACE_POKEDATA, tracee, info->poke_addr, 0x1001);
EXPECT_EQ(0, ret);
}
FIXTURE(TRACE_poke) {
struct sock_fprog prog;
pid_t tracer;
long poked;
struct tracer_args_poke_t tracer_args;
};
FIXTURE_SETUP(TRACE_poke)
{
struct sock_filter filter[] = {
BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
offsetof(struct seccomp_data, nr)),
BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_read, 0, 1),
BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE | 0x1001),
BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
};
self->poked = 0;
memset(&self->prog, 0, sizeof(self->prog));
self->prog.filter = malloc(sizeof(filter));
ASSERT_NE(NULL, self->prog.filter);
memcpy(self->prog.filter, filter, sizeof(filter));
self->prog.len = (unsigned short)ARRAY_SIZE(filter);
/* Set up tracer args. */
self->tracer_args.poke_addr = (unsigned long)&self->poked;
/* Launch tracer. */
self->tracer = setup_trace_fixture(_metadata, tracer_poke,
&self->tracer_args, false);
}
FIXTURE_TEARDOWN(TRACE_poke)
{
teardown_trace_fixture(_metadata, self->tracer);
if (self->prog.filter)
free(self->prog.filter);
}
TEST_F(TRACE_poke, read_has_side_effects)
{
ssize_t ret;
ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
ASSERT_EQ(0, ret);
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->prog, 0, 0);
ASSERT_EQ(0, ret);
EXPECT_EQ(0, self->poked);
ret = read(-1, NULL, 0);
EXPECT_EQ(-1, ret);
EXPECT_EQ(0x1001, self->poked);
}
TEST_F(TRACE_poke, getpid_runs_normally)
{
long ret;
ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
ASSERT_EQ(0, ret);
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->prog, 0, 0);
ASSERT_EQ(0, ret);
EXPECT_EQ(0, self->poked);
EXPECT_NE(0, syscall(__NR_getpid));
EXPECT_EQ(0, self->poked);
}
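/*
 * Per-architecture register set type and accessors for the syscall number
 * and return value, used by the ptrace-based tracers below.
 */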
#if defined(__x86_64__)
# define ARCH_REGS struct user_regs_struct
# define SYSCALL_NUM(_regs) (_regs).orig_rax
# define SYSCALL_RET(_regs) (_regs).rax
#elif defined(__i386__)
# define ARCH_REGS struct user_regs_struct
# define SYSCALL_NUM(_regs) (_regs).orig_eax
# define SYSCALL_RET(_regs) (_regs).eax
#elif defined(__arm__)
# define ARCH_REGS struct pt_regs
# define SYSCALL_NUM(_regs) (_regs).ARM_r7
# ifndef PTRACE_SET_SYSCALL
# define PTRACE_SET_SYSCALL 23
# endif
# define SYSCALL_NUM_SET(_regs, _nr) \
EXPECT_EQ(0, ptrace(PTRACE_SET_SYSCALL, tracee, NULL, _nr))
# define SYSCALL_RET(_regs) (_regs).ARM_r0
#elif defined(__aarch64__)
# define ARCH_REGS struct user_pt_regs
# define SYSCALL_NUM(_regs) (_regs).regs[8]
# ifndef NT_ARM_SYSTEM_CALL
# define NT_ARM_SYSTEM_CALL 0x404
# endif
# define SYSCALL_NUM_SET(_regs, _nr) \
do { \
struct iovec __v; \
typeof(_nr) __nr = (_nr); \
__v.iov_base = &__nr; \
__v.iov_len = sizeof(__nr); \
EXPECT_EQ(0, ptrace(PTRACE_SETREGSET, tracee, \
NT_ARM_SYSTEM_CALL, &__v)); \
} while (0)
# define SYSCALL_RET(_regs) (_regs).regs[0]
#elif defined(__loongarch__)
# define ARCH_REGS struct user_pt_regs
# define SYSCALL_NUM(_regs) (_regs).regs[11]
# define SYSCALL_RET(_regs) (_regs).regs[4]
#elif defined(__riscv) && __riscv_xlen == 64
# define ARCH_REGS struct user_regs_struct
# define SYSCALL_NUM(_regs) (_regs).a7
# define SYSCALL_RET(_regs) (_regs).a0
#elif defined(__csky__)
# define ARCH_REGS struct pt_regs
# if defined(__CSKYABIV2__)
# define SYSCALL_NUM(_regs) (_regs).regs[3]
# else
# define SYSCALL_NUM(_regs) (_regs).regs[9]
# endif
# define SYSCALL_RET(_regs) (_regs).a0
#elif defined(__hppa__)
# define ARCH_REGS struct user_regs_struct
# define SYSCALL_NUM(_regs) (_regs).gr[20]
# define SYSCALL_RET(_regs) (_regs).gr[28]
#elif defined(__powerpc__)
# define ARCH_REGS struct pt_regs
# define SYSCALL_NUM(_regs) (_regs).gpr[0]
# define SYSCALL_RET(_regs) (_regs).gpr[3]
# define SYSCALL_RET_SET(_regs, _val) \
do { \
typeof(_val) _result = (_val); \
if ((_regs.trap & 0xfff0) == 0x3000) { \
/* \
* scv 0 system call uses -ve result \
* for error, so no need to adjust. \
*/ \
SYSCALL_RET(_regs) = _result; \
} else { \
/* \
* A syscall error is signaled by the \
* CR0 SO bit and the code is stored as \
* a positive value. \
*/ \
if (_result < 0) { \
SYSCALL_RET(_regs) = -_result; \
(_regs).ccr |= 0x10000000; \
} else { \
SYSCALL_RET(_regs) = _result; \
(_regs).ccr &= ~0x10000000; \
} \
} \
} while (0)
# define SYSCALL_RET_SET_ON_PTRACE_EXIT
#elif defined(__s390__)
# define ARCH_REGS s390_regs
# define SYSCALL_NUM(_regs) (_regs).gprs[2]
# define SYSCALL_RET_SET(_regs, _val) \
TH_LOG("Can't modify syscall return on this architecture")
#elif defined(__mips__)
# include <asm/unistd_nr_n32.h>
# include <asm/unistd_nr_n64.h>
# include <asm/unistd_nr_o32.h>
# define ARCH_REGS struct pt_regs
# define SYSCALL_NUM(_regs) \
({ \
typeof((_regs).regs[2]) _nr; \
if ((_regs).regs[2] == __NR_O32_Linux) \
_nr = (_regs).regs[4]; \
else \
_nr = (_regs).regs[2]; \
_nr; \
})
# define SYSCALL_NUM_SET(_regs, _nr) \
do { \
if ((_regs).regs[2] == __NR_O32_Linux) \
(_regs).regs[4] = _nr; \
else \
(_regs).regs[2] = _nr; \
} while (0)
# define SYSCALL_RET_SET(_regs, _val) \
TH_LOG("Can't modify syscall return on this architecture")
#elif defined(__xtensa__)
# define ARCH_REGS struct user_pt_regs
# define SYSCALL_NUM(_regs) (_regs).syscall
/*
* On xtensa syscall return value is in the register
* a2 of the current window which is not fixed.
*/
#define SYSCALL_RET(_regs) (_regs).a[(_regs).windowbase * 4 + 2]
#elif defined(__sh__)
# define ARCH_REGS struct pt_regs
# define SYSCALL_NUM(_regs) (_regs).regs[3]
# define SYSCALL_RET(_regs) (_regs).regs[0]
#elif defined(__mc68000__)
# define ARCH_REGS struct user_regs_struct
# define SYSCALL_NUM(_regs) (_regs).orig_d0
# define SYSCALL_RET(_regs) (_regs).d0
#else
# error "Do not know how to find your architecture's registers and syscalls"
#endif
/*
* Most architectures can change the syscall by just updating the
* associated register. This is the default if not defined above.
*/
#ifndef SYSCALL_NUM_SET
# define SYSCALL_NUM_SET(_regs, _nr) \
do { \
SYSCALL_NUM(_regs) = (_nr); \
} while (0)
#endif
/*
* Most architectures can change the syscall return value by just
* writing to the SYSCALL_RET register. This is the default if not
* defined above. If an architecture cannot set the return value
* (for example when the syscall and return value register is
* shared), report it with TH_LOG() in an arch-specific definition
* of SYSCALL_RET_SET() above, and leave SYSCALL_RET undefined.
*/
#if !defined(SYSCALL_RET) && !defined(SYSCALL_RET_SET)
# error "One of SYSCALL_RET or SYSCALL_RET_SET is needed for this arch"
#endif
#ifndef SYSCALL_RET_SET
# define SYSCALL_RET_SET(_regs, _val) \
do { \
SYSCALL_RET(_regs) = (_val); \
} while (0)
#endif
/* When the syscall return can't be changed, stub out the tests for it. */
#ifndef SYSCALL_RET
# define EXPECT_SYSCALL_RETURN(val, action) EXPECT_EQ(-1, action)
#else
# define EXPECT_SYSCALL_RETURN(val, action) \
do { \
errno = 0; \
if (val < 0) { \
EXPECT_EQ(-1, action); \
EXPECT_EQ(-(val), errno); \
} else { \
EXPECT_EQ(val, action); \
} \
} while (0)
#endif
/*
* Some architectures (e.g. powerpc) can only set syscall
* return values on syscall exit during ptrace.
*/
const bool ptrace_entry_set_syscall_nr = true;
const bool ptrace_entry_set_syscall_ret =
#ifndef SYSCALL_RET_SET_ON_PTRACE_EXIT
true;
#else
false;
#endif
/*
* Use PTRACE_GETREGS and PTRACE_SETREGS when available. This is useful for
* architectures without HAVE_ARCH_TRACEHOOK (e.g. User-mode Linux).
*/
#if defined(__x86_64__) || defined(__i386__) || defined(__mips__) || defined(__mc68000__)
# define ARCH_GETREGS(_regs) ptrace(PTRACE_GETREGS, tracee, 0, &(_regs))
# define ARCH_SETREGS(_regs) ptrace(PTRACE_SETREGS, tracee, 0, &(_regs))
#else
# define ARCH_GETREGS(_regs) ({ \
struct iovec __v; \
__v.iov_base = &(_regs); \
__v.iov_len = sizeof(_regs); \
ptrace(PTRACE_GETREGSET, tracee, NT_PRSTATUS, &__v); \
})
# define ARCH_SETREGS(_regs) ({ \
struct iovec __v; \
__v.iov_base = &(_regs); \
__v.iov_len = sizeof(_regs); \
ptrace(PTRACE_SETREGSET, tracee, NT_PRSTATUS, &__v); \
})
#endif
/* Architecture-specific syscall fetching routine. */
int get_syscall(struct __test_metadata *_metadata, pid_t tracee)
{
ARCH_REGS regs;
EXPECT_EQ(0, ARCH_GETREGS(regs)) {
return -1;
}
return SYSCALL_NUM(regs);
}
/* Architecture-specific syscall changing routine. */
void __change_syscall(struct __test_metadata *_metadata,
pid_t tracee, long *syscall, long *ret)
{
ARCH_REGS orig, regs;
/* Do not get/set registers if we have nothing to do. */
if (!syscall && !ret)
return;
EXPECT_EQ(0, ARCH_GETREGS(regs)) {
return;
}
orig = regs;
if (syscall)
SYSCALL_NUM_SET(regs, *syscall);
if (ret)
SYSCALL_RET_SET(regs, *ret);
/* Flush any register changes made. */
	if (memcmp(&orig, &regs, sizeof(orig)) != 0)
EXPECT_EQ(0, ARCH_SETREGS(regs));
}
/* Change only syscall number. */
void change_syscall_nr(struct __test_metadata *_metadata,
pid_t tracee, long syscall)
{
__change_syscall(_metadata, tracee, &syscall, NULL);
}
/* Change syscall return value (and set syscall number to -1). */
void change_syscall_ret(struct __test_metadata *_metadata,
pid_t tracee, long ret)
{
long syscall = -1;
__change_syscall(_metadata, tracee, &syscall, &ret);
}
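/*
 * Tracer callback for SECCOMP_RET_TRACE stops: inspects the event message
 * and rewrites the syscall number or return value accordingly.
 */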
void tracer_seccomp(struct __test_metadata *_metadata, pid_t tracee,
int status, void *args)
{
int ret;
unsigned long msg;
EXPECT_EQ(PTRACE_EVENT_MASK(status), PTRACE_EVENT_SECCOMP) {
TH_LOG("Unexpected ptrace event: %d", PTRACE_EVENT_MASK(status));
return;
}
/* Make sure we got the right message. */
ret = ptrace(PTRACE_GETEVENTMSG, tracee, NULL, &msg);
EXPECT_EQ(0, ret);
/* Validate and take action on expected syscalls. */
switch (msg) {
case 0x1002:
/* change getpid to getppid. */
EXPECT_EQ(__NR_getpid, get_syscall(_metadata, tracee));
change_syscall_nr(_metadata, tracee, __NR_getppid);
break;
case 0x1003:
/* skip gettid with valid return code. */
EXPECT_EQ(__NR_gettid, get_syscall(_metadata, tracee));
change_syscall_ret(_metadata, tracee, 45000);
break;
case 0x1004:
/* skip openat with error. */
EXPECT_EQ(__NR_openat, get_syscall(_metadata, tracee));
change_syscall_ret(_metadata, tracee, -ESRCH);
break;
case 0x1005:
/* do nothing (allow getppid) */
EXPECT_EQ(__NR_getppid, get_syscall(_metadata, tracee));
break;
default:
EXPECT_EQ(0, msg) {
TH_LOG("Unknown PTRACE_GETEVENTMSG: 0x%lx", msg);
kill(tracee, SIGKILL);
}
}
}
FIXTURE(TRACE_syscall) {
struct sock_fprog prog;
pid_t tracer, mytid, mypid, parent;
long syscall_nr;
};
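/*
 * Tracer callback for plain PTRACE_SYSCALL stops: performs the same
 * rewrites as tracer_seccomp(), tracking entry vs. exit by counting stops.
 */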
void tracer_ptrace(struct __test_metadata *_metadata, pid_t tracee,
int status, void *args)
{
int ret;
unsigned long msg;
static bool entry;
long syscall_nr_val, syscall_ret_val;
long *syscall_nr = NULL, *syscall_ret = NULL;
FIXTURE_DATA(TRACE_syscall) *self = args;
EXPECT_EQ(WSTOPSIG(status) & 0x80, 0x80) {
TH_LOG("Unexpected WSTOPSIG: %d", WSTOPSIG(status));
return;
}
/*
* The traditional way to tell PTRACE_SYSCALL entry/exit
* is by counting.
*/
entry = !entry;
/* Make sure we got an appropriate message. */
ret = ptrace(PTRACE_GETEVENTMSG, tracee, NULL, &msg);
EXPECT_EQ(0, ret);
EXPECT_EQ(entry ? PTRACE_EVENTMSG_SYSCALL_ENTRY
: PTRACE_EVENTMSG_SYSCALL_EXIT, msg);
/*
* Some architectures only support setting return values during
* syscall exit under ptrace, and on exit the syscall number may
	 * no longer be available. Therefore, save the initial syscall
* number here, so it can be examined during both entry and exit
* phases.
*/
if (entry)
self->syscall_nr = get_syscall(_metadata, tracee);
/*
* Depending on the architecture's syscall setting abilities, we
* pick which things to set during this phase (entry or exit).
*/
if (entry == ptrace_entry_set_syscall_nr)
syscall_nr = &syscall_nr_val;
if (entry == ptrace_entry_set_syscall_ret)
syscall_ret = &syscall_ret_val;
/* Now handle the actual rewriting cases. */
switch (self->syscall_nr) {
case __NR_getpid:
syscall_nr_val = __NR_getppid;
/* Never change syscall return for this case. */
syscall_ret = NULL;
break;
case __NR_gettid:
syscall_nr_val = -1;
syscall_ret_val = 45000;
break;
case __NR_openat:
syscall_nr_val = -1;
syscall_ret_val = -ESRCH;
break;
default:
/* Unhandled, do nothing. */
return;
}
__change_syscall(_metadata, tracee, syscall_nr, syscall_ret);
}
FIXTURE_VARIANT(TRACE_syscall) {
/*
* All of the SECCOMP_RET_TRACE behaviors can be tested with either
* SECCOMP_RET_TRACE+PTRACE_CONT or plain ptrace()+PTRACE_SYSCALL.
* This indicates if we should use SECCOMP_RET_TRACE (false), or
* ptrace (true).
*/
bool use_ptrace;
};
FIXTURE_VARIANT_ADD(TRACE_syscall, ptrace) {
.use_ptrace = true,
};
FIXTURE_VARIANT_ADD(TRACE_syscall, seccomp) {
.use_ptrace = false,
};
FIXTURE_SETUP(TRACE_syscall)
{
struct sock_filter filter[] = {
BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
offsetof(struct seccomp_data, nr)),
BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getpid, 0, 1),
BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE | 0x1002),
BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_gettid, 0, 1),
BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE | 0x1003),
BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_openat, 0, 1),
BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE | 0x1004),
BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getppid, 0, 1),
BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE | 0x1005),
BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
};
struct sock_fprog prog = {
.len = (unsigned short)ARRAY_SIZE(filter),
.filter = filter,
};
long ret;
/* Prepare some testable syscall results. */
self->mytid = syscall(__NR_gettid);
ASSERT_GT(self->mytid, 0);
ASSERT_NE(self->mytid, 1) {
TH_LOG("Running this test as init is not supported. :)");
}
self->mypid = getpid();
ASSERT_GT(self->mypid, 0);
ASSERT_EQ(self->mytid, self->mypid);
self->parent = getppid();
ASSERT_GT(self->parent, 0);
ASSERT_NE(self->parent, self->mypid);
/* Launch tracer. */
self->tracer = setup_trace_fixture(_metadata,
variant->use_ptrace ? tracer_ptrace
: tracer_seccomp,
self, variant->use_ptrace);
ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
ASSERT_EQ(0, ret);
/* Do not install seccomp rewrite filters, as we'll use ptrace instead. */
if (variant->use_ptrace)
return;
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0);
ASSERT_EQ(0, ret);
}
FIXTURE_TEARDOWN(TRACE_syscall)
{
teardown_trace_fixture(_metadata, self->tracer);
}
TEST(negative_ENOSYS)
{
#if defined(__arm__)
SKIP(return, "arm32 does not support calling syscall -1");
#endif
/*
* There should be no difference between an "internal" skip
* and userspace asking for syscall "-1".
*/
errno = 0;
EXPECT_EQ(-1, syscall(-1));
EXPECT_EQ(errno, ENOSYS);
/* And no difference for "still not valid but not -1". */
errno = 0;
EXPECT_EQ(-1, syscall(-101));
EXPECT_EQ(errno, ENOSYS);
}
TEST_F(TRACE_syscall, negative_ENOSYS)
{
negative_ENOSYS(_metadata);
}
TEST_F(TRACE_syscall, syscall_allowed)
{
/* getppid works as expected (no changes). */
EXPECT_EQ(self->parent, syscall(__NR_getppid));
EXPECT_NE(self->mypid, syscall(__NR_getppid));
}
TEST_F(TRACE_syscall, syscall_redirected)
{
/* getpid has been redirected to getppid as expected. */
EXPECT_EQ(self->parent, syscall(__NR_getpid));
EXPECT_NE(self->mypid, syscall(__NR_getpid));
}
TEST_F(TRACE_syscall, syscall_errno)
{
	/* Tracer should skip the openat syscall, resulting in ESRCH. */
EXPECT_SYSCALL_RETURN(-ESRCH, syscall(__NR_openat));
}
TEST_F(TRACE_syscall, syscall_faked)
{
	/* Tracer skips the gettid syscall and stores an altered return value. */
EXPECT_SYSCALL_RETURN(45000, syscall(__NR_gettid));
}
TEST_F_SIGNAL(TRACE_syscall, kill_immediate, SIGSYS)
{
struct sock_filter filter[] = {
BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
offsetof(struct seccomp_data, nr)),
BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_mknodat, 0, 1),
BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL_THREAD),
BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
};
struct sock_fprog prog = {
.len = (unsigned short)ARRAY_SIZE(filter),
.filter = filter,
};
long ret;
/* Install "kill on mknodat" filter. */
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0);
ASSERT_EQ(0, ret);
/* This should immediately die with SIGSYS, regardless of tracer. */
EXPECT_EQ(-1, syscall(__NR_mknodat, -1, NULL, 0, 0));
}
TEST_F(TRACE_syscall, skip_after)
{
struct sock_filter filter[] = {
BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
offsetof(struct seccomp_data, nr)),
BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getppid, 0, 1),
BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ERRNO | EPERM),
BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
};
struct sock_fprog prog = {
.len = (unsigned short)ARRAY_SIZE(filter),
.filter = filter,
};
long ret;
/* Install additional "errno on getppid" filter. */
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0);
ASSERT_EQ(0, ret);
/* Tracer will redirect getpid to getppid, and we should see EPERM. */
errno = 0;
EXPECT_EQ(-1, syscall(__NR_getpid));
EXPECT_EQ(EPERM, errno);
}
TEST_F_SIGNAL(TRACE_syscall, kill_after, SIGSYS)
{
struct sock_filter filter[] = {
BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
offsetof(struct seccomp_data, nr)),
BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getppid, 0, 1),
BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL),
BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
};
struct sock_fprog prog = {
.len = (unsigned short)ARRAY_SIZE(filter),
.filter = filter,
};
long ret;
/* Install additional "death on getppid" filter. */
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0);
ASSERT_EQ(0, ret);
/* Tracer will redirect getpid to getppid, and we should die. */
EXPECT_NE(self->mypid, syscall(__NR_getpid));
}
TEST(seccomp_syscall)
{
struct sock_filter filter[] = {
BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
};
struct sock_fprog prog = {
.len = (unsigned short)ARRAY_SIZE(filter),
.filter = filter,
};
long ret;
ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
ASSERT_EQ(0, ret) {
TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
}
/* Reject insane operation. */
ret = seccomp(-1, 0, &prog);
ASSERT_NE(ENOSYS, errno) {
TH_LOG("Kernel does not support seccomp syscall!");
}
EXPECT_EQ(EINVAL, errno) {
TH_LOG("Did not reject crazy op value!");
}
/* Reject strict with flags or pointer. */
ret = seccomp(SECCOMP_SET_MODE_STRICT, -1, NULL);
EXPECT_EQ(EINVAL, errno) {
TH_LOG("Did not reject mode strict with flags!");
}
ret = seccomp(SECCOMP_SET_MODE_STRICT, 0, &prog);
EXPECT_EQ(EINVAL, errno) {
TH_LOG("Did not reject mode strict with uargs!");
}
/* Reject insane args for filter. */
ret = seccomp(SECCOMP_SET_MODE_FILTER, -1, &prog);
EXPECT_EQ(EINVAL, errno) {
TH_LOG("Did not reject crazy filter flags!");
}
ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, NULL);
EXPECT_EQ(EFAULT, errno) {
TH_LOG("Did not reject NULL filter!");
}
ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, &prog);
EXPECT_EQ(0, errno) {
TH_LOG("Kernel does not support SECCOMP_SET_MODE_FILTER: %s",
strerror(errno));
}
}
TEST(seccomp_syscall_mode_lock)
{
struct sock_filter filter[] = {
BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
};
struct sock_fprog prog = {
.len = (unsigned short)ARRAY_SIZE(filter),
.filter = filter,
};
long ret;
ret = prctl(PR_SET_NO_NEW_PRIVS, 1, NULL, 0, 0);
ASSERT_EQ(0, ret) {
TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
}
ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, &prog);
ASSERT_NE(ENOSYS, errno) {
TH_LOG("Kernel does not support seccomp syscall!");
}
EXPECT_EQ(0, ret) {
TH_LOG("Could not install filter!");
}
/* Make sure neither entry point will switch to strict. */
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_STRICT, 0, 0, 0);
EXPECT_EQ(EINVAL, errno) {
TH_LOG("Switched to mode strict!");
}
ret = seccomp(SECCOMP_SET_MODE_STRICT, 0, NULL);
EXPECT_EQ(EINVAL, errno) {
TH_LOG("Switched to mode strict!");
}
}
/*
* Test detection of known and unknown filter flags. Userspace needs to be able
* to check if a filter flag is supported by the current kernel and a good way
* of doing that is by attempting to enter filter mode, with the flag bit in
* question set, and a NULL pointer for the _args_ parameter. EFAULT indicates
* that the flag is valid and EINVAL indicates that the flag is invalid.
*/
TEST(detect_seccomp_filter_flags)
{
unsigned int flags[] = { SECCOMP_FILTER_FLAG_TSYNC,
SECCOMP_FILTER_FLAG_LOG,
SECCOMP_FILTER_FLAG_SPEC_ALLOW,
SECCOMP_FILTER_FLAG_NEW_LISTENER,
SECCOMP_FILTER_FLAG_TSYNC_ESRCH };
unsigned int exclusive[] = {
SECCOMP_FILTER_FLAG_TSYNC,
SECCOMP_FILTER_FLAG_NEW_LISTENER };
unsigned int flag, all_flags, exclusive_mask;
int i;
long ret;
/* Test detection of individual known-good filter flags */
for (i = 0, all_flags = 0; i < ARRAY_SIZE(flags); i++) {
int bits = 0;
flag = flags[i];
/* Make sure the flag is a single bit! */
while (flag) {
if (flag & 0x1)
				bits++;
flag >>= 1;
}
ASSERT_EQ(1, bits);
flag = flags[i];
ret = seccomp(SECCOMP_SET_MODE_FILTER, flag, NULL);
ASSERT_NE(ENOSYS, errno) {
TH_LOG("Kernel does not support seccomp syscall!");
}
EXPECT_EQ(-1, ret);
EXPECT_EQ(EFAULT, errno) {
TH_LOG("Failed to detect that a known-good filter flag (0x%X) is supported!",
flag);
}
all_flags |= flag;
}
/*
* Test detection of all known-good filter flags combined. But
* for the exclusive flags we need to mask them out and try them
* individually for the "all flags" testing.
*/
exclusive_mask = 0;
for (i = 0; i < ARRAY_SIZE(exclusive); i++)
exclusive_mask |= exclusive[i];
for (i = 0; i < ARRAY_SIZE(exclusive); i++) {
flag = all_flags & ~exclusive_mask;
flag |= exclusive[i];
ret = seccomp(SECCOMP_SET_MODE_FILTER, flag, NULL);
EXPECT_EQ(-1, ret);
EXPECT_EQ(EFAULT, errno) {
TH_LOG("Failed to detect that all known-good filter flags (0x%X) are supported!",
flag);
}
}
	/* Test detection of an unknown filter flag, without exclusives. */
flag = -1;
flag &= ~exclusive_mask;
ret = seccomp(SECCOMP_SET_MODE_FILTER, flag, NULL);
EXPECT_EQ(-1, ret);
EXPECT_EQ(EINVAL, errno) {
TH_LOG("Failed to detect that an unknown filter flag (0x%X) is unsupported!",
flag);
}
/*
* Test detection of an unknown filter flag that may simply need to be
	 * added to this test.
*/
flag = flags[ARRAY_SIZE(flags) - 1] << 1;
ret = seccomp(SECCOMP_SET_MODE_FILTER, flag, NULL);
EXPECT_EQ(-1, ret);
EXPECT_EQ(EINVAL, errno) {
TH_LOG("Failed to detect that an unknown filter flag (0x%X) is unsupported! Does a new flag need to be added to this test?",
flag);
}
}
TEST(TSYNC_first)
{
struct sock_filter filter[] = {
BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
};
struct sock_fprog prog = {
.len = (unsigned short)ARRAY_SIZE(filter),
.filter = filter,
};
long ret;
ret = prctl(PR_SET_NO_NEW_PRIVS, 1, NULL, 0, 0);
ASSERT_EQ(0, ret) {
TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
}
ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC,
&prog);
ASSERT_NE(ENOSYS, errno) {
TH_LOG("Kernel does not support seccomp syscall!");
}
EXPECT_EQ(0, ret) {
TH_LOG("Could not install initial filter with TSYNC!");
}
}
#define TSYNC_SIBLINGS 2
struct tsync_sibling {
pthread_t tid;
pid_t system_tid;
sem_t *started;
pthread_cond_t *cond;
pthread_mutex_t *mutex;
int diverge;
int num_waits;
struct sock_fprog *prog;
struct __test_metadata *metadata;
};
/*
* To avoid joining joined threads (which is not allowed by Bionic),
* make sure we both successfully join and clear the tid to skip a
* later join attempt during fixture teardown. Any remaining threads
* will be directly killed during teardown.
*/
#define PTHREAD_JOIN(tid, status) \
do { \
int _rc = pthread_join(tid, status); \
if (_rc) { \
TH_LOG("pthread_join of tid %u failed: %d\n", \
(unsigned int)tid, _rc); \
} else { \
tid = 0; \
} \
} while (0)
FIXTURE(TSYNC) {
struct sock_fprog root_prog, apply_prog;
struct tsync_sibling sibling[TSYNC_SIBLINGS];
sem_t started;
pthread_cond_t cond;
pthread_mutex_t mutex;
int sibling_count;
};
FIXTURE_SETUP(TSYNC)
{
struct sock_filter root_filter[] = {
BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
};
struct sock_filter apply_filter[] = {
BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
offsetof(struct seccomp_data, nr)),
BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_read, 0, 1),
BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL),
BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
};
memset(&self->root_prog, 0, sizeof(self->root_prog));
memset(&self->apply_prog, 0, sizeof(self->apply_prog));
memset(&self->sibling, 0, sizeof(self->sibling));
self->root_prog.filter = malloc(sizeof(root_filter));
ASSERT_NE(NULL, self->root_prog.filter);
memcpy(self->root_prog.filter, &root_filter, sizeof(root_filter));
self->root_prog.len = (unsigned short)ARRAY_SIZE(root_filter);
self->apply_prog.filter = malloc(sizeof(apply_filter));
ASSERT_NE(NULL, self->apply_prog.filter);
memcpy(self->apply_prog.filter, &apply_filter, sizeof(apply_filter));
self->apply_prog.len = (unsigned short)ARRAY_SIZE(apply_filter);
self->sibling_count = 0;
pthread_mutex_init(&self->mutex, NULL);
pthread_cond_init(&self->cond, NULL);
sem_init(&self->started, 0, 0);
self->sibling[0].tid = 0;
self->sibling[0].cond = &self->cond;
self->sibling[0].started = &self->started;
self->sibling[0].mutex = &self->mutex;
self->sibling[0].diverge = 0;
self->sibling[0].num_waits = 1;
self->sibling[0].prog = &self->root_prog;
self->sibling[0].metadata = _metadata;
self->sibling[1].tid = 0;
self->sibling[1].cond = &self->cond;
self->sibling[1].started = &self->started;
self->sibling[1].mutex = &self->mutex;
self->sibling[1].diverge = 0;
self->sibling[1].prog = &self->root_prog;
self->sibling[1].num_waits = 1;
self->sibling[1].metadata = _metadata;
}
FIXTURE_TEARDOWN(TSYNC)
{
int sib = 0;
if (self->root_prog.filter)
free(self->root_prog.filter);
if (self->apply_prog.filter)
free(self->apply_prog.filter);
for ( ; sib < self->sibling_count; ++sib) {
struct tsync_sibling *s = &self->sibling[sib];
if (!s->tid)
continue;
/*
* If a thread is still running, it may be stuck, so hit
* it over the head really hard.
*/
pthread_kill(s->tid, 9);
}
pthread_mutex_destroy(&self->mutex);
pthread_cond_destroy(&self->cond);
sem_destroy(&self->started);
}
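/*
 * Thread body for the TSYNC siblings: optionally diverges by re-applying
 * the root filter, waits for the test to broadcast, then verifies
 * NO_NEW_PRIVS and attempts a read() that an applied filter may kill.
 */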
void *tsync_sibling(void *data)
{
long ret = 0;
struct tsync_sibling *me = data;
me->system_tid = syscall(__NR_gettid);
pthread_mutex_lock(me->mutex);
if (me->diverge) {
/* Just re-apply the root prog to fork the tree */
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER,
me->prog, 0, 0);
}
sem_post(me->started);
/* Return outside of started so parent notices failures. */
if (ret) {
pthread_mutex_unlock(me->mutex);
return (void *)SIBLING_EXIT_FAILURE;
}
do {
pthread_cond_wait(me->cond, me->mutex);
me->num_waits = me->num_waits - 1;
} while (me->num_waits);
pthread_mutex_unlock(me->mutex);
ret = prctl(PR_GET_NO_NEW_PRIVS, 0, 0, 0, 0);
if (!ret)
return (void *)SIBLING_EXIT_NEWPRIVS;
read(-1, NULL, 0);
return (void *)SIBLING_EXIT_UNKILLED;
}
void tsync_start_sibling(struct tsync_sibling *sibling)
{
pthread_create(&sibling->tid, NULL, tsync_sibling, (void *)sibling);
}
TEST_F(TSYNC, siblings_fail_prctl)
{
long ret;
void *status;
struct sock_filter filter[] = {
BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
offsetof(struct seccomp_data, nr)),
BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_prctl, 0, 1),
BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ERRNO | EINVAL),
BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
};
struct sock_fprog prog = {
.len = (unsigned short)ARRAY_SIZE(filter),
.filter = filter,
};
ASSERT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) {
TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
}
/* Check prctl failure detection by requesting sib 0 diverge. */
ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, &prog);
ASSERT_NE(ENOSYS, errno) {
TH_LOG("Kernel does not support seccomp syscall!");
}
ASSERT_EQ(0, ret) {
TH_LOG("setting filter failed");
}
self->sibling[0].diverge = 1;
tsync_start_sibling(&self->sibling[0]);
tsync_start_sibling(&self->sibling[1]);
while (self->sibling_count < TSYNC_SIBLINGS) {
sem_wait(&self->started);
self->sibling_count++;
}
	/* Signal the threads to clean up. */
pthread_mutex_lock(&self->mutex);
ASSERT_EQ(0, pthread_cond_broadcast(&self->cond)) {
TH_LOG("cond broadcast non-zero");
}
pthread_mutex_unlock(&self->mutex);
/* Ensure diverging sibling failed to call prctl. */
PTHREAD_JOIN(self->sibling[0].tid, &status);
EXPECT_EQ(SIBLING_EXIT_FAILURE, (long)status);
PTHREAD_JOIN(self->sibling[1].tid, &status);
EXPECT_EQ(SIBLING_EXIT_UNKILLED, (long)status);
}
TEST_F(TSYNC, two_siblings_with_ancestor)
{
long ret;
void *status;
ASSERT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) {
TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
}
ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, &self->root_prog);
ASSERT_NE(ENOSYS, errno) {
TH_LOG("Kernel does not support seccomp syscall!");
}
ASSERT_EQ(0, ret) {
TH_LOG("Kernel does not support SECCOMP_SET_MODE_FILTER!");
}
tsync_start_sibling(&self->sibling[0]);
tsync_start_sibling(&self->sibling[1]);
while (self->sibling_count < TSYNC_SIBLINGS) {
sem_wait(&self->started);
self->sibling_count++;
}
ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC,
&self->apply_prog);
ASSERT_EQ(0, ret) {
TH_LOG("Could install filter on all threads!");
}
/* Tell the siblings to test the policy */
pthread_mutex_lock(&self->mutex);
ASSERT_EQ(0, pthread_cond_broadcast(&self->cond)) {
TH_LOG("cond broadcast non-zero");
}
pthread_mutex_unlock(&self->mutex);
/* Ensure they are both killed and don't exit cleanly. */
PTHREAD_JOIN(self->sibling[0].tid, &status);
EXPECT_EQ(0x0, (long)status);
PTHREAD_JOIN(self->sibling[1].tid, &status);
EXPECT_EQ(0x0, (long)status);
}
TEST_F(TSYNC, two_sibling_want_nnp)
{
void *status;
/* start siblings before any prctl() operations */
tsync_start_sibling(&self->sibling[0]);
tsync_start_sibling(&self->sibling[1]);
while (self->sibling_count < TSYNC_SIBLINGS) {
sem_wait(&self->started);
self->sibling_count++;
}
/* Tell the siblings to test no policy */
pthread_mutex_lock(&self->mutex);
ASSERT_EQ(0, pthread_cond_broadcast(&self->cond)) {
TH_LOG("cond broadcast non-zero");
}
pthread_mutex_unlock(&self->mutex);
/* Ensure they are both upset about lacking nnp. */
PTHREAD_JOIN(self->sibling[0].tid, &status);
EXPECT_EQ(SIBLING_EXIT_NEWPRIVS, (long)status);
PTHREAD_JOIN(self->sibling[1].tid, &status);
EXPECT_EQ(SIBLING_EXIT_NEWPRIVS, (long)status);
}
TEST_F(TSYNC, two_siblings_with_no_filter)
{
long ret;
void *status;
/* start siblings before any prctl() operations */
tsync_start_sibling(&self->sibling[0]);
tsync_start_sibling(&self->sibling[1]);
while (self->sibling_count < TSYNC_SIBLINGS) {
sem_wait(&self->started);
self->sibling_count++;
}
ASSERT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) {
TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
}
ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC,
&self->apply_prog);
ASSERT_NE(ENOSYS, errno) {
TH_LOG("Kernel does not support seccomp syscall!");
}
ASSERT_EQ(0, ret) {
TH_LOG("Could install filter on all threads!");
}
/* Tell the siblings to test the policy */
pthread_mutex_lock(&self->mutex);
ASSERT_EQ(0, pthread_cond_broadcast(&self->cond)) {
TH_LOG("cond broadcast non-zero");
}
pthread_mutex_unlock(&self->mutex);
/* Ensure they are both killed and don't exit cleanly. */
PTHREAD_JOIN(self->sibling[0].tid, &status);
EXPECT_EQ(0x0, (long)status);
PTHREAD_JOIN(self->sibling[1].tid, &status);
EXPECT_EQ(0x0, (long)status);
}
TEST_F(TSYNC, two_siblings_with_one_divergence)
{
long ret;
void *status;
ASSERT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) {
TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
}
ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, &self->root_prog);
ASSERT_NE(ENOSYS, errno) {
TH_LOG("Kernel does not support seccomp syscall!");
}
ASSERT_EQ(0, ret) {
TH_LOG("Kernel does not support SECCOMP_SET_MODE_FILTER!");
}
self->sibling[0].diverge = 1;
tsync_start_sibling(&self->sibling[0]);
tsync_start_sibling(&self->sibling[1]);
while (self->sibling_count < TSYNC_SIBLINGS) {
sem_wait(&self->started);
self->sibling_count++;
}
ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC,
&self->apply_prog);
ASSERT_EQ(self->sibling[0].system_tid, ret) {
TH_LOG("Did not fail on diverged sibling.");
}
/* Wake the threads */
pthread_mutex_lock(&self->mutex);
ASSERT_EQ(0, pthread_cond_broadcast(&self->cond)) {
TH_LOG("cond broadcast non-zero");
}
pthread_mutex_unlock(&self->mutex);
/* Ensure they are both unkilled. */
PTHREAD_JOIN(self->sibling[0].tid, &status);
EXPECT_EQ(SIBLING_EXIT_UNKILLED, (long)status);
PTHREAD_JOIN(self->sibling[1].tid, &status);
EXPECT_EQ(SIBLING_EXIT_UNKILLED, (long)status);
}
TEST_F(TSYNC, two_siblings_with_one_divergence_no_tid_in_err)
{
long ret, flags;
void *status;
ASSERT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) {
TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
}
ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, &self->root_prog);
ASSERT_NE(ENOSYS, errno) {
TH_LOG("Kernel does not support seccomp syscall!");
}
ASSERT_EQ(0, ret) {
TH_LOG("Kernel does not support SECCOMP_SET_MODE_FILTER!");
}
self->sibling[0].diverge = 1;
tsync_start_sibling(&self->sibling[0]);
tsync_start_sibling(&self->sibling[1]);
while (self->sibling_count < TSYNC_SIBLINGS) {
sem_wait(&self->started);
self->sibling_count++;
}
flags = SECCOMP_FILTER_FLAG_TSYNC | \
SECCOMP_FILTER_FLAG_TSYNC_ESRCH;
ret = seccomp(SECCOMP_SET_MODE_FILTER, flags, &self->apply_prog);
ASSERT_EQ(ESRCH, errno) {
TH_LOG("Did not return ESRCH for diverged sibling.");
}
ASSERT_EQ(-1, ret) {
TH_LOG("Did not fail on diverged sibling.");
}
/* Wake the threads */
pthread_mutex_lock(&self->mutex);
ASSERT_EQ(0, pthread_cond_broadcast(&self->cond)) {
TH_LOG("cond broadcast non-zero");
}
pthread_mutex_unlock(&self->mutex);
/* Ensure they are both unkilled. */
PTHREAD_JOIN(self->sibling[0].tid, &status);
EXPECT_EQ(SIBLING_EXIT_UNKILLED, (long)status);
PTHREAD_JOIN(self->sibling[1].tid, &status);
EXPECT_EQ(SIBLING_EXIT_UNKILLED, (long)status);
}
TEST_F(TSYNC, two_siblings_not_under_filter)
{
long ret, sib;
void *status;
struct timespec delay = { .tv_nsec = 100000000 };
ASSERT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) {
TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
}
/*
	 * Sibling 0 will have its own (diverged) seccomp policy and
	 * sibling 1 will not be under seccomp at all. Sibling 1 can be
	 * brought under seccomp via TSYNC later, but sibling 0's
	 * divergence will cause the first TSYNC attempt to fail.
*/
self->sibling[0].diverge = 1;
tsync_start_sibling(&self->sibling[0]);
tsync_start_sibling(&self->sibling[1]);
while (self->sibling_count < TSYNC_SIBLINGS) {
sem_wait(&self->started);
self->sibling_count++;
}
ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, &self->root_prog);
ASSERT_NE(ENOSYS, errno) {
TH_LOG("Kernel does not support seccomp syscall!");
}
ASSERT_EQ(0, ret) {
TH_LOG("Kernel does not support SECCOMP_SET_MODE_FILTER!");
}
ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC,
&self->apply_prog);
ASSERT_EQ(ret, self->sibling[0].system_tid) {
TH_LOG("Did not fail on diverged sibling.");
}
sib = 1;
if (ret == self->sibling[0].system_tid)
sib = 0;
pthread_mutex_lock(&self->mutex);
	/* Increment the other sibling's num_waits so we can clean up
	 * the one we just saw.
	 */
self->sibling[!sib].num_waits += 1;
	/* Signal the thread to clean up. */
ASSERT_EQ(0, pthread_cond_broadcast(&self->cond)) {
TH_LOG("cond broadcast non-zero");
}
pthread_mutex_unlock(&self->mutex);
PTHREAD_JOIN(self->sibling[sib].tid, &status);
EXPECT_EQ(SIBLING_EXIT_UNKILLED, (long)status);
/* Poll for actual task death. pthread_join doesn't guarantee it. */
while (!kill(self->sibling[sib].system_tid, 0))
nanosleep(&delay, NULL);
/* Switch to the remaining sibling */
sib = !sib;
ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC,
&self->apply_prog);
ASSERT_EQ(0, ret) {
TH_LOG("Expected the remaining sibling to sync");
};
pthread_mutex_lock(&self->mutex);
	/* If the remaining sibling didn't have a chance to wake up during
	 * the first broadcast, manually reduce its num_waits now.
	 */
if (self->sibling[sib].num_waits > 1)
self->sibling[sib].num_waits = 1;
ASSERT_EQ(0, pthread_cond_broadcast(&self->cond)) {
TH_LOG("cond broadcast non-zero");
}
pthread_mutex_unlock(&self->mutex);
PTHREAD_JOIN(self->sibling[sib].tid, &status);
EXPECT_EQ(0, (long)status);
/* Poll for actual task death. pthread_join doesn't guarantee it. */
while (!kill(self->sibling[sib].system_tid, 0))
nanosleep(&delay, NULL);
ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC,
&self->apply_prog);
ASSERT_EQ(0, ret); /* just us chickens */
}
/* Make sure restarted syscalls are seen directly as "restart_syscall". */
TEST(syscall_restart)
{
long ret;
unsigned long msg;
pid_t child_pid;
int pipefd[2];
int status;
siginfo_t info = { };
struct sock_filter filter[] = {
BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
offsetof(struct seccomp_data, nr)),
#ifdef __NR_sigreturn
BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_sigreturn, 7, 0),
#endif
BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_read, 6, 0),
BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_exit, 5, 0),
BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_rt_sigreturn, 4, 0),
BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_nanosleep, 5, 0),
BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_clock_nanosleep, 4, 0),
BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_restart_syscall, 4, 0),
/* Allow __NR_write for easy logging. */
BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_write, 0, 1),
BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL),
/* The nanosleep jump target. */
BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE|0x100),
/* The restart_syscall jump target. */
BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE|0x200),
};
struct sock_fprog prog = {
.len = (unsigned short)ARRAY_SIZE(filter),
.filter = filter,
};
#if defined(__arm__)
struct utsname utsbuf;
#endif
ASSERT_EQ(0, pipe(pipefd));
child_pid = fork();
ASSERT_LE(0, child_pid);
if (child_pid == 0) {
/* Child uses EXPECT not ASSERT to deliver status correctly. */
char buf = ' ';
struct timespec timeout = { };
/* Attach parent as tracer and stop. */
EXPECT_EQ(0, ptrace(PTRACE_TRACEME));
EXPECT_EQ(0, raise(SIGSTOP));
EXPECT_EQ(0, close(pipefd[1]));
EXPECT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) {
TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
}
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0);
EXPECT_EQ(0, ret) {
TH_LOG("Failed to install filter!");
}
EXPECT_EQ(1, read(pipefd[0], &buf, 1)) {
TH_LOG("Failed to read() sync from parent");
}
EXPECT_EQ('.', buf) {
TH_LOG("Failed to get sync data from read()");
}
/* Start nanosleep to be interrupted. */
timeout.tv_sec = 1;
errno = 0;
EXPECT_EQ(0, nanosleep(&timeout, NULL)) {
TH_LOG("Call to nanosleep() failed (errno %d: %s)",
errno, strerror(errno));
}
/* Read final sync from parent. */
EXPECT_EQ(1, read(pipefd[0], &buf, 1)) {
TH_LOG("Failed final read() from parent");
}
EXPECT_EQ('!', buf) {
TH_LOG("Failed to get final data from read()");
}
/* Directly report the status of our test harness results. */
syscall(__NR_exit, _metadata->passed ? EXIT_SUCCESS
: EXIT_FAILURE);
}
EXPECT_EQ(0, close(pipefd[0]));
/* Attach to child, setup options, and release. */
ASSERT_EQ(child_pid, waitpid(child_pid, &status, 0));
ASSERT_EQ(true, WIFSTOPPED(status));
ASSERT_EQ(0, ptrace(PTRACE_SETOPTIONS, child_pid, NULL,
PTRACE_O_TRACESECCOMP));
ASSERT_EQ(0, ptrace(PTRACE_CONT, child_pid, NULL, 0));
ASSERT_EQ(1, write(pipefd[1], ".", 1));
/* Wait for nanosleep() to start. */
ASSERT_EQ(child_pid, waitpid(child_pid, &status, 0));
ASSERT_EQ(true, WIFSTOPPED(status));
ASSERT_EQ(SIGTRAP, WSTOPSIG(status));
ASSERT_EQ(PTRACE_EVENT_SECCOMP, (status >> 16));
ASSERT_EQ(0, ptrace(PTRACE_GETEVENTMSG, child_pid, NULL, &msg));
ASSERT_EQ(0x100, msg);
ret = get_syscall(_metadata, child_pid);
EXPECT_TRUE(ret == __NR_nanosleep || ret == __NR_clock_nanosleep);
/* Might as well check siginfo for sanity while we're here. */
ASSERT_EQ(0, ptrace(PTRACE_GETSIGINFO, child_pid, NULL, &info));
ASSERT_EQ(SIGTRAP, info.si_signo);
ASSERT_EQ(SIGTRAP | (PTRACE_EVENT_SECCOMP << 8), info.si_code);
EXPECT_EQ(0, info.si_errno);
EXPECT_EQ(getuid(), info.si_uid);
/* Verify signal delivery came from child (seccomp-triggered). */
EXPECT_EQ(child_pid, info.si_pid);
/* Interrupt nanosleep with SIGSTOP (which we'll need to handle). */
ASSERT_EQ(0, kill(child_pid, SIGSTOP));
ASSERT_EQ(0, ptrace(PTRACE_CONT, child_pid, NULL, 0));
ASSERT_EQ(child_pid, waitpid(child_pid, &status, 0));
ASSERT_EQ(true, WIFSTOPPED(status));
ASSERT_EQ(SIGSTOP, WSTOPSIG(status));
ASSERT_EQ(0, ptrace(PTRACE_GETSIGINFO, child_pid, NULL, &info));
/*
* There is no siginfo on SIGSTOP any more, so we can't verify
* signal delivery came from parent now (getpid() == info.si_pid).
* https://lkml.kernel.org/r/CAGXu5jJaZAOzP1qFz66tYrtbuywqb+UN2SOA1VLHpCCOiYvYeg@mail.gmail.com
* At least verify the SIGSTOP via PTRACE_GETSIGINFO.
*/
EXPECT_EQ(SIGSTOP, info.si_signo);
/* Restart nanosleep with SIGCONT, which triggers restart_syscall. */
ASSERT_EQ(0, kill(child_pid, SIGCONT));
ASSERT_EQ(0, ptrace(PTRACE_CONT, child_pid, NULL, 0));
ASSERT_EQ(child_pid, waitpid(child_pid, &status, 0));
ASSERT_EQ(true, WIFSTOPPED(status));
ASSERT_EQ(SIGCONT, WSTOPSIG(status));
ASSERT_EQ(0, ptrace(PTRACE_CONT, child_pid, NULL, 0));
/* Wait for restart_syscall() to start. */
ASSERT_EQ(child_pid, waitpid(child_pid, &status, 0));
ASSERT_EQ(true, WIFSTOPPED(status));
ASSERT_EQ(SIGTRAP, WSTOPSIG(status));
ASSERT_EQ(PTRACE_EVENT_SECCOMP, (status >> 16));
ASSERT_EQ(0, ptrace(PTRACE_GETEVENTMSG, child_pid, NULL, &msg));
ASSERT_EQ(0x200, msg);
ret = get_syscall(_metadata, child_pid);
#if defined(__arm__)
/*
* FIXME:
* - native ARM registers do NOT expose true syscall.
* - compat ARM registers on ARM64 DO expose true syscall.
*/
ASSERT_EQ(0, uname(&utsbuf));
if (strncmp(utsbuf.machine, "arm", 3) == 0) {
EXPECT_EQ(__NR_nanosleep, ret);
} else
#endif
{
EXPECT_EQ(__NR_restart_syscall, ret);
}
/* Write again to end test. */
ASSERT_EQ(0, ptrace(PTRACE_CONT, child_pid, NULL, 0));
ASSERT_EQ(1, write(pipefd[1], "!", 1));
EXPECT_EQ(0, close(pipefd[1]));
ASSERT_EQ(child_pid, waitpid(child_pid, &status, 0));
if (WIFSIGNALED(status) || WEXITSTATUS(status))
_metadata->passed = 0;
}
TEST_SIGNAL(filter_flag_log, SIGSYS)
{
struct sock_filter allow_filter[] = {
BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
};
struct sock_filter kill_filter[] = {
BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
offsetof(struct seccomp_data, nr)),
BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getpid, 0, 1),
BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL),
BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
};
struct sock_fprog allow_prog = {
.len = (unsigned short)ARRAY_SIZE(allow_filter),
.filter = allow_filter,
};
struct sock_fprog kill_prog = {
.len = (unsigned short)ARRAY_SIZE(kill_filter),
.filter = kill_filter,
};
long ret;
pid_t parent = getppid();
ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
ASSERT_EQ(0, ret);
/* Verify that the FILTER_FLAG_LOG flag isn't accepted in strict mode */
ret = seccomp(SECCOMP_SET_MODE_STRICT, SECCOMP_FILTER_FLAG_LOG,
&allow_prog);
ASSERT_NE(ENOSYS, errno) {
TH_LOG("Kernel does not support seccomp syscall!");
}
EXPECT_NE(0, ret) {
TH_LOG("Kernel accepted FILTER_FLAG_LOG flag in strict mode!");
}
EXPECT_EQ(EINVAL, errno) {
TH_LOG("Kernel returned unexpected errno for FILTER_FLAG_LOG flag in strict mode!");
}
/* Verify that a simple, permissive filter can be added with no flags */
ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, &allow_prog);
EXPECT_EQ(0, ret);
/* See if the same filter can be added with the FILTER_FLAG_LOG flag */
ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_LOG,
&allow_prog);
ASSERT_NE(EINVAL, errno) {
TH_LOG("Kernel does not support the FILTER_FLAG_LOG flag!");
}
EXPECT_EQ(0, ret);
/* Ensure that the kill filter works with the FILTER_FLAG_LOG flag */
ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_LOG,
&kill_prog);
EXPECT_EQ(0, ret);
EXPECT_EQ(parent, syscall(__NR_getppid));
/* getpid() should never return. */
EXPECT_EQ(0, syscall(__NR_getpid));
}
TEST(get_action_avail)
{
__u32 actions[] = { SECCOMP_RET_KILL_THREAD, SECCOMP_RET_TRAP,
SECCOMP_RET_ERRNO, SECCOMP_RET_TRACE,
SECCOMP_RET_LOG, SECCOMP_RET_ALLOW };
__u32 unknown_action = 0x10000000U;
int i;
long ret;
ret = seccomp(SECCOMP_GET_ACTION_AVAIL, 0, &actions[0]);
ASSERT_NE(ENOSYS, errno) {
TH_LOG("Kernel does not support seccomp syscall!");
}
ASSERT_NE(EINVAL, errno) {
TH_LOG("Kernel does not support SECCOMP_GET_ACTION_AVAIL operation!");
}
EXPECT_EQ(ret, 0);
for (i = 0; i < ARRAY_SIZE(actions); i++) {
ret = seccomp(SECCOMP_GET_ACTION_AVAIL, 0, &actions[i]);
EXPECT_EQ(ret, 0) {
TH_LOG("Expected action (0x%X) not available!",
actions[i]);
}
}
/* Check that an unknown action is handled properly (EOPNOTSUPP) */
ret = seccomp(SECCOMP_GET_ACTION_AVAIL, 0, &unknown_action);
EXPECT_EQ(ret, -1);
EXPECT_EQ(errno, EOPNOTSUPP);
}
TEST(get_metadata)
{
pid_t pid;
int pipefd[2];
char buf;
struct seccomp_metadata md;
long ret;
/* Only real root can get metadata. */
if (geteuid()) {
SKIP(return, "get_metadata requires real root");
return;
}
ASSERT_EQ(0, pipe(pipefd));
pid = fork();
ASSERT_GE(pid, 0);
if (pid == 0) {
struct sock_filter filter[] = {
BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
};
struct sock_fprog prog = {
.len = (unsigned short)ARRAY_SIZE(filter),
.filter = filter,
};
/* one with log, one without */
EXPECT_EQ(0, seccomp(SECCOMP_SET_MODE_FILTER,
SECCOMP_FILTER_FLAG_LOG, &prog));
EXPECT_EQ(0, seccomp(SECCOMP_SET_MODE_FILTER, 0, &prog));
EXPECT_EQ(0, close(pipefd[0]));
ASSERT_EQ(1, write(pipefd[1], "1", 1));
ASSERT_EQ(0, close(pipefd[1]));
while (1)
sleep(100);
}
ASSERT_EQ(0, close(pipefd[1]));
ASSERT_EQ(1, read(pipefd[0], &buf, 1));
ASSERT_EQ(0, ptrace(PTRACE_ATTACH, pid));
ASSERT_EQ(pid, waitpid(pid, NULL, 0));
/* Past here must not use ASSERT or child process is never killed. */
md.filter_off = 0;
errno = 0;
ret = ptrace(PTRACE_SECCOMP_GET_METADATA, pid, sizeof(md), &md);
EXPECT_EQ(sizeof(md), ret) {
if (errno == EINVAL)
SKIP(goto skip, "Kernel does not support PTRACE_SECCOMP_GET_METADATA (missing CONFIG_CHECKPOINT_RESTORE?)");
}
EXPECT_EQ(md.flags, SECCOMP_FILTER_FLAG_LOG);
EXPECT_EQ(md.filter_off, 0);
md.filter_off = 1;
ret = ptrace(PTRACE_SECCOMP_GET_METADATA, pid, sizeof(md), &md);
EXPECT_EQ(sizeof(md), ret);
EXPECT_EQ(md.flags, 0);
EXPECT_EQ(md.filter_off, 1);
skip:
ASSERT_EQ(0, kill(pid, SIGKILL));
}
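/*
 * Install a filter that returns SECCOMP_RET_USER_NOTIF for the given
 * syscall and allows everything else; returns the seccomp() result
 * (the listener fd when NEW_LISTENER is requested).
 */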
static int user_notif_syscall(int nr, unsigned int flags)
{
struct sock_filter filter[] = {
BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
offsetof(struct seccomp_data, nr)),
BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, nr, 0, 1),
BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_USER_NOTIF),
BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
};
struct sock_fprog prog = {
.len = (unsigned short)ARRAY_SIZE(filter),
.filter = filter,
};
return seccomp(SECCOMP_SET_MODE_FILTER, flags, &prog);
}
#define USER_NOTIF_MAGIC INT_MAX
TEST(user_notification_basic)
{
pid_t pid;
long ret;
int status, listener;
struct seccomp_notif req = {};
struct seccomp_notif_resp resp = {};
struct pollfd pollfd;
struct sock_filter filter[] = {
BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
};
struct sock_fprog prog = {
.len = (unsigned short)ARRAY_SIZE(filter),
.filter = filter,
};
ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
ASSERT_EQ(0, ret) {
TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
}
pid = fork();
ASSERT_GE(pid, 0);
/* Check that we get -ENOSYS with no listener attached */
if (pid == 0) {
if (user_notif_syscall(__NR_getppid, 0) < 0)
exit(1);
ret = syscall(__NR_getppid);
exit(ret >= 0 || errno != ENOSYS);
}
EXPECT_EQ(waitpid(pid, &status, 0), pid);
EXPECT_EQ(true, WIFEXITED(status));
EXPECT_EQ(0, WEXITSTATUS(status));
/* Add some no-op filters for grins. */
EXPECT_EQ(seccomp(SECCOMP_SET_MODE_FILTER, 0, &prog), 0);
EXPECT_EQ(seccomp(SECCOMP_SET_MODE_FILTER, 0, &prog), 0);
EXPECT_EQ(seccomp(SECCOMP_SET_MODE_FILTER, 0, &prog), 0);
EXPECT_EQ(seccomp(SECCOMP_SET_MODE_FILTER, 0, &prog), 0);
/* Check that the basic notification machinery works */
listener = user_notif_syscall(__NR_getppid,
SECCOMP_FILTER_FLAG_NEW_LISTENER);
ASSERT_GE(listener, 0);
/* Installing a second listener in the chain should EBUSY */
EXPECT_EQ(user_notif_syscall(__NR_getppid,
SECCOMP_FILTER_FLAG_NEW_LISTENER),
-1);
EXPECT_EQ(errno, EBUSY);
pid = fork();
ASSERT_GE(pid, 0);
if (pid == 0) {
ret = syscall(__NR_getppid);
exit(ret != USER_NOTIF_MAGIC);
}
pollfd.fd = listener;
pollfd.events = POLLIN | POLLOUT;
EXPECT_GT(poll(&pollfd, 1, -1), 0);
EXPECT_EQ(pollfd.revents, POLLIN);
/* Test that we can't pass garbage to the kernel. */
memset(&req, 0, sizeof(req));
req.pid = -1;
errno = 0;
ret = ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req);
EXPECT_EQ(-1, ret);
EXPECT_EQ(EINVAL, errno);
if (ret) {
req.pid = 0;
EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0);
}
pollfd.fd = listener;
pollfd.events = POLLIN | POLLOUT;
EXPECT_GT(poll(&pollfd, 1, -1), 0);
EXPECT_EQ(pollfd.revents, POLLOUT);
EXPECT_EQ(req.data.nr, __NR_getppid);
resp.id = req.id;
resp.error = 0;
resp.val = USER_NOTIF_MAGIC;
	/* Check that the kernel requires resp.flags == 0 */
resp.flags = 1;
EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_SEND, &resp), -1);
EXPECT_EQ(errno, EINVAL);
resp.flags = 0;
EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_SEND, &resp), 0);
EXPECT_EQ(waitpid(pid, &status, 0), pid);
EXPECT_EQ(true, WIFEXITED(status));
EXPECT_EQ(0, WEXITSTATUS(status));
}
TEST(user_notification_with_tsync)
{
int ret;
unsigned int flags;
ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
ASSERT_EQ(0, ret) {
TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
}
/* these were exclusive */
flags = SECCOMP_FILTER_FLAG_NEW_LISTENER |
SECCOMP_FILTER_FLAG_TSYNC;
ASSERT_EQ(-1, user_notif_syscall(__NR_getppid, flags));
ASSERT_EQ(EINVAL, errno);
/* but now they're not */
flags |= SECCOMP_FILTER_FLAG_TSYNC_ESRCH;
ret = user_notif_syscall(__NR_getppid, flags);
close(ret);
ASSERT_LE(0, ret);
}
TEST(user_notification_kill_in_middle)
{
pid_t pid;
long ret;
int listener;
struct seccomp_notif req = {};
struct seccomp_notif_resp resp = {};
ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
ASSERT_EQ(0, ret) {
TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
}
listener = user_notif_syscall(__NR_getppid,
SECCOMP_FILTER_FLAG_NEW_LISTENER);
ASSERT_GE(listener, 0);
/*
* Check that nothing bad happens when we kill the task in the middle
* of a syscall.
*/
pid = fork();
ASSERT_GE(pid, 0);
if (pid == 0) {
ret = syscall(__NR_getppid);
exit(ret != USER_NOTIF_MAGIC);
}
EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0);
EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_ID_VALID, &req.id), 0);
EXPECT_EQ(kill(pid, SIGKILL), 0);
EXPECT_EQ(waitpid(pid, NULL, 0), pid);
EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_ID_VALID, &req.id), -1);
resp.id = req.id;
ret = ioctl(listener, SECCOMP_IOCTL_NOTIF_SEND, &resp);
EXPECT_EQ(ret, -1);
EXPECT_EQ(errno, ENOENT);
}
static int handled = -1;
static void signal_handler(int signal)
{
if (write(handled, "c", 1) != 1)
perror("write from signal");
}
TEST(user_notification_signal)
{
pid_t pid;
long ret;
int status, listener, sk_pair[2];
struct seccomp_notif req = {};
struct seccomp_notif_resp resp = {};
char c;
ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
ASSERT_EQ(0, ret) {
TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
}
ASSERT_EQ(socketpair(PF_LOCAL, SOCK_SEQPACKET, 0, sk_pair), 0);
listener = user_notif_syscall(__NR_gettid,
SECCOMP_FILTER_FLAG_NEW_LISTENER);
ASSERT_GE(listener, 0);
pid = fork();
ASSERT_GE(pid, 0);
if (pid == 0) {
close(sk_pair[0]);
handled = sk_pair[1];
if (signal(SIGUSR1, signal_handler) == SIG_ERR) {
perror("signal");
exit(1);
}
/*
* ERESTARTSYS behavior is a bit hard to test, because we need
* to rely on a signal that has not yet been handled. Let's at
* least check that the error code gets propagated through, and
* hope that it doesn't break when there is actually a signal :)
*/
ret = syscall(__NR_gettid);
exit(!(ret == -1 && errno == 512));
}
close(sk_pair[1]);
memset(&req, 0, sizeof(req));
EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0);
EXPECT_EQ(kill(pid, SIGUSR1), 0);
/*
* Make sure the signal really is delivered, which means we're not
* stuck in the user notification code any more and the notification
* should be dead.
*/
EXPECT_EQ(read(sk_pair[0], &c, 1), 1);
resp.id = req.id;
resp.error = -EPERM;
resp.val = 0;
EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_SEND, &resp), -1);
EXPECT_EQ(errno, ENOENT);
memset(&req, 0, sizeof(req));
EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0);
resp.id = req.id;
resp.error = -512; /* -ERESTARTSYS */
resp.val = 0;
EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_SEND, &resp), 0);
EXPECT_EQ(waitpid(pid, &status, 0), pid);
EXPECT_EQ(true, WIFEXITED(status));
EXPECT_EQ(0, WEXITSTATUS(status));
}
TEST(user_notification_closed_listener)
{
pid_t pid;
long ret;
int status, listener;
ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
ASSERT_EQ(0, ret) {
TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
}
listener = user_notif_syscall(__NR_getppid,
SECCOMP_FILTER_FLAG_NEW_LISTENER);
ASSERT_GE(listener, 0);
/*
* Check that we get an ENOSYS when the listener is closed.
*/
pid = fork();
ASSERT_GE(pid, 0);
if (pid == 0) {
close(listener);
ret = syscall(__NR_getppid);
exit(ret != -1 && errno != ENOSYS);
}
close(listener);
EXPECT_EQ(waitpid(pid, &status, 0), pid);
EXPECT_EQ(true, WIFEXITED(status));
EXPECT_EQ(0, WEXITSTATUS(status));
}
/*
* Check that a pid in a child namespace still shows up as valid in ours.
*/
TEST(user_notification_child_pid_ns)
{
pid_t pid;
int status, listener;
struct seccomp_notif req = {};
struct seccomp_notif_resp resp = {};
ASSERT_EQ(unshare(CLONE_NEWUSER | CLONE_NEWPID), 0) {
if (errno == EINVAL)
SKIP(return, "kernel missing CLONE_NEWUSER support");
};
listener = user_notif_syscall(__NR_getppid,
SECCOMP_FILTER_FLAG_NEW_LISTENER);
ASSERT_GE(listener, 0);
pid = fork();
ASSERT_GE(pid, 0);
if (pid == 0)
exit(syscall(__NR_getppid) != USER_NOTIF_MAGIC);
EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0);
EXPECT_EQ(req.pid, pid);
resp.id = req.id;
resp.error = 0;
resp.val = USER_NOTIF_MAGIC;
EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_SEND, &resp), 0);
EXPECT_EQ(waitpid(pid, &status, 0), pid);
EXPECT_EQ(true, WIFEXITED(status));
EXPECT_EQ(0, WEXITSTATUS(status));
close(listener);
}
/*
* Check that a pid in a sibling (i.e. unrelated) namespace shows up as 0, i.e.
* invalid.
*/
TEST(user_notification_sibling_pid_ns)
{
pid_t pid, pid2;
int status, listener;
struct seccomp_notif req = {};
struct seccomp_notif_resp resp = {};
ASSERT_EQ(prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0), 0) {
TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
}
listener = user_notif_syscall(__NR_getppid,
SECCOMP_FILTER_FLAG_NEW_LISTENER);
ASSERT_GE(listener, 0);
pid = fork();
ASSERT_GE(pid, 0);
if (pid == 0) {
ASSERT_EQ(unshare(CLONE_NEWPID), 0);
pid2 = fork();
ASSERT_GE(pid2, 0);
if (pid2 == 0)
exit(syscall(__NR_getppid) != USER_NOTIF_MAGIC);
EXPECT_EQ(waitpid(pid2, &status, 0), pid2);
EXPECT_EQ(true, WIFEXITED(status));
EXPECT_EQ(0, WEXITSTATUS(status));
exit(WEXITSTATUS(status));
}
/* Create the sibling ns, and sibling in it. */
ASSERT_EQ(unshare(CLONE_NEWPID), 0) {
if (errno == EPERM)
SKIP(return, "CLONE_NEWPID requires CAP_SYS_ADMIN");
}
ASSERT_EQ(errno, 0);
pid2 = fork();
ASSERT_GE(pid2, 0);
if (pid2 == 0) {
ASSERT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0);
/*
* The pid should be 0, i.e. the task is in some namespace that
* we can't "see".
*/
EXPECT_EQ(req.pid, 0);
resp.id = req.id;
resp.error = 0;
resp.val = USER_NOTIF_MAGIC;
ASSERT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_SEND, &resp), 0);
exit(0);
}
close(listener);
EXPECT_EQ(waitpid(pid, &status, 0), pid);
EXPECT_EQ(true, WIFEXITED(status));
EXPECT_EQ(0, WEXITSTATUS(status));
EXPECT_EQ(waitpid(pid2, &status, 0), pid2);
EXPECT_EQ(true, WIFEXITED(status));
EXPECT_EQ(0, WEXITSTATUS(status));
}
TEST(user_notification_fault_recv)
{
pid_t pid;
int status, listener;
struct seccomp_notif req = {};
struct seccomp_notif_resp resp = {};
ASSERT_EQ(unshare(CLONE_NEWUSER), 0) {
if (errno == EINVAL)
SKIP(return, "kernel missing CLONE_NEWUSER support");
}
listener = user_notif_syscall(__NR_getppid,
SECCOMP_FILTER_FLAG_NEW_LISTENER);
ASSERT_GE(listener, 0);
pid = fork();
ASSERT_GE(pid, 0);
if (pid == 0)
exit(syscall(__NR_getppid) != USER_NOTIF_MAGIC);
/* Do a bad recv() */
EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, NULL), -1);
EXPECT_EQ(errno, EFAULT);
/* We should still be able to receive this notification, though. */
EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0);
EXPECT_EQ(req.pid, pid);
resp.id = req.id;
resp.error = 0;
resp.val = USER_NOTIF_MAGIC;
EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_SEND, &resp), 0);
EXPECT_EQ(waitpid(pid, &status, 0), pid);
EXPECT_EQ(true, WIFEXITED(status));
EXPECT_EQ(0, WEXITSTATUS(status));
}
TEST(seccomp_get_notif_sizes)
{
struct seccomp_notif_sizes sizes;
ASSERT_EQ(seccomp(SECCOMP_GET_NOTIF_SIZES, 0, &sizes), 0);
EXPECT_EQ(sizes.seccomp_notif, sizeof(struct seccomp_notif));
EXPECT_EQ(sizes.seccomp_notif_resp, sizeof(struct seccomp_notif_resp));
}
TEST(user_notification_continue)
{
pid_t pid;
long ret;
int status, listener;
struct seccomp_notif req = {};
struct seccomp_notif_resp resp = {};
struct pollfd pollfd;
ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
ASSERT_EQ(0, ret) {
TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
}
listener = user_notif_syscall(__NR_dup, SECCOMP_FILTER_FLAG_NEW_LISTENER);
ASSERT_GE(listener, 0);
pid = fork();
ASSERT_GE(pid, 0);
if (pid == 0) {
int dup_fd, pipe_fds[2];
pid_t self;
ASSERT_GE(pipe(pipe_fds), 0);
dup_fd = dup(pipe_fds[0]);
ASSERT_GE(dup_fd, 0);
EXPECT_NE(pipe_fds[0], dup_fd);
self = getpid();
ASSERT_EQ(filecmp(self, self, pipe_fds[0], dup_fd), 0);
exit(0);
}
pollfd.fd = listener;
pollfd.events = POLLIN | POLLOUT;
EXPECT_GT(poll(&pollfd, 1, -1), 0);
EXPECT_EQ(pollfd.revents, POLLIN);
EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0);
pollfd.fd = listener;
pollfd.events = POLLIN | POLLOUT;
EXPECT_GT(poll(&pollfd, 1, -1), 0);
EXPECT_EQ(pollfd.revents, POLLOUT);
EXPECT_EQ(req.data.nr, __NR_dup);
resp.id = req.id;
resp.flags = SECCOMP_USER_NOTIF_FLAG_CONTINUE;
/*
* Verify that setting SECCOMP_USER_NOTIF_FLAG_CONTINUE enforces other
* args be set to 0.
*/
resp.error = 0;
resp.val = USER_NOTIF_MAGIC;
EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_SEND, &resp), -1);
EXPECT_EQ(errno, EINVAL);
resp.error = USER_NOTIF_MAGIC;
resp.val = 0;
EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_SEND, &resp), -1);
EXPECT_EQ(errno, EINVAL);
resp.error = 0;
resp.val = 0;
EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_SEND, &resp), 0) {
if (errno == EINVAL)
SKIP(goto skip, "Kernel does not support SECCOMP_USER_NOTIF_FLAG_CONTINUE");
}
skip:
EXPECT_EQ(waitpid(pid, &status, 0), pid);
EXPECT_EQ(true, WIFEXITED(status));
EXPECT_EQ(0, WEXITSTATUS(status)) {
if (WEXITSTATUS(status) == 2) {
SKIP(return, "Kernel does not support kcmp() syscall");
return;
}
}
}
TEST(user_notification_filter_empty)
{
pid_t pid;
long ret;
int status;
struct pollfd pollfd;
struct __clone_args args = {
.flags = CLONE_FILES,
.exit_signal = SIGCHLD,
};
ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
ASSERT_EQ(0, ret) {
TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
}
if (__NR_clone3 < 0)
SKIP(return, "Test not built with clone3 support");
pid = sys_clone3(&args, sizeof(args));
ASSERT_GE(pid, 0);
if (pid == 0) {
int listener;
listener = user_notif_syscall(__NR_mknodat, SECCOMP_FILTER_FLAG_NEW_LISTENER);
if (listener < 0)
_exit(EXIT_FAILURE);
if (dup2(listener, 200) != 200)
_exit(EXIT_FAILURE);
close(listener);
_exit(EXIT_SUCCESS);
}
EXPECT_EQ(waitpid(pid, &status, 0), pid);
EXPECT_EQ(true, WIFEXITED(status));
EXPECT_EQ(0, WEXITSTATUS(status));
/*
* The seccomp filter has become unused so we should be notified once
* the kernel gets around to cleaning up task struct.
*/
pollfd.fd = 200;
pollfd.events = POLLHUP;
EXPECT_GT(poll(&pollfd, 1, 2000), 0);
EXPECT_GT((pollfd.revents & POLLHUP) ?: 0, 0);
}
static void *do_thread(void *data)
{
return NULL;
}
TEST(user_notification_filter_empty_threaded)
{
pid_t pid;
long ret;
int status;
struct pollfd pollfd;
struct __clone_args args = {
.flags = CLONE_FILES,
.exit_signal = SIGCHLD,
};
ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
ASSERT_EQ(0, ret) {
TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
}
if (__NR_clone3 < 0)
SKIP(return, "Test not built with clone3 support");
pid = sys_clone3(&args, sizeof(args));
ASSERT_GE(pid, 0);
if (pid == 0) {
pid_t pid1, pid2;
int listener, status;
pthread_t thread;
listener = user_notif_syscall(__NR_dup, SECCOMP_FILTER_FLAG_NEW_LISTENER);
if (listener < 0)
_exit(EXIT_FAILURE);
if (dup2(listener, 200) != 200)
_exit(EXIT_FAILURE);
close(listener);
pid1 = fork();
if (pid1 < 0)
_exit(EXIT_FAILURE);
if (pid1 == 0)
_exit(EXIT_SUCCESS);
pid2 = fork();
if (pid2 < 0)
_exit(EXIT_FAILURE);
if (pid2 == 0)
_exit(EXIT_SUCCESS);
if (pthread_create(&thread, NULL, do_thread, NULL) ||
pthread_join(thread, NULL))
_exit(EXIT_FAILURE);
if (pthread_create(&thread, NULL, do_thread, NULL) ||
pthread_join(thread, NULL))
_exit(EXIT_FAILURE);
if (waitpid(pid1, &status, 0) != pid1 || !WIFEXITED(status) ||
WEXITSTATUS(status))
_exit(EXIT_FAILURE);
if (waitpid(pid2, &status, 0) != pid2 || !WIFEXITED(status) ||
WEXITSTATUS(status))
_exit(EXIT_FAILURE);
exit(EXIT_SUCCESS);
}
EXPECT_EQ(waitpid(pid, &status, 0), pid);
EXPECT_EQ(true, WIFEXITED(status));
EXPECT_EQ(0, WEXITSTATUS(status));
/*
* The seccomp filter has become unused so we should be notified once
* the kernel gets around to cleaning up task struct.
*/
pollfd.fd = 200;
pollfd.events = POLLHUP;
EXPECT_GT(poll(&pollfd, 1, 2000), 0);
EXPECT_GT((pollfd.revents & POLLHUP) ?: 0, 0);
}
TEST(user_notification_addfd)
{
pid_t pid;
long ret;
int status, listener, memfd, fd, nextfd;
struct seccomp_notif_addfd addfd = {};
struct seccomp_notif_addfd_small small = {};
struct seccomp_notif_addfd_big big = {};
struct seccomp_notif req = {};
struct seccomp_notif_resp resp = {};
/* 100 ms */
struct timespec delay = { .tv_nsec = 100000000 };
/* There may be arbitrary already-open fds at test start. */
memfd = memfd_create("test", 0);
ASSERT_GE(memfd, 0);
nextfd = memfd + 1;
ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
ASSERT_EQ(0, ret) {
TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
}
/* fd: 4 */
/* Check that the basic notification machinery works */
listener = user_notif_syscall(__NR_getppid,
SECCOMP_FILTER_FLAG_NEW_LISTENER);
ASSERT_EQ(listener, nextfd++);
pid = fork();
ASSERT_GE(pid, 0);
if (pid == 0) {
/* fds will be added and this value is expected */
if (syscall(__NR_getppid) != USER_NOTIF_MAGIC)
exit(1);
/* Atomic addfd+send is received here. Check it is a valid fd */
if (fcntl(syscall(__NR_getppid), F_GETFD) == -1)
exit(1);
exit(syscall(__NR_getppid) != USER_NOTIF_MAGIC);
}
ASSERT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0);
addfd.srcfd = memfd;
addfd.newfd = 0;
addfd.id = req.id;
addfd.flags = 0x0;
/* Verify bad newfd_flags cannot be set */
addfd.newfd_flags = ~O_CLOEXEC;
EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_ADDFD, &addfd), -1);
EXPECT_EQ(errno, EINVAL);
addfd.newfd_flags = O_CLOEXEC;
/* Verify bad flags cannot be set */
addfd.flags = 0xff;
EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_ADDFD, &addfd), -1);
EXPECT_EQ(errno, EINVAL);
addfd.flags = 0;
/* Verify that remote_fd cannot be set without setting flags */
addfd.newfd = 1;
EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_ADDFD, &addfd), -1);
EXPECT_EQ(errno, EINVAL);
addfd.newfd = 0;
/* Verify small size cannot be set */
EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_ADDFD_SMALL, &small), -1);
EXPECT_EQ(errno, EINVAL);
/* Verify we can't send bits filled in unknown buffer area */
memset(&big, 0xAA, sizeof(big));
big.addfd = addfd;
EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_ADDFD_BIG, &big), -1);
EXPECT_EQ(errno, E2BIG);
/* Verify we can set an arbitrary remote fd */
fd = ioctl(listener, SECCOMP_IOCTL_NOTIF_ADDFD, &addfd);
EXPECT_EQ(fd, nextfd++);
EXPECT_EQ(filecmp(getpid(), pid, memfd, fd), 0);
/* Verify we can set an arbitrary remote fd with large size */
memset(&big, 0x0, sizeof(big));
big.addfd = addfd;
fd = ioctl(listener, SECCOMP_IOCTL_NOTIF_ADDFD_BIG, &big);
EXPECT_EQ(fd, nextfd++);
/* Verify we can set a specific remote fd */
addfd.newfd = 42;
addfd.flags = SECCOMP_ADDFD_FLAG_SETFD;
fd = ioctl(listener, SECCOMP_IOCTL_NOTIF_ADDFD, &addfd);
EXPECT_EQ(fd, 42);
EXPECT_EQ(filecmp(getpid(), pid, memfd, fd), 0);
/* Resume syscall */
resp.id = req.id;
resp.error = 0;
resp.val = USER_NOTIF_MAGIC;
EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_SEND, &resp), 0);
/*
* This sets the ID of the ADD FD to the last request plus 1. The
	 * notification ID increments by 1 per notification.
*/
addfd.id = req.id + 1;
/* This spins until the underlying notification is generated */
while (ioctl(listener, SECCOMP_IOCTL_NOTIF_ADDFD, &addfd) != -1 &&
errno != -EINPROGRESS)
nanosleep(&delay, NULL);
memset(&req, 0, sizeof(req));
ASSERT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0);
ASSERT_EQ(addfd.id, req.id);
/* Verify we can do an atomic addfd and send */
addfd.newfd = 0;
addfd.flags = SECCOMP_ADDFD_FLAG_SEND;
fd = ioctl(listener, SECCOMP_IOCTL_NOTIF_ADDFD, &addfd);
/*
* Child has earlier "low" fds and now 42, so we expect the next
* lowest available fd to be assigned here.
*/
EXPECT_EQ(fd, nextfd++);
ASSERT_EQ(filecmp(getpid(), pid, memfd, fd), 0);
/*
* This sets the ID of the ADD FD to the last request plus 1. The
	 * notification ID increments by 1 per notification.
*/
addfd.id = req.id + 1;
/* This spins until the underlying notification is generated */
while (ioctl(listener, SECCOMP_IOCTL_NOTIF_ADDFD, &addfd) != -1 &&
errno != -EINPROGRESS)
nanosleep(&delay, NULL);
memset(&req, 0, sizeof(req));
ASSERT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0);
ASSERT_EQ(addfd.id, req.id);
resp.id = req.id;
resp.error = 0;
resp.val = USER_NOTIF_MAGIC;
EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_SEND, &resp), 0);
/* Wait for child to finish. */
EXPECT_EQ(waitpid(pid, &status, 0), pid);
EXPECT_EQ(true, WIFEXITED(status));
EXPECT_EQ(0, WEXITSTATUS(status));
close(memfd);
}
TEST(user_notification_addfd_rlimit)
{
pid_t pid;
long ret;
int status, listener, memfd;
struct seccomp_notif_addfd addfd = {};
struct seccomp_notif req = {};
struct seccomp_notif_resp resp = {};
const struct rlimit lim = {
.rlim_cur = 0,
.rlim_max = 0,
};
memfd = memfd_create("test", 0);
ASSERT_GE(memfd, 0);
ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
ASSERT_EQ(0, ret) {
TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
}
/* Check that the basic notification machinery works */
listener = user_notif_syscall(__NR_getppid,
SECCOMP_FILTER_FLAG_NEW_LISTENER);
ASSERT_GE(listener, 0);
pid = fork();
ASSERT_GE(pid, 0);
if (pid == 0)
exit(syscall(__NR_getppid) != USER_NOTIF_MAGIC);
ASSERT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0);
ASSERT_EQ(prlimit(pid, RLIMIT_NOFILE, &lim, NULL), 0);
addfd.srcfd = memfd;
addfd.newfd_flags = O_CLOEXEC;
addfd.newfd = 0;
addfd.id = req.id;
addfd.flags = 0;
/* Should probably spot check /proc/sys/fs/file-nr */
EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_ADDFD, &addfd), -1);
EXPECT_EQ(errno, EMFILE);
addfd.flags = SECCOMP_ADDFD_FLAG_SEND;
EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_ADDFD, &addfd), -1);
EXPECT_EQ(errno, EMFILE);
addfd.newfd = 100;
addfd.flags = SECCOMP_ADDFD_FLAG_SETFD;
EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_ADDFD, &addfd), -1);
EXPECT_EQ(errno, EBADF);
resp.id = req.id;
resp.error = 0;
resp.val = USER_NOTIF_MAGIC;
EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_SEND, &resp), 0);
/* Wait for child to finish. */
EXPECT_EQ(waitpid(pid, &status, 0), pid);
EXPECT_EQ(true, WIFEXITED(status));
EXPECT_EQ(0, WEXITSTATUS(status));
close(memfd);
}
#ifndef SECCOMP_USER_NOTIF_FD_SYNC_WAKE_UP
#define SECCOMP_USER_NOTIF_FD_SYNC_WAKE_UP (1UL << 0)
#define SECCOMP_IOCTL_NOTIF_SET_FLAGS SECCOMP_IOW(4, __u64)
#endif
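/*
 * As assumed here, SECCOMP_USER_NOTIF_FD_SYNC_WAKE_UP is a scheduling hint
 * that makes the kernel use synchronous wake-ups between the notifying task
 * and the supervisor; the test below only verifies that the SET_FLAGS ioctl
 * rejects unknown flags and accepts this one.
 */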
TEST(user_notification_sync)
{
struct seccomp_notif req = {};
struct seccomp_notif_resp resp = {};
int status, listener;
pid_t pid;
long ret;
ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
ASSERT_EQ(0, ret) {
TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
}
listener = user_notif_syscall(__NR_getppid,
SECCOMP_FILTER_FLAG_NEW_LISTENER);
ASSERT_GE(listener, 0);
/* Try to set invalid flags. */
EXPECT_SYSCALL_RETURN(-EINVAL,
ioctl(listener, SECCOMP_IOCTL_NOTIF_SET_FLAGS, 0xffffffff, 0));
ASSERT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_SET_FLAGS,
SECCOMP_USER_NOTIF_FD_SYNC_WAKE_UP, 0), 0);
pid = fork();
ASSERT_GE(pid, 0);
if (pid == 0) {
ret = syscall(__NR_getppid);
ASSERT_EQ(ret, USER_NOTIF_MAGIC) {
_exit(1);
}
_exit(0);
}
req.pid = 0;
ASSERT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0);
ASSERT_EQ(req.data.nr, __NR_getppid);
resp.id = req.id;
resp.error = 0;
resp.val = USER_NOTIF_MAGIC;
resp.flags = 0;
ASSERT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_SEND, &resp), 0);
ASSERT_EQ(waitpid(pid, &status, 0), pid);
ASSERT_EQ(status, 0);
}
/* Make sure PTRACE_O_SUSPEND_SECCOMP requires CAP_SYS_ADMIN. */
FIXTURE(O_SUSPEND_SECCOMP) {
pid_t pid;
};
FIXTURE_SETUP(O_SUSPEND_SECCOMP)
{
ERRNO_FILTER(block_read, E2BIG);
cap_value_t cap_list[] = { CAP_SYS_ADMIN };
cap_t caps;
self->pid = 0;
/* make sure we don't have CAP_SYS_ADMIN */
caps = cap_get_proc();
ASSERT_NE(NULL, caps);
ASSERT_EQ(0, cap_set_flag(caps, CAP_EFFECTIVE, 1, cap_list, CAP_CLEAR));
ASSERT_EQ(0, cap_set_proc(caps));
cap_free(caps);
ASSERT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0));
ASSERT_EQ(0, prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog_block_read));
self->pid = fork();
ASSERT_GE(self->pid, 0);
if (self->pid == 0) {
while (1)
pause();
_exit(127);
}
}
FIXTURE_TEARDOWN(O_SUSPEND_SECCOMP)
{
if (self->pid)
kill(self->pid, SIGKILL);
}
TEST_F(O_SUSPEND_SECCOMP, setoptions)
{
int wstatus;
ASSERT_EQ(0, ptrace(PTRACE_ATTACH, self->pid, NULL, 0));
ASSERT_EQ(self->pid, wait(&wstatus));
ASSERT_EQ(-1, ptrace(PTRACE_SETOPTIONS, self->pid, NULL, PTRACE_O_SUSPEND_SECCOMP));
if (errno == EINVAL)
SKIP(return, "Kernel does not support PTRACE_O_SUSPEND_SECCOMP (missing CONFIG_CHECKPOINT_RESTORE?)");
ASSERT_EQ(EPERM, errno);
}
TEST_F(O_SUSPEND_SECCOMP, seize)
{
int ret;
ret = ptrace(PTRACE_SEIZE, self->pid, NULL, PTRACE_O_SUSPEND_SECCOMP);
ASSERT_EQ(-1, ret);
if (errno == EINVAL)
SKIP(return, "Kernel does not support PTRACE_O_SUSPEND_SECCOMP (missing CONFIG_CHECKPOINT_RESTORE?)");
ASSERT_EQ(EPERM, errno);
}
/*
 * get_nth - Get the nth space-separated entry in a file.
 *
 * Returns the length of the read field.
 * Aborts the test if the field has zero length.
*/
static ssize_t get_nth(struct __test_metadata *_metadata, const char *path,
const unsigned int position, char **entry)
{
char *line = NULL;
unsigned int i;
ssize_t nread;
size_t len = 0;
FILE *f;
f = fopen(path, "r");
ASSERT_NE(f, NULL) {
TH_LOG("Could not open %s: %s", path, strerror(errno));
}
for (i = 0; i < position; i++) {
nread = getdelim(&line, &len, ' ', f);
ASSERT_GE(nread, 0) {
TH_LOG("Failed to read %d entry in file %s", i, path);
}
}
fclose(f);
ASSERT_GT(nread, 0) {
TH_LOG("Entry in file %s had zero length", path);
}
*entry = line;
return nread - 1;
}
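/*
 * For reference: /proc/<pid>/stat reads like "1234 (comm) S 1 ...", so for a
 * comm without spaces the third space-separated field is the one-character
 * task state that get_proc_stat() below extracts.
 */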
/* For a given PID, get the task state (D, R, etc...) */
static char get_proc_stat(struct __test_metadata *_metadata, pid_t pid)
{
char proc_path[100] = {0};
char status;
char *line;
snprintf(proc_path, sizeof(proc_path), "/proc/%d/stat", pid);
ASSERT_EQ(get_nth(_metadata, proc_path, 3, &line), 1);
status = *line;
free(line);
return status;
}
TEST(user_notification_fifo)
{
struct seccomp_notif_resp resp = {};
struct seccomp_notif req = {};
int i, status, listener;
pid_t pid, pids[3];
__u64 baseid;
long ret;
/* 100 ms */
struct timespec delay = { .tv_nsec = 100000000 };
ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
ASSERT_EQ(0, ret) {
TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
}
/* Setup a listener */
listener = user_notif_syscall(__NR_getppid,
SECCOMP_FILTER_FLAG_NEW_LISTENER);
ASSERT_GE(listener, 0);
pid = fork();
ASSERT_GE(pid, 0);
if (pid == 0) {
ret = syscall(__NR_getppid);
exit(ret != USER_NOTIF_MAGIC);
}
EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0);
baseid = req.id + 1;
resp.id = req.id;
resp.error = 0;
resp.val = USER_NOTIF_MAGIC;
/* check that we make sure flags == 0 */
EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_SEND, &resp), 0);
EXPECT_EQ(waitpid(pid, &status, 0), pid);
EXPECT_EQ(true, WIFEXITED(status));
EXPECT_EQ(0, WEXITSTATUS(status));
/* Start children, and generate notifications */
for (i = 0; i < ARRAY_SIZE(pids); i++) {
pid = fork();
if (pid == 0) {
ret = syscall(__NR_getppid);
exit(ret != USER_NOTIF_MAGIC);
}
pids[i] = pid;
}
/* This spins until all of the children are sleeping */
restart_wait:
for (i = 0; i < ARRAY_SIZE(pids); i++) {
if (get_proc_stat(_metadata, pids[i]) != 'S') {
nanosleep(&delay, NULL);
goto restart_wait;
}
}
/* Read the notifications in order (and respond) */
for (i = 0; i < ARRAY_SIZE(pids); i++) {
memset(&req, 0, sizeof(req));
EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0);
EXPECT_EQ(req.id, baseid + i);
resp.id = req.id;
EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_SEND, &resp), 0);
}
/* Make sure notifications were received */
for (i = 0; i < ARRAY_SIZE(pids); i++) {
EXPECT_EQ(waitpid(pids[i], &status, 0), pids[i]);
EXPECT_EQ(true, WIFEXITED(status));
EXPECT_EQ(0, WEXITSTATUS(status));
}
}
/* get_proc_syscall - Get the syscall in progress for a given pid
*
 * Returns the number of the syscall the process is currently blocked in.
 * Returns -1 if the process is not in a syscall (running, or blocked
 * outside of one).
*/
static long get_proc_syscall(struct __test_metadata *_metadata, int pid)
{
char proc_path[100] = {0};
long ret = -1;
ssize_t nread;
char *line;
snprintf(proc_path, sizeof(proc_path), "/proc/%d/syscall", pid);
nread = get_nth(_metadata, proc_path, 1, &line);
ASSERT_GT(nread, 0);
	if (strncmp("running", line, MIN(7, nread)))
		ret = strtol(line, NULL, 10);
free(line);
return ret;
}
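/*
 * For reference: the first field of /proc/<pid>/syscall is either the word
 * "running", or the number of the syscall the task is blocked in ("-1" when
 * the task is blocked outside of a syscall); only that first field is
 * examined above.
 */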
/* Ensure non-fatal signals prior to receive are unmodified */
TEST(user_notification_wait_killable_pre_notification)
{
struct sigaction new_action = {
.sa_handler = signal_handler,
};
int listener, status, sk_pair[2];
pid_t pid;
long ret;
char c;
/* 100 ms */
struct timespec delay = { .tv_nsec = 100000000 };
ASSERT_EQ(sigemptyset(&new_action.sa_mask), 0);
ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
ASSERT_EQ(0, ret)
{
TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
}
ASSERT_EQ(socketpair(PF_LOCAL, SOCK_SEQPACKET, 0, sk_pair), 0);
listener = user_notif_syscall(
__NR_getppid, SECCOMP_FILTER_FLAG_NEW_LISTENER |
SECCOMP_FILTER_FLAG_WAIT_KILLABLE_RECV);
ASSERT_GE(listener, 0);
/*
	 * Check that a non-fatal SIGUSR1 delivered before the supervisor
	 * receives the notification interrupts the process as usual. SIGUSR1
	 * is wired up to a custom signal handler, and we make sure it gets
	 * called.
*/
pid = fork();
ASSERT_GE(pid, 0);
if (pid == 0) {
close(sk_pair[0]);
handled = sk_pair[1];
/* Setup the non-fatal sigaction without SA_RESTART */
if (sigaction(SIGUSR1, &new_action, NULL)) {
perror("sigaction");
exit(1);
}
ret = syscall(__NR_getppid);
/* Make sure we got a return from a signal interruption */
exit(ret != -1 || errno != EINTR);
}
/*
* Make sure we've gotten to the seccomp user notification wait
* from getppid prior to sending any signals
*/
while (get_proc_syscall(_metadata, pid) != __NR_getppid &&
get_proc_stat(_metadata, pid) != 'S')
nanosleep(&delay, NULL);
/* Send non-fatal kill signal */
EXPECT_EQ(kill(pid, SIGUSR1), 0);
/* wait for process to exit (exit checks for EINTR) */
EXPECT_EQ(waitpid(pid, &status, 0), pid);
EXPECT_EQ(true, WIFEXITED(status));
EXPECT_EQ(0, WEXITSTATUS(status));
EXPECT_EQ(read(sk_pair[0], &c, 1), 1);
}
/* Ensure non-fatal signals after receive are blocked */
TEST(user_notification_wait_killable)
{
struct sigaction new_action = {
.sa_handler = signal_handler,
};
struct seccomp_notif_resp resp = {};
struct seccomp_notif req = {};
int listener, status, sk_pair[2];
pid_t pid;
long ret;
char c;
/* 100 ms */
struct timespec delay = { .tv_nsec = 100000000 };
ASSERT_EQ(sigemptyset(&new_action.sa_mask), 0);
ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
ASSERT_EQ(0, ret)
{
TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
}
ASSERT_EQ(socketpair(PF_LOCAL, SOCK_SEQPACKET, 0, sk_pair), 0);
listener = user_notif_syscall(
__NR_getppid, SECCOMP_FILTER_FLAG_NEW_LISTENER |
SECCOMP_FILTER_FLAG_WAIT_KILLABLE_RECV);
ASSERT_GE(listener, 0);
pid = fork();
ASSERT_GE(pid, 0);
if (pid == 0) {
close(sk_pair[0]);
handled = sk_pair[1];
/* Setup the sigaction without SA_RESTART */
if (sigaction(SIGUSR1, &new_action, NULL)) {
perror("sigaction");
exit(1);
}
/* Make sure that the syscall is completed (no EINTR) */
ret = syscall(__NR_getppid);
exit(ret != USER_NOTIF_MAGIC);
}
/*
	 * Get the notification to move the notifying process into a
* non-preemptible (TASK_KILLABLE) state.
*/
EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0);
/* Send non-fatal kill signal */
EXPECT_EQ(kill(pid, SIGUSR1), 0);
/*
	 * Make sure the task moves to TASK_KILLABLE by waiting for the
	 * D (Disk Sleep) state after it receives the non-fatal signal.
*/
while (get_proc_stat(_metadata, pid) != 'D')
nanosleep(&delay, NULL);
resp.id = req.id;
resp.val = USER_NOTIF_MAGIC;
/* Make sure the notification is found and able to be replied to */
EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_SEND, &resp), 0);
/*
* Make sure that the signal handler does get called once we're back in
* userspace.
*/
EXPECT_EQ(read(sk_pair[0], &c, 1), 1);
/* wait for process to exit (exit checks for USER_NOTIF_MAGIC) */
EXPECT_EQ(waitpid(pid, &status, 0), pid);
EXPECT_EQ(true, WIFEXITED(status));
EXPECT_EQ(0, WEXITSTATUS(status));
}
/* Ensure fatal signals after receive are not blocked */
TEST(user_notification_wait_killable_fatal)
{
struct seccomp_notif req = {};
int listener, status;
pid_t pid;
long ret;
/* 100 ms */
struct timespec delay = { .tv_nsec = 100000000 };
ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
ASSERT_EQ(0, ret)
{
TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
}
listener = user_notif_syscall(
__NR_getppid, SECCOMP_FILTER_FLAG_NEW_LISTENER |
SECCOMP_FILTER_FLAG_WAIT_KILLABLE_RECV);
ASSERT_GE(listener, 0);
pid = fork();
ASSERT_GE(pid, 0);
if (pid == 0) {
/* This should never complete as it should get a SIGTERM */
syscall(__NR_getppid);
exit(1);
}
while (get_proc_stat(_metadata, pid) != 'S')
nanosleep(&delay, NULL);
/*
	 * Get the notification to move the notifying process into a
* non-preemptible (TASK_KILLABLE) state.
*/
EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0);
/* Kill the process with a fatal signal */
EXPECT_EQ(kill(pid, SIGTERM), 0);
/*
* Wait for the process to exit, and make sure the process terminated
* due to the SIGTERM signal.
*/
EXPECT_EQ(waitpid(pid, &status, 0), pid);
EXPECT_EQ(true, WIFSIGNALED(status));
EXPECT_EQ(SIGTERM, WTERMSIG(status));
}
/*
* TODO:
* - expand NNP testing
* - better arch-specific TRACE and TRAP handlers.
* - endianness checking when appropriate
* - 64-bit arg prodding
* - arch value testing (x86 modes especially)
* - verify that FILTER_FLAG_LOG filters generate log messages
* - verify that RET_LOG generates log messages
*/
TEST_HARNESS_MAIN
| linux-master | tools/testing/selftests/seccomp/seccomp_bpf.c |
/*
* Strictly speaking, this is not a test. But it can report during test
 * runs so that relative performance can be measured.
*/
#define _GNU_SOURCE
#include <assert.h>
#include <limits.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <unistd.h>
#include <linux/filter.h>
#include <linux/seccomp.h>
#include <sys/param.h>
#include <sys/prctl.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include "../kselftest.h"
unsigned long long timing(clockid_t clk_id, unsigned long long samples)
{
struct timespec start, finish;
unsigned long long i;
pid_t pid, ret;
pid = getpid();
assert(clock_gettime(clk_id, &start) == 0);
for (i = 0; i < samples; i++) {
ret = syscall(__NR_getpid);
assert(pid == ret);
}
assert(clock_gettime(clk_id, &finish) == 0);
i = finish.tv_sec - start.tv_sec;
i *= 1000000000ULL;
i += finish.tv_nsec - start.tv_nsec;
printf("%lu.%09lu - %lu.%09lu = %llu (%.1fs)\n",
finish.tv_sec, finish.tv_nsec,
start.tv_sec, start.tv_nsec,
i, (double)i / 1000000000.0);
return i;
}
unsigned long long calibrate(void)
{
struct timespec start, finish;
unsigned long long i, samples, step = 9973;
pid_t pid, ret;
int seconds = 15;
printf("Calibrating sample size for %d seconds worth of syscalls ...\n", seconds);
samples = 0;
pid = getpid();
assert(clock_gettime(CLOCK_MONOTONIC, &start) == 0);
do {
for (i = 0; i < step; i++) {
ret = syscall(__NR_getpid);
assert(pid == ret);
}
assert(clock_gettime(CLOCK_MONOTONIC, &finish) == 0);
samples += step;
i = finish.tv_sec - start.tv_sec;
i *= 1000000000ULL;
i += finish.tv_nsec - start.tv_nsec;
} while (i < 1000000000ULL);
return samples * seconds;
}
bool approx(int i_one, int i_two)
{
double one = i_one, one_bump = one * 0.01;
double two = i_two, two_bump = two * 0.01;
one_bump = one + MAX(one_bump, 2.0);
two_bump = two + MAX(two_bump, 2.0);
/* Equal to, or within 1% or 2 digits */
if (one == two ||
(one > two && one <= two_bump) ||
(two > one && two <= one_bump))
return true;
return false;
}
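/*
 * Worked examples for approx() above: approx(100, 101) is true because 101
 * lies within 100 + 2, while approx(1000, 1015) is false because 1015
 * exceeds both 1000 + 2 and 1000 * 1.01.
 */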
bool le(int i_one, int i_two)
{
if (i_one <= i_two)
return true;
return false;
}
long compare(const char *name_one, const char *name_eval, const char *name_two,
unsigned long long one, bool (*eval)(int, int), unsigned long long two)
{
bool good;
printf("\t%s %s %s (%lld %s %lld): ", name_one, name_eval, name_two,
(long long)one, name_eval, (long long)two);
if (one > INT_MAX) {
printf("Miscalculation! Measurement went negative: %lld\n", (long long)one);
return 1;
}
if (two > INT_MAX) {
printf("Miscalculation! Measurement went negative: %lld\n", (long long)two);
return 1;
}
good = eval(one, two);
printf("%s\n", good ? "✔️" : "❌");
return good ? 0 : 1;
}
int main(int argc, char *argv[])
{
struct sock_filter bitmap_filter[] = {
BPF_STMT(BPF_LD|BPF_W|BPF_ABS, offsetof(struct seccomp_data, nr)),
BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
};
struct sock_fprog bitmap_prog = {
.len = (unsigned short)ARRAY_SIZE(bitmap_filter),
.filter = bitmap_filter,
};
struct sock_filter filter[] = {
BPF_STMT(BPF_LD|BPF_W|BPF_ABS, offsetof(struct seccomp_data, args[0])),
BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
};
struct sock_fprog prog = {
.len = (unsigned short)ARRAY_SIZE(filter),
.filter = filter,
};
long ret, bits;
unsigned long long samples, calc;
unsigned long long native, filter1, filter2, bitmap1, bitmap2;
unsigned long long entry, per_filter1, per_filter2;
setbuf(stdout, NULL);
printf("Running on:\n");
system("uname -a");
printf("Current BPF sysctl settings:\n");
/* Avoid using "sysctl" which may not be installed. */
system("grep -H . /proc/sys/net/core/bpf_jit_enable");
system("grep -H . /proc/sys/net/core/bpf_jit_harden");
if (argc > 1)
samples = strtoull(argv[1], NULL, 0);
else
samples = calibrate();
printf("Benchmarking %llu syscalls...\n", samples);
/* Native call */
native = timing(CLOCK_PROCESS_CPUTIME_ID, samples) / samples;
printf("getpid native: %llu ns\n", native);
ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
assert(ret == 0);
/* One filter resulting in a bitmap */
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &bitmap_prog);
assert(ret == 0);
bitmap1 = timing(CLOCK_PROCESS_CPUTIME_ID, samples) / samples;
printf("getpid RET_ALLOW 1 filter (bitmap): %llu ns\n", bitmap1);
/* Second filter resulting in a bitmap */
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &bitmap_prog);
assert(ret == 0);
bitmap2 = timing(CLOCK_PROCESS_CPUTIME_ID, samples) / samples;
printf("getpid RET_ALLOW 2 filters (bitmap): %llu ns\n", bitmap2);
/* Third filter, can no longer be converted to bitmap */
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
assert(ret == 0);
filter1 = timing(CLOCK_PROCESS_CPUTIME_ID, samples) / samples;
printf("getpid RET_ALLOW 3 filters (full): %llu ns\n", filter1);
/* Fourth filter, can not be converted to bitmap because of filter 3 */
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &bitmap_prog);
assert(ret == 0);
filter2 = timing(CLOCK_PROCESS_CPUTIME_ID, samples) / samples;
printf("getpid RET_ALLOW 4 filters (full): %llu ns\n", filter2);
/* Estimations */
#define ESTIMATE(fmt, var, what) do { \
var = (what); \
printf("Estimated " fmt ": %llu ns\n", var); \
if (var > INT_MAX) \
goto more_samples; \
} while (0)
ESTIMATE("total seccomp overhead for 1 bitmapped filter", calc,
bitmap1 - native);
ESTIMATE("total seccomp overhead for 2 bitmapped filters", calc,
bitmap2 - native);
ESTIMATE("total seccomp overhead for 3 full filters", calc,
filter1 - native);
ESTIMATE("total seccomp overhead for 4 full filters", calc,
filter2 - native);
ESTIMATE("seccomp entry overhead", entry,
bitmap1 - native - (bitmap2 - bitmap1));
ESTIMATE("seccomp per-filter overhead (last 2 diff)", per_filter1,
filter2 - filter1);
ESTIMATE("seccomp per-filter overhead (filters / 4)", per_filter2,
(filter2 - native - entry) / 4);
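	/*
	 * The expectations below assume the simple cost model
	 *   syscall time ≈ native + entry + per_filter * nr_full_filters,
	 * with bitmapped (constant-action) filters adding essentially nothing
	 * beyond the fixed entry cost; that is why "entry" was derived above
	 * by subtracting the increment of the second bitmapped filter from
	 * the first.
	 */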
printf("Expectations:\n");
ret |= compare("native", "≤", "1 bitmap", native, le, bitmap1);
bits = compare("native", "≤", "1 filter", native, le, filter1);
if (bits)
goto more_samples;
ret |= compare("per-filter (last 2 diff)", "≈", "per-filter (filters / 4)",
per_filter1, approx, per_filter2);
bits = compare("1 bitmapped", "≈", "2 bitmapped",
bitmap1 - native, approx, bitmap2 - native);
if (bits) {
printf("Skipping constant action bitmap expectations: they appear unsupported.\n");
goto out;
}
ret |= compare("entry", "≈", "1 bitmapped", entry, approx, bitmap1 - native);
ret |= compare("entry", "≈", "2 bitmapped", entry, approx, bitmap2 - native);
ret |= compare("native + entry + (per filter * 4)", "≈", "4 filters total",
entry + (per_filter1 * 4) + native, approx, filter2);
if (ret == 0)
goto out;
more_samples:
printf("Saw unexpected benchmark result. Try running again with more samples?\n");
out:
return 0;
}
| linux-master | tools/testing/selftests/seccomp/seccomp_benchmark.c |
// SPDX-License-Identifier: GPL-2.0-only
#include <sys/types.h>
#include <sys/epoll.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/connector.h>
#include <linux/cn_proc.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <strings.h>
#include <errno.h>
#include <signal.h>
#include <string.h>
#include "../kselftest.h"
#define NL_MESSAGE_SIZE (sizeof(struct nlmsghdr) + sizeof(struct cn_msg) + \
sizeof(struct proc_input))
#define NL_MESSAGE_SIZE_NF (sizeof(struct nlmsghdr) + sizeof(struct cn_msg) + \
sizeof(int))
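/*
 * Message layout assumed by send_message() below: a struct nlmsghdr followed
 * directly by a struct cn_msg, whose payload is either a struct proc_input
 * (filtered mode) or a bare enum proc_cn_mcast_op (unfiltered mode).
 */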
#define MAX_EVENTS 1
volatile static int interrupted;
static int nl_sock, ret_errno, tcount;
static struct epoll_event evn;
static int filter;
#ifdef ENABLE_PRINTS
#define Printf printf
#else
#define Printf ksft_print_msg
#endif
int send_message(void *pinp)
{
char buff[NL_MESSAGE_SIZE];
struct nlmsghdr *hdr;
struct cn_msg *msg;
hdr = (struct nlmsghdr *)buff;
if (filter)
hdr->nlmsg_len = NL_MESSAGE_SIZE;
else
hdr->nlmsg_len = NL_MESSAGE_SIZE_NF;
hdr->nlmsg_type = NLMSG_DONE;
hdr->nlmsg_flags = 0;
hdr->nlmsg_seq = 0;
hdr->nlmsg_pid = getpid();
msg = (struct cn_msg *)NLMSG_DATA(hdr);
msg->id.idx = CN_IDX_PROC;
msg->id.val = CN_VAL_PROC;
msg->seq = 0;
msg->ack = 0;
msg->flags = 0;
if (filter) {
msg->len = sizeof(struct proc_input);
((struct proc_input *)msg->data)->mcast_op =
((struct proc_input *)pinp)->mcast_op;
((struct proc_input *)msg->data)->event_type =
((struct proc_input *)pinp)->event_type;
} else {
msg->len = sizeof(int);
*(int *)msg->data = *(enum proc_cn_mcast_op *)pinp;
}
if (send(nl_sock, hdr, hdr->nlmsg_len, 0) == -1) {
ret_errno = errno;
perror("send failed");
return -3;
}
return 0;
}
int register_proc_netlink(int *efd, void *input)
{
struct sockaddr_nl sa_nl;
int err = 0, epoll_fd;
nl_sock = socket(PF_NETLINK, SOCK_DGRAM, NETLINK_CONNECTOR);
if (nl_sock == -1) {
ret_errno = errno;
perror("socket failed");
return -1;
}
bzero(&sa_nl, sizeof(sa_nl));
sa_nl.nl_family = AF_NETLINK;
sa_nl.nl_groups = CN_IDX_PROC;
sa_nl.nl_pid = getpid();
if (bind(nl_sock, (struct sockaddr *)&sa_nl, sizeof(sa_nl)) == -1) {
ret_errno = errno;
perror("bind failed");
return -2;
}
epoll_fd = epoll_create1(EPOLL_CLOEXEC);
if (epoll_fd < 0) {
ret_errno = errno;
perror("epoll_create1 failed");
return -2;
}
err = send_message(input);
if (err < 0)
return err;
evn.events = EPOLLIN;
evn.data.fd = nl_sock;
if (epoll_ctl(epoll_fd, EPOLL_CTL_ADD, nl_sock, &evn) < 0) {
ret_errno = errno;
perror("epoll_ctl failed");
return -3;
}
*efd = epoll_fd;
return 0;
}
static void sigint(int sig)
{
interrupted = 1;
}
int handle_packet(char *buff, int fd, struct proc_event *event)
{
struct nlmsghdr *hdr;
hdr = (struct nlmsghdr *)buff;
if (hdr->nlmsg_type == NLMSG_ERROR) {
perror("NLMSG_ERROR error\n");
return -3;
} else if (hdr->nlmsg_type == NLMSG_DONE) {
event = (struct proc_event *)
((struct cn_msg *)NLMSG_DATA(hdr))->data;
tcount++;
switch (event->what) {
case PROC_EVENT_EXIT:
Printf("Exit process %d (tgid %d) with code %d, signal %d\n",
event->event_data.exit.process_pid,
event->event_data.exit.process_tgid,
event->event_data.exit.exit_code,
event->event_data.exit.exit_signal);
break;
case PROC_EVENT_FORK:
Printf("Fork process %d (tgid %d), parent %d (tgid %d)\n",
event->event_data.fork.child_pid,
event->event_data.fork.child_tgid,
event->event_data.fork.parent_pid,
event->event_data.fork.parent_tgid);
break;
case PROC_EVENT_EXEC:
Printf("Exec process %d (tgid %d)\n",
event->event_data.exec.process_pid,
event->event_data.exec.process_tgid);
break;
case PROC_EVENT_UID:
Printf("UID process %d (tgid %d) uid %d euid %d\n",
event->event_data.id.process_pid,
event->event_data.id.process_tgid,
event->event_data.id.r.ruid,
event->event_data.id.e.euid);
break;
case PROC_EVENT_GID:
Printf("GID process %d (tgid %d) gid %d egid %d\n",
event->event_data.id.process_pid,
event->event_data.id.process_tgid,
event->event_data.id.r.rgid,
event->event_data.id.e.egid);
break;
case PROC_EVENT_SID:
Printf("SID process %d (tgid %d)\n",
event->event_data.sid.process_pid,
event->event_data.sid.process_tgid);
break;
case PROC_EVENT_PTRACE:
Printf("Ptrace process %d (tgid %d), Tracer %d (tgid %d)\n",
event->event_data.ptrace.process_pid,
event->event_data.ptrace.process_tgid,
event->event_data.ptrace.tracer_pid,
event->event_data.ptrace.tracer_tgid);
break;
case PROC_EVENT_COMM:
Printf("Comm process %d (tgid %d) comm %s\n",
event->event_data.comm.process_pid,
event->event_data.comm.process_tgid,
event->event_data.comm.comm);
break;
case PROC_EVENT_COREDUMP:
Printf("Coredump process %d (tgid %d) parent %d, (tgid %d)\n",
event->event_data.coredump.process_pid,
event->event_data.coredump.process_tgid,
event->event_data.coredump.parent_pid,
event->event_data.coredump.parent_tgid);
break;
default:
break;
}
}
return 0;
}
int handle_events(int epoll_fd, struct proc_event *pev)
{
char buff[CONNECTOR_MAX_MSG_SIZE];
struct epoll_event ev[MAX_EVENTS];
int i, event_count = 0, err = 0;
event_count = epoll_wait(epoll_fd, ev, MAX_EVENTS, -1);
if (event_count < 0) {
ret_errno = errno;
if (ret_errno != EINTR)
perror("epoll_wait failed");
return -3;
}
for (i = 0; i < event_count; i++) {
if (!(ev[i].events & EPOLLIN))
continue;
if (recv(ev[i].data.fd, buff, sizeof(buff), 0) == -1) {
ret_errno = errno;
perror("recv failed");
return -3;
}
err = handle_packet(buff, ev[i].data.fd, pev);
if (err < 0)
return err;
}
return 0;
}
int main(int argc, char *argv[])
{
int epoll_fd, err;
struct proc_event proc_ev;
struct proc_input input;
signal(SIGINT, sigint);
if (argc > 2) {
		printf("Expected 0 (assume no filter) or 1 argument (-f)\n");
exit(KSFT_SKIP);
}
if (argc == 2) {
if (strcmp(argv[1], "-f") == 0) {
filter = 1;
} else {
			printf("Valid option: -f (for the filter feature)\n");
exit(KSFT_SKIP);
}
}
if (filter) {
input.event_type = PROC_EVENT_NONZERO_EXIT;
input.mcast_op = PROC_CN_MCAST_LISTEN;
err = register_proc_netlink(&epoll_fd, (void*)&input);
} else {
enum proc_cn_mcast_op op = PROC_CN_MCAST_LISTEN;
err = register_proc_netlink(&epoll_fd, (void*)&op);
}
if (err < 0) {
if (err == -2)
close(nl_sock);
if (err == -3) {
close(nl_sock);
close(epoll_fd);
}
exit(1);
}
while (!interrupted) {
err = handle_events(epoll_fd, &proc_ev);
if (err < 0) {
if (ret_errno == EINTR)
continue;
if (err == -2)
close(nl_sock);
if (err == -3) {
close(nl_sock);
close(epoll_fd);
}
exit(1);
}
}
if (filter) {
input.mcast_op = PROC_CN_MCAST_IGNORE;
send_message((void*)&input);
} else {
enum proc_cn_mcast_op op = PROC_CN_MCAST_IGNORE;
send_message((void*)&op);
}
close(epoll_fd);
close(nl_sock);
printf("Done total count: %d\n", tcount);
exit(0);
}
| linux-master | tools/testing/selftests/connector/proc_filter.c |
/*
* Copyright (c) 2019 Alexey Dobriyan <[email protected]>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
/* Test that pointing a #! script's interpreter at the script itself doesn't recurse. */
#include <errno.h>
#include <sched.h>
#include <stdio.h>
#include <string.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <sys/mount.h>
#include <unistd.h>
int main(void)
{
if (unshare(CLONE_NEWNS) == -1) {
if (errno == ENOSYS || errno == EPERM) {
fprintf(stderr, "error: unshare, errno %d\n", errno);
return 4;
}
fprintf(stderr, "error: unshare, errno %d\n", errno);
return 1;
}
if (mount(NULL, "/", NULL, MS_PRIVATE|MS_REC, NULL) == -1) {
fprintf(stderr, "error: mount '/', errno %d\n", errno);
return 1;
}
	/* Need a filesystem that permits exec (i.e. not mounted noexec). */
if (mount(NULL, "/tmp", "ramfs", 0, NULL) == -1) {
fprintf(stderr, "error: mount ramfs, errno %d\n", errno);
return 1;
}
#define FILENAME "/tmp/1"
int fd = creat(FILENAME, 0700);
if (fd == -1) {
fprintf(stderr, "error: creat, errno %d\n", errno);
return 1;
}
#define S "#!" FILENAME "\n"
if (write(fd, S, strlen(S)) != strlen(S)) {
fprintf(stderr, "error: write, errno %d\n", errno);
return 1;
}
close(fd);
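	/*
	 * /tmp/1 now begins with "#!/tmp/1", i.e. it names itself as its own
	 * interpreter; execve() is expected to fail with ELOOP rather than
	 * recurse through the script loader.
	 */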
int rv = execve(FILENAME, NULL, NULL);
if (rv == -1 && errno == ELOOP) {
return 0;
}
fprintf(stderr, "error: execve, rv %d, errno %d\n", rv, errno);
return 1;
}
| linux-master | tools/testing/selftests/exec/recursion-depth.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2014 Google, Inc.
*
* Selftests for execveat(2).
*/
#ifndef _GNU_SOURCE
#define _GNU_SOURCE /* to get O_PATH, AT_EMPTY_PATH */
#endif
#include <sys/sendfile.h>
#include <sys/stat.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <errno.h>
#include <fcntl.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include "../kselftest.h"
static char longpath[2 * PATH_MAX] = "";
static char *envp[] = { "IN_TEST=yes", NULL, NULL };
static char *argv[] = { "execveat", "99", NULL };
static int execveat_(int fd, const char *path, char **argv, char **envp,
int flags)
{
#ifdef __NR_execveat
return syscall(__NR_execveat, fd, path, argv, envp, flags);
#else
errno = ENOSYS;
return -1;
#endif
}
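/*
 * For reference (mirroring how the checks below invoke the wrapper): a
 * dirfd-relative exec is execveat_(dfd, "name", argv, envp, 0), while
 * executing an already-open fd fexecve(3)-style is
 * execveat_(fd, "", argv, envp, AT_EMPTY_PATH).
 */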
#define check_execveat_fail(fd, path, flags, errno) \
_check_execveat_fail(fd, path, flags, errno, #errno)
static int _check_execveat_fail(int fd, const char *path, int flags,
int expected_errno, const char *errno_str)
{
int rc;
errno = 0;
printf("Check failure of execveat(%d, '%s', %d) with %s... ",
fd, path?:"(null)", flags, errno_str);
rc = execveat_(fd, path, argv, envp, flags);
if (rc > 0) {
printf("[FAIL] (unexpected success from execveat(2))\n");
return 1;
}
if (errno != expected_errno) {
printf("[FAIL] (expected errno %d (%s) not %d (%s)\n",
expected_errno, strerror(expected_errno),
errno, strerror(errno));
return 1;
}
printf("[OK]\n");
return 0;
}
static int check_execveat_invoked_rc(int fd, const char *path, int flags,
int expected_rc, int expected_rc2)
{
int status;
int rc;
pid_t child;
int pathlen = path ? strlen(path) : 0;
if (pathlen > 40)
printf("Check success of execveat(%d, '%.20s...%s', %d)... ",
fd, path, (path + pathlen - 20), flags);
else
printf("Check success of execveat(%d, '%s', %d)... ",
fd, path?:"(null)", flags);
child = fork();
if (child < 0) {
printf("[FAIL] (fork() failed)\n");
return 1;
}
if (child == 0) {
/* Child: do execveat(). */
rc = execveat_(fd, path, argv, envp, flags);
printf("[FAIL]: execveat() failed, rc=%d errno=%d (%s)\n",
rc, errno, strerror(errno));
exit(1); /* should not reach here */
}
/* Parent: wait for & check child's exit status. */
rc = waitpid(child, &status, 0);
if (rc != child) {
printf("[FAIL] (waitpid(%d,...) returned %d)\n", child, rc);
return 1;
}
if (!WIFEXITED(status)) {
printf("[FAIL] (child %d did not exit cleanly, status=%08x)\n",
child, status);
return 1;
}
if ((WEXITSTATUS(status) != expected_rc) &&
(WEXITSTATUS(status) != expected_rc2)) {
printf("[FAIL] (child %d exited with %d not %d nor %d)\n",
child, WEXITSTATUS(status), expected_rc, expected_rc2);
return 1;
}
printf("[OK]\n");
return 0;
}
static int check_execveat(int fd, const char *path, int flags)
{
return check_execveat_invoked_rc(fd, path, flags, 99, 99);
}
static char *concat(const char *left, const char *right)
{
char *result = malloc(strlen(left) + strlen(right) + 1);
strcpy(result, left);
strcat(result, right);
return result;
}
static int open_or_die(const char *filename, int flags)
{
int fd = open(filename, flags);
if (fd < 0) {
printf("Failed to open '%s'; "
"check prerequisites are available\n", filename);
exit(1);
}
return fd;
}
static void exe_cp(const char *src, const char *dest)
{
int in_fd = open_or_die(src, O_RDONLY);
int out_fd = open(dest, O_RDWR|O_CREAT|O_TRUNC, 0755);
struct stat info;
fstat(in_fd, &info);
sendfile(out_fd, in_fd, NULL, info.st_size);
close(in_fd);
close(out_fd);
}
#define XX_DIR_LEN 200
static int check_execveat_pathmax(int root_dfd, const char *src, int is_script)
{
int fail = 0;
int ii, count, len;
char longname[XX_DIR_LEN + 1];
int fd;
if (*longpath == '\0') {
/* Create a filename close to PATH_MAX in length */
char *cwd = getcwd(NULL, 0);
if (!cwd) {
printf("Failed to getcwd(), errno=%d (%s)\n",
errno, strerror(errno));
return 2;
}
strcpy(longpath, cwd);
strcat(longpath, "/");
memset(longname, 'x', XX_DIR_LEN - 1);
longname[XX_DIR_LEN - 1] = '/';
longname[XX_DIR_LEN] = '\0';
count = (PATH_MAX - 3 - strlen(cwd)) / XX_DIR_LEN;
for (ii = 0; ii < count; ii++) {
strcat(longpath, longname);
mkdir(longpath, 0755);
}
len = (PATH_MAX - 3 - strlen(cwd)) - (count * XX_DIR_LEN);
if (len <= 0)
len = 1;
memset(longname, 'y', len);
longname[len] = '\0';
strcat(longpath, longname);
free(cwd);
}
exe_cp(src, longpath);
/*
* Execute as a pre-opened file descriptor, which works whether this is
* a script or not (because the interpreter sees a filename like
* "/dev/fd/20").
*/
fd = open(longpath, O_RDONLY);
if (fd > 0) {
printf("Invoke copy of '%s' via filename of length %zu:\n",
src, strlen(longpath));
fail += check_execveat(fd, "", AT_EMPTY_PATH);
} else {
printf("Failed to open length %zu filename, errno=%d (%s)\n",
strlen(longpath), errno, strerror(errno));
fail++;
}
/*
* Execute as a long pathname relative to "/". If this is a script,
* the interpreter will launch but fail to open the script because its
* name ("/dev/fd/5/xxx....") is bigger than PATH_MAX.
*
* The failure code is usually 127 (POSIX: "If a command is not found,
* the exit status shall be 127."), but some systems give 126 (POSIX:
* "If the command name is found, but it is not an executable utility,
* the exit status shall be 126."), so allow either.
*/
if (is_script)
fail += check_execveat_invoked_rc(root_dfd, longpath + 1, 0,
127, 126);
else
fail += check_execveat(root_dfd, longpath + 1, 0);
return fail;
}
static int run_tests(void)
{
int fail = 0;
char *fullname = realpath("execveat", NULL);
char *fullname_script = realpath("script", NULL);
char *fullname_symlink = concat(fullname, ".symlink");
int subdir_dfd = open_or_die("subdir", O_DIRECTORY|O_RDONLY);
int subdir_dfd_ephemeral = open_or_die("subdir.ephemeral",
O_DIRECTORY|O_RDONLY);
int dot_dfd = open_or_die(".", O_DIRECTORY|O_RDONLY);
int root_dfd = open_or_die("/", O_DIRECTORY|O_RDONLY);
int dot_dfd_path = open_or_die(".", O_DIRECTORY|O_RDONLY|O_PATH);
int dot_dfd_cloexec = open_or_die(".", O_DIRECTORY|O_RDONLY|O_CLOEXEC);
int fd = open_or_die("execveat", O_RDONLY);
int fd_path = open_or_die("execveat", O_RDONLY|O_PATH);
int fd_symlink = open_or_die("execveat.symlink", O_RDONLY);
int fd_denatured = open_or_die("execveat.denatured", O_RDONLY);
int fd_denatured_path = open_or_die("execveat.denatured",
O_RDONLY|O_PATH);
int fd_script = open_or_die("script", O_RDONLY);
int fd_ephemeral = open_or_die("execveat.ephemeral", O_RDONLY);
int fd_ephemeral_path = open_or_die("execveat.path.ephemeral",
O_RDONLY|O_PATH);
int fd_script_ephemeral = open_or_die("script.ephemeral", O_RDONLY);
int fd_cloexec = open_or_die("execveat", O_RDONLY|O_CLOEXEC);
int fd_script_cloexec = open_or_die("script", O_RDONLY|O_CLOEXEC);
/* Check if we have execveat at all, and bail early if not */
errno = 0;
execveat_(-1, NULL, NULL, NULL, 0);
if (errno == ENOSYS) {
ksft_exit_skip(
"ENOSYS calling execveat - no kernel support?\n");
}
/* Change file position to confirm it doesn't affect anything */
lseek(fd, 10, SEEK_SET);
/* Normal executable file: */
/* dfd + path */
fail += check_execveat(subdir_dfd, "../execveat", 0);
fail += check_execveat(dot_dfd, "execveat", 0);
fail += check_execveat(dot_dfd_path, "execveat", 0);
/* absolute path */
fail += check_execveat(AT_FDCWD, fullname, 0);
/* absolute path with nonsense dfd */
fail += check_execveat(99, fullname, 0);
/* fd + no path */
fail += check_execveat(fd, "", AT_EMPTY_PATH);
/* O_CLOEXEC fd + no path */
fail += check_execveat(fd_cloexec, "", AT_EMPTY_PATH);
/* O_PATH fd */
fail += check_execveat(fd_path, "", AT_EMPTY_PATH);
/* Mess with executable file that's already open: */
/* fd + no path to a file that's been renamed */
rename("execveat.ephemeral", "execveat.moved");
fail += check_execveat(fd_ephemeral, "", AT_EMPTY_PATH);
/* fd + no path to a file that's been deleted */
	unlink("execveat.moved"); /* remove the file while fd is open */
fail += check_execveat(fd_ephemeral, "", AT_EMPTY_PATH);
/* Mess with executable file that's already open with O_PATH */
/* fd + no path to a file that's been deleted */
unlink("execveat.path.ephemeral");
fail += check_execveat(fd_ephemeral_path, "", AT_EMPTY_PATH);
/* Invalid argument failures */
fail += check_execveat_fail(fd, "", 0, ENOENT);
fail += check_execveat_fail(fd, NULL, AT_EMPTY_PATH, EFAULT);
/* Symlink to executable file: */
/* dfd + path */
fail += check_execveat(dot_dfd, "execveat.symlink", 0);
fail += check_execveat(dot_dfd_path, "execveat.symlink", 0);
/* absolute path */
fail += check_execveat(AT_FDCWD, fullname_symlink, 0);
/* fd + no path, even with AT_SYMLINK_NOFOLLOW (already followed) */
fail += check_execveat(fd_symlink, "", AT_EMPTY_PATH);
fail += check_execveat(fd_symlink, "",
AT_EMPTY_PATH|AT_SYMLINK_NOFOLLOW);
/* Symlink fails when AT_SYMLINK_NOFOLLOW set: */
/* dfd + path */
fail += check_execveat_fail(dot_dfd, "execveat.symlink",
AT_SYMLINK_NOFOLLOW, ELOOP);
fail += check_execveat_fail(dot_dfd_path, "execveat.symlink",
AT_SYMLINK_NOFOLLOW, ELOOP);
/* absolute path */
fail += check_execveat_fail(AT_FDCWD, fullname_symlink,
AT_SYMLINK_NOFOLLOW, ELOOP);
/* Non-regular file failure */
fail += check_execveat_fail(dot_dfd, "pipe", 0, EACCES);
unlink("pipe");
/* Shell script wrapping executable file: */
/* dfd + path */
fail += check_execveat(subdir_dfd, "../script", 0);
fail += check_execveat(dot_dfd, "script", 0);
fail += check_execveat(dot_dfd_path, "script", 0);
/* absolute path */
fail += check_execveat(AT_FDCWD, fullname_script, 0);
/* fd + no path */
fail += check_execveat(fd_script, "", AT_EMPTY_PATH);
fail += check_execveat(fd_script, "",
AT_EMPTY_PATH|AT_SYMLINK_NOFOLLOW);
/* O_CLOEXEC fd fails for a script (as script file inaccessible) */
fail += check_execveat_fail(fd_script_cloexec, "", AT_EMPTY_PATH,
ENOENT);
fail += check_execveat_fail(dot_dfd_cloexec, "script", 0, ENOENT);
/* Mess with script file that's already open: */
/* fd + no path to a file that's been renamed */
rename("script.ephemeral", "script.moved");
fail += check_execveat(fd_script_ephemeral, "", AT_EMPTY_PATH);
/* fd + no path to a file that's been deleted */
unlink("script.moved"); /* remove the file while fd open */
fail += check_execveat(fd_script_ephemeral, "", AT_EMPTY_PATH);
/* Rename a subdirectory in the path: */
rename("subdir.ephemeral", "subdir.moved");
fail += check_execveat(subdir_dfd_ephemeral, "../script", 0);
fail += check_execveat(subdir_dfd_ephemeral, "script", 0);
/* Remove the subdir and its contents */
unlink("subdir.moved/script");
unlink("subdir.moved");
/* Shell loads via deleted subdir OK because name starts with .. */
fail += check_execveat(subdir_dfd_ephemeral, "../script", 0);
fail += check_execveat_fail(subdir_dfd_ephemeral, "script", 0, ENOENT);
/* Flag values other than AT_SYMLINK_NOFOLLOW => EINVAL */
fail += check_execveat_fail(dot_dfd, "execveat", 0xFFFF, EINVAL);
/* Invalid path => ENOENT */
fail += check_execveat_fail(dot_dfd, "no-such-file", 0, ENOENT);
fail += check_execveat_fail(dot_dfd_path, "no-such-file", 0, ENOENT);
fail += check_execveat_fail(AT_FDCWD, "no-such-file", 0, ENOENT);
/* Attempt to execute directory => EACCES */
fail += check_execveat_fail(dot_dfd, "", AT_EMPTY_PATH, EACCES);
/* Attempt to execute non-executable => EACCES */
fail += check_execveat_fail(dot_dfd, "Makefile", 0, EACCES);
fail += check_execveat_fail(fd_denatured, "", AT_EMPTY_PATH, EACCES);
fail += check_execveat_fail(fd_denatured_path, "", AT_EMPTY_PATH,
EACCES);
/* Attempt to execute nonsense FD => EBADF */
fail += check_execveat_fail(99, "", AT_EMPTY_PATH, EBADF);
fail += check_execveat_fail(99, "execveat", 0, EBADF);
/* Attempt to execute relative to non-directory => ENOTDIR */
fail += check_execveat_fail(fd, "execveat", 0, ENOTDIR);
fail += check_execveat_pathmax(root_dfd, "execveat", 0);
fail += check_execveat_pathmax(root_dfd, "script", 1);
return fail;
}
static void prerequisites(void)
{
int fd;
const char *script = "#!/bin/sh\nexit $*\n";
/* Create ephemeral copies of files */
exe_cp("execveat", "execveat.ephemeral");
exe_cp("execveat", "execveat.path.ephemeral");
exe_cp("script", "script.ephemeral");
mkdir("subdir.ephemeral", 0755);
fd = open("subdir.ephemeral/script", O_RDWR|O_CREAT|O_TRUNC, 0755);
write(fd, script, strlen(script));
close(fd);
mkfifo("pipe", 0755);
}
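/*
 * prerequisites() recreates the disposable fixtures consumed by the tests
 * above: ephemeral copies of the binary and wrapper script (renamed or
 * unlinked while their fds stay open), a script inside an ephemeral
 * subdirectory, and a fifo for the non-regular-file case.
 */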
int main(int argc, char **argv)
{
int ii;
int rc;
const char *verbose = getenv("VERBOSE");
if (argc >= 2) {
/* If we are invoked with an argument, don't run tests. */
const char *in_test = getenv("IN_TEST");
if (verbose) {
printf(" invoked with:");
for (ii = 0; ii < argc; ii++)
printf(" [%d]='%s'", ii, argv[ii]);
printf("\n");
}
/* Check expected environment transferred. */
if (!in_test || strcmp(in_test, "yes") != 0) {
printf("[FAIL] (no IN_TEST=yes in env)\n");
return 1;
}
/* Use the final argument as an exit code. */
rc = atoi(argv[argc - 1]);
fflush(stdout);
} else {
prerequisites();
if (verbose)
envp[1] = "VERBOSE=1";
rc = run_tests();
if (rc > 0)
printf("%d tests failed\n", rc);
}
return rc;
}
| linux-master | tools/testing/selftests/exec/execveat.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Test that empty argvs are swapped out for a single empty string. */
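/*
 * Background: when userspace passes an empty or NULL argv to execve(), the
 * kernel substitutes a single empty string so that argv[0] always exists and
 * argc is at least 1. The early checks in main() below rely on exactly that:
 * a re-executed copy of this binary must never observe argc == 0.
 */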
#include <stdio.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/wait.h>
#include "../kselftest.h"
#define FORK(exec) \
do { \
pid = fork(); \
if (pid == 0) { \
/* Child */ \
exec; /* Some kind of exec */ \
perror("# " #exec); \
return 1; \
} \
check_result(pid, #exec); \
} while (0)
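/*
 * FORK() wraps one execve() variant: the child re-executes this same binary
 * (which exits early via the argc checks in main()), while the parent reaps
 * it in check_result() and reports a kselftest pass/fail for that variant.
 */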
void check_result(pid_t pid, const char *msg)
{
int wstatus;
if (pid == (pid_t)-1) {
perror("# fork");
ksft_test_result_fail("fork failed: %s\n", msg);
return;
}
if (waitpid(pid, &wstatus, 0) < 0) {
perror("# waitpid");
ksft_test_result_fail("waitpid failed: %s\n", msg);
return;
}
if (!WIFEXITED(wstatus)) {
ksft_test_result_fail("child did not exit: %s\n", msg);
return;
}
if (WEXITSTATUS(wstatus) != 0) {
ksft_test_result_fail("non-zero exit: %s\n", msg);
return;
}
ksft_test_result_pass("%s\n", msg);
}
int main(int argc, char *argv[], char *envp[])
{
pid_t pid;
static char * const args[] = { NULL };
static char * const str[] = { "", NULL };
/* argc counting checks */
if (argc < 1) {
fprintf(stderr, "# FAIL: saw argc == 0 (old kernel?)\n");
return 1;
}
if (argc != 1) {
fprintf(stderr, "# FAIL: unknown argc (%d)\n", argc);
return 1;
}
if (argv[0][0] == '\0') {
/* Good, we found a NULL terminated string at argv[0]! */
return 0;
}
/* Test runner. */
ksft_print_header();
ksft_set_plan(5);
FORK(execve(argv[0], str, NULL));
FORK(execve(argv[0], NULL, NULL));
FORK(execve(argv[0], NULL, envp));
FORK(execve(argv[0], args, NULL));
FORK(execve(argv[0], args, envp));
ksft_exit(ksft_cnt.ksft_pass == ksft_plan);
}
| linux-master | tools/testing/selftests/exec/null-argv.c |
// SPDX-License-Identifier: GPL-2.0-only
#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#include <link.h>
#include <stdio.h>
#include <stdlib.h>
struct Statistics {
unsigned long long load_address;
unsigned long long alignment;
};
int ExtractStatistics(struct dl_phdr_info *info, size_t size, void *data)
{
struct Statistics *stats = (struct Statistics *) data;
int i;
if (info->dlpi_name != NULL && info->dlpi_name[0] != '\0') {
// Ignore headers from other than the executable.
return 2;
}
stats->load_address = (unsigned long long) info->dlpi_addr;
stats->alignment = 0;
for (i = 0; i < info->dlpi_phnum; i++) {
if (info->dlpi_phdr[i].p_type != PT_LOAD)
continue;
if (info->dlpi_phdr[i].p_align > stats->alignment)
stats->alignment = info->dlpi_phdr[i].p_align;
}
return 1; // Terminate dl_iterate_phdr.
}
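/*
 * The check in main() below verifies that the ELF loader honoured the largest
 * PT_LOAD p_align of this (position-independent) executable: the randomized
 * load address must be a multiple of that alignment, otherwise mappings that
 * request huge-page alignment could not be satisfied.
 */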
int main(int argc, char **argv)
{
struct Statistics extracted;
unsigned long long misalign;
int ret;
ret = dl_iterate_phdr(ExtractStatistics, &extracted);
if (ret != 1) {
fprintf(stderr, "FAILED\n");
return 1;
}
if (extracted.alignment == 0) {
fprintf(stderr, "No alignment found\n");
return 1;
} else if (extracted.alignment & (extracted.alignment - 1)) {
fprintf(stderr, "Alignment is not a power of 2\n");
return 1;
}
misalign = extracted.load_address & (extracted.alignment - 1);
if (misalign) {
printf("alignment = %llu, load_address = %llu\n",
extracted.alignment, extracted.load_address);
fprintf(stderr, "FAILED\n");
return 1;
}
fprintf(stderr, "PASS\n");
return 0;
}
| linux-master | tools/testing/selftests/exec/load_address.c |
// SPDX-License-Identifier: GPL-2.0+
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/sysmacros.h>
#include <sys/types.h>
#include "../kselftest_harness.h"
/* Remove a file, ignoring the result if it didn't exist. */
void rm(struct __test_metadata *_metadata, const char *pathname,
int is_dir)
{
int rc;
if (is_dir)
rc = rmdir(pathname);
else
rc = unlink(pathname);
if (rc < 0) {
ASSERT_EQ(errno, ENOENT) {
TH_LOG("Not ENOENT: %s", pathname);
}
} else {
ASSERT_EQ(rc, 0) {
TH_LOG("Failed to remove: %s", pathname);
}
}
}
FIXTURE(file) {
char *pathname;
int is_dir;
};
FIXTURE_VARIANT(file)
{
const char *name;
int expected;
int is_dir;
void (*setup)(struct __test_metadata *_metadata,
FIXTURE_DATA(file) *self,
const FIXTURE_VARIANT(file) *variant);
int major, minor, mode; /* for mknod() */
};
void setup_link(struct __test_metadata *_metadata,
FIXTURE_DATA(file) *self,
const FIXTURE_VARIANT(file) *variant)
{
const char * const paths[] = {
"/bin/true",
"/usr/bin/true",
};
int i;
for (i = 0; i < ARRAY_SIZE(paths); i++) {
if (access(paths[i], X_OK) == 0) {
ASSERT_EQ(symlink(paths[i], self->pathname), 0);
return;
}
}
ASSERT_EQ(1, 0) {
TH_LOG("Could not find viable 'true' binary");
}
}
FIXTURE_VARIANT_ADD(file, S_IFLNK)
{
.name = "S_IFLNK",
.expected = ELOOP,
.setup = setup_link,
};
void setup_dir(struct __test_metadata *_metadata,
FIXTURE_DATA(file) *self,
const FIXTURE_VARIANT(file) *variant)
{
ASSERT_EQ(mkdir(self->pathname, 0755), 0);
}
FIXTURE_VARIANT_ADD(file, S_IFDIR)
{
.name = "S_IFDIR",
.is_dir = 1,
.expected = EACCES,
.setup = setup_dir,
};
void setup_node(struct __test_metadata *_metadata,
FIXTURE_DATA(file) *self,
const FIXTURE_VARIANT(file) *variant)
{
dev_t dev;
int rc;
dev = makedev(variant->major, variant->minor);
rc = mknod(self->pathname, 0755 | variant->mode, dev);
ASSERT_EQ(rc, 0) {
if (errno == EPERM)
SKIP(return, "Please run as root; cannot mknod(%s)",
variant->name);
}
}
FIXTURE_VARIANT_ADD(file, S_IFBLK)
{
.name = "S_IFBLK",
.expected = EACCES,
.setup = setup_node,
/* /dev/loop0 */
.major = 7,
.minor = 0,
.mode = S_IFBLK,
};
FIXTURE_VARIANT_ADD(file, S_IFCHR)
{
.name = "S_IFCHR",
.expected = EACCES,
.setup = setup_node,
/* /dev/zero */
.major = 1,
.minor = 5,
.mode = S_IFCHR,
};
void setup_fifo(struct __test_metadata *_metadata,
FIXTURE_DATA(file) *self,
const FIXTURE_VARIANT(file) *variant)
{
ASSERT_EQ(mkfifo(self->pathname, 0755), 0);
}
FIXTURE_VARIANT_ADD(file, S_IFIFO)
{
.name = "S_IFIFO",
.expected = EACCES,
.setup = setup_fifo,
};
FIXTURE_SETUP(file)
{
ASSERT_GT(asprintf(&self->pathname, "%s.test", variant->name), 6);
self->is_dir = variant->is_dir;
rm(_metadata, self->pathname, variant->is_dir);
variant->setup(_metadata, self, variant);
}
FIXTURE_TEARDOWN(file)
{
rm(_metadata, self->pathname, self->is_dir);
}
TEST_F(file, exec_errno)
{
char * const argv[2] = { (char * const)self->pathname, NULL };
EXPECT_LT(execv(argv[0], argv), 0);
EXPECT_EQ(errno, variant->expected);
}
/* S_IFSOCK */
FIXTURE(sock)
{
int fd;
};
FIXTURE_SETUP(sock)
{
self->fd = socket(AF_INET, SOCK_STREAM, 0);
ASSERT_GE(self->fd, 0);
}
FIXTURE_TEARDOWN(sock)
{
if (self->fd >= 0)
ASSERT_EQ(close(self->fd), 0);
}
TEST_F(sock, exec_errno)
{
char * const argv[2] = { " magic socket ", NULL };
char * const envp[1] = { NULL };
EXPECT_LT(fexecve(self->fd, argv, envp), 0);
EXPECT_EQ(errno, EACCES);
}
TEST_HARNESS_MAIN
| linux-master | tools/testing/selftests/exec/non-regular.c |
// SPDX-License-Identifier: GPL-2.0
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <errno.h>
#include <string.h>
int main(int argc, char **argv)
{
const char *path;
char buf[4];
int fd, rc;
if (argc < 2) {
fprintf(stderr, "usage: %s <path>\n", argv[0]);
return EXIT_FAILURE;
}
path = argv[1];
/* create a test variable */
fd = open(path, O_RDWR | O_CREAT, 0600);
if (fd < 0) {
perror("open(O_WRONLY)");
return EXIT_FAILURE;
}
rc = read(fd, buf, sizeof(buf));
if (rc != 0) {
fprintf(stderr, "Reading a new var should return EOF\n");
close(fd);
return EXIT_FAILURE;
}
close(fd);
return EXIT_SUCCESS;
}
| linux-master | tools/testing/selftests/efivarfs/create-read.c |
// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <linux/fs.h>
static int set_immutable(const char *path, int immutable)
{
unsigned int flags;
int fd;
int rc;
int error;
fd = open(path, O_RDONLY);
if (fd < 0)
return fd;
rc = ioctl(fd, FS_IOC_GETFLAGS, &flags);
if (rc < 0) {
error = errno;
close(fd);
errno = error;
return rc;
}
if (immutable)
flags |= FS_IMMUTABLE_FL;
else
flags &= ~FS_IMMUTABLE_FL;
rc = ioctl(fd, FS_IOC_SETFLAGS, &flags);
error = errno;
close(fd);
errno = error;
return rc;
}
static int get_immutable(const char *path)
{
unsigned int flags;
int fd;
int rc;
int error;
fd = open(path, O_RDONLY);
if (fd < 0)
return fd;
rc = ioctl(fd, FS_IOC_GETFLAGS, &flags);
if (rc < 0) {
error = errno;
close(fd);
errno = error;
return rc;
}
close(fd);
if (flags & FS_IMMUTABLE_FL)
return 1;
return 0;
}
int main(int argc, char **argv)
{
const char *path;
char buf[5];
int fd, rc;
if (argc < 2) {
fprintf(stderr, "usage: %s <path>\n", argv[0]);
return EXIT_FAILURE;
}
path = argv[1];
/* attributes: EFI_VARIABLE_NON_VOLATILE |
* EFI_VARIABLE_BOOTSERVICE_ACCESS |
* EFI_VARIABLE_RUNTIME_ACCESS
*/
*(uint32_t *)buf = 0x7;
buf[4] = 0;
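/*
 * efivarfs file layout: the first 4 bytes written are the variable's
 * attributes (little-endian u32), everything after that is the payload.
 * Here the payload is a single zero byte.
 */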
/* create a test variable */
fd = open(path, O_WRONLY | O_CREAT, 0600);
if (fd < 0) {
perror("open(O_WRONLY)");
return EXIT_FAILURE;
}
rc = write(fd, buf, sizeof(buf));
if (rc != sizeof(buf)) {
perror("write");
return EXIT_FAILURE;
}
close(fd);
rc = get_immutable(path);
if (rc < 0) {
perror("ioctl(FS_IOC_GETFLAGS)");
return EXIT_FAILURE;
} else if (rc) {
rc = set_immutable(path, 0);
if (rc < 0) {
perror("ioctl(FS_IOC_SETFLAGS)");
return EXIT_FAILURE;
}
}
fd = open(path, O_RDONLY);
if (fd < 0) {
perror("open");
return EXIT_FAILURE;
}
if (unlink(path) < 0) {
perror("unlink");
return EXIT_FAILURE;
}
rc = read(fd, buf, sizeof(buf));
if (rc > 0) {
fprintf(stderr, "reading from an unlinked variable "
"shouldn't be possible\n");
return EXIT_FAILURE;
}
return EXIT_SUCCESS;
}
| linux-master | tools/testing/selftests/efivarfs/open-unlink.c |
// SPDX-License-Identifier: GPL-2.0
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <errno.h>
#include <string.h>
#include <sys/types.h>
#include <sys/mount.h>
#include <sys/wait.h>
#include <sys/vfs.h>
#include <sys/statvfs.h>
#include <stdlib.h>
#include <unistd.h>
#include <fcntl.h>
#include <grp.h>
#include <stdbool.h>
#include <stdarg.h>
#ifndef CLONE_NEWNS
# define CLONE_NEWNS 0x00020000
#endif
#ifndef CLONE_NEWUTS
# define CLONE_NEWUTS 0x04000000
#endif
#ifndef CLONE_NEWIPC
# define CLONE_NEWIPC 0x08000000
#endif
#ifndef CLONE_NEWNET
# define CLONE_NEWNET 0x40000000
#endif
#ifndef CLONE_NEWUSER
# define CLONE_NEWUSER 0x10000000
#endif
#ifndef CLONE_NEWPID
# define CLONE_NEWPID 0x20000000
#endif
#ifndef MS_REC
# define MS_REC 16384
#endif
#ifndef MS_RELATIME
# define MS_RELATIME (1 << 21)
#endif
#ifndef MS_STRICTATIME
# define MS_STRICTATIME (1 << 24)
#endif
static void die(char *fmt, ...)
{
va_list ap;
va_start(ap, fmt);
vfprintf(stderr, fmt, ap);
va_end(ap);
exit(EXIT_FAILURE);
}
static void vmaybe_write_file(bool enoent_ok, char *filename, char *fmt, va_list ap)
{
char buf[4096];
int fd;
ssize_t written;
int buf_len;
buf_len = vsnprintf(buf, sizeof(buf), fmt, ap);
if (buf_len < 0) {
die("vsnprintf failed: %s\n",
strerror(errno));
}
if (buf_len >= sizeof(buf)) {
die("vsnprintf output truncated\n");
}
fd = open(filename, O_WRONLY);
if (fd < 0) {
if ((errno == ENOENT) && enoent_ok)
return;
die("open of %s failed: %s\n",
filename, strerror(errno));
}
written = write(fd, buf, buf_len);
if (written != buf_len) {
if (written >= 0) {
die("short write to %s\n", filename);
} else {
die("write to %s failed: %s\n",
filename, strerror(errno));
}
}
if (close(fd) != 0) {
die("close of %s failed: %s\n",
filename, strerror(errno));
}
}
static void maybe_write_file(char *filename, char *fmt, ...)
{
va_list ap;
va_start(ap, fmt);
vmaybe_write_file(true, filename, fmt, ap);
va_end(ap);
}
static void write_file(char *filename, char *fmt, ...)
{
va_list ap;
va_start(ap, fmt);
vmaybe_write_file(false, filename, fmt, ap);
va_end(ap);
}
static int read_mnt_flags(const char *path)
{
int ret;
struct statvfs stat;
int mnt_flags;
ret = statvfs(path, &stat);
if (ret != 0) {
die("statvfs of %s failed: %s\n",
path, strerror(errno));
}
if (stat.f_flag & ~(ST_RDONLY | ST_NOSUID | ST_NODEV | \
ST_NOEXEC | ST_NOATIME | ST_NODIRATIME | ST_RELATIME | \
ST_SYNCHRONOUS | ST_MANDLOCK)) {
die("Unrecognized mount flags\n");
}
mnt_flags = 0;
if (stat.f_flag & ST_RDONLY)
mnt_flags |= MS_RDONLY;
if (stat.f_flag & ST_NOSUID)
mnt_flags |= MS_NOSUID;
if (stat.f_flag & ST_NODEV)
mnt_flags |= MS_NODEV;
if (stat.f_flag & ST_NOEXEC)
mnt_flags |= MS_NOEXEC;
if (stat.f_flag & ST_NOATIME)
mnt_flags |= MS_NOATIME;
if (stat.f_flag & ST_NODIRATIME)
mnt_flags |= MS_NODIRATIME;
if (stat.f_flag & ST_RELATIME)
mnt_flags |= MS_RELATIME;
if (stat.f_flag & ST_SYNCHRONOUS)
mnt_flags |= MS_SYNCHRONOUS;
if (stat.f_flag & ST_MANDLOCK)
mnt_flags |= MS_MANDLOCK;
return mnt_flags;
}
static void create_and_enter_userns(void)
{
uid_t uid;
gid_t gid;
uid = getuid();
gid = getgid();
if (unshare(CLONE_NEWUSER) !=0) {
die("unshare(CLONE_NEWUSER) failed: %s\n",
strerror(errno));
}
maybe_write_file("/proc/self/setgroups", "deny");
write_file("/proc/self/uid_map", "0 %d 1", uid);
write_file("/proc/self/gid_map", "0 %d 1", gid);
if (setgid(0) != 0) {
die ("setgid(0) failed %s\n",
strerror(errno));
}
if (setuid(0) != 0) {
die("setuid(0) failed %s\n",
strerror(errno));
}
}
static
bool test_unpriv_remount(const char *fstype, const char *mount_options,
int mount_flags, int remount_flags, int invalid_flags)
{
pid_t child;
child = fork();
if (child == -1) {
die("fork failed: %s\n",
strerror(errno));
}
if (child != 0) { /* parent */
pid_t pid;
int status;
pid = waitpid(child, &status, 0);
if (pid == -1) {
die("waitpid failed: %s\n",
strerror(errno));
}
if (pid != child) {
die("waited for %d got %d\n",
child, pid);
}
if (!WIFEXITED(status)) {
die("child did not terminate cleanly\n");
}
return WEXITSTATUS(status) == EXIT_SUCCESS;
}
create_and_enter_userns();
if (unshare(CLONE_NEWNS) != 0) {
die("unshare(CLONE_NEWNS) failed: %s\n",
strerror(errno));
}
if (mount("testing", "/tmp", fstype, mount_flags, mount_options) != 0) {
die("mount of %s with options '%s' on /tmp failed: %s\n",
fstype,
mount_options? mount_options : "",
strerror(errno));
}
create_and_enter_userns();
if (unshare(CLONE_NEWNS) != 0) {
die("unshare(CLONE_NEWNS) failed: %s\n",
strerror(errno));
}
if (mount("/tmp", "/tmp", "none",
MS_REMOUNT | MS_BIND | remount_flags, NULL) != 0) {
/* system("cat /proc/self/mounts"); */
die("remount of /tmp failed: %s\n",
strerror(errno));
}
if (mount("/tmp", "/tmp", "none",
MS_REMOUNT | MS_BIND | invalid_flags, NULL) == 0) {
/* system("cat /proc/self/mounts"); */
die("remount of /tmp with invalid flags "
"succeeded unexpectedly\n");
}
exit(EXIT_SUCCESS);
}
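/*
 * test_unpriv_remount(): mount with mount_flags in one user+mount namespace,
 * then, from a nested unprivileged namespace, verify that a bind remount
 * which keeps those flags (remount_flags) succeeds while one that would
 * clear locked flags (invalid_flags) is rejected.
 */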
static bool test_unpriv_remount_simple(int mount_flags)
{
return test_unpriv_remount("ramfs", NULL, mount_flags, mount_flags, 0);
}
static bool test_unpriv_remount_atime(int mount_flags, int invalid_flags)
{
return test_unpriv_remount("ramfs", NULL, mount_flags, mount_flags,
invalid_flags);
}
static bool test_priv_mount_unpriv_remount(void)
{
pid_t child;
int ret;
const char *orig_path = "/dev";
const char *dest_path = "/tmp";
int orig_mnt_flags, remount_mnt_flags;
child = fork();
if (child == -1) {
die("fork failed: %s\n",
strerror(errno));
}
if (child != 0) { /* parent */
pid_t pid;
int status;
pid = waitpid(child, &status, 0);
if (pid == -1) {
die("waitpid failed: %s\n",
strerror(errno));
}
if (pid != child) {
die("waited for %d got %d\n",
child, pid);
}
if (!WIFEXITED(status)) {
die("child did not terminate cleanly\n");
}
return WEXITSTATUS(status) == EXIT_SUCCESS;
}
orig_mnt_flags = read_mnt_flags(orig_path);
create_and_enter_userns();
ret = unshare(CLONE_NEWNS);
if (ret != 0) {
die("unshare(CLONE_NEWNS) failed: %s\n",
strerror(errno));
}
ret = mount(orig_path, dest_path, "bind", MS_BIND | MS_REC, NULL);
if (ret != 0) {
die("recursive bind mount of %s onto %s failed: %s\n",
orig_path, dest_path, strerror(errno));
}
ret = mount(dest_path, dest_path, "none",
MS_REMOUNT | MS_BIND | orig_mnt_flags , NULL);
if (ret != 0) {
/* system("cat /proc/self/mounts"); */
die("remount of /tmp failed: %s\n",
strerror(errno));
}
remount_mnt_flags = read_mnt_flags(dest_path);
if (orig_mnt_flags != remount_mnt_flags) {
die("Mount flags unexpectedly changed during remount of %s originally mounted on %s\n",
dest_path, orig_path);
}
exit(EXIT_SUCCESS);
}
int main(int argc, char **argv)
{
if (!test_unpriv_remount_simple(MS_RDONLY)) {
die("MS_RDONLY malfunctions\n");
}
if (!test_unpriv_remount("devpts", "newinstance", MS_NODEV, MS_NODEV, 0)) {
die("MS_NODEV malfunctions\n");
}
if (!test_unpriv_remount_simple(MS_NOSUID)) {
die("MS_NOSUID malfunctions\n");
}
if (!test_unpriv_remount_simple(MS_NOEXEC)) {
die("MS_NOEXEC malfunctions\n");
}
if (!test_unpriv_remount_atime(MS_RELATIME,
MS_NOATIME))
{
die("MS_RELATIME malfunctions\n");
}
if (!test_unpriv_remount_atime(MS_STRICTATIME,
MS_NOATIME))
{
die("MS_STRICTATIME malfunctions\n");
}
if (!test_unpriv_remount_atime(MS_NOATIME,
MS_STRICTATIME))
{
die("MS_NOATIME malfunctions\n");
}
if (!test_unpriv_remount_atime(MS_RELATIME|MS_NODIRATIME,
MS_NOATIME))
{
die("MS_RELATIME|MS_NODIRATIME malfunctions\n");
}
if (!test_unpriv_remount_atime(MS_STRICTATIME|MS_NODIRATIME,
MS_NOATIME))
{
die("MS_STRICTATIME|MS_NODIRATIME malfunctions\n");
}
if (!test_unpriv_remount_atime(MS_NOATIME|MS_NODIRATIME,
MS_STRICTATIME))
{
die("MS_NOATIME|MS_DIRATIME malfunctions\n");
}
if (!test_unpriv_remount("ramfs", NULL, MS_STRICTATIME, 0, MS_NOATIME))
{
die("Default atime malfunctions\n");
}
if (!test_priv_mount_unpriv_remount()) {
die("Mount flags unexpectedly changed after remount\n");
}
return EXIT_SUCCESS;
}
| linux-master | tools/testing/selftests/mount/unprivileged-remount-test.c |
// SPDX-License-Identifier: GPL-2.0
#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <limits.h>
#include <sched.h>
#include <stdarg.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mount.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/vfs.h>
#include <unistd.h>
#ifndef MS_NOSYMFOLLOW
# define MS_NOSYMFOLLOW 256 /* Do not follow symlinks */
#endif
#ifndef ST_NOSYMFOLLOW
# define ST_NOSYMFOLLOW 0x2000 /* Do not follow symlinks */
#endif
#define DATA "/tmp/data"
#define LINK "/tmp/symlink"
#define TMP "/tmp"
static void die(char *fmt, ...)
{
va_list ap;
va_start(ap, fmt);
vfprintf(stderr, fmt, ap);
va_end(ap);
exit(EXIT_FAILURE);
}
static void vmaybe_write_file(bool enoent_ok, char *filename, char *fmt,
va_list ap)
{
ssize_t written;
char buf[4096];
int buf_len;
int fd;
buf_len = vsnprintf(buf, sizeof(buf), fmt, ap);
if (buf_len < 0)
die("vsnprintf failed: %s\n", strerror(errno));
if (buf_len >= sizeof(buf))
die("vsnprintf output truncated\n");
fd = open(filename, O_WRONLY);
if (fd < 0) {
if ((errno == ENOENT) && enoent_ok)
return;
die("open of %s failed: %s\n", filename, strerror(errno));
}
written = write(fd, buf, buf_len);
if (written != buf_len) {
if (written >= 0) {
die("short write to %s\n", filename);
} else {
die("write to %s failed: %s\n",
filename, strerror(errno));
}
}
if (close(fd) != 0)
die("close of %s failed: %s\n", filename, strerror(errno));
}
static void maybe_write_file(char *filename, char *fmt, ...)
{
va_list ap;
va_start(ap, fmt);
vmaybe_write_file(true, filename, fmt, ap);
va_end(ap);
}
static void write_file(char *filename, char *fmt, ...)
{
va_list ap;
va_start(ap, fmt);
vmaybe_write_file(false, filename, fmt, ap);
va_end(ap);
}
static void create_and_enter_ns(void)
{
uid_t uid = getuid();
gid_t gid = getgid();
if (unshare(CLONE_NEWUSER) != 0)
die("unshare(CLONE_NEWUSER) failed: %s\n", strerror(errno));
maybe_write_file("/proc/self/setgroups", "deny");
write_file("/proc/self/uid_map", "0 %d 1", uid);
write_file("/proc/self/gid_map", "0 %d 1", gid);
if (setgid(0) != 0)
die("setgid(0) failed %s\n", strerror(errno));
if (setuid(0) != 0)
die("setuid(0) failed %s\n", strerror(errno));
if (unshare(CLONE_NEWNS) != 0)
die("unshare(CLONE_NEWNS) failed: %s\n", strerror(errno));
}
static void setup_symlink(void)
{
int data, err;
data = creat(DATA, 0644);
if (data < 0)
die("creat failed: %s\n", strerror(errno));
err = symlink(DATA, LINK);
if (err < 0)
die("symlink failed: %s\n", strerror(errno));
if (close(data) != 0)
die("close of %s failed: %s\n", DATA, strerror(errno));
}
static void test_link_traversal(bool nosymfollow)
{
int link;
link = open(LINK, O_RDWR);
if (nosymfollow) {
if ((link != -1 || errno != ELOOP)) {
die("link traversal unexpected result: %d, %s\n",
link, strerror(errno));
}
} else {
if (link < 0)
die("link traversal failed: %s\n", strerror(errno));
if (close(link) != 0)
die("close of link failed: %s\n", strerror(errno));
}
}
static void test_readlink(void)
{
char buf[4096];
ssize_t ret;
bzero(buf, sizeof(buf));
ret = readlink(LINK, buf, sizeof(buf));
if (ret < 0)
die("readlink failed: %s\n", strerror(errno));
if (strcmp(buf, DATA) != 0)
die("readlink strcmp failed: '%s' '%s'\n", buf, DATA);
}
static void test_realpath(void)
{
char *path = realpath(LINK, NULL);
if (!path)
die("realpath failed: %s\n", strerror(errno));
if (strcmp(path, DATA) != 0)
die("realpath strcmp failed\n");
free(path);
}
static void test_statfs(bool nosymfollow)
{
struct statfs buf;
int ret;
ret = statfs(TMP, &buf);
if (ret)
die("statfs failed: %s\n", strerror(errno));
if (nosymfollow) {
if ((buf.f_flags & ST_NOSYMFOLLOW) == 0)
die("ST_NOSYMFOLLOW not set on %s\n", TMP);
} else {
if ((buf.f_flags & ST_NOSYMFOLLOW) != 0)
die("ST_NOSYMFOLLOW set on %s\n", TMP);
}
}
static void run_tests(bool nosymfollow)
{
test_link_traversal(nosymfollow);
test_readlink();
test_realpath();
test_statfs(nosymfollow);
}
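/*
 * Note: MS_NOSYMFOLLOW only forbids following a symlink as the final path
 * component during path resolution (so open() fails with ELOOP). Operating
 * on the link itself - readlink(), realpath() - keeps working, which is why
 * those tests are expected to pass in both invocations of run_tests().
 */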
int main(int argc, char **argv)
{
create_and_enter_ns();
if (mount("testing", TMP, "ramfs", 0, NULL) != 0)
die("mount failed: %s\n", strerror(errno));
setup_symlink();
run_tests(false);
if (mount("testing", TMP, "ramfs", MS_REMOUNT|MS_NOSYMFOLLOW, NULL) != 0)
die("remount failed: %s\n", strerror(errno));
run_tests(true);
return EXIT_SUCCESS;
}
| linux-master | tools/testing/selftests/mount/nosymfollow-test.c |
// SPDX-License-Identifier: GPL-2.0
/*
* selftest for sparc64's privileged ADI driver
*
* Author: Tom Hromatka <[email protected]>
*/
#include <linux/kernel.h>
#include <errno.h>
#include <fcntl.h>
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include "../../kselftest.h"
#define DEBUG_LEVEL_1_BIT (0x0001)
#define DEBUG_LEVEL_2_BIT (0x0002)
#define DEBUG_LEVEL_3_BIT (0x0004)
#define DEBUG_LEVEL_4_BIT (0x0008)
#define DEBUG_TIMING_BIT (0x1000)
/* bit mask of enabled bits to print */
#define DEBUG 0x0001
#define DEBUG_PRINT_L1(...) debug_print(DEBUG_LEVEL_1_BIT, __VA_ARGS__)
#define DEBUG_PRINT_L2(...) debug_print(DEBUG_LEVEL_2_BIT, __VA_ARGS__)
#define DEBUG_PRINT_L3(...) debug_print(DEBUG_LEVEL_3_BIT, __VA_ARGS__)
#define DEBUG_PRINT_L4(...) debug_print(DEBUG_LEVEL_4_BIT, __VA_ARGS__)
#define DEBUG_PRINT_T(...) debug_print(DEBUG_TIMING_BIT, __VA_ARGS__)
static void debug_print(int level, const char *s, ...)
{
va_list args;
va_start(args, s);
if (DEBUG & level)
vfprintf(stdout, s, args);
va_end(args);
}
#ifndef min
#define min(x, y) ((x) < (y) ? (x) : (y))
#endif
#define RETURN_FROM_TEST(_ret) \
do { \
DEBUG_PRINT_L1( \
"\tTest %s returned %d\n", __func__, _ret); \
return _ret; \
} while (0)
#define ADI_BLKSZ 64
#define ADI_MAX_VERSION 15
#define TEST_STEP_FAILURE(_ret) \
do { \
fprintf(stderr, "\tTest step failure: %d at %s:%d\n", \
_ret, __func__, __LINE__); \
goto out; \
} while (0)
#define RDTICK(_x) \
asm volatile(" rd %%tick, %0\n" : "=r" (_x))
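/* %tick is the per-cpu cycle counter; it doubles here as the timing source
 * for the syscall statistics and as a cheap pseudo-random seed in
 * random_version().
 */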
static int random_version(void)
{
long tick;
RDTICK(tick);
return tick % (ADI_MAX_VERSION + 1);
}
#define MAX_RANGES_SUPPORTED 5
static const char system_ram_str[] = "System RAM\n";
static int range_count;
static unsigned long long int start_addr[MAX_RANGES_SUPPORTED];
static unsigned long long int end_addr[MAX_RANGES_SUPPORTED];
struct stats {
char name[16];
unsigned long total;
unsigned long count;
unsigned long bytes;
};
static struct stats read_stats = {
.name = "read", .total = 0, .count = 0, .bytes = 0};
static struct stats pread_stats = {
.name = "pread", .total = 0, .count = 0, .bytes = 0};
static struct stats write_stats = {
.name = "write", .total = 0, .count = 0, .bytes = 0};
static struct stats pwrite_stats = {
.name = "pwrite", .total = 0, .count = 0, .bytes = 0};
static struct stats seek_stats = {
.name = "seek", .total = 0, .count = 0, .bytes = 0};
static void update_stats(struct stats * const ustats,
unsigned long measurement, unsigned long bytes)
{
ustats->total += measurement;
ustats->bytes += bytes;
ustats->count++;
}
static void print_ustats(const struct stats * const ustats)
{
DEBUG_PRINT_L1("%s\t%7d\t%7.0f\t%7.0f\n",
ustats->name, ustats->count,
(float)ustats->total / (float)ustats->count,
(float)ustats->bytes / (float)ustats->count);
}
static void print_stats(void)
{
DEBUG_PRINT_L1("\nSyscall\tCall\tAvgTime\tAvgSize\n"
"\tCount\t(ticks)\t(bytes)\n"
"-------------------------------\n");
print_ustats(&read_stats);
print_ustats(&pread_stats);
print_ustats(&write_stats);
print_ustats(&pwrite_stats);
print_ustats(&seek_stats);
}
static int build_memory_map(void)
{
char line[256];
FILE *fp;
int i;
range_count = 0;
fp = fopen("/proc/iomem", "r");
if (!fp) {
fprintf(stderr, "/proc/iomem: error %d: %s\n",
errno, strerror(errno));
return -errno;
}
while (fgets(line, sizeof(line), fp) != 0) {
if (strstr(line, system_ram_str)) {
char *dash, *end_ptr;
/* Given a line like this:
* d0400000-10ffaffff : System RAM
* replace the "-" with a space
*/
dash = strstr(line, "-");
dash[0] = 0x20;
start_addr[range_count] = strtoull(line, &end_ptr, 16);
end_addr[range_count] = strtoull(end_ptr, NULL, 16);
range_count++;
}
}
fclose(fp);
DEBUG_PRINT_L1("RAM Ranges\n");
for (i = 0; i < range_count; i++)
DEBUG_PRINT_L1("\trange %d: 0x%llx\t- 0x%llx\n",
i, start_addr[i], end_addr[i]);
if (range_count == 0) {
fprintf(stderr, "No valid address ranges found. Error.\n");
return -1;
}
return 0;
}
static int read_adi(int fd, unsigned char *buf, int buf_sz)
{
int ret, bytes_read = 0;
long start, end, elapsed_time = 0;
do {
RDTICK(start);
ret = read(fd, buf + bytes_read, buf_sz - bytes_read);
RDTICK(end);
if (ret < 0)
return -errno;
elapsed_time += end - start;
update_stats(&read_stats, elapsed_time, buf_sz);
bytes_read += ret;
} while (bytes_read < buf_sz);
DEBUG_PRINT_T("\tread elapsed timed = %ld\n", elapsed_time);
DEBUG_PRINT_L3("\tRead %d bytes\n", bytes_read);
return bytes_read;
}
static int pread_adi(int fd, unsigned char *buf,
int buf_sz, unsigned long offset)
{
int ret, i, bytes_read = 0;
unsigned long cur_offset;
long start, end, elapsed_time = 0;
cur_offset = offset;
do {
RDTICK(start);
ret = pread(fd, buf + bytes_read, buf_sz - bytes_read,
cur_offset);
RDTICK(end);
if (ret < 0)
return -errno;
elapsed_time += end - start;
update_stats(&pread_stats, elapsed_time, buf_sz);
bytes_read += ret;
cur_offset += ret;
} while (bytes_read < buf_sz);
DEBUG_PRINT_T("\tpread elapsed timed = %ld\n", elapsed_time);
DEBUG_PRINT_L3("\tRead %d bytes starting at offset 0x%lx\n",
bytes_read, offset);
for (i = 0; i < bytes_read; i++)
DEBUG_PRINT_L4("\t\t0x%lx\t%d\n", offset + i, buf[i]);
return bytes_read;
}
static int write_adi(int fd, const unsigned char * const buf, int buf_sz)
{
int ret, bytes_written = 0;
long start, end, elapsed_time = 0;
do {
RDTICK(start);
ret = write(fd, buf + bytes_written, buf_sz - bytes_written);
RDTICK(end);
if (ret < 0)
return -errno;
elapsed_time += (end - start);
update_stats(&write_stats, elapsed_time, buf_sz);
bytes_written += ret;
} while (bytes_written < buf_sz);
DEBUG_PRINT_T("\twrite elapsed timed = %ld\n", elapsed_time);
DEBUG_PRINT_L3("\tWrote %d of %d bytes\n", bytes_written, buf_sz);
return bytes_written;
}
static int pwrite_adi(int fd, const unsigned char * const buf,
int buf_sz, unsigned long offset)
{
int ret, bytes_written = 0;
unsigned long cur_offset;
long start, end, elapsed_time = 0;
cur_offset = offset;
do {
RDTICK(start);
ret = pwrite(fd, buf + bytes_written,
buf_sz - bytes_written, cur_offset);
RDTICK(end);
if (ret < 0) {
fprintf(stderr, "pwrite(): error %d: %s\n",
errno, strerror(errno));
return -errno;
}
elapsed_time += (end - start);
update_stats(&pwrite_stats, elapsed_time, buf_sz);
bytes_written += ret;
cur_offset += ret;
} while (bytes_written < buf_sz);
DEBUG_PRINT_T("\tpwrite elapsed timed = %ld\n", elapsed_time);
DEBUG_PRINT_L3("\tWrote %d of %d bytes starting at address 0x%lx\n",
bytes_written, buf_sz, offset);
return bytes_written;
}
static off_t seek_adi(int fd, off_t offset, int whence)
{
long start, end;
off_t ret;
RDTICK(start);
ret = lseek(fd, offset, whence);
RDTICK(end);
DEBUG_PRINT_L2("\tlseek ret = 0x%llx\n", ret);
if (ret < 0)
goto out;
DEBUG_PRINT_T("\tlseek elapsed timed = %ld\n", end - start);
update_stats(&seek_stats, end - start, 0);
out:
(void)lseek(fd, 0, SEEK_END);
return ret;
}
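/*
 * In the /dev/adi file each byte holds the ADI version tag of one
 * ADI_BLKSZ-byte (64-byte) block of physical memory, which is why the tests
 * below convert a physical address to a file offset with paddr / ADI_BLKSZ.
 */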
static int test0_prpw_aligned_1byte(int fd)
{
/* somewhat arbitrarily chosen address */
unsigned long paddr =
(end_addr[range_count - 1] - 0x1000) & ~(ADI_BLKSZ - 1);
unsigned char version[1], expected_version;
loff_t offset;
int ret;
version[0] = random_version();
expected_version = version[0];
offset = paddr / ADI_BLKSZ;
ret = pwrite_adi(fd, version, sizeof(version), offset);
if (ret != sizeof(version))
TEST_STEP_FAILURE(ret);
ret = pread_adi(fd, version, sizeof(version), offset);
if (ret != sizeof(version))
TEST_STEP_FAILURE(ret);
if (expected_version != version[0]) {
DEBUG_PRINT_L2("\tExpected version %d but read version %d\n",
expected_version, version[0]);
TEST_STEP_FAILURE(-expected_version);
}
ret = 0;
out:
RETURN_FROM_TEST(ret);
}
#define TEST1_VERSION_SZ 4096
static int test1_prpw_aligned_4096bytes(int fd)
{
/* somewhat arbitrarily chosen address */
unsigned long paddr =
(end_addr[range_count - 1] - 0x6000) & ~(ADI_BLKSZ - 1);
unsigned char version[TEST1_VERSION_SZ],
expected_version[TEST1_VERSION_SZ];
loff_t offset;
int ret, i;
for (i = 0; i < TEST1_VERSION_SZ; i++) {
version[i] = random_version();
expected_version[i] = version[i];
}
offset = paddr / ADI_BLKSZ;
ret = pwrite_adi(fd, version, sizeof(version), offset);
if (ret != sizeof(version))
TEST_STEP_FAILURE(ret);
ret = pread_adi(fd, version, sizeof(version), offset);
if (ret != sizeof(version))
TEST_STEP_FAILURE(ret);
for (i = 0; i < TEST1_VERSION_SZ; i++) {
if (expected_version[i] != version[i]) {
DEBUG_PRINT_L2(
"\tExpected version %d but read version %d\n",
expected_version[i], version[i]);
TEST_STEP_FAILURE(-expected_version[i]);
}
}
ret = 0;
out:
RETURN_FROM_TEST(ret);
}
#define TEST2_VERSION_SZ 10327
static int test2_prpw_aligned_10327bytes(int fd)
{
/* somewhat arbitrarily chosen address */
unsigned long paddr =
(start_addr[0] + 0x6000) & ~(ADI_BLKSZ - 1);
unsigned char version[TEST2_VERSION_SZ],
expected_version[TEST2_VERSION_SZ];
loff_t offset;
int ret, i;
for (i = 0; i < TEST2_VERSION_SZ; i++) {
version[i] = random_version();
expected_version[i] = version[i];
}
offset = paddr / ADI_BLKSZ;
ret = pwrite_adi(fd, version, sizeof(version), offset);
if (ret != sizeof(version))
TEST_STEP_FAILURE(ret);
ret = pread_adi(fd, version, sizeof(version), offset);
if (ret != sizeof(version))
TEST_STEP_FAILURE(ret);
for (i = 0; i < TEST2_VERSION_SZ; i++) {
if (expected_version[i] != version[i]) {
DEBUG_PRINT_L2(
"\tExpected version %d but read version %d\n",
expected_version[i], version[i]);
TEST_STEP_FAILURE(-expected_version[i]);
}
}
ret = 0;
out:
RETURN_FROM_TEST(ret);
}
#define TEST3_VERSION_SZ 12541
static int test3_prpw_unaligned_12541bytes(int fd)
{
/* somewhat arbitrarily chosen address */
unsigned long paddr =
((start_addr[0] + 0xC000) & ~(ADI_BLKSZ - 1)) + 17;
unsigned char version[TEST3_VERSION_SZ],
expected_version[TEST3_VERSION_SZ];
loff_t offset;
int ret, i;
for (i = 0; i < TEST3_VERSION_SZ; i++) {
version[i] = random_version();
expected_version[i] = version[i];
}
offset = paddr / ADI_BLKSZ;
ret = pwrite_adi(fd, version, sizeof(version), offset);
if (ret != sizeof(version))
TEST_STEP_FAILURE(ret);
ret = pread_adi(fd, version, sizeof(version), offset);
if (ret != sizeof(version))
TEST_STEP_FAILURE(ret);
for (i = 0; i < TEST3_VERSION_SZ; i++) {
if (expected_version[i] != version[i]) {
DEBUG_PRINT_L2(
"\tExpected version %d but read version %d\n",
expected_version[i], version[i]);
TEST_STEP_FAILURE(-expected_version[i]);
}
}
ret = 0;
out:
RETURN_FROM_TEST(ret);
}
static int test4_lseek(int fd)
{
#define OFFSET_ADD (0x100)
#define OFFSET_SUBTRACT (0xFFFFFFF000000000)
off_t offset_out, offset_in;
int ret;
offset_in = 0x123456789abcdef0;
offset_out = seek_adi(fd, offset_in, SEEK_SET);
if (offset_out != offset_in) {
ret = -1;
TEST_STEP_FAILURE(ret);
}
/* seek to the current offset. this should return EINVAL */
offset_out = seek_adi(fd, offset_in, SEEK_SET);
if (offset_out < 0 && errno == EINVAL)
DEBUG_PRINT_L2(
"\tSEEK_SET failed as designed. Not an error\n");
else {
ret = -2;
TEST_STEP_FAILURE(ret);
}
offset_out = seek_adi(fd, 0, SEEK_CUR);
if (offset_out != offset_in) {
ret = -3;
TEST_STEP_FAILURE(ret);
}
offset_out = seek_adi(fd, OFFSET_ADD, SEEK_CUR);
if (offset_out != (offset_in + OFFSET_ADD)) {
ret = -4;
TEST_STEP_FAILURE(ret);
}
offset_out = seek_adi(fd, OFFSET_SUBTRACT, SEEK_CUR);
if (offset_out != (offset_in + OFFSET_ADD + OFFSET_SUBTRACT)) {
ret = -5;
TEST_STEP_FAILURE(ret);
}
ret = 0;
out:
RETURN_FROM_TEST(ret);
}
static int test5_rw_aligned_1byte(int fd)
{
/* somewhat arbitrarily chosen address */
unsigned long paddr =
(end_addr[range_count - 1] - 0xF000) & ~(ADI_BLKSZ - 1);
unsigned char version, expected_version;
loff_t offset;
off_t oret;
int ret;
offset = paddr / ADI_BLKSZ;
version = expected_version = random_version();
oret = seek_adi(fd, offset, SEEK_SET);
if (oret != offset) {
ret = -1;
TEST_STEP_FAILURE(ret);
}
ret = write_adi(fd, &version, sizeof(version));
if (ret != sizeof(version))
TEST_STEP_FAILURE(ret);
oret = seek_adi(fd, offset, SEEK_SET);
if (oret != offset) {
ret = -1;
TEST_STEP_FAILURE(ret);
}
ret = read_adi(fd, &version, sizeof(version));
if (ret != sizeof(version))
TEST_STEP_FAILURE(ret);
if (expected_version != version) {
DEBUG_PRINT_L2("\tExpected version %d but read version %d\n",
expected_version, version);
TEST_STEP_FAILURE(-expected_version);
}
ret = 0;
out:
RETURN_FROM_TEST(ret);
}
#define TEST6_VERSION_SZ 9434
static int test6_rw_aligned_9434bytes(int fd)
{
/* somewhat arbitrarily chosen address */
unsigned long paddr =
(end_addr[range_count - 1] - 0x5F000) & ~(ADI_BLKSZ - 1);
unsigned char version[TEST6_VERSION_SZ],
expected_version[TEST6_VERSION_SZ];
loff_t offset;
off_t oret;
int ret, i;
offset = paddr / ADI_BLKSZ;
for (i = 0; i < TEST6_VERSION_SZ; i++)
version[i] = expected_version[i] = random_version();
oret = seek_adi(fd, offset, SEEK_SET);
if (oret != offset) {
ret = -1;
TEST_STEP_FAILURE(ret);
}
ret = write_adi(fd, version, sizeof(version));
if (ret != sizeof(version))
TEST_STEP_FAILURE(ret);
memset(version, 0, TEST6_VERSION_SZ);
oret = seek_adi(fd, offset, SEEK_SET);
if (oret != offset) {
ret = -1;
TEST_STEP_FAILURE(ret);
}
ret = read_adi(fd, version, sizeof(version));
if (ret != sizeof(version))
TEST_STEP_FAILURE(ret);
for (i = 0; i < TEST6_VERSION_SZ; i++) {
if (expected_version[i] != version[i]) {
DEBUG_PRINT_L2(
"\tExpected version %d but read version %d\n",
expected_version[i], version[i]);
TEST_STEP_FAILURE(-expected_version[i]);
}
}
ret = 0;
out:
RETURN_FROM_TEST(ret);
}
#define TEST7_VERSION_SZ 14963
static int test7_rw_aligned_14963bytes(int fd)
{
/* somewhat arbitrarily chosen address */
unsigned long paddr =
((start_addr[range_count - 1] + 0xF000) & ~(ADI_BLKSZ - 1)) + 39;
unsigned char version[TEST7_VERSION_SZ],
expected_version[TEST7_VERSION_SZ];
loff_t offset;
off_t oret;
int ret, i;
offset = paddr / ADI_BLKSZ;
for (i = 0; i < TEST7_VERSION_SZ; i++) {
version[i] = random_version();
expected_version[i] = version[i];
}
oret = seek_adi(fd, offset, SEEK_SET);
if (oret != offset) {
ret = -1;
TEST_STEP_FAILURE(ret);
}
ret = write_adi(fd, version, sizeof(version));
if (ret != sizeof(version))
TEST_STEP_FAILURE(ret);
memset(version, 0, TEST7_VERSION_SZ);
oret = seek_adi(fd, offset, SEEK_SET);
if (oret != offset) {
ret = -1;
TEST_STEP_FAILURE(ret);
}
ret = read_adi(fd, version, sizeof(version));
if (ret != sizeof(version))
TEST_STEP_FAILURE(ret);
for (i = 0; i < TEST7_VERSION_SZ; i++) {
if (expected_version[i] != version[i]) {
DEBUG_PRINT_L2(
"\tExpected version %d but read version %d\n",
expected_version[i], version[i]);
TEST_STEP_FAILURE(-expected_version[i]);
}
paddr += ADI_BLKSZ;
}
ret = 0;
out:
RETURN_FROM_TEST(ret);
}
static int (*tests[])(int fd) = {
test0_prpw_aligned_1byte,
test1_prpw_aligned_4096bytes,
test2_prpw_aligned_10327bytes,
test3_prpw_unaligned_12541bytes,
test4_lseek,
test5_rw_aligned_1byte,
test6_rw_aligned_9434bytes,
test7_rw_aligned_14963bytes,
};
#define TEST_COUNT ARRAY_SIZE(tests)
int main(int argc, char *argv[])
{
int fd, ret, test;
ret = build_memory_map();
if (ret < 0)
return ret;
fd = open("/dev/adi", O_RDWR);
if (fd < 0) {
fprintf(stderr, "open: error %d: %s\n",
errno, strerror(errno));
return -errno;
}
for (test = 0; test < TEST_COUNT; test++) {
DEBUG_PRINT_L1("Running test #%d\n", test);
ret = (*tests[test])(fd);
if (ret != 0)
ksft_test_result_fail("Test #%d failed: error %d\n",
test, ret);
else
ksft_test_result_pass("Test #%d passed\n", test);
}
print_stats();
close(fd);
if (ksft_get_fail_cnt() > 0)
ksft_exit_fail();
else
ksft_exit_pass();
/* it's impossible to get here, but the compiler throws a warning
* about control reaching the end of non-void function. bah.
*/
return 0;
}
| linux-master | tools/testing/selftests/sparc64/drivers/adi-test.c |
#include <fcntl.h>
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <dirent.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <pthread.h>
#include <assert.h>
#include <mm/gup_test.h>
#include "../kselftest.h"
#include "vm_util.h"
#define MB (1UL << 20)
/* Just the flags we need, copied from mm.h: */
#define FOLL_WRITE 0x01 /* check pte is writable */
#define FOLL_TOUCH 0x02 /* mark page accessed */
#define GUP_TEST_FILE "/sys/kernel/debug/gup_test"
static unsigned long cmd = GUP_FAST_BENCHMARK;
static int gup_fd, repeats = 1;
static unsigned long size = 128 * MB;
/* Serialize prints */
static pthread_mutex_t print_mutex = PTHREAD_MUTEX_INITIALIZER;
static char *cmd_to_str(unsigned long cmd)
{
switch (cmd) {
case GUP_FAST_BENCHMARK:
return "GUP_FAST_BENCHMARK";
case PIN_FAST_BENCHMARK:
return "PIN_FAST_BENCHMARK";
case PIN_LONGTERM_BENCHMARK:
return "PIN_LONGTERM_BENCHMARK";
case GUP_BASIC_TEST:
return "GUP_BASIC_TEST";
case PIN_BASIC_TEST:
return "PIN_BASIC_TEST";
case DUMP_USER_PAGES_TEST:
return "DUMP_USER_PAGES_TEST";
}
return "Unknown command";
}
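/*
 * Typical invocations (see the option parsing in main() below), e.g.:
 * "./gup_test -u -m 16 -r 10" times GUP_FAST_BENCHMARK over a 16 MB buffer
 * for 10 repetitions, while "./gup_test -c 0 1" dumps pages 0 and 1 via
 * DUMP_USER_PAGES_TEST.
 */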
void *gup_thread(void *data)
{
struct gup_test gup = *(struct gup_test *)data;
int i;
/* Only report timing information on the *_BENCHMARK commands: */
if ((cmd == PIN_FAST_BENCHMARK) || (cmd == GUP_FAST_BENCHMARK) ||
(cmd == PIN_LONGTERM_BENCHMARK)) {
for (i = 0; i < repeats; i++) {
gup.size = size;
if (ioctl(gup_fd, cmd, &gup))
perror("ioctl"), exit(1);
pthread_mutex_lock(&print_mutex);
printf("%s: Time: get:%lld put:%lld us",
cmd_to_str(cmd), gup.get_delta_usec,
gup.put_delta_usec);
if (gup.size != size)
printf(", truncated (size: %lld)", gup.size);
printf("\n");
pthread_mutex_unlock(&print_mutex);
}
} else {
gup.size = size;
if (ioctl(gup_fd, cmd, &gup)) {
perror("ioctl");
exit(1);
}
pthread_mutex_lock(&print_mutex);
printf("%s: done\n", cmd_to_str(cmd));
if (gup.size != size)
printf("Truncated (size: %lld)\n", gup.size);
pthread_mutex_unlock(&print_mutex);
}
return NULL;
}
int main(int argc, char **argv)
{
struct gup_test gup = { 0 };
int filed, i, opt, nr_pages = 1, thp = -1, write = 1, nthreads = 1, ret;
int flags = MAP_PRIVATE, touch = 0;
char *file = "/dev/zero";
pthread_t *tid;
char *p;
while ((opt = getopt(argc, argv, "m:r:n:F:f:abcj:tTLUuwWSHpz")) != -1) {
switch (opt) {
case 'a':
cmd = PIN_FAST_BENCHMARK;
break;
case 'b':
cmd = PIN_BASIC_TEST;
break;
case 'L':
cmd = PIN_LONGTERM_BENCHMARK;
break;
case 'c':
cmd = DUMP_USER_PAGES_TEST;
/*
* Dump page 0 (index 1). May be overridden later, by
* user's non-option arguments.
*
* .which_pages is zero-based, so that zero can mean "do
* nothing".
*/
gup.which_pages[0] = 1;
break;
case 'p':
/* works only with DUMP_USER_PAGES_TEST */
gup.test_flags |= GUP_TEST_FLAG_DUMP_PAGES_USE_PIN;
break;
case 'F':
/* strtol, so you can pass flags in hex form */
gup.gup_flags = strtol(optarg, 0, 0);
break;
case 'j':
nthreads = atoi(optarg);
break;
case 'm':
size = atoi(optarg) * MB;
break;
case 'r':
repeats = atoi(optarg);
break;
case 'n':
nr_pages = atoi(optarg);
break;
case 't':
thp = 1;
break;
case 'T':
thp = 0;
break;
case 'U':
cmd = GUP_BASIC_TEST;
break;
case 'u':
cmd = GUP_FAST_BENCHMARK;
break;
case 'w':
write = 1;
break;
case 'W':
write = 0;
break;
case 'f':
file = optarg;
break;
case 'S':
flags &= ~MAP_PRIVATE;
flags |= MAP_SHARED;
break;
case 'H':
flags |= (MAP_HUGETLB | MAP_ANONYMOUS);
break;
case 'z':
/* fault pages in gup, do not fault in userland */
touch = 1;
break;
default:
return -1;
}
}
if (optind < argc) {
int extra_arg_count = 0;
/*
* For example:
*
* ./gup_test -c 0 1 0x1001
*
* ...to dump pages 0, 1, and 4097
*/
while ((optind < argc) &&
(extra_arg_count < GUP_TEST_MAX_PAGES_TO_DUMP)) {
/*
* Do the 1-based indexing here, so that the user can
* use normal 0-based indexing on the command line.
*/
long page_index = strtol(argv[optind], 0, 0) + 1;
gup.which_pages[extra_arg_count] = page_index;
extra_arg_count++;
optind++;
}
}
filed = open(file, O_RDWR|O_CREAT, 0664);
if (filed < 0) {
perror("open");
exit(filed);
}
gup.nr_pages_per_call = nr_pages;
if (write)
gup.gup_flags |= FOLL_WRITE;
gup_fd = open(GUP_TEST_FILE, O_RDWR);
if (gup_fd == -1) {
switch (errno) {
case EACCES:
if (getuid())
printf("Please run this test as root\n");
break;
case ENOENT:
if (opendir("/sys/kernel/debug") == NULL) {
printf("mount debugfs at /sys/kernel/debug\n");
break;
}
printf("check if CONFIG_GUP_TEST is enabled in kernel config\n");
break;
default:
perror("failed to open " GUP_TEST_FILE);
break;
}
exit(KSFT_SKIP);
}
p = mmap(NULL, size, PROT_READ | PROT_WRITE, flags, filed, 0);
if (p == MAP_FAILED) {
perror("mmap");
exit(1);
}
gup.addr = (unsigned long)p;
if (thp == 1)
madvise(p, size, MADV_HUGEPAGE);
else if (thp == 0)
madvise(p, size, MADV_NOHUGEPAGE);
/*
* FOLL_TOUCH, in gup_test, is used as an either/or case: either
* fault pages in from the kernel via FOLL_TOUCH, or fault them
* in here, from user space. This allows comparison of performance
* between those two cases.
*/
if (touch) {
gup.gup_flags |= FOLL_TOUCH;
} else {
for (; (unsigned long)p < gup.addr + size; p += psize())
p[0] = 0;
}
tid = malloc(sizeof(pthread_t) * nthreads);
assert(tid);
for (i = 0; i < nthreads; i++) {
ret = pthread_create(&tid[i], NULL, gup_thread, &gup);
assert(ret == 0);
}
for (i = 0; i < nthreads; i++) {
ret = pthread_join(tid[i], NULL);
assert(ret == 0);
}
free(tid);
return 0;
}
| linux-master | tools/testing/selftests/mm/gup_test.c |
// SPDX-License-Identifier: GPL-2.0
#include <sys/mman.h>
#include <sys/prctl.h>
#include <sys/wait.h>
#include <stdbool.h>
#include <time.h>
#include <string.h>
#include <numa.h>
#include <unistd.h>
#include <fcntl.h>
#include <stdint.h>
#include <err.h>
#include "../kselftest.h"
#include <include/vdso/time64.h>
#include "vm_util.h"
#define KSM_SYSFS_PATH "/sys/kernel/mm/ksm/"
#define KSM_FP(s) (KSM_SYSFS_PATH s)
#define KSM_SCAN_LIMIT_SEC_DEFAULT 120
#define KSM_PAGE_COUNT_DEFAULT 10l
#define KSM_PROT_STR_DEFAULT "rw"
#define KSM_USE_ZERO_PAGES_DEFAULT false
#define KSM_MERGE_ACROSS_NODES_DEFAULT true
#define KSM_MERGE_TYPE_DEFAULT 0
#define MB (1ul << 20)
struct ksm_sysfs {
unsigned long max_page_sharing;
unsigned long merge_across_nodes;
unsigned long pages_to_scan;
unsigned long run;
unsigned long sleep_millisecs;
unsigned long stable_node_chains_prune_millisecs;
unsigned long use_zero_pages;
};
enum ksm_merge_type {
KSM_MERGE_MADVISE,
KSM_MERGE_PRCTL,
KSM_MERGE_LAST = KSM_MERGE_PRCTL
};
enum ksm_test_name {
CHECK_KSM_MERGE,
CHECK_KSM_UNMERGE,
CHECK_KSM_GET_MERGE_TYPE,
CHECK_KSM_ZERO_PAGE_MERGE,
CHECK_KSM_NUMA_MERGE,
KSM_MERGE_TIME,
KSM_MERGE_TIME_HUGE_PAGES,
KSM_UNMERGE_TIME,
KSM_COW_TIME
};
int debug;
static int ksm_write_sysfs(const char *file_path, unsigned long val)
{
FILE *f = fopen(file_path, "w");
if (!f) {
fprintf(stderr, "f %s\n", file_path);
perror("fopen");
return 1;
}
if (fprintf(f, "%lu", val) < 0) {
perror("fprintf");
fclose(f);
return 1;
}
fclose(f);
return 0;
}
static int ksm_read_sysfs(const char *file_path, unsigned long *val)
{
FILE *f = fopen(file_path, "r");
if (!f) {
fprintf(stderr, "f %s\n", file_path);
perror("fopen");
return 1;
}
if (fscanf(f, "%lu", val) != 1) {
perror("fscanf");
fclose(f);
return 1;
}
fclose(f);
return 0;
}
static void ksm_print_sysfs(void)
{
unsigned long max_page_sharing, pages_sharing, pages_shared;
unsigned long full_scans, pages_unshared, pages_volatile;
unsigned long stable_node_chains, stable_node_dups;
long general_profit;
if (ksm_read_sysfs(KSM_FP("pages_shared"), &pages_shared) ||
ksm_read_sysfs(KSM_FP("pages_sharing"), &pages_sharing) ||
ksm_read_sysfs(KSM_FP("max_page_sharing"), &max_page_sharing) ||
ksm_read_sysfs(KSM_FP("full_scans"), &full_scans) ||
ksm_read_sysfs(KSM_FP("pages_unshared"), &pages_unshared) ||
ksm_read_sysfs(KSM_FP("pages_volatile"), &pages_volatile) ||
ksm_read_sysfs(KSM_FP("stable_node_chains"), &stable_node_chains) ||
ksm_read_sysfs(KSM_FP("stable_node_dups"), &stable_node_dups) ||
ksm_read_sysfs(KSM_FP("general_profit"), (unsigned long *)&general_profit))
return;
printf("pages_shared : %lu\n", pages_shared);
printf("pages_sharing : %lu\n", pages_sharing);
printf("max_page_sharing : %lu\n", max_page_sharing);
printf("full_scans : %lu\n", full_scans);
printf("pages_unshared : %lu\n", pages_unshared);
printf("pages_volatile : %lu\n", pages_volatile);
printf("stable_node_chains: %lu\n", stable_node_chains);
printf("stable_node_dups : %lu\n", stable_node_dups);
printf("general_profit : %ld\n", general_profit);
}
static void ksm_print_procfs(void)
{
const char *file_name = "/proc/self/ksm_stat";
char buffer[512];
FILE *f = fopen(file_name, "r");
if (!f) {
fprintf(stderr, "f %s\n", file_name);
perror("fopen");
return;
}
while (fgets(buffer, sizeof(buffer), f))
printf("%s", buffer);
fclose(f);
}
static int str_to_prot(char *prot_str)
{
int prot = 0;
if ((strchr(prot_str, 'r')) != NULL)
prot |= PROT_READ;
if ((strchr(prot_str, 'w')) != NULL)
prot |= PROT_WRITE;
if ((strchr(prot_str, 'x')) != NULL)
prot |= PROT_EXEC;
return prot;
}
static void print_help(void)
{
printf("usage: ksm_tests [-h] <test type> [-a prot] [-p page_count] [-l timeout]\n"
"[-z use_zero_pages] [-m merge_across_nodes] [-s size]\n");
printf("Supported <test type>:\n"
" -M (page merging)\n"
" -Z (zero pages merging)\n"
" -N (merging of pages in different NUMA nodes)\n"
" -U (page unmerging)\n"
" -P evaluate merging time and speed.\n"
" For this test, the size of duplicated memory area (in MiB)\n"
" must be provided using -s option\n"
" -H evaluate merging time and speed of area allocated mostly with huge pages\n"
" For this test, the size of duplicated memory area (in MiB)\n"
" must be provided using -s option\n"
" -D evaluate unmerging time and speed when disabling KSM.\n"
" For this test, the size of duplicated memory area (in MiB)\n"
" must be provided using -s option\n"
" -C evaluate the time required to break COW of merged pages.\n\n");
printf(" -a: specify the access protections of pages.\n"
" <prot> must be of the form [rwx].\n"
" Default: %s\n", KSM_PROT_STR_DEFAULT);
printf(" -p: specify the number of pages to test.\n"
" Default: %ld\n", KSM_PAGE_COUNT_DEFAULT);
printf(" -l: limit the maximum running time (in seconds) for a test.\n"
" Default: %d seconds\n", KSM_SCAN_LIMIT_SEC_DEFAULT);
printf(" -z: change use_zero_pages tunable\n"
" Default: %d\n", KSM_USE_ZERO_PAGES_DEFAULT);
printf(" -m: change merge_across_nodes tunable\n"
" Default: %d\n", KSM_MERGE_ACROSS_NODES_DEFAULT);
printf(" -d: turn debugging output on\n");
printf(" -s: the size of duplicated memory area (in MiB)\n");
printf(" -t: KSM merge type\n"
" Default: 0\n"
" 0: madvise merging\n"
" 1: prctl merging\n");
exit(0);
}
static void *allocate_memory(void *ptr, int prot, int mapping, char data, size_t map_size)
{
void *map_ptr = mmap(ptr, map_size, PROT_WRITE, mapping, -1, 0);
if (map_ptr == MAP_FAILED) {
perror("mmap");
return NULL;
}
memset(map_ptr, data, map_size);
if (mprotect(map_ptr, map_size, prot)) {
perror("mprotect");
munmap(map_ptr, map_size);
return NULL;
}
return map_ptr;
}
static int ksm_do_scan(int scan_count, struct timespec start_time, int timeout)
{
struct timespec cur_time;
unsigned long cur_scan, init_scan;
if (ksm_read_sysfs(KSM_FP("full_scans"), &init_scan))
return 1;
cur_scan = init_scan;
while (cur_scan < init_scan + scan_count) {
if (ksm_read_sysfs(KSM_FP("full_scans"), &cur_scan))
return 1;
if (clock_gettime(CLOCK_MONOTONIC_RAW, &cur_time)) {
perror("clock_gettime");
return 1;
}
if ((cur_time.tv_sec - start_time.tv_sec) > timeout) {
printf("Scan time limit exceeded\n");
return 1;
}
}
return 0;
}
static int ksm_merge_pages(int merge_type, void *addr, size_t size,
struct timespec start_time, int timeout)
{
if (merge_type == KSM_MERGE_MADVISE) {
if (madvise(addr, size, MADV_MERGEABLE)) {
perror("madvise");
return 1;
}
} else if (merge_type == KSM_MERGE_PRCTL) {
if (prctl(PR_SET_MEMORY_MERGE, 1, 0, 0, 0)) {
perror("prctl");
return 1;
}
}
if (ksm_write_sysfs(KSM_FP("run"), 1))
return 1;
/* Since merging occurs only after 2 scans, make sure to get at least 2 full scans */
if (ksm_do_scan(2, start_time, timeout))
return 1;
return 0;
}
static int ksm_unmerge_pages(void *addr, size_t size,
struct timespec start_time, int timeout)
{
if (madvise(addr, size, MADV_UNMERGEABLE)) {
perror("madvise");
return 1;
}
return 0;
}
static bool assert_ksm_pages_count(long dupl_page_count)
{
unsigned long max_page_sharing, pages_sharing, pages_shared;
if (ksm_read_sysfs(KSM_FP("pages_shared"), &pages_shared) ||
ksm_read_sysfs(KSM_FP("pages_sharing"), &pages_sharing) ||
ksm_read_sysfs(KSM_FP("max_page_sharing"), &max_page_sharing))
return false;
if (debug) {
ksm_print_sysfs();
ksm_print_procfs();
}
/*
* Since there must be at least 2 pages for merging and 1 page can be
* shared with the limited number of pages (max_page_sharing), sometimes
* there are 'leftover' pages that cannot be merged. For example, if there
* are 11 pages and max_page_sharing = 10, then only 10 pages will be
* merged and the 11th page won't be affected. As a result, when the number
* of duplicate pages is divided by max_page_sharing and the remainder is 1,
* pages_shared and pages_sharing values will be equal between dupl_page_count
* and dupl_page_count - 1.
*/
if (dupl_page_count % max_page_sharing == 1 || dupl_page_count % max_page_sharing == 0) {
if (pages_shared == dupl_page_count / max_page_sharing &&
pages_sharing == pages_shared * (max_page_sharing - 1))
return true;
} else {
if (pages_shared == (dupl_page_count / max_page_sharing + 1) &&
pages_sharing == dupl_page_count - pages_shared)
return true;
}
return false;
}
static int ksm_save_def(struct ksm_sysfs *ksm_sysfs)
{
if (ksm_read_sysfs(KSM_FP("max_page_sharing"), &ksm_sysfs->max_page_sharing) ||
numa_available() ? 0 :
ksm_read_sysfs(KSM_FP("merge_across_nodes"), &ksm_sysfs->merge_across_nodes) ||
ksm_read_sysfs(KSM_FP("sleep_millisecs"), &ksm_sysfs->sleep_millisecs) ||
ksm_read_sysfs(KSM_FP("pages_to_scan"), &ksm_sysfs->pages_to_scan) ||
ksm_read_sysfs(KSM_FP("run"), &ksm_sysfs->run) ||
ksm_read_sysfs(KSM_FP("stable_node_chains_prune_millisecs"),
&ksm_sysfs->stable_node_chains_prune_millisecs) ||
ksm_read_sysfs(KSM_FP("use_zero_pages"), &ksm_sysfs->use_zero_pages))
return 1;
return 0;
}
static int ksm_restore(struct ksm_sysfs *ksm_sysfs)
{
if (ksm_write_sysfs(KSM_FP("max_page_sharing"), ksm_sysfs->max_page_sharing) ||
numa_available() ? 0 :
ksm_write_sysfs(KSM_FP("merge_across_nodes"), ksm_sysfs->merge_across_nodes) ||
ksm_write_sysfs(KSM_FP("pages_to_scan"), ksm_sysfs->pages_to_scan) ||
ksm_write_sysfs(KSM_FP("run"), ksm_sysfs->run) ||
ksm_write_sysfs(KSM_FP("sleep_millisecs"), ksm_sysfs->sleep_millisecs) ||
ksm_write_sysfs(KSM_FP("stable_node_chains_prune_millisecs"),
ksm_sysfs->stable_node_chains_prune_millisecs) ||
ksm_write_sysfs(KSM_FP("use_zero_pages"), ksm_sysfs->use_zero_pages))
return 1;
return 0;
}
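/*
 * ksm_save_def()/ksm_restore() snapshot and restore the global KSM sysfs
 * tunables around a test run so the checks and benchmarks below do not leave
 * the host's KSM configuration modified.
 */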
static int check_ksm_merge(int merge_type, int mapping, int prot,
long page_count, int timeout, size_t page_size)
{
void *map_ptr;
struct timespec start_time;
if (clock_gettime(CLOCK_MONOTONIC_RAW, &start_time)) {
perror("clock_gettime");
return KSFT_FAIL;
}
/* fill pages with the same data and merge them */
map_ptr = allocate_memory(NULL, prot, mapping, '*', page_size * page_count);
if (!map_ptr)
return KSFT_FAIL;
if (ksm_merge_pages(merge_type, map_ptr, page_size * page_count, start_time, timeout))
goto err_out;
/* verify that the right number of pages are merged */
if (assert_ksm_pages_count(page_count)) {
printf("OK\n");
munmap(map_ptr, page_size * page_count);
if (merge_type == KSM_MERGE_PRCTL)
prctl(PR_SET_MEMORY_MERGE, 0, 0, 0, 0);
return KSFT_PASS;
}
err_out:
printf("Not OK\n");
munmap(map_ptr, page_size * page_count);
return KSFT_FAIL;
}
static int check_ksm_unmerge(int merge_type, int mapping, int prot, int timeout, size_t page_size)
{
void *map_ptr;
struct timespec start_time;
int page_count = 2;
if (clock_gettime(CLOCK_MONOTONIC_RAW, &start_time)) {
perror("clock_gettime");
return KSFT_FAIL;
}
/* fill pages with the same data and merge them */
map_ptr = allocate_memory(NULL, prot, mapping, '*', page_size * page_count);
if (!map_ptr)
return KSFT_FAIL;
if (ksm_merge_pages(merge_type, map_ptr, page_size * page_count, start_time, timeout))
goto err_out;
/* change 1 byte in each of the 2 pages -- KSM must automatically unmerge them */
memset(map_ptr, '-', 1);
memset(map_ptr + page_size, '+', 1);
/* get at least 1 scan, so KSM can detect that the pages were modified */
if (ksm_do_scan(1, start_time, timeout))
goto err_out;
/* check that unmerging was successful and 0 pages are currently merged */
if (assert_ksm_pages_count(0)) {
printf("OK\n");
munmap(map_ptr, page_size * page_count);
return KSFT_PASS;
}
err_out:
printf("Not OK\n");
munmap(map_ptr, page_size * page_count);
return KSFT_FAIL;
}
static int check_ksm_zero_page_merge(int merge_type, int mapping, int prot, long page_count,
int timeout, bool use_zero_pages, size_t page_size)
{
void *map_ptr;
struct timespec start_time;
if (clock_gettime(CLOCK_MONOTONIC_RAW, &start_time)) {
perror("clock_gettime");
return KSFT_FAIL;
}
if (ksm_write_sysfs(KSM_FP("use_zero_pages"), use_zero_pages))
return KSFT_FAIL;
/* fill pages with zero and try to merge them */
map_ptr = allocate_memory(NULL, prot, mapping, 0, page_size * page_count);
if (!map_ptr)
return KSFT_FAIL;
if (ksm_merge_pages(merge_type, map_ptr, page_size * page_count, start_time, timeout))
goto err_out;
/*
* verify that the right number of pages are merged:
* 1) if use_zero_pages is set to 1, empty pages are merged
* with the kernel zero page instead of with each other;
* 2) if use_zero_pages is set to 0, empty pages are not treated specially
* and merged as usual.
*/
if (use_zero_pages && !assert_ksm_pages_count(0))
goto err_out;
else if (!use_zero_pages && !assert_ksm_pages_count(page_count))
goto err_out;
printf("OK\n");
munmap(map_ptr, page_size * page_count);
return KSFT_PASS;
err_out:
printf("Not OK\n");
munmap(map_ptr, page_size * page_count);
return KSFT_FAIL;
}
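/*
 * Return the next NUMA node after 'node' (wrapping around) that actually
 * has memory, i.e. a non-zero numa_node_size().
 */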
static int get_next_mem_node(int node)
{
long node_size;
int mem_node = 0;
int i, max_node = numa_max_node();
for (i = node + 1; i <= max_node + node; i++) {
mem_node = i % (max_node + 1);
node_size = numa_node_size(mem_node, NULL);
if (node_size > 0)
break;
}
return mem_node;
}
static int get_first_mem_node(void)
{
return get_next_mem_node(numa_max_node());
}
static int check_ksm_numa_merge(int merge_type, int mapping, int prot, int timeout,
bool merge_across_nodes, size_t page_size)
{
void *numa1_map_ptr, *numa2_map_ptr;
struct timespec start_time;
int page_count = 2;
int first_node;
if (clock_gettime(CLOCK_MONOTONIC_RAW, &start_time)) {
perror("clock_gettime");
return KSFT_FAIL;
}
if (numa_available() < 0) {
perror("NUMA support not enabled");
return KSFT_SKIP;
}
if (numa_num_configured_nodes() <= 1) {
printf("At least 2 NUMA nodes must be available\n");
return KSFT_SKIP;
}
if (ksm_write_sysfs(KSM_FP("merge_across_nodes"), merge_across_nodes))
return KSFT_FAIL;
/* allocate 2 pages in 2 different NUMA nodes and fill them with the same data */
first_node = get_first_mem_node();
numa1_map_ptr = numa_alloc_onnode(page_size, first_node);
numa2_map_ptr = numa_alloc_onnode(page_size, get_next_mem_node(first_node));
if (!numa1_map_ptr || !numa2_map_ptr) {
perror("numa_alloc_onnode");
return KSFT_FAIL;
}
memset(numa1_map_ptr, '*', page_size);
memset(numa2_map_ptr, '*', page_size);
/* try to merge the pages */
if (ksm_merge_pages(merge_type, numa1_map_ptr, page_size, start_time, timeout) ||
ksm_merge_pages(merge_type, numa2_map_ptr, page_size, start_time, timeout))
goto err_out;
/*
* verify that the right number of pages are merged:
* 1) if merge_across_nodes was enabled, 2 duplicate pages will be merged;
* 2) if merge_across_nodes = 0, there must be 0 merged pages, since there is
* only 1 unique page in each node and they can't be shared.
*/
if (merge_across_nodes && !assert_ksm_pages_count(page_count))
goto err_out;
else if (!merge_across_nodes && !assert_ksm_pages_count(0))
goto err_out;
numa_free(numa1_map_ptr, page_size);
numa_free(numa2_map_ptr, page_size);
printf("OK\n");
return KSFT_PASS;
err_out:
numa_free(numa1_map_ptr, page_size);
numa_free(numa2_map_ptr, page_size);
printf("Not OK\n");
return KSFT_FAIL;
}
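/*
 * Benchmark KSM merging on a THP-backed area: map a hugepage-aligned region,
 * request MADV_HUGEPAGE, report how many huge/normal pages were faulted in,
 * fill the area with identical data and time the merge.
 */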
static int ksm_merge_hugepages_time(int merge_type, int mapping, int prot,
int timeout, size_t map_size)
{
void *map_ptr, *map_ptr_orig;
struct timespec start_time, end_time;
unsigned long scan_time_ns;
int pagemap_fd, n_normal_pages, n_huge_pages;
map_size *= MB;
size_t len = map_size;
len -= len % HPAGE_SIZE;
map_ptr_orig = mmap(NULL, len + HPAGE_SIZE, PROT_READ | PROT_WRITE,
MAP_ANONYMOUS | MAP_NORESERVE | MAP_PRIVATE, -1, 0);
map_ptr = map_ptr_orig + HPAGE_SIZE - (uintptr_t)map_ptr_orig % HPAGE_SIZE;
if (map_ptr_orig == MAP_FAILED)
err(2, "initial mmap");
if (madvise(map_ptr, len + HPAGE_SIZE, MADV_HUGEPAGE))
err(2, "MADV_HUGEPAGE");
pagemap_fd = open("/proc/self/pagemap", O_RDONLY);
if (pagemap_fd < 0)
err(2, "open pagemap");
n_normal_pages = 0;
n_huge_pages = 0;
for (void *p = map_ptr; p < map_ptr + len; p += HPAGE_SIZE) {
if (allocate_transhuge(p, pagemap_fd) < 0)
n_normal_pages++;
else
n_huge_pages++;
}
printf("Number of normal pages: %d\n", n_normal_pages);
printf("Number of huge pages: %d\n", n_huge_pages);
memset(map_ptr, '*', len);
if (clock_gettime(CLOCK_MONOTONIC_RAW, &start_time)) {
perror("clock_gettime");
goto err_out;
}
if (ksm_merge_pages(merge_type, map_ptr, map_size, start_time, timeout))
goto err_out;
if (clock_gettime(CLOCK_MONOTONIC_RAW, &end_time)) {
perror("clock_gettime");
goto err_out;
}
scan_time_ns = (end_time.tv_sec - start_time.tv_sec) * NSEC_PER_SEC +
(end_time.tv_nsec - start_time.tv_nsec);
printf("Total size: %lu MiB\n", map_size / MB);
printf("Total time: %ld.%09ld s\n", scan_time_ns / NSEC_PER_SEC,
scan_time_ns % NSEC_PER_SEC);
printf("Average speed: %.3f MiB/s\n", (map_size / MB) /
((double)scan_time_ns / NSEC_PER_SEC));
munmap(map_ptr_orig, len + HPAGE_SIZE);
return KSFT_PASS;
err_out:
printf("Not OK\n");
munmap(map_ptr_orig, len + HPAGE_SIZE);
return KSFT_FAIL;
}
static int ksm_merge_time(int merge_type, int mapping, int prot, int timeout, size_t map_size)
{
void *map_ptr;
struct timespec start_time, end_time;
unsigned long scan_time_ns;
map_size *= MB;
map_ptr = allocate_memory(NULL, prot, mapping, '*', map_size);
if (!map_ptr)
return KSFT_FAIL;
if (clock_gettime(CLOCK_MONOTONIC_RAW, &start_time)) {
perror("clock_gettime");
goto err_out;
}
if (ksm_merge_pages(merge_type, map_ptr, map_size, start_time, timeout))
goto err_out;
if (clock_gettime(CLOCK_MONOTONIC_RAW, &end_time)) {
perror("clock_gettime");
goto err_out;
}
scan_time_ns = (end_time.tv_sec - start_time.tv_sec) * NSEC_PER_SEC +
(end_time.tv_nsec - start_time.tv_nsec);
printf("Total size: %lu MiB\n", map_size / MB);
printf("Total time: %ld.%09ld s\n", scan_time_ns / NSEC_PER_SEC,
scan_time_ns % NSEC_PER_SEC);
printf("Average speed: %.3f MiB/s\n", (map_size / MB) /
((double)scan_time_ns / NSEC_PER_SEC));
munmap(map_ptr, map_size);
return KSFT_PASS;
err_out:
printf("Not OK\n");
munmap(map_ptr, map_size);
return KSFT_FAIL;
}
static int ksm_unmerge_time(int merge_type, int mapping, int prot, int timeout, size_t map_size)
{
void *map_ptr;
struct timespec start_time, end_time;
unsigned long scan_time_ns;
map_size *= MB;
map_ptr = allocate_memory(NULL, prot, mapping, '*', map_size);
if (!map_ptr)
return KSFT_FAIL;
if (clock_gettime(CLOCK_MONOTONIC_RAW, &start_time)) {
perror("clock_gettime");
goto err_out;
}
if (ksm_merge_pages(merge_type, map_ptr, map_size, start_time, timeout))
goto err_out;
if (clock_gettime(CLOCK_MONOTONIC_RAW, &start_time)) {
perror("clock_gettime");
goto err_out;
}
if (ksm_unmerge_pages(map_ptr, map_size, start_time, timeout))
goto err_out;
if (clock_gettime(CLOCK_MONOTONIC_RAW, &end_time)) {
perror("clock_gettime");
goto err_out;
}
scan_time_ns = (end_time.tv_sec - start_time.tv_sec) * NSEC_PER_SEC +
(end_time.tv_nsec - start_time.tv_nsec);
printf("Total size: %lu MiB\n", map_size / MB);
printf("Total time: %ld.%09ld s\n", scan_time_ns / NSEC_PER_SEC,
scan_time_ns % NSEC_PER_SEC);
printf("Average speed: %.3f MiB/s\n", (map_size / MB) /
((double)scan_time_ns / NSEC_PER_SEC));
munmap(map_ptr, map_size);
return KSFT_PASS;
err_out:
printf("Not OK\n");
munmap(map_ptr, map_size);
return KSFT_FAIL;
}
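/*
 * Measure copy-on-write latency by dirtying one byte in every second page,
 * first on pages that were never merged and then on pages merged by KSM.
 */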
static int ksm_cow_time(int merge_type, int mapping, int prot, int timeout, size_t page_size)
{
void *map_ptr;
struct timespec start_time, end_time;
unsigned long cow_time_ns;
/* page_count must be less than 2*page_size */
size_t page_count = 4000;
map_ptr = allocate_memory(NULL, prot, mapping, '*', page_size * page_count);
if (!map_ptr)
return KSFT_FAIL;
if (clock_gettime(CLOCK_MONOTONIC_RAW, &start_time)) {
perror("clock_gettime");
return KSFT_FAIL;
}
for (size_t i = 0; i < page_count - 1; i = i + 2)
memset(map_ptr + page_size * i, '-', 1);
if (clock_gettime(CLOCK_MONOTONIC_RAW, &end_time)) {
perror("clock_gettime");
return KSFT_FAIL;
}
cow_time_ns = (end_time.tv_sec - start_time.tv_sec) * NSEC_PER_SEC +
(end_time.tv_nsec - start_time.tv_nsec);
printf("Total size: %lu MiB\n\n", (page_size * page_count) / MB);
printf("Not merged pages:\n");
printf("Total time: %ld.%09ld s\n", cow_time_ns / NSEC_PER_SEC,
cow_time_ns % NSEC_PER_SEC);
printf("Average speed: %.3f MiB/s\n\n", ((page_size * (page_count / 2)) / MB) /
((double)cow_time_ns / NSEC_PER_SEC));
/* Create 2000 pairs of duplicate pages */
for (size_t i = 0; i < page_count - 1; i = i + 2) {
memset(map_ptr + page_size * i, '+', i / 2 + 1);
memset(map_ptr + page_size * (i + 1), '+', i / 2 + 1);
}
if (ksm_merge_pages(merge_type, map_ptr, page_size * page_count, start_time, timeout))
goto err_out;
if (clock_gettime(CLOCK_MONOTONIC_RAW, &start_time)) {
perror("clock_gettime");
goto err_out;
}
for (size_t i = 0; i < page_count - 1; i = i + 2)
memset(map_ptr + page_size * i, '-', 1);
if (clock_gettime(CLOCK_MONOTONIC_RAW, &end_time)) {
perror("clock_gettime");
goto err_out;
}
cow_time_ns = (end_time.tv_sec - start_time.tv_sec) * NSEC_PER_SEC +
(end_time.tv_nsec - start_time.tv_nsec);
printf("Merged pages:\n");
printf("Total time: %ld.%09ld s\n", cow_time_ns / NSEC_PER_SEC,
cow_time_ns % NSEC_PER_SEC);
printf("Average speed: %.3f MiB/s\n", ((page_size * (page_count / 2)) / MB) /
((double)cow_time_ns / NSEC_PER_SEC));
munmap(map_ptr, page_size * page_count);
return KSFT_PASS;
err_out:
printf("Not OK\n");
munmap(map_ptr, page_size * page_count);
return KSFT_FAIL;
}
int main(int argc, char *argv[])
{
int ret, opt;
int prot = 0;
int ksm_scan_limit_sec = KSM_SCAN_LIMIT_SEC_DEFAULT;
int merge_type = KSM_MERGE_TYPE_DEFAULT;
long page_count = KSM_PAGE_COUNT_DEFAULT;
size_t page_size = sysconf(_SC_PAGESIZE);
struct ksm_sysfs ksm_sysfs_old;
int test_name = CHECK_KSM_MERGE;
bool use_zero_pages = KSM_USE_ZERO_PAGES_DEFAULT;
bool merge_across_nodes = KSM_MERGE_ACROSS_NODES_DEFAULT;
long size_MB = 0;
while ((opt = getopt(argc, argv, "dha:p:l:z:m:s:t:MUZNPCHD")) != -1) {
switch (opt) {
case 'a':
prot = str_to_prot(optarg);
break;
case 'p':
page_count = atol(optarg);
if (page_count <= 0) {
printf("The number of pages must be greater than 0\n");
return KSFT_FAIL;
}
break;
case 'l':
ksm_scan_limit_sec = atoi(optarg);
if (ksm_scan_limit_sec <= 0) {
printf("Timeout value must be greater than 0\n");
return KSFT_FAIL;
}
break;
case 'h':
print_help();
break;
case 'z':
if (strcmp(optarg, "0") == 0)
use_zero_pages = 0;
else
use_zero_pages = 1;
break;
case 'm':
if (strcmp(optarg, "0") == 0)
merge_across_nodes = 0;
else
merge_across_nodes = 1;
break;
case 'd':
debug = 1;
break;
case 's':
size_MB = atoi(optarg);
if (size_MB <= 0) {
printf("Size must be greater than 0\n");
return KSFT_FAIL;
}
break;
case 't':
{
int tmp = atoi(optarg);
if (tmp < 0 || tmp > KSM_MERGE_LAST) {
printf("Invalid merge type\n");
return KSFT_FAIL;
}
merge_type = tmp;
}
break;
case 'M':
break;
case 'U':
test_name = CHECK_KSM_UNMERGE;
break;
case 'Z':
test_name = CHECK_KSM_ZERO_PAGE_MERGE;
break;
case 'N':
test_name = CHECK_KSM_NUMA_MERGE;
break;
case 'P':
test_name = KSM_MERGE_TIME;
break;
case 'H':
test_name = KSM_MERGE_TIME_HUGE_PAGES;
break;
case 'D':
test_name = KSM_UNMERGE_TIME;
break;
case 'C':
test_name = KSM_COW_TIME;
break;
default:
return KSFT_FAIL;
}
}
if (prot == 0)
prot = str_to_prot(KSM_PROT_STR_DEFAULT);
if (access(KSM_SYSFS_PATH, F_OK)) {
printf("Config KSM not enabled\n");
return KSFT_SKIP;
}
if (ksm_save_def(&ksm_sysfs_old)) {
printf("Cannot save default tunables\n");
return KSFT_FAIL;
}
if (ksm_write_sysfs(KSM_FP("run"), 2) ||
ksm_write_sysfs(KSM_FP("sleep_millisecs"), 0) ||
numa_available() ? 0 :
ksm_write_sysfs(KSM_FP("merge_across_nodes"), 1) ||
ksm_write_sysfs(KSM_FP("pages_to_scan"), page_count))
return KSFT_FAIL;
switch (test_name) {
case CHECK_KSM_MERGE:
ret = check_ksm_merge(merge_type, MAP_PRIVATE | MAP_ANONYMOUS, prot, page_count,
ksm_scan_limit_sec, page_size);
break;
case CHECK_KSM_UNMERGE:
ret = check_ksm_unmerge(merge_type, MAP_PRIVATE | MAP_ANONYMOUS, prot,
ksm_scan_limit_sec, page_size);
break;
case CHECK_KSM_ZERO_PAGE_MERGE:
ret = check_ksm_zero_page_merge(merge_type, MAP_PRIVATE | MAP_ANONYMOUS, prot,
page_count, ksm_scan_limit_sec, use_zero_pages,
page_size);
break;
case CHECK_KSM_NUMA_MERGE:
ret = check_ksm_numa_merge(merge_type, MAP_PRIVATE | MAP_ANONYMOUS, prot,
ksm_scan_limit_sec, merge_across_nodes, page_size);
break;
case KSM_MERGE_TIME:
if (size_MB == 0) {
printf("Option '-s' is required.\n");
return KSFT_FAIL;
}
ret = ksm_merge_time(merge_type, MAP_PRIVATE | MAP_ANONYMOUS, prot,
ksm_scan_limit_sec, size_MB);
break;
case KSM_MERGE_TIME_HUGE_PAGES:
if (size_MB == 0) {
printf("Option '-s' is required.\n");
return KSFT_FAIL;
}
ret = ksm_merge_hugepages_time(merge_type, MAP_PRIVATE | MAP_ANONYMOUS, prot,
ksm_scan_limit_sec, size_MB);
break;
case KSM_UNMERGE_TIME:
if (size_MB == 0) {
printf("Option '-s' is required.\n");
return KSFT_FAIL;
}
ret = ksm_unmerge_time(merge_type, MAP_PRIVATE | MAP_ANONYMOUS, prot,
ksm_scan_limit_sec, size_MB);
break;
case KSM_COW_TIME:
ret = ksm_cow_time(merge_type, MAP_PRIVATE | MAP_ANONYMOUS, prot,
ksm_scan_limit_sec, page_size);
break;
}
if (ksm_restore(&ksm_sysfs_old)) {
printf("Cannot restore default tunables\n");
return KSFT_FAIL;
}
return ret;
}
| linux-master | tools/testing/selftests/mm/ksm_tests.c |
// SPDX-License-Identifier: GPL-2.0
/*
 * It tests mlock()/mlock2() when they are invoked
 * on random memory regions.
*/
#include <unistd.h>
#include <sys/resource.h>
#include <sys/capability.h>
#include <sys/mman.h>
#include <linux/mman.h>
#include <fcntl.h>
#include <string.h>
#include <sys/ipc.h>
#include <sys/shm.h>
#include <time.h>
#include "mlock2.h"
#define CHUNK_UNIT (128 * 1024)
#define MLOCK_RLIMIT_SIZE (CHUNK_UNIT * 2)
#define MLOCK_WITHIN_LIMIT_SIZE CHUNK_UNIT
#define MLOCK_OUTOF_LIMIT_SIZE (CHUNK_UNIT * 3)
#define TEST_LOOP 100
#define PAGE_ALIGN(size, ps) (((size) + ((ps) - 1)) & ~((ps) - 1))
int set_cap_limits(rlim_t max)
{
struct rlimit new;
cap_t cap = cap_init();
new.rlim_cur = max;
new.rlim_max = max;
if (setrlimit(RLIMIT_MEMLOCK, &new)) {
perror("setrlimit() returns error\n");
return -1;
}
/* drop capabilities including CAP_IPC_LOCK */
if (cap_set_proc(cap)) {
perror("cap_set_proc() returns error\n");
return -2;
}
return 0;
}
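/* Parse VmLck from /proc/self/status and return it in bytes, or -1 on error. */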
int get_proc_locked_vm_size(void)
{
FILE *f;
int ret = -1;
char line[1024] = {0};
unsigned long lock_size = 0;
f = fopen("/proc/self/status", "r");
if (!f) {
perror("fopen");
return -1;
}
while (fgets(line, 1024, f)) {
if (strstr(line, "VmLck")) {
ret = sscanf(line, "VmLck:\t%8lu kB", &lock_size);
if (ret <= 0) {
printf("sscanf() on VmLck error: %s: %d\n",
line, ret);
fclose(f);
return -1;
}
fclose(f);
return (int)(lock_size << 10);
}
}
perror("cannot parse VmLck in /proc/self/status\n");
fclose(f);
return -1;
}
/*
 * Get the MMUPageSize of the memory region containing the given
 * address from /proc/self/smaps.
 *
 * Return value: 0 on error, otherwise the page size (in bytes).
*/
int get_proc_page_size(unsigned long addr)
{
FILE *smaps;
char *line;
unsigned long mmupage_size = 0;
size_t size;
smaps = seek_to_smaps_entry(addr);
if (!smaps) {
printf("Unable to parse /proc/self/smaps\n");
return 0;
}
while (getline(&line, &size, smaps) > 0) {
if (!strstr(line, "MMUPageSize")) {
free(line);
line = NULL;
size = 0;
continue;
}
/* found the MMUPageSize of this section */
if (sscanf(line, "MMUPageSize: %8lu kB",
&mmupage_size) < 1) {
printf("Unable to parse smaps entry for Size:%s\n",
line);
break;
}
}
free(line);
if (smaps)
fclose(smaps);
return mmupage_size << 10;
}
/*
 * Test mlock/mlock2() on the provided memory chunk.
 * mlock/mlock2() are expected to succeed (we stay within the rlimit).
 *
 * Given the allocated chunk [p, p + alloc_size), this test randomly
 * chooses start/len and locks the [start, start + len) range with
 * mlock/mlock2. The range always lies within the allocated chunk.
 *
 * alloc_size is within the RLIMIT_MEMLOCK limit, so mlock/mlock2 are
 * always expected to succeed.
*
* VmLck is assumed to be 0 before this test.
*
* return value: 0 - success
* else: failure
*/
int test_mlock_within_limit(char *p, int alloc_size)
{
int i;
int ret = 0;
int locked_vm_size = 0;
struct rlimit cur;
int page_size = 0;
getrlimit(RLIMIT_MEMLOCK, &cur);
if (cur.rlim_cur < alloc_size) {
printf("alloc_size[%d] < %u rlimit,lead to mlock failure\n",
alloc_size, (unsigned int)cur.rlim_cur);
return -1;
}
srand(time(NULL));
for (i = 0; i < TEST_LOOP; i++) {
/*
* - choose mlock/mlock2 randomly
* - choose lock_size randomly but lock_size < alloc_size
* - choose start_offset randomly but p+start_offset+lock_size
* < p+alloc_size
*/
int is_mlock = !!(rand() % 2);
int lock_size = rand() % alloc_size;
int start_offset = rand() % (alloc_size - lock_size);
if (is_mlock)
ret = mlock(p + start_offset, lock_size);
else
ret = mlock2_(p + start_offset, lock_size,
MLOCK_ONFAULT);
if (ret) {
printf("%s() failure at |%p(%d)| mlock:|%p(%d)|\n",
is_mlock ? "mlock" : "mlock2",
p, alloc_size,
p + start_offset, lock_size);
return ret;
}
}
/*
* Check VmLck left by the tests.
*/
locked_vm_size = get_proc_locked_vm_size();
page_size = get_proc_page_size((unsigned long)p);
if (page_size == 0) {
printf("cannot get proc MMUPageSize\n");
return -1;
}
if (locked_vm_size > PAGE_ALIGN(alloc_size, page_size) + page_size) {
printf("test_mlock_within_limit() left VmLck:%d on %d chunk\n",
locked_vm_size, alloc_size);
return -1;
}
return 0;
}
/*
 * We expect mlock/mlock2() to fail (the request exceeds the rlimit).
 *
 * Given the allocated chunk [p, p + alloc_size), this test randomly
 * chooses start/len and calls mlock/mlock2 on the [start, start + len)
 * range.
 *
 * alloc_size exceeds the RLIMIT_MEMLOCK limit, and the length to be
 * locked is always above the rlimit as well, so mlock/mlock2 are always
 * expected to fail. The number of locked pages must not increase as a
 * side effect.
*
* return value: 0 - success
* else: failure
*/
int test_mlock_outof_limit(char *p, int alloc_size)
{
int i;
int ret = 0;
int locked_vm_size = 0, old_locked_vm_size = 0;
struct rlimit cur;
getrlimit(RLIMIT_MEMLOCK, &cur);
if (cur.rlim_cur >= alloc_size) {
printf("alloc_size[%d] >%u rlimit, violates test condition\n",
alloc_size, (unsigned int)cur.rlim_cur);
return -1;
}
old_locked_vm_size = get_proc_locked_vm_size();
srand(time(NULL));
for (i = 0; i < TEST_LOOP; i++) {
int is_mlock = !!(rand() % 2);
int lock_size = (rand() % (alloc_size - cur.rlim_cur))
+ cur.rlim_cur;
int start_offset = rand() % (alloc_size - lock_size);
if (is_mlock)
ret = mlock(p + start_offset, lock_size);
else
ret = mlock2_(p + start_offset, lock_size,
MLOCK_ONFAULT);
if (ret == 0) {
printf("%s() succeeds? on %p(%d) mlock%p(%d)\n",
is_mlock ? "mlock" : "mlock2",
p, alloc_size,
p + start_offset, lock_size);
return -1;
}
}
locked_vm_size = get_proc_locked_vm_size();
if (locked_vm_size != old_locked_vm_size) {
printf("tests leads to new mlocked page: old[%d], new[%d]\n",
old_locked_vm_size,
locked_vm_size);
return -1;
}
return 0;
}
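/*
 * Drop RLIMIT_MEMLOCK to two chunks and drop CAP_IPC_LOCK, then exercise
 * mlock/mlock2 on an allocation within the limit (must always succeed) and
 * on a three-chunk allocation (must always fail without leaking VmLck).
 */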
int main(int argc, char **argv)
{
char *p = NULL;
int ret = 0;
if (set_cap_limits(MLOCK_RLIMIT_SIZE))
return -1;
p = malloc(MLOCK_WITHIN_LIMIT_SIZE);
if (p == NULL) {
perror("malloc() failure\n");
return -1;
}
ret = test_mlock_within_limit(p, MLOCK_WITHIN_LIMIT_SIZE);
if (ret)
return ret;
munlock(p, MLOCK_WITHIN_LIMIT_SIZE);
free(p);
p = malloc(MLOCK_OUTOF_LIMIT_SIZE);
if (p == NULL) {
perror("malloc() failure\n");
return -1;
}
ret = test_mlock_outof_limit(p, MLOCK_OUTOF_LIMIT_SIZE);
if (ret)
return ret;
munlock(p, MLOCK_OUTOF_LIMIT_SIZE);
free(p);
return 0;
}
| linux-master | tools/testing/selftests/mm/mlock-random-test.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* GUP long-term page pinning tests.
*
* Copyright 2023, Red Hat, Inc.
*
* Author(s): David Hildenbrand <[email protected]>
*/
#define _GNU_SOURCE
#include <stdlib.h>
#include <string.h>
#include <stdbool.h>
#include <stdint.h>
#include <unistd.h>
#include <errno.h>
#include <fcntl.h>
#include <assert.h>
#include <sys/mman.h>
#include <sys/ioctl.h>
#include <sys/vfs.h>
#include <linux/magic.h>
#include <linux/memfd.h>
#include "local_config.h"
#ifdef LOCAL_CONFIG_HAVE_LIBURING
#include <liburing.h>
#endif /* LOCAL_CONFIG_HAVE_LIBURING */
#include "../../../../mm/gup_test.h"
#include "../kselftest.h"
#include "vm_util.h"
static size_t pagesize;
static int nr_hugetlbsizes;
static size_t hugetlbsizes[10];
static int gup_fd;
static __fsword_t get_fs_type(int fd)
{
struct statfs fs;
int ret;
do {
ret = fstatfs(fd, &fs);
} while (ret && errno == EINTR);
return ret ? 0 : fs.f_type;
}
static bool fs_is_unknown(__fsword_t fs_type)
{
/*
* We only support some filesystems in our tests when dealing with
* R/W long-term pinning. For these filesystems, we can be fairly sure
* whether they support it or not.
*/
switch (fs_type) {
case TMPFS_MAGIC:
case HUGETLBFS_MAGIC:
case BTRFS_SUPER_MAGIC:
case EXT4_SUPER_MAGIC:
case XFS_SUPER_MAGIC:
return false;
default:
return true;
}
}
static bool fs_supports_writable_longterm_pinning(__fsword_t fs_type)
{
assert(!fs_is_unknown(fs_type));
switch (fs_type) {
case TMPFS_MAGIC:
case HUGETLBFS_MAGIC:
return true;
default:
return false;
}
}
enum test_type {
TEST_TYPE_RO,
TEST_TYPE_RO_FAST,
TEST_TYPE_RW,
TEST_TYPE_RW_FAST,
#ifdef LOCAL_CONFIG_HAVE_LIBURING
TEST_TYPE_IOURING,
#endif /* LOCAL_CONFIG_HAVE_LIBURING */
};
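/*
 * Size the file, map it (shared or private), fault it in writable and then
 * attempt the requested long-term pinning flavour, checking success or
 * failure against fs_supports_writable_longterm_pinning().
 */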
static void do_test(int fd, size_t size, enum test_type type, bool shared)
{
__fsword_t fs_type = get_fs_type(fd);
bool should_work;
char *mem;
int ret;
if (ftruncate(fd, size)) {
ksft_test_result_fail("ftruncate() failed\n");
return;
}
if (fallocate(fd, 0, 0, size)) {
if (size == pagesize)
ksft_test_result_fail("fallocate() failed\n");
else
ksft_test_result_skip("need more free huge pages\n");
return;
}
mem = mmap(NULL, size, PROT_READ | PROT_WRITE,
shared ? MAP_SHARED : MAP_PRIVATE, fd, 0);
if (mem == MAP_FAILED) {
if (size == pagesize || shared)
ksft_test_result_fail("mmap() failed\n");
else
ksft_test_result_skip("need more free huge pages\n");
return;
}
/*
* Fault in the page writable such that GUP-fast can eventually pin
* it immediately.
*/
memset(mem, 0, size);
switch (type) {
case TEST_TYPE_RO:
case TEST_TYPE_RO_FAST:
case TEST_TYPE_RW:
case TEST_TYPE_RW_FAST: {
struct pin_longterm_test args;
const bool fast = type == TEST_TYPE_RO_FAST ||
type == TEST_TYPE_RW_FAST;
const bool rw = type == TEST_TYPE_RW ||
type == TEST_TYPE_RW_FAST;
if (gup_fd < 0) {
ksft_test_result_skip("gup_test not available\n");
break;
}
if (rw && shared && fs_is_unknown(fs_type)) {
ksft_test_result_skip("Unknown filesystem\n");
return;
}
/*
* R/O pinning or pinning in a private mapping is always
* expected to work. Otherwise, we expect long-term R/W pinning
	 * to only succeed for special filesystems.
*/
should_work = !shared || !rw ||
fs_supports_writable_longterm_pinning(fs_type);
args.addr = (__u64)(uintptr_t)mem;
args.size = size;
args.flags = fast ? PIN_LONGTERM_TEST_FLAG_USE_FAST : 0;
args.flags |= rw ? PIN_LONGTERM_TEST_FLAG_USE_WRITE : 0;
ret = ioctl(gup_fd, PIN_LONGTERM_TEST_START, &args);
if (ret && errno == EINVAL) {
ksft_test_result_skip("PIN_LONGTERM_TEST_START failed\n");
break;
} else if (ret && errno == EFAULT) {
ksft_test_result(!should_work, "Should have failed\n");
break;
} else if (ret) {
ksft_test_result_fail("PIN_LONGTERM_TEST_START failed\n");
break;
}
if (ioctl(gup_fd, PIN_LONGTERM_TEST_STOP))
ksft_print_msg("[INFO] PIN_LONGTERM_TEST_STOP failed\n");
/*
* TODO: if the kernel ever supports long-term R/W pinning on
* some previously unsupported filesystems, we might want to
* perform some additional tests for possible data corruptions.
*/
ksft_test_result(should_work, "Should have worked\n");
break;
}
#ifdef LOCAL_CONFIG_HAVE_LIBURING
case TEST_TYPE_IOURING: {
struct io_uring ring;
struct iovec iov;
/* io_uring always pins pages writable. */
if (shared && fs_is_unknown(fs_type)) {
ksft_test_result_skip("Unknown filesystem\n");
return;
}
should_work = !shared ||
fs_supports_writable_longterm_pinning(fs_type);
/* Skip on errors, as we might just lack kernel support. */
ret = io_uring_queue_init(1, &ring, 0);
if (ret < 0) {
ksft_test_result_skip("io_uring_queue_init() failed\n");
break;
}
/*
* Register the range as a fixed buffer. This will FOLL_WRITE |
* FOLL_PIN | FOLL_LONGTERM the range.
*/
iov.iov_base = mem;
iov.iov_len = size;
ret = io_uring_register_buffers(&ring, &iov, 1);
/* Only new kernels return EFAULT. */
if (ret && (errno == ENOSPC || errno == EOPNOTSUPP ||
errno == EFAULT)) {
ksft_test_result(!should_work, "Should have failed\n");
} else if (ret) {
/*
* We might just lack support or have insufficient
* MEMLOCK limits.
*/
ksft_test_result_skip("io_uring_register_buffers() failed\n");
} else {
ksft_test_result(should_work, "Should have worked\n");
io_uring_unregister_buffers(&ring);
}
io_uring_queue_exit(&ring);
break;
}
#endif /* LOCAL_CONFIG_HAVE_LIBURING */
default:
assert(false);
}
munmap(mem, size);
}
typedef void (*test_fn)(int fd, size_t size);
static void run_with_memfd(test_fn fn, const char *desc)
{
int fd;
ksft_print_msg("[RUN] %s ... with memfd\n", desc);
fd = memfd_create("test", 0);
if (fd < 0) {
ksft_test_result_fail("memfd_create() failed\n");
return;
}
fn(fd, pagesize);
close(fd);
}
static void run_with_tmpfile(test_fn fn, const char *desc)
{
FILE *file;
int fd;
ksft_print_msg("[RUN] %s ... with tmpfile\n", desc);
file = tmpfile();
if (!file) {
ksft_test_result_fail("tmpfile() failed\n");
return;
}
fd = fileno(file);
if (fd < 0) {
ksft_test_result_fail("fileno() failed\n");
return;
}
fn(fd, pagesize);
fclose(file);
}
static void run_with_local_tmpfile(test_fn fn, const char *desc)
{
char filename[] = __FILE__"_tmpfile_XXXXXX";
int fd;
ksft_print_msg("[RUN] %s ... with local tmpfile\n", desc);
fd = mkstemp(filename);
if (fd < 0) {
ksft_test_result_fail("mkstemp() failed\n");
return;
}
if (unlink(filename)) {
ksft_test_result_fail("unlink() failed\n");
goto close;
}
fn(fd, pagesize);
close:
close(fd);
}
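/* Run the test on a hugetlb-backed memfd of the given huge page size. */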
static void run_with_memfd_hugetlb(test_fn fn, const char *desc,
size_t hugetlbsize)
{
int flags = MFD_HUGETLB;
int fd;
ksft_print_msg("[RUN] %s ... with memfd hugetlb (%zu kB)\n", desc,
hugetlbsize / 1024);
flags |= __builtin_ctzll(hugetlbsize) << MFD_HUGE_SHIFT;
fd = memfd_create("test", flags);
if (fd < 0) {
ksft_test_result_skip("memfd_create() failed\n");
return;
}
fn(fd, hugetlbsize);
close(fd);
}
struct test_case {
const char *desc;
test_fn fn;
};
static void test_shared_rw_pin(int fd, size_t size)
{
do_test(fd, size, TEST_TYPE_RW, true);
}
static void test_shared_rw_fast_pin(int fd, size_t size)
{
do_test(fd, size, TEST_TYPE_RW_FAST, true);
}
static void test_shared_ro_pin(int fd, size_t size)
{
do_test(fd, size, TEST_TYPE_RO, true);
}
static void test_shared_ro_fast_pin(int fd, size_t size)
{
do_test(fd, size, TEST_TYPE_RO_FAST, true);
}
static void test_private_rw_pin(int fd, size_t size)
{
do_test(fd, size, TEST_TYPE_RW, false);
}
static void test_private_rw_fast_pin(int fd, size_t size)
{
do_test(fd, size, TEST_TYPE_RW_FAST, false);
}
static void test_private_ro_pin(int fd, size_t size)
{
do_test(fd, size, TEST_TYPE_RO, false);
}
static void test_private_ro_fast_pin(int fd, size_t size)
{
do_test(fd, size, TEST_TYPE_RO_FAST, false);
}
#ifdef LOCAL_CONFIG_HAVE_LIBURING
static void test_shared_iouring(int fd, size_t size)
{
do_test(fd, size, TEST_TYPE_IOURING, true);
}
static void test_private_iouring(int fd, size_t size)
{
do_test(fd, size, TEST_TYPE_IOURING, false);
}
#endif /* LOCAL_CONFIG_HAVE_LIBURING */
static const struct test_case test_cases[] = {
{
"R/W longterm GUP pin in MAP_SHARED file mapping",
test_shared_rw_pin,
},
{
"R/W longterm GUP-fast pin in MAP_SHARED file mapping",
test_shared_rw_fast_pin,
},
{
"R/O longterm GUP pin in MAP_SHARED file mapping",
test_shared_ro_pin,
},
{
"R/O longterm GUP-fast pin in MAP_SHARED file mapping",
test_shared_ro_fast_pin,
},
{
"R/W longterm GUP pin in MAP_PRIVATE file mapping",
test_private_rw_pin,
},
{
"R/W longterm GUP-fast pin in MAP_PRIVATE file mapping",
test_private_rw_fast_pin,
},
{
"R/O longterm GUP pin in MAP_PRIVATE file mapping",
test_private_ro_pin,
},
{
"R/O longterm GUP-fast pin in MAP_PRIVATE file mapping",
test_private_ro_fast_pin,
},
#ifdef LOCAL_CONFIG_HAVE_LIBURING
{
"io_uring fixed buffer with MAP_SHARED file mapping",
test_shared_iouring,
},
{
"io_uring fixed buffer with MAP_PRIVATE file mapping",
test_private_iouring,
},
#endif /* LOCAL_CONFIG_HAVE_LIBURING */
};
static void run_test_case(struct test_case const *test_case)
{
int i;
run_with_memfd(test_case->fn, test_case->desc);
run_with_tmpfile(test_case->fn, test_case->desc);
run_with_local_tmpfile(test_case->fn, test_case->desc);
for (i = 0; i < nr_hugetlbsizes; i++)
run_with_memfd_hugetlb(test_case->fn, test_case->desc,
hugetlbsizes[i]);
}
static int tests_per_test_case(void)
{
return 3 + nr_hugetlbsizes;
}
int main(int argc, char **argv)
{
int i, err;
pagesize = getpagesize();
nr_hugetlbsizes = detect_hugetlb_page_sizes(hugetlbsizes,
ARRAY_SIZE(hugetlbsizes));
ksft_print_header();
ksft_set_plan(ARRAY_SIZE(test_cases) * tests_per_test_case());
gup_fd = open("/sys/kernel/debug/gup_test", O_RDWR);
for (i = 0; i < ARRAY_SIZE(test_cases); i++)
run_test_case(&test_cases[i]);
err = ksft_get_fail_cnt();
if (err)
ksft_exit_fail_msg("%d out of %d tests failed\n",
err, ksft_test_num());
return ksft_exit_pass();
}
| linux-master | tools/testing/selftests/mm/gup_longterm.c |
// SPDX-License-Identifier: GPL-2.0
#define _GNU_SOURCE
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <linux/magic.h>
#include <sys/mman.h>
#include <sys/statfs.h>
#include <errno.h>
#include <stdbool.h>
#include "../kselftest.h"
#define PREFIX " ... "
#define ERROR_PREFIX " !!! "
#define MAX_WRITE_READ_CHUNK_SIZE (getpagesize() * 16)
#define MAX(a, b) (((a) > (b)) ? (a) : (b))
enum test_status {
TEST_PASSED = 0,
TEST_FAILED = 1,
TEST_SKIPPED = 2,
};
static char *status_to_str(enum test_status status)
{
switch (status) {
case TEST_PASSED:
return "TEST_PASSED";
case TEST_FAILED:
return "TEST_FAILED";
case TEST_SKIPPED:
return "TEST_SKIPPED";
default:
return "TEST_???";
}
}
static int setup_filemap(char *filemap, size_t len, size_t wr_chunk_size)
{
char iter = 0;
for (size_t offset = 0; offset < len;
offset += wr_chunk_size) {
iter++;
memset(filemap + offset, iter, wr_chunk_size);
}
return 0;
}
static bool verify_chunk(char *buf, size_t len, char val)
{
size_t i;
for (i = 0; i < len; ++i) {
if (buf[i] != val) {
printf(PREFIX ERROR_PREFIX "check fail: buf[%lu] = %u != %u\n",
i, buf[i], val);
return false;
}
}
return true;
}
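/*
 * Seek to 'offset' and read the file back in wr_chunk_size chunks, verifying
 * the pattern written by setup_filemap(); succeed only if exactly 'expected'
 * bytes could be read.
 */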
static bool seek_read_hugepage_filemap(int fd, size_t len, size_t wr_chunk_size,
off_t offset, size_t expected)
{
char buf[MAX_WRITE_READ_CHUNK_SIZE];
ssize_t ret_count = 0;
ssize_t total_ret_count = 0;
char val = offset / wr_chunk_size + offset % wr_chunk_size;
printf(PREFIX PREFIX "init val=%u with offset=0x%lx\n", val, offset);
printf(PREFIX PREFIX "expect to read 0x%lx bytes of data in total\n",
expected);
if (lseek(fd, offset, SEEK_SET) < 0) {
perror(PREFIX ERROR_PREFIX "seek failed");
return false;
}
while (offset + total_ret_count < len) {
ret_count = read(fd, buf, wr_chunk_size);
if (ret_count == 0) {
printf(PREFIX PREFIX "read reach end of the file\n");
break;
} else if (ret_count < 0) {
perror(PREFIX ERROR_PREFIX "read failed");
break;
}
++val;
if (!verify_chunk(buf, ret_count, val))
return false;
total_ret_count += ret_count;
}
printf(PREFIX PREFIX "actually read 0x%lx bytes of data in total\n",
total_ret_count);
return total_ret_count == expected;
}
static bool read_hugepage_filemap(int fd, size_t len,
size_t wr_chunk_size, size_t expected)
{
char buf[MAX_WRITE_READ_CHUNK_SIZE];
ssize_t ret_count = 0;
ssize_t total_ret_count = 0;
char val = 0;
printf(PREFIX PREFIX "expect to read 0x%lx bytes of data in total\n",
expected);
while (total_ret_count < len) {
ret_count = read(fd, buf, wr_chunk_size);
if (ret_count == 0) {
printf(PREFIX PREFIX "read reach end of the file\n");
break;
} else if (ret_count < 0) {
perror(PREFIX ERROR_PREFIX "read failed");
break;
}
++val;
if (!verify_chunk(buf, ret_count, val))
return false;
total_ret_count += ret_count;
}
printf(PREFIX PREFIX "actually read 0x%lx bytes of data in total\n",
total_ret_count);
return total_ret_count == expected;
}
static enum test_status
test_hugetlb_read(int fd, size_t len, size_t wr_chunk_size)
{
enum test_status status = TEST_SKIPPED;
char *filemap = NULL;
if (ftruncate(fd, len) < 0) {
perror(PREFIX ERROR_PREFIX "ftruncate failed");
return status;
}
filemap = mmap(NULL, len, PROT_READ | PROT_WRITE,
MAP_SHARED | MAP_POPULATE, fd, 0);
if (filemap == MAP_FAILED) {
perror(PREFIX ERROR_PREFIX "mmap for primary mapping failed");
goto done;
}
setup_filemap(filemap, len, wr_chunk_size);
status = TEST_FAILED;
if (read_hugepage_filemap(fd, len, wr_chunk_size, len))
status = TEST_PASSED;
munmap(filemap, len);
done:
if (ftruncate(fd, 0) < 0) {
perror(PREFIX ERROR_PREFIX "ftruncate back to 0 failed");
status = TEST_FAILED;
}
return status;
}
static enum test_status
test_hugetlb_read_hwpoison(int fd, size_t len, size_t wr_chunk_size,
bool skip_hwpoison_page)
{
enum test_status status = TEST_SKIPPED;
char *filemap = NULL;
char *hwp_addr = NULL;
const unsigned long pagesize = getpagesize();
if (ftruncate(fd, len) < 0) {
perror(PREFIX ERROR_PREFIX "ftruncate failed");
return status;
}
filemap = mmap(NULL, len, PROT_READ | PROT_WRITE,
MAP_SHARED | MAP_POPULATE, fd, 0);
if (filemap == MAP_FAILED) {
perror(PREFIX ERROR_PREFIX "mmap for primary mapping failed");
goto done;
}
setup_filemap(filemap, len, wr_chunk_size);
status = TEST_FAILED;
/*
* Poisoned hugetlb page layout (assume hugepagesize=2MB):
* |<---------------------- 1MB ---------------------->|
* |<---- healthy page ---->|<---- HWPOISON page ----->|
* |<------------------- (1MB - 8KB) ----------------->|
*/
hwp_addr = filemap + len / 2 + pagesize;
if (madvise(hwp_addr, pagesize, MADV_HWPOISON) < 0) {
perror(PREFIX ERROR_PREFIX "MADV_HWPOISON failed");
goto unmap;
}
if (!skip_hwpoison_page) {
/*
* Userspace should be able to read (1MB + 1 page) from
* the beginning of the HWPOISONed hugepage.
*/
if (read_hugepage_filemap(fd, len, wr_chunk_size,
len / 2 + pagesize))
status = TEST_PASSED;
} else {
/*
* Userspace should be able to read (1MB - 2 pages) from
* HWPOISONed hugepage.
*/
if (seek_read_hugepage_filemap(fd, len, wr_chunk_size,
len / 2 + MAX(2 * pagesize, wr_chunk_size),
len / 2 - MAX(2 * pagesize, wr_chunk_size)))
status = TEST_PASSED;
}
unmap:
munmap(filemap, len);
done:
if (ftruncate(fd, 0) < 0) {
perror(PREFIX ERROR_PREFIX "ftruncate back to 0 failed");
status = TEST_FAILED;
}
return status;
}
static int create_hugetlbfs_file(struct statfs *file_stat)
{
int fd;
fd = memfd_create("hugetlb_tmp", MFD_HUGETLB);
if (fd < 0) {
perror(PREFIX ERROR_PREFIX "could not open hugetlbfs file");
return -1;
}
memset(file_stat, 0, sizeof(*file_stat));
if (fstatfs(fd, file_stat)) {
perror(PREFIX ERROR_PREFIX "fstatfs failed");
goto close;
}
if (file_stat->f_type != HUGETLBFS_MAGIC) {
printf(PREFIX ERROR_PREFIX "not hugetlbfs file\n");
goto close;
}
return fd;
close:
close(fd);
return -1;
}
int main(void)
{
int fd;
struct statfs file_stat;
enum test_status status;
/* Test read() in different granularity. */
size_t wr_chunk_sizes[] = {
getpagesize() / 2, getpagesize(),
getpagesize() * 2, getpagesize() * 4
};
size_t i;
for (i = 0; i < ARRAY_SIZE(wr_chunk_sizes); ++i) {
printf("Write/read chunk size=0x%lx\n",
wr_chunk_sizes[i]);
fd = create_hugetlbfs_file(&file_stat);
if (fd < 0)
goto create_failure;
printf(PREFIX "HugeTLB read regression test...\n");
status = test_hugetlb_read(fd, file_stat.f_bsize,
wr_chunk_sizes[i]);
printf(PREFIX "HugeTLB read regression test...%s\n",
status_to_str(status));
close(fd);
if (status == TEST_FAILED)
return -1;
fd = create_hugetlbfs_file(&file_stat);
if (fd < 0)
goto create_failure;
printf(PREFIX "HugeTLB read HWPOISON test...\n");
status = test_hugetlb_read_hwpoison(fd, file_stat.f_bsize,
wr_chunk_sizes[i], false);
printf(PREFIX "HugeTLB read HWPOISON test...%s\n",
status_to_str(status));
close(fd);
if (status == TEST_FAILED)
return -1;
fd = create_hugetlbfs_file(&file_stat);
if (fd < 0)
goto create_failure;
printf(PREFIX "HugeTLB seek then read HWPOISON test...\n");
status = test_hugetlb_read_hwpoison(fd, file_stat.f_bsize,
wr_chunk_sizes[i], true);
printf(PREFIX "HugeTLB seek then read HWPOISON test...%s\n",
status_to_str(status));
close(fd);
if (status == TEST_FAILED)
return -1;
}
return 0;
create_failure:
printf(ERROR_PREFIX "Abort test: failed to create hugetlbfs file\n");
return -1;
}
| linux-master | tools/testing/selftests/mm/hugetlb-read-hwpoison.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2017, Anshuman Khandual, IBM Corp.
*
* Works on architectures which support 128TB virtual
* address range and beyond.
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <errno.h>
#include <sys/mman.h>
#include <sys/time.h>
/*
 * The maximum address range mapped with a single mmap()
 * call is a little more than 1GB. Hence 1GB is chosen as
 * the single chunk size for address space mapping.
*/
#define SZ_1GB (1024 * 1024 * 1024UL)
#define SZ_1TB (1024 * 1024 * 1024 * 1024UL)
#define MAP_CHUNK_SIZE SZ_1GB
/*
 * Address space up to 128TB is mapped without any hint
 * and is enabled by default. Address space beyond 128TB
 * up to 512TB is obtained by passing a hint address as the
 * first argument to the mmap() system call.
 *
 * The process heap address space is divided into two
 * different areas, one below 128TB and one above 128TB
 * up to 512TB: one 128TB in size and the other 384TB.
*
* On Arm64 the address space is 256TB and support for
* high mappings up to 4PB virtual address space has
* been added.
*/
#define NR_CHUNKS_128TB ((128 * SZ_1TB) / MAP_CHUNK_SIZE) /* Number of chunks for 128TB */
#define NR_CHUNKS_256TB (NR_CHUNKS_128TB * 2UL)
#define NR_CHUNKS_384TB (NR_CHUNKS_128TB * 3UL)
#define NR_CHUNKS_3840TB (NR_CHUNKS_128TB * 30UL)
#define ADDR_MARK_128TB (1UL << 47) /* First address beyond 128TB */
#define ADDR_MARK_256TB (1UL << 48) /* First address beyond 256TB */
#ifdef __aarch64__
#define HIGH_ADDR_MARK ADDR_MARK_256TB
#define HIGH_ADDR_SHIFT 49
#define NR_CHUNKS_LOW NR_CHUNKS_256TB
#define NR_CHUNKS_HIGH NR_CHUNKS_3840TB
#else
#define HIGH_ADDR_MARK ADDR_MARK_128TB
#define HIGH_ADDR_SHIFT 48
#define NR_CHUNKS_LOW NR_CHUNKS_128TB
#define NR_CHUNKS_HIGH NR_CHUNKS_384TB
#endif
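/* Generate a random hint address above HIGH_ADDR_MARK for high mappings. */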
static char *hind_addr(void)
{
int bits = HIGH_ADDR_SHIFT + rand() % (63 - HIGH_ADDR_SHIFT);
return (char *) (1UL << bits);
}
static int validate_addr(char *ptr, int high_addr)
{
unsigned long addr = (unsigned long) ptr;
if (high_addr) {
if (addr < HIGH_ADDR_MARK) {
printf("Bad address %lx\n", addr);
return 1;
}
return 0;
}
if (addr > HIGH_ADDR_MARK) {
printf("Bad address %lx\n", addr);
return 1;
}
return 0;
}
static int validate_lower_address_hint(void)
{
char *ptr;
ptr = mmap((void *) (1UL << 45), MAP_CHUNK_SIZE, PROT_READ |
PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
if (ptr == MAP_FAILED)
return 0;
return 1;
}
int main(int argc, char *argv[])
{
char *ptr[NR_CHUNKS_LOW];
char **hptr;
char *hint;
unsigned long i, lchunks, hchunks;
for (i = 0; i < NR_CHUNKS_LOW; i++) {
ptr[i] = mmap(NULL, MAP_CHUNK_SIZE, PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
if (ptr[i] == MAP_FAILED) {
if (validate_lower_address_hint())
return 1;
break;
}
if (validate_addr(ptr[i], 0))
return 1;
}
lchunks = i;
hptr = (char **) calloc(NR_CHUNKS_HIGH, sizeof(char *));
if (hptr == NULL)
return 1;
for (i = 0; i < NR_CHUNKS_HIGH; i++) {
hint = hind_addr();
hptr[i] = mmap(hint, MAP_CHUNK_SIZE, PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
if (hptr[i] == MAP_FAILED)
break;
if (validate_addr(hptr[i], 1))
return 1;
}
hchunks = i;
for (i = 0; i < lchunks; i++)
munmap(ptr[i], MAP_CHUNK_SIZE);
for (i = 0; i < hchunks; i++)
munmap(hptr[i], MAP_CHUNK_SIZE);
free(hptr);
return 0;
}
| linux-master | tools/testing/selftests/mm/virtual_address_range.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* COW (Copy On Write) tests.
*
* Copyright 2022, Red Hat, Inc.
*
* Author(s): David Hildenbrand <[email protected]>
*/
#define _GNU_SOURCE
#include <stdlib.h>
#include <string.h>
#include <stdbool.h>
#include <stdint.h>
#include <unistd.h>
#include <errno.h>
#include <fcntl.h>
#include <assert.h>
#include <linux/mman.h>
#include <sys/mman.h>
#include <sys/ioctl.h>
#include <sys/wait.h>
#include <linux/memfd.h>
#include "local_config.h"
#ifdef LOCAL_CONFIG_HAVE_LIBURING
#include <liburing.h>
#endif /* LOCAL_CONFIG_HAVE_LIBURING */
#include "../../../../mm/gup_test.h"
#include "../kselftest.h"
#include "vm_util.h"
static size_t pagesize;
static int pagemap_fd;
static size_t thpsize;
static int nr_hugetlbsizes;
static size_t hugetlbsizes[10];
static int gup_fd;
static bool has_huge_zeropage;
static void detect_huge_zeropage(void)
{
int fd = open("/sys/kernel/mm/transparent_hugepage/use_zero_page",
O_RDONLY);
size_t enabled = 0;
char buf[15];
int ret;
if (fd < 0)
return;
ret = pread(fd, buf, sizeof(buf), 0);
if (ret > 0 && ret < sizeof(buf)) {
buf[ret] = 0;
enabled = strtoul(buf, NULL, 10);
if (enabled == 1) {
has_huge_zeropage = true;
ksft_print_msg("[INFO] huge zeropage is enabled\n");
}
}
close(fd);
}
static bool range_is_swapped(void *addr, size_t size)
{
for (; size; addr += pagesize, size -= pagesize)
if (!pagemap_is_swapped(pagemap_fd, addr))
return false;
return true;
}
struct comm_pipes {
int child_ready[2];
int parent_ready[2];
};
static int setup_comm_pipes(struct comm_pipes *comm_pipes)
{
if (pipe(comm_pipes->child_ready) < 0)
return -errno;
if (pipe(comm_pipes->parent_ready) < 0) {
close(comm_pipes->child_ready[0]);
close(comm_pipes->child_ready[1]);
return -errno;
}
return 0;
}
static void close_comm_pipes(struct comm_pipes *comm_pipes)
{
close(comm_pipes->child_ready[0]);
close(comm_pipes->child_ready[1]);
close(comm_pipes->parent_ready[0]);
close(comm_pipes->parent_ready[1]);
}
static int child_memcmp_fn(char *mem, size_t size,
struct comm_pipes *comm_pipes)
{
char *old = malloc(size);
char buf;
/* Backup the original content. */
memcpy(old, mem, size);
/* Wait until the parent modified the page. */
write(comm_pipes->child_ready[1], "0", 1);
while (read(comm_pipes->parent_ready[0], &buf, 1) != 1)
;
/* See if we still read the old values. */
return memcmp(old, mem, size);
}
static int child_vmsplice_memcmp_fn(char *mem, size_t size,
struct comm_pipes *comm_pipes)
{
struct iovec iov = {
.iov_base = mem,
.iov_len = size,
};
ssize_t cur, total, transferred;
char *old, *new;
int fds[2];
char buf;
old = malloc(size);
new = malloc(size);
/* Backup the original content. */
memcpy(old, mem, size);
if (pipe(fds) < 0)
return -errno;
/* Trigger a read-only pin. */
transferred = vmsplice(fds[1], &iov, 1, 0);
if (transferred < 0)
return -errno;
if (transferred == 0)
return -EINVAL;
/* Unmap it from our page tables. */
if (munmap(mem, size) < 0)
return -errno;
/* Wait until the parent modified it. */
write(comm_pipes->child_ready[1], "0", 1);
while (read(comm_pipes->parent_ready[0], &buf, 1) != 1)
;
/* See if we still read the old values via the pipe. */
for (total = 0; total < transferred; total += cur) {
cur = read(fds[0], new + total, transferred - total);
if (cur < 0)
return -errno;
}
return memcmp(old, new, transferred);
}
typedef int (*child_fn)(char *mem, size_t size, struct comm_pipes *comm_pipes);
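/*
 * Fork a child running 'fn', modify the parent's pages (optionally after an
 * mprotect() round-trip that defeats write-fault avoidance) and check that
 * the child still reads the original content, i.e. nothing leaked via COW.
 */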
static void do_test_cow_in_parent(char *mem, size_t size, bool do_mprotect,
child_fn fn)
{
struct comm_pipes comm_pipes;
char buf;
int ret;
ret = setup_comm_pipes(&comm_pipes);
if (ret) {
ksft_test_result_fail("pipe() failed\n");
return;
}
ret = fork();
if (ret < 0) {
ksft_test_result_fail("fork() failed\n");
goto close_comm_pipes;
} else if (!ret) {
exit(fn(mem, size, &comm_pipes));
}
while (read(comm_pipes.child_ready[0], &buf, 1) != 1)
;
if (do_mprotect) {
/*
* mprotect() optimizations might try avoiding
* write-faults by directly mapping pages writable.
*/
ret = mprotect(mem, size, PROT_READ);
ret |= mprotect(mem, size, PROT_READ|PROT_WRITE);
if (ret) {
ksft_test_result_fail("mprotect() failed\n");
write(comm_pipes.parent_ready[1], "0", 1);
wait(&ret);
goto close_comm_pipes;
}
}
/* Modify the page. */
memset(mem, 0xff, size);
write(comm_pipes.parent_ready[1], "0", 1);
wait(&ret);
if (WIFEXITED(ret))
ret = WEXITSTATUS(ret);
else
ret = -EINVAL;
ksft_test_result(!ret, "No leak from parent into child\n");
close_comm_pipes:
close_comm_pipes(&comm_pipes);
}
static void test_cow_in_parent(char *mem, size_t size)
{
do_test_cow_in_parent(mem, size, false, child_memcmp_fn);
}
static void test_cow_in_parent_mprotect(char *mem, size_t size)
{
do_test_cow_in_parent(mem, size, true, child_memcmp_fn);
}
static void test_vmsplice_in_child(char *mem, size_t size)
{
do_test_cow_in_parent(mem, size, false, child_vmsplice_memcmp_fn);
}
static void test_vmsplice_in_child_mprotect(char *mem, size_t size)
{
do_test_cow_in_parent(mem, size, true, child_vmsplice_memcmp_fn);
}
static void do_test_vmsplice_in_parent(char *mem, size_t size,
bool before_fork)
{
struct iovec iov = {
.iov_base = mem,
.iov_len = size,
};
ssize_t cur, total, transferred;
struct comm_pipes comm_pipes;
char *old, *new;
int ret, fds[2];
char buf;
old = malloc(size);
new = malloc(size);
memcpy(old, mem, size);
ret = setup_comm_pipes(&comm_pipes);
if (ret) {
ksft_test_result_fail("pipe() failed\n");
goto free;
}
if (pipe(fds) < 0) {
ksft_test_result_fail("pipe() failed\n");
goto close_comm_pipes;
}
if (before_fork) {
transferred = vmsplice(fds[1], &iov, 1, 0);
if (transferred <= 0) {
ksft_test_result_fail("vmsplice() failed\n");
goto close_pipe;
}
}
ret = fork();
if (ret < 0) {
ksft_test_result_fail("fork() failed\n");
goto close_pipe;
} else if (!ret) {
write(comm_pipes.child_ready[1], "0", 1);
while (read(comm_pipes.parent_ready[0], &buf, 1) != 1)
;
/* Modify page content in the child. */
memset(mem, 0xff, size);
exit(0);
}
if (!before_fork) {
transferred = vmsplice(fds[1], &iov, 1, 0);
if (transferred <= 0) {
ksft_test_result_fail("vmsplice() failed\n");
wait(&ret);
goto close_pipe;
}
}
while (read(comm_pipes.child_ready[0], &buf, 1) != 1)
;
if (munmap(mem, size) < 0) {
ksft_test_result_fail("munmap() failed\n");
goto close_pipe;
}
write(comm_pipes.parent_ready[1], "0", 1);
/* Wait until the child is done writing. */
wait(&ret);
if (!WIFEXITED(ret)) {
ksft_test_result_fail("wait() failed\n");
goto close_pipe;
}
/* See if we still read the old values. */
for (total = 0; total < transferred; total += cur) {
cur = read(fds[0], new + total, transferred - total);
if (cur < 0) {
ksft_test_result_fail("read() failed\n");
goto close_pipe;
}
}
ksft_test_result(!memcmp(old, new, transferred),
"No leak from child into parent\n");
close_pipe:
close(fds[0]);
close(fds[1]);
close_comm_pipes:
close_comm_pipes(&comm_pipes);
free:
free(old);
free(new);
}
static void test_vmsplice_before_fork(char *mem, size_t size)
{
do_test_vmsplice_in_parent(mem, size, true);
}
static void test_vmsplice_after_fork(char *mem, size_t size)
{
do_test_vmsplice_in_parent(mem, size, false);
}
#ifdef LOCAL_CONFIG_HAVE_LIBURING
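/*
 * Pin the range as an io_uring fixed buffer (FOLL_WRITE | FOLL_PIN |
 * FOLL_LONGTERM), modify the memory, write it to a temporary file through
 * the fixed buffer and verify the file matches, optionally with a child
 * sharing the pages via fork().
 */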
static void do_test_iouring(char *mem, size_t size, bool use_fork)
{
struct comm_pipes comm_pipes;
struct io_uring_cqe *cqe;
struct io_uring_sqe *sqe;
struct io_uring ring;
ssize_t cur, total;
struct iovec iov;
char *buf, *tmp;
int ret, fd;
FILE *file;
ret = setup_comm_pipes(&comm_pipes);
if (ret) {
ksft_test_result_fail("pipe() failed\n");
return;
}
file = tmpfile();
if (!file) {
ksft_test_result_fail("tmpfile() failed\n");
goto close_comm_pipes;
}
fd = fileno(file);
assert(fd);
tmp = malloc(size);
if (!tmp) {
ksft_test_result_fail("malloc() failed\n");
goto close_file;
}
/* Skip on errors, as we might just lack kernel support. */
ret = io_uring_queue_init(1, &ring, 0);
if (ret < 0) {
ksft_test_result_skip("io_uring_queue_init() failed\n");
goto free_tmp;
}
/*
* Register the range as a fixed buffer. This will FOLL_WRITE | FOLL_PIN
* | FOLL_LONGTERM the range.
*
* Skip on errors, as we might just lack kernel support or might not
* have sufficient MEMLOCK permissions.
*/
iov.iov_base = mem;
iov.iov_len = size;
ret = io_uring_register_buffers(&ring, &iov, 1);
if (ret) {
ksft_test_result_skip("io_uring_register_buffers() failed\n");
goto queue_exit;
}
if (use_fork) {
/*
* fork() and keep the child alive until we're done. Note that
* we expect the pinned page to not get shared with the child.
*/
ret = fork();
if (ret < 0) {
ksft_test_result_fail("fork() failed\n");
goto unregister_buffers;
} else if (!ret) {
write(comm_pipes.child_ready[1], "0", 1);
while (read(comm_pipes.parent_ready[0], &buf, 1) != 1)
;
exit(0);
}
while (read(comm_pipes.child_ready[0], &buf, 1) != 1)
;
} else {
/*
* Map the page R/O into the page table. Enable softdirty
* tracking to stop the page from getting mapped R/W immediately
* again by mprotect() optimizations. Note that we don't have an
* easy way to test if that worked (the pagemap does not export
* if the page is mapped R/O vs. R/W).
*/
ret = mprotect(mem, size, PROT_READ);
clear_softdirty();
ret |= mprotect(mem, size, PROT_READ | PROT_WRITE);
if (ret) {
ksft_test_result_fail("mprotect() failed\n");
goto unregister_buffers;
}
}
/*
* Modify the page and write page content as observed by the fixed
* buffer pin to the file so we can verify it.
*/
memset(mem, 0xff, size);
sqe = io_uring_get_sqe(&ring);
if (!sqe) {
ksft_test_result_fail("io_uring_get_sqe() failed\n");
goto quit_child;
}
io_uring_prep_write_fixed(sqe, fd, mem, size, 0, 0);
ret = io_uring_submit(&ring);
if (ret < 0) {
ksft_test_result_fail("io_uring_submit() failed\n");
goto quit_child;
}
ret = io_uring_wait_cqe(&ring, &cqe);
if (ret < 0) {
ksft_test_result_fail("io_uring_wait_cqe() failed\n");
goto quit_child;
}
if (cqe->res != size) {
ksft_test_result_fail("write_fixed failed\n");
goto quit_child;
}
io_uring_cqe_seen(&ring, cqe);
/* Read back the file content to the temporary buffer. */
total = 0;
while (total < size) {
cur = pread(fd, tmp + total, size - total, total);
if (cur < 0) {
ksft_test_result_fail("pread() failed\n");
goto quit_child;
}
total += cur;
}
/* Finally, check if we read what we expected. */
ksft_test_result(!memcmp(mem, tmp, size),
"Longterm R/W pin is reliable\n");
quit_child:
if (use_fork) {
write(comm_pipes.parent_ready[1], "0", 1);
wait(&ret);
}
unregister_buffers:
io_uring_unregister_buffers(&ring);
queue_exit:
io_uring_queue_exit(&ring);
free_tmp:
free(tmp);
close_file:
fclose(file);
close_comm_pipes:
close_comm_pipes(&comm_pipes);
}
static void test_iouring_ro(char *mem, size_t size)
{
do_test_iouring(mem, size, false);
}
static void test_iouring_fork(char *mem, size_t size)
{
do_test_iouring(mem, size, true);
}
#endif /* LOCAL_CONFIG_HAVE_LIBURING */
enum ro_pin_test {
RO_PIN_TEST,
RO_PIN_TEST_SHARED,
RO_PIN_TEST_PREVIOUSLY_SHARED,
RO_PIN_TEST_RO_EXCLUSIVE,
};
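/*
 * Take a long-term R/O pin via the gup_test ioctl in the requested sharing
 * state, modify the page afterwards and verify the pin reads the new
 * content, i.e. the pin targeted the exclusive page and stays coherent.
 */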
static void do_test_ro_pin(char *mem, size_t size, enum ro_pin_test test,
bool fast)
{
struct pin_longterm_test args;
struct comm_pipes comm_pipes;
char *tmp, buf;
__u64 tmp_val;
int ret;
if (gup_fd < 0) {
ksft_test_result_skip("gup_test not available\n");
return;
}
tmp = malloc(size);
if (!tmp) {
ksft_test_result_fail("malloc() failed\n");
return;
}
ret = setup_comm_pipes(&comm_pipes);
if (ret) {
ksft_test_result_fail("pipe() failed\n");
goto free_tmp;
}
switch (test) {
case RO_PIN_TEST:
break;
case RO_PIN_TEST_SHARED:
case RO_PIN_TEST_PREVIOUSLY_SHARED:
/*
* Share the pages with our child. As the pages are not pinned,
* this should just work.
*/
ret = fork();
if (ret < 0) {
ksft_test_result_fail("fork() failed\n");
goto close_comm_pipes;
} else if (!ret) {
write(comm_pipes.child_ready[1], "0", 1);
while (read(comm_pipes.parent_ready[0], &buf, 1) != 1)
;
exit(0);
}
/* Wait until our child is ready. */
while (read(comm_pipes.child_ready[0], &buf, 1) != 1)
;
if (test == RO_PIN_TEST_PREVIOUSLY_SHARED) {
/*
* Tell the child to quit now and wait until it quit.
* The pages should now be mapped R/O into our page
* tables, but they are no longer shared.
*/
write(comm_pipes.parent_ready[1], "0", 1);
wait(&ret);
if (!WIFEXITED(ret))
ksft_print_msg("[INFO] wait() failed\n");
}
break;
case RO_PIN_TEST_RO_EXCLUSIVE:
/*
* Map the page R/O into the page table. Enable softdirty
* tracking to stop the page from getting mapped R/W immediately
* again by mprotect() optimizations. Note that we don't have an
* easy way to test if that worked (the pagemap does not export
* if the page is mapped R/O vs. R/W).
*/
ret = mprotect(mem, size, PROT_READ);
clear_softdirty();
ret |= mprotect(mem, size, PROT_READ | PROT_WRITE);
if (ret) {
ksft_test_result_fail("mprotect() failed\n");
goto close_comm_pipes;
}
break;
default:
assert(false);
}
/* Take a R/O pin. This should trigger unsharing. */
args.addr = (__u64)(uintptr_t)mem;
args.size = size;
args.flags = fast ? PIN_LONGTERM_TEST_FLAG_USE_FAST : 0;
ret = ioctl(gup_fd, PIN_LONGTERM_TEST_START, &args);
if (ret) {
if (errno == EINVAL)
ksft_test_result_skip("PIN_LONGTERM_TEST_START failed\n");
else
ksft_test_result_fail("PIN_LONGTERM_TEST_START failed\n");
goto wait;
}
/* Modify the page. */
memset(mem, 0xff, size);
/*
* Read back the content via the pin to the temporary buffer and
* test if we observed the modification.
*/
tmp_val = (__u64)(uintptr_t)tmp;
ret = ioctl(gup_fd, PIN_LONGTERM_TEST_READ, &tmp_val);
if (ret)
ksft_test_result_fail("PIN_LONGTERM_TEST_READ failed\n");
else
ksft_test_result(!memcmp(mem, tmp, size),
"Longterm R/O pin is reliable\n");
ret = ioctl(gup_fd, PIN_LONGTERM_TEST_STOP);
if (ret)
ksft_print_msg("[INFO] PIN_LONGTERM_TEST_STOP failed\n");
wait:
switch (test) {
case RO_PIN_TEST_SHARED:
write(comm_pipes.parent_ready[1], "0", 1);
wait(&ret);
if (!WIFEXITED(ret))
ksft_print_msg("[INFO] wait() failed\n");
break;
default:
break;
}
close_comm_pipes:
close_comm_pipes(&comm_pipes);
free_tmp:
free(tmp);
}
static void test_ro_pin_on_shared(char *mem, size_t size)
{
do_test_ro_pin(mem, size, RO_PIN_TEST_SHARED, false);
}
static void test_ro_fast_pin_on_shared(char *mem, size_t size)
{
do_test_ro_pin(mem, size, RO_PIN_TEST_SHARED, true);
}
static void test_ro_pin_on_ro_previously_shared(char *mem, size_t size)
{
do_test_ro_pin(mem, size, RO_PIN_TEST_PREVIOUSLY_SHARED, false);
}
static void test_ro_fast_pin_on_ro_previously_shared(char *mem, size_t size)
{
do_test_ro_pin(mem, size, RO_PIN_TEST_PREVIOUSLY_SHARED, true);
}
static void test_ro_pin_on_ro_exclusive(char *mem, size_t size)
{
do_test_ro_pin(mem, size, RO_PIN_TEST_RO_EXCLUSIVE, false);
}
static void test_ro_fast_pin_on_ro_exclusive(char *mem, size_t size)
{
do_test_ro_pin(mem, size, RO_PIN_TEST_RO_EXCLUSIVE, true);
}
typedef void (*test_fn)(char *mem, size_t size);
static void do_run_with_base_page(test_fn fn, bool swapout)
{
char *mem;
int ret;
mem = mmap(NULL, pagesize, PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
if (mem == MAP_FAILED) {
ksft_test_result_fail("mmap() failed\n");
return;
}
ret = madvise(mem, pagesize, MADV_NOHUGEPAGE);
/* Ignore if not around on a kernel. */
if (ret && errno != EINVAL) {
ksft_test_result_fail("MADV_NOHUGEPAGE failed\n");
goto munmap;
}
/* Populate a base page. */
memset(mem, 0, pagesize);
if (swapout) {
madvise(mem, pagesize, MADV_PAGEOUT);
if (!pagemap_is_swapped(pagemap_fd, mem)) {
ksft_test_result_skip("MADV_PAGEOUT did not work, is swap enabled?\n");
goto munmap;
}
}
fn(mem, pagesize);
munmap:
munmap(mem, pagesize);
}
static void run_with_base_page(test_fn fn, const char *desc)
{
ksft_print_msg("[RUN] %s ... with base page\n", desc);
do_run_with_base_page(fn, false);
}
static void run_with_base_page_swap(test_fn fn, const char *desc)
{
ksft_print_msg("[RUN] %s ... with swapped out base page\n", desc);
do_run_with_base_page(fn, true);
}
enum thp_run {
THP_RUN_PMD,
THP_RUN_PMD_SWAPOUT,
THP_RUN_PTE,
THP_RUN_PTE_SWAPOUT,
THP_RUN_SINGLE_PTE,
THP_RUN_SINGLE_PTE_SWAPOUT,
THP_RUN_PARTIAL_MREMAP,
THP_RUN_PARTIAL_SHARED,
};
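/*
 * Map a THP-aligned area, populate a THP and transform it into the state
 * requested by 'thp_run' (PMD-mapped, PTE-mapped, a single PTE, partially
 * mremap()'ed or partially shared, optionally swapped out) before running
 * the test function on it.
 */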
static void do_run_with_thp(test_fn fn, enum thp_run thp_run)
{
char *mem, *mmap_mem, *tmp, *mremap_mem = MAP_FAILED;
size_t size, mmap_size, mremap_size;
int ret;
/* For alignment purposes, we need twice the thp size. */
mmap_size = 2 * thpsize;
mmap_mem = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
if (mmap_mem == MAP_FAILED) {
ksft_test_result_fail("mmap() failed\n");
return;
}
/* We need a THP-aligned memory area. */
mem = (char *)(((uintptr_t)mmap_mem + thpsize) & ~(thpsize - 1));
ret = madvise(mem, thpsize, MADV_HUGEPAGE);
if (ret) {
ksft_test_result_fail("MADV_HUGEPAGE failed\n");
goto munmap;
}
/*
* Try to populate a THP. Touch the first sub-page and test if we get
* another sub-page populated automatically.
*/
mem[0] = 0;
if (!pagemap_is_populated(pagemap_fd, mem + pagesize)) {
ksft_test_result_skip("Did not get a THP populated\n");
goto munmap;
}
memset(mem, 0, thpsize);
size = thpsize;
switch (thp_run) {
case THP_RUN_PMD:
case THP_RUN_PMD_SWAPOUT:
break;
case THP_RUN_PTE:
case THP_RUN_PTE_SWAPOUT:
/*
* Trigger PTE-mapping the THP by temporarily mapping a single
* subpage R/O.
*/
ret = mprotect(mem + pagesize, pagesize, PROT_READ);
if (ret) {
ksft_test_result_fail("mprotect() failed\n");
goto munmap;
}
ret = mprotect(mem + pagesize, pagesize, PROT_READ | PROT_WRITE);
if (ret) {
ksft_test_result_fail("mprotect() failed\n");
goto munmap;
}
break;
case THP_RUN_SINGLE_PTE:
case THP_RUN_SINGLE_PTE_SWAPOUT:
/*
* Discard all but a single subpage of that PTE-mapped THP. What
* remains is a single PTE mapping a single subpage.
*/
ret = madvise(mem + pagesize, thpsize - pagesize, MADV_DONTNEED);
if (ret) {
ksft_test_result_fail("MADV_DONTNEED failed\n");
goto munmap;
}
size = pagesize;
break;
case THP_RUN_PARTIAL_MREMAP:
/*
* Remap half of the THP. We need some new memory location
* for that.
*/
mremap_size = thpsize / 2;
mremap_mem = mmap(NULL, mremap_size, PROT_NONE,
MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		if (mremap_mem == MAP_FAILED) {
ksft_test_result_fail("mmap() failed\n");
goto munmap;
}
tmp = mremap(mem + mremap_size, mremap_size, mremap_size,
MREMAP_MAYMOVE | MREMAP_FIXED, mremap_mem);
if (tmp != mremap_mem) {
ksft_test_result_fail("mremap() failed\n");
goto munmap;
}
size = mremap_size;
break;
case THP_RUN_PARTIAL_SHARED:
/*
* Share the first page of the THP with a child and quit the
* child. This will result in some parts of the THP never
		 * having been shared.
*/
ret = madvise(mem + pagesize, thpsize - pagesize, MADV_DONTFORK);
if (ret) {
ksft_test_result_fail("MADV_DONTFORK failed\n");
goto munmap;
}
ret = fork();
if (ret < 0) {
ksft_test_result_fail("fork() failed\n");
goto munmap;
} else if (!ret) {
exit(0);
}
wait(&ret);
/* Allow for sharing all pages again. */
ret = madvise(mem + pagesize, thpsize - pagesize, MADV_DOFORK);
if (ret) {
ksft_test_result_fail("MADV_DOFORK failed\n");
goto munmap;
}
break;
default:
assert(false);
}
switch (thp_run) {
case THP_RUN_PMD_SWAPOUT:
case THP_RUN_PTE_SWAPOUT:
case THP_RUN_SINGLE_PTE_SWAPOUT:
madvise(mem, size, MADV_PAGEOUT);
if (!range_is_swapped(mem, size)) {
ksft_test_result_skip("MADV_PAGEOUT did not work, is swap enabled?\n");
goto munmap;
}
break;
default:
break;
}
fn(mem, size);
munmap:
munmap(mmap_mem, mmap_size);
if (mremap_mem != MAP_FAILED)
munmap(mremap_mem, mremap_size);
}
static void run_with_thp(test_fn fn, const char *desc)
{
ksft_print_msg("[RUN] %s ... with THP\n", desc);
do_run_with_thp(fn, THP_RUN_PMD);
}
static void run_with_thp_swap(test_fn fn, const char *desc)
{
ksft_print_msg("[RUN] %s ... with swapped-out THP\n", desc);
do_run_with_thp(fn, THP_RUN_PMD_SWAPOUT);
}
static void run_with_pte_mapped_thp(test_fn fn, const char *desc)
{
ksft_print_msg("[RUN] %s ... with PTE-mapped THP\n", desc);
do_run_with_thp(fn, THP_RUN_PTE);
}
static void run_with_pte_mapped_thp_swap(test_fn fn, const char *desc)
{
ksft_print_msg("[RUN] %s ... with swapped-out, PTE-mapped THP\n", desc);
do_run_with_thp(fn, THP_RUN_PTE_SWAPOUT);
}
static void run_with_single_pte_of_thp(test_fn fn, const char *desc)
{
ksft_print_msg("[RUN] %s ... with single PTE of THP\n", desc);
do_run_with_thp(fn, THP_RUN_SINGLE_PTE);
}
static void run_with_single_pte_of_thp_swap(test_fn fn, const char *desc)
{
ksft_print_msg("[RUN] %s ... with single PTE of swapped-out THP\n", desc);
do_run_with_thp(fn, THP_RUN_SINGLE_PTE_SWAPOUT);
}
static void run_with_partial_mremap_thp(test_fn fn, const char *desc)
{
ksft_print_msg("[RUN] %s ... with partially mremap()'ed THP\n", desc);
do_run_with_thp(fn, THP_RUN_PARTIAL_MREMAP);
}
static void run_with_partial_shared_thp(test_fn fn, const char *desc)
{
ksft_print_msg("[RUN] %s ... with partially shared THP\n", desc);
do_run_with_thp(fn, THP_RUN_PARTIAL_SHARED);
}
static void run_with_hugetlb(test_fn fn, const char *desc, size_t hugetlbsize)
{
int flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB;
char *mem, *dummy;
ksft_print_msg("[RUN] %s ... with hugetlb (%zu kB)\n", desc,
hugetlbsize / 1024);
flags |= __builtin_ctzll(hugetlbsize) << MAP_HUGE_SHIFT;
mem = mmap(NULL, hugetlbsize, PROT_READ | PROT_WRITE, flags, -1, 0);
if (mem == MAP_FAILED) {
ksft_test_result_skip("need more free huge pages\n");
return;
}
	/* Populate a huge page. */
memset(mem, 0, hugetlbsize);
/*
* We need a total of two hugetlb pages to handle COW/unsharing
* properly, otherwise we might get zapped by a SIGBUS.
*/
dummy = mmap(NULL, hugetlbsize, PROT_READ | PROT_WRITE, flags, -1, 0);
if (dummy == MAP_FAILED) {
ksft_test_result_skip("need more free huge pages\n");
goto munmap;
}
munmap(dummy, hugetlbsize);
fn(mem, hugetlbsize);
munmap:
munmap(mem, hugetlbsize);
}
struct test_case {
const char *desc;
test_fn fn;
};
/*
* Test cases that are specific to anonymous pages: pages in private mappings
* that may get shared via COW during fork().
*/
static const struct test_case anon_test_cases[] = {
/*
	 * Basic COW tests for fork() without any GUP. If we fail to break COW,
* either the child can observe modifications by the parent or the
* other way around.
*/
{
"Basic COW after fork()",
test_cow_in_parent,
},
/*
* Basic test, but do an additional mprotect(PROT_READ)+
* mprotect(PROT_READ|PROT_WRITE) in the parent before write access.
*/
{
"Basic COW after fork() with mprotect() optimization",
test_cow_in_parent_mprotect,
},
/*
* vmsplice() [R/O GUP] + unmap in the child; modify in the parent. If
	 * we fail to break COW, the child observes modifications by the parent.
* This is CVE-2020-29374 reported by Jann Horn.
*/
{
"vmsplice() + unmap in child",
test_vmsplice_in_child
},
/*
* vmsplice() test, but do an additional mprotect(PROT_READ)+
* mprotect(PROT_READ|PROT_WRITE) in the parent before write access.
*/
{
"vmsplice() + unmap in child with mprotect() optimization",
test_vmsplice_in_child_mprotect
},
/*
* vmsplice() [R/O GUP] in parent before fork(), unmap in parent after
	 * fork(); modify in the child. If we fail to break COW, the parent
* observes modifications by the child.
*/
{
"vmsplice() before fork(), unmap in parent after fork()",
test_vmsplice_before_fork,
},
/*
* vmsplice() [R/O GUP] + unmap in parent after fork(); modify in the
	 * child. If we fail to break COW, the parent observes modifications by
* the child.
*/
{
"vmsplice() + unmap in parent after fork()",
test_vmsplice_after_fork,
},
#ifdef LOCAL_CONFIG_HAVE_LIBURING
/*
* Take a R/W longterm pin and then map the page R/O into the page
* table to trigger a write fault on next access. When modifying the
* page, the page content must be visible via the pin.
*/
{
"R/O-mapping a page registered as iouring fixed buffer",
test_iouring_ro,
},
/*
* Take a R/W longterm pin and then fork() a child. When modifying the
* page, the page content must be visible via the pin. We expect the
* pinned page to not get shared with the child.
*/
{
"fork() with an iouring fixed buffer",
test_iouring_fork,
},
#endif /* LOCAL_CONFIG_HAVE_LIBURING */
/*
* Take a R/O longterm pin on a R/O-mapped shared anonymous page.
* When modifying the page via the page table, the page content change
* must be visible via the pin.
*/
{
"R/O GUP pin on R/O-mapped shared page",
test_ro_pin_on_shared,
},
/* Same as above, but using GUP-fast. */
{
"R/O GUP-fast pin on R/O-mapped shared page",
test_ro_fast_pin_on_shared,
},
/*
* Take a R/O longterm pin on a R/O-mapped exclusive anonymous page that
* was previously shared. When modifying the page via the page table,
* the page content change must be visible via the pin.
*/
{
"R/O GUP pin on R/O-mapped previously-shared page",
test_ro_pin_on_ro_previously_shared,
},
/* Same as above, but using GUP-fast. */
{
"R/O GUP-fast pin on R/O-mapped previously-shared page",
test_ro_fast_pin_on_ro_previously_shared,
},
/*
* Take a R/O longterm pin on a R/O-mapped exclusive anonymous page.
* When modifying the page via the page table, the page content change
* must be visible via the pin.
*/
{
"R/O GUP pin on R/O-mapped exclusive page",
test_ro_pin_on_ro_exclusive,
},
/* Same as above, but using GUP-fast. */
{
"R/O GUP-fast pin on R/O-mapped exclusive page",
test_ro_fast_pin_on_ro_exclusive,
},
};
static void run_anon_test_case(struct test_case const *test_case)
{
int i;
run_with_base_page(test_case->fn, test_case->desc);
run_with_base_page_swap(test_case->fn, test_case->desc);
if (thpsize) {
run_with_thp(test_case->fn, test_case->desc);
run_with_thp_swap(test_case->fn, test_case->desc);
run_with_pte_mapped_thp(test_case->fn, test_case->desc);
run_with_pte_mapped_thp_swap(test_case->fn, test_case->desc);
run_with_single_pte_of_thp(test_case->fn, test_case->desc);
run_with_single_pte_of_thp_swap(test_case->fn, test_case->desc);
run_with_partial_mremap_thp(test_case->fn, test_case->desc);
run_with_partial_shared_thp(test_case->fn, test_case->desc);
}
for (i = 0; i < nr_hugetlbsizes; i++)
run_with_hugetlb(test_case->fn, test_case->desc,
hugetlbsizes[i]);
}
static void run_anon_test_cases(void)
{
int i;
ksft_print_msg("[INFO] Anonymous memory tests in private mappings\n");
for (i = 0; i < ARRAY_SIZE(anon_test_cases); i++)
run_anon_test_case(&anon_test_cases[i]);
}
static int tests_per_anon_test_case(void)
{
int tests = 2 + nr_hugetlbsizes;
if (thpsize)
tests += 8;
return tests;
}
enum anon_thp_collapse_test {
ANON_THP_COLLAPSE_UNSHARED,
ANON_THP_COLLAPSE_FULLY_SHARED,
ANON_THP_COLLAPSE_LOWER_SHARED,
ANON_THP_COLLAPSE_UPPER_SHARED,
};
static void do_test_anon_thp_collapse(char *mem, size_t size,
enum anon_thp_collapse_test test)
{
struct comm_pipes comm_pipes;
char buf;
int ret;
ret = setup_comm_pipes(&comm_pipes);
if (ret) {
ksft_test_result_fail("pipe() failed\n");
return;
}
/*
* Trigger PTE-mapping the THP by temporarily mapping a single subpage
* R/O, such that we can try collapsing it later.
*/
ret = mprotect(mem + pagesize, pagesize, PROT_READ);
if (ret) {
ksft_test_result_fail("mprotect() failed\n");
goto close_comm_pipes;
}
ret = mprotect(mem + pagesize, pagesize, PROT_READ | PROT_WRITE);
if (ret) {
ksft_test_result_fail("mprotect() failed\n");
goto close_comm_pipes;
}
switch (test) {
case ANON_THP_COLLAPSE_UNSHARED:
/* Collapse before actually COW-sharing the page. */
ret = madvise(mem, size, MADV_COLLAPSE);
if (ret) {
ksft_test_result_skip("MADV_COLLAPSE failed: %s\n",
strerror(errno));
goto close_comm_pipes;
}
break;
case ANON_THP_COLLAPSE_FULLY_SHARED:
/* COW-share the full PTE-mapped THP. */
break;
case ANON_THP_COLLAPSE_LOWER_SHARED:
/* Don't COW-share the upper part of the THP. */
ret = madvise(mem + size / 2, size / 2, MADV_DONTFORK);
if (ret) {
ksft_test_result_fail("MADV_DONTFORK failed\n");
goto close_comm_pipes;
}
break;
case ANON_THP_COLLAPSE_UPPER_SHARED:
/* Don't COW-share the lower part of the THP. */
ret = madvise(mem, size / 2, MADV_DONTFORK);
if (ret) {
ksft_test_result_fail("MADV_DONTFORK failed\n");
goto close_comm_pipes;
}
break;
default:
assert(false);
}
ret = fork();
if (ret < 0) {
ksft_test_result_fail("fork() failed\n");
goto close_comm_pipes;
} else if (!ret) {
switch (test) {
case ANON_THP_COLLAPSE_UNSHARED:
case ANON_THP_COLLAPSE_FULLY_SHARED:
exit(child_memcmp_fn(mem, size, &comm_pipes));
break;
case ANON_THP_COLLAPSE_LOWER_SHARED:
exit(child_memcmp_fn(mem, size / 2, &comm_pipes));
break;
case ANON_THP_COLLAPSE_UPPER_SHARED:
exit(child_memcmp_fn(mem + size / 2, size / 2,
&comm_pipes));
break;
default:
assert(false);
}
}
while (read(comm_pipes.child_ready[0], &buf, 1) != 1)
;
switch (test) {
case ANON_THP_COLLAPSE_UNSHARED:
break;
case ANON_THP_COLLAPSE_UPPER_SHARED:
case ANON_THP_COLLAPSE_LOWER_SHARED:
/*
* Revert MADV_DONTFORK such that we merge the VMAs and are
* able to actually collapse.
*/
ret = madvise(mem, size, MADV_DOFORK);
if (ret) {
ksft_test_result_fail("MADV_DOFORK failed\n");
write(comm_pipes.parent_ready[1], "0", 1);
wait(&ret);
goto close_comm_pipes;
}
/* FALLTHROUGH */
case ANON_THP_COLLAPSE_FULLY_SHARED:
/* Collapse before anyone modified the COW-shared page. */
ret = madvise(mem, size, MADV_COLLAPSE);
if (ret) {
ksft_test_result_skip("MADV_COLLAPSE failed: %s\n",
strerror(errno));
write(comm_pipes.parent_ready[1], "0", 1);
wait(&ret);
goto close_comm_pipes;
}
break;
default:
assert(false);
}
/* Modify the page. */
memset(mem, 0xff, size);
write(comm_pipes.parent_ready[1], "0", 1);
wait(&ret);
if (WIFEXITED(ret))
ret = WEXITSTATUS(ret);
else
ret = -EINVAL;
ksft_test_result(!ret, "No leak from parent into child\n");
close_comm_pipes:
close_comm_pipes(&comm_pipes);
}
static void test_anon_thp_collapse_unshared(char *mem, size_t size)
{
do_test_anon_thp_collapse(mem, size, ANON_THP_COLLAPSE_UNSHARED);
}
static void test_anon_thp_collapse_fully_shared(char *mem, size_t size)
{
do_test_anon_thp_collapse(mem, size, ANON_THP_COLLAPSE_FULLY_SHARED);
}
static void test_anon_thp_collapse_lower_shared(char *mem, size_t size)
{
do_test_anon_thp_collapse(mem, size, ANON_THP_COLLAPSE_LOWER_SHARED);
}
static void test_anon_thp_collapse_upper_shared(char *mem, size_t size)
{
do_test_anon_thp_collapse(mem, size, ANON_THP_COLLAPSE_UPPER_SHARED);
}
/*
* Test cases that are specific to anonymous THP: pages in private mappings
* that may get shared via COW during fork().
*/
static const struct test_case anon_thp_test_cases[] = {
/*
* Basic COW test for fork() without any GUP when collapsing a THP
* before fork().
*
* Re-mapping a PTE-mapped anon THP using a single PMD ("in-place
* collapse") might easily get COW handling wrong when not collapsing
* exclusivity information properly.
*/
{
"Basic COW after fork() when collapsing before fork()",
test_anon_thp_collapse_unshared,
},
/* Basic COW test, but collapse after COW-sharing a full THP. */
{
"Basic COW after fork() when collapsing after fork() (fully shared)",
test_anon_thp_collapse_fully_shared,
},
/*
* Basic COW test, but collapse after COW-sharing the lower half of a
* THP.
*/
{
"Basic COW after fork() when collapsing after fork() (lower shared)",
test_anon_thp_collapse_lower_shared,
},
/*
* Basic COW test, but collapse after COW-sharing the upper half of a
* THP.
*/
{
"Basic COW after fork() when collapsing after fork() (upper shared)",
test_anon_thp_collapse_upper_shared,
},
};
static void run_anon_thp_test_cases(void)
{
int i;
if (!thpsize)
return;
ksft_print_msg("[INFO] Anonymous THP tests\n");
for (i = 0; i < ARRAY_SIZE(anon_thp_test_cases); i++) {
struct test_case const *test_case = &anon_thp_test_cases[i];
ksft_print_msg("[RUN] %s\n", test_case->desc);
do_run_with_thp(test_case->fn, THP_RUN_PMD);
}
}
static int tests_per_anon_thp_test_case(void)
{
return thpsize ? 1 : 0;
}
typedef void (*non_anon_test_fn)(char *mem, const char *smem, size_t size);
static void test_cow(char *mem, const char *smem, size_t size)
{
char *old = malloc(size);
/* Backup the original content. */
memcpy(old, smem, size);
/* Modify the page. */
memset(mem, 0xff, size);
/* See if we still read the old values via the other mapping. */
ksft_test_result(!memcmp(smem, old, size),
"Other mapping not modified\n");
free(old);
}
static void test_ro_pin(char *mem, const char *smem, size_t size)
{
do_test_ro_pin(mem, size, RO_PIN_TEST, false);
}
static void test_ro_fast_pin(char *mem, const char *smem, size_t size)
{
do_test_ro_pin(mem, size, RO_PIN_TEST, true);
}
static void run_with_zeropage(non_anon_test_fn fn, const char *desc)
{
char *mem, *smem, tmp;
ksft_print_msg("[RUN] %s ... with shared zeropage\n", desc);
mem = mmap(NULL, pagesize, PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANON, -1, 0);
if (mem == MAP_FAILED) {
ksft_test_result_fail("mmap() failed\n");
return;
}
smem = mmap(NULL, pagesize, PROT_READ, MAP_PRIVATE | MAP_ANON, -1, 0);
	if (smem == MAP_FAILED) {
ksft_test_result_fail("mmap() failed\n");
goto munmap;
}
/* Read from the page to populate the shared zeropage. */
tmp = *mem + *smem;
asm volatile("" : "+r" (tmp));
fn(mem, smem, pagesize);
munmap:
munmap(mem, pagesize);
if (smem != MAP_FAILED)
munmap(smem, pagesize);
}
static void run_with_huge_zeropage(non_anon_test_fn fn, const char *desc)
{
char *mem, *smem, *mmap_mem, *mmap_smem, tmp;
size_t mmap_size;
int ret;
ksft_print_msg("[RUN] %s ... with huge zeropage\n", desc);
if (!has_huge_zeropage) {
ksft_test_result_skip("Huge zeropage not enabled\n");
return;
}
/* For alignment purposes, we need twice the thp size. */
mmap_size = 2 * thpsize;
mmap_mem = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
if (mmap_mem == MAP_FAILED) {
ksft_test_result_fail("mmap() failed\n");
return;
}
mmap_smem = mmap(NULL, mmap_size, PROT_READ,
MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
if (mmap_smem == MAP_FAILED) {
ksft_test_result_fail("mmap() failed\n");
goto munmap;
}
/* We need a THP-aligned memory area. */
mem = (char *)(((uintptr_t)mmap_mem + thpsize) & ~(thpsize - 1));
smem = (char *)(((uintptr_t)mmap_smem + thpsize) & ~(thpsize - 1));
ret = madvise(mem, thpsize, MADV_HUGEPAGE);
ret |= madvise(smem, thpsize, MADV_HUGEPAGE);
if (ret) {
ksft_test_result_fail("MADV_HUGEPAGE failed\n");
goto munmap;
}
/*
* Read from the memory to populate the huge shared zeropage. Read from
* the first sub-page and test if we get another sub-page populated
* automatically.
*/
tmp = *mem + *smem;
asm volatile("" : "+r" (tmp));
if (!pagemap_is_populated(pagemap_fd, mem + pagesize) ||
!pagemap_is_populated(pagemap_fd, smem + pagesize)) {
ksft_test_result_skip("Did not get THPs populated\n");
goto munmap;
}
fn(mem, smem, thpsize);
munmap:
munmap(mmap_mem, mmap_size);
if (mmap_smem != MAP_FAILED)
munmap(mmap_smem, mmap_size);
}
static void run_with_memfd(non_anon_test_fn fn, const char *desc)
{
char *mem, *smem, tmp;
int fd;
ksft_print_msg("[RUN] %s ... with memfd\n", desc);
fd = memfd_create("test", 0);
if (fd < 0) {
ksft_test_result_fail("memfd_create() failed\n");
return;
}
/* File consists of a single page filled with zeroes. */
if (fallocate(fd, 0, 0, pagesize)) {
ksft_test_result_fail("fallocate() failed\n");
goto close;
}
/* Create a private mapping of the memfd. */
mem = mmap(NULL, pagesize, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
if (mem == MAP_FAILED) {
ksft_test_result_fail("mmap() failed\n");
goto close;
}
smem = mmap(NULL, pagesize, PROT_READ, MAP_SHARED, fd, 0);
	if (smem == MAP_FAILED) {
ksft_test_result_fail("mmap() failed\n");
goto munmap;
}
/* Fault the page in. */
tmp = *mem + *smem;
asm volatile("" : "+r" (tmp));
fn(mem, smem, pagesize);
munmap:
munmap(mem, pagesize);
if (smem != MAP_FAILED)
munmap(smem, pagesize);
close:
close(fd);
}
static void run_with_tmpfile(non_anon_test_fn fn, const char *desc)
{
char *mem, *smem, tmp;
FILE *file;
int fd;
ksft_print_msg("[RUN] %s ... with tmpfile\n", desc);
file = tmpfile();
if (!file) {
ksft_test_result_fail("tmpfile() failed\n");
return;
}
fd = fileno(file);
if (fd < 0) {
ksft_test_result_skip("fileno() failed\n");
return;
}
/* File consists of a single page filled with zeroes. */
if (fallocate(fd, 0, 0, pagesize)) {
ksft_test_result_fail("fallocate() failed\n");
goto close;
}
	/* Create a private mapping of the tmpfile. */
mem = mmap(NULL, pagesize, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
if (mem == MAP_FAILED) {
ksft_test_result_fail("mmap() failed\n");
goto close;
}
smem = mmap(NULL, pagesize, PROT_READ, MAP_SHARED, fd, 0);
	if (smem == MAP_FAILED) {
ksft_test_result_fail("mmap() failed\n");
goto munmap;
}
/* Fault the page in. */
tmp = *mem + *smem;
asm volatile("" : "+r" (tmp));
fn(mem, smem, pagesize);
munmap:
munmap(mem, pagesize);
if (smem != MAP_FAILED)
munmap(smem, pagesize);
close:
fclose(file);
}
static void run_with_memfd_hugetlb(non_anon_test_fn fn, const char *desc,
size_t hugetlbsize)
{
int flags = MFD_HUGETLB;
char *mem, *smem, tmp;
int fd;
ksft_print_msg("[RUN] %s ... with memfd hugetlb (%zu kB)\n", desc,
hugetlbsize / 1024);
flags |= __builtin_ctzll(hugetlbsize) << MFD_HUGE_SHIFT;
fd = memfd_create("test", flags);
if (fd < 0) {
ksft_test_result_skip("memfd_create() failed\n");
return;
}
/* File consists of a single page filled with zeroes. */
if (fallocate(fd, 0, 0, hugetlbsize)) {
ksft_test_result_skip("need more free huge pages\n");
goto close;
}
/* Create a private mapping of the memfd. */
mem = mmap(NULL, hugetlbsize, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd,
0);
if (mem == MAP_FAILED) {
ksft_test_result_skip("need more free huge pages\n");
goto close;
}
smem = mmap(NULL, hugetlbsize, PROT_READ, MAP_SHARED, fd, 0);
	if (smem == MAP_FAILED) {
ksft_test_result_fail("mmap() failed\n");
goto munmap;
}
/* Fault the page in. */
tmp = *mem + *smem;
asm volatile("" : "+r" (tmp));
fn(mem, smem, hugetlbsize);
munmap:
munmap(mem, hugetlbsize);
	if (smem != MAP_FAILED)
munmap(smem, hugetlbsize);
close:
close(fd);
}
struct non_anon_test_case {
const char *desc;
non_anon_test_fn fn;
};
/*
* Test cases that target any pages in private mappings that are not anonymous:
 * pages that may get shared via COW independent of fork(). This includes
* the shared zeropage(s), pagecache pages, ...
*/
static const struct non_anon_test_case non_anon_test_cases[] = {
/*
	 * Basic COW test without any GUP. If we fail to break COW, changes are
* visible via other private/shared mappings.
*/
{
"Basic COW",
test_cow,
},
/*
* Take a R/O longterm pin. When modifying the page via the page table,
* the page content change must be visible via the pin.
*/
{
"R/O longterm GUP pin",
test_ro_pin,
},
/* Same as above, but using GUP-fast. */
{
"R/O longterm GUP-fast pin",
test_ro_fast_pin,
},
};
static void run_non_anon_test_case(struct non_anon_test_case const *test_case)
{
int i;
run_with_zeropage(test_case->fn, test_case->desc);
run_with_memfd(test_case->fn, test_case->desc);
run_with_tmpfile(test_case->fn, test_case->desc);
if (thpsize)
run_with_huge_zeropage(test_case->fn, test_case->desc);
for (i = 0; i < nr_hugetlbsizes; i++)
run_with_memfd_hugetlb(test_case->fn, test_case->desc,
hugetlbsizes[i]);
}
static void run_non_anon_test_cases(void)
{
int i;
ksft_print_msg("[RUN] Non-anonymous memory tests in private mappings\n");
for (i = 0; i < ARRAY_SIZE(non_anon_test_cases); i++)
run_non_anon_test_case(&non_anon_test_cases[i]);
}
static int tests_per_non_anon_test_case(void)
{
int tests = 3 + nr_hugetlbsizes;
if (thpsize)
tests += 1;
return tests;
}
int main(int argc, char **argv)
{
int err;
pagesize = getpagesize();
thpsize = read_pmd_pagesize();
if (thpsize)
ksft_print_msg("[INFO] detected THP size: %zu KiB\n",
thpsize / 1024);
nr_hugetlbsizes = detect_hugetlb_page_sizes(hugetlbsizes,
ARRAY_SIZE(hugetlbsizes));
detect_huge_zeropage();
ksft_print_header();
ksft_set_plan(ARRAY_SIZE(anon_test_cases) * tests_per_anon_test_case() +
ARRAY_SIZE(anon_thp_test_cases) * tests_per_anon_thp_test_case() +
ARRAY_SIZE(non_anon_test_cases) * tests_per_non_anon_test_case());
gup_fd = open("/sys/kernel/debug/gup_test", O_RDWR);
pagemap_fd = open("/proc/self/pagemap", O_RDONLY);
if (pagemap_fd < 0)
ksft_exit_fail_msg("opening pagemap failed\n");
run_anon_test_cases();
run_anon_thp_test_cases();
run_non_anon_test_cases();
err = ksft_get_fail_cnt();
if (err)
ksft_exit_fail_msg("%d out of %d tests failed\n",
err, ksft_test_num());
return ksft_exit_pass();
}
| linux-master | tools/testing/selftests/mm/cow.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Authors: Kirill A. Shutemov <[email protected]>
 *          Aneesh Kumar K.V <[email protected]>
*/
#include <stdio.h>
#include <sys/mman.h>
#include <string.h>
#include "../kselftest.h"
#ifdef __powerpc64__
#define PAGE_SIZE (64 << 10)
/*
* This will work with 16M and 2M hugepage size
*/
#define HUGETLB_SIZE (16 << 20)
#elif __aarch64__
/*
* The default hugepage size for 64k base pagesize
* is 512MB.
*/
#define PAGE_SIZE (64 << 10)
#define HUGETLB_SIZE (512 << 20)
#else
#define PAGE_SIZE (4 << 10)
#define HUGETLB_SIZE (2 << 20)
#endif
/*
* The hint addr value is used to allocate addresses
* beyond the high address switch boundary.
*/
#define ADDR_MARK_128TB (1UL << 47)
#define ADDR_MARK_256TB (1UL << 48)
#define HIGH_ADDR_128TB ((void *) (1UL << 48))
#define HIGH_ADDR_256TB ((void *) (1UL << 49))
#define LOW_ADDR ((void *) (1UL << 30))
#ifdef __aarch64__
#define ADDR_SWITCH_HINT ADDR_MARK_256TB
#define HIGH_ADDR HIGH_ADDR_256TB
#else
#define ADDR_SWITCH_HINT ADDR_MARK_128TB
#define HIGH_ADDR HIGH_ADDR_128TB
#endif
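/*
 * Note: on these 64-bit architectures the kernel keeps mmap() allocations
 * below the ADDR_SWITCH_HINT boundary by default; only a hint address (or
 * a MAP_FIXED mapping) at or above the boundary opts the mapping into the
 * larger virtual address space. The testcases below encode exactly that
 * expectation via .low_addr_required.
 */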
struct testcase {
void *addr;
unsigned long size;
unsigned long flags;
const char *msg;
unsigned int low_addr_required:1;
unsigned int keep_mapped:1;
};
static struct testcase testcases[] = {
{
/*
* If stack is moved, we could possibly allocate
* this at the requested address.
*/
.addr = ((void *)(ADDR_SWITCH_HINT - PAGE_SIZE)),
.size = PAGE_SIZE,
.flags = MAP_PRIVATE | MAP_ANONYMOUS,
.msg = "mmap(ADDR_SWITCH_HINT - PAGE_SIZE, PAGE_SIZE)",
.low_addr_required = 1,
},
{
/*
* Unless MAP_FIXED is specified, allocation based on hint
* addr is never at requested address or above it, which is
* beyond high address switch boundary in this case. Instead,
* a suitable allocation is found in lower address space.
*/
.addr = ((void *)(ADDR_SWITCH_HINT - PAGE_SIZE)),
.size = 2 * PAGE_SIZE,
.flags = MAP_PRIVATE | MAP_ANONYMOUS,
.msg = "mmap(ADDR_SWITCH_HINT - PAGE_SIZE, (2 * PAGE_SIZE))",
.low_addr_required = 1,
},
{
/*
* Exact mapping at high address switch boundary, should
* be obtained even without MAP_FIXED as area is free.
*/
.addr = ((void *)(ADDR_SWITCH_HINT)),
.size = PAGE_SIZE,
.flags = MAP_PRIVATE | MAP_ANONYMOUS,
.msg = "mmap(ADDR_SWITCH_HINT, PAGE_SIZE)",
.keep_mapped = 1,
},
{
.addr = (void *)(ADDR_SWITCH_HINT),
.size = 2 * PAGE_SIZE,
.flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
.msg = "mmap(ADDR_SWITCH_HINT, 2 * PAGE_SIZE, MAP_FIXED)",
},
{
.addr = NULL,
.size = 2 * PAGE_SIZE,
.flags = MAP_PRIVATE | MAP_ANONYMOUS,
.msg = "mmap(NULL)",
.low_addr_required = 1,
},
{
.addr = LOW_ADDR,
.size = 2 * PAGE_SIZE,
.flags = MAP_PRIVATE | MAP_ANONYMOUS,
.msg = "mmap(LOW_ADDR)",
.low_addr_required = 1,
},
{
.addr = HIGH_ADDR,
.size = 2 * PAGE_SIZE,
.flags = MAP_PRIVATE | MAP_ANONYMOUS,
.msg = "mmap(HIGH_ADDR)",
.keep_mapped = 1,
},
{
.addr = HIGH_ADDR,
.size = 2 * PAGE_SIZE,
.flags = MAP_PRIVATE | MAP_ANONYMOUS,
.msg = "mmap(HIGH_ADDR) again",
.keep_mapped = 1,
},
{
.addr = HIGH_ADDR,
.size = 2 * PAGE_SIZE,
.flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
.msg = "mmap(HIGH_ADDR, MAP_FIXED)",
},
{
.addr = (void *) -1,
.size = 2 * PAGE_SIZE,
.flags = MAP_PRIVATE | MAP_ANONYMOUS,
.msg = "mmap(-1)",
.keep_mapped = 1,
},
{
.addr = (void *) -1,
.size = 2 * PAGE_SIZE,
.flags = MAP_PRIVATE | MAP_ANONYMOUS,
.msg = "mmap(-1) again",
},
{
.addr = ((void *)(ADDR_SWITCH_HINT - PAGE_SIZE)),
.size = PAGE_SIZE,
.flags = MAP_PRIVATE | MAP_ANONYMOUS,
.msg = "mmap(ADDR_SWITCH_HINT - PAGE_SIZE, PAGE_SIZE)",
.low_addr_required = 1,
},
{
.addr = (void *)(ADDR_SWITCH_HINT - PAGE_SIZE),
.size = 2 * PAGE_SIZE,
.flags = MAP_PRIVATE | MAP_ANONYMOUS,
.msg = "mmap(ADDR_SWITCH_HINT - PAGE_SIZE, 2 * PAGE_SIZE)",
.low_addr_required = 1,
.keep_mapped = 1,
},
{
.addr = (void *)(ADDR_SWITCH_HINT - PAGE_SIZE / 2),
.size = 2 * PAGE_SIZE,
.flags = MAP_PRIVATE | MAP_ANONYMOUS,
.msg = "mmap(ADDR_SWITCH_HINT - PAGE_SIZE/2 , 2 * PAGE_SIZE)",
.low_addr_required = 1,
.keep_mapped = 1,
},
{
.addr = ((void *)(ADDR_SWITCH_HINT)),
.size = PAGE_SIZE,
.flags = MAP_PRIVATE | MAP_ANONYMOUS,
.msg = "mmap(ADDR_SWITCH_HINT, PAGE_SIZE)",
},
{
.addr = (void *)(ADDR_SWITCH_HINT),
.size = 2 * PAGE_SIZE,
.flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
.msg = "mmap(ADDR_SWITCH_HINT, 2 * PAGE_SIZE, MAP_FIXED)",
},
};
static struct testcase hugetlb_testcases[] = {
{
.addr = NULL,
.size = HUGETLB_SIZE,
.flags = MAP_HUGETLB | MAP_PRIVATE | MAP_ANONYMOUS,
.msg = "mmap(NULL, MAP_HUGETLB)",
.low_addr_required = 1,
},
{
.addr = LOW_ADDR,
.size = HUGETLB_SIZE,
.flags = MAP_HUGETLB | MAP_PRIVATE | MAP_ANONYMOUS,
.msg = "mmap(LOW_ADDR, MAP_HUGETLB)",
.low_addr_required = 1,
},
{
.addr = HIGH_ADDR,
.size = HUGETLB_SIZE,
.flags = MAP_HUGETLB | MAP_PRIVATE | MAP_ANONYMOUS,
.msg = "mmap(HIGH_ADDR, MAP_HUGETLB)",
.keep_mapped = 1,
},
{
.addr = HIGH_ADDR,
.size = HUGETLB_SIZE,
.flags = MAP_HUGETLB | MAP_PRIVATE | MAP_ANONYMOUS,
.msg = "mmap(HIGH_ADDR, MAP_HUGETLB) again",
.keep_mapped = 1,
},
{
.addr = HIGH_ADDR,
.size = HUGETLB_SIZE,
.flags = MAP_HUGETLB | MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
.msg = "mmap(HIGH_ADDR, MAP_FIXED | MAP_HUGETLB)",
},
{
.addr = (void *) -1,
.size = HUGETLB_SIZE,
.flags = MAP_HUGETLB | MAP_PRIVATE | MAP_ANONYMOUS,
.msg = "mmap(-1, MAP_HUGETLB)",
.keep_mapped = 1,
},
{
.addr = (void *) -1,
.size = HUGETLB_SIZE,
.flags = MAP_HUGETLB | MAP_PRIVATE | MAP_ANONYMOUS,
.msg = "mmap(-1, MAP_HUGETLB) again",
},
{
.addr = (void *)(ADDR_SWITCH_HINT - PAGE_SIZE),
.size = 2 * HUGETLB_SIZE,
.flags = MAP_HUGETLB | MAP_PRIVATE | MAP_ANONYMOUS,
.msg = "mmap(ADDR_SWITCH_HINT - PAGE_SIZE, 2*HUGETLB_SIZE, MAP_HUGETLB)",
.low_addr_required = 1,
.keep_mapped = 1,
},
{
.addr = (void *)(ADDR_SWITCH_HINT),
.size = 2 * HUGETLB_SIZE,
.flags = MAP_HUGETLB | MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
.msg = "mmap(ADDR_SWITCH_HINT , 2*HUGETLB_SIZE, MAP_FIXED | MAP_HUGETLB)",
},
};
static int run_test(struct testcase *test, int count)
{
void *p;
int i, ret = KSFT_PASS;
for (i = 0; i < count; i++) {
struct testcase *t = test + i;
p = mmap(t->addr, t->size, PROT_READ | PROT_WRITE, t->flags, -1, 0);
printf("%s: %p - ", t->msg, p);
if (p == MAP_FAILED) {
printf("FAILED\n");
ret = KSFT_FAIL;
continue;
}
if (t->low_addr_required && p >= (void *)(ADDR_SWITCH_HINT)) {
printf("FAILED\n");
ret = KSFT_FAIL;
} else {
/*
* Do a dereference of the address returned so that we catch
* bugs in page fault handling
*/
memset(p, 0, t->size);
printf("OK\n");
}
if (!t->keep_mapped)
munmap(p, t->size);
}
return ret;
}
static int supported_arch(void)
{
#if defined(__powerpc64__)
return 1;
#elif defined(__x86_64__)
return 1;
#elif defined(__aarch64__)
return getpagesize() == PAGE_SIZE;
#else
return 0;
#endif
}
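/*
 * The normal testcases always run; passing "--run-hugetlb" additionally
 * runs the hugetlb testcases, which need free huge pages of the default
 * size to be available.
 */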
int main(int argc, char **argv)
{
int ret;
if (!supported_arch())
return KSFT_SKIP;
ret = run_test(testcases, ARRAY_SIZE(testcases));
if (argc == 2 && !strcmp(argv[1], "--run-hugetlb"))
ret = run_test(hugetlb_testcases, ARRAY_SIZE(hugetlb_testcases));
return ret;
}
| linux-master | tools/testing/selftests/mm/va_high_addr_switch.c |
// SPDX-License-Identifier: GPL-2.0
#include <stdio.h>
#include <string.h>
#include <stdbool.h>
#include <fcntl.h>
#include <stdint.h>
#include <malloc.h>
#include <sys/mman.h>
#include "../kselftest.h"
#include "vm_util.h"
#define PAGEMAP_FILE_PATH "/proc/self/pagemap"
#define TEST_ITERATIONS 10000
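/*
 * The tests below rely on pagemap_is_softdirty() from vm_util.h. As an
 * illustrative sketch only (the helper name and the <unistd.h> dependency
 * for pread() are assumptions, not part of this test), the check boils
 * down to reading the 64-bit /proc/self/pagemap entry for a virtual
 * address and testing bit 55, which the kernel documents as the
 * soft-dirty bit:
 */
#if 0	/* sketch, not compiled */
static bool softdirty_sketch(int pagemap_fd, void *addr, size_t pagesize)
{
	uint64_t entry;
	off_t offset = ((uintptr_t)addr / pagesize) * sizeof(entry);

	/* Each page has one 8-byte entry in /proc/self/pagemap. */
	if (pread(pagemap_fd, &entry, sizeof(entry), offset) != sizeof(entry))
		return false;
	return entry & (1ULL << 55);	/* soft-dirty bit */
}
#endif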
static void test_simple(int pagemap_fd, int pagesize)
{
int i;
char *map;
map = aligned_alloc(pagesize, pagesize);
if (!map)
ksft_exit_fail_msg("mmap failed\n");
clear_softdirty();
for (i = 0 ; i < TEST_ITERATIONS; i++) {
if (pagemap_is_softdirty(pagemap_fd, map) == 1) {
ksft_print_msg("dirty bit was 1, but should be 0 (i=%d)\n", i);
break;
}
clear_softdirty();
// Write something to the page to get the dirty bit enabled on the page
map[0]++;
if (pagemap_is_softdirty(pagemap_fd, map) == 0) {
ksft_print_msg("dirty bit was 0, but should be 1 (i=%d)\n", i);
break;
}
clear_softdirty();
}
free(map);
ksft_test_result(i == TEST_ITERATIONS, "Test %s\n", __func__);
}
static void test_vma_reuse(int pagemap_fd, int pagesize)
{
char *map, *map2;
map = mmap(NULL, pagesize, (PROT_READ | PROT_WRITE), (MAP_PRIVATE | MAP_ANON), -1, 0);
if (map == MAP_FAILED)
ksft_exit_fail_msg("mmap failed");
// The kernel always marks new regions as soft dirty
ksft_test_result(pagemap_is_softdirty(pagemap_fd, map) == 1,
"Test %s dirty bit of allocated page\n", __func__);
clear_softdirty();
munmap(map, pagesize);
map2 = mmap(NULL, pagesize, (PROT_READ | PROT_WRITE), (MAP_PRIVATE | MAP_ANON), -1, 0);
if (map2 == MAP_FAILED)
ksft_exit_fail_msg("mmap failed");
// Dirty bit is set for new regions even if they are reused
if (map == map2)
ksft_test_result(pagemap_is_softdirty(pagemap_fd, map2) == 1,
"Test %s dirty bit of reused address page\n", __func__);
else
ksft_test_result_skip("Test %s dirty bit of reused address page\n", __func__);
munmap(map2, pagesize);
}
static void test_hugepage(int pagemap_fd, int pagesize)
{
char *map;
int i, ret;
size_t hpage_len = read_pmd_pagesize();
if (!hpage_len)
ksft_exit_fail_msg("Reading PMD pagesize failed");
map = memalign(hpage_len, hpage_len);
if (!map)
ksft_exit_fail_msg("memalign failed\n");
ret = madvise(map, hpage_len, MADV_HUGEPAGE);
if (ret)
ksft_exit_fail_msg("madvise failed %d\n", ret);
for (i = 0; i < hpage_len; i++)
map[i] = (char)i;
if (check_huge_anon(map, 1, hpage_len)) {
ksft_test_result_pass("Test %s huge page allocation\n", __func__);
clear_softdirty();
for (i = 0 ; i < TEST_ITERATIONS ; i++) {
if (pagemap_is_softdirty(pagemap_fd, map) == 1) {
ksft_print_msg("dirty bit was 1, but should be 0 (i=%d)\n", i);
break;
}
clear_softdirty();
// Write something to the page to get the dirty bit enabled on the page
map[0]++;
if (pagemap_is_softdirty(pagemap_fd, map) == 0) {
ksft_print_msg("dirty bit was 0, but should be 1 (i=%d)\n", i);
break;
}
clear_softdirty();
}
ksft_test_result(i == TEST_ITERATIONS, "Test %s huge page dirty bit\n", __func__);
} else {
// hugepage allocation failed. skip these tests
ksft_test_result_skip("Test %s huge page allocation\n", __func__);
ksft_test_result_skip("Test %s huge page dirty bit\n", __func__);
}
free(map);
}
static void test_mprotect(int pagemap_fd, int pagesize, bool anon)
{
const char *type[] = {"file", "anon"};
const char *fname = "./soft-dirty-test-file";
int test_fd;
char *map;
if (anon) {
map = mmap(NULL, pagesize, PROT_READ|PROT_WRITE,
MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
		if (map == MAP_FAILED)
ksft_exit_fail_msg("anon mmap failed\n");
} else {
		test_fd = open(fname, O_RDWR | O_CREAT, 0644);
if (test_fd < 0) {
ksft_test_result_skip("Test %s open() file failed\n", __func__);
return;
}
unlink(fname);
ftruncate(test_fd, pagesize);
map = mmap(NULL, pagesize, PROT_READ|PROT_WRITE,
MAP_SHARED, test_fd, 0);
		if (map == MAP_FAILED)
ksft_exit_fail_msg("file mmap failed\n");
}
*map = 1;
ksft_test_result(pagemap_is_softdirty(pagemap_fd, map) == 1,
"Test %s-%s dirty bit of new written page\n",
__func__, type[anon]);
clear_softdirty();
ksft_test_result(pagemap_is_softdirty(pagemap_fd, map) == 0,
"Test %s-%s soft-dirty clear after clear_refs\n",
__func__, type[anon]);
mprotect(map, pagesize, PROT_READ);
ksft_test_result(pagemap_is_softdirty(pagemap_fd, map) == 0,
"Test %s-%s soft-dirty clear after marking RO\n",
__func__, type[anon]);
mprotect(map, pagesize, PROT_READ|PROT_WRITE);
ksft_test_result(pagemap_is_softdirty(pagemap_fd, map) == 0,
"Test %s-%s soft-dirty clear after marking RW\n",
__func__, type[anon]);
*map = 2;
ksft_test_result(pagemap_is_softdirty(pagemap_fd, map) == 1,
"Test %s-%s soft-dirty after rewritten\n",
__func__, type[anon]);
munmap(map, pagesize);
if (!anon)
close(test_fd);
}
static void test_mprotect_anon(int pagemap_fd, int pagesize)
{
test_mprotect(pagemap_fd, pagesize, true);
}
static void test_mprotect_file(int pagemap_fd, int pagesize)
{
test_mprotect(pagemap_fd, pagesize, false);
}
int main(int argc, char **argv)
{
int pagemap_fd;
int pagesize;
ksft_print_header();
ksft_set_plan(15);
pagemap_fd = open(PAGEMAP_FILE_PATH, O_RDONLY);
if (pagemap_fd < 0)
ksft_exit_fail_msg("Failed to open %s\n", PAGEMAP_FILE_PATH);
pagesize = getpagesize();
test_simple(pagemap_fd, pagesize);
test_vma_reuse(pagemap_fd, pagesize);
test_hugepage(pagemap_fd, pagesize);
test_mprotect_anon(pagemap_fd, pagesize);
test_mprotect_file(pagemap_fd, pagesize);
close(pagemap_fd);
return ksft_exit_pass();
}
| linux-master | tools/testing/selftests/mm/soft-dirty.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Userfaultfd unit tests.
*
* Copyright (C) 2015-2023 Red Hat, Inc.
*/
#include "uffd-common.h"
#include "../../../../mm/gup_test.h"
#ifdef __NR_userfaultfd
/* The unit test doesn't need a large or random size, make it 32MB for now */
#define UFFD_TEST_MEM_SIZE (32UL << 20)
#define MEM_ANON BIT_ULL(0)
#define MEM_SHMEM BIT_ULL(1)
#define MEM_SHMEM_PRIVATE BIT_ULL(2)
#define MEM_HUGETLB BIT_ULL(3)
#define MEM_HUGETLB_PRIVATE BIT_ULL(4)
#define MEM_ALL (MEM_ANON | MEM_SHMEM | MEM_SHMEM_PRIVATE | \
MEM_HUGETLB | MEM_HUGETLB_PRIVATE)
struct mem_type {
const char *name;
unsigned int mem_flag;
uffd_test_ops_t *mem_ops;
bool shared;
};
typedef struct mem_type mem_type_t;
mem_type_t mem_types[] = {
{
.name = "anon",
.mem_flag = MEM_ANON,
.mem_ops = &anon_uffd_test_ops,
.shared = false,
},
{
.name = "shmem",
.mem_flag = MEM_SHMEM,
.mem_ops = &shmem_uffd_test_ops,
.shared = true,
},
{
.name = "shmem-private",
.mem_flag = MEM_SHMEM_PRIVATE,
.mem_ops = &shmem_uffd_test_ops,
.shared = false,
},
{
.name = "hugetlb",
.mem_flag = MEM_HUGETLB,
.mem_ops = &hugetlb_uffd_test_ops,
.shared = true,
},
{
.name = "hugetlb-private",
.mem_flag = MEM_HUGETLB_PRIVATE,
.mem_ops = &hugetlb_uffd_test_ops,
.shared = false,
},
};
/* Arguments to be passed over to each uffd unit test */
struct uffd_test_args {
mem_type_t *mem_type;
};
typedef struct uffd_test_args uffd_test_args_t;
/* Returns: UFFD_TEST_* */
typedef void (*uffd_test_fn)(uffd_test_args_t *);
typedef struct {
const char *name;
uffd_test_fn uffd_fn;
unsigned int mem_targets;
uint64_t uffd_feature_required;
} uffd_test_case_t;
static void uffd_test_report(void)
{
printf("Userfaults unit tests: pass=%u, skip=%u, fail=%u (total=%u)\n",
ksft_get_pass_cnt(),
ksft_get_xskip_cnt(),
ksft_get_fail_cnt(),
ksft_test_num());
}
static void uffd_test_pass(void)
{
printf("done\n");
ksft_inc_pass_cnt();
}
#define uffd_test_start(...) do { \
printf("Testing "); \
printf(__VA_ARGS__); \
printf("... "); \
fflush(stdout); \
} while (0)
#define uffd_test_fail(...) do { \
printf("failed [reason: "); \
printf(__VA_ARGS__); \
printf("]\n"); \
ksft_inc_fail_cnt(); \
} while (0)
static void uffd_test_skip(const char *message)
{
printf("skipped [reason: %s]\n", message);
ksft_inc_xskip_cnt();
}
/*
 * Returns 1 if the specific userfaultfd is supported, 0 otherwise. Note,
 * we'll return 1 even if some test failed as long as uffd is supported,
 * because in that case we still want to proceed with the rest of the uffd
 * unit tests.
*/
static int test_uffd_api(bool use_dev)
{
struct uffdio_api uffdio_api;
int uffd;
uffd_test_start("UFFDIO_API (with %s)",
use_dev ? "/dev/userfaultfd" : "syscall");
if (use_dev)
uffd = uffd_open_dev(UFFD_FLAGS);
else
uffd = uffd_open_sys(UFFD_FLAGS);
if (uffd < 0) {
uffd_test_skip("cannot open userfaultfd handle");
return 0;
}
/* Test wrong UFFD_API */
uffdio_api.api = 0xab;
uffdio_api.features = 0;
if (ioctl(uffd, UFFDIO_API, &uffdio_api) == 0) {
uffd_test_fail("UFFDIO_API should fail with wrong api but didn't");
goto out;
}
/* Test wrong feature bit */
uffdio_api.api = UFFD_API;
uffdio_api.features = BIT_ULL(63);
if (ioctl(uffd, UFFDIO_API, &uffdio_api) == 0) {
uffd_test_fail("UFFDIO_API should fail with wrong feature but didn't");
goto out;
}
/* Test normal UFFDIO_API */
uffdio_api.api = UFFD_API;
uffdio_api.features = 0;
if (ioctl(uffd, UFFDIO_API, &uffdio_api)) {
uffd_test_fail("UFFDIO_API should succeed but failed");
goto out;
}
/* Test double requests of UFFDIO_API with a random feature set */
uffdio_api.features = BIT_ULL(0);
if (ioctl(uffd, UFFDIO_API, &uffdio_api) == 0) {
uffd_test_fail("UFFDIO_API should reject initialized uffd");
goto out;
}
uffd_test_pass();
out:
close(uffd);
/* We have a valid uffd handle */
return 1;
}
/*
* This function initializes the global variables. TODO: remove global
* vars and then remove this.
*/
static int
uffd_setup_environment(uffd_test_args_t *args, uffd_test_case_t *test,
mem_type_t *mem_type, const char **errmsg)
{
map_shared = mem_type->shared;
uffd_test_ops = mem_type->mem_ops;
if (mem_type->mem_flag & (MEM_HUGETLB_PRIVATE | MEM_HUGETLB))
page_size = default_huge_page_size();
else
page_size = psize();
nr_pages = UFFD_TEST_MEM_SIZE / page_size;
/* TODO: remove this global var.. it's so ugly */
nr_cpus = 1;
/* Initialize test arguments */
args->mem_type = mem_type;
return uffd_test_ctx_init(test->uffd_feature_required, errmsg);
}
static bool uffd_feature_supported(uffd_test_case_t *test)
{
uint64_t features;
if (uffd_get_features(&features))
return false;
return (features & test->uffd_feature_required) ==
test->uffd_feature_required;
}
static int pagemap_open(void)
{
int fd = open("/proc/self/pagemap", O_RDONLY);
if (fd < 0)
err("open pagemap");
return fd;
}
/* This macro lets __LINE__ work in err() */
#define pagemap_check_wp(value, wp) do { \
if (!!(value & PM_UFFD_WP) != wp) \
err("pagemap uffd-wp bit error: 0x%"PRIx64, value); \
} while (0)
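/*
 * PM_UFFD_WP is the userfaultfd write-protect bit of the 64-bit pagemap
 * entry (bit 57 in current kernels); pagemap_get_entry() reads that entry
 * for a given virtual address.
 */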
typedef struct {
int parent_uffd, child_uffd;
} fork_event_args;
static void *fork_event_consumer(void *data)
{
fork_event_args *args = data;
struct uffd_msg msg = { 0 };
/* Read until a full msg received */
while (uffd_read_msg(args->parent_uffd, &msg));
if (msg.event != UFFD_EVENT_FORK)
err("wrong message: %u\n", msg.event);
/* Just to be properly freed later */
args->child_uffd = msg.arg.fork.ufd;
return NULL;
}
typedef struct {
int gup_fd;
bool pinned;
} pin_args;
/*
 * Returns 0 on success, <0 on error. pin_pages() needs to be paired
 * with unpin_pages(). Currently it needs to be an R/O longterm pin to satisfy
* all needs of the test cases (e.g., trigger unshare, trigger fork() early
* CoW, etc.).
*/
static int pin_pages(pin_args *args, void *buffer, size_t size)
{
struct pin_longterm_test test = {
.addr = (uintptr_t)buffer,
.size = size,
/* Read-only pins */
.flags = 0,
};
if (args->pinned)
err("already pinned");
args->gup_fd = open("/sys/kernel/debug/gup_test", O_RDWR);
if (args->gup_fd < 0)
return -errno;
if (ioctl(args->gup_fd, PIN_LONGTERM_TEST_START, &test)) {
/* Even if gup_test existed, can be an old gup_test / kernel */
close(args->gup_fd);
return -errno;
}
args->pinned = true;
return 0;
}
static void unpin_pages(pin_args *args)
{
if (!args->pinned)
err("unpin without pin first");
if (ioctl(args->gup_fd, PIN_LONGTERM_TEST_STOP))
err("PIN_LONGTERM_TEST_STOP");
close(args->gup_fd);
args->pinned = false;
}
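/*
 * Fork a child that checks the uffd-wp bit of area_dst's pagemap entry
 * (optionally taking an R/O longterm pin first). When with_event is set,
 * a helper thread consumes the UFFD_EVENT_FORK message. Returns the
 * child's wait status, so 0 means all checks in the child passed.
 */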
static int pagemap_test_fork(int uffd, bool with_event, bool test_pin)
{
fork_event_args args = { .parent_uffd = uffd, .child_uffd = -1 };
pthread_t thread;
pid_t child;
uint64_t value;
int fd, result;
/* Prepare a thread to resolve EVENT_FORK */
if (with_event) {
if (pthread_create(&thread, NULL, fork_event_consumer, &args))
err("pthread_create()");
}
child = fork();
if (!child) {
/* Open the pagemap fd of the child itself */
pin_args args = {};
fd = pagemap_open();
if (test_pin && pin_pages(&args, area_dst, page_size))
/*
			 * Normally when we reach here we have already pinned
			 * in previous tests, so this shouldn't fail anymore
*/
err("pin page failed in child");
value = pagemap_get_entry(fd, area_dst);
/*
* After fork(), we should handle uffd-wp bit differently:
*
* (1) when with EVENT_FORK, it should persist
* (2) when without EVENT_FORK, it should be dropped
*/
pagemap_check_wp(value, with_event);
if (test_pin)
unpin_pages(&args);
/* Succeed */
exit(0);
}
waitpid(child, &result, 0);
if (with_event) {
if (pthread_join(thread, NULL))
err("pthread_join()");
if (args.child_uffd < 0)
err("Didn't receive child uffd");
close(args.child_uffd);
}
return result;
}
static void uffd_wp_unpopulated_test(uffd_test_args_t *args)
{
uint64_t value;
int pagemap_fd;
if (uffd_register(uffd, area_dst, nr_pages * page_size,
false, true, false))
err("register failed");
pagemap_fd = pagemap_open();
/* Test applying pte marker to anon unpopulated */
wp_range(uffd, (uint64_t)area_dst, page_size, true);
value = pagemap_get_entry(pagemap_fd, area_dst);
pagemap_check_wp(value, true);
/* Test unprotect on anon pte marker */
wp_range(uffd, (uint64_t)area_dst, page_size, false);
value = pagemap_get_entry(pagemap_fd, area_dst);
pagemap_check_wp(value, false);
/* Test zap on anon marker */
wp_range(uffd, (uint64_t)area_dst, page_size, true);
if (madvise(area_dst, page_size, MADV_DONTNEED))
err("madvise(MADV_DONTNEED) failed");
value = pagemap_get_entry(pagemap_fd, area_dst);
pagemap_check_wp(value, false);
/* Test fault in after marker removed */
*area_dst = 1;
value = pagemap_get_entry(pagemap_fd, area_dst);
pagemap_check_wp(value, false);
/* Drop it to make pte none again */
if (madvise(area_dst, page_size, MADV_DONTNEED))
err("madvise(MADV_DONTNEED) failed");
/* Test read-zero-page upon pte marker */
wp_range(uffd, (uint64_t)area_dst, page_size, true);
*(volatile char *)area_dst;
/* Drop it to make pte none again */
if (madvise(area_dst, page_size, MADV_DONTNEED))
err("madvise(MADV_DONTNEED) failed");
uffd_test_pass();
}
static void uffd_wp_fork_test_common(uffd_test_args_t *args,
bool with_event)
{
int pagemap_fd;
uint64_t value;
if (uffd_register(uffd, area_dst, nr_pages * page_size,
false, true, false))
err("register failed");
pagemap_fd = pagemap_open();
/* Touch the page */
*area_dst = 1;
wp_range(uffd, (uint64_t)area_dst, page_size, true);
value = pagemap_get_entry(pagemap_fd, area_dst);
pagemap_check_wp(value, true);
if (pagemap_test_fork(uffd, with_event, false)) {
uffd_test_fail("Detected %s uffd-wp bit in child in present pte",
with_event ? "missing" : "stall");
goto out;
}
/*
* This is an attempt for zapping the pgtable so as to test the
* markers.
*
* For private mappings, PAGEOUT will only work on exclusive ptes
* (PM_MMAP_EXCLUSIVE) which we should satisfy.
*
* For shared, PAGEOUT may not work. Use DONTNEED instead which
* plays a similar role of zapping (rather than freeing the page)
* to expose pte markers.
*/
if (args->mem_type->shared) {
if (madvise(area_dst, page_size, MADV_DONTNEED))
err("MADV_DONTNEED");
} else {
/*
* NOTE: ignore retval because private-hugetlb doesn't yet
* support swapping, so it could fail.
*/
madvise(area_dst, page_size, MADV_PAGEOUT);
}
/* Uffd-wp should persist even swapped out */
value = pagemap_get_entry(pagemap_fd, area_dst);
pagemap_check_wp(value, true);
if (pagemap_test_fork(uffd, with_event, false)) {
uffd_test_fail("Detected %s uffd-wp bit in child in zapped pte",
with_event ? "missing" : "stall");
goto out;
}
/* Unprotect; this tests swap pte modifications */
wp_range(uffd, (uint64_t)area_dst, page_size, false);
value = pagemap_get_entry(pagemap_fd, area_dst);
pagemap_check_wp(value, false);
/* Fault in the page from disk */
*area_dst = 2;
value = pagemap_get_entry(pagemap_fd, area_dst);
pagemap_check_wp(value, false);
uffd_test_pass();
out:
if (uffd_unregister(uffd, area_dst, nr_pages * page_size))
err("unregister failed");
close(pagemap_fd);
}
static void uffd_wp_fork_test(uffd_test_args_t *args)
{
uffd_wp_fork_test_common(args, false);
}
static void uffd_wp_fork_with_event_test(uffd_test_args_t *args)
{
uffd_wp_fork_test_common(args, true);
}
static void uffd_wp_fork_pin_test_common(uffd_test_args_t *args,
bool with_event)
{
int pagemap_fd;
pin_args pin_args = {};
if (uffd_register(uffd, area_dst, page_size, false, true, false))
err("register failed");
pagemap_fd = pagemap_open();
/* Touch the page */
*area_dst = 1;
wp_range(uffd, (uint64_t)area_dst, page_size, true);
/*
* 1. First pin, then fork(). This tests fork() special path when
* doing early CoW if the page is private.
*/
if (pin_pages(&pin_args, area_dst, page_size)) {
uffd_test_skip("Possibly CONFIG_GUP_TEST missing "
"or unprivileged");
close(pagemap_fd);
uffd_unregister(uffd, area_dst, page_size);
return;
}
if (pagemap_test_fork(uffd, with_event, false)) {
uffd_test_fail("Detected %s uffd-wp bit in early CoW of fork()",
with_event ? "missing" : "stall");
unpin_pages(&pin_args);
goto out;
}
unpin_pages(&pin_args);
/*
* 2. First fork(), then pin (in the child, where test_pin==true).
* This tests COR, aka, page unsharing on private memories.
*/
if (pagemap_test_fork(uffd, with_event, true)) {
uffd_test_fail("Detected %s uffd-wp bit when RO pin",
with_event ? "missing" : "stall");
goto out;
}
uffd_test_pass();
out:
if (uffd_unregister(uffd, area_dst, page_size))
err("register failed");
close(pagemap_fd);
}
static void uffd_wp_fork_pin_test(uffd_test_args_t *args)
{
uffd_wp_fork_pin_test_common(args, false);
}
static void uffd_wp_fork_pin_with_event_test(uffd_test_args_t *args)
{
uffd_wp_fork_pin_test_common(args, true);
}
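/*
 * uffd_poll_thread resolves each minor fault by bit-flipping the page's
 * contents before issuing CONTINUE, so the expected byte here is the
 * bitwise NOT of the value the populate loop wrote (i % 255).
 */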
static void check_memory_contents(char *p)
{
unsigned long i, j;
uint8_t expected_byte;
for (i = 0; i < nr_pages; ++i) {
expected_byte = ~((uint8_t)(i % ((uint8_t)-1)));
for (j = 0; j < page_size; j++) {
uint8_t v = *(uint8_t *)(p + (i * page_size) + j);
if (v != expected_byte)
err("unexpected page contents");
}
}
}
static void uffd_minor_test_common(bool test_collapse, bool test_wp)
{
unsigned long p;
pthread_t uffd_mon;
char c;
struct uffd_args args = { 0 };
/*
* NOTE: MADV_COLLAPSE is not yet compatible with WP, so testing
* both do not make much sense.
*/
assert(!(test_collapse && test_wp));
if (uffd_register(uffd, area_dst_alias, nr_pages * page_size,
/* NOTE! MADV_COLLAPSE may not work with uffd-wp */
false, test_wp, true))
err("register failure");
/*
* After registering with UFFD, populate the non-UFFD-registered side of
* the shared mapping. This should *not* trigger any UFFD minor faults.
*/
for (p = 0; p < nr_pages; ++p)
memset(area_dst + (p * page_size), p % ((uint8_t)-1),
page_size);
args.apply_wp = test_wp;
if (pthread_create(&uffd_mon, NULL, uffd_poll_thread, &args))
err("uffd_poll_thread create");
/*
* Read each of the pages back using the UFFD-registered mapping. We
* expect that the first time we touch a page, it will result in a minor
* fault. uffd_poll_thread will resolve the fault by bit-flipping the
* page's contents, and then issuing a CONTINUE ioctl.
*/
check_memory_contents(area_dst_alias);
if (write(pipefd[1], &c, sizeof(c)) != sizeof(c))
err("pipe write");
if (pthread_join(uffd_mon, NULL))
err("join() failed");
if (test_collapse) {
if (madvise(area_dst_alias, nr_pages * page_size,
MADV_COLLAPSE)) {
/* It's fine to fail for this one... */
uffd_test_skip("MADV_COLLAPSE failed");
return;
}
uffd_test_ops->check_pmd_mapping(area_dst,
nr_pages * page_size /
read_pmd_pagesize());
/*
* This won't cause uffd-fault - it purely just makes sure there
* was no corruption.
*/
check_memory_contents(area_dst_alias);
}
if (args.missing_faults != 0 || args.minor_faults != nr_pages)
uffd_test_fail("stats check error");
else
uffd_test_pass();
}
void uffd_minor_test(uffd_test_args_t *args)
{
uffd_minor_test_common(false, false);
}
void uffd_minor_wp_test(uffd_test_args_t *args)
{
uffd_minor_test_common(false, true);
}
void uffd_minor_collapse_test(uffd_test_args_t *args)
{
uffd_minor_test_common(true, false);
}
static sigjmp_buf jbuf, *sigbuf;
static void sighndl(int sig, siginfo_t *siginfo, void *ptr)
{
if (sig == SIGBUS) {
if (sigbuf)
siglongjmp(*sigbuf, 1);
abort();
}
}
/*
 * For the non-cooperative userfaultfd test we fork() a process that will
 * generate pagefaults, mremap the area monitored by the userfaultfd and
 * finally release the monitored area.
* For the anonymous and shared memory the area is divided into two
* parts, the first part is accessed before mremap, and the second
* part is accessed after mremap. Since hugetlbfs does not support
* mremap, the entire monitored area is accessed in a single pass for
* HUGETLB_TEST.
* The release of the pages currently generates event for shmem and
* anonymous memory (UFFD_EVENT_REMOVE), hence it is not checked
* for hugetlb.
 * For the signal test (UFFD_FEATURE_SIGBUS), signal_test = 1: we register the
 * monitored area, generate pagefaults and test that the signal is delivered,
 * using UFFDIO_COPY to allocate the missing page and retry. For signal_test = 2
 * we test the robustness use case: we release the monitored area, fork a
 * process that will generate pagefaults and verify the signal is generated.
 * This also tests the UFFD_FEATURE_EVENT_FORK event along with the signal
 * feature. Using the monitor thread, we verify no userfault events are generated.
*/
static int faulting_process(int signal_test, bool wp)
{
unsigned long nr, i;
unsigned long long count;
unsigned long split_nr_pages;
unsigned long lastnr;
struct sigaction act;
volatile unsigned long signalled = 0;
split_nr_pages = (nr_pages + 1) / 2;
if (signal_test) {
sigbuf = &jbuf;
memset(&act, 0, sizeof(act));
act.sa_sigaction = sighndl;
act.sa_flags = SA_SIGINFO;
if (sigaction(SIGBUS, &act, 0))
err("sigaction");
lastnr = (unsigned long)-1;
}
for (nr = 0; nr < split_nr_pages; nr++) {
volatile int steps = 1;
unsigned long offset = nr * page_size;
if (signal_test) {
if (sigsetjmp(*sigbuf, 1) != 0) {
if (steps == 1 && nr == lastnr)
err("Signal repeated");
lastnr = nr;
if (signal_test == 1) {
if (steps == 1) {
/* This is a MISSING request */
steps++;
if (copy_page(uffd, offset, wp))
signalled++;
} else {
/* This is a WP request */
assert(steps == 2);
wp_range(uffd,
(__u64)area_dst +
offset,
page_size, false);
}
} else {
signalled++;
continue;
}
}
}
count = *area_count(area_dst, nr);
if (count != count_verify[nr])
err("nr %lu memory corruption %llu %llu\n",
nr, count, count_verify[nr]);
/*
* Trigger write protection if there is by writing
* the same value back.
*/
*area_count(area_dst, nr) = count;
}
if (signal_test)
return signalled != split_nr_pages;
area_dst = mremap(area_dst, nr_pages * page_size, nr_pages * page_size,
MREMAP_MAYMOVE | MREMAP_FIXED, area_src);
if (area_dst == MAP_FAILED)
err("mremap");
/* Reset area_src since we just clobbered it */
area_src = NULL;
for (; nr < nr_pages; nr++) {
count = *area_count(area_dst, nr);
if (count != count_verify[nr]) {
err("nr %lu memory corruption %llu %llu\n",
nr, count, count_verify[nr]);
}
/*
* Trigger write protection if there is by writing
* the same value back.
*/
*area_count(area_dst, nr) = count;
}
uffd_test_ops->release_pages(area_dst);
for (nr = 0; nr < nr_pages; nr++)
for (i = 0; i < page_size; i++)
if (*(area_dst + nr * page_size + i) != 0)
err("page %lu offset %lu is not zero", nr, i);
return 0;
}
static void uffd_sigbus_test_common(bool wp)
{
unsigned long userfaults;
pthread_t uffd_mon;
pid_t pid;
int err;
char c;
struct uffd_args args = { 0 };
fcntl(uffd, F_SETFL, uffd_flags | O_NONBLOCK);
if (uffd_register(uffd, area_dst, nr_pages * page_size,
true, wp, false))
err("register failure");
if (faulting_process(1, wp))
err("faulting process failed");
uffd_test_ops->release_pages(area_dst);
args.apply_wp = wp;
if (pthread_create(&uffd_mon, NULL, uffd_poll_thread, &args))
err("uffd_poll_thread create");
pid = fork();
if (pid < 0)
err("fork");
if (!pid)
exit(faulting_process(2, wp));
waitpid(pid, &err, 0);
if (err)
err("faulting process failed");
if (write(pipefd[1], &c, sizeof(c)) != sizeof(c))
err("pipe write");
if (pthread_join(uffd_mon, (void **)&userfaults))
err("pthread_join()");
if (userfaults)
uffd_test_fail("Signal test failed, userfaults: %ld", userfaults);
else
uffd_test_pass();
}
static void uffd_sigbus_test(uffd_test_args_t *args)
{
uffd_sigbus_test_common(false);
}
static void uffd_sigbus_wp_test(uffd_test_args_t *args)
{
uffd_sigbus_test_common(true);
}
static void uffd_events_test_common(bool wp)
{
pthread_t uffd_mon;
pid_t pid;
int err;
char c;
struct uffd_args args = { 0 };
fcntl(uffd, F_SETFL, uffd_flags | O_NONBLOCK);
if (uffd_register(uffd, area_dst, nr_pages * page_size,
true, wp, false))
err("register failure");
args.apply_wp = wp;
if (pthread_create(&uffd_mon, NULL, uffd_poll_thread, &args))
err("uffd_poll_thread create");
pid = fork();
if (pid < 0)
err("fork");
if (!pid)
exit(faulting_process(0, wp));
waitpid(pid, &err, 0);
if (err)
err("faulting process failed");
if (write(pipefd[1], &c, sizeof(c)) != sizeof(c))
err("pipe write");
if (pthread_join(uffd_mon, NULL))
err("pthread_join()");
if (args.missing_faults != nr_pages)
uffd_test_fail("Fault counts wrong");
else
uffd_test_pass();
}
static void uffd_events_test(uffd_test_args_t *args)
{
uffd_events_test_common(false);
}
static void uffd_events_wp_test(uffd_test_args_t *args)
{
uffd_events_test_common(true);
}
static void retry_uffdio_zeropage(int ufd,
struct uffdio_zeropage *uffdio_zeropage)
{
uffd_test_ops->alias_mapping(&uffdio_zeropage->range.start,
uffdio_zeropage->range.len,
0);
if (ioctl(ufd, UFFDIO_ZEROPAGE, uffdio_zeropage)) {
if (uffdio_zeropage->zeropage != -EEXIST)
err("UFFDIO_ZEROPAGE error: %"PRId64,
(int64_t)uffdio_zeropage->zeropage);
} else {
err("UFFDIO_ZEROPAGE error: %"PRId64,
(int64_t)uffdio_zeropage->zeropage);
}
}
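/*
 * Issue UFFDIO_ZEROPAGE on the first dst page. Returns true if a zeropage
 * was actually installed (so the caller can verify that it reads as
 * zeroes), false if the ioctl isn't supported for this memory type.
 */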
static bool do_uffdio_zeropage(int ufd, bool has_zeropage)
{
struct uffdio_zeropage uffdio_zeropage = { 0 };
int ret;
__s64 res;
uffdio_zeropage.range.start = (unsigned long) area_dst;
uffdio_zeropage.range.len = page_size;
uffdio_zeropage.mode = 0;
ret = ioctl(ufd, UFFDIO_ZEROPAGE, &uffdio_zeropage);
res = uffdio_zeropage.zeropage;
if (ret) {
/* real retval in ufdio_zeropage.zeropage */
if (has_zeropage)
err("UFFDIO_ZEROPAGE error: %"PRId64, (int64_t)res);
else if (res != -EINVAL)
err("UFFDIO_ZEROPAGE not -EINVAL");
} else if (has_zeropage) {
if (res != page_size)
err("UFFDIO_ZEROPAGE unexpected size");
else
retry_uffdio_zeropage(ufd, &uffdio_zeropage);
return true;
} else
err("UFFDIO_ZEROPAGE succeeded");
return false;
}
/*
 * Registers a range with MISSING mode only for the zeropage test. Returns
 * true if UFFDIO_ZEROPAGE is supported, false otherwise. Can't use
 * uffd_register() because we want to detect .ioctls along the way.
*/
static bool
uffd_register_detect_zeropage(int uffd, void *addr, uint64_t len)
{
uint64_t ioctls = 0;
if (uffd_register_with_ioctls(uffd, addr, len, true,
false, false, &ioctls))
err("zeropage register fail");
return ioctls & (1 << _UFFDIO_ZEROPAGE);
}
/* exercise UFFDIO_ZEROPAGE */
static void uffd_zeropage_test(uffd_test_args_t *args)
{
bool has_zeropage;
int i;
has_zeropage = uffd_register_detect_zeropage(uffd, area_dst, page_size);
if (area_dst_alias)
/* Ignore the retval; we already have it */
uffd_register_detect_zeropage(uffd, area_dst_alias, page_size);
if (do_uffdio_zeropage(uffd, has_zeropage))
for (i = 0; i < page_size; i++)
if (area_dst[i] != 0)
err("data non-zero at offset %d\n", i);
if (uffd_unregister(uffd, area_dst, page_size))
err("unregister");
if (area_dst_alias && uffd_unregister(uffd, area_dst_alias, page_size))
err("unregister");
uffd_test_pass();
}
static void uffd_register_poison(int uffd, void *addr, uint64_t len)
{
uint64_t ioctls = 0;
uint64_t expected = (1 << _UFFDIO_COPY) | (1 << _UFFDIO_POISON);
if (uffd_register_with_ioctls(uffd, addr, len, true,
false, false, &ioctls))
err("poison register fail");
if ((ioctls & expected) != expected)
err("registered area doesn't support COPY and POISON ioctls");
}
static void do_uffdio_poison(int uffd, unsigned long offset)
{
struct uffdio_poison uffdio_poison = { 0 };
int ret;
__s64 res;
uffdio_poison.range.start = (unsigned long) area_dst + offset;
uffdio_poison.range.len = page_size;
uffdio_poison.mode = 0;
ret = ioctl(uffd, UFFDIO_POISON, &uffdio_poison);
res = uffdio_poison.updated;
if (ret)
err("UFFDIO_POISON error: %"PRId64, (int64_t)res);
else if (res != page_size)
err("UFFDIO_POISON unexpected size: %"PRId64, (int64_t)res);
}
static void uffd_poison_handle_fault(
struct uffd_msg *msg, struct uffd_args *args)
{
unsigned long offset;
if (msg->event != UFFD_EVENT_PAGEFAULT)
err("unexpected msg event %u", msg->event);
if (msg->arg.pagefault.flags &
(UFFD_PAGEFAULT_FLAG_WP | UFFD_PAGEFAULT_FLAG_MINOR))
err("unexpected fault type %llu", msg->arg.pagefault.flags);
offset = (char *)(unsigned long)msg->arg.pagefault.address - area_dst;
offset &= ~(page_size-1);
/* Odd pages -> copy zeroed page; even pages -> poison. */
if (offset & page_size)
copy_page(uffd, offset, false);
else
do_uffdio_poison(uffd, offset);
}
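/*
 * Worked example of the dispatch above (illustrative only, assuming a
 * 4KiB page size): a fault at offset 0x1000 (page 1, odd) has the
 * page_size bit set, so a zeroed page is copied in and the access
 * succeeds; a fault at offset 0x2000 (page 2, even) has that bit clear,
 * so the page is poisoned and the access raises SIGBUS. Over nr_pages
 * pages this yields nr_pages / 2 SIGBUS hits, which is what
 * uffd_poison_test() below checks for.
 */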
static void uffd_poison_test(uffd_test_args_t *targs)
{
pthread_t uffd_mon;
char c;
struct uffd_args args = { 0 };
struct sigaction act = { 0 };
unsigned long nr_sigbus = 0;
unsigned long nr;
fcntl(uffd, F_SETFL, uffd_flags | O_NONBLOCK);
uffd_register_poison(uffd, area_dst, nr_pages * page_size);
memset(area_src, 0, nr_pages * page_size);
args.handle_fault = uffd_poison_handle_fault;
if (pthread_create(&uffd_mon, NULL, uffd_poll_thread, &args))
err("uffd_poll_thread create");
sigbuf = &jbuf;
act.sa_sigaction = sighndl;
act.sa_flags = SA_SIGINFO;
if (sigaction(SIGBUS, &act, 0))
err("sigaction");
for (nr = 0; nr < nr_pages; ++nr) {
unsigned long offset = nr * page_size;
const char *bytes = (const char *) area_dst + offset;
const char *i;
if (sigsetjmp(*sigbuf, 1)) {
/*
* Access below triggered a SIGBUS, which was caught by
* sighndl, which then jumped here. Count this SIGBUS,
* and move on to next page.
*/
++nr_sigbus;
continue;
}
for (i = bytes; i < bytes + page_size; ++i) {
if (*i)
err("nonzero byte in area_dst (%p) at %p: %u",
area_dst, i, *i);
}
}
if (write(pipefd[1], &c, sizeof(c)) != sizeof(c))
err("pipe write");
if (pthread_join(uffd_mon, NULL))
err("pthread_join()");
if (nr_sigbus != nr_pages / 2)
err("expected to receive %lu SIGBUS, actually received %lu",
nr_pages / 2, nr_sigbus);
uffd_test_pass();
}
/*
* Test the returned uffdio_register.ioctls with different register modes.
* Note that _UFFDIO_ZEROPAGE is tested separately in the zeropage test.
*/
static void
do_register_ioctls_test(uffd_test_args_t *args, bool miss, bool wp, bool minor)
{
uint64_t ioctls = 0, expected = BIT_ULL(_UFFDIO_WAKE);
mem_type_t *mem_type = args->mem_type;
int ret;
ret = uffd_register_with_ioctls(uffd, area_dst, page_size,
miss, wp, minor, &ioctls);
/*
* Handle special cases of UFFDIO_REGISTER here where it should
* just fail with -EINVAL first..
*
* Case 1: register MINOR on anon
* Case 2: register with no mode selected
*/
if ((minor && (mem_type->mem_flag == MEM_ANON)) ||
(!miss && !wp && !minor)) {
if (ret != -EINVAL)
err("register (miss=%d, wp=%d, minor=%d) failed "
"with wrong errno=%d", miss, wp, minor, ret);
return;
}
/* UFFDIO_REGISTER should succeed, then check ioctls returned */
if (miss)
expected |= BIT_ULL(_UFFDIO_COPY);
if (wp)
expected |= BIT_ULL(_UFFDIO_WRITEPROTECT);
if (minor)
expected |= BIT_ULL(_UFFDIO_CONTINUE);
if ((ioctls & expected) != expected)
err("unexpected uffdio_register.ioctls "
"(miss=%d, wp=%d, minor=%d): expected=0x%"PRIx64", "
"returned=0x%"PRIx64, miss, wp, minor, expected, ioctls);
if (uffd_unregister(uffd, area_dst, page_size))
err("unregister");
}
static void uffd_register_ioctls_test(uffd_test_args_t *args)
{
int miss, wp, minor;
for (miss = 0; miss <= 1; miss++)
for (wp = 0; wp <= 1; wp++)
for (minor = 0; minor <= 1; minor++)
do_register_ioctls_test(args, miss, wp, minor);
uffd_test_pass();
}
uffd_test_case_t uffd_tests[] = {
{
/* Test returned uffdio_register.ioctls. */
.name = "register-ioctls",
.uffd_fn = uffd_register_ioctls_test,
.mem_targets = MEM_ALL,
.uffd_feature_required = UFFD_FEATURE_MISSING_HUGETLBFS |
UFFD_FEATURE_MISSING_SHMEM |
UFFD_FEATURE_PAGEFAULT_FLAG_WP |
UFFD_FEATURE_WP_HUGETLBFS_SHMEM |
UFFD_FEATURE_MINOR_HUGETLBFS |
UFFD_FEATURE_MINOR_SHMEM,
},
{
.name = "zeropage",
.uffd_fn = uffd_zeropage_test,
.mem_targets = MEM_ALL,
.uffd_feature_required = 0,
},
{
.name = "wp-fork",
.uffd_fn = uffd_wp_fork_test,
.mem_targets = MEM_ALL,
.uffd_feature_required = UFFD_FEATURE_PAGEFAULT_FLAG_WP |
UFFD_FEATURE_WP_HUGETLBFS_SHMEM,
},
{
.name = "wp-fork-with-event",
.uffd_fn = uffd_wp_fork_with_event_test,
.mem_targets = MEM_ALL,
.uffd_feature_required = UFFD_FEATURE_PAGEFAULT_FLAG_WP |
UFFD_FEATURE_WP_HUGETLBFS_SHMEM |
/* when set, child process should inherit uffd-wp bits */
UFFD_FEATURE_EVENT_FORK,
},
{
.name = "wp-fork-pin",
.uffd_fn = uffd_wp_fork_pin_test,
.mem_targets = MEM_ALL,
.uffd_feature_required = UFFD_FEATURE_PAGEFAULT_FLAG_WP |
UFFD_FEATURE_WP_HUGETLBFS_SHMEM,
},
{
.name = "wp-fork-pin-with-event",
.uffd_fn = uffd_wp_fork_pin_with_event_test,
.mem_targets = MEM_ALL,
.uffd_feature_required = UFFD_FEATURE_PAGEFAULT_FLAG_WP |
UFFD_FEATURE_WP_HUGETLBFS_SHMEM |
/* when set, child process should inherit uffd-wp bits */
UFFD_FEATURE_EVENT_FORK,
},
{
.name = "wp-unpopulated",
.uffd_fn = uffd_wp_unpopulated_test,
.mem_targets = MEM_ANON,
.uffd_feature_required =
UFFD_FEATURE_PAGEFAULT_FLAG_WP | UFFD_FEATURE_WP_UNPOPULATED,
},
{
.name = "minor",
.uffd_fn = uffd_minor_test,
.mem_targets = MEM_SHMEM | MEM_HUGETLB,
.uffd_feature_required =
UFFD_FEATURE_MINOR_HUGETLBFS | UFFD_FEATURE_MINOR_SHMEM,
},
{
.name = "minor-wp",
.uffd_fn = uffd_minor_wp_test,
.mem_targets = MEM_SHMEM | MEM_HUGETLB,
.uffd_feature_required =
UFFD_FEATURE_MINOR_HUGETLBFS | UFFD_FEATURE_MINOR_SHMEM |
UFFD_FEATURE_PAGEFAULT_FLAG_WP |
/*
* HACK: here we leveraged WP_UNPOPULATED to detect whether
* minor mode supports wr-protect. There's no feature flag
* for it so this is the best we can test against.
*/
UFFD_FEATURE_WP_UNPOPULATED,
},
{
.name = "minor-collapse",
.uffd_fn = uffd_minor_collapse_test,
/* MADV_COLLAPSE only works with shmem */
.mem_targets = MEM_SHMEM,
/* We can't test MADV_COLLAPSE, so try our luck */
.uffd_feature_required = UFFD_FEATURE_MINOR_SHMEM,
},
{
.name = "sigbus",
.uffd_fn = uffd_sigbus_test,
.mem_targets = MEM_ALL,
.uffd_feature_required = UFFD_FEATURE_SIGBUS |
UFFD_FEATURE_EVENT_FORK,
},
{
.name = "sigbus-wp",
.uffd_fn = uffd_sigbus_wp_test,
.mem_targets = MEM_ALL,
.uffd_feature_required = UFFD_FEATURE_SIGBUS |
UFFD_FEATURE_EVENT_FORK | UFFD_FEATURE_PAGEFAULT_FLAG_WP,
},
{
.name = "events",
.uffd_fn = uffd_events_test,
.mem_targets = MEM_ALL,
.uffd_feature_required = UFFD_FEATURE_EVENT_FORK |
UFFD_FEATURE_EVENT_REMAP | UFFD_FEATURE_EVENT_REMOVE,
},
{
.name = "events-wp",
.uffd_fn = uffd_events_wp_test,
.mem_targets = MEM_ALL,
.uffd_feature_required = UFFD_FEATURE_EVENT_FORK |
UFFD_FEATURE_EVENT_REMAP | UFFD_FEATURE_EVENT_REMOVE |
UFFD_FEATURE_PAGEFAULT_FLAG_WP |
UFFD_FEATURE_WP_HUGETLBFS_SHMEM,
},
{
.name = "poison",
.uffd_fn = uffd_poison_test,
.mem_targets = MEM_ALL,
.uffd_feature_required = UFFD_FEATURE_POISON,
},
};
static void usage(const char *prog)
{
printf("usage: %s [-f TESTNAME]\n", prog);
puts("");
puts(" -f: test name to filter (e.g., event)");
puts(" -h: show the help msg");
puts(" -l: list tests only");
puts("");
exit(KSFT_FAIL);
}
int main(int argc, char *argv[])
{
int n_tests = sizeof(uffd_tests) / sizeof(uffd_test_case_t);
int n_mems = sizeof(mem_types) / sizeof(mem_type_t);
const char *test_filter = NULL;
bool list_only = false;
uffd_test_case_t *test;
mem_type_t *mem_type;
uffd_test_args_t args;
const char *errmsg;
int has_uffd, opt;
int i, j;
while ((opt = getopt(argc, argv, "f:hl")) != -1) {
switch (opt) {
case 'f':
test_filter = optarg;
break;
case 'l':
list_only = true;
break;
case 'h':
default:
/* Unknown */
usage(argv[0]);
break;
}
}
if (!test_filter && !list_only) {
has_uffd = test_uffd_api(false);
has_uffd |= test_uffd_api(true);
if (!has_uffd) {
printf("Userfaultfd not supported or unprivileged, skip all tests\n");
exit(KSFT_SKIP);
}
}
for (i = 0; i < n_tests; i++) {
test = &uffd_tests[i];
if (test_filter && !strstr(test->name, test_filter))
continue;
if (list_only) {
printf("%s\n", test->name);
continue;
}
for (j = 0; j < n_mems; j++) {
mem_type = &mem_types[j];
if (!(test->mem_targets & mem_type->mem_flag))
continue;
uffd_test_start("%s on %s", test->name, mem_type->name);
if (!uffd_feature_supported(test)) {
uffd_test_skip("feature missing");
continue;
}
if (uffd_setup_environment(&args, test, mem_type,
&errmsg)) {
uffd_test_skip(errmsg);
continue;
}
test->uffd_fn(&args);
}
}
if (!list_only)
uffd_test_report();
return ksft_get_fail_cnt() ? KSFT_FAIL : KSFT_PASS;
}
#else /* __NR_userfaultfd */
#warning "missing __NR_userfaultfd definition"
int main(void)
{
printf("Skipping %s (missing __NR_userfaultfd)\n", __file__);
return KSFT_SKIP;
}
#endif /* __NR_userfaultfd */
| linux-master | tools/testing/selftests/mm/uffd-unit-tests.c |
#define _GNU_SOURCE
#include <ctype.h>
#include <errno.h>
#include <fcntl.h>
#include <limits.h>
#include <dirent.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <string.h>
#include <unistd.h>
#include <linux/mman.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/sysmacros.h>
#include <sys/vfs.h>
#include "linux/magic.h"
#include "vm_util.h"
#define BASE_ADDR ((void *)(1UL << 30))
static unsigned long hpage_pmd_size;
static unsigned long page_size;
static int hpage_pmd_nr;
#define THP_SYSFS "/sys/kernel/mm/transparent_hugepage/"
#define PID_SMAPS "/proc/self/smaps"
#define TEST_FILE "collapse_test_file"
#define MAX_LINE_LENGTH 500
enum vma_type {
VMA_ANON,
VMA_FILE,
VMA_SHMEM,
};
struct mem_ops {
void *(*setup_area)(int nr_hpages);
void (*cleanup_area)(void *p, unsigned long size);
void (*fault)(void *p, unsigned long start, unsigned long end);
bool (*check_huge)(void *addr, int nr_hpages);
const char *name;
};
static struct mem_ops *file_ops;
static struct mem_ops *anon_ops;
static struct mem_ops *shmem_ops;
struct collapse_context {
void (*collapse)(const char *msg, char *p, int nr_hpages,
struct mem_ops *ops, bool expect);
bool enforce_pte_scan_limits;
const char *name;
};
static struct collapse_context *khugepaged_context;
static struct collapse_context *madvise_context;
struct file_info {
const char *dir;
char path[PATH_MAX];
enum vma_type type;
int fd;
char dev_queue_read_ahead_path[PATH_MAX];
};
static struct file_info finfo;
enum thp_enabled {
THP_ALWAYS,
THP_MADVISE,
THP_NEVER,
};
static const char *thp_enabled_strings[] = {
"always",
"madvise",
"never",
NULL
};
enum thp_defrag {
THP_DEFRAG_ALWAYS,
THP_DEFRAG_DEFER,
THP_DEFRAG_DEFER_MADVISE,
THP_DEFRAG_MADVISE,
THP_DEFRAG_NEVER,
};
static const char *thp_defrag_strings[] = {
"always",
"defer",
"defer+madvise",
"madvise",
"never",
NULL
};
enum shmem_enabled {
SHMEM_ALWAYS,
SHMEM_WITHIN_SIZE,
SHMEM_ADVISE,
SHMEM_NEVER,
SHMEM_DENY,
SHMEM_FORCE,
};
static const char *shmem_enabled_strings[] = {
"always",
"within_size",
"advise",
"never",
"deny",
"force",
NULL
};
struct khugepaged_settings {
bool defrag;
unsigned int alloc_sleep_millisecs;
unsigned int scan_sleep_millisecs;
unsigned int max_ptes_none;
unsigned int max_ptes_swap;
unsigned int max_ptes_shared;
unsigned long pages_to_scan;
};
struct settings {
enum thp_enabled thp_enabled;
enum thp_defrag thp_defrag;
enum shmem_enabled shmem_enabled;
bool use_zero_page;
struct khugepaged_settings khugepaged;
unsigned long read_ahead_kb;
};
static struct settings saved_settings;
static bool skip_settings_restore;
static int exit_status;
static void success(const char *msg)
{
printf(" \e[32m%s\e[0m\n", msg);
}
static void fail(const char *msg)
{
printf(" \e[31m%s\e[0m\n", msg);
exit_status++;
}
static void skip(const char *msg)
{
printf(" \e[33m%s\e[0m\n", msg);
}
static int read_file(const char *path, char *buf, size_t buflen)
{
int fd;
ssize_t numread;
fd = open(path, O_RDONLY);
if (fd == -1)
return 0;
numread = read(fd, buf, buflen - 1);
if (numread < 1) {
close(fd);
return 0;
}
buf[numread] = '\0';
close(fd);
return (unsigned int) numread;
}
static int write_file(const char *path, const char *buf, size_t buflen)
{
int fd;
ssize_t numwritten;
fd = open(path, O_WRONLY);
if (fd == -1) {
printf("open(%s)\n", path);
exit(EXIT_FAILURE);
return 0;
}
numwritten = write(fd, buf, buflen - 1);
close(fd);
if (numwritten < 1) {
printf("write(%s)\n", buf);
exit(EXIT_FAILURE);
return 0;
}
return (unsigned int) numwritten;
}
static int read_string(const char *name, const char *strings[])
{
char path[PATH_MAX];
char buf[256];
char *c;
int ret;
ret = snprintf(path, PATH_MAX, THP_SYSFS "%s", name);
if (ret >= PATH_MAX) {
printf("%s: Pathname is too long\n", __func__);
exit(EXIT_FAILURE);
}
if (!read_file(path, buf, sizeof(buf))) {
perror(path);
exit(EXIT_FAILURE);
}
c = strchr(buf, '[');
if (!c) {
printf("%s: Parse failure\n", __func__);
exit(EXIT_FAILURE);
}
c++;
memmove(buf, c, sizeof(buf) - (c - buf));
c = strchr(buf, ']');
if (!c) {
printf("%s: Parse failure\n", __func__);
exit(EXIT_FAILURE);
}
*c = '\0';
ret = 0;
while (strings[ret]) {
if (!strcmp(strings[ret], buf))
return ret;
ret++;
}
printf("Failed to parse %s\n", name);
exit(EXIT_FAILURE);
}
static void write_string(const char *name, const char *val)
{
char path[PATH_MAX];
int ret;
ret = snprintf(path, PATH_MAX, THP_SYSFS "%s", name);
if (ret >= PATH_MAX) {
printf("%s: Pathname is too long\n", __func__);
exit(EXIT_FAILURE);
}
if (!write_file(path, val, strlen(val) + 1)) {
perror(path);
exit(EXIT_FAILURE);
}
}
static const unsigned long _read_num(const char *path)
{
char buf[21];
	if (!read_file(path, buf, sizeof(buf))) {
perror("read_file(read_num)");
exit(EXIT_FAILURE);
}
return strtoul(buf, NULL, 10);
}
static const unsigned long read_num(const char *name)
{
char path[PATH_MAX];
int ret;
ret = snprintf(path, PATH_MAX, THP_SYSFS "%s", name);
if (ret >= PATH_MAX) {
printf("%s: Pathname is too long\n", __func__);
exit(EXIT_FAILURE);
}
return _read_num(path);
}
static void _write_num(const char *path, unsigned long num)
{
char buf[21];
sprintf(buf, "%ld", num);
if (!write_file(path, buf, strlen(buf) + 1)) {
perror(path);
exit(EXIT_FAILURE);
}
}
static void write_num(const char *name, unsigned long num)
{
char path[PATH_MAX];
int ret;
ret = snprintf(path, PATH_MAX, THP_SYSFS "%s", name);
if (ret >= PATH_MAX) {
printf("%s: Pathname is too long\n", __func__);
exit(EXIT_FAILURE);
}
_write_num(path, num);
}
static void write_settings(struct settings *settings)
{
struct khugepaged_settings *khugepaged = &settings->khugepaged;
write_string("enabled", thp_enabled_strings[settings->thp_enabled]);
write_string("defrag", thp_defrag_strings[settings->thp_defrag]);
write_string("shmem_enabled",
shmem_enabled_strings[settings->shmem_enabled]);
write_num("use_zero_page", settings->use_zero_page);
write_num("khugepaged/defrag", khugepaged->defrag);
write_num("khugepaged/alloc_sleep_millisecs",
khugepaged->alloc_sleep_millisecs);
write_num("khugepaged/scan_sleep_millisecs",
khugepaged->scan_sleep_millisecs);
write_num("khugepaged/max_ptes_none", khugepaged->max_ptes_none);
write_num("khugepaged/max_ptes_swap", khugepaged->max_ptes_swap);
write_num("khugepaged/max_ptes_shared", khugepaged->max_ptes_shared);
write_num("khugepaged/pages_to_scan", khugepaged->pages_to_scan);
if (file_ops && finfo.type == VMA_FILE)
_write_num(finfo.dev_queue_read_ahead_path,
settings->read_ahead_kb);
}
#define MAX_SETTINGS_DEPTH 4
static struct settings settings_stack[MAX_SETTINGS_DEPTH];
static int settings_index;
static struct settings *current_settings(void)
{
if (!settings_index) {
printf("Fail: No settings set");
exit(EXIT_FAILURE);
}
return settings_stack + settings_index - 1;
}
static void push_settings(struct settings *settings)
{
if (settings_index >= MAX_SETTINGS_DEPTH) {
printf("Fail: Settings stack exceeded");
exit(EXIT_FAILURE);
}
settings_stack[settings_index++] = *settings;
write_settings(current_settings());
}
static void pop_settings(void)
{
if (settings_index <= 0) {
printf("Fail: Settings stack empty");
exit(EXIT_FAILURE);
}
--settings_index;
write_settings(current_settings());
}
static void restore_settings(int sig)
{
if (skip_settings_restore)
goto out;
printf("Restore THP and khugepaged settings...");
write_settings(&saved_settings);
success("OK");
if (sig)
exit(EXIT_FAILURE);
out:
exit(exit_status);
}
static void save_settings(void)
{
printf("Save THP and khugepaged settings...");
saved_settings = (struct settings) {
.thp_enabled = read_string("enabled", thp_enabled_strings),
.thp_defrag = read_string("defrag", thp_defrag_strings),
.shmem_enabled =
read_string("shmem_enabled", shmem_enabled_strings),
.use_zero_page = read_num("use_zero_page"),
};
saved_settings.khugepaged = (struct khugepaged_settings) {
.defrag = read_num("khugepaged/defrag"),
.alloc_sleep_millisecs =
read_num("khugepaged/alloc_sleep_millisecs"),
.scan_sleep_millisecs =
read_num("khugepaged/scan_sleep_millisecs"),
.max_ptes_none = read_num("khugepaged/max_ptes_none"),
.max_ptes_swap = read_num("khugepaged/max_ptes_swap"),
.max_ptes_shared = read_num("khugepaged/max_ptes_shared"),
.pages_to_scan = read_num("khugepaged/pages_to_scan"),
};
if (file_ops && finfo.type == VMA_FILE)
saved_settings.read_ahead_kb =
_read_num(finfo.dev_queue_read_ahead_path);
success("OK");
signal(SIGTERM, restore_settings);
signal(SIGINT, restore_settings);
signal(SIGHUP, restore_settings);
signal(SIGQUIT, restore_settings);
}
static void get_finfo(const char *dir)
{
struct stat path_stat;
struct statfs fs;
char buf[1 << 10];
char path[PATH_MAX];
char *str, *end;
finfo.dir = dir;
stat(finfo.dir, &path_stat);
if (!S_ISDIR(path_stat.st_mode)) {
printf("%s: Not a directory (%s)\n", __func__, finfo.dir);
exit(EXIT_FAILURE);
}
if (snprintf(finfo.path, sizeof(finfo.path), "%s/" TEST_FILE,
finfo.dir) >= sizeof(finfo.path)) {
printf("%s: Pathname is too long\n", __func__);
exit(EXIT_FAILURE);
}
if (statfs(finfo.dir, &fs)) {
perror("statfs()");
exit(EXIT_FAILURE);
}
finfo.type = fs.f_type == TMPFS_MAGIC ? VMA_SHMEM : VMA_FILE;
if (finfo.type == VMA_SHMEM)
return;
/* Find owning device's queue/read_ahead_kb control */
if (snprintf(path, sizeof(path), "/sys/dev/block/%d:%d/uevent",
major(path_stat.st_dev), minor(path_stat.st_dev))
>= sizeof(path)) {
printf("%s: Pathname is too long\n", __func__);
exit(EXIT_FAILURE);
}
	if (!read_file(path, buf, sizeof(buf))) {
perror("read_file(read_num)");
exit(EXIT_FAILURE);
}
if (strstr(buf, "DEVTYPE=disk")) {
/* Found it */
if (snprintf(finfo.dev_queue_read_ahead_path,
sizeof(finfo.dev_queue_read_ahead_path),
"/sys/dev/block/%d:%d/queue/read_ahead_kb",
major(path_stat.st_dev), minor(path_stat.st_dev))
>= sizeof(finfo.dev_queue_read_ahead_path)) {
printf("%s: Pathname is too long\n", __func__);
exit(EXIT_FAILURE);
}
return;
}
if (!strstr(buf, "DEVTYPE=partition")) {
printf("%s: Unknown device type: %s\n", __func__, path);
exit(EXIT_FAILURE);
}
/*
* Partition of block device - need to find actual device.
* Using naming convention that devnameN is partition of
* device devname.
*/
str = strstr(buf, "DEVNAME=");
if (!str) {
printf("%s: Could not read: %s", __func__, path);
exit(EXIT_FAILURE);
}
str += 8;
end = str;
while (*end) {
if (isdigit(*end)) {
*end = '\0';
if (snprintf(finfo.dev_queue_read_ahead_path,
sizeof(finfo.dev_queue_read_ahead_path),
"/sys/block/%s/queue/read_ahead_kb",
str) >= sizeof(finfo.dev_queue_read_ahead_path)) {
printf("%s: Pathname is too long\n", __func__);
exit(EXIT_FAILURE);
}
return;
}
++end;
}
printf("%s: Could not read: %s\n", __func__, path);
exit(EXIT_FAILURE);
}
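/*
 * Worked example of the uevent parsing above (device names are
 * illustrative assumptions, not read from a real system): for a file on
 * /dev/sda1 the uevent reports DEVTYPE=partition and DEVNAME=sda1, the
 * name is truncated at its first digit to "sda", and the resolved control
 * is /sys/block/sda/queue/read_ahead_kb. A whole-disk device
 * (DEVTYPE=disk) is addressed directly via
 * /sys/dev/block/<major>:<minor>/queue/read_ahead_kb. Note the
 * first-digit heuristic would not cope with names such as nvme0n1p1.
 */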
static bool check_swap(void *addr, unsigned long size)
{
bool swap = false;
int ret;
FILE *fp;
char buffer[MAX_LINE_LENGTH];
char addr_pattern[MAX_LINE_LENGTH];
ret = snprintf(addr_pattern, MAX_LINE_LENGTH, "%08lx-",
(unsigned long) addr);
if (ret >= MAX_LINE_LENGTH) {
printf("%s: Pattern is too long\n", __func__);
exit(EXIT_FAILURE);
}
fp = fopen(PID_SMAPS, "r");
if (!fp) {
printf("%s: Failed to open file %s\n", __func__, PID_SMAPS);
exit(EXIT_FAILURE);
}
if (!check_for_pattern(fp, addr_pattern, buffer, sizeof(buffer)))
goto err_out;
ret = snprintf(addr_pattern, MAX_LINE_LENGTH, "Swap:%19ld kB",
size >> 10);
if (ret >= MAX_LINE_LENGTH) {
printf("%s: Pattern is too long\n", __func__);
exit(EXIT_FAILURE);
}
	/*
	 * Fetch the Swap: field in the same smaps block and check whether
	 * it reports the expected amount of swapped-out memory.
	 */
if (!check_for_pattern(fp, "Swap:", buffer, sizeof(buffer)))
goto err_out;
if (strncmp(buffer, addr_pattern, strlen(addr_pattern)))
goto err_out;
swap = true;
err_out:
fclose(fp);
return swap;
}
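/*
 * Sketch of the smaps fragment check_swap() expects to match (the values
 * are illustrative, not from a real run):
 *
 *   40000000-40200000 rw-s 00000000 00:00 0
 *   ...
 *   Swap:               2048 kB
 *
 * i.e. the "%08lx-" start-address pattern, then within the same VMA block
 * a "Swap:%19ld kB" line whose value equals the swapped-out size in KiB.
 */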
static void *alloc_mapping(int nr)
{
void *p;
p = mmap(BASE_ADDR, nr * hpage_pmd_size, PROT_READ | PROT_WRITE,
MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
if (p != BASE_ADDR) {
printf("Failed to allocate VMA at %p\n", BASE_ADDR);
exit(EXIT_FAILURE);
}
return p;
}
static void fill_memory(int *p, unsigned long start, unsigned long end)
{
int i;
for (i = start / page_size; i < end / page_size; i++)
p[i * page_size / sizeof(*p)] = i + 0xdead0000;
}
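/*
 * Worked example of the fill pattern (assuming 4KiB pages): fill_memory()
 * writes i + 0xdead0000 into the first int of page i, so page 2 stores
 * 0xdead0002 at p[2 * 4096 / sizeof(int)] == p[2048]. validate_memory()
 * further below re-derives the same value per page and reports any
 * mismatch as corruption.
 */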
/*
* MADV_COLLAPSE is a best-effort request and may fail if an internal
* resource is temporarily unavailable, in which case it will set errno to
* EAGAIN. In such a case, immediately reattempt the operation one more
* time.
*/
static int madvise_collapse_retry(void *p, unsigned long size)
{
bool retry = true;
int ret;
retry:
ret = madvise(p, size, MADV_COLLAPSE);
if (ret && errno == EAGAIN && retry) {
retry = false;
goto retry;
}
return ret;
}
/*
* Returns pmd-mapped hugepage in VMA marked VM_HUGEPAGE, filled with
* validate_memory()'able contents.
*/
static void *alloc_hpage(struct mem_ops *ops)
{
void *p = ops->setup_area(1);
ops->fault(p, 0, hpage_pmd_size);
/*
* VMA should be neither VM_HUGEPAGE nor VM_NOHUGEPAGE.
* The latter is ineligible for collapse by MADV_COLLAPSE
* while the former might cause MADV_COLLAPSE to race with
* khugepaged on low-load system (like a test machine), which
* would cause MADV_COLLAPSE to fail with EAGAIN.
*/
printf("Allocate huge page...");
if (madvise_collapse_retry(p, hpage_pmd_size)) {
perror("madvise(MADV_COLLAPSE)");
exit(EXIT_FAILURE);
}
if (!ops->check_huge(p, 1)) {
perror("madvise(MADV_COLLAPSE)");
exit(EXIT_FAILURE);
}
if (madvise(p, hpage_pmd_size, MADV_HUGEPAGE)) {
perror("madvise(MADV_HUGEPAGE)");
exit(EXIT_FAILURE);
}
success("OK");
return p;
}
static void validate_memory(int *p, unsigned long start, unsigned long end)
{
int i;
for (i = start / page_size; i < end / page_size; i++) {
if (p[i * page_size / sizeof(*p)] != i + 0xdead0000) {
printf("Page %d is corrupted: %#x\n",
i, p[i * page_size / sizeof(*p)]);
exit(EXIT_FAILURE);
}
}
}
static void *anon_setup_area(int nr_hpages)
{
return alloc_mapping(nr_hpages);
}
static void anon_cleanup_area(void *p, unsigned long size)
{
munmap(p, size);
}
static void anon_fault(void *p, unsigned long start, unsigned long end)
{
fill_memory(p, start, end);
}
static bool anon_check_huge(void *addr, int nr_hpages)
{
return check_huge_anon(addr, nr_hpages, hpage_pmd_size);
}
static void *file_setup_area(int nr_hpages)
{
int fd;
void *p;
unsigned long size;
unlink(finfo.path); /* Cleanup from previous failed tests */
printf("Creating %s for collapse%s...", finfo.path,
finfo.type == VMA_SHMEM ? " (tmpfs)" : "");
fd = open(finfo.path, O_DSYNC | O_CREAT | O_RDWR | O_TRUNC | O_EXCL,
777);
if (fd < 0) {
perror("open()");
exit(EXIT_FAILURE);
}
size = nr_hpages * hpage_pmd_size;
p = alloc_mapping(nr_hpages);
fill_memory(p, 0, size);
write(fd, p, size);
close(fd);
munmap(p, size);
success("OK");
printf("Opening %s read only for collapse...", finfo.path);
finfo.fd = open(finfo.path, O_RDONLY, 777);
if (finfo.fd < 0) {
perror("open()");
exit(EXIT_FAILURE);
}
p = mmap(BASE_ADDR, size, PROT_READ | PROT_EXEC,
MAP_PRIVATE, finfo.fd, 0);
if (p == MAP_FAILED || p != BASE_ADDR) {
perror("mmap()");
exit(EXIT_FAILURE);
}
/* Drop page cache */
write_file("/proc/sys/vm/drop_caches", "3", 2);
success("OK");
return p;
}
static void file_cleanup_area(void *p, unsigned long size)
{
munmap(p, size);
close(finfo.fd);
unlink(finfo.path);
}
static void file_fault(void *p, unsigned long start, unsigned long end)
{
if (madvise(((char *)p) + start, end - start, MADV_POPULATE_READ)) {
perror("madvise(MADV_POPULATE_READ");
exit(EXIT_FAILURE);
}
}
static bool file_check_huge(void *addr, int nr_hpages)
{
switch (finfo.type) {
case VMA_FILE:
return check_huge_file(addr, nr_hpages, hpage_pmd_size);
case VMA_SHMEM:
return check_huge_shmem(addr, nr_hpages, hpage_pmd_size);
default:
exit(EXIT_FAILURE);
return false;
}
}
static void *shmem_setup_area(int nr_hpages)
{
void *p;
unsigned long size = nr_hpages * hpage_pmd_size;
finfo.fd = memfd_create("khugepaged-selftest-collapse-shmem", 0);
if (finfo.fd < 0) {
perror("memfd_create()");
exit(EXIT_FAILURE);
}
if (ftruncate(finfo.fd, size)) {
perror("ftruncate()");
exit(EXIT_FAILURE);
}
p = mmap(BASE_ADDR, size, PROT_READ | PROT_WRITE, MAP_SHARED, finfo.fd,
0);
if (p != BASE_ADDR) {
perror("mmap()");
exit(EXIT_FAILURE);
}
return p;
}
static void shmem_cleanup_area(void *p, unsigned long size)
{
munmap(p, size);
close(finfo.fd);
}
static bool shmem_check_huge(void *addr, int nr_hpages)
{
return check_huge_shmem(addr, nr_hpages, hpage_pmd_size);
}
static struct mem_ops __anon_ops = {
.setup_area = &anon_setup_area,
.cleanup_area = &anon_cleanup_area,
.fault = &anon_fault,
.check_huge = &anon_check_huge,
.name = "anon",
};
static struct mem_ops __file_ops = {
.setup_area = &file_setup_area,
.cleanup_area = &file_cleanup_area,
.fault = &file_fault,
.check_huge = &file_check_huge,
.name = "file",
};
static struct mem_ops __shmem_ops = {
.setup_area = &shmem_setup_area,
.cleanup_area = &shmem_cleanup_area,
.fault = &anon_fault,
.check_huge = &shmem_check_huge,
.name = "shmem",
};
static void __madvise_collapse(const char *msg, char *p, int nr_hpages,
struct mem_ops *ops, bool expect)
{
int ret;
struct settings settings = *current_settings();
printf("%s...", msg);
/*
* Prevent khugepaged interference and tests that MADV_COLLAPSE
* ignores /sys/kernel/mm/transparent_hugepage/enabled
*/
settings.thp_enabled = THP_NEVER;
settings.shmem_enabled = SHMEM_NEVER;
push_settings(&settings);
/* Clear VM_NOHUGEPAGE */
madvise(p, nr_hpages * hpage_pmd_size, MADV_HUGEPAGE);
ret = madvise_collapse_retry(p, nr_hpages * hpage_pmd_size);
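	/*
	 * madvise() returns 0 on success, so a nonzero ret means the
	 * collapse failed; the check below flags a mismatch with "expect"
	 * in either direction.
	 */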
if (((bool)ret) == expect)
fail("Fail: Bad return value");
else if (!ops->check_huge(p, expect ? nr_hpages : 0))
fail("Fail: check_huge()");
else
success("OK");
pop_settings();
}
static void madvise_collapse(const char *msg, char *p, int nr_hpages,
struct mem_ops *ops, bool expect)
{
/* Sanity check */
if (!ops->check_huge(p, 0)) {
printf("Unexpected huge page\n");
exit(EXIT_FAILURE);
}
__madvise_collapse(msg, p, nr_hpages, ops, expect);
}
#define TICK 500000
static bool wait_for_scan(const char *msg, char *p, int nr_hpages,
struct mem_ops *ops)
{
int full_scans;
int timeout = 6; /* 3 seconds */
/* Sanity check */
if (!ops->check_huge(p, 0)) {
printf("Unexpected huge page\n");
exit(EXIT_FAILURE);
}
madvise(p, nr_hpages * hpage_pmd_size, MADV_HUGEPAGE);
/* Wait until the second full_scan completed */
full_scans = read_num("khugepaged/full_scans") + 2;
printf("%s...", msg);
while (timeout--) {
if (ops->check_huge(p, nr_hpages))
break;
if (read_num("khugepaged/full_scans") >= full_scans)
break;
printf(".");
usleep(TICK);
}
madvise(p, nr_hpages * hpage_pmd_size, MADV_NOHUGEPAGE);
return timeout == -1;
}
static void khugepaged_collapse(const char *msg, char *p, int nr_hpages,
struct mem_ops *ops, bool expect)
{
if (wait_for_scan(msg, p, nr_hpages, ops)) {
if (expect)
fail("Timeout");
else
success("OK");
return;
}
/*
* For file and shmem memory, khugepaged only retracts pte entries after
* putting the new hugepage in the page cache. The hugepage must be
* subsequently refaulted to install the pmd mapping for the mm.
*/
if (ops != &__anon_ops)
ops->fault(p, 0, nr_hpages * hpage_pmd_size);
if (ops->check_huge(p, expect ? nr_hpages : 0))
success("OK");
else
fail("Fail");
}
static struct collapse_context __khugepaged_context = {
.collapse = &khugepaged_collapse,
.enforce_pte_scan_limits = true,
.name = "khugepaged",
};
static struct collapse_context __madvise_context = {
.collapse = &madvise_collapse,
.enforce_pte_scan_limits = false,
.name = "madvise",
};
static bool is_tmpfs(struct mem_ops *ops)
{
return ops == &__file_ops && finfo.type == VMA_SHMEM;
}
static void alloc_at_fault(void)
{
struct settings settings = *current_settings();
char *p;
settings.thp_enabled = THP_ALWAYS;
push_settings(&settings);
p = alloc_mapping(1);
*p = 1;
printf("Allocate huge page on fault...");
if (check_huge_anon(p, 1, hpage_pmd_size))
success("OK");
else
fail("Fail");
pop_settings();
madvise(p, page_size, MADV_DONTNEED);
printf("Split huge PMD on MADV_DONTNEED...");
if (check_huge_anon(p, 0, hpage_pmd_size))
success("OK");
else
fail("Fail");
munmap(p, hpage_pmd_size);
}
static void collapse_full(struct collapse_context *c, struct mem_ops *ops)
{
void *p;
int nr_hpages = 4;
unsigned long size = nr_hpages * hpage_pmd_size;
p = ops->setup_area(nr_hpages);
ops->fault(p, 0, size);
c->collapse("Collapse multiple fully populated PTE table", p, nr_hpages,
ops, true);
validate_memory(p, 0, size);
ops->cleanup_area(p, size);
}
static void collapse_empty(struct collapse_context *c, struct mem_ops *ops)
{
void *p;
p = ops->setup_area(1);
c->collapse("Do not collapse empty PTE table", p, 1, ops, false);
ops->cleanup_area(p, hpage_pmd_size);
}
static void collapse_single_pte_entry(struct collapse_context *c, struct mem_ops *ops)
{
void *p;
p = ops->setup_area(1);
ops->fault(p, 0, page_size);
c->collapse("Collapse PTE table with single PTE entry present", p,
1, ops, true);
ops->cleanup_area(p, hpage_pmd_size);
}
static void collapse_max_ptes_none(struct collapse_context *c, struct mem_ops *ops)
{
int max_ptes_none = hpage_pmd_nr / 2;
struct settings settings = *current_settings();
void *p;
settings.khugepaged.max_ptes_none = max_ptes_none;
push_settings(&settings);
p = ops->setup_area(1);
if (is_tmpfs(ops)) {
/* shmem pages always in the page cache */
printf("tmpfs...");
skip("Skip");
goto skip;
}
ops->fault(p, 0, (hpage_pmd_nr - max_ptes_none - 1) * page_size);
c->collapse("Maybe collapse with max_ptes_none exceeded", p, 1,
ops, !c->enforce_pte_scan_limits);
validate_memory(p, 0, (hpage_pmd_nr - max_ptes_none - 1) * page_size);
if (c->enforce_pte_scan_limits) {
ops->fault(p, 0, (hpage_pmd_nr - max_ptes_none) * page_size);
c->collapse("Collapse with max_ptes_none PTEs empty", p, 1, ops,
true);
validate_memory(p, 0,
(hpage_pmd_nr - max_ptes_none) * page_size);
}
skip:
ops->cleanup_area(p, hpage_pmd_size);
pop_settings();
}
static void collapse_swapin_single_pte(struct collapse_context *c, struct mem_ops *ops)
{
void *p;
p = ops->setup_area(1);
ops->fault(p, 0, hpage_pmd_size);
printf("Swapout one page...");
if (madvise(p, page_size, MADV_PAGEOUT)) {
perror("madvise(MADV_PAGEOUT)");
exit(EXIT_FAILURE);
}
if (check_swap(p, page_size)) {
success("OK");
} else {
fail("Fail");
goto out;
}
c->collapse("Collapse with swapping in single PTE entry", p, 1, ops,
true);
validate_memory(p, 0, hpage_pmd_size);
out:
ops->cleanup_area(p, hpage_pmd_size);
}
static void collapse_max_ptes_swap(struct collapse_context *c, struct mem_ops *ops)
{
int max_ptes_swap = read_num("khugepaged/max_ptes_swap");
void *p;
p = ops->setup_area(1);
ops->fault(p, 0, hpage_pmd_size);
printf("Swapout %d of %d pages...", max_ptes_swap + 1, hpage_pmd_nr);
if (madvise(p, (max_ptes_swap + 1) * page_size, MADV_PAGEOUT)) {
perror("madvise(MADV_PAGEOUT)");
exit(EXIT_FAILURE);
}
if (check_swap(p, (max_ptes_swap + 1) * page_size)) {
success("OK");
} else {
fail("Fail");
goto out;
}
c->collapse("Maybe collapse with max_ptes_swap exceeded", p, 1, ops,
!c->enforce_pte_scan_limits);
validate_memory(p, 0, hpage_pmd_size);
if (c->enforce_pte_scan_limits) {
ops->fault(p, 0, hpage_pmd_size);
printf("Swapout %d of %d pages...", max_ptes_swap,
hpage_pmd_nr);
if (madvise(p, max_ptes_swap * page_size, MADV_PAGEOUT)) {
perror("madvise(MADV_PAGEOUT)");
exit(EXIT_FAILURE);
}
if (check_swap(p, max_ptes_swap * page_size)) {
success("OK");
} else {
fail("Fail");
goto out;
}
c->collapse("Collapse with max_ptes_swap pages swapped out", p,
1, ops, true);
validate_memory(p, 0, hpage_pmd_size);
}
out:
ops->cleanup_area(p, hpage_pmd_size);
}
static void collapse_single_pte_entry_compound(struct collapse_context *c, struct mem_ops *ops)
{
void *p;
p = alloc_hpage(ops);
if (is_tmpfs(ops)) {
/* MADV_DONTNEED won't evict tmpfs pages */
printf("tmpfs...");
skip("Skip");
goto skip;
}
madvise(p, hpage_pmd_size, MADV_NOHUGEPAGE);
printf("Split huge page leaving single PTE mapping compound page...");
madvise(p + page_size, hpage_pmd_size - page_size, MADV_DONTNEED);
if (ops->check_huge(p, 0))
success("OK");
else
fail("Fail");
c->collapse("Collapse PTE table with single PTE mapping compound page",
p, 1, ops, true);
validate_memory(p, 0, page_size);
skip:
ops->cleanup_area(p, hpage_pmd_size);
}
static void collapse_full_of_compound(struct collapse_context *c, struct mem_ops *ops)
{
void *p;
p = alloc_hpage(ops);
printf("Split huge page leaving single PTE page table full of compound pages...");
madvise(p, page_size, MADV_NOHUGEPAGE);
madvise(p, hpage_pmd_size, MADV_NOHUGEPAGE);
if (ops->check_huge(p, 0))
success("OK");
else
fail("Fail");
c->collapse("Collapse PTE table full of compound pages", p, 1, ops,
true);
validate_memory(p, 0, hpage_pmd_size);
ops->cleanup_area(p, hpage_pmd_size);
}
static void collapse_compound_extreme(struct collapse_context *c, struct mem_ops *ops)
{
void *p;
int i;
p = ops->setup_area(1);
for (i = 0; i < hpage_pmd_nr; i++) {
printf("\rConstruct PTE page table full of different PTE-mapped compound pages %3d/%d...",
i + 1, hpage_pmd_nr);
madvise(BASE_ADDR, hpage_pmd_size, MADV_HUGEPAGE);
ops->fault(BASE_ADDR, 0, hpage_pmd_size);
if (!ops->check_huge(BASE_ADDR, 1)) {
printf("Failed to allocate huge page\n");
exit(EXIT_FAILURE);
}
madvise(BASE_ADDR, hpage_pmd_size, MADV_NOHUGEPAGE);
p = mremap(BASE_ADDR - i * page_size,
i * page_size + hpage_pmd_size,
(i + 1) * page_size,
MREMAP_MAYMOVE | MREMAP_FIXED,
BASE_ADDR + 2 * hpage_pmd_size);
if (p == MAP_FAILED) {
perror("mremap+unmap");
exit(EXIT_FAILURE);
}
p = mremap(BASE_ADDR + 2 * hpage_pmd_size,
(i + 1) * page_size,
(i + 1) * page_size + hpage_pmd_size,
MREMAP_MAYMOVE | MREMAP_FIXED,
BASE_ADDR - (i + 1) * page_size);
if (p == MAP_FAILED) {
perror("mremap+alloc");
exit(EXIT_FAILURE);
}
}
ops->cleanup_area(BASE_ADDR, hpage_pmd_size);
ops->fault(p, 0, hpage_pmd_size);
if (!ops->check_huge(p, 1))
success("OK");
else
fail("Fail");
c->collapse("Collapse PTE table full of different compound pages", p, 1,
ops, true);
validate_memory(p, 0, hpage_pmd_size);
ops->cleanup_area(p, hpage_pmd_size);
}
static void collapse_fork(struct collapse_context *c, struct mem_ops *ops)
{
int wstatus;
void *p;
p = ops->setup_area(1);
printf("Allocate small page...");
ops->fault(p, 0, page_size);
if (ops->check_huge(p, 0))
success("OK");
else
fail("Fail");
printf("Share small page over fork()...");
if (!fork()) {
/* Do not touch settings on child exit */
skip_settings_restore = true;
exit_status = 0;
if (ops->check_huge(p, 0))
success("OK");
else
fail("Fail");
ops->fault(p, page_size, 2 * page_size);
c->collapse("Collapse PTE table with single page shared with parent process",
p, 1, ops, true);
validate_memory(p, 0, page_size);
ops->cleanup_area(p, hpage_pmd_size);
exit(exit_status);
}
wait(&wstatus);
exit_status += WEXITSTATUS(wstatus);
printf("Check if parent still has small page...");
if (ops->check_huge(p, 0))
success("OK");
else
fail("Fail");
validate_memory(p, 0, page_size);
ops->cleanup_area(p, hpage_pmd_size);
}
static void collapse_fork_compound(struct collapse_context *c, struct mem_ops *ops)
{
int wstatus;
void *p;
p = alloc_hpage(ops);
printf("Share huge page over fork()...");
if (!fork()) {
/* Do not touch settings on child exit */
skip_settings_restore = true;
exit_status = 0;
if (ops->check_huge(p, 1))
success("OK");
else
fail("Fail");
printf("Split huge page PMD in child process...");
madvise(p, page_size, MADV_NOHUGEPAGE);
madvise(p, hpage_pmd_size, MADV_NOHUGEPAGE);
if (ops->check_huge(p, 0))
success("OK");
else
fail("Fail");
ops->fault(p, 0, page_size);
write_num("khugepaged/max_ptes_shared", hpage_pmd_nr - 1);
c->collapse("Collapse PTE table full of compound pages in child",
p, 1, ops, true);
write_num("khugepaged/max_ptes_shared",
current_settings()->khugepaged.max_ptes_shared);
validate_memory(p, 0, hpage_pmd_size);
ops->cleanup_area(p, hpage_pmd_size);
exit(exit_status);
}
wait(&wstatus);
exit_status += WEXITSTATUS(wstatus);
printf("Check if parent still has huge page...");
if (ops->check_huge(p, 1))
success("OK");
else
fail("Fail");
validate_memory(p, 0, hpage_pmd_size);
ops->cleanup_area(p, hpage_pmd_size);
}
static void collapse_max_ptes_shared(struct collapse_context *c, struct mem_ops *ops)
{
int max_ptes_shared = read_num("khugepaged/max_ptes_shared");
int wstatus;
void *p;
p = alloc_hpage(ops);
printf("Share huge page over fork()...");
if (!fork()) {
/* Do not touch settings on child exit */
skip_settings_restore = true;
exit_status = 0;
if (ops->check_huge(p, 1))
success("OK");
else
fail("Fail");
printf("Trigger CoW on page %d of %d...",
hpage_pmd_nr - max_ptes_shared - 1, hpage_pmd_nr);
ops->fault(p, 0, (hpage_pmd_nr - max_ptes_shared - 1) * page_size);
if (ops->check_huge(p, 0))
success("OK");
else
fail("Fail");
c->collapse("Maybe collapse with max_ptes_shared exceeded", p,
1, ops, !c->enforce_pte_scan_limits);
if (c->enforce_pte_scan_limits) {
printf("Trigger CoW on page %d of %d...",
hpage_pmd_nr - max_ptes_shared, hpage_pmd_nr);
ops->fault(p, 0, (hpage_pmd_nr - max_ptes_shared) *
page_size);
if (ops->check_huge(p, 0))
success("OK");
else
fail("Fail");
c->collapse("Collapse with max_ptes_shared PTEs shared",
p, 1, ops, true);
}
validate_memory(p, 0, hpage_pmd_size);
ops->cleanup_area(p, hpage_pmd_size);
exit(exit_status);
}
wait(&wstatus);
exit_status += WEXITSTATUS(wstatus);
printf("Check if parent still has huge page...");
if (ops->check_huge(p, 1))
success("OK");
else
fail("Fail");
validate_memory(p, 0, hpage_pmd_size);
ops->cleanup_area(p, hpage_pmd_size);
}
static void madvise_collapse_existing_thps(struct collapse_context *c,
struct mem_ops *ops)
{
void *p;
p = ops->setup_area(1);
ops->fault(p, 0, hpage_pmd_size);
c->collapse("Collapse fully populated PTE table...", p, 1, ops, true);
validate_memory(p, 0, hpage_pmd_size);
/* c->collapse() will find a hugepage and complain - call directly. */
__madvise_collapse("Re-collapse PMD-mapped hugepage", p, 1, ops, true);
validate_memory(p, 0, hpage_pmd_size);
ops->cleanup_area(p, hpage_pmd_size);
}
/*
* Test race with khugepaged where page tables have been retracted and
* pmd cleared.
*/
static void madvise_retracted_page_tables(struct collapse_context *c,
struct mem_ops *ops)
{
void *p;
int nr_hpages = 1;
unsigned long size = nr_hpages * hpage_pmd_size;
p = ops->setup_area(nr_hpages);
ops->fault(p, 0, size);
/* Let khugepaged collapse and leave pmd cleared */
if (wait_for_scan("Collapse and leave PMD cleared", p, nr_hpages,
ops)) {
fail("Timeout");
return;
}
success("OK");
c->collapse("Install huge PMD from page cache", p, nr_hpages, ops,
true);
validate_memory(p, 0, size);
ops->cleanup_area(p, size);
}
static void usage(void)
{
fprintf(stderr, "\nUsage: ./khugepaged <test type> [dir]\n\n");
fprintf(stderr, "\t<test type>\t: <context>:<mem_type>\n");
fprintf(stderr, "\t<context>\t: [all|khugepaged|madvise]\n");
fprintf(stderr, "\t<mem_type>\t: [all|anon|file|shmem]\n");
fprintf(stderr, "\n\t\"file,all\" mem_type requires [dir] argument\n");
fprintf(stderr, "\n\t\"file,all\" mem_type requires kernel built with\n");
fprintf(stderr, "\tCONFIG_READ_ONLY_THP_FOR_FS=y\n");
fprintf(stderr, "\n\tif [dir] is a (sub)directory of a tmpfs mount, tmpfs must be\n");
fprintf(stderr, "\tmounted with huge=madvise option for khugepaged tests to work\n");
exit(1);
}
static void parse_test_type(int argc, const char **argv)
{
char *buf;
const char *token;
if (argc == 1) {
/* Backwards compatibility */
khugepaged_context = &__khugepaged_context;
madvise_context = &__madvise_context;
anon_ops = &__anon_ops;
return;
}
buf = strdup(argv[1]);
token = strsep(&buf, ":");
if (!strcmp(token, "all")) {
khugepaged_context = &__khugepaged_context;
madvise_context = &__madvise_context;
} else if (!strcmp(token, "khugepaged")) {
khugepaged_context = &__khugepaged_context;
} else if (!strcmp(token, "madvise")) {
madvise_context = &__madvise_context;
} else {
usage();
}
if (!buf)
usage();
if (!strcmp(buf, "all")) {
file_ops = &__file_ops;
anon_ops = &__anon_ops;
shmem_ops = &__shmem_ops;
} else if (!strcmp(buf, "anon")) {
anon_ops = &__anon_ops;
} else if (!strcmp(buf, "file")) {
file_ops = &__file_ops;
} else if (!strcmp(buf, "shmem")) {
shmem_ops = &__shmem_ops;
} else {
usage();
}
if (!file_ops)
return;
if (argc != 3)
usage();
}
int main(int argc, const char **argv)
{
struct settings default_settings = {
.thp_enabled = THP_MADVISE,
.thp_defrag = THP_DEFRAG_ALWAYS,
.shmem_enabled = SHMEM_ADVISE,
.use_zero_page = 0,
.khugepaged = {
.defrag = 1,
.alloc_sleep_millisecs = 10,
.scan_sleep_millisecs = 10,
},
/*
* When testing file-backed memory, the collapse path
* looks at how many pages are found in the page cache, not
* what pages are mapped. Disable read ahead optimization so
* pages don't find their way into the page cache unless
* we mem_ops->fault() them in.
*/
.read_ahead_kb = 0,
};
parse_test_type(argc, argv);
if (file_ops)
get_finfo(argv[2]);
setbuf(stdout, NULL);
page_size = getpagesize();
hpage_pmd_size = read_pmd_pagesize();
if (!hpage_pmd_size) {
printf("Reading PMD pagesize failed");
exit(EXIT_FAILURE);
}
hpage_pmd_nr = hpage_pmd_size / page_size;
default_settings.khugepaged.max_ptes_none = hpage_pmd_nr - 1;
default_settings.khugepaged.max_ptes_swap = hpage_pmd_nr / 8;
default_settings.khugepaged.max_ptes_shared = hpage_pmd_nr / 2;
default_settings.khugepaged.pages_to_scan = hpage_pmd_nr * 8;
save_settings();
push_settings(&default_settings);
alloc_at_fault();
#define TEST(t, c, o) do { \
if (c && o) { \
printf("\nRun test: " #t " (%s:%s)\n", c->name, o->name); \
t(c, o); \
} \
} while (0)
TEST(collapse_full, khugepaged_context, anon_ops);
TEST(collapse_full, khugepaged_context, file_ops);
TEST(collapse_full, khugepaged_context, shmem_ops);
TEST(collapse_full, madvise_context, anon_ops);
TEST(collapse_full, madvise_context, file_ops);
TEST(collapse_full, madvise_context, shmem_ops);
TEST(collapse_empty, khugepaged_context, anon_ops);
TEST(collapse_empty, madvise_context, anon_ops);
TEST(collapse_single_pte_entry, khugepaged_context, anon_ops);
TEST(collapse_single_pte_entry, khugepaged_context, file_ops);
TEST(collapse_single_pte_entry, khugepaged_context, shmem_ops);
TEST(collapse_single_pte_entry, madvise_context, anon_ops);
TEST(collapse_single_pte_entry, madvise_context, file_ops);
TEST(collapse_single_pte_entry, madvise_context, shmem_ops);
TEST(collapse_max_ptes_none, khugepaged_context, anon_ops);
TEST(collapse_max_ptes_none, khugepaged_context, file_ops);
TEST(collapse_max_ptes_none, madvise_context, anon_ops);
TEST(collapse_max_ptes_none, madvise_context, file_ops);
TEST(collapse_single_pte_entry_compound, khugepaged_context, anon_ops);
TEST(collapse_single_pte_entry_compound, khugepaged_context, file_ops);
TEST(collapse_single_pte_entry_compound, madvise_context, anon_ops);
TEST(collapse_single_pte_entry_compound, madvise_context, file_ops);
TEST(collapse_full_of_compound, khugepaged_context, anon_ops);
TEST(collapse_full_of_compound, khugepaged_context, file_ops);
TEST(collapse_full_of_compound, khugepaged_context, shmem_ops);
TEST(collapse_full_of_compound, madvise_context, anon_ops);
TEST(collapse_full_of_compound, madvise_context, file_ops);
TEST(collapse_full_of_compound, madvise_context, shmem_ops);
TEST(collapse_compound_extreme, khugepaged_context, anon_ops);
TEST(collapse_compound_extreme, madvise_context, anon_ops);
TEST(collapse_swapin_single_pte, khugepaged_context, anon_ops);
TEST(collapse_swapin_single_pte, madvise_context, anon_ops);
TEST(collapse_max_ptes_swap, khugepaged_context, anon_ops);
TEST(collapse_max_ptes_swap, madvise_context, anon_ops);
TEST(collapse_fork, khugepaged_context, anon_ops);
TEST(collapse_fork, madvise_context, anon_ops);
TEST(collapse_fork_compound, khugepaged_context, anon_ops);
TEST(collapse_fork_compound, madvise_context, anon_ops);
TEST(collapse_max_ptes_shared, khugepaged_context, anon_ops);
TEST(collapse_max_ptes_shared, madvise_context, anon_ops);
TEST(madvise_collapse_existing_thps, madvise_context, anon_ops);
TEST(madvise_collapse_existing_thps, madvise_context, file_ops);
TEST(madvise_collapse_existing_thps, madvise_context, shmem_ops);
TEST(madvise_retracted_page_tables, madvise_context, file_ops);
TEST(madvise_retracted_page_tables, madvise_context, shmem_ops);
restore_settings(0);
}
| linux-master | tools/testing/selftests/mm/khugepaged.c |
// SPDX-License-Identifier: GPL-2.0
#include <sys/mman.h>
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <sys/time.h>
#include <sys/resource.h>
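/*
 * Sanity check for MCL_ONFAULT: once mlockall(MCL_ONFAULT | MCL_FUTURE)
 * is in effect, a MAP_POPULATE mapping of twice the RLIMIT_MEMLOCK hard
 * limit must be refused, because the pre-faulted pages would all count
 * against the lock limit. test_limit() returns 0 only when that mmap()
 * fails as expected.
 */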
static int test_limit(void)
{
int ret = 1;
struct rlimit lims;
void *map;
if (getrlimit(RLIMIT_MEMLOCK, &lims)) {
perror("getrlimit");
return ret;
}
if (mlockall(MCL_ONFAULT | MCL_FUTURE)) {
perror("mlockall");
return ret;
}
map = mmap(NULL, 2 * lims.rlim_max, PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS | MAP_POPULATE, -1, 0);
	if (map != MAP_FAILED) {
		printf("mmap should have failed, but didn't\n");
		munmap(map, 2 * lims.rlim_max);
	} else {
		ret = 0;
	}
munlockall();
return ret;
}
int main(int argc, char **argv)
{
int ret = 0;
ret += test_limit();
return ret;
}
| linux-master | tools/testing/selftests/mm/on-fault-limit.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright IBM Corporation, 2021
*
* Author: Mike Rapoport <[email protected]>
*/
#define _GNU_SOURCE
#include <sys/uio.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <sys/types.h>
#include <sys/ptrace.h>
#include <sys/syscall.h>
#include <sys/resource.h>
#include <sys/capability.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <errno.h>
#include <stdio.h>
#include "../kselftest.h"
#define fail(fmt, ...) ksft_test_result_fail(fmt, ##__VA_ARGS__)
#define pass(fmt, ...) ksft_test_result_pass(fmt, ##__VA_ARGS__)
#define skip(fmt, ...) ksft_test_result_skip(fmt, ##__VA_ARGS__)
#ifdef __NR_memfd_secret
#define PATTERN 0x55
static const int prot = PROT_READ | PROT_WRITE;
static const int mode = MAP_SHARED;
static unsigned long page_size;
static unsigned long mlock_limit_cur;
static unsigned long mlock_limit_max;
static int memfd_secret(unsigned int flags)
{
return syscall(__NR_memfd_secret, flags);
}
static void test_file_apis(int fd)
{
char buf[64];
if ((read(fd, buf, sizeof(buf)) >= 0) ||
(write(fd, buf, sizeof(buf)) >= 0) ||
(pread(fd, buf, sizeof(buf), 0) >= 0) ||
(pwrite(fd, buf, sizeof(buf), 0) >= 0))
fail("unexpected file IO\n");
else
pass("file IO is blocked as expected\n");
}
static void test_mlock_limit(int fd)
{
size_t len;
char *mem;
len = mlock_limit_cur;
mem = mmap(NULL, len, prot, mode, fd, 0);
if (mem == MAP_FAILED) {
fail("unable to mmap secret memory\n");
return;
}
munmap(mem, len);
len = mlock_limit_max * 2;
mem = mmap(NULL, len, prot, mode, fd, 0);
if (mem != MAP_FAILED) {
fail("unexpected mlock limit violation\n");
munmap(mem, len);
return;
}
pass("mlock limit is respected\n");
}
static void try_process_vm_read(int fd, int pipefd[2])
{
struct iovec liov, riov;
char buf[64];
char *mem;
if (read(pipefd[0], &mem, sizeof(mem)) < 0) {
fail("pipe write: %s\n", strerror(errno));
exit(KSFT_FAIL);
}
liov.iov_len = riov.iov_len = sizeof(buf);
liov.iov_base = buf;
riov.iov_base = mem;
if (process_vm_readv(getppid(), &liov, 1, &riov, 1, 0) < 0) {
if (errno == ENOSYS)
exit(KSFT_SKIP);
exit(KSFT_PASS);
}
exit(KSFT_FAIL);
}
static void try_ptrace(int fd, int pipefd[2])
{
pid_t ppid = getppid();
int status;
char *mem;
long ret;
if (read(pipefd[0], &mem, sizeof(mem)) < 0) {
perror("pipe write");
exit(KSFT_FAIL);
}
ret = ptrace(PTRACE_ATTACH, ppid, 0, 0);
if (ret) {
perror("ptrace_attach");
exit(KSFT_FAIL);
}
ret = waitpid(ppid, &status, WUNTRACED);
if ((ret != ppid) || !(WIFSTOPPED(status))) {
fprintf(stderr, "weird waitppid result %ld stat %x\n",
ret, status);
exit(KSFT_FAIL);
}
if (ptrace(PTRACE_PEEKDATA, ppid, mem, 0))
exit(KSFT_PASS);
exit(KSFT_FAIL);
}
static void check_child_status(pid_t pid, const char *name)
{
int status;
waitpid(pid, &status, 0);
if (WIFEXITED(status) && WEXITSTATUS(status) == KSFT_SKIP) {
skip("%s is not supported\n", name);
return;
}
if ((WIFEXITED(status) && WEXITSTATUS(status) == KSFT_PASS) ||
WIFSIGNALED(status)) {
pass("%s is blocked as expected\n", name);
return;
}
fail("%s: unexpected memory access\n", name);
}
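/*
 * Common driver for the remote-access tests: the parent maps one page of
 * secret memory, fills it with PATTERN and passes the pointer to the
 * child over a pipe; the child (try_process_vm_read() or try_ptrace())
 * then tries to read that memory remotely and exits with KSFT_PASS only
 * if the access is refused. check_child_status() turns the child's exit
 * status into the ksft result.
 */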
static void test_remote_access(int fd, const char *name,
void (*func)(int fd, int pipefd[2]))
{
int pipefd[2];
pid_t pid;
char *mem;
if (pipe(pipefd)) {
fail("pipe failed: %s\n", strerror(errno));
return;
}
pid = fork();
if (pid < 0) {
fail("fork failed: %s\n", strerror(errno));
return;
}
if (pid == 0) {
func(fd, pipefd);
return;
}
mem = mmap(NULL, page_size, prot, mode, fd, 0);
if (mem == MAP_FAILED) {
fail("Unable to mmap secret memory\n");
return;
}
ftruncate(fd, page_size);
memset(mem, PATTERN, page_size);
if (write(pipefd[1], &mem, sizeof(mem)) < 0) {
fail("pipe write: %s\n", strerror(errno));
return;
}
check_child_status(pid, name);
}
static void test_process_vm_read(int fd)
{
test_remote_access(fd, "process_vm_read", try_process_vm_read);
}
static void test_ptrace(int fd)
{
test_remote_access(fd, "ptrace", try_ptrace);
}
static int set_cap_limits(rlim_t max)
{
struct rlimit new;
cap_t cap = cap_init();
new.rlim_cur = max;
new.rlim_max = max;
if (setrlimit(RLIMIT_MEMLOCK, &new)) {
perror("setrlimit() returns error");
return -1;
}
/* drop capabilities including CAP_IPC_LOCK */
if (cap_set_proc(cap)) {
perror("cap_set_proc() returns error");
return -2;
}
return 0;
}
static void prepare(void)
{
struct rlimit rlim;
page_size = sysconf(_SC_PAGE_SIZE);
if (!page_size)
ksft_exit_fail_msg("Failed to get page size %s\n",
strerror(errno));
if (getrlimit(RLIMIT_MEMLOCK, &rlim))
ksft_exit_fail_msg("Unable to detect mlock limit: %s\n",
strerror(errno));
mlock_limit_cur = rlim.rlim_cur;
mlock_limit_max = rlim.rlim_max;
printf("page_size: %ld, mlock.soft: %ld, mlock.hard: %ld\n",
page_size, mlock_limit_cur, mlock_limit_max);
if (page_size > mlock_limit_cur)
mlock_limit_cur = page_size;
if (page_size > mlock_limit_max)
mlock_limit_max = page_size;
if (set_cap_limits(mlock_limit_max))
ksft_exit_fail_msg("Unable to set mlock limit: %s\n",
strerror(errno));
}
#define NUM_TESTS 4
int main(int argc, char *argv[])
{
int fd;
prepare();
ksft_print_header();
ksft_set_plan(NUM_TESTS);
fd = memfd_secret(0);
if (fd < 0) {
if (errno == ENOSYS)
ksft_exit_skip("memfd_secret is not supported\n");
else
ksft_exit_fail_msg("memfd_secret failed: %s\n",
strerror(errno));
}
test_mlock_limit(fd);
test_file_apis(fd);
test_process_vm_read(fd);
test_ptrace(fd);
close(fd);
ksft_finished();
}
#else /* __NR_memfd_secret */
int main(int argc, char *argv[])
{
printf("skip: skipping memfd_secret test (missing __NR_memfd_secret)\n");
return KSFT_SKIP;
}
#endif /* __NR_memfd_secret */
| linux-master | tools/testing/selftests/mm/memfd_secret.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2020 Google LLC
*/
#define _GNU_SOURCE
#include <errno.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <time.h>
#include <stdbool.h>
#include "../kselftest.h"
#define EXPECT_SUCCESS 0
#define EXPECT_FAILURE 1
#define NON_OVERLAPPING 0
#define OVERLAPPING 1
#define NS_PER_SEC 1000000000ULL
#define VALIDATION_DEFAULT_THRESHOLD 4 /* 4MB */
#define VALIDATION_NO_THRESHOLD 0 /* Verify the entire region */
#define MIN(X, Y) ((X) < (Y) ? (X) : (Y))
struct config {
unsigned long long src_alignment;
unsigned long long dest_alignment;
unsigned long long region_size;
int overlapping;
};
struct test {
const char *name;
struct config config;
int expect_failure;
};
enum {
_1KB = 1ULL << 10, /* 1KB -> not page aligned */
_4KB = 4ULL << 10,
_8KB = 8ULL << 10,
_1MB = 1ULL << 20,
_2MB = 2ULL << 20,
_4MB = 4ULL << 20,
_1GB = 1ULL << 30,
_2GB = 2ULL << 30,
PMD = _2MB,
PUD = _1GB,
};
#define PTE page_size
#define MAKE_TEST(source_align, destination_align, size, \
overlaps, should_fail, test_name) \
(struct test){ \
.name = test_name, \
.config = { \
.src_alignment = source_align, \
.dest_alignment = destination_align, \
.region_size = size, \
.overlapping = overlaps, \
}, \
.expect_failure = should_fail \
}
/*
* Returns false if the requested remap region overlaps with an
* existing mapping (e.g text, stack) else returns true.
*/
static bool is_remap_region_valid(void *addr, unsigned long long size)
{
void *remap_addr = NULL;
bool ret = true;
/* Use MAP_FIXED_NOREPLACE flag to ensure region is not mapped */
remap_addr = mmap(addr, size, PROT_READ | PROT_WRITE,
MAP_FIXED_NOREPLACE | MAP_ANONYMOUS | MAP_SHARED,
-1, 0);
if (remap_addr == MAP_FAILED) {
if (errno == EEXIST)
ret = false;
} else {
munmap(remap_addr, size);
}
return ret;
}
/* Returns mmap_min_addr sysctl tunable from procfs */
static unsigned long long get_mmap_min_addr(void)
{
FILE *fp;
int n_matched;
static unsigned long long addr;
if (addr)
return addr;
fp = fopen("/proc/sys/vm/mmap_min_addr", "r");
if (fp == NULL) {
ksft_print_msg("Failed to open /proc/sys/vm/mmap_min_addr: %s\n",
strerror(errno));
exit(KSFT_SKIP);
}
n_matched = fscanf(fp, "%llu", &addr);
if (n_matched != 1) {
ksft_print_msg("Failed to read /proc/sys/vm/mmap_min_addr: %s\n",
strerror(errno));
fclose(fp);
exit(KSFT_SKIP);
}
fclose(fp);
return addr;
}
/*
* Using /proc/self/maps, assert that the specified address range is contained
* within a single mapping.
*/
static bool is_range_mapped(FILE *maps_fp, void *start, void *end)
{
char *line = NULL;
size_t len = 0;
bool success = false;
rewind(maps_fp);
while (getline(&line, &len, maps_fp) != -1) {
char *first = strtok(line, "- ");
void *first_val = (void *)strtol(first, NULL, 16);
char *second = strtok(NULL, "- ");
void *second_val = (void *) strtol(second, NULL, 16);
if (first_val <= start && second_val >= end) {
success = true;
break;
}
}
return success;
}
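/*
 * Example of a /proc/self/maps line parsed above (addresses are
 * illustrative): "7f0000000000-7f0000003000 rw-p 00000000 00:00 0".
 * strtok() splits on "- ", so first/second become the start and end of
 * one mapping, and the queried range must fall within a single such line
 * for the expansion to count as merged.
 */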
/*
* This test validates that merge is called when expanding a mapping.
* Mapping containing three pages is created, middle page is unmapped
* and then the mapping containing the first page is expanded so that
* it fills the created hole. The two parts should merge creating
* single mapping with three pages.
*/
static void mremap_expand_merge(FILE *maps_fp, unsigned long page_size)
{
char *test_name = "mremap expand merge";
bool success = false;
char *remap, *start;
start = mmap(NULL, 3 * page_size, PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
if (start == MAP_FAILED) {
ksft_print_msg("mmap failed: %s\n", strerror(errno));
goto out;
}
munmap(start + page_size, page_size);
remap = mremap(start, page_size, 2 * page_size, 0);
if (remap == MAP_FAILED) {
ksft_print_msg("mremap failed: %s\n", strerror(errno));
munmap(start, page_size);
munmap(start + 2 * page_size, page_size);
goto out;
}
success = is_range_mapped(maps_fp, start, start + 3 * page_size);
munmap(start, 3 * page_size);
out:
if (success)
ksft_test_result_pass("%s\n", test_name);
else
ksft_test_result_fail("%s\n", test_name);
}
/*
* Similar to mremap_expand_merge() except instead of removing the middle page,
* we remove the last then attempt to remap offset from the second page. This
* should result in the mapping being restored to its former state.
*/
static void mremap_expand_merge_offset(FILE *maps_fp, unsigned long page_size)
{
char *test_name = "mremap expand merge offset";
bool success = false;
char *remap, *start;
start = mmap(NULL, 3 * page_size, PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
if (start == MAP_FAILED) {
ksft_print_msg("mmap failed: %s\n", strerror(errno));
goto out;
}
/* Unmap final page to ensure we have space to expand. */
munmap(start + 2 * page_size, page_size);
remap = mremap(start + page_size, page_size, 2 * page_size, 0);
if (remap == MAP_FAILED) {
ksft_print_msg("mremap failed: %s\n", strerror(errno));
munmap(start, 2 * page_size);
goto out;
}
success = is_range_mapped(maps_fp, start, start + 3 * page_size);
munmap(start, 3 * page_size);
out:
if (success)
ksft_test_result_pass("%s\n", test_name);
else
ksft_test_result_fail("%s\n", test_name);
}
/*
* Returns the start address of the mapping on success, else returns
* NULL on failure.
*/
static void *get_source_mapping(struct config c)
{
unsigned long long addr = 0ULL;
void *src_addr = NULL;
unsigned long long mmap_min_addr;
mmap_min_addr = get_mmap_min_addr();
retry:
addr += c.src_alignment;
if (addr < mmap_min_addr)
goto retry;
src_addr = mmap((void *) addr, c.region_size, PROT_READ | PROT_WRITE,
MAP_FIXED_NOREPLACE | MAP_ANONYMOUS | MAP_SHARED,
-1, 0);
if (src_addr == MAP_FAILED) {
if (errno == EPERM || errno == EEXIST)
goto retry;
goto error;
}
/*
 * Check that the address is aligned to the specified alignment.
 * Addresses that are aligned to a larger multiple of that alignment
 * are not considered valid: for instance, a 1GB-aligned address is
 * also 2MB-aligned, but it is not accepted for a requested alignment
 * of 2MB. This is done to reduce coincidental alignment in the tests.
*/
if (((unsigned long long) src_addr & (c.src_alignment - 1)) ||
!((unsigned long long) src_addr & c.src_alignment)) {
munmap(src_addr, c.region_size);
goto retry;
}
if (!src_addr)
goto error;
return src_addr;
error:
ksft_print_msg("Failed to map source region: %s\n",
strerror(errno));
return NULL;
}
/* Returns the time taken for the remap on success else returns -1. */
static long long remap_region(struct config c, unsigned int threshold_mb,
char pattern_seed)
{
void *addr, *src_addr, *dest_addr;
unsigned long long i;
struct timespec t_start = {0, 0}, t_end = {0, 0};
long long start_ns, end_ns, align_mask, ret, offset;
unsigned long long threshold;
if (threshold_mb == VALIDATION_NO_THRESHOLD)
threshold = c.region_size;
else
threshold = MIN(threshold_mb * _1MB, c.region_size);
src_addr = get_source_mapping(c);
if (!src_addr) {
ret = -1;
goto out;
}
/* Set byte pattern */
srand(pattern_seed);
for (i = 0; i < threshold; i++)
memset((char *) src_addr + i, (char) rand(), 1);
/* Mask to zero out lower bits of address for alignment */
align_mask = ~(c.dest_alignment - 1);
/* Offset of destination address from the end of the source region */
offset = (c.overlapping) ? -c.dest_alignment : c.dest_alignment;
addr = (void *) (((unsigned long long) src_addr + c.region_size
+ offset) & align_mask);
/* See comment in get_source_mapping() */
if (!((unsigned long long) addr & c.dest_alignment))
addr = (void *) ((unsigned long long) addr | c.dest_alignment);
/* Don't destroy existing mappings unless expected to overlap */
while (!is_remap_region_valid(addr, c.region_size) && !c.overlapping) {
/* Check for unsigned overflow */
if (addr + c.dest_alignment < addr) {
ksft_print_msg("Couldn't find a valid region to remap to\n");
ret = -1;
goto out;
}
addr += c.dest_alignment;
}
clock_gettime(CLOCK_MONOTONIC, &t_start);
dest_addr = mremap(src_addr, c.region_size, c.region_size,
MREMAP_MAYMOVE|MREMAP_FIXED, (char *) addr);
clock_gettime(CLOCK_MONOTONIC, &t_end);
if (dest_addr == MAP_FAILED) {
ksft_print_msg("mremap failed: %s\n", strerror(errno));
ret = -1;
goto clean_up_src;
}
/* Verify byte pattern after remapping */
srand(pattern_seed);
for (i = 0; i < threshold; i++) {
char c = (char) rand();
if (((char *) dest_addr)[i] != c) {
ksft_print_msg("Data after remap doesn't match at offset %llu\n",
i);
ksft_print_msg("Expected: %#x\t Got: %#x\n", c & 0xff,
((char *) dest_addr)[i] & 0xff);
ret = -1;
goto clean_up_dest;
}
}
start_ns = t_start.tv_sec * NS_PER_SEC + t_start.tv_nsec;
end_ns = t_end.tv_sec * NS_PER_SEC + t_end.tv_nsec;
ret = end_ns - start_ns;
/*
* Since the destination address is specified using MREMAP_FIXED, subsequent
* mremap will unmap any previous mapping at the address range specified by
* dest_addr and region_size. This significantly affects the remap time of
* subsequent tests. So we clean up mappings after each test.
*/
clean_up_dest:
munmap(dest_addr, c.region_size);
clean_up_src:
munmap(src_addr, c.region_size);
out:
return ret;
}
static void run_mremap_test_case(struct test test_case, int *failures,
unsigned int threshold_mb,
unsigned int pattern_seed)
{
long long remap_time = remap_region(test_case.config, threshold_mb,
pattern_seed);
if (remap_time < 0) {
if (test_case.expect_failure)
ksft_test_result_xfail("%s\n\tExpected mremap failure\n",
test_case.name);
else {
ksft_test_result_fail("%s\n", test_case.name);
*failures += 1;
}
} else {
/*
* Comparing mremap time is only applicable if entire region
* was faulted in.
*/
if (threshold_mb == VALIDATION_NO_THRESHOLD ||
test_case.config.region_size <= threshold_mb * _1MB)
ksft_test_result_pass("%s\n\tmremap time: %12lldns\n",
test_case.name, remap_time);
else
ksft_test_result_pass("%s\n", test_case.name);
}
}
static void usage(const char *cmd)
{
fprintf(stderr,
"Usage: %s [[-t <threshold_mb>] [-p <pattern_seed>]]\n"
"-t\t only validate threshold_mb of the remapped region\n"
" \t if 0 is supplied no threshold is used; all tests\n"
" \t are run and remapped regions validated fully.\n"
" \t The default threshold used is 4MB.\n"
"-p\t provide a seed to generate the random pattern for\n"
" \t validating the remapped region.\n", cmd);
}
static int parse_args(int argc, char **argv, unsigned int *threshold_mb,
unsigned int *pattern_seed)
{
const char *optstr = "t:p:";
int opt;
while ((opt = getopt(argc, argv, optstr)) != -1) {
switch (opt) {
case 't':
*threshold_mb = atoi(optarg);
break;
case 'p':
*pattern_seed = atoi(optarg);
break;
default:
usage(argv[0]);
return -1;
}
}
if (optind < argc) {
usage(argv[0]);
return -1;
}
return 0;
}
#define MAX_TEST 13
#define MAX_PERF_TEST 3
int main(int argc, char **argv)
{
int failures = 0;
int i, run_perf_tests;
unsigned int threshold_mb = VALIDATION_DEFAULT_THRESHOLD;
unsigned int pattern_seed;
int num_expand_tests = 2;
struct test test_cases[MAX_TEST];
struct test perf_test_cases[MAX_PERF_TEST];
int page_size;
time_t t;
FILE *maps_fp;
pattern_seed = (unsigned int) time(&t);
if (parse_args(argc, argv, &threshold_mb, &pattern_seed) < 0)
exit(EXIT_FAILURE);
ksft_print_msg("Test configs:\n\tthreshold_mb=%u\n\tpattern_seed=%u\n\n",
threshold_mb, pattern_seed);
page_size = sysconf(_SC_PAGESIZE);
/* Expected mremap failures */
test_cases[0] = MAKE_TEST(page_size, page_size, page_size,
OVERLAPPING, EXPECT_FAILURE,
"mremap - Source and Destination Regions Overlapping");
test_cases[1] = MAKE_TEST(page_size, page_size/4, page_size,
NON_OVERLAPPING, EXPECT_FAILURE,
"mremap - Destination Address Misaligned (1KB-aligned)");
test_cases[2] = MAKE_TEST(page_size/4, page_size, page_size,
NON_OVERLAPPING, EXPECT_FAILURE,
"mremap - Source Address Misaligned (1KB-aligned)");
/* Src addr PTE aligned */
test_cases[3] = MAKE_TEST(PTE, PTE, PTE * 2,
NON_OVERLAPPING, EXPECT_SUCCESS,
"8KB mremap - Source PTE-aligned, Destination PTE-aligned");
/* Src addr 1MB aligned */
test_cases[4] = MAKE_TEST(_1MB, PTE, _2MB, NON_OVERLAPPING, EXPECT_SUCCESS,
"2MB mremap - Source 1MB-aligned, Destination PTE-aligned");
test_cases[5] = MAKE_TEST(_1MB, _1MB, _2MB, NON_OVERLAPPING, EXPECT_SUCCESS,
"2MB mremap - Source 1MB-aligned, Destination 1MB-aligned");
/* Src addr PMD aligned */
test_cases[6] = MAKE_TEST(PMD, PTE, _4MB, NON_OVERLAPPING, EXPECT_SUCCESS,
"4MB mremap - Source PMD-aligned, Destination PTE-aligned");
test_cases[7] = MAKE_TEST(PMD, _1MB, _4MB, NON_OVERLAPPING, EXPECT_SUCCESS,
"4MB mremap - Source PMD-aligned, Destination 1MB-aligned");
test_cases[8] = MAKE_TEST(PMD, PMD, _4MB, NON_OVERLAPPING, EXPECT_SUCCESS,
"4MB mremap - Source PMD-aligned, Destination PMD-aligned");
/* Src addr PUD aligned */
test_cases[9] = MAKE_TEST(PUD, PTE, _2GB, NON_OVERLAPPING, EXPECT_SUCCESS,
"2GB mremap - Source PUD-aligned, Destination PTE-aligned");
test_cases[10] = MAKE_TEST(PUD, _1MB, _2GB, NON_OVERLAPPING, EXPECT_SUCCESS,
"2GB mremap - Source PUD-aligned, Destination 1MB-aligned");
test_cases[11] = MAKE_TEST(PUD, PMD, _2GB, NON_OVERLAPPING, EXPECT_SUCCESS,
"2GB mremap - Source PUD-aligned, Destination PMD-aligned");
test_cases[12] = MAKE_TEST(PUD, PUD, _2GB, NON_OVERLAPPING, EXPECT_SUCCESS,
"2GB mremap - Source PUD-aligned, Destination PUD-aligned");
perf_test_cases[0] = MAKE_TEST(page_size, page_size, _1GB, NON_OVERLAPPING, EXPECT_SUCCESS,
"1GB mremap - Source PTE-aligned, Destination PTE-aligned");
/*
* mremap 1GB region - Page table level aligned time
* comparison.
*/
perf_test_cases[1] = MAKE_TEST(PMD, PMD, _1GB, NON_OVERLAPPING, EXPECT_SUCCESS,
"1GB mremap - Source PMD-aligned, Destination PMD-aligned");
perf_test_cases[2] = MAKE_TEST(PUD, PUD, _1GB, NON_OVERLAPPING, EXPECT_SUCCESS,
"1GB mremap - Source PUD-aligned, Destination PUD-aligned");
run_perf_tests = (threshold_mb == VALIDATION_NO_THRESHOLD) ||
(threshold_mb * _1MB >= _1GB);
ksft_set_plan(ARRAY_SIZE(test_cases) + (run_perf_tests ?
ARRAY_SIZE(perf_test_cases) : 0) + num_expand_tests);
for (i = 0; i < ARRAY_SIZE(test_cases); i++)
run_mremap_test_case(test_cases[i], &failures, threshold_mb,
pattern_seed);
maps_fp = fopen("/proc/self/maps", "r");
if (maps_fp == NULL) {
ksft_print_msg("Failed to read /proc/self/maps: %s\n", strerror(errno));
exit(KSFT_FAIL);
}
mremap_expand_merge(maps_fp, page_size);
mremap_expand_merge_offset(maps_fp, page_size);
fclose(maps_fp);
if (run_perf_tests) {
ksft_print_msg("\n%s\n",
"mremap HAVE_MOVE_PMD/PUD optimization time comparison for 1GB region:");
for (i = 0; i < ARRAY_SIZE(perf_test_cases); i++)
run_mremap_test_case(perf_test_cases[i], &failures,
threshold_mb, pattern_seed);
}
if (failures > 0)
ksft_exit_fail();
else
ksft_exit_pass();
}
| linux-master | tools/testing/selftests/mm/mremap_test.c |
// SPDX-License-Identifier: GPL-2.0
/*
* hugepage-madvise:
*
* Basic functional testing of madvise MADV_DONTNEED and MADV_REMOVE
* on hugetlb mappings.
*
* Before running this test, make sure the administrator has pre-allocated
 * at least MIN_FREE_PAGES hugetlb pages and they are free. The test
 * allocates its huge pages from a hugetlb-backed memfd, so no hugetlbfs
 * mount or file path argument is required.
*/
#define _GNU_SOURCE
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/mman.h>
#include <fcntl.h>
#include "vm_util.h"
#define MIN_FREE_PAGES 20
#define NR_HUGE_PAGES 10 /* common number of pages to map/allocate */
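/*
 * Re-read HugePages_Free from /proc/meminfo and fail the test immediately
 * if it does not match the expected count.
 */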
#define validate_free_pages(exp_free) \
do { \
int fhp = get_free_hugepages(); \
if (fhp != (exp_free)) { \
printf("Unexpected number of free huge " \
"pages line %d\n", __LINE__); \
exit(1); \
} \
} while (0)
unsigned long huge_page_size;
unsigned long base_page_size;
unsigned long get_free_hugepages(void)
{
unsigned long fhp = 0;
char *line = NULL;
size_t linelen = 0;
FILE *f = fopen("/proc/meminfo", "r");
if (!f)
return fhp;
while (getline(&line, &linelen, f) > 0) {
if (sscanf(line, "HugePages_Free: %lu", &fhp) == 1)
break;
}
free(line);
fclose(f);
return fhp;
}
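/* Touch every huge page in the range with a write so it is faulted in. */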
void write_fault_pages(void *addr, unsigned long nr_pages)
{
unsigned long i;
for (i = 0; i < nr_pages; i++)
*((unsigned long *)(addr + (i * huge_page_size))) = i;
}
void read_fault_pages(void *addr, unsigned long nr_pages)
{
volatile unsigned long dummy = 0;
unsigned long i;
for (i = 0; i < nr_pages; i++) {
dummy += *((unsigned long *)(addr + (i * huge_page_size)));
/* Prevent the compiler from optimizing out the entire loop: */
asm volatile("" : "+r" (dummy));
}
}
int main(int argc, char **argv)
{
unsigned long free_hugepages;
void *addr, *addr2;
int fd;
int ret;
huge_page_size = default_huge_page_size();
if (!huge_page_size) {
printf("Unable to determine huge page size, exiting!\n");
exit(1);
}
base_page_size = sysconf(_SC_PAGE_SIZE);
if (!base_page_size) {
printf("Unable to determine base page size, exiting!\n");
exit(1);
}
free_hugepages = get_free_hugepages();
if (free_hugepages < MIN_FREE_PAGES) {
printf("Not enough free huge pages to test, exiting!\n");
exit(1);
}
fd = memfd_create(argv[0], MFD_HUGETLB);
if (fd < 0) {
perror("memfd_create() failed");
exit(1);
}
/*
* Test validity of MADV_DONTNEED addr and length arguments. mmap
* size is NR_HUGE_PAGES + 2. One page at the beginning and end of
* the mapping will be unmapped so we KNOW there is nothing mapped
* there.
*/
addr = mmap(NULL, (NR_HUGE_PAGES + 2) * huge_page_size,
PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB,
-1, 0);
if (addr == MAP_FAILED) {
perror("mmap");
exit(1);
}
if (munmap(addr, huge_page_size) ||
munmap(addr + (NR_HUGE_PAGES + 1) * huge_page_size,
huge_page_size)) {
perror("munmap");
exit(1);
}
addr = addr + huge_page_size;
write_fault_pages(addr, NR_HUGE_PAGES);
validate_free_pages(free_hugepages - NR_HUGE_PAGES);
/* addr before mapping should fail */
ret = madvise(addr - base_page_size, NR_HUGE_PAGES * huge_page_size,
MADV_DONTNEED);
if (!ret) {
printf("Unexpected success of madvise call with invalid addr line %d\n",
__LINE__);
exit(1);
}
/* addr + length after mapping should fail */
ret = madvise(addr, (NR_HUGE_PAGES * huge_page_size) + base_page_size,
MADV_DONTNEED);
if (!ret) {
printf("Unexpected success of madvise call with invalid length line %d\n",
__LINE__);
exit(1);
}
(void)munmap(addr, NR_HUGE_PAGES * huge_page_size);
/*
* Test alignment of MADV_DONTNEED addr and length arguments
*/
addr = mmap(NULL, NR_HUGE_PAGES * huge_page_size,
PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB,
-1, 0);
if (addr == MAP_FAILED) {
perror("mmap");
exit(1);
}
write_fault_pages(addr, NR_HUGE_PAGES);
validate_free_pages(free_hugepages - NR_HUGE_PAGES);
/* addr is not huge page size aligned and should fail */
ret = madvise(addr + base_page_size,
NR_HUGE_PAGES * huge_page_size - base_page_size,
MADV_DONTNEED);
if (!ret) {
printf("Unexpected success of madvise call with unaligned start address line %d\n",
__LINE__);
exit(1);
}
/* addr + length should be aligned down to huge page size */
if (madvise(addr,
((NR_HUGE_PAGES - 1) * huge_page_size) + base_page_size,
MADV_DONTNEED)) {
perror("madvise");
exit(1);
}
/* should free all but last page in mapping */
validate_free_pages(free_hugepages - 1);
(void)munmap(addr, NR_HUGE_PAGES * huge_page_size);
validate_free_pages(free_hugepages);
/*
* Test MADV_DONTNEED on anonymous private mapping
*/
addr = mmap(NULL, NR_HUGE_PAGES * huge_page_size,
PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB,
-1, 0);
if (addr == MAP_FAILED) {
perror("mmap");
exit(1);
}
write_fault_pages(addr, NR_HUGE_PAGES);
validate_free_pages(free_hugepages - NR_HUGE_PAGES);
if (madvise(addr, NR_HUGE_PAGES * huge_page_size, MADV_DONTNEED)) {
perror("madvise");
exit(1);
}
/* should free all pages in mapping */
validate_free_pages(free_hugepages);
(void)munmap(addr, NR_HUGE_PAGES * huge_page_size);
/*
* Test MADV_DONTNEED on private mapping of hugetlb file
*/
if (fallocate(fd, 0, 0, NR_HUGE_PAGES * huge_page_size)) {
perror("fallocate");
exit(1);
}
validate_free_pages(free_hugepages - NR_HUGE_PAGES);
addr = mmap(NULL, NR_HUGE_PAGES * huge_page_size,
PROT_READ | PROT_WRITE,
MAP_PRIVATE, fd, 0);
if (addr == MAP_FAILED) {
perror("mmap");
exit(1);
}
/* read should not consume any pages */
read_fault_pages(addr, NR_HUGE_PAGES);
validate_free_pages(free_hugepages - NR_HUGE_PAGES);
/* madvise should not free any pages */
if (madvise(addr, NR_HUGE_PAGES * huge_page_size, MADV_DONTNEED)) {
perror("madvise");
exit(1);
}
validate_free_pages(free_hugepages - NR_HUGE_PAGES);
/* writes should allocate private pages */
write_fault_pages(addr, NR_HUGE_PAGES);
validate_free_pages(free_hugepages - (2 * NR_HUGE_PAGES));
/* madvise should free private pages */
if (madvise(addr, NR_HUGE_PAGES * huge_page_size, MADV_DONTNEED)) {
perror("madvise");
exit(1);
}
validate_free_pages(free_hugepages - NR_HUGE_PAGES);
/* writes should allocate private pages */
write_fault_pages(addr, NR_HUGE_PAGES);
validate_free_pages(free_hugepages - (2 * NR_HUGE_PAGES));
/*
* The fallocate below certainly should free the pages associated
* with the file. However, pages in the private mapping are also
* freed. This is not the 'correct' behavior, but is expected
* because this is how it has worked since the initial hugetlb
* implementation.
*/
if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
0, NR_HUGE_PAGES * huge_page_size)) {
perror("fallocate");
exit(1);
}
validate_free_pages(free_hugepages);
(void)munmap(addr, NR_HUGE_PAGES * huge_page_size);
/*
* Test MADV_DONTNEED on shared mapping of hugetlb file
*/
if (fallocate(fd, 0, 0, NR_HUGE_PAGES * huge_page_size)) {
perror("fallocate");
exit(1);
}
validate_free_pages(free_hugepages - NR_HUGE_PAGES);
addr = mmap(NULL, NR_HUGE_PAGES * huge_page_size,
PROT_READ | PROT_WRITE,
MAP_SHARED, fd, 0);
if (addr == MAP_FAILED) {
perror("mmap");
exit(1);
}
/* write should not consume any pages */
write_fault_pages(addr, NR_HUGE_PAGES);
validate_free_pages(free_hugepages - NR_HUGE_PAGES);
/* madvise should not free any pages */
if (madvise(addr, NR_HUGE_PAGES * huge_page_size, MADV_DONTNEED)) {
perror("madvise");
exit(1);
}
validate_free_pages(free_hugepages - NR_HUGE_PAGES);
/*
* Test MADV_REMOVE on shared mapping of hugetlb file
*
* madvise is same as hole punch and should free all pages.
*/
if (madvise(addr, NR_HUGE_PAGES * huge_page_size, MADV_REMOVE)) {
perror("madvise");
exit(1);
}
validate_free_pages(free_hugepages);
(void)munmap(addr, NR_HUGE_PAGES * huge_page_size);
/*
* Test MADV_REMOVE on shared and private mapping of hugetlb file
*/
if (fallocate(fd, 0, 0, NR_HUGE_PAGES * huge_page_size)) {
perror("fallocate");
exit(1);
}
validate_free_pages(free_hugepages - NR_HUGE_PAGES);
addr = mmap(NULL, NR_HUGE_PAGES * huge_page_size,
PROT_READ | PROT_WRITE,
MAP_SHARED, fd, 0);
if (addr == MAP_FAILED) {
perror("mmap");
exit(1);
}
/* shared write should not consume any additional pages */
write_fault_pages(addr, NR_HUGE_PAGES);
validate_free_pages(free_hugepages - NR_HUGE_PAGES);
addr2 = mmap(NULL, NR_HUGE_PAGES * huge_page_size,
PROT_READ | PROT_WRITE,
MAP_PRIVATE, fd, 0);
if (addr2 == MAP_FAILED) {
perror("mmap");
exit(1);
}
/* private read should not consume any pages */
read_fault_pages(addr2, NR_HUGE_PAGES);
validate_free_pages(free_hugepages - NR_HUGE_PAGES);
/* private write should consume additional pages */
write_fault_pages(addr2, NR_HUGE_PAGES);
validate_free_pages(free_hugepages - (2 * NR_HUGE_PAGES));
/* madvise of shared mapping should not free any pages */
if (madvise(addr, NR_HUGE_PAGES * huge_page_size, MADV_DONTNEED)) {
perror("madvise");
exit(1);
}
validate_free_pages(free_hugepages - (2 * NR_HUGE_PAGES));
/* madvise of private mapping should free private pages */
if (madvise(addr2, NR_HUGE_PAGES * huge_page_size, MADV_DONTNEED)) {
perror("madvise");
exit(1);
}
validate_free_pages(free_hugepages - NR_HUGE_PAGES);
/* private write should consume additional pages again */
write_fault_pages(addr2, NR_HUGE_PAGES);
validate_free_pages(free_hugepages - (2 * NR_HUGE_PAGES));
/*
* madvise should free both file and private pages although this is
* not correct. private pages should not be freed, but this is
* expected. See comment associated with FALLOC_FL_PUNCH_HOLE call.
*/
if (madvise(addr, NR_HUGE_PAGES * huge_page_size, MADV_REMOVE)) {
perror("madvise");
exit(1);
}
validate_free_pages(free_hugepages);
(void)munmap(addr, NR_HUGE_PAGES * huge_page_size);
(void)munmap(addr2, NR_HUGE_PAGES * huge_page_size);
close(fd);
return 0;
}
| linux-master | tools/testing/selftests/mm/hugetlb-madvise.c |
// SPDX-License-Identifier: GPL-2.0
/*
* hugepage-mmap:
*
* Example of using huge page memory in a user application using the mmap
* system call. Before running this application, make sure that the
* administrator has mounted the hugetlbfs filesystem (on some directory
* like /mnt) using the command mount -t hugetlbfs nodev /mnt. In this
* example, the app is requesting memory of size 256MB that is backed by
* huge pages.
*
* For the ia64 architecture, the Linux kernel reserves Region number 4 for
* huge pages. That means that if one requires a fixed address, a huge page
* aligned address starting with 0x800000... will be required. If a fixed
* address is not required, the kernel will select an address in the proper
* range.
* Other architectures, such as ppc64, i386 or x86_64 are not so constrained.
*/
#define _GNU_SOURCE
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/mman.h>
#include <fcntl.h>
#define LENGTH (256UL*1024*1024)
#define PROTECTION (PROT_READ | PROT_WRITE)
/* Only ia64 requires this */
#ifdef __ia64__
#define ADDR (void *)(0x8000000000000000UL)
#define FLAGS (MAP_SHARED | MAP_FIXED)
#else
#define ADDR (void *)(0x0UL)
#define FLAGS (MAP_SHARED)
#endif
static void check_bytes(char *addr)
{
printf("First hex is %x\n", *((unsigned int *)addr));
}
static void write_bytes(char *addr)
{
unsigned long i;
for (i = 0; i < LENGTH; i++)
*(addr + i) = (char)i;
}
static int read_bytes(char *addr)
{
unsigned long i;
check_bytes(addr);
for (i = 0; i < LENGTH; i++)
if (*(addr + i) != (char)i) {
printf("Mismatch at %lu\n", i);
return 1;
}
return 0;
}
int main(void)
{
void *addr;
int fd, ret;
fd = memfd_create("hugepage-mmap", MFD_HUGETLB);
if (fd < 0) {
perror("memfd_create() failed");
exit(1);
}
addr = mmap(ADDR, LENGTH, PROTECTION, FLAGS, fd, 0);
if (addr == MAP_FAILED) {
perror("mmap");
close(fd);
exit(1);
}
printf("Returned address is %p\n", addr);
check_bytes(addr);
write_bytes(addr);
ret = read_bytes(addr);
munmap(addr, LENGTH);
close(fd);
return ret;
}
| linux-master | tools/testing/selftests/mm/hugepage-mmap.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Tests for mremap w/ MREMAP_DONTUNMAP.
*
* Copyright 2020, Brian Geffon <[email protected]>
*/
#define _GNU_SOURCE
#include <sys/mman.h>
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include "../kselftest.h"
unsigned long page_size;
char *page_buffer;
static void dump_maps(void)
{
char cmd[32];
snprintf(cmd, sizeof(cmd), "cat /proc/%d/maps", getpid());
system(cmd);
}
#define BUG_ON(condition, description) \
do { \
if (condition) { \
fprintf(stderr, "[FAIL]\t%s():%d\t%s:%s\n", __func__, \
__LINE__, (description), strerror(errno)); \
dump_maps(); \
exit(1); \
} \
} while (0)
// Try a simple operation to "test" for kernel support; this prevents
// reporting tests as failed when run on an older kernel.
static int kernel_support_for_mremap_dontunmap()
{
int ret = 0;
unsigned long num_pages = 1;
void *source_mapping = mmap(NULL, num_pages * page_size, PROT_NONE,
MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
BUG_ON(source_mapping == MAP_FAILED, "mmap");
// This simple remap should only fail if MREMAP_DONTUNMAP isn't
// supported.
void *dest_mapping =
mremap(source_mapping, num_pages * page_size, num_pages * page_size,
MREMAP_DONTUNMAP | MREMAP_MAYMOVE, 0);
if (dest_mapping == MAP_FAILED) {
ret = errno;
} else {
BUG_ON(munmap(dest_mapping, num_pages * page_size) == -1,
"unable to unmap destination mapping");
}
BUG_ON(munmap(source_mapping, num_pages * page_size) == -1,
"unable to unmap source mapping");
return ret;
}
// This helper will just validate that an entire mapping contains the expected
// byte.
static int check_region_contains_byte(void *addr, unsigned long size, char byte)
{
BUG_ON(size & (page_size - 1),
"check_region_contains_byte expects page multiples");
BUG_ON((unsigned long)addr & (page_size - 1),
"check_region_contains_byte expects page alignment");
memset(page_buffer, byte, page_size);
unsigned long num_pages = size / page_size;
unsigned long i;
// Compare each page checking that it contains our expected byte.
for (i = 0; i < num_pages; ++i) {
int ret =
memcmp(addr + (i * page_size), page_buffer, page_size);
if (ret) {
return ret;
}
}
return 0;
}
// this test validates that MREMAP_DONTUNMAP moves the pagetables while leaving
// the source mapping mapped.
static void mremap_dontunmap_simple()
{
unsigned long num_pages = 5;
void *source_mapping =
mmap(NULL, num_pages * page_size, PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
BUG_ON(source_mapping == MAP_FAILED, "mmap");
memset(source_mapping, 'a', num_pages * page_size);
// Try to just move the whole mapping anywhere (not fixed).
void *dest_mapping =
mremap(source_mapping, num_pages * page_size, num_pages * page_size,
MREMAP_DONTUNMAP | MREMAP_MAYMOVE, NULL);
BUG_ON(dest_mapping == MAP_FAILED, "mremap");
// Validate that the pages have been moved, we know they were moved if
// the dest_mapping contains a's.
BUG_ON(check_region_contains_byte
(dest_mapping, num_pages * page_size, 'a') != 0,
"pages did not migrate");
BUG_ON(check_region_contains_byte
(source_mapping, num_pages * page_size, 0) != 0,
"source should have no ptes");
BUG_ON(munmap(dest_mapping, num_pages * page_size) == -1,
"unable to unmap destination mapping");
BUG_ON(munmap(source_mapping, num_pages * page_size) == -1,
"unable to unmap source mapping");
}
// This test validates that MREMAP_DONTUNMAP on a shared mapping works as expected.
static void mremap_dontunmap_simple_shmem()
{
unsigned long num_pages = 5;
int mem_fd = memfd_create("memfd", MFD_CLOEXEC);
BUG_ON(mem_fd < 0, "memfd_create");
BUG_ON(ftruncate(mem_fd, num_pages * page_size) < 0,
"ftruncate");
void *source_mapping =
mmap(NULL, num_pages * page_size, PROT_READ | PROT_WRITE,
MAP_FILE | MAP_SHARED, mem_fd, 0);
BUG_ON(source_mapping == MAP_FAILED, "mmap");
BUG_ON(close(mem_fd) < 0, "close");
memset(source_mapping, 'a', num_pages * page_size);
// Try to just move the whole mapping anywhere (not fixed).
void *dest_mapping =
mremap(source_mapping, num_pages * page_size, num_pages * page_size,
MREMAP_DONTUNMAP | MREMAP_MAYMOVE, NULL);
if (dest_mapping == MAP_FAILED && errno == EINVAL) {
// Old kernel which doesn't support MREMAP_DONTUNMAP on shmem.
BUG_ON(munmap(source_mapping, num_pages * page_size) == -1,
"unable to unmap source mapping");
return;
}
BUG_ON(dest_mapping == MAP_FAILED, "mremap");
// Validate that the pages have been moved, we know they were moved if
// the dest_mapping contains a's.
BUG_ON(check_region_contains_byte
(dest_mapping, num_pages * page_size, 'a') != 0,
"pages did not migrate");
// Because the region is backed by shmem, we will actually see the same
// memory at the source location still.
BUG_ON(check_region_contains_byte
(source_mapping, num_pages * page_size, 'a') != 0,
"source should still see the original bytes (shmem-backed)");
BUG_ON(munmap(dest_mapping, num_pages * page_size) == -1,
"unable to unmap destination mapping");
BUG_ON(munmap(source_mapping, num_pages * page_size) == -1,
"unable to unmap source mapping");
}
// This test validates MREMAP_DONTUNMAP will move page tables to a specific
// destination using MREMAP_FIXED, also while validating that the source
// remains intact.
static void mremap_dontunmap_simple_fixed()
{
unsigned long num_pages = 5;
// Since we want to guarantee that we can remap to a point, we will
// create a mapping up front.
void *dest_mapping =
mmap(NULL, num_pages * page_size, PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
BUG_ON(dest_mapping == MAP_FAILED, "mmap");
memset(dest_mapping, 'X', num_pages * page_size);
void *source_mapping =
mmap(NULL, num_pages * page_size, PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
BUG_ON(source_mapping == MAP_FAILED, "mmap");
memset(source_mapping, 'a', num_pages * page_size);
void *remapped_mapping =
mremap(source_mapping, num_pages * page_size, num_pages * page_size,
MREMAP_FIXED | MREMAP_DONTUNMAP | MREMAP_MAYMOVE,
dest_mapping);
BUG_ON(remapped_mapping == MAP_FAILED, "mremap");
BUG_ON(remapped_mapping != dest_mapping,
"mremap should have placed the remapped mapping at dest_mapping");
// The dest mapping will have been unmap by mremap so we expect the Xs
// to be gone and replaced with a's.
BUG_ON(check_region_contains_byte
(dest_mapping, num_pages * page_size, 'a') != 0,
"pages did not migrate");
// And the source mapping will have had its ptes dropped.
BUG_ON(check_region_contains_byte
(source_mapping, num_pages * page_size, 0) != 0,
"source should have no ptes");
BUG_ON(munmap(dest_mapping, num_pages * page_size) == -1,
"unable to unmap destination mapping");
BUG_ON(munmap(source_mapping, num_pages * page_size) == -1,
"unable to unmap source mapping");
}
// This test validates that we can MREMAP_DONTUNMAP for a portion of an
// existing mapping.
static void mremap_dontunmap_partial_mapping()
{
/*
* source mapping:
* --------------
* | aaaaaaaaaa |
* --------------
* to become:
* --------------
* | aaaaa00000 |
* --------------
* With the destination mapping containing 5 pages of As.
* ---------
* | aaaaa |
* ---------
*/
unsigned long num_pages = 10;
void *source_mapping =
mmap(NULL, num_pages * page_size, PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
BUG_ON(source_mapping == MAP_FAILED, "mmap");
memset(source_mapping, 'a', num_pages * page_size);
// We will grab the last 5 pages of the source and move them.
void *dest_mapping =
mremap(source_mapping + (5 * page_size), 5 * page_size,
5 * page_size,
MREMAP_DONTUNMAP | MREMAP_MAYMOVE, NULL);
BUG_ON(dest_mapping == MAP_FAILED, "mremap");
// We expect the first 5 pages of the source to contain a's and the
// final 5 pages to contain zeros.
BUG_ON(check_region_contains_byte(source_mapping, 5 * page_size, 'a') !=
0, "first 5 pages of source should have original pages");
BUG_ON(check_region_contains_byte
(source_mapping + (5 * page_size), 5 * page_size, 0) != 0,
"final 5 pages of source should have no ptes");
// Finally we expect the destination to have 5 pages worth of a's.
BUG_ON(check_region_contains_byte(dest_mapping, 5 * page_size, 'a') !=
0, "dest mapping should contain ptes from the source");
BUG_ON(munmap(dest_mapping, 5 * page_size) == -1,
"unable to unmap destination mapping");
BUG_ON(munmap(source_mapping, num_pages * page_size) == -1,
"unable to unmap source mapping");
}
// This test validates that we can remap over only a portion of a mapping.
static void mremap_dontunmap_partial_mapping_overwrite(void)
{
/*
* source mapping:
* ---------
* |aaaaa|
* ---------
* dest mapping initially:
* -----------
* |XXXXXXXXXX|
* ------------
* Source to become:
* ---------
* |00000|
* ---------
 * With the destination mapping's first 5 pages containing a's.
* ------------
* |aaaaaXXXXX|
* ------------
*/
void *source_mapping =
mmap(NULL, 5 * page_size, PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
BUG_ON(source_mapping == MAP_FAILED, "mmap");
memset(source_mapping, 'a', 5 * page_size);
void *dest_mapping =
mmap(NULL, 10 * page_size, PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
BUG_ON(dest_mapping == MAP_FAILED, "mmap");
memset(dest_mapping, 'X', 10 * page_size);
// We will grab the last 5 pages of the source and move them.
void *remapped_mapping =
mremap(source_mapping, 5 * page_size,
5 * page_size,
MREMAP_DONTUNMAP | MREMAP_MAYMOVE | MREMAP_FIXED, dest_mapping);
BUG_ON(dest_mapping == MAP_FAILED, "mremap");
BUG_ON(dest_mapping != remapped_mapping, "expected to remap to dest_mapping");
BUG_ON(check_region_contains_byte(source_mapping, 5 * page_size, 0) !=
0, "first 5 pages of source should have no ptes");
// Finally we expect the destination to have 5 pages worth of a's.
BUG_ON(check_region_contains_byte(dest_mapping, 5 * page_size, 'a') != 0,
"dest mapping should contain ptes from the source");
// Finally the last 5 pages shouldn't have been touched.
BUG_ON(check_region_contains_byte(dest_mapping + (5 * page_size),
5 * page_size, 'X') != 0,
"dest mapping should have retained the last 5 pages");
BUG_ON(munmap(dest_mapping, 10 * page_size) == -1,
"unable to unmap destination mapping");
BUG_ON(munmap(source_mapping, 5 * page_size) == -1,
"unable to unmap source mapping");
}
int main(void)
{
page_size = sysconf(_SC_PAGE_SIZE);
// test for kernel support for MREMAP_DONTUNMAP skipping the test if
// not.
if (kernel_support_for_mremap_dontunmap() != 0) {
printf("No kernel support for MREMAP_DONTUNMAP\n");
return KSFT_SKIP;
}
// Keep a page sized buffer around for when we need it.
page_buffer =
mmap(NULL, page_size, PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
BUG_ON(page_buffer == MAP_FAILED, "unable to mmap a page.");
mremap_dontunmap_simple();
mremap_dontunmap_simple_shmem();
mremap_dontunmap_simple_fixed();
mremap_dontunmap_partial_mapping();
mremap_dontunmap_partial_mapping_overwrite();
BUG_ON(munmap(page_buffer, page_size) == -1,
"unable to unmap page buffer");
printf("OK\n");
return 0;
}
| linux-master | tools/testing/selftests/mm/mremap_dontunmap.c |
// SPDX-License-Identifier: GPL-2.0
/*
* This program reserves and uses hugetlb memory, supporting a bunch of
* scenarios needed by the charged_reserved_hugetlb.sh test.
*/
#include <err.h>
#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/types.h>
#include <sys/shm.h>
#include <sys/stat.h>
#include <sys/mman.h>
/* Global definitions. */
enum method {
HUGETLBFS,
MMAP_MAP_HUGETLB,
SHM,
MAX_METHOD
};
/* Global variables. */
static const char *self;
static char *shmaddr;
static int shmid;
/*
* Show usage and exit.
*/
static void exit_usage(void)
{
printf("Usage: %s -p <path to hugetlbfs file> -s <size to map> "
"[-m <0=hugetlbfs | 1=mmap(MAP_HUGETLB)>] [-l] [-r] "
"[-o] [-w] [-n]\n",
self);
exit(EXIT_FAILURE);
}
void sig_handler(int signo)
{
printf("Received %d.\n", signo);
if (signo == SIGINT) {
printf("Deleting the memory\n");
if (shmdt((const void *)shmaddr) != 0) {
perror("Detach failure");
shmctl(shmid, IPC_RMID, NULL);
exit(4);
}
shmctl(shmid, IPC_RMID, NULL);
printf("Done deleting the memory\n");
}
exit(2);
}
int main(int argc, char **argv)
{
int fd = 0;
int key = 0;
int *ptr = NULL;
int c = 0;
int size = 0;
char path[256] = "";
enum method method = MAX_METHOD;
int want_sleep = 0, private = 0;
int populate = 0;
int write = 0;
int reserve = 1;
if (signal(SIGINT, sig_handler) == SIG_ERR)
err(1, "\ncan't catch SIGINT\n");
/* Parse command-line arguments. */
setvbuf(stdout, NULL, _IONBF, 0);
self = argv[0];
while ((c = getopt(argc, argv, "s:p:m:owlrn")) != -1) {
switch (c) {
case 's':
size = atoi(optarg);
break;
case 'p':
strncpy(path, optarg, sizeof(path) - 1);
break;
case 'm':
if (atoi(optarg) >= MAX_METHOD) {
errno = EINVAL;
perror("Invalid -m.");
exit_usage();
}
method = atoi(optarg);
break;
case 'o':
populate = 1;
break;
case 'w':
write = 1;
break;
case 'l':
want_sleep = 1;
break;
case 'r':
private = 1;
break;
case 'n':
reserve = 0;
break;
default:
errno = EINVAL;
perror("Invalid arg");
exit_usage();
}
}
if (strncmp(path, "", sizeof(path)) != 0) {
printf("Writing to this path: %s\n", path);
} else {
errno = EINVAL;
perror("path not found");
exit_usage();
}
if (size != 0) {
printf("Writing this size: %d\n", size);
} else {
errno = EINVAL;
perror("size not found");
exit_usage();
}
if (!populate)
printf("Not populating.\n");
else
printf("Populating.\n");
if (!write)
printf("Not writing to memory.\n");
if (method == MAX_METHOD) {
errno = EINVAL;
perror("-m Invalid");
exit_usage();
} else
printf("Using method=%d\n", method);
if (!private)
printf("Shared mapping.\n");
else
printf("Private mapping.\n");
if (!reserve)
printf("NO_RESERVE mapping.\n");
else
printf("RESERVE mapping.\n");
switch (method) {
case HUGETLBFS:
printf("Allocating using HUGETLBFS.\n");
fd = open(path, O_CREAT | O_RDWR, 0777);
if (fd == -1)
err(1, "Failed to open file.");
ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
(private ? MAP_PRIVATE : MAP_SHARED) |
(populate ? MAP_POPULATE : 0) |
(reserve ? 0 : MAP_NORESERVE),
fd, 0);
if (ptr == MAP_FAILED) {
close(fd);
err(1, "Error mapping the file");
}
break;
case MMAP_MAP_HUGETLB:
printf("Allocating using MAP_HUGETLB.\n");
ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
(private ? (MAP_PRIVATE | MAP_ANONYMOUS) :
MAP_SHARED) |
MAP_HUGETLB | (populate ? MAP_POPULATE : 0) |
(reserve ? 0 : MAP_NORESERVE),
-1, 0);
if (ptr == MAP_FAILED)
err(1, "mmap");
printf("Returned address is %p\n", ptr);
break;
case SHM:
printf("Allocating using SHM.\n");
shmid = shmget(key, size,
SHM_HUGETLB | IPC_CREAT | SHM_R | SHM_W);
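/* If the first shmget fails, retry once with the next key. */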
if (shmid < 0) {
shmid = shmget(++key, size,
SHM_HUGETLB | IPC_CREAT | SHM_R | SHM_W);
if (shmid < 0)
err(1, "shmget");
}
printf("shmid: 0x%x, shmget key:%d\n", shmid, key);
ptr = shmat(shmid, NULL, 0);
if (ptr == (int *)-1) {
perror("Shared memory attach failure");
shmctl(shmid, IPC_RMID, NULL);
exit(2);
}
printf("shmaddr: %p\n", ptr);
break;
default:
errno = EINVAL;
err(1, "Invalid method.");
}
if (write) {
printf("Writing to memory.\n");
memset(ptr, 1, size);
}
if (want_sleep) {
/* Signal to caller that we're done. */
printf("DONE\n");
/* Hold memory until external kill signal is delivered. */
while (1)
sleep(100);
}
if (method == HUGETLBFS)
close(fd);
return 0;
}
| linux-master | tools/testing/selftests/mm/write_to_hugetlbfs.c |
// SPDX-License-Identifier: GPL-2.0
#define _GNU_SOURCE
#include <sys/mman.h>
#include <stdint.h>
#include <unistd.h>
#include <string.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <stdbool.h>
#include "mlock2.h"
#include "../kselftest.h"
struct vm_boundaries {
unsigned long start;
unsigned long end;
};
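/*
 * Find the mapping in /proc/self/maps that contains addr and report its
 * start/end in *area. Returns 0 on success, 1 on failure.
 */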
static int get_vm_area(unsigned long addr, struct vm_boundaries *area)
{
FILE *file;
int ret = 1;
char line[1024] = {0};
char *end_addr;
char *stop;
unsigned long start;
unsigned long end;
if (!area)
return ret;
file = fopen("/proc/self/maps", "r");
if (!file) {
perror("fopen");
return ret;
}
memset(area, 0, sizeof(struct vm_boundaries));
while(fgets(line, 1024, file)) {
end_addr = strchr(line, '-');
if (!end_addr) {
printf("cannot parse /proc/self/maps\n");
goto out;
}
*end_addr = '\0';
end_addr++;
stop = strchr(end_addr, ' ');
if (!stop) {
printf("cannot parse /proc/self/maps\n");
goto out;
}
sscanf(line, "%lx", &start);
sscanf(end_addr, "%lx", &end);
if (start <= addr && end > addr) {
area->start = start;
area->end = end;
ret = 0;
goto out;
}
}
out:
fclose(file);
return ret;
}
#define VMFLAGS "VmFlags:"
static bool is_vmflag_set(unsigned long addr, const char *vmflag)
{
char *line = NULL;
char *flags;
size_t size = 0;
bool ret = false;
FILE *smaps;
smaps = seek_to_smaps_entry(addr);
if (!smaps) {
printf("Unable to parse /proc/self/smaps\n");
goto out;
}
while (getline(&line, &size, smaps) > 0) {
if (!strstr(line, VMFLAGS)) {
free(line);
line = NULL;
size = 0;
continue;
}
flags = line + strlen(VMFLAGS);
ret = (strstr(flags, vmflag) != NULL);
goto out;
}
out:
free(line);
fclose(smaps);
return ret;
}
#define SIZE "Size:"
#define RSS "Rss:"
#define LOCKED "lo"
static unsigned long get_value_for_name(unsigned long addr, const char *name)
{
char *line = NULL;
size_t size = 0;
char *value_ptr;
FILE *smaps = NULL;
unsigned long value = -1UL;
smaps = seek_to_smaps_entry(addr);
if (!smaps) {
printf("Unable to parse /proc/self/smaps\n");
goto out;
}
while (getline(&line, &size, smaps) > 0) {
if (!strstr(line, name)) {
free(line);
line = NULL;
size = 0;
continue;
}
value_ptr = line + strlen(name);
if (sscanf(value_ptr, "%lu kB", &value) < 1) {
printf("Unable to parse smaps entry for %s\n", name);
goto out;
}
break;
}
out:
if (smaps)
fclose(smaps);
free(line);
return value;
}
static bool is_vma_lock_on_fault(unsigned long addr)
{
bool locked;
unsigned long vma_size, vma_rss;
locked = is_vmflag_set(addr, LOCKED);
if (!locked)
return false;
vma_size = get_value_for_name(addr, SIZE);
vma_rss = get_value_for_name(addr, RSS);
/* only one page is faulted in */
return (vma_rss < vma_size);
}
#define PRESENT_BIT 0x8000000000000000ULL
#define PFN_MASK 0x007FFFFFFFFFFFFFULL
#define UNEVICTABLE_BIT (1UL << 18)
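/*
 * A VMA counts as fully locked when the "lo" VmFlags bit is set and the
 * whole mapping is resident, i.e. Rss equals Size in its smaps entry.
 */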
static int lock_check(unsigned long addr)
{
bool locked;
unsigned long vma_size, vma_rss;
locked = is_vmflag_set(addr, LOCKED);
if (!locked)
return false;
vma_size = get_value_for_name(addr, SIZE);
vma_rss = get_value_for_name(addr, RSS);
return (vma_rss == vma_size);
}
static int unlock_lock_check(char *map)
{
if (is_vmflag_set((unsigned long)map, LOCKED)) {
printf("VMA flag %s is present on page 1 after unlock\n", LOCKED);
return 1;
}
return 0;
}
static int test_mlock_lock()
{
char *map;
int ret = 1;
unsigned long page_size = getpagesize();
map = mmap(NULL, 2 * page_size, PROT_READ | PROT_WRITE,
MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
if (map == MAP_FAILED) {
perror("test_mlock_locked mmap");
goto out;
}
if (mlock2_(map, 2 * page_size, 0)) {
if (errno == ENOSYS) {
printf("Cannot call new mlock family, skipping test\n");
_exit(KSFT_SKIP);
}
perror("mlock2(0)");
goto unmap;
}
if (!lock_check((unsigned long)map))
goto unmap;
/* Now unlock and recheck attributes */
if (munlock(map, 2 * page_size)) {
perror("munlock()");
goto unmap;
}
ret = unlock_lock_check(map);
unmap:
munmap(map, 2 * page_size);
out:
return ret;
}
static int onfault_check(char *map)
{
*map = 'a';
if (!is_vma_lock_on_fault((unsigned long)map)) {
printf("VMA is not marked for lock on fault\n");
return 1;
}
return 0;
}
static int unlock_onfault_check(char *map)
{
unsigned long page_size = getpagesize();
if (is_vma_lock_on_fault((unsigned long)map) ||
is_vma_lock_on_fault((unsigned long)map + page_size)) {
printf("VMA is still lock on fault after unlock\n");
return 1;
}
return 0;
}
static int test_mlock_onfault()
{
char *map;
int ret = 1;
unsigned long page_size = getpagesize();
map = mmap(NULL, 2 * page_size, PROT_READ | PROT_WRITE,
MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
if (map == MAP_FAILED) {
perror("test_mlock_locked mmap");
goto out;
}
if (mlock2_(map, 2 * page_size, MLOCK_ONFAULT)) {
if (errno == ENOSYS) {
printf("Cannot call new mlock family, skipping test\n");
_exit(KSFT_SKIP);
}
perror("mlock2(MLOCK_ONFAULT)");
goto unmap;
}
if (onfault_check(map))
goto unmap;
/* Now unlock and recheck attributes */
if (munlock(map, 2 * page_size)) {
if (errno == ENOSYS) {
printf("Cannot call new mlock family, skipping test\n");
_exit(KSFT_SKIP);
}
perror("munlock()");
goto unmap;
}
ret = unlock_onfault_check(map);
unmap:
munmap(map, 2 * page_size);
out:
return ret;
}
static int test_lock_onfault_of_present()
{
char *map;
int ret = 1;
unsigned long page_size = getpagesize();
map = mmap(NULL, 2 * page_size, PROT_READ | PROT_WRITE,
MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
if (map == MAP_FAILED) {
perror("test_mlock_locked mmap");
goto out;
}
*map = 'a';
if (mlock2_(map, 2 * page_size, MLOCK_ONFAULT)) {
if (errno == ENOSYS) {
printf("Cannot call new mlock family, skipping test\n");
_exit(KSFT_SKIP);
}
perror("mlock2(MLOCK_ONFAULT)");
goto unmap;
}
if (!is_vma_lock_on_fault((unsigned long)map) ||
!is_vma_lock_on_fault((unsigned long)map + page_size)) {
printf("VMA with present pages is not marked lock on fault\n");
goto unmap;
}
ret = 0;
unmap:
munmap(map, 2 * page_size);
out:
return ret;
}
static int test_munlockall()
{
char *map;
int ret = 1;
unsigned long page_size = getpagesize();
map = mmap(NULL, 2 * page_size, PROT_READ | PROT_WRITE,
MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
if (map == MAP_FAILED) {
perror("test_munlockall mmap");
goto out;
}
if (mlockall(MCL_CURRENT)) {
perror("mlockall(MCL_CURRENT)");
goto out;
}
if (!lock_check((unsigned long)map))
goto unmap;
if (munlockall()) {
perror("munlockall()");
goto unmap;
}
if (unlock_lock_check(map))
goto unmap;
munmap(map, 2 * page_size);
map = mmap(NULL, 2 * page_size, PROT_READ | PROT_WRITE,
MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
if (map == MAP_FAILED) {
perror("test_munlockall second mmap");
goto out;
}
if (mlockall(MCL_CURRENT | MCL_ONFAULT)) {
perror("mlockall(MCL_CURRENT | MCL_ONFAULT)");
goto unmap;
}
if (onfault_check(map))
goto unmap;
if (munlockall()) {
perror("munlockall()");
goto unmap;
}
if (unlock_onfault_check(map))
goto unmap;
if (mlockall(MCL_CURRENT | MCL_FUTURE)) {
perror("mlockall(MCL_CURRENT | MCL_FUTURE)");
goto out;
}
if (!lock_check((unsigned long)map))
goto unmap;
if (munlockall()) {
perror("munlockall()");
goto unmap;
}
ret = unlock_lock_check(map);
unmap:
munmap(map, 2 * page_size);
out:
munlockall();
return ret;
}
static int test_vma_management(bool call_mlock)
{
int ret = 1;
void *map;
unsigned long page_size = getpagesize();
struct vm_boundaries page1;
struct vm_boundaries page2;
struct vm_boundaries page3;
map = mmap(NULL, 3 * page_size, PROT_READ | PROT_WRITE,
MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
if (map == MAP_FAILED) {
perror("mmap()");
return ret;
}
if (call_mlock && mlock2_(map, 3 * page_size, MLOCK_ONFAULT)) {
if (errno == ENOSYS) {
printf("Cannot call new mlock family, skipping test\n");
_exit(KSFT_SKIP);
}
perror("mlock(ONFAULT)\n");
goto out;
}
if (get_vm_area((unsigned long)map, &page1) ||
get_vm_area((unsigned long)map + page_size, &page2) ||
get_vm_area((unsigned long)map + page_size * 2, &page3)) {
printf("couldn't find mapping in /proc/self/maps\n");
goto out;
}
/*
 * Before we unlock a portion, we need to check that all three pages are
 * in the same VMA. If they are not, we abort this test (note that this
 * is not a failure).
*/
if (page1.start != page2.start || page2.start != page3.start) {
printf("VMAs are not merged to start, aborting test\n");
ret = 0;
goto out;
}
if (munlock(map + page_size, page_size)) {
perror("munlock()");
goto out;
}
if (get_vm_area((unsigned long)map, &page1) ||
get_vm_area((unsigned long)map + page_size, &page2) ||
get_vm_area((unsigned long)map + page_size * 2, &page3)) {
printf("couldn't find mapping in /proc/self/maps\n");
goto out;
}
/* All three VMAs should be different */
if (page1.start == page2.start || page2.start == page3.start) {
printf("failed to split VMA for munlock\n");
goto out;
}
/* Now unlock the first and third page and check the VMAs again */
if (munlock(map, page_size * 3)) {
perror("munlock()");
goto out;
}
if (get_vm_area((unsigned long)map, &page1) ||
get_vm_area((unsigned long)map + page_size, &page2) ||
get_vm_area((unsigned long)map + page_size * 2, &page3)) {
printf("couldn't find mapping in /proc/self/maps\n");
goto out;
}
/* Now all three VMAs should be the same */
if (page1.start != page2.start || page2.start != page3.start) {
printf("failed to merge VMAs after munlock\n");
goto out;
}
ret = 0;
out:
munmap(map, 3 * page_size);
return ret;
}
static int test_mlockall(int (test_function)(bool call_mlock))
{
int ret = 1;
if (mlockall(MCL_CURRENT | MCL_ONFAULT | MCL_FUTURE)) {
perror("mlockall");
return ret;
}
ret = test_function(false);
munlockall();
return ret;
}
int main(int argc, char **argv)
{
int ret = 0;
ret += test_mlock_lock();
ret += test_mlock_onfault();
ret += test_munlockall();
ret += test_lock_onfault_of_present();
ret += test_vma_management(true);
ret += test_mlockall(test_vma_management);
return ret;
}
| linux-master | tools/testing/selftests/mm/mlock2-tests.c |
/*
* Stress test for transparent huge pages, memory compaction and migration.
*
* Authors: Konstantin Khlebnikov <[email protected]>
*
* This is free and unencumbered software released into the public domain.
*/
#include <stdlib.h>
#include <stdio.h>
#include <stdint.h>
#include <err.h>
#include <time.h>
#include <unistd.h>
#include <fcntl.h>
#include <string.h>
#include <sys/mman.h>
#include "vm_util.h"
int backing_fd = -1;
int mmap_flags = MAP_ANONYMOUS | MAP_NORESERVE | MAP_PRIVATE;
#define PROT_RW (PROT_READ | PROT_WRITE)
int main(int argc, char **argv)
{
size_t ram, len;
void *ptr, *p;
struct timespec start, a, b;
int i = 0;
char *name = NULL;
double s;
uint8_t *map;
size_t map_len;
int pagemap_fd;
int duration = 0;
ram = sysconf(_SC_PHYS_PAGES);
if (ram > SIZE_MAX / psize() / 4)
ram = SIZE_MAX / 4;
else
ram *= psize();
len = ram;
while (++i < argc) {
if (!strcmp(argv[i], "-h"))
errx(1, "usage: %s [-f <filename>] [-d <duration>] [size in MiB]", argv[0]);
else if (!strcmp(argv[i], "-f"))
name = argv[++i];
else if (!strcmp(argv[i], "-d"))
duration = atoi(argv[++i]);
else
len = atoll(argv[i]) << 20;
}
if (name) {
backing_fd = open(name, O_RDWR);
if (backing_fd == -1)
errx(2, "open %s", name);
mmap_flags = MAP_SHARED;
}
warnx("allocate %zd transhuge pages, using %zd MiB virtual memory"
" and %zd MiB of ram", len >> HPAGE_SHIFT, len >> 20,
ram >> (20 + HPAGE_SHIFT - pshift() - 1));
pagemap_fd = open("/proc/self/pagemap", O_RDONLY);
if (pagemap_fd < 0)
err(2, "open pagemap");
len -= len % HPAGE_SIZE;
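/*
 * Map one extra huge page worth of address space so the pointer can be
 * rounded up to a huge page boundary below.
 */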
ptr = mmap(NULL, len + HPAGE_SIZE, PROT_RW, mmap_flags, backing_fd, 0);
if (ptr == MAP_FAILED)
err(2, "initial mmap");
ptr += HPAGE_SIZE - (uintptr_t)ptr % HPAGE_SIZE;
if (madvise(ptr, len, MADV_HUGEPAGE))
err(2, "MADV_HUGEPAGE");
map_len = ram >> (HPAGE_SHIFT - 1);
map = malloc(map_len);
if (!map)
errx(2, "map malloc");
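/*
 * map[] records which physical huge-frame indexes have been seen, so each
 * loop iteration can count how many distinct huge pages were handed out.
 */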
clock_gettime(CLOCK_MONOTONIC, &start);
while (1) {
int nr_succeed = 0, nr_failed = 0, nr_pages = 0;
memset(map, 0, map_len);
clock_gettime(CLOCK_MONOTONIC, &a);
for (p = ptr; p < ptr + len; p += HPAGE_SIZE) {
int64_t pfn;
pfn = allocate_transhuge(p, pagemap_fd);
if (pfn < 0) {
nr_failed++;
} else {
size_t idx = pfn >> (HPAGE_SHIFT - pshift());
nr_succeed++;
if (idx >= map_len) {
map = realloc(map, idx + 1);
if (!map)
errx(2, "map realloc");
memset(map + map_len, 0, idx + 1 - map_len);
map_len = idx + 1;
}
if (!map[idx])
nr_pages++;
map[idx] = 1;
}
/* split transhuge page, keep last page */
if (madvise(p, HPAGE_SIZE - psize(), MADV_DONTNEED))
err(2, "MADV_DONTNEED");
}
clock_gettime(CLOCK_MONOTONIC, &b);
s = b.tv_sec - a.tv_sec + (b.tv_nsec - a.tv_nsec) / 1000000000.;
warnx("%.3f s/loop, %.3f ms/page, %10.3f MiB/s\t"
"%4d succeed, %4d failed, %4d different pages",
s, s * 1000 / (len >> HPAGE_SHIFT), len / s / (1 << 20),
nr_succeed, nr_failed, nr_pages);
if (duration > 0 && b.tv_sec - start.tv_sec >= duration)
return 0;
}
}
| linux-master | tools/testing/selftests/mm/transhuge-stress.c |
// SPDX-License-Identifier: GPL-2.0
/*
* hugepage-mremap:
*
* Example of remapping huge page memory in a user application using the
 * mremap system call. The test allocates its huge pages from a hugetlb
 * memfd, so no hugetlbfs file path is needed. The amount of memory used
 * by this test in MBs can optionally be passed as an argument. If no memory
 * amount is passed, the default amount is 10MB.
*
* To make sure the test triggers pmd sharing and goes through the 'unshare'
* path in the mremap code use 1GB (1024) or more.
*/
#define _GNU_SOURCE
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/mman.h>
#include <errno.h>
#include <fcntl.h> /* Definition of O_* constants */
#include <sys/syscall.h> /* Definition of SYS_* constants */
#include <linux/userfaultfd.h>
#include <sys/ioctl.h>
#include <string.h>
#include <stdbool.h>
#include "vm_util.h"
#define DEFAULT_LENGTH_MB 10UL
#define MB_TO_BYTES(x) ((x) * 1024 * 1024)
#define PROTECTION (PROT_READ | PROT_WRITE | PROT_EXEC)
#define FLAGS (MAP_SHARED | MAP_ANONYMOUS)
static void check_bytes(char *addr)
{
printf("First hex is %x\n", *((unsigned int *)addr));
}
static void write_bytes(char *addr, size_t len)
{
unsigned long i;
for (i = 0; i < len; i++)
*(addr + i) = (char)i;
}
static int read_bytes(char *addr, size_t len)
{
unsigned long i;
check_bytes(addr);
for (i = 0; i < len; i++)
if (*(addr + i) != (char)i) {
printf("Mismatch at %lu\n", i);
return 1;
}
return 0;
}
static void register_region_with_uffd(char *addr, size_t len)
{
long uffd; /* userfaultfd file descriptor */
struct uffdio_api uffdio_api;
/* Create and enable userfaultfd object. */
uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
if (uffd == -1) {
perror("userfaultfd");
exit(1);
}
uffdio_api.api = UFFD_API;
uffdio_api.features = 0;
if (ioctl(uffd, UFFDIO_API, &uffdio_api) == -1) {
perror("ioctl-UFFDIO_API");
exit(1);
}
/* Create a private anonymous mapping. The memory will be
* demand-zero paged--that is, not yet allocated. When we
* actually touch the memory, it will be allocated via
* the userfaultfd.
*/
addr = mmap(NULL, len, PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
if (addr == MAP_FAILED) {
perror("mmap");
exit(1);
}
printf("Address returned by mmap() = %p\n", addr);
/* Register the memory range of the mapping we just created for
 * handling by the userfaultfd object. In the registration mode, we request to track
* missing pages (i.e., pages that have not yet been faulted in).
*/
if (uffd_register(uffd, addr, len, true, false, false)) {
perror("ioctl-UFFDIO_REGISTER");
exit(1);
}
}
int main(int argc, char *argv[])
{
size_t length = 0;
int ret = 0, fd;
if (argc >= 2 && !strcmp(argv[1], "-h")) {
printf("Usage: %s [length_in_MB]\n", argv[0]);
exit(1);
}
/* Read memory length as the first arg if valid, otherwise fallback to
* the default length.
*/
if (argc >= 2)
length = (size_t)atoi(argv[1]);
else
length = DEFAULT_LENGTH_MB;
length = MB_TO_BYTES(length);
fd = memfd_create(argv[0], MFD_HUGETLB);
if (fd < 0) {
perror("Open failed");
exit(1);
}
/* mmap to a PUD aligned address to hopefully trigger pmd sharing. */
unsigned long suggested_addr = 0x7eaa40000000;
void *haddr = mmap((void *)suggested_addr, length, PROTECTION,
MAP_HUGETLB | MAP_SHARED | MAP_POPULATE, fd, 0);
printf("Map haddr: Returned address is %p\n", haddr);
if (haddr == MAP_FAILED) {
perror("mmap1");
exit(1);
}
/* mmap again to a dummy address to hopefully trigger pmd sharing. */
suggested_addr = 0x7daa40000000;
void *daddr = mmap((void *)suggested_addr, length, PROTECTION,
MAP_HUGETLB | MAP_SHARED | MAP_POPULATE, fd, 0);
printf("Map daddr: Returned address is %p\n", daddr);
if (daddr == MAP_FAILED) {
perror("mmap3");
exit(1);
}
suggested_addr = 0x7faa40000000;
void *vaddr =
mmap((void *)suggested_addr, length, PROTECTION, FLAGS, -1, 0);
printf("Map vaddr: Returned address is %p\n", vaddr);
if (vaddr == MAP_FAILED) {
perror("mmap2");
exit(1);
}
register_region_with_uffd(haddr, length);
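/* Move the huge page mapping at haddr onto the reserved region at vaddr. */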
void *addr = mremap(haddr, length, length,
MREMAP_MAYMOVE | MREMAP_FIXED, vaddr);
if (addr == MAP_FAILED) {
perror("mremap");
exit(1);
}
printf("Mremap: Returned address is %p\n", addr);
check_bytes(addr);
write_bytes(addr, length);
ret = read_bytes(addr, length);
munmap(addr, length);
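/* The region was just unmapped, so this mremap is expected to fail. */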
addr = mremap(addr, length, length, 0);
if (addr != MAP_FAILED) {
printf("mremap: Expected failure, but call succeeded\n");
exit(1);
}
close(fd);
return ret;
}
| linux-master | tools/testing/selftests/mm/hugepage-mremap.c |
// SPDX-License-Identifier: GPL-2.0
/*
* HMM stands for Heterogeneous Memory Management, it is a helper layer inside
* the linux kernel to help device drivers mirror a process address space in
* the device. This allows the device to use the same address space which
* makes communication and data exchange a lot easier.
*
* This framework's sole purpose is to exercise various code paths inside
* the kernel to make sure that HMM performs as expected and to flush out any
* bugs.
*/
#include "../kselftest_harness.h"
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <unistd.h>
#include <strings.h>
#include <time.h>
#include <pthread.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <sys/ioctl.h>
/*
* This is a private UAPI to the kernel test module so it isn't exported
* in the usual include/uapi/... directory.
*/
#include <lib/test_hmm_uapi.h>
#include <mm/gup_test.h>
struct hmm_buffer {
void *ptr;
void *mirror;
unsigned long size;
int fd;
uint64_t cpages;
uint64_t faults;
};
enum {
HMM_PRIVATE_DEVICE_ONE,
HMM_PRIVATE_DEVICE_TWO,
HMM_COHERENCE_DEVICE_ONE,
HMM_COHERENCE_DEVICE_TWO,
};
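/*
 * The first two device numbers are device-private dmirror instances, the
 * last two are device-coherent ones (see hmm_is_coherent_type()).
 */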
#define TWOMEG (1 << 21)
#define HMM_BUFFER_SIZE (1024 << 12)
#define HMM_PATH_MAX 64
#define NTIMES 10
#define ALIGN(x, a) (((x) + ((a) - 1)) & (~((a) - 1)))
/* Just the flags we need, copied from mm.h: */
#ifndef FOLL_WRITE
#define FOLL_WRITE 0x01 /* check pte is writable */
#endif
#ifndef FOLL_LONGTERM
#define FOLL_LONGTERM 0x100 /* mapping lifetime is indefinite */
#endif
FIXTURE(hmm)
{
int fd;
unsigned int page_size;
unsigned int page_shift;
};
FIXTURE_VARIANT(hmm)
{
int device_number;
};
FIXTURE_VARIANT_ADD(hmm, hmm_device_private)
{
.device_number = HMM_PRIVATE_DEVICE_ONE,
};
FIXTURE_VARIANT_ADD(hmm, hmm_device_coherent)
{
.device_number = HMM_COHERENCE_DEVICE_ONE,
};
FIXTURE(hmm2)
{
int fd0;
int fd1;
unsigned int page_size;
unsigned int page_shift;
};
FIXTURE_VARIANT(hmm2)
{
int device_number0;
int device_number1;
};
FIXTURE_VARIANT_ADD(hmm2, hmm2_device_private)
{
.device_number0 = HMM_PRIVATE_DEVICE_ONE,
.device_number1 = HMM_PRIVATE_DEVICE_TWO,
};
FIXTURE_VARIANT_ADD(hmm2, hmm2_device_coherent)
{
.device_number0 = HMM_COHERENCE_DEVICE_ONE,
.device_number1 = HMM_COHERENCE_DEVICE_TWO,
};
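/* Open the dmirror driver instance /dev/hmm_dmirrorN for the given unit. */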
static int hmm_open(int unit)
{
char pathname[HMM_PATH_MAX];
int fd;
snprintf(pathname, sizeof(pathname), "/dev/hmm_dmirror%d", unit);
fd = open(pathname, O_RDWR, 0);
if (fd < 0)
fprintf(stderr, "could not open hmm dmirror driver (%s)\n",
pathname);
return fd;
}
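/* True if the device number refers to a coherent device rather than a device private one. */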
static bool hmm_is_coherent_type(int dev_num)
{
return (dev_num >= HMM_COHERENCE_DEVICE_ONE);
}
FIXTURE_SETUP(hmm)
{
self->page_size = sysconf(_SC_PAGE_SIZE);
self->page_shift = ffs(self->page_size) - 1;
self->fd = hmm_open(variant->device_number);
if (self->fd < 0 && hmm_is_coherent_type(variant->device_number))
SKIP(exit(0), "DEVICE_COHERENT not available");
ASSERT_GE(self->fd, 0);
}
FIXTURE_SETUP(hmm2)
{
self->page_size = sysconf(_SC_PAGE_SIZE);
self->page_shift = ffs(self->page_size) - 1;
self->fd0 = hmm_open(variant->device_number0);
if (self->fd0 < 0 && hmm_is_coherent_type(variant->device_number0))
SKIP(exit(0), "DEVICE_COHERENT not available");
ASSERT_GE(self->fd0, 0);
self->fd1 = hmm_open(variant->device_number1);
ASSERT_GE(self->fd1, 0);
}
FIXTURE_TEARDOWN(hmm)
{
int ret = close(self->fd);
ASSERT_EQ(ret, 0);
self->fd = -1;
}
FIXTURE_TEARDOWN(hmm2)
{
int ret = close(self->fd0);
ASSERT_EQ(ret, 0);
self->fd0 = -1;
ret = close(self->fd1);
ASSERT_EQ(ret, 0);
self->fd1 = -1;
}
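/*
* Send one dmirror command for npages starting at buffer->ptr, retrying on
* EINTR, and save the page and fault counts reported by the driver.
*/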
static int hmm_dmirror_cmd(int fd,
unsigned long request,
struct hmm_buffer *buffer,
unsigned long npages)
{
struct hmm_dmirror_cmd cmd;
int ret;
/* Fill in the request that will be passed to the dmirror driver. */
cmd.addr = (__u64)buffer->ptr;
cmd.ptr = (__u64)buffer->mirror;
cmd.npages = npages;
for (;;) {
ret = ioctl(fd, request, &cmd);
if (ret == 0)
break;
if (errno == EINTR)
continue;
return -errno;
}
buffer->cpages = cmd.cpages;
buffer->faults = cmd.faults;
return 0;
}
static void hmm_buffer_free(struct hmm_buffer *buffer)
{
if (buffer == NULL)
return;
if (buffer->ptr)
munmap(buffer->ptr, buffer->size);
free(buffer->mirror);
free(buffer);
}
/*
* Create a temporary file that will be deleted on close.
*/
static int hmm_create_file(unsigned long size)
{
char path[HMM_PATH_MAX];
int fd;
strcpy(path, "/tmp");
fd = open(path, O_TMPFILE | O_EXCL | O_RDWR, 0600);
if (fd >= 0) {
int r;
do {
r = ftruncate(fd, size);
} while (r == -1 && errno == EINTR);
if (!r)
return fd;
close(fd);
}
return -1;
}
/*
* Return a random unsigned number.
*/
static unsigned int hmm_random(void)
{
static int fd = -1;
unsigned int r;
if (fd < 0) {
fd = open("/dev/urandom", O_RDONLY);
if (fd < 0) {
fprintf(stderr, "%s:%d failed to open /dev/urandom\n",
__FILE__, __LINE__);
return ~0U;
}
}
read(fd, &r, sizeof(r));
return r;
}
static void hmm_nanosleep(unsigned int n)
{
struct timespec t;
t.tv_sec = 0;
t.tv_nsec = n;
nanosleep(&t, NULL);
}
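/* Convenience wrappers for migrating a buffer between system and device memory. */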
static int hmm_migrate_sys_to_dev(int fd,
struct hmm_buffer *buffer,
unsigned long npages)
{
return hmm_dmirror_cmd(fd, HMM_DMIRROR_MIGRATE_TO_DEV, buffer, npages);
}
static int hmm_migrate_dev_to_sys(int fd,
struct hmm_buffer *buffer,
unsigned long npages)
{
return hmm_dmirror_cmd(fd, HMM_DMIRROR_MIGRATE_TO_SYS, buffer, npages);
}
/*
* Simple NULL test of device open/close.
*/
TEST_F(hmm, open_close)
{
}
/*
* Read private anonymous memory.
*/
TEST_F(hmm, anon_read)
{
struct hmm_buffer *buffer;
unsigned long npages;
unsigned long size;
unsigned long i;
int *ptr;
int ret;
int val;
npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
ASSERT_NE(npages, 0);
size = npages << self->page_shift;
buffer = malloc(sizeof(*buffer));
ASSERT_NE(buffer, NULL);
buffer->fd = -1;
buffer->size = size;
buffer->mirror = malloc(size);
ASSERT_NE(buffer->mirror, NULL);
buffer->ptr = mmap(NULL, size,
PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS,
buffer->fd, 0);
ASSERT_NE(buffer->ptr, MAP_FAILED);
/*
* Initialize buffer in system memory but leave the first two pages
* zero (pte_none and pfn_zero).
*/
i = 2 * self->page_size / sizeof(*ptr);
for (ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
ptr[i] = i;
/* Set buffer permission to read-only. */
ret = mprotect(buffer->ptr, size, PROT_READ);
ASSERT_EQ(ret, 0);
/* Populate the CPU page table with a special zero page. */
val = *(int *)(buffer->ptr + self->page_size);
ASSERT_EQ(val, 0);
/* Simulate a device reading system memory. */
ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_READ, buffer, npages);
ASSERT_EQ(ret, 0);
ASSERT_EQ(buffer->cpages, npages);
ASSERT_EQ(buffer->faults, 1);
/* Check what the device read. */
ptr = buffer->mirror;
for (i = 0; i < 2 * self->page_size / sizeof(*ptr); ++i)
ASSERT_EQ(ptr[i], 0);
for (; i < size / sizeof(*ptr); ++i)
ASSERT_EQ(ptr[i], i);
hmm_buffer_free(buffer);
}
/*
* Read private anonymous memory which has been protected with
* mprotect() PROT_NONE.
*/
TEST_F(hmm, anon_read_prot)
{
struct hmm_buffer *buffer;
unsigned long npages;
unsigned long size;
unsigned long i;
int *ptr;
int ret;
npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
ASSERT_NE(npages, 0);
size = npages << self->page_shift;
buffer = malloc(sizeof(*buffer));
ASSERT_NE(buffer, NULL);
buffer->fd = -1;
buffer->size = size;
buffer->mirror = malloc(size);
ASSERT_NE(buffer->mirror, NULL);
buffer->ptr = mmap(NULL, size,
PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS,
buffer->fd, 0);
ASSERT_NE(buffer->ptr, MAP_FAILED);
/* Initialize buffer in system memory. */
for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
ptr[i] = i;
/* Initialize mirror buffer so we can verify it isn't written. */
for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
ptr[i] = -i;
/* Protect buffer from reading. */
ret = mprotect(buffer->ptr, size, PROT_NONE);
ASSERT_EQ(ret, 0);
/* Simulate a device reading system memory. */
ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_READ, buffer, npages);
ASSERT_EQ(ret, -EFAULT);
/* Allow CPU to read the buffer so we can check it. */
ret = mprotect(buffer->ptr, size, PROT_READ);
ASSERT_EQ(ret, 0);
for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
ASSERT_EQ(ptr[i], i);
/* Check what the device read. */
for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
ASSERT_EQ(ptr[i], -i);
hmm_buffer_free(buffer);
}
/*
* Write private anonymous memory.
*/
TEST_F(hmm, anon_write)
{
struct hmm_buffer *buffer;
unsigned long npages;
unsigned long size;
unsigned long i;
int *ptr;
int ret;
npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
ASSERT_NE(npages, 0);
size = npages << self->page_shift;
buffer = malloc(sizeof(*buffer));
ASSERT_NE(buffer, NULL);
buffer->fd = -1;
buffer->size = size;
buffer->mirror = malloc(size);
ASSERT_NE(buffer->mirror, NULL);
buffer->ptr = mmap(NULL, size,
PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS,
buffer->fd, 0);
ASSERT_NE(buffer->ptr, MAP_FAILED);
/* Initialize data that the device will write to buffer->ptr. */
for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
ptr[i] = i;
/* Simulate a device writing system memory. */
ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_WRITE, buffer, npages);
ASSERT_EQ(ret, 0);
ASSERT_EQ(buffer->cpages, npages);
ASSERT_EQ(buffer->faults, 1);
/* Check what the device wrote. */
for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
ASSERT_EQ(ptr[i], i);
hmm_buffer_free(buffer);
}
/*
* Write private anonymous memory which has been protected with
* mprotect() PROT_READ.
*/
TEST_F(hmm, anon_write_prot)
{
struct hmm_buffer *buffer;
unsigned long npages;
unsigned long size;
unsigned long i;
int *ptr;
int ret;
npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
ASSERT_NE(npages, 0);
size = npages << self->page_shift;
buffer = malloc(sizeof(*buffer));
ASSERT_NE(buffer, NULL);
buffer->fd = -1;
buffer->size = size;
buffer->mirror = malloc(size);
ASSERT_NE(buffer->mirror, NULL);
buffer->ptr = mmap(NULL, size,
PROT_READ,
MAP_PRIVATE | MAP_ANONYMOUS,
buffer->fd, 0);
ASSERT_NE(buffer->ptr, MAP_FAILED);
/* Simulate a device reading a zero page of memory. */
ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_READ, buffer, 1);
ASSERT_EQ(ret, 0);
ASSERT_EQ(buffer->cpages, 1);
ASSERT_EQ(buffer->faults, 1);
/* Initialize data that the device will write to buffer->ptr. */
for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
ptr[i] = i;
/* Simulate a device writing system memory. */
ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_WRITE, buffer, npages);
ASSERT_EQ(ret, -EPERM);
/* Check that the device did not write anything (values are still zero). */
for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
ASSERT_EQ(ptr[i], 0);
/* Now allow writing and see that the zero page is replaced. */
ret = mprotect(buffer->ptr, size, PROT_WRITE | PROT_READ);
ASSERT_EQ(ret, 0);
/* Simulate a device writing system memory. */
ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_WRITE, buffer, npages);
ASSERT_EQ(ret, 0);
ASSERT_EQ(buffer->cpages, npages);
ASSERT_EQ(buffer->faults, 1);
/* Check what the device wrote. */
for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
ASSERT_EQ(ptr[i], i);
hmm_buffer_free(buffer);
}
/*
* Check that a device writing an anonymous private mapping
* will copy-on-write if a child process inherits the mapping.
*/
TEST_F(hmm, anon_write_child)
{
struct hmm_buffer *buffer;
unsigned long npages;
unsigned long size;
unsigned long i;
int *ptr;
pid_t pid;
int child_fd;
int ret;
npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
ASSERT_NE(npages, 0);
size = npages << self->page_shift;
buffer = malloc(sizeof(*buffer));
ASSERT_NE(buffer, NULL);
buffer->fd = -1;
buffer->size = size;
buffer->mirror = malloc(size);
ASSERT_NE(buffer->mirror, NULL);
buffer->ptr = mmap(NULL, size,
PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS,
buffer->fd, 0);
ASSERT_NE(buffer->ptr, MAP_FAILED);
/* Initialize buffer->ptr so we can tell if it is written. */
for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
ptr[i] = i;
/* Initialize data that the device will write to buffer->ptr. */
for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
ptr[i] = -i;
pid = fork();
if (pid == -1)
ASSERT_EQ(pid, 0);
if (pid != 0) {
waitpid(pid, &ret, 0);
ASSERT_EQ(WIFEXITED(ret), 1);
/* Check that the parent's buffer did not change. */
for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
ASSERT_EQ(ptr[i], i);
return;
}
/* Check that we see the parent's values. */
for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
ASSERT_EQ(ptr[i], i);
for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
ASSERT_EQ(ptr[i], -i);
/* The child process needs its own mirror to its own mm. */
child_fd = hmm_open(0);
ASSERT_GE(child_fd, 0);
/* Simulate a device writing system memory. */
ret = hmm_dmirror_cmd(child_fd, HMM_DMIRROR_WRITE, buffer, npages);
ASSERT_EQ(ret, 0);
ASSERT_EQ(buffer->cpages, npages);
ASSERT_EQ(buffer->faults, 1);
/* Check what the device wrote. */
for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
ASSERT_EQ(ptr[i], -i);
close(child_fd);
exit(0);
}
/*
* Check that a device writing an anonymous shared mapping
* will not copy-on-write if a child process inherits the mapping.
*/
TEST_F(hmm, anon_write_child_shared)
{
struct hmm_buffer *buffer;
unsigned long npages;
unsigned long size;
unsigned long i;
int *ptr;
pid_t pid;
int child_fd;
int ret;
npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
ASSERT_NE(npages, 0);
size = npages << self->page_shift;
buffer = malloc(sizeof(*buffer));
ASSERT_NE(buffer, NULL);
buffer->fd = -1;
buffer->size = size;
buffer->mirror = malloc(size);
ASSERT_NE(buffer->mirror, NULL);
buffer->ptr = mmap(NULL, size,
PROT_READ | PROT_WRITE,
MAP_SHARED | MAP_ANONYMOUS,
buffer->fd, 0);
ASSERT_NE(buffer->ptr, MAP_FAILED);
/* Initialize buffer->ptr so we can tell if it is written. */
for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
ptr[i] = i;
/* Initialize data that the device will write to buffer->ptr. */
for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
ptr[i] = -i;
pid = fork();
if (pid == -1)
ASSERT_EQ(pid, 0);
if (pid != 0) {
waitpid(pid, &ret, 0);
ASSERT_EQ(WIFEXITED(ret), 1);
/* Check that the parent's buffer did change. */
for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
ASSERT_EQ(ptr[i], -i);
return;
}
/* Check that we see the parent's values. */
for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
ASSERT_EQ(ptr[i], i);
for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
ASSERT_EQ(ptr[i], -i);
/* The child process needs its own mirror to its own mm. */
child_fd = hmm_open(0);
ASSERT_GE(child_fd, 0);
/* Simulate a device writing system memory. */
ret = hmm_dmirror_cmd(child_fd, HMM_DMIRROR_WRITE, buffer, npages);
ASSERT_EQ(ret, 0);
ASSERT_EQ(buffer->cpages, npages);
ASSERT_EQ(buffer->faults, 1);
/* Check what the device wrote. */
for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
ASSERT_EQ(ptr[i], -i);
close(child_fd);
exit(0);
}
/*
* Write private anonymous huge page.
*/
TEST_F(hmm, anon_write_huge)
{
struct hmm_buffer *buffer;
unsigned long npages;
unsigned long size;
unsigned long i;
void *old_ptr;
void *map;
int *ptr;
int ret;
size = 2 * TWOMEG;
buffer = malloc(sizeof(*buffer));
ASSERT_NE(buffer, NULL);
buffer->fd = -1;
buffer->size = size;
buffer->mirror = malloc(size);
ASSERT_NE(buffer->mirror, NULL);
buffer->ptr = mmap(NULL, size,
PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS,
buffer->fd, 0);
ASSERT_NE(buffer->ptr, MAP_FAILED);
size = TWOMEG;
npages = size >> self->page_shift;
map = (void *)ALIGN((uintptr_t)buffer->ptr, size);
ret = madvise(map, size, MADV_HUGEPAGE);
ASSERT_EQ(ret, 0);
old_ptr = buffer->ptr;
buffer->ptr = map;
/* Initialize data that the device will write to buffer->ptr. */
for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
ptr[i] = i;
/* Simulate a device writing system memory. */
ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_WRITE, buffer, npages);
ASSERT_EQ(ret, 0);
ASSERT_EQ(buffer->cpages, npages);
ASSERT_EQ(buffer->faults, 1);
/* Check what the device wrote. */
for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
ASSERT_EQ(ptr[i], i);
buffer->ptr = old_ptr;
hmm_buffer_free(buffer);
}
/*
* Read numeric data from raw and tagged kernel status files. Used to read
* /proc and /sys data (without a tag) and from /proc/meminfo (with a tag).
*/
static long file_read_ulong(char *file, const char *tag)
{
int fd;
char buf[2048];
int len;
char *p, *q;
long val;
fd = open(file, O_RDONLY);
if (fd < 0) {
/* Error opening the file */
return -1;
}
len = read(fd, buf, sizeof(buf));
close(fd);
if (len < 0) {
/* Error in reading the file */
return -1;
}
if (len == sizeof(buf)) {
/* Error file is too large */
return -1;
}
buf[len] = '\0';
/* Search for a tag if provided */
if (tag) {
p = strstr(buf, tag);
if (!p)
return -1; /* looks like the line we want isn't there */
p += strlen(tag);
} else
p = buf;
val = strtol(p, &q, 0);
if (*q != ' ') {
/* Error parsing the file */
return -1;
}
return val;
}
/*
* Write huge TLBFS page.
*/
TEST_F(hmm, anon_write_hugetlbfs)
{
struct hmm_buffer *buffer;
unsigned long npages;
unsigned long size;
unsigned long default_hsize;
unsigned long i;
int *ptr;
int ret;
default_hsize = file_read_ulong("/proc/meminfo", "Hugepagesize:");
if (default_hsize < 0 || default_hsize*1024 < default_hsize)
SKIP(return, "Huge page size could not be determined");
default_hsize = default_hsize*1024; /* KB to B */
size = ALIGN(TWOMEG, default_hsize);
npages = size >> self->page_shift;
buffer = malloc(sizeof(*buffer));
ASSERT_NE(buffer, NULL);
buffer->ptr = mmap(NULL, size,
PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB,
-1, 0);
if (buffer->ptr == MAP_FAILED) {
free(buffer);
SKIP(return, "Huge page could not be allocated");
}
buffer->fd = -1;
buffer->size = size;
buffer->mirror = malloc(size);
ASSERT_NE(buffer->mirror, NULL);
/* Initialize data that the device will write to buffer->ptr. */
for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
ptr[i] = i;
/* Simulate a device writing system memory. */
ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_WRITE, buffer, npages);
ASSERT_EQ(ret, 0);
ASSERT_EQ(buffer->cpages, npages);
ASSERT_EQ(buffer->faults, 1);
/* Check what the device wrote. */
for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
ASSERT_EQ(ptr[i], i);
munmap(buffer->ptr, buffer->size);
buffer->ptr = NULL;
hmm_buffer_free(buffer);
}
/*
* Read mmap'ed file memory.
*/
TEST_F(hmm, file_read)
{
struct hmm_buffer *buffer;
unsigned long npages;
unsigned long size;
unsigned long i;
int *ptr;
int ret;
int fd;
ssize_t len;
npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
ASSERT_NE(npages, 0);
size = npages << self->page_shift;
fd = hmm_create_file(size);
ASSERT_GE(fd, 0);
buffer = malloc(sizeof(*buffer));
ASSERT_NE(buffer, NULL);
buffer->fd = fd;
buffer->size = size;
buffer->mirror = malloc(size);
ASSERT_NE(buffer->mirror, NULL);
/* Write initial contents of the file. */
for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
ptr[i] = i;
len = pwrite(fd, buffer->mirror, size, 0);
ASSERT_EQ(len, size);
memset(buffer->mirror, 0, size);
buffer->ptr = mmap(NULL, size,
PROT_READ,
MAP_SHARED,
buffer->fd, 0);
ASSERT_NE(buffer->ptr, MAP_FAILED);
/* Simulate a device reading system memory. */
ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_READ, buffer, npages);
ASSERT_EQ(ret, 0);
ASSERT_EQ(buffer->cpages, npages);
ASSERT_EQ(buffer->faults, 1);
/* Check what the device read. */
for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
ASSERT_EQ(ptr[i], i);
hmm_buffer_free(buffer);
}
/*
* Write mmap'ed file memory.
*/
TEST_F(hmm, file_write)
{
struct hmm_buffer *buffer;
unsigned long npages;
unsigned long size;
unsigned long i;
int *ptr;
int ret;
int fd;
ssize_t len;
npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
ASSERT_NE(npages, 0);
size = npages << self->page_shift;
fd = hmm_create_file(size);
ASSERT_GE(fd, 0);
buffer = malloc(sizeof(*buffer));
ASSERT_NE(buffer, NULL);
buffer->fd = fd;
buffer->size = size;
buffer->mirror = malloc(size);
ASSERT_NE(buffer->mirror, NULL);
buffer->ptr = mmap(NULL, size,
PROT_READ | PROT_WRITE,
MAP_SHARED,
buffer->fd, 0);
ASSERT_NE(buffer->ptr, MAP_FAILED);
/* Initialize data that the device will write to buffer->ptr. */
for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
ptr[i] = i;
/* Simulate a device writing system memory. */
ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_WRITE, buffer, npages);
ASSERT_EQ(ret, 0);
ASSERT_EQ(buffer->cpages, npages);
ASSERT_EQ(buffer->faults, 1);
/* Check what the device wrote. */
for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
ASSERT_EQ(ptr[i], i);
/* Check that the device also wrote the file. */
len = pread(fd, buffer->mirror, size, 0);
ASSERT_EQ(len, size);
for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
ASSERT_EQ(ptr[i], i);
hmm_buffer_free(buffer);
}
/*
* Migrate anonymous memory to device private memory.
*/
TEST_F(hmm, migrate)
{
struct hmm_buffer *buffer;
unsigned long npages;
unsigned long size;
unsigned long i;
int *ptr;
int ret;
npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
ASSERT_NE(npages, 0);
size = npages << self->page_shift;
buffer = malloc(sizeof(*buffer));
ASSERT_NE(buffer, NULL);
buffer->fd = -1;
buffer->size = size;
buffer->mirror = malloc(size);
ASSERT_NE(buffer->mirror, NULL);
buffer->ptr = mmap(NULL, size,
PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS,
buffer->fd, 0);
ASSERT_NE(buffer->ptr, MAP_FAILED);
/* Initialize buffer in system memory. */
for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
ptr[i] = i;
/* Migrate memory to device. */
ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages);
ASSERT_EQ(ret, 0);
ASSERT_EQ(buffer->cpages, npages);
/* Check what the device read. */
for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
ASSERT_EQ(ptr[i], i);
hmm_buffer_free(buffer);
}
/*
* Migrate anonymous memory to device private memory and fault some of it back
* to system memory, then try migrating the resulting mix of system and device
* private memory to the device.
*/
TEST_F(hmm, migrate_fault)
{
struct hmm_buffer *buffer;
unsigned long npages;
unsigned long size;
unsigned long i;
int *ptr;
int ret;
npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
ASSERT_NE(npages, 0);
size = npages << self->page_shift;
buffer = malloc(sizeof(*buffer));
ASSERT_NE(buffer, NULL);
buffer->fd = -1;
buffer->size = size;
buffer->mirror = malloc(size);
ASSERT_NE(buffer->mirror, NULL);
buffer->ptr = mmap(NULL, size,
PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS,
buffer->fd, 0);
ASSERT_NE(buffer->ptr, MAP_FAILED);
/* Initialize buffer in system memory. */
for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
ptr[i] = i;
/* Migrate memory to device. */
ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages);
ASSERT_EQ(ret, 0);
ASSERT_EQ(buffer->cpages, npages);
/* Check what the device read. */
for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
ASSERT_EQ(ptr[i], i);
/* Fault half the pages back to system memory and check them. */
for (i = 0, ptr = buffer->ptr; i < size / (2 * sizeof(*ptr)); ++i)
ASSERT_EQ(ptr[i], i);
/* Migrate memory to the device again. */
ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages);
ASSERT_EQ(ret, 0);
ASSERT_EQ(buffer->cpages, npages);
/* Check what the device read. */
for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
ASSERT_EQ(ptr[i], i);
hmm_buffer_free(buffer);
}
TEST_F(hmm, migrate_release)
{
struct hmm_buffer *buffer;
unsigned long npages;
unsigned long size;
unsigned long i;
int *ptr;
int ret;
npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
ASSERT_NE(npages, 0);
size = npages << self->page_shift;
buffer = malloc(sizeof(*buffer));
ASSERT_NE(buffer, NULL);
buffer->fd = -1;
buffer->size = size;
buffer->mirror = malloc(size);
ASSERT_NE(buffer->mirror, NULL);
buffer->ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS, buffer->fd, 0);
ASSERT_NE(buffer->ptr, MAP_FAILED);
/* Initialize buffer in system memory. */
for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
ptr[i] = i;
/* Migrate memory to device. */
ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages);
ASSERT_EQ(ret, 0);
ASSERT_EQ(buffer->cpages, npages);
/* Check what the device read. */
for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
ASSERT_EQ(ptr[i], i);
/* Release device memory. */
ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_RELEASE, buffer, npages);
ASSERT_EQ(ret, 0);
/* Fault pages back to system memory and check them. */
for (i = 0, ptr = buffer->ptr; i < size / (2 * sizeof(*ptr)); ++i)
ASSERT_EQ(ptr[i], i);
hmm_buffer_free(buffer);
}
/*
* Migrate anonymous shared memory to device private memory.
*/
TEST_F(hmm, migrate_shared)
{
struct hmm_buffer *buffer;
unsigned long npages;
unsigned long size;
int ret;
npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
ASSERT_NE(npages, 0);
size = npages << self->page_shift;
buffer = malloc(sizeof(*buffer));
ASSERT_NE(buffer, NULL);
buffer->fd = -1;
buffer->size = size;
buffer->mirror = malloc(size);
ASSERT_NE(buffer->mirror, NULL);
buffer->ptr = mmap(NULL, size,
PROT_READ | PROT_WRITE,
MAP_SHARED | MAP_ANONYMOUS,
buffer->fd, 0);
ASSERT_NE(buffer->ptr, MAP_FAILED);
/* Migrate memory to device. */
ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages);
ASSERT_EQ(ret, -ENOENT);
hmm_buffer_free(buffer);
}
/*
* Try to migrate various memory types to device private memory.
*/
TEST_F(hmm2, migrate_mixed)
{
struct hmm_buffer *buffer;
unsigned long npages;
unsigned long size;
int *ptr;
unsigned char *p;
int ret;
int val;
npages = 6;
size = npages << self->page_shift;
buffer = malloc(sizeof(*buffer));
ASSERT_NE(buffer, NULL);
buffer->fd = -1;
buffer->size = size;
buffer->mirror = malloc(size);
ASSERT_NE(buffer->mirror, NULL);
/* Reserve a range of addresses. */
buffer->ptr = mmap(NULL, size,
PROT_NONE,
MAP_PRIVATE | MAP_ANONYMOUS,
buffer->fd, 0);
ASSERT_NE(buffer->ptr, MAP_FAILED);
p = buffer->ptr;
/* Migrating a protected area should be an error. */
ret = hmm_migrate_sys_to_dev(self->fd1, buffer, npages);
ASSERT_EQ(ret, -EINVAL);
/* Punch a hole after the first page address. */
ret = munmap(buffer->ptr + self->page_size, self->page_size);
ASSERT_EQ(ret, 0);
/* We expect an error if the vma doesn't cover the range. */
ret = hmm_migrate_sys_to_dev(self->fd1, buffer, 3);
ASSERT_EQ(ret, -EINVAL);
/* Page 2 will be a read-only zero page. */
ret = mprotect(buffer->ptr + 2 * self->page_size, self->page_size,
PROT_READ);
ASSERT_EQ(ret, 0);
ptr = (int *)(buffer->ptr + 2 * self->page_size);
val = *ptr + 3;
ASSERT_EQ(val, 3);
/* Page 3 will be read-only. */
ret = mprotect(buffer->ptr + 3 * self->page_size, self->page_size,
PROT_READ | PROT_WRITE);
ASSERT_EQ(ret, 0);
ptr = (int *)(buffer->ptr + 3 * self->page_size);
*ptr = val;
ret = mprotect(buffer->ptr + 3 * self->page_size, self->page_size,
PROT_READ);
ASSERT_EQ(ret, 0);
/* Page 4-5 will be read-write. */
ret = mprotect(buffer->ptr + 4 * self->page_size, 2 * self->page_size,
PROT_READ | PROT_WRITE);
ASSERT_EQ(ret, 0);
ptr = (int *)(buffer->ptr + 4 * self->page_size);
*ptr = val;
ptr = (int *)(buffer->ptr + 5 * self->page_size);
*ptr = val;
/* Now try to migrate pages 2-5 to device 1. */
buffer->ptr = p + 2 * self->page_size;
ret = hmm_migrate_sys_to_dev(self->fd1, buffer, 4);
ASSERT_EQ(ret, 0);
ASSERT_EQ(buffer->cpages, 4);
/* Page 5 won't be migrated to device 0 because it's on device 1. */
buffer->ptr = p + 5 * self->page_size;
ret = hmm_migrate_sys_to_dev(self->fd0, buffer, 1);
ASSERT_EQ(ret, -ENOENT);
buffer->ptr = p;
hmm_buffer_free(buffer);
}
/*
* Migrate anonymous memory to device memory and back to system memory
* multiple times. With the device private configuration the pages come back
* implicitly, via CPU page faults. With the device coherent configuration the
* pages must be migrated back explicitly, because the CPU can access coherent
* device memory directly and therefore never faults on it.
*/
TEST_F(hmm, migrate_multiple)
{
struct hmm_buffer *buffer;
unsigned long npages;
unsigned long size;
unsigned long i;
unsigned long c;
int *ptr;
int ret;
npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
ASSERT_NE(npages, 0);
size = npages << self->page_shift;
for (c = 0; c < NTIMES; c++) {
buffer = malloc(sizeof(*buffer));
ASSERT_NE(buffer, NULL);
buffer->fd = -1;
buffer->size = size;
buffer->mirror = malloc(size);
ASSERT_NE(buffer->mirror, NULL);
buffer->ptr = mmap(NULL, size,
PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS,
buffer->fd, 0);
ASSERT_NE(buffer->ptr, MAP_FAILED);
/* Initialize buffer in system memory. */
for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
ptr[i] = i;
/* Migrate memory to device. */
ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages);
ASSERT_EQ(ret, 0);
ASSERT_EQ(buffer->cpages, npages);
/* Check what the device read. */
for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
ASSERT_EQ(ptr[i], i);
/* Migrate back to system memory and check them. */
if (hmm_is_coherent_type(variant->device_number)) {
ret = hmm_migrate_dev_to_sys(self->fd, buffer, npages);
ASSERT_EQ(ret, 0);
ASSERT_EQ(buffer->cpages, npages);
}
for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
ASSERT_EQ(ptr[i], i);
hmm_buffer_free(buffer);
}
}
/*
* Read anonymous memory multiple times.
*/
TEST_F(hmm, anon_read_multiple)
{
struct hmm_buffer *buffer;
unsigned long npages;
unsigned long size;
unsigned long i;
unsigned long c;
int *ptr;
int ret;
npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
ASSERT_NE(npages, 0);
size = npages << self->page_shift;
for (c = 0; c < NTIMES; c++) {
buffer = malloc(sizeof(*buffer));
ASSERT_NE(buffer, NULL);
buffer->fd = -1;
buffer->size = size;
buffer->mirror = malloc(size);
ASSERT_NE(buffer->mirror, NULL);
buffer->ptr = mmap(NULL, size,
PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS,
buffer->fd, 0);
ASSERT_NE(buffer->ptr, MAP_FAILED);
/* Initialize buffer in system memory. */
for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
ptr[i] = i + c;
/* Simulate a device reading system memory. */
ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_READ, buffer,
npages);
ASSERT_EQ(ret, 0);
ASSERT_EQ(buffer->cpages, npages);
ASSERT_EQ(buffer->faults, 1);
/* Check what the device read. */
for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
ASSERT_EQ(ptr[i], i + c);
hmm_buffer_free(buffer);
}
}
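/* Thread helper for anon_teardown: unmaps the second half of the buffer while the main thread is reading it through the device. */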
void *unmap_buffer(void *p)
{
struct hmm_buffer *buffer = p;
/* Delay for a bit and then unmap buffer while it is being read. */
hmm_nanosleep(hmm_random() % 32000);
munmap(buffer->ptr + buffer->size / 2, buffer->size / 2);
buffer->ptr = NULL;
return NULL;
}
/*
* Try reading anonymous memory while it is being unmapped.
*/
TEST_F(hmm, anon_teardown)
{
unsigned long npages;
unsigned long size;
unsigned long c;
void *ret;
npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
ASSERT_NE(npages, 0);
size = npages << self->page_shift;
for (c = 0; c < NTIMES; ++c) {
pthread_t thread;
struct hmm_buffer *buffer;
unsigned long i;
int *ptr;
int rc;
buffer = malloc(sizeof(*buffer));
ASSERT_NE(buffer, NULL);
buffer->fd = -1;
buffer->size = size;
buffer->mirror = malloc(size);
ASSERT_NE(buffer->mirror, NULL);
buffer->ptr = mmap(NULL, size,
PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS,
buffer->fd, 0);
ASSERT_NE(buffer->ptr, MAP_FAILED);
/* Initialize buffer in system memory. */
for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
ptr[i] = i + c;
rc = pthread_create(&thread, NULL, unmap_buffer, buffer);
ASSERT_EQ(rc, 0);
/* Simulate a device reading system memory. */
rc = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_READ, buffer,
npages);
if (rc == 0) {
ASSERT_EQ(buffer->cpages, npages);
ASSERT_EQ(buffer->faults, 1);
/* Check what the device read. */
for (i = 0, ptr = buffer->mirror;
i < size / sizeof(*ptr);
++i)
ASSERT_EQ(ptr[i], i + c);
}
pthread_join(thread, &ret);
hmm_buffer_free(buffer);
}
}
/*
* Test memory snapshot of a mapping of the test driver's own file (a mixed
* map), without faulting in pages accessed by the device.
*/
TEST_F(hmm, mixedmap)
{
struct hmm_buffer *buffer;
unsigned long npages;
unsigned long size;
unsigned char *m;
int ret;
npages = 1;
size = npages << self->page_shift;
buffer = malloc(sizeof(*buffer));
ASSERT_NE(buffer, NULL);
buffer->fd = -1;
buffer->size = size;
buffer->mirror = malloc(npages);
ASSERT_NE(buffer->mirror, NULL);
/* Reserve a range of addresses. */
buffer->ptr = mmap(NULL, size,
PROT_READ | PROT_WRITE,
MAP_PRIVATE,
self->fd, 0);
ASSERT_NE(buffer->ptr, MAP_FAILED);
/* Simulate a device snapshotting CPU pagetables. */
ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_SNAPSHOT, buffer, npages);
ASSERT_EQ(ret, 0);
ASSERT_EQ(buffer->cpages, npages);
/* Check what the device saw. */
m = buffer->mirror;
ASSERT_EQ(m[0], HMM_DMIRROR_PROT_READ);
hmm_buffer_free(buffer);
}
/*
* Test memory snapshot without faulting in pages accessed by the device.
*/
TEST_F(hmm2, snapshot)
{
struct hmm_buffer *buffer;
unsigned long npages;
unsigned long size;
int *ptr;
unsigned char *p;
unsigned char *m;
int ret;
int val;
npages = 7;
size = npages << self->page_shift;
buffer = malloc(sizeof(*buffer));
ASSERT_NE(buffer, NULL);
buffer->fd = -1;
buffer->size = size;
buffer->mirror = malloc(npages);
ASSERT_NE(buffer->mirror, NULL);
/* Reserve a range of addresses. */
buffer->ptr = mmap(NULL, size,
PROT_NONE,
MAP_PRIVATE | MAP_ANONYMOUS,
buffer->fd, 0);
ASSERT_NE(buffer->ptr, MAP_FAILED);
p = buffer->ptr;
/* Punch a hole after the first page address. */
ret = munmap(buffer->ptr + self->page_size, self->page_size);
ASSERT_EQ(ret, 0);
/* Page 2 will be a read-only zero page. */
ret = mprotect(buffer->ptr + 2 * self->page_size, self->page_size,
PROT_READ);
ASSERT_EQ(ret, 0);
ptr = (int *)(buffer->ptr + 2 * self->page_size);
val = *ptr + 3;
ASSERT_EQ(val, 3);
/* Page 3 will be read-only. */
ret = mprotect(buffer->ptr + 3 * self->page_size, self->page_size,
PROT_READ | PROT_WRITE);
ASSERT_EQ(ret, 0);
ptr = (int *)(buffer->ptr + 3 * self->page_size);
*ptr = val;
ret = mprotect(buffer->ptr + 3 * self->page_size, self->page_size,
PROT_READ);
ASSERT_EQ(ret, 0);
/* Page 4-6 will be read-write. */
ret = mprotect(buffer->ptr + 4 * self->page_size, 3 * self->page_size,
PROT_READ | PROT_WRITE);
ASSERT_EQ(ret, 0);
ptr = (int *)(buffer->ptr + 4 * self->page_size);
*ptr = val;
/* Page 5 will be migrated to device 0. */
buffer->ptr = p + 5 * self->page_size;
ret = hmm_migrate_sys_to_dev(self->fd0, buffer, 1);
ASSERT_EQ(ret, 0);
ASSERT_EQ(buffer->cpages, 1);
/* Page 6 will be migrated to device 1. */
buffer->ptr = p + 6 * self->page_size;
ret = hmm_migrate_sys_to_dev(self->fd1, buffer, 1);
ASSERT_EQ(ret, 0);
ASSERT_EQ(buffer->cpages, 1);
/* Simulate a device snapshotting CPU pagetables. */
buffer->ptr = p;
ret = hmm_dmirror_cmd(self->fd0, HMM_DMIRROR_SNAPSHOT, buffer, npages);
ASSERT_EQ(ret, 0);
ASSERT_EQ(buffer->cpages, npages);
/* Check what the device saw. */
m = buffer->mirror;
ASSERT_EQ(m[0], HMM_DMIRROR_PROT_ERROR);
ASSERT_EQ(m[1], HMM_DMIRROR_PROT_ERROR);
ASSERT_EQ(m[2], HMM_DMIRROR_PROT_ZERO | HMM_DMIRROR_PROT_READ);
ASSERT_EQ(m[3], HMM_DMIRROR_PROT_READ);
ASSERT_EQ(m[4], HMM_DMIRROR_PROT_WRITE);
if (!hmm_is_coherent_type(variant->device_number0)) {
ASSERT_EQ(m[5], HMM_DMIRROR_PROT_DEV_PRIVATE_LOCAL |
HMM_DMIRROR_PROT_WRITE);
ASSERT_EQ(m[6], HMM_DMIRROR_PROT_NONE);
} else {
ASSERT_EQ(m[5], HMM_DMIRROR_PROT_DEV_COHERENT_LOCAL |
HMM_DMIRROR_PROT_WRITE);
ASSERT_EQ(m[6], HMM_DMIRROR_PROT_DEV_COHERENT_REMOTE |
HMM_DMIRROR_PROT_WRITE);
}
hmm_buffer_free(buffer);
}
/*
* Test the hmm_range_fault() HMM_PFN_PMD flag for large pages that
* should be mapped by a large page table entry.
*/
TEST_F(hmm, compound)
{
struct hmm_buffer *buffer;
unsigned long npages;
unsigned long size;
unsigned long default_hsize;
int *ptr;
unsigned char *m;
int ret;
unsigned long i;
/* Skip test if we can't allocate a hugetlbfs page. */
default_hsize = file_read_ulong("/proc/meminfo", "Hugepagesize:");
if (default_hsize < 0 || default_hsize*1024 < default_hsize)
SKIP(return, "Huge page size could not be determined");
default_hsize = default_hsize*1024; /* KB to B */
size = ALIGN(TWOMEG, default_hsize);
npages = size >> self->page_shift;
buffer = malloc(sizeof(*buffer));
ASSERT_NE(buffer, NULL);
buffer->ptr = mmap(NULL, size,
PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB,
-1, 0);
if (buffer->ptr == MAP_FAILED) {
free(buffer);
return;
}
buffer->size = size;
buffer->mirror = malloc(npages);
ASSERT_NE(buffer->mirror, NULL);
/* Initialize the pages the device will snapshot in buffer->ptr. */
for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
ptr[i] = i;
/* Simulate a device snapshotting CPU pagetables. */
ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_SNAPSHOT, buffer, npages);
ASSERT_EQ(ret, 0);
ASSERT_EQ(buffer->cpages, npages);
/* Check what the device saw. */
m = buffer->mirror;
for (i = 0; i < npages; ++i)
ASSERT_EQ(m[i], HMM_DMIRROR_PROT_WRITE |
HMM_DMIRROR_PROT_PMD);
/* Make the region read-only. */
ret = mprotect(buffer->ptr, size, PROT_READ);
ASSERT_EQ(ret, 0);
/* Simulate a device snapshotting CPU pagetables. */
ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_SNAPSHOT, buffer, npages);
ASSERT_EQ(ret, 0);
ASSERT_EQ(buffer->cpages, npages);
/* Check what the device saw. */
m = buffer->mirror;
for (i = 0; i < npages; ++i)
ASSERT_EQ(m[i], HMM_DMIRROR_PROT_READ |
HMM_DMIRROR_PROT_PMD);
munmap(buffer->ptr, buffer->size);
buffer->ptr = NULL;
hmm_buffer_free(buffer);
}
/*
* Test two devices reading the same memory (double mapped).
*/
TEST_F(hmm2, double_map)
{
struct hmm_buffer *buffer;
unsigned long npages;
unsigned long size;
unsigned long i;
int *ptr;
int ret;
npages = 6;
size = npages << self->page_shift;
buffer = malloc(sizeof(*buffer));
ASSERT_NE(buffer, NULL);
buffer->fd = -1;
buffer->size = size;
buffer->mirror = malloc(npages);
ASSERT_NE(buffer->mirror, NULL);
/* Reserve a range of addresses. */
buffer->ptr = mmap(NULL, size,
PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS,
buffer->fd, 0);
ASSERT_NE(buffer->ptr, MAP_FAILED);
/* Initialize buffer in system memory. */
for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
ptr[i] = i;
/* Make region read-only. */
ret = mprotect(buffer->ptr, size, PROT_READ);
ASSERT_EQ(ret, 0);
/* Simulate device 0 reading system memory. */
ret = hmm_dmirror_cmd(self->fd0, HMM_DMIRROR_READ, buffer, npages);
ASSERT_EQ(ret, 0);
ASSERT_EQ(buffer->cpages, npages);
ASSERT_EQ(buffer->faults, 1);
/* Check what the device read. */
for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
ASSERT_EQ(ptr[i], i);
/* Simulate device 1 reading system memory. */
ret = hmm_dmirror_cmd(self->fd1, HMM_DMIRROR_READ, buffer, npages);
ASSERT_EQ(ret, 0);
ASSERT_EQ(buffer->cpages, npages);
ASSERT_EQ(buffer->faults, 1);
/* Check what the device read. */
for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
ASSERT_EQ(ptr[i], i);
/* Migrate pages to device 1 and try to read from device 0. */
ret = hmm_migrate_sys_to_dev(self->fd1, buffer, npages);
ASSERT_EQ(ret, 0);
ASSERT_EQ(buffer->cpages, npages);
ret = hmm_dmirror_cmd(self->fd0, HMM_DMIRROR_READ, buffer, npages);
ASSERT_EQ(ret, 0);
ASSERT_EQ(buffer->cpages, npages);
ASSERT_EQ(buffer->faults, 1);
/* Check what device 0 read. */
for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
ASSERT_EQ(ptr[i], i);
hmm_buffer_free(buffer);
}
/*
* Basic check of exclusive faulting.
*/
TEST_F(hmm, exclusive)
{
struct hmm_buffer *buffer;
unsigned long npages;
unsigned long size;
unsigned long i;
int *ptr;
int ret;
npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
ASSERT_NE(npages, 0);
size = npages << self->page_shift;
buffer = malloc(sizeof(*buffer));
ASSERT_NE(buffer, NULL);
buffer->fd = -1;
buffer->size = size;
buffer->mirror = malloc(size);
ASSERT_NE(buffer->mirror, NULL);
buffer->ptr = mmap(NULL, size,
PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS,
buffer->fd, 0);
ASSERT_NE(buffer->ptr, MAP_FAILED);
/* Initialize buffer in system memory. */
for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
ptr[i] = i;
/* Map memory exclusively for device access. */
ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_EXCLUSIVE, buffer, npages);
ASSERT_EQ(ret, 0);
ASSERT_EQ(buffer->cpages, npages);
/* Check what the device read. */
for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
ASSERT_EQ(ptr[i], i);
/* Fault pages back to system memory and check them. */
for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
ASSERT_EQ(ptr[i]++, i);
for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
ASSERT_EQ(ptr[i], i+1);
/* Check that atomic (exclusive) access has been revoked. */
ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_CHECK_EXCLUSIVE, buffer, npages);
ASSERT_EQ(ret, 0);
hmm_buffer_free(buffer);
}
TEST_F(hmm, exclusive_mprotect)
{
struct hmm_buffer *buffer;
unsigned long npages;
unsigned long size;
unsigned long i;
int *ptr;
int ret;
npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
ASSERT_NE(npages, 0);
size = npages << self->page_shift;
buffer = malloc(sizeof(*buffer));
ASSERT_NE(buffer, NULL);
buffer->fd = -1;
buffer->size = size;
buffer->mirror = malloc(size);
ASSERT_NE(buffer->mirror, NULL);
buffer->ptr = mmap(NULL, size,
PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS,
buffer->fd, 0);
ASSERT_NE(buffer->ptr, MAP_FAILED);
/* Initialize buffer in system memory. */
for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
ptr[i] = i;
/* Map memory exclusively for device access. */
ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_EXCLUSIVE, buffer, npages);
ASSERT_EQ(ret, 0);
ASSERT_EQ(buffer->cpages, npages);
/* Check what the device read. */
for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
ASSERT_EQ(ptr[i], i);
ret = mprotect(buffer->ptr, size, PROT_READ);
ASSERT_EQ(ret, 0);
/* Simulate a device writing system memory. */
ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_WRITE, buffer, npages);
ASSERT_EQ(ret, -EPERM);
hmm_buffer_free(buffer);
}
/*
* Check copy-on-write works.
*/
TEST_F(hmm, exclusive_cow)
{
struct hmm_buffer *buffer;
unsigned long npages;
unsigned long size;
unsigned long i;
int *ptr;
int ret;
npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
ASSERT_NE(npages, 0);
size = npages << self->page_shift;
buffer = malloc(sizeof(*buffer));
ASSERT_NE(buffer, NULL);
buffer->fd = -1;
buffer->size = size;
buffer->mirror = malloc(size);
ASSERT_NE(buffer->mirror, NULL);
buffer->ptr = mmap(NULL, size,
PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS,
buffer->fd, 0);
ASSERT_NE(buffer->ptr, MAP_FAILED);
/* Initialize buffer in system memory. */
for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
ptr[i] = i;
/* Map memory exclusively for device access. */
ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_EXCLUSIVE, buffer, npages);
ASSERT_EQ(ret, 0);
ASSERT_EQ(buffer->cpages, npages);
fork();
/* Fault pages back to system memory and check them. */
for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
ASSERT_EQ(ptr[i]++, i);
for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
ASSERT_EQ(ptr[i], i+1);
hmm_buffer_free(buffer);
}
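/*
* Ask the gup_test driver to get/pin npages user pages starting at addr,
* using the given test command and extra FOLL_* flags.
*/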
static int gup_test_exec(int gup_fd, unsigned long addr, int cmd,
int npages, int size, int flags)
{
struct gup_test gup = {
.nr_pages_per_call = npages,
.addr = addr,
.gup_flags = FOLL_WRITE | flags,
.size = size,
};
if (ioctl(gup_fd, cmd, &gup)) {
perror("ioctl on error\n");
return errno;
}
return 0;
}
/*
* Test get user device pages through gup_test. Setting PIN_LONGTERM flag.
* This should trigger a migration back to system memory for both, private
* and coherent type pages.
* This test makes use of gup_test module. Make sure GUP_TEST_CONFIG is added
* to your configuration before you run it.
*/
TEST_F(hmm, hmm_gup_test)
{
struct hmm_buffer *buffer;
int gup_fd;
unsigned long npages;
unsigned long size;
unsigned long i;
int *ptr;
int ret;
unsigned char *m;
gup_fd = open("/sys/kernel/debug/gup_test", O_RDWR);
if (gup_fd == -1)
SKIP(return, "Skipping test, could not find gup_test driver");
npages = 4;
size = npages << self->page_shift;
buffer = malloc(sizeof(*buffer));
ASSERT_NE(buffer, NULL);
buffer->fd = -1;
buffer->size = size;
buffer->mirror = malloc(size);
ASSERT_NE(buffer->mirror, NULL);
buffer->ptr = mmap(NULL, size,
PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS,
buffer->fd, 0);
ASSERT_NE(buffer->ptr, MAP_FAILED);
/* Initialize buffer in system memory. */
for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
ptr[i] = i;
/* Migrate memory to device. */
ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages);
ASSERT_EQ(ret, 0);
ASSERT_EQ(buffer->cpages, npages);
/* Check what the device read. */
for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
ASSERT_EQ(ptr[i], i);
ASSERT_EQ(gup_test_exec(gup_fd,
(unsigned long)buffer->ptr,
GUP_BASIC_TEST, 1, self->page_size, 0), 0);
ASSERT_EQ(gup_test_exec(gup_fd,
(unsigned long)buffer->ptr + 1 * self->page_size,
GUP_FAST_BENCHMARK, 1, self->page_size, 0), 0);
ASSERT_EQ(gup_test_exec(gup_fd,
(unsigned long)buffer->ptr + 2 * self->page_size,
PIN_FAST_BENCHMARK, 1, self->page_size, FOLL_LONGTERM), 0);
ASSERT_EQ(gup_test_exec(gup_fd,
(unsigned long)buffer->ptr + 3 * self->page_size,
PIN_LONGTERM_BENCHMARK, 1, self->page_size, 0), 0);
/* Take snapshot to CPU pagetables */
ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_SNAPSHOT, buffer, npages);
ASSERT_EQ(ret, 0);
ASSERT_EQ(buffer->cpages, npages);
m = buffer->mirror;
if (hmm_is_coherent_type(variant->device_number)) {
ASSERT_EQ(HMM_DMIRROR_PROT_DEV_COHERENT_LOCAL | HMM_DMIRROR_PROT_WRITE, m[0]);
ASSERT_EQ(HMM_DMIRROR_PROT_DEV_COHERENT_LOCAL | HMM_DMIRROR_PROT_WRITE, m[1]);
} else {
ASSERT_EQ(HMM_DMIRROR_PROT_WRITE, m[0]);
ASSERT_EQ(HMM_DMIRROR_PROT_WRITE, m[1]);
}
ASSERT_EQ(HMM_DMIRROR_PROT_WRITE, m[2]);
ASSERT_EQ(HMM_DMIRROR_PROT_WRITE, m[3]);
/*
* Check again the content on the pages. Make sure there's no
* corrupted data.
*/
for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
ASSERT_EQ(ptr[i], i);
close(gup_fd);
hmm_buffer_free(buffer);
}
/*
* Test copy-on-write in device pages.
* Writing to a COW'ed device private page triggers a fault that first
* migrates the pages back to system memory and then duplicates them. For
* COW'ed device coherent pages, the duplication happens directly from
* device memory.
*/
TEST_F(hmm, hmm_cow_in_device)
{
struct hmm_buffer *buffer;
unsigned long npages;
unsigned long size;
unsigned long i;
int *ptr;
int ret;
unsigned char *m;
pid_t pid;
int status;
npages = 4;
size = npages << self->page_shift;
buffer = malloc(sizeof(*buffer));
ASSERT_NE(buffer, NULL);
buffer->fd = -1;
buffer->size = size;
buffer->mirror = malloc(size);
ASSERT_NE(buffer->mirror, NULL);
buffer->ptr = mmap(NULL, size,
PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS,
buffer->fd, 0);
ASSERT_NE(buffer->ptr, MAP_FAILED);
/* Initialize buffer in system memory. */
for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
ptr[i] = i;
/* Migrate memory to device. */
ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages);
ASSERT_EQ(ret, 0);
ASSERT_EQ(buffer->cpages, npages);
pid = fork();
if (pid == -1)
ASSERT_EQ(pid, 0);
if (!pid) {
/* Child process waits for SIGTERM from the parent. */
while (1) {
}
perror("Should not reach this\n");
exit(0);
}
/* Parent process writes to COW pages(s) and gets a
* new copy in system. In case of device private pages,
* this write causes a migration to system mem first.
*/
for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
ptr[i] = i;
/* Terminate child and wait */
EXPECT_EQ(0, kill(pid, SIGTERM));
EXPECT_EQ(pid, waitpid(pid, &status, 0));
EXPECT_NE(0, WIFSIGNALED(status));
EXPECT_EQ(SIGTERM, WTERMSIG(status));
/* Take snapshot to CPU pagetables */
ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_SNAPSHOT, buffer, npages);
ASSERT_EQ(ret, 0);
ASSERT_EQ(buffer->cpages, npages);
m = buffer->mirror;
for (i = 0; i < npages; i++)
ASSERT_EQ(HMM_DMIRROR_PROT_WRITE, m[i]);
hmm_buffer_free(buffer);
}
TEST_HARNESS_MAIN
| linux-master | tools/testing/selftests/mm/hmm-tests.c |