linux/tools/testing/selftests/kvm/x86/tsc_msrs_test.c
Sean Christopherson 67730e6c53 KVM: selftests: Use canonical $(ARCH) paths for KVM selftests directories
Use the kernel's canonical $(ARCH) paths instead of the raw target triple
for KVM selftests directories.  KVM selftests are quite nearly the only
place in the entire kernel that uses the target triple for directories,
tools/testing/selftests/drivers/s390x being the lone holdout.

Using the kernel's preferred nomenclature eliminates the minor, but
annoying, friction of having to translate to KVM's selftests directories,
e.g. for pattern matching, opening files, running selftests, etc.
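E.g. x86 selftests now live in tools/testing/selftests/kvm/x86 rather than
tools/testing/selftests/kvm/x86_64.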

Opportunistically delete file comments that reference the full path of the
file, as they are obviously prone to becoming stale, and serve no known
purpose.

Reviewed-by: Muhammad Usama Anjum <usama.anjum@collabora.com>
Acked-by: Claudio Imbrenda <imbrenda@linux.ibm.com>
Acked-by: Andrew Jones <ajones@ventanamicro.com>
Link: https://lore.kernel.org/r/20241128005547.4077116-16-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
2024-12-18 14:15:04 -08:00

// SPDX-License-Identifier: GPL-2.0
/*
 * Tests for MSR_IA32_TSC and MSR_IA32_TSC_ADJUST.
 *
 * Copyright (C) 2020, Red Hat, Inc.
 */
#include <stdio.h>
#include <string.h>
#include "kvm_util.h"
#include "processor.h"
#define UNITY (1ull << 30)
#define HOST_ADJUST (UNITY * 64)
#define GUEST_STEP (UNITY * 4)
#define ROUND(x) ((x + UNITY / 2) & -UNITY)
#define rounded_rdmsr(x) ROUND(rdmsr(x))
#define rounded_host_rdmsr(x) ROUND(vcpu_get_msr(vcpu, x))
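
/*
 * All values are multiples of UNITY (2^30 cycles).  MSR readings are
 * rounded to the nearest multiple of UNITY so that the cycles which
 * elapse between writing an MSR and reading it back don't cause
 * spurious mismatches.
 */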
static void guest_code(void)
{
	uint64_t val = 0;

	GUEST_ASSERT_EQ(rounded_rdmsr(MSR_IA32_TSC), val);
	GUEST_ASSERT_EQ(rounded_rdmsr(MSR_IA32_TSC_ADJUST), val);

	/* Guest: writes to MSR_IA32_TSC affect both MSRs. */
	val = 1ull * GUEST_STEP;
	wrmsr(MSR_IA32_TSC, val);
	GUEST_ASSERT_EQ(rounded_rdmsr(MSR_IA32_TSC), val);
	GUEST_ASSERT_EQ(rounded_rdmsr(MSR_IA32_TSC_ADJUST), val);

	/* Guest: writes to MSR_IA32_TSC_ADJUST affect both MSRs. */
	GUEST_SYNC(2);
	val = 2ull * GUEST_STEP;
	wrmsr(MSR_IA32_TSC_ADJUST, val);
	GUEST_ASSERT_EQ(rounded_rdmsr(MSR_IA32_TSC), val);
	GUEST_ASSERT_EQ(rounded_rdmsr(MSR_IA32_TSC_ADJUST), val);

	/* Host: setting the TSC offset. */
	GUEST_SYNC(3);
	GUEST_ASSERT_EQ(rounded_rdmsr(MSR_IA32_TSC), HOST_ADJUST + val);
	GUEST_ASSERT_EQ(rounded_rdmsr(MSR_IA32_TSC_ADJUST), val);

	/*
	 * Guest: writes to MSR_IA32_TSC_ADJUST do not destroy the
	 * host-side offset and affect both MSRs.
	 */
	GUEST_SYNC(4);
	val = 3ull * GUEST_STEP;
	wrmsr(MSR_IA32_TSC_ADJUST, val);
	GUEST_ASSERT_EQ(rounded_rdmsr(MSR_IA32_TSC), HOST_ADJUST + val);
	GUEST_ASSERT_EQ(rounded_rdmsr(MSR_IA32_TSC_ADJUST), val);

	/*
	 * Guest: writes to MSR_IA32_TSC affect both MSRs, so the host-side
	 * offset is now visible in MSR_IA32_TSC_ADJUST.
	 */
	GUEST_SYNC(5);
	val = 4ull * GUEST_STEP;
	wrmsr(MSR_IA32_TSC, val);
	GUEST_ASSERT_EQ(rounded_rdmsr(MSR_IA32_TSC), val);
	GUEST_ASSERT_EQ(rounded_rdmsr(MSR_IA32_TSC_ADJUST), val - HOST_ADJUST);

	GUEST_DONE();
}
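
/*
 * Resume the guest until its next ucall.  GUEST_SYNC() at the expected
 * stage or GUEST_DONE() counts as a pass, a failed guest assertion is
 * reported via REPORT_GUEST_ASSERT(), and any other exit fails the test.
 */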
static void run_vcpu(struct kvm_vcpu *vcpu, int stage)
{
	struct ucall uc;

	vcpu_run(vcpu);

	switch (get_ucall(vcpu, &uc)) {
	case UCALL_SYNC:
		if (!strcmp((const char *)uc.args[0], "hello") &&
		    uc.args[1] == stage + 1)
			ksft_test_result_pass("stage %d passed\n", stage + 1);
		else
			ksft_test_result_fail(
				"stage %d: Unexpected register values vmexit, got %lx",
				stage + 1, (ulong)uc.args[1]);
		return;
	case UCALL_DONE:
		ksft_test_result_pass("stage %d passed\n", stage + 1);
		return;
	case UCALL_ABORT:
		REPORT_GUEST_ASSERT(uc);
	default:
		TEST_ASSERT(false, "Unexpected exit: %s",
			    exit_reason_str(vcpu->run->exit_reason));
	}
}
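
/*
 * Host side of the test: check the initial MSR values, step the guest
 * through its stages, verify after each stage that the guest's writes
 * are visible through vcpu_get_msr(), and interleave host-initiated
 * writes to MSR_IA32_TSC and MSR_IA32_TSC_ADJUST between stages.
 */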
int main(void)
{
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;
	uint64_t val;

	ksft_print_header();
	ksft_set_plan(5);

	vm = vm_create_with_one_vcpu(&vcpu, guest_code);

	val = 0;
	TEST_ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), val);
	TEST_ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC_ADJUST), val);

	/* Guest: writes to MSR_IA32_TSC affect both MSRs. */
	run_vcpu(vcpu, 1);
	val = 1ull * GUEST_STEP;
	TEST_ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), val);
	TEST_ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC_ADJUST), val);

	/* Guest: writes to MSR_IA32_TSC_ADJUST affect both MSRs. */
	run_vcpu(vcpu, 2);
	val = 2ull * GUEST_STEP;
	TEST_ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), val);
	TEST_ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC_ADJUST), val);

	/*
	 * Host: writes to MSR_IA32_TSC set the host-side offset
	 * and therefore do not change MSR_IA32_TSC_ADJUST.
	 */
	vcpu_set_msr(vcpu, MSR_IA32_TSC, HOST_ADJUST + val);
	TEST_ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), HOST_ADJUST + val);
	TEST_ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC_ADJUST), val);
	run_vcpu(vcpu, 3);

	/* Host: writes to MSR_IA32_TSC_ADJUST do not modify the TSC. */
	vcpu_set_msr(vcpu, MSR_IA32_TSC_ADJUST, UNITY * 123456);
	TEST_ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), HOST_ADJUST + val);
	TEST_ASSERT_EQ(vcpu_get_msr(vcpu, MSR_IA32_TSC_ADJUST), UNITY * 123456);

	/* Restore previous value. */
	vcpu_set_msr(vcpu, MSR_IA32_TSC_ADJUST, val);
	TEST_ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), HOST_ADJUST + val);
	TEST_ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC_ADJUST), val);

	/*
	 * Guest: writes to MSR_IA32_TSC_ADJUST do not destroy the
	 * host-side offset and affect both MSRs.
	 */
	run_vcpu(vcpu, 4);
	val = 3ull * GUEST_STEP;
	TEST_ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), HOST_ADJUST + val);
	TEST_ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC_ADJUST), val);

	/*
	 * Guest: writes to MSR_IA32_TSC affect both MSRs, so the host-side
	 * offset is now visible in MSR_IA32_TSC_ADJUST.
	 */
	run_vcpu(vcpu, 5);
	val = 4ull * GUEST_STEP;
	TEST_ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), val);
	TEST_ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC_ADJUST), val - HOST_ADJUST);

	kvm_vm_free(vm);

	ksft_finished();	/* Print results and exit() accordingly */
}