/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2018, Google LLC.
 */
#ifndef SELFTEST_KVM_UCALL_COMMON_H
#define SELFTEST_KVM_UCALL_COMMON_H
#include "test_util.h"
#include "ucall.h"

/* Common ucalls */
enum {
	UCALL_NONE,
	UCALL_SYNC,
	UCALL_ABORT,
	UCALL_PRINTF,
	UCALL_DONE,
	UCALL_UNHANDLED,
};
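
/*
 * Rough semantics of the common commands: get_ucall() returns UCALL_NONE
 * when no ucall data is pending (a bare GUEST_UCALL_NONE() exit also shows
 * up as UCALL_NONE), UCALL_SYNC and UCALL_DONE drive normal test flow,
 * UCALL_ABORT carries a guest assertion failure, UCALL_PRINTF carries a
 * message formatted into ucall.buffer, and UCALL_UNHANDLED reports an
 * unexpected guest exception.
 */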

#define UCALL_MAX_ARGS 7
#define UCALL_BUFFER_LEN 1024

struct ucall {
	uint64_t cmd;
	uint64_t args[UCALL_MAX_ARGS];
	char buffer[UCALL_BUFFER_LEN];

	/* Host virtual address of this struct. */
	struct ucall *hva;
};
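
/*
 * The guest fills in cmd/args (and buffer for printf-style ucalls) and hands
 * the struct to the host via an arch-specific exit.  With the ucall pool
 * implementation, the struct lives in memory shared with the host, and hva
 * holds the host virtual address of the entry so that host code can access
 * it directly.
 */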

void ucall_arch_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa);
void ucall_arch_do_ucall(vm_vaddr_t uc);
void *ucall_arch_get_ucall(struct kvm_vcpu *vcpu);
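
/*
 * The ucall_arch_*() hooks above are implemented once per architecture:
 * ucall_arch_init() performs any setup needed to reach the host (e.g.
 * recording the MMIO address used for exits), ucall_arch_do_ucall() triggers
 * the actual exit from the guest, and ucall_arch_get_ucall() translates a
 * vCPU exit back into a host-usable pointer to the guest's ucall struct, or
 * NULL if the exit wasn't a ucall.
 */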

void ucall(uint64_t cmd, int nargs, ...);
__printf(2, 3) void ucall_fmt(uint64_t cmd, const char *fmt, ...);
__printf(5, 6) void ucall_assert(uint64_t cmd, const char *exp,
				 const char *file, unsigned int line,
				 const char *fmt, ...);
uint64_t get_ucall(struct kvm_vcpu *vcpu, struct ucall *uc);
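
/*
 * Typical host-side flow, shown as an illustrative sketch rather than a
 * requirement of this header (vcpu_run() is the usual helper from the
 * selftests' kvm_util headers; REPORT_GUEST_ASSERT(), defined below,
 * terminates the test and does not return):
 *
 *	struct ucall uc;
 *
 *	for (;;) {
 *		vcpu_run(vcpu);
 *		switch (get_ucall(vcpu, &uc)) {
 *		case UCALL_SYNC:
 *			break;
 *		case UCALL_ABORT:
 *			REPORT_GUEST_ASSERT(uc);
 *		case UCALL_DONE:
 *			return;
 *		}
 *	}
 */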
void ucall_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa);
int ucall_nr_pages_required(uint64_t page_size);
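
/*
 * ucall_init() sets up the ucall implementation for a VM (e.g. allocating
 * the shared ucall pool and recording the MMIO GPA used for exits), and
 * ucall_nr_pages_required() reports how many guest pages that setup
 * consumes so that VM creation can size guest memory accordingly.
 */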

/*
 * Perform userspace call without any associated data.  This bare call avoids
 * allocating a ucall struct, which can be useful if the atomic operations in
 * the full ucall() are problematic and/or unwanted.  Note, this will come out
 * as UCALL_NONE on the backend.
 */
#define GUEST_UCALL_NONE()	ucall_arch_do_ucall((vm_vaddr_t)NULL)
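
/*
 * Illustrative use (a sketch, not required by this header): a guest running
 * in a context where writing a ucall struct is undesirable can still poke
 * the host, which observes the exit as UCALL_NONE from get_ucall():
 *
 *	GUEST_UCALL_NONE();
 */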

#define GUEST_SYNC_ARGS(stage, arg1, arg2, arg3, arg4)	\
				ucall(UCALL_SYNC, 6, "hello", stage, arg1, arg2, arg3, arg4)
#define GUEST_SYNC(stage)	ucall(UCALL_SYNC, 2, "hello", stage)
#define GUEST_SYNC1(arg0)	ucall(UCALL_SYNC, 1, arg0)
#define GUEST_SYNC2(arg0, arg1)	ucall(UCALL_SYNC, 2, arg0, arg1)
#define GUEST_SYNC3(arg0, arg1, arg2) \
				ucall(UCALL_SYNC, 3, arg0, arg1, arg2)
#define GUEST_SYNC4(arg0, arg1, arg2, arg3) \
				ucall(UCALL_SYNC, 4, arg0, arg1, arg2, arg3)
#define GUEST_SYNC5(arg0, arg1, arg2, arg3, arg4) \
				ucall(UCALL_SYNC, 5, arg0, arg1, arg2, arg3, arg4)
#define GUEST_SYNC6(arg0, arg1, arg2, arg3, arg4, arg5) \
				ucall(UCALL_SYNC, 6, arg0, arg1, arg2, arg3, arg4, arg5)

#define GUEST_PRINTF(_fmt, _args...) ucall_fmt(UCALL_PRINTF, _fmt, ##_args)
#define GUEST_DONE() ucall(UCALL_DONE, 0)
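
/*
 * Typical guest-side flow, again as an illustrative sketch (guest_code and
 * its contents are hypothetical): synchronize with the host at interesting
 * points, optionally log via GUEST_PRINTF(), and finish with GUEST_DONE():
 *
 *	static void guest_code(void)
 *	{
 *		GUEST_SYNC(1);
 *		GUEST_PRINTF("hello from the guest\n");
 *		GUEST_SYNC(2);
 *		GUEST_DONE();
 *	}
 */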

#define REPORT_GUEST_PRINTF(ucall) pr_info("%s", (ucall).buffer)

enum guest_assert_builtin_args {
	GUEST_ERROR_STRING,
	GUEST_FILE,
	GUEST_LINE,
	GUEST_ASSERT_BUILTIN_NARGS
};
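
/*
 * Fixed positions in ucall.args[] used by ucall_assert() to stash the failed
 * expression and the file and line of the failing assertion;
 * REPORT_GUEST_ASSERT() below pulls them back out when re-raising the
 * assertion on the host.
 */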

#define ____GUEST_ASSERT(_condition, _exp, _fmt, _args...)		\
do {									\
	if (!(_condition))						\
		ucall_assert(UCALL_ABORT, _exp, __FILE__, __LINE__, _fmt, ##_args); \
} while (0)

#define __GUEST_ASSERT(_condition, _fmt, _args...)			\
	____GUEST_ASSERT(_condition, #_condition, _fmt, ##_args)

#define GUEST_ASSERT(_condition)					\
	__GUEST_ASSERT(_condition, #_condition)

#define GUEST_FAIL(_fmt, _args...)					\
	ucall_assert(UCALL_ABORT, "Unconditional guest failure",	\
		     __FILE__, __LINE__, _fmt, ##_args)

#define GUEST_ASSERT_EQ(a, b)						\
do {									\
	typeof(a) __a = (a);						\
	typeof(b) __b = (b);						\
	____GUEST_ASSERT(__a == __b, #a " == " #b, "%#lx != %#lx (%s != %s)",	\
			 (unsigned long)(__a), (unsigned long)(__b), #a, #b);	\
} while (0)

#define GUEST_ASSERT_NE(a, b)						\
do {									\
	typeof(a) __a = (a);						\
	typeof(b) __b = (b);						\
	____GUEST_ASSERT(__a != __b, #a " != " #b, "%#lx == %#lx (%s == %s)",	\
			 (unsigned long)(__a), (unsigned long)(__b), #a, #b);	\
} while (0)
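
/*
 * Example guest-side usage (illustrative; "val" is a hypothetical variable):
 *
 *	GUEST_ASSERT(val);
 *	GUEST_ASSERT_EQ(val, 42);
 *	__GUEST_ASSERT(val < 100, "val = %lu, expected < 100", val);
 *
 * A failure exits to the host as UCALL_ABORT, which the host re-reports via
 * REPORT_GUEST_ASSERT().
 */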

#define REPORT_GUEST_ASSERT(ucall)					\
	test_assert(false, (const char *)(ucall).args[GUEST_ERROR_STRING],	\
		    (const char *)(ucall).args[GUEST_FILE],		\
		    (ucall).args[GUEST_LINE], "%s", (ucall).buffer)

#endif /* SELFTEST_KVM_UCALL_COMMON_H */