// SPDX-License-Identifier: GPL-2.0
/*
 * KVM dirty page logging test
 *
 * Copyright (C) 2018, Red Hat, Inc.
 */

#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>
#include <semaphore.h>
#include <sys/types.h>
#include <signal.h>
#include <errno.h>
#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/atomic.h>
#include <asm/barrier.h>

#include "kvm_util.h"
#include "test_util.h"
#include "guest_modes.h"
#include "processor.h"
#include "ucall_common.h"

#define DIRTY_MEM_BITS 30 /* 1G */
#define PAGE_SHIFT_4K	12

/* The memory slot index to track dirty pages */
#define TEST_MEM_SLOT_INDEX		1

/* Default guest test virtual memory offset */
#define DEFAULT_GUEST_TEST_MEM		0xc0000000

/* How many host loops to run (one KVM_GET_DIRTY_LOG for each loop) */
#define TEST_HOST_LOOP_N		32UL

/* Interval for each host loop (ms) */
#define TEST_HOST_LOOP_INTERVAL		10UL

/*
 * Ensure the vCPU is able to perform a reasonable number of writes in each
 * iteration to provide a lower bound on coverage.
 */
#define TEST_MIN_WRITES_PER_ITERATION	0x100

/* Dirty bitmaps are always little endian, so we need to swap on big endian */
#if defined(__s390x__)
# define BITOP_LE_SWIZZLE	((BITS_PER_LONG-1) & ~0x7)
# define test_bit_le(nr, addr) \
	test_bit((nr) ^ BITOP_LE_SWIZZLE, addr)
# define __set_bit_le(nr, addr) \
	__set_bit((nr) ^ BITOP_LE_SWIZZLE, addr)
# define __clear_bit_le(nr, addr) \
	__clear_bit((nr) ^ BITOP_LE_SWIZZLE, addr)
# define __test_and_set_bit_le(nr, addr) \
	__test_and_set_bit((nr) ^ BITOP_LE_SWIZZLE, addr)
# define __test_and_clear_bit_le(nr, addr) \
	__test_and_clear_bit((nr) ^ BITOP_LE_SWIZZLE, addr)
#else
# define test_bit_le			test_bit
# define __set_bit_le			__set_bit
# define __clear_bit_le			__clear_bit
# define __test_and_set_bit_le		__test_and_set_bit
# define __test_and_clear_bit_le	__test_and_clear_bit
#endif

#define TEST_DIRTY_RING_COUNT		65536

#define SIG_IPI SIGUSR1

/*
 * Guest/Host shared variables. Ensure addr_gva2hva() and/or
 * sync_global_to/from_guest() are used when accessing from
 * the host. READ/WRITE_ONCE() should also be used with anything
 * that may change.
 */
static uint64_t host_page_size;
static uint64_t guest_page_size;
static uint64_t guest_num_pages;
static uint64_t iteration;
static uint64_t nr_writes;
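
/*
 * Set by the main thread to ask the vCPU to stop dirtying memory and park
 * itself at the next sync point (see vcpu_handle_sync_stop()).
 */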
static bool vcpu_stop;

/*
 * Guest physical memory offset of the testing memory slot.
 * This will be set to the topmost valid physical address minus
 * the test memory size.
 */
static uint64_t guest_test_phys_mem;

/*
 * Guest virtual memory offset of the testing memory slot.
 * Must not conflict with identity mapped test code.
 */
static uint64_t guest_test_virt_mem = DEFAULT_GUEST_TEST_MEM;

/*
 * Continuously write to the first 8 bytes of a random page within
 * the testing memory region.
 */
static void guest_code(void)
{
	uint64_t addr;

#ifdef __s390x__
	uint64_t i;

	/*
	 * On s390x, all pages of a 1M segment are initially marked as dirty
	 * when a page of the segment is written to for the very first time.
	 * To compensate for this specialty in this test, we need to touch
	 * all pages during the first iteration.
	 */
	for (i = 0; i < guest_num_pages; i++) {
		addr = guest_test_virt_mem + i * guest_page_size;
		vcpu_arch_put_guest(*(uint64_t *)addr, READ_ONCE(iteration));
		nr_writes++;
	}
#endif

	while (true) {
		while (!READ_ONCE(vcpu_stop)) {
			addr = guest_test_virt_mem;
			addr += (guest_random_u64(&guest_rng) % guest_num_pages)
				* guest_page_size;
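			/*
			 * Align the write down to the host page size so that
			 * the value lands in the first 8 bytes of the host
			 * page, which is where the host-side verification
			 * looks for it.
			 */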
			addr = align_down(addr, host_page_size);

			vcpu_arch_put_guest(*(uint64_t *)addr, READ_ONCE(iteration));
			nr_writes++;
		}
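
		/*
		 * Tell the host that this iteration's writes are done; the
		 * host may hold the vCPU here (via vcpu_handle_sync_stop())
		 * while it collects and verifies the dirty pages.
		 */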
		GUEST_SYNC(1);
	}
}

/* Host variables */
static bool host_quit;

/* Points to the test VM memory region on which we track dirty logs */
static void *host_test_mem;
static uint64_t host_num_pages;

/* For statistics only */
static uint64_t host_dirty_count;
static uint64_t host_clear_count;

/* Whether dirty ring reset is requested, or finished */
static sem_t sem_vcpu_stop;
static sem_t sem_vcpu_cont;

/*
 * This is updated by the vcpu thread to tell the host whether it's a
 * ring-full event. It should only be read after a sem_wait() of
 * sem_vcpu_stop and before the vcpu continues to run.
 */
static bool dirty_ring_vcpu_ring_full;

/*
 * This is only used for verifying the dirty pages. Dirty ring has a very
 * tricky case: when the ring just got full, KVM does a userspace exit due to
 * the ring being full. When that happens, the very last PFN is set but the
 * data is not actually changed (the guest WRITE is not really applied yet),
 * because we found that the dirty ring is full, refused to continue the vcpu,
 * and recorded the dirty gfn with the old contents.
 *
 * For this specific case, it's safe to skip checking this pfn for this
 * bit, because it's a redundant bit, and when the write happens later the bit
 * will be set again. We use this variable to always keep track of the latest
 * dirty gfn we've collected, so that if a data mismatch is found later in the
 * verifying process, we let it pass.
 */
static uint64_t dirty_ring_last_page = -1ULL;

/*
 * In addition to the above, it is possible (especially if this
 * test is run nested) for the above scenario to repeat multiple times:
 *
 * The following can happen:
 *
 * - L1 vCPU:        Memory write is logged to PML but not committed.
 *
 * - L1 test thread: Ignores the write because it's the last dirty ring entry.
 *                   Resets the dirty ring which:
 *                       - Resets the A/D bits in EPT
 *                       - Issues tlb flush (invept), which is intercepted by L0
 *
 * - L0: frees the whole nested ept mmu root as the response to invept,
 *       and thus ensures that when the memory write is retried, it will fault
 *       again
 *
 * - L1 vCPU:        Same memory write is logged to the PML but not committed
 *                   again.
 *
 * - L1 test thread: Ignores the write because it's the last dirty ring entry
 *                   (again).
 *                   Resets the dirty ring which:
 *                       - Resets the A/D bits in EPT (again)
 *                       - Issues tlb flush (again) which is intercepted by L0
 *
 * ...
 *
 * N times
 *
 * - L1 vCPU:        Memory write is logged in the PML and then committed.
 *                   Lots of other memory writes are logged and committed.
 * ...
 *
 * - L1 test thread: Sees the memory write along with other memory writes
 *                   in the dirty ring, and since the write is usually not
 *                   the last entry in the dirty-ring and has a very outdated
 *                   iteration, the test fails.
 *
 * Note that this is only possible when the write was the last log entry
 * of iteration N-1, so remember the last log entry of the previous iteration
 * and don't fail when it is reported in the next iteration together with
 * an outdated iteration count.
 */
static uint64_t dirty_ring_prev_iteration_last_page;

enum log_mode_t {
	/* Only use KVM_GET_DIRTY_LOG for logging */
	LOG_MODE_DIRTY_LOG = 0,

	/* Use both KVM_[GET|CLEAR]_DIRTY_LOG for logging */
	LOG_MODE_CLEAR_LOG = 1,

	/* Use dirty ring for logging */
	LOG_MODE_DIRTY_RING = 2,

	LOG_MODE_NUM,

	/* Run all supported modes */
	LOG_MODE_ALL = LOG_MODE_NUM,
};

/* Mode of logging to test. Default is to run all supported modes */
static enum log_mode_t host_log_mode_option = LOG_MODE_ALL;
/* Logging mode for current run */
static enum log_mode_t host_log_mode;
static pthread_t vcpu_thread;
static uint32_t test_dirty_ring_count = TEST_DIRTY_RING_COUNT;

static bool clear_log_supported(void)
{
	return kvm_has_cap(KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2);
}

static void clear_log_create_vm_done(struct kvm_vm *vm)
{
	u64 manual_caps;
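
	/*
	 * Enable manual dirty-log protection with whichever of the two known
	 * features (manual protect and "initially set") the kernel reports.
	 */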
	manual_caps = kvm_check_cap(KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2);
	TEST_ASSERT(manual_caps, "MANUAL_CAPS is zero!");
	manual_caps &= (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE |
			KVM_DIRTY_LOG_INITIALLY_SET);
	vm_enable_cap(vm, KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2, manual_caps);
}

static void dirty_log_collect_dirty_pages(struct kvm_vcpu *vcpu, int slot,
					  void *bitmap, uint32_t num_pages,
					  uint32_t *unused)
{
	kvm_vm_get_dirty_log(vcpu->vm, slot, bitmap);
}

static void clear_log_collect_dirty_pages(struct kvm_vcpu *vcpu, int slot,
					  void *bitmap, uint32_t num_pages,
					  uint32_t *unused)
{
	kvm_vm_get_dirty_log(vcpu->vm, slot, bitmap);
	kvm_vm_clear_dirty_log(vcpu->vm, slot, bitmap, 0, num_pages);
}

/* Should only be called after a GUEST_SYNC */
static void vcpu_handle_sync_stop(void)
{
	if (READ_ONCE(vcpu_stop)) {
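		/*
		 * The main thread asked the vCPU to stop: signal that we have
		 * reached a sync point, then block until told to continue.
		 */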
		sem_post(&sem_vcpu_stop);
		sem_wait(&sem_vcpu_cont);
	}
}

static void default_after_vcpu_run(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;

	TEST_ASSERT(get_ucall(vcpu, NULL) == UCALL_SYNC,
		    "Invalid guest sync status: exit_reason=%s",
		    exit_reason_str(run->exit_reason));

	vcpu_handle_sync_stop();
}

static bool dirty_ring_supported(void)
{
	return (kvm_has_cap(KVM_CAP_DIRTY_LOG_RING) ||
		kvm_has_cap(KVM_CAP_DIRTY_LOG_RING_ACQ_REL));
}

static void dirty_ring_create_vm_done(struct kvm_vm *vm)
{
	uint64_t pages;
	uint32_t limit;

	/*
	 * We rely on vcpu exit due to full dirty ring state. Adjust
	 * the ring buffer size to ensure we're able to reach the
	 * full dirty ring state.
	 */
	pages = (1ul << (DIRTY_MEM_BITS - vm->page_shift)) + 3;
	pages = vm_adjust_num_guest_pages(vm->mode, pages);
	if (vm->page_size < getpagesize())
		pages = vm_num_host_pages(vm->mode, pages);
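
	/*
	 * The dirty ring size must be a power of two, and to guarantee a
	 * ring-full exit it must not exceed the number of pages that can be
	 * dirtied in one iteration, so round both values down accordingly.
	 */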
	limit = 1 << (31 - __builtin_clz(pages));
	test_dirty_ring_count = 1 << (31 - __builtin_clz(test_dirty_ring_count));
	test_dirty_ring_count = min(limit, test_dirty_ring_count);
	pr_info("dirty ring count: 0x%x\n", test_dirty_ring_count);

	/*
	 * Switch to dirty ring mode after VM creation but before any
	 * of the vcpu creation.
	 */
	vm_enable_dirty_ring(vm, test_dirty_ring_count *
			     sizeof(struct kvm_dirty_gfn));
}
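
/*
 * The dirty GFN ring is shared with KVM, so access the entry flags with
 * acquire/release semantics (see KVM_CAP_DIRTY_LOG_RING_ACQ_REL).
 */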
static inline bool dirty_gfn_is_dirtied(struct kvm_dirty_gfn *gfn)
{
	return smp_load_acquire(&gfn->flags) == KVM_DIRTY_GFN_F_DIRTY;
}

static inline void dirty_gfn_set_collected(struct kvm_dirty_gfn *gfn)
{
	smp_store_release(&gfn->flags, KVM_DIRTY_GFN_F_RESET);
}

static uint32_t dirty_ring_collect_one(struct kvm_dirty_gfn *dirty_gfns,
				       int slot, void *bitmap,
				       uint32_t num_pages, uint32_t *fetch_index)
{
	struct kvm_dirty_gfn *cur;
	uint32_t count = 0;
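
	/*
	 * Harvest entries until hitting one that KVM has not yet marked
	 * dirty: set the matching bit in the bitmap and flag each harvested
	 * entry as collected so that the ring can be reset afterwards.
	 */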
	while (true) {
		cur = &dirty_gfns[*fetch_index % test_dirty_ring_count];
		if (!dirty_gfn_is_dirtied(cur))
			break;
		TEST_ASSERT(cur->slot == slot, "Slot number didn't match: "
			    "%u != %u", cur->slot, slot);
		TEST_ASSERT(cur->offset < num_pages, "Offset overflow: "
			    "0x%llx >= 0x%x", cur->offset, num_pages);
		__set_bit_le(cur->offset, bitmap);
		dirty_ring_last_page = cur->offset;
		dirty_gfn_set_collected(cur);
		(*fetch_index)++;
		count++;
	}

	return count;
}

static void dirty_ring_collect_dirty_pages(struct kvm_vcpu *vcpu, int slot,
					   void *bitmap, uint32_t num_pages,
					   uint32_t *ring_buf_idx)
{
	uint32_t count, cleared;

	/* Only have one vcpu */
	count = dirty_ring_collect_one(vcpu_map_dirty_ring(vcpu),
				       slot, bitmap, num_pages,
				       ring_buf_idx);

	cleared = kvm_vm_reset_dirty_ring(vcpu->vm);

	/*
	 * Cleared pages should be the same as collected, as KVM is supposed to
	 * clear only the entries that have been harvested.
	 */
	TEST_ASSERT(cleared == count, "Reset dirty pages (%u) mismatch "
		    "with collected (%u)", cleared, count);
}

static void dirty_ring_after_vcpu_run(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;

	/* A ucall-sync or ring-full event is allowed */
	if (get_ucall(vcpu, NULL) == UCALL_SYNC) {
		vcpu_handle_sync_stop();
	} else if (run->exit_reason == KVM_EXIT_DIRTY_RING_FULL) {
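		/*
		 * Note the ring-full exit for the main thread; if it has
		 * requested a stop, vcpu_handle_sync_stop() below parks the
		 * vCPU until the ring has been collected and reset.
		 */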
		WRITE_ONCE(dirty_ring_vcpu_ring_full, true);
		vcpu_handle_sync_stop();
	} else {
		TEST_ASSERT(false, "Invalid guest sync status: "
			    "exit_reason=%s",
			    exit_reason_str(run->exit_reason));
	}
}

struct log_mode {
	const char *name;
	/* Return true if this mode is supported, otherwise false */
	bool (*supported)(void);
	/* Hook when the vm creation is done (before vcpu creation) */
	void (*create_vm_done)(struct kvm_vm *vm);
	/* Hook to collect the dirty pages into the bitmap provided */
	void (*collect_dirty_pages) (struct kvm_vcpu *vcpu, int slot,
				     void *bitmap, uint32_t num_pages,
				     uint32_t *ring_buf_idx);
	/* Hook to call after each vcpu run */
	void (*after_vcpu_run)(struct kvm_vcpu *vcpu);
} log_modes[LOG_MODE_NUM] = {
	{
		.name = "dirty-log",
		.collect_dirty_pages = dirty_log_collect_dirty_pages,
		.after_vcpu_run = default_after_vcpu_run,
	},
	{
		.name = "clear-log",
		.supported = clear_log_supported,
		.create_vm_done = clear_log_create_vm_done,
		.collect_dirty_pages = clear_log_collect_dirty_pages,
		.after_vcpu_run = default_after_vcpu_run,
	},
	{
		.name = "dirty-ring",
		.supported = dirty_ring_supported,
		.create_vm_done = dirty_ring_create_vm_done,
		.collect_dirty_pages = dirty_ring_collect_dirty_pages,
		.after_vcpu_run = dirty_ring_after_vcpu_run,
	},
};
|
|
|
|
|
|
|
|
static void log_modes_dump(void)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
|
|
|
printf("all");
|
|
|
|
for (i = 0; i < LOG_MODE_NUM; i++)
|
|
|
|
printf(", %s", log_modes[i].name);
|
|
|
|
printf("\n");
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool log_mode_supported(void)
|
|
|
|
{
|
|
|
|
struct log_mode *mode = &log_modes[host_log_mode];
|
|
|
|
|
|
|
|
if (mode->supported)
|
|
|
|
return mode->supported();
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void log_mode_create_vm_done(struct kvm_vm *vm)
|
|
|
|
{
|
|
|
|
struct log_mode *mode = &log_modes[host_log_mode];
|
|
|
|
|
|
|
|
if (mode->create_vm_done)
|
|
|
|
mode->create_vm_done(vm);
|
|
|
|
}
|
|
|
|
|
2022-02-15 17:40:19 -08:00
|
|
|
static void log_mode_collect_dirty_pages(struct kvm_vcpu *vcpu, int slot,
					 void *bitmap, uint32_t num_pages,
					 uint32_t *ring_buf_idx)
{
	struct log_mode *mode = &log_modes[host_log_mode];

	TEST_ASSERT(mode->collect_dirty_pages != NULL,
		    "collect_dirty_pages() is required for any log mode!");

	mode->collect_dirty_pages(vcpu, slot, bitmap, num_pages, ring_buf_idx);
}

static void log_mode_after_vcpu_run(struct kvm_vcpu *vcpu)
{
	struct log_mode *mode = &log_modes[host_log_mode];

	if (mode->after_vcpu_run)
		mode->after_vcpu_run(vcpu);
}

static void *vcpu_worker(void *data)
{
	struct kvm_vcpu *vcpu = data;

	sem_wait(&sem_vcpu_cont);

	while (!READ_ONCE(host_quit)) {
		/* Let the guest dirty the random pages */
		vcpu_run(vcpu);
		log_mode_after_vcpu_run(vcpu);
	}

	return NULL;
}

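/*
 * Synchronization sketch (as used by run_test() below): the worker parks on
 * sem_vcpu_cont before each iteration, the main thread requests a stop by
 * setting vcpu_stop and then waiting on sem_vcpu_stop, and the vCPU-side
 * acknowledgement is expected to come from the mode's after_vcpu_run() hook.
 */
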
static void vm_dirty_log_verify(enum vm_guest_mode mode, unsigned long **bmap)
{
	uint64_t page, nr_dirty_pages = 0, nr_clean_pages = 0;
	uint64_t step = vm_num_host_pages(mode, 1);

	for (page = 0; page < host_num_pages; page += step) {
		uint64_t val = *(uint64_t *)(host_test_mem + page * host_page_size);
		bool bmap0_dirty = __test_and_clear_bit_le(page, bmap[0]);

		/*
		 * Ensure both bitmaps are cleared, as a page can be written
		 * multiple times per iteration, i.e. can show up in both
		 * bitmaps, and the dirty ring is additive, i.e. doesn't purge
		 * bitmap entries from previous collections.
		 */
		if (__test_and_clear_bit_le(page, bmap[1]) || bmap0_dirty) {
			nr_dirty_pages++;

			/*
			 * If the page is dirty, the value written to memory
			 * should be the current iteration number.
			 */
			if (val == iteration)
				continue;

			if (host_log_mode == LOG_MODE_DIRTY_RING) {
				/*
				 * The last page in the ring from previous
				 * iteration can be written with the value
				 * from the previous iteration, as the value to
				 * be written may be cached in a CPU register.
				 */
				if (page == dirty_ring_prev_iteration_last_page &&
				    val == iteration - 1)
					continue;

				/*
				 * Any value from a previous iteration is legal
				 * for the last entry, as the write may not yet
				 * have retired, i.e. the page may hold whatever
				 * it had before this iteration started.
				 */
				if (page == dirty_ring_last_page &&
				    val < iteration)
					continue;
			} else if (!val && iteration == 1 && bmap0_dirty) {
				/*
				 * When testing get+clear, the dirty bitmap
				 * starts with all bits set, and so the first
				 * iteration can observe a "dirty" page that
				 * was never written, but only in the first
				 * bitmap (collecting the bitmap also clears
				 * all dirty pages).
				 */
				continue;
			}

			TEST_FAIL("Dirty page %lu value (%lu) != iteration (%lu) "
				  "(last = %lu, prev_last = %lu)",
				  page, val, iteration, dirty_ring_last_page,
				  dirty_ring_prev_iteration_last_page);
		} else {
			nr_clean_pages++;
			/*
			 * If cleared, the value written can be any
			 * value smaller than the iteration number.
			 */
			TEST_ASSERT(val < iteration,
				    "Clear page %lu value (%lu) >= iteration (%lu) "
				    "(last = %lu, prev_last = %lu)",
				    page, val, iteration, dirty_ring_last_page,
				    dirty_ring_prev_iteration_last_page);
		}
	}

	pr_info("Iteration %2ld: dirty: %-6lu clean: %-6lu writes: %-6lu\n",
		iteration, nr_dirty_pages, nr_clean_pages, nr_writes);

	host_dirty_count += nr_dirty_pages;
	host_clear_count += nr_clean_pages;
}

static struct kvm_vm *create_vm(enum vm_guest_mode mode, struct kvm_vcpu **vcpu,
				uint64_t extra_mem_pages, void *guest_code)
{
	struct kvm_vm *vm;

	pr_info("Testing guest mode: %s\n", vm_guest_mode_string(mode));

	vm = __vm_create(VM_SHAPE(mode), 1, extra_mem_pages);

	log_mode_create_vm_done(vm);
	*vcpu = vm_vcpu_add(vm, 0, guest_code);
	return vm;
}

struct test_params {
	unsigned long iterations;
	unsigned long interval;
	uint64_t phys_offset;
};

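/*
 * A minimal sketch of how these parameters are expected to be consumed;
 * main() and its option parsing live later in this file, and the values
 * below are purely illustrative (the interval is in milliseconds, cf. the
 * 1ms sleeps in run_test()):
 *
 *	struct test_params p = {
 *		.iterations = 32,
 *		.interval = 10,
 *	};
 *
 *	for_each_guest_mode(run_test, &p);
 */
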
static void run_test(enum vm_guest_mode mode, void *arg)
{
	struct test_params *p = arg;
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;
	unsigned long *bmap[2];
	uint32_t ring_buf_idx = 0;
	int sem_val;

	if (!log_mode_supported()) {
		print_skip("Log mode '%s' not supported",
			   log_modes[host_log_mode].name);
		return;
	}

	/*
	 * Reserve page tables for two times the extra dirty memory, which
	 * will definitely cover the original (1G+) test range.  The
	 * calculation is done with the 4K page size, the smallest page
	 * size, so the page count is sufficient for all archs (e.g., a 64K
	 * page size guest will need even less memory for page tables).
	 */
	vm = create_vm(mode, &vcpu,
		       2ul << (DIRTY_MEM_BITS - PAGE_SHIFT_4K), guest_code);
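	/*
	 * For reference: with the 1G (2^30) test region and 4K (2^12) pages
	 * used for this calculation, the expression above works out to
	 * 2ul << 18 = 524288 extra 4K pages, i.e. 2G worth of backing.
	 */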

	guest_page_size = vm->page_size;
	/*
	 * A little more than 1G of guest page sized pages.  Cover the
	 * case where the size is not aligned to 64 pages.
	 */
	guest_num_pages = (1ul << (DIRTY_MEM_BITS - vm->page_shift)) + 3;
	guest_num_pages = vm_adjust_num_guest_pages(mode, guest_num_pages);
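	/*
	 * For example, with 4K guest pages this is (1ul << 18) + 3 = 262147
	 * pages, i.e. 1G plus three extra pages; the "+ 3" keeps the count
	 * from being a multiple of 64, exercising the unaligned case noted
	 * above.
	 */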

	host_page_size = getpagesize();
	host_num_pages = vm_num_host_pages(mode, guest_num_pages);

	if (!p->phys_offset) {
		guest_test_phys_mem = (vm->max_gfn - guest_num_pages) *
				      guest_page_size;
		guest_test_phys_mem = align_down(guest_test_phys_mem, host_page_size);
	} else {
		guest_test_phys_mem = p->phys_offset;
	}

#ifdef __s390x__
	/* Align to 1M (segment size) */
	guest_test_phys_mem = align_down(guest_test_phys_mem, 1 << 20);

	/*
	 * The workaround in guest_code() to write all pages prior to the first
	 * iteration isn't compatible with the dirty ring, as the dirty ring
	 * support relies on the vCPU to actually stop when vcpu_stop is set so
	 * that the vCPU doesn't hang waiting for the dirty ring to be emptied.
	 */
	TEST_ASSERT(host_log_mode != LOG_MODE_DIRTY_RING,
		    "Test needs to be updated to support s390 dirty ring");
#endif

	pr_info("guest physical test memory offset: 0x%lx\n", guest_test_phys_mem);

	bmap[0] = bitmap_zalloc(host_num_pages);
	bmap[1] = bitmap_zalloc(host_num_pages);

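	/*
	 * Two bitmaps: bmap[0] is used for collections performed while the
	 * vCPU is running, bmap[1] for the final collection done after the
	 * vCPU has been stopped; vm_dirty_log_verify() consumes and clears
	 * both.
	 */
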
	/* Add an extra memory slot for testing dirty logging */
	vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
				    guest_test_phys_mem,
				    TEST_MEM_SLOT_INDEX,
				    guest_num_pages,
				    KVM_MEM_LOG_DIRTY_PAGES);

	/* Do mapping for the dirty track memory slot */
	virt_map(vm, guest_test_virt_mem, guest_test_phys_mem, guest_num_pages);

	/* Cache the HVA pointer of the region */
	host_test_mem = addr_gpa2hva(vm, (vm_paddr_t)guest_test_phys_mem);

	/* Export the shared variables to the guest */
	sync_global_to_guest(vm, host_page_size);
	sync_global_to_guest(vm, guest_page_size);
	sync_global_to_guest(vm, guest_test_virt_mem);
	sync_global_to_guest(vm, guest_num_pages);

	host_dirty_count = 0;
	host_clear_count = 0;
	WRITE_ONCE(host_quit, false);

	/*
	 * Ensure the previous iteration didn't leave a dangling semaphore, i.e.
	 * that the main task and vCPU worker were synchronized and completed
	 * verification of all iterations.
	 */
	sem_getvalue(&sem_vcpu_stop, &sem_val);
	TEST_ASSERT_EQ(sem_val, 0);
	sem_getvalue(&sem_vcpu_cont, &sem_val);
	TEST_ASSERT_EQ(sem_val, 0);

	TEST_ASSERT_EQ(vcpu_stop, false);

	pthread_create(&vcpu_thread, NULL, vcpu_worker, vcpu);

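	/*
	 * The worker parks on sem_vcpu_cont right away (see vcpu_worker()),
	 * so the guest doesn't start dirtying memory until the first
	 * sem_post(&sem_vcpu_cont) below kicks off iteration 1.
	 */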
	for (iteration = 1; iteration <= p->iterations; iteration++) {
		unsigned long i;

		sync_global_to_guest(vm, iteration);

		WRITE_ONCE(nr_writes, 0);
		sync_global_to_guest(vm, nr_writes);

		dirty_ring_prev_iteration_last_page = dirty_ring_last_page;
		WRITE_ONCE(dirty_ring_vcpu_ring_full, false);

		sem_post(&sem_vcpu_cont);

		/*
		 * Let the vCPU run beyond the configured interval until it has
		 * performed the minimum number of writes.  This verifies the
		 * guest is making forward progress, e.g. isn't stuck because
		 * of a KVM bug, and puts a firm floor on test coverage.
		 */
		for (i = 0; i < p->interval || nr_writes < TEST_MIN_WRITES_PER_ITERATION; i++) {
			/*
			 * Sleep in 1ms chunks to keep the interval math simple
			 * and so that the test doesn't run too far beyond the
			 * specified interval.
			 */
			usleep(1000);

			sync_global_from_guest(vm, nr_writes);

			/*
			 * Reap dirty pages while the guest is running so that
			 * dirty ring full events are resolved, i.e. so that a
			 * larger interval doesn't always end up with a vCPU
			 * that's effectively blocked.  Collecting while the
			 * guest is running also verifies KVM doesn't lose any
			 * state.
			 *
			 * For bitmap modes, KVM overwrites the entire bitmap,
			 * i.e. collecting the bitmaps is destructive.  Collect
			 * the bitmap only on the first pass, otherwise this
			 * test would lose track of dirty pages.
			 */
			if (i && host_log_mode != LOG_MODE_DIRTY_RING)
				continue;

			/*
			 * For the dirty ring, empty the ring on subsequent
			 * passes only if the ring was filled at least once,
			 * to verify KVM's handling of a full ring (emptying
			 * the ring on every pass would make it unlikely the
			 * vCPU would ever fill the ring).
			 */
			if (i && !READ_ONCE(dirty_ring_vcpu_ring_full))
				continue;

			log_mode_collect_dirty_pages(vcpu, TEST_MEM_SLOT_INDEX,
						     bmap[0], host_num_pages,
						     &ring_buf_idx);
		}
|
KVM: selftests: Sync data verify of dirty logging with guest sync
This fixes a bug that can trigger with e.g. "taskset -c 0 ./dirty_log_test" or
when the testing host is very busy.
A similar previous attempt is done [1] but that is not enough, the reason is
stated in the reply [2].
As a summary (partly quotting from [2]):
The problem is I think one guest memory write operation (of this specific test)
contains a few micro-steps when page is during kvm dirty tracking (here I'm
only considering write-protect rather than pml but pml should be similar at
least when the log buffer is full):
(1) Guest read 'iteration' number into register, prepare to write, page fault
(2) Set dirty bit in either dirty bitmap or dirty ring
(3) Return to guest, data written
When we verify the data, we assumed that all these steps are "atomic", say,
when (1) happened for this page, we assume (2) & (3) must have happened. We
had some trick to workaround "un-atomicity" of above three steps, as previous
version of this patch wanted to fix atomicity of step (2)+(3) by explicitly
letting the main thread wait for at least one vmenter of vcpu thread, which
should work. However what I overlooked is probably that we still have race
when (1) and (2) can be interrupted.
One example calltrace when it could happen that we read an old interation, got
interrupted before even setting the dirty bit and flushing data:
__schedule+1742
__cond_resched+52
__get_user_pages+530
get_user_pages_unlocked+197
hva_to_pfn+206
try_async_pf+132
direct_page_fault+320
kvm_mmu_page_fault+103
vmx_handle_exit+288
vcpu_enter_guest+2460
kvm_arch_vcpu_ioctl_run+325
kvm_vcpu_ioctl+526
__x64_sys_ioctl+131
do_syscall_64+51
entry_SYSCALL_64_after_hwframe+68
It means iteration number cached in vcpu register can be very old when dirty
bit set and data flushed.
So far I don't see an easy way to guarantee all steps 1-3 atomicity but to sync
at the GUEST_SYNC() point of guest code when we do verification of the dirty
bits as what this patch does.
[1] https://lore.kernel.org/lkml/20210413213641.23742-1-peterx@redhat.com/
[2] https://lore.kernel.org/lkml/20210417140956.GV4440@xz-x1/
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Sean Christopherson <seanjc@google.com>
Cc: Andrew Jones <drjones@redhat.com>
Cc: stable@vger.kernel.org
Signed-off-by: Peter Xu <peterx@redhat.com>
Message-Id: <20210417143602.215059-2-peterx@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
2021-04-17 10:36:01 -04:00
|
|
|
|
|
|
|
/*
|
2025-01-10 16:29:54 -08:00
|
|
|
* Stop the vCPU prior to collecting and verifying the dirty
|
|
|
|
* log. If the vCPU is allowed to run during collection, then
|
|
|
|
* pages that are written during this iteration may be missed,
|
|
|
|
* i.e. collected in the next iteration. And if the vCPU is
|
|
|
|
* writing memory during verification, pages that this thread
|
|
|
|
* sees as clean may be written with this iteration's value.
|
		 */
		WRITE_ONCE(vcpu_stop, true);
		sync_global_to_guest(vm, vcpu_stop);
		sem_wait(&sem_vcpu_stop);

		/*
		 * Clear vcpu_stop after the vCPU thread has acknowledged the
		 * stop request and is waiting, i.e. is definitely not running!
		 */
		WRITE_ONCE(vcpu_stop, false);
		sync_global_to_guest(vm, vcpu_stop);

		/*
		 * Sync the number of writes performed before verification, the
		 * info will be printed along with the dirty/clean page counts.
		 */
		sync_global_from_guest(vm, nr_writes);

		/*
		 * NOTE: for dirty ring, it's possible that we didn't stop at
		 * GUEST_SYNC but instead we stopped because ring is full;
		 * that's okay too because ring full means we're only missing
		 * the flush of the last page, and since we handle the last
		 * page specially verification will succeed anyway.
		 */
		log_mode_collect_dirty_pages(vcpu, TEST_MEM_SLOT_INDEX,
					     bmap[1], host_num_pages,
					     &ring_buf_idx);
		vm_dirty_log_verify(mode, bmap);
	}

	WRITE_ONCE(host_quit, true);
	sem_post(&sem_vcpu_cont);

	pthread_join(vcpu_thread, NULL);

	pr_info("Total bits checked: dirty (%lu), clear (%lu)\n",
		host_dirty_count, host_clear_count);

	free(bmap[0]);
	free(bmap[1]);
	kvm_vm_free(vm);
}
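
For context, the loop above relies on a vCPU worker thread that acknowledges the stop request. The sketch below is only an illustration of that handshake, not this file's actual vcpu_worker (which is defined earlier in the test); the function name is hypothetical, while host_quit, sem_vcpu_stop, sem_vcpu_cont, and vcpu_run() are the names used above.

/*
 * Illustrative sketch (hypothetical, not the test's real worker): run the
 * guest until the main thread asks it to stop, acknowledge the stop by
 * posting sem_vcpu_stop, then wait on sem_vcpu_cont before resuming.
 */
static void *example_vcpu_worker(void *data)
{
	struct kvm_vcpu *vcpu = data;

	while (!READ_ONCE(host_quit)) {
		/* Let the guest dirty pages until it syncs back to the host. */
		vcpu_run(vcpu);

		/* Tell the main thread this vCPU is quiesced... */
		sem_post(&sem_vcpu_stop);
		/* ...and wait for verification to finish before resuming. */
		sem_wait(&sem_vcpu_cont);
	}

	return NULL;
}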

static void help(char *name)
{
	puts("");
	printf("usage: %s [-h] [-i iterations] [-I interval] "
	       "[-p offset] [-m mode]\n", name);
	puts("");
	printf(" -c: hint to dirty ring size, in number of entries\n");
	printf(" (only useful for dirty-ring test; default: %"PRIu32")\n",
	       TEST_DIRTY_RING_COUNT);
	printf(" -i: specify iteration counts (default: %"PRIu64")\n",
	       TEST_HOST_LOOP_N);
	printf(" -I: specify interval in ms (default: %"PRIu64" ms)\n",
	       TEST_HOST_LOOP_INTERVAL);
	printf(" -p: specify guest physical test memory offset\n"
	       " Warning: a low offset can conflict with the loaded test code.\n");
	printf(" -M: specify the host logging mode "
	       "(default: run all log modes). Supported modes: \n\t");
	log_modes_dump();
	guest_modes_help();
	puts("");
	exit(0);
}
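
As an illustrative invocation (not taken from this file), running 32 iterations with a 10ms interval against only the dirty-ring logging mode, with a ring-size hint, would look something like: ./dirty_log_test -i 32 -I 10 -M dirty-ring -c 65536. Omitting -M runs every supported logging mode in turn, as main() below shows.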

int main(int argc, char *argv[])
{
	struct test_params p = {
		.iterations = TEST_HOST_LOOP_N,
		.interval = TEST_HOST_LOOP_INTERVAL,
	};
	int opt, i;

kvm: introduce manual dirty log reprotect
There are two problems with KVM_GET_DIRTY_LOG. First, and less important,
it can take kvm->mmu_lock for an extended period of time. Second, its user
can actually see many false positives in some cases. The latter is due
to a benign race like this:
1. KVM_GET_DIRTY_LOG returns a set of dirty pages and write protects
them.
2. The guest modifies the pages, causing them to be marked dirty.
3. Userspace actually copies the pages.
4. KVM_GET_DIRTY_LOG returns those pages as dirty again, even though
they were not written to since (3).
This is especially a problem for large guests, where the time between
(1) and (3) can be substantial. This patch introduces a new
capability which, when enabled, makes KVM_GET_DIRTY_LOG not
write-protect the pages it returns. Instead, userspace has to
explicitly clear the dirty log bits just before using the content
of the page. The new KVM_CLEAR_DIRTY_LOG ioctl can also operate on a
64-page granularity rather than requiring to sync a full memslot;
this way, the mmu_lock is taken for small amounts of time, and
only a small amount of time will pass between write protection
of pages and the sending of their content.
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
2018-10-23 02:36:47 +02:00
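
To make the two-step flow described in this commit message concrete, the fragment below is a rough userspace sketch, not code from this test: enable the capability once per VM (exposed on current kernels as KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2), fetch the log with KVM_GET_DIRTY_LOG as before, and then re-protect only the consumed pages with KVM_CLEAR_DIRTY_LOG. The vm_fd, slot, and bitmap parameters are placeholders supplied by the caller.

#include <linux/kvm.h>
#include <sys/ioctl.h>

/* Enable manual dirty-log reprotection for a VM (once, at setup time). */
static int enable_manual_protect(int vm_fd)
{
	struct kvm_enable_cap cap = {
		.cap = KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2,
		.args[0] = KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE,
	};

	return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
}

/* Re-protect a chunk of pages after their dirty bits have been consumed. */
static int clear_dirty_chunk(int vm_fd, __u32 slot, __u64 first_page,
			     __u32 num_pages, void *bitmap)
{
	struct kvm_clear_dirty_log clear = {
		.slot = slot,
		.first_page = first_page,
		.num_pages = num_pages,
		.dirty_bitmap = bitmap,
	};

	return ioctl(vm_fd, KVM_CLEAR_DIRTY_LOG, &clear);
}

The 64-page granularity mentioned above corresponds to KVM's requirement that first_page be 64-aligned and num_pages be a multiple of 64 (except for the final chunk of a slot), so KVM only takes mmu_lock for small ranges at a time.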

	sem_init(&sem_vcpu_stop, 0, 0);
	sem_init(&sem_vcpu_cont, 0, 0);

	guest_modes_append_default();

	while ((opt = getopt(argc, argv, "c:hi:I:p:m:M:")) != -1) {
		switch (opt) {
		case 'c':
			test_dirty_ring_count = strtol(optarg, NULL, 10);
			break;
		case 'i':
			p.iterations = strtol(optarg, NULL, 10);
			break;
		case 'I':
			p.interval = strtol(optarg, NULL, 10);
			break;
		case 'p':
			p.phys_offset = strtoull(optarg, NULL, 0);
			break;
		case 'm':
			guest_modes_cmdline(optarg);
			break;
		case 'M':
			if (!strcmp(optarg, "all")) {
				host_log_mode_option = LOG_MODE_ALL;
				break;
			}
			for (i = 0; i < LOG_MODE_NUM; i++) {
				if (!strcmp(optarg, log_modes[i].name)) {
					pr_info("Setting log mode to: '%s'\n",
						optarg);
					host_log_mode_option = i;
					break;
				}
			}
			if (i == LOG_MODE_NUM) {
				printf("Log mode '%s' invalid. Please choose "
				       "from: ", optarg);
				log_modes_dump();
				exit(1);
			}
			break;
		case 'h':
		default:
			help(argv[0]);
			break;
		}
	}

	TEST_ASSERT(p.iterations > 0, "Iterations must be greater than zero");
	TEST_ASSERT(p.interval > 0, "Interval must be greater than zero");

	pr_info("Test iterations: %"PRIu64", interval: %"PRIu64" (ms)\n",
		p.iterations, p.interval);

	if (host_log_mode_option == LOG_MODE_ALL) {
		/* Run each log mode */
		for (i = 0; i < LOG_MODE_NUM; i++) {
			pr_info("Testing Log Mode '%s'\n", log_modes[i].name);
			host_log_mode = i;
			for_each_guest_mode(run_test, &p);
		}
	} else {
		host_log_mode = host_log_mode_option;
		for_each_guest_mode(run_test, &p);
	}

	return 0;
}