Mirror of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git (synced 2025-08-04 16:25:34 +00:00)
KVM: x86/mmu: clean up prefetch/prefault/speculative naming
"prefetch", "prefault" and "speculative" are used throughout KVM to mean the same thing. Use a single name, standardizing on "prefetch" which is already used by various functions such as direct_pte_prefetch, FNAME(prefetch_gpte), FNAME(pte_prefetch), etc. Suggested-by: David Matlack <dmatlack@google.com> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 1e76a3ce0d
commit 2839180ce5
7 changed files with 16 additions and 16 deletions
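Before the per-file hunks, a minimal illustrative sketch of what the flag being renamed actually controls. This is not the kernel code: SKETCH_ACCESSED_MASK, sketch_mark_for_access_track() and sketch_make_spte() below are hypothetical stand-ins for the shadow-accessed-mask logic in make_spte() (see the spte.c hunks further down). The point is that a prefetched SPTE is created without the accessed bit and is marked for access tracking instead, so the guest's first real access remains observable.

/*
 * Illustrative sketch only -- simplified stand-in for make_spte() in
 * arch/x86/kvm/mmu/spte.c; the mask value and helpers are hypothetical.
 */
#include <stdbool.h>
#include <stdint.h>

#define SKETCH_ACCESSED_MASK (1ULL << 5)  /* hypothetical A-bit position */

/*
 * Stand-in for mark_spte_for_access_track(): clear the accessed bit so
 * the first genuine guest access can be trapped and recorded.
 */
static uint64_t sketch_mark_for_access_track(uint64_t spte)
{
        return spte & ~SKETCH_ACCESSED_MASK;
}

static uint64_t sketch_make_spte(uint64_t spte, bool prefetch)
{
        /* A real guest access sets the accessed bit up front... */
        if (!prefetch)
                spte |= SKETCH_ACCESSED_MASK;

        /* ...while a prefetched mapping stays access-tracked. */
        if (prefetch)
                spte = sketch_mark_for_access_track(spte);

        return spte;
}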
@@ -118,7 +118,7 @@ struct kvm_page_fault {
 	/* arguments to kvm_mmu_do_page_fault.  */
 	const gpa_t addr;
 	const u32 error_code;
-	const bool prefault;
+	const bool prefetch;

 	/* Derived from error_code.  */
 	const bool exec;
@@ -176,7 +176,7 @@ static inline bool is_nx_huge_page_enabled(void)
 }

 static inline int kvm_mmu_do_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
-					u32 err, bool prefault)
+					u32 err, bool prefetch)
 {
 	struct kvm_page_fault fault = {
 		.addr = cr2_or_gpa,
@@ -186,7 +186,7 @@ static inline int kvm_mmu_do_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
 		.present = err & PFERR_PRESENT_MASK,
 		.rsvd = err & PFERR_RSVD_MASK,
 		.user = err & PFERR_USER_MASK,
-		.prefault = prefault,
+		.prefetch = prefetch,
 		.is_tdp = likely(vcpu->arch.mmu->page_fault == kvm_tdp_page_fault),
 		.nx_huge_page_workaround_enabled = is_nx_huge_page_enabled(),

@@ -2573,7 +2573,7 @@ static void kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
  * be write-protected.
  */
 int mmu_try_to_unsync_pages(struct kvm_vcpu *vcpu, struct kvm_memory_slot *slot,
-			    gfn_t gfn, bool can_unsync, bool speculative)
+			    gfn_t gfn, bool can_unsync, bool prefetch)
 {
 	struct kvm_mmu_page *sp;
 	bool locked = false;
@@ -2599,7 +2599,7 @@ int mmu_try_to_unsync_pages(struct kvm_vcpu *vcpu, struct kvm_memory_slot *slot,
 		if (sp->unsync)
 			continue;

-		if (speculative)
+		if (prefetch)
 			return -EEXIST;

 		/*
@@ -2687,7 +2687,7 @@ static int mmu_set_spte(struct kvm_vcpu *vcpu, struct kvm_memory_slot *slot,

 	/* Prefetching always gets a writable pfn.  */
 	bool host_writable = !fault || fault->map_writable;
-	bool speculative = !fault || fault->prefault;
+	bool prefetch = !fault || fault->prefetch;
 	bool write_fault = fault && fault->write;

 	pgprintk("%s: spte %llx write_fault %d gfn %llx\n", __func__,
@@ -2719,7 +2719,7 @@ static int mmu_set_spte(struct kvm_vcpu *vcpu, struct kvm_memory_slot *slot,
 		was_rmapped = 1;
 	}

-	wrprot = make_spte(vcpu, sp, slot, pte_access, gfn, pfn, *sptep, speculative,
+	wrprot = make_spte(vcpu, sp, slot, pte_access, gfn, pfn, *sptep, prefetch,
 			   true, host_writable, &spte);

 	if (*sptep == spte) {
@@ -3923,7 +3923,7 @@ static bool kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
 	if (!async)
 		return false; /* *pfn has correct page already */

-	if (!fault->prefault && kvm_can_do_async_pf(vcpu)) {
+	if (!fault->prefetch && kvm_can_do_async_pf(vcpu)) {
 		trace_kvm_try_async_get_page(fault->addr, fault->gfn);
 		if (kvm_find_async_pf_gfn(vcpu, fault->gfn)) {
 			trace_kvm_async_pf_doublefault(fault->addr, fault->gfn);

@@ -119,7 +119,7 @@ static inline bool kvm_vcpu_ad_need_write_protect(struct kvm_vcpu *vcpu)
 }

 int mmu_try_to_unsync_pages(struct kvm_vcpu *vcpu, struct kvm_memory_slot *slot,
-			    gfn_t gfn, bool can_unsync, bool speculative);
+			    gfn_t gfn, bool can_unsync, bool prefetch);

 void kvm_mmu_gfn_disallow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn);
 void kvm_mmu_gfn_allow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn);

@@ -853,7 +853,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
 	 */
 	if (!r) {
 		pgprintk("%s: guest page fault\n", __func__);
-		if (!fault->prefault)
+		if (!fault->prefetch)
 			kvm_inject_emulated_page_fault(vcpu, &walker.fault);

 		return RET_PF_RETRY;

@@ -92,7 +92,7 @@ static bool kvm_is_mmio_pfn(kvm_pfn_t pfn)
 bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 	       struct kvm_memory_slot *slot,
 	       unsigned int pte_access, gfn_t gfn, kvm_pfn_t pfn,
-	       u64 old_spte, bool speculative, bool can_unsync,
+	       u64 old_spte, bool prefetch, bool can_unsync,
 	       bool host_writable, u64 *new_spte)
 {
 	int level = sp->role.level;
@@ -111,7 +111,7 @@ bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 	 * read access.  See FNAME(gpte_access) in paging_tmpl.h.
 	 */
 	spte |= shadow_present_mask;
-	if (!speculative)
+	if (!prefetch)
 		spte |= spte_shadow_accessed_mask(spte);

 	if (level > PG_LEVEL_4K && (pte_access & ACC_EXEC_MASK) &&
@@ -161,7 +161,7 @@ bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 	 * e.g. it's write-tracked (upper-level SPs) or has one or more
 	 * shadow pages and unsync'ing pages is not allowed.
 	 */
-	if (mmu_try_to_unsync_pages(vcpu, slot, gfn, can_unsync, speculative)) {
+	if (mmu_try_to_unsync_pages(vcpu, slot, gfn, can_unsync, prefetch)) {
 		pgprintk("%s: found shadow page for %llx, marking ro\n",
 			 __func__, gfn);
 		wrprot = true;
@@ -174,7 +174,7 @@ bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 		spte |= spte_shadow_dirty_mask(spte);

 out:
-	if (speculative)
+	if (prefetch)
 		spte = mark_spte_for_access_track(spte);

 	WARN_ONCE(is_rsvd_spte(&vcpu->arch.mmu->shadow_zero_check, spte, level),

@@ -332,7 +332,7 @@ static inline u64 get_mmio_spte_generation(u64 spte)
 bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 	       struct kvm_memory_slot *slot,
 	       unsigned int pte_access, gfn_t gfn, kvm_pfn_t pfn,
-	       u64 old_spte, bool speculative, bool can_unsync,
+	       u64 old_spte, bool prefetch, bool can_unsync,
 	       bool host_writable, u64 *new_spte);
 u64 make_nonleaf_spte(u64 *child_pt, bool ad_disabled);
 u64 make_mmio_spte(struct kvm_vcpu *vcpu, u64 gfn, unsigned int access);

@@ -907,7 +907,7 @@ static int tdp_mmu_map_handle_target_level(struct kvm_vcpu *vcpu,
 		new_spte = make_mmio_spte(vcpu, iter->gfn, ACC_ALL);
 	else
 		wrprot = make_spte(vcpu, sp, fault->slot, ACC_ALL, iter->gfn,
-				   fault->pfn, iter->old_spte, fault->prefault, true,
+				   fault->pfn, iter->old_spte, fault->prefetch, true,
 				   fault->map_writable, &new_spte);

 	if (new_spte == iter->old_spte)