mirror of
git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull kvm fixes from Paolo Bonzini:

 "ARM:

   - Correctly clean the BSS to the PoC before allowing EL2 to access it
     on nVHE/hVHE/protected configurations

   - Propagate ownership of debug registers in protected mode after the
     rework that landed in 6.14-rc1

   - Stop pretending that we can run the protected mode without a GICv3
     being present on the host

   - Fix a use-after-free situation that can occur if a vcpu fails to
     initialise the NV shadow S2 MMU contexts

   - Always evaluate the need to arm a background timer for fully
     emulated guest timers

   - Fix the emulation of EL1 timers in the absence of FEAT_ECV

   - Correctly handle the EL2 virtual timer, specially when
     HCR_EL2.E2H==0

  s390:

   - move some of the guest page table (gmap) logic into KVM itself,
     inching towards the final goal of completely removing gmap from the
     non-kvm memory management code.

     As an initial set of cleanups, move some code from mm/gmap into kvm
     and start using __kvm_faultin_pfn() to fault-in pages as needed;
     but especially stop abusing page->index and page->lru to aid in the
     pgdesc conversion.

  x86:

   - Add missing check in the fix to defer starting the huge page
     recovery vhost_task

   - SRSO_USER_KERNEL_NO does not need SYNTHESIZED_F"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (31 commits)
  KVM: x86/mmu: Ensure NX huge page recovery thread is alive before waking
  KVM: remove kvm_arch_post_init_vm
  KVM: selftests: Fix spelling mistake "initally" -> "initially"
  kvm: x86: SRSO_USER_KERNEL_NO is not synthesized
  KVM: arm64: timer: Don't adjust the EL2 virtual timer offset
  KVM: arm64: timer: Correctly handle EL1 timer emulation when !FEAT_ECV
  KVM: arm64: timer: Always evaluate the need for a soft timer
  KVM: arm64: Fix nested S2 MMU structures reallocation
  KVM: arm64: Fail protected mode init if no vgic hardware is present
  KVM: arm64: Flush/sync debug state in protected mode
  KVM: s390: selftests: Streamline uc_skey test to issue iske after sske
  KVM: s390: remove the last user of page->index
  KVM: s390: move PGSTE softbits
  KVM: s390: remove useless page->index usage
  KVM: s390: move gmap_shadow_pgt_lookup() into kvm
  KVM: s390: stop using lists to keep track of used dat tables
  KVM: s390: stop using page->index for non-shadow gmaps
  KVM: s390: move some gmap shadowing functions away from mm/gmap.c
  KVM: s390: get rid of gmap_translate()
  KVM: s390: get rid of gmap_fault()
  ...
commit 954a209f43
31 changed files with 1093 additions and 1007 deletions
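The s390 hunks below stop storing the shadowed page-table origin in page->index and instead pack it into the unused "ST2" software bits of the four PGSTEs that back a page table (see PGSTE_ST2_MASK and gmap_pgste_get_pgt_addr() in the pgtable hunk, and shadow_pgt_lookup() in gaccess.c). The following stand-alone sketch only models that packing arithmetic in user space; the setter function and the sample value are hypothetical illustrations, not kernel API.

```c
/* Minimal user-space model of the PGSTE ST2 packing used below. */
#include <stdint.h>
#include <stdio.h>

#define PGSTE_ST2_MASK 0x0000ffff00000000ULL	/* bits 32..47 of each PGSTE */

/* Hypothetical setter: scatter a 64-bit value across pgstes[0..3]. */
static void pgste_set_pgt_addr(uint64_t *pgstes, uint64_t val)
{
	pgstes[0] = (pgstes[0] & ~PGSTE_ST2_MASK) | ((val >> 16) & PGSTE_ST2_MASK);
	pgstes[1] = (pgstes[1] & ~PGSTE_ST2_MASK) | (val & PGSTE_ST2_MASK);
	pgstes[2] = (pgstes[2] & ~PGSTE_ST2_MASK) | ((val << 16) & PGSTE_ST2_MASK);
	pgstes[3] = (pgstes[3] & ~PGSTE_ST2_MASK) | ((val << 32) & PGSTE_ST2_MASK);
}

/* Gather it back, mirroring the gmap_pgste_get_pgt_addr() shifts in the diff. */
static uint64_t pgste_get_pgt_addr(const uint64_t *pgstes)
{
	uint64_t res;

	res  = (pgstes[0] & PGSTE_ST2_MASK) << 16;
	res |=  pgstes[1] & PGSTE_ST2_MASK;
	res |= (pgstes[2] & PGSTE_ST2_MASK) >> 16;
	res |= (pgstes[3] & PGSTE_ST2_MASK) >> 32;
	return res;
}

int main(void)
{
	uint64_t pgstes[4] = { 0 };
	uint64_t addr = 0x0000123456789000ULL;	/* made-up origin + flag bits */

	pgste_set_pgt_addr(pgstes, addr);
	printf("round-trip: %#llx\n", (unsigned long long)pgste_get_pgt_addr(pgstes));
	return 0;
}
```

Each 16-bit slice of the 64-bit value lives at the same bit position (32..47) in one of the four PGSTEs, so the getter is just four masked shifts OR-ed together, which is exactly the shape of the helper added to the header below.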
@ -1419,7 +1419,7 @@ fetch) is injected in the guest.
|
||||||
S390:
|
S390:
|
||||||
^^^^^
|
^^^^^
|
||||||
|
|
||||||
Returns -EINVAL if the VM has the KVM_VM_S390_UCONTROL flag set.
|
Returns -EINVAL or -EEXIST if the VM has the KVM_VM_S390_UCONTROL flag set.
|
||||||
Returns -EINVAL if called on a protected VM.
|
Returns -EINVAL if called on a protected VM.
|
||||||
|
|
||||||
4.36 KVM_SET_TSS_ADDR
|
4.36 KVM_SET_TSS_ADDR
|
||||||
|
|
|
@ -471,10 +471,8 @@ static void timer_emulate(struct arch_timer_context *ctx)
|
||||||
|
|
||||||
trace_kvm_timer_emulate(ctx, should_fire);
|
trace_kvm_timer_emulate(ctx, should_fire);
|
||||||
|
|
||||||
if (should_fire != ctx->irq.level) {
|
if (should_fire != ctx->irq.level)
|
||||||
kvm_timer_update_irq(ctx->vcpu, should_fire, ctx);
|
kvm_timer_update_irq(ctx->vcpu, should_fire, ctx);
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
kvm_timer_update_status(ctx, should_fire);
|
kvm_timer_update_status(ctx, should_fire);
|
||||||
|
|
||||||
|
@ -761,21 +759,6 @@ static void kvm_timer_vcpu_load_nested_switch(struct kvm_vcpu *vcpu,
|
||||||
timer_irq(map->direct_ptimer),
|
timer_irq(map->direct_ptimer),
|
||||||
&arch_timer_irq_ops);
|
&arch_timer_irq_ops);
|
||||||
WARN_ON_ONCE(ret);
|
WARN_ON_ONCE(ret);
|
||||||
|
|
||||||
/*
|
|
||||||
* The virtual offset behaviour is "interesting", as it
|
|
||||||
* always applies when HCR_EL2.E2H==0, but only when
|
|
||||||
* accessed from EL1 when HCR_EL2.E2H==1. So make sure we
|
|
||||||
* track E2H when putting the HV timer in "direct" mode.
|
|
||||||
*/
|
|
||||||
if (map->direct_vtimer == vcpu_hvtimer(vcpu)) {
|
|
||||||
struct arch_timer_offset *offs = &map->direct_vtimer->offset;
|
|
||||||
|
|
||||||
if (vcpu_el2_e2h_is_set(vcpu))
|
|
||||||
offs->vcpu_offset = NULL;
|
|
||||||
else
|
|
||||||
offs->vcpu_offset = &__vcpu_sys_reg(vcpu, CNTVOFF_EL2);
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -976,31 +959,21 @@ void kvm_timer_sync_nested(struct kvm_vcpu *vcpu)
|
||||||
* which allows trapping of the timer registers even with NV2.
|
* which allows trapping of the timer registers even with NV2.
|
||||||
* Still, this is still worse than FEAT_NV on its own. Meh.
|
* Still, this is still worse than FEAT_NV on its own. Meh.
|
||||||
*/
|
*/
|
||||||
if (!vcpu_el2_e2h_is_set(vcpu)) {
|
if (!cpus_have_final_cap(ARM64_HAS_ECV)) {
|
||||||
if (cpus_have_final_cap(ARM64_HAS_ECV))
|
|
||||||
return;
|
|
||||||
|
|
||||||
/*
|
|
||||||
* A non-VHE guest hypervisor doesn't have any direct access
|
|
||||||
* to its timers: the EL2 registers trap (and the HW is
|
|
||||||
* fully emulated), while the EL0 registers access memory
|
|
||||||
* despite the access being notionally direct. Boo.
|
|
||||||
*
|
|
||||||
* We update the hardware timer registers with the
|
|
||||||
* latest value written by the guest to the VNCR page
|
|
||||||
* and let the hardware take care of the rest.
|
|
||||||
*/
|
|
||||||
write_sysreg_el0(__vcpu_sys_reg(vcpu, CNTV_CTL_EL0), SYS_CNTV_CTL);
|
|
||||||
write_sysreg_el0(__vcpu_sys_reg(vcpu, CNTV_CVAL_EL0), SYS_CNTV_CVAL);
|
|
||||||
write_sysreg_el0(__vcpu_sys_reg(vcpu, CNTP_CTL_EL0), SYS_CNTP_CTL);
|
|
||||||
write_sysreg_el0(__vcpu_sys_reg(vcpu, CNTP_CVAL_EL0), SYS_CNTP_CVAL);
|
|
||||||
} else {
|
|
||||||
/*
|
/*
|
||||||
* For a VHE guest hypervisor, the EL2 state is directly
|
* For a VHE guest hypervisor, the EL2 state is directly
|
||||||
* stored in the host EL1 timers, while the emulated EL0
|
* stored in the host EL1 timers, while the emulated EL1
|
||||||
* state is stored in the VNCR page. The latter could have
|
* state is stored in the VNCR page. The latter could have
|
||||||
* been updated behind our back, and we must reset the
|
* been updated behind our back, and we must reset the
|
||||||
* emulation of the timers.
|
* emulation of the timers.
|
||||||
|
*
|
||||||
|
* A non-VHE guest hypervisor doesn't have any direct access
|
||||||
|
* to its timers: the EL2 registers trap despite being
|
||||||
|
* notionally direct (we use the EL1 HW, as for VHE), while
|
||||||
|
* the EL1 registers access memory.
|
||||||
|
*
|
||||||
|
* In both cases, process the emulated timers on each guest
|
||||||
|
* exit. Boo.
|
||||||
*/
|
*/
|
||||||
struct timer_map map;
|
struct timer_map map;
|
||||||
get_timer_map(vcpu, &map);
|
get_timer_map(vcpu, &map);
|
||||||
|
|
|
@ -2290,6 +2290,19 @@ static int __init init_subsystems(void)
|
||||||
break;
|
break;
|
||||||
case -ENODEV:
|
case -ENODEV:
|
||||||
case -ENXIO:
|
case -ENXIO:
|
||||||
|
/*
|
||||||
|
* No VGIC? No pKVM for you.
|
||||||
|
*
|
||||||
|
* Protected mode assumes that VGICv3 is present, so no point
|
||||||
|
* in trying to hobble along if vgic initialization fails.
|
||||||
|
*/
|
||||||
|
if (is_protected_kvm_enabled())
|
||||||
|
goto out;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Otherwise, userspace could choose to implement a GIC for its
|
||||||
|
* guest on non-cooperative hardware.
|
||||||
|
*/
|
||||||
vgic_present = false;
|
vgic_present = false;
|
||||||
err = 0;
|
err = 0;
|
||||||
break;
|
break;
|
||||||
|
@ -2400,6 +2413,13 @@ static void kvm_hyp_init_symbols(void)
|
||||||
kvm_nvhe_sym(id_aa64smfr0_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64SMFR0_EL1);
|
kvm_nvhe_sym(id_aa64smfr0_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64SMFR0_EL1);
|
||||||
kvm_nvhe_sym(__icache_flags) = __icache_flags;
|
kvm_nvhe_sym(__icache_flags) = __icache_flags;
|
||||||
kvm_nvhe_sym(kvm_arm_vmid_bits) = kvm_arm_vmid_bits;
|
kvm_nvhe_sym(kvm_arm_vmid_bits) = kvm_arm_vmid_bits;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Flush entire BSS since part of its data containing init symbols is read
|
||||||
|
* while the MMU is off.
|
||||||
|
*/
|
||||||
|
kvm_flush_dcache_to_poc(kvm_ksym_ref(__hyp_bss_start),
|
||||||
|
kvm_ksym_ref(__hyp_bss_end) - kvm_ksym_ref(__hyp_bss_start));
|
||||||
}
|
}
|
||||||
|
|
||||||
static int __init kvm_hyp_init_protection(u32 hyp_va_bits)
|
static int __init kvm_hyp_init_protection(u32 hyp_va_bits)
|
||||||
|
|
|
@ -91,11 +91,34 @@ static void fpsimd_sve_sync(struct kvm_vcpu *vcpu)
|
||||||
*host_data_ptr(fp_owner) = FP_STATE_HOST_OWNED;
|
*host_data_ptr(fp_owner) = FP_STATE_HOST_OWNED;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static void flush_debug_state(struct pkvm_hyp_vcpu *hyp_vcpu)
|
||||||
|
{
|
||||||
|
struct kvm_vcpu *host_vcpu = hyp_vcpu->host_vcpu;
|
||||||
|
|
||||||
|
hyp_vcpu->vcpu.arch.debug_owner = host_vcpu->arch.debug_owner;
|
||||||
|
|
||||||
|
if (kvm_guest_owns_debug_regs(&hyp_vcpu->vcpu))
|
||||||
|
hyp_vcpu->vcpu.arch.vcpu_debug_state = host_vcpu->arch.vcpu_debug_state;
|
||||||
|
else if (kvm_host_owns_debug_regs(&hyp_vcpu->vcpu))
|
||||||
|
hyp_vcpu->vcpu.arch.external_debug_state = host_vcpu->arch.external_debug_state;
|
||||||
|
}
|
||||||
|
|
||||||
|
static void sync_debug_state(struct pkvm_hyp_vcpu *hyp_vcpu)
|
||||||
|
{
|
||||||
|
struct kvm_vcpu *host_vcpu = hyp_vcpu->host_vcpu;
|
||||||
|
|
||||||
|
if (kvm_guest_owns_debug_regs(&hyp_vcpu->vcpu))
|
||||||
|
host_vcpu->arch.vcpu_debug_state = hyp_vcpu->vcpu.arch.vcpu_debug_state;
|
||||||
|
else if (kvm_host_owns_debug_regs(&hyp_vcpu->vcpu))
|
||||||
|
host_vcpu->arch.external_debug_state = hyp_vcpu->vcpu.arch.external_debug_state;
|
||||||
|
}
|
||||||
|
|
||||||
static void flush_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu)
|
static void flush_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu)
|
||||||
{
|
{
|
||||||
struct kvm_vcpu *host_vcpu = hyp_vcpu->host_vcpu;
|
struct kvm_vcpu *host_vcpu = hyp_vcpu->host_vcpu;
|
||||||
|
|
||||||
fpsimd_sve_flush();
|
fpsimd_sve_flush();
|
||||||
|
flush_debug_state(hyp_vcpu);
|
||||||
|
|
||||||
hyp_vcpu->vcpu.arch.ctxt = host_vcpu->arch.ctxt;
|
hyp_vcpu->vcpu.arch.ctxt = host_vcpu->arch.ctxt;
|
||||||
|
|
||||||
|
@ -123,6 +146,7 @@ static void sync_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu)
|
||||||
unsigned int i;
|
unsigned int i;
|
||||||
|
|
||||||
fpsimd_sve_sync(&hyp_vcpu->vcpu);
|
fpsimd_sve_sync(&hyp_vcpu->vcpu);
|
||||||
|
sync_debug_state(hyp_vcpu);
|
||||||
|
|
||||||
host_vcpu->arch.ctxt = hyp_vcpu->vcpu.arch.ctxt;
|
host_vcpu->arch.ctxt = hyp_vcpu->vcpu.arch.ctxt;
|
||||||
|
|
||||||
|
|
|
@ -67,26 +67,27 @@ int kvm_vcpu_init_nested(struct kvm_vcpu *vcpu)
|
||||||
if (!tmp)
|
if (!tmp)
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
|
|
||||||
|
swap(kvm->arch.nested_mmus, tmp);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* If we went through a realocation, adjust the MMU back-pointers in
|
* If we went through a realocation, adjust the MMU back-pointers in
|
||||||
* the previously initialised kvm_pgtable structures.
|
* the previously initialised kvm_pgtable structures.
|
||||||
*/
|
*/
|
||||||
if (kvm->arch.nested_mmus != tmp)
|
if (kvm->arch.nested_mmus != tmp)
|
||||||
for (int i = 0; i < kvm->arch.nested_mmus_size; i++)
|
for (int i = 0; i < kvm->arch.nested_mmus_size; i++)
|
||||||
tmp[i].pgt->mmu = &tmp[i];
|
kvm->arch.nested_mmus[i].pgt->mmu = &kvm->arch.nested_mmus[i];
|
||||||
|
|
||||||
for (int i = kvm->arch.nested_mmus_size; !ret && i < num_mmus; i++)
|
for (int i = kvm->arch.nested_mmus_size; !ret && i < num_mmus; i++)
|
||||||
ret = init_nested_s2_mmu(kvm, &tmp[i]);
|
ret = init_nested_s2_mmu(kvm, &kvm->arch.nested_mmus[i]);
|
||||||
|
|
||||||
if (ret) {
|
if (ret) {
|
||||||
for (int i = kvm->arch.nested_mmus_size; i < num_mmus; i++)
|
for (int i = kvm->arch.nested_mmus_size; i < num_mmus; i++)
|
||||||
kvm_free_stage2_pgd(&tmp[i]);
|
kvm_free_stage2_pgd(&kvm->arch.nested_mmus[i]);
|
||||||
|
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
kvm->arch.nested_mmus_size = num_mmus;
|
kvm->arch.nested_mmus_size = num_mmus;
|
||||||
kvm->arch.nested_mmus = tmp;
|
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
|
@ -1452,6 +1452,16 @@ static bool access_arch_timer(struct kvm_vcpu *vcpu,
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static bool access_hv_timer(struct kvm_vcpu *vcpu,
|
||||||
|
struct sys_reg_params *p,
|
||||||
|
const struct sys_reg_desc *r)
|
||||||
|
{
|
||||||
|
if (!vcpu_el2_e2h_is_set(vcpu))
|
||||||
|
return undef_access(vcpu, p, r);
|
||||||
|
|
||||||
|
return access_arch_timer(vcpu, p, r);
|
||||||
|
}
|
||||||
|
|
||||||
static s64 kvm_arm64_ftr_safe_value(u32 id, const struct arm64_ftr_bits *ftrp,
|
static s64 kvm_arm64_ftr_safe_value(u32 id, const struct arm64_ftr_bits *ftrp,
|
||||||
s64 new, s64 cur)
|
s64 new, s64 cur)
|
||||||
{
|
{
|
||||||
|
@ -3103,9 +3113,9 @@ static const struct sys_reg_desc sys_reg_descs[] = {
|
||||||
EL2_REG(CNTHP_CTL_EL2, access_arch_timer, reset_val, 0),
|
EL2_REG(CNTHP_CTL_EL2, access_arch_timer, reset_val, 0),
|
||||||
EL2_REG(CNTHP_CVAL_EL2, access_arch_timer, reset_val, 0),
|
EL2_REG(CNTHP_CVAL_EL2, access_arch_timer, reset_val, 0),
|
||||||
|
|
||||||
{ SYS_DESC(SYS_CNTHV_TVAL_EL2), access_arch_timer },
|
{ SYS_DESC(SYS_CNTHV_TVAL_EL2), access_hv_timer },
|
||||||
EL2_REG(CNTHV_CTL_EL2, access_arch_timer, reset_val, 0),
|
EL2_REG(CNTHV_CTL_EL2, access_hv_timer, reset_val, 0),
|
||||||
EL2_REG(CNTHV_CVAL_EL2, access_arch_timer, reset_val, 0),
|
EL2_REG(CNTHV_CVAL_EL2, access_hv_timer, reset_val, 0),
|
||||||
|
|
||||||
{ SYS_DESC(SYS_CNTKCTL_EL12), access_cntkctl_el12 },
|
{ SYS_DESC(SYS_CNTKCTL_EL12), access_cntkctl_el12 },
|
||||||
|
|
||||||
|
|
|
@ -23,7 +23,6 @@
|
||||||
/**
|
/**
|
||||||
* struct gmap_struct - guest address space
|
* struct gmap_struct - guest address space
|
||||||
* @list: list head for the mm->context gmap list
|
* @list: list head for the mm->context gmap list
|
||||||
* @crst_list: list of all crst tables used in the guest address space
|
|
||||||
* @mm: pointer to the parent mm_struct
|
* @mm: pointer to the parent mm_struct
|
||||||
* @guest_to_host: radix tree with guest to host address translation
|
* @guest_to_host: radix tree with guest to host address translation
|
||||||
* @host_to_guest: radix tree with pointer to segment table entries
|
* @host_to_guest: radix tree with pointer to segment table entries
|
||||||
|
@ -35,7 +34,6 @@
|
||||||
* @guest_handle: protected virtual machine handle for the ultravisor
|
* @guest_handle: protected virtual machine handle for the ultravisor
|
||||||
* @host_to_rmap: radix tree with gmap_rmap lists
|
* @host_to_rmap: radix tree with gmap_rmap lists
|
||||||
* @children: list of shadow gmap structures
|
* @children: list of shadow gmap structures
|
||||||
* @pt_list: list of all page tables used in the shadow guest address space
|
|
||||||
* @shadow_lock: spinlock to protect the shadow gmap list
|
* @shadow_lock: spinlock to protect the shadow gmap list
|
||||||
* @parent: pointer to the parent gmap for shadow guest address spaces
|
* @parent: pointer to the parent gmap for shadow guest address spaces
|
||||||
* @orig_asce: ASCE for which the shadow page table has been created
|
* @orig_asce: ASCE for which the shadow page table has been created
|
||||||
|
@ -45,7 +43,6 @@
|
||||||
*/
|
*/
|
||||||
struct gmap {
|
struct gmap {
|
||||||
struct list_head list;
|
struct list_head list;
|
||||||
struct list_head crst_list;
|
|
||||||
struct mm_struct *mm;
|
struct mm_struct *mm;
|
||||||
struct radix_tree_root guest_to_host;
|
struct radix_tree_root guest_to_host;
|
||||||
struct radix_tree_root host_to_guest;
|
struct radix_tree_root host_to_guest;
|
||||||
|
@ -61,7 +58,6 @@ struct gmap {
|
||||||
/* Additional data for shadow guest address spaces */
|
/* Additional data for shadow guest address spaces */
|
||||||
struct radix_tree_root host_to_rmap;
|
struct radix_tree_root host_to_rmap;
|
||||||
struct list_head children;
|
struct list_head children;
|
||||||
struct list_head pt_list;
|
|
||||||
spinlock_t shadow_lock;
|
spinlock_t shadow_lock;
|
||||||
struct gmap *parent;
|
struct gmap *parent;
|
||||||
unsigned long orig_asce;
|
unsigned long orig_asce;
|
||||||
|
@ -106,23 +102,21 @@ struct gmap *gmap_create(struct mm_struct *mm, unsigned long limit);
|
||||||
void gmap_remove(struct gmap *gmap);
|
void gmap_remove(struct gmap *gmap);
|
||||||
struct gmap *gmap_get(struct gmap *gmap);
|
struct gmap *gmap_get(struct gmap *gmap);
|
||||||
void gmap_put(struct gmap *gmap);
|
void gmap_put(struct gmap *gmap);
|
||||||
|
void gmap_free(struct gmap *gmap);
|
||||||
|
struct gmap *gmap_alloc(unsigned long limit);
|
||||||
|
|
||||||
int gmap_map_segment(struct gmap *gmap, unsigned long from,
|
int gmap_map_segment(struct gmap *gmap, unsigned long from,
|
||||||
unsigned long to, unsigned long len);
|
unsigned long to, unsigned long len);
|
||||||
int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len);
|
int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len);
|
||||||
unsigned long __gmap_translate(struct gmap *, unsigned long gaddr);
|
unsigned long __gmap_translate(struct gmap *, unsigned long gaddr);
|
||||||
unsigned long gmap_translate(struct gmap *, unsigned long gaddr);
|
|
||||||
int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr);
|
int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr);
|
||||||
int gmap_fault(struct gmap *, unsigned long gaddr, unsigned int fault_flags);
|
|
||||||
void gmap_discard(struct gmap *, unsigned long from, unsigned long to);
|
void gmap_discard(struct gmap *, unsigned long from, unsigned long to);
|
||||||
void __gmap_zap(struct gmap *, unsigned long gaddr);
|
void __gmap_zap(struct gmap *, unsigned long gaddr);
|
||||||
void gmap_unlink(struct mm_struct *, unsigned long *table, unsigned long vmaddr);
|
void gmap_unlink(struct mm_struct *, unsigned long *table, unsigned long vmaddr);
|
||||||
|
|
||||||
int gmap_read_table(struct gmap *gmap, unsigned long gaddr, unsigned long *val);
|
int gmap_read_table(struct gmap *gmap, unsigned long gaddr, unsigned long *val);
|
||||||
|
|
||||||
struct gmap *gmap_shadow(struct gmap *parent, unsigned long asce,
|
void gmap_unshadow(struct gmap *sg);
|
||||||
int edat_level);
|
|
||||||
int gmap_shadow_valid(struct gmap *sg, unsigned long asce, int edat_level);
|
|
||||||
int gmap_shadow_r2t(struct gmap *sg, unsigned long saddr, unsigned long r2t,
|
int gmap_shadow_r2t(struct gmap *sg, unsigned long saddr, unsigned long r2t,
|
||||||
int fake);
|
int fake);
|
||||||
int gmap_shadow_r3t(struct gmap *sg, unsigned long saddr, unsigned long r3t,
|
int gmap_shadow_r3t(struct gmap *sg, unsigned long saddr, unsigned long r3t,
|
||||||
|
@ -131,24 +125,22 @@ int gmap_shadow_sgt(struct gmap *sg, unsigned long saddr, unsigned long sgt,
|
||||||
int fake);
|
int fake);
|
||||||
int gmap_shadow_pgt(struct gmap *sg, unsigned long saddr, unsigned long pgt,
|
int gmap_shadow_pgt(struct gmap *sg, unsigned long saddr, unsigned long pgt,
|
||||||
int fake);
|
int fake);
|
||||||
int gmap_shadow_pgt_lookup(struct gmap *sg, unsigned long saddr,
|
|
||||||
unsigned long *pgt, int *dat_protection, int *fake);
|
|
||||||
int gmap_shadow_page(struct gmap *sg, unsigned long saddr, pte_t pte);
|
int gmap_shadow_page(struct gmap *sg, unsigned long saddr, pte_t pte);
|
||||||
|
|
||||||
void gmap_register_pte_notifier(struct gmap_notifier *);
|
void gmap_register_pte_notifier(struct gmap_notifier *);
|
||||||
void gmap_unregister_pte_notifier(struct gmap_notifier *);
|
void gmap_unregister_pte_notifier(struct gmap_notifier *);
|
||||||
|
|
||||||
int gmap_mprotect_notify(struct gmap *, unsigned long start,
|
int gmap_protect_one(struct gmap *gmap, unsigned long gaddr, int prot, unsigned long bits);
|
||||||
unsigned long len, int prot);
|
|
||||||
|
|
||||||
void gmap_sync_dirty_log_pmd(struct gmap *gmap, unsigned long dirty_bitmap[4],
|
void gmap_sync_dirty_log_pmd(struct gmap *gmap, unsigned long dirty_bitmap[4],
|
||||||
unsigned long gaddr, unsigned long vmaddr);
|
unsigned long gaddr, unsigned long vmaddr);
|
||||||
int s390_disable_cow_sharing(void);
|
int s390_disable_cow_sharing(void);
|
||||||
void s390_unlist_old_asce(struct gmap *gmap);
|
|
||||||
int s390_replace_asce(struct gmap *gmap);
|
int s390_replace_asce(struct gmap *gmap);
|
||||||
void s390_uv_destroy_pfns(unsigned long count, unsigned long *pfns);
|
void s390_uv_destroy_pfns(unsigned long count, unsigned long *pfns);
|
||||||
int __s390_uv_destroy_range(struct mm_struct *mm, unsigned long start,
|
int __s390_uv_destroy_range(struct mm_struct *mm, unsigned long start,
|
||||||
unsigned long end, bool interruptible);
|
unsigned long end, bool interruptible);
|
||||||
|
int kvm_s390_wiggle_split_folio(struct mm_struct *mm, struct folio *folio, bool split);
|
||||||
|
unsigned long *gmap_table_walk(struct gmap *gmap, unsigned long gaddr, int level);
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* s390_uv_destroy_range - Destroy a range of pages in the given mm.
|
* s390_uv_destroy_range - Destroy a range of pages in the given mm.
|
||||||
|
|
|
@ -30,6 +30,8 @@
|
||||||
#define KVM_S390_ESCA_CPU_SLOTS 248
|
#define KVM_S390_ESCA_CPU_SLOTS 248
|
||||||
#define KVM_MAX_VCPUS 255
|
#define KVM_MAX_VCPUS 255
|
||||||
|
|
||||||
|
#define KVM_INTERNAL_MEM_SLOTS 1
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* These seem to be used for allocating ->chip in the routing table, which we
|
* These seem to be used for allocating ->chip in the routing table, which we
|
||||||
* don't use. 1 is as small as we can get to reduce the needed memory. If we
|
* don't use. 1 is as small as we can get to reduce the needed memory. If we
|
||||||
|
@ -931,12 +933,14 @@ struct sie_page2 {
|
||||||
u8 reserved928[0x1000 - 0x928]; /* 0x0928 */
|
u8 reserved928[0x1000 - 0x928]; /* 0x0928 */
|
||||||
};
|
};
|
||||||
|
|
||||||
|
struct vsie_page;
|
||||||
|
|
||||||
struct kvm_s390_vsie {
|
struct kvm_s390_vsie {
|
||||||
struct mutex mutex;
|
struct mutex mutex;
|
||||||
struct radix_tree_root addr_to_page;
|
struct radix_tree_root addr_to_page;
|
||||||
int page_count;
|
int page_count;
|
||||||
int next;
|
int next;
|
||||||
struct page *pages[KVM_MAX_VCPUS];
|
struct vsie_page *pages[KVM_MAX_VCPUS];
|
||||||
};
|
};
|
||||||
|
|
||||||
struct kvm_s390_gisa_iam {
|
struct kvm_s390_gisa_iam {
|
||||||
|
|
|
@ -420,9 +420,10 @@ void setup_protection_map(void);
|
||||||
#define PGSTE_HC_BIT 0x0020000000000000UL
|
#define PGSTE_HC_BIT 0x0020000000000000UL
|
||||||
#define PGSTE_GR_BIT 0x0004000000000000UL
|
#define PGSTE_GR_BIT 0x0004000000000000UL
|
||||||
#define PGSTE_GC_BIT 0x0002000000000000UL
|
#define PGSTE_GC_BIT 0x0002000000000000UL
|
||||||
#define PGSTE_UC_BIT 0x0000800000000000UL /* user dirty (migration) */
|
#define PGSTE_ST2_MASK 0x0000ffff00000000UL
|
||||||
#define PGSTE_IN_BIT 0x0000400000000000UL /* IPTE notify bit */
|
#define PGSTE_UC_BIT 0x0000000000008000UL /* user dirty (migration) */
|
||||||
#define PGSTE_VSIE_BIT 0x0000200000000000UL /* ref'd in a shadow table */
|
#define PGSTE_IN_BIT 0x0000000000004000UL /* IPTE notify bit */
|
||||||
|
#define PGSTE_VSIE_BIT 0x0000000000002000UL /* ref'd in a shadow table */
|
||||||
|
|
||||||
/* Guest Page State used for virtualization */
|
/* Guest Page State used for virtualization */
|
||||||
#define _PGSTE_GPS_ZERO 0x0000000080000000UL
|
#define _PGSTE_GPS_ZERO 0x0000000080000000UL
|
||||||
|
@ -2007,4 +2008,18 @@ extern void s390_reset_cmma(struct mm_struct *mm);
|
||||||
#define pmd_pgtable(pmd) \
|
#define pmd_pgtable(pmd) \
|
||||||
((pgtable_t)__va(pmd_val(pmd) & -sizeof(pte_t)*PTRS_PER_PTE))
|
((pgtable_t)__va(pmd_val(pmd) & -sizeof(pte_t)*PTRS_PER_PTE))
|
||||||
|
|
||||||
|
static inline unsigned long gmap_pgste_get_pgt_addr(unsigned long *pgt)
|
||||||
|
{
|
||||||
|
unsigned long *pgstes, res;
|
||||||
|
|
||||||
|
pgstes = pgt + _PAGE_ENTRIES;
|
||||||
|
|
||||||
|
res = (pgstes[0] & PGSTE_ST2_MASK) << 16;
|
||||||
|
res |= pgstes[1] & PGSTE_ST2_MASK;
|
||||||
|
res |= (pgstes[2] & PGSTE_ST2_MASK) >> 16;
|
||||||
|
res |= (pgstes[3] & PGSTE_ST2_MASK) >> 32;
|
||||||
|
|
||||||
|
return res;
|
||||||
|
}
|
||||||
|
|
||||||
#endif /* _S390_PAGE_H */
|
#endif /* _S390_PAGE_H */
|
||||||
|
|
|
@ -628,12 +628,12 @@ static inline int is_prot_virt_host(void)
|
||||||
}
|
}
|
||||||
|
|
||||||
int uv_pin_shared(unsigned long paddr);
|
int uv_pin_shared(unsigned long paddr);
|
||||||
int gmap_make_secure(struct gmap *gmap, unsigned long gaddr, void *uvcb);
|
|
||||||
int gmap_destroy_page(struct gmap *gmap, unsigned long gaddr);
|
|
||||||
int uv_destroy_folio(struct folio *folio);
|
int uv_destroy_folio(struct folio *folio);
|
||||||
int uv_destroy_pte(pte_t pte);
|
int uv_destroy_pte(pte_t pte);
|
||||||
int uv_convert_from_secure_pte(pte_t pte);
|
int uv_convert_from_secure_pte(pte_t pte);
|
||||||
int gmap_convert_to_secure(struct gmap *gmap, unsigned long gaddr);
|
int make_folio_secure(struct folio *folio, struct uv_cb_header *uvcb);
|
||||||
|
int uv_convert_from_secure(unsigned long paddr);
|
||||||
|
int uv_convert_from_secure_folio(struct folio *folio);
|
||||||
|
|
||||||
void setup_uv(void);
|
void setup_uv(void);
|
||||||
|
|
||||||
|
|
|
@ -19,19 +19,6 @@
|
||||||
#include <asm/sections.h>
|
#include <asm/sections.h>
|
||||||
#include <asm/uv.h>
|
#include <asm/uv.h>
|
||||||
|
|
||||||
#if !IS_ENABLED(CONFIG_KVM)
|
|
||||||
unsigned long __gmap_translate(struct gmap *gmap, unsigned long gaddr)
|
|
||||||
{
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
int gmap_fault(struct gmap *gmap, unsigned long gaddr,
|
|
||||||
unsigned int fault_flags)
|
|
||||||
{
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
#endif
|
|
||||||
|
|
||||||
/* the bootdata_preserved fields come from ones in arch/s390/boot/uv.c */
|
/* the bootdata_preserved fields come from ones in arch/s390/boot/uv.c */
|
||||||
int __bootdata_preserved(prot_virt_guest);
|
int __bootdata_preserved(prot_virt_guest);
|
||||||
EXPORT_SYMBOL(prot_virt_guest);
|
EXPORT_SYMBOL(prot_virt_guest);
|
||||||
|
@ -159,6 +146,7 @@ int uv_destroy_folio(struct folio *folio)
|
||||||
folio_put(folio);
|
folio_put(folio);
|
||||||
return rc;
|
return rc;
|
||||||
}
|
}
|
||||||
|
EXPORT_SYMBOL(uv_destroy_folio);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* The present PTE still indirectly holds a folio reference through the mapping.
|
* The present PTE still indirectly holds a folio reference through the mapping.
|
||||||
|
@ -175,7 +163,7 @@ int uv_destroy_pte(pte_t pte)
|
||||||
*
|
*
|
||||||
* @paddr: Absolute host address of page to be exported
|
* @paddr: Absolute host address of page to be exported
|
||||||
*/
|
*/
|
||||||
static int uv_convert_from_secure(unsigned long paddr)
|
int uv_convert_from_secure(unsigned long paddr)
|
||||||
{
|
{
|
||||||
struct uv_cb_cfs uvcb = {
|
struct uv_cb_cfs uvcb = {
|
||||||
.header.cmd = UVC_CMD_CONV_FROM_SEC_STOR,
|
.header.cmd = UVC_CMD_CONV_FROM_SEC_STOR,
|
||||||
|
@ -187,11 +175,12 @@ static int uv_convert_from_secure(unsigned long paddr)
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
EXPORT_SYMBOL_GPL(uv_convert_from_secure);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* The caller must already hold a reference to the folio.
|
* The caller must already hold a reference to the folio.
|
||||||
*/
|
*/
|
||||||
static int uv_convert_from_secure_folio(struct folio *folio)
|
int uv_convert_from_secure_folio(struct folio *folio)
|
||||||
{
|
{
|
||||||
int rc;
|
int rc;
|
||||||
|
|
||||||
|
@ -206,6 +195,7 @@ static int uv_convert_from_secure_folio(struct folio *folio)
|
||||||
folio_put(folio);
|
folio_put(folio);
|
||||||
return rc;
|
return rc;
|
||||||
}
|
}
|
||||||
|
EXPORT_SYMBOL_GPL(uv_convert_from_secure_folio);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* The present PTE still indirectly holds a folio reference through the mapping.
|
* The present PTE still indirectly holds a folio reference through the mapping.
|
||||||
|
@ -237,13 +227,33 @@ static int expected_folio_refs(struct folio *folio)
|
||||||
return res;
|
return res;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int make_folio_secure(struct folio *folio, struct uv_cb_header *uvcb)
|
/**
|
||||||
|
* make_folio_secure() - make a folio secure
|
||||||
|
* @folio: the folio to make secure
|
||||||
|
* @uvcb: the uvcb that describes the UVC to be used
|
||||||
|
*
|
||||||
|
* The folio @folio will be made secure if possible, @uvcb will be passed
|
||||||
|
* as-is to the UVC.
|
||||||
|
*
|
||||||
|
* Return: 0 on success;
|
||||||
|
* -EBUSY if the folio is in writeback or has too many references;
|
||||||
|
* -E2BIG if the folio is large;
|
||||||
|
* -EAGAIN if the UVC needs to be attempted again;
|
||||||
|
* -ENXIO if the address is not mapped;
|
||||||
|
* -EINVAL if the UVC failed for other reasons.
|
||||||
|
*
|
||||||
|
* Context: The caller must hold exactly one extra reference on the folio
|
||||||
|
* (it's the same logic as split_folio())
|
||||||
|
*/
|
||||||
|
int make_folio_secure(struct folio *folio, struct uv_cb_header *uvcb)
|
||||||
{
|
{
|
||||||
int expected, cc = 0;
|
int expected, cc = 0;
|
||||||
|
|
||||||
|
if (folio_test_large(folio))
|
||||||
|
return -E2BIG;
|
||||||
if (folio_test_writeback(folio))
|
if (folio_test_writeback(folio))
|
||||||
return -EAGAIN;
|
return -EBUSY;
|
||||||
expected = expected_folio_refs(folio);
|
expected = expected_folio_refs(folio) + 1;
|
||||||
if (!folio_ref_freeze(folio, expected))
|
if (!folio_ref_freeze(folio, expected))
|
||||||
return -EBUSY;
|
return -EBUSY;
|
||||||
set_bit(PG_arch_1, &folio->flags);
|
set_bit(PG_arch_1, &folio->flags);
|
||||||
|
@ -267,251 +277,7 @@ static int make_folio_secure(struct folio *folio, struct uv_cb_header *uvcb)
|
||||||
return -EAGAIN;
|
return -EAGAIN;
|
||||||
return uvcb->rc == 0x10a ? -ENXIO : -EINVAL;
|
return uvcb->rc == 0x10a ? -ENXIO : -EINVAL;
|
||||||
}
|
}
|
||||||
|
EXPORT_SYMBOL_GPL(make_folio_secure);
|
||||||
/**
|
|
||||||
* should_export_before_import - Determine whether an export is needed
|
|
||||||
* before an import-like operation
|
|
||||||
* @uvcb: the Ultravisor control block of the UVC to be performed
|
|
||||||
* @mm: the mm of the process
|
|
||||||
*
|
|
||||||
* Returns whether an export is needed before every import-like operation.
|
|
||||||
* This is needed for shared pages, which don't trigger a secure storage
|
|
||||||
* exception when accessed from a different guest.
|
|
||||||
*
|
|
||||||
* Although considered as one, the Unpin Page UVC is not an actual import,
|
|
||||||
* so it is not affected.
|
|
||||||
*
|
|
||||||
* No export is needed also when there is only one protected VM, because the
|
|
||||||
* page cannot belong to the wrong VM in that case (there is no "other VM"
|
|
||||||
* it can belong to).
|
|
||||||
*
|
|
||||||
* Return: true if an export is needed before every import, otherwise false.
|
|
||||||
*/
|
|
||||||
static bool should_export_before_import(struct uv_cb_header *uvcb, struct mm_struct *mm)
|
|
||||||
{
|
|
||||||
/*
|
|
||||||
* The misc feature indicates, among other things, that importing a
|
|
||||||
* shared page from a different protected VM will automatically also
|
|
||||||
* transfer its ownership.
|
|
||||||
*/
|
|
||||||
if (uv_has_feature(BIT_UV_FEAT_MISC))
|
|
||||||
return false;
|
|
||||||
if (uvcb->cmd == UVC_CMD_UNPIN_PAGE_SHARED)
|
|
||||||
return false;
|
|
||||||
return atomic_read(&mm->context.protected_count) > 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Drain LRU caches: the local one on first invocation and the ones of all
|
|
||||||
* CPUs on successive invocations. Returns "true" on the first invocation.
|
|
||||||
*/
|
|
||||||
static bool drain_lru(bool *drain_lru_called)
|
|
||||||
{
|
|
||||||
/*
|
|
||||||
* If we have tried a local drain and the folio refcount
|
|
||||||
* still does not match our expected safe value, try with a
|
|
||||||
* system wide drain. This is needed if the pagevecs holding
|
|
||||||
* the page are on a different CPU.
|
|
||||||
*/
|
|
||||||
if (*drain_lru_called) {
|
|
||||||
lru_add_drain_all();
|
|
||||||
/* We give up here, don't retry immediately. */
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
/*
|
|
||||||
* We are here if the folio refcount does not match the
|
|
||||||
* expected safe value. The main culprits are usually
|
|
||||||
* pagevecs. With lru_add_drain() we drain the pagevecs
|
|
||||||
* on the local CPU so that hopefully the refcount will
|
|
||||||
* reach the expected safe value.
|
|
||||||
*/
|
|
||||||
lru_add_drain();
|
|
||||||
*drain_lru_called = true;
|
|
||||||
/* The caller should try again immediately */
|
|
||||||
return true;
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Requests the Ultravisor to make a page accessible to a guest.
|
|
||||||
* If it's brought in the first time, it will be cleared. If
|
|
||||||
* it has been exported before, it will be decrypted and integrity
|
|
||||||
* checked.
|
|
||||||
*/
|
|
||||||
int gmap_make_secure(struct gmap *gmap, unsigned long gaddr, void *uvcb)
|
|
||||||
{
|
|
||||||
struct vm_area_struct *vma;
|
|
||||||
bool drain_lru_called = false;
|
|
||||||
spinlock_t *ptelock;
|
|
||||||
unsigned long uaddr;
|
|
||||||
struct folio *folio;
|
|
||||||
pte_t *ptep;
|
|
||||||
int rc;
|
|
||||||
|
|
||||||
again:
|
|
||||||
rc = -EFAULT;
|
|
||||||
mmap_read_lock(gmap->mm);
|
|
||||||
|
|
||||||
uaddr = __gmap_translate(gmap, gaddr);
|
|
||||||
if (IS_ERR_VALUE(uaddr))
|
|
||||||
goto out;
|
|
||||||
vma = vma_lookup(gmap->mm, uaddr);
|
|
||||||
if (!vma)
|
|
||||||
goto out;
|
|
||||||
/*
|
|
||||||
* Secure pages cannot be huge and userspace should not combine both.
|
|
||||||
* In case userspace does it anyway this will result in an -EFAULT for
|
|
||||||
* the unpack. The guest is thus never reaching secure mode. If
|
|
||||||
* userspace is playing dirty tricky with mapping huge pages later
|
|
||||||
* on this will result in a segmentation fault.
|
|
||||||
*/
|
|
||||||
if (is_vm_hugetlb_page(vma))
|
|
||||||
goto out;
|
|
||||||
|
|
||||||
rc = -ENXIO;
|
|
||||||
ptep = get_locked_pte(gmap->mm, uaddr, &ptelock);
|
|
||||||
if (!ptep)
|
|
||||||
goto out;
|
|
||||||
if (pte_present(*ptep) && !(pte_val(*ptep) & _PAGE_INVALID) && pte_write(*ptep)) {
|
|
||||||
folio = page_folio(pte_page(*ptep));
|
|
||||||
rc = -EAGAIN;
|
|
||||||
if (folio_test_large(folio)) {
|
|
||||||
rc = -E2BIG;
|
|
||||||
} else if (folio_trylock(folio)) {
|
|
||||||
if (should_export_before_import(uvcb, gmap->mm))
|
|
||||||
uv_convert_from_secure(PFN_PHYS(folio_pfn(folio)));
|
|
||||||
rc = make_folio_secure(folio, uvcb);
|
|
||||||
folio_unlock(folio);
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Once we drop the PTL, the folio may get unmapped and
|
|
||||||
* freed immediately. We need a temporary reference.
|
|
||||||
*/
|
|
||||||
if (rc == -EAGAIN || rc == -E2BIG)
|
|
||||||
folio_get(folio);
|
|
||||||
}
|
|
||||||
pte_unmap_unlock(ptep, ptelock);
|
|
||||||
out:
|
|
||||||
mmap_read_unlock(gmap->mm);
|
|
||||||
|
|
||||||
switch (rc) {
|
|
||||||
case -E2BIG:
|
|
||||||
folio_lock(folio);
|
|
||||||
rc = split_folio(folio);
|
|
||||||
folio_unlock(folio);
|
|
||||||
folio_put(folio);
|
|
||||||
|
|
||||||
switch (rc) {
|
|
||||||
case 0:
|
|
||||||
/* Splitting succeeded, try again immediately. */
|
|
||||||
goto again;
|
|
||||||
case -EAGAIN:
|
|
||||||
/* Additional folio references. */
|
|
||||||
if (drain_lru(&drain_lru_called))
|
|
||||||
goto again;
|
|
||||||
return -EAGAIN;
|
|
||||||
case -EBUSY:
|
|
||||||
/* Unexpected race. */
|
|
||||||
return -EAGAIN;
|
|
||||||
}
|
|
||||||
WARN_ON_ONCE(1);
|
|
||||||
return -ENXIO;
|
|
||||||
case -EAGAIN:
|
|
||||||
/*
|
|
||||||
* If we are here because the UVC returned busy or partial
|
|
||||||
* completion, this is just a useless check, but it is safe.
|
|
||||||
*/
|
|
||||||
folio_wait_writeback(folio);
|
|
||||||
folio_put(folio);
|
|
||||||
return -EAGAIN;
|
|
||||||
case -EBUSY:
|
|
||||||
/* Additional folio references. */
|
|
||||||
if (drain_lru(&drain_lru_called))
|
|
||||||
goto again;
|
|
||||||
return -EAGAIN;
|
|
||||||
case -ENXIO:
|
|
||||||
if (gmap_fault(gmap, gaddr, FAULT_FLAG_WRITE))
|
|
||||||
return -EFAULT;
|
|
||||||
return -EAGAIN;
|
|
||||||
}
|
|
||||||
return rc;
|
|
||||||
}
|
|
||||||
EXPORT_SYMBOL_GPL(gmap_make_secure);
|
|
||||||
|
|
||||||
int gmap_convert_to_secure(struct gmap *gmap, unsigned long gaddr)
|
|
||||||
{
|
|
||||||
struct uv_cb_cts uvcb = {
|
|
||||||
.header.cmd = UVC_CMD_CONV_TO_SEC_STOR,
|
|
||||||
.header.len = sizeof(uvcb),
|
|
||||||
.guest_handle = gmap->guest_handle,
|
|
||||||
.gaddr = gaddr,
|
|
||||||
};
|
|
||||||
|
|
||||||
return gmap_make_secure(gmap, gaddr, &uvcb);
|
|
||||||
}
|
|
||||||
EXPORT_SYMBOL_GPL(gmap_convert_to_secure);
|
|
||||||
|
|
||||||
/**
|
|
||||||
* gmap_destroy_page - Destroy a guest page.
|
|
||||||
* @gmap: the gmap of the guest
|
|
||||||
* @gaddr: the guest address to destroy
|
|
||||||
*
|
|
||||||
* An attempt will be made to destroy the given guest page. If the attempt
|
|
||||||
* fails, an attempt is made to export the page. If both attempts fail, an
|
|
||||||
* appropriate error is returned.
|
|
||||||
*/
|
|
||||||
int gmap_destroy_page(struct gmap *gmap, unsigned long gaddr)
|
|
||||||
{
|
|
||||||
struct vm_area_struct *vma;
|
|
||||||
struct folio_walk fw;
|
|
||||||
unsigned long uaddr;
|
|
||||||
struct folio *folio;
|
|
||||||
int rc;
|
|
||||||
|
|
||||||
rc = -EFAULT;
|
|
||||||
mmap_read_lock(gmap->mm);
|
|
||||||
|
|
||||||
uaddr = __gmap_translate(gmap, gaddr);
|
|
||||||
if (IS_ERR_VALUE(uaddr))
|
|
||||||
goto out;
|
|
||||||
vma = vma_lookup(gmap->mm, uaddr);
|
|
||||||
if (!vma)
|
|
||||||
goto out;
|
|
||||||
/*
|
|
||||||
* Huge pages should not be able to become secure
|
|
||||||
*/
|
|
||||||
if (is_vm_hugetlb_page(vma))
|
|
||||||
goto out;
|
|
||||||
|
|
||||||
rc = 0;
|
|
||||||
folio = folio_walk_start(&fw, vma, uaddr, 0);
|
|
||||||
if (!folio)
|
|
||||||
goto out;
|
|
||||||
/*
|
|
||||||
* See gmap_make_secure(): large folios cannot be secure. Small
|
|
||||||
* folio implies FW_LEVEL_PTE.
|
|
||||||
*/
|
|
||||||
if (folio_test_large(folio) || !pte_write(fw.pte))
|
|
||||||
goto out_walk_end;
|
|
||||||
rc = uv_destroy_folio(folio);
|
|
||||||
/*
|
|
||||||
* Fault handlers can race; it is possible that two CPUs will fault
|
|
||||||
* on the same secure page. One CPU can destroy the page, reboot,
|
|
||||||
* re-enter secure mode and import it, while the second CPU was
|
|
||||||
* stuck at the beginning of the handler. At some point the second
|
|
||||||
* CPU will be able to progress, and it will not be able to destroy
|
|
||||||
* the page. In that case we do not want to terminate the process,
|
|
||||||
* we instead try to export the page.
|
|
||||||
*/
|
|
||||||
if (rc)
|
|
||||||
rc = uv_convert_from_secure_folio(folio);
|
|
||||||
out_walk_end:
|
|
||||||
folio_walk_end(&fw, vma);
|
|
||||||
out:
|
|
||||||
mmap_read_unlock(gmap->mm);
|
|
||||||
return rc;
|
|
||||||
}
|
|
||||||
EXPORT_SYMBOL_GPL(gmap_destroy_page);
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* To be called with the folio locked or with an extra reference! This will
|
* To be called with the folio locked or with an extra reference! This will
|
||||||
|
|
|
@ -8,7 +8,7 @@ include $(srctree)/virt/kvm/Makefile.kvm
|
||||||
ccflags-y := -Ivirt/kvm -Iarch/s390/kvm
|
ccflags-y := -Ivirt/kvm -Iarch/s390/kvm
|
||||||
|
|
||||||
kvm-y += kvm-s390.o intercept.o interrupt.o priv.o sigp.o
|
kvm-y += kvm-s390.o intercept.o interrupt.o priv.o sigp.o
|
||||||
kvm-y += diag.o gaccess.o guestdbg.o vsie.o pv.o
|
kvm-y += diag.o gaccess.o guestdbg.o vsie.o pv.o gmap.o gmap-vsie.o
|
||||||
|
|
||||||
kvm-$(CONFIG_VFIO_PCI_ZDEV_KVM) += pci.o
|
kvm-$(CONFIG_VFIO_PCI_ZDEV_KVM) += pci.o
|
||||||
obj-$(CONFIG_KVM) += kvm.o
|
obj-$(CONFIG_KVM) += kvm.o
|
||||||
|
|
|
@ -16,6 +16,7 @@
|
||||||
#include <asm/gmap.h>
|
#include <asm/gmap.h>
|
||||||
#include <asm/dat-bits.h>
|
#include <asm/dat-bits.h>
|
||||||
#include "kvm-s390.h"
|
#include "kvm-s390.h"
|
||||||
|
#include "gmap.h"
|
||||||
#include "gaccess.h"
|
#include "gaccess.h"
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@ -1392,6 +1393,44 @@ shadow_pgt:
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* shadow_pgt_lookup() - find a shadow page table
|
||||||
|
* @sg: pointer to the shadow guest address space structure
|
||||||
|
* @saddr: the address in the shadow aguest address space
|
||||||
|
* @pgt: parent gmap address of the page table to get shadowed
|
||||||
|
* @dat_protection: if the pgtable is marked as protected by dat
|
||||||
|
* @fake: pgt references contiguous guest memory block, not a pgtable
|
||||||
|
*
|
||||||
|
* Returns 0 if the shadow page table was found and -EAGAIN if the page
|
||||||
|
* table was not found.
|
||||||
|
*
|
||||||
|
* Called with sg->mm->mmap_lock in read.
|
||||||
|
*/
|
||||||
|
static int shadow_pgt_lookup(struct gmap *sg, unsigned long saddr, unsigned long *pgt,
|
||||||
|
int *dat_protection, int *fake)
|
||||||
|
{
|
||||||
|
unsigned long pt_index;
|
||||||
|
unsigned long *table;
|
||||||
|
struct page *page;
|
||||||
|
int rc;
|
||||||
|
|
||||||
|
spin_lock(&sg->guest_table_lock);
|
||||||
|
table = gmap_table_walk(sg, saddr, 1); /* get segment pointer */
|
||||||
|
if (table && !(*table & _SEGMENT_ENTRY_INVALID)) {
|
||||||
|
/* Shadow page tables are full pages (pte+pgste) */
|
||||||
|
page = pfn_to_page(*table >> PAGE_SHIFT);
|
||||||
|
pt_index = gmap_pgste_get_pgt_addr(page_to_virt(page));
|
||||||
|
*pgt = pt_index & ~GMAP_SHADOW_FAKE_TABLE;
|
||||||
|
*dat_protection = !!(*table & _SEGMENT_ENTRY_PROTECT);
|
||||||
|
*fake = !!(pt_index & GMAP_SHADOW_FAKE_TABLE);
|
||||||
|
rc = 0;
|
||||||
|
} else {
|
||||||
|
rc = -EAGAIN;
|
||||||
|
}
|
||||||
|
spin_unlock(&sg->guest_table_lock);
|
||||||
|
return rc;
|
||||||
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* kvm_s390_shadow_fault - handle fault on a shadow page table
|
* kvm_s390_shadow_fault - handle fault on a shadow page table
|
||||||
* @vcpu: virtual cpu
|
* @vcpu: virtual cpu
|
||||||
|
@ -1415,6 +1454,9 @@ int kvm_s390_shadow_fault(struct kvm_vcpu *vcpu, struct gmap *sg,
|
||||||
int dat_protection, fake;
|
int dat_protection, fake;
|
||||||
int rc;
|
int rc;
|
||||||
|
|
||||||
|
if (KVM_BUG_ON(!gmap_is_shadow(sg), vcpu->kvm))
|
||||||
|
return -EFAULT;
|
||||||
|
|
||||||
mmap_read_lock(sg->mm);
|
mmap_read_lock(sg->mm);
|
||||||
/*
|
/*
|
||||||
* We don't want any guest-2 tables to change - so the parent
|
* We don't want any guest-2 tables to change - so the parent
|
||||||
|
@ -1423,7 +1465,7 @@ int kvm_s390_shadow_fault(struct kvm_vcpu *vcpu, struct gmap *sg,
|
||||||
*/
|
*/
|
||||||
ipte_lock(vcpu->kvm);
|
ipte_lock(vcpu->kvm);
|
||||||
|
|
||||||
rc = gmap_shadow_pgt_lookup(sg, saddr, &pgt, &dat_protection, &fake);
|
rc = shadow_pgt_lookup(sg, saddr, &pgt, &dat_protection, &fake);
|
||||||
if (rc)
|
if (rc)
|
||||||
rc = kvm_s390_shadow_tables(sg, saddr, &pgt, &dat_protection,
|
rc = kvm_s390_shadow_tables(sg, saddr, &pgt, &dat_protection,
|
||||||
&fake);
|
&fake);
|
||||||
|
|
142
arch/s390/kvm/gmap-vsie.c
Normal file
142
arch/s390/kvm/gmap-vsie.c
Normal file
|
@ -0,0 +1,142 @@
|
||||||
|
// SPDX-License-Identifier: GPL-2.0
|
||||||
|
/*
|
||||||
|
* Guest memory management for KVM/s390 nested VMs.
|
||||||
|
*
|
||||||
|
* Copyright IBM Corp. 2008, 2020, 2024
|
||||||
|
*
|
||||||
|
* Author(s): Claudio Imbrenda <imbrenda@linux.ibm.com>
|
||||||
|
* Martin Schwidefsky <schwidefsky@de.ibm.com>
|
||||||
|
* David Hildenbrand <david@redhat.com>
|
||||||
|
* Janosch Frank <frankja@linux.vnet.ibm.com>
|
||||||
|
*/
|
||||||
|
|
||||||
|
#include <linux/compiler.h>
|
||||||
|
#include <linux/kvm.h>
|
||||||
|
#include <linux/kvm_host.h>
|
||||||
|
#include <linux/pgtable.h>
|
||||||
|
#include <linux/pagemap.h>
|
||||||
|
#include <linux/mman.h>
|
||||||
|
|
||||||
|
#include <asm/lowcore.h>
|
||||||
|
#include <asm/gmap.h>
|
||||||
|
#include <asm/uv.h>
|
||||||
|
|
||||||
|
#include "kvm-s390.h"
|
||||||
|
#include "gmap.h"
|
||||||
|
|
||||||
|
/**
|
||||||
|
* gmap_find_shadow - find a specific asce in the list of shadow tables
|
||||||
|
* @parent: pointer to the parent gmap
|
||||||
|
* @asce: ASCE for which the shadow table is created
|
||||||
|
* @edat_level: edat level to be used for the shadow translation
|
||||||
|
*
|
||||||
|
* Returns the pointer to a gmap if a shadow table with the given asce is
|
||||||
|
* already available, ERR_PTR(-EAGAIN) if another one is just being created,
|
||||||
|
* otherwise NULL
|
||||||
|
*
|
||||||
|
* Context: Called with parent->shadow_lock held
|
||||||
|
*/
|
||||||
|
static struct gmap *gmap_find_shadow(struct gmap *parent, unsigned long asce, int edat_level)
|
||||||
|
{
|
||||||
|
struct gmap *sg;
|
||||||
|
|
||||||
|
lockdep_assert_held(&parent->shadow_lock);
|
||||||
|
list_for_each_entry(sg, &parent->children, list) {
|
||||||
|
if (!gmap_shadow_valid(sg, asce, edat_level))
|
||||||
|
continue;
|
||||||
|
if (!sg->initialized)
|
||||||
|
return ERR_PTR(-EAGAIN);
|
||||||
|
refcount_inc(&sg->ref_count);
|
||||||
|
return sg;
|
||||||
|
}
|
||||||
|
return NULL;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* gmap_shadow - create/find a shadow guest address space
|
||||||
|
* @parent: pointer to the parent gmap
|
||||||
|
* @asce: ASCE for which the shadow table is created
|
||||||
|
* @edat_level: edat level to be used for the shadow translation
|
||||||
|
*
|
||||||
|
* The pages of the top level page table referred by the asce parameter
|
||||||
|
* will be set to read-only and marked in the PGSTEs of the kvm process.
|
||||||
|
* The shadow table will be removed automatically on any change to the
|
||||||
|
* PTE mapping for the source table.
|
||||||
|
*
|
||||||
|
* Returns a guest address space structure, ERR_PTR(-ENOMEM) if out of memory,
|
||||||
|
* ERR_PTR(-EAGAIN) if the caller has to retry and ERR_PTR(-EFAULT) if the
|
||||||
|
* parent gmap table could not be protected.
|
||||||
|
*/
|
||||||
|
struct gmap *gmap_shadow(struct gmap *parent, unsigned long asce, int edat_level)
|
||||||
|
{
|
||||||
|
struct gmap *sg, *new;
|
||||||
|
unsigned long limit;
|
||||||
|
int rc;
|
||||||
|
|
||||||
|
if (KVM_BUG_ON(parent->mm->context.allow_gmap_hpage_1m, (struct kvm *)parent->private) ||
|
||||||
|
KVM_BUG_ON(gmap_is_shadow(parent), (struct kvm *)parent->private))
|
||||||
|
return ERR_PTR(-EFAULT);
|
||||||
|
spin_lock(&parent->shadow_lock);
|
||||||
|
sg = gmap_find_shadow(parent, asce, edat_level);
|
||||||
|
spin_unlock(&parent->shadow_lock);
|
||||||
|
if (sg)
|
||||||
|
return sg;
|
||||||
|
/* Create a new shadow gmap */
|
||||||
|
limit = -1UL >> (33 - (((asce & _ASCE_TYPE_MASK) >> 2) * 11));
|
||||||
|
if (asce & _ASCE_REAL_SPACE)
|
||||||
|
limit = -1UL;
|
||||||
|
new = gmap_alloc(limit);
|
||||||
|
if (!new)
|
||||||
|
return ERR_PTR(-ENOMEM);
|
||||||
|
new->mm = parent->mm;
|
||||||
|
new->parent = gmap_get(parent);
|
||||||
|
new->private = parent->private;
|
||||||
|
new->orig_asce = asce;
|
||||||
|
new->edat_level = edat_level;
|
||||||
|
new->initialized = false;
|
||||||
|
spin_lock(&parent->shadow_lock);
|
||||||
|
/* Recheck if another CPU created the same shadow */
|
||||||
|
sg = gmap_find_shadow(parent, asce, edat_level);
|
||||||
|
if (sg) {
|
||||||
|
spin_unlock(&parent->shadow_lock);
|
||||||
|
gmap_free(new);
|
||||||
|
return sg;
|
||||||
|
}
|
||||||
|
if (asce & _ASCE_REAL_SPACE) {
|
||||||
|
/* only allow one real-space gmap shadow */
|
||||||
|
list_for_each_entry(sg, &parent->children, list) {
|
||||||
|
if (sg->orig_asce & _ASCE_REAL_SPACE) {
|
||||||
|
spin_lock(&sg->guest_table_lock);
|
||||||
|
gmap_unshadow(sg);
|
||||||
|
spin_unlock(&sg->guest_table_lock);
|
||||||
|
list_del(&sg->list);
|
||||||
|
gmap_put(sg);
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
refcount_set(&new->ref_count, 2);
|
||||||
|
list_add(&new->list, &parent->children);
|
||||||
|
if (asce & _ASCE_REAL_SPACE) {
|
||||||
|
/* nothing to protect, return right away */
|
||||||
|
new->initialized = true;
|
||||||
|
spin_unlock(&parent->shadow_lock);
|
||||||
|
return new;
|
||||||
|
}
|
||||||
|
spin_unlock(&parent->shadow_lock);
|
||||||
|
/* protect after insertion, so it will get properly invalidated */
|
||||||
|
mmap_read_lock(parent->mm);
|
||||||
|
rc = __kvm_s390_mprotect_many(parent, asce & _ASCE_ORIGIN,
|
||||||
|
((asce & _ASCE_TABLE_LENGTH) + 1),
|
||||||
|
PROT_READ, GMAP_NOTIFY_SHADOW);
|
||||||
|
mmap_read_unlock(parent->mm);
|
||||||
|
spin_lock(&parent->shadow_lock);
|
||||||
|
new->initialized = true;
|
||||||
|
if (rc) {
|
||||||
|
list_del(&new->list);
|
||||||
|
gmap_free(new);
|
||||||
|
new = ERR_PTR(rc);
|
||||||
|
}
|
||||||
|
spin_unlock(&parent->shadow_lock);
|
||||||
|
return new;
|
||||||
|
}
|
212
arch/s390/kvm/gmap.c
Normal file
212
arch/s390/kvm/gmap.c
Normal file
|
@ -0,0 +1,212 @@
|
||||||
|
// SPDX-License-Identifier: GPL-2.0
|
||||||
|
/*
|
||||||
|
* Guest memory management for KVM/s390
|
||||||
|
*
|
||||||
|
* Copyright IBM Corp. 2008, 2020, 2024
|
||||||
|
*
|
||||||
|
* Author(s): Claudio Imbrenda <imbrenda@linux.ibm.com>
|
||||||
|
* Martin Schwidefsky <schwidefsky@de.ibm.com>
|
||||||
|
* David Hildenbrand <david@redhat.com>
|
||||||
|
* Janosch Frank <frankja@linux.vnet.ibm.com>
|
||||||
|
*/
|
||||||
|
|
||||||
|
#include <linux/compiler.h>
|
||||||
|
#include <linux/kvm.h>
|
||||||
|
#include <linux/kvm_host.h>
|
||||||
|
#include <linux/pgtable.h>
|
||||||
|
#include <linux/pagemap.h>
|
||||||
|
|
||||||
|
#include <asm/lowcore.h>
|
||||||
|
#include <asm/gmap.h>
|
||||||
|
#include <asm/uv.h>
|
||||||
|
|
||||||
|
#include "gmap.h"
|
||||||
|
|
||||||
|
/**
|
||||||
|
* should_export_before_import - Determine whether an export is needed
|
||||||
|
* before an import-like operation
|
||||||
|
* @uvcb: the Ultravisor control block of the UVC to be performed
|
||||||
|
* @mm: the mm of the process
|
||||||
|
*
|
||||||
|
* Returns whether an export is needed before every import-like operation.
|
||||||
|
* This is needed for shared pages, which don't trigger a secure storage
|
||||||
|
* exception when accessed from a different guest.
|
||||||
|
*
|
||||||
|
* Although considered as one, the Unpin Page UVC is not an actual import,
|
||||||
|
* so it is not affected.
|
||||||
|
*
|
||||||
|
* No export is needed also when there is only one protected VM, because the
|
||||||
|
* page cannot belong to the wrong VM in that case (there is no "other VM"
|
||||||
|
* it can belong to).
|
||||||
|
*
|
||||||
|
* Return: true if an export is needed before every import, otherwise false.
|
||||||
|
*/
|
||||||
|
static bool should_export_before_import(struct uv_cb_header *uvcb, struct mm_struct *mm)
|
||||||
|
{
|
||||||
|
/*
|
||||||
|
* The misc feature indicates, among other things, that importing a
|
||||||
|
* shared page from a different protected VM will automatically also
|
||||||
|
* transfer its ownership.
|
||||||
|
*/
|
||||||
|
if (uv_has_feature(BIT_UV_FEAT_MISC))
|
||||||
|
		return false;
	if (uvcb->cmd == UVC_CMD_UNPIN_PAGE_SHARED)
		return false;
	return atomic_read(&mm->context.protected_count) > 1;
}

static int __gmap_make_secure(struct gmap *gmap, struct page *page, void *uvcb)
{
	struct folio *folio = page_folio(page);
	int rc;

	/*
	 * Secure pages cannot be huge and userspace should not combine both.
	 * In case userspace does it anyway this will result in an -EFAULT for
	 * the unpack. The guest is thus never reaching secure mode.
	 * If userspace plays dirty tricks and decides to map huge pages at a
	 * later point in time, it will receive a segmentation fault or
	 * KVM_RUN will return -EFAULT.
	 */
	if (folio_test_hugetlb(folio))
		return -EFAULT;
	if (folio_test_large(folio)) {
		mmap_read_unlock(gmap->mm);
		rc = kvm_s390_wiggle_split_folio(gmap->mm, folio, true);
		mmap_read_lock(gmap->mm);
		if (rc)
			return rc;
		folio = page_folio(page);
	}

	if (!folio_trylock(folio))
		return -EAGAIN;
	if (should_export_before_import(uvcb, gmap->mm))
		uv_convert_from_secure(folio_to_phys(folio));
	rc = make_folio_secure(folio, uvcb);
	folio_unlock(folio);

	/*
	 * In theory a race is possible and the folio might have become
	 * large again before the folio_trylock() above. In that case, no
	 * action is performed and -EAGAIN is returned; the callers will
	 * have to try again later.
	 * In most cases this implies running the VM again, getting the same
	 * exception again, and make another attempt in this function.
	 * This is expected to happen extremely rarely.
	 */
	if (rc == -E2BIG)
		return -EAGAIN;
	/* The folio has too many references, try to shake some off */
	if (rc == -EBUSY) {
		mmap_read_unlock(gmap->mm);
		kvm_s390_wiggle_split_folio(gmap->mm, folio, false);
		mmap_read_lock(gmap->mm);
		return -EAGAIN;
	}

	return rc;
}

/**
 * gmap_make_secure() - make one guest page secure
 * @gmap: the guest gmap
 * @gaddr: the guest address that needs to be made secure
 * @uvcb: the UVCB specifying which operation needs to be performed
 *
 * Context: needs to be called with kvm->srcu held.
 * Return: 0 on success, < 0 in case of error (see __gmap_make_secure()).
 */
int gmap_make_secure(struct gmap *gmap, unsigned long gaddr, void *uvcb)
{
	struct kvm *kvm = gmap->private;
	struct page *page;
	int rc = 0;

	lockdep_assert_held(&kvm->srcu);

	page = gfn_to_page(kvm, gpa_to_gfn(gaddr));
	mmap_read_lock(gmap->mm);
	if (page)
		rc = __gmap_make_secure(gmap, page, uvcb);
	kvm_release_page_clean(page);
	mmap_read_unlock(gmap->mm);

	return rc;
}

int gmap_convert_to_secure(struct gmap *gmap, unsigned long gaddr)
{
	struct uv_cb_cts uvcb = {
		.header.cmd = UVC_CMD_CONV_TO_SEC_STOR,
		.header.len = sizeof(uvcb),
		.guest_handle = gmap->guest_handle,
		.gaddr = gaddr,
	};

	return gmap_make_secure(gmap, gaddr, &uvcb);
}

/**
 * __gmap_destroy_page() - Destroy a guest page.
 * @gmap: the gmap of the guest
 * @page: the page to destroy
 *
 * An attempt will be made to destroy the given guest page. If the attempt
 * fails, an attempt is made to export the page. If both attempts fail, an
 * appropriate error is returned.
 *
 * Context: must be called holding the mm lock for gmap->mm
 */
static int __gmap_destroy_page(struct gmap *gmap, struct page *page)
{
	struct folio *folio = page_folio(page);
	int rc;

	/*
	 * See gmap_make_secure(): large folios cannot be secure. Small
	 * folio implies FW_LEVEL_PTE.
	 */
	if (folio_test_large(folio))
		return -EFAULT;

	rc = uv_destroy_folio(folio);
	/*
	 * Fault handlers can race; it is possible that two CPUs will fault
	 * on the same secure page. One CPU can destroy the page, reboot,
	 * re-enter secure mode and import it, while the second CPU was
	 * stuck at the beginning of the handler. At some point the second
	 * CPU will be able to progress, and it will not be able to destroy
	 * the page. In that case we do not want to terminate the process,
	 * we instead try to export the page.
	 */
	if (rc)
		rc = uv_convert_from_secure_folio(folio);

	return rc;
}

/**
 * gmap_destroy_page() - Destroy a guest page.
 * @gmap: the gmap of the guest
 * @gaddr: the guest address to destroy
 *
 * An attempt will be made to destroy the given guest page. If the attempt
 * fails, an attempt is made to export the page. If both attempts fail, an
 * appropriate error is returned.
 *
 * Context: may sleep.
 */
int gmap_destroy_page(struct gmap *gmap, unsigned long gaddr)
{
	struct page *page;
	int rc = 0;

	mmap_read_lock(gmap->mm);
	page = gfn_to_page(gmap->private, gpa_to_gfn(gaddr));
	if (page)
		rc = __gmap_destroy_page(gmap, page);
	kvm_release_page_clean(page);
	mmap_read_unlock(gmap->mm);
	return rc;
}
arch/s390/kvm/gmap.h (new file, 39 lines)

@@ -0,0 +1,39 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * KVM guest address space mapping code
 *
 * Copyright IBM Corp. 2007, 2016, 2025
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 *            Claudio Imbrenda <imbrenda@linux.ibm.com>
 */

#ifndef ARCH_KVM_S390_GMAP_H
#define ARCH_KVM_S390_GMAP_H

#define GMAP_SHADOW_FAKE_TABLE 1ULL

int gmap_make_secure(struct gmap *gmap, unsigned long gaddr, void *uvcb);
int gmap_convert_to_secure(struct gmap *gmap, unsigned long gaddr);
int gmap_destroy_page(struct gmap *gmap, unsigned long gaddr);
struct gmap *gmap_shadow(struct gmap *parent, unsigned long asce, int edat_level);

/**
 * gmap_shadow_valid - check if a shadow guest address space matches the
 *                     given properties and is still valid
 * @sg: pointer to the shadow guest address space structure
 * @asce: ASCE for which the shadow table is requested
 * @edat_level: edat level to be used for the shadow translation
 *
 * Returns 1 if the gmap shadow is still valid and matches the given
 * properties, the caller can continue using it. Returns 0 otherwise, the
 * caller has to request a new shadow gmap in this case.
 */
static inline int gmap_shadow_valid(struct gmap *sg, unsigned long asce, int edat_level)
{
	if (sg->removed)
		return 0;
	return sg->orig_asce == asce && sg->edat_level == edat_level;
}

#endif
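The header above only declares the entry points; as a rough usage sketch (not part of this series), a caller holding kvm->srcu could convert a range of guest pages and retry the transient -EAGAIN results, much like the unpack path further down does. Everything in this sketch other than gmap_convert_to_secure() itself is hypothetical.

/*
 * Hypothetical helper: convert a range of guest pages to secure storage,
 * retrying transient -EAGAIN results as callers of gmap_make_secure()
 * are expected to do. Assumes kvm->srcu is held by the caller.
 */
static int convert_range_to_secure(struct gmap *gmap, unsigned long gaddr,
				   unsigned long npages)
{
	unsigned long i;
	int rc;

	for (i = 0; i < npages; i++) {
		do {
			rc = gmap_convert_to_secure(gmap, gaddr + i * PAGE_SIZE);
			if (rc == -EAGAIN)
				cond_resched();	/* transient, try again */
		} while (rc == -EAGAIN);
		if (rc)
			return rc;
	}
	return 0;
}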
@@ -21,6 +21,7 @@
 #include "gaccess.h"
 #include "trace.h"
 #include "trace-s390.h"
+#include "gmap.h"
 
 u8 kvm_s390_get_ilen(struct kvm_vcpu *vcpu)
 {

@@ -367,7 +368,7 @@ static int handle_mvpg_pei(struct kvm_vcpu *vcpu)
 					  reg2, &srcaddr, GACC_FETCH, 0);
 	if (rc)
 		return kvm_s390_inject_prog_cond(vcpu, rc);
-	rc = gmap_fault(vcpu->arch.gmap, srcaddr, 0);
+	rc = kvm_s390_handle_dat_fault(vcpu, srcaddr, 0);
 	if (rc != 0)
 		return rc;

@@ -376,7 +377,7 @@ static int handle_mvpg_pei(struct kvm_vcpu *vcpu)
 					  reg1, &dstaddr, GACC_STORE, 0);
 	if (rc)
 		return kvm_s390_inject_prog_cond(vcpu, rc);
-	rc = gmap_fault(vcpu->arch.gmap, dstaddr, FAULT_FLAG_WRITE);
+	rc = kvm_s390_handle_dat_fault(vcpu, dstaddr, FOLL_WRITE);
 	if (rc != 0)
 		return rc;

@@ -549,7 +550,7 @@ static int handle_pv_uvc(struct kvm_vcpu *vcpu)
 	 * If the unpin did not succeed, the guest will exit again for the UVC
 	 * and we will retry the unpin.
 	 */
-	if (rc == -EINVAL)
+	if (rc == -EINVAL || rc == -ENXIO)
 		return 0;
 	/*
 	 * If we got -EAGAIN here, we simply return it. It will eventually
@@ -2893,7 +2893,8 @@ int kvm_set_routing_entry(struct kvm *kvm,
 			  struct kvm_kernel_irq_routing_entry *e,
 			  const struct kvm_irq_routing_entry *ue)
 {
-	u64 uaddr;
+	u64 uaddr_s, uaddr_i;
+	int idx;
 
 	switch (ue->type) {
 	/* we store the userspace addresses instead of the guest addresses */

@@ -2901,14 +2902,16 @@ int kvm_set_routing_entry(struct kvm *kvm,
 		if (kvm_is_ucontrol(kvm))
 			return -EINVAL;
 		e->set = set_adapter_int;
-		uaddr = gmap_translate(kvm->arch.gmap, ue->u.adapter.summary_addr);
-		if (uaddr == -EFAULT)
+		idx = srcu_read_lock(&kvm->srcu);
+		uaddr_s = gpa_to_hva(kvm, ue->u.adapter.summary_addr);
+		uaddr_i = gpa_to_hva(kvm, ue->u.adapter.ind_addr);
+		srcu_read_unlock(&kvm->srcu, idx);
+
+		if (kvm_is_error_hva(uaddr_s) || kvm_is_error_hva(uaddr_i))
 			return -EFAULT;
-		e->adapter.summary_addr = uaddr;
-		uaddr = gmap_translate(kvm->arch.gmap, ue->u.adapter.ind_addr);
-		if (uaddr == -EFAULT)
-			return -EFAULT;
-		e->adapter.ind_addr = uaddr;
+		e->adapter.summary_addr = uaddr_s;
+		e->adapter.ind_addr = uaddr_i;
 		e->adapter.summary_offset = ue->u.adapter.summary_offset;
 		e->adapter.ind_offset = ue->u.adapter.ind_offset;
 		e->adapter.adapter_id = ue->u.adapter.adapter_id;
@@ -50,6 +50,7 @@
 #include "kvm-s390.h"
 #include "gaccess.h"
 #include "pci.h"
+#include "gmap.h"
 
 #define CREATE_TRACE_POINTS
 #include "trace.h"

@@ -3428,8 +3429,20 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 	VM_EVENT(kvm, 3, "vm created with type %lu", type);
 
 	if (type & KVM_VM_S390_UCONTROL) {
+		struct kvm_userspace_memory_region2 fake_memslot = {
+			.slot = KVM_S390_UCONTROL_MEMSLOT,
+			.guest_phys_addr = 0,
+			.userspace_addr = 0,
+			.memory_size = ALIGN_DOWN(TASK_SIZE, _SEGMENT_SIZE),
+			.flags = 0,
+		};
+
 		kvm->arch.gmap = NULL;
 		kvm->arch.mem_limit = KVM_S390_NO_MEM_LIMIT;
+		/* one flat fake memslot covering the whole address-space */
+		mutex_lock(&kvm->slots_lock);
+		KVM_BUG_ON(kvm_set_internal_memslot(kvm, &fake_memslot), kvm);
+		mutex_unlock(&kvm->slots_lock);
 	} else {
 		if (sclp.hamax == U64_MAX)
 			kvm->arch.mem_limit = TASK_SIZE_MAX;

@@ -4498,6 +4511,75 @@ static bool ibs_enabled(struct kvm_vcpu *vcpu)
 	return kvm_s390_test_cpuflags(vcpu, CPUSTAT_IBS);
 }
 
+static int __kvm_s390_fixup_fault_sync(struct gmap *gmap, gpa_t gaddr, unsigned int flags)
+{
+	struct kvm *kvm = gmap->private;
+	gfn_t gfn = gpa_to_gfn(gaddr);
+	bool unlocked;
+	hva_t vmaddr;
+	gpa_t tmp;
+	int rc;
+
+	if (kvm_is_ucontrol(kvm)) {
+		tmp = __gmap_translate(gmap, gaddr);
+		gfn = gpa_to_gfn(tmp);
+	}
+
+	vmaddr = gfn_to_hva(kvm, gfn);
+	rc = fixup_user_fault(gmap->mm, vmaddr, FAULT_FLAG_WRITE, &unlocked);
+	if (!rc)
+		rc = __gmap_link(gmap, gaddr, vmaddr);
+	return rc;
+}
+
+/**
+ * __kvm_s390_mprotect_many() - Apply specified protection to guest pages
+ * @gmap: the gmap of the guest
+ * @gpa: the starting guest address
+ * @npages: how many pages to protect
+ * @prot: indicates access rights: PROT_NONE, PROT_READ or PROT_WRITE
+ * @bits: pgste notification bits to set
+ *
+ * Returns: 0 in case of success, < 0 in case of error - see gmap_protect_one()
+ *
+ * Context: kvm->srcu and gmap->mm need to be held in read mode
+ */
+int __kvm_s390_mprotect_many(struct gmap *gmap, gpa_t gpa, u8 npages, unsigned int prot,
+			     unsigned long bits)
+{
+	unsigned int fault_flag = (prot & PROT_WRITE) ? FAULT_FLAG_WRITE : 0;
+	gpa_t end = gpa + npages * PAGE_SIZE;
+	int rc;
+
+	for (; gpa < end; gpa = ALIGN(gpa + 1, rc)) {
+		rc = gmap_protect_one(gmap, gpa, prot, bits);
+		if (rc == -EAGAIN) {
+			__kvm_s390_fixup_fault_sync(gmap, gpa, fault_flag);
+			rc = gmap_protect_one(gmap, gpa, prot, bits);
+		}
+		if (rc < 0)
+			return rc;
+	}
+
+	return 0;
+}
+
+static int kvm_s390_mprotect_notify_prefix(struct kvm_vcpu *vcpu)
+{
+	gpa_t gaddr = kvm_s390_get_prefix(vcpu);
+	int idx, rc;
+
+	idx = srcu_read_lock(&vcpu->kvm->srcu);
+	mmap_read_lock(vcpu->arch.gmap->mm);
+
+	rc = __kvm_s390_mprotect_many(vcpu->arch.gmap, gaddr, 2, PROT_WRITE, GMAP_NOTIFY_MPROT);
+
+	mmap_read_unlock(vcpu->arch.gmap->mm);
+	srcu_read_unlock(&vcpu->kvm->srcu, idx);
+
+	return rc;
+}
+
 static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
 {
 retry:

@@ -4513,9 +4595,8 @@ retry:
 	 */
 	if (kvm_check_request(KVM_REQ_REFRESH_GUEST_PREFIX, vcpu)) {
 		int rc;
-		rc = gmap_mprotect_notify(vcpu->arch.gmap,
-					  kvm_s390_get_prefix(vcpu),
-					  PAGE_SIZE * 2, PROT_WRITE);
+
+		rc = kvm_s390_mprotect_notify_prefix(vcpu);
 		if (rc) {
 			kvm_make_request(KVM_REQ_REFRESH_GUEST_PREFIX, vcpu);
 			return rc;

@@ -4766,11 +4847,111 @@ static int vcpu_post_run_addressing_exception(struct kvm_vcpu *vcpu)
 	return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
 }
 
+static void kvm_s390_assert_primary_as(struct kvm_vcpu *vcpu)
+{
+	KVM_BUG(current->thread.gmap_teid.as != PSW_BITS_AS_PRIMARY, vcpu->kvm,
+		"Unexpected program interrupt 0x%x, TEID 0x%016lx",
+		current->thread.gmap_int_code, current->thread.gmap_teid.val);
+}
+
+/*
+ * __kvm_s390_handle_dat_fault() - handle a dat fault for the gmap of a vcpu
+ * @vcpu: the vCPU whose gmap is to be fixed up
+ * @gfn: the guest frame number used for memslots (including fake memslots)
+ * @gaddr: the gmap address, does not have to match @gfn for ucontrol gmaps
+ * @flags: FOLL_* flags
+ *
+ * Return: 0 on success, < 0 in case of error.
+ * Context: The mm lock must not be held before calling. May sleep.
+ */
+int __kvm_s390_handle_dat_fault(struct kvm_vcpu *vcpu, gfn_t gfn, gpa_t gaddr, unsigned int flags)
+{
+	struct kvm_memory_slot *slot;
+	unsigned int fault_flags;
+	bool writable, unlocked;
+	unsigned long vmaddr;
+	struct page *page;
+	kvm_pfn_t pfn;
+	int rc;
+
+	slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
+	if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
+		return vcpu_post_run_addressing_exception(vcpu);
+
+	fault_flags = flags & FOLL_WRITE ? FAULT_FLAG_WRITE : 0;
+	if (vcpu->arch.gmap->pfault_enabled)
+		flags |= FOLL_NOWAIT;
+	vmaddr = __gfn_to_hva_memslot(slot, gfn);
+
+try_again:
+	pfn = __kvm_faultin_pfn(slot, gfn, flags, &writable, &page);
+
+	/* Access outside memory, inject addressing exception */
+	if (is_noslot_pfn(pfn))
+		return vcpu_post_run_addressing_exception(vcpu);
+	/* Signal pending: try again */
+	if (pfn == KVM_PFN_ERR_SIGPENDING)
+		return -EAGAIN;
+
+	/* Needs I/O, try to setup async pfault (only possible with FOLL_NOWAIT) */
+	if (pfn == KVM_PFN_ERR_NEEDS_IO) {
+		trace_kvm_s390_major_guest_pfault(vcpu);
+		if (kvm_arch_setup_async_pf(vcpu))
+			return 0;
+		vcpu->stat.pfault_sync++;
+		/* Could not setup async pfault, try again synchronously */
+		flags &= ~FOLL_NOWAIT;
+		goto try_again;
+	}
+	/* Any other error */
+	if (is_error_pfn(pfn))
+		return -EFAULT;
+
+	/* Success */
+	mmap_read_lock(vcpu->arch.gmap->mm);
+	/* Mark the userspace PTEs as young and/or dirty, to avoid page fault loops */
+	rc = fixup_user_fault(vcpu->arch.gmap->mm, vmaddr, fault_flags, &unlocked);
+	if (!rc)
+		rc = __gmap_link(vcpu->arch.gmap, gaddr, vmaddr);
+	scoped_guard(spinlock, &vcpu->kvm->mmu_lock) {
+		kvm_release_faultin_page(vcpu->kvm, page, false, writable);
+	}
+	mmap_read_unlock(vcpu->arch.gmap->mm);
+	return rc;
+}
+
+static int vcpu_dat_fault_handler(struct kvm_vcpu *vcpu, unsigned long gaddr, unsigned int flags)
+{
+	unsigned long gaddr_tmp;
+	gfn_t gfn;
+
+	gfn = gpa_to_gfn(gaddr);
+	if (kvm_is_ucontrol(vcpu->kvm)) {
+		/*
+		 * This translates the per-vCPU guest address into a
+		 * fake guest address, which can then be used with the
+		 * fake memslots that are identity mapping userspace.
+		 * This allows ucontrol VMs to use the normal fault
+		 * resolution path, like normal VMs.
+		 */
+		mmap_read_lock(vcpu->arch.gmap->mm);
+		gaddr_tmp = __gmap_translate(vcpu->arch.gmap, gaddr);
+		mmap_read_unlock(vcpu->arch.gmap->mm);
+		if (gaddr_tmp == -EFAULT) {
+			vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
+			vcpu->run->s390_ucontrol.trans_exc_code = gaddr;
+			vcpu->run->s390_ucontrol.pgm_code = PGM_SEGMENT_TRANSLATION;
+			return -EREMOTE;
+		}
+		gfn = gpa_to_gfn(gaddr_tmp);
+	}
+	return __kvm_s390_handle_dat_fault(vcpu, gfn, gaddr, flags);
+}
+
 static int vcpu_post_run_handle_fault(struct kvm_vcpu *vcpu)
 {
 	unsigned int flags = 0;
 	unsigned long gaddr;
-	int rc = 0;
 
 	gaddr = current->thread.gmap_teid.addr * PAGE_SIZE;
 	if (kvm_s390_cur_gmap_fault_is_write())

@@ -4781,9 +4962,7 @@ static int vcpu_post_run_handle_fault(struct kvm_vcpu *vcpu)
 		vcpu->stat.exit_null++;
 		break;
 	case PGM_NON_SECURE_STORAGE_ACCESS:
-		KVM_BUG(current->thread.gmap_teid.as != PSW_BITS_AS_PRIMARY, vcpu->kvm,
-			"Unexpected program interrupt 0x%x, TEID 0x%016lx",
-			current->thread.gmap_int_code, current->thread.gmap_teid.val);
+		kvm_s390_assert_primary_as(vcpu);
 		/*
 		 * This is normal operation; a page belonging to a protected
 		 * guest has not been imported yet. Try to import the page into

@@ -4794,9 +4973,7 @@ static int vcpu_post_run_handle_fault(struct kvm_vcpu *vcpu)
 		break;
 	case PGM_SECURE_STORAGE_ACCESS:
 	case PGM_SECURE_STORAGE_VIOLATION:
-		KVM_BUG(current->thread.gmap_teid.as != PSW_BITS_AS_PRIMARY, vcpu->kvm,
-			"Unexpected program interrupt 0x%x, TEID 0x%016lx",
-			current->thread.gmap_int_code, current->thread.gmap_teid.val);
+		kvm_s390_assert_primary_as(vcpu);
 		/*
 		 * This can happen after a reboot with asynchronous teardown;
 		 * the new guest (normal or protected) will run on top of the

@@ -4825,40 +5002,15 @@ static int vcpu_post_run_handle_fault(struct kvm_vcpu *vcpu)
 	case PGM_REGION_FIRST_TRANS:
 	case PGM_REGION_SECOND_TRANS:
 	case PGM_REGION_THIRD_TRANS:
-		KVM_BUG(current->thread.gmap_teid.as != PSW_BITS_AS_PRIMARY, vcpu->kvm,
-			"Unexpected program interrupt 0x%x, TEID 0x%016lx",
-			current->thread.gmap_int_code, current->thread.gmap_teid.val);
-		if (vcpu->arch.gmap->pfault_enabled) {
-			rc = gmap_fault(vcpu->arch.gmap, gaddr, flags | FAULT_FLAG_RETRY_NOWAIT);
-			if (rc == -EFAULT)
-				return vcpu_post_run_addressing_exception(vcpu);
-			if (rc == -EAGAIN) {
-				trace_kvm_s390_major_guest_pfault(vcpu);
-				if (kvm_arch_setup_async_pf(vcpu))
-					return 0;
-				vcpu->stat.pfault_sync++;
-			} else {
-				return rc;
-			}
-		}
-		rc = gmap_fault(vcpu->arch.gmap, gaddr, flags);
-		if (rc == -EFAULT) {
-			if (kvm_is_ucontrol(vcpu->kvm)) {
-				vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
-				vcpu->run->s390_ucontrol.trans_exc_code = gaddr;
-				vcpu->run->s390_ucontrol.pgm_code = 0x10;
-				return -EREMOTE;
-			}
-			return vcpu_post_run_addressing_exception(vcpu);
-		}
-		break;
+		kvm_s390_assert_primary_as(vcpu);
+		return vcpu_dat_fault_handler(vcpu, gaddr, flags);
 	default:
 		KVM_BUG(1, vcpu->kvm, "Unexpected program interrupt 0x%x, TEID 0x%016lx",
 			current->thread.gmap_int_code, current->thread.gmap_teid.val);
 		send_sig(SIGSEGV, current, 0);
 		break;
 	}
-	return rc;
+	return 0;
 }
 
 static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)

@@ -5737,7 +5889,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
 	}
 #endif
 	case KVM_S390_VCPU_FAULT: {
-		r = gmap_fault(vcpu->arch.gmap, arg, 0);
+		idx = srcu_read_lock(&vcpu->kvm->srcu);
+		r = vcpu_dat_fault_handler(vcpu, arg, 0);
+		srcu_read_unlock(&vcpu->kvm->srcu, idx);
 		break;
 	}
 	case KVM_ENABLE_CAP:

@@ -5853,7 +6007,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 {
 	gpa_t size;
 
-	if (kvm_is_ucontrol(kvm))
+	if (kvm_is_ucontrol(kvm) && new->id < KVM_USER_MEM_SLOTS)
 		return -EINVAL;
 
 	/* When we are protected, we should not change the memory slots */

@@ -5905,6 +6059,9 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
 {
 	int rc = 0;
 
+	if (kvm_is_ucontrol(kvm))
+		return;
+
 	switch (change) {
 	case KVM_MR_DELETE:
 		rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE,
@@ -20,6 +20,8 @@
 #include <asm/processor.h>
 #include <asm/sclp.h>
 
+#define KVM_S390_UCONTROL_MEMSLOT (KVM_USER_MEM_SLOTS + 0)
+
 static inline void kvm_s390_fpu_store(struct kvm_run *run)
 {
 	fpu_stfpc(&run->s.regs.fpc);

@@ -279,6 +281,15 @@ static inline u32 kvm_s390_get_gisa_desc(struct kvm *kvm)
 	return gd;
 }
 
+static inline hva_t gpa_to_hva(struct kvm *kvm, gpa_t gpa)
+{
+	hva_t hva = gfn_to_hva(kvm, gpa_to_gfn(gpa));
+
+	if (!kvm_is_error_hva(hva))
+		hva |= offset_in_page(gpa);
+	return hva;
+}
+
 /* implemented in pv.c */
 int kvm_s390_pv_destroy_cpu(struct kvm_vcpu *vcpu, u16 *rc, u16 *rrc);
 int kvm_s390_pv_create_cpu(struct kvm_vcpu *vcpu, u16 *rc, u16 *rrc);

@@ -408,6 +419,14 @@ void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu);
 void kvm_s390_set_cpu_timer(struct kvm_vcpu *vcpu, __u64 cputm);
 __u64 kvm_s390_get_cpu_timer(struct kvm_vcpu *vcpu);
 int kvm_s390_cpus_from_pv(struct kvm *kvm, u16 *rc, u16 *rrc);
+int __kvm_s390_handle_dat_fault(struct kvm_vcpu *vcpu, gfn_t gfn, gpa_t gaddr, unsigned int flags);
+int __kvm_s390_mprotect_many(struct gmap *gmap, gpa_t gpa, u8 npages, unsigned int prot,
+			     unsigned long bits);
+
+static inline int kvm_s390_handle_dat_fault(struct kvm_vcpu *vcpu, gpa_t gaddr, unsigned int flags)
+{
+	return __kvm_s390_handle_dat_fault(vcpu, gpa_to_gfn(gaddr), gaddr, flags);
+}
 
 /* implemented in diag.c */
 int kvm_s390_handle_diag(struct kvm_vcpu *vcpu);
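A minimal usage sketch for the new gpa_to_hva() helper, assuming the caller holds kvm->srcu for the memslot lookup, as kvm_set_routing_entry() does above; the wrapper function name below is hypothetical.

/* Hypothetical wrapper: translate a guest physical address to a userspace pointer. */
static int example_gpa_to_user_ptr(struct kvm *kvm, gpa_t gpa, void __user **uptr)
{
	hva_t hva;
	int idx;

	idx = srcu_read_lock(&kvm->srcu);	/* memslot lookup needs srcu */
	hva = gpa_to_hva(kvm, gpa);
	srcu_read_unlock(&kvm->srcu, idx);

	if (kvm_is_error_hva(hva))
		return -EFAULT;
	*uptr = (void __user *)hva;
	return 0;
}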
@@ -17,6 +17,7 @@
 #include <linux/sched/mm.h>
 #include <linux/mmu_notifier.h>
 #include "kvm-s390.h"
+#include "gmap.h"
 
 bool kvm_s390_pv_is_protected(struct kvm *kvm)
 {

@@ -638,10 +639,28 @@ static int unpack_one(struct kvm *kvm, unsigned long addr, u64 tweak,
 		.tweak[1] = offset,
 	};
 	int ret = gmap_make_secure(kvm->arch.gmap, addr, &uvcb);
+	unsigned long vmaddr;
+	bool unlocked;
 
 	*rc = uvcb.header.rc;
 	*rrc = uvcb.header.rrc;
 
+	if (ret == -ENXIO) {
+		mmap_read_lock(kvm->mm);
+		vmaddr = gfn_to_hva(kvm, gpa_to_gfn(addr));
+		if (kvm_is_error_hva(vmaddr)) {
+			ret = -EFAULT;
+		} else {
+			ret = fixup_user_fault(kvm->mm, vmaddr, FAULT_FLAG_WRITE, &unlocked);
+			if (!ret)
+				ret = __gmap_link(kvm->arch.gmap, addr, vmaddr);
+		}
+		mmap_read_unlock(kvm->mm);
+		if (!ret)
+			return -EAGAIN;
+		return ret;
+	}
+
 	if (ret && ret != -EAGAIN)
 		KVM_UV_EVENT(kvm, 3, "PROTVIRT VM UNPACK: failed addr %llx with rc %x rrc %x",
 			     uvcb.gaddr, *rc, *rrc);

@@ -660,6 +679,8 @@ int kvm_s390_pv_unpack(struct kvm *kvm, unsigned long addr, unsigned long size,
 	KVM_UV_EVENT(kvm, 3, "PROTVIRT VM UNPACK: start addr %lx size %lx",
 		     addr, size);
 
+	guard(srcu)(&kvm->srcu);
+
 	while (offset < size) {
 		ret = unpack_one(kvm, addr, tweak, offset, rc, rrc);
 		if (ret == -EAGAIN) {
@@ -13,6 +13,7 @@
 #include <linux/bitmap.h>
 #include <linux/sched/signal.h>
 #include <linux/io.h>
+#include <linux/mman.h>
 
 #include <asm/gmap.h>
 #include <asm/mmu_context.h>

@@ -22,6 +23,11 @@
 #include <asm/facility.h>
 #include "kvm-s390.h"
 #include "gaccess.h"
+#include "gmap.h"
+
+enum vsie_page_flags {
+	VSIE_PAGE_IN_USE = 0,
+};
 
 struct vsie_page {
 	struct kvm_s390_sie_block scb_s;	/* 0x0000 */

@@ -46,7 +52,18 @@ struct vsie_page {
 	gpa_t gvrd_gpa;				/* 0x0240 */
 	gpa_t riccbd_gpa;			/* 0x0248 */
 	gpa_t sdnx_gpa;				/* 0x0250 */
-	__u8 reserved[0x0700 - 0x0258];		/* 0x0258 */
+	/*
+	 * guest address of the original SCB. Remains set for free vsie
+	 * pages, so we can properly look them up in our addr_to_page
+	 * radix tree.
+	 */
+	gpa_t scb_gpa;				/* 0x0258 */
+	/*
+	 * Flags: must be set/cleared atomically after the vsie page can be
+	 * looked up by other CPUs.
+	 */
+	unsigned long flags;			/* 0x0260 */
+	__u8 reserved[0x0700 - 0x0268];		/* 0x0268 */
 	struct kvm_s390_crypto_cb crycb;	/* 0x0700 */
 	__u8 fac[S390_ARCH_FAC_LIST_SIZE_BYTE];	/* 0x0800 */
 };

@@ -584,7 +601,6 @@ void kvm_s390_vsie_gmap_notifier(struct gmap *gmap, unsigned long start,
 	struct kvm *kvm = gmap->private;
 	struct vsie_page *cur;
 	unsigned long prefix;
-	struct page *page;
 	int i;
 
 	if (!gmap_is_shadow(gmap))

@@ -594,10 +610,9 @@ void kvm_s390_vsie_gmap_notifier(struct gmap *gmap, unsigned long start,
 	 * therefore we can safely reference them all the time.
 	 */
 	for (i = 0; i < kvm->arch.vsie.page_count; i++) {
-		page = READ_ONCE(kvm->arch.vsie.pages[i]);
-		if (!page)
+		cur = READ_ONCE(kvm->arch.vsie.pages[i]);
+		if (!cur)
 			continue;
-		cur = page_to_virt(page);
 		if (READ_ONCE(cur->gmap) != gmap)
 			continue;
 		prefix = cur->scb_s.prefix << GUEST_PREFIX_SHIFT;

@@ -1345,6 +1360,20 @@ static int vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 	return rc;
 }
 
+/* Try getting a given vsie page, returning "true" on success. */
+static inline bool try_get_vsie_page(struct vsie_page *vsie_page)
+{
+	if (test_bit(VSIE_PAGE_IN_USE, &vsie_page->flags))
+		return false;
+	return !test_and_set_bit(VSIE_PAGE_IN_USE, &vsie_page->flags);
+}
+
+/* Put a vsie page acquired through get_vsie_page / try_get_vsie_page. */
+static void put_vsie_page(struct vsie_page *vsie_page)
+{
+	clear_bit(VSIE_PAGE_IN_USE, &vsie_page->flags);
+}
+
 /*
  * Get or create a vsie page for a scb address.
  *

@@ -1355,16 +1384,21 @@ static int vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 static struct vsie_page *get_vsie_page(struct kvm *kvm, unsigned long addr)
 {
 	struct vsie_page *vsie_page;
-	struct page *page;
 	int nr_vcpus;
 
 	rcu_read_lock();
-	page = radix_tree_lookup(&kvm->arch.vsie.addr_to_page, addr >> 9);
+	vsie_page = radix_tree_lookup(&kvm->arch.vsie.addr_to_page, addr >> 9);
 	rcu_read_unlock();
-	if (page) {
-		if (page_ref_inc_return(page) == 2)
-			return page_to_virt(page);
-		page_ref_dec(page);
+	if (vsie_page) {
+		if (try_get_vsie_page(vsie_page)) {
+			if (vsie_page->scb_gpa == addr)
+				return vsie_page;
+			/*
+			 * We raced with someone reusing + putting this vsie
+			 * page before we grabbed it.
+			 */
+			put_vsie_page(vsie_page);
+		}
 	}
 
 	/*

@@ -1375,36 +1409,40 @@ static struct vsie_page *get_vsie_page(struct kvm *kvm, unsigned long addr)
 
 	mutex_lock(&kvm->arch.vsie.mutex);
 	if (kvm->arch.vsie.page_count < nr_vcpus) {
-		page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO | GFP_DMA);
-		if (!page) {
+		vsie_page = (void *)__get_free_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO | GFP_DMA);
+		if (!vsie_page) {
 			mutex_unlock(&kvm->arch.vsie.mutex);
 			return ERR_PTR(-ENOMEM);
 		}
-		page_ref_inc(page);
-		kvm->arch.vsie.pages[kvm->arch.vsie.page_count] = page;
+		__set_bit(VSIE_PAGE_IN_USE, &vsie_page->flags);
+		kvm->arch.vsie.pages[kvm->arch.vsie.page_count] = vsie_page;
 		kvm->arch.vsie.page_count++;
 	} else {
 		/* reuse an existing entry that belongs to nobody */
 		while (true) {
-			page = kvm->arch.vsie.pages[kvm->arch.vsie.next];
-			if (page_ref_inc_return(page) == 2)
+			vsie_page = kvm->arch.vsie.pages[kvm->arch.vsie.next];
+			if (try_get_vsie_page(vsie_page))
 				break;
-			page_ref_dec(page);
 			kvm->arch.vsie.next++;
 			kvm->arch.vsie.next %= nr_vcpus;
 		}
-		radix_tree_delete(&kvm->arch.vsie.addr_to_page, page->index >> 9);
+		if (vsie_page->scb_gpa != ULONG_MAX)
+			radix_tree_delete(&kvm->arch.vsie.addr_to_page,
+					  vsie_page->scb_gpa >> 9);
 	}
-	page->index = addr;
-	/* double use of the same address */
-	if (radix_tree_insert(&kvm->arch.vsie.addr_to_page, addr >> 9, page)) {
-		page_ref_dec(page);
+	/* Mark it as invalid until it resides in the tree. */
+	vsie_page->scb_gpa = ULONG_MAX;
+
+	/* Double use of the same address or allocation failure. */
+	if (radix_tree_insert(&kvm->arch.vsie.addr_to_page, addr >> 9,
+			      vsie_page)) {
+		put_vsie_page(vsie_page);
 		mutex_unlock(&kvm->arch.vsie.mutex);
 		return NULL;
 	}
+	vsie_page->scb_gpa = addr;
 	mutex_unlock(&kvm->arch.vsie.mutex);
 
-	vsie_page = page_to_virt(page);
 	memset(&vsie_page->scb_s, 0, sizeof(struct kvm_s390_sie_block));
 	release_gmap_shadow(vsie_page);
 	vsie_page->fault_addr = 0;

@@ -1412,14 +1450,6 @@ static struct vsie_page *get_vsie_page(struct kvm *kvm, unsigned long addr)
 	return vsie_page;
 }
 
-/* put a vsie page acquired via get_vsie_page */
-static void put_vsie_page(struct kvm *kvm, struct vsie_page *vsie_page)
-{
-	struct page *page = pfn_to_page(__pa(vsie_page) >> PAGE_SHIFT);
-
-	page_ref_dec(page);
-}
-
 int kvm_s390_handle_vsie(struct kvm_vcpu *vcpu)
 {
 	struct vsie_page *vsie_page;

@@ -1470,7 +1500,7 @@ out_unshadow:
 out_unpin_scb:
 	unpin_scb(vcpu, vsie_page, scb_addr);
 out_put:
-	put_vsie_page(vcpu->kvm, vsie_page);
+	put_vsie_page(vsie_page);
 
 	return rc < 0 ? rc : 0;
 }

@@ -1486,18 +1516,18 @@ void kvm_s390_vsie_init(struct kvm *kvm)
 void kvm_s390_vsie_destroy(struct kvm *kvm)
 {
 	struct vsie_page *vsie_page;
-	struct page *page;
 	int i;
 
 	mutex_lock(&kvm->arch.vsie.mutex);
 	for (i = 0; i < kvm->arch.vsie.page_count; i++) {
-		page = kvm->arch.vsie.pages[i];
+		vsie_page = kvm->arch.vsie.pages[i];
 		kvm->arch.vsie.pages[i] = NULL;
-		vsie_page = page_to_virt(page);
 		release_gmap_shadow(vsie_page);
 		/* free the radix tree entry */
-		radix_tree_delete(&kvm->arch.vsie.addr_to_page, page->index >> 9);
-		__free_page(page);
+		if (vsie_page->scb_gpa != ULONG_MAX)
+			radix_tree_delete(&kvm->arch.vsie.addr_to_page,
+					  vsie_page->scb_gpa >> 9);
+		free_page((unsigned long)vsie_page);
 	}
 	kvm->arch.vsie.page_count = 0;
 	mutex_unlock(&kvm->arch.vsie.mutex);
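A hypothetical caller-side sketch of the reworked vsie page ownership protocol: get_vsie_page() hands out a page with VSIE_PAGE_IN_USE already set, and the new put_vsie_page() only needs the vsie_page itself; kvm_s390_handle_vsie() follows this pattern. The helper below is illustrative only, not part of the series.

/* Illustrative only: acquire, use and release a vsie page. */
static int example_use_vsie_page(struct kvm *kvm, unsigned long scb_addr)
{
	struct vsie_page *vsie_page;

	vsie_page = get_vsie_page(kvm, scb_addr);
	if (IS_ERR(vsie_page))
		return PTR_ERR(vsie_page);
	if (!vsie_page)
		return 0;	/* double use of the same SCB address, nothing to do */

	/* ... set up and run the shadowed SCB here ... */

	put_vsie_page(vsie_page);	/* clears VSIE_PAGE_IN_USE */
	return 0;
}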
(One file's diff was suppressed by the viewer because it is too large.)
@@ -176,8 +176,6 @@ unsigned long *page_table_alloc(struct mm_struct *mm)
 	}
 	table = ptdesc_to_virt(ptdesc);
 	__arch_set_page_dat(table, 1);
-	/* pt_list is used by gmap only */
-	INIT_LIST_HEAD(&ptdesc->pt_list);
 	memset64((u64 *)table, _PAGE_INVALID, PTRS_PER_PTE);
 	memset64((u64 *)table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
 	return table;
@@ -1180,7 +1180,7 @@ void kvm_set_cpu_caps(void)
 		SYNTHESIZED_F(SBPB),
 		SYNTHESIZED_F(IBPB_BRTYPE),
 		SYNTHESIZED_F(SRSO_NO),
-		SYNTHESIZED_F(SRSO_USER_KERNEL_NO),
+		F(SRSO_USER_KERNEL_NO),
 	);
 
 	kvm_cpu_cap_init(CPUID_8000_0022_EAX,
@@ -7120,6 +7120,19 @@ static void mmu_destroy_caches(void)
 	kmem_cache_destroy(mmu_page_header_cache);
 }
 
+static void kvm_wake_nx_recovery_thread(struct kvm *kvm)
+{
+	/*
+	 * The NX recovery thread is spawned on-demand at the first KVM_RUN and
+	 * may not be valid even though the VM is globally visible.  Do nothing,
+	 * as such a VM can't have any possible NX huge pages.
+	 */
+	struct vhost_task *nx_thread = READ_ONCE(kvm->arch.nx_huge_page_recovery_thread);
+
+	if (nx_thread)
+		vhost_task_wake(nx_thread);
+}
+
 static int get_nx_huge_pages(char *buffer, const struct kernel_param *kp)
 {
 	if (nx_hugepage_mitigation_hard_disabled)

@@ -7180,7 +7193,7 @@ static int set_nx_huge_pages(const char *val, const struct kernel_param *kp)
 			kvm_mmu_zap_all_fast(kvm);
 			mutex_unlock(&kvm->slots_lock);
 
-			vhost_task_wake(kvm->arch.nx_huge_page_recovery_thread);
+			kvm_wake_nx_recovery_thread(kvm);
 		}
 		mutex_unlock(&kvm_lock);
 	}

@@ -7315,7 +7328,7 @@ static int set_nx_huge_pages_recovery_param(const char *val, const struct kernel_param *kp)
 		mutex_lock(&kvm_lock);
 
 		list_for_each_entry(kvm, &vm_list, vm_list)
-			vhost_task_wake(kvm->arch.nx_huge_page_recovery_thread);
+			kvm_wake_nx_recovery_thread(kvm);
 
 		mutex_unlock(&kvm_lock);
 	}

@@ -7451,14 +7464,20 @@ static void kvm_mmu_start_lpage_recovery(struct once *once)
 {
 	struct kvm_arch *ka = container_of(once, struct kvm_arch, nx_once);
 	struct kvm *kvm = container_of(ka, struct kvm, arch);
+	struct vhost_task *nx_thread;
 
 	kvm->arch.nx_huge_page_last = get_jiffies_64();
-	kvm->arch.nx_huge_page_recovery_thread = vhost_task_create(
-		kvm_nx_huge_page_recovery_worker, kvm_nx_huge_page_recovery_worker_kill,
-		kvm, "kvm-nx-lpage-recovery");
+	nx_thread = vhost_task_create(kvm_nx_huge_page_recovery_worker,
+				      kvm_nx_huge_page_recovery_worker_kill,
+				      kvm, "kvm-nx-lpage-recovery");
+	if (!nx_thread)
+		return;
+
+	vhost_task_start(nx_thread);
 
-	if (kvm->arch.nx_huge_page_recovery_thread)
-		vhost_task_start(kvm->arch.nx_huge_page_recovery_thread);
+	/* Make the task visible only once it is fully started. */
+	WRITE_ONCE(kvm->arch.nx_huge_page_recovery_thread, nx_thread);
 }
 
 int kvm_mmu_post_init_vm(struct kvm *kvm)
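The two changes above form a publish/consume pair: the recovery task pointer is stored with WRITE_ONCE() only after the task is fully started, and every waker loads it with READ_ONCE() and tolerates NULL. A generic sketch of the same pattern, with hypothetical names, assuming only the vhost_task_start()/vhost_task_wake() calls shown in the diff:

/* Illustration only: publish a fully-initialized worker, consume it safely. */
struct worker_ctx {
	struct vhost_task *task;	/* NULL until fully started */
};

static void producer(struct worker_ctx *ctx, struct vhost_task *task)
{
	vhost_task_start(task);
	/* Publish only once the task is fully started. */
	WRITE_ONCE(ctx->task, task);
}

static void consumer(struct worker_ctx *ctx)
{
	struct vhost_task *task = READ_ONCE(ctx->task);

	if (task)	/* may legitimately still be NULL before the first run */
		vhost_task_wake(task);
}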
@@ -12741,6 +12741,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 			 "does not run without ignore_msrs=1, please report it to kvm@vger.kernel.org.\n");
 	}
 
+	once_init(&kvm->arch.nx_once);
 	return 0;
 
 out_uninit_mmu:

@@ -12750,12 +12751,6 @@ out:
 	return ret;
 }
 
-int kvm_arch_post_init_vm(struct kvm *kvm)
-{
-	once_init(&kvm->arch.nx_once);
-	return 0;
-}
-
 static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
 {
 	vcpu_load(vcpu);
@@ -1615,7 +1615,6 @@ int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu);
 bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu);
 bool kvm_arch_dy_has_pending_interrupt(struct kvm_vcpu *vcpu);
 bool kvm_arch_vcpu_preempted_in_kernel(struct kvm_vcpu *vcpu);
-int kvm_arch_post_init_vm(struct kvm *kvm);
 void kvm_arch_pre_destroy_vm(struct kvm *kvm);
 void kvm_arch_create_vm_debugfs(struct kvm *kvm);
@@ -444,7 +444,7 @@ static void assert_no_pages_cmma_dirty(struct kvm_vm *vm)
 	);
 }
 
-static void test_get_inital_dirty(void)
+static void test_get_initial_dirty(void)
 {
 	struct kvm_vm *vm = create_vm_two_memslots();
 	struct kvm_vcpu *vcpu;

@@ -651,7 +651,7 @@ struct testdef {
 } testlist[] = {
 	{ "migration mode and dirty tracking", test_migration_mode },
 	{ "GET_CMMA_BITS: basic calls", test_get_cmma_basic },
-	{ "GET_CMMA_BITS: all pages are dirty initally", test_get_inital_dirty },
+	{ "GET_CMMA_BITS: all pages are dirty initially", test_get_initial_dirty },
 	{ "GET_CMMA_BITS: holes are skipped", test_get_skip_holes },
 };
@@ -88,10 +88,6 @@ asm("test_skey_asm:\n"
 	"	ahi	%r0,1\n"
 	"	st	%r1,0(%r5,%r6)\n"
 
-	"	iske	%r1,%r6\n"
-	"	ahi	%r0,1\n"
-	"	diag	0,0,0x44\n"
-
 	"	sske	%r1,%r6\n"
 	"	xgr	%r1,%r1\n"
 	"	iske	%r1,%r6\n"

@@ -459,10 +455,14 @@ TEST_F(uc_kvm, uc_no_user_region)
 	};
 
 	ASSERT_EQ(-1, ioctl(self->vm_fd, KVM_SET_USER_MEMORY_REGION, &region));
-	ASSERT_EQ(EINVAL, errno);
+	ASSERT_TRUE(errno == EEXIST || errno == EINVAL)
+		TH_LOG("errno %s (%i) not expected for ioctl KVM_SET_USER_MEMORY_REGION",
+		       strerror(errno), errno);
 
 	ASSERT_EQ(-1, ioctl(self->vm_fd, KVM_SET_USER_MEMORY_REGION2, &region2));
-	ASSERT_EQ(EINVAL, errno);
+	ASSERT_TRUE(errno == EEXIST || errno == EINVAL)
+		TH_LOG("errno %s (%i) not expected for ioctl KVM_SET_USER_MEMORY_REGION2",
+		       strerror(errno), errno);
 }
 
 TEST_F(uc_kvm, uc_map_unmap)

@@ -596,7 +596,9 @@ TEST_F(uc_kvm, uc_skey)
 	ASSERT_EQ(true, uc_handle_exit(self));
 	ASSERT_EQ(1, sync_regs->gprs[0]);
 
-	/* ISKE */
+	/* SSKE + ISKE */
+	sync_regs->gprs[1] = skeyvalue;
+	run->kvm_dirty_regs |= KVM_SYNC_GPRS;
 	ASSERT_EQ(0, uc_run_once(self));
 
 	/*

@@ -608,21 +610,11 @@ TEST_F(uc_kvm, uc_skey)
 	TEST_ASSERT_EQ(0, sie_block->ictl & (ICTL_ISKE | ICTL_SSKE | ICTL_RRBE));
 	TEST_ASSERT_EQ(KVM_EXIT_S390_SIEIC, self->run->exit_reason);
 	TEST_ASSERT_EQ(ICPT_INST, sie_block->icptcode);
-	TEST_REQUIRE(sie_block->ipa != 0xb229);
+	TEST_REQUIRE(sie_block->ipa != 0xb22b);
 
-	/* ISKE contd. */
+	/* SSKE + ISKE contd. */
 	ASSERT_EQ(false, uc_handle_exit(self));
 	ASSERT_EQ(2, sync_regs->gprs[0]);
-	/* assert initial skey (ACC = 0, R & C = 1) */
-	ASSERT_EQ(0x06, sync_regs->gprs[1]);
-	uc_assert_diag44(self);
-
-	/* SSKE + ISKE */
-	sync_regs->gprs[1] = skeyvalue;
-	run->kvm_dirty_regs |= KVM_SYNC_GPRS;
-	ASSERT_EQ(0, uc_run_once(self));
-	ASSERT_EQ(false, uc_handle_exit(self));
-	ASSERT_EQ(3, sync_regs->gprs[0]);
 	ASSERT_EQ(skeyvalue, sync_regs->gprs[1]);
 	uc_assert_diag44(self);

@@ -631,7 +623,7 @@ TEST_F(uc_kvm, uc_skey)
 	run->kvm_dirty_regs |= KVM_SYNC_GPRS;
 	ASSERT_EQ(0, uc_run_once(self));
 	ASSERT_EQ(false, uc_handle_exit(self));
-	ASSERT_EQ(4, sync_regs->gprs[0]);
+	ASSERT_EQ(3, sync_regs->gprs[0]);
 	/* assert R reset but rest of skey unchanged */
 	ASSERT_EQ(skeyvalue & 0xfa, sync_regs->gprs[1]);
 	ASSERT_EQ(0, sync_regs->gprs[1] & 0x04);
@@ -1070,15 +1070,6 @@ out_err:
 	return ret;
 }
 
-/*
- * Called after the VM is otherwise initialized, but just before adding it to
- * the vm_list.
- */
-int __weak kvm_arch_post_init_vm(struct kvm *kvm)
-{
-	return 0;
-}
-
 /*
  * Called just after removing the VM from the vm_list, but before doing any
  * other destruction.

@@ -1199,10 +1190,6 @@ static struct kvm *kvm_create_vm(unsigned long type, const char *fdname)
 	if (r)
 		goto out_err_no_debugfs;
 
-	r = kvm_arch_post_init_vm(kvm);
-	if (r)
-		goto out_err;
-
 	mutex_lock(&kvm_lock);
 	list_add(&kvm->vm_list, &vm_list);
 	mutex_unlock(&kvm_lock);

@@ -1212,8 +1199,6 @@ static struct kvm *kvm_create_vm(unsigned long type, const char *fdname)
 
 	return kvm;
 
-out_err:
-	kvm_destroy_vm_debugfs(kvm);
 out_err_no_debugfs:
 	kvm_coalesced_mmio_free(kvm);
 out_no_coalesced_mmio:

@@ -1971,7 +1956,15 @@ static int kvm_set_memory_region(struct kvm *kvm,
 		return -EINVAL;
 	if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
 		return -EINVAL;
-	if ((mem->memory_size >> PAGE_SHIFT) > KVM_MEM_MAX_NR_PAGES)
+
+	/*
+	 * The size of userspace-defined memory regions is restricted in order
+	 * to play nice with dirty bitmap operations, which are indexed with an
+	 * "unsigned int".  KVM's internal memory regions don't support dirty
+	 * logging, and so are exempt.
+	 */
+	if (id < KVM_USER_MEM_SLOTS &&
+	    (mem->memory_size >> PAGE_SHIFT) > KVM_MEM_MAX_NR_PAGES)
 		return -EINVAL;
 
 	slots = __kvm_memslots(kvm, as_id);