2012-11-21 18:34:00 -08:00
|
|
|
/*
|
|
|
|
* This file is subject to the terms and conditions of the GNU General Public
|
|
|
|
* License. See the file "COPYING" in the main directory of this archive
|
|
|
|
* for more details.
|
|
|
|
*
|
|
|
|
* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
|
|
|
|
* Authors: Sanjay Lal <sanjayl@kymasys.com>
|
|
|
|
*/
|
|
|
|
|
|
|
|
#ifndef __MIPS_KVM_HOST_H__
|
|
|
|
#define __MIPS_KVM_HOST_H__
|
|
|
|
|
2017-03-14 10:15:31 +00:00
|
|
|
#include <linux/cpumask.h>
|
2012-11-21 18:34:00 -08:00
|
|
|
#include <linux/mutex.h>
|
|
|
|
#include <linux/hrtimer.h>
|
|
|
|
#include <linux/interrupt.h>
|
|
|
|
#include <linux/types.h>
|
|
|
|
#include <linux/kvm.h>
|
|
|
|
#include <linux/kvm_types.h>
|
|
|
|
#include <linux/threads.h>
|
|
|
|
#include <linux/spinlock.h>
|
|
|
|
|
2021-12-15 16:45:00 +08:00
|
|
|
#include <asm/asm.h>
|
2016-06-15 19:29:47 +01:00
|
|
|
#include <asm/inst.h>
|
2016-06-09 14:19:19 +01:00
|
|
|
#include <asm/mipsregs.h>
|
|
|
|
|
2020-05-23 15:56:37 +08:00
|
|
|
#include <kvm/iodev.h>
|
|
|
|
|
2014-05-29 10:16:28 +01:00
|
|
|
/* MIPS KVM register ids */

/*
 * Build a KVM one_reg id for a guest CP0 register, from its register
 * number (_R) and select field (_S).  The low bits encode 8*reg + sel;
 * the size flag selects 32-bit vs 64-bit access.
 */
#define MIPS_CP0_32(_R, _S)						\
	(KVM_REG_MIPS_CP0 | KVM_REG_SIZE_U32 | (8 * (_R) + (_S)))

#define MIPS_CP0_64(_R, _S)						\
	(KVM_REG_MIPS_CP0 | KVM_REG_SIZE_U64 | (8 * (_R) + (_S)))

#define KVM_REG_MIPS_CP0_INDEX		MIPS_CP0_32(0, 0)
#define KVM_REG_MIPS_CP0_ENTRYLO0	MIPS_CP0_64(2, 0)
#define KVM_REG_MIPS_CP0_ENTRYLO1	MIPS_CP0_64(3, 0)
#define KVM_REG_MIPS_CP0_CONTEXT	MIPS_CP0_64(4, 0)
#define KVM_REG_MIPS_CP0_CONTEXTCONFIG	MIPS_CP0_32(4, 1)
#define KVM_REG_MIPS_CP0_USERLOCAL	MIPS_CP0_64(4, 2)
#define KVM_REG_MIPS_CP0_XCONTEXTCONFIG	MIPS_CP0_64(4, 3)
#define KVM_REG_MIPS_CP0_PAGEMASK	MIPS_CP0_32(5, 0)
#define KVM_REG_MIPS_CP0_PAGEGRAIN	MIPS_CP0_32(5, 1)
#define KVM_REG_MIPS_CP0_SEGCTL0	MIPS_CP0_64(5, 2)
#define KVM_REG_MIPS_CP0_SEGCTL1	MIPS_CP0_64(5, 3)
#define KVM_REG_MIPS_CP0_SEGCTL2	MIPS_CP0_64(5, 4)
#define KVM_REG_MIPS_CP0_PWBASE		MIPS_CP0_64(5, 5)
#define KVM_REG_MIPS_CP0_PWFIELD	MIPS_CP0_64(5, 6)
#define KVM_REG_MIPS_CP0_PWSIZE		MIPS_CP0_64(5, 7)
#define KVM_REG_MIPS_CP0_WIRED		MIPS_CP0_32(6, 0)
#define KVM_REG_MIPS_CP0_PWCTL		MIPS_CP0_32(6, 6)
#define KVM_REG_MIPS_CP0_HWRENA		MIPS_CP0_32(7, 0)
#define KVM_REG_MIPS_CP0_BADVADDR	MIPS_CP0_64(8, 0)
#define KVM_REG_MIPS_CP0_BADINSTR	MIPS_CP0_32(8, 1)
#define KVM_REG_MIPS_CP0_BADINSTRP	MIPS_CP0_32(8, 2)
#define KVM_REG_MIPS_CP0_COUNT		MIPS_CP0_32(9, 0)
#define KVM_REG_MIPS_CP0_ENTRYHI	MIPS_CP0_64(10, 0)
#define KVM_REG_MIPS_CP0_COMPARE	MIPS_CP0_32(11, 0)
#define KVM_REG_MIPS_CP0_STATUS		MIPS_CP0_32(12, 0)
#define KVM_REG_MIPS_CP0_INTCTL		MIPS_CP0_32(12, 1)
#define KVM_REG_MIPS_CP0_CAUSE		MIPS_CP0_32(13, 0)
#define KVM_REG_MIPS_CP0_EPC		MIPS_CP0_64(14, 0)
#define KVM_REG_MIPS_CP0_PRID		MIPS_CP0_32(15, 0)
#define KVM_REG_MIPS_CP0_EBASE		MIPS_CP0_64(15, 1)
#define KVM_REG_MIPS_CP0_CONFIG		MIPS_CP0_32(16, 0)
#define KVM_REG_MIPS_CP0_CONFIG1	MIPS_CP0_32(16, 1)
#define KVM_REG_MIPS_CP0_CONFIG2	MIPS_CP0_32(16, 2)
#define KVM_REG_MIPS_CP0_CONFIG3	MIPS_CP0_32(16, 3)
#define KVM_REG_MIPS_CP0_CONFIG4	MIPS_CP0_32(16, 4)
#define KVM_REG_MIPS_CP0_CONFIG5	MIPS_CP0_32(16, 5)
#define KVM_REG_MIPS_CP0_CONFIG6	MIPS_CP0_32(16, 6)
#define KVM_REG_MIPS_CP0_CONFIG7	MIPS_CP0_32(16, 7)
#define KVM_REG_MIPS_CP0_MAARI		MIPS_CP0_64(17, 2)
#define KVM_REG_MIPS_CP0_XCONTEXT	MIPS_CP0_64(20, 0)
#define KVM_REG_MIPS_CP0_DIAG		MIPS_CP0_32(22, 0)
#define KVM_REG_MIPS_CP0_ERROREPC	MIPS_CP0_64(30, 0)
#define KVM_REG_MIPS_CP0_KSCRATCH1	MIPS_CP0_64(31, 2)
#define KVM_REG_MIPS_CP0_KSCRATCH2	MIPS_CP0_64(31, 3)
#define KVM_REG_MIPS_CP0_KSCRATCH3	MIPS_CP0_64(31, 4)
#define KVM_REG_MIPS_CP0_KSCRATCH4	MIPS_CP0_64(31, 5)
#define KVM_REG_MIPS_CP0_KSCRATCH5	MIPS_CP0_64(31, 6)
#define KVM_REG_MIPS_CP0_KSCRATCH6	MIPS_CP0_64(31, 7)
|
2014-05-29 10:16:28 +01:00
|
|
|
|
2012-11-21 18:34:00 -08:00
|
|
|
|
2020-05-23 15:56:30 +08:00
|
|
|
/* Maximum number of vCPUs per VM */
#define KVM_MAX_VCPUS		16

/* Default halt-polling interval, in nanoseconds */
#define KVM_HALT_POLL_NS_DEFAULT 500000

/*
 * VZ GuestID masks; defined elsewhere (extern).
 * NOTE(review): presumably initialised at boot from the hardware GuestID
 * field width — confirm against the defining translation unit.
 */
extern unsigned long GUESTID_MASK;
extern unsigned long GUESTID_FIRST_VERSION;
extern unsigned long GUESTID_VERSION_MASK;

/* Poison value used where no valid guest address is available */
#define KVM_INVALID_ADDR		0xdeadbeef
|
2012-11-21 18:34:00 -08:00
|
|
|
|
2016-08-01 09:07:52 +01:00
|
|
|
/*
 * EVA has overlapping user & kernel address spaces, so user VAs may be >
 * PAGE_OFFSET. For this reason we can't use the default KVM_HVA_ERR_BAD of
 * PAGE_OFFSET.
 *
 * Instead, bad host virtual addresses are the top error values, which
 * IS_ERR_VALUE() recognises (see kvm_is_error_hva() below).
 */
#define KVM_HVA_ERR_BAD			(-1UL)
#define KVM_HVA_ERR_RO_BAD		(-2UL)
|
|
|
|
|
|
|
|
/*
 * Report whether @addr is one of the error HVAs (KVM_HVA_ERR_BAD /
 * KVM_HVA_ERR_RO_BAD), which sit in the top error-value range.
 */
static inline bool kvm_is_error_hva(unsigned long addr)
{
	bool bad_hva = IS_ERR_VALUE(addr);

	return bad_hva;
}
|
|
|
|
|
2012-11-21 18:34:00 -08:00
|
|
|
/* Per-VM statistics; MIPS only carries the generic KVM counters. */
struct kvm_vm_stat {
	struct kvm_vm_stat_generic generic;
};
|
|
|
|
|
|
|
|
/*
 * Per-vCPU statistics: one counter per class of guest exit handled by
 * KVM, in addition to the generic KVM counters.
 */
struct kvm_vcpu_stat {
	struct kvm_vcpu_stat_generic generic;
	u64 wait_exits;
	u64 cache_exits;
	u64 signal_exits;
	u64 int_exits;
	u64 cop_unusable_exits;
	u64 tlbmod_exits;
	u64 tlbmiss_ld_exits;
	u64 tlbmiss_st_exits;
	u64 addrerr_st_exits;
	u64 addrerr_ld_exits;
	u64 syscall_exits;
	u64 resvd_inst_exits;
	u64 break_inst_exits;
	u64 trap_inst_exits;
	u64 msa_fpe_exits;
	u64 fpe_exits;
	u64 msa_disabled_exits;
	u64 flush_dcache_exits;
	/* VZ (hardware virtualization) guest exit counters */
	u64 vz_gpsi_exits;
	u64 vz_gsfc_exits;
	u64 vz_hc_exits;
	u64 vz_grr_exits;
	u64 vz_gva_exits;
	u64 vz_ghfc_exits;
	u64 vz_gpa_exits;
	u64 vz_resvd_exits;
#ifdef CONFIG_CPU_LOONGSON64
	u64 vz_cpucfg_exits;
#endif
};
|
|
|
|
|
|
|
|
/* MIPS needs no architecture-specific per-memslot state. */
struct kvm_arch_memory_slot {
};
|
|
|
|
|
2020-05-23 15:56:37 +08:00
|
|
|
#ifdef CONFIG_CPU_LOONGSON64
|
|
|
|
struct ipi_state {
|
|
|
|
uint32_t status;
|
|
|
|
uint32_t en;
|
|
|
|
uint32_t set;
|
|
|
|
uint32_t clear;
|
|
|
|
uint64_t buf[4];
|
|
|
|
};
|
|
|
|
|
|
|
|
struct loongson_kvm_ipi;
|
|
|
|
|
|
|
|
struct ipi_io_device {
|
|
|
|
int node_id;
|
|
|
|
struct loongson_kvm_ipi *ipi;
|
|
|
|
struct kvm_io_device device;
|
|
|
|
};
|
|
|
|
|
|
|
|
struct loongson_kvm_ipi {
|
|
|
|
spinlock_t lock;
|
|
|
|
struct kvm *kvm;
|
|
|
|
struct ipi_state ipistate[16];
|
|
|
|
struct ipi_io_device dev_ipi[4];
|
|
|
|
};
|
|
|
|
#endif
|
|
|
|
|
2012-11-21 18:34:00 -08:00
|
|
|
/* Architecture-specific per-VM state. */
struct kvm_arch {
	/* Guest physical mm */
	struct mm_struct gpa_mm;
	/* Mask of CPUs needing GPA ASID flush */
	cpumask_t asid_flush_mask;
#ifdef CONFIG_CPU_LOONGSON64
	/* Emulated Loongson inter-processor interrupt controller */
	struct loongson_kvm_ipi ipi;
#endif
};
|
|
|
|
|
2014-03-14 13:06:08 +00:00
|
|
|
/* Dimensions of the saved CP0 context: 32 registers x 8 selects. */
#define N_MIPS_COPROC_REGS	32
#define N_MIPS_COPROC_SEL	8

/* Saved guest CP0 register file, indexed by (register, select). */
struct mips_coproc {
	unsigned long reg[N_MIPS_COPROC_REGS][N_MIPS_COPROC_SEL];
#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
	/* Debug-only per-register counters */
	unsigned long stat[N_MIPS_COPROC_REGS][N_MIPS_COPROC_SEL];
#endif
};
|
|
|
|
|
|
|
|
/*
 * Coprocessor 0 register names
 *
 * Some register numbers appear twice because the same number hosts
 * different registers on different selects or cores (e.g. 26 is
 * ECC/ERRCTL, 28 is TAG_LO/DATA_LO, 29 is TAG_HI/DATA_HI).
 */
#define MIPS_CP0_TLB_INDEX	0
#define MIPS_CP0_TLB_RANDOM	1
#define MIPS_CP0_TLB_LOW	2
#define MIPS_CP0_TLB_LO0	2
#define MIPS_CP0_TLB_LO1	3
#define MIPS_CP0_TLB_CONTEXT	4
#define MIPS_CP0_TLB_PG_MASK	5
#define MIPS_CP0_TLB_WIRED	6
#define MIPS_CP0_HWRENA		7
#define MIPS_CP0_BAD_VADDR	8
#define MIPS_CP0_COUNT		9
#define MIPS_CP0_TLB_HI		10
#define MIPS_CP0_COMPARE	11
#define MIPS_CP0_STATUS		12
#define MIPS_CP0_CAUSE		13
#define MIPS_CP0_EXC_PC		14
#define MIPS_CP0_PRID		15
#define MIPS_CP0_CONFIG		16
#define MIPS_CP0_LLADDR		17
#define MIPS_CP0_WATCH_LO	18
#define MIPS_CP0_WATCH_HI	19
#define MIPS_CP0_TLB_XCONTEXT	20
#define MIPS_CP0_DIAG		22
#define MIPS_CP0_ECC		26
#define MIPS_CP0_CACHE_ERR	27
#define MIPS_CP0_TAG_LO		28
#define MIPS_CP0_TAG_HI		29
#define MIPS_CP0_ERROR_PC	30
#define MIPS_CP0_DEBUG		23
#define MIPS_CP0_DEPC		24
#define MIPS_CP0_PERFCNT	25
#define MIPS_CP0_ERRCTL		26
#define MIPS_CP0_DATA_LO	28
#define MIPS_CP0_DATA_HI	29
#define MIPS_CP0_DESAVE		31

/* Select values for the Config register family (register 16) */
#define MIPS_CP0_CONFIG_SEL	0
#define MIPS_CP0_CONFIG1_SEL	1
#define MIPS_CP0_CONFIG2_SEL	2
#define MIPS_CP0_CONFIG3_SEL	3
#define MIPS_CP0_CONFIG4_SEL	4
#define MIPS_CP0_CONFIG5_SEL	5

/* VZ guest control registers */
#define MIPS_CP0_GUESTCTL2	10
#define MIPS_CP0_GUESTCTL2_SEL	5
#define MIPS_CP0_GTOFFSET	12
#define MIPS_CP0_GTOFFSET_SEL	7
|
|
|
|
|
2012-11-21 18:34:00 -08:00
|
|
|
/* Resume Flags: tell the exit handler how to continue after an exit. */
#define RESUME_FLAG_DR		(1<<0)	/* Reload guest nonvolatile state? */
#define RESUME_FLAG_HOST	(1<<1)	/* Resume host? */

#define RESUME_GUEST		0
#define RESUME_GUEST_DR		RESUME_FLAG_DR
#define RESUME_HOST		RESUME_FLAG_HOST
|
2012-11-21 18:34:00 -08:00
|
|
|
|
|
|
|
/* Outcome of instruction emulation, returned by the emulation helpers. */
enum emulation_result {
	EMULATE_DONE,		/* no further processing */
	EMULATE_DO_MMIO,	/* kvm_run filled with MMIO request */
	EMULATE_FAIL,		/* can't emulate this instruction */
	EMULATE_WAIT,		/* WAIT instruction */
	EMULATE_PRIV_FAIL,
	EMULATE_EXCEPT,		/* A guest exception has been generated */
	EMULATE_HYPERCALL,	/* HYPCALL instruction */
};
|
|
|
|
|
2020-05-23 15:56:29 +08:00
|
|
|
/* Mask of the VPN2 field in EntryHi (bits above the 8K page pair). */
#if defined(CONFIG_64BIT)
#define VPN2_MASK		GENMASK(cpu_vmbits - 1, 13)
#else
#define VPN2_MASK		0xffffe000
#endif
#define KVM_ENTRYHI_ASID	cpu_asid_mask(&boot_cpu_data)

/* Helpers for inspecting a struct kvm_mips_tlb entry (x) */
#define TLB_IS_GLOBAL(x)	((x).tlb_lo[0] & (x).tlb_lo[1] & ENTRYLO_G)
#define TLB_VPN2(x)		((x).tlb_hi & VPN2_MASK)
#define TLB_ASID(x)		((x).tlb_hi & KVM_ENTRYHI_ASID)
/* Which of the even/odd pair of pages does (va) fall in? */
#define TLB_LO_IDX(x, va)	(((va) >> PAGE_SHIFT) & 1)
#define TLB_IS_VALID(x, va)	((x).tlb_lo[TLB_LO_IDX(x, va)] & ENTRYLO_V)
#define TLB_IS_DIRTY(x, va)	((x).tlb_lo[TLB_LO_IDX(x, va)] & ENTRYLO_D)
/* VPN2 match, ignoring bits covered by the entry's page mask */
#define TLB_HI_VPN2_HIT(x, y)	((TLB_VPN2(x) & ~(x).tlb_mask) ==	\
				 ((y) & VPN2_MASK & ~(x).tlb_mask))
/* ASID match (or global entry, which matches any ASID) */
#define TLB_HI_ASID_HIT(x, y)	(TLB_IS_GLOBAL(x) ||			\
				 TLB_ASID(x) == ((y) & KVM_ENTRYHI_ASID))

/* A software copy of one guest TLB entry. */
struct kvm_mips_tlb {
	long tlb_mask;
	long tlb_hi;
	long tlb_lo[2];
};
|
|
|
|
|
2016-06-14 09:40:10 +01:00
|
|
|
/* Bits of kvm_vcpu_arch::aux_inuse: which auxiliary state is loaded. */
#define KVM_MIPS_AUX_FPU	0x1
#define KVM_MIPS_AUX_MSA	0x2
|
2014-11-18 14:09:12 +00:00
|
|
|
|
2012-11-21 18:34:00 -08:00
|
|
|
/* Architecture-specific per-vCPU state. */
struct kvm_vcpu_arch {
	/* Guest exception vector base */
	void *guest_ebase;
	/* Entry point that runs the guest for this vCPU */
	int (*vcpu_run)(struct kvm_vcpu *vcpu);

	/* Host registers preserved across guest mode execution */
	unsigned long host_stack;
	unsigned long host_gp;
	unsigned long host_pgd;
	unsigned long host_entryhi;

	/* Host CP0 registers used when handling exits from guest */
	unsigned long host_cp0_badvaddr;
	unsigned long host_cp0_epc;
	u32 host_cp0_cause;
	u32 host_cp0_guestctl0;
	u32 host_cp0_badinstr;
	u32 host_cp0_badinstrp;

	/* GPRS */
	unsigned long gprs[32];
	unsigned long hi;
	unsigned long lo;
	unsigned long pc;

	/* FPU State */
	struct mips_fpu_struct fpu;
	/* Which auxiliary state is loaded (KVM_MIPS_AUX_*) */
	unsigned int aux_inuse;

	/* COP0 State */
	struct mips_coproc cop0;

	/* Resume PC after MMIO completion */
	unsigned long io_pc;
	/* GPR used as IO source/target */
	u32 io_gpr;

	/* Emulated guest count/compare timer */
	struct hrtimer comparecount_timer;
	/* Count timer control KVM register */
	u32 count_ctl;
	/* Count bias from the raw time */
	u32 count_bias;
	/* Frequency of timer in Hz */
	u32 count_hz;
	/* Dynamic nanosecond bias (multiple of count_period) to avoid overflow */
	s64 count_dyn_bias;
	/* Resume time */
	ktime_t count_resume;
	/* Period of timer tick in ns */
	u64 count_period;

	/* Bitmask of exceptions that are pending */
	unsigned long pending_exceptions;

	/* Bitmask of pending exceptions to be cleared */
	unsigned long pending_exceptions_clr;

	/* Cache some mmu pages needed inside spinlock regions */
	struct kvm_mmu_memory_cache mmu_page_cache;

	/* vcpu's vzguestid is different on each host cpu in an smp system */
	u32 vzguestid[NR_CPUS];

	/* wired guest TLB entries */
	struct kvm_mips_tlb *wired_tlb;
	unsigned int wired_tlb_limit;
	unsigned int wired_tlb_used;

	/* emulated guest MAAR registers */
	unsigned long maar[6];

	/* Last CPU the VCPU state was loaded on */
	int last_sched_cpu;
	/* Last CPU the VCPU actually executed guest code on */
	int last_exec_cpu;

	/* WAIT executed */
	int wait;

	/* Whether the guest may use the FPU / MSA */
	u8 fpu_enabled;
	u8 msa_enabled;
};
|
|
|
|
|
2014-05-29 10:16:33 +01:00
|
|
|
/*
 * Atomically OR the bits of @val into the saved guest CP0 register word
 * at @reg, using an LL/SC sequence so concurrent modifiers don't lose
 * updates (retries until the SC succeeds).
 */
static inline void _kvm_atomic_set_c0_guest_reg(unsigned long *reg,
						unsigned long val)
{
	unsigned long temp;
	do {
		__asm__ __volatile__(
		" .set push \n"
		" .set "MIPS_ISA_ARCH_LEVEL" \n"
		" "__stringify(LONG_LL) " %0, %1 \n"
		" or %0, %2 \n"
		" "__stringify(LONG_SC) " %0, %1 \n"
		" .set pop \n"
		: "=&r" (temp), "+m" (*reg)
		: "r" (val));
	} while (unlikely(!temp));	/* SC writes 0 on failure; retry */
}
|
|
|
|
|
|
|
|
/*
 * Atomically clear the bits of @val in the saved guest CP0 register word
 * at @reg (ANDs with ~val under LL/SC, retrying until the SC succeeds).
 */
static inline void _kvm_atomic_clear_c0_guest_reg(unsigned long *reg,
						  unsigned long val)
{
	unsigned long temp;
	do {
		__asm__ __volatile__(
		" .set push \n"
		" .set "MIPS_ISA_ARCH_LEVEL" \n"
		" "__stringify(LONG_LL) " %0, %1 \n"
		" and %0, %2 \n"
		" "__stringify(LONG_SC) " %0, %1 \n"
		" .set pop \n"
		: "=&r" (temp), "+m" (*reg)
		: "r" (~val));
	} while (unlikely(!temp));	/* SC writes 0 on failure; retry */
}
|
|
|
|
|
|
|
|
/*
 * Atomically update the bits selected by @change in the saved guest CP0
 * register word at @reg to the corresponding bits of @val:
 *	*reg = (*reg & ~change) | (val & change)
 * performed under LL/SC, retrying until the SC succeeds.
 */
static inline void _kvm_atomic_change_c0_guest_reg(unsigned long *reg,
						   unsigned long change,
						   unsigned long val)
{
	unsigned long temp;
	do {
		__asm__ __volatile__(
		" .set push \n"
		" .set "MIPS_ISA_ARCH_LEVEL" \n"
		" "__stringify(LONG_LL) " %0, %1 \n"
		" and %0, %2 \n"
		" or %0, %3 \n"
		" "__stringify(LONG_SC) " %0, %1 \n"
		" .set pop \n"
		: "=&r" (temp), "+m" (*reg)
		: "r" (~change), "r" (val & change));
	} while (unlikely(!temp));	/* SC writes 0 on failure; retry */
}
|
|
|
|
|
KVM: MIPS: Abstract guest CP0 register access for VZ
Abstract the MIPS KVM guest CP0 register access macros into inline
functions which are generated by macros. This allows them to be
generated differently for VZ, where they will usually need to access the
hardware guest CP0 context rather than the saved values in RAM.
Accessors for each individual register are generated using these macros:
- __BUILD_KVM_*_SW() for registers which are not present in the VZ
hardware guest context, so kvm_{read,write}_c0_guest_##name() will
access the saved value in RAM regardless of whether VZ is enabled.
- __BUILD_KVM_*_HW() for registers which are present in the VZ hardware
guest context, so kvm_{read,write}_c0_guest_##name() will access the
hardware register when VZ is enabled.
These build the underlying accessors using further macros:
- __BUILD_KVM_*_SAVED() builds e.g. kvm_{read,write}_sw_gc0_##name()
functions for accessing the saved versions of the registers in RAM.
This is used for implementing the common
kvm_{read,write}_c0_guest_##name() accessors with T&E where registers
are always stored in RAM, but are also available with VZ HW registers
to allow them to be accessed while saved.
- __BUILD_KVM_*_VZ() builds e.g. kvm_{read,write}_vz_gc0_##name()
functions for accessing the VZ hardware guest context registers
directly. This is used for implementing the common
kvm_{read,write}_c0_guest_##name() accessors with VZ.
- __BUILD_KVM_*_WRAP() builds wrappers with different names, which
allows the common kvm_{read,write}_c0_guest_##name() functions to be
implemented using the VZ accessors while still having the SAVED
accessors available too.
- __BUILD_KVM_SAVE_VZ() builds functions for saving and restoring VZ
hardware guest context register state to RAM, improving conciseness
of VZ context saving and restoring.
Similar macros exist for generating modifiers (set, clear, change),
either with a normal unlocked read/modify/write, or using atomic LL/SC
sequences.
These changes change the types of 32-bit registers to u32 instead of
unsigned long, which requires some changes to printk() functions in MIPS
KVM.
Signed-off-by: James Hogan <james.hogan@imgtec.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: "Radim Krčmář" <rkrcmar@redhat.com>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: linux-mips@linux-mips.org
Cc: kvm@vger.kernel.org
2017-03-14 10:15:25 +00:00
|
|
|
/*
 * Guest register types, used in accessor build below.
 * __KVMT##type in the generator macros expands to one of these:
 * "32" -> u32, "l" -> unsigned long.
 */
#define __KVMT32	u32
#define __KVMTl	unsigned long
|
2014-05-29 10:16:33 +01:00
|
|
|
|
KVM: MIPS: Abstract guest CP0 register access for VZ
Abstract the MIPS KVM guest CP0 register access macros into inline
functions which are generated by macros. This allows them to be
generated differently for VZ, where they will usually need to access the
hardware guest CP0 context rather than the saved values in RAM.
Accessors for each individual register are generated using these macros:
- __BUILD_KVM_*_SW() for registers which are not present in the VZ
hardware guest context, so kvm_{read,write}_c0_guest_##name() will
access the saved value in RAM regardless of whether VZ is enabled.
- __BUILD_KVM_*_HW() for registers which are present in the VZ hardware
guest context, so kvm_{read,write}_c0_guest_##name() will access the
hardware register when VZ is enabled.
These build the underlying accessors using further macros:
- __BUILD_KVM_*_SAVED() builds e.g. kvm_{read,write}_sw_gc0_##name()
functions for accessing the saved versions of the registers in RAM.
This is used for implementing the common
kvm_{read,write}_c0_guest_##name() accessors with T&E where registers
are always stored in RAM, but are also available with VZ HW registers
to allow them to be accessed while saved.
- __BUILD_KVM_*_VZ() builds e.g. kvm_{read,write}_vz_gc0_##name()
functions for accessing the VZ hardware guest context registers
directly. This is used for implementing the common
kvm_{read,write}_c0_guest_##name() accessors with VZ.
- __BUILD_KVM_*_WRAP() builds wrappers with different names, which
allows the common kvm_{read,write}_c0_guest_##name() functions to be
implemented using the VZ accessors while still having the SAVED
accessors available too.
- __BUILD_KVM_SAVE_VZ() builds functions for saving and restoring VZ
hardware guest context register state to RAM, improving conciseness
of VZ context saving and restoring.
Similar macros exist for generating modifiers (set, clear, change),
either with a normal unlocked read/modify/write, or using atomic LL/SC
sequences.
These changes change the types of 32-bit registers to u32 instead of
unsigned long, which requires some changes to printk() functions in MIPS
KVM.
Signed-off-by: James Hogan <james.hogan@imgtec.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: "Radim Krčmář" <rkrcmar@redhat.com>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: linux-mips@linux-mips.org
Cc: kvm@vger.kernel.org
2017-03-14 10:15:25 +00:00
|
|
|
/*
 * __BUILD_KVM_$ops_SAVED(): kvm_$op_sw_gc0_$reg()
 * These operate on the saved guest C0 state in RAM.
 */

/* Generate saved context simple accessors (read/write of cop0->reg). */
#define __BUILD_KVM_RW_SAVED(name, type, _reg, sel)			\
static inline __KVMT##type kvm_read_sw_gc0_##name(struct mips_coproc *cop0) \
{									\
	return cop0->reg[(_reg)][(sel)];				\
}									\
static inline void kvm_write_sw_gc0_##name(struct mips_coproc *cop0,	\
					   __KVMT##type val)		\
{									\
	cop0->reg[(_reg)][(sel)] = val;					\
}
|
|
|
|
|
|
|
|
/*
 * Generate saved context bitwise modifiers (set/clear/change).
 * These are plain read-modify-write, NOT atomic; see
 * __BUILD_KVM_ATOMIC_SAVED for the LL/SC variants.
 */
#define __BUILD_KVM_SET_SAVED(name, type, _reg, sel)			\
static inline void kvm_set_sw_gc0_##name(struct mips_coproc *cop0,	\
					 __KVMT##type val)		\
{									\
	cop0->reg[(_reg)][(sel)] |= val;				\
}									\
static inline void kvm_clear_sw_gc0_##name(struct mips_coproc *cop0,	\
					   __KVMT##type val)		\
{									\
	cop0->reg[(_reg)][(sel)] &= ~val;				\
}									\
static inline void kvm_change_sw_gc0_##name(struct mips_coproc *cop0,	\
					    __KVMT##type mask,		\
					    __KVMT##type val)		\
{									\
	unsigned long _mask = mask;					\
	cop0->reg[(_reg)][(sel)] &= ~_mask;				\
	cop0->reg[(_reg)][(sel)] |= val & _mask;			\
}
|
|
|
|
|
|
|
|
/*
 * Generate saved context atomic bitwise modifiers, built on the
 * _kvm_atomic_*_c0_guest_reg() LL/SC helpers above so concurrent
 * modifiers don't lose updates.
 */
#define __BUILD_KVM_ATOMIC_SAVED(name, type, _reg, sel)			\
static inline void kvm_set_sw_gc0_##name(struct mips_coproc *cop0,	\
					 __KVMT##type val)		\
{									\
	_kvm_atomic_set_c0_guest_reg(&cop0->reg[(_reg)][(sel)], val);	\
}									\
static inline void kvm_clear_sw_gc0_##name(struct mips_coproc *cop0,	\
					   __KVMT##type val)		\
{									\
	_kvm_atomic_clear_c0_guest_reg(&cop0->reg[(_reg)][(sel)], val);	\
}									\
static inline void kvm_change_sw_gc0_##name(struct mips_coproc *cop0,	\
					    __KVMT##type mask,		\
					    __KVMT##type val)		\
{									\
	_kvm_atomic_change_c0_guest_reg(&cop0->reg[(_reg)][(sel)], mask, \
					val);				\
}
|
|
|
|
|
KVM: MIPS: Abstract guest CP0 register access for VZ
Abstract the MIPS KVM guest CP0 register access macros into inline
functions which are generated by macros. This allows them to be
generated differently for VZ, where they will usually need to access the
hardware guest CP0 context rather than the saved values in RAM.
Accessors for each individual register are generated using these macros:
- __BUILD_KVM_*_SW() for registers which are not present in the VZ
hardware guest context, so kvm_{read,write}_c0_guest_##name() will
access the saved value in RAM regardless of whether VZ is enabled.
- __BUILD_KVM_*_HW() for registers which are present in the VZ hardware
guest context, so kvm_{read,write}_c0_guest_##name() will access the
hardware register when VZ is enabled.
These build the underlying accessors using further macros:
- __BUILD_KVM_*_SAVED() builds e.g. kvm_{read,write}_sw_gc0_##name()
functions for accessing the saved versions of the registers in RAM.
This is used for implementing the common
kvm_{read,write}_c0_guest_##name() accessors with T&E where registers
are always stored in RAM, but are also available with VZ HW registers
to allow them to be accessed while saved.
- __BUILD_KVM_*_VZ() builds e.g. kvm_{read,write}_vz_gc0_##name()
functions for accessing the VZ hardware guest context registers
directly. This is used for implementing the common
kvm_{read,write}_c0_guest_##name() accessors with VZ.
- __BUILD_KVM_*_WRAP() builds wrappers with different names, which
allows the common kvm_{read,write}_c0_guest_##name() functions to be
implemented using the VZ accessors while still having the SAVED
accessors available too.
- __BUILD_KVM_SAVE_VZ() builds functions for saving and restoring VZ
hardware guest context register state to RAM, improving conciseness
of VZ context saving and restoring.
Similar macros exist for generating modifiers (set, clear, change),
either with a normal unlocked read/modify/write, or using atomic LL/SC
sequences.
These changes change the types of 32-bit registers to u32 instead of
unsigned long, which requires some changes to printk() functions in MIPS
KVM.
Signed-off-by: James Hogan <james.hogan@imgtec.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: "Radim Krčmář" <rkrcmar@redhat.com>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: linux-mips@linux-mips.org
Cc: kvm@vger.kernel.org
2017-03-14 10:15:25 +00:00
|
|
|
/*
 * __BUILD_KVM_$ops_VZ(): kvm_$op_vz_gc0_$reg()
 * These operate on the VZ guest C0 context in hardware.
 */

/*
 * Generate VZ guest context simple accessors.
 * The cop0 argument is unused; access goes through read_gc0_*/write_gc0_*.
 */
#define __BUILD_KVM_RW_VZ(name, type, _reg, sel)			\
static inline __KVMT##type kvm_read_vz_gc0_##name(struct mips_coproc *cop0) \
{									\
	return read_gc0_##name();					\
}									\
static inline void kvm_write_vz_gc0_##name(struct mips_coproc *cop0,	\
					   __KVMT##type val)		\
{									\
	write_gc0_##name(val);						\
}
|
|
|
|
|
|
|
|
/* Generate VZ guest context bitwise modifiers (hardware gc0 registers). */
#define __BUILD_KVM_SET_VZ(name, type, _reg, sel)			\
static inline void kvm_set_vz_gc0_##name(struct mips_coproc *cop0,	\
					 __KVMT##type val)		\
{									\
	set_gc0_##name(val);						\
}									\
static inline void kvm_clear_vz_gc0_##name(struct mips_coproc *cop0,	\
					   __KVMT##type val)		\
{									\
	clear_gc0_##name(val);						\
}									\
static inline void kvm_change_vz_gc0_##name(struct mips_coproc *cop0,	\
					    __KVMT##type mask,		\
					    __KVMT##type val)		\
{									\
	change_gc0_##name(mask, val);					\
}
|
|
|
|
|
|
|
|
/*
 * Generate VZ guest context save/restore to/from saved context:
 * restore writes the RAM copy into the hardware gc0 register,
 * save captures the hardware gc0 register into the RAM copy.
 */
#define __BUILD_KVM_SAVE_VZ(name, _reg, sel)				\
static inline void kvm_restore_gc0_##name(struct mips_coproc *cop0)	\
{									\
	write_gc0_##name(cop0->reg[(_reg)][(sel)]);			\
}									\
static inline void kvm_save_gc0_##name(struct mips_coproc *cop0)	\
{									\
	cop0->reg[(_reg)][(sel)] = read_gc0_##name();			\
}
|
|
|
|
|
|
|
|
/*
 * __BUILD_KVM_$ops_WRAP(): kvm_$op_$name1() -> kvm_$op_$name2()
 * These wrap a set of operations to provide them with a different name.
 */

/* Generate simple accessor wrapper */
/* Pure forwarding: kvm_{read,write}_##name1() call kvm_{read,write}_##name2(). */
#define __BUILD_KVM_RW_WRAP(name1, name2, type)				\
static inline __KVMT##type kvm_read_##name1(struct mips_coproc *cop0)	\
{									\
	return kvm_read_##name2(cop0);					\
}									\
static inline void kvm_write_##name1(struct mips_coproc *cop0,		\
				     __KVMT##type val)			\
{									\
	kvm_write_##name2(cop0, val);					\
}
|
|
|
|
|
|
|
|
/* Generate bitwise modifier wrapper */
/* Pure forwarding of set/clear/change from ##name1 to ##name2 accessors. */
#define __BUILD_KVM_SET_WRAP(name1, name2, type)			\
static inline void kvm_set_##name1(struct mips_coproc *cop0,		\
				   __KVMT##type val)			\
{									\
	kvm_set_##name2(cop0, val);					\
}									\
static inline void kvm_clear_##name1(struct mips_coproc *cop0,		\
				     __KVMT##type val)			\
{									\
	kvm_clear_##name2(cop0, val);					\
}									\
static inline void kvm_change_##name1(struct mips_coproc *cop0,		\
				      __KVMT##type mask,		\
				      __KVMT##type val)			\
{									\
	kvm_change_##name2(cop0, mask, val);				\
}
|
|
|
|
|
|
|
|
/*
 * __BUILD_KVM_$ops_SW(): kvm_$op_c0_guest_$reg() -> kvm_$op_sw_gc0_$reg()
 * These generate accessors operating on the saved context in RAM, and wrap them
 * with the common guest C0 accessors (for use by common emulation code).
 */

#define __BUILD_KVM_RW_SW(name, type, _reg, sel)			\
	__BUILD_KVM_RW_SAVED(name, type, _reg, sel)			\
	__BUILD_KVM_RW_WRAP(c0_guest_##name, sw_gc0_##name, type)
|
|
|
|
|
|
|
|
/* Saved-context bitwise modifiers, wrapped as the common guest C0 modifiers. */
#define __BUILD_KVM_SET_SW(name, type, _reg, sel)			\
	__BUILD_KVM_SET_SAVED(name, type, _reg, sel)			\
	__BUILD_KVM_SET_WRAP(c0_guest_##name, sw_gc0_##name, type)
|
|
|
|
|
|
|
|
/*
 * Saved-context modifiers using the atomic (LL/SC) SAVED variant, wrapped as
 * the common guest C0 modifiers.
 */
#define __BUILD_KVM_ATOMIC_SW(name, type, _reg, sel)			\
	__BUILD_KVM_ATOMIC_SAVED(name, type, _reg, sel)			\
	__BUILD_KVM_SET_WRAP(c0_guest_##name, sw_gc0_##name, type)
|
|
|
|
|
|
|
|
/*
 * VZ (hardware assisted virtualisation)
 * These macros use the active guest state in VZ mode (hardware registers),
 */

/*
 * __BUILD_KVM_$ops_HW(): kvm_$op_c0_guest_$reg() -> kvm_$op_vz_gc0_$reg()
 * These generate accessors operating on the VZ guest context in hardware, and
 * wrap them with the common guest C0 accessors (for use by common emulation
 * code).
 *
 * Accessors operating on the saved context in RAM are also generated to allow
 * convenient explicit saving and restoring of the state.
 */

#define __BUILD_KVM_RW_HW(name, type, _reg, sel)			\
	__BUILD_KVM_RW_SAVED(name, type, _reg, sel)			\
	__BUILD_KVM_RW_VZ(name, type, _reg, sel)			\
	__BUILD_KVM_RW_WRAP(c0_guest_##name, vz_gc0_##name, type)	\
	__BUILD_KVM_SAVE_VZ(name, _reg, sel)
|
|
|
|
|
|
|
|
/*
 * Bitwise modifiers on both the saved context and the VZ hardware context;
 * the common guest C0 modifiers resolve to the hardware (vz_gc0) variants.
 */
#define __BUILD_KVM_SET_HW(name, type, _reg, sel)			\
	__BUILD_KVM_SET_SAVED(name, type, _reg, sel)			\
	__BUILD_KVM_SET_VZ(name, type, _reg, sel)			\
	__BUILD_KVM_SET_WRAP(c0_guest_##name, vz_gc0_##name, type)
|
|
|
|
|
|
|
|
/*
 * We can't do atomic modifications of COP0 state if hardware can modify it.
 * Races must be handled explicitly.
 */
#define __BUILD_KVM_ATOMIC_HW	__BUILD_KVM_SET_HW
|
|
|
|
|
|
|
|
/*
 * Define accessors for CP0 registers that are accessible to the guest. These
 * are primarily used by common emulation code, which may need to access the
 * registers differently depending on the implementation.
 *
 * HW registers live in the VZ hardware guest context when VZ is active;
 * SW registers only ever live in the saved context in RAM.
 *
 *	fns_hw/sw	name		type	reg num		select
 */
__BUILD_KVM_RW_HW(index, 32, MIPS_CP0_TLB_INDEX, 0)
__BUILD_KVM_RW_HW(entrylo0, l, MIPS_CP0_TLB_LO0, 0)
__BUILD_KVM_RW_HW(entrylo1, l, MIPS_CP0_TLB_LO1, 0)
__BUILD_KVM_RW_HW(context, l, MIPS_CP0_TLB_CONTEXT, 0)
__BUILD_KVM_RW_HW(contextconfig, 32, MIPS_CP0_TLB_CONTEXT, 1)
__BUILD_KVM_RW_HW(userlocal, l, MIPS_CP0_TLB_CONTEXT, 2)
__BUILD_KVM_RW_HW(xcontextconfig, l, MIPS_CP0_TLB_CONTEXT, 3)
__BUILD_KVM_RW_HW(pagemask, l, MIPS_CP0_TLB_PG_MASK, 0)
__BUILD_KVM_RW_HW(pagegrain, 32, MIPS_CP0_TLB_PG_MASK, 1)
__BUILD_KVM_RW_HW(segctl0, l, MIPS_CP0_TLB_PG_MASK, 2)
__BUILD_KVM_RW_HW(segctl1, l, MIPS_CP0_TLB_PG_MASK, 3)
__BUILD_KVM_RW_HW(segctl2, l, MIPS_CP0_TLB_PG_MASK, 4)
__BUILD_KVM_RW_HW(pwbase, l, MIPS_CP0_TLB_PG_MASK, 5)
__BUILD_KVM_RW_HW(pwfield, l, MIPS_CP0_TLB_PG_MASK, 6)
__BUILD_KVM_RW_HW(pwsize, l, MIPS_CP0_TLB_PG_MASK, 7)
__BUILD_KVM_RW_HW(wired, 32, MIPS_CP0_TLB_WIRED, 0)
__BUILD_KVM_RW_HW(pwctl, 32, MIPS_CP0_TLB_WIRED, 6)
__BUILD_KVM_RW_HW(hwrena, 32, MIPS_CP0_HWRENA, 0)
__BUILD_KVM_RW_HW(badvaddr, l, MIPS_CP0_BAD_VADDR, 0)
__BUILD_KVM_RW_HW(badinstr, 32, MIPS_CP0_BAD_VADDR, 1)
__BUILD_KVM_RW_HW(badinstrp, 32, MIPS_CP0_BAD_VADDR, 2)
/* Count is emulated in software (see the count/timer helpers below) */
__BUILD_KVM_RW_SW(count, 32, MIPS_CP0_COUNT, 0)
__BUILD_KVM_RW_HW(entryhi, l, MIPS_CP0_TLB_HI, 0)
__BUILD_KVM_RW_HW(compare, 32, MIPS_CP0_COMPARE, 0)
__BUILD_KVM_RW_HW(status, 32, MIPS_CP0_STATUS, 0)
__BUILD_KVM_RW_HW(intctl, 32, MIPS_CP0_STATUS, 1)
__BUILD_KVM_RW_HW(cause, 32, MIPS_CP0_CAUSE, 0)
__BUILD_KVM_RW_HW(epc, l, MIPS_CP0_EXC_PC, 0)
__BUILD_KVM_RW_SW(prid, 32, MIPS_CP0_PRID, 0)
__BUILD_KVM_RW_HW(ebase, l, MIPS_CP0_PRID, 1)
__BUILD_KVM_RW_HW(config, 32, MIPS_CP0_CONFIG, 0)
__BUILD_KVM_RW_HW(config1, 32, MIPS_CP0_CONFIG, 1)
__BUILD_KVM_RW_HW(config2, 32, MIPS_CP0_CONFIG, 2)
__BUILD_KVM_RW_HW(config3, 32, MIPS_CP0_CONFIG, 3)
__BUILD_KVM_RW_HW(config4, 32, MIPS_CP0_CONFIG, 4)
__BUILD_KVM_RW_HW(config5, 32, MIPS_CP0_CONFIG, 5)
__BUILD_KVM_RW_HW(config6, 32, MIPS_CP0_CONFIG, 6)
__BUILD_KVM_RW_HW(config7, 32, MIPS_CP0_CONFIG, 7)
__BUILD_KVM_RW_SW(maari, l, MIPS_CP0_LLADDR, 2)
__BUILD_KVM_RW_HW(xcontext, l, MIPS_CP0_TLB_XCONTEXT, 0)
__BUILD_KVM_RW_HW(errorepc, l, MIPS_CP0_ERROR_PC, 0)
__BUILD_KVM_RW_HW(kscratch1, l, MIPS_CP0_DESAVE, 2)
__BUILD_KVM_RW_HW(kscratch2, l, MIPS_CP0_DESAVE, 3)
__BUILD_KVM_RW_HW(kscratch3, l, MIPS_CP0_DESAVE, 4)
__BUILD_KVM_RW_HW(kscratch4, l, MIPS_CP0_DESAVE, 5)
__BUILD_KVM_RW_HW(kscratch5, l, MIPS_CP0_DESAVE, 6)
__BUILD_KVM_RW_HW(kscratch6, l, MIPS_CP0_DESAVE, 7)

/* Bitwise operations (on HW state) */
__BUILD_KVM_SET_HW(status, 32, MIPS_CP0_STATUS, 0)
/* Cause can be modified asynchronously from hardirq hrtimer callback */
__BUILD_KVM_ATOMIC_HW(cause, 32, MIPS_CP0_CAUSE, 0)
__BUILD_KVM_SET_HW(ebase, l, MIPS_CP0_PRID, 1)

/* Bitwise operations (on saved state) */
__BUILD_KVM_SET_SAVED(config, 32, MIPS_CP0_CONFIG, 0)
__BUILD_KVM_SET_SAVED(config1, 32, MIPS_CP0_CONFIG, 1)
__BUILD_KVM_SET_SAVED(config2, 32, MIPS_CP0_CONFIG, 2)
__BUILD_KVM_SET_SAVED(config3, 32, MIPS_CP0_CONFIG, 3)
__BUILD_KVM_SET_SAVED(config4, 32, MIPS_CP0_CONFIG, 4)
__BUILD_KVM_SET_SAVED(config5, 32, MIPS_CP0_CONFIG, 5)
|
|
|
|
|
2014-11-18 14:09:12 +00:00
|
|
|
/* Helpers */
|
|
|
|
|
|
|
|
static inline bool kvm_mips_guest_can_have_fpu(struct kvm_vcpu_arch *vcpu)
|
|
|
|
{
|
2016-06-15 19:29:50 +01:00
|
|
|
return (!__builtin_constant_p(raw_cpu_has_fpu) || raw_cpu_has_fpu) &&
|
2014-11-18 14:09:12 +00:00
|
|
|
vcpu->fpu_enabled;
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline bool kvm_mips_guest_has_fpu(struct kvm_vcpu_arch *vcpu)
|
|
|
|
{
|
|
|
|
return kvm_mips_guest_can_have_fpu(vcpu) &&
|
2023-06-28 19:08:17 +08:00
|
|
|
kvm_read_c0_guest_config1(&vcpu->cop0) & MIPS_CONF1_FP;
|
2014-11-18 14:09:12 +00:00
|
|
|
}
|
2012-11-21 18:34:00 -08:00
|
|
|
|
2015-03-05 11:43:36 +00:00
|
|
|
static inline bool kvm_mips_guest_can_have_msa(struct kvm_vcpu_arch *vcpu)
|
|
|
|
{
|
|
|
|
return (!__builtin_constant_p(cpu_has_msa) || cpu_has_msa) &&
|
|
|
|
vcpu->msa_enabled;
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline bool kvm_mips_guest_has_msa(struct kvm_vcpu_arch *vcpu)
|
|
|
|
{
|
|
|
|
return kvm_mips_guest_can_have_msa(vcpu) &&
|
2023-06-28 19:08:17 +08:00
|
|
|
kvm_read_c0_guest_config3(&vcpu->cop0) & MIPS_CONF3_MSA;
|
2015-03-05 11:43:36 +00:00
|
|
|
}
|
|
|
|
|
2012-11-21 18:34:00 -08:00
|
|
|
/*
 * Implementation-specific callback table. One instance of this vtable is
 * provided per MIPS KVM implementation (presumably T&E vs VZ — confirm
 * against the .c files that populate it) and is dispatched through for
 * exception handling, vCPU lifecycle, interrupt delivery and register access.
 */
struct kvm_mips_callbacks {
	/* Guest exception handlers, one per exception cause */
	int (*handle_cop_unusable)(struct kvm_vcpu *vcpu);
	int (*handle_tlb_mod)(struct kvm_vcpu *vcpu);
	int (*handle_tlb_ld_miss)(struct kvm_vcpu *vcpu);
	int (*handle_tlb_st_miss)(struct kvm_vcpu *vcpu);
	int (*handle_addr_err_st)(struct kvm_vcpu *vcpu);
	int (*handle_addr_err_ld)(struct kvm_vcpu *vcpu);
	int (*handle_syscall)(struct kvm_vcpu *vcpu);
	int (*handle_res_inst)(struct kvm_vcpu *vcpu);
	int (*handle_break)(struct kvm_vcpu *vcpu);
	int (*handle_trap)(struct kvm_vcpu *vcpu);
	int (*handle_msa_fpe)(struct kvm_vcpu *vcpu);
	int (*handle_fpe)(struct kvm_vcpu *vcpu);
	int (*handle_msa_disabled)(struct kvm_vcpu *vcpu);
	int (*handle_guest_exit)(struct kvm_vcpu *vcpu);
	/* Hardware (per-CPU) enable/disable of virtualisation support */
	int (*hardware_enable)(void);
	void (*hardware_disable)(void);
	/* KVM_CHECK_EXTENSION support for implementation-specific caps */
	int (*check_extension)(struct kvm *kvm, long ext);
	/* vCPU lifecycle */
	int (*vcpu_init)(struct kvm_vcpu *vcpu);
	void (*vcpu_uninit)(struct kvm_vcpu *vcpu);
	int (*vcpu_setup)(struct kvm_vcpu *vcpu);
	/* MMU */
	void (*prepare_flush_shadow)(struct kvm *kvm);
	gpa_t (*gva_to_gpa)(gva_t gva);
	/* Interrupt queueing and delivery */
	void (*queue_timer_int)(struct kvm_vcpu *vcpu);
	void (*dequeue_timer_int)(struct kvm_vcpu *vcpu);
	void (*queue_io_int)(struct kvm_vcpu *vcpu,
			     struct kvm_mips_interrupt *irq);
	void (*dequeue_io_int)(struct kvm_vcpu *vcpu,
			       struct kvm_mips_interrupt *irq);
	int (*irq_deliver)(struct kvm_vcpu *vcpu, unsigned int priority,
			   u32 cause);
	int (*irq_clear)(struct kvm_vcpu *vcpu, unsigned int priority,
			 u32 cause);
	/* KVM_{GET,SET}_ONE_REG register enumeration and access */
	unsigned long (*num_regs)(struct kvm_vcpu *vcpu);
	int (*copy_reg_indices)(struct kvm_vcpu *vcpu, u64 __user *indices);
	int (*get_one_reg)(struct kvm_vcpu *vcpu,
			   const struct kvm_one_reg *reg, s64 *v);
	int (*set_one_reg)(struct kvm_vcpu *vcpu,
			   const struct kvm_one_reg *reg, s64 v);
	/* Context switch in/out of this vCPU on a physical CPU */
	int (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
	int (*vcpu_put)(struct kvm_vcpu *vcpu, int cpu);
	/* Enter guest execution / re-entry fixups before resuming the guest */
	int (*vcpu_run)(struct kvm_vcpu *vcpu);
	void (*vcpu_reenter)(struct kvm_vcpu *vcpu);
};
|
2023-02-24 11:28:32 -08:00
|
|
|
extern const struct kvm_mips_callbacks * const kvm_mips_callbacks;
|
2022-11-30 23:09:05 +00:00
|
|
|
int kvm_mips_emulation_init(void);
|
2012-11-21 18:34:00 -08:00
|
|
|
|
|
|
|
/* Debug: dump vcpu state */
int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu);

/* Top-level guest exit handler, called from the generated exit code */
extern int kvm_mips_handle_exit(struct kvm_vcpu *vcpu);

/* Building of entry/exception code */
int kvm_mips_entry_setup(void);
void *kvm_mips_build_vcpu_run(void *addr);
void *kvm_mips_build_tlb_refill_exception(void *addr, void *handler);
void *kvm_mips_build_exception(void *addr, void *handler);
void *kvm_mips_build_exit(void *addr);
|
2012-11-21 18:34:00 -08:00
|
|
|
|
2015-03-05 11:43:36 +00:00
|
|
|
/* FPU/MSA context management */
|
2014-11-18 14:09:12 +00:00
|
|
|
void __kvm_save_fpu(struct kvm_vcpu_arch *vcpu);
|
|
|
|
void __kvm_restore_fpu(struct kvm_vcpu_arch *vcpu);
|
|
|
|
void __kvm_restore_fcsr(struct kvm_vcpu_arch *vcpu);
|
2015-03-05 11:43:36 +00:00
|
|
|
void __kvm_save_msa(struct kvm_vcpu_arch *vcpu);
|
|
|
|
void __kvm_restore_msa(struct kvm_vcpu_arch *vcpu);
|
|
|
|
void __kvm_restore_msa_upper(struct kvm_vcpu_arch *vcpu);
|
|
|
|
void __kvm_restore_msacsr(struct kvm_vcpu_arch *vcpu);
|
2014-11-18 14:09:12 +00:00
|
|
|
void kvm_own_fpu(struct kvm_vcpu *vcpu);
|
2015-03-05 11:43:36 +00:00
|
|
|
void kvm_own_msa(struct kvm_vcpu *vcpu);
|
2014-11-18 14:09:12 +00:00
|
|
|
void kvm_drop_fpu(struct kvm_vcpu *vcpu);
|
|
|
|
void kvm_lose_fpu(struct kvm_vcpu *vcpu);
|
|
|
|
|
2012-11-21 18:34:00 -08:00
|
|
|
/* TLB handling */
int kvm_mips_handle_vz_root_tlb_fault(unsigned long badvaddr,
				      struct kvm_vcpu *vcpu, bool write_fault);

int kvm_vz_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long entryhi);
int kvm_vz_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long gva,
			    unsigned long *gpa);
void kvm_vz_local_flush_roottlb_all_guests(void);
void kvm_vz_local_flush_guesttlb_all(void);
/* Bulk save/load of 'count' guest TLB entries starting at 'index' */
void kvm_vz_save_guesttlb(struct kvm_mips_tlb *buf, unsigned int index,
			  unsigned int count);
void kvm_vz_load_guesttlb(const struct kvm_mips_tlb *buf, unsigned int index,
			  unsigned int count);

#ifdef CONFIG_CPU_LOONGSON64
void kvm_loongson_clear_guest_vtlb(void);
void kvm_loongson_clear_guest_ftlb(void);
#endif
|
2016-11-15 00:06:05 +00:00
|
|
|
|
2016-12-16 15:57:00 +00:00
|
|
|
/* MMU handling */
|
|
|
|
|
2015-05-01 13:50:18 +01:00
|
|
|
bool kvm_mips_flush_gpa_pt(struct kvm *kvm, gfn_t start_gfn, gfn_t end_gfn);
|
2016-12-06 14:47:47 +00:00
|
|
|
int kvm_mips_mkclean_gpa_pt(struct kvm *kvm, gfn_t start_gfn, gfn_t end_gfn);
|
2015-05-01 13:50:18 +01:00
|
|
|
pgd_t *kvm_pgd_alloc(void);
|
2016-12-16 15:57:00 +00:00
|
|
|
void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);
|
2012-11-21 18:34:00 -08:00
|
|
|
|
|
|
|
/* Emulation */
enum emulation_result update_pc(struct kvm_vcpu *vcpu, u32 cause);
/* Fetch the bad instruction word (and prior branch) for emulation */
int kvm_get_badinstr(u32 *opc, struct kvm_vcpu *vcpu, u32 *out);
int kvm_get_badinstrp(u32 *opc, struct kvm_vcpu *vcpu, u32 *out);
|
2012-11-21 18:34:00 -08:00
|
|
|
|
2016-11-28 18:39:24 +00:00
|
|
|
/**
|
|
|
|
* kvm_is_ifetch_fault() - Find whether a TLBL exception is due to ifetch fault.
|
|
|
|
* @vcpu: Virtual CPU.
|
|
|
|
*
|
|
|
|
* Returns: Whether the TLBL exception was likely due to an instruction
|
|
|
|
* fetch fault rather than a data load fault.
|
|
|
|
*/
|
|
|
|
static inline bool kvm_is_ifetch_fault(struct kvm_vcpu_arch *vcpu)
|
|
|
|
{
|
|
|
|
unsigned long badvaddr = vcpu->host_cp0_badvaddr;
|
|
|
|
unsigned long epc = msk_isa16_mode(vcpu->pc);
|
|
|
|
u32 cause = vcpu->host_cp0_cause;
|
|
|
|
|
|
|
|
if (epc == badvaddr)
|
|
|
|
return true;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Branches may be 32-bit or 16-bit instructions.
|
|
|
|
* This isn't exact, but we don't really support MIPS16 or microMIPS yet
|
|
|
|
* in KVM anyway.
|
|
|
|
*/
|
|
|
|
if ((cause & CAUSEF_BD) && badvaddr - epc <= 4)
|
|
|
|
return true;
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2020-06-23 21:14:17 +08:00
|
|
|
/* Complete an in-flight MMIO load after userspace has provided the data */
extern enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu);

/* Software-emulated CP0 Count/Compare timer */
u32 kvm_mips_read_count(struct kvm_vcpu *vcpu);
void kvm_mips_write_count(struct kvm_vcpu *vcpu, u32 count);
void kvm_mips_write_compare(struct kvm_vcpu *vcpu, u32 compare, bool ack);
void kvm_mips_init_count(struct kvm_vcpu *vcpu, unsigned long count_hz);
int kvm_mips_set_count_ctl(struct kvm_vcpu *vcpu, s64 count_ctl);
int kvm_mips_set_count_resume(struct kvm_vcpu *vcpu, s64 count_resume);
int kvm_mips_set_count_hz(struct kvm_vcpu *vcpu, s64 count_hz);
void kvm_mips_count_enable_cause(struct kvm_vcpu *vcpu);
void kvm_mips_count_disable_cause(struct kvm_vcpu *vcpu);
enum hrtimer_restart kvm_mips_count_timeout(struct kvm_vcpu *vcpu);

/* fairly internal functions requiring some care to use */
int kvm_mips_count_disabled(struct kvm_vcpu *vcpu);
ktime_t kvm_mips_freeze_hrtimer(struct kvm_vcpu *vcpu, u32 *count);
int kvm_mips_restore_hrtimer(struct kvm_vcpu *vcpu, ktime_t before,
			     u32 count, int min_drift);

/* VZ hardware guest timer acquisition/release */
void kvm_vz_acquire_htimer(struct kvm_vcpu *vcpu);
void kvm_vz_lose_htimer(struct kvm_vcpu *vcpu);
|
2012-11-21 18:34:00 -08:00
|
|
|
|
2016-06-15 19:29:47 +01:00
|
|
|
enum emulation_result kvm_mips_emulate_store(union mips_instruction inst,
|
2016-06-09 14:19:07 +01:00
|
|
|
u32 cause,
|
2012-11-21 18:34:00 -08:00
|
|
|
struct kvm_vcpu *vcpu);
|
2016-06-15 19:29:47 +01:00
|
|
|
enum emulation_result kvm_mips_emulate_load(union mips_instruction inst,
|
2016-06-09 14:19:07 +01:00
|
|
|
u32 cause,
|
2012-11-21 18:34:00 -08:00
|
|
|
struct kvm_vcpu *vcpu);
|
|
|
|
|
2017-03-14 10:15:31 +00:00
|
|
|
/* COP0 */
|
|
|
|
enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu);
|
|
|
|
|
2017-03-14 10:15:14 +00:00
|
|
|
/* Hypercalls (hypcall.c) */
|
|
|
|
|
|
|
|
enum emulation_result kvm_mips_emul_hypcall(struct kvm_vcpu *vcpu,
|
|
|
|
union mips_instruction inst);
|
|
|
|
int kvm_mips_handle_hypcall(struct kvm_vcpu *vcpu);
|
|
|
|
|
2012-11-21 18:34:00 -08:00
|
|
|
/* Misc */
|
2014-06-26 12:11:36 -07:00
|
|
|
extern void kvm_mips_dump_stats(struct kvm_vcpu *vcpu);
|
2012-11-21 18:34:00 -08:00
|
|
|
extern unsigned long kvm_mips_get_ramsize(struct kvm *kvm);
|
2020-05-23 15:56:37 +08:00
|
|
|
extern int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
|
|
|
|
struct kvm_mips_interrupt *irq);
|
2012-11-21 18:34:00 -08:00
|
|
|
|
2014-08-28 15:13:02 +02:00
|
|
|
/* No arch-specific work needed on MIPS; intentionally empty. */
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
|
|
|
|
/* MIPS keeps no per-memslot arch state to free; intentionally empty. */
static inline void kvm_arch_free_memslot(struct kvm *kvm,
					 struct kvm_memory_slot *slot) {}
|
2019-02-05 12:54:17 -08:00
|
|
|
/* Memslot-generation updates require no action on MIPS; intentionally empty. */
static inline void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) {}
|
2014-08-28 15:13:02 +02:00
|
|
|
/* No arch hook needed when a vCPU is scheduled in on MIPS; intentionally empty. */
static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
|
2015-08-27 16:41:15 +02:00
|
|
|
/* No arch action needed when a vCPU starts blocking on MIPS; intentionally empty. */
static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {}
|
|
|
|
/* No arch action needed when a vCPU stops blocking on MIPS; intentionally empty. */
static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {}
|
2012-11-21 18:34:00 -08:00
|
|
|
|
2023-08-11 04:51:14 +00:00
|
|
|
#define __KVM_HAVE_ARCH_FLUSH_REMOTE_TLBS
|
2021-04-02 11:44:56 +02:00
|
|
|
|
2012-11-21 18:34:00 -08:00
|
|
|
#endif /* __MIPS_KVM_HOST_H__ */
|