2021-04-21 17:56:22 -07:00
|
|
|
/* SPDX-License-Identifier: GPL-2.0 */
|
|
|
|
#ifndef ARCH_X86_KVM_REVERSE_CPUID_H
|
|
|
|
#define ARCH_X86_KVM_REVERSE_CPUID_H
|
|
|
|
|
|
|
|
#include <uapi/asm/kvm.h>
|
|
|
|
#include <asm/cpufeature.h>
|
|
|
|
#include <asm/cpufeatures.h>
|
|
|
|
|
2022-11-25 20:58:39 +08:00
|
|
|
/*
 * Define a KVM-only feature flag.
 *
 * For features that are scattered by cpufeatures.h, __feature_translate() also
 * needs to be updated to translate the kernel-defined feature into the
 * KVM-defined feature.
 *
 * For features that are 100% KVM-only, i.e. not defined by cpufeatures.h,
 * forego the intermediate KVM_X86_FEATURE and directly define X86_FEATURE_* so
 * that X86_FEATURE_* can be used in KVM.  No __feature_translate() handling is
 * needed in this case.
 */
|
2021-04-21 17:56:22 -07:00
|
|
|
/* Compose a KVM-only feature value from a word index and a bit number. */
#define KVM_X86_FEATURE(w, f) ((f) + (w) * 32)
|
|
|
|
|
|
|
|
/* Intel-defined SGX sub-features, CPUID level 0x12 (EAX). */
#define KVM_X86_FEATURE_SGX1		KVM_X86_FEATURE(CPUID_12_EAX, 0)
#define KVM_X86_FEATURE_SGX2		KVM_X86_FEATURE(CPUID_12_EAX, 1)
/* EDECCSSA user leaf function, enumerated via CPUID(EAX=0x12,ECX=0x0):EAX[11]. */
#define KVM_X86_FEATURE_SGX_EDECCSSA	KVM_X86_FEATURE(CPUID_12_EAX, 11)
|
2021-04-21 17:56:22 -07:00
|
|
|
|
2022-11-25 20:58:43 +08:00
|
|
|
/* Intel-defined sub-features, CPUID level 0x00000007:1 (EDX) */
#define X86_FEATURE_AVX_VNNI_INT8	KVM_X86_FEATURE(CPUID_7_1_EDX, 4)
#define X86_FEATURE_AVX_NE_CONVERT	KVM_X86_FEATURE(CPUID_7_1_EDX, 5)
#define X86_FEATURE_AMX_COMPLEX		KVM_X86_FEATURE(CPUID_7_1_EDX, 8)
#define X86_FEATURE_AVX_VNNI_INT16	KVM_X86_FEATURE(CPUID_7_1_EDX, 10)
#define X86_FEATURE_PREFETCHITI		KVM_X86_FEATURE(CPUID_7_1_EDX, 14)
#define X86_FEATURE_AVX10		KVM_X86_FEATURE(CPUID_7_1_EDX, 19)
|
2022-11-25 20:58:43 +08:00
|
|
|
|
2023-10-23 17:16:35 -07:00
|
|
|
/* Intel-defined sub-features, CPUID level 0x00000007:2 (EDX) */
#define X86_FEATURE_INTEL_PSFD		KVM_X86_FEATURE(CPUID_7_2_EDX, 0)
#define X86_FEATURE_IPRED_CTRL		KVM_X86_FEATURE(CPUID_7_2_EDX, 1)
#define KVM_X86_FEATURE_RRSBA_CTRL	KVM_X86_FEATURE(CPUID_7_2_EDX, 2)
#define X86_FEATURE_DDPD_U		KVM_X86_FEATURE(CPUID_7_2_EDX, 3)
#define KVM_X86_FEATURE_BHI_CTRL	KVM_X86_FEATURE(CPUID_7_2_EDX, 4)
#define X86_FEATURE_MCDT_NO		KVM_X86_FEATURE(CPUID_7_2_EDX, 5)
|
|
|
|
|
/* Intel-defined sub-features, CPUID level 0x00000024:0 (EBX) */
#define X86_FEATURE_AVX10_128		KVM_X86_FEATURE(CPUID_24_0_EBX, 16)
#define X86_FEATURE_AVX10_256		KVM_X86_FEATURE(CPUID_24_0_EBX, 17)
#define X86_FEATURE_AVX10_512		KVM_X86_FEATURE(CPUID_24_0_EBX, 18)
|
|
|
|
|
2022-10-13 11:58:44 +02:00
|
|
|
/* CPUID level 0x80000007 (EDX). */
#define KVM_X86_FEATURE_CONSTANT_TSC	KVM_X86_FEATURE(CPUID_8000_0007_EDX, 8)
|
|
|
|
|
2023-06-02 18:10:56 -07:00
|
|
|
/* CPUID level 0x80000022 (EAX) */
#define KVM_X86_FEATURE_PERFMON_V2	KVM_X86_FEATURE(CPUID_8000_0022_EAX, 0)
|
|
|
|
|
2024-09-11 11:00:50 +02:00
|
|
|
/* CPUID level 0x80000021 (ECX) */
#define KVM_X86_FEATURE_TSA_SQ_NO	KVM_X86_FEATURE(CPUID_8000_0021_ECX, 1)
#define KVM_X86_FEATURE_TSA_L1_NO	KVM_X86_FEATURE(CPUID_8000_0021_ECX, 2)
|
|
|
|
|
2021-04-21 17:56:22 -07:00
|
|
|
struct cpuid_reg {
|
|
|
|
u32 function;
|
|
|
|
u32 index;
|
|
|
|
int reg;
|
|
|
|
};
|
|
|
|
|
|
|
|
static const struct cpuid_reg reverse_cpuid[] = {
|
|
|
|
[CPUID_1_EDX] = { 1, 0, CPUID_EDX},
|
|
|
|
[CPUID_8000_0001_EDX] = {0x80000001, 0, CPUID_EDX},
|
|
|
|
[CPUID_8086_0001_EDX] = {0x80860001, 0, CPUID_EDX},
|
|
|
|
[CPUID_1_ECX] = { 1, 0, CPUID_ECX},
|
|
|
|
[CPUID_C000_0001_EDX] = {0xc0000001, 0, CPUID_EDX},
|
|
|
|
[CPUID_8000_0001_ECX] = {0x80000001, 0, CPUID_ECX},
|
|
|
|
[CPUID_7_0_EBX] = { 7, 0, CPUID_EBX},
|
|
|
|
[CPUID_D_1_EAX] = { 0xd, 1, CPUID_EAX},
|
|
|
|
[CPUID_8000_0008_EBX] = {0x80000008, 0, CPUID_EBX},
|
|
|
|
[CPUID_6_EAX] = { 6, 0, CPUID_EAX},
|
|
|
|
[CPUID_8000_000A_EDX] = {0x8000000a, 0, CPUID_EDX},
|
|
|
|
[CPUID_7_ECX] = { 7, 0, CPUID_ECX},
|
|
|
|
[CPUID_8000_0007_EBX] = {0x80000007, 0, CPUID_EBX},
|
|
|
|
[CPUID_7_EDX] = { 7, 0, CPUID_EDX},
|
|
|
|
[CPUID_7_1_EAX] = { 7, 1, CPUID_EAX},
|
|
|
|
[CPUID_12_EAX] = {0x00000012, 0, CPUID_EAX},
|
2021-04-21 19:11:15 -07:00
|
|
|
[CPUID_8000_001F_EAX] = {0x8000001f, 0, CPUID_EAX},
|
2022-11-25 20:58:43 +08:00
|
|
|
[CPUID_7_1_EDX] = { 7, 1, CPUID_EDX},
|
2022-10-13 11:58:44 +02:00
|
|
|
[CPUID_8000_0007_EDX] = {0x80000007, 0, CPUID_EDX},
|
2023-01-10 16:46:37 -06:00
|
|
|
[CPUID_8000_0021_EAX] = {0x80000021, 0, CPUID_EAX},
|
2023-06-02 18:10:56 -07:00
|
|
|
[CPUID_8000_0022_EAX] = {0x80000022, 0, CPUID_EAX},
|
2023-10-23 17:16:35 -07:00
|
|
|
[CPUID_7_2_EDX] = { 7, 2, CPUID_EDX},
|
KVM: x86: Advertise AVX10.1 CPUID to userspace
Advertise AVX10.1 related CPUIDs, i.e. report AVX10 support bit via
CPUID.(EAX=07H, ECX=01H):EDX[bit 19] and new CPUID leaf 0x24H so that
guest OS and applications can query the AVX10.1 CPUIDs directly. Intel
AVX10 represents the first major new vector ISA since the introduction of
Intel AVX512, which will establish a common, converged vector instruction
set across all Intel architectures[1].
AVX10.1 is an early version of AVX10, that enumerates the Intel AVX512
instruction set at 128, 256, and 512 bits which is enabled on
Granite Rapids. I.e., AVX10.1 is only a new CPUID enumeration with no
new functionality. New features, e.g. Embedded Rounding and Suppress
All Exceptions (SAE) will be introduced in AVX10.2.
Advertising AVX10.1 is safe because there is nothing to enable for AVX10.1,
i.e. it's purely a new way to enumerate support, thus there will never be
anything for the kernel to enable. Note just the CPUID checking is changed
when using AVX512 related instructions, e.g. if using one AVX512
instruction needs to check (AVX512 AND AVX512DQ), it can check
((AVX512 AND AVX512DQ) OR AVX10.1) after checking XCR0[7:5].
The versions of AVX10 are expected to be inclusive, e.g. version N+1 is
a superset of version N. Per the spec, the version can never be 0, just
advertise AVX10.1 if it's supported in hardware. Moreover, advertising
AVX10_{128,256,512} needs to land in the same commit as advertising basic
AVX10.1 support, otherwise KVM would advertise an impossible CPU model.
E.g. a CPU with AVX512 but not AVX10.1/512 is impossible per the SDM.
As more and more AVX related CPUIDs are added (it would have resulted in
around 40-50 CPUID flags when developing AVX10), the versioning approach
is introduced. But incrementing version numbers are bad for virtualization.
E.g. if AVX10.2 has a feature that shouldn't be enumerated to guests for
whatever reason, then KVM can't enumerate any "later" features either,
because the only way to hide the problematic AVX10.2 feature is to set the
version to AVX10.1 or lower[2]. But most AVX features are just passed
through and don't have virtualization controls, so AVX10 should not be
problematic in practice, so long as Intel honors their promise that future
versions will be supersets of past versions.
[1] https://cdrdv2.intel.com/v1/dl/getContent/784267
[2] https://lore.kernel.org/all/Zkz5Ak0PQlAN8DxK@google.com/
Suggested-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Tao Su <tao1.su@linux.intel.com>
Link: https://lore.kernel.org/r/20240819062327.3269720-1-tao1.su@linux.intel.com
[sean: minor changelog tweaks]
Signed-off-by: Sean Christopherson <seanjc@google.com>
2024-08-19 14:23:27 +08:00
|
|
|
[CPUID_24_0_EBX] = { 0x24, 0, CPUID_EBX},
|
2024-09-11 11:00:50 +02:00
|
|
|
[CPUID_8000_0021_ECX] = {0x80000021, 0, CPUID_ECX},
|
2021-04-21 17:56:22 -07:00
|
|
|
};
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Reverse CPUID and its derivatives can only be used for hardware-defined
|
|
|
|
* feature words, i.e. words whose bits directly correspond to a CPUID leaf.
|
|
|
|
* Retrieving a feature bit or masking guest CPUID from a Linux-defined word
|
|
|
|
* is nonsensical as the bit number/mask is an arbitrary software-defined value
|
|
|
|
* and can't be used by KVM to query/control guest capabilities. And obviously
|
|
|
|
* the leaf being queried must have an entry in the lookup table.
|
|
|
|
*/
|
|
|
|
static __always_inline void reverse_cpuid_check(unsigned int x86_leaf)
|
|
|
|
{
|
2024-04-04 17:16:14 -07:00
|
|
|
BUILD_BUG_ON(NR_CPUID_WORDS != NCAPINTS);
|
2021-04-21 17:56:22 -07:00
|
|
|
BUILD_BUG_ON(x86_leaf == CPUID_LNX_1);
|
|
|
|
BUILD_BUG_ON(x86_leaf == CPUID_LNX_2);
|
|
|
|
BUILD_BUG_ON(x86_leaf == CPUID_LNX_3);
|
|
|
|
BUILD_BUG_ON(x86_leaf == CPUID_LNX_4);
|
2024-04-04 17:16:14 -07:00
|
|
|
BUILD_BUG_ON(x86_leaf == CPUID_LNX_5);
|
2021-04-21 17:56:22 -07:00
|
|
|
BUILD_BUG_ON(x86_leaf >= ARRAY_SIZE(reverse_cpuid));
|
|
|
|
BUILD_BUG_ON(reverse_cpuid[x86_leaf].function == 0);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Translate feature bits that are scattered in the kernel's cpufeatures word
|
|
|
|
* into KVM feature words that align with hardware's definitions.
|
|
|
|
*/
|
|
|
|
static __always_inline u32 __feature_translate(int x86_feature)
|
|
|
|
{
|
2023-10-23 17:16:36 -07:00
|
|
|
#define KVM_X86_TRANSLATE_FEATURE(f) \
|
|
|
|
case X86_FEATURE_##f: return KVM_X86_FEATURE_##f
|
|
|
|
|
|
|
|
switch (x86_feature) {
|
|
|
|
KVM_X86_TRANSLATE_FEATURE(SGX1);
|
|
|
|
KVM_X86_TRANSLATE_FEATURE(SGX2);
|
|
|
|
KVM_X86_TRANSLATE_FEATURE(SGX_EDECCSSA);
|
|
|
|
KVM_X86_TRANSLATE_FEATURE(CONSTANT_TSC);
|
|
|
|
KVM_X86_TRANSLATE_FEATURE(PERFMON_V2);
|
|
|
|
KVM_X86_TRANSLATE_FEATURE(RRSBA_CTRL);
|
2024-03-13 09:47:57 -07:00
|
|
|
KVM_X86_TRANSLATE_FEATURE(BHI_CTRL);
|
2024-09-11 11:00:50 +02:00
|
|
|
KVM_X86_TRANSLATE_FEATURE(TSA_SQ_NO);
|
|
|
|
KVM_X86_TRANSLATE_FEATURE(TSA_L1_NO);
|
2023-10-23 17:16:36 -07:00
|
|
|
default:
|
|
|
|
return x86_feature;
|
|
|
|
}
|
2021-04-21 17:56:22 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Return the hardware-defined feature word (index into reverse_cpuid) for
 * @x86_feature, translating scattered features first.
 */
static __always_inline u32 __feature_leaf(int x86_feature)
{
	u32 x86_leaf = __feature_translate(x86_feature) / 32;

	reverse_cpuid_check(x86_leaf);
	return x86_leaf;
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Retrieve the bit mask from an X86_FEATURE_* definition. Features contain
|
|
|
|
* the hardware defined bit number (stored in bits 4:0) and a software defined
|
|
|
|
* "word" (stored in bits 31:5). The word is used to index into arrays of
|
|
|
|
* bit masks that hold the per-cpu feature capabilities, e.g. this_cpu_has().
|
|
|
|
*/
|
|
|
|
static __always_inline u32 __feature_bit(int x86_feature)
|
|
|
|
{
|
|
|
|
x86_feature = __feature_translate(x86_feature);
|
|
|
|
|
|
|
|
reverse_cpuid_check(x86_feature / 32);
|
|
|
|
return 1 << (x86_feature & 31);
|
|
|
|
}
|
|
|
|
|
|
|
|
#define feature_bit(name) __feature_bit(X86_FEATURE_##name)
|
|
|
|
|
|
|
|
static __always_inline struct cpuid_reg x86_feature_cpuid(unsigned int x86_feature)
|
|
|
|
{
|
|
|
|
unsigned int x86_leaf = __feature_leaf(x86_feature);
|
|
|
|
|
|
|
|
return reverse_cpuid[x86_leaf];
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Return a pointer to the output register @reg within @entry.  @reg must be
 * a compile-time constant; an invalid register is a build error.
 */
static __always_inline u32 *__cpuid_entry_get_reg(struct kvm_cpuid_entry2 *entry,
						  u32 reg)
{
	switch (reg) {
	case CPUID_EAX:
		return &entry->eax;
	case CPUID_EBX:
		return &entry->ebx;
	case CPUID_ECX:
		return &entry->ecx;
	case CPUID_EDX:
		return &entry->edx;
	default:
		BUILD_BUG();
		return NULL;
	}
}
|
|
|
|
|
|
|
|
/* Return a pointer to the register within @entry that holds @x86_feature. */
static __always_inline u32 *cpuid_entry_get_reg(struct kvm_cpuid_entry2 *entry,
						unsigned int x86_feature)
{
	const struct cpuid_reg cpuid = x86_feature_cpuid(x86_feature);

	return __cpuid_entry_get_reg(entry, cpuid.reg);
}
|
|
|
|
|
|
|
|
/*
 * Return @x86_feature's mask if the feature is set in @entry, zero otherwise.
 * Note, the result is the raw bit mask, not a normalized 0/1 value.
 */
static __always_inline u32 cpuid_entry_get(struct kvm_cpuid_entry2 *entry,
					   unsigned int x86_feature)
{
	u32 *reg = cpuid_entry_get_reg(entry, x86_feature);

	return *reg & __feature_bit(x86_feature);
}
|
|
|
|
|
|
|
|
/* Return true if @x86_feature is set in @entry. */
static __always_inline bool cpuid_entry_has(struct kvm_cpuid_entry2 *entry,
					    unsigned int x86_feature)
{
	return cpuid_entry_get(entry, x86_feature);
}
|
|
|
|
|
|
|
|
static __always_inline void cpuid_entry_clear(struct kvm_cpuid_entry2 *entry,
|
|
|
|
unsigned int x86_feature)
|
|
|
|
{
|
|
|
|
u32 *reg = cpuid_entry_get_reg(entry, x86_feature);
|
|
|
|
|
|
|
|
*reg &= ~__feature_bit(x86_feature);
|
|
|
|
}
|
|
|
|
|
|
|
|
static __always_inline void cpuid_entry_set(struct kvm_cpuid_entry2 *entry,
|
|
|
|
unsigned int x86_feature)
|
|
|
|
{
|
|
|
|
u32 *reg = cpuid_entry_get_reg(entry, x86_feature);
|
|
|
|
|
|
|
|
*reg |= __feature_bit(x86_feature);
|
|
|
|
}
|
|
|
|
|
|
|
|
static __always_inline void cpuid_entry_change(struct kvm_cpuid_entry2 *entry,
|
|
|
|
unsigned int x86_feature,
|
|
|
|
bool set)
|
|
|
|
{
|
|
|
|
u32 *reg = cpuid_entry_get_reg(entry, x86_feature);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Open coded instead of using cpuid_entry_{clear,set}() to coerce the
|
|
|
|
* compiler into using CMOV instead of Jcc when possible.
|
|
|
|
*/
|
|
|
|
if (set)
|
|
|
|
*reg |= __feature_bit(x86_feature);
|
|
|
|
else
|
|
|
|
*reg &= ~__feature_bit(x86_feature);
|
|
|
|
}
|
|
|
|
|
|
|
|
#endif /* ARCH_X86_KVM_REVERSE_CPUID_H */
|