x86/msr: Rename 'rdmsrl_safe()' to 'rdmsrq_safe()'

Suggested-by: "H. Peter Anvin" <hpa@zytor.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Juergen Gross <jgross@suse.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Xin Li <xin@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Author: Ingo Molnar <mingo@kernel.org>
Date:   2025-04-09 22:28:56 +02:00
Commit: 6fe22abacd (parent: 78255eb239)

40 changed files with 80 additions and 80 deletions
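
The rename is mechanical: rdmsrq_safe() keeps the exact signature and
semantics of the old rdmsrl_safe(). It reads a 64-bit MSR into a u64 and
returns a non-zero error code if the RDMSR faults. A minimal usage sketch
in the style of the apic_is_x2apic_enabled() hunk below (illustrative
only, not part of this patch):

	u64 msr;

	/* Non-zero return: the RDMSR faulted, e.g. the MSR is not implemented. */
	if (rdmsrq_safe(MSR_IA32_APICBASE, &msr))
		return false;

	return msr & X2APIC_ENABLE;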

@@ -272,7 +272,7 @@ static int __init amd_power_pmu_init(void)
 	cpu_pwr_sample_ratio = cpuid_ecx(0x80000007);
 
-	if (rdmsrl_safe(MSR_F15H_CU_MAX_PWR_ACCUMULATOR, &max_cu_acc_power)) {
+	if (rdmsrq_safe(MSR_F15H_CU_MAX_PWR_ACCUMULATOR, &max_cu_acc_power)) {
 		pr_err("Failed to read max compute unit power accumulator MSR\n");
 		return -ENODEV;
 	}

@@ -269,7 +269,7 @@ bool check_hw_exists(struct pmu *pmu, unsigned long *cntr_mask,
 	 */
 	for_each_set_bit(i, cntr_mask, X86_PMC_IDX_MAX) {
 		reg = x86_pmu_config_addr(i);
-		ret = rdmsrl_safe(reg, &val);
+		ret = rdmsrq_safe(reg, &val);
 		if (ret)
 			goto msr_fail;
 		if (val & ARCH_PERFMON_EVENTSEL_ENABLE) {
@@ -283,7 +283,7 @@ bool check_hw_exists(struct pmu *pmu, unsigned long *cntr_mask,
 	if (*(u64 *)fixed_cntr_mask) {
 		reg = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
-		ret = rdmsrl_safe(reg, &val);
+		ret = rdmsrq_safe(reg, &val);
 		if (ret)
 			goto msr_fail;
 		for_each_set_bit(i, fixed_cntr_mask, X86_PMC_IDX_MAX) {
@@ -314,11 +314,11 @@ bool check_hw_exists(struct pmu *pmu, unsigned long *cntr_mask,
 	 * (qemu/kvm) that don't trap on the MSR access and always return 0s.
 	 */
 	reg = x86_pmu_event_addr(reg_safe);
-	if (rdmsrl_safe(reg, &val))
+	if (rdmsrq_safe(reg, &val))
 		goto msr_fail;
 	val ^= 0xffffUL;
 	ret = wrmsrl_safe(reg, val);
-	ret |= rdmsrl_safe(reg, &val_new);
+	ret |= rdmsrq_safe(reg, &val_new);
 	if (ret || val != val_new)
 		goto msr_fail;

@@ -5610,7 +5610,7 @@ static bool check_msr(unsigned long msr, u64 mask)
 	 * matches, this is needed to detect certain hardware emulators
 	 * (qemu/kvm) that don't trap on the MSR access and always return 0s.
 	 */
-	if (rdmsrl_safe(msr, &val_old))
+	if (rdmsrq_safe(msr, &val_old))
 		return false;
 
 	/*
@@ -5622,7 +5622,7 @@ static bool check_msr(unsigned long msr, u64 mask)
 		val_tmp = lbr_from_signext_quirk_wr(val_tmp);
 
 	if (wrmsrl_safe(msr, val_tmp) ||
-	    rdmsrl_safe(msr, &val_new))
+	    rdmsrq_safe(msr, &val_new))
 		return false;
 
 	/*

@@ -43,7 +43,7 @@ perf_msr_probe(struct perf_msr *msr, int cnt, bool zero, void *data)
 		if (msr[bit].test && !msr[bit].test(bit, data))
 			continue;
 		/* Virt sucks; you cannot tell if a R/O MSR is present :/ */
-		if (rdmsrl_safe(msr[bit].msr, &val))
+		if (rdmsrq_safe(msr[bit].msr, &val))
 			continue;
 
 		mask = msr[bit].mask;

@@ -611,7 +611,7 @@ static int rapl_check_hw_unit(void)
 	int i;
 
 	/* protect rdmsrq() to handle virtualization */
-	if (rdmsrl_safe(rapl_model->msr_power_unit, &msr_rapl_power_unit_bits))
+	if (rdmsrq_safe(rapl_model->msr_power_unit, &msr_rapl_power_unit_bits))
 		return -1;
 	for (i = 0; i < NR_RAPL_PKG_DOMAINS; i++)
 		rapl_pkg_hw_unit[i] = (msr_rapl_power_unit_bits >> 8) & 0x1FULL;

@@ -120,7 +120,7 @@ static inline bool apic_is_x2apic_enabled(void)
 {
 	u64 msr;
 
-	if (rdmsrl_safe(MSR_IA32_APICBASE, &msr))
+	if (rdmsrq_safe(MSR_IA32_APICBASE, &msr))
 		return false;
 	return msr & X2APIC_ENABLE;
 }

@@ -279,7 +279,7 @@ static inline int wrmsr_safe(u32 msr, u32 low, u32 high)
 	__err;						\
 })
 
-static inline int rdmsrl_safe(u32 msr, u64 *p)
+static inline int rdmsrq_safe(u32 msr, u64 *p)
 {
 	int err;
 
@@ -381,7 +381,7 @@ static inline int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
 }
 
 static inline int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
 {
-	return rdmsrl_safe(msr_no, q);
+	return rdmsrq_safe(msr_no, q);
 }
 
 static inline int wrmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
 {

@@ -231,7 +231,7 @@ static inline void wrmsrq(unsigned msr, u64 val)
 	_err;						\
 })
 
-static inline int rdmsrl_safe(unsigned msr, u64 *p)
+static inline int rdmsrq_safe(unsigned msr, u64 *p)
 {
 	int err;

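For reference, only the name changes; the body of the accessor is left
as-is by this patch. The native <asm/msr.h> variant reads roughly as
follows (a sketch of the pre-rename definition, not part of the diff):

	static inline int rdmsrq_safe(unsigned msr, u64 *p)
	{
		int err;

		*p = native_read_msr_safe(msr, &err);
		return err;
	}
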
@@ -151,7 +151,7 @@ struct resource *amd_get_mmconfig_range(struct resource *res)
 
 	/* Assume CPUs from Fam10h have mmconfig, although not all VMs do */
 	if (boot_cpu_data.x86 < 0x10 ||
-	    rdmsrl_safe(MSR_FAM10H_MMIO_CONF_BASE, &msr))
+	    rdmsrq_safe(MSR_FAM10H_MMIO_CONF_BASE, &msr))
 		return NULL;
 
 	/* mmconfig is not enabled */

@@ -422,7 +422,7 @@ static void bsp_init_amd(struct cpuinfo_x86 *c)
		 * Try to cache the base value so further operations can
		 * avoid RMW. If that faults, do not enable SSBD.
		 */
-		if (!rdmsrl_safe(MSR_AMD64_LS_CFG, &x86_amd_ls_cfg_base)) {
+		if (!rdmsrq_safe(MSR_AMD64_LS_CFG, &x86_amd_ls_cfg_base)) {
 			setup_force_cpu_cap(X86_FEATURE_LS_CFG_SSBD);
 			setup_force_cpu_cap(X86_FEATURE_SSBD);
 			x86_amd_ls_cfg_ssbd_mask = 1ULL << bit;
@@ -788,7 +788,7 @@ static void init_amd_bd(struct cpuinfo_x86 *c)
	 * Disable it on the affected CPUs.
	 */
 	if ((c->x86_model >= 0x02) && (c->x86_model < 0x20)) {
-		if (!rdmsrl_safe(MSR_F15H_IC_CFG, &value) && !(value & 0x1E)) {
+		if (!rdmsrq_safe(MSR_F15H_IC_CFG, &value) && !(value & 0x1E)) {
 			value |= 0x1E;
 			wrmsrl_safe(MSR_F15H_IC_CFG, value);
 		}
@@ -838,7 +838,7 @@ void init_spectral_chicken(struct cpuinfo_x86 *c)
	 * suppresses non-branch predictions.
	 */
 	if (!cpu_has(c, X86_FEATURE_HYPERVISOR)) {
-		if (!rdmsrl_safe(MSR_ZEN2_SPECTRAL_CHICKEN, &value)) {
+		if (!rdmsrq_safe(MSR_ZEN2_SPECTRAL_CHICKEN, &value)) {
 			value |= MSR_ZEN2_SPECTRAL_CHICKEN_BIT;
 			wrmsrl_safe(MSR_ZEN2_SPECTRAL_CHICKEN, value);
 		}

@@ -99,7 +99,7 @@ static bool __init turbo_disabled(void)
 	u64 misc_en;
 	int err;
 
-	err = rdmsrl_safe(MSR_IA32_MISC_ENABLE, &misc_en);
+	err = rdmsrq_safe(MSR_IA32_MISC_ENABLE, &misc_en);
 	if (err)
 		return false;
 
@@ -110,11 +110,11 @@ static bool __init slv_set_max_freq_ratio(u64 *base_freq, u64 *turbo_freq)
 {
 	int err;
 
-	err = rdmsrl_safe(MSR_ATOM_CORE_RATIOS, base_freq);
+	err = rdmsrq_safe(MSR_ATOM_CORE_RATIOS, base_freq);
 	if (err)
 		return false;
 
-	err = rdmsrl_safe(MSR_ATOM_CORE_TURBO_RATIOS, turbo_freq);
+	err = rdmsrq_safe(MSR_ATOM_CORE_TURBO_RATIOS, turbo_freq);
 	if (err)
 		return false;
 
@@ -152,13 +152,13 @@ static bool __init knl_set_max_freq_ratio(u64 *base_freq, u64 *turbo_freq,
 	int err, i;
 	u64 msr;
 
-	err = rdmsrl_safe(MSR_PLATFORM_INFO, base_freq);
+	err = rdmsrq_safe(MSR_PLATFORM_INFO, base_freq);
 	if (err)
 		return false;
 
 	*base_freq = (*base_freq >> 8) & 0xFF;	/* max P state */
 
-	err = rdmsrl_safe(MSR_TURBO_RATIO_LIMIT, &msr);
+	err = rdmsrq_safe(MSR_TURBO_RATIO_LIMIT, &msr);
 	if (err)
 		return false;
 
@@ -190,17 +190,17 @@ static bool __init skx_set_max_freq_ratio(u64 *base_freq, u64 *turbo_freq, int s
 	u32 group_size;
 	int err, i;
 
-	err = rdmsrl_safe(MSR_PLATFORM_INFO, base_freq);
+	err = rdmsrq_safe(MSR_PLATFORM_INFO, base_freq);
 	if (err)
 		return false;
 
 	*base_freq = (*base_freq >> 8) & 0xFF;	/* max P state */
 
-	err = rdmsrl_safe(MSR_TURBO_RATIO_LIMIT, &ratios);
+	err = rdmsrq_safe(MSR_TURBO_RATIO_LIMIT, &ratios);
 	if (err)
 		return false;
 
-	err = rdmsrl_safe(MSR_TURBO_RATIO_LIMIT1, &counts);
+	err = rdmsrq_safe(MSR_TURBO_RATIO_LIMIT1, &counts);
 	if (err)
 		return false;
 
@@ -220,11 +220,11 @@ static bool __init core_set_max_freq_ratio(u64 *base_freq, u64 *turbo_freq)
 	u64 msr;
 	int err;
 
-	err = rdmsrl_safe(MSR_PLATFORM_INFO, base_freq);
+	err = rdmsrq_safe(MSR_PLATFORM_INFO, base_freq);
 	if (err)
 		return false;
 
-	err = rdmsrl_safe(MSR_TURBO_RATIO_LIMIT, &msr);
+	err = rdmsrq_safe(MSR_TURBO_RATIO_LIMIT, &msr);
 	if (err)
 		return false;

@@ -95,7 +95,7 @@ static bool split_lock_verify_msr(bool on)
 {
 	u64 ctrl, tmp;
 
-	if (rdmsrl_safe(MSR_TEST_CTRL, &ctrl))
+	if (rdmsrq_safe(MSR_TEST_CTRL, &ctrl))
 		return false;
 	if (on)
 		ctrl |= MSR_TEST_CTRL_SPLIT_LOCK_DETECT;

@@ -148,7 +148,7 @@ static void ppin_init(struct cpuinfo_x86 *c)
 	 */
 	info = (struct ppin_info *)id->driver_data;
 
-	if (rdmsrl_safe(info->msr_ppin_ctl, &val))
+	if (rdmsrq_safe(info->msr_ppin_ctl, &val))
 		goto clear_ppin;
 
 	if ((val & 3UL) == 1UL) {
@@ -159,7 +159,7 @@ static void ppin_init(struct cpuinfo_x86 *c)
 	/* If PPIN is disabled, try to enable */
 	if (!(val & 2UL)) {
 		wrmsrl_safe(info->msr_ppin_ctl, val | 2UL);
-		rdmsrl_safe(info->msr_ppin_ctl, &val);
+		rdmsrq_safe(info->msr_ppin_ctl, &val);
 	}
 
 	/* Is the enable bit set? */

@@ -118,7 +118,7 @@ void init_ia32_feat_ctl(struct cpuinfo_x86 *c)
 	bool enable_vmx;
 	u64 msr;
 
-	if (rdmsrl_safe(MSR_IA32_FEAT_CTL, &msr)) {
+	if (rdmsrq_safe(MSR_IA32_FEAT_CTL, &msr)) {
 		clear_cpu_cap(c, X86_FEATURE_VMX);
 		clear_cpu_cap(c, X86_FEATURE_SGX);
 		return;

@@ -110,7 +110,7 @@ static void bsp_init_hygon(struct cpuinfo_x86 *c)
		 * Try to cache the base value so further operations can
		 * avoid RMW. If that faults, do not enable SSBD.
		 */
-		if (!rdmsrl_safe(MSR_AMD64_LS_CFG, &x86_amd_ls_cfg_base)) {
+		if (!rdmsrq_safe(MSR_AMD64_LS_CFG, &x86_amd_ls_cfg_base)) {
 			setup_force_cpu_cap(X86_FEATURE_LS_CFG_SSBD);
 			setup_force_cpu_cap(X86_FEATURE_SSBD);
 			x86_amd_ls_cfg_ssbd_mask = 1ULL << 10;

@@ -488,7 +488,7 @@ static void init_cpuid_fault(struct cpuinfo_x86 *c)
 {
 	u64 msr;
 
-	if (!rdmsrl_safe(MSR_PLATFORM_INFO, &msr)) {
+	if (!rdmsrq_safe(MSR_PLATFORM_INFO, &msr)) {
 		if (msr & MSR_PLATFORM_INFO_CPUID_FAULT)
 			set_cpu_cap(c, X86_FEATURE_CPUID_FAULT);
 	}
@@ -498,7 +498,7 @@ static void init_intel_misc_features(struct cpuinfo_x86 *c)
 {
 	u64 msr;
 
-	if (rdmsrl_safe(MSR_MISC_FEATURES_ENABLES, &msr))
+	if (rdmsrq_safe(MSR_MISC_FEATURES_ENABLES, &msr))
 		return;
 
 	/* Clear all MISC features */

@@ -748,7 +748,7 @@ static void check_hw_inj_possible(void)
 		toggle_hw_mce_inject(cpu, true);
 
 		wrmsrl_safe(mca_msr_reg(bank, MCA_STATUS), status);
-		rdmsrl_safe(mca_msr_reg(bank, MCA_STATUS), &status);
+		rdmsrq_safe(mca_msr_reg(bank, MCA_STATUS), &status);
 		wrmsrl_safe(mca_msr_reg(bank, MCA_STATUS), 0);
 
 		if (!status) {

@@ -460,7 +460,7 @@ static void intel_imc_init(struct cpuinfo_x86 *c)
 	case INTEL_SANDYBRIDGE_X:
 	case INTEL_IVYBRIDGE_X:
 	case INTEL_HASWELL_X:
-		if (rdmsrl_safe(MSR_ERROR_CONTROL, &error_control))
+		if (rdmsrq_safe(MSR_ERROR_CONTROL, &error_control))
 			return;
 		error_control |= 2;
 		wrmsrl_safe(MSR_ERROR_CONTROL, error_control);

@@ -411,7 +411,7 @@ void setup_default_sgx_lepubkeyhash(void)
 	 * MSRs exist but are read-only (locked and not writable).
 	 */
 	if (!enable_sgx || boot_cpu_has(X86_FEATURE_SGX_LC) ||
-	    rdmsrl_safe(MSR_IA32_SGXLEPUBKEYHASH0, &sgx_pubkey_hash[0])) {
+	    rdmsrq_safe(MSR_IA32_SGXLEPUBKEYHASH0, &sgx_pubkey_hash[0])) {
 		sgx_pubkey_hash[0] = 0xa6053e051270b7acULL;
 		sgx_pubkey_hash[1] = 0x6cfbe8ba8b3b413dULL;
 		sgx_pubkey_hash[2] = 0xc4916d99f2b3735dULL;

@@ -2850,7 +2850,7 @@ static int kvm_cpu_vmxon(u64 vmxon_pointer)
 
 fault:
 	WARN_ONCE(1, "VMXON faulted, MSR_IA32_FEAT_CTL (0x3a) = 0x%llx\n",
-		  rdmsrl_safe(MSR_IA32_FEAT_CTL, &msr) ? 0xdeadbeef : msr);
+		  rdmsrq_safe(MSR_IA32_FEAT_CTL, &msr) ? 0xdeadbeef : msr);
 	cr4_clear_bits(X86_CR4_VMXE);
 
 	return -EFAULT;

@@ -590,7 +590,7 @@ static int kvm_probe_user_return_msr(u32 msr)
 	int ret;
 
 	preempt_disable();
-	ret = rdmsrl_safe(msr, &val);
+	ret = rdmsrq_safe(msr, &val);
 	if (ret)
 		goto out;
 	ret = wrmsrl_safe(msr, val);
@@ -630,7 +630,7 @@ static void kvm_user_return_msr_cpu_online(void)
 	int i;
 
 	for (i = 0; i < kvm_nr_uret_msrs; ++i) {
-		rdmsrl_safe(kvm_uret_msrs_list[i], &value);
+		rdmsrq_safe(kvm_uret_msrs_list[i], &value);
 		msrs->values[i].host = value;
 		msrs->values[i].curr = value;
 	}
@@ -1660,7 +1660,7 @@ static int kvm_get_feature_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data,
 		*data = MSR_PLATFORM_INFO_CPUID_FAULT;
 		break;
 	case MSR_IA32_UCODE_REV:
-		rdmsrl_safe(index, data);
+		rdmsrq_safe(index, data);
 		break;
 	default:
 		return kvm_x86_call(get_feature_msr)(index, data);
@@ -9736,7 +9736,7 @@ int kvm_x86_vendor_init(struct kvm_x86_init_ops *ops)
 	 * with an exception. PAT[0] is set to WB on RESET and also by the
 	 * kernel, i.e. failure indicates a kernel bug or broken firmware.
 	 */
-	if (rdmsrl_safe(MSR_IA32_CR_PAT, &host_pat) ||
+	if (rdmsrq_safe(MSR_IA32_CR_PAT, &host_pat) ||
 	    (host_pat & GENMASK(2, 0)) != 6) {
 		pr_err("host PAT[0] is not WB\n");
 		return -EIO;
@@ -9770,7 +9770,7 @@ int kvm_x86_vendor_init(struct kvm_x86_init_ops *ops)
 		kvm_caps.supported_xcr0 = kvm_host.xcr0 & KVM_SUPPORTED_XCR0;
 	}
 
-	rdmsrl_safe(MSR_EFER, &kvm_host.efer);
+	rdmsrq_safe(MSR_EFER, &kvm_host.efer);
 
 	if (boot_cpu_has(X86_FEATURE_XSAVES))
 		rdmsrq(MSR_IA32_XSS, kvm_host.xss);
@@ -13652,7 +13652,7 @@ int kvm_spec_ctrl_test_value(u64 value)
 
 	local_irq_save(flags);
 
-	if (rdmsrl_safe(MSR_IA32_SPEC_CTRL, &saved_value))
+	if (rdmsrq_safe(MSR_IA32_SPEC_CTRL, &saved_value))
 		ret = 1;
 	else if (wrmsrl_safe(MSR_IA32_SPEC_CTRL, value))
 		ret = 1;

@@ -41,7 +41,7 @@ static int msr_read(u32 msr, struct msr *m)
 	int err;
 	u64 val;
 
-	err = rdmsrl_safe(msr, &val);
+	err = rdmsrq_safe(msr, &val);
 	if (!err)
 		m->q = val;

@@ -125,7 +125,7 @@ static void __save_processor_state(struct saved_context *ctxt)
 	ctxt->cr2 = read_cr2();
 	ctxt->cr3 = __read_cr3();
 	ctxt->cr4 = __read_cr4();
-	ctxt->misc_enable_saved = !rdmsrl_safe(MSR_IA32_MISC_ENABLE,
+	ctxt->misc_enable_saved = !rdmsrq_safe(MSR_IA32_MISC_ENABLE,
					       &ctxt->misc_enable);
 	msr_save_context(ctxt);
 }
@@ -414,7 +414,7 @@ static int msr_build_context(const u32 *msr_id, const int num)
 		u64 dummy;
 
 		msr_array[i].info.msr_no = msr_id[j];
-		msr_array[i].valid = !rdmsrl_safe(msr_id[j], &dummy);
+		msr_array[i].valid = !rdmsrq_safe(msr_id[j], &dummy);
 		msr_array[i].info.reg.q = 0;
 	}
 	saved_msrs->num = total_num;

@@ -234,7 +234,7 @@ static int __init extlog_init(void)
 	u64 cap;
 	int rc;
 
-	if (rdmsrl_safe(MSR_IA32_MCG_CAP, &cap) ||
+	if (rdmsrq_safe(MSR_IA32_MCG_CAP, &cap) ||
 	    !(cap & MCG_ELOG_P) ||
 	    !extlog_get_l1addr())
 		return -ENODEV;

@@ -39,7 +39,7 @@ static int lpit_read_residency_counter_us(u64 *counter, bool io_mem)
 		return 0;
 	}
 
-	err = rdmsrl_safe(residency_info_ffh.gaddr.address, counter);
+	err = rdmsrq_safe(residency_info_ffh.gaddr.address, counter);
 	if (!err) {
 		u64 mask = GENMASK_ULL(residency_info_ffh.gaddr.bit_offset +
				       residency_info_ffh.gaddr.bit_width - 1,

@@ -90,9 +90,9 @@ static int amd_pstate_ut_check_enabled(u32 index)
 	if (get_shared_mem())
 		return 0;
 
-	ret = rdmsrl_safe(MSR_AMD_CPPC_ENABLE, &cppc_enable);
+	ret = rdmsrq_safe(MSR_AMD_CPPC_ENABLE, &cppc_enable);
 	if (ret) {
-		pr_err("%s rdmsrl_safe MSR_AMD_CPPC_ENABLE ret=%d error!\n", __func__, ret);
+		pr_err("%s rdmsrq_safe MSR_AMD_CPPC_ENABLE ret=%d error!\n", __func__, ret);
 		return ret;
 	}

@@ -129,7 +129,7 @@ static int __init amd_freq_sensitivity_init(void)
 		pci_dev_put(pcidev);
 	}
 
-	if (rdmsrl_safe(MSR_AMD64_FREQ_SENSITIVITY_ACTUAL, &val))
+	if (rdmsrq_safe(MSR_AMD64_FREQ_SENSITIVITY_ACTUAL, &val))
 		return -ENODEV;
 
 	if (!(val >> CLASS_CODE_SHIFT))

@@ -1877,7 +1877,7 @@ void notify_hwp_interrupt(void)
 	if (cpu_feature_enabled(X86_FEATURE_HWP_HIGHEST_PERF_CHANGE))
 		status_mask |= HWP_HIGHEST_PERF_CHANGE_STATUS;
 
-	rdmsrl_safe(MSR_HWP_STATUS, &value);
+	rdmsrq_safe(MSR_HWP_STATUS, &value);
 	if (!(value & status_mask))
 		return;

@@ -22,12 +22,12 @@ u64 librapl_energy_uJ(void)
 	unsigned long long power;
 	u32 units;
 
-	if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &power))
+	if (rdmsrq_safe(MSR_RAPL_POWER_UNIT, &power))
 		return 0;
 
 	units = (power & 0x1f00) >> 8;
 
-	if (rdmsrl_safe(MSR_PP1_ENERGY_STATUS, &power))
+	if (rdmsrq_safe(MSR_PP1_ENERGY_STATUS, &power))
 		return 0;
 
 	return (1000000 * power) >> units; /* convert to uJ */

@@ -143,8 +143,8 @@ static void do_read_registers_on_cu(void *_data)
 	 */
 	cu = topology_core_id(smp_processor_id());
 
-	rdmsrl_safe(MSR_F15H_CU_PWR_ACCUMULATOR, &data->cu_acc_power[cu]);
-	rdmsrl_safe(MSR_F15H_PTSC, &data->cpu_sw_pwr_ptsc[cu]);
+	rdmsrq_safe(MSR_F15H_CU_PWR_ACCUMULATOR, &data->cu_acc_power[cu]);
+	rdmsrq_safe(MSR_F15H_PTSC, &data->cpu_sw_pwr_ptsc[cu]);
 
 	data->cu_on[cu] = 1;
 }
@@ -424,7 +424,7 @@ static int fam15h_power_init_data(struct pci_dev *f4,
 	 */
 	data->cpu_pwr_sample_ratio = cpuid_ecx(0x80000007);
 
-	if (rdmsrl_safe(MSR_F15H_CU_MAX_PWR_ACCUMULATOR, &tmp)) {
+	if (rdmsrq_safe(MSR_F15H_CU_MAX_PWR_ACCUMULATOR, &tmp)) {
 		pr_err("Failed to read max compute unit power accumulator MSR\n");
 		return -ENODEV;
 	}

@@ -115,13 +115,13 @@ static int __init ifs_init(void)
 	if (!m)
 		return -ENODEV;
 
-	if (rdmsrl_safe(MSR_IA32_CORE_CAPS, &msrval))
+	if (rdmsrq_safe(MSR_IA32_CORE_CAPS, &msrval))
 		return -ENODEV;
 
 	if (!(msrval & MSR_IA32_CORE_CAPS_INTEGRITY_CAPS))
 		return -ENODEV;
 
-	if (rdmsrl_safe(MSR_INTEGRITY_CAPS, &msrval))
+	if (rdmsrq_safe(MSR_INTEGRITY_CAPS, &msrval))
 		return -ENODEV;
 
 	ifs_pkg_auth = kmalloc_array(topology_max_packages(), sizeof(bool), GFP_KERNEL);

@@ -1082,7 +1082,7 @@ static int pmc_core_pkgc_show(struct seq_file *s, void *unused)
 	unsigned int index;
 
 	for (index = 0; map[index].name ; index++) {
-		if (rdmsrl_safe(map[index].bit_mask, &pcstate_count))
+		if (rdmsrq_safe(map[index].bit_mask, &pcstate_count))
 			continue;
 
 		pcstate_count *= 1000;
@@ -1587,7 +1587,7 @@ static __maybe_unused int pmc_core_suspend(struct device *dev)
 
 	/* Save PKGC residency for checking later */
 	for (i = 0; i < pmcdev->num_of_pkgc; i++) {
-		if (rdmsrl_safe(msr_map[i].bit_mask, &pmcdev->pkgc_res_cnt[i]))
+		if (rdmsrq_safe(msr_map[i].bit_mask, &pmcdev->pkgc_res_cnt[i]))
 			return -EIO;
 	}
 
@@ -1603,7 +1603,7 @@ static inline bool pmc_core_is_deepest_pkgc_failed(struct pmc_dev *pmcdev)
 	u32 deepest_pkgc_msr = msr_map[pmcdev->num_of_pkgc - 1].bit_mask;
 	u64 deepest_pkgc_residency;
 
-	if (rdmsrl_safe(deepest_pkgc_msr, &deepest_pkgc_residency))
+	if (rdmsrq_safe(deepest_pkgc_msr, &deepest_pkgc_residency))
 		return false;
 
 	if (deepest_pkgc_residency == pmcdev->pkgc_res_cnt[pmcdev->num_of_pkgc - 1])
@@ -1655,7 +1655,7 @@ int pmc_core_resume_common(struct pmc_dev *pmcdev)
 	for (i = 0; i < pmcdev->num_of_pkgc; i++) {
 		u64 pc_cnt;
 
-		if (!rdmsrl_safe(msr_map[i].bit_mask, &pc_cnt)) {
+		if (!rdmsrq_safe(msr_map[i].bit_mask, &pc_cnt)) {
 			dev_info(dev, "Prev %s cnt = 0x%llx, Current %s cnt = 0x%llx\n",
 				 msr_map[i].name, pmcdev->pkgc_res_cnt[i],
 				 msr_map[i].name, pc_cnt);

@@ -406,7 +406,7 @@ static int isst_if_cpu_online(unsigned int cpu)
 
 	isst_cpu_info[cpu].numa_node = cpu_to_node(cpu);
 
-	ret = rdmsrl_safe(MSR_CPU_BUS_NUMBER, &data);
+	ret = rdmsrq_safe(MSR_CPU_BUS_NUMBER, &data);
 	if (ret) {
 		/* This is not a fatal error on MSR mailbox only I/F */
 		isst_cpu_info[cpu].bus_info[0] = -1;
@@ -420,12 +420,12 @@ static int isst_if_cpu_online(unsigned int cpu)
 
 	if (isst_hpm_support) {
-		ret = rdmsrl_safe(MSR_PM_LOGICAL_ID, &data);
+		ret = rdmsrq_safe(MSR_PM_LOGICAL_ID, &data);
 		if (!ret)
 			goto set_punit_id;
 	}
 
-	ret = rdmsrl_safe(MSR_THREAD_ID_INFO, &data);
+	ret = rdmsrq_safe(MSR_THREAD_ID_INFO, &data);
 	if (ret) {
 		isst_cpu_info[cpu].punit_cpu_id = -1;
 		return ret;
@@ -831,8 +831,8 @@ static int __init isst_if_common_init(void)
 		u64 data;
 
 		/* Can fail only on some Skylake-X generations */
-		if (rdmsrl_safe(MSR_OS_MAILBOX_INTERFACE, &data) ||
-		    rdmsrl_safe(MSR_OS_MAILBOX_DATA, &data))
+		if (rdmsrq_safe(MSR_OS_MAILBOX_INTERFACE, &data) ||
+		    rdmsrq_safe(MSR_OS_MAILBOX_DATA, &data))
 			return -ENODEV;
 	}

@@ -176,11 +176,11 @@ static int __init isst_if_mbox_init(void)
 		return -ENODEV;
 
 	/* Check presence of mailbox MSRs */
-	ret = rdmsrl_safe(MSR_OS_MAILBOX_INTERFACE, &data);
+	ret = rdmsrq_safe(MSR_OS_MAILBOX_INTERFACE, &data);
 	if (ret)
 		return ret;
 
-	ret = rdmsrl_safe(MSR_OS_MAILBOX_DATA, &data);
+	ret = rdmsrq_safe(MSR_OS_MAILBOX_DATA, &data);
 	if (ret)
 		return ret;

@@ -157,7 +157,7 @@ static int tpmi_get_logical_id(unsigned int cpu, struct tpmi_cpu_info *info)
 	u64 data;
 	int ret;
 
-	ret = rdmsrl_safe(MSR_PM_LOGICAL_ID, &data);
+	ret = rdmsrq_safe(MSR_PM_LOGICAL_ID, &data);
 	if (ret)
 		return ret;
 
@@ -203,7 +203,7 @@ static int __init tpmi_init(void)
 		return -ENODEV;
 
 	/* Check for MSR 0x54 presence */
-	ret = rdmsrl_safe(MSR_PM_LOGICAL_ID, &data);
+	ret = rdmsrq_safe(MSR_PM_LOGICAL_ID, &data);
 	if (ret)
 		return ret;

@@ -48,7 +48,7 @@ static int get_oc_core_priority(unsigned int cpu)
 	}
 
 	for (i = 0; i < OC_MAILBOX_RETRY_COUNT; ++i) {
-		ret = rdmsrl_safe(MSR_OC_MAILBOX, &value);
+		ret = rdmsrq_safe(MSR_OC_MAILBOX, &value);
 		if (ret) {
 			pr_debug("cpu %d OC mailbox read failed\n", cpu);
 			break;

@@ -116,7 +116,7 @@ static void rapl_msr_update_func(void *info)
 	struct reg_action *ra = info;
 	u64 val;
 
-	ra->err = rdmsrl_safe(ra->reg.msr, &val);
+	ra->err = rdmsrq_safe(ra->reg.msr, &val);
 	if (ra->err)
 		return;

@@ -153,7 +153,7 @@ static ssize_t tcc_offset_degree_celsius_store(struct device *dev,
 	u64 val;
 	int err;
 
-	err = rdmsrl_safe(MSR_PLATFORM_INFO, &val);
+	err = rdmsrq_safe(MSR_PLATFORM_INFO, &val);
 	if (err)
 		return err;

@@ -340,7 +340,7 @@ static bool has_pkg_state_counter(void)
 
 	/* check if any one of the counter msrs exists */
 	while (info->msr_index) {
-		if (!rdmsrl_safe(info->msr_index, &val))
+		if (!rdmsrq_safe(info->msr_index, &val))
 			return true;
 		info++;
 	}
@@ -356,7 +356,7 @@ static u64 pkg_state_counter(void)
 	while (info->msr_index) {
 		if (!info->skip) {
-			if (!rdmsrl_safe(info->msr_index, &val))
+			if (!rdmsrq_safe(info->msr_index, &val))
 				count += val;
 			else
 				info->skip = true;

@@ -81,14 +81,14 @@ static int __init tcc_cooling_init(void)
 	if (!id)
 		return -ENODEV;
 
-	err = rdmsrl_safe(MSR_PLATFORM_INFO, &val);
+	err = rdmsrq_safe(MSR_PLATFORM_INFO, &val);
 	if (err)
 		return err;
 
 	if (!(val & TCC_PROGRAMMABLE))
 		return -ENODEV;
 
-	err = rdmsrl_safe(MSR_IA32_TEMPERATURE_TARGET, &val);
+	err = rdmsrq_safe(MSR_IA32_TEMPERATURE_TARGET, &val);
 	if (err)
 		return err;