x86/msr: Rename 'rdmsrl_safe_on_cpu()' to 'rdmsrq_safe_on_cpu()'
Suggested-by: "H. Peter Anvin" <hpa@zytor.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Juergen Gross <jgross@suse.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Xin Li <xin@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
parent 6fa17efe45
commit 5e404cb7ac

9 changed files with 15 additions and 15 deletions
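For context, the sketch below shows what a call site looks like after the rename. It is illustrative only and not part of the commit: example_read_pt_ctl() is a hypothetical wrapper, while the helper's prototype and the MSR constant are taken from the hunks further down.

/* Illustrative sketch only -- not part of this commit. */
static int example_read_pt_ctl(unsigned int cpu, u64 *ctl)
{
	/*
	 * Read the 64-bit MSR_IA32_RTIT_CTL on a remote CPU using the
	 * renamed helper; returns 0 on success, non-zero if the read faults.
	 */
	return rdmsrq_safe_on_cpu(cpu, MSR_IA32_RTIT_CTL, ctl);
}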
@@ -1839,7 +1839,7 @@ static __init int pt_init(void)
 	for_each_online_cpu(cpu) {
 		u64 ctl;
 
-		ret = rdmsrl_safe_on_cpu(cpu, MSR_IA32_RTIT_CTL, &ctl);
+		ret = rdmsrq_safe_on_cpu(cpu, MSR_IA32_RTIT_CTL, &ctl);
 		if (!ret && (ctl & RTIT_CTL_TRACEEN))
 			prior_warn++;
 	}
@@ -335,7 +335,7 @@ void rdmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr __percpu *
 void wrmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr __percpu *msrs);
 int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
 int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
-int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q);
+int rdmsrq_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q);
 int wrmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q);
 int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]);
 int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]);
@@ -379,7 +379,7 @@ static inline int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
 {
 	return wrmsr_safe(msr_no, l, h);
 }
-static inline int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
+static inline int rdmsrq_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
 {
 	return rdmsrq_safe(msr_no, q);
 }
@@ -49,7 +49,7 @@ int cpc_read_ffh(int cpunum, struct cpc_reg *reg, u64 *val)
 {
 	int err;
 
-	err = rdmsrl_safe_on_cpu(cpunum, reg->address, val);
+	err = rdmsrq_safe_on_cpu(cpunum, reg->address, val);
 	if (!err) {
 		u64 mask = GENMASK_ULL(reg->bit_offset + reg->bit_width - 1,
 				       reg->bit_offset);
@@ -65,7 +65,7 @@ int cpc_write_ffh(int cpunum, struct cpc_reg *reg, u64 val)
 	u64 rd_val;
 	int err;
 
-	err = rdmsrl_safe_on_cpu(cpunum, reg->address, &rd_val);
+	err = rdmsrq_safe_on_cpu(cpunum, reg->address, &rd_val);
 	if (!err) {
 		u64 mask = GENMASK_ULL(reg->bit_offset + reg->bit_width - 1,
 				       reg->bit_offset);
@@ -147,7 +147,7 @@ int amd_get_highest_perf(unsigned int cpu, u32 *highest_perf)
 	int ret;
 
 	if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
-		ret = rdmsrl_safe_on_cpu(cpu, MSR_AMD_CPPC_CAP1, &val);
+		ret = rdmsrq_safe_on_cpu(cpu, MSR_AMD_CPPC_CAP1, &val);
 		if (ret)
 			goto out;
 
@@ -220,7 +220,7 @@ int wrmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
 }
 EXPORT_SYMBOL(wrmsrl_safe_on_cpu);
 
-int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
+int rdmsrq_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
 {
 	u32 low, high;
 	int err;
@@ -230,7 +230,7 @@ int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
 
 	return err;
 }
-EXPORT_SYMBOL(rdmsrl_safe_on_cpu);
+EXPORT_SYMBOL(rdmsrq_safe_on_cpu);
 
 /*
  * These variants are significantly slower, but allows control over
@@ -137,7 +137,7 @@ static int amd_pstate_ut_check_perf(u32 index)
 		lowest_nonlinear_perf = cppc_perf.lowest_nonlinear_perf;
 		lowest_perf = cppc_perf.lowest_perf;
 	} else {
-		ret = rdmsrl_safe_on_cpu(cpu, MSR_AMD_CPPC_CAP1, &cap1);
+		ret = rdmsrq_safe_on_cpu(cpu, MSR_AMD_CPPC_CAP1, &cap1);
 		if (ret) {
 			pr_err("%s read CPPC_CAP1 ret=%d error!\n", __func__, ret);
 			return ret;
@@ -391,7 +391,7 @@ static int msr_init_perf(struct amd_cpudata *cpudata)
 	union perf_cached perf = READ_ONCE(cpudata->perf);
 	u64 cap1, numerator;
 
-	int ret = rdmsrl_safe_on_cpu(cpudata->cpu, MSR_AMD_CPPC_CAP1,
+	int ret = rdmsrq_safe_on_cpu(cpudata->cpu, MSR_AMD_CPPC_CAP1,
 				     &cap1);
 	if (ret)
 		return ret;
@@ -2106,13 +2106,13 @@ static int core_get_tdp_ratio(int cpu, u64 plat_info)
 	int err;
 
 	/* Get the TDP level (0, 1, 2) to get ratios */
-	err = rdmsrl_safe_on_cpu(cpu, MSR_CONFIG_TDP_CONTROL, &tdp_ctrl);
+	err = rdmsrq_safe_on_cpu(cpu, MSR_CONFIG_TDP_CONTROL, &tdp_ctrl);
 	if (err)
 		return err;
 
 	/* TDP MSR are continuous starting at 0x648 */
 	tdp_msr = MSR_CONFIG_TDP_NOMINAL + (tdp_ctrl & 0x03);
-	err = rdmsrl_safe_on_cpu(cpu, tdp_msr, &tdp_ratio);
+	err = rdmsrq_safe_on_cpu(cpu, tdp_msr, &tdp_ratio);
 	if (err)
 		return err;
 
@@ -2149,7 +2149,7 @@ static int core_get_max_pstate(int cpu)
 		return tdp_ratio;
 	}
 
-	err = rdmsrl_safe_on_cpu(cpu, MSR_TURBO_ACTIVATION_RATIO, &tar);
+	err = rdmsrq_safe_on_cpu(cpu, MSR_TURBO_ACTIVATION_RATIO, &tar);
 	if (!err) {
 		int tar_levels;
 
@@ -535,7 +535,7 @@ static long isst_if_msr_cmd_req(u8 *cmd_ptr, int *write_only, int resume)
 	} else {
 		u64 data;
 
-		ret = rdmsrl_safe_on_cpu(msr_cmd->logical_cpu,
+		ret = rdmsrq_safe_on_cpu(msr_cmd->logical_cpu,
 					 msr_cmd->msr, &data);
 		if (!ret) {
 			msr_cmd->data = data;
@@ -103,7 +103,7 @@ static int rapl_cpu_down_prep(unsigned int cpu)
 
 static int rapl_msr_read_raw(int cpu, struct reg_action *ra)
 {
-	if (rdmsrl_safe_on_cpu(cpu, ra->reg.msr, &ra->value)) {
+	if (rdmsrq_safe_on_cpu(cpu, ra->reg.msr, &ra->value)) {
 		pr_debug("failed to read msr 0x%x on cpu %d\n", ra->reg.msr, cpu);
 		return -EIO;
 	}