2019-05-19 13:08:55 +01:00
|
|
|
// SPDX-License-Identifier: GPL-2.0-only
|
2015-04-22 10:53:34 +02:00
|
|
|
/*
|
2015-04-26 15:36:46 +02:00
|
|
|
* x86 FPU boot time init code:
|
2015-04-22 10:53:34 +02:00
|
|
|
*/
|
2021-10-15 03:16:39 +02:00
|
|
|
#include <asm/fpu/api.h>
|
2015-04-22 10:53:34 +02:00
|
|
|
#include <asm/tlbflush.h>
|
x86/fpu: Fix early FPU command-line parsing
The function fpu__init_system() is executed before
parse_early_param(). This causes wrong FPU configuration. This
patch fixes this issue by parsing boot_command_line in the
beginning of fpu__init_system().
With all four patches in this series, each parameter disables
features as the following:
eagerfpu=off: eagerfpu, avx, avx2, avx512, mpx
no387: fpu
nofxsr: fxsr, fxsropt, xmm
noxsave: xsave, xsaveopt, xsaves, xsavec, avx, avx2, avx512,
mpx, xgetbv1 noxsaveopt: xsaveopt
noxsaves: xsaves
Signed-off-by: Yu-cheng Yu <yu-cheng.yu@intel.com>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Borislav Petkov <bp@suse.de>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Quentin Casasnovas <quentin.casasnovas@oracle.com>
Cc: Ravi V. Shankar <ravi.v.shankar@intel.com>
Cc: Sai Praneeth Prakhya <sai.praneeth.prakhya@intel.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: yu-cheng yu <yu-cheng.yu@intel.com>
Link: http://lkml.kernel.org/r/1452119094-7252-2-git-send-email-yu-cheng.yu@intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
2016-01-06 14:24:51 -08:00
|
|
|
#include <asm/setup.h>
|
2015-04-22 10:53:34 +02:00
|
|
|
|
2015-07-17 12:28:12 +02:00
|
|
|
#include <linux/sched.h>
|
2017-02-08 18:51:36 +01:00
|
|
|
#include <linux/sched/task.h>
|
x86/fpu: Fix early FPU command-line parsing
The function fpu__init_system() is executed before
parse_early_param(). This causes wrong FPU configuration. This
patch fixes this issue by parsing boot_command_line in the
beginning of fpu__init_system().
With all four patches in this series, each parameter disables
features as the following:
eagerfpu=off: eagerfpu, avx, avx2, avx512, mpx
no387: fpu
nofxsr: fxsr, fxsropt, xmm
noxsave: xsave, xsaveopt, xsaves, xsavec, avx, avx2, avx512,
mpx, xgetbv1 noxsaveopt: xsaveopt
noxsaves: xsaves
Signed-off-by: Yu-cheng Yu <yu-cheng.yu@intel.com>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Borislav Petkov <bp@suse.de>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Quentin Casasnovas <quentin.casasnovas@oracle.com>
Cc: Ravi V. Shankar <ravi.v.shankar@intel.com>
Cc: Sai Praneeth Prakhya <sai.praneeth.prakhya@intel.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: yu-cheng yu <yu-cheng.yu@intel.com>
Link: http://lkml.kernel.org/r/1452119094-7252-2-git-send-email-yu-cheng.yu@intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
2016-01-06 14:24:51 -08:00
|
|
|
#include <linux/init.h>
|
2015-07-17 12:28:12 +02:00
|
|
|
|
2021-10-15 03:16:28 +02:00
|
|
|
#include "internal.h"
|
2021-10-15 03:16:31 +02:00
|
|
|
#include "legacy.h"
|
2021-10-15 03:16:38 +02:00
|
|
|
#include "xstate.h"
|
2021-10-15 03:16:28 +02:00
|
|
|
|
2015-04-26 15:32:40 +02:00
|
|
|
/*
|
|
|
|
* Initialize the registers found in all CPUs, CR0 and CR4:
|
|
|
|
*/
|
|
|
|
static void fpu__init_cpu_generic(void)
|
|
|
|
{
|
|
|
|
unsigned long cr0;
|
|
|
|
unsigned long cr4_mask = 0;
|
|
|
|
|
2016-04-04 22:25:01 +02:00
|
|
|
if (boot_cpu_has(X86_FEATURE_FXSR))
|
2015-04-26 15:32:40 +02:00
|
|
|
cr4_mask |= X86_CR4_OSFXSR;
|
2016-04-04 22:24:57 +02:00
|
|
|
if (boot_cpu_has(X86_FEATURE_XMM))
|
2015-04-26 15:32:40 +02:00
|
|
|
cr4_mask |= X86_CR4_OSXMMEXCPT;
|
|
|
|
if (cr4_mask)
|
|
|
|
cr4_set_bits(cr4_mask);
|
|
|
|
|
|
|
|
cr0 = read_cr0();
|
|
|
|
cr0 &= ~(X86_CR0_TS|X86_CR0_EM); /* clear TS and EM */
|
2016-04-04 22:24:58 +02:00
|
|
|
if (!boot_cpu_has(X86_FEATURE_FPU))
|
2015-04-26 15:32:40 +02:00
|
|
|
cr0 |= X86_CR0_EM;
|
|
|
|
write_cr0(cr0);
|
2015-04-29 10:58:03 +02:00
|
|
|
|
|
|
|
/* Flush out any pending x87 state: */
|
2015-08-22 09:52:06 +02:00
|
|
|
#ifdef CONFIG_MATH_EMULATION
|
2016-04-04 22:24:58 +02:00
|
|
|
if (!boot_cpu_has(X86_FEATURE_FPU))
|
2021-10-13 16:55:39 +02:00
|
|
|
fpstate_init_soft(¤t->thread.fpu.fpstate->regs.soft);
|
2015-08-22 09:52:06 +02:00
|
|
|
else
|
|
|
|
#endif
|
|
|
|
asm volatile ("fninit");
|
2015-04-26 15:32:40 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Enable all supported FPU features. Called when a CPU is brought online:
 */
void fpu__init_cpu(void)
{
	/* Set up CR0/CR4 and reset x87 state first, then enable xstate features: */
	fpu__init_cpu_generic();
	fpu__init_cpu_xstate();
}
|
|
|
|
|
2017-01-18 11:15:41 -08:00
|
|
|
/*
 * Probe for an x87 FPU without relying on CPUID: execute FNINIT and then
 * read back the status and control words.  Returns true if the values
 * match what a real FPU produces after FNINIT (FSW == 0, the relevant
 * FCW mask bits == 0x003f).
 */
static bool fpu__probe_without_cpuid(void)
{
	unsigned long cr0;
	u16 fsw, fcw;

	/* Pre-fill with a value no FPU would leave behind: */
	fsw = fcw = 0xffff;

	/* Clear TS and EM so the FPU instructions below don't trap: */
	cr0 = read_cr0();
	cr0 &= ~(X86_CR0_TS | X86_CR0_EM);
	write_cr0(cr0);

	asm volatile("fninit ; fnstsw %0 ; fnstcw %1" : "+m" (fsw), "+m" (fcw));

	pr_info("x86/fpu: Probing for FPU: FSW=0x%04hx FCW=0x%04hx\n", fsw, fcw);

	return fsw == 0 && (fcw & 0x103f) == 0x003f;
}
|
|
|
|
|
|
|
|
/*
 * Early boot-time FPU detection: when CPUID is not available (and the FPU
 * capability was not cleared on the command line), probe for the FPU
 * directly and force/clear the X86_FEATURE_FPU capability accordingly.
 *
 * Without math emulation built in, a missing FPU is fatal: halt.
 */
static void fpu__init_system_early_generic(struct cpuinfo_x86 *c)
{
	if (!boot_cpu_has(X86_FEATURE_CPUID) &&
	    !test_bit(X86_FEATURE_FPU, (unsigned long *)cpu_caps_cleared)) {
		if (fpu__probe_without_cpuid())
			setup_force_cpu_cap(X86_FEATURE_FPU);
		else
			setup_clear_cpu_cap(X86_FEATURE_FPU);
	}

#ifndef CONFIG_MATH_EMULATION
	if (!test_cpu_cap(&boot_cpu_data, X86_FEATURE_FPU)) {
		pr_emerg("x86/fpu: Giving up, no FPU found and no math emulation present\n");
		for (;;)
			asm volatile("hlt");
	}
#endif
}
|
|
|
|
|
2015-04-22 13:44:25 +02:00
|
|
|
/*
 * Boot time FPU feature detection code:
 */

/*
 * Mask of MXCSR bits the hardware supports; starts out all-set and is
 * narrowed by fpu__init_system_mxcsr() below.
 */
unsigned int mxcsr_feature_mask __ro_after_init = 0xffffffffu;
EXPORT_SYMBOL_GPL(mxcsr_feature_mask);
|
2015-04-24 10:49:11 +02:00
|
|
|
|
2015-05-04 09:52:42 +02:00
|
|
|
/*
 * Narrow mxcsr_feature_mask to the bits the CPU actually supports, as
 * reported by the MXCSR_MASK field of an FXSAVE image.  Without FXSR
 * support the mask becomes 0.
 */
static void __init fpu__init_system_mxcsr(void)
{
	unsigned int mask = 0;

	if (boot_cpu_has(X86_FEATURE_FXSR)) {
		/* Static because GCC does not get 16-byte stack alignment right: */
		static struct fxregs_state fxregs __initdata;

		asm volatile("fxsave %0" : "+m" (fxregs));

		mask = fxregs.mxcsr_mask;

		/*
		 * If zero then use the default features mask,
		 * which has all features set, except the
		 * denormals-are-zero feature bit:
		 */
		if (mask == 0)
			mask = 0x0000ffbf;
	}
	mxcsr_feature_mask &= mask;
}
|
|
|
|
|
2015-04-26 14:35:54 +02:00
|
|
|
/*
 * Once per bootup FPU initialization sequences that will run on most x86 CPUs:
 */
static void __init fpu__init_system_generic(void)
{
	/*
	 * Set up the legacy init FPU context. Will be updated when the
	 * CPU supports XSAVE[S].
	 */
	fpstate_init_user(&init_fpstate);

	/* Detect the supported MXCSR feature bits: */
	fpu__init_system_mxcsr();
}
|
|
|
|
|
2015-12-21 15:25:30 +01:00
|
|
|
/* Get alignment of the TYPE. */
#define TYPE_ALIGN(TYPE) offsetof(struct { char x; TYPE test; }, test)

/*
 * Enforce that 'MEMBER' is the last field of 'TYPE'.
 *
 * Align the computed size with alignment of the TYPE,
 * because that's how C aligns structs.
 */
#define CHECK_MEMBER_AT_END_OF(TYPE, MEMBER) \
	BUILD_BUG_ON(sizeof(TYPE) != ALIGN(offsetofend(TYPE, MEMBER), \
					   TYPE_ALIGN(TYPE)))
|
2015-07-17 12:28:11 +02:00
|
|
|
|
|
|
|
/*
 * We append the 'struct fpu' to the task_struct:
 *
 * Computes the real, dynamically-sized task_struct size and publishes it
 * in arch_task_struct_size.
 */
static void __init fpu__init_task_struct_size(void)
{
	int task_size = sizeof(struct task_struct);

	/*
	 * Subtract off the static size of the register state.
	 * It potentially has a bunch of padding.
	 */
	task_size -= sizeof(current->thread.fpu.__fpstate.regs);

	/*
	 * Add back the dynamically-calculated register state
	 * size.
	 */
	task_size += fpu_kernel_cfg.default_size;

	/*
	 * We dynamically size 'struct fpu', so we require that
	 * it be at the end of 'thread_struct' and that
	 * 'thread_struct' be at the end of 'task_struct'. If
	 * you hit a compile error here, check the structure to
	 * see if something got added to the end.
	 */
	CHECK_MEMBER_AT_END_OF(struct fpu, __fpstate);
	CHECK_MEMBER_AT_END_OF(struct thread_struct, fpu);
	CHECK_MEMBER_AT_END_OF(struct task_struct, thread);

	arch_task_struct_size = task_size;
}
|
|
|
|
|
2015-04-26 15:32:40 +02:00
|
|
|
/*
|
2016-05-20 10:47:06 -07:00
|
|
|
* Set up the user and kernel xstate sizes based on the legacy FPU context size.
|
2015-04-26 15:32:40 +02:00
|
|
|
*
|
|
|
|
* We set this up first, and later it will be overwritten by
|
|
|
|
* fpu__init_system_xstate() if the CPU knows about xstates.
|
|
|
|
*/
|
2015-05-04 09:52:42 +02:00
|
|
|
static void __init fpu__init_system_xstate_size_legacy(void)
|
2015-04-22 10:53:34 +02:00
|
|
|
{
|
2021-10-15 01:09:34 +02:00
|
|
|
unsigned int size;
|
|
|
|
|
2015-04-22 10:53:34 +02:00
|
|
|
/*
|
2021-10-15 01:09:34 +02:00
|
|
|
* Note that the size configuration might be overwritten later
|
|
|
|
* during fpu__init_system_xstate().
|
2015-04-22 10:53:34 +02:00
|
|
|
*/
|
2021-10-21 15:55:09 -07:00
|
|
|
if (!cpu_feature_enabled(X86_FEATURE_FPU)) {
|
2021-10-15 01:09:34 +02:00
|
|
|
size = sizeof(struct swregs_state);
|
2021-10-21 15:55:09 -07:00
|
|
|
} else if (cpu_feature_enabled(X86_FEATURE_FXSR)) {
|
2021-10-15 01:09:34 +02:00
|
|
|
size = sizeof(struct fxregs_state);
|
2021-10-21 15:55:09 -07:00
|
|
|
fpu_user_cfg.legacy_features = XFEATURE_MASK_FPSSE;
|
|
|
|
} else {
|
2021-10-15 01:09:34 +02:00
|
|
|
size = sizeof(struct fregs_state);
|
2021-10-21 15:55:09 -07:00
|
|
|
fpu_user_cfg.legacy_features = XFEATURE_MASK_FP;
|
|
|
|
}
|
2016-05-20 10:47:05 -07:00
|
|
|
|
2021-10-15 01:09:34 +02:00
|
|
|
fpu_kernel_cfg.max_size = size;
|
|
|
|
fpu_kernel_cfg.default_size = size;
|
|
|
|
fpu_user_cfg.max_size = size;
|
|
|
|
fpu_user_cfg.default_size = size;
|
2021-10-13 16:55:46 +02:00
|
|
|
fpstate_reset(¤t->thread.fpu);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Finalize init_fpstate after xstate setup has run, so its size and
 * feature mask reflect the final kernel configuration.
 */
static void __init fpu__init_init_fpstate(void)
{
	/* Bring init_fpstate size and features up to date */
	init_fpstate.size = fpu_kernel_cfg.max_size;
	init_fpstate.xfeatures = fpu_kernel_cfg.max_features;
}
|
|
|
|
|
x86/fpu: Split fpu__cpu_init() into early-boot and cpu-boot parts
There are two kinds of FPU initialization sequences necessary to bring FPU
functionality up: once per system bootup activities, such as detection,
feature initialization, etc. of attributes that are shared by all CPUs
in the system - and per cpu initialization sequences run when a CPU is
brought online (either during bootup or during CPU hotplug onlining),
such as CR0/CR4 register setting, etc.
The FPU code is mixing these roles together, with no clear distinction.
Start sorting this out by splitting the main FPU detection routine
(fpu__cpu_init()) into two parts: fpu__init_system() for
one per system init activities, and fpu__init_cpu() for the
per CPU onlining init activities.
Note that xstate_init() is called from both variants for the time being,
because it has a dual nature as well. We'll fix that in upcoming patches.
Just do the split and call it as we used to before, don't introduce any
change in initialization behavior yet, beyond duplicate (and harmless)
fpu__init_cpu() and xstate_init() calls - which we'll fix in later
patches.
Reviewed-by: Borislav Petkov <bp@alien8.de>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
2015-04-25 04:34:48 +02:00
|
|
|
/*
|
2015-04-26 15:36:46 +02:00
|
|
|
* Called on the boot CPU once per system bootup, to set up the initial
|
|
|
|
* FPU state that is later cloned into all processes:
|
x86/fpu: Split fpu__cpu_init() into early-boot and cpu-boot parts
There are two kinds of FPU initialization sequences necessary to bring FPU
functionality up: once per system bootup activities, such as detection,
feature initialization, etc. of attributes that are shared by all CPUs
in the system - and per cpu initialization sequences run when a CPU is
brought online (either during bootup or during CPU hotplug onlining),
such as CR0/CR4 register setting, etc.
The FPU code is mixing these roles together, with no clear distinction.
Start sorting this out by splitting the main FPU detection routine
(fpu__cpu_init()) into two parts: fpu__init_system() for
one per system init activities, and fpu__init_cpu() for the
per CPU onlining init activities.
Note that xstate_init() is called from both variants for the time being,
because it has a dual nature as well. We'll fix that in upcoming patches.
Just do the split and call it as we used to before, don't introduce any
change in initialization behavior yet, beyond duplicate (and harmless)
fpu__init_cpu() and xstate_init() calls - which we'll fix in later
patches.
Reviewed-by: Borislav Petkov <bp@alien8.de>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
2015-04-25 04:34:48 +02:00
|
|
|
*/
|
2015-05-04 09:52:42 +02:00
|
|
|
void __init fpu__init_system(struct cpuinfo_x86 *c)
|
x86/fpu: Split fpu__cpu_init() into early-boot and cpu-boot parts
There are two kinds of FPU initialization sequences necessary to bring FPU
functionality up: once per system bootup activities, such as detection,
feature initialization, etc. of attributes that are shared by all CPUs
in the system - and per cpu initialization sequences run when a CPU is
brought online (either during bootup or during CPU hotplug onlining),
such as CR0/CR4 register setting, etc.
The FPU code is mixing these roles together, with no clear distinction.
Start sorting this out by splitting the main FPU detection routine
(fpu__cpu_init()) into two parts: fpu__init_system() for
one per system init activities, and fpu__init_cpu() for the
per CPU onlining init activities.
Note that xstate_init() is called from both variants for the time being,
because it has a dual nature as well. We'll fix that in upcoming patches.
Just do the split and call it as we used to before, don't introduce any
change in initialization behavior yet, beyond duplicate (and harmless)
fpu__init_cpu() and xstate_init() calls - which we'll fix in later
patches.
Reviewed-by: Borislav Petkov <bp@alien8.de>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
2015-04-25 04:34:48 +02:00
|
|
|
{
|
2021-10-13 16:55:27 +02:00
|
|
|
fpstate_reset(¤t->thread.fpu);
|
2015-04-26 15:07:18 +02:00
|
|
|
fpu__init_system_early_generic(c);
|
|
|
|
|
2015-04-26 15:36:46 +02:00
|
|
|
/*
|
|
|
|
* The FPU has to be operational for some of the
|
|
|
|
* later FPU init activities:
|
|
|
|
*/
|
x86/fpu: Split fpu__cpu_init() into early-boot and cpu-boot parts
There are two kinds of FPU initialization sequences necessary to bring FPU
functionality up: once per system bootup activities, such as detection,
feature initialization, etc. of attributes that are shared by all CPUs
in the system - and per cpu initialization sequences run when a CPU is
brought online (either during bootup or during CPU hotplug onlining),
such as CR0/CR4 register setting, etc.
The FPU code is mixing these roles together, with no clear distinction.
Start sorting this out by splitting the main FPU detection routine
(fpu__cpu_init()) into two parts: fpu__init_system() for
one per system init activities, and fpu__init_cpu() for the
per CPU onlining init activities.
Note that xstate_init() is called from both variants for the time being,
because it has a dual nature as well. We'll fix that in upcoming patches.
Just do the split and call it as we used to before, don't introduce any
change in initialization behavior yet, beyond duplicate (and harmless)
fpu__init_cpu() and xstate_init() calls - which we'll fix in later
patches.
Reviewed-by: Borislav Petkov <bp@alien8.de>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
2015-04-25 04:34:48 +02:00
|
|
|
fpu__init_cpu();
|
2015-04-22 10:53:34 +02:00
|
|
|
|
2015-04-26 14:35:54 +02:00
|
|
|
fpu__init_system_generic();
|
2015-04-26 15:23:37 +02:00
|
|
|
fpu__init_system_xstate_size_legacy();
|
2021-10-15 01:09:34 +02:00
|
|
|
fpu__init_system_xstate(fpu_kernel_cfg.max_size);
|
2015-07-17 12:28:12 +02:00
|
|
|
fpu__init_task_struct_size();
|
2021-10-13 16:55:46 +02:00
|
|
|
fpu__init_init_fpstate();
|
2015-04-22 10:53:34 +02:00
|
|
|
}
|