Mirror of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git (synced 2025-09-18 22:14:16 +00:00)
Merge branch 'x86-asm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 asm updates from Ingo Molnar:
 "Misc changes:

   - context switch micro-optimization
   - debug printout micro-optimization
   - comment enhancements and typo fix"

* 'x86-asm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86: Replace seq_printf() with seq_puts()
  x86/asm: Fix typo in arch/x86/kernel/asm_offset_64.c
  sched/x86: Add a comment clarifying LDT context switching
  sched/x86_64: Don't save flags on context switch
commit 9d0cf6f564

7 changed files with 51 additions and 41 deletions
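Most of the changes below convert seq_printf() calls that emit constant strings into seq_puts(), which skips format parsing. A minimal, hypothetical show() callback sketching the rule — the function name and values are invented for illustration; only the seq_file helpers are the real API:

/* Hypothetical show() callback, only to illustrate the conversion pattern:
 * strings without format specifiers go through seq_puts(), anything that
 * actually formats a value stays with seq_printf(). */
#include <linux/seq_file.h>

static int example_show(struct seq_file *m, void *v)
{
	int count = 42;					/* made-up value */

	seq_printf(m, "count\t\t: %d\n", count);	/* %d needed: keep seq_printf() */
	seq_puts(m, "status\t\t: ok\n");		/* constant string: seq_puts() */
	return 0;
}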
@@ -52,7 +52,16 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 		/* Stop flush ipis for the previous mm */
 		cpumask_clear_cpu(cpu, mm_cpumask(prev));
 
-		/* Load the LDT, if the LDT is different: */
+		/*
+		 * Load the LDT, if the LDT is different.
+		 *
+		 * It's possible leave_mm(prev) has been called. If so,
+		 * then prev->context.ldt could be out of sync with the
+		 * LDT descriptor or the LDT register. This can only happen
+		 * if prev->context.ldt is non-null, since we never free
+		 * an LDT. But LDTs can't be shared across mms, so
+		 * prev->context.ldt won't be equal to next->context.ldt.
+		 */
 		if (unlikely(prev->context.ldt != next->context.ldt))
 			load_LDT_nolock(&next->context);
 	}
@@ -79,12 +79,12 @@ do { \
 #else /* CONFIG_X86_32 */
 
 /* frame pointer must be last for get_wchan */
-#define SAVE_CONTEXT "pushf ; pushq %%rbp ; movq %%rsi,%%rbp\n\t"
-#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp ; popf\t"
+#define SAVE_CONTEXT "pushq %%rbp ; movq %%rsi,%%rbp\n\t"
+#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp\t"
 
 #define __EXTRA_CLOBBER \
 	, "rcx", "rbx", "rdx", "r8", "r9", "r10", "r11", \
-	  "r12", "r13", "r14", "r15"
+	  "r12", "r13", "r14", "r15", "flags"
 
 #ifdef CONFIG_CC_STACKPROTECTOR
 #define __switch_canary \
@@ -100,7 +100,11 @@ do { \
 #define __switch_canary_iparam
 #endif /* CC_STACKPROTECTOR */
 
-/* Save restore flags to clear handle leaking NT */
+/*
+ * There is no need to save or restore flags, because flags are always
+ * clean in kernel mode, with the possible exception of IOPL. Kernel IOPL
+ * has no effect.
+ */
 #define switch_to(prev, next, last) \
 	asm volatile(SAVE_CONTEXT \
 	     "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */ \
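The hunks above drop the pushf/popf pair from SAVE_CONTEXT/RESTORE_CONTEXT and instead list the flags among the clobbers, on the reasoning in the new comment that no flags state needs to survive the switch. As a stand-alone sketch of the underlying GCC extended-asm idiom — this is not the kernel's switch_to() macro; the function is hypothetical and uses the generic "cc" condition-code clobber rather than the kernel's clobber list:

#include <stdio.h>

/* Hypothetical example: the asm block modifies a register operand and the
 * CPU flags; declaring the clobbers ("cc", "memory") lets the compiler
 * account for the trashed state instead of saving/restoring it by hand. */
static long add_via_asm(long a, long b)
{
	long out;

	asm volatile("addq %2, %1\n\t"		/* a += b, sets arithmetic flags */
		     "movq %1, %0"		/* out = a */
		     : "=r" (out), "+r" (a)
		     : "r" (b)
		     : "cc", "memory");
	return out;
}

int main(void)
{
	printf("%ld\n", add_via_asm(40, 2));	/* prints 42 (x86-64, GCC/Clang) */
	return 0;
}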
@@ -47,7 +47,6 @@ int main(void)
 #endif
 
 #define ENTRY(entry) OFFSET(pt_regs_ ## entry, pt_regs, entry)
-	ENTRY(bx);
 	ENTRY(bx);
 	ENTRY(cx);
 	ENTRY(dx);
@@ -72,7 +72,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 	if (c->x86_mask || c->cpuid_level >= 0)
 		seq_printf(m, "stepping\t: %d\n", c->x86_mask);
 	else
-		seq_printf(m, "stepping\t: unknown\n");
+		seq_puts(m, "stepping\t: unknown\n");
 	if (c->microcode)
 		seq_printf(m, "microcode\t: 0x%x\n", c->microcode);
 
@@ -92,12 +92,12 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 	show_cpuinfo_core(m, c, cpu);
 	show_cpuinfo_misc(m, c);
 
-	seq_printf(m, "flags\t\t:");
+	seq_puts(m, "flags\t\t:");
 	for (i = 0; i < 32*NCAPINTS; i++)
 		if (cpu_has(c, i) && x86_cap_flags[i] != NULL)
 			seq_printf(m, " %s", x86_cap_flags[i]);
 
-	seq_printf(m, "\nbugs\t\t:");
+	seq_puts(m, "\nbugs\t\t:");
 	for (i = 0; i < 32*NBUGINTS; i++) {
 		unsigned int bug_bit = 32*NCAPINTS + i;
 
@@ -118,7 +118,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 	seq_printf(m, "address sizes\t: %u bits physical, %u bits virtual\n",
 		   c->x86_phys_bits, c->x86_virt_bits);
 
-	seq_printf(m, "power management:");
+	seq_puts(m, "power management:");
 	for (i = 0; i < 32; i++) {
 		if (c->x86_power & (1 << i)) {
 			if (i < ARRAY_SIZE(x86_power_flags) &&
@@ -131,7 +131,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 		}
 	}
 
-	seq_printf(m, "\n\n");
+	seq_puts(m, "\n\n");
 
 	return 0;
 }
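In the next hunk (arch_show_interrupts) the per-CPU header calls stay with seq_printf() because "%*s" takes its field width from the prec argument at run time; only the trailing constant labels become seq_puts(). A small user-space analogue of that split, using printf()/fputs() rather than the seq_file API, with a made-up width value:

#include <stdio.h>

int main(void)
{
	int prec = 8;	/* hypothetical label width, mirroring the 'prec' parameter */

	/* "%*s" reads its width from an argument, so a formatting call is
	 * required; the fixed trailing label needs no formatting at all. */
	printf("%*s: ", prec, "NMI");
	fputs(" Non-maskable interrupts\n", stdout);
	return 0;
}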
@@ -59,78 +59,78 @@ int arch_show_interrupts(struct seq_file *p, int prec)
 	seq_printf(p, "%*s: ", prec, "NMI");
 	for_each_online_cpu(j)
 		seq_printf(p, "%10u ", irq_stats(j)->__nmi_count);
-	seq_printf(p, " Non-maskable interrupts\n");
+	seq_puts(p, " Non-maskable interrupts\n");
 #ifdef CONFIG_X86_LOCAL_APIC
 	seq_printf(p, "%*s: ", prec, "LOC");
 	for_each_online_cpu(j)
 		seq_printf(p, "%10u ", irq_stats(j)->apic_timer_irqs);
-	seq_printf(p, " Local timer interrupts\n");
+	seq_puts(p, " Local timer interrupts\n");
 
 	seq_printf(p, "%*s: ", prec, "SPU");
 	for_each_online_cpu(j)
 		seq_printf(p, "%10u ", irq_stats(j)->irq_spurious_count);
-	seq_printf(p, " Spurious interrupts\n");
+	seq_puts(p, " Spurious interrupts\n");
 	seq_printf(p, "%*s: ", prec, "PMI");
 	for_each_online_cpu(j)
 		seq_printf(p, "%10u ", irq_stats(j)->apic_perf_irqs);
-	seq_printf(p, " Performance monitoring interrupts\n");
+	seq_puts(p, " Performance monitoring interrupts\n");
 	seq_printf(p, "%*s: ", prec, "IWI");
 	for_each_online_cpu(j)
 		seq_printf(p, "%10u ", irq_stats(j)->apic_irq_work_irqs);
-	seq_printf(p, " IRQ work interrupts\n");
+	seq_puts(p, " IRQ work interrupts\n");
 	seq_printf(p, "%*s: ", prec, "RTR");
 	for_each_online_cpu(j)
 		seq_printf(p, "%10u ", irq_stats(j)->icr_read_retry_count);
-	seq_printf(p, " APIC ICR read retries\n");
+	seq_puts(p, " APIC ICR read retries\n");
 #endif
 	if (x86_platform_ipi_callback) {
 		seq_printf(p, "%*s: ", prec, "PLT");
 		for_each_online_cpu(j)
 			seq_printf(p, "%10u ", irq_stats(j)->x86_platform_ipis);
-		seq_printf(p, " Platform interrupts\n");
+		seq_puts(p, " Platform interrupts\n");
 	}
 #ifdef CONFIG_SMP
 	seq_printf(p, "%*s: ", prec, "RES");
 	for_each_online_cpu(j)
 		seq_printf(p, "%10u ", irq_stats(j)->irq_resched_count);
-	seq_printf(p, " Rescheduling interrupts\n");
+	seq_puts(p, " Rescheduling interrupts\n");
 	seq_printf(p, "%*s: ", prec, "CAL");
 	for_each_online_cpu(j)
 		seq_printf(p, "%10u ", irq_stats(j)->irq_call_count -
 					irq_stats(j)->irq_tlb_count);
-	seq_printf(p, " Function call interrupts\n");
+	seq_puts(p, " Function call interrupts\n");
 	seq_printf(p, "%*s: ", prec, "TLB");
 	for_each_online_cpu(j)
 		seq_printf(p, "%10u ", irq_stats(j)->irq_tlb_count);
-	seq_printf(p, " TLB shootdowns\n");
+	seq_puts(p, " TLB shootdowns\n");
 #endif
 #ifdef CONFIG_X86_THERMAL_VECTOR
 	seq_printf(p, "%*s: ", prec, "TRM");
 	for_each_online_cpu(j)
 		seq_printf(p, "%10u ", irq_stats(j)->irq_thermal_count);
-	seq_printf(p, " Thermal event interrupts\n");
+	seq_puts(p, " Thermal event interrupts\n");
 #endif
 #ifdef CONFIG_X86_MCE_THRESHOLD
 	seq_printf(p, "%*s: ", prec, "THR");
 	for_each_online_cpu(j)
 		seq_printf(p, "%10u ", irq_stats(j)->irq_threshold_count);
-	seq_printf(p, " Threshold APIC interrupts\n");
+	seq_puts(p, " Threshold APIC interrupts\n");
 #endif
 #ifdef CONFIG_X86_MCE
 	seq_printf(p, "%*s: ", prec, "MCE");
 	for_each_online_cpu(j)
 		seq_printf(p, "%10u ", per_cpu(mce_exception_count, j));
-	seq_printf(p, " Machine check exceptions\n");
+	seq_puts(p, " Machine check exceptions\n");
 	seq_printf(p, "%*s: ", prec, "MCP");
 	for_each_online_cpu(j)
 		seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
-	seq_printf(p, " Machine check polls\n");
+	seq_puts(p, " Machine check polls\n");
 #endif
 #if IS_ENABLED(CONFIG_HYPERV) || defined(CONFIG_XEN)
 	seq_printf(p, "%*s: ", prec, "THR");
 	for_each_online_cpu(j)
 		seq_printf(p, "%10u ", irq_stats(j)->irq_hv_callback_count);
-	seq_printf(p, " Hypervisor callback interrupts\n");
+	seq_puts(p, " Hypervisor callback interrupts\n");
 #endif
 	seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
 #if defined(CONFIG_X86_IO_APIC)
@@ -824,7 +824,7 @@ static void *memtype_seq_start(struct seq_file *seq, loff_t *pos)
 {
 	if (*pos == 0) {
 		++*pos;
-		seq_printf(seq, "PAT memtype list:\n");
+		seq_puts(seq, "PAT memtype list:\n");
 	}
 
 	return memtype_get_idx(*pos);
@@ -1367,20 +1367,18 @@ static int ptc_seq_show(struct seq_file *file, void *data)
 
 	cpu = *(loff_t *)data;
 	if (!cpu) {
-		seq_printf(file,
-		 "# cpu bauoff sent stime self locals remotes ncpus localhub ");
-		seq_printf(file,
-			"remotehub numuvhubs numuvhubs16 numuvhubs8 ");
-		seq_printf(file,
-			"numuvhubs4 numuvhubs2 numuvhubs1 dto snacks retries ");
-		seq_printf(file,
-			"rok resetp resett giveup sto bz throt disable ");
-		seq_printf(file,
-			"enable wars warshw warwaits enters ipidis plugged ");
-		seq_printf(file,
-			"ipiover glim cong swack recv rtime all one mult ");
-		seq_printf(file,
-			"none retry canc nocan reset rcan\n");
+		seq_puts(file,
+			 "# cpu bauoff sent stime self locals remotes ncpus localhub ");
+		seq_puts(file, "remotehub numuvhubs numuvhubs16 numuvhubs8 ");
+		seq_puts(file,
+			 "numuvhubs4 numuvhubs2 numuvhubs1 dto snacks retries ");
+		seq_puts(file,
+			 "rok resetp resett giveup sto bz throt disable ");
+		seq_puts(file,
+			 "enable wars warshw warwaits enters ipidis plugged ");
+		seq_puts(file,
+			 "ipiover glim cong swack recv rtime all one mult ");
+		seq_puts(file, "none retry canc nocan reset rcan\n");
 	}
 	if (cpu < num_possible_cpus() && cpu_online(cpu)) {
 		bcp = &per_cpu(bau_control, cpu);