Mirror of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
powerpc/64s: Disable preemption in hash lazy mmu mode
apply_to_page_range() on kernel pages does not disable preemption, but
hash's lazy mmu mode requires it: the mode tracks the TLB entries to
flush in a per-cpu array.

Reported-by: Guenter Roeck <linux@roeck-us.net>
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Tested-by: Guenter Roeck <linux@roeck-us.net>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20221013151647.1857994-1-npiggin@gmail.com
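Why preemption matters here: a pointer obtained with this_cpu_ptr() is only
stable while the task cannot migrate to another CPU. Below is a minimal
sketch of the hazard in kernel-style C; demo_batch, demo_tlb_batch, and
demo_batch_add() are hypothetical stand-ins for the real ppc64_tlb_batch
machinery, not code from this commit.

#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/preempt.h>

/* Hypothetical stand-in for ppc64_tlb_batch: a per-cpu TLB flush queue. */
struct demo_batch {
	unsigned long vaddrs[192];
	unsigned int index;
};

static DEFINE_PER_CPU(struct demo_batch, demo_tlb_batch);

/* Queue one virtual address for a later flush on the current CPU. */
static void demo_batch_add(unsigned long vaddr)
{
	struct demo_batch *batch;

	preempt_disable();			/* pin the task to this CPU */
	batch = this_cpu_ptr(&demo_tlb_batch);	/* stable until preempt_enable() */
	if (batch->index < ARRAY_SIZE(batch->vaddrs))
		batch->vaddrs[batch->index++] = vaddr;
	preempt_enable();
	/*
	 * Without the preempt_disable()/preempt_enable() pair the task could
	 * migrate right after this_cpu_ptr() and write into another CPU's
	 * batch; that is the bug class this commit closes for lazy mmu mode,
	 * which keeps the batch active across many PTE updates.
	 */
}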
commit b9ef323ea1
parent b12eb279ff

1 changed file with 6 additions and 0 deletions
arch/powerpc/include/asm/book3s/64/tlbflush-hash.h
@@ -32,6 +32,11 @@ static inline void arch_enter_lazy_mmu_mode(void)
 
 	if (radix_enabled())
 		return;
+	/*
+	 * apply_to_page_range can call us with preempt enabled when
+	 * operating on kernel page tables.
+	 */
+	preempt_disable();
 	batch = this_cpu_ptr(&ppc64_tlb_batch);
 	batch->active = 1;
 }
@@ -47,6 +52,7 @@ static inline void arch_leave_lazy_mmu_mode(void)
 	if (batch->index)
 		__flush_tlb_pending(batch);
 	batch->active = 0;
+	preempt_enable();
 }
 
 #define arch_flush_lazy_mmu_mode()	do {} while (0)
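For context, the calling pattern the fix protects, sketched with a
hypothetical helper (demo_set_kernel_ptes; arch_enter_lazy_mmu_mode(),
arch_leave_lazy_mmu_mode(), set_pte_at(), and init_mm are the real kernel
interfaces): every PTE update between enter and leave is queued in the
per-cpu batch, and after this commit the pair also holds off preemption so
the batch stays on one CPU.

#include <linux/mm.h>
#include <linux/pgtable.h>

/* Hypothetical caller: update a range of kernel PTEs under lazy mmu mode. */
static void demo_set_kernel_ptes(pte_t *ptep, unsigned long addr,
				 unsigned long end, pte_t pteval)
{
	arch_enter_lazy_mmu_mode();	/* after this commit: preemption off */
	for (; addr < end; addr += PAGE_SIZE, ptep++)
		set_pte_at(&init_mm, addr, ptep, pteval); /* queued in the batch */
	arch_leave_lazy_mmu_mode();	/* flush the batch, preemption back on */
}

apply_to_page_range() drives essentially this loop over kernel page tables,
which is how arch_enter_lazy_mmu_mode() could previously be reached with
preemption still enabled.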