2019-05-29 07:18:00 -07:00
|
|
|
// SPDX-License-Identifier: GPL-2.0-only
|
2017-10-25 14:30:32 -07:00
|
|
|
/*
|
|
|
|
* Copyright (C) 2017 SiFive
|
|
|
|
*/
|
|
|
|
|
2023-10-18 18:10:07 +05:30
|
|
|
#include <linux/acpi.h>
|
2022-10-21 11:52:39 +05:30
|
|
|
#include <linux/of.h>
|
2024-03-12 16:53:41 -07:00
|
|
|
#include <linux/prctl.h>
|
2023-10-18 18:10:07 +05:30
|
|
|
#include <asm/acpi.h>
|
2017-10-25 14:30:32 -07:00
|
|
|
#include <asm/cacheflush.h>
|
|
|
|
|
2019-03-27 00:41:25 +00:00
|
|
|
#ifdef CONFIG_SMP
|
|
|
|
|
|
|
|
#include <asm/sbi.h>
|
|
|
|
|
2019-10-28 13:10:36 +01:00
|
|
|
/*
 * IPI callback: flush this hart's instruction cache.
 *
 * @info: unused; required by the on_each_cpu*() callback signature.
 */
static void ipi_remote_fence_i(void *info)
{
	/*
	 * Call the flush directly rather than `return <void expr>;` —
	 * returning a void expression from a void function is a strict
	 * ISO C constraint violation (GCC accepts it as an extension).
	 */
	local_flush_icache_all();
}
|
|
|
|
|
2019-03-27 00:41:25 +00:00
|
|
|
void flush_icache_all(void)
|
|
|
|
{
|
2021-09-18 18:02:21 +02:00
|
|
|
local_flush_icache_all();
|
|
|
|
|
2024-03-26 21:49:46 -07:00
|
|
|
if (num_online_cpus() < 2)
|
|
|
|
return;
|
|
|
|
else if (riscv_use_sbi_for_rfence())
|
2019-10-28 13:10:36 +01:00
|
|
|
sbi_remote_fence_i(NULL);
|
|
|
|
else
|
|
|
|
on_each_cpu(ipi_remote_fence_i, NULL, 1);
|
2019-03-27 00:41:25 +00:00
|
|
|
}
|
2019-12-16 20:07:04 -08:00
|
|
|
EXPORT_SYMBOL(flush_icache_all);
|
2019-03-27 00:41:25 +00:00
|
|
|
|
|
|
|
/*
 * Performs an icache flush for the given MM context. RISC-V has no direct
 * mechanism for instruction cache shoot downs, so instead we send an IPI that
 * informs the remote harts they need to flush their local instruction caches.
 * To avoid pathologically slow behavior in a common case (a bunch of
 * single-hart processes on a many-hart machine, ie 'make -j') we avoid the
 * IPIs for harts that are not currently executing a MM context and instead
 * schedule a deferred local instruction cache flush to be performed before
 * execution resumes on each hart.
 */
void flush_icache_mm(struct mm_struct *mm, bool local)
{
	unsigned int cpu;
	cpumask_t others, *mask;

	/* No migration while we reason about "this hart" vs. the others. */
	preempt_disable();

	/* Mark every hart's icache as needing a flush for this MM. */
	mask = &mm->context.icache_stale_mask;
	cpumask_setall(mask);
	/* Flush this hart's I$ now, and mark it as flushed. */
	cpu = smp_processor_id();
	cpumask_clear_cpu(cpu, mask);
	local_flush_icache_all();

	/*
	 * Flush the I$ of other harts concurrently executing, and mark them as
	 * flushed.
	 */
	cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu));
	/* With no other hart running this MM, the caller-requested scope wins. */
	local |= cpumask_empty(&others);
	if (mm == current->active_mm && local) {
		/*
		 * It's assumed that at least one strongly ordered operation is
		 * performed on this hart between setting a hart's cpumask bit
		 * and scheduling this MM context on that hart. Sending an SBI
		 * remote message will do this, but in the case where no
		 * messages are sent we still need to order this hart's writes
		 * with flush_icache_deferred().
		 */
		smp_mb();
	} else if (riscv_use_sbi_for_rfence()) {
		sbi_remote_fence_i(&others);
	} else {
		on_each_cpu_mask(&others, ipi_remote_fence_i, NULL, 1);
	}

	preempt_enable();
}
|
|
|
|
|
|
|
|
#endif /* CONFIG_SMP */
|
|
|
|
|
2019-10-28 13:10:41 +01:00
|
|
|
#ifdef CONFIG_MMU
|
2024-02-02 13:47:11 +01:00
|
|
|
/*
 * Keep the instruction cache coherent with a page newly mapped by @pte.
 *
 * PG_dcache_clean on the folio records that the icache has already been
 * synchronized for it: skip the MM-wide flush when the bit is set, and
 * set it once the flush has been issued.
 */
void flush_icache_pte(struct mm_struct *mm, pte_t pte)
{
	struct folio *folio = page_folio(pte_page(pte));

	if (test_bit(PG_dcache_clean, &folio->flags))
		return;

	flush_icache_mm(mm, false);
	set_bit(PG_dcache_clean, &folio->flags);
}
|
2019-10-28 13:10:41 +01:00
|
|
|
#endif /* CONFIG_MMU */
|
2022-10-21 11:52:39 +05:30
|
|
|
|
|
|
|
/* Zicbom cache-block management operation size; set by riscv_init_cbo_blocksizes(). */
unsigned int riscv_cbom_block_size;
EXPORT_SYMBOL_GPL(riscv_cbom_block_size);

/* Zicboz cache-block zero operation size; set by riscv_init_cbo_blocksizes(). */
unsigned int riscv_cboz_block_size;
EXPORT_SYMBOL_GPL(riscv_cboz_block_size);
|
|
|
|
|
2023-06-15 00:55:03 +08:00
|
|
|
/*
 * Read the CBO block-size property @name from CPU node @node.
 *
 * The first hart that reports a value establishes the reference in
 * @block_size (and records its ID in @first_hartid); any later hart
 * reporting a different value is warned about, since the kernel uses
 * a single block size for all harts.
 */
static void __init cbo_get_block_size(struct device_node *node,
				      const char *name, u32 *block_size,
				      unsigned long *first_hartid)
{
	unsigned long hartid;
	u32 this_size;

	/* Skip nodes lacking a hart ID or the requested property. */
	if (riscv_of_processor_hartid(node, &hartid) ||
	    of_property_read_u32(node, name, &this_size))
		return;

	if (*block_size == 0) {
		/* First reporter: its value becomes the reference. */
		*block_size = this_size;
		*first_hartid = hartid;
		return;
	}

	if (*block_size != this_size)
		pr_warn("%s mismatched between harts %lu and %lu\n",
			name, *first_hartid, hartid);
}
|
|
|
|
|
2023-06-15 00:55:03 +08:00
|
|
|
/*
 * Discover the Zicbom/Zicboz cache-block sizes at boot.
 *
 * On device-tree systems (ACPI disabled) the sizes are read from each
 * CPU node's "riscv,cbom-block-size" / "riscv,cboz-block-size"
 * properties; on ACPI systems they come from the RHCT table. The
 * globals are only overwritten when a non-zero size was found.
 */
void __init riscv_init_cbo_blocksizes(void)
{
	unsigned long cbom_hartid, cboz_hartid;
	u32 cbom_block_size = 0, cboz_block_size = 0;
	struct device_node *node;
	struct acpi_table_header *rhct;
	acpi_status status;

	if (acpi_disabled) {
		for_each_of_cpu_node(node) {
			/* set block-size for cbom and/or cboz extension if available */
			cbo_get_block_size(node, "riscv,cbom-block-size",
					   &cbom_block_size, &cbom_hartid);
			cbo_get_block_size(node, "riscv,cboz-block-size",
					   &cboz_block_size, &cboz_hartid);
		}
	} else {
		status = acpi_get_table(ACPI_SIG_RHCT, 0, &rhct);
		if (ACPI_FAILURE(status))
			return;

		acpi_get_cbo_block_size(rhct, &cbom_block_size, &cboz_block_size, NULL);
		acpi_put_table((struct acpi_table_header *)rhct);
	}

	if (cbom_block_size)
		riscv_cbom_block_size = cbom_block_size;

	if (cboz_block_size)
		riscv_cboz_block_size = cboz_block_size;
}
|
2024-03-12 16:53:41 -07:00
|
|
|
|
|
|
|
#ifdef CONFIG_SMP
|
|
|
|
static void set_icache_stale_mask(void)
|
|
|
|
{
|
|
|
|
cpumask_t *mask;
|
|
|
|
bool stale_cpu;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Mark every other hart's icache as needing a flush for
|
|
|
|
* this MM. Maintain the previous value of the current
|
|
|
|
* cpu to handle the case when this function is called
|
|
|
|
* concurrently on different harts.
|
|
|
|
*/
|
|
|
|
mask = ¤t->mm->context.icache_stale_mask;
|
|
|
|
stale_cpu = cpumask_test_cpu(smp_processor_id(), mask);
|
|
|
|
|
|
|
|
cpumask_setall(mask);
|
2024-03-12 16:53:43 -07:00
|
|
|
cpumask_assign_cpu(smp_processor_id(), mask, stale_cpu);
|
2024-03-12 16:53:41 -07:00
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
|
|
|
/**
 * riscv_set_icache_flush_ctx() - Enable/disable icache flushing instructions in
 * userspace.
 * @ctx: Set the type of icache flushing instructions permitted/prohibited in
 *	 userspace. Supported values described below.
 *
 * Supported values for ctx:
 *
 * * %PR_RISCV_CTX_SW_FENCEI_ON: Allow fence.i in user space.
 *
 * * %PR_RISCV_CTX_SW_FENCEI_OFF: Disallow fence.i in user space. All threads in
 *   a process will be affected when ``scope == PR_RISCV_SCOPE_PER_PROCESS``.
 *   Therefore, caution must be taken; use this flag only when you can guarantee
 *   that no thread in the process will emit fence.i from this point onward.
 *
 * @scope: Set scope of where icache flushing instructions are allowed to be
 *	   emitted. Supported values described below.
 *
 * Supported values for scope:
 *
 * * %PR_RISCV_SCOPE_PER_PROCESS: Ensure the icache of any thread in this process
 *                                is coherent with instruction storage upon
 *                                migration.
 *
 * * %PR_RISCV_SCOPE_PER_THREAD: Ensure the icache of the current thread is
 *                               coherent with instruction storage upon
 *                               migration.
 *
 * When ``scope == PR_RISCV_SCOPE_PER_PROCESS``, all threads in the process are
 * permitted to emit icache flushing instructions. Whenever any thread in the
 * process is migrated, the corresponding hart's icache will be guaranteed to be
 * consistent with instruction storage. This does not enforce any guarantees
 * outside of migration. If a thread modifies an instruction that another thread
 * may attempt to execute, the other thread must still emit an icache flushing
 * instruction before attempting to execute the potentially modified
 * instruction. This must be performed by the user-space program.
 *
 * In per-thread context (eg. ``scope == PR_RISCV_SCOPE_PER_THREAD``) only the
 * thread calling this function is permitted to emit icache flushing
 * instructions. When the thread is migrated, the corresponding hart's icache
 * will be guaranteed to be consistent with instruction storage.
 *
 * On kernels configured without SMP, this function is a nop as migrations
 * across harts will not occur.
 *
 * Return: 0 on success, -EINVAL for an unsupported @ctx or @scope value.
 */
int riscv_set_icache_flush_ctx(unsigned long ctx, unsigned long scope)
{
#ifdef CONFIG_SMP
	switch (ctx) {
	case PR_RISCV_CTX_SW_FENCEI_ON:
		switch (scope) {
		case PR_RISCV_SCOPE_PER_PROCESS:
			current->mm->context.force_icache_flush = true;
			break;
		case PR_RISCV_SCOPE_PER_THREAD:
			current->thread.force_icache_flush = true;
			break;
		default:
			return -EINVAL;
		}
		break;
	case PR_RISCV_CTX_SW_FENCEI_OFF:
		switch (scope) {
		case PR_RISCV_SCOPE_PER_PROCESS:
			current->mm->context.force_icache_flush = false;

			/* Userspace opted out: future migrations rely on deferred flushes. */
			set_icache_stale_mask();
			break;
		case PR_RISCV_SCOPE_PER_THREAD:
			current->thread.force_icache_flush = false;

			/* Userspace opted out: future migrations rely on deferred flushes. */
			set_icache_stale_mask();
			break;
		default:
			return -EINVAL;
		}
		break;
	default:
		return -EINVAL;
	}
	return 0;
#else
	/* !SMP: no migration between harts, so only validate the arguments. */
	switch (ctx) {
	case PR_RISCV_CTX_SW_FENCEI_ON:
	case PR_RISCV_CTX_SW_FENCEI_OFF:
		return 0;
	default:
		return -EINVAL;
	}
#endif
}
|