linux/arch/riscv/kernel/suspend.c
Samuel Holland 1b57747e97
riscv: Enable cbo.zero only when all harts support Zicboz
Currently, we enable cbo.zero for usermode on each hart that supports
the Zicboz extension. This means that the [ms]envcfg CSR value may
differ between harts. Other features, such as pointer masking and CFI,
require setting [ms]envcfg bits on a per-thread basis. The combination
of these two adds quite some complexity and overhead to context
switching, as we would need to maintain two separate masks for the
per-hart and per-thread bits. Andrew Jones, who originally added Zicboz
support, writes[1][2]:

  I've approached Zicboz the same way I would approach all
  extensions, which is to be per-hart. I'm not currently aware of
  a platform that is / will be composed of harts where some have
  Zicboz and others don't, but there's nothing stopping a platform
  like that from being built.

  So, how about we add code that confirms Zicboz is on all harts.
  If any hart does not have it, then we complain loudly and disable
  it on all the other harts. If it was just a hardware description
  bug, then it'll get fixed. If there's actually a platform which
  doesn't have Zicboz on all harts, then, when the issue is reported,
  we can decide to not support it, support it with defconfig, or
  support it under a Kconfig guard which must be enabled by the user.

Let's follow his suggested solution and require the extension to be
available on all harts, so the envcfg CSR value does not need to change
when a thread migrates between harts. Since we are doing this for all
extensions with fields in envcfg, the CSR itself only needs to be saved/
restored when it is present on all harts.

This should not be a regression as no known hardware has asymmetric
Zicboz support, but if anyone reports seeing the warning, we will
re-evaluate our solution.

Link: https://lore.kernel.org/linux-riscv/20240322-168f191eeb8479b2ea169a5e@orel/ [1]
Link: https://lore.kernel.org/linux-riscv/20240323-28943722feb57a41fb0ff488@orel/ [2]
Reviewed-by: Andrew Jones <ajones@ventanamicro.com>
Reviewed-by: Conor Dooley <conor.dooley@microchip.com>
Reviewed-by: Deepak Gupta <debug@rivosinc.com>
Signed-off-by: Samuel Holland <samuel.holland@sifive.com>
Reviewed-by: Charlie Jenkins <charlie@rivosinc.com>
Tested-by: Charlie Jenkins <charlie@rivosinc.com>
Link: https://lore.kernel.org/r/20240814081126.956287-2-samuel.holland@sifive.com
Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
2024-10-05 08:51:13 -07:00

183 lines
4.4 KiB
C

// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2021 Western Digital Corporation or its affiliates.
* Copyright (c) 2022 Ventana Micro Systems Inc.
*/
#define pr_fmt(fmt) "suspend: " fmt
#include <linux/ftrace.h>
#include <linux/suspend.h>
#include <asm/csr.h>
#include <asm/sbi.h>
#include <asm/suspend.h>
/*
 * suspend_save_csrs() - save the CSRs that must survive a suspend cycle.
 * @context: per-CPU storage the register values are written into.
 *
 * envcfg is saved only when the envcfg CSR is usable, i.e. when the
 * XLINUXENVCFG pseudo-extension is reported (present on all harts, so the
 * value does not change when a thread migrates between harts).
 */
void suspend_save_csrs(struct suspend_context *context)
{
	if (riscv_has_extension_unlikely(RISCV_ISA_EXT_XLINUXENVCFG))
		context->envcfg = csr_read(CSR_ENVCFG);
	context->tvec = csr_read(CSR_TVEC);
	context->ie = csr_read(CSR_IE);

	/*
	 * No need to save/restore IP CSR (i.e. MIP or SIP) because:
	 *
	 * 1. For no-MMU (M-mode) kernel, the bits in MIP are set by
	 *    external devices (such as interrupt controller, timer, etc).
	 * 2. For MMU (S-mode) kernel, the bits in SIP are set by
	 *    M-mode firmware and external devices (such as interrupt
	 *    controller, etc).
	 */

#ifdef CONFIG_MMU
	/* Address-translation configuration only exists on MMU kernels. */
	context->satp = csr_read(CSR_SATP);
#endif
}
/*
 * suspend_restore_csrs() - restore the CSRs saved by suspend_save_csrs().
 * @context: the previously saved register values.
 *
 * NOTE(review): SCRATCH is reset to 0 rather than restored — this appears to
 * be the trap-entry convention for "currently in kernel"; confirm against the
 * entry code.
 */
void suspend_restore_csrs(struct suspend_context *context)
{
	csr_write(CSR_SCRATCH, 0);
	/* Only written back if it was captured (see suspend_save_csrs()). */
	if (riscv_has_extension_unlikely(RISCV_ISA_EXT_XLINUXENVCFG))
		csr_write(CSR_ENVCFG, context->envcfg);
	csr_write(CSR_TVEC, context->tvec);
	csr_write(CSR_IE, context->ie);

#ifdef CONFIG_MMU
	csr_write(CSR_SATP, context->satp);
#endif
}
/*
 * cpu_suspend() - suspend this CPU via a platform-specific finisher.
 * @arg:    opaque value passed through to @finish.
 * @finish: callback that actually enters the low-power state; it receives
 *          @arg, the physical address of the resume entry point
 *          (__cpu_resume_enter) and the address of the saved context.
 *          On success it does not return — execution resumes at
 *          __cpu_resume_enter instead.
 *
 * Returns 0 on a successful suspend/resume cycle, -EINVAL for a NULL
 * finisher, or the finisher's negative error code on failure.
 */
int cpu_suspend(unsigned long arg,
		int (*finish)(unsigned long arg,
			      unsigned long entry,
			      unsigned long context))
{
	int rc = 0;
	struct suspend_context context = { 0 };

	/* Finisher should be non-NULL */
	if (!finish)
		return -EINVAL;

	/* Save additional CSRs */
	suspend_save_csrs(&context);

	/*
	 * Function graph tracer state gets inconsistent when the kernel
	 * calls functions that never return (aka finishers) hence disable
	 * graph tracing during their execution.
	 */
	pause_graph_tracing();

	/* Save context on stack */
	if (__cpu_suspend_enter(&context)) {
		/* Call the finisher */
		rc = finish(arg, __pa_symbol(__cpu_resume_enter),
			    (ulong)&context);

		/*
		 * Should never reach here, unless the suspend finisher
		 * fails. Successful cpu_suspend() should return from
		 * __cpu_resume_enter()
		 */
		if (!rc)
			rc = -EOPNOTSUPP;
	}

	/* Enable function graph tracer */
	unpause_graph_tracing();

	/* Restore additional CSRs */
	suspend_restore_csrs(&context);

	return rc;
}
#ifdef CONFIG_RISCV_SBI
static int sbi_system_suspend(unsigned long sleep_type,
unsigned long resume_addr,
unsigned long opaque)
{
struct sbiret ret;
ret = sbi_ecall(SBI_EXT_SUSP, SBI_EXT_SUSP_SYSTEM_SUSPEND,
sleep_type, resume_addr, opaque, 0, 0, 0);
if (ret.error)
return sbi_err_map_linux_errno(ret.error);
return ret.value;
}
/*
 * platform_suspend_ops::enter hook. @state is ignored: .valid restricts us to
 * PM_SUSPEND_MEM, which always maps to SBI suspend-to-RAM here.
 */
static int sbi_system_suspend_enter(suspend_state_t state)
{
	const unsigned long sleep_type = SBI_SUSP_SLEEP_TYPE_SUSPEND_TO_RAM;

	return cpu_suspend(sleep_type, sbi_system_suspend);
}
/* System-suspend backend built on the SBI SUSP extension. */
static const struct platform_suspend_ops sbi_system_suspend_ops = {
	.enter = sbi_system_suspend_enter,
	.valid = suspend_valid_only_mem,
};
/*
 * Register the SBI system-suspend backend when the firmware provides it:
 * requires SBI spec >= 2.0 and a successful probe of the SUSP extension.
 * Always returns 0 — absence of the extension is not an error.
 */
static int __init sbi_system_suspend_init(void)
{
	if (sbi_spec_version < sbi_mk_version(2, 0) ||
	    sbi_probe_extension(SBI_EXT_SUSP) <= 0)
		return 0;

	pr_info("SBI SUSP extension detected\n");
	if (IS_ENABLED(CONFIG_SUSPEND))
		suspend_set_ops(&sbi_system_suspend_ops);

	return 0;
}

arch_initcall(sbi_system_suspend_init);
static int sbi_suspend_finisher(unsigned long suspend_type,
unsigned long resume_addr,
unsigned long opaque)
{
struct sbiret ret;
ret = sbi_ecall(SBI_EXT_HSM, SBI_EXT_HSM_HART_SUSPEND,
suspend_type, resume_addr, opaque, 0, 0, 0);
return (ret.error) ? sbi_err_map_linux_errno(ret.error) : 0;
}
/*
 * riscv_sbi_hart_suspend() - suspend the calling hart into @state.
 *
 * Non-retentive states lose register contents, so route them through
 * cpu_suspend() to save/restore context; retentive states can call the
 * finisher directly.
 */
int riscv_sbi_hart_suspend(u32 state)
{
	return (state & SBI_HSM_SUSP_NON_RET_BIT) ?
	       cpu_suspend(state, sbi_suspend_finisher) :
	       sbi_suspend_finisher(state, 0, 0);
}
/*
 * riscv_sbi_suspend_state_is_valid() - check @state against the SBI HSM
 * suspend-type encoding. A state is invalid if it falls in either reserved
 * gap: above the default retentive/non-retentive values but below the
 * platform-specific ranges.
 */
bool riscv_sbi_suspend_state_is_valid(u32 state)
{
	bool in_ret_gap = state > SBI_HSM_SUSPEND_RET_DEFAULT &&
			  state < SBI_HSM_SUSPEND_RET_PLATFORM;
	bool in_nonret_gap = state > SBI_HSM_SUSPEND_NON_RET_DEFAULT &&
			     state < SBI_HSM_SUSPEND_NON_RET_PLATFORM;

	return !(in_ret_gap || in_nonret_gap);
}
/*
 * riscv_sbi_hsm_is_supported() - probe for SBI HSM suspend support.
 *
 * HSM suspend is usable only when the SBI spec version is at least 0.3
 * and the firmware reports the HSM extension; otherwise log and bail.
 */
bool riscv_sbi_hsm_is_supported(void)
{
	if (sbi_spec_version >= sbi_mk_version(0, 3) &&
	    sbi_probe_extension(SBI_EXT_HSM))
		return true;

	pr_info("HSM suspend not available\n");
	return false;
}
#endif /* CONFIG_RISCV_SBI */