LoongArch: vDSO: Switch to generic storage implementation

The generic storage implementation provides the same features as the
custom one. However, it can be shared between architectures, making
maintenance easier.

Co-developed-by: Nam Cao <namcao@linutronix.de>
Signed-off-by: Nam Cao <namcao@linutronix.de>
Signed-off-by: Thomas Weißschuh <thomas.weissschuh@linutronix.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Link: https://lore.kernel.org/all/20250204-vdso-store-rng-v3-10-13a4669dfc8c@linutronix.de
commit d2862bb9d9 (parent 46fe55b204)
Authored by Thomas Weißschuh on 2025-02-04 13:05:42 +01:00, committed by Thomas Gleixner

11 changed files with 39 additions and 177 deletions
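In short: the old code hand-built the vvar area (one generic vdso data page, a time-namespace page, and LoongArch-specific pages holding the per-CPU node IDs plus the vDSO RNG state) and served it from a custom vvar_fault() handler. With GENERIC_VDSO_DATA_STORE and ARCH_HAS_VDSO_ARCH_DATA selected, the generic data store owns that layout and the architecture only supplies struct vdso_arch_data. A minimal sketch of the access-pattern change, restricted to symbols that appear in the hunks below (the "before" vDSO line is condensed from the removed get_pcpu_data() helper):

    /* before: arch-private globals and layout (all removed below) */
    vdso_pdata[cpu].node = cpu_to_node(cpu);        /* kernel side */
    *node = _loongarch_data.pdata[cpu_id].node;     /* vDSO side   */

    /* after: storage provided by the generic vDSO data store */
    vdso_k_arch_data->pdata[cpu].node = cpu_to_node(cpu);   /* kernel side */
    *node = vdso_u_arch_data.pdata[cpu_id].node;            /* vDSO side   */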

arch/loongarch/Kconfig

@@ -30,6 +30,7 @@ config LOONGARCH
 	select ARCH_HAS_SET_MEMORY
 	select ARCH_HAS_SET_DIRECT_MAP
 	select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
+	select ARCH_HAS_VDSO_ARCH_DATA
 	select ARCH_INLINE_READ_LOCK if !PREEMPTION
 	select ARCH_INLINE_READ_LOCK_BH if !PREEMPTION
 	select ARCH_INLINE_READ_LOCK_IRQ if !PREEMPTION
@@ -106,6 +107,7 @@ config LOONGARCH
 	select GENERIC_SCHED_CLOCK
 	select GENERIC_SMP_IDLE_THREAD
 	select GENERIC_TIME_VSYSCALL
+	select GENERIC_VDSO_DATA_STORE
 	select GENERIC_VDSO_TIME_NS
 	select GPIOLIB
 	select HAS_IOPORT

arch/loongarch/include/asm/vdso.h

@@ -31,7 +31,6 @@ struct loongarch_vdso_info {
 	unsigned long size;
 	unsigned long offset_sigreturn;
 	struct vm_special_mapping code_mapping;
-	struct vm_special_mapping data_mapping;
 };

 extern struct loongarch_vdso_info vdso_info;

arch/loongarch/include/asm/vdso/arch_data.h (new file)

@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Author: Huacai Chen <chenhuacai@loongson.cn>
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+
+#ifndef _VDSO_ARCH_DATA_H
+#define _VDSO_ARCH_DATA_H
+
+#ifndef __ASSEMBLY__
+
+#include <asm/asm.h>
+#include <asm/vdso.h>
+
+struct vdso_pcpu_data {
+	u32 node;
+} ____cacheline_aligned_in_smp;
+
+struct vdso_arch_data {
+	struct vdso_pcpu_data pdata[NR_CPUS];
+};
+
+#endif /* __ASSEMBLY__ */
+
+#endif

arch/loongarch/include/asm/vdso/getrandom.h

@@ -28,11 +28,6 @@ static __always_inline ssize_t getrandom_syscall(void *_buffer, size_t _len, unsigned int _flags)
 	return ret;
 }

-static __always_inline const struct vdso_rng_data *__arch_get_vdso_rng_data(void)
-{
-	return &_loongarch_data.rng_data;
-}
-
 #endif /* !__ASSEMBLY__ */

 #endif /* __ASM_VDSO_GETRANDOM_H */

arch/loongarch/include/asm/vdso/gettimeofday.h

@@ -72,7 +72,7 @@ static __always_inline int clock_getres_fallback(
 }

 static __always_inline u64 __arch_get_hw_counter(s32 clock_mode,
-						 const struct vdso_data *vd)
+						 const struct vdso_time_data *vd)
 {
 	uint64_t count;
@@ -89,18 +89,6 @@ static inline bool loongarch_vdso_hres_capable(void)
 }
 #define __arch_vdso_hres_capable loongarch_vdso_hres_capable

-static __always_inline const struct vdso_data *__arch_get_vdso_data(void)
-{
-	return _vdso_data;
-}
-
-#ifdef CONFIG_TIME_NS
-static __always_inline
-const struct vdso_data *__arch_get_timens_vdso_data(const struct vdso_data *vd)
-{
-	return _timens_data;
-}
-#endif
-
 #endif /* !__ASSEMBLY__ */

 #endif /* __ASM_VDSO_GETTIMEOFDAY_H */

arch/loongarch/include/asm/vdso/vdso.h

@@ -12,43 +12,9 @@
 #include <asm/asm.h>
 #include <asm/page.h>
 #include <asm/vdso.h>

+#include <vdso/datapage.h>
+
-struct vdso_pcpu_data {
-	u32 node;
-} ____cacheline_aligned_in_smp;
-
-struct loongarch_vdso_data {
-	struct vdso_pcpu_data pdata[NR_CPUS];
-	struct vdso_rng_data rng_data;
-};
-
-/*
- * The layout of vvar:
- *
- *                      high
- * +---------------------+--------------------------+
- * | loongarch vdso data | LOONGARCH_VDSO_DATA_SIZE |
- * +---------------------+--------------------------+
- * |  time-ns vdso data  |        PAGE_SIZE         |
- * +---------------------+--------------------------+
- *  | generic vdso data  |        PAGE_SIZE         |
- * +---------------------+--------------------------+
- *                      low
- */
-#define LOONGARCH_VDSO_DATA_SIZE PAGE_ALIGN(sizeof(struct loongarch_vdso_data))
-#define LOONGARCH_VDSO_DATA_PAGES (LOONGARCH_VDSO_DATA_SIZE >> PAGE_SHIFT)
-
-enum vvar_pages {
-	VVAR_GENERIC_PAGE_OFFSET,
-	VVAR_TIMENS_PAGE_OFFSET,
-	VVAR_LOONGARCH_PAGES_START,
-	VVAR_LOONGARCH_PAGES_END = VVAR_LOONGARCH_PAGES_START + LOONGARCH_VDSO_DATA_PAGES - 1,
-	VVAR_NR_PAGES,
-};
-
-#define VVAR_SIZE (VVAR_NR_PAGES << PAGE_SHIFT)
-
-extern struct loongarch_vdso_data _loongarch_data __attribute__((visibility("hidden")));
+#define VVAR_SIZE (VDSO_NR_PAGES << PAGE_SHIFT)

 #endif /* __ASSEMBLY__ */

arch/loongarch/include/asm/vdso/vsyscall.h

@@ -6,23 +6,6 @@
 #include <vdso/datapage.h>

-extern struct vdso_data *vdso_data;
-extern struct vdso_rng_data *vdso_rng_data;
-
-static __always_inline
-struct vdso_data *__loongarch_get_k_vdso_data(void)
-{
-	return vdso_data;
-}
-#define __arch_get_k_vdso_data __loongarch_get_k_vdso_data
-
-static __always_inline
-struct vdso_rng_data *__loongarch_get_k_vdso_rng_data(void)
-{
-	return vdso_rng_data;
-}
-#define __arch_get_k_vdso_rng_data __loongarch_get_k_vdso_rng_data
-
 /* The asm-generic header needs to be included after the definitions above */
 #include <asm-generic/vdso/vsyscall.h>

arch/loongarch/kernel/asm-offsets.c

@@ -315,6 +315,6 @@ static void __used output_vdso_defines(void)
 {
 	COMMENT("LoongArch vDSO offsets.");
-	DEFINE(__VVAR_PAGES, VVAR_NR_PAGES);
+	DEFINE(__VDSO_PAGES, VDSO_NR_PAGES);
 	BLANK();
 }

arch/loongarch/kernel/vdso.c

@@ -14,7 +14,7 @@
 #include <linux/random.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
-#include <linux/time_namespace.h>
+#include <linux/vdso_datastore.h>

 #include <asm/page.h>
 #include <asm/vdso.h>
@@ -25,18 +25,6 @@
 extern char vdso_start[], vdso_end[];

-/* Kernel-provided data used by the VDSO. */
-static union vdso_data_store generic_vdso_data __page_aligned_data;
-
-static union {
-	u8 page[LOONGARCH_VDSO_DATA_SIZE];
-	struct loongarch_vdso_data vdata;
-} loongarch_vdso_data __page_aligned_data;
-
-struct vdso_data *vdso_data = generic_vdso_data.data;
-struct vdso_pcpu_data *vdso_pdata = loongarch_vdso_data.vdata.pdata;
-struct vdso_rng_data *vdso_rng_data = &loongarch_vdso_data.vdata.rng_data;
-
 static int vdso_mremap(const struct vm_special_mapping *sm, struct vm_area_struct *new_vma)
 {
 	current->mm->context.vdso = (void *)(new_vma->vm_start);
@@ -44,53 +32,12 @@ static int vdso_mremap(const struct vm_special_mapping *sm, struct vm_area_struct *new_vma)
 	return 0;
 }

-static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
-			     struct vm_area_struct *vma, struct vm_fault *vmf)
-{
-	unsigned long pfn;
-	struct page *timens_page = find_timens_vvar_page(vma);
-
-	switch (vmf->pgoff) {
-	case VVAR_GENERIC_PAGE_OFFSET:
-		if (!timens_page)
-			pfn = sym_to_pfn(vdso_data);
-		else
-			pfn = page_to_pfn(timens_page);
-		break;
-#ifdef CONFIG_TIME_NS
-	case VVAR_TIMENS_PAGE_OFFSET:
-		/*
-		 * If a task belongs to a time namespace then a namespace specific
-		 * VVAR is mapped with the VVAR_GENERIC_PAGE_OFFSET and the real
-		 * VVAR page is mapped with the VVAR_TIMENS_PAGE_OFFSET offset.
-		 * See also the comment near timens_setup_vdso_data().
-		 */
-		if (!timens_page)
-			return VM_FAULT_SIGBUS;
-		else
-			pfn = sym_to_pfn(vdso_data);
-		break;
-#endif /* CONFIG_TIME_NS */
-	case VVAR_LOONGARCH_PAGES_START ... VVAR_LOONGARCH_PAGES_END:
-		pfn = sym_to_pfn(&loongarch_vdso_data) + vmf->pgoff - VVAR_LOONGARCH_PAGES_START;
-		break;
-	default:
-		return VM_FAULT_SIGBUS;
-	}
-
-	return vmf_insert_pfn(vma, vmf->address, pfn);
-}
-
 struct loongarch_vdso_info vdso_info = {
 	.vdso = vdso_start,
 	.code_mapping = {
 		.name = "[vdso]",
 		.mremap = vdso_mremap,
 	},
-	.data_mapping = {
-		.name = "[vvar]",
-		.fault = vvar_fault,
-	},
 	.offset_sigreturn = vdso_offset_sigreturn,
 };
@@ -101,7 +48,7 @@ static int __init init_vdso(void)
 	BUG_ON(!PAGE_ALIGNED(vdso_info.vdso));

 	for_each_possible_cpu(cpu)
-		vdso_pdata[cpu].node = cpu_to_node(cpu);
+		vdso_k_arch_data->pdata[cpu].node = cpu_to_node(cpu);

 	vdso_info.size = PAGE_ALIGN(vdso_end - vdso_start);
 	vdso_info.code_mapping.pages =
@@ -115,37 +62,6 @@ static int __init init_vdso(void)
 }
 subsys_initcall(init_vdso);

-#ifdef CONFIG_TIME_NS
-struct vdso_data *arch_get_vdso_data(void *vvar_page)
-{
-	return (struct vdso_data *)(vvar_page);
-}
-
-/*
- * The vvar mapping contains data for a specific time namespace, so when a
- * task changes namespace we must unmap its vvar data for the old namespace.
- * Subsequent faults will map in data for the new namespace.
- *
- * For more details see timens_setup_vdso_data().
- */
-int vdso_join_timens(struct task_struct *task, struct time_namespace *ns)
-{
-	struct mm_struct *mm = task->mm;
-	struct vm_area_struct *vma;
-
-	VMA_ITERATOR(vmi, mm, 0);
-
-	mmap_read_lock(mm);
-	for_each_vma(vmi, vma) {
-		if (vma_is_special_mapping(vma, &vdso_info.data_mapping))
-			zap_vma_pages(vma);
-	}
-	mmap_read_unlock(mm);
-
-	return 0;
-}
-#endif
-
 static unsigned long vdso_base(void)
 {
 	unsigned long base = STACK_TOP;
@@ -181,9 +97,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 		goto out;
 	}

-	vma = _install_special_mapping(mm, data_addr, VVAR_SIZE,
-				       VM_READ | VM_MAYREAD | VM_PFNMAP,
-				       &info->data_mapping);
+	vma = vdso_install_vvar_mapping(mm, data_addr);
 	if (IS_ERR(vma)) {
 		ret = PTR_ERR(vma);
 		goto out;

arch/loongarch/vdso/vdso.lds.S

@@ -5,6 +5,7 @@
  */
 #include <asm/page.h>
 #include <generated/asm-offsets.h>
+#include <vdso/datapage.h>

 OUTPUT_FORMAT("elf64-loongarch", "elf64-loongarch", "elf64-loongarch")
@@ -12,11 +13,8 @@ OUTPUT_ARCH(loongarch)

 SECTIONS
 {
-	PROVIDE(_vdso_data = . - __VVAR_PAGES * PAGE_SIZE);
-#ifdef CONFIG_TIME_NS
-	PROVIDE(_timens_data = _vdso_data + PAGE_SIZE);
-#endif
-	PROVIDE(_loongarch_data = _vdso_data + 2 * PAGE_SIZE);
+	VDSO_VVAR_SYMS
+
 	. = SIZEOF_HEADERS;

 	.hash		: { *(.hash) } :text

arch/loongarch/vdso/vgetcpu.c

@@ -19,27 +19,19 @@ static __always_inline int read_cpu_id(void)
 	return cpu_id;
 }

-static __always_inline const struct vdso_pcpu_data *get_pcpu_data(void)
-{
-	return _loongarch_data.pdata;
-}
-
 extern
 int __vdso_getcpu(unsigned int *cpu, unsigned int *node, struct getcpu_cache *unused);

 int __vdso_getcpu(unsigned int *cpu, unsigned int *node, struct getcpu_cache *unused)
 {
 	int cpu_id;
-	const struct vdso_pcpu_data *data;

 	cpu_id = read_cpu_id();

 	if (cpu)
 		*cpu = cpu_id;

-	if (node) {
-		data = get_pcpu_data();
-		*node = data[cpu_id].node;
-	}
+	if (node)
+		*node = vdso_u_arch_data.pdata[cpu_id].node;

 	return 0;
 }
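As a sanity check from userspace, here is a hedged sketch: it assumes glibc 2.29+ for the getcpu() wrapper, and whether the call is actually serviced by __vdso_getcpu() rather than the getcpu syscall depends on the libc's vDSO symbol lookup. When it is, the node value comes from the vdso_u_arch_data.pdata[] entries populated in init_vdso() above.

    #define _GNU_SOURCE
    #include <sched.h>
    #include <stdio.h>

    int main(void)
    {
            unsigned int cpu, node;

            /* may be resolved by __vdso_getcpu() without entering the kernel */
            if (getcpu(&cpu, &node) == 0)
                    printf("running on cpu %u, numa node %u\n", cpu, node);

            return 0;
    }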