linux/include/rv/ltl_monitor.h
Nam Cao 0a94925255 rv/ltl: Do not execute the Buchi automaton twice on start condition
On the start condition of a Buchi automaton, the automaton is executed twice.

This is fine for now, as none of the current LTL operators care about it. But
it would break the 'next' operator, which will be introduced in a follow-up
patch.

To prepare for the introduction of the 'next' operator, only execute the
automaton once on the start condition.
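As context, the start path this refers to is visible in ltl_atom_update() in the file below: an atom update that arrives before the monitor has a valid state only attempts to start the automaton and returns, rather than also stepping it through ltl_validate(). Roughly (excerpted from the code later on this page, not a separate API):

        if (!rv_ltl_valid_state(mon)) {
                /* not started yet: try to start, but do not also step the automaton */
                ltl_attempt_start(task, mon);
                return;
        }

        ltl_validate(task, mon);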

Cc: John Ogness <john.ogness@linutronix.de>
Cc: Masami Hiramatsu <mhiramat@kernel.org>
Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Cc: Gabriele Monaco <gmonaco@redhat.com>
Link: https://lore.kernel.org/9379f4e7b9c1c69a6dca3e20a22936c850a25ca7.1752239482.git.namcao@linutronix.de
Signed-off-by: Nam Cao <namcao@linutronix.de>
Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
2025-07-22 20:34:41 -04:00


/* SPDX-License-Identifier: GPL-2.0 */

/**
 * This file must be combined with the $(MODEL_NAME).h file generated by
 * tools/verification/rvgen.
 */

#include <linux/args.h>
#include <linux/rv.h>
#include <linux/stringify.h>
#include <linux/seq_buf.h>
#include <rv/instrumentation.h>
#include <trace/events/task.h>
#include <trace/events/sched.h>

#ifndef MONITOR_NAME
#error "Please include $(MODEL_NAME).h generated by rvgen"
#endif

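/* Report a violation to the reactor, if reactors are available and reacting is enabled. */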
#ifdef CONFIG_RV_REACTORS
#define RV_MONITOR_NAME CONCATENATE(rv_, MONITOR_NAME)
static struct rv_monitor RV_MONITOR_NAME;

static void rv_cond_react(struct task_struct *task)
{
        if (!rv_reacting_on() || !RV_MONITOR_NAME.react)
                return;
        RV_MONITOR_NAME.react("rv: "__stringify(MONITOR_NAME)": %s[%d]: violation detected\n",
                              task->comm, task->pid);
}
#else
static void rv_cond_react(struct task_struct *task)
{
}
#endif

static int ltl_monitor_slot = RV_PER_TASK_MONITOR_INIT;
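
/* These two must be implemented by the monitor that includes this file. */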
static void ltl_atoms_fetch(struct task_struct *task, struct ltl_monitor *mon);
static void ltl_atoms_init(struct task_struct *task, struct ltl_monitor *mon, bool task_creation);

static struct ltl_monitor *ltl_get_monitor(struct task_struct *task)
{
        return &task->rv[ltl_monitor_slot].ltl_mon;
}
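
/*
 * Reset the per-task monitor: clear the Buchi states, mark every atomic
 * proposition as unknown, then let the monitor set initial values and fetch
 * whatever it can.
 */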
static void ltl_task_init(struct task_struct *task, bool task_creation)
{
        struct ltl_monitor *mon = ltl_get_monitor(task);

        memset(&mon->states, 0, sizeof(mon->states));

        for (int i = 0; i < LTL_NUM_ATOM; ++i)
                __set_bit(i, mon->unknown_atoms);

        ltl_atoms_init(task, mon, task_creation);
        ltl_atoms_fetch(task, mon);
}

static void handle_task_newtask(void *data, struct task_struct *task, unsigned long flags)
{
        ltl_task_init(task, true);
}
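
/*
 * Reserve a per-task monitor slot, hook task creation, and initialise the
 * per-task state for every existing task and each CPU's idle task.
 */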
static int ltl_monitor_init(void)
{
        struct task_struct *g, *p;
        int ret, cpu;

        ret = rv_get_task_monitor_slot();
        if (ret < 0)
                return ret;

        ltl_monitor_slot = ret;

        rv_attach_trace_probe(name, task_newtask, handle_task_newtask);

        read_lock(&tasklist_lock);

        for_each_process_thread(g, p)
                ltl_task_init(p, false);

        for_each_present_cpu(cpu)
                ltl_task_init(idle_task(cpu), false);

        read_unlock(&tasklist_lock);

        return 0;
}

static void ltl_monitor_destroy(void)
{
        rv_detach_trace_probe(name, task_newtask, handle_task_newtask);

        rv_put_task_monitor_slot(ltl_monitor_slot);
        ltl_monitor_slot = RV_PER_TASK_MONITOR_INIT;
}
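
/* The automaton has no valid state left: report the violation. */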
static void ltl_illegal_state(struct task_struct *task, struct ltl_monitor *mon)
{
        CONCATENATE(trace_error_, MONITOR_NAME)(task);
        rv_cond_react(task);
}
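
/* The automaton can only be started once all atomic propositions have known values. */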
static void ltl_attempt_start(struct task_struct *task, struct ltl_monitor *mon)
{
        if (rv_ltl_all_atoms_known(mon))
                ltl_start(task, mon);
}

static inline void ltl_atom_set(struct ltl_monitor *mon, enum ltl_atom atom, bool value)
{
        __clear_bit(atom, mon->unknown_atoms);

        if (value)
                __set_bit(atom, mon->atoms);
        else
                __clear_bit(atom, mon->atoms);
}
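
/*
 * Emit the monitor's event tracepoint: the current Buchi states, the atomic
 * propositions that are true, and the possible next states.
 */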
static void
ltl_trace_event(struct task_struct *task, struct ltl_monitor *mon, unsigned long *next_state)
{
        const char *format_str = "%s";
        DECLARE_SEQ_BUF(atoms, 64);
        char states[32], next[32];
        int i;

        if (!CONCATENATE(CONCATENATE(trace_event_, MONITOR_NAME), _enabled)())
                return;

        snprintf(states, sizeof(states), "%*pbl", RV_MAX_BA_STATES, mon->states);
        snprintf(next, sizeof(next), "%*pbl", RV_MAX_BA_STATES, next_state);

        for (i = 0; i < LTL_NUM_ATOM; ++i) {
                if (test_bit(i, mon->atoms)) {
                        seq_buf_printf(&atoms, format_str, ltl_atom_str(i));
                        format_str = ",%s";
                }
        }

        CONCATENATE(trace_event_, MONITOR_NAME)(task, states, atoms.buffer, next);
}
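
/*
 * Execute one step of the Buchi automaton: compute every possible next state
 * from the current states and the atom values. If the automaton ends up
 * without a valid state, the property has been violated.
 */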
static void ltl_validate(struct task_struct *task, struct ltl_monitor *mon)
{
        DECLARE_BITMAP(next_states, RV_MAX_BA_STATES) = {0};

        if (!rv_ltl_valid_state(mon))
                return;

        for (unsigned int i = 0; i < RV_NUM_BA_STATES; ++i) {
                if (test_bit(i, mon->states))
                        ltl_possible_next_states(mon, i, next_states);
        }

        ltl_trace_event(task, mon, next_states);

        memcpy(mon->states, next_states, sizeof(next_states));

        if (!rv_ltl_valid_state(mon))
                ltl_illegal_state(task, mon);
}
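
/*
 * Update the value of an atomic proposition and advance the automaton by one
 * step. If the monitor does not have a valid state yet, only attempt to start
 * it; the automaton is executed at most once per update.
 */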
static void ltl_atom_update(struct task_struct *task, enum ltl_atom atom, bool value)
{
        struct ltl_monitor *mon = ltl_get_monitor(task);

        ltl_atom_set(mon, atom, value);
        ltl_atoms_fetch(task, mon);

        if (!rv_ltl_valid_state(mon)) {
                ltl_attempt_start(task, mon);
                return;
        }

        ltl_validate(task, mon);
}
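
/*
 * Like ltl_atom_update(), but the value only holds for a single step: the
 * atom is reverted to !value immediately afterwards and the automaton is
 * stepped again.
 */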
static void __maybe_unused ltl_atom_pulse(struct task_struct *task, enum ltl_atom atom, bool value)
{
        struct ltl_monitor *mon = ltl_get_monitor(task);

        ltl_atom_update(task, atom, value);

        ltl_atom_set(mon, atom, !value);
        ltl_validate(task, mon);
}
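
For orientation, a monitor that consumes this header would look roughly like the sketch below. This is not from the kernel tree: the model name ("mymodel"), the atom names (LTL_SLEEPING, LTL_REALTIME, LTL_WAKING) and the choice of the sched_waking tracepoint are made up for illustration, and the include order is inferred from the #error check above (the generated $(MODEL_NAME).h is included first so that MONITOR_NAME and the generated ltl_* model helpers are defined). The monitor supplies ltl_atoms_init() and ltl_atoms_fetch(), which this file forward-declares, and feeds events in through ltl_atom_update() / ltl_atom_pulse().

        /* Hypothetical monitor source combining a generated model with ltl_monitor.h. */
        #include <linux/sched.h>
        #include <linux/sched/rt.h>
        #include <rv/instrumentation.h>

        #include "mymodel.h"            /* generated by tools/verification/rvgen:     */
                                        /* defines MONITOR_NAME, enum ltl_atom,       */
                                        /* ltl_start(), ltl_possible_next_states()... */
        #include <rv/ltl_monitor.h>     /* this file */

        /* Give the atoms their initial values when a task is created or first seen. */
        static void ltl_atoms_init(struct task_struct *task, struct ltl_monitor *mon, bool task_creation)
        {
                if (task_creation)
                        ltl_atom_set(mon, LTL_SLEEPING, false);
        }

        /* Fill in atoms that can always be derived from the task state. */
        static void ltl_atoms_fetch(struct task_struct *task, struct ltl_monitor *mon)
        {
                ltl_atom_set(mon, LTL_REALTIME, rt_task(task));
        }

        /* Tracepoint handler feeding an event-like atom into the automaton. */
        static void handle_sched_waking(void *data, struct task_struct *task)
        {
                ltl_atom_pulse(task, LTL_WAKING, true);
        }

The monitor's enable and disable paths are expected to call ltl_monitor_init() and ltl_monitor_destroy() and to attach handlers such as the one above with rv_attach_trace_probe(); that wiring is generated by rvgen and omitted from this sketch.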