sched/deadline: Make dl_rq->pushable_dl_tasks update drive dl_rq->overloaded
dl_rq->dl_nr_migratory is increased whenever a DL entity is enqueued and it has
nr_cpus_allowed > 1. Unlike the pushable_dl_tasks tree, dl_rq->dl_nr_migratory
includes a dl_rq's current task. This means a dl_rq can have a migratable
current, N non-migratable queued tasks, and be flagged as overloaded and have
its CPU set in the dlo_mask, despite having an empty pushable_dl_tasks tree.
Make a dl_rq's overload logic be driven by {enqueue,dequeue}_pushable_dl_task();
in other words, only flag DL RQs as overloaded if they have at least one
runnable-but-not-current migratable task.
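To make the difference concrete, below is a minimal userspace model of the two
overload policies (an illustration only, not part of the patch; the struct and
helper names are invented). With a migratable current task and two pinned
queued tasks, the old dl_nr_migratory-based test reports overload even though
nothing is pushable:

#include <stdbool.h>
#include <stdio.h>

/* Toy model of a DL runqueue's migration bookkeeping. */
struct toy_dl_rq {
	unsigned int dl_nr_running;   /* queued DL tasks, incl. current */
	unsigned int dl_nr_migratory; /* tasks with nr_cpus_allowed > 1, incl. current */
	unsigned int nr_pushable;     /* queued-but-not-current migratable tasks */
};

/* Old policy: overloaded whenever something migratable exists and >1 task runs. */
static bool old_overloaded(const struct toy_dl_rq *rq)
{
	return rq->dl_nr_migratory && rq->dl_nr_running > 1;
}

/* New policy: overloaded iff the pushable_dl_tasks tree is non-empty. */
static bool new_overloaded(const struct toy_dl_rq *rq)
{
	return rq->nr_pushable > 0;
}

int main(void)
{
	/* Migratable current task plus two pinned queued tasks. */
	struct toy_dl_rq rq = { .dl_nr_running = 3, .dl_nr_migratory = 1,
				.nr_pushable = 0 };

	printf("old: %d new: %d\n", old_overloaded(&rq), new_overloaded(&rq));
	/* prints "old: 1 new: 0": the old test is a false positive here. */
	return 0;
}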
o push_dl_task() is unaffected, as it is a no-op if there are no pushable
tasks.
o pull_dl_task() no longer scans runqueues whose sole migratable task is
their current one, which it can't do anything about anyway (a toy model of
this scan is sketched after this list).
It may also now pull tasks to a DL RQ with dl_nr_running > 1 if only its
current task is migratable.
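For the pull side, here is a similar toy (userspace only; the plain bitmask
stands in for the root domain's dlo_mask) showing why the scan gets cheaper:
a runqueue whose only migratable task is its current one no longer sets its
bit, so the walk over the mask never visits it:

#include <stdio.h>

#define NR_CPUS 4

static unsigned int dlo_mask; /* bit n set == CPU n is DL-overloaded */

static void set_overload(int cpu)   { dlo_mask |=  1u << cpu; }
static void clear_overload(int cpu) { dlo_mask &= ~(1u << cpu); }

/* Toy pull scan: only CPUs with their bit set are considered. */
static void pull_scan(void)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++) {
		if (!(dlo_mask & (1u << cpu)))
			continue; /* no pushable task there, skip it */
		printf("scanning CPU%d for a task to pull\n", cpu);
	}
}

int main(void)
{
	set_overload(1);   /* CPU1 queues a pushable DL task */
	set_overload(2);   /* CPU2's only migratable task becomes current... */
	clear_overload(2); /* ...so under the new scheme its bit is cleared */
	pull_scan();       /* visits CPU1 only */
	return 0;
}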
Since dl_rq->dl_nr_migratory becomes unused, remove it.
RT had the exact same mechanism (rt_rq->rt_nr_migratory), which was dropped
in favour of relying on rt_rq->pushable_tasks; see commit
612f769edd ("sched/rt: Make rt_rq->pushable_tasks updates drive rto_mask").
Signed-off-by: Valentin Schneider <vschneid@redhat.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Juri Lelli <juri.lelli@redhat.com>
Link: https://lore.kernel.org/r/20230928150251.463109-1-vschneid@redhat.com
parent 612f769edd
commit 5fe7765997
3 changed files with 14 additions and 45 deletions
kernel/sched/deadline.c
@@ -509,7 +509,6 @@ void init_dl_rq(struct dl_rq *dl_rq)
 	/* zero means no -deadline tasks */
 	dl_rq->earliest_dl.curr = dl_rq->earliest_dl.next = 0;
 
-	dl_rq->dl_nr_migratory = 0;
 	dl_rq->overloaded = 0;
 	dl_rq->pushable_dl_tasks_root = RB_ROOT_CACHED;
 #else
@@ -553,39 +552,6 @@ static inline void dl_clear_overload(struct rq *rq)
 	cpumask_clear_cpu(rq->cpu, rq->rd->dlo_mask);
 }
 
-static void update_dl_migration(struct dl_rq *dl_rq)
-{
-	if (dl_rq->dl_nr_migratory && dl_rq->dl_nr_running > 1) {
-		if (!dl_rq->overloaded) {
-			dl_set_overload(rq_of_dl_rq(dl_rq));
-			dl_rq->overloaded = 1;
-		}
-	} else if (dl_rq->overloaded) {
-		dl_clear_overload(rq_of_dl_rq(dl_rq));
-		dl_rq->overloaded = 0;
-	}
-}
-
-static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
-{
-	struct task_struct *p = dl_task_of(dl_se);
-
-	if (p->nr_cpus_allowed > 1)
-		dl_rq->dl_nr_migratory++;
-
-	update_dl_migration(dl_rq);
-}
-
-static void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
-{
-	struct task_struct *p = dl_task_of(dl_se);
-
-	if (p->nr_cpus_allowed > 1)
-		dl_rq->dl_nr_migratory--;
-
-	update_dl_migration(dl_rq);
-}
-
 #define __node_2_pdl(node) \
 	rb_entry((node), struct task_struct, pushable_dl_tasks)
 
@@ -594,6 +560,11 @@ static inline bool __pushable_less(struct rb_node *a, const struct rb_node *b)
 	return dl_entity_preempt(&__node_2_pdl(a)->dl, &__node_2_pdl(b)->dl);
 }
 
+static inline int has_pushable_dl_tasks(struct rq *rq)
+{
+	return !RB_EMPTY_ROOT(&rq->dl.pushable_dl_tasks_root.rb_root);
+}
+
 /*
  * The list of pushable -deadline task is not a plist, like in
  * sched_rt.c, it is an rb-tree with tasks ordered by deadline.
@@ -609,6 +580,11 @@ static void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
 			   __pushable_less);
 	if (leftmost)
 		rq->dl.earliest_dl.next = p->dl.deadline;
+
+	if (!rq->dl.overloaded) {
+		dl_set_overload(rq);
+		rq->dl.overloaded = 1;
+	}
 }
 
 static void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
@@ -625,11 +601,11 @@ static void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
 		dl_rq->earliest_dl.next = __node_2_pdl(leftmost)->dl.deadline;
 
 	RB_CLEAR_NODE(&p->pushable_dl_tasks);
-}
 
-static inline int has_pushable_dl_tasks(struct rq *rq)
-{
-	return !RB_EMPTY_ROOT(&rq->dl.pushable_dl_tasks_root.rb_root);
+	if (!has_pushable_dl_tasks(rq) && rq->dl.overloaded) {
+		dl_clear_overload(rq);
+		rq->dl.overloaded = 0;
+	}
 }
 
 static int push_dl_task(struct rq *rq);
@@ -1504,7 +1480,6 @@ void inc_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 	add_nr_running(rq_of_dl_rq(dl_rq), 1);
 
 	inc_dl_deadline(dl_rq, deadline);
-	inc_dl_migration(dl_se, dl_rq);
 }
 
 static inline
@@ -1518,7 +1493,6 @@ void dec_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 	sub_nr_running(rq_of_dl_rq(dl_rq), 1);
 
 	dec_dl_deadline(dl_rq, dl_se->deadline);
-	dec_dl_migration(dl_se, dl_rq);
 }
 
 static inline bool __dl_less(struct rb_node *a, const struct rb_node *b)
@@ -2291,9 +2265,6 @@ static int push_dl_task(struct rq *rq)
 	struct rq *later_rq;
 	int ret = 0;
 
-	if (!rq->dl.overloaded)
-		return 0;
-
 	next_task = pick_next_pushable_dl_task(rq);
 	if (!next_task)
 		return 0;
kernel/sched/debug.c
@@ -745,7 +745,6 @@ void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq)
 
 	PU(dl_nr_running);
 #ifdef CONFIG_SMP
-	PU(dl_nr_migratory);
 	dl_bw = &cpu_rq(cpu)->rd->dl_bw;
 #else
 	dl_bw = &dl_rq->dl_bw;
kernel/sched/sched.h
@@ -707,7 +707,6 @@ struct dl_rq {
 		u64		next;
 	} earliest_dl;
 
-	unsigned int		dl_nr_migratory;
 	int			overloaded;
 
 	/*