memcg: make count_memcg_events re-entrant safe against irqs
Let's make count_memcg_events() re-entrant safe against irqs. The only change needed is to convert the usage of __this_cpu_add() to this_cpu_add(). In addition, with re-entrancy safety there is no longer any need to disable irqs around the call sites. Also add warnings for in_nmi(), as the function is not safe against nmi context.

Link: https://lkml.kernel.org/r/20250514184158.3471331-5-shakeel.butt@linux.dev
Signed-off-by: Shakeel Butt <shakeel.butt@linux.dev>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Alexei Starovoitov <ast@kernel.org>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Roman Gushchin <roman.gushchin@linux.dev>
Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 8814e3b869
commit e52401e724
5 changed files with 19 additions and 36 deletions
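For context on why the single-line conversion is sufficient: __this_cpu_add() is an open-coded read-modify-write that an irq can interrupt midway, while this_cpu_add() guarantees one irq-safe update (a single instruction on x86; an irqs-off section on architectures without one). Below is a minimal userspace sketch of the hazard — an illustrative analogue, not kernel code; the counter names and the SIGALRM-as-interrupt device are assumptions made for the demo.

/*
 * Userspace analogue (not kernel code) of the race this patch closes.
 * "plain" is updated with an open-coded read-modify-write, standing in
 * for __this_cpu_add(); "irqsafe" is updated with one atomic operation,
 * standing in for this_cpu_add(). The SIGALRM handler plays the role of
 * an interrupt arriving between the read and the write.
 *
 * Build: cc -O2 -o rmw-race rmw-race.c
 */
#include <signal.h>
#include <stdatomic.h>
#include <stdio.h>
#include <sys/time.h>

static volatile long plain;	/* open-coded RMW, like __this_cpu_add() */
static atomic_long irqsafe;	/* one atomic update, like this_cpu_add() */

static void handler(int sig)	/* stands in for an interrupt handler */
{
	(void)sig;
	plain = plain + 1;		/* may be lost, see the loop below */
	atomic_fetch_add(&irqsafe, 1);	/* never lost */
}

int main(void)
{
	/* fire a "fake irq" every 100 microseconds */
	struct itimerval it = { { 0, 100 }, { 0, 100 } };

	signal(SIGALRM, handler);
	setitimer(ITIMER_REAL, &it, NULL);

	for (long i = 0; i < 50000000; i++) {
		long tmp = plain;	/* read ...		*/
		tmp += 1;		/* ... modify ...	*/
		plain = tmp;		/* ... write: a handler increment
					 * that landed in between is
					 * silently overwritten here */
		atomic_fetch_add(&irqsafe, 1);
	}

	/* both counters saw the same increments; any gap is lost updates */
	long safe = atomic_load(&irqsafe);
	printf("plain=%ld irqsafe=%ld lost=%ld\n", plain, safe, safe - plain);
	return 0;
}

On a busy machine the plain counter typically ends up behind the atomic one; that gap is exactly the class of lost events the conversion avoids, without the local_irq_save()/local_irq_restore() pair the old count_memcg_events() wrapper needed.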
include/linux/memcontrol.h
@@ -942,19 +942,8 @@ static inline void mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
 	local_irq_restore(flags);
 }
 
-void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
-			  unsigned long count);
-
-static inline void count_memcg_events(struct mem_cgroup *memcg,
-				      enum vm_event_item idx,
-				      unsigned long count)
-{
-	unsigned long flags;
-
-	local_irq_save(flags);
-	__count_memcg_events(memcg, idx, count);
-	local_irq_restore(flags);
-}
+void count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
+			unsigned long count);
 
 static inline void count_memcg_folio_events(struct folio *folio,
 		enum vm_event_item idx, unsigned long nr)
@@ -1418,12 +1407,6 @@ static inline void mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
 }
 
 static inline void count_memcg_events(struct mem_cgroup *memcg,
 				      enum vm_event_item idx,
 				      unsigned long count)
 {
 }
 
-static inline void __count_memcg_events(struct mem_cgroup *memcg,
-					enum vm_event_item idx,
-					unsigned long count)
-{
-}
-
mm/memcontrol-v1.c
@@ -512,9 +512,9 @@ static void memcg1_charge_statistics(struct mem_cgroup *memcg, int nr_pages)
 {
 	/* pagein of a big page is an event. So, ignore page size */
 	if (nr_pages > 0)
-		__count_memcg_events(memcg, PGPGIN, 1);
+		count_memcg_events(memcg, PGPGIN, 1);
 	else {
-		__count_memcg_events(memcg, PGPGOUT, 1);
+		count_memcg_events(memcg, PGPGOUT, 1);
 		nr_pages = -nr_pages; /* for event */
 	}
 
@@ -689,7 +689,7 @@ void memcg1_uncharge_batch(struct mem_cgroup *memcg, unsigned long pgpgout,
 	unsigned long flags;
 
 	local_irq_save(flags);
-	__count_memcg_events(memcg, PGPGOUT, pgpgout);
+	count_memcg_events(memcg, PGPGOUT, pgpgout);
 	__this_cpu_add(memcg->events_percpu->nr_page_events, nr_memory);
 	memcg1_check_events(memcg, nid);
 	local_irq_restore(flags);
mm/memcontrol.c
@@ -823,12 +823,12 @@ void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val)
 }
 
 /**
- * __count_memcg_events - account VM events in a cgroup
+ * count_memcg_events - account VM events in a cgroup
  * @memcg: the memory cgroup
  * @idx: the event item
  * @count: the number of events that occurred
  */
-void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
-			  unsigned long count)
+void count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
+			unsigned long count)
 {
 	int i = memcg_events_index(idx);
@@ -842,7 +842,7 @@ void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
 
 	cpu = get_cpu();
 
-	__this_cpu_add(memcg->vmstats_percpu->events[i], count);
+	this_cpu_add(memcg->vmstats_percpu->events[i], count);
 	memcg_rstat_updated(memcg, count, cpu);
 	trace_count_memcg_events(memcg, idx, count);
 
mm/swap.c
@@ -309,7 +309,7 @@ static void lru_activate(struct lruvec *lruvec, struct folio *folio)
 	trace_mm_lru_activate(folio);
 
 	__count_vm_events(PGACTIVATE, nr_pages);
-	__count_memcg_events(lruvec_memcg(lruvec), PGACTIVATE, nr_pages);
+	count_memcg_events(lruvec_memcg(lruvec), PGACTIVATE, nr_pages);
 }
 
 #ifdef CONFIG_SMP
@@ -581,7 +581,7 @@ static void lru_deactivate_file(struct lruvec *lruvec, struct folio *folio)
 
 	if (active) {
 		__count_vm_events(PGDEACTIVATE, nr_pages);
-		__count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE,
-				     nr_pages);
+		count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE,
+				   nr_pages);
 	}
 }
@@ -599,7 +599,7 @@ static void lru_deactivate(struct lruvec *lruvec, struct folio *folio)
 	lruvec_add_folio(lruvec, folio);
 
 	__count_vm_events(PGDEACTIVATE, nr_pages);
-	__count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE, nr_pages);
+	count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE, nr_pages);
 }
 
 static void lru_lazyfree(struct lruvec *lruvec, struct folio *folio)
@@ -625,7 +625,7 @@ static void lru_lazyfree(struct lruvec *lruvec, struct folio *folio)
 	lruvec_add_folio(lruvec, folio);
 
 	__count_vm_events(PGLAZYFREE, nr_pages);
-	__count_memcg_events(lruvec_memcg(lruvec), PGLAZYFREE, nr_pages);
+	count_memcg_events(lruvec_memcg(lruvec), PGLAZYFREE, nr_pages);
 }
 
 /*
mm/vmscan.c
@@ -2028,7 +2028,7 @@ static unsigned long shrink_inactive_list(unsigned long nr_to_scan,
 	item = PGSCAN_KSWAPD + reclaimer_offset(sc);
 	if (!cgroup_reclaim(sc))
 		__count_vm_events(item, nr_scanned);
-	__count_memcg_events(lruvec_memcg(lruvec), item, nr_scanned);
+	count_memcg_events(lruvec_memcg(lruvec), item, nr_scanned);
 	__count_vm_events(PGSCAN_ANON + file, nr_scanned);
 
 	spin_unlock_irq(&lruvec->lru_lock);
@@ -2048,7 +2048,7 @@ static unsigned long shrink_inactive_list(unsigned long nr_to_scan,
 	item = PGSTEAL_KSWAPD + reclaimer_offset(sc);
 	if (!cgroup_reclaim(sc))
 		__count_vm_events(item, nr_reclaimed);
-	__count_memcg_events(lruvec_memcg(lruvec), item, nr_reclaimed);
+	count_memcg_events(lruvec_memcg(lruvec), item, nr_reclaimed);
 	__count_vm_events(PGSTEAL_ANON + file, nr_reclaimed);
 	spin_unlock_irq(&lruvec->lru_lock);
 
@@ -2138,7 +2138,7 @@ static void shrink_active_list(unsigned long nr_to_scan,
 
 	if (!cgroup_reclaim(sc))
 		__count_vm_events(PGREFILL, nr_scanned);
-	__count_memcg_events(lruvec_memcg(lruvec), PGREFILL, nr_scanned);
+	count_memcg_events(lruvec_memcg(lruvec), PGREFILL, nr_scanned);
 
 	spin_unlock_irq(&lruvec->lru_lock);
 
@@ -2195,7 +2195,7 @@ static void shrink_active_list(unsigned long nr_to_scan,
 	nr_deactivate = move_folios_to_lru(lruvec, &l_inactive);
 
 	__count_vm_events(PGDEACTIVATE, nr_deactivate);
-	__count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE, nr_deactivate);
+	count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE, nr_deactivate);
 
 	__mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken);
 	spin_unlock_irq(&lruvec->lru_lock);
@@ -4612,8 +4612,8 @@ static int scan_folios(struct lruvec *lruvec, struct scan_control *sc,
 		__count_vm_events(item, isolated);
 		__count_vm_events(PGREFILL, sorted);
 	}
-	__count_memcg_events(memcg, item, isolated);
-	__count_memcg_events(memcg, PGREFILL, sorted);
+	count_memcg_events(memcg, item, isolated);
+	count_memcg_events(memcg, PGREFILL, sorted);
 	__count_vm_events(PGSCAN_ANON + type, isolated);
 	trace_mm_vmscan_lru_isolate(sc->reclaim_idx, sc->order, MAX_LRU_BATCH,
 				    scanned, skipped, isolated,
@@ -4763,7 +4763,7 @@ retry:
 	item = PGSTEAL_KSWAPD + reclaimer_offset(sc);
 	if (!cgroup_reclaim(sc))
 		__count_vm_events(item, reclaimed);
-	__count_memcg_events(memcg, item, reclaimed);
+	count_memcg_events(memcg, item, reclaimed);
 	__count_vm_events(PGSTEAL_ANON + type, reclaimed);
 
 	spin_unlock_irq(&lruvec->lru_lock);