kfence: count unexpectedly skipped allocations

Maintain a counter to count allocations that are skipped due to being
incompatible (oversized, incompatible gfp flags) or no capacity.

This is to compute the fraction of allocations that could not be
serviced by KFENCE, which we expect to be rare.

Link: https://lkml.kernel.org/r/20210923104803.2620285-2-elver@google.com
Signed-off-by: Marco Elver <elver@google.com>
Reviewed-by: Dmitry Vyukov <dvyukov@google.com>
Acked-by: Alexander Potapenko <glider@google.com>
Cc: Aleksandr Nogikh <nogikh@google.com>
Cc: Jann Horn <jannh@google.com>
Cc: Taras Madan <tarasmadan@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
Marco Elver 2021-11-05 13:45:28 -07:00 committed by Linus Torvalds
parent f39f21b3dd
commit 9a19aeb566

View file

@@ -112,6 +112,8 @@ enum kfence_counter_id {
 	KFENCE_COUNTER_FREES,
 	KFENCE_COUNTER_ZOMBIES,
 	KFENCE_COUNTER_BUGS,
+	KFENCE_COUNTER_SKIP_INCOMPAT,
+	KFENCE_COUNTER_SKIP_CAPACITY,
 	KFENCE_COUNTER_COUNT,
 };
 static atomic_long_t counters[KFENCE_COUNTER_COUNT];
@@ -121,6 +123,8 @@ static const char *const counter_names[] = {
 	[KFENCE_COUNTER_FREES]		= "total frees",
 	[KFENCE_COUNTER_ZOMBIES]	= "zombie allocations",
 	[KFENCE_COUNTER_BUGS]		= "total bugs",
+	[KFENCE_COUNTER_SKIP_INCOMPAT]	= "skipped allocations (incompatible)",
+	[KFENCE_COUNTER_SKIP_CAPACITY]	= "skipped allocations (capacity)",
 };
 static_assert(ARRAY_SIZE(counter_names) == KFENCE_COUNTER_COUNT);
@@ -271,8 +275,10 @@ static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t g
 		list_del_init(&meta->list);
 	}
 	raw_spin_unlock_irqrestore(&kfence_freelist_lock, flags);
-	if (!meta)
+	if (!meta) {
+		atomic_long_inc(&counters[KFENCE_COUNTER_SKIP_CAPACITY]);
 		return NULL;
+	}

 	if (unlikely(!raw_spin_trylock_irqsave(&meta->lock, flags))) {
 		/*
@@ -740,8 +746,10 @@ void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags)
 	 * Perform size check before switching kfence_allocation_gate, so that
 	 * we don't disable KFENCE without making an allocation.
 	 */
-	if (size > PAGE_SIZE)
+	if (size > PAGE_SIZE) {
+		atomic_long_inc(&counters[KFENCE_COUNTER_SKIP_INCOMPAT]);
 		return NULL;
+	}

 	/*
 	 * Skip allocations from non-default zones, including DMA. We cannot
@@ -749,8 +757,10 @@ void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags)
 	 * properties (e.g. reside in DMAable memory).
 	 */
 	if ((flags & GFP_ZONEMASK) ||
-	    (s->flags & (SLAB_CACHE_DMA | SLAB_CACHE_DMA32)))
+	    (s->flags & (SLAB_CACHE_DMA | SLAB_CACHE_DMA32))) {
+		atomic_long_inc(&counters[KFENCE_COUNTER_SKIP_INCOMPAT]);
 		return NULL;
+	}

 	/*
 	 * allocation_gate only needs to become non-zero, so it doesn't make