Mirror of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
Probes updates for v6.7

Merge tag 'probes-v6.7' of
git://git.kernel.org/pub/scm/linux/kernel/git/trace/linux-trace

commit 05bf73aa27
14 changed files with 1273 additions and 280 deletions

Pull probes updates from Masami Hiramatsu:

 "Cleanups:

   - kprobes: Fix a typo in the kprobes samples

   - tracing/eprobes: Remove 'break' after return

  kretprobe/fprobe performance improvements:

   - lib: Introduce 'objpool', a high-performance lockless object
     queue. It uses a per-cpu ring array to allocate and release
     objects from the pre-allocated object pool. Since the ring-array
     index is a 32-bit sequential counter, a push or pop of an object
     pointer can simply be retried on contention, without taking a
     lock (much like a seqlock retry)

   - lib: Add an objpool test module to verify the functionality and
     evaluate the performance under various circumstances

   - kprobes/fprobe: Improve kretprobe and rethook scalability with
     objpool. This makes both the legacy kretprobe and the fprobe exit
     handler (which is based on rethook) scalable on SMP systems; even
     an 8-thread parallel test shows a large scalability improvement

   - Remove the no-longer-needed freelist.h, which objpool replaces

   - objpool: Add a MAINTAINERS entry for objpool

   - objpool: Remove unused include lines"

* tag 'probes-v6.7' of git://git.kernel.org/pub/scm/linux/kernel/git/trace/linux-trace:
  kprobes: unused header files removed
  MAINTAINERS: objpool added
  kprobes: freelist.h removed
  kprobes: kretprobe scalability improvement
  lib: objpool test module added
  lib: objpool added: ring-array based lockless MPMC
  tracing/eprobe: drop unneeded breaks
  samples: kprobes: Fixes a typo
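A note on the "retry without lock" scheme described in the pull message: each
per-cpu ring keeps monotonically increasing 32-bit sequence counters, and a
contended update simply retries a cmpxchg. The following is an illustrative
sketch only (hypothetical ring_push()/ring_pop() helpers, not code from this
commit); it mirrors the structure of objpool_try_add_slot() and
objpool_try_get_slot() in lib/objpool.c further down this page:

	/* one per-cpu ring; capacity is a power of two, mask == capacity - 1 */
	struct ring {
		uint32_t head;		/* next sequence to consume */
		uint32_t tail;		/* next sequence to produce */
		uint32_t last;		/* highest sequence ready for consumers */
		uint32_t mask;
		void *entries[];
	};

	static int ring_push(struct ring *r, void *obj)
	{
		uint32_t tail = READ_ONCE(r->tail);

		/* reserve a sequence number; retry if another cpu won the race */
		while (!try_cmpxchg_acquire(&r->tail, &tail, tail + 1))
			;
		WRITE_ONCE(r->entries[tail & r->mask], obj);
		/* publish: consumers never read past "last" */
		smp_store_release(&r->last, tail + 1);
		return 0;
	}

	static void *ring_pop(struct ring *r)
	{
		uint32_t head = smp_load_acquire(&r->head);

		while (head != READ_ONCE(r->last)) {
			void *obj = READ_ONCE(r->entries[head & r->mask]);

			/* claim this sequence; on failure "head" is reloaded */
			if (try_cmpxchg_release(&r->head, &head, head + 1))
				return obj;
		}
		return NULL;	/* ring empty */
	}

Only the low bits (sequence & mask) index the array, so head and tail act as a
small sliding window moving forward through [0, 2^32).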
MAINTAINERS
@@ -15553,6 +15553,13 @@ F:	include/linux/objagg.h
 F:	lib/objagg.c
 F:	lib/test_objagg.c
 
+OBJPOOL
+M:	Matt Wu <wuqiang.matt@bytedance.com>
+S:	Supported
+F:	include/linux/objpool.h
+F:	lib/objpool.c
+F:	lib/test_objpool.c
+
 OBJTOOL
 M:	Josh Poimboeuf <jpoimboe@kernel.org>
 M:	Peter Zijlstra <peterz@infradead.org>
include/linux/freelist.h (file deleted, 129 lines). The removed file:

/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
#ifndef FREELIST_H
#define FREELIST_H

#include <linux/atomic.h>

/*
 * Copyright: cameron@moodycamel.com
 *
 * A simple CAS-based lock-free free list. Not the fastest thing in the world
 * under heavy contention, but simple and correct (assuming nodes are never
 * freed until after the free list is destroyed), and fairly speedy under low
 * contention.
 *
 * Adapted from: https://moodycamel.com/blog/2014/solving-the-aba-problem-for-lock-free-free-lists
 */

struct freelist_node {
	atomic_t		refs;
	struct freelist_node	*next;
};

struct freelist_head {
	struct freelist_node	*head;
};

#define REFS_ON_FREELIST 0x80000000
#define REFS_MASK	 0x7FFFFFFF

static inline void __freelist_add(struct freelist_node *node, struct freelist_head *list)
{
	/*
	 * Since the refcount is zero, and nobody can increase it once it's
	 * zero (except us, and we run only one copy of this method per node at
	 * a time, i.e. the single thread case), then we know we can safely
	 * change the next pointer of the node; however, once the refcount is
	 * back above zero, then other threads could increase it (happens under
	 * heavy contention, when the refcount goes to zero in between a load
	 * and a refcount increment of a node in try_get, then back up to
	 * something non-zero, then the refcount increment is done by the other
	 * thread) -- so if the CAS to add the node to the actual list fails,
	 * decrease the refcount and leave the add operation to the next thread
	 * who puts the refcount back to zero (which could be us, hence the
	 * loop).
	 */
	struct freelist_node *head = READ_ONCE(list->head);

	for (;;) {
		WRITE_ONCE(node->next, head);
		atomic_set_release(&node->refs, 1);

		if (!try_cmpxchg_release(&list->head, &head, node)) {
			/*
			 * Hmm, the add failed, but we can only try again when
			 * the refcount goes back to zero.
			 */
			if (atomic_fetch_add_release(REFS_ON_FREELIST - 1, &node->refs) == 1)
				continue;
		}
		return;
	}
}

static inline void freelist_add(struct freelist_node *node, struct freelist_head *list)
{
	/*
	 * We know that the should-be-on-freelist bit is 0 at this point, so
	 * it's safe to set it using a fetch_add.
	 */
	if (!atomic_fetch_add_release(REFS_ON_FREELIST, &node->refs)) {
		/*
		 * Oh look! We were the last ones referencing this node, and we
		 * know we want to add it to the free list, so let's do it!
		 */
		__freelist_add(node, list);
	}
}

static inline struct freelist_node *freelist_try_get(struct freelist_head *list)
{
	struct freelist_node *prev, *next, *head = smp_load_acquire(&list->head);
	unsigned int refs;

	while (head) {
		prev = head;
		refs = atomic_read(&head->refs);
		if ((refs & REFS_MASK) == 0 ||
		    !atomic_try_cmpxchg_acquire(&head->refs, &refs, refs+1)) {
			head = smp_load_acquire(&list->head);
			continue;
		}

		/*
		 * Good, reference count has been incremented (it wasn't at
		 * zero), which means we can read the next and not worry about
		 * it changing between now and the time we do the CAS.
		 */
		next = READ_ONCE(head->next);
		if (try_cmpxchg_acquire(&list->head, &head, next)) {
			/*
			 * Yay, got the node. This means it was on the list,
			 * which means should-be-on-freelist must be false no
			 * matter the refcount (because nobody else knows it's
			 * been taken off yet, it can't have been put back on).
			 */
			WARN_ON_ONCE(atomic_read(&head->refs) & REFS_ON_FREELIST);

			/*
			 * Decrease refcount twice, once for our ref, and once
			 * for the list's ref.
			 */
			atomic_fetch_add(-2, &head->refs);

			return head;
		}

		/*
		 * OK, the head must have changed on us, but we still need to
		 * decrement the refcount we increased.
		 */
		refs = atomic_fetch_add(-1, &prev->refs);
		if (refs == REFS_ON_FREELIST + 1)
			__freelist_add(prev, list);
	}

	return NULL;
}

#endif /* FREELIST_H */
include/linux/kprobes.h
@@ -26,8 +26,7 @@
 #include <linux/rcupdate.h>
 #include <linux/mutex.h>
 #include <linux/ftrace.h>
-#include <linux/refcount.h>
-#include <linux/freelist.h>
+#include <linux/objpool.h>
 #include <linux/rethook.h>
 #include <asm/kprobes.h>
 
@@ -141,7 +140,7 @@ static inline bool kprobe_ftrace(struct kprobe *p)
 */
 struct kretprobe_holder {
 	struct kretprobe	*rp;
-	refcount_t		ref;
+	struct objpool_head	pool;
 };
 
 struct kretprobe {
@@ -154,7 +153,6 @@ struct kretprobe {
 #ifdef CONFIG_KRETPROBE_ON_RETHOOK
 	struct rethook *rh;
 #else
-	struct freelist_head freelist;
 	struct kretprobe_holder *rph;
 #endif
 };
@@ -165,10 +163,7 @@ struct kretprobe_instance {
 #ifdef CONFIG_KRETPROBE_ON_RETHOOK
 	struct rethook_node node;
 #else
-	union {
-		struct freelist_node freelist;
-		struct rcu_head rcu;
-	};
+	struct rcu_head rcu;
 	struct llist_node llist;
 	struct kretprobe_holder *rph;
 	kprobe_opcode_t *ret_addr;
include/linux/objpool.h (new file, 181 lines):

/* SPDX-License-Identifier: GPL-2.0 */

#ifndef _LINUX_OBJPOOL_H
#define _LINUX_OBJPOOL_H

#include <linux/types.h>
#include <linux/refcount.h>

/*
 * objpool: ring-array based lockless MPMC queue
 *
 * Copyright: wuqiang.matt@bytedance.com,mhiramat@kernel.org
 *
 * objpool is a scalable implementation of a high performance queue for
 * object allocation and reclamation, such as kretprobe instances.
 *
 * By leveraging a percpu ring-array to mitigate hot spots of memory
 * contention, it delivers near-linear scalability for highly parallel
 * scenarios. The objpool is best suited for the following cases:
 * 1) Memory allocation or reclamation is prohibited or too expensive
 * 2) Consumers are of different priorities, such as irqs and threads
 *
 * Limitations:
 * 1) Maximum objects (capacity) is fixed after objpool creation
 * 2) All pre-allocated objects are managed in percpu ring arrays,
 *    which consume more memory than linked lists
 */

/**
 * struct objpool_slot - percpu ring array of objpool
 * @head: head sequence of the local ring array (to retrieve at)
 * @tail: tail sequence of the local ring array (to append at)
 * @last: the last sequence number marked as ready for retrieval
 * @mask: bits mask for modulo capacity to compute array indexes
 * @entries: object entries on this slot
 *
 * Represents a cpu-local array-based ring buffer; its size is specialized
 * during initialization of the object pool. The percpu objpool node is
 * allocated from local memory for NUMA systems, and kept compact in
 * contiguous memory: the CPU-assigned number of objects is stored just
 * after the body of the objpool_slot.
 *
 * The real size of the ring array is far smaller than the value range of
 * head and tail, typed as uint32_t: [0, 2^32), so only the lower bits
 * (mask) of head and tail are used as the actual position in the ring
 * array. In general the ring array acts like a small sliding window,
 * always moving forward in the loop of [0, 2^32).
 */
struct objpool_slot {
	uint32_t head;
	uint32_t tail;
	uint32_t last;
	uint32_t mask;
	void *entries[];
} __packed;

struct objpool_head;

/*
 * caller-specified callback for object initial setup, it's only called
 * once for each object (just after the memory allocation of the object)
 */
typedef int (*objpool_init_obj_cb)(void *obj, void *context);

/* caller-specified cleanup callback for objpool destruction */
typedef int (*objpool_fini_cb)(struct objpool_head *head, void *context);

/**
 * struct objpool_head - object pooling metadata
 * @obj_size:   object size, aligned to sizeof(void *)
 * @nr_objs:    total objs (to be pre-allocated with objpool)
 * @nr_cpus:    local copy of nr_cpu_ids
 * @capacity:   max objs that can be managed by one objpool_slot
 * @gfp:        gfp flags for kmalloc & vmalloc
 * @ref:        refcount of objpool
 * @flags:      flags for objpool management
 * @cpu_slots:  pointer to the array of objpool_slot
 * @release:    resource cleanup callback
 * @context:    caller-provided context
 */
struct objpool_head {
	int			obj_size;
	int			nr_objs;
	int			nr_cpus;
	int			capacity;
	gfp_t			gfp;
	refcount_t		ref;
	unsigned long		flags;
	struct objpool_slot	**cpu_slots;
	objpool_fini_cb		release;
	void			*context;
};

#define OBJPOOL_NR_OBJECT_MAX	(1UL << 24) /* maximum numbers of total objects */
#define OBJPOOL_OBJECT_SIZE_MAX	(1UL << 16) /* maximum size of an object */

/**
 * objpool_init() - initialize objpool and pre-allocated objects
 * @pool:        the object pool to be initialized, declared by caller
 * @nr_objs:     total objects to be pre-allocated by this object pool
 * @object_size: size of an object (should be > 0)
 * @gfp:         flags for memory allocation (via kmalloc or vmalloc)
 * @context:     user context for object initialization callback
 * @objinit:     object initialization callback for extra setup
 * @release:     cleanup callback for extra cleanup task
 *
 * return value: 0 for success, otherwise error code
 *
 * All pre-allocated objects are to be zeroed after memory allocation.
 * The caller can do extra initialization in the objinit callback.
 * objinit() will be called just after slot allocation and called only
 * once for each object. After that the objpool won't touch any content
 * of the objects. It's the caller's duty to perform reinitialization
 * after each pop (object allocation) or do clearance before each push
 * (object reclamation).
 */
int objpool_init(struct objpool_head *pool, int nr_objs, int object_size,
		 gfp_t gfp, void *context, objpool_init_obj_cb objinit,
		 objpool_fini_cb release);

/**
 * objpool_pop() - allocate an object from objpool
 * @pool: object pool
 *
 * return value: object ptr or NULL if failed
 */
void *objpool_pop(struct objpool_head *pool);

/**
 * objpool_push() - reclaim the object and return it back to objpool
 * @obj:  object ptr to be pushed to objpool
 * @pool: object pool
 *
 * return: 0 or error code (it fails only when the user tries to push
 * the same object multiple times or wrong "objects" into objpool)
 */
int objpool_push(void *obj, struct objpool_head *pool);

/**
 * objpool_drop() - discard the object and deref objpool
 * @obj:  object ptr to be discarded
 * @pool: object pool
 *
 * return: 0 if objpool was released; -EAGAIN if there are still
 *         outstanding objects
 *
 * objpool_drop is normally for the release of outstanding objects
 * after objpool cleanup (objpool_fini). Think of this example: a
 * kretprobe is unregistered and objpool_fini() is called to release
 * all remaining objects, but there are still objects being used by
 * unfinished kretprobes (e.g. a blockable function: sys_accept). So
 * only when the last outstanding object is dropped can the whole
 * objpool be released, along with that call of objpool_drop()
 */
int objpool_drop(void *obj, struct objpool_head *pool);

/**
 * objpool_free() - release objpool forcibly (all objects to be freed)
 * @pool: object pool to be released
 */
void objpool_free(struct objpool_head *pool);

/**
 * objpool_fini() - deref object pool (also releasing unused objects)
 * @pool: object pool to be dereferenced
 *
 * objpool_fini() will try to release all remaining free objects and
 * then drop an extra reference of the objpool. If all objects are
 * already returned to the objpool (so-called synchronous use cases),
 * the objpool itself will be freed together. But if there are still
 * outstanding objects (so-called asynchronous use cases, such as a
 * blockable kretprobe), the objpool won't be released until all the
 * outstanding objects are dropped, and the caller must ensure there
 * are no concurrent objpool_push() calls in flight. Normally RCU is
 * required to make sure all ongoing objpool_push() calls finish
 * before calling objpool_fini(), as test_objpool, kretprobe and
 * rethook do
 */
void objpool_fini(struct objpool_head *pool);

#endif /* _LINUX_OBJPOOL_H */
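A hypothetical usage sketch of the API declared above (my_obj, my_objinit,
my_pool_setup, my_pool_use and the sizes are made-up names for illustration;
they are not part of this commit):

	struct my_obj {
		int id;
		char buf[48];
	};

	/* one-time setup callback, called once per pre-allocated object */
	static int my_objinit(void *obj, void *context)
	{
		struct my_obj *o = obj;

		o->id = -1;
		return 0;
	}

	/* pre-allocate 128 zeroed objects; no cleanup callback */
	static int my_pool_setup(struct objpool_head *pool)
	{
		return objpool_init(pool, 128, sizeof(struct my_obj),
				    GFP_KERNEL, NULL, my_objinit, NULL);
	}

	static void my_pool_use(struct objpool_head *pool)
	{
		struct my_obj *o = objpool_pop(pool);	/* NULL when empty */

		if (!o)
			return;
		/* ... use the object, reinitializing it as needed ... */
		objpool_push(o, pool);			/* return it to the pool */
	}

Teardown follows the comments above: objpool_fini(pool) once all objects are
back in the pool, and objpool_drop() later for any still-outstanding object.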
include/linux/rethook.h
@@ -6,11 +6,10 @@
 #define _LINUX_RETHOOK_H
 
 #include <linux/compiler.h>
-#include <linux/freelist.h>
+#include <linux/objpool.h>
 #include <linux/kallsyms.h>
 #include <linux/llist.h>
 #include <linux/rcupdate.h>
-#include <linux/refcount.h>
 
 struct rethook_node;
 
@@ -30,14 +29,12 @@ typedef void (*rethook_handler_t) (struct rethook_node *, void *, unsigned long,
 struct rethook {
 	void *data;
 	rethook_handler_t handler;
-	struct freelist_head pool;
-	refcount_t ref;
+	struct objpool_head pool;
 	struct rcu_head rcu;
 };
 
 /**
  * struct rethook_node - The rethook shadow-stack entry node.
- * @freelist: The freelist, linked to struct rethook::pool.
  * @rcu: The rcu_head for deferred freeing.
  * @llist: The llist, linked to a struct task_struct::rethooks.
  * @rethook: The pointer to the struct rethook.
@@ -48,20 +45,16 @@ struct rethook {
  * on each entry of the shadow stack.
  */
 struct rethook_node {
-	union {
-		struct freelist_node freelist;
-		struct rcu_head rcu;
-	};
+	struct rcu_head rcu;
 	struct llist_node llist;
 	struct rethook *rethook;
 	unsigned long ret_addr;
 	unsigned long frame;
 };
 
-struct rethook *rethook_alloc(void *data, rethook_handler_t handler);
+struct rethook *rethook_alloc(void *data, rethook_handler_t handler, int size, int num);
 void rethook_stop(struct rethook *rh);
 void rethook_free(struct rethook *rh);
-void rethook_add_node(struct rethook *rh, struct rethook_node *node);
 struct rethook_node *rethook_try_get(struct rethook *rh);
 void rethook_recycle(struct rethook_node *node);
 void rethook_hook(struct rethook_node *node, struct pt_regs *regs, bool mcount);
@@ -98,4 +91,3 @@ void rethook_flush_task(struct task_struct *tk);
 #endif
 
 #endif
-
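With the new contract, callers no longer allocate nodes themselves:
rethook_alloc() pre-allocates num nodes of size bytes from an internal
objpool, and failure is reported via ERR_PTR() rather than NULL. A
hypothetical caller sketch (my_hook_node, my_ctx and my_handler are made-up
names, not from this commit):

	struct my_hook_node {
		struct rethook_node node;	/* must be the first member */
		unsigned long private_data;	/* caller data follows the node */
	};

	static struct rethook *my_rh;

	static int my_setup(void)
	{
		my_rh = rethook_alloc(my_ctx, my_handler,
				      sizeof(struct my_hook_node), 32);
		if (IS_ERR(my_rh))	/* no longer returns NULL on failure */
			return PTR_ERR(my_rh);
		return 0;
	}

This mirrors how fprobe_init_rethook() is converted later in this diff.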
kernel/kprobes.c
@@ -1877,13 +1877,27 @@ static struct notifier_block kprobe_exceptions_nb = {
 #ifdef CONFIG_KRETPROBES
 
 #if !defined(CONFIG_KRETPROBE_ON_RETHOOK)
+
+/* callbacks for objpool of kretprobe instances */
+static int kretprobe_init_inst(void *nod, void *context)
+{
+	struct kretprobe_instance *ri = nod;
+
+	ri->rph = context;
+	return 0;
+}
+static int kretprobe_fini_pool(struct objpool_head *head, void *context)
+{
+	kfree(context);
+	return 0;
+}
+
 static void free_rp_inst_rcu(struct rcu_head *head)
 {
 	struct kretprobe_instance *ri = container_of(head, struct kretprobe_instance, rcu);
+	struct kretprobe_holder *rph = ri->rph;
 
-	if (refcount_dec_and_test(&ri->rph->ref))
-		kfree(ri->rph);
-	kfree(ri);
+	objpool_drop(ri, &rph->pool);
 }
 NOKPROBE_SYMBOL(free_rp_inst_rcu);
 
@@ -1892,7 +1906,7 @@ static void recycle_rp_inst(struct kretprobe_instance *ri)
 	struct kretprobe *rp = get_kretprobe(ri);
 
 	if (likely(rp))
-		freelist_add(&ri->freelist, &rp->freelist);
+		objpool_push(ri, &rp->rph->pool);
 	else
 		call_rcu(&ri->rcu, free_rp_inst_rcu);
 }
@@ -1929,23 +1943,12 @@ NOKPROBE_SYMBOL(kprobe_flush_task);
 
 static inline void free_rp_inst(struct kretprobe *rp)
 {
-	struct kretprobe_instance *ri;
-	struct freelist_node *node;
-	int count = 0;
-
-	node = rp->freelist.head;
-	while (node) {
-		ri = container_of(node, struct kretprobe_instance, freelist);
-		node = node->next;
-
-		kfree(ri);
-		count++;
-	}
+	struct kretprobe_holder *rph = rp->rph;
 
-	if (refcount_sub_and_test(count, &rp->rph->ref)) {
-		kfree(rp->rph);
-		rp->rph = NULL;
-	}
+	if (!rph)
+		return;
+	rp->rph = NULL;
+	objpool_fini(&rph->pool);
 }
 
 /* This assumes the 'tsk' is the current task or the is not running. */
@@ -2087,19 +2090,17 @@ NOKPROBE_SYMBOL(__kretprobe_trampoline_handler)
 static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs)
 {
 	struct kretprobe *rp = container_of(p, struct kretprobe, kp);
+	struct kretprobe_holder *rph = rp->rph;
 	struct kretprobe_instance *ri;
-	struct freelist_node *fn;
 
-	fn = freelist_try_get(&rp->freelist);
-	if (!fn) {
+	ri = objpool_pop(&rph->pool);
+	if (!ri) {
 		rp->nmissed++;
 		return 0;
 	}
 
-	ri = container_of(fn, struct kretprobe_instance, freelist);
-
 	if (rp->entry_handler && rp->entry_handler(ri, regs)) {
-		freelist_add(&ri->freelist, &rp->freelist);
+		objpool_push(ri, &rph->pool);
 		return 0;
 	}
 
@@ -2193,7 +2194,6 @@ int kprobe_on_func_entry(kprobe_opcode_t *addr, const char *sym, unsigned long o
 int register_kretprobe(struct kretprobe *rp)
 {
 	int ret;
-	struct kretprobe_instance *inst;
 	int i;
 	void *addr;
 
@@ -2227,19 +2227,12 @@ int register_kretprobe(struct kretprobe *rp)
 		rp->maxactive = max_t(unsigned int, 10, 2*num_possible_cpus());
 
 #ifdef CONFIG_KRETPROBE_ON_RETHOOK
-	rp->rh = rethook_alloc((void *)rp, kretprobe_rethook_handler);
-	if (!rp->rh)
-		return -ENOMEM;
+	rp->rh = rethook_alloc((void *)rp, kretprobe_rethook_handler,
+			       sizeof(struct kretprobe_instance) +
+			       rp->data_size, rp->maxactive);
+	if (IS_ERR(rp->rh))
+		return PTR_ERR(rp->rh);
 
-	for (i = 0; i < rp->maxactive; i++) {
-		inst = kzalloc(struct_size(inst, data, rp->data_size), GFP_KERNEL);
-		if (inst == NULL) {
-			rethook_free(rp->rh);
-			rp->rh = NULL;
-			return -ENOMEM;
-		}
-		rethook_add_node(rp->rh, &inst->node);
-	}
 	rp->nmissed = 0;
 	/* Establish function entry probe point */
 	ret = register_kprobe(&rp->kp);
@@ -2248,24 +2241,18 @@ int register_kretprobe(struct kretprobe *rp)
 		rp->rh = NULL;
 	}
 #else	/* !CONFIG_KRETPROBE_ON_RETHOOK */
-	rp->freelist.head = NULL;
 	rp->rph = kzalloc(sizeof(struct kretprobe_holder), GFP_KERNEL);
 	if (!rp->rph)
 		return -ENOMEM;
 
-	rp->rph->rp = rp;
-	for (i = 0; i < rp->maxactive; i++) {
-		inst = kzalloc(struct_size(inst, data, rp->data_size), GFP_KERNEL);
-		if (inst == NULL) {
-			refcount_set(&rp->rph->ref, i);
-			free_rp_inst(rp);
-			return -ENOMEM;
-		}
-		inst->rph = rp->rph;
-		freelist_add(&inst->freelist, &rp->freelist);
-	}
-	refcount_set(&rp->rph->ref, i);
-
+	if (objpool_init(&rp->rph->pool, rp->maxactive, rp->data_size +
+			 sizeof(struct kretprobe_instance), GFP_KERNEL,
+			 rp->rph, kretprobe_init_inst, kretprobe_fini_pool)) {
+		kfree(rp->rph);
+		rp->rph = NULL;
+		return -ENOMEM;
+	}
+	rp->rph->rp = rp;
 	rp->nmissed = 0;
 	/* Establish function entry probe point */
 	ret = register_kprobe(&rp->kp);
kernel/trace/fprobe.c
@@ -187,7 +187,7 @@ static void fprobe_init(struct fprobe *fp)
 
 static int fprobe_init_rethook(struct fprobe *fp, int num)
 {
-	int i, size;
+	int size;
 
 	if (num <= 0)
 		return -EINVAL;
@@ -205,26 +205,18 @@ static int fprobe_init_rethook(struct fprobe *fp, int num)
 	if (size <= 0)
 		return -EINVAL;
 
-	fp->rethook = rethook_alloc((void *)fp, fprobe_exit_handler);
-	if (!fp->rethook)
-		return -ENOMEM;
-	for (i = 0; i < size; i++) {
-		struct fprobe_rethook_node *node;
-
-		node = kzalloc(sizeof(*node) + fp->entry_data_size, GFP_KERNEL);
-		if (!node) {
-			rethook_free(fp->rethook);
-			fp->rethook = NULL;
-			return -ENOMEM;
-		}
-		rethook_add_node(fp->rethook, &node->node);
-	}
+	/* Initialize rethook */
+	fp->rethook = rethook_alloc((void *)fp, fprobe_exit_handler,
+				sizeof(struct fprobe_rethook_node), size);
+	if (IS_ERR(fp->rethook))
+		return PTR_ERR(fp->rethook);
+
 	return 0;
 }
 
 static void fprobe_fail_cleanup(struct fprobe *fp)
 {
-	if (fp->rethook) {
+	if (!IS_ERR_OR_NULL(fp->rethook)) {
 		/* Don't need to cleanup rethook->handler because this is not used. */
 		rethook_free(fp->rethook);
 		fp->rethook = NULL;
@@ -379,14 +371,14 @@ int unregister_fprobe(struct fprobe *fp)
 	if (!fprobe_is_registered(fp))
 		return -EINVAL;
 
-	if (fp->rethook)
+	if (!IS_ERR_OR_NULL(fp->rethook))
 		rethook_stop(fp->rethook);
 
 	ret = unregister_ftrace_function(&fp->ops);
 	if (ret < 0)
 		return ret;
 
-	if (fp->rethook)
+	if (!IS_ERR_OR_NULL(fp->rethook))
 		rethook_free(fp->rethook);
 
 	ftrace_free_filter(&fp->ops);
kernel/trace/rethook.c
@@ -8,7 +8,6 @@
 #include <linux/preempt.h>
 #include <linux/rethook.h>
 #include <linux/slab.h>
-#include <linux/sort.h>
 
 /* Return hook list (shadow stack by list) */
 
@@ -36,21 +35,7 @@ void rethook_flush_task(struct task_struct *tk)
 static void rethook_free_rcu(struct rcu_head *head)
 {
 	struct rethook *rh = container_of(head, struct rethook, rcu);
-	struct rethook_node *rhn;
-	struct freelist_node *node;
-	int count = 1;
-
-	node = rh->pool.head;
-	while (node) {
-		rhn = container_of(node, struct rethook_node, freelist);
-		node = node->next;
-		kfree(rhn);
-		count++;
-	}
-
-	/* The rh->ref is the number of pooled node + 1 */
-	if (refcount_sub_and_test(count, &rh->ref))
-		kfree(rh);
+	objpool_fini(&rh->pool);
 }
 
 /**
@@ -83,54 +68,62 @@ void rethook_free(struct rethook *rh)
 	call_rcu(&rh->rcu, rethook_free_rcu);
 }
 
+static int rethook_init_node(void *nod, void *context)
+{
+	struct rethook_node *node = nod;
+
+	node->rethook = context;
+	return 0;
+}
+
+static int rethook_fini_pool(struct objpool_head *head, void *context)
+{
+	kfree(context);
+	return 0;
+}
+
 /**
  * rethook_alloc() - Allocate struct rethook.
  * @data: a data to pass the @handler when hooking the return.
- * @handler: the return hook callback function.
+ * @handler: the return hook callback function, must NOT be NULL
+ * @size: node size: rethook node and additional data
+ * @num: number of rethook nodes to be preallocated
  *
  * Allocate and initialize a new rethook with @data and @handler.
- * Return NULL if memory allocation fails or @handler is NULL.
+ * Return pointer of new rethook, or error codes for failures.
+ *
  * Note that @handler == NULL means this rethook is going to be freed.
  */
-struct rethook *rethook_alloc(void *data, rethook_handler_t handler)
+struct rethook *rethook_alloc(void *data, rethook_handler_t handler,
+			      int size, int num)
 {
-	struct rethook *rh = kzalloc(sizeof(struct rethook), GFP_KERNEL);
+	struct rethook *rh;
 
-	if (!rh || !handler) {
-		kfree(rh);
-		return NULL;
-	}
+	if (!handler || num <= 0 || size < sizeof(struct rethook_node))
+		return ERR_PTR(-EINVAL);
+
+	rh = kzalloc(sizeof(struct rethook), GFP_KERNEL);
+	if (!rh)
+		return ERR_PTR(-ENOMEM);
 
 	rh->data = data;
 	rh->handler = handler;
-	rh->pool.head = NULL;
-	refcount_set(&rh->ref, 1);
 
+	/* initialize the objpool for rethook nodes */
+	if (objpool_init(&rh->pool, num, size, GFP_KERNEL, rh,
+			 rethook_init_node, rethook_fini_pool)) {
+		kfree(rh);
+		return ERR_PTR(-ENOMEM);
+	}
 	return rh;
 }
 
-/**
- * rethook_add_node() - Add a new node to the rethook.
- * @rh: the struct rethook.
- * @node: the struct rethook_node to be added.
- *
- * Add @node to @rh. User must allocate @node (as a part of user's
- * data structure.) The @node fields are initialized in this function.
- */
-void rethook_add_node(struct rethook *rh, struct rethook_node *node)
-{
-	node->rethook = rh;
-	freelist_add(&node->freelist, &rh->pool);
-	refcount_inc(&rh->ref);
-}
-
 static void free_rethook_node_rcu(struct rcu_head *head)
 {
 	struct rethook_node *node = container_of(head, struct rethook_node, rcu);
+	struct rethook *rh = node->rethook;
 
-	if (refcount_dec_and_test(&node->rethook->ref))
-		kfree(node->rethook);
-	kfree(node);
+	objpool_drop(node, &rh->pool);
 }
 
 /**
@@ -145,7 +138,7 @@ void rethook_recycle(struct rethook_node *node)
 	lockdep_assert_preemption_disabled();
 
 	if (likely(READ_ONCE(node->rethook->handler)))
-		freelist_add(&node->freelist, &node->rethook->pool);
+		objpool_push(node, &node->rethook->pool);
 	else
 		call_rcu(&node->rcu, free_rethook_node_rcu);
 }
@@ -161,7 +154,6 @@ NOKPROBE_SYMBOL(rethook_recycle);
 struct rethook_node *rethook_try_get(struct rethook *rh)
 {
 	rethook_handler_t handler = READ_ONCE(rh->handler);
-	struct freelist_node *fn;
 
 	lockdep_assert_preemption_disabled();
 
@@ -178,11 +170,7 @@ struct rethook_node *rethook_try_get(struct rethook *rh)
 	if (unlikely(!rcu_is_watching()))
 		return NULL;
 
-	fn = freelist_try_get(&rh->pool);
-	if (!fn)
-		return NULL;
-
-	return container_of(fn, struct rethook_node, freelist);
+	return (struct rethook_node *)objpool_pop(&rh->pool);
 }
 NOKPROBE_SYMBOL(rethook_try_get);
 
kernel/trace/trace_eprobe.c
@@ -788,12 +788,9 @@ find_and_get_event(const char *system, const char *event_name)
 		name = trace_event_name(tp_event);
 		if (!name || strcmp(event_name, name))
 			continue;
-		if (!trace_event_try_get_ref(tp_event)) {
+		if (!trace_event_try_get_ref(tp_event))
 			return NULL;
-			break;
-		}
 		return tp_event;
-		break;
 	}
 	return NULL;
 }
lib/Kconfig.debug
@@ -2954,6 +2954,17 @@ config TEST_CLOCKSOURCE_WATCHDOG
 
 	  If unsure, say N.
 
+config TEST_OBJPOOL
+	tristate "Test module for correctness and stress of objpool"
+	default n
+	depends on m && DEBUG_KERNEL
+	help
+	  This builds the "test_objpool" module that should be used for
+	  correctness verification and concurrent testings of objects
+	  allocation and reclamation.
+
+	  If unsure, say N.
+
 endif # RUNTIME_TESTING_MENU
 
 config ARCH_USE_MEMTEST
lib/Makefile
@@ -34,7 +34,7 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \
 	 is_single_threaded.o plist.o decompress.o kobject_uevent.o \
 	 earlycpio.o seq_buf.o siphash.o dec_and_lock.o \
 	 nmi_backtrace.o win_minmax.o memcat_p.o \
-	 buildid.o
+	 buildid.o objpool.o
 
 lib-$(CONFIG_PRINTK) += dump_stack.o
 lib-$(CONFIG_SMP) += cpumask.o
@@ -107,6 +107,8 @@ obj-$(CONFIG_KPROBES_SANITY_TEST) += test_kprobes.o
 obj-$(CONFIG_TEST_REF_TRACKER) += test_ref_tracker.o
 CFLAGS_test_fprobe.o += $(CC_FLAGS_FTRACE)
 obj-$(CONFIG_FPROBE_SANITY_TEST) += test_fprobe.o
+obj-$(CONFIG_TEST_OBJPOOL) += test_objpool.o
+
 #
 # CFLAGS for compiling floating point code inside the kernel. x86/Makefile turns
 # off the generation of FPU/SSE* instructions for kernel proper but FPU_FLAGS
lib/objpool.c (new file, 280 lines):

// SPDX-License-Identifier: GPL-2.0

#include <linux/objpool.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/atomic.h>
#include <linux/irqflags.h>
#include <linux/cpumask.h>
#include <linux/log2.h>

/*
 * objpool: ring-array based lockless MPMC/FIFO queues
 *
 * Copyright: wuqiang.matt@bytedance.com,mhiramat@kernel.org
 */

/* initialize percpu objpool_slot */
static int
objpool_init_percpu_slot(struct objpool_head *pool,
			 struct objpool_slot *slot,
			 int nodes, void *context,
			 objpool_init_obj_cb objinit)
{
	void *obj = (void *)&slot->entries[pool->capacity];
	int i;

	/* initialize elements of percpu objpool_slot */
	slot->mask = pool->capacity - 1;

	for (i = 0; i < nodes; i++) {
		if (objinit) {
			int rc = objinit(obj, context);
			if (rc)
				return rc;
		}
		slot->entries[slot->tail & slot->mask] = obj;
		obj = obj + pool->obj_size;
		slot->tail++;
		slot->last = slot->tail;
		pool->nr_objs++;
	}

	return 0;
}

/* allocate and initialize percpu slots */
static int
objpool_init_percpu_slots(struct objpool_head *pool, int nr_objs,
			  void *context, objpool_init_obj_cb objinit)
{
	int i, cpu_count = 0;

	for (i = 0; i < pool->nr_cpus; i++) {

		struct objpool_slot *slot;
		int nodes, size, rc;

		/* skip the cpu node which could never be present */
		if (!cpu_possible(i))
			continue;

		/* compute how many objects to be allocated with this slot */
		nodes = nr_objs / num_possible_cpus();
		if (cpu_count < (nr_objs % num_possible_cpus()))
			nodes++;
		cpu_count++;

		size = struct_size(slot, entries, pool->capacity) +
			pool->obj_size * nodes;

		/*
		 * here we allocate percpu-slot & objs together in a single
		 * allocation to make it more compact, taking advantage of
		 * warm caches and TLB hits. by default vmalloc is used to
		 * reduce the pressure on the kernel slab system. as we know,
		 * the minimal size of vmalloc is one page since vmalloc
		 * always aligns the requested size to page size
		 */
		if (pool->gfp & GFP_ATOMIC)
			slot = kmalloc_node(size, pool->gfp, cpu_to_node(i));
		else
			slot = __vmalloc_node(size, sizeof(void *), pool->gfp,
				cpu_to_node(i), __builtin_return_address(0));
		if (!slot)
			return -ENOMEM;
		memset(slot, 0, size);
		pool->cpu_slots[i] = slot;

		/* initialize the objpool_slot of cpu node i */
		rc = objpool_init_percpu_slot(pool, slot, nodes, context, objinit);
		if (rc)
			return rc;
	}

	return 0;
}

/* cleanup all percpu slots of the object pool */
static void objpool_fini_percpu_slots(struct objpool_head *pool)
{
	int i;

	if (!pool->cpu_slots)
		return;

	for (i = 0; i < pool->nr_cpus; i++)
		kvfree(pool->cpu_slots[i]);
	kfree(pool->cpu_slots);
}

/* initialize object pool and pre-allocate objects */
int objpool_init(struct objpool_head *pool, int nr_objs, int object_size,
		gfp_t gfp, void *context, objpool_init_obj_cb objinit,
		objpool_fini_cb release)
{
	int rc, capacity, slot_size;

	/* check input parameters */
	if (nr_objs <= 0 || nr_objs > OBJPOOL_NR_OBJECT_MAX ||
	    object_size <= 0 || object_size > OBJPOOL_OBJECT_SIZE_MAX)
		return -EINVAL;

	/* align up to unsigned long size */
	object_size = ALIGN(object_size, sizeof(long));

	/* calculate capacity of percpu objpool_slot */
	capacity = roundup_pow_of_two(nr_objs);
	if (!capacity)
		return -EINVAL;

	/* initialize objpool pool */
	memset(pool, 0, sizeof(struct objpool_head));
	pool->nr_cpus = nr_cpu_ids;
	pool->obj_size = object_size;
	pool->capacity = capacity;
	pool->gfp = gfp & ~__GFP_ZERO;
	pool->context = context;
	pool->release = release;
	slot_size = pool->nr_cpus * sizeof(struct objpool_slot);
	pool->cpu_slots = kzalloc(slot_size, pool->gfp);
	if (!pool->cpu_slots)
		return -ENOMEM;

	/* initialize per-cpu slots */
	rc = objpool_init_percpu_slots(pool, nr_objs, context, objinit);
	if (rc)
		objpool_fini_percpu_slots(pool);
	else
		refcount_set(&pool->ref, pool->nr_objs + 1);

	return rc;
}
EXPORT_SYMBOL_GPL(objpool_init);

/* adding object to slot, abort if the slot was already full */
static inline int
objpool_try_add_slot(void *obj, struct objpool_head *pool, int cpu)
{
	struct objpool_slot *slot = pool->cpu_slots[cpu];
	uint32_t head, tail;

	/* loading tail and head as a local snapshot, tail first */
	tail = READ_ONCE(slot->tail);

	do {
		head = READ_ONCE(slot->head);
		/* fault caught: something must be wrong */
		WARN_ON_ONCE(tail - head > pool->nr_objs);
	} while (!try_cmpxchg_acquire(&slot->tail, &tail, tail + 1));

	/* now the tail position is reserved for the given obj */
	WRITE_ONCE(slot->entries[tail & slot->mask], obj);
	/* update sequence to make this obj available for pop() */
	smp_store_release(&slot->last, tail + 1);

	return 0;
}

/* reclaim an object to object pool */
int objpool_push(void *obj, struct objpool_head *pool)
{
	unsigned long flags;
	int rc;

	/* disable local irq to avoid preemption & interruption */
	raw_local_irq_save(flags);
	rc = objpool_try_add_slot(obj, pool, raw_smp_processor_id());
	raw_local_irq_restore(flags);

	return rc;
}
EXPORT_SYMBOL_GPL(objpool_push);

/* try to retrieve object from slot */
static inline void *objpool_try_get_slot(struct objpool_head *pool, int cpu)
{
	struct objpool_slot *slot = pool->cpu_slots[cpu];
	/* load head snapshot, other cpus may change it */
	uint32_t head = smp_load_acquire(&slot->head);

	while (head != READ_ONCE(slot->last)) {
		void *obj;

		/* obj must be retrieved before moving forward head */
		obj = READ_ONCE(slot->entries[head & slot->mask]);

		/* move head forward to mark its consumption */
		if (try_cmpxchg_release(&slot->head, &head, head + 1))
			return obj;
	}

	return NULL;
}

/* allocate an object from object pool */
void *objpool_pop(struct objpool_head *pool)
{
	void *obj = NULL;
	unsigned long flags;
	int i, cpu;

	/* disable local irq to avoid preemption & interruption */
	raw_local_irq_save(flags);

	cpu = raw_smp_processor_id();
	for (i = 0; i < num_possible_cpus(); i++) {
		obj = objpool_try_get_slot(pool, cpu);
		if (obj)
			break;
		cpu = cpumask_next_wrap(cpu, cpu_possible_mask, -1, 1);
	}
	raw_local_irq_restore(flags);

	return obj;
}
EXPORT_SYMBOL_GPL(objpool_pop);

/* release whole objpool forcibly */
void objpool_free(struct objpool_head *pool)
{
	if (!pool->cpu_slots)
		return;

	/* release percpu slots */
	objpool_fini_percpu_slots(pool);

	/* call user's cleanup callback if provided */
	if (pool->release)
		pool->release(pool, pool->context);
}
EXPORT_SYMBOL_GPL(objpool_free);

/* drop the allocated object, rather than reclaiming it to the objpool */
int objpool_drop(void *obj, struct objpool_head *pool)
{
	if (!obj || !pool)
		return -EINVAL;

	if (refcount_dec_and_test(&pool->ref)) {
		objpool_free(pool);
		return 0;
	}

	return -EAGAIN;
}
EXPORT_SYMBOL_GPL(objpool_drop);

/* drop unused objects and deref objpool for releasing */
void objpool_fini(struct objpool_head *pool)
{
	int count = 1; /* extra ref for objpool itself */

	/* drop all remaining objects from objpool */
	while (objpool_pop(pool))
		count++;

	if (refcount_sub_and_test(count, &pool->ref))
		objpool_free(pool);
}
EXPORT_SYMBOL_GPL(objpool_fini);
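One property worth noting in the code above: head, tail and last are plain
uint32_t sequence numbers that are never reset, so correctness across
wraparound relies entirely on unsigned modular arithmetic. A small
illustrative check (hypothetical helper, not from this commit):

	/* in-flight count is well-defined even when tail has wrapped past 2^32 */
	static bool ring_seq_sane(uint32_t head, uint32_t tail, uint32_t nr_objs)
	{
		/* e.g. head = 0xfffffffe, tail = 0x00000001: tail - head == 3 */
		return tail - head <= nr_objs;
	}

This is the same invariant that objpool_try_add_slot() asserts with
WARN_ON_ONCE(tail - head > pool->nr_objs).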
690
lib/test_objpool.c
Normal file
690
lib/test_objpool.c
Normal file
|
@ -0,0 +1,690 @@
|
||||||
|
// SPDX-License-Identifier: GPL-2.0
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Test module for lockless object pool
|
||||||
|
*
|
||||||
|
* Copyright: wuqiang.matt@bytedance.com
|
||||||
|
*/
|
||||||
|
|
||||||
|
#include <linux/errno.h>
|
||||||
|
#include <linux/module.h>
|
||||||
|
#include <linux/moduleparam.h>
|
||||||
|
#include <linux/completion.h>
|
||||||
|
#include <linux/kthread.h>
|
||||||
|
#include <linux/slab.h>
|
||||||
|
#include <linux/vmalloc.h>
|
||||||
|
#include <linux/delay.h>
|
||||||
|
#include <linux/hrtimer.h>
|
||||||
|
#include <linux/objpool.h>
|
||||||
|
|
||||||
|
#define OT_NR_MAX_BULK (16)
|
||||||
|
|
||||||
|
/* memory usage */
|
||||||
|
struct ot_mem_stat {
|
||||||
|
atomic_long_t alloc;
|
||||||
|
atomic_long_t free;
|
||||||
|
};
|
||||||
|
|
||||||
|
/* object allocation results */
|
||||||
|
struct ot_obj_stat {
|
||||||
|
unsigned long nhits;
|
||||||
|
unsigned long nmiss;
|
||||||
|
};
|
||||||
|
|
||||||
|
/* control & results per testcase */
|
||||||
|
struct ot_data {
|
||||||
|
struct rw_semaphore start;
|
||||||
|
struct completion wait;
|
||||||
|
struct completion rcu;
|
||||||
|
atomic_t nthreads ____cacheline_aligned_in_smp;
|
||||||
|
atomic_t stop ____cacheline_aligned_in_smp;
|
||||||
|
struct ot_mem_stat kmalloc;
|
||||||
|
struct ot_mem_stat vmalloc;
|
||||||
|
struct ot_obj_stat objects;
|
||||||
|
u64 duration;
|
||||||
|
};
|
||||||
|
|
||||||
|
/* testcase */
|
||||||
|
struct ot_test {
|
||||||
|
int async; /* synchronous or asynchronous */
|
||||||
|
int mode; /* only mode 0 supported */
|
||||||
|
int objsz; /* object size */
|
||||||
|
int duration; /* ms */
|
||||||
|
int delay; /* ms */
|
||||||
|
int bulk_normal;
|
||||||
|
int bulk_irq;
|
||||||
|
unsigned long hrtimer; /* ms */
|
||||||
|
const char *name;
|
||||||
|
struct ot_data data;
|
||||||
|
};
|
||||||
|
|
||||||
|
/* per-cpu worker */
|
||||||
|
struct ot_item {
|
||||||
|
struct objpool_head *pool; /* pool head */
|
||||||
|
struct ot_test *test; /* test parameters */
|
||||||
|
|
||||||
|
void (*worker)(struct ot_item *item, int irq);
|
||||||
|
|
||||||
|
/* hrtimer control */
|
||||||
|
ktime_t hrtcycle;
|
||||||
|
struct hrtimer hrtimer;
|
||||||
|
|
||||||
|
int bulk[2]; /* for thread and irq */
|
||||||
|
int delay;
|
||||||
|
u32 niters;
|
||||||
|
|
||||||
|
/* summary per thread */
|
||||||
|
struct ot_obj_stat stat[2]; /* thread and irq */
|
||||||
|
u64 duration;
|
||||||
|
};
|
||||||
|
|
||||||
|
/*
|
||||||
|
* memory leakage checking
|
||||||
|
*/
|
||||||
|
|
||||||
|
static void *ot_kzalloc(struct ot_test *test, long size)
|
||||||
|
{
|
||||||
|
void *ptr = kzalloc(size, GFP_KERNEL);
|
||||||
|
|
||||||
|
if (ptr)
|
||||||
|
atomic_long_add(size, &test->data.kmalloc.alloc);
|
||||||
|
return ptr;
|
||||||
|
}
|
||||||
|
|
||||||
|
static void ot_kfree(struct ot_test *test, void *ptr, long size)
|
||||||
|
{
|
||||||
|
if (!ptr)
|
||||||
|
return;
|
||||||
|
atomic_long_add(size, &test->data.kmalloc.free);
|
||||||
|
kfree(ptr);
|
||||||
|
}
|
||||||
|
|
||||||
|
static void ot_mem_report(struct ot_test *test)
|
||||||
|
{
|
||||||
|
long alloc, free;
|
||||||
|
|
||||||
|
pr_info("memory allocation summary for %s\n", test->name);
|
||||||
|
|
||||||
|
alloc = atomic_long_read(&test->data.kmalloc.alloc);
|
||||||
|
free = atomic_long_read(&test->data.kmalloc.free);
|
||||||
|
pr_info(" kmalloc: %lu - %lu = %lu\n", alloc, free, alloc - free);
|
||||||
|
|
||||||
|
alloc = atomic_long_read(&test->data.vmalloc.alloc);
|
||||||
|
free = atomic_long_read(&test->data.vmalloc.free);
|
||||||
|
pr_info(" vmalloc: %lu - %lu = %lu\n", alloc, free, alloc - free);
|
||||||
|
}
|
||||||
|
|
||||||
|
/* user object instance */
struct ot_node {
	void *owner;
	unsigned long data;
	unsigned long refs;
	unsigned long payload[32];
};

/* user objpool manager */
struct ot_context {
	struct objpool_head pool; /* objpool head */
	struct ot_test *test; /* test parameters */
	void *ptr; /* user pool buffer */
	unsigned long size; /* buffer size */
	struct rcu_head rcu;
};

static DEFINE_PER_CPU(struct ot_item, ot_pcup_items);

static int ot_init_data(struct ot_data *data)
{
	memset(data, 0, sizeof(*data));
	init_rwsem(&data->start);
	init_completion(&data->wait);
	init_completion(&data->rcu);
	atomic_set(&data->nthreads, 1);

	return 0;
}

static int ot_init_node(void *nod, void *context)
{
	struct ot_context *sop = context;
	struct ot_node *on = nod;

	on->owner = &sop->pool;
	return 0;
}

static enum hrtimer_restart ot_hrtimer_handler(struct hrtimer *hrt)
{
	struct ot_item *item = container_of(hrt, struct ot_item, hrtimer);
	struct ot_test *test = item->test;

	if (atomic_read_acquire(&test->data.stop))
		return HRTIMER_NORESTART;

	/* do bulk-testings for objects pop/push */
	item->worker(item, 1);

	hrtimer_forward(hrt, hrt->base->get_time(), item->hrtcycle);
	return HRTIMER_RESTART;
}

static void ot_start_hrtimer(struct ot_item *item)
{
	if (!item->test->hrtimer)
		return;
	hrtimer_start(&item->hrtimer, item->hrtcycle, HRTIMER_MODE_REL);
}

static void ot_stop_hrtimer(struct ot_item *item)
{
	if (!item->test->hrtimer)
		return;
	hrtimer_cancel(&item->hrtimer);
}

static int ot_init_hrtimer(struct ot_item *item, unsigned long hrtimer)
{
	struct hrtimer *hrt = &item->hrtimer;

	if (!hrtimer)
		return -ENOENT;

	item->hrtcycle = ktime_set(0, hrtimer * 1000000UL);
	hrtimer_init(hrt, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hrt->function = ot_hrtimer_handler;
	return 0;
}

static int ot_init_cpu_item(struct ot_item *item,
			struct ot_test *test,
			struct objpool_head *pool,
			void (*worker)(struct ot_item *, int))
{
	memset(item, 0, sizeof(*item));
	item->pool = pool;
	item->test = test;
	item->worker = worker;

	item->bulk[0] = test->bulk_normal;
	item->bulk[1] = test->bulk_irq;
	item->delay = test->delay;

	/* initialize hrtimer */
	ot_init_hrtimer(item, item->test->hrtimer);
	return 0;
}

static int ot_thread_worker(void *arg)
{
	struct ot_item *item = arg;
	struct ot_test *test = item->test;
	ktime_t start;

	atomic_inc(&test->data.nthreads);
	down_read(&test->data.start);
	up_read(&test->data.start);
	start = ktime_get();
	ot_start_hrtimer(item);
	do {
		if (atomic_read_acquire(&test->data.stop))
			break;
		/* do bulk-testings for objects pop/push */
		item->worker(item, 0);
	} while (!kthread_should_stop());
	ot_stop_hrtimer(item);
	item->duration = (u64) ktime_us_delta(ktime_get(), start);
	if (atomic_dec_and_test(&test->data.nthreads))
		complete(&test->data.wait);

	return 0;
}

static void ot_perf_report(struct ot_test *test, u64 duration)
{
	struct ot_obj_stat total, normal = {0}, irq = {0};
	int cpu, nthreads = 0;

	pr_info("\n");
	pr_info("Testing summary for %s\n", test->name);

	for_each_possible_cpu(cpu) {
		struct ot_item *item = per_cpu_ptr(&ot_pcup_items, cpu);
		if (!item->duration)
			continue;
		normal.nhits += item->stat[0].nhits;
		normal.nmiss += item->stat[0].nmiss;
		irq.nhits += item->stat[1].nhits;
		irq.nmiss += item->stat[1].nmiss;
		pr_info("CPU: %d duration: %lluus\n", cpu, item->duration);
		pr_info("\tthread:\t%16lu hits \t%16lu miss\n",
			item->stat[0].nhits, item->stat[0].nmiss);
		pr_info("\tirq: \t%16lu hits \t%16lu miss\n",
			item->stat[1].nhits, item->stat[1].nmiss);
		pr_info("\ttotal: \t%16lu hits \t%16lu miss\n",
			item->stat[0].nhits + item->stat[1].nhits,
			item->stat[0].nmiss + item->stat[1].nmiss);
		nthreads++;
	}

	total.nhits = normal.nhits + irq.nhits;
	total.nmiss = normal.nmiss + irq.nmiss;

	pr_info("ALL: \tnthreads: %d duration: %lluus\n", nthreads, duration);
	pr_info("SUM: \t%16lu hits \t%16lu miss\n",
		total.nhits, total.nmiss);

	test->data.objects = total;
	test->data.duration = duration;
}

/*
 * synchronous test cases for objpool manipulation
 */

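/*
 * A minimal sketch of the objpool lifecycle exercised below (API names
 * as used in this module; illustrative summary, not part of the API
 * documentation):
 *
 *	struct objpool_head pool;
 *
 *	objpool_init(&pool, nr_objs, objsz, gfp, context, objinit, release);
 *	obj = objpool_pop(&pool);	- NULL when the pool is empty
 *	objpool_push(obj, &pool);	- return the object for reuse
 *	objpool_fini(&pool);		- drop the pool's own reference
 */
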
/* objpool manipulation for synchronous mode (percpu objpool) */
static struct ot_context *ot_init_sync_m0(struct ot_test *test)
{
	struct ot_context *sop = NULL;
	int max = num_possible_cpus() << 3;
	gfp_t gfp = GFP_KERNEL;

	sop = (struct ot_context *)ot_kzalloc(test, sizeof(*sop));
	if (!sop)
		return NULL;
	sop->test = test;
	if (test->objsz < 512)
		gfp = GFP_ATOMIC;

	if (objpool_init(&sop->pool, max, test->objsz,
			gfp, sop, ot_init_node, NULL)) {
		ot_kfree(test, sop, sizeof(*sop));
		return NULL;
	}
	WARN_ON(max != sop->pool.nr_objs);

	return sop;
}

static void ot_fini_sync(struct ot_context *sop)
{
	objpool_fini(&sop->pool);
	ot_kfree(sop->test, sop, sizeof(*sop));
}

struct {
	struct ot_context * (*init)(struct ot_test *oc);
	void (*fini)(struct ot_context *sop);
} g_ot_sync_ops[] = {
	{.init = ot_init_sync_m0, .fini = ot_fini_sync},
};

/*
 * synchronous test cases: performance mode
 */

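/*
 * Each call pops up to bulk[irq] objects and pushes back whatever it
 * got: a non-NULL pop counts as a hit, a NULL pop (pool temporarily
 * exhausted) counts as a miss.
 */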
static void ot_bulk_sync(struct ot_item *item, int irq)
{
	struct ot_node *nods[OT_NR_MAX_BULK];
	int i;

	for (i = 0; i < item->bulk[irq]; i++)
		nods[i] = objpool_pop(item->pool);

	if (!irq && (item->delay || !(++(item->niters) & 0x7FFF)))
		msleep(item->delay);

	while (i-- > 0) {
		struct ot_node *on = nods[i];
		if (on) {
			on->refs++;
			objpool_push(on, item->pool);
			item->stat[irq].nhits++;
		} else {
			item->stat[irq].nmiss++;
		}
	}
}

static int ot_start_sync(struct ot_test *test)
{
	struct ot_context *sop;
	ktime_t start;
	u64 duration;
	unsigned long timeout;
	int cpu;

	/* initialize objpool for synchronous testcase */
	sop = g_ot_sync_ops[test->mode].init(test);
	if (!sop)
		return -ENOMEM;

	/* grab rwsem to block testing threads */
	down_write(&test->data.start);

	for_each_possible_cpu(cpu) {
		struct ot_item *item = per_cpu_ptr(&ot_pcup_items, cpu);
		struct task_struct *work;

		ot_init_cpu_item(item, test, &sop->pool, ot_bulk_sync);

		/* skip offline cpus */
		if (!cpu_online(cpu))
			continue;

		work = kthread_create_on_node(ot_thread_worker, item,
				cpu_to_node(cpu), "ot_worker_%d", cpu);
		if (IS_ERR(work)) {
			pr_err("failed to create thread for cpu %d\n", cpu);
		} else {
			kthread_bind(work, cpu);
			wake_up_process(work);
		}
	}

	/* wait a while to make sure all threads are waiting at the start line */
	msleep(20);

	/* in case no threads were created (insufficient memory?) */
	if (atomic_dec_and_test(&test->data.nthreads))
		complete(&test->data.wait);

	// sched_set_fifo_low(current);

	/* start objpool testing threads */
	start = ktime_get();
	up_write(&test->data.start);

	/* yield cpu to worker threads for duration ms */
	timeout = msecs_to_jiffies(test->duration);
	schedule_timeout_interruptible(timeout);

	/* tell worker threads to quit */
	atomic_set_release(&test->data.stop, 1);

	/* wait for all worker threads to finish and quit */
	wait_for_completion(&test->data.wait);
	duration = (u64) ktime_us_delta(ktime_get(), start);

	/* cleanup objpool */
	g_ot_sync_ops[test->mode].fini(sop);

	/* report testing summary and performance results */
	ot_perf_report(test, duration);

	/* report memory allocation summary */
	ot_mem_report(test);

	return 0;
}

/*
 * asynchronous test cases: pool lifecycle controlled by refcount
 */

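/*
 * Teardown is deferred through an RCU grace period: by the time the
 * callback below runs, every cpu has passed a quiescent state, so all
 * workers must already have observed data.stop == 1.
 */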
static void ot_fini_async_rcu(struct rcu_head *rcu)
{
	struct ot_context *sop = container_of(rcu, struct ot_context, rcu);
	struct ot_test *test = sop->test;

	/* here all cpus are aware of the stop event: test->data.stop = 1 */
	WARN_ON(!atomic_read_acquire(&test->data.stop));

	objpool_fini(&sop->pool);
	complete(&test->data.rcu);
}

static void ot_fini_async(struct ot_context *sop)
{
	/* make sure the stop event is acknowledged by all cores */
	call_rcu(&sop->rcu, ot_fini_async_rcu);
}

static int ot_objpool_release(struct objpool_head *head, void *context)
{
	struct ot_context *sop = context;

	WARN_ON(!head || !sop || head != &sop->pool);

	/* do context cleaning if needed */
	if (sop)
		ot_kfree(sop->test, sop, sizeof(*sop));

	return 0;
}

static struct ot_context *ot_init_async_m0(struct ot_test *test)
{
	struct ot_context *sop = NULL;
	int max = num_possible_cpus() << 3;
	gfp_t gfp = GFP_KERNEL;

	sop = (struct ot_context *)ot_kzalloc(test, sizeof(*sop));
	if (!sop)
		return NULL;
	sop->test = test;
	if (test->objsz < 512)
		gfp = GFP_ATOMIC;

	if (objpool_init(&sop->pool, max, test->objsz, gfp, sop,
			ot_init_node, ot_objpool_release)) {
		ot_kfree(test, sop, sizeof(*sop));
		return NULL;
	}
	WARN_ON(max != sop->pool.nr_objs);

	return sop;
}

struct {
	struct ot_context * (*init)(struct ot_test *oc);
	void (*fini)(struct ot_context *sop);
} g_ot_async_ops[] = {
	{.init = ot_init_async_m0, .fini = ot_fini_async},
};

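/*
 * Async lifecycle: while the test runs, popped objects are pushed back
 * for reuse; once the stop event is seen they are dropped instead, and
 * the last objpool_drop() releases the pool via ot_objpool_release().
 */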
static void ot_nod_recycle(struct ot_node *on, struct objpool_head *pool,
			int release)
{
	struct ot_context *sop;

	on->refs++;

	if (!release) {
		/* push object back to objpool for reuse */
		objpool_push(on, pool);
		return;
	}

	sop = container_of(pool, struct ot_context, pool);
	WARN_ON(sop != pool->context);

	/* unref objpool with nod removed forever */
	objpool_drop(on, pool);
}

static void ot_bulk_async(struct ot_item *item, int irq)
{
	struct ot_test *test = item->test;
	struct ot_node *nods[OT_NR_MAX_BULK];
	int i, stop;

	for (i = 0; i < item->bulk[irq]; i++)
		nods[i] = objpool_pop(item->pool);

	if (!irq) {
		if (item->delay || !(++(item->niters) & 0x7FFF))
			msleep(item->delay);
		get_cpu();
	}

	stop = atomic_read_acquire(&test->data.stop);

	/* drop all objects and deref objpool */
	while (i-- > 0) {
		struct ot_node *on = nods[i];

		if (on) {
			on->refs++;
			ot_nod_recycle(on, item->pool, stop);
			item->stat[irq].nhits++;
		} else {
			item->stat[irq].nmiss++;
		}
	}

	if (!irq)
		put_cpu();
}

static int ot_start_async(struct ot_test *test)
{
	struct ot_context *sop;
	ktime_t start;
	u64 duration;
	unsigned long timeout;
	int cpu;

	/* initialize objpool for asynchronous testcase */
	sop = g_ot_async_ops[test->mode].init(test);
	if (!sop)
		return -ENOMEM;

	/* grab rwsem to block testing threads */
	down_write(&test->data.start);

	for_each_possible_cpu(cpu) {
		struct ot_item *item = per_cpu_ptr(&ot_pcup_items, cpu);
		struct task_struct *work;

		ot_init_cpu_item(item, test, &sop->pool, ot_bulk_async);

		/* skip offline cpus */
		if (!cpu_online(cpu))
			continue;

		work = kthread_create_on_node(ot_thread_worker, item,
				cpu_to_node(cpu), "ot_worker_%d", cpu);
		if (IS_ERR(work)) {
			pr_err("failed to create thread for cpu %d\n", cpu);
		} else {
			kthread_bind(work, cpu);
			wake_up_process(work);
		}
	}

	/* wait a while to make sure all threads are waiting at the start line */
	msleep(20);

	/* in case no threads were created (insufficient memory?) */
	if (atomic_dec_and_test(&test->data.nthreads))
		complete(&test->data.wait);

	/* start objpool testing threads */
	start = ktime_get();
	up_write(&test->data.start);

	/* yield cpu to worker threads for duration ms */
	timeout = msecs_to_jiffies(test->duration);
	schedule_timeout_interruptible(timeout);

	/* tell worker threads to quit */
	atomic_set_release(&test->data.stop, 1);

	/* do async-finalization */
	g_ot_async_ops[test->mode].fini(sop);

	/* wait for all worker threads to finish and quit */
	wait_for_completion(&test->data.wait);
	duration = (u64) ktime_us_delta(ktime_get(), start);

	/* assure rcu callback is triggered */
	wait_for_completion(&test->data.rcu);

	/*
	 * now we are sure that objpool is finalized either
	 * by rcu callback or by worker threads
	 */

	/* report testing summary and performance results */
	ot_perf_report(test, duration);

	/* report memory allocation summary */
	ot_mem_report(test);

	return 0;
}

/*
 * predefined testing cases:
 *	synchronous case / overrun case / async case
 *
 * async:       synchronous or asynchronous testing
 * mode:        only mode 0 supported
 * objsz:       object size
 * duration:    int, total test time in ms
 * delay:       int, delay (in ms) between each iteration
 * bulk_normal: int, repeat times for thread worker
 * bulk_irq:    int, repeat times for irq consumer
 * hrtimer:     unsigned long, hrtimer interval in ms
 * name:        char *, tag for current test ot_item
 */

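/*
 * For example, the first entry below,
 *	{0, 0, NODE_COMPACT, 1000, 0, 1, 0, 0, "sync: percpu objpool"},
 * reads as: synchronous mode 0, compact object size, 1000 ms run,
 * no inter-iteration delay, 1 object per thread bulk, no irq bulk,
 * hrtimer disabled.
 */
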
#define NODE_COMPACT sizeof(struct ot_node)
#define NODE_VMALLOC (512)

struct ot_test g_testcases[] = {

	/* sync & normal */
	{0, 0, NODE_COMPACT, 1000, 0, 1, 0, 0, "sync: percpu objpool"},
	{0, 0, NODE_VMALLOC, 1000, 0, 1, 0, 0, "sync: percpu objpool from vmalloc"},

	/* sync & hrtimer */
	{0, 0, NODE_COMPACT, 1000, 0, 1, 1, 4, "sync & hrtimer: percpu objpool"},
	{0, 0, NODE_VMALLOC, 1000, 0, 1, 1, 4, "sync & hrtimer: percpu objpool from vmalloc"},

	/* sync & overrun */
	{0, 0, NODE_COMPACT, 1000, 0, 16, 0, 0, "sync overrun: percpu objpool"},
	{0, 0, NODE_VMALLOC, 1000, 0, 16, 0, 0, "sync overrun: percpu objpool from vmalloc"},

	/* async mode */
	{1, 0, NODE_COMPACT, 1000, 100, 1, 0, 0, "async: percpu objpool"},
	{1, 0, NODE_VMALLOC, 1000, 100, 1, 0, 0, "async: percpu objpool from vmalloc"},

	/* async + hrtimer mode */
	{1, 0, NODE_COMPACT, 1000, 0, 4, 4, 4, "async & hrtimer: percpu objpool"},
	{1, 0, NODE_VMALLOC, 1000, 0, 4, 4, 4, "async & hrtimer: percpu objpool from vmalloc"},
};

static int __init ot_mod_init(void)
{
	int i;

	/* perform testings */
	for (i = 0; i < ARRAY_SIZE(g_testcases); i++) {
		ot_init_data(&g_testcases[i].data);
		if (g_testcases[i].async)
			ot_start_async(&g_testcases[i]);
		else
			ot_start_sync(&g_testcases[i]);
	}

	/* show tests summary */
	pr_info("\n");
	pr_info("Summary of testcases:\n");
	for (i = 0; i < ARRAY_SIZE(g_testcases); i++) {
		pr_info(" duration: %lluus \thits: %10lu \tmiss: %10lu \t%s\n",
			g_testcases[i].data.duration, g_testcases[i].data.objects.nhits,
			g_testcases[i].data.objects.nmiss, g_testcases[i].name);
	}

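	/*
	 * a nonzero return keeps the module from staying loaded;
	 * all tests have already run by this point
	 */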
	return -EAGAIN;
}

static void __exit ot_mod_exit(void)
{
}

module_init(ot_mod_init);
module_exit(ot_mod_exit);

MODULE_LICENSE("GPL");
@@ -35,7 +35,7 @@ struct my_data {
 	ktime_t entry_stamp;
 };
 
-/* Here we use the entry_hanlder to timestamp function entry */
+/* Here we use the entry_handler to timestamp function entry */
 static int entry_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
 {
 	struct my_data *data;