binder: Scaffolding for binder_alloc KUnit tests

Add setup and teardown for testing binder allocator code with KUnit.
Include minimal test cases to verify that tests are initialized
correctly.

Tested-by: Rae Moar <rmoar@google.com>
Signed-off-by: Tiffany Yang <ynaffit@google.com>
Acked-by: Carlos Llamas <cmllamas@google.com>
Link: https://lore.kernel.org/r/20250714185321.2417234-5-ynaffit@google.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

diff --git a/drivers/android/Kconfig b/drivers/android/Kconfig

@@ -47,4 +47,15 @@ config ANDROID_BINDER_IPC_SELFTEST
	  exhaustively with combinations of various buffer sizes and
	  alignments.

config ANDROID_BINDER_ALLOC_KUNIT_TEST
	tristate "KUnit Tests for Android Binder Alloc" if !KUNIT_ALL_TESTS
	depends on ANDROID_BINDER_IPC && KUNIT
	default KUNIT_ALL_TESTS
	help
	  This feature builds the binder alloc KUnit tests.

	  Each test case runs using a pared-down binder_alloc struct and
	  test-specific freelist, which allows this KUnit module to be loaded
	  for testing without interfering with a running system.

endmenu

diff --git a/drivers/android/Makefile b/drivers/android/Makefile

@@ -4,3 +4,4 @@ ccflags-y += -I$(src) # needed for trace events
obj-$(CONFIG_ANDROID_BINDERFS) += binderfs.o
obj-$(CONFIG_ANDROID_BINDER_IPC) += binder.o binder_alloc.o
obj-$(CONFIG_ANDROID_BINDER_IPC_SELFTEST) += binder_alloc_selftest.o
obj-$(CONFIG_ANDROID_BINDER_ALLOC_KUNIT_TEST) += tests/

diff --git a/drivers/android/binder.c b/drivers/android/binder.c

@@ -68,6 +68,8 @@
#include <linux/sizes.h>
#include <linux/ktime.h>
#include <kunit/visibility.h>
#include <uapi/linux/android/binder.h>
#include <linux/cacheflush.h>
@@ -5947,10 +5949,11 @@ static void binder_vma_close(struct vm_area_struct *vma)
	binder_alloc_vma_close(&proc->alloc);
}

static vm_fault_t binder_vm_fault(struct vm_fault *vmf)
VISIBLE_IF_KUNIT vm_fault_t binder_vm_fault(struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}
EXPORT_SYMBOL_IF_KUNIT(binder_vm_fault);

static const struct vm_operations_struct binder_vm_ops = {
	.open = binder_vma_open,

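Note on the mechanism (paraphrasing <kunit/visibility.h>; this helper is not part of this diff): VISIBLE_IF_KUNIT and EXPORT_SYMBOL_IF_KUNIT only take effect when CONFIG_KUNIT is enabled, so binder_vm_fault() keeps its static linkage in production builds and is exported solely into a KUnit-only symbol namespace for test builds. Roughly:

#if IS_ENABLED(CONFIG_KUNIT)
/* Drop 'static' and export the symbol into a namespace reserved for tests. */
#define VISIBLE_IF_KUNIT
#define EXPORT_SYMBOL_IF_KUNIT(symbol) \
	EXPORT_SYMBOL_NS(symbol, "EXPORTED_FOR_KUNIT_TESTING")
#else
/* Without KUnit, the symbol stays static and nothing is exported. */
#define VISIBLE_IF_KUNIT static
#define EXPORT_SYMBOL_IF_KUNIT(symbol)
#endif

The test module added below then gains access to these symbols with MODULE_IMPORT_NS("EXPORTED_FOR_KUNIT_TESTING").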
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c

@@ -23,6 +23,7 @@
#include <linux/uaccess.h>
#include <linux/highmem.h>
#include <linux/sizes.h>
#include <kunit/visibility.h>

#include "binder_alloc.h"
#include "binder_trace.h"
@@ -57,13 +58,14 @@ static struct binder_buffer *binder_buffer_prev(struct binder_buffer *buffer)
	return list_entry(buffer->entry.prev, struct binder_buffer, entry);
}

static size_t binder_alloc_buffer_size(struct binder_alloc *alloc,
				       struct binder_buffer *buffer)
VISIBLE_IF_KUNIT size_t binder_alloc_buffer_size(struct binder_alloc *alloc,
						 struct binder_buffer *buffer)
{
	if (list_is_last(&buffer->entry, &alloc->buffers))
		return alloc->vm_start + alloc->buffer_size - buffer->user_data;
	return binder_buffer_next(buffer)->user_data - buffer->user_data;
}
EXPORT_SYMBOL_IF_KUNIT(binder_alloc_buffer_size);

static void binder_insert_free_buffer(struct binder_alloc *alloc,
				      struct binder_buffer *new_buffer)
@@ -955,7 +957,7 @@ err_invalid_mm:
		       failure_string, ret);
	return ret;
}
EXPORT_SYMBOL_IF_KUNIT(binder_alloc_mmap_handler);

void binder_alloc_deferred_release(struct binder_alloc *alloc)
{
@@ -1024,6 +1026,7 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
		     "%s: %d buffers %d, pages %d\n",
		     __func__, alloc->pid, buffers, page_count);
}
EXPORT_SYMBOL_IF_KUNIT(binder_alloc_deferred_release);

/**
 * binder_alloc_print_allocated() - print buffer info
@@ -1116,6 +1119,7 @@ void binder_alloc_vma_close(struct binder_alloc *alloc)
{
	binder_alloc_set_mapped(alloc, false);
}
EXPORT_SYMBOL_IF_KUNIT(binder_alloc_vma_close);

/**
 * binder_alloc_free_page() - shrinker callback to free pages
@@ -1223,8 +1227,8 @@ binder_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)

static struct shrinker *binder_shrinker;

static void __binder_alloc_init(struct binder_alloc *alloc,
				struct list_lru *freelist)
VISIBLE_IF_KUNIT void __binder_alloc_init(struct binder_alloc *alloc,
					  struct list_lru *freelist)
{
	alloc->pid = current->group_leader->pid;
	alloc->mm = current->mm;
@@ -1233,6 +1237,7 @@ static void __binder_alloc_init(struct binder_alloc *alloc,
	INIT_LIST_HEAD(&alloc->buffers);
	alloc->freelist = freelist;
}
EXPORT_SYMBOL_IF_KUNIT(__binder_alloc_init);

/**
 * binder_alloc_init() - called by binder_open() for per-proc initialization
EXPORT_SYMBOL_IF_KUNIT(__binder_alloc_init);
/**
* binder_alloc_init() - called by binder_open() for per-proc initialization

diff --git a/drivers/android/binder_alloc.h b/drivers/android/binder_alloc.h

@@ -184,5 +184,11 @@ int binder_alloc_copy_from_buffer(struct binder_alloc *alloc,
				  binder_size_t buffer_offset,
				  size_t bytes);

#if IS_ENABLED(CONFIG_KUNIT)
void __binder_alloc_init(struct binder_alloc *alloc, struct list_lru *freelist);
size_t binder_alloc_buffer_size(struct binder_alloc *alloc,
				struct binder_buffer *buffer);
#endif

#endif /* _LINUX_BINDER_ALLOC_H */

diff --git a/drivers/android/binder_internal.h b/drivers/android/binder_internal.h

@@ -592,4 +592,8 @@ void binder_add_device(struct binder_device *device);
 */
void binder_remove_device(struct binder_device *device);

#if IS_ENABLED(CONFIG_KUNIT)
vm_fault_t binder_vm_fault(struct vm_fault *vmf);
#endif

#endif /* _LINUX_BINDER_INTERNAL_H */

diff --git a/drivers/android/tests/.kunitconfig b/drivers/android/tests/.kunitconfig

@@ -0,0 +1,3 @@
CONFIG_KUNIT=y
CONFIG_ANDROID_BINDER_IPC=y
CONFIG_ANDROID_BINDER_ALLOC_KUNIT_TEST=y
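With this fragment in place, the suite can be exercised through kunit_tool in the usual way (standard KUnit workflow, not something introduced by this patch), e.g. ./tools/testing/kunit/kunit.py run --kunitconfig=drivers/android/tests (directory assumed from the new tests/ Makefile below); since the Kconfig entry is tristate, the tests can also be built as a module and loaded on a KUnit-enabled kernel.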

diff --git a/drivers/android/tests/Makefile b/drivers/android/tests/Makefile

@@ -0,0 +1,3 @@
# SPDX-License-Identifier: GPL-2.0-only

obj-$(CONFIG_ANDROID_BINDER_ALLOC_KUNIT_TEST) += binder_alloc_kunit.o

diff --git a/drivers/android/tests/binder_alloc_kunit.c b/drivers/android/tests/binder_alloc_kunit.c

@@ -0,0 +1,166 @@
// SPDX-License-Identifier: GPL-2.0
/*
 * Test cases for binder allocator code
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <kunit/test.h>
#include <linux/anon_inodes.h>
#include <linux/err.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/sizes.h>

#include "../binder_alloc.h"
#include "../binder_internal.h"

MODULE_IMPORT_NS("EXPORTED_FOR_KUNIT_TESTING");

#define BINDER_MMAP_SIZE SZ_128K

struct binder_alloc_test {
	struct binder_alloc alloc;
	struct list_lru binder_test_freelist;
	struct file *filp;
	unsigned long mmap_uaddr;
};

static void binder_alloc_test_init_freelist(struct kunit *test)
{
	struct binder_alloc_test *priv = test->priv;

	KUNIT_EXPECT_PTR_EQ(test, priv->alloc.freelist,
			    &priv->binder_test_freelist);
}

static void binder_alloc_test_mmap(struct kunit *test)
{
	struct binder_alloc_test *priv = test->priv;
	struct binder_alloc *alloc = &priv->alloc;
	struct binder_buffer *buf;
	struct rb_node *n;

	KUNIT_EXPECT_EQ(test, alloc->mapped, true);
	KUNIT_EXPECT_EQ(test, alloc->buffer_size, BINDER_MMAP_SIZE);

	n = rb_first(&alloc->allocated_buffers);
	KUNIT_EXPECT_PTR_EQ(test, n, NULL);

	n = rb_first(&alloc->free_buffers);
	buf = rb_entry(n, struct binder_buffer, rb_node);
	KUNIT_EXPECT_EQ(test, binder_alloc_buffer_size(alloc, buf),
			BINDER_MMAP_SIZE);
	KUNIT_EXPECT_TRUE(test, list_is_last(&buf->entry, &alloc->buffers));
}

/* ===== End test cases ===== */

static void binder_alloc_test_vma_close(struct vm_area_struct *vma)
{
	struct binder_alloc *alloc = vma->vm_private_data;

	binder_alloc_vma_close(alloc);
}

static const struct vm_operations_struct binder_alloc_test_vm_ops = {
	.close = binder_alloc_test_vma_close,
	.fault = binder_vm_fault,
};

static int binder_alloc_test_mmap_handler(struct file *filp,
					   struct vm_area_struct *vma)
{
	struct binder_alloc *alloc = filp->private_data;

	vm_flags_mod(vma, VM_DONTCOPY | VM_MIXEDMAP, VM_MAYWRITE);

	vma->vm_ops = &binder_alloc_test_vm_ops;
	vma->vm_private_data = alloc;

	return binder_alloc_mmap_handler(alloc, vma);
}

static const struct file_operations binder_alloc_test_fops = {
	.mmap = binder_alloc_test_mmap_handler,
};

static int binder_alloc_test_init(struct kunit *test)
{
	struct binder_alloc_test *priv;
	int ret;

	priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;
	test->priv = priv;

	ret = list_lru_init(&priv->binder_test_freelist);
	if (ret) {
		kunit_err(test, "Failed to initialize test freelist\n");
		return ret;
	}

	/* __binder_alloc_init requires mm to be attached */
	ret = kunit_attach_mm();
	if (ret) {
		kunit_err(test, "Failed to attach mm\n");
		return ret;
	}
	__binder_alloc_init(&priv->alloc, &priv->binder_test_freelist);

	priv->filp = anon_inode_getfile("binder_alloc_kunit",
					&binder_alloc_test_fops, &priv->alloc,
					O_RDWR | O_CLOEXEC);
	if (IS_ERR_OR_NULL(priv->filp)) {
		kunit_err(test, "Failed to open binder alloc test driver file\n");
		return priv->filp ? PTR_ERR(priv->filp) : -ENOMEM;
	}

	priv->mmap_uaddr = kunit_vm_mmap(test, priv->filp, 0, BINDER_MMAP_SIZE,
					 PROT_READ, MAP_PRIVATE | MAP_NORESERVE,
					 0);
	if (!priv->mmap_uaddr) {
		kunit_err(test, "Could not map the test's transaction memory\n");
		return -ENOMEM;
	}

	return 0;
}

static void binder_alloc_test_exit(struct kunit *test)
{
	struct binder_alloc_test *priv = test->priv;

	/* Close the backing file to make sure binder_alloc_vma_close runs */
	if (!IS_ERR_OR_NULL(priv->filp))
		fput(priv->filp);

	if (priv->alloc.mm)
		binder_alloc_deferred_release(&priv->alloc);

	/* Make sure freelist is empty */
	KUNIT_EXPECT_EQ(test, list_lru_count(&priv->binder_test_freelist), 0);
	list_lru_destroy(&priv->binder_test_freelist);
}

static struct kunit_case binder_alloc_test_cases[] = {
	KUNIT_CASE(binder_alloc_test_init_freelist),
	KUNIT_CASE(binder_alloc_test_mmap),
	{}
};

static struct kunit_suite binder_alloc_test_suite = {
	.name = "binder_alloc",
	.test_cases = binder_alloc_test_cases,
	.init = binder_alloc_test_init,
	.exit = binder_alloc_test_exit,
};

kunit_test_suite(binder_alloc_test_suite);

MODULE_AUTHOR("Tiffany Yang <ynaffit@google.com>");
MODULE_DESCRIPTION("Binder Alloc KUnit tests");
MODULE_LICENSE("GPL");
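
As an illustration of how later patches in the series can build on this scaffolding, a new case only needs to work against the pared-down allocator handed over via test->priv and then be registered in binder_alloc_test_cases[]. The sketch below is hypothetical (the case name and expectation are not part of this commit); it assumes the existing allocator behaviour that binder_alloc_mmap_handler() reserves half of the mapping for async transactions:

/* Hypothetical follow-on case, not part of this patch. */
static void binder_alloc_test_async_space(struct kunit *test)
{
	struct binder_alloc_test *priv = test->priv;

	/* binder_alloc_mmap_handler() sets free_async_space to half the map. */
	KUNIT_EXPECT_EQ(test, priv->alloc.free_async_space,
			BINDER_MMAP_SIZE / 2);
}

Such a case would be wired up by adding KUNIT_CASE(binder_alloc_test_async_space) to binder_alloc_test_cases[] above.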