/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Data Access Monitor Unit Tests
 *
 * Copyright 2019 Amazon.com, Inc. or its affiliates. All rights reserved.
 *
 * Author: SeongJae Park <sj@kernel.org>
 */

#ifdef CONFIG_DAMON_KUNIT_TEST

#ifndef _DAMON_CORE_TEST_H
#define _DAMON_CORE_TEST_H

#include <kunit/test.h>
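
/*
 * Test the basic lifecycle of 'struct damon_region': creation with given
 * start/end addresses, linking to and unlinking from a target, and
 * destruction.
 */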
static void damon_test_regions(struct kunit *test)
{
	struct damon_region *r;
	struct damon_target *t;

	r = damon_new_region(1, 2);
	KUNIT_EXPECT_EQ(test, 1ul, r->ar.start);
	KUNIT_EXPECT_EQ(test, 2ul, r->ar.end);
	KUNIT_EXPECT_EQ(test, 0u, r->nr_accesses);

	t = damon_new_target();
	KUNIT_EXPECT_EQ(test, 0u, damon_nr_regions(t));

	damon_add_region(r, t);
	KUNIT_EXPECT_EQ(test, 1u, damon_nr_regions(t));

	damon_destroy_region(r, t);
	KUNIT_EXPECT_EQ(test, 0u, damon_nr_regions(t));

	damon_free_target(t);
}
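
/* Return the number of monitoring targets of the given context. */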
static unsigned int nr_damon_targets(struct damon_ctx *ctx)
{
	struct damon_target *t;
	unsigned int nr_targets = 0;

	damon_for_each_target(t, ctx)
		nr_targets++;

	return nr_targets;
}
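
/*
 * Test adding a target to, and destroying a target from, a monitoring
 * context.
 */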
static void damon_test_target(struct kunit *test)
{
	struct damon_ctx *c = damon_new_ctx();
	struct damon_target *t;

	t = damon_new_target();
	KUNIT_EXPECT_EQ(test, 0u, nr_damon_targets(c));

	damon_add_target(c, t);
	KUNIT_EXPECT_EQ(test, 1u, nr_damon_targets(c));

	damon_destroy_target(t, c);
	KUNIT_EXPECT_EQ(test, 0u, nr_damon_targets(c));

	damon_destroy_ctx(c);
}

/*
 * Test kdamond_reset_aggregated()
 *
 * DAMON checks access to each region and aggregates this information as the
 * access frequency of each region. In detail, it increases '->nr_accesses' of
 * regions that an access has confirmed. 'kdamond_reset_aggregated()' flushes
 * the aggregated information ('->nr_accesses' of each region) to the result
 * buffer. As a result of the flushing, the '->nr_accesses' of regions are
 * initialized to zero.
 */
static void damon_test_aggregate(struct kunit *test)
{
	struct damon_ctx *ctx = damon_new_ctx();
	unsigned long saddr[][3] = {{10, 20, 30}, {5, 42, 49}, {13, 33, 55} };
	unsigned long eaddr[][3] = {{15, 27, 40}, {31, 45, 55}, {23, 44, 66} };
	unsigned long accesses[][3] = {{42, 95, 84}, {10, 20, 30}, {0, 1, 2} };
	struct damon_target *t;
	struct damon_region *r;
	int it, ir;

	for (it = 0; it < 3; it++) {
		t = damon_new_target();
		damon_add_target(ctx, t);
	}

	it = 0;
	damon_for_each_target(t, ctx) {
		for (ir = 0; ir < 3; ir++) {
			r = damon_new_region(saddr[it][ir], eaddr[it][ir]);
			r->nr_accesses = accesses[it][ir];
			r->nr_accesses_bp = accesses[it][ir] * 10000;
			damon_add_region(r, t);
		}
		it++;
	}
	kdamond_reset_aggregated(ctx);
	it = 0;
	damon_for_each_target(t, ctx) {
		ir = 0;
		/* '->nr_accesses' should be zeroed */
		damon_for_each_region(r, t) {
			KUNIT_EXPECT_EQ(test, 0u, r->nr_accesses);
			ir++;
		}
		/* regions should be preserved */
		KUNIT_EXPECT_EQ(test, 3, ir);
		it++;
	}
	/* targets also should be preserved */
	KUNIT_EXPECT_EQ(test, 3, it);

	damon_destroy_ctx(ctx);
}
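
/*
 * Test damon_split_region_at(): splitting a region at a given address should
 * produce two regions covering the original range, while both halves keep the
 * access monitoring results of the original region.
 */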
static void damon_test_split_at(struct kunit *test)
{
	struct damon_ctx *c = damon_new_ctx();
	struct damon_target *t;
	struct damon_region *r, *r_new;

	t = damon_new_target();
	r = damon_new_region(0, 100);
	r->nr_accesses_bp = 420000;
	r->nr_accesses = 42;
	r->last_nr_accesses = 15;
	damon_add_region(r, t);
	damon_split_region_at(t, r, 25);
	KUNIT_EXPECT_EQ(test, r->ar.start, 0ul);
	KUNIT_EXPECT_EQ(test, r->ar.end, 25ul);

	r_new = damon_next_region(r);
	KUNIT_EXPECT_EQ(test, r_new->ar.start, 25ul);
	KUNIT_EXPECT_EQ(test, r_new->ar.end, 100ul);

	KUNIT_EXPECT_EQ(test, r->nr_accesses_bp, r_new->nr_accesses_bp);
	KUNIT_EXPECT_EQ(test, r->nr_accesses, r_new->nr_accesses);
	KUNIT_EXPECT_EQ(test, r->last_nr_accesses, r_new->last_nr_accesses);

	damon_free_target(t);
	damon_destroy_ctx(c);
}
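
/*
 * Test damon_merge_two_regions(): the merged region should cover the union of
 * the two ranges, and its '->nr_accesses' should be the size-weighted average
 * of the originals ((10 * 100 + 20 * 200) / 300 == 16 here).
 */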
static void damon_test_merge_two(struct kunit *test)
{
	struct damon_target *t;
	struct damon_region *r, *r2, *r3;
	int i;

	t = damon_new_target();
	r = damon_new_region(0, 100);
	r->nr_accesses = 10;
	r->nr_accesses_bp = 100000;
	damon_add_region(r, t);
	r2 = damon_new_region(100, 300);
	r2->nr_accesses = 20;
	r2->nr_accesses_bp = 200000;
	damon_add_region(r2, t);

	damon_merge_two_regions(t, r, r2);
	KUNIT_EXPECT_EQ(test, r->ar.start, 0ul);
	KUNIT_EXPECT_EQ(test, r->ar.end, 300ul);
	KUNIT_EXPECT_EQ(test, r->nr_accesses, 16u);

	i = 0;
	damon_for_each_region(r3, t) {
		KUNIT_EXPECT_PTR_EQ(test, r, r3);
		i++;
	}
	KUNIT_EXPECT_EQ(test, i, 1);

	damon_free_target(t);
}
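
/* Return the 'idx'-th region of the given target, or NULL if out of range. */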
static struct damon_region *__nth_region_of(struct damon_target *t, int idx)
{
	struct damon_region *r;
	unsigned int i = 0;

	damon_for_each_region(r, t) {
		if (i++ == idx)
			return r;
	}

	return NULL;
}
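
/*
 * Test damon_merge_regions_of(): adjacent regions whose '->nr_accesses'
 * differ by no more than the given threshold should be merged into one.
 */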
static void damon_test_merge_regions_of(struct kunit *test)
{
	struct damon_target *t;
	struct damon_region *r;
	unsigned long sa[] = {0, 100, 114, 122, 130, 156, 170, 184};
	unsigned long ea[] = {100, 112, 122, 130, 156, 170, 184, 230};
	unsigned int nrs[] = {0, 0, 10, 10, 20, 30, 1, 2};

	unsigned long saddrs[] = {0, 114, 130, 156, 170};
	unsigned long eaddrs[] = {112, 130, 156, 170, 230};
	int i;

	t = damon_new_target();
	for (i = 0; i < ARRAY_SIZE(sa); i++) {
		r = damon_new_region(sa[i], ea[i]);
		r->nr_accesses = nrs[i];
		r->nr_accesses_bp = nrs[i] * 10000;
		damon_add_region(r, t);
	}

	damon_merge_regions_of(t, 9, 9999);
	/* 0-112, 114-130, 130-156, 156-170, 170-230 */
	KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 5u);
	for (i = 0; i < 5; i++) {
		r = __nth_region_of(t, i);
		KUNIT_EXPECT_EQ(test, r->ar.start, saddrs[i]);
		KUNIT_EXPECT_EQ(test, r->ar.end, eaddrs[i]);
	}
	damon_free_target(t);
}
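
/*
 * Test damon_split_regions_of(): splitting a target's regions into a given
 * number of subregions each should leave the target with at most that many
 * times the original number of regions.
 */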
static void damon_test_split_regions_of(struct kunit *test)
{
	struct damon_ctx *c = damon_new_ctx();
	struct damon_target *t;
	struct damon_region *r;

	t = damon_new_target();
	r = damon_new_region(0, 22);
	damon_add_region(r, t);
	damon_split_regions_of(t, 2);
	KUNIT_EXPECT_LE(test, damon_nr_regions(t), 2u);
	damon_free_target(t);

	t = damon_new_target();
	r = damon_new_region(0, 220);
	damon_add_region(r, t);
	damon_split_regions_of(t, 4);
	KUNIT_EXPECT_LE(test, damon_nr_regions(t), 4u);
	damon_free_target(t);
	damon_destroy_ctx(c);
}
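
/*
 * Test damon_register_ops() and damon_select_ops(): an ops id can be
 * registered only once, only registered ops can be selected, and unknown ops
 * ids are rejected.
 */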
static void damon_test_ops_registration(struct kunit *test)
{
	struct damon_ctx *c = damon_new_ctx();
	struct damon_operations ops = {.id = DAMON_OPS_VADDR}, bak;
	bool need_cleanup = false;

	/* DAMON_OPS_VADDR is registered only if CONFIG_DAMON_VADDR is set */
	if (!damon_is_registered_ops(DAMON_OPS_VADDR)) {
		bak.id = DAMON_OPS_VADDR;
		KUNIT_EXPECT_EQ(test, damon_register_ops(&bak), 0);
		need_cleanup = true;
	}

	/* DAMON_OPS_VADDR is ensured to be registered */
	KUNIT_EXPECT_EQ(test, damon_select_ops(c, DAMON_OPS_VADDR), 0);

	/* Double-registration is prohibited */
	KUNIT_EXPECT_EQ(test, damon_register_ops(&ops), -EINVAL);

	/* Unknown ops id cannot be registered */
	KUNIT_EXPECT_EQ(test, damon_select_ops(c, NR_DAMON_OPS), -EINVAL);

	/* Registration should succeed after unregistration */
	mutex_lock(&damon_ops_lock);
	bak = damon_registered_ops[DAMON_OPS_VADDR];
	damon_registered_ops[DAMON_OPS_VADDR] = (struct damon_operations){};
	mutex_unlock(&damon_ops_lock);

	ops.id = DAMON_OPS_VADDR;
	KUNIT_EXPECT_EQ(test, damon_register_ops(&ops), 0);

	mutex_lock(&damon_ops_lock);
	damon_registered_ops[DAMON_OPS_VADDR] = bak;
	mutex_unlock(&damon_ops_lock);

	/* Check double-registration failure again */
	KUNIT_EXPECT_EQ(test, damon_register_ops(&ops), -EINVAL);

	damon_destroy_ctx(c);

	if (need_cleanup) {
		mutex_lock(&damon_ops_lock);
		damon_registered_ops[DAMON_OPS_VADDR] =
				(struct damon_operations){};
		mutex_unlock(&damon_ops_lock);
	}
}
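
/*
 * Test damon_set_regions(): existing regions should be resized and gap-filled
 * so that the target's regions exactly cover the newly requested ranges.
 */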
static void damon_test_set_regions(struct kunit *test)
{
	struct damon_target *t = damon_new_target();
	struct damon_region *r1 = damon_new_region(4, 16);
	struct damon_region *r2 = damon_new_region(24, 32);
	struct damon_addr_range range = {.start = 8, .end = 28};
	unsigned long expects[] = {8, 16, 16, 24, 24, 28};
	int expect_idx = 0;
	struct damon_region *r;

	damon_add_region(r1, t);
	damon_add_region(r2, t);
	damon_set_regions(t, &range, 1);

	KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 3);
	damon_for_each_region(r, t) {
		KUNIT_EXPECT_EQ(test, r->ar.start, expects[expect_idx++]);
		KUNIT_EXPECT_EQ(test, r->ar.end, expects[expect_idx++]);
	}
	damon_destroy_target(t, NULL);
}
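
/*
 * Test damon_nr_accesses_to_accesses_bp() with attributes whose
 * aggregation-to-sampling interval ratio exceeds UINT_MAX, to check the
 * conversion does not overflow.
 */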
static void damon_test_nr_accesses_to_accesses_bp(struct kunit *test)
{
	struct damon_attrs attrs = {
		.sample_interval = 10,
		.aggr_interval = ((unsigned long)UINT_MAX + 1) * 10
	};

	/*
	 * In some cases such as 32-bit architectures where UINT_MAX is
	 * ULONG_MAX, attrs.aggr_interval becomes zero. Calling
	 * damon_nr_accesses_to_accesses_bp() in that case would cause a
	 * divide-by-zero. Such a case is prohibited in normal execution,
	 * since the caution is documented in the comment for the function,
	 * and damon_update_monitoring_results() does the check. Skip the
	 * test in that case.
	 */
	if (!attrs.aggr_interval)
		kunit_skip(test, "aggr_interval is zero.");

	KUNIT_EXPECT_EQ(test, damon_nr_accesses_to_accesses_bp(123, &attrs), 0);
}
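
/*
 * Test damon_update_monitoring_result(): a region's '->nr_accesses' and
 * '->age' should be converted to what they would have been under the new
 * sampling/aggregation intervals.
 */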
static void damon_test_update_monitoring_result(struct kunit *test)
{
	struct damon_attrs old_attrs = {
		.sample_interval = 10, .aggr_interval = 1000,};
	struct damon_attrs new_attrs;
	struct damon_region *r = damon_new_region(3, 7);

	r->nr_accesses = 15;
	r->nr_accesses_bp = 150000;
	r->age = 20;

	new_attrs = (struct damon_attrs){
		.sample_interval = 100, .aggr_interval = 10000,};
	damon_update_monitoring_result(r, &old_attrs, &new_attrs, false);
	KUNIT_EXPECT_EQ(test, r->nr_accesses, 15);
	KUNIT_EXPECT_EQ(test, r->age, 2);

	new_attrs = (struct damon_attrs){
		.sample_interval = 1, .aggr_interval = 1000};
	damon_update_monitoring_result(r, &old_attrs, &new_attrs, false);
	KUNIT_EXPECT_EQ(test, r->nr_accesses, 150);
	KUNIT_EXPECT_EQ(test, r->age, 2);

	new_attrs = (struct damon_attrs){
		.sample_interval = 1, .aggr_interval = 100};
	damon_update_monitoring_result(r, &old_attrs, &new_attrs, false);
	KUNIT_EXPECT_EQ(test, r->nr_accesses, 150);
	KUNIT_EXPECT_EQ(test, r->age, 20);

	damon_free_region(r);
}
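
/*
 * Test damon_set_attrs(): valid attributes should be accepted, while
 * attributes violating the minimum region count, the min/max regions
 * ordering, or the sampling/aggregation interval ordering should be rejected
 * with -EINVAL.
 */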
static void damon_test_set_attrs(struct kunit *test)
{
	struct damon_ctx *c = damon_new_ctx();
	struct damon_attrs valid_attrs = {
		.min_nr_regions = 10, .max_nr_regions = 1000,
		.sample_interval = 5000, .aggr_interval = 100000,};
	struct damon_attrs invalid_attrs;

	KUNIT_EXPECT_EQ(test, damon_set_attrs(c, &valid_attrs), 0);

	invalid_attrs = valid_attrs;
	invalid_attrs.min_nr_regions = 1;
	KUNIT_EXPECT_EQ(test, damon_set_attrs(c, &invalid_attrs), -EINVAL);

	invalid_attrs = valid_attrs;
	invalid_attrs.max_nr_regions = 9;
	KUNIT_EXPECT_EQ(test, damon_set_attrs(c, &invalid_attrs), -EINVAL);

	invalid_attrs = valid_attrs;
	invalid_attrs.aggr_interval = 4999;
	KUNIT_EXPECT_EQ(test, damon_set_attrs(c, &invalid_attrs), -EINVAL);

	damon_destroy_ctx(c);
}
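
/*
 * Test damon_moving_sum(): for a window of length 10, each step should
 * replace one window-slot's worth of the non-moving sum (nomvsum /
 * len_window, 5000 here) with the newly fed value.
 */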
static void damon_test_moving_sum(struct kunit *test)
{
	unsigned int mvsum = 50000, nomvsum = 50000, len_window = 10;
	unsigned int new_values[] = {10000, 0, 10000, 0, 0, 0, 10000, 0, 0, 0};
	unsigned int expects[] = {55000, 50000, 55000, 50000, 45000, 40000,
			45000, 40000, 35000, 30000};
	int i;

	for (i = 0; i < ARRAY_SIZE(new_values); i++) {
		mvsum = damon_moving_sum(mvsum, nomvsum, len_window,
				new_values[i]);
		KUNIT_EXPECT_EQ(test, mvsum, expects[i]);
	}
}
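
/*
 * Test damos_new_filter(): a new filter should keep the given type and
 * matching flag, and its list head should be self-linked, since it is not yet
 * installed to any scheme.
 */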
static void damos_test_new_filter(struct kunit *test)
{
	struct damos_filter *filter;

	filter = damos_new_filter(DAMOS_FILTER_TYPE_ANON, true, false);
	KUNIT_EXPECT_EQ(test, filter->type, DAMOS_FILTER_TYPE_ANON);
	KUNIT_EXPECT_EQ(test, filter->matching, true);
	KUNIT_EXPECT_PTR_EQ(test, filter->list.prev, &filter->list);
	KUNIT_EXPECT_PTR_EQ(test, filter->list.next, &filter->list);
	damos_destroy_filter(filter);
}
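
/*
 * Test damos_filter_match() with an address range filter: regions fully
 * inside the range match, regions fully outside do not, and a region
 * overlapping a range boundary is split at the boundary so that the match
 * result applies to the first resulting region.
 */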
static void damos_test_filter_out(struct kunit *test)
{
	struct damon_target *t;
	struct damon_region *r, *r2;
	struct damos_filter *f;

	f = damos_new_filter(DAMOS_FILTER_TYPE_ADDR, true, false);
	f->addr_range = (struct damon_addr_range){
		.start = DAMON_MIN_REGION * 2, .end = DAMON_MIN_REGION * 6};

	t = damon_new_target();
	r = damon_new_region(DAMON_MIN_REGION * 3, DAMON_MIN_REGION * 5);
	damon_add_region(r, t);

	/* region in the range */
	KUNIT_EXPECT_TRUE(test, damos_filter_match(NULL, t, r, f));
	KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 1);

	/* region before the range */
	r->ar.start = DAMON_MIN_REGION * 1;
	r->ar.end = DAMON_MIN_REGION * 2;
	KUNIT_EXPECT_FALSE(test, damos_filter_match(NULL, t, r, f));
	KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 1);

	/* region after the range */
	r->ar.start = DAMON_MIN_REGION * 6;
	r->ar.end = DAMON_MIN_REGION * 8;
	KUNIT_EXPECT_FALSE(test, damos_filter_match(NULL, t, r, f));
	KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 1);

	/* region started before the range */
	r->ar.start = DAMON_MIN_REGION * 1;
	r->ar.end = DAMON_MIN_REGION * 4;
	KUNIT_EXPECT_FALSE(test, damos_filter_match(NULL, t, r, f));
	/* filter should have split the region */
	KUNIT_EXPECT_EQ(test, r->ar.start, DAMON_MIN_REGION * 1);
	KUNIT_EXPECT_EQ(test, r->ar.end, DAMON_MIN_REGION * 2);
	KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 2);
	r2 = damon_next_region(r);
	KUNIT_EXPECT_EQ(test, r2->ar.start, DAMON_MIN_REGION * 2);
	KUNIT_EXPECT_EQ(test, r2->ar.end, DAMON_MIN_REGION * 4);
	damon_destroy_region(r2, t);

	/* region started in the range */
	r->ar.start = DAMON_MIN_REGION * 2;
	r->ar.end = DAMON_MIN_REGION * 8;
	KUNIT_EXPECT_TRUE(test, damos_filter_match(NULL, t, r, f));
	/* filter should have split the region */
	KUNIT_EXPECT_EQ(test, r->ar.start, DAMON_MIN_REGION * 2);
	KUNIT_EXPECT_EQ(test, r->ar.end, DAMON_MIN_REGION * 6);
	KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 2);
	r2 = damon_next_region(r);
	KUNIT_EXPECT_EQ(test, r2->ar.start, DAMON_MIN_REGION * 6);
	KUNIT_EXPECT_EQ(test, r2->ar.end, DAMON_MIN_REGION * 8);
	damon_destroy_region(r2, t);

	damon_free_target(t);
	damos_free_filter(f);
}
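
/*
 * Test damon_feed_loop_next_input(): the feedback loop should raise the next
 * input when the current score is below the goal, lower it when the score is
 * above the goal, and adjust more strongly the farther the score is from the
 * goal.
 */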
static void damon_test_feed_loop_next_input(struct kunit *test)
{
	unsigned long last_input = 900000, current_score = 200;

	/*
	 * If the current score is lower than the goal, which is always 10,000
	 * (see the comment of damon_feed_loop_next_input()), the next input
	 * should be higher than the last input.
	 */
	KUNIT_EXPECT_GT(test,
			damon_feed_loop_next_input(last_input, current_score),
			last_input);

	/*
	 * If the current score is higher than the goal, the next input should
	 * be lower than the last input.
	 */
	current_score = 250000000;
	KUNIT_EXPECT_LT(test,
			damon_feed_loop_next_input(last_input, current_score),
			last_input);

	/*
	 * The next input depends on the distance between the current score
	 * and the goal.
	 */
	KUNIT_EXPECT_GT(test,
			damon_feed_loop_next_input(last_input, 200),
			damon_feed_loop_next_input(last_input, 2000));
}
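
/*
 * Test damos_set_filters_default_reject(): the default allow/reject behavior
 * of the core and ops filtering stages should follow the last filter of each
 * stage, and a stage with no filter of its own should default to allowing.
 */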
static void damon_test_set_filters_default_reject(struct kunit *test)
{
	struct damos scheme;
	struct damos_filter *target_filter, *anon_filter;

	INIT_LIST_HEAD(&scheme.filters);
	INIT_LIST_HEAD(&scheme.ops_filters);

	damos_set_filters_default_reject(&scheme);
	/*
	 * No filter is installed. Allow by default on both core and ops layer
	 * filtering stages, since there are no filters at all.
	 */
	KUNIT_EXPECT_EQ(test, scheme.core_filters_default_reject, false);
	KUNIT_EXPECT_EQ(test, scheme.ops_filters_default_reject, false);

	target_filter = damos_new_filter(DAMOS_FILTER_TYPE_TARGET, true, true);
	damos_add_filter(&scheme, target_filter);
	damos_set_filters_default_reject(&scheme);
	/*
	 * A core-handled allow-filter is installed.
	 * Reject by default on core layer filtering stage due to the last
	 * core-layer-filter's behavior.
	 * Allow by default on ops layer filtering stage due to the absence of
	 * ops layer filters.
	 */
	KUNIT_EXPECT_EQ(test, scheme.core_filters_default_reject, true);
	KUNIT_EXPECT_EQ(test, scheme.ops_filters_default_reject, false);

	target_filter->allow = false;
	damos_set_filters_default_reject(&scheme);
	/*
	 * A core-handled reject-filter is installed.
	 * Allow by default on core layer filtering stage due to the last
	 * core-layer-filter's behavior.
	 * Allow by default on ops layer filtering stage due to the absence of
	 * ops layer filters.
	 */
	KUNIT_EXPECT_EQ(test, scheme.core_filters_default_reject, false);
	KUNIT_EXPECT_EQ(test, scheme.ops_filters_default_reject, false);

	anon_filter = damos_new_filter(DAMOS_FILTER_TYPE_ANON, true, true);
	damos_add_filter(&scheme, anon_filter);

	damos_set_filters_default_reject(&scheme);
	/*
	 * A core-handled reject-filter and an ops-handled allow-filter are
	 * installed.
	 * Allow by default on core layer filtering stage due to the existence
	 * of the ops-handled filter.
	 * Reject by default on ops layer filtering stage due to the last
	 * ops-layer-filter's behavior.
	 */
	KUNIT_EXPECT_EQ(test, scheme.core_filters_default_reject, false);
	KUNIT_EXPECT_EQ(test, scheme.ops_filters_default_reject, true);

	target_filter->allow = true;
	damos_set_filters_default_reject(&scheme);
	/*
	 * A core-handled allow-filter and an ops-handled allow-filter are
	 * installed.
	 * Allow by default on core layer filtering stage due to the existence
	 * of the ops-handled filter.
	 * Reject by default on ops layer filtering stage due to the last
	 * ops-layer-filter's behavior.
	 */
	KUNIT_EXPECT_EQ(test, scheme.core_filters_default_reject, false);
	KUNIT_EXPECT_EQ(test, scheme.ops_filters_default_reject, true);
}

static struct kunit_case damon_test_cases[] = {
	KUNIT_CASE(damon_test_target),
	KUNIT_CASE(damon_test_regions),
	KUNIT_CASE(damon_test_aggregate),
	KUNIT_CASE(damon_test_split_at),
	KUNIT_CASE(damon_test_merge_two),
	KUNIT_CASE(damon_test_merge_regions_of),
	KUNIT_CASE(damon_test_split_regions_of),
	KUNIT_CASE(damon_test_ops_registration),
	KUNIT_CASE(damon_test_set_regions),
	KUNIT_CASE(damon_test_nr_accesses_to_accesses_bp),
	KUNIT_CASE(damon_test_update_monitoring_result),
	KUNIT_CASE(damon_test_set_attrs),
	KUNIT_CASE(damon_test_moving_sum),
	KUNIT_CASE(damos_test_new_filter),
	KUNIT_CASE(damos_test_filter_out),
	KUNIT_CASE(damon_test_feed_loop_next_input),
	KUNIT_CASE(damon_test_set_filters_default_reject),
	{},
};

static struct kunit_suite damon_test_suite = {
	.name = "damon",
	.test_cases = damon_test_cases,
};
kunit_test_suite(damon_test_suite);

#endif /* _DAMON_CORE_TEST_H */

#endif /* CONFIG_DAMON_KUNIT_TEST */