/* SPDX-License-Identifier: GPL-2.0 */
#include <stdint.h>
#include <stdbool.h>
#include <stdio.h> /* FILE, used by check_for_pattern() */
#include <sys/mman.h>
#include <err.h>
#include <strings.h> /* ffsl() */
#include <unistd.h> /* _SC_PAGESIZE */
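
/*
 * Bits of a 64-bit /proc/<pid>/pagemap entry (one entry per virtual page);
 * see Documentation/admin-guide/mm/pagemap.rst for the full layout.
 */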
#define BIT_ULL(nr)		(1ULL << (nr))
#define PM_SOFT_DIRTY		BIT_ULL(55)
#define PM_MMAP_EXCLUSIVE	BIT_ULL(56)
#define PM_UFFD_WP		BIT_ULL(57)
#define PM_FILE			BIT_ULL(61)
#define PM_SWAP			BIT_ULL(62)
#define PM_PRESENT		BIT_ULL(63)

extern unsigned int __page_size;
extern unsigned int __page_shift;

static inline unsigned int psize(void)
{
	if (!__page_size)
		__page_size = sysconf(_SC_PAGESIZE);
	return __page_size;
}

static inline unsigned int pshift(void)
{
	if (!__page_shift)
		__page_shift = (ffsl(psize()) - 1);
	return __page_shift;
}
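
/*
 * Example (an illustrative sketch, not part of the original header): the
 * helpers above give everything needed to locate a pagemap entry by hand.
 * Each virtual page has one 64-bit entry in /proc/<pid>/pagemap, at byte
 * offset (vaddr >> pshift()) * sizeof(uint64_t); pagemap_get_entry() below
 * provides this lookup. Assumes <fcntl.h> for open():
 *
 *	int fd = open("/proc/self/pagemap", O_RDONLY);
 *	uint64_t entry;
 *	off_t offset = ((uintptr_t)addr >> pshift()) * sizeof(uint64_t);
 *
 *	if (fd < 0 || pread(fd, &entry, sizeof(entry), offset) != sizeof(entry))
 *		err(1, "read pagemap");
 *	if (entry & PM_PRESENT)
 *		printf("page at %p is present\n", addr);
 */
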
uint64_t pagemap_get_entry(int fd, char *start);
bool pagemap_is_softdirty(int fd, char *start);
bool pagemap_is_swapped(int fd, char *start);
bool pagemap_is_populated(int fd, char *start);
unsigned long pagemap_get_pfn(int fd, char *start);
void clear_softdirty(void);
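
/*
 * Example (an illustrative sketch, not part of the original header): the
 * usual soft-dirty pattern is to clear the bits, write to memory, and
 * re-check. Assumes "fd" is an open /proc/self/pagemap descriptor as above
 * and <string.h> for memset():
 *
 *	clear_softdirty();
 *	memset(addr, 0xff, psize());
 *	if (!pagemap_is_softdirty(fd, addr))
 *		errx(1, "page not soft-dirty after write");
 */
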
bool check_for_pattern(FILE *fp, const char *pattern, char *buf, size_t len);
uint64_t read_pmd_pagesize(void);
bool check_huge_anon(void *addr, int nr_hpages, uint64_t hpage_size);
bool check_huge_file(void *addr, int nr_hpages, uint64_t hpage_size);
bool check_huge_shmem(void *addr, int nr_hpages, uint64_t hpage_size);
int64_t allocate_transhuge(void *ptr, int pagemap_fd);
unsigned long default_huge_page_size(void);
int detect_hugetlb_page_sizes(size_t sizes[], int max);
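
/*
 * Example (an illustrative sketch, not part of the original header):
 * probing the hugetlb configuration before running hugetlb tests. The
 * detected sizes are assumed to be reported in bytes:
 *
 *	size_t sizes[8];
 *	int i, nr = detect_hugetlb_page_sizes(sizes, 8);
 *
 *	for (i = 0; i < nr; i++)
 *		printf("hugetlb page size: %zu\n", sizes[i]);
 *	if (!nr)
 *		printf("no hugetlb sizes detected, skipping\n");
 */
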
int uffd_register(int uffd, void *addr, uint64_t len,
		  bool miss, bool wp, bool minor);
int uffd_unregister(int uffd, void *addr, uint64_t len);
int uffd_register_with_ioctls(int uffd, void *addr, uint64_t len,
			      bool miss, bool wp, bool minor, uint64_t *ioctls);
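
/*
 * Example (an illustrative sketch, not part of the original header):
 * registering a range for userfaultfd write-protection via the wrappers
 * above (miss = false, wp = true, minor = false). Assumes "uffd" came from
 * a prior userfaultfd(2) + UFFDIO_API handshake and that "addr"/"len" are
 * page-aligned:
 *
 *	if (uffd_register(uffd, addr, len, false, true, false))
 *		err(1, "uffd_register");
 *	... write-protect pages and run the test ...
 *	if (uffd_unregister(uffd, addr, len))
 *		err(1, "uffd_unregister");
 */
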
unsigned long get_free_hugepages(void);

/*
 * On ppc64 this will only work with radix 2M hugepage size
 */
#define HPAGE_SHIFT 21
#define HPAGE_SIZE (1 << HPAGE_SHIFT)

#define PAGEMAP_PRESENT(ent)	(((ent) & (1ull << 63)) != 0)
#define PAGEMAP_PFN(ent)	((ent) & ((1ull << 55) - 1))
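
/*
 * Example (an illustrative sketch, not part of the original header):
 * decoding a raw entry with the macros above. The PFN field is only
 * meaningful when the page is present, and the kernel may report it as 0
 * to unprivileged readers:
 *
 *	uint64_t ent = pagemap_get_entry(fd, addr);
 *
 *	if (PAGEMAP_PRESENT(ent))
 *		printf("pfn: 0x%llx\n", (unsigned long long)PAGEMAP_PFN(ent));
 */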