mirror of
git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2025-08-05 16:54:27 +00:00

Merge tag 'mm-stable-2025-07-30-15-25' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Pull MM updates from Andrew Morton:
"As usual, many cleanups. The below blurbiage describes 42 patchsets. 21 of those are partially or fully cleanup work. "cleans up", "cleanup", "maintainability", "rationalizes", etc. I never knew the MM code was so dirty.

- "mm: ksm: prevent KSM from breaking merging of new VMAs" (Lorenzo Stoakes) addresses an issue with KSM's PR_SET_MEMORY_MERGE mode: newly mapped VMAs were not eligible for merging with existing adjacent VMAs.

- "mm/damon: introduce DAMON_STAT for simple and practical access monitoring" (SeongJae Park) adds a new kernel module which simplifies the setup and usage of DAMON in production environments.

- "stop passing a writeback_control to swap/shmem writeout" (Christoph Hellwig) is a cleanup to the writeback code which removes a couple of pointers from struct writeback_control.

- "drivers/base/node.c: optimization and cleanups" (Donet Tom) contains largely uncorrelated cleanups to the NUMA node setup and management code.

- "mm: userfaultfd: assorted fixes and cleanups" (Tal Zussman) does some maintenance work on the userfaultfd code.

- "Readahead tweaks for larger folios" (Ryan Roberts) implements some tuneups for pagecache readahead when it is reading into order>0 folios.

- "selftests/mm: Tweaks to the cow test" (Mark Brown) provides some cleanups and consistency improvements to the selftests code.

- "Optimize mremap() for large folios" (Dev Jain) does that. A 37% reduction in execution time was measured in a memset+mremap+munmap microbenchmark.

- "Remove zero_user()" (Matthew Wilcox) expunges zero_user() in favor of the more modern memzero_page().

- "mm/huge_memory: vmf_insert_folio_*() and vmf_insert_pfn_pud() fixes" (David Hildenbrand) addresses some warts which David noticed in the huge page code. These were not known to be causing any issues at this time.

- "mm/damon: use alloc_migrate_target() for DAMOS_MIGRATE_{HOT,COLD}" (SeongJae Park) provides some cleanup and consolidation work in DAMON.

- "use vm_flags_t consistently" (Lorenzo Stoakes) uses vm_flags_t in places where we were inappropriately using other types.

- "mm/memfd: Reserve hugetlb folios before allocation" (Vivek Kasireddy) increases the reliability of large page allocation in the memfd code.

- "mm: Remove pXX_devmap page table bit and pfn_t type" (Alistair Popple) removes several now-unneeded PFN_* flags.

- "mm/damon: decouple sysfs from core" (SeongJae Park) implements some cleanup and maintainability work in the DAMON sysfs layer.

- "madvise cleanup" (Lorenzo Stoakes) does quite a lot of cleanup/maintenance work in the madvise() code.
"madvise anon_name cleanups" (Vlastimil Babka) provides additional cleanups on top or Lorenzo's effort. "Implement numa node notifier" (Oscar Salvador) creates a standalone notifier for NUMA node memory state changes. Previously these were lumped under the more general memory on/offline notifier. "Make MIGRATE_ISOLATE a standalone bit" (Zi Yan) cleans up the pageblock isolation code and fixes a potential issue which doesn't seem to cause any problems in practice. "selftests/damon: add python and drgn based DAMON sysfs functionality tests" (SeongJae Park) adds additional drgn- and python-based DAMON selftests which are more comprehensive than the existing selftest suite. "Misc rework on hugetlb faulting path" (Oscar Salvador) fixes a rather obscure deadlock in the hugetlb fault code and follows that fix with a series of cleanups. "cma: factor out allocation logic from __cma_declare_contiguous_nid" (Mike Rapoport) rationalizes and cleans up the highmem-specific code in the CMA allocator. "mm/migration: rework movable_ops page migration (part 1)" (David Hildenbrand) provides cleanups and future-preparedness to the migration code. "mm/damon: add trace events for auto-tuned monitoring intervals and DAMOS quota" (SeongJae Park) adds some tracepoints to some DAMON auto-tuning code. "mm/damon: fix misc bugs in DAMON modules" (SeongJae Park) does that. "mm/damon: misc cleanups" (SeongJae Park) also does what it claims. "mm: folio_pte_batch() improvements" (David Hildenbrand) cleans up the large folio PTE batching code. "mm/damon/vaddr: Allow interleaving in migrate_{hot,cold} actions" (SeongJae Park) facilitates dynamic alteration of DAMON's inter-node allocation policy. "Remove unmap_and_put_page()" (Vishal Moola) provides a couple of page->folio conversions. "mm: per-node proactive reclaim" (Davidlohr Bueso) implements a per-node control of proactive reclaim - beyond the current memcg-based implementation. "mm/damon: remove damon_callback" (SeongJae Park) replaces the damon_callback interface with a more general and powerful damon_call()+damos_walk() interface. "mm/mremap: permit mremap() move of multiple VMAs" (Lorenzo Stoakes) implements a number of mremap cleanups (of course) in preparation for adding new mremap() functionality: newly permit the remapping of multiple VMAs when the user is specifying MREMAP_FIXED. It still excludes some specialized situations where this cannot be performed reliably. "drop hugetlb_free_pgd_range()" (Anthony Yznaga) switches some sparc hugetlb code over to the generic version and removes the thus-unneeded hugetlb_free_pgd_range(). "mm/damon/sysfs: support periodic and automated stats update" (SeongJae Park) augments the present userspace-requested update of DAMON sysfs monitoring files. Automatic update is now provided, along with a tunable to control the update interval. "Some randome fixes and cleanups to swapfile" (Kemeng Shi) does what is claims. "mm: introduce snapshot_page" (Luiz Capitulino and David Hildenbrand) provides (and uses) a means by which debug-style functions can grab a copy of a pageframe and inspect it locklessly without tripping over the races inherent in operating on the live pageframe directly. "use per-vma locks for /proc/pid/maps reads" (Suren Baghdasaryan) addresses the large contention issues which can be triggered by reads from that procfs file. Latencies are reduced by more than half in some situations. The series also introduces several new selftests for the /proc/pid/maps interface. 
"__folio_split() clean up" (Zi Yan) cleans up __folio_split()! "Optimize mprotect() for large folios" (Dev Jain) provides some quite large (>3x) speedups to mprotect() when dealing with large folios. "selftests/mm: reuse FORCE_READ to replace "asm volatile("" : "+r" (XXX));" and some cleanup" (wang lian) does some cleanup work in the selftests code. "tools/testing: expand mremap testing" (Lorenzo Stoakes) extends the mremap() selftest in several ways, including adding more checking of Lorenzo's recently added "permit mremap() move of multiple VMAs" feature. "selftests/damon/sysfs.py: test all parameters" (SeongJae Park) extends the DAMON sysfs interface selftest so that it tests all possible user-requested parameters. Rather than the present minimal subset" * tag 'mm-stable-2025-07-30-15-25' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm: (370 commits) MAINTAINERS: add missing headers to mempory policy & migration section MAINTAINERS: add missing file to cgroup section MAINTAINERS: add MM MISC section, add missing files to MISC and CORE MAINTAINERS: add missing zsmalloc file MAINTAINERS: add missing files to page alloc section MAINTAINERS: add missing shrinker files MAINTAINERS: move memremap.[ch] to hotplug section MAINTAINERS: add missing mm_slot.h file THP section MAINTAINERS: add missing interval_tree.c to memory mapping section MAINTAINERS: add missing percpu-internal.h file to per-cpu section mm/page_alloc: remove trace_mm_alloc_contig_migrate_range_info() selftests/damon: introduce _common.sh to host shared function selftests/damon/sysfs.py: test runtime reduction of DAMON parameters selftests/damon/sysfs.py: test non-default parameters runtime commit selftests/damon/sysfs.py: generalize DAMON context commit assertion selftests/damon/sysfs.py: generalize monitoring attributes commit assertion selftests/damon/sysfs.py: generalize DAMOS schemes commit assertion selftests/damon/sysfs.py: test DAMOS filters commitment selftests/damon/sysfs.py: generalize DAMOS scheme commit assertion selftests/damon/sysfs.py: test DAMOS destinations commitment ...
428 lines
12 KiB
C
/* SPDX-License-Identifier: GPL-2.0-or-later */
/* Internal procfs definitions
 *
 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/proc_fs.h>
#include <linux/proc_ns.h>
#include <linux/refcount.h>
#include <linux/spinlock.h>
#include <linux/atomic.h>
#include <linux/binfmts.h>
#include <linux/sched/coredump.h>
#include <linux/sched/task.h>
#include <linux/mm.h>

struct ctl_table_header;
struct mempolicy;

/*
 * This is not completely implemented yet. The idea is to
 * create an in-memory tree (like the actual /proc filesystem
 * tree) of these proc_dir_entries, so that we can dynamically
 * add new files to /proc.
 *
 * parent/subdir are used for the directory structure (every /proc file has a
 * parent, but "subdir" is empty for all non-directory entries).
 * subdir_node is used to build the rb tree "subdir" of the parent.
 */
struct proc_dir_entry {
	/*
	 * number of callers into module in progress;
	 * negative -> it's going away RSN
	 */
	atomic_t in_use;
	refcount_t refcnt;
	struct list_head pde_openers;	/* who did ->open, but not ->release */
	/* protects ->pde_openers and all struct pde_opener instances */
	spinlock_t pde_unload_lock;
	struct completion *pde_unload_completion;
	const struct inode_operations *proc_iops;
	union {
		const struct proc_ops *proc_ops;
		const struct file_operations *proc_dir_ops;
	};
	union {
		const struct seq_operations *seq_ops;
		int (*single_show)(struct seq_file *, void *);
	};
	proc_write_t write;
	void *data;
	unsigned int state_size;
	unsigned int low_ino;
	nlink_t nlink;
	kuid_t uid;
	kgid_t gid;
	loff_t size;
	struct proc_dir_entry *parent;
	struct rb_root subdir;
	struct rb_node subdir_node;
	char *name;
	umode_t mode;
	u8 flags;
	u8 namelen;
	char inline_name[];
} __randomize_layout;

#define SIZEOF_PDE	(				\
	sizeof(struct proc_dir_entry) < 128 ? 128 :	\
	sizeof(struct proc_dir_entry) < 192 ? 192 :	\
	sizeof(struct proc_dir_entry) < 256 ? 256 :	\
	sizeof(struct proc_dir_entry) < 512 ? 512 :	\
	0)
#define SIZEOF_PDE_INLINE_NAME (SIZEOF_PDE - sizeof(struct proc_dir_entry))
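/*
 * Example: if sizeof(struct proc_dir_entry) were 200 bytes, SIZEOF_PDE
 * would round the slab object up to the 256-byte bucket, leaving
 * SIZEOF_PDE_INLINE_NAME == 56 bytes of inline_name[] storage, so most
 * entry names need no separate allocation.
 */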

static inline bool pde_is_permanent(const struct proc_dir_entry *pde)
{
	return pde->flags & PROC_ENTRY_PERMANENT;
}

static inline void pde_make_permanent(struct proc_dir_entry *pde)
{
	pde->flags |= PROC_ENTRY_PERMANENT;
}

static inline bool pde_has_proc_read_iter(const struct proc_dir_entry *pde)
{
	return pde->flags & PROC_ENTRY_proc_read_iter;
}

static inline bool pde_has_proc_compat_ioctl(const struct proc_dir_entry *pde)
{
#ifdef CONFIG_COMPAT
	return pde->flags & PROC_ENTRY_proc_compat_ioctl;
#else
	return false;
#endif
}

static inline bool pde_has_proc_lseek(const struct proc_dir_entry *pde)
{
	return pde->flags & PROC_ENTRY_proc_lseek;
}

extern struct kmem_cache *proc_dir_entry_cache;
void pde_free(struct proc_dir_entry *pde);

union proc_op {
	int (*proc_get_link)(struct dentry *, struct path *);
	int (*proc_show)(struct seq_file *m,
		struct pid_namespace *ns, struct pid *pid,
		struct task_struct *task);
	int lsmid;
};

struct proc_inode {
	struct pid *pid;
	unsigned int fd;
	union proc_op op;
	struct proc_dir_entry *pde;
	struct ctl_table_header *sysctl;
	const struct ctl_table *sysctl_entry;
	struct hlist_node sibling_inodes;
	const struct proc_ns_operations *ns_ops;
	struct inode vfs_inode;
} __randomize_layout;

/*
 * General functions
 */
static inline struct proc_inode *PROC_I(const struct inode *inode)
{
	return container_of(inode, struct proc_inode, vfs_inode);
}

static inline struct proc_dir_entry *PDE(const struct inode *inode)
{
	return PROC_I(inode)->pde;
}

static inline struct pid *proc_pid(const struct inode *inode)
{
	return PROC_I(inode)->pid;
}
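
/*
 * The task returned by get_proc_task() below (if any) comes from
 * get_pid_task() with a reference held; callers drop it with
 * put_task_struct() when done.
 */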
static inline struct task_struct *get_proc_task(const struct inode *inode)
{
	return get_pid_task(proc_pid(inode), PIDTYPE_PID);
}

void task_dump_owner(struct task_struct *task, umode_t mode,
		     kuid_t *ruid, kgid_t *rgid);

unsigned name_to_int(const struct qstr *qstr);
/*
 * Offset of the first process in the /proc root directory..
 */
#define FIRST_PROCESS_ENTRY 256

/* Worst case buffer size needed for holding an integer. */
#define PROC_NUMBUF 13
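/*
 * A sign, the ten digits of a 32-bit value, a '\n' and a NUL terminator
 * add up to 13 bytes.
 */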

#ifdef CONFIG_PAGE_MAPCOUNT
/**
 * folio_precise_page_mapcount() - Number of mappings of this folio page.
 * @folio: The folio.
 * @page: The page.
 *
 * The number of present user page table entries that reference this page
 * as tracked via the RMAP: either referenced directly (PTE) or as part of
 * a larger area that covers this page (e.g., PMD).
 *
 * Use this function only for the calculation of existing statistics
 * (USS, PSS, mapcount_max) and for debugging purposes (/proc/kpagecount).
 *
 * Do not add new users.
 *
 * Returns: The number of mappings of this folio page. 0 for
 * folios that are not mapped to user space or are not tracked via the RMAP
 * (e.g., shared zeropage).
 */
static inline int folio_precise_page_mapcount(struct folio *folio,
		struct page *page)
{
	int mapcount = atomic_read(&page->_mapcount) + 1;

	if (page_mapcount_is_type(mapcount))
		mapcount = 0;
	if (folio_test_large(folio))
		mapcount += folio_entire_mapcount(folio);

	return mapcount;
}
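
/*
 * Example: each page of a THP mapped only by a single PMD has a page
 * _mapcount of -1 (no per-PTE mappings), so the value returned above is
 * 0 + folio_entire_mapcount() == 1 for every page in the folio.
 */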
#else /* !CONFIG_PAGE_MAPCOUNT */
static inline int folio_precise_page_mapcount(struct folio *folio,
		struct page *page)
{
	BUILD_BUG();
}
#endif /* CONFIG_PAGE_MAPCOUNT */

/**
 * folio_average_page_mapcount() - Average number of mappings per page in this
 *				   folio
 * @folio: The folio.
 *
 * The average number of user page table entries that reference each page in
 * this folio as tracked via the RMAP: either referenced directly (PTE) or
 * as part of a larger area that covers this page (e.g., PMD).
 *
 * The average is calculated by rounding to the nearest integer; however,
 * to avoid duplicated code in current callers, the average is at least
 * 1 if any page of the folio is mapped.
 *
 * Returns: The average number of mappings per page in this folio.
 */
static inline int folio_average_page_mapcount(struct folio *folio)
{
	int mapcount, entire_mapcount, avg;

	if (!folio_test_large(folio))
		return atomic_read(&folio->_mapcount) + 1;

	mapcount = folio_large_mapcount(folio);
	if (unlikely(mapcount <= 0))
		return 0;
	entire_mapcount = folio_entire_mapcount(folio);
	if (mapcount <= entire_mapcount)
		return entire_mapcount;
	mapcount -= entire_mapcount;

	/* Round to closest integer ... */
	avg = ((unsigned int)mapcount + folio_large_nr_pages(folio) / 2) >> folio_large_order(folio);
	/* ... but return at least 1. */
	return max_t(int, avg + entire_mapcount, 1);
}
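
/*
 * Example: an order-2 folio (4 pages) with six PTE mappings and no
 * entire (PMD) mappings gives avg = (6 + 4/2) >> 2 = 2. With a single
 * PTE mapping the sum (1 + 2) >> 2 rounds down to 0, and max_t()
 * clamps the result back to 1.
 */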
/*
 * array.c
 */
extern const struct file_operations proc_tid_children_operations;

extern void proc_task_name(struct seq_file *m, struct task_struct *p,
			   bool escape);
extern int proc_tid_stat(struct seq_file *, struct pid_namespace *,
			 struct pid *, struct task_struct *);
extern int proc_tgid_stat(struct seq_file *, struct pid_namespace *,
			  struct pid *, struct task_struct *);
extern int proc_pid_status(struct seq_file *, struct pid_namespace *,
			   struct pid *, struct task_struct *);
extern int proc_pid_statm(struct seq_file *, struct pid_namespace *,
			  struct pid *, struct task_struct *);

/*
 * base.c
 */
extern const struct dentry_operations pid_dentry_operations;
extern int pid_getattr(struct mnt_idmap *, const struct path *,
		       struct kstat *, u32, unsigned int);
extern int proc_setattr(struct mnt_idmap *, struct dentry *,
			struct iattr *);
extern void proc_pid_evict_inode(struct proc_inode *);
extern struct inode *proc_pid_make_inode(struct super_block *, struct task_struct *, umode_t);
extern void pid_update_inode(struct task_struct *, struct inode *);
extern int pid_delete_dentry(const struct dentry *);
extern int proc_pid_readdir(struct file *, struct dir_context *);
struct dentry *proc_pid_lookup(struct dentry *, unsigned int);
extern loff_t mem_lseek(struct file *, loff_t, int);

/* Lookups */
typedef struct dentry *instantiate_t(struct dentry *,
				     struct task_struct *, const void *);
bool proc_fill_cache(struct file *, struct dir_context *, const char *, unsigned int,
		     instantiate_t, struct task_struct *, const void *);

/*
 * generic.c
 */
struct proc_dir_entry *proc_create_reg(const char *name, umode_t mode,
		struct proc_dir_entry **parent, void *data);
struct proc_dir_entry *proc_register(struct proc_dir_entry *dir,
		struct proc_dir_entry *dp);
extern struct dentry *proc_lookup(struct inode *, struct dentry *, unsigned int);
struct dentry *proc_lookup_de(struct inode *, struct dentry *, struct proc_dir_entry *);
extern int proc_readdir(struct file *, struct dir_context *);
int proc_readdir_de(struct file *, struct dir_context *, struct proc_dir_entry *);

static inline void pde_get(struct proc_dir_entry *pde)
{
	refcount_inc(&pde->refcnt);
}
extern void pde_put(struct proc_dir_entry *);
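/*
 * Every pde_get() must be paired with a pde_put(); the final pde_put()
 * releases the entry via pde_free().
 */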

static inline bool is_empty_pde(const struct proc_dir_entry *pde)
{
	return S_ISDIR(pde->mode) && !pde->proc_iops;
}
extern ssize_t proc_simple_write(struct file *, const char __user *, size_t, loff_t *);

/*
 * inode.c
 */
struct pde_opener {
	struct list_head lh;
	struct file *file;
	bool closing;
	struct completion *c;
} __randomize_layout;
extern const struct inode_operations proc_link_inode_operations;
extern const struct inode_operations proc_pid_link_inode_operations;
extern const struct super_operations proc_sops;

void proc_init_kmemcache(void);
void proc_invalidate_siblings_dcache(struct hlist_head *inodes, spinlock_t *lock);
void set_proc_pid_nlink(void);
extern struct inode *proc_get_inode(struct super_block *, struct proc_dir_entry *);
extern void proc_entry_rundown(struct proc_dir_entry *);

/*
 * proc_namespaces.c
 */
extern const struct inode_operations proc_ns_dir_inode_operations;
extern const struct file_operations proc_ns_dir_operations;

/*
 * proc_net.c
 */
extern const struct file_operations proc_net_operations;
extern const struct inode_operations proc_net_inode_operations;

#ifdef CONFIG_NET
extern int proc_net_init(void);
#else
static inline int proc_net_init(void) { return 0; }
#endif

/*
 * proc_self.c
 */
extern int proc_setup_self(struct super_block *);

/*
 * proc_thread_self.c
 */
extern int proc_setup_thread_self(struct super_block *);
extern void proc_thread_self_init(void);

/*
 * proc_sysctl.c
 */
#ifdef CONFIG_PROC_SYSCTL
extern int proc_sys_init(void);
extern void proc_sys_evict_inode(struct inode *inode,
				 struct ctl_table_header *head);
#else
static inline void proc_sys_init(void) { }
static inline void proc_sys_evict_inode(struct inode *inode,
					struct ctl_table_header *head) { }
#endif

/*
 * proc_tty.c
 */
#ifdef CONFIG_TTY
extern void proc_tty_init(void);
#else
static inline void proc_tty_init(void) {}
#endif

/*
 * root.c
 */
extern struct proc_dir_entry proc_root;

extern void proc_self_init(void);

/*
 * task_[no]mmu.c
 */
struct mem_size_stats;
struct proc_maps_private {
	struct inode *inode;
	struct task_struct *task;
	struct mm_struct *mm;
	struct vma_iterator iter;
	loff_t last_pos;
#ifdef CONFIG_PER_VMA_LOCK
	bool mmap_locked;
	struct vm_area_struct *locked_vma;
#endif
#ifdef CONFIG_NUMA
	struct mempolicy *task_mempolicy;
#endif
} __randomize_layout;

struct mm_struct *proc_mem_open(struct inode *inode, unsigned int mode);

extern const struct file_operations proc_pid_maps_operations;
extern const struct file_operations proc_pid_numa_maps_operations;
extern const struct file_operations proc_pid_smaps_operations;
extern const struct file_operations proc_pid_smaps_rollup_operations;
extern const struct file_operations proc_clear_refs_operations;
extern const struct file_operations proc_pagemap_operations;

extern unsigned long task_vsize(struct mm_struct *);
extern unsigned long task_statm(struct mm_struct *,
				unsigned long *, unsigned long *,
				unsigned long *, unsigned long *);
extern void task_mem(struct seq_file *, struct mm_struct *);

extern const struct dentry_operations proc_net_dentry_ops;
static inline void pde_force_lookup(struct proc_dir_entry *pde)
{
	/* /proc/net/ entries can be changed under us by setns(CLONE_NEWNET) */
	pde->flags |= PROC_ENTRY_FORCE_LOOKUP;
}

/*
 * Add a new procfs dentry that can't serve as a mountpoint. That should
 * encompass anything that is ephemeral and can just disappear while the
 * process is still around.
 */
static inline struct dentry *proc_splice_unmountable(struct inode *inode,
		struct dentry *dentry, const struct dentry_operations *d_ops)
{
	dont_mount(dentry);
	return d_splice_alias_ops(inode, dentry, d_ops);
}