Mirror of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git (synced 2025-08-05 16:54:27 +00:00)

- The 4 patch series "mm: ksm: prevent KSM from breaking merging of new VMAs" from Lorenzo Stoakes addresses an issue with KSM's PR_SET_MEMORY_MERGE mode: newly mapped VMAs were not eligible for merging with existing adjacent VMAs. - The 4 patch series "mm/damon: introduce DAMON_STAT for simple and practical access monitoring" from SeongJae Park adds a new kernel module which simplifies the setup and usage of DAMON in production environments. - The 6 patch series "stop passing a writeback_control to swap/shmem writeout" from Christoph Hellwig is a cleanup to the writeback code which removes a couple of pointers from struct writeback_control. - The 7 patch series "drivers/base/node.c: optimization and cleanups" from Donet Tom contains largely uncorrelated cleanups to the NUMA node setup and management code. - The 4 patch series "mm: userfaultfd: assorted fixes and cleanups" from Tal Zussman does some maintenance work on the userfaultfd code. - The 5 patch series "Readahead tweaks for larger folios" from Ryan Roberts implements some tuneups for pagecache readahead when it is reading into order>0 folios. - The 4 patch series "selftests/mm: Tweaks to the cow test" from Mark Brown provides some cleanups and consistency improvements to the selftests code. - The 4 patch series "Optimize mremap() for large folios" from Dev Jain does that. A 37% reduction in execution time was measured in a memset+mremap+munmap microbenchmark. - The 5 patch series "Remove zero_user()" from Matthew Wilcox expunges zero_user() in favor of the more modern memzero_page(). - The 3 patch series "mm/huge_memory: vmf_insert_folio_*() and vmf_insert_pfn_pud() fixes" from David Hildenbrand addresses some warts which David noticed in the huge page code. These were not known to be causing any issues at this time. - The 3 patch series "mm/damon: use alloc_migrate_target() for DAMOS_MIGRATE_{HOT,COLD" from SeongJae Park provides some cleanup and consolidation work in DAMON. - The 3 patch series "use vm_flags_t consistently" from Lorenzo Stoakes uses vm_flags_t in places where we were inappropriately using other types. - The 3 patch series "mm/memfd: Reserve hugetlb folios before allocation" from Vivek Kasireddy increases the reliability of large page allocation in the memfd code. - The 14 patch series "mm: Remove pXX_devmap page table bit and pfn_t type" from Alistair Popple removes several now-unneeded PFN_* flags. - The 5 patch series "mm/damon: decouple sysfs from core" from SeongJae Park implememnts some cleanup and maintainability work in the DAMON sysfs layer. - The 5 patch series "madvise cleanup" from Lorenzo Stoakes does quite a lot of cleanup/maintenance work in the madvise() code. - The 4 patch series "madvise anon_name cleanups" from Vlastimil Babka provides additional cleanups on top or Lorenzo's effort. - The 11 patch series "Implement numa node notifier" from Oscar Salvador creates a standalone notifier for NUMA node memory state changes. Previously these were lumped under the more general memory on/offline notifier. - The 6 patch series "Make MIGRATE_ISOLATE a standalone bit" from Zi Yan cleans up the pageblock isolation code and fixes a potential issue which doesn't seem to cause any problems in practice. - The 5 patch series "selftests/damon: add python and drgn based DAMON sysfs functionality tests" from SeongJae Park adds additional drgn- and python-based DAMON selftests which are more comprehensive than the existing selftest suite. 
- The 5 patch series "Misc rework on hugetlb faulting path" from Oscar Salvador fixes a rather obscure deadlock in the hugetlb fault code and follows that fix with a series of cleanups. - The 3 patch series "cma: factor out allocation logic from __cma_declare_contiguous_nid" from Mike Rapoport rationalizes and cleans up the highmem-specific code in the CMA allocator. - The 28 patch series "mm/migration: rework movable_ops page migration (part 1)" from David Hildenbrand provides cleanups and future-preparedness to the migration code. - The 2 patch series "mm/damon: add trace events for auto-tuned monitoring intervals and DAMOS quota" from SeongJae Park adds some tracepoints to some DAMON auto-tuning code. - The 6 patch series "mm/damon: fix misc bugs in DAMON modules" from SeongJae Park does that. - The 6 patch series "mm/damon: misc cleanups" from SeongJae Park also does what it claims. - The 4 patch series "mm: folio_pte_batch() improvements" from David Hildenbrand cleans up the large folio PTE batching code. - The 13 patch series "mm/damon/vaddr: Allow interleaving in migrate_{hot,cold} actions" from SeongJae Park facilitates dynamic alteration of DAMON's inter-node allocation policy. - The 3 patch series "Remove unmap_and_put_page()" from Vishal Moola provides a couple of page->folio conversions. - The 4 patch series "mm: per-node proactive reclaim" from Davidlohr Bueso implements a per-node control of proactive reclaim - beyond the current memcg-based implementation. - The 14 patch series "mm/damon: remove damon_callback" from SeongJae Park replaces the damon_callback interface with a more general and powerful damon_call()+damos_walk() interface. - The 10 patch series "mm/mremap: permit mremap() move of multiple VMAs" from Lorenzo Stoakes implements a number of mremap cleanups (of course) in preparation for adding new mremap() functionality: newly permit the remapping of multiple VMAs when the user is specifying MREMAP_FIXED. It still excludes some specialized situations where this cannot be performed reliably. - The 3 patch series "drop hugetlb_free_pgd_range()" from Anthony Yznaga switches some sparc hugetlb code over to the generic version and removes the thus-unneeded hugetlb_free_pgd_range(). - The 4 patch series "mm/damon/sysfs: support periodic and automated stats update" from SeongJae Park augments the present userspace-requested update of DAMON sysfs monitoring files. Automatic update is now provided, along with a tunable to control the update interval. - The 4 patch series "Some randome fixes and cleanups to swapfile" from Kemeng Shi does what is claims. - The 4 patch series "mm: introduce snapshot_page" from Luiz Capitulino and David Hildenbrand provides (and uses) a means by which debug-style functions can grab a copy of a pageframe and inspect it locklessly without tripping over the races inherent in operating on the live pageframe directly. - The 6 patch series "use per-vma locks for /proc/pid/maps reads" from Suren Baghdasaryan addresses the large contention issues which can be triggered by reads from that procfs file. Latencies are reduced by more than half in some situations. The series also introduces several new selftests for the /proc/pid/maps interface. - The 6 patch series "__folio_split() clean up" from Zi Yan cleans up __folio_split()! - The 7 patch series "Optimize mprotect() for large folios" from Dev Jain provides some quite large (>3x) speedups to mprotect() when dealing with large folios. 
- The 2 patch series "selftests/mm: reuse FORCE_READ to replace "asm volatile("" : "+r" (XXX));" and some cleanup" from wang lian does some cleanup work in the selftests code. - The 3 patch series "tools/testing: expand mremap testing" from Lorenzo Stoakes extends the mremap() selftest in several ways, including adding more checking of Lorenzo's recently added "permit mremap() move of multiple VMAs" feature. - The 22 patch series "selftests/damon/sysfs.py: test all parameters" from SeongJae Park extends the DAMON sysfs interface selftest so that it tests all possible user-requested parameters. Rather than the present minimal subset. -----BEGIN PGP SIGNATURE----- iHUEABYIAB0WIQTTMBEPP41GrTpTJgfdBJ7gKXxAjgUCaIqcCgAKCRDdBJ7gKXxA jkVBAQCCn9DR1QP0CRk961ot0cKzOgioSc0aA03DPb2KXRt2kQEAzDAz0ARurFhL 8BzbvI0c+4tntHLXvIlrC33n9KWAOQM= =XsFy -----END PGP SIGNATURE----- Merge tag 'mm-stable-2025-07-30-15-25' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm Pull MM updates from Andrew Morton: "As usual, many cleanups. The below blurbiage describes 42 patchsets. 21 of those are partially or fully cleanup work. "cleans up", "cleanup", "maintainability", "rationalizes", etc. I never knew the MM code was so dirty. "mm: ksm: prevent KSM from breaking merging of new VMAs" (Lorenzo Stoakes) addresses an issue with KSM's PR_SET_MEMORY_MERGE mode: newly mapped VMAs were not eligible for merging with existing adjacent VMAs. "mm/damon: introduce DAMON_STAT for simple and practical access monitoring" (SeongJae Park) adds a new kernel module which simplifies the setup and usage of DAMON in production environments. "stop passing a writeback_control to swap/shmem writeout" (Christoph Hellwig) is a cleanup to the writeback code which removes a couple of pointers from struct writeback_control. "drivers/base/node.c: optimization and cleanups" (Donet Tom) contains largely uncorrelated cleanups to the NUMA node setup and management code. "mm: userfaultfd: assorted fixes and cleanups" (Tal Zussman) does some maintenance work on the userfaultfd code. "Readahead tweaks for larger folios" (Ryan Roberts) implements some tuneups for pagecache readahead when it is reading into order>0 folios. "selftests/mm: Tweaks to the cow test" (Mark Brown) provides some cleanups and consistency improvements to the selftests code. "Optimize mremap() for large folios" (Dev Jain) does that. A 37% reduction in execution time was measured in a memset+mremap+munmap microbenchmark. "Remove zero_user()" (Matthew Wilcox) expunges zero_user() in favor of the more modern memzero_page(). "mm/huge_memory: vmf_insert_folio_*() and vmf_insert_pfn_pud() fixes" (David Hildenbrand) addresses some warts which David noticed in the huge page code. These were not known to be causing any issues at this time. "mm/damon: use alloc_migrate_target() for DAMOS_MIGRATE_{HOT,COLD" (SeongJae Park) provides some cleanup and consolidation work in DAMON. "use vm_flags_t consistently" (Lorenzo Stoakes) uses vm_flags_t in places where we were inappropriately using other types. "mm/memfd: Reserve hugetlb folios before allocation" (Vivek Kasireddy) increases the reliability of large page allocation in the memfd code. "mm: Remove pXX_devmap page table bit and pfn_t type" (Alistair Popple) removes several now-unneeded PFN_* flags. "mm/damon: decouple sysfs from core" (SeongJae Park) implememnts some cleanup and maintainability work in the DAMON sysfs layer. "madvise cleanup" (Lorenzo Stoakes) does quite a lot of cleanup/maintenance work in the madvise() code. 
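To make the KSM item above concrete: PR_SET_MEMORY_MERGE is the per-process prctl() switch whose interaction with newly mapped VMAs the series fixes. Below is a minimal, hedged userspace sketch of how that mode is typically enabled; it assumes a kernel that defines PR_SET_MEMORY_MERGE (6.4 or later, with CONFIG_KSM), the mapping size is illustrative, and the snippet is not part of the series itself.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/prctl.h>
#include <linux/prctl.h>        /* PR_SET_MEMORY_MERGE (kernel >= 6.4) */

int main(void)
{
        /* Opt the whole process into KSM merging. */
        if (prctl(PR_SET_MEMORY_MERGE, 1, 0, 0, 0)) {
                perror("prctl(PR_SET_MEMORY_MERGE)");
                return EXIT_FAILURE;
        }

        /*
         * A VMA mapped *after* the prctl(): the series summarized above makes
         * such a newly mapped VMA eligible again to merge with adjacent
         * already-mergeable VMAs.
         */
        void *p = mmap(NULL, 2 * 1024 * 1024, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED) {
                perror("mmap");
                return EXIT_FAILURE;
        }
        memset(p, 0, 2 * 1024 * 1024);  /* identical pages, KSM candidates */
        return EXIT_SUCCESS;
}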
"madvise anon_name cleanups" (Vlastimil Babka) provides additional cleanups on top or Lorenzo's effort. "Implement numa node notifier" (Oscar Salvador) creates a standalone notifier for NUMA node memory state changes. Previously these were lumped under the more general memory on/offline notifier. "Make MIGRATE_ISOLATE a standalone bit" (Zi Yan) cleans up the pageblock isolation code and fixes a potential issue which doesn't seem to cause any problems in practice. "selftests/damon: add python and drgn based DAMON sysfs functionality tests" (SeongJae Park) adds additional drgn- and python-based DAMON selftests which are more comprehensive than the existing selftest suite. "Misc rework on hugetlb faulting path" (Oscar Salvador) fixes a rather obscure deadlock in the hugetlb fault code and follows that fix with a series of cleanups. "cma: factor out allocation logic from __cma_declare_contiguous_nid" (Mike Rapoport) rationalizes and cleans up the highmem-specific code in the CMA allocator. "mm/migration: rework movable_ops page migration (part 1)" (David Hildenbrand) provides cleanups and future-preparedness to the migration code. "mm/damon: add trace events for auto-tuned monitoring intervals and DAMOS quota" (SeongJae Park) adds some tracepoints to some DAMON auto-tuning code. "mm/damon: fix misc bugs in DAMON modules" (SeongJae Park) does that. "mm/damon: misc cleanups" (SeongJae Park) also does what it claims. "mm: folio_pte_batch() improvements" (David Hildenbrand) cleans up the large folio PTE batching code. "mm/damon/vaddr: Allow interleaving in migrate_{hot,cold} actions" (SeongJae Park) facilitates dynamic alteration of DAMON's inter-node allocation policy. "Remove unmap_and_put_page()" (Vishal Moola) provides a couple of page->folio conversions. "mm: per-node proactive reclaim" (Davidlohr Bueso) implements a per-node control of proactive reclaim - beyond the current memcg-based implementation. "mm/damon: remove damon_callback" (SeongJae Park) replaces the damon_callback interface with a more general and powerful damon_call()+damos_walk() interface. "mm/mremap: permit mremap() move of multiple VMAs" (Lorenzo Stoakes) implements a number of mremap cleanups (of course) in preparation for adding new mremap() functionality: newly permit the remapping of multiple VMAs when the user is specifying MREMAP_FIXED. It still excludes some specialized situations where this cannot be performed reliably. "drop hugetlb_free_pgd_range()" (Anthony Yznaga) switches some sparc hugetlb code over to the generic version and removes the thus-unneeded hugetlb_free_pgd_range(). "mm/damon/sysfs: support periodic and automated stats update" (SeongJae Park) augments the present userspace-requested update of DAMON sysfs monitoring files. Automatic update is now provided, along with a tunable to control the update interval. "Some randome fixes and cleanups to swapfile" (Kemeng Shi) does what is claims. "mm: introduce snapshot_page" (Luiz Capitulino and David Hildenbrand) provides (and uses) a means by which debug-style functions can grab a copy of a pageframe and inspect it locklessly without tripping over the races inherent in operating on the live pageframe directly. "use per-vma locks for /proc/pid/maps reads" (Suren Baghdasaryan) addresses the large contention issues which can be triggered by reads from that procfs file. Latencies are reduced by more than half in some situations. The series also introduces several new selftests for the /proc/pid/maps interface. 
"__folio_split() clean up" (Zi Yan) cleans up __folio_split()! "Optimize mprotect() for large folios" (Dev Jain) provides some quite large (>3x) speedups to mprotect() when dealing with large folios. "selftests/mm: reuse FORCE_READ to replace "asm volatile("" : "+r" (XXX));" and some cleanup" (wang lian) does some cleanup work in the selftests code. "tools/testing: expand mremap testing" (Lorenzo Stoakes) extends the mremap() selftest in several ways, including adding more checking of Lorenzo's recently added "permit mremap() move of multiple VMAs" feature. "selftests/damon/sysfs.py: test all parameters" (SeongJae Park) extends the DAMON sysfs interface selftest so that it tests all possible user-requested parameters. Rather than the present minimal subset" * tag 'mm-stable-2025-07-30-15-25' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm: (370 commits) MAINTAINERS: add missing headers to mempory policy & migration section MAINTAINERS: add missing file to cgroup section MAINTAINERS: add MM MISC section, add missing files to MISC and CORE MAINTAINERS: add missing zsmalloc file MAINTAINERS: add missing files to page alloc section MAINTAINERS: add missing shrinker files MAINTAINERS: move memremap.[ch] to hotplug section MAINTAINERS: add missing mm_slot.h file THP section MAINTAINERS: add missing interval_tree.c to memory mapping section MAINTAINERS: add missing percpu-internal.h file to per-cpu section mm/page_alloc: remove trace_mm_alloc_contig_migrate_range_info() selftests/damon: introduce _common.sh to host shared function selftests/damon/sysfs.py: test runtime reduction of DAMON parameters selftests/damon/sysfs.py: test non-default parameters runtime commit selftests/damon/sysfs.py: generalize DAMON context commit assertion selftests/damon/sysfs.py: generalize monitoring attributes commit assertion selftests/damon/sysfs.py: generalize DAMOS schemes commit assertion selftests/damon/sysfs.py: test DAMOS filters commitment selftests/damon/sysfs.py: generalize DAMOS scheme commit assertion selftests/damon/sysfs.py: test DAMOS destinations commitment ...
832 lines
19 KiB
C
// SPDX-License-Identifier: GPL-2.0-only
/*
 * proc/fs/generic.c --- generic routines for the proc-fs
 *
 * This file contains generic proc-fs routines for handling
 * directories and files.
 *
 * Copyright (C) 1991, 1992 Linus Torvalds.
 * Copyright (C) 1997 Theodore Ts'o
 */

#include <linux/cache.h>
#include <linux/errno.h>
#include <linux/time.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/namei.h>
#include <linux/slab.h>
#include <linux/printk.h>
#include <linux/mount.h>
#include <linux/init.h>
#include <linux/idr.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/uaccess.h>
#include <linux/seq_file.h>

#include "internal.h"

static DEFINE_RWLOCK(proc_subdir_lock);

struct kmem_cache *proc_dir_entry_cache __ro_after_init;

void pde_free(struct proc_dir_entry *pde)
{
        if (S_ISLNK(pde->mode))
                kfree(pde->data);
        if (pde->name != pde->inline_name)
                kfree(pde->name);
        kmem_cache_free(proc_dir_entry_cache, pde);
}

static int proc_match(const char *name, struct proc_dir_entry *de, unsigned int len)
{
        if (len < de->namelen)
                return -1;
        if (len > de->namelen)
                return 1;

        return memcmp(name, de->name, len);
}

static struct proc_dir_entry *pde_subdir_first(struct proc_dir_entry *dir)
{
        return rb_entry_safe(rb_first(&dir->subdir), struct proc_dir_entry,
                             subdir_node);
}

static struct proc_dir_entry *pde_subdir_next(struct proc_dir_entry *dir)
{
        return rb_entry_safe(rb_next(&dir->subdir_node), struct proc_dir_entry,
                             subdir_node);
}

static struct proc_dir_entry *pde_subdir_find(struct proc_dir_entry *dir,
                                              const char *name,
                                              unsigned int len)
{
        struct rb_node *node = dir->subdir.rb_node;

        while (node) {
                struct proc_dir_entry *de = rb_entry(node,
                                                     struct proc_dir_entry,
                                                     subdir_node);
                int result = proc_match(name, de, len);

                if (result < 0)
                        node = node->rb_left;
                else if (result > 0)
                        node = node->rb_right;
                else
                        return de;
        }
        return NULL;
}

static bool pde_subdir_insert(struct proc_dir_entry *dir,
                              struct proc_dir_entry *de)
{
        struct rb_root *root = &dir->subdir;
        struct rb_node **new = &root->rb_node, *parent = NULL;

        /* Figure out where to put new node */
        while (*new) {
                struct proc_dir_entry *this = rb_entry(*new,
                                                       struct proc_dir_entry,
                                                       subdir_node);
                int result = proc_match(de->name, this, de->namelen);

                parent = *new;
                if (result < 0)
                        new = &(*new)->rb_left;
                else if (result > 0)
                        new = &(*new)->rb_right;
                else
                        return false;
        }

        /* Add new node and rebalance tree. */
        rb_link_node(&de->subdir_node, parent, new);
        rb_insert_color(&de->subdir_node, root);
        return true;
}

static int proc_notify_change(struct mnt_idmap *idmap,
                              struct dentry *dentry, struct iattr *iattr)
{
        struct inode *inode = d_inode(dentry);
        struct proc_dir_entry *de = PDE(inode);
        int error;

        error = setattr_prepare(&nop_mnt_idmap, dentry, iattr);
        if (error)
                return error;

        setattr_copy(&nop_mnt_idmap, inode, iattr);

        proc_set_user(de, inode->i_uid, inode->i_gid);
        de->mode = inode->i_mode;
        return 0;
}

static int proc_getattr(struct mnt_idmap *idmap,
                        const struct path *path, struct kstat *stat,
                        u32 request_mask, unsigned int query_flags)
{
        struct inode *inode = d_inode(path->dentry);
        struct proc_dir_entry *de = PDE(inode);
        if (de) {
                nlink_t nlink = READ_ONCE(de->nlink);
                if (nlink > 0) {
                        set_nlink(inode, nlink);
                }
        }

        generic_fillattr(&nop_mnt_idmap, request_mask, inode, stat);
        return 0;
}

static const struct inode_operations proc_file_inode_operations = {
        .setattr        = proc_notify_change,
};

/*
 * This function parses a name such as "tty/driver/serial", and
 * returns the struct proc_dir_entry for "/proc/tty/driver", and
 * returns "serial" in residual.
 */
static int __xlate_proc_name(const char *name, struct proc_dir_entry **ret,
                             const char **residual)
{
        const char *cp = name, *next;
        struct proc_dir_entry *de;

        de = *ret ?: &proc_root;
        while ((next = strchr(cp, '/')) != NULL) {
                de = pde_subdir_find(de, cp, next - cp);
                if (!de) {
                        WARN(1, "name '%s'\n", name);
                        return -ENOENT;
                }
                cp = next + 1;
        }
        *residual = cp;
        *ret = de;
        return 0;
}

static int xlate_proc_name(const char *name, struct proc_dir_entry **ret,
                           const char **residual)
{
        int rv;

        read_lock(&proc_subdir_lock);
        rv = __xlate_proc_name(name, ret, residual);
        read_unlock(&proc_subdir_lock);
        return rv;
}

static DEFINE_IDA(proc_inum_ida);

#define PROC_DYNAMIC_FIRST 0xF0000000U

/*
 * Return an inode number between PROC_DYNAMIC_FIRST and
 * 0xffffffff, or zero on failure.
 */
int proc_alloc_inum(unsigned int *inum)
{
        int i;

        i = ida_alloc_max(&proc_inum_ida, UINT_MAX - PROC_DYNAMIC_FIRST,
                          GFP_KERNEL);
        if (i < 0)
                return i;

        *inum = PROC_DYNAMIC_FIRST + (unsigned int)i;
        return 0;
}

void proc_free_inum(unsigned int inum)
{
        ida_free(&proc_inum_ida, inum - PROC_DYNAMIC_FIRST);
}

static int proc_misc_d_revalidate(struct inode *dir, const struct qstr *name,
                                  struct dentry *dentry, unsigned int flags)
{
        if (flags & LOOKUP_RCU)
                return -ECHILD;

        if (atomic_read(&PDE(d_inode(dentry))->in_use) < 0)
                return 0; /* revalidate */
        return 1;
}

static int proc_misc_d_delete(const struct dentry *dentry)
{
        return atomic_read(&PDE(d_inode(dentry))->in_use) < 0;
}

static const struct dentry_operations proc_misc_dentry_ops = {
        .d_revalidate   = proc_misc_d_revalidate,
        .d_delete       = proc_misc_d_delete,
};

/*
 * Don't create negative dentries here, return -ENOENT by hand
 * instead.
 */
struct dentry *proc_lookup_de(struct inode *dir, struct dentry *dentry,
                              struct proc_dir_entry *de)
{
        struct inode *inode;

        read_lock(&proc_subdir_lock);
        de = pde_subdir_find(de, dentry->d_name.name, dentry->d_name.len);
        if (de) {
                pde_get(de);
                read_unlock(&proc_subdir_lock);
                inode = proc_get_inode(dir->i_sb, de);
                if (!inode)
                        return ERR_PTR(-ENOMEM);
                if (de->flags & PROC_ENTRY_FORCE_LOOKUP)
                        return d_splice_alias_ops(inode, dentry,
                                                  &proc_net_dentry_ops);
                return d_splice_alias_ops(inode, dentry,
                                          &proc_misc_dentry_ops);
        }
        read_unlock(&proc_subdir_lock);
        return ERR_PTR(-ENOENT);
}

struct dentry *proc_lookup(struct inode *dir, struct dentry *dentry,
                           unsigned int flags)
{
        struct proc_fs_info *fs_info = proc_sb_info(dir->i_sb);

        if (fs_info->pidonly == PROC_PIDONLY_ON)
                return ERR_PTR(-ENOENT);

        return proc_lookup_de(dir, dentry, PDE(dir));
}

/*
 * This returns non-zero if at EOF, so that the /proc
 * root directory can use this and check if it should
 * continue with the <pid> entries..
 *
 * Note that the VFS-layer doesn't care about the return
 * value of the readdir() call, as long as it's non-negative
 * for success..
 */
int proc_readdir_de(struct file *file, struct dir_context *ctx,
                    struct proc_dir_entry *de)
{
        int i;

        if (!dir_emit_dots(file, ctx))
                return 0;

        i = ctx->pos - 2;
        read_lock(&proc_subdir_lock);
        de = pde_subdir_first(de);
        for (;;) {
                if (!de) {
                        read_unlock(&proc_subdir_lock);
                        return 0;
                }
                if (!i)
                        break;
                de = pde_subdir_next(de);
                i--;
        }

        do {
                struct proc_dir_entry *next;
                pde_get(de);
                read_unlock(&proc_subdir_lock);
                if (!dir_emit(ctx, de->name, de->namelen,
                              de->low_ino, de->mode >> 12)) {
                        pde_put(de);
                        return 0;
                }
                ctx->pos++;
                read_lock(&proc_subdir_lock);
                next = pde_subdir_next(de);
                pde_put(de);
                de = next;
        } while (de);
        read_unlock(&proc_subdir_lock);
        return 1;
}

int proc_readdir(struct file *file, struct dir_context *ctx)
{
        struct inode *inode = file_inode(file);
        struct proc_fs_info *fs_info = proc_sb_info(inode->i_sb);

        if (fs_info->pidonly == PROC_PIDONLY_ON)
                return 1;

        return proc_readdir_de(file, ctx, PDE(inode));
}

/*
 * These are the generic /proc directory operations. They
 * use the in-memory "struct proc_dir_entry" tree to parse
 * the /proc directory.
 */
static const struct file_operations proc_dir_operations = {
        .llseek                 = generic_file_llseek,
        .read                   = generic_read_dir,
        .iterate_shared         = proc_readdir,
};

static int proc_net_d_revalidate(struct inode *dir, const struct qstr *name,
                                 struct dentry *dentry, unsigned int flags)
{
        return 0;
}

const struct dentry_operations proc_net_dentry_ops = {
        .d_revalidate   = proc_net_d_revalidate,
        .d_delete       = always_delete_dentry,
};

/*
 * proc directories can do almost nothing..
 */
static const struct inode_operations proc_dir_inode_operations = {
        .lookup         = proc_lookup,
        .getattr        = proc_getattr,
        .setattr        = proc_notify_change,
};

/* returns the registered entry, or frees dp and returns NULL on failure */
struct proc_dir_entry *proc_register(struct proc_dir_entry *dir,
                                     struct proc_dir_entry *dp)
{
        if (proc_alloc_inum(&dp->low_ino))
                goto out_free_entry;

        write_lock(&proc_subdir_lock);
        dp->parent = dir;
        if (pde_subdir_insert(dir, dp) == false) {
                WARN(1, "proc_dir_entry '%s/%s' already registered\n",
                     dir->name, dp->name);
                write_unlock(&proc_subdir_lock);
                goto out_free_inum;
        }
        dir->nlink++;
        write_unlock(&proc_subdir_lock);

        return dp;
out_free_inum:
        proc_free_inum(dp->low_ino);
out_free_entry:
        pde_free(dp);
        return NULL;
}

static struct proc_dir_entry *__proc_create(struct proc_dir_entry **parent,
                                            const char *name,
                                            umode_t mode,
                                            nlink_t nlink)
{
        struct proc_dir_entry *ent = NULL;
        const char *fn;
        struct qstr qstr;

        if (xlate_proc_name(name, parent, &fn) != 0)
                goto out;
        qstr.name = fn;
        qstr.len = strlen(fn);
        if (qstr.len == 0 || qstr.len >= 256) {
                WARN(1, "name len %u\n", qstr.len);
                return NULL;
        }
        if (qstr.len == 1 && fn[0] == '.') {
                WARN(1, "name '.'\n");
                return NULL;
        }
        if (qstr.len == 2 && fn[0] == '.' && fn[1] == '.') {
                WARN(1, "name '..'\n");
                return NULL;
        }
        if (*parent == &proc_root && name_to_int(&qstr) != ~0U) {
                WARN(1, "create '/proc/%s' by hand\n", qstr.name);
                return NULL;
        }
        if (is_empty_pde(*parent)) {
                WARN(1, "attempt to add to permanently empty directory");
                return NULL;
        }

        ent = kmem_cache_zalloc(proc_dir_entry_cache, GFP_KERNEL);
        if (!ent)
                goto out;

        if (qstr.len + 1 <= SIZEOF_PDE_INLINE_NAME) {
                ent->name = ent->inline_name;
        } else {
                ent->name = kmalloc(qstr.len + 1, GFP_KERNEL);
                if (!ent->name) {
                        pde_free(ent);
                        return NULL;
                }
        }

        memcpy(ent->name, fn, qstr.len + 1);
        ent->namelen = qstr.len;
        ent->mode = mode;
        ent->nlink = nlink;
        ent->subdir = RB_ROOT;
        refcount_set(&ent->refcnt, 1);
        spin_lock_init(&ent->pde_unload_lock);
        INIT_LIST_HEAD(&ent->pde_openers);
        proc_set_user(ent, (*parent)->uid, (*parent)->gid);

        /* Revalidate everything under /proc/${pid}/net */
        if ((*parent)->flags & PROC_ENTRY_FORCE_LOOKUP)
                pde_force_lookup(ent);

out:
        return ent;
}

struct proc_dir_entry *proc_symlink(const char *name,
                struct proc_dir_entry *parent, const char *dest)
{
        struct proc_dir_entry *ent;

        ent = __proc_create(&parent, name,
                            (S_IFLNK | S_IRUGO | S_IWUGO | S_IXUGO), 1);

        if (ent) {
                ent->size = strlen(dest);
                ent->data = kmemdup(dest, ent->size + 1, GFP_KERNEL);
                if (ent->data) {
                        ent->proc_iops = &proc_link_inode_operations;
                        ent = proc_register(parent, ent);
                } else {
                        pde_free(ent);
                        ent = NULL;
                }
        }
        return ent;
}
EXPORT_SYMBOL(proc_symlink);

struct proc_dir_entry *_proc_mkdir(const char *name, umode_t mode,
                struct proc_dir_entry *parent, void *data, bool force_lookup)
{
        struct proc_dir_entry *ent;

        if (mode == 0)
                mode = S_IRUGO | S_IXUGO;

        ent = __proc_create(&parent, name, S_IFDIR | mode, 2);
        if (ent) {
                ent->data = data;
                ent->proc_dir_ops = &proc_dir_operations;
                ent->proc_iops = &proc_dir_inode_operations;
                if (force_lookup) {
                        pde_force_lookup(ent);
                }
                ent = proc_register(parent, ent);
        }
        return ent;
}
EXPORT_SYMBOL_GPL(_proc_mkdir);

struct proc_dir_entry *proc_mkdir_data(const char *name, umode_t mode,
                struct proc_dir_entry *parent, void *data)
{
        return _proc_mkdir(name, mode, parent, data, false);
}
EXPORT_SYMBOL_GPL(proc_mkdir_data);

struct proc_dir_entry *proc_mkdir_mode(const char *name, umode_t mode,
                struct proc_dir_entry *parent)
{
        return proc_mkdir_data(name, mode, parent, NULL);
}
EXPORT_SYMBOL(proc_mkdir_mode);

struct proc_dir_entry *proc_mkdir(const char *name,
                struct proc_dir_entry *parent)
{
        return proc_mkdir_data(name, 0, parent, NULL);
}
EXPORT_SYMBOL(proc_mkdir);

struct proc_dir_entry *proc_create_mount_point(const char *name)
{
        umode_t mode = S_IFDIR | S_IRUGO | S_IXUGO;
        struct proc_dir_entry *ent, *parent = NULL;

        ent = __proc_create(&parent, name, mode, 2);
        if (ent) {
                ent->data = NULL;
                ent->proc_dir_ops = NULL;
                ent->proc_iops = NULL;
                ent = proc_register(parent, ent);
        }
        return ent;
}
EXPORT_SYMBOL(proc_create_mount_point);

struct proc_dir_entry *proc_create_reg(const char *name, umode_t mode,
                struct proc_dir_entry **parent, void *data)
{
        struct proc_dir_entry *p;

        if ((mode & S_IFMT) == 0)
                mode |= S_IFREG;
        if ((mode & S_IALLUGO) == 0)
                mode |= S_IRUGO;
        if (WARN_ON_ONCE(!S_ISREG(mode)))
                return NULL;

        p = __proc_create(parent, name, mode, 1);
        if (p) {
                p->proc_iops = &proc_file_inode_operations;
                p->data = data;
        }
        return p;
}

static void pde_set_flags(struct proc_dir_entry *pde)
{
        if (pde->proc_ops->proc_flags & PROC_ENTRY_PERMANENT)
                pde->flags |= PROC_ENTRY_PERMANENT;
        if (pde->proc_ops->proc_read_iter)
                pde->flags |= PROC_ENTRY_proc_read_iter;
#ifdef CONFIG_COMPAT
        if (pde->proc_ops->proc_compat_ioctl)
                pde->flags |= PROC_ENTRY_proc_compat_ioctl;
#endif
        if (pde->proc_ops->proc_lseek)
                pde->flags |= PROC_ENTRY_proc_lseek;
}

struct proc_dir_entry *proc_create_data(const char *name, umode_t mode,
                struct proc_dir_entry *parent,
                const struct proc_ops *proc_ops, void *data)
{
        struct proc_dir_entry *p;

        p = proc_create_reg(name, mode, &parent, data);
        if (!p)
                return NULL;
        p->proc_ops = proc_ops;
        pde_set_flags(p);
        return proc_register(parent, p);
}
EXPORT_SYMBOL(proc_create_data);

struct proc_dir_entry *proc_create(const char *name, umode_t mode,
                                   struct proc_dir_entry *parent,
                                   const struct proc_ops *proc_ops)
{
        return proc_create_data(name, mode, parent, proc_ops, NULL);
}
EXPORT_SYMBOL(proc_create);

static int proc_seq_open(struct inode *inode, struct file *file)
{
        struct proc_dir_entry *de = PDE(inode);

        if (de->state_size)
                return seq_open_private(file, de->seq_ops, de->state_size);
        return seq_open(file, de->seq_ops);
}

static int proc_seq_release(struct inode *inode, struct file *file)
{
        struct proc_dir_entry *de = PDE(inode);

        if (de->state_size)
                return seq_release_private(inode, file);
        return seq_release(inode, file);
}

static const struct proc_ops proc_seq_ops = {
        /* not permanent -- can call into arbitrary seq_operations */
        .proc_open      = proc_seq_open,
        .proc_read_iter = seq_read_iter,
        .proc_lseek     = seq_lseek,
        .proc_release   = proc_seq_release,
};

struct proc_dir_entry *proc_create_seq_private(const char *name, umode_t mode,
                struct proc_dir_entry *parent, const struct seq_operations *ops,
                unsigned int state_size, void *data)
{
        struct proc_dir_entry *p;

        p = proc_create_reg(name, mode, &parent, data);
        if (!p)
                return NULL;
        p->proc_ops = &proc_seq_ops;
        p->seq_ops = ops;
        p->state_size = state_size;
        pde_set_flags(p);
        return proc_register(parent, p);
}
EXPORT_SYMBOL(proc_create_seq_private);

static int proc_single_open(struct inode *inode, struct file *file)
{
        struct proc_dir_entry *de = PDE(inode);

        return single_open(file, de->single_show, de->data);
}

static const struct proc_ops proc_single_ops = {
        /* not permanent -- can call into arbitrary ->single_show */
        .proc_open      = proc_single_open,
        .proc_read_iter = seq_read_iter,
        .proc_lseek     = seq_lseek,
        .proc_release   = single_release,
};

struct proc_dir_entry *proc_create_single_data(const char *name, umode_t mode,
                struct proc_dir_entry *parent,
                int (*show)(struct seq_file *, void *), void *data)
{
        struct proc_dir_entry *p;

        p = proc_create_reg(name, mode, &parent, data);
        if (!p)
                return NULL;
        p->proc_ops = &proc_single_ops;
        p->single_show = show;
        pde_set_flags(p);
        return proc_register(parent, p);
}
EXPORT_SYMBOL(proc_create_single_data);

void proc_set_size(struct proc_dir_entry *de, loff_t size)
{
        de->size = size;
}
EXPORT_SYMBOL(proc_set_size);

void proc_set_user(struct proc_dir_entry *de, kuid_t uid, kgid_t gid)
{
        de->uid = uid;
        de->gid = gid;
}
EXPORT_SYMBOL(proc_set_user);

void pde_put(struct proc_dir_entry *pde)
{
        if (refcount_dec_and_test(&pde->refcnt)) {
                proc_free_inum(pde->low_ino);
                pde_free(pde);
        }
}

/*
 * Remove a /proc entry and free it if it's not currently in use.
 */
void remove_proc_entry(const char *name, struct proc_dir_entry *parent)
{
        struct proc_dir_entry *de = NULL;
        const char *fn = name;
        unsigned int len;

        write_lock(&proc_subdir_lock);
        if (__xlate_proc_name(name, &parent, &fn) != 0) {
                write_unlock(&proc_subdir_lock);
                return;
        }
        len = strlen(fn);

        de = pde_subdir_find(parent, fn, len);
        if (de) {
                if (unlikely(pde_is_permanent(de))) {
                        WARN(1, "removing permanent /proc entry '%s'", de->name);
                        de = NULL;
                } else {
                        rb_erase(&de->subdir_node, &parent->subdir);
                        if (S_ISDIR(de->mode))
                                parent->nlink--;
                }
        }
        write_unlock(&proc_subdir_lock);
        if (!de) {
                WARN(1, "name '%s'\n", name);
                return;
        }

        proc_entry_rundown(de);

        WARN(pde_subdir_first(de),
             "%s: removing non-empty directory '%s/%s', leaking at least '%s'\n",
             __func__, de->parent->name, de->name, pde_subdir_first(de)->name);
        pde_put(de);
}
EXPORT_SYMBOL(remove_proc_entry);

int remove_proc_subtree(const char *name, struct proc_dir_entry *parent)
{
        struct proc_dir_entry *root = NULL, *de, *next;
        const char *fn = name;
        unsigned int len;

        write_lock(&proc_subdir_lock);
        if (__xlate_proc_name(name, &parent, &fn) != 0) {
                write_unlock(&proc_subdir_lock);
                return -ENOENT;
        }
        len = strlen(fn);

        root = pde_subdir_find(parent, fn, len);
        if (!root) {
                write_unlock(&proc_subdir_lock);
                return -ENOENT;
        }
        if (unlikely(pde_is_permanent(root))) {
                write_unlock(&proc_subdir_lock);
                WARN(1, "removing permanent /proc entry '%s/%s'",
                     root->parent->name, root->name);
                return -EINVAL;
        }
        rb_erase(&root->subdir_node, &parent->subdir);

        de = root;
        while (1) {
                next = pde_subdir_first(de);
                if (next) {
                        if (unlikely(pde_is_permanent(next))) {
                                write_unlock(&proc_subdir_lock);
                                WARN(1, "removing permanent /proc entry '%s/%s'",
                                     next->parent->name, next->name);
                                return -EINVAL;
                        }
                        rb_erase(&next->subdir_node, &de->subdir);
                        de = next;
                        continue;
                }
                next = de->parent;
                if (S_ISDIR(de->mode))
                        next->nlink--;
                write_unlock(&proc_subdir_lock);

                proc_entry_rundown(de);
                if (de == root)
                        break;
                pde_put(de);

                write_lock(&proc_subdir_lock);
                de = next;
        }
        pde_put(root);
        return 0;
}
EXPORT_SYMBOL(remove_proc_subtree);

void *proc_get_parent_data(const struct inode *inode)
{
        struct proc_dir_entry *de = PDE(inode);
        return de->parent->data;
}
EXPORT_SYMBOL_GPL(proc_get_parent_data);

void proc_remove(struct proc_dir_entry *de)
{
        if (de)
                remove_proc_subtree(de->name, de->parent);
}
EXPORT_SYMBOL(proc_remove);

/*
 * Pull a user buffer into memory and pass it to the file's write handler if
 * one is supplied. The ->write() method is permitted to modify the
 * kernel-side buffer.
 */
ssize_t proc_simple_write(struct file *f, const char __user *ubuf, size_t size,
                          loff_t *_pos)
{
        struct proc_dir_entry *pde = PDE(file_inode(f));
        char *buf;
        int ret;

        if (!pde->write)
                return -EACCES;
        if (size == 0 || size > PAGE_SIZE - 1)
                return -EINVAL;
        buf = memdup_user_nul(ubuf, size);
        if (IS_ERR(buf))
                return PTR_ERR(buf);
        ret = pde->write(f, buf, size);
        kfree(buf);
        return ret == 0 ? size : ret;
}
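The file above implements the generic /proc directory-entry machinery and exports helpers such as proc_mkdir(), proc_create_single_data() and proc_remove(). For orientation only, here is a small hedged sketch of how an out-of-tree module might use those exported helpers; the module name, the demo_dir/demo_show identifiers and the /proc/demo path are hypothetical, and the snippet is not part of generic.c.

#include <linux/init.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

/* Hypothetical demo module: creates /proc/demo/hello, backed by a
 * single_open()-style show routine via proc_create_single_data(). */
static struct proc_dir_entry *demo_dir;

static int demo_show(struct seq_file *m, void *v)
{
        seq_puts(m, "hello from the demo procfs entry\n");
        return 0;
}

static int __init demo_init(void)
{
        demo_dir = proc_mkdir("demo", NULL);            /* /proc/demo */
        if (!demo_dir)
                return -ENOMEM;
        if (!proc_create_single_data("hello", 0444, demo_dir,
                                     demo_show, NULL)) {
                proc_remove(demo_dir);
                return -ENOMEM;
        }
        return 0;
}

static void __exit demo_exit(void)
{
        proc_remove(demo_dir);                          /* removes the subtree */
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");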