/* SPDX-License-Identifier: GPL-2.0-or-later */
/* Internal procfs definitions
 *
 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/proc_fs.h>
#include <linux/proc_ns.h>
#include <linux/refcount.h>
#include <linux/spinlock.h>
#include <linux/atomic.h>
#include <linux/binfmts.h>
#include <linux/sched/coredump.h>
#include <linux/sched/task.h>
#include <linux/mm.h>

struct ctl_table_header;
struct mempolicy;

/*
 * This is not completely implemented yet. The idea is to
 * create an in-memory tree (like the actual /proc filesystem
 * tree) of these proc_dir_entries, so that we can dynamically
 * add new files to /proc.
 *
 * parent/subdir are used for the directory structure (every /proc file has a
 * parent, but "subdir" is empty for all non-directory entries).
 * subdir_node is used to build the rb tree "subdir" of the parent.
 */
struct proc_dir_entry {
        /*
         * number of callers into module in progress;
         * negative -> it's going away RSN
         */
        atomic_t in_use;
        refcount_t refcnt;
        struct list_head pde_openers;   /* who did ->open, but not ->release */
        /* protects ->pde_openers and all struct pde_opener instances */
        spinlock_t pde_unload_lock;
        struct completion *pde_unload_completion;
        const struct inode_operations *proc_iops;
        union {
                const struct proc_ops *proc_ops;
                const struct file_operations *proc_dir_ops;
        };
        union {
                const struct seq_operations *seq_ops;
                int (*single_show)(struct seq_file *, void *);
        };
        proc_write_t write;
        void *data;
        unsigned int state_size;
        unsigned int low_ino;
        nlink_t nlink;
        kuid_t uid;
        kgid_t gid;
        loff_t size;
        struct proc_dir_entry *parent;
        struct rb_root subdir;
        struct rb_node subdir_node;
        char *name;
        umode_t mode;
        u8 flags;
        u8 namelen;
        char inline_name[];
} __randomize_layout;
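
/*
 * Illustrative sketch (not part of the original header): the comment above
 * describes how these entries form an in-memory tree.  Callers normally
 * never touch struct proc_dir_entry fields directly; they use the public
 * API from <linux/proc_fs.h> to allocate an entry and link it under a
 * parent, e.g.:
 *
 *	struct proc_dir_entry *dir, *ent;
 *
 *	dir = proc_mkdir("example", NULL);	(NULL parent == proc_root)
 *	ent = proc_create("example/stats", 0444, NULL, &example_proc_ops);
 *
 * "example" and "example_proc_ops" are hypothetical names.  The supplied
 * proc_ops pointer ends up in ->proc_ops, while ->parent, ->subdir and
 * ->subdir_node hook the new entry into its parent's rb tree.
 */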

#define SIZEOF_PDE	(				\
	sizeof(struct proc_dir_entry) < 128 ? 128 :	\
	sizeof(struct proc_dir_entry) < 192 ? 192 :	\
	sizeof(struct proc_dir_entry) < 256 ? 256 :	\
	sizeof(struct proc_dir_entry) < 512 ? 512 :	\
	0)
#define SIZEOF_PDE_INLINE_NAME	(SIZEOF_PDE - sizeof(struct proc_dir_entry))
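
/*
 * Note (sketch, not from the original header): SIZEOF_PDE rounds the
 * proc_dir_entry allocation up to the next slab-friendly size, and
 * SIZEOF_PDE_INLINE_NAME is whatever is left over, used to keep short
 * entry names directly in ->inline_name[] instead of a separate
 * allocation.  As a purely illustrative example, if
 * sizeof(struct proc_dir_entry) were 200 bytes on some configuration,
 * objects would come from a 256-byte cache and 56 bytes would remain
 * for the inline name; the exact numbers are configuration dependent.
 */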

static inline bool pde_is_permanent(const struct proc_dir_entry *pde)
{
        return pde->flags & PROC_ENTRY_PERMANENT;
}

static inline void pde_make_permanent(struct proc_dir_entry *pde)
{
        pde->flags |= PROC_ENTRY_PERMANENT;
}
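
/*
 * Usage sketch (assumption, not taken from this file): entries that can
 * never be removed at run time -- /proc/cpuinfo, /proc/stat and friends --
 * may be flagged permanent so open/read/close can skip the "entry is going
 * away" protection entirely.  Something like:
 *
 *	struct proc_dir_entry *pde;
 *
 *	pde = proc_create("example", 0, NULL, &example_proc_ops);
 *	if (pde)
 *		pde_make_permanent(pde);
 *
 * where "example" and "example_proc_ops" are hypothetical.  Do not mark an
 * entry permanent if its backing code can be unloaded.
 */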

static inline bool pde_has_proc_read_iter(const struct proc_dir_entry *pde)
{
        return pde->flags & PROC_ENTRY_proc_read_iter;
}

static inline bool pde_has_proc_compat_ioctl(const struct proc_dir_entry *pde)
{
#ifdef CONFIG_COMPAT
        return pde->flags & PROC_ENTRY_proc_compat_ioctl;
#else
        return false;
#endif
}

static inline bool pde_has_proc_lseek(const struct proc_dir_entry *pde)
{
        return pde->flags & PROC_ENTRY_proc_lseek;
}

extern struct kmem_cache *proc_dir_entry_cache;
void pde_free(struct proc_dir_entry *pde);

union proc_op {
        int (*proc_get_link)(struct dentry *, struct path *);
        int (*proc_show)(struct seq_file *m,
                struct pid_namespace *ns, struct pid *pid,
                struct task_struct *task);
        int lsmid;
};

struct proc_inode {
        struct pid *pid;
        unsigned int fd;
        union proc_op op;
        struct proc_dir_entry *pde;
        struct ctl_table_header *sysctl;
        const struct ctl_table *sysctl_entry;
        struct hlist_node sibling_inodes;
        const struct proc_ns_operations *ns_ops;
        struct inode vfs_inode;
} __randomize_layout;

/*
 * General functions
 */
static inline struct proc_inode *PROC_I(const struct inode *inode)
{
        return container_of(inode, struct proc_inode, vfs_inode);
}

static inline struct proc_dir_entry *PDE(const struct inode *inode)
{
        return PROC_I(inode)->pde;
}

static inline struct pid *proc_pid(const struct inode *inode)
{
        return PROC_I(inode)->pid;
}

static inline struct task_struct *get_proc_task(const struct inode *inode)
{
        return get_pid_task(proc_pid(inode), PIDTYPE_PID);
}
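
/*
 * Reference-counting sketch (illustrative): get_proc_task() returns a
 * task_struct with a reference taken via get_pid_task(), or NULL if the
 * task is already gone.  A typical caller pattern would be:
 *
 *	struct task_struct *task = get_proc_task(inode);
 *
 *	if (!task)
 *		return -ESRCH;
 *	... use task ...
 *	put_task_struct(task);
 */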

void task_dump_owner(struct task_struct *task, umode_t mode,
                     kuid_t *ruid, kgid_t *rgid);

unsigned name_to_int(const struct qstr *qstr);

/*
 * Offset of the first process in the /proc root directory.
 */
#define FIRST_PROCESS_ENTRY 256

/* Worst case buffer size needed for holding an integer. */
#define PROC_NUMBUF 13

#ifdef CONFIG_PAGE_MAPCOUNT
/**
 * folio_precise_page_mapcount() - Number of mappings of this folio page.
 * @folio: The folio.
 * @page: The page.
 *
 * The number of present user page table entries that reference this page
 * as tracked via the RMAP: either referenced directly (PTE) or as part of
 * a larger area that covers this page (e.g., PMD).
 *
 * Use this function only for the calculation of existing statistics
 * (USS, PSS, mapcount_max) and for debugging purposes (/proc/kpagecount).
 *
 * Do not add new users.
 *
 * Returns: The number of mappings of this folio page. 0 for
 * folios that are not mapped to user space or are not tracked via the RMAP
 * (e.g., shared zeropage).
 */
static inline int folio_precise_page_mapcount(struct folio *folio,
                struct page *page)
{
        int mapcount = atomic_read(&page->_mapcount) + 1;

        if (page_mapcount_is_type(mapcount))
                mapcount = 0;
        if (folio_test_large(folio))
                mapcount += folio_entire_mapcount(folio);

        return mapcount;
}
#else /* !CONFIG_PAGE_MAPCOUNT */
static inline int folio_precise_page_mapcount(struct folio *folio,
                struct page *page)
{
        BUILD_BUG();
}
#endif /* CONFIG_PAGE_MAPCOUNT */
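
/*
 * Worked example (illustrative, derived from the code above): for a THP
 * that is PMD-mapped once and whose individual page is additionally
 * PTE-mapped by one other process, page->_mapcount + 1 yields 1 for the
 * PTE mapping and folio_entire_mapcount() contributes 1 for the PMD
 * mapping, so folio_precise_page_mapcount() returns 2 for that page and
 * 1 for every other page of the folio.
 */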

/**
 * folio_average_page_mapcount() - Average number of mappings per page in this
 *				   folio
 * @folio: The folio.
 *
 * The average number of user page table entries that reference each page in
 * this folio as tracked via the RMAP: either referenced directly (PTE) or
 * as part of a larger area that covers this page (e.g., PMD).
 *
 * The average is calculated by rounding to the nearest integer; however,
 * to avoid duplicated code in current callers, the average is at least
 * 1 if any page of the folio is mapped.
 *
 * Returns: The average number of mappings per page in this folio.
 */
static inline int folio_average_page_mapcount(struct folio *folio)
{
        int mapcount, entire_mapcount, avg;

        if (!folio_test_large(folio))
                return atomic_read(&folio->_mapcount) + 1;

        mapcount = folio_large_mapcount(folio);
        if (unlikely(mapcount <= 0))
                return 0;
        entire_mapcount = folio_entire_mapcount(folio);
        if (mapcount <= entire_mapcount)
                return entire_mapcount;
        mapcount -= entire_mapcount;

        /* Round to closest integer ... */
        avg = ((unsigned int)mapcount + folio_large_nr_pages(folio) / 2) >> folio_large_order(folio);
        /* ... but return at least 1. */
        return max_t(int, avg + entire_mapcount, 1);
}
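
/*
 * Worked example (illustrative, derived from the code above): for an
 * order-2 folio (4 pages) with folio_large_mapcount() == 9 and one entire
 * (PMD-style) mapping, the per-PTE share is 9 - 1 = 8, the rounded average
 * is (8 + 4/2) >> 2 = 2, and the function returns 2 + 1 = 3.  For the same
 * folio mapped by a single PTE only, the rounded average is 0 and the
 * "at least 1" rule makes the function return 1.
 */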

/*
 * array.c
 */
extern const struct file_operations proc_tid_children_operations;

extern void proc_task_name(struct seq_file *m, struct task_struct *p,
                           bool escape);
extern int proc_tid_stat(struct seq_file *, struct pid_namespace *,
                         struct pid *, struct task_struct *);
extern int proc_tgid_stat(struct seq_file *, struct pid_namespace *,
                          struct pid *, struct task_struct *);
extern int proc_pid_status(struct seq_file *, struct pid_namespace *,
                           struct pid *, struct task_struct *);
extern int proc_pid_statm(struct seq_file *, struct pid_namespace *,
                          struct pid *, struct task_struct *);

/*
 * base.c
 */
extern const struct dentry_operations pid_dentry_operations;
extern int pid_getattr(struct mnt_idmap *, const struct path *,
                       struct kstat *, u32, unsigned int);
extern int proc_setattr(struct mnt_idmap *, struct dentry *,
                        struct iattr *);
extern void proc_pid_evict_inode(struct proc_inode *);
extern struct inode *proc_pid_make_inode(struct super_block *, struct task_struct *, umode_t);
extern void pid_update_inode(struct task_struct *, struct inode *);
extern int pid_delete_dentry(const struct dentry *);
extern int proc_pid_readdir(struct file *, struct dir_context *);
struct dentry *proc_pid_lookup(struct dentry *, unsigned int);
extern loff_t mem_lseek(struct file *, loff_t, int);

/* Lookups */
typedef struct dentry *instantiate_t(struct dentry *,
                                     struct task_struct *, const void *);
bool proc_fill_cache(struct file *, struct dir_context *, const char *, unsigned int,
                     instantiate_t, struct task_struct *, const void *);

/*
 * generic.c
 */
struct proc_dir_entry *proc_create_reg(const char *name, umode_t mode,
                struct proc_dir_entry **parent, void *data);
struct proc_dir_entry *proc_register(struct proc_dir_entry *dir,
                struct proc_dir_entry *dp);
extern struct dentry *proc_lookup(struct inode *, struct dentry *, unsigned int);
struct dentry *proc_lookup_de(struct inode *, struct dentry *, struct proc_dir_entry *);
extern int proc_readdir(struct file *, struct dir_context *);
int proc_readdir_de(struct file *, struct dir_context *, struct proc_dir_entry *);

static inline void pde_get(struct proc_dir_entry *pde)
{
        refcount_inc(&pde->refcnt);
}
extern void pde_put(struct proc_dir_entry *);

static inline bool is_empty_pde(const struct proc_dir_entry *pde)
{
        return S_ISDIR(pde->mode) && !pde->proc_iops;
}
extern ssize_t proc_simple_write(struct file *, const char __user *, size_t, loff_t *);
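
/*
 * Lifetime sketch (illustrative): ->refcnt pins the proc_dir_entry object
 * itself, while ->in_use (see the struct definition above) counts callers
 * currently inside the entry's handlers.  Code that keeps a pde pointer
 * beyond the current call is expected to balance the object reference:
 *
 *	pde_get(pde);
 *	... pde may be dereferenced later ...
 *	pde_put(pde);
 */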

/*
 * inode.c
 */
struct pde_opener {
        struct list_head lh;
        struct file *file;
        bool closing;
        struct completion *c;
} __randomize_layout;
extern const struct inode_operations proc_link_inode_operations;
extern const struct inode_operations proc_pid_link_inode_operations;
extern const struct super_operations proc_sops;

void proc_init_kmemcache(void);
void proc_invalidate_siblings_dcache(struct hlist_head *inodes, spinlock_t *lock);
void set_proc_pid_nlink(void);
extern struct inode *proc_get_inode(struct super_block *, struct proc_dir_entry *);
extern void proc_entry_rundown(struct proc_dir_entry *);

/*
 * proc_namespaces.c
 */
extern const struct inode_operations proc_ns_dir_inode_operations;
extern const struct file_operations proc_ns_dir_operations;

/*
 * proc_net.c
 */
extern const struct file_operations proc_net_operations;
extern const struct inode_operations proc_net_inode_operations;

#ifdef CONFIG_NET
extern int proc_net_init(void);
#else
static inline int proc_net_init(void) { return 0; }
#endif

/*
 * proc_self.c
 */
extern int proc_setup_self(struct super_block *);

/*
 * proc_thread_self.c
 */
extern int proc_setup_thread_self(struct super_block *);
extern void proc_thread_self_init(void);

/*
 * proc_sysctl.c
 */
#ifdef CONFIG_PROC_SYSCTL
extern int proc_sys_init(void);
extern void proc_sys_evict_inode(struct inode *inode,
                                 struct ctl_table_header *head);
#else
static inline void proc_sys_init(void) { }
static inline void proc_sys_evict_inode(struct inode *inode,
                                        struct ctl_table_header *head) { }
#endif

/*
 * proc_tty.c
 */
#ifdef CONFIG_TTY
extern void proc_tty_init(void);
#else
static inline void proc_tty_init(void) {}
#endif

/*
 * root.c
 */
extern struct proc_dir_entry proc_root;

extern void proc_self_init(void);

/*
 * task_[no]mmu.c
 */
struct mem_size_stats;
struct proc_maps_private {
        struct inode *inode;
        struct task_struct *task;
        struct mm_struct *mm;
        struct vma_iterator iter;
        loff_t last_pos;
#ifdef CONFIG_PER_VMA_LOCK
        bool mmap_locked;
        struct vm_area_struct *locked_vma;
#endif
#ifdef CONFIG_NUMA
        struct mempolicy *task_mempolicy;
#endif
} __randomize_layout;
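
/*
 * Note on the CONFIG_PER_VMA_LOCK fields above (a summary of the design,
 * not part of the original header): /proc/pid/maps readers try to walk the
 * VMA tree under individual per-VMA locks; when taking a per-VMA lock
 * fails, the reader falls back to taking mmap_lock for reading, locks the
 * VMA, drops mmap_lock and continues.  "locked_vma" remembers which VMA is
 * currently locked and "mmap_locked" records whether the fallback path is
 * in use, so the iterator knows what to unlock before moving on.
 */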

struct mm_struct *proc_mem_open(struct inode *inode, unsigned int mode);

extern const struct file_operations proc_pid_maps_operations;
extern const struct file_operations proc_pid_numa_maps_operations;
extern const struct file_operations proc_pid_smaps_operations;
extern const struct file_operations proc_pid_smaps_rollup_operations;
extern const struct file_operations proc_clear_refs_operations;
extern const struct file_operations proc_pagemap_operations;

extern unsigned long task_vsize(struct mm_struct *);
extern unsigned long task_statm(struct mm_struct *,
                                unsigned long *, unsigned long *,
                                unsigned long *, unsigned long *);
extern void task_mem(struct seq_file *, struct mm_struct *);

extern const struct dentry_operations proc_net_dentry_ops;
static inline void pde_force_lookup(struct proc_dir_entry *pde)
{
        /* /proc/net/ entries can be changed under us by setns(CLONE_NEWNET) */
        pde->flags |= PROC_ENTRY_FORCE_LOOKUP;
}

/*
 * Add a new procfs dentry that can't serve as a mountpoint. That should
 * encompass anything that is ephemeral and can just disappear while the
 * process is still around.
 */
static inline struct dentry *proc_splice_unmountable(struct inode *inode,
                struct dentry *dentry, const struct dentry_operations *d_ops)
{
        dont_mount(dentry);
        return d_splice_alias_ops(inode, dentry, d_ops);
}