Const-qualify the struct ctl_table argument in the proc_handler function
signatures. This is a prerequisite to moving the static ctl_table
structs into .rodata, which will ensure that proc_handler function
pointers cannot be modified.
This patch has been generated by the following coccinelle script:
```
  virtual patch
  @r1@
  identifier ctl, write, buffer, lenp, ppos;
  identifier func !~ "appldata_(timer|interval)_handler|sched_(rt|rr)_handler|rds_tcp_skbuf_handler|proc_sctp_do_(hmac_alg|rto_min|rto_max|udp_port|alpha_beta|auth|probe_interval)";
  @@
  int func(
  - struct ctl_table *ctl
  + const struct ctl_table *ctl
    ,int write, void *buffer, size_t *lenp, loff_t *ppos);
  @r2@
  identifier func, ctl, write, buffer, lenp, ppos;
  @@
  int func(
  - struct ctl_table *ctl
  + const struct ctl_table *ctl
    ,int write, void *buffer, size_t *lenp, loff_t *ppos)
  { ... }
  @r3@
  identifier func;
  @@
  int func(
  - struct ctl_table *
  + const struct ctl_table *
    ,int , void *, size_t *, loff_t *);
  @r4@
  identifier func, ctl;
  @@
  int func(
  - struct ctl_table *ctl
  + const struct ctl_table *ctl
    ,int , void *, size_t *, loff_t *);
  @r5@
  identifier func, write, buffer, lenp, ppos;
  @@
  int func(
  - struct ctl_table *
  + const struct ctl_table *
    ,int write, void *buffer, size_t *lenp, loff_t *ppos);
```
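For illustration, a handler matched by rule r2 above ends up looking like
this after the conversion. This is a minimal sketch rather than a hunk from
the patch; the handler name is invented:
```c
#include <linux/sysctl.h>

/* Sketch: the handler now only sees a read-only table ... */
static int example_sysctl_handler(const struct ctl_table *ctl, int write,
				  void *buffer, size_t *lenp, loff_t *ppos)
{
	/*
	 * ... and it can still forward that table to the generic helpers,
	 * whose prototypes are const-qualified by the same treewide change.
	 */
	return proc_dointvec_minmax(ctl, write, buffer, lenp, ppos);
}
```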
* Code formatting was adjusted in xfs_sysctl.c to comply with coding
  conventions. The xfs_stats_clear_proc_handler,
  xfs_panic_mask_proc_handler and xfs_deprecated_dointvec_minmax handlers
  were adjusted.
* The ctl_table argument in proc_watchdog_common was const-qualified. It
  is itself called from a proc_handler and calls back into another
  proc_handler, so it had to be changed as part of the proc_handler
  migration (see the sketch below).
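A heavily simplified sketch of that call chain (illustrative only, not the
actual kernel/watchdog.c code; locking and the meaning of 'which' are
elided):
```c
#include <linux/sysctl.h>

/*
 * A shared helper sits between the exported proc_handlers and the
 * generic proc_do* helpers, so its table argument has to become
 * const together with them.
 */
static int proc_watchdog_common(int which, const struct ctl_table *table,
				int write, void *buffer, size_t *lenp,
				loff_t *ppos)
{
	/* mode-specific handling elided */
	return proc_dointvec_minmax(table, write, buffer, lenp, ppos);
}

int proc_watchdog(const struct ctl_table *table, int write,
		  void *buffer, size_t *lenp, loff_t *ppos)
{
	/* the real code passes watchdog-type flags as 'which' */
	return proc_watchdog_common(0, table, write, buffer, lenp, ppos);
}
```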
Co-developed-by: Thomas Weißschuh <linux@weissschuh.net>
Signed-off-by: Thomas Weißschuh <linux@weissschuh.net>
Co-developed-by: Joel Granados <j.granados@samsung.com>
Signed-off-by: Joel Granados <j.granados@samsung.com>

79 lines · 1.9 KiB · C
// SPDX-License-Identifier: GPL-2.0
/*
 * Implement the manual drop-all-pagecache function
 */

#include <linux/pagemap.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/writeback.h>
#include <linux/sysctl.h>
#include <linux/gfp.h>
#include <linux/swap.h>
#include "internal.h"

/* A global variable is a bit ugly, but it keeps the code simple */
int sysctl_drop_caches;

static void drop_pagecache_sb(struct super_block *sb, void *unused)
{
	struct inode *inode, *toput_inode = NULL;

	spin_lock(&sb->s_inode_list_lock);
	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
		spin_lock(&inode->i_lock);
		/*
		 * We must skip inodes in unusual state. We may also skip
		 * inodes without pages but we deliberately won't in case
		 * we need to reschedule to avoid softlockups.
		 */
		if ((inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) ||
		    (mapping_empty(inode->i_mapping) && !need_resched())) {
			spin_unlock(&inode->i_lock);
			continue;
		}
		__iget(inode);
		spin_unlock(&inode->i_lock);
		spin_unlock(&sb->s_inode_list_lock);

		invalidate_mapping_pages(inode->i_mapping, 0, -1);
		iput(toput_inode);
		toput_inode = inode;

		cond_resched();
		spin_lock(&sb->s_inode_list_lock);
	}
	spin_unlock(&sb->s_inode_list_lock);
	iput(toput_inode);
}

int drop_caches_sysctl_handler(const struct ctl_table *table, int write,
		void *buffer, size_t *length, loff_t *ppos)
{
	int ret;

	ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
	if (ret)
		return ret;
	if (write) {
		static int stfu;

		if (sysctl_drop_caches & 1) {
			lru_add_drain_all();
			iterate_supers(drop_pagecache_sb, NULL);
			count_vm_event(DROP_PAGECACHE);
		}
		if (sysctl_drop_caches & 2) {
			drop_slab();
			count_vm_event(DROP_SLAB);
		}
		if (!stfu) {
			pr_info("%s (%d): drop_caches: %d\n",
				current->comm, task_pid_nr(current),
				sysctl_drop_caches);
		}
		stfu |= sysctl_drop_caches & 4;
	}
	return 0;
}
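From userspace this handler is reached by writing to
/proc/sys/vm/drop_caches: bit 1 drops the page cache, bit 2 drops
reclaimable slab objects, and bit 4 suppresses the informational message.
Below is a sketch of how such a handler is typically wired into a sysctl
table; the table name and registration call are illustrative, not the exact
upstream registration:
```c
/*
 * Sketch only: builds on sysctl_drop_caches and
 * drop_caches_sysctl_handler from the file above.
 */
static struct ctl_table drop_caches_table[] = {
	{
		.procname	= "drop_caches",
		.data		= &sysctl_drop_caches,
		.maxlen		= sizeof(int),
		.mode		= 0200,
		.proc_handler	= drop_caches_sysctl_handler,
		.extra1		= SYSCTL_ONE,
		.extra2		= SYSCTL_FOUR,
	},
};

static int __init drop_caches_sysctl_init(void)
{
	register_sysctl_init("vm", drop_caches_table);
	return 0;
}
fs_initcall(drop_caches_sysctl_init);
```
Once every proc_handler takes a const struct ctl_table *, a table like this
can eventually be moved into .rodata without any handler being able to
write through its pointer.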