mirror of
				git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
				synced 2025-09-18 22:14:16 +00:00 
			
		
		
		
	sched: fix the task_group hierarchy for UID grouping
UID grouping doesn't actually have a task_group representing the root of the task_group tree. Add one.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
This commit is contained in:
		
							parent
							
								
									ec7dc8ac73
								
							
						
					
					
						commit
						eff766a65c
					
				
					 3 changed files with 45 additions and 3 deletions
				
			
		|  | @ -2051,6 +2051,9 @@ extern void normalize_rt_tasks(void); | |||
| #ifdef CONFIG_GROUP_SCHED | ||||
| 
 | ||||
| extern struct task_group init_task_group; | ||||
| #ifdef CONFIG_USER_SCHED | ||||
| extern struct task_group root_task_group; | ||||
| #endif | ||||
| 
 | ||||
| extern struct task_group *sched_create_group(struct task_group *parent); | ||||
| extern void sched_destroy_group(struct task_group *tg); | ||||
|  |  | |||
|  | @ -274,6 +274,14 @@ struct task_group { | |||
| }; | ||||
| 
 | ||||
| #ifdef CONFIG_USER_SCHED | ||||
| 
 | ||||
| /*
 | ||||
|  * Root task group. | ||||
|  * 	Every UID task group (including init_task_group aka UID-0) will | ||||
|  * 	be a child to this group. | ||||
|  */ | ||||
| struct task_group root_task_group; | ||||
| 
 | ||||
| #ifdef CONFIG_FAIR_GROUP_SCHED | ||||
| /* Default task group's sched entity on each cpu */ | ||||
| static DEFINE_PER_CPU(struct sched_entity, init_sched_entity); | ||||
|  | @ -285,6 +293,8 @@ static DEFINE_PER_CPU(struct cfs_rq, init_cfs_rq) ____cacheline_aligned_in_smp; | |||
| static DEFINE_PER_CPU(struct sched_rt_entity, init_sched_rt_entity); | ||||
| static DEFINE_PER_CPU(struct rt_rq, init_rt_rq) ____cacheline_aligned_in_smp; | ||||
| #endif | ||||
| #else | ||||
| #define root_task_group init_task_group | ||||
| #endif | ||||
| 
 | ||||
| /* task_group_lock serializes add/remove of task groups and also changes to
 | ||||
|  | @ -7507,6 +7517,9 @@ void __init sched_init(void) | |||
| #endif | ||||
| #ifdef CONFIG_RT_GROUP_SCHED | ||||
| 	alloc_size += 2 * nr_cpu_ids * sizeof(void **); | ||||
| #endif | ||||
| #ifdef CONFIG_USER_SCHED | ||||
| 	alloc_size *= 2; | ||||
| #endif | ||||
| 	/*
 | ||||
| 	 * As sched_init() is called before page_alloc is setup, | ||||
|  | @ -7521,12 +7534,29 @@ void __init sched_init(void) | |||
| 
 | ||||
| 		init_task_group.cfs_rq = (struct cfs_rq **)ptr; | ||||
| 		ptr += nr_cpu_ids * sizeof(void **); | ||||
| 
 | ||||
| #ifdef CONFIG_USER_SCHED | ||||
| 		root_task_group.se = (struct sched_entity **)ptr; | ||||
| 		ptr += nr_cpu_ids * sizeof(void **); | ||||
| 
 | ||||
| 		root_task_group.cfs_rq = (struct cfs_rq **)ptr; | ||||
| 		ptr += nr_cpu_ids * sizeof(void **); | ||||
| #endif | ||||
| #endif | ||||
| #ifdef CONFIG_RT_GROUP_SCHED | ||||
| 		init_task_group.rt_se = (struct sched_rt_entity **)ptr; | ||||
| 		ptr += nr_cpu_ids * sizeof(void **); | ||||
| 
 | ||||
| 		init_task_group.rt_rq = (struct rt_rq **)ptr; | ||||
| 		ptr += nr_cpu_ids * sizeof(void **); | ||||
| 
 | ||||
| #ifdef CONFIG_USER_SCHED | ||||
| 		root_task_group.rt_se = (struct sched_rt_entity **)ptr; | ||||
| 		ptr += nr_cpu_ids * sizeof(void **); | ||||
| 
 | ||||
| 		root_task_group.rt_rq = (struct rt_rq **)ptr; | ||||
| 		ptr += nr_cpu_ids * sizeof(void **); | ||||
| #endif | ||||
| #endif | ||||
| 	} | ||||
| 
 | ||||
|  | @ -7540,6 +7570,10 @@ void __init sched_init(void) | |||
| #ifdef CONFIG_RT_GROUP_SCHED | ||||
| 	init_rt_bandwidth(&init_task_group.rt_bandwidth, | ||||
| 			global_rt_period(), global_rt_runtime()); | ||||
| #ifdef CONFIG_USER_SCHED | ||||
| 	init_rt_bandwidth(&root_task_group.rt_bandwidth, | ||||
| 			global_rt_period(), RUNTIME_INF); | ||||
| #endif | ||||
| #endif | ||||
| 
 | ||||
| #ifdef CONFIG_GROUP_SCHED | ||||
|  | @ -7582,6 +7616,8 @@ void __init sched_init(void) | |||
| 		 */ | ||||
| 		init_tg_cfs_entry(&init_task_group, &rq->cfs, NULL, i, 1, NULL); | ||||
| #elif defined CONFIG_USER_SCHED | ||||
| 		root_task_group.shares = NICE_0_LOAD; | ||||
| 		init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, 0, NULL); | ||||
| 		/*
 | ||||
| 		 * In case of task-groups formed thr' the user id of tasks, | ||||
| 		 * init_task_group represents tasks belonging to root user. | ||||
|  | @ -7595,7 +7631,8 @@ void __init sched_init(void) | |||
| 		 */ | ||||
| 		init_tg_cfs_entry(&init_task_group, | ||||
| 				&per_cpu(init_cfs_rq, i), | ||||
| 				&per_cpu(init_sched_entity, i), i, 1, NULL); | ||||
| 				&per_cpu(init_sched_entity, i), i, 1, | ||||
| 				root_task_group.se[i]); | ||||
| 
 | ||||
| #endif | ||||
| #endif /* CONFIG_FAIR_GROUP_SCHED */ | ||||
|  | @ -7606,9 +7643,11 @@ void __init sched_init(void) | |||
| #ifdef CONFIG_CGROUP_SCHED | ||||
| 		init_tg_rt_entry(&init_task_group, &rq->rt, NULL, i, 1, NULL); | ||||
| #elif defined CONFIG_USER_SCHED | ||||
| 		init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, 0, NULL); | ||||
| 		init_tg_rt_entry(&init_task_group, | ||||
| 				&per_cpu(init_rt_rq, i), | ||||
| 				&per_cpu(init_sched_rt_entity, i), i, 1, NULL); | ||||
| 				&per_cpu(init_sched_rt_entity, i), i, 1, | ||||
| 				root_task_group.rt_se[i]); | ||||
| #endif | ||||
| #endif | ||||
| 
 | ||||
|  |  | |||
|  | @ -101,7 +101,7 @@ static int sched_create_user(struct user_struct *up) | |||
| { | ||||
| 	int rc = 0; | ||||
| 
 | ||||
| 	up->tg = sched_create_group(NULL); | ||||
| 	up->tg = sched_create_group(&root_task_group); | ||||
| 	if (IS_ERR(up->tg)) | ||||
| 		rc = -ENOMEM; | ||||
| 
 | ||||
|  |  | |||
		Loading…
	
	Add table
		
		Reference in a new issue
	
	 Peter Zijlstra
						Peter Zijlstra