mirror of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git (synced 2025-10-31 16:54:21 +00:00)

	arm64: stacktrace: rework stack boundary discovery
In subsequent patches we'll want to acquire the stack boundaries ahead-of-time, and we'll need to be able to acquire the relevant stack_info regardless of whether we have an object that happens to be on the stack.

This patch replaces the on_XXX_stack() helpers with stackinfo_get_XXX() helpers, with the caller being responsible for checking whether an object is on a relevant stack. For the moment this is moved into the on_accessible_stack() functions, making these slightly larger; subsequent patches will remove the on_accessible_stack() functions and simplify the logic.

The on_irq_stack() and on_task_stack() helpers are kept as these are used by IRQ entry sequences and stackleak respectively. As they're only used as predicates, the stack_info pointer parameter is removed in both cases.

As the on_accessible_stack() functions are always passed a non-NULL info pointer, these now update info unconditionally. When updating the type to STACK_TYPE_UNKNOWN, the low/high bounds are also modified, but as these will not be consumed this should have no adverse effect.

There should be no functional change as a result of this patch.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Reviewed-by: Kalesh Singh <kaleshsingh@google.com>
Reviewed-by: Madhavan T. Venkataraman <madvenka@linux.microsoft.com>
Reviewed-by: Mark Brown <broonie@kernel.org>
Cc: Fuad Tabba <tabba@google.com>
Cc: Marc Zyngier <maz@kernel.org>
Cc: Will Deacon <will@kernel.org>
Link: https://lore.kernel.org/r/20220901130646.1316937-7-mark.rutland@arm.com
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
parent 36f9a8793c
commit d1f684e46b

7 changed files with 151 additions and 94 deletions
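
In short, the patch moves the membership check out of the helpers and into their callers. A minimal before/after sketch of the calling convention, using only names from this patch (the surrounding declarations and the sp/size/info variables are assumed):

	/* Before: on_XXX_stack() computed the stack bounds and performed the
	 * range check itself, optionally reporting the bounds through info. */
	if (on_irq_stack(sp, size, &info))
		/* [sp, sp + size) lies on the IRQ stack; info holds its bounds */;

	/* After: stackinfo_get_XXX() only reports the boundaries, and the
	 * caller performs the range check with stackinfo_on_stack(). */
	struct stack_info info = stackinfo_get_irq();
	if (stackinfo_on_stack(&info, sp, size))
		/* [sp, sp + size) lies on the IRQ stack */;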
				
			
@@ -410,7 +410,7 @@ long get_tagged_addr_ctrl(struct task_struct *task);
  * The top of the current task's task stack
  */
 #define current_top_of_stack()	((unsigned long)current->stack + THREAD_SIZE)
-#define on_thread_stack()	(on_task_stack(current, current_stack_pointer, 1, NULL))
+#define on_thread_stack()	(on_task_stack(current, current_stack_pointer, 1))
 
 #endif /* __ASSEMBLY__ */
 #endif /* __ASM_PROCESSOR_H */
@@ -22,77 +22,91 @@ extern void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk,
 
 DECLARE_PER_CPU(unsigned long *, irq_stack_ptr);
 
-static inline bool on_irq_stack(unsigned long sp, unsigned long size,
-				struct stack_info *info)
+static inline struct stack_info stackinfo_get_irq(void)
 {
 	unsigned long low = (unsigned long)raw_cpu_read(irq_stack_ptr);
 	unsigned long high = low + IRQ_STACK_SIZE;
 
-	return on_stack(sp, size, low, high, STACK_TYPE_IRQ, info);
+	return (struct stack_info) {
+		.low = low,
+		.high = high,
+		.type = STACK_TYPE_IRQ,
+	};
 }
 
-static inline bool on_task_stack(const struct task_struct *tsk,
-				 unsigned long sp, unsigned long size,
-				 struct stack_info *info)
+static inline bool on_irq_stack(unsigned long sp, unsigned long size)
+{
+	struct stack_info info = stackinfo_get_irq();
+	return stackinfo_on_stack(&info, sp, size);
+}
+
+static inline struct stack_info stackinfo_get_task(const struct task_struct *tsk)
 {
 	unsigned long low = (unsigned long)task_stack_page(tsk);
 	unsigned long high = low + THREAD_SIZE;
 
-	return on_stack(sp, size, low, high, STACK_TYPE_TASK, info);
+	return (struct stack_info) {
+		.low = low,
+		.high = high,
+		.type = STACK_TYPE_TASK,
+	};
+}
+
+static inline bool on_task_stack(const struct task_struct *tsk,
+				 unsigned long sp, unsigned long size)
+{
+	struct stack_info info = stackinfo_get_task(tsk);
+	return stackinfo_on_stack(&info, sp, size);
 }
 
 #ifdef CONFIG_VMAP_STACK
 DECLARE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack);
 
-static inline bool on_overflow_stack(unsigned long sp, unsigned long size,
-				struct stack_info *info)
+static inline struct stack_info stackinfo_get_overflow(void)
 {
 	unsigned long low = (unsigned long)raw_cpu_ptr(overflow_stack);
 	unsigned long high = low + OVERFLOW_STACK_SIZE;
 
-	return on_stack(sp, size, low, high, STACK_TYPE_OVERFLOW, info);
+	return (struct stack_info) {
+		.low = low,
+		.high = high,
+		.type = STACK_TYPE_OVERFLOW,
+	};
 }
 #else
-static inline bool on_overflow_stack(unsigned long sp, unsigned long size,
-				     struct stack_info *info)
-{
-	return false;
-}
+#define stackinfo_get_overflow()	stackinfo_get_unknown()
 #endif
 
 #if defined(CONFIG_ARM_SDE_INTERFACE) && defined(CONFIG_VMAP_STACK)
 DECLARE_PER_CPU(unsigned long *, sdei_stack_normal_ptr);
 DECLARE_PER_CPU(unsigned long *, sdei_stack_critical_ptr);
 
-static inline bool on_sdei_normal_stack(unsigned long sp, unsigned long size,
-					struct stack_info *info)
+static inline struct stack_info stackinfo_get_sdei_normal(void)
 {
 	unsigned long low = (unsigned long)raw_cpu_read(sdei_stack_normal_ptr);
 	unsigned long high = low + SDEI_STACK_SIZE;
 
-	return on_stack(sp, size, low, high, STACK_TYPE_SDEI_NORMAL, info);
+	return (struct stack_info) {
+		.low = low,
+		.high = high,
+		.type = STACK_TYPE_SDEI_NORMAL,
+	};
 }
 
-static inline bool on_sdei_critical_stack(unsigned long sp, unsigned long size,
-					  struct stack_info *info)
+static inline struct stack_info stackinfo_get_sdei_critical(void)
 {
 	unsigned long low = (unsigned long)raw_cpu_read(sdei_stack_critical_ptr);
 	unsigned long high = low + SDEI_STACK_SIZE;
 
-	return on_stack(sp, size, low, high, STACK_TYPE_SDEI_CRITICAL, info);
+	return (struct stack_info) {
+		.low = low,
+		.high = high,
+		.type = STACK_TYPE_SDEI_CRITICAL,
+	};
 }
 #else
-static inline bool on_sdei_normal_stack(unsigned long sp, unsigned long size,
-					struct stack_info *info)
-{
-	return false;
-}
-
-static inline bool on_sdei_critical_stack(unsigned long sp, unsigned long size,
-					  struct stack_info *info)
-{
-	return false;
-}
+#define stackinfo_get_sdei_normal()	stackinfo_get_unknown()
+#define stackinfo_get_sdei_critical()	stackinfo_get_unknown()
 #endif
 
 #endif	/* __ASM_STACKTRACE_H */
@@ -65,6 +65,15 @@ struct unwind_state {
 	struct task_struct *task;
 };
 
+static inline struct stack_info stackinfo_get_unknown(void)
+{
+	return (struct stack_info) {
+		.low = 0,
+		.high = 0,
+		.type = STACK_TYPE_UNKNOWN,
+	};
+}
+
 static inline bool stackinfo_on_stack(const struct stack_info *info,
 				      unsigned long sp, unsigned long size)
 {
@@ -77,25 +86,6 @@ static inline bool stackinfo_on_stack(const struct stack_info *info,
 	return true;
 }
 
-static inline bool on_stack(unsigned long sp, unsigned long size,
-			    unsigned long low, unsigned long high,
-			    enum stack_type type, struct stack_info *info)
-{
-	struct stack_info tmp = {
-		.low = low,
-		.high = high,
-		.type = type,
-	};
-
-	if (!stackinfo_on_stack(&tmp, sp, size))
-		return false;
-
-	if (info)
-		*info = tmp;
-
-	return true;
-}
-
 static inline void unwind_init_common(struct unwind_state *state,
 				      struct task_struct *task)
 {
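
For reference, the hunk split above elides the body of stackinfo_on_stack(), which every caller in this patch now relies on. A sketch of the check as introduced by the parent commit, reconstructed from context (treat the exact bounds handling as an assumption rather than a verbatim quote):

	static inline bool stackinfo_on_stack(const struct stack_info *info,
					      unsigned long sp, unsigned long size)
	{
		/* A zeroed stack_info (see stackinfo_get_unknown()) never matches. */
		if (!info->low)
			return false;

		/* Require sp >= low and sp + size <= high, rejecting wrap-around. */
		if (sp < info->low || sp + size < sp || sp + size > info->high)
			return false;

		return true;
	}

This is also why the #else fallbacks in the stacktrace.h hunk can simply expand to stackinfo_get_unknown(): its zero bounds make every membership check fail, so callers need no #ifdef guards.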
@@ -121,7 +121,7 @@ static bool regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
 {
 	return ((addr & ~(THREAD_SIZE - 1))  ==
 		(kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1))) ||
-		on_irq_stack(addr, sizeof(unsigned long), NULL);
+		on_irq_stack(addr, sizeof(unsigned long));
 }
 
 /**
@@ -67,36 +67,55 @@ static inline void unwind_init_from_task(struct unwind_state *state,
 	state->pc = thread_saved_pc(task);
 }
 
-/*
- * We can only safely access per-cpu stacks from current in a non-preemptible
- * context.
- */
 static bool on_accessible_stack(const struct task_struct *tsk,
 				unsigned long sp, unsigned long size,
 				struct stack_info *info)
 {
-	if (info)
-		info->type = STACK_TYPE_UNKNOWN;
+	struct stack_info tmp;
 
-	if (on_task_stack(tsk, sp, size, info))
-		return true;
+	tmp = stackinfo_get_task(tsk);
+	if (stackinfo_on_stack(&tmp, sp, size))
+		goto found;
+
+	/*
+	 * We can only safely access per-cpu stacks when unwinding the current
+	 * task in a non-preemptible context.
+	 */
 	if (tsk != current || preemptible())
-		return false;
-	if (on_irq_stack(sp, size, info))
-		return true;
-	if (on_overflow_stack(sp, size, info))
-		return true;
+		goto not_found;
 
-	if (IS_ENABLED(CONFIG_VMAP_STACK) &&
-	    IS_ENABLED(CONFIG_ARM_SDE_INTERFACE) &&
-	    in_nmi()) {
-		if (on_sdei_critical_stack(sp, size, info))
-			return true;
-		if (on_sdei_normal_stack(sp, size, info))
-			return true;
-	}
+	tmp = stackinfo_get_irq();
+	if (stackinfo_on_stack(&tmp, sp, size))
+		goto found;
+
+	tmp = stackinfo_get_overflow();
+	if (stackinfo_on_stack(&tmp, sp, size))
+		goto found;
+
+	/*
+	 * We can only safely access SDEI stacks when unwinding the current
+	 * task in an NMI context.
+	 */
+	if (!IS_ENABLED(CONFIG_VMAP_STACK) ||
+	    !IS_ENABLED(CONFIG_ARM_SDE_INTERFACE) ||
+	    !in_nmi())
+		goto not_found;
+
+	tmp = stackinfo_get_sdei_normal();
+	if (stackinfo_on_stack(&tmp, sp, size))
+		goto found;
+
+	tmp = stackinfo_get_sdei_critical();
+	if (stackinfo_on_stack(&tmp, sp, size))
+		goto found;
+
+not_found:
+	*info = stackinfo_get_unknown();
 	return false;
+
+found:
+	*info = tmp;
+	return true;
 }
 
 /*
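
For orientation, a hypothetical caller, sketching how an unwinder consumes the reworked on_accessible_stack(); the names tsk and fp and the 16-byte size (covering a saved fp/lr frame-record pair) are illustrative, not quoted from this patch:

	struct stack_info info;

	/* Check that the whole frame record at fp lies on an accessible stack. */
	if (!on_accessible_stack(tsk, fp, 16, &info))
		return -EINVAL;

	/*
	 * On success, info.low/info.high bound the stack containing fp and
	 * info.type identifies which stack it is; unlike before this patch,
	 * info is also written (as "unknown") on failure.
	 */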
@@ -39,34 +39,51 @@ static void hyp_prepare_backtrace(unsigned long fp, unsigned long pc)
 
 DEFINE_PER_CPU(unsigned long [NVHE_STACKTRACE_SIZE/sizeof(long)], pkvm_stacktrace);
 
-static bool on_overflow_stack(unsigned long sp, unsigned long size,
-			      struct stack_info *info)
+static struct stack_info stackinfo_get_overflow(void)
 {
 	unsigned long low = (unsigned long)this_cpu_ptr(overflow_stack);
 	unsigned long high = low + OVERFLOW_STACK_SIZE;
 
-	return on_stack(sp, size, low, high, STACK_TYPE_OVERFLOW, info);
+	return (struct stack_info) {
+		.low = low,
+		.high = high,
+		.type = STACK_TYPE_OVERFLOW,
+	};
 }
 
-static bool on_hyp_stack(unsigned long sp, unsigned long size,
-			      struct stack_info *info)
+static struct stack_info stackinfo_get_hyp(void)
 {
 	struct kvm_nvhe_init_params *params = this_cpu_ptr(&kvm_init_params);
 	unsigned long high = params->stack_hyp_va;
 	unsigned long low = high - PAGE_SIZE;
 
-	return on_stack(sp, size, low, high, STACK_TYPE_HYP, info);
+	return (struct stack_info) {
+		.low = low,
+		.high = high,
+		.type = STACK_TYPE_HYP,
+	};
 }
 
 static bool on_accessible_stack(const struct task_struct *tsk,
 				unsigned long sp, unsigned long size,
 				struct stack_info *info)
 {
-	if (info)
-		info->type = STACK_TYPE_UNKNOWN;
+	struct stack_info tmp;
 
-	return (on_overflow_stack(sp, size, info) ||
-		on_hyp_stack(sp, size, info));
+	tmp = stackinfo_get_overflow();
+	if (stackinfo_on_stack(&tmp, sp, size))
+		goto found;
+
+	tmp = stackinfo_get_hyp();
+	if (stackinfo_on_stack(&tmp, sp, size))
+		goto found;
+
+	*info = stackinfo_get_unknown();
+	return false;
+
+found:
+	*info = tmp;
+	return true;
 }
 
 static int unwind_next(struct unwind_state *state)
@@ -62,37 +62,54 @@ static bool kvm_nvhe_stack_kern_va(unsigned long *addr,
 	return true;
 }
 
-static bool on_overflow_stack(unsigned long sp, unsigned long size,
-			      struct stack_info *info)
+static struct stack_info stackinfo_get_overflow(void)
 {
 	struct kvm_nvhe_stacktrace_info *stacktrace_info
 				= this_cpu_ptr_nvhe_sym(kvm_stacktrace_info);
 	unsigned long low = (unsigned long)stacktrace_info->overflow_stack_base;
 	unsigned long high = low + OVERFLOW_STACK_SIZE;
 
-	return on_stack(sp, size, low, high, STACK_TYPE_OVERFLOW, info);
+	return (struct stack_info) {
+		.low = low,
+		.high = high,
+		.type = STACK_TYPE_OVERFLOW,
+	};
 }
 
-static bool on_hyp_stack(unsigned long sp, unsigned long size,
-			 struct stack_info *info)
+static struct stack_info stackinfo_get_hyp(void)
 {
 	struct kvm_nvhe_stacktrace_info *stacktrace_info
 				= this_cpu_ptr_nvhe_sym(kvm_stacktrace_info);
 	unsigned long low = (unsigned long)stacktrace_info->stack_base;
 	unsigned long high = low + PAGE_SIZE;
 
-	return on_stack(sp, size, low, high, STACK_TYPE_HYP, info);
+	return (struct stack_info) {
+		.low = low,
+		.high = high,
+		.type = STACK_TYPE_HYP,
+	};
 }
 
 static bool on_accessible_stack(const struct task_struct *tsk,
 				unsigned long sp, unsigned long size,
 				struct stack_info *info)
 {
-	if (info)
-		info->type = STACK_TYPE_UNKNOWN;
+	struct stack_info tmp;
 
-	return (on_overflow_stack(sp, size, info) ||
-		on_hyp_stack(sp, size, info));
+	tmp = stackinfo_get_overflow();
+	if (stackinfo_on_stack(&tmp, sp, size))
+		goto found;
+
+	tmp = stackinfo_get_hyp();
+	if (stackinfo_on_stack(&tmp, sp, size))
+		goto found;
+
+	*info = stackinfo_get_unknown();
+	return false;
+
+found:
+	*info = tmp;
+	return true;
 }
 
 static int unwind_next(struct unwind_state *state)