mirror of
git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2025-08-05 16:54:27 +00:00

For spinning loops, people often use barrier() or cpu_relax(). For most architectures cpu_relax and barrier are the same, but on some architectures cpu_relax can add some latency. For example, on power, sparc64 and arc, cpu_relax can shift the CPU towards other hardware threads in an SMT environment. On s390 cpu_relax does even more: it uses a hypercall to the hypervisor to give up the timeslice. In contrast to the SMT yielding this can result in larger latencies. In some places this latency is unwanted, so another variant "cpu_relax_lowlatency" was introduced. Before this is used in more and more places, let's revert the logic and provide a cpu_relax_yield that can be called in places where yielding is more important than latency. By default this is the same as cpu_relax on all architectures. Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Cc: Catalin Marinas <catalin.marinas@arm.com> Cc: Heiko Carstens <heiko.carstens@de.ibm.com> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Martin Schwidefsky <schwidefsky@de.ibm.com> Cc: Nicholas Piggin <npiggin@gmail.com> Cc: Noam Camus <noamc@ezchip.com> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Russell King <linux@armlinux.org.uk> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: Will Deacon <will.deacon@arm.com> Cc: linuxppc-dev@lists.ozlabs.org Cc: virtualization@lists.linux-foundation.org Cc: xen-devel@lists.xenproject.org Link: http://lkml.kernel.org/r/1477386195-32736-2-git-send-email-borntraeger@de.ibm.com Signed-off-by: Ingo Molnar <mingo@kernel.org>
162 lines
3.9 KiB
C
162 lines
3.9 KiB
C
/*
 * include/asm-m68k/processor.h
 *
 * Copyright (C) 1995 Hamish Macdonald
 */

#ifndef __ASM_M68K_PROCESSOR_H
#define __ASM_M68K_PROCESSOR_H

/*
 * Default implementation of macro that returns current
 * instruction pointer ("program counter").
 *
 * Relies on two GCC extensions: local labels (__label__) and
 * labels-as-values (&&label).  Taking the address of a label placed
 * at the point of expansion yields (approximately) the address of
 * the currently executing instruction.
 */
#define current_text_addr() ({ __label__ _l; _l: &&_l;})
|
|
|
|
#include <linux/thread_info.h>
|
|
#include <asm/segment.h>
|
|
#include <asm/fpu.h>
|
|
#include <asm/ptrace.h>
|
|
|
|
/*
 * Read the user stack pointer.
 *
 * On ColdFire parts configured with a single, software-switched A7
 * (CONFIG_COLDFIRE_SW_A7) there is no separate hardware USP; the
 * value is kept in the variable sw_usp (maintained elsewhere, not
 * visible in this header).  Everywhere else the real %usp register
 * is read with a "move %usp,%a0" instruction.
 */
static inline unsigned long rdusp(void)
{
#ifdef CONFIG_COLDFIRE_SW_A7
	extern unsigned int sw_usp;
	return sw_usp;
#else
	/* Force the output into %a0 to match the hand-encoded opcode. */
	register unsigned long usp __asm__("a0");
	/* move %usp,%a0 -- emitted as a raw opcode (.word 0x4e68) */
	__asm__ __volatile__(".word 0x4e68" : "=a" (usp));
	return usp;
#endif
}
|
|
|
|
/*
 * Write the user stack pointer.
 *
 * Mirror of rdusp(): software-A7 ColdFire updates the sw_usp
 * variable, all other configurations write the hardware %usp
 * register with a "move %a0,%usp" instruction.
 */
static inline void wrusp(unsigned long usp)
{
#ifdef CONFIG_COLDFIRE_SW_A7
	extern unsigned int sw_usp;
	sw_usp = usp;
#else
	/* Pin the input to %a0 to match the hand-encoded opcode. */
	register unsigned long a0 __asm__("a0") = usp;
	/* move %a0,%usp -- emitted as a raw opcode (.word 0x4e60) */
	__asm__ __volatile__(".word 0x4e60" : : "a" (a0) );
#endif
}
|
|
|
|
/*
 * User space process size: 3.75GB. This is hardcoded into a few places,
 * so don't change it unless you know what you are doing.
 */
#ifdef CONFIG_MMU
#if defined(CONFIG_COLDFIRE)
#define TASK_SIZE	(0xC0000000UL)	/* ColdFire: 3GB user space */
#elif defined(CONFIG_SUN3)
#define TASK_SIZE	(0x0E000000UL)	/* Sun-3: 224MB user space */
#else
#define TASK_SIZE	(0xF0000000UL)	/* classic m68k: 3.75GB */
#endif
#else
/* No MMU: no user/kernel split, the full 32-bit range is "user". */
#define TASK_SIZE	(0xFFFFFFFFUL)
#endif
|
|
|
|
#ifdef __KERNEL__
|
|
#define STACK_TOP TASK_SIZE
|
|
#define STACK_TOP_MAX STACK_TOP
|
|
#endif
|
|
|
|
/* This decides where the kernel will search for a free chunk of vm
 * space during mmap's.  Each value is half-way up the corresponding
 * TASK_SIZE above.
 */
#ifdef CONFIG_MMU
#if defined(CONFIG_COLDFIRE)
#define TASK_UNMAPPED_BASE	0x60000000UL
#elif defined(CONFIG_SUN3)
#define TASK_UNMAPPED_BASE	0x0A000000UL
#else
#define TASK_UNMAPPED_BASE	0xC0000000UL
#endif
/* mmap addresses only need page alignment, ignore the hint offset. */
#define TASK_UNMAPPED_ALIGN(addr, off)	PAGE_ALIGN(addr)
#else
/* No MMU: no address-space layout to speak of. */
#define TASK_UNMAPPED_BASE	0
#endif
|
|
|
|
/*
 * Per-thread CPU state saved across context switches.
 * NOTE(review): field order/offsets are likely referenced from asm
 * entry code elsewhere — do not reorder without checking.
 */
struct thread_struct {
	unsigned long  ksp;		/* kernel stack pointer */
	unsigned long  usp;		/* user stack pointer */
	unsigned short sr;		/* saved status register */
	unsigned short fs;		/* saved fs (sfc, dfc) */
	unsigned long  crp[2];		/* cpu root pointer */
	unsigned long  esp0;		/* points to SR of stack frame */
	unsigned long  faddr;		/* info about last fault */
	int            signo, code;	/* pending fault signal number/code */
	unsigned long  fp[8*3];		/* 8 FP regs x 3 longs each
					   (presumably 96-bit extended
					   format — confirm vs FPU code) */
	unsigned long  fpcntl[3];	/* fp control regs */
	unsigned char  fpstate[FPSTATESIZE];  /* floating point state */
};
|
|
|
|
/*
 * Initial thread state for the boot task: kernel stack pointer at the
 * top of init_stack, status register set to PS_S, fs set to the
 * kernel data space selector.  All other fields start zeroed.
 */
#define INIT_THREAD  {						\
	.ksp	= sizeof(init_stack) + (unsigned long) init_stack, \
	.sr	= PS_S,						\
	.fs	= __KERNEL_DS,					\
}
|
|
|
|
/*
 * ColdFire stack format should be 0x4 for an aligned usp (will always be
 * true on thread creation). We need to set this explicitly.
 */
#ifdef CONFIG_COLDFIRE
#define setframeformat(_regs)	do { (_regs)->format = 0x4; } while(0)
#else
/* Classic m68k exception frames carry no software-set format field. */
#define setframeformat(_regs)	do { } while (0)
#endif
|
|
|
|
/*
|
|
* Do necessary setup to start up a newly executed thread.
|
|
*/
|
|
static inline void start_thread(struct pt_regs * regs, unsigned long pc,
|
|
unsigned long usp)
|
|
{
|
|
regs->pc = pc;
|
|
regs->sr &= ~0x2000;
|
|
setframeformat(regs);
|
|
wrusp(usp);
|
|
}
|
|
|
|
#ifdef CONFIG_MMU
/* MMU builds: real exception-fixup handler, defined outside this header. */
extern int handle_kernel_fault(struct pt_regs *regs);
#else
static inline int handle_kernel_fault(struct pt_regs *regs)
{
	/* Any fault in kernel is fatal on non-mmu */
	return 0;
}
#endif
|
|
|
|
/* Forward declaration, a strange C thing */
struct task_struct;

/* Free all resources held by a thread. */
static inline void release_thread(struct task_struct *dead_task)
{
	/* Intentionally empty: no per-thread resources to free here. */
}
|
|
|
|
/* Saved program counter of a non-running task (defined elsewhere). */
extern unsigned long thread_saved_pc(struct task_struct *tsk);

/* "Wait channel" of a sleeping task, for /proc (defined elsewhere). */
unsigned long get_wchan(struct task_struct *p);
|
|
|
|
/*
 * KSTK_EIP(): user-mode program counter of a task, read from the
 * exception stack frame that thread.esp0 points at.  Yields 0 when
 * esp0 does not look like a valid kernel-mapped address.
 */
#define KSTK_EIP(tsk) \
    ({ \
	unsigned long eip = 0;	 \
	if ((tsk)->thread.esp0 > PAGE_SIZE && \
	    (virt_addr_valid((tsk)->thread.esp0))) \
	      eip = ((struct pt_regs *) (tsk)->thread.esp0)->pc; \
	eip; })

/* User stack pointer: live %usp for current, saved copy otherwise. */
#define KSTK_ESP(tsk)	((tsk) == current ? rdusp() : (tsk)->thread.usp)

/* Saved user-mode register frame of a task. */
#define task_pt_regs(tsk)	((struct pt_regs *) ((tsk)->thread.esp0))

/*
 * m68k has no SMT and no directed-yield mechanism, so both the
 * yielding and the low-latency spin-wait hints collapse to a plain
 * compiler barrier.
 */
#define cpu_relax()	barrier()
#define cpu_relax_yield() cpu_relax()
#define cpu_relax_lowlatency() cpu_relax()

#endif	/* __ASM_M68K_PROCESSOR_H */
|