
While the GCC and Clang compilers already define __ASSEMBLER__ automatically when compiling assembly code, __ASSEMBLY__ is a macro that only gets defined by the Makefiles in the kernel.

This can be very confusing when switching between userspace and kernelspace coding, or when dealing with uapi headers that should rather use __ASSEMBLER__ instead. So let's standardize on the __ASSEMBLER__ macro that is provided by the compilers now.

This is almost a completely mechanical patch (done with a simple "sed -i" statement), with one comment tweaked manually in arch/m68k/include/asm/mac_baboon.h (which was missing underscores).

Signed-off-by: Thomas Huth <thuth@redhat.com>
Acked-by: Greg Ungerer <gerg@linux-m68k.org>
Reviewed-by: Geert Uytterhoeven <geert@linux-m68k.org>
Link: https://lore.kernel.org/20250611114056.118309-3-thuth@redhat.com
Signed-off-by: Geert Uytterhoeven <geert@linux-m68k.org>
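As a purely illustrative sketch (not part of the patch; the header name and all identifiers below are made up), this is the pattern the change relies on: a header shared between C and assembly sources can guard its C-only declarations with __ASSEMBLER__, which GCC and Clang define automatically whenever they preprocess assembly code, so no Makefile-supplied -D__ASSEMBLY__ is needed.

/* example_shared.h - hypothetical header included from both C and .S files */
#ifndef _EXAMPLE_SHARED_H
#define _EXAMPLE_SHARED_H

#define EXAMPLE_FLAG_ENABLE	0x01	/* plain constants work in both worlds */

#ifndef __ASSEMBLER__	/* defined by the compiler when preprocessing assembly */
struct example_state {			/* C-only declarations stay guarded */
	unsigned int flags;
};
extern void example_init(struct example_state *state);
#endif /* __ASSEMBLER__ */

#endif /* _EXAMPLE_SHARED_H */

When such a header is pulled into a .S file, only the #define survives preprocessing; in a C translation unit __ASSEMBLER__ is undefined, so the struct and prototype become visible as well.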
149 lines
3.3 KiB
C
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _M68K_PAGE_MM_H
#define _M68K_PAGE_MM_H

#ifndef __ASSEMBLER__

#include <linux/compiler.h>
#include <asm/module.h>

/*
 * We don't need to check for alignment etc.
 */
#ifdef CPU_M68040_OR_M68060_ONLY
static inline void copy_page(void *to, void *from)
{
	unsigned long tmp;

	__asm__ __volatile__("1:\t"
			     ".chip 68040\n\t"
			     "move16 %1@+,%0@+\n\t"
			     "move16 %1@+,%0@+\n\t"
			     ".chip 68k\n\t"
			     "dbra %2,1b\n\t"
			     : "=a" (to), "=a" (from), "=d" (tmp)
			     : "0" (to), "1" (from), "2" (PAGE_SIZE / 32 - 1));
}

static inline void clear_page(void *page)
{
	unsigned long tmp;
	unsigned long *sp = page;

	*sp++ = 0;
	*sp++ = 0;
	*sp++ = 0;
	*sp++ = 0;

	__asm__ __volatile__("1:\t"
			     ".chip 68040\n\t"
			     "move16 %2@+,%0@+\n\t"
			     ".chip 68k\n\t"
			     "subqw #8,%2\n\t"
			     "subqw #8,%2\n\t"
			     "dbra %1,1b\n\t"
			     : "=a" (sp), "=d" (tmp)
			     : "a" (page), "0" (sp),
			       "1" ((PAGE_SIZE - 16) / 16 - 1));
}

#else
#define clear_page(page)	memset((page), 0, PAGE_SIZE)
#define copy_page(to,from)	memcpy((to), (from), PAGE_SIZE)
#endif

#define clear_user_page(addr, vaddr, page)	\
	do {	clear_page(addr);		\
		flush_dcache_page(page);	\
	} while (0)
#define copy_user_page(to, from, vaddr, page)	\
	do {	copy_page(to, from);		\
		flush_dcache_page(page);	\
	} while (0)

extern unsigned long m68k_memoffset;

#ifndef CONFIG_SUN3

#define WANT_PAGE_VIRTUAL

static inline unsigned long ___pa(void *vaddr)
{
	unsigned long paddr;
	asm (
		"1:	addl #0,%0\n"
		m68k_fixup(%c2, 1b+2)
		: "=r" (paddr)
		: "0" (vaddr), "i" (m68k_fixup_memoffset));
	return paddr;
}
#define __pa(vaddr)	___pa((void *)(long)(vaddr))
static inline void *__va(unsigned long paddr)
{
	void *vaddr;
	asm (
		"1:	subl #0,%0\n"
		m68k_fixup(%c2, 1b+2)
		: "=r" (vaddr)
		: "0" (paddr), "i" (m68k_fixup_memoffset));
	return vaddr;
}

#else	/* !CONFIG_SUN3 */
/* This #define is a horrible hack to suppress lots of warnings. --m */
#define __pa(x) ___pa((unsigned long)(x))
static inline unsigned long ___pa(unsigned long x)
{
	if (x == 0)
		return 0;
	if (x >= PAGE_OFFSET)
		return (x - PAGE_OFFSET);
	else
		return (x + 0x2000000);
}

static inline void *__va(unsigned long x)
{
	if (x == 0)
		return (void *)0;

	if (x < 0x2000000)
		return (void *)(x + PAGE_OFFSET);
	else
		return (void *)(x - 0x2000000);
}
#endif	/* CONFIG_SUN3 */

/*
 * NOTE: virtual isn't really correct, actually it should be the offset into the
 * memory node, but we have no highmem, so that works for now.
 * TODO: implement (fast) pfn<->pgdat_idx conversion functions, this makes lots
 * of the shifts unnecessary.
 */

static inline unsigned long virt_to_pfn(const void *kaddr)
{
	return __pa(kaddr) >> PAGE_SHIFT;
}

static inline void *pfn_to_virt(unsigned long pfn)
{
	return __va(pfn << PAGE_SHIFT);
}

extern int m68k_virt_to_node_shift;

#define virt_to_page(addr) ({		\
	pfn_to_page(virt_to_pfn(addr));	\
})
#define page_to_virt(page) ({		\
	pfn_to_virt(page_to_pfn(page));	\
})

#define ARCH_PFN_OFFSET (m68k_memory[0].addr >> PAGE_SHIFT)

#define virt_addr_valid(kaddr)	((unsigned long)(kaddr) >= PAGE_OFFSET && (unsigned long)(kaddr) < (unsigned long)high_memory)
#define pfn_valid(pfn)		virt_addr_valid(pfn_to_virt(pfn))

#endif /* __ASSEMBLER__ */

#endif /* _M68K_PAGE_MM_H */