linux/arch/x86/include/asm/atomic.h
Uros Bizjak faa6f77b0d x86/locking/atomic: Improve performance by using asm_inline() for atomic locking instructions
According to:

  https://gcc.gnu.org/onlinedocs/gcc/Size-of-an-asm.html

the use of asm pseudo directives in an asm template can confuse
the compiler into wrongly estimating the size of the generated
code.

The LOCK_PREFIX macro expands to several asm pseudo directives, so
its use in atomic locking insns makes the instruction length estimate
significantly off (a specially instrumented compiler reports the
estimated length of these asm templates as 6 instructions).

This incorrect estimate in turn leads to suboptimal inlining decisions,
suboptimal instruction scheduling and suboptimal code block alignment
for functions that use these locking primitives.
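
For reference, on CONFIG_SMP builds LOCK_PREFIX expands to roughly the
following (a simplified sketch of the definition in <asm/alternative.h>;
the pseudo directives record the location of the LOCK prefix in the
.smp_locks section so it can be patched out on UP):

	#define LOCK_PREFIX_HERE			\
		".pushsection .smp_locks,\"a\"\n"	\
		".balign 4\n"				\
		".long 671f - .\n" /* offset */		\
		".popsection\n"				\
		"671:"

	#define LOCK_PREFIX LOCK_PREFIX_HERE "\n\tlock; "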

Use asm_inline instead:

  https://gcc.gnu.org/pipermail/gcc-patches/2018-December/512349.html

which is a feature that makes GCC pretend some inline assembly code
is tiny (while it would otherwise think it is huge): the statement is
written as "asm inline" instead of just "asm".

For code size estimation, the size of such an asm statement is then
taken to be the minimum size of one instruction, regardless of how many
instructions the compiler thinks it contains.
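
As a minimal sketch (assuming CONFIG_CC_HAS_ASM_INLINE, where the kernel
maps asm_inline to the compiler's "asm inline"), the change to each
locking primitive boils down to:

	/* Before: the pseudo directives in LOCK_PREFIX inflate the size estimate. */
	asm volatile(LOCK_PREFIX "addl %1, %0"
		     : "+m" (v->counter)
		     : "ir" (i) : "memory");

	/* After: the whole template is costed as one minimum-size instruction. */
	asm_inline volatile(LOCK_PREFIX "addl %1, %0"
			    : "+m" (v->counter)
			    : "ir" (i) : "memory");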

bloat-o-meter reports the following code size increase
(x86_64 defconfig, gcc-14.2.1):

  add/remove: 82/283 grow/shrink: 870/372 up/down: 76272/-43618 (32654)
  Total: Before=22770320, After=22802974, chg +0.14%

with top grows (>500 bytes):

	Function                                     old     new   delta
	----------------------------------------------------------------
	copy_process                                6465   10191   +3726
	balance_dirty_pages_ratelimited_flags        237    2949   +2712
	icl_plane_update_noarm                      5800    7969   +2169
	samsung_input_mapping                       3375    5170   +1795
	ext4_do_update_inode.isra                      -    1526   +1526
	__schedule                                  2416    3472   +1056
	__i915_vma_resource_unhold                     -     946    +946
	sched_mm_cid_after_execve                    175    1097    +922
	__do_sys_membarrier                            -     862    +862
	filemap_fault                               2666    3462    +796
	nl80211_send_wiphy                         11185   11874    +689
	samsung_input_mapping.cold                   900    1500    +600
	virtio_gpu_queue_fenced_ctrl_buffer          839    1410    +571
	ilk_update_pipe_csc                         1201    1735    +534
	enable_step                                    -     525    +525
	icl_color_commit_noarm                      1334    1847    +513
	tg3_read_bc_ver                                -     501    +501

and top shrinks (>500 bytes):

	Function                                     old     new   delta
	----------------------------------------------------------------
	nl80211_send_iftype_data                     580       -    -580
	samsung_gamepad_input_mapping.isra.cold      604       -    -604
	virtio_gpu_queue_ctrl_sgs                    724       -    -724
	tg3_get_invariants                          9218    8376    -842
	__i915_vma_resource_unhold.part              899       -    -899
	ext4_mark_iloc_dirty                        1735     106   -1629
	samsung_gamepad_input_mapping.isra          2046       -   -2046
	icl_program_input_csc                       2203       -   -2203
	copy_mm                                     2242       -   -2242
	balance_dirty_pages                         2657       -   -2657

These code size changes fall into 4 groups:

a) some functions now include once-called functions in full or
in part. These are:

	Function                                     old     new   delta
	----------------------------------------------------------------
	copy_process                                6465   10191   +3726
	balance_dirty_pages_ratelimited_flags        237    2949   +2712
	icl_plane_update_noarm                      5800    7969   +2169
	samsung_input_mapping                       3375    5170   +1795
	ext4_do_update_inode.isra                      -    1526   +1526

that now include:

	Function                                     old     new   delta
	----------------------------------------------------------------
	copy_mm                                     2242       -   -2242
	balance_dirty_pages                         2657       -   -2657
	icl_program_input_csc                       2203       -   -2203
	samsung_gamepad_input_mapping.isra          2046       -   -2046
	ext4_mark_iloc_dirty                        1735     106   -1629

b) changes caused by ISRA [interprocedural scalar replacement of
aggregates, an interprocedural pass that removes unused function return
values (turning functions returning a value which is never used into
void functions) and removes unused function parameters.  It can also
replace an aggregate parameter with a set of parameters representing
parts of the original, turning parameters passed by reference into new
ones that pass the value directly.]
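
A hypothetical illustration of the ISRA transformation (function and
field names are made up for the example, not taken from the kernel):

	/* Before ISRA: no caller ever uses the return value. */
	static int update_count(struct stats *s, int delta)
	{
		s->count += delta;
		return s->count;
	}

	/* After ISRA (conceptually): the unused return value is dropped and
	 * the aggregate pointer is replaced by the scalar field actually
	 * used, producing a clone such as update_count.isra.0. */
	static void update_count_isra(int *count, int delta)
	{
		*count += delta;
	}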

Top grows and shrinks of this group are listed below:

	Function                                     old     new   delta
	----------------------------------------------------------------
	ext4_do_update_inode.isra                      -    1526   +1526
	nfs4_begin_drain_session.isra                  -     249    +249
	nfs4_end_drain_session.isra                    -     168    +168
	__guc_action_register_multi_lrc_v70.isra     335     500    +165
	__i915_gem_free_objects.isra                   -     144    +144
	...
	membarrier_register_private_expedited.isra     108       -    -108
	syncobj_eventfd_entry_func.isra              445     314    -131
	__ext4_sb_bread_gfp.isra                     140       -    -140
	class_preempt_notrace_destructor.isra        145       -    -145
	p9_fid_put.isra                              151       -    -151
	__mm_cid_try_get.isra                        238       -    -238
	membarrier_global_expedited.isra             294       -    -294
	mm_cid_get.isra                              295       -    -295
	samsung_gamepad_input_mapping.isra.cold      604       -    -604
	samsung_gamepad_input_mapping.isra          2046       -   -2046

c) different hot/cold split points that just move code around:

Top grows and shrinks of this group are listed below:

	Function                                     old     new   delta
	----------------------------------------------------------------
	samsung_input_mapping.cold                   900    1500    +600
	__i915_request_reset.cold                    311     389     +78
	nfs_update_inode.cold                         77     153     +76
	__do_sys_swapon.cold                         404     455     +51
	copy_process.cold                              -      45     +45
	tg3_get_invariants.cold                       73     115     +42
	...
	hibernate.cold                               671     643     -28
	copy_mm.cold                                  31       -     -31
	software_resume.cold                         249     207     -42
	io_poll_wake.cold                            106      54     -52
	samsung_gamepad_input_mapping.isra.cold      604       -    -604

d) full inlining of small functions containing a locking insn (~150 cases).
These account for most of the code size increase, because the removed
function body is now inlined in multiple places. E.g.:

	0000000000a50e10 <release_devnum>:
	  a50e10:    48 63 07                 movslq (%rdi),%rax
	  a50e13:    85 c0                    test   %eax,%eax
	  a50e15:    7e 10                    jle    a50e27 <release_devnum+0x17>
	  a50e17:    48 8b 4f 50              mov    0x50(%rdi),%rcx
	  a50e1b:    f0 48 0f b3 41 50        lock btr %rax,0x50(%rcx)
	  a50e21:    c7 07 ff ff ff ff        movl   $0xffffffff,(%rdi)
	  a50e27:    e9 00 00 00 00           jmp    a50e2c <release_devnum+0x1c>
		    a50e28: R_X86_64_PLT32    __x86_return_thunk-0x4
	  a50e2c:    0f 1f 40 00              nopl   0x0(%rax)

is now fully inlined into its callers. This is desirable due to the
per-function overhead of CPU bug mitigations such as retpolines.
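
For illustration, the disassembly above corresponds to a function of
roughly this C shape (a hypothetical reconstruction, not the actual
driver source; field names are invented):

	static void release_devnum(struct usb_device *udev)
	{
		if (udev->devnum > 0) {
			/* single locked RMW, compiled to the "lock btr" above */
			clear_bit(udev->devnum, udev->bus->devmap);
			udev->devnum = -1;
		}
	}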

FTR a) with -Os (where generated code size really matters) the x86_64
defconfig object file decreases by 24,388 bytes, representing a 0.1%
code size decrease:

	    text           data     bss      dec            hex filename
	23883860        4617284  814212 29315356        1bf511c vmlinux-old.o
	23859472        4615404  814212 29289088        1beea80 vmlinux-new.o

FTR b) clang recognizes "asm inline", but there was essentially no
difference in code sizes:

	    text           data     bss      dec            hex filename
	27577163        4503078  807732 32887973        1f5d4a5 vmlinux-clang-patched.o
	27577181        4503078  807732 32887991        1f5d4b7 vmlinux-clang-unpatched.o

The performance impact of the patch was assessed by recompiling the
Fedora 41 6.13.5 kernel and running lmbench with the old and new kernels.
The most noticeable improvements were, from:

	Process fork+exit: 270.0952 microseconds
	Process fork+execve: 2620.3333 microseconds
	Process fork+/bin/sh -c: 6781.0000 microseconds
	File /usr/tmp/XXX write bandwidth: 1780350 KB/sec
	Pagefaults on /usr/tmp/XXX: 0.3875 microseconds

to:

	Process fork+exit: 298.6842 microseconds
	Process fork+execve: 1662.7500 microseconds
	Process fork+/bin/sh -c: 2127.6667 microseconds
	File /usr/tmp/XXX write bandwidth: 1950077 KB/sec
	Pagefaults on /usr/tmp/XXX: 0.1958 microseconds

and from:

	Socket bandwidth using localhost
	0.000001 2.52 MB/sec
	0.000064 163.02 MB/sec
	0.000128 321.70 MB/sec
	0.000256 630.06 MB/sec
	0.000512 1207.07 MB/sec
	0.001024 2004.06 MB/sec
	0.001437 2475.43 MB/sec
	10.000000 5817.34 MB/sec

	Avg xfer: 3.2KB, 41.8KB in 1.2230 millisecs, 34.15 MB/sec
	AF_UNIX sock stream bandwidth: 9850.01 MB/sec
	Pipe bandwidth: 4631.28 MB/sec

to:

	Socket bandwidth using localhost
	0.000001 3.13 MB/sec
	0.000064 187.08 MB/sec
	0.000128 324.12 MB/sec
	0.000256 618.51 MB/sec
	0.000512 1137.13 MB/sec
	0.001024 1962.95 MB/sec
	0.001437 2458.27 MB/sec
	10.000000 6168.08 MB/sec

	Avg xfer: 3.2KB, 41.8KB in 1.0060 millisecs, 41.52 MB/sec
	AF_UNIX sock stream bandwidth: 9921.68 MB/sec
	Pipe bandwidth: 4649.96 MB/sec

[ mingo: Prettified the changelog a bit. ]

Signed-off-by: Uros Bizjak <ubizjak@gmail.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Nathan Chancellor <nathan@kernel.org>
Cc: Nick Desaulniers <ndesaulniers@google.com>
Link: https://lore.kernel.org/r/20250309170955.48919-1-ubizjak@gmail.com
2025-03-19 11:26:58 +01:00

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_ATOMIC_H
#define _ASM_X86_ATOMIC_H

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/alternative.h>
#include <asm/cmpxchg.h>
#include <asm/rmwcc.h>
#include <asm/barrier.h>

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc..
 */

static __always_inline int arch_atomic_read(const atomic_t *v)
{
	/*
	 * Note for KASAN: we deliberately don't use READ_ONCE_NOCHECK() here,
	 * it's a non-inlined function that increases binary size and stack usage.
	 */
	return __READ_ONCE((v)->counter);
}

static __always_inline void arch_atomic_set(atomic_t *v, int i)
{
	__WRITE_ONCE(v->counter, i);
}

static __always_inline void arch_atomic_add(int i, atomic_t *v)
{
	asm_inline volatile(LOCK_PREFIX "addl %1, %0"
			    : "+m" (v->counter)
			    : "ir" (i) : "memory");
}

static __always_inline void arch_atomic_sub(int i, atomic_t *v)
{
	asm_inline volatile(LOCK_PREFIX "subl %1, %0"
			    : "+m" (v->counter)
			    : "ir" (i) : "memory");
}

static __always_inline bool arch_atomic_sub_and_test(int i, atomic_t *v)
{
	return GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, e, "er", i);
}
#define arch_atomic_sub_and_test arch_atomic_sub_and_test

static __always_inline void arch_atomic_inc(atomic_t *v)
{
	asm_inline volatile(LOCK_PREFIX "incl %0"
			    : "+m" (v->counter) :: "memory");
}
#define arch_atomic_inc arch_atomic_inc

static __always_inline void arch_atomic_dec(atomic_t *v)
{
	asm_inline volatile(LOCK_PREFIX "decl %0"
			    : "+m" (v->counter) :: "memory");
}
#define arch_atomic_dec arch_atomic_dec

static __always_inline bool arch_atomic_dec_and_test(atomic_t *v)
{
	return GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, e);
}
#define arch_atomic_dec_and_test arch_atomic_dec_and_test

static __always_inline bool arch_atomic_inc_and_test(atomic_t *v)
{
	return GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, e);
}
#define arch_atomic_inc_and_test arch_atomic_inc_and_test

static __always_inline bool arch_atomic_add_negative(int i, atomic_t *v)
{
	return GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, s, "er", i);
}
#define arch_atomic_add_negative arch_atomic_add_negative

static __always_inline int arch_atomic_add_return(int i, atomic_t *v)
{
	return i + xadd(&v->counter, i);
}
#define arch_atomic_add_return arch_atomic_add_return

#define arch_atomic_sub_return(i, v) arch_atomic_add_return(-(i), v)

static __always_inline int arch_atomic_fetch_add(int i, atomic_t *v)
{
	return xadd(&v->counter, i);
}
#define arch_atomic_fetch_add arch_atomic_fetch_add

#define arch_atomic_fetch_sub(i, v) arch_atomic_fetch_add(-(i), v)

static __always_inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
{
	return arch_cmpxchg(&v->counter, old, new);
}
#define arch_atomic_cmpxchg arch_atomic_cmpxchg

static __always_inline bool arch_atomic_try_cmpxchg(atomic_t *v, int *old, int new)
{
	return arch_try_cmpxchg(&v->counter, old, new);
}
#define arch_atomic_try_cmpxchg arch_atomic_try_cmpxchg

static __always_inline int arch_atomic_xchg(atomic_t *v, int new)
{
	return arch_xchg(&v->counter, new);
}
#define arch_atomic_xchg arch_atomic_xchg

static __always_inline void arch_atomic_and(int i, atomic_t *v)
{
	asm_inline volatile(LOCK_PREFIX "andl %1, %0"
			    : "+m" (v->counter)
			    : "ir" (i)
			    : "memory");
}

static __always_inline int arch_atomic_fetch_and(int i, atomic_t *v)
{
	int val = arch_atomic_read(v);

	do { } while (!arch_atomic_try_cmpxchg(v, &val, val & i));

	return val;
}
#define arch_atomic_fetch_and arch_atomic_fetch_and

static __always_inline void arch_atomic_or(int i, atomic_t *v)
{
	asm_inline volatile(LOCK_PREFIX "orl %1, %0"
			    : "+m" (v->counter)
			    : "ir" (i)
			    : "memory");
}

static __always_inline int arch_atomic_fetch_or(int i, atomic_t *v)
{
	int val = arch_atomic_read(v);

	do { } while (!arch_atomic_try_cmpxchg(v, &val, val | i));

	return val;
}
#define arch_atomic_fetch_or arch_atomic_fetch_or

static __always_inline void arch_atomic_xor(int i, atomic_t *v)
{
	asm_inline volatile(LOCK_PREFIX "xorl %1, %0"
			    : "+m" (v->counter)
			    : "ir" (i)
			    : "memory");
}

static __always_inline int arch_atomic_fetch_xor(int i, atomic_t *v)
{
	int val = arch_atomic_read(v);

	do { } while (!arch_atomic_try_cmpxchg(v, &val, val ^ i));

	return val;
}
#define arch_atomic_fetch_xor arch_atomic_fetch_xor

#ifdef CONFIG_X86_32
# include <asm/atomic64_32.h>
#else
# include <asm/atomic64_64.h>
#endif

#endif /* _ASM_X86_ATOMIC_H */