locking/atomics: Flip fallbacks and instrumentation
Currently instrumentation of atomic primitives is done at the architecture
level, while composites or fallbacks are provided at the generic level.

The result is that there are no uninstrumented variants of the
fallbacks. Since such variants are now needed to isolate text poke
from any form of instrumentation, invert this ordering.

Doing this means moving the instrumentation into the generic code as
well as having (for now) two variants of the fallbacks.

Notes:

 - the various *cond_read* primitives are not proper fallbacks
   and got moved into linux/atomic.h. No arch_ variants are
   generated because the base primitives smp_cond_load*()
   are instrumented.

 - once all architectures are moved over to arch_atomic_ one of the
   fallback variants can be removed and some 2300 lines reclaimed.

 - atomic_{read,set}*() are no longer double-instrumented

Reported-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Mark Rutland <mark.rutland@arm.com>
Link: https://lkml.kernel.org/r/20200505134058.769149955@linutronix.de
Peter Zijlstra authored and Thomas Gleixner committed Jun 11, 2020
1 parent 765dcd2 commit 37f8173
Showing 28 changed files with 2,403 additions and 82 deletions.
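
Before this change the arch headers included asm-generic/atomic-instrumented.h themselves, so the instrumented atomic_*() wrappers sat underneath the generic fallbacks and there was no uninstrumented form of a fallback for code that must avoid instrumentation (such as text poke). After the flip, an architecture that defines ARCH_ATOMIC provides only bare arch_atomic_*() primitives; the generic layer first completes the arch_ API via atomic-arch-fallback.h and then wraps it exactly once with instrumentation. A minimal sketch of the resulting layering, assuming the instrument_atomic_read()/instrument_atomic_write() hooks from <linux/instrumented.h> (the exact hooks emitted by atomic-instrumented.h have varied across kernel versions):

/* arch level, or linux/atomic-arch-fallback.h: raw, uninstrumented */
static __always_inline int arch_atomic_add_return(int i, atomic_t *v)
{
	return i + xadd(&v->counter, i);
}

/* generic level, asm-generic/atomic-instrumented.h: instrument once, call arch_ */
static __always_inline int atomic_add_return(int i, atomic_t *v)
{
	instrument_atomic_write(v, sizeof(*v));
	return arch_atomic_add_return(i, v);
}
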
6 changes: 3 additions & 3 deletions arch/arm64/include/asm/atomic.h
@@ -101,8 +101,8 @@ static inline long arch_atomic64_dec_if_positive(atomic64_t *v)

#define ATOMIC_INIT(i) { (i) }

#define arch_atomic_read(v) READ_ONCE((v)->counter)
#define arch_atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
#define arch_atomic_read(v) __READ_ONCE((v)->counter)
#define arch_atomic_set(v, i) __WRITE_ONCE(((v)->counter), (i))

#define arch_atomic_add_return_relaxed arch_atomic_add_return_relaxed
#define arch_atomic_add_return_acquire arch_atomic_add_return_acquire
@@ -225,6 +225,6 @@ static inline long arch_atomic64_dec_if_positive(atomic64_t *v)

#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive

#include <asm-generic/atomic-instrumented.h>
#define ARCH_ATOMIC

#endif /* __ASM_ATOMIC_H */
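
Besides defining ARCH_ATOMIC, the functional change here is switching arch_atomic_read()/arch_atomic_set() from READ_ONCE()/WRITE_ONCE() to the bare __READ_ONCE()/__WRITE_ONCE() forms; that is what backs the "no longer double-instrumented" note in the changelog, since the single remaining check now comes from the generic atomic_read()/atomic_set() wrappers. A conceptual sketch of the distinction (not the exact kernel definitions, which live in the compiler*.h headers; instrument_read() stands in for whichever sanitizer hooks apply):

/* Illustrative only -- not the real kernel macros. */
#define __READ_ONCE(x)	(*(const volatile typeof(x) *)&(x))
#define READ_ONCE(x)	({ instrument_read(&(x), sizeof(x)); __READ_ONCE(x); })
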
17 changes: 13 additions & 4 deletions arch/x86/include/asm/atomic.h
@@ -28,7 +28,7 @@ static __always_inline int arch_atomic_read(const atomic_t *v)
* Note for KASAN: we deliberately don't use READ_ONCE_NOCHECK() here,
* it's non-inlined function that increases binary size and stack usage.
*/
return READ_ONCE((v)->counter);
return __READ_ONCE((v)->counter);
}

/**
@@ -40,7 +40,7 @@ static __always_inline int arch_atomic_read(const atomic_t *v)
*/
static __always_inline void arch_atomic_set(atomic_t *v, int i)
{
WRITE_ONCE(v->counter, i);
__WRITE_ONCE(v->counter, i);
}

/**
@@ -166,6 +166,7 @@ static __always_inline int arch_atomic_add_return(int i, atomic_t *v)
{
return i + xadd(&v->counter, i);
}
#define arch_atomic_add_return arch_atomic_add_return

/**
* arch_atomic_sub_return - subtract integer and return
@@ -178,32 +179,37 @@ static __always_inline int arch_atomic_sub_return(int i, atomic_t *v)
{
return arch_atomic_add_return(-i, v);
}
#define arch_atomic_sub_return arch_atomic_sub_return

static __always_inline int arch_atomic_fetch_add(int i, atomic_t *v)
{
return xadd(&v->counter, i);
}
#define arch_atomic_fetch_add arch_atomic_fetch_add

static __always_inline int arch_atomic_fetch_sub(int i, atomic_t *v)
{
return xadd(&v->counter, -i);
}
#define arch_atomic_fetch_sub arch_atomic_fetch_sub

static __always_inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
{
return arch_cmpxchg(&v->counter, old, new);
}
#define arch_atomic_cmpxchg arch_atomic_cmpxchg

#define arch_atomic_try_cmpxchg arch_atomic_try_cmpxchg
static __always_inline bool arch_atomic_try_cmpxchg(atomic_t *v, int *old, int new)
{
return try_cmpxchg(&v->counter, old, new);
}
#define arch_atomic_try_cmpxchg arch_atomic_try_cmpxchg

static inline int arch_atomic_xchg(atomic_t *v, int new)
{
return arch_xchg(&v->counter, new);
}
#define arch_atomic_xchg arch_atomic_xchg

static inline void arch_atomic_and(int i, atomic_t *v)
{
@@ -221,6 +227,7 @@ static inline int arch_atomic_fetch_and(int i, atomic_t *v)

return val;
}
#define arch_atomic_fetch_and arch_atomic_fetch_and

static inline void arch_atomic_or(int i, atomic_t *v)
{
@@ -238,6 +245,7 @@ static inline int arch_atomic_fetch_or(int i, atomic_t *v)

return val;
}
#define arch_atomic_fetch_or arch_atomic_fetch_or

static inline void arch_atomic_xor(int i, atomic_t *v)
{
@@ -255,13 +263,14 @@ static inline int arch_atomic_fetch_xor(int i, atomic_t *v)

return val;
}
#define arch_atomic_fetch_xor arch_atomic_fetch_xor

#ifdef CONFIG_X86_32
# include <asm/atomic64_32.h>
#else
# include <asm/atomic64_64.h>
#endif

#include <asm-generic/atomic-instrumented.h>
#define ARCH_ATOMIC

#endif /* _ASM_X86_ATOMIC_H */
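
Every x86 operation now gains a self-referential define such as "#define arch_atomic_fetch_add arch_atomic_fetch_add". These are not redundant: the generated linux/atomic-arch-fallback.h keys off these macros with #ifndef to decide which fallbacks to emit, so an op the architecture implements must announce itself. An illustrative consumer-side fragment, reconstructed from the add_unless template further down rather than copied from the generated header:

#ifndef arch_atomic_add_unless
static __always_inline bool
arch_atomic_add_unless(atomic_t *v, int a, int u)
{
	return arch_atomic_fetch_add_unless(v, a, u) != u;
}
#define arch_atomic_add_unless arch_atomic_add_unless
#endif
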
9 changes: 9 additions & 0 deletions arch/x86/include/asm/atomic64_32.h
@@ -75,6 +75,7 @@ static inline s64 arch_atomic64_cmpxchg(atomic64_t *v, s64 o, s64 n)
{
return arch_cmpxchg64(&v->counter, o, n);
}
#define arch_atomic64_cmpxchg arch_atomic64_cmpxchg

/**
* arch_atomic64_xchg - xchg atomic64 variable
@@ -94,6 +95,7 @@ static inline s64 arch_atomic64_xchg(atomic64_t *v, s64 n)
: "memory");
return o;
}
#define arch_atomic64_xchg arch_atomic64_xchg

/**
* arch_atomic64_set - set atomic64 variable
@@ -138,6 +140,7 @@ static inline s64 arch_atomic64_add_return(s64 i, atomic64_t *v)
ASM_NO_INPUT_CLOBBER("memory"));
return i;
}
#define arch_atomic64_add_return arch_atomic64_add_return

/*
* Other variants with different arithmetic operators:
@@ -149,6 +152,7 @@ static inline s64 arch_atomic64_sub_return(s64 i, atomic64_t *v)
ASM_NO_INPUT_CLOBBER("memory"));
return i;
}
#define arch_atomic64_sub_return arch_atomic64_sub_return

static inline s64 arch_atomic64_inc_return(atomic64_t *v)
{
@@ -242,6 +246,7 @@ static inline int arch_atomic64_add_unless(atomic64_t *v, s64 a, s64 u)
"S" (v) : "memory");
return (int)a;
}
#define arch_atomic64_add_unless arch_atomic64_add_unless

static inline int arch_atomic64_inc_not_zero(atomic64_t *v)
{
@@ -281,6 +286,7 @@ static inline s64 arch_atomic64_fetch_and(s64 i, atomic64_t *v)

return old;
}
#define arch_atomic64_fetch_and arch_atomic64_fetch_and

static inline void arch_atomic64_or(s64 i, atomic64_t *v)
{
@@ -299,6 +305,7 @@ static inline s64 arch_atomic64_fetch_or(s64 i, atomic64_t *v)

return old;
}
#define arch_atomic64_fetch_or arch_atomic64_fetch_or

static inline void arch_atomic64_xor(s64 i, atomic64_t *v)
{
@@ -317,6 +324,7 @@ static inline s64 arch_atomic64_fetch_xor(s64 i, atomic64_t *v)

return old;
}
#define arch_atomic64_fetch_xor arch_atomic64_fetch_xor

static inline s64 arch_atomic64_fetch_add(s64 i, atomic64_t *v)
{
@@ -327,6 +335,7 @@ static inline s64 arch_atomic64_fetch_add(s64 i, atomic64_t *v)

return old;
}
#define arch_atomic64_fetch_add arch_atomic64_fetch_add

#define arch_atomic64_fetch_sub(i, v) arch_atomic64_fetch_add(-(i), (v))

15 changes: 12 additions & 3 deletions arch/x86/include/asm/atomic64_64.h
@@ -19,7 +19,7 @@
*/
static inline s64 arch_atomic64_read(const atomic64_t *v)
{
return READ_ONCE((v)->counter);
return __READ_ONCE((v)->counter);
}

/**
@@ -31,7 +31,7 @@ static inline s64 arch_atomic64_read(const atomic64_t *v)
*/
static inline void arch_atomic64_set(atomic64_t *v, s64 i)
{
WRITE_ONCE(v->counter, i);
__WRITE_ONCE(v->counter, i);
}

/**
@@ -159,37 +159,43 @@ static __always_inline s64 arch_atomic64_add_return(s64 i, atomic64_t *v)
{
return i + xadd(&v->counter, i);
}
#define arch_atomic64_add_return arch_atomic64_add_return

static inline s64 arch_atomic64_sub_return(s64 i, atomic64_t *v)
{
return arch_atomic64_add_return(-i, v);
}
#define arch_atomic64_sub_return arch_atomic64_sub_return

static inline s64 arch_atomic64_fetch_add(s64 i, atomic64_t *v)
{
return xadd(&v->counter, i);
}
#define arch_atomic64_fetch_add arch_atomic64_fetch_add

static inline s64 arch_atomic64_fetch_sub(s64 i, atomic64_t *v)
{
return xadd(&v->counter, -i);
}
#define arch_atomic64_fetch_sub arch_atomic64_fetch_sub

static inline s64 arch_atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
{
return arch_cmpxchg(&v->counter, old, new);
}
#define arch_atomic64_cmpxchg arch_atomic64_cmpxchg

#define arch_atomic64_try_cmpxchg arch_atomic64_try_cmpxchg
static __always_inline bool arch_atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
{
return try_cmpxchg(&v->counter, old, new);
}
#define arch_atomic64_try_cmpxchg arch_atomic64_try_cmpxchg

static inline s64 arch_atomic64_xchg(atomic64_t *v, s64 new)
{
return arch_xchg(&v->counter, new);
}
#define arch_atomic64_xchg arch_atomic64_xchg

static inline void arch_atomic64_and(s64 i, atomic64_t *v)
{
@@ -207,6 +213,7 @@ static inline s64 arch_atomic64_fetch_and(s64 i, atomic64_t *v)
} while (!arch_atomic64_try_cmpxchg(v, &val, val & i));
return val;
}
#define arch_atomic64_fetch_and arch_atomic64_fetch_and

static inline void arch_atomic64_or(s64 i, atomic64_t *v)
{
@@ -224,6 +231,7 @@ static inline s64 arch_atomic64_fetch_or(s64 i, atomic64_t *v)
} while (!arch_atomic64_try_cmpxchg(v, &val, val | i));
return val;
}
#define arch_atomic64_fetch_or arch_atomic64_fetch_or

static inline void arch_atomic64_xor(s64 i, atomic64_t *v)
{
@@ -241,5 +249,6 @@ static inline s64 arch_atomic64_fetch_xor(s64 i, atomic64_t *v)
} while (!arch_atomic64_try_cmpxchg(v, &val, val ^ i));
return val;
}
#define arch_atomic64_fetch_xor arch_atomic64_fetch_xor

#endif /* _ASM_X86_ATOMIC64_64_H */
2,291 changes: 2,291 additions & 0 deletions include/linux/atomic-arch-fallback.h

Large diffs are not rendered by default.
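
The new header is the output of scripts/atomic/gen-atomic-fallback.sh when invoked with the new "arch_" prefix argument (see the script and gen-atomics.sh changes below): the same fallbacks as include/linux/atomic-fallback.h, but spelled arch_atomic_*()/arch_atomic64_*() and left uninstrumented. A representative fragment, reconstructed from the templates rather than quoted from the file:

#ifndef arch_atomic_inc_not_zero
static __always_inline bool
arch_atomic_inc_not_zero(atomic_t *v)
{
	return arch_atomic_add_unless(v, 1, 0);
}
#define arch_atomic_inc_not_zero arch_atomic_inc_not_zero
#endif
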

8 changes: 1 addition & 7 deletions include/linux/atomic-fallback.h
@@ -1180,9 +1180,6 @@ atomic_dec_if_positive(atomic_t *v)
#define atomic_dec_if_positive atomic_dec_if_positive
#endif

#define atomic_cond_read_acquire(v, c) smp_cond_load_acquire(&(v)->counter, (c))
#define atomic_cond_read_relaxed(v, c) smp_cond_load_relaxed(&(v)->counter, (c))

#ifdef CONFIG_GENERIC_ATOMIC64
#include <asm-generic/atomic64.h>
#endif
@@ -2290,8 +2287,5 @@ atomic64_dec_if_positive(atomic64_t *v)
#define atomic64_dec_if_positive atomic64_dec_if_positive
#endif

#define atomic64_cond_read_acquire(v, c) smp_cond_load_acquire(&(v)->counter, (c))
#define atomic64_cond_read_relaxed(v, c) smp_cond_load_relaxed(&(v)->counter, (c))

#endif /* _LINUX_ATOMIC_FALLBACK_H */
// baaf45f4c24ed88ceae58baca39d7fd80bb8101b
// 1fac0941c79bf0ae100723cc2ac9b94061f0b67a
11 changes: 11 additions & 0 deletions include/linux/atomic.h
@@ -25,6 +25,12 @@
* See Documentation/memory-barriers.txt for ACQUIRE/RELEASE definitions.
*/

#define atomic_cond_read_acquire(v, c) smp_cond_load_acquire(&(v)->counter, (c))
#define atomic_cond_read_relaxed(v, c) smp_cond_load_relaxed(&(v)->counter, (c))

#define atomic64_cond_read_acquire(v, c) smp_cond_load_acquire(&(v)->counter, (c))
#define atomic64_cond_read_relaxed(v, c) smp_cond_load_relaxed(&(v)->counter, (c))

/*
* The idea here is to build acquire/release variants by adding explicit
* barriers on top of the relaxed variant. In the case where the relaxed
@@ -71,7 +77,12 @@
__ret; \
})

#ifdef ARCH_ATOMIC
#include <linux/atomic-arch-fallback.h>
#include <asm-generic/atomic-instrumented.h>
#else
#include <linux/atomic-fallback.h>
#endif

#include <asm-generic/atomic-long.h>
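
This #ifdef is the switch point: an architecture opts in by defining ARCH_ATOMIC in its asm/atomic.h (as arm64 and x86 do above) and supplying only arch_atomic_*() primitives, while unconverted architectures keep the old atomic-fallback.h path. Once every architecture has converted, that second path and its roughly 2300 lines can be dropped, per the changelog note. A sketch of what a converted architecture's asm/atomic.h boils down to (hypothetical, for illustration only):

/* asm/atomic.h of a converted architecture -- illustrative, not a real port */
#define ARCH_ATOMIC

#define arch_atomic_read(v)	__READ_ONCE((v)->counter)
#define arch_atomic_set(v, i)	__WRITE_ONCE(((v)->counter), (i))

/* plus whatever arch_atomic_*() RMW ops the ISA provides natively;
 * linux/atomic-arch-fallback.h fills in the rest, and
 * asm-generic/atomic-instrumented.h layers the instrumented atomic_*() API on top.
 */
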

4 changes: 2 additions & 2 deletions scripts/atomic/fallbacks/acquire
@@ -1,8 +1,8 @@
cat <<EOF
static __always_inline ${ret}
${atomic}_${pfx}${name}${sfx}_acquire(${params})
${arch}${atomic}_${pfx}${name}${sfx}_acquire(${params})
{
${ret} ret = ${atomic}_${pfx}${name}${sfx}_relaxed(${args});
${ret} ret = ${arch}${atomic}_${pfx}${name}${sfx}_relaxed(${args});
__atomic_acquire_fence();
return ret;
}
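
The template changes below are all the same mechanical substitution: prefix the emitted names with ${arch}, which is empty for the legacy header and "arch_" for the new one. For the acquire template above, the arch_ expansion comes out as, for example:

static __always_inline int
arch_atomic_add_return_acquire(int i, atomic_t *v)
{
	int ret = arch_atomic_add_return_relaxed(i, v);
	__atomic_acquire_fence();
	return ret;
}
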
6 changes: 3 additions & 3 deletions scripts/atomic/fallbacks/add_negative
@@ -1,6 +1,6 @@
cat <<EOF
/**
* ${atomic}_add_negative - add and test if negative
* ${arch}${atomic}_add_negative - add and test if negative
* @i: integer value to add
* @v: pointer of type ${atomic}_t
*
@@ -9,8 +9,8 @@ cat <<EOF
* result is greater than or equal to zero.
*/
static __always_inline bool
${atomic}_add_negative(${int} i, ${atomic}_t *v)
${arch}${atomic}_add_negative(${int} i, ${atomic}_t *v)
{
return ${atomic}_add_return(i, v) < 0;
return ${arch}${atomic}_add_return(i, v) < 0;
}
EOF
6 changes: 3 additions & 3 deletions scripts/atomic/fallbacks/add_unless
@@ -1,6 +1,6 @@
cat << EOF
/**
* ${atomic}_add_unless - add unless the number is already a given value
* ${arch}${atomic}_add_unless - add unless the number is already a given value
* @v: pointer of type ${atomic}_t
* @a: the amount to add to v...
* @u: ...unless v is equal to u.
@@ -9,8 +9,8 @@ cat << EOF
* Returns true if the addition was done.
*/
static __always_inline bool
${atomic}_add_unless(${atomic}_t *v, ${int} a, ${int} u)
${arch}${atomic}_add_unless(${atomic}_t *v, ${int} a, ${int} u)
{
return ${atomic}_fetch_add_unless(v, a, u) != u;
return ${arch}${atomic}_fetch_add_unless(v, a, u) != u;
}
EOF
4 changes: 2 additions & 2 deletions scripts/atomic/fallbacks/andnot
@@ -1,7 +1,7 @@
cat <<EOF
static __always_inline ${ret}
${atomic}_${pfx}andnot${sfx}${order}(${int} i, ${atomic}_t *v)
${arch}${atomic}_${pfx}andnot${sfx}${order}(${int} i, ${atomic}_t *v)
{
${retstmt}${atomic}_${pfx}and${sfx}${order}(~i, v);
${retstmt}${arch}${atomic}_${pfx}and${sfx}${order}(~i, v);
}
EOF
4 changes: 2 additions & 2 deletions scripts/atomic/fallbacks/dec
@@ -1,7 +1,7 @@
cat <<EOF
static __always_inline ${ret}
${atomic}_${pfx}dec${sfx}${order}(${atomic}_t *v)
${arch}${atomic}_${pfx}dec${sfx}${order}(${atomic}_t *v)
{
${retstmt}${atomic}_${pfx}sub${sfx}${order}(1, v);
${retstmt}${arch}${atomic}_${pfx}sub${sfx}${order}(1, v);
}
EOF
6 changes: 3 additions & 3 deletions scripts/atomic/fallbacks/dec_and_test
@@ -1,15 +1,15 @@
cat <<EOF
/**
* ${atomic}_dec_and_test - decrement and test
* ${arch}${atomic}_dec_and_test - decrement and test
* @v: pointer of type ${atomic}_t
*
* Atomically decrements @v by 1 and
* returns true if the result is 0, or false for all other
* cases.
*/
static __always_inline bool
${atomic}_dec_and_test(${atomic}_t *v)
${arch}${atomic}_dec_and_test(${atomic}_t *v)
{
return ${atomic}_dec_return(v) == 0;
return ${arch}${atomic}_dec_return(v) == 0;
}
EOF
6 changes: 3 additions & 3 deletions scripts/atomic/fallbacks/dec_if_positive
@@ -1,14 +1,14 @@
cat <<EOF
static __always_inline ${ret}
${atomic}_dec_if_positive(${atomic}_t *v)
${arch}${atomic}_dec_if_positive(${atomic}_t *v)
{
${int} dec, c = ${atomic}_read(v);
${int} dec, c = ${arch}${atomic}_read(v);

do {
dec = c - 1;
if (unlikely(dec < 0))
break;
} while (!${atomic}_try_cmpxchg(v, &c, dec));
} while (!${arch}${atomic}_try_cmpxchg(v, &c, dec));

return dec;
}
6 changes: 3 additions & 3 deletions scripts/atomic/fallbacks/dec_unless_positive
@@ -1,13 +1,13 @@
cat <<EOF
static __always_inline bool
${atomic}_dec_unless_positive(${atomic}_t *v)
${arch}${atomic}_dec_unless_positive(${atomic}_t *v)
{
${int} c = ${atomic}_read(v);
${int} c = ${arch}${atomic}_read(v);

do {
if (unlikely(c > 0))
return false;
} while (!${atomic}_try_cmpxchg(v, &c, c - 1));
} while (!${arch}${atomic}_try_cmpxchg(v, &c, c - 1));

return true;
}
4 changes: 2 additions & 2 deletions scripts/atomic/fallbacks/fence
@@ -1,10 +1,10 @@
cat <<EOF
static __always_inline ${ret}
${atomic}_${pfx}${name}${sfx}(${params})
${arch}${atomic}_${pfx}${name}${sfx}(${params})
{
${ret} ret;
__atomic_pre_full_fence();
ret = ${atomic}_${pfx}${name}${sfx}_relaxed(${args});
ret = ${arch}${atomic}_${pfx}${name}${sfx}_relaxed(${args});
__atomic_post_full_fence();
return ret;
}
8 changes: 4 additions & 4 deletions scripts/atomic/fallbacks/fetch_add_unless
@@ -1,6 +1,6 @@
cat << EOF
/**
* ${atomic}_fetch_add_unless - add unless the number is already a given value
* ${arch}${atomic}_fetch_add_unless - add unless the number is already a given value
* @v: pointer of type ${atomic}_t
* @a: the amount to add to v...
* @u: ...unless v is equal to u.
@@ -9,14 +9,14 @@ cat << EOF
* Returns original value of @v
*/
static __always_inline ${int}
${atomic}_fetch_add_unless(${atomic}_t *v, ${int} a, ${int} u)
${arch}${atomic}_fetch_add_unless(${atomic}_t *v, ${int} a, ${int} u)
{
${int} c = ${atomic}_read(v);
${int} c = ${arch}${atomic}_read(v);

do {
if (unlikely(c == u))
break;
} while (!${atomic}_try_cmpxchg(v, &c, c + a));
} while (!${arch}${atomic}_try_cmpxchg(v, &c, c + a));

return c;
}
4 changes: 2 additions & 2 deletions scripts/atomic/fallbacks/inc
@@ -1,7 +1,7 @@
cat <<EOF
static __always_inline ${ret}
${atomic}_${pfx}inc${sfx}${order}(${atomic}_t *v)
${arch}${atomic}_${pfx}inc${sfx}${order}(${atomic}_t *v)
{
${retstmt}${atomic}_${pfx}add${sfx}${order}(1, v);
${retstmt}${arch}${atomic}_${pfx}add${sfx}${order}(1, v);
}
EOF
6 changes: 3 additions & 3 deletions scripts/atomic/fallbacks/inc_and_test
@@ -1,15 +1,15 @@
cat <<EOF
/**
* ${atomic}_inc_and_test - increment and test
* ${arch}${atomic}_inc_and_test - increment and test
* @v: pointer of type ${atomic}_t
*
* Atomically increments @v by 1
* and returns true if the result is zero, or false for all
* other cases.
*/
static __always_inline bool
${atomic}_inc_and_test(${atomic}_t *v)
${arch}${atomic}_inc_and_test(${atomic}_t *v)
{
return ${atomic}_inc_return(v) == 0;
return ${arch}${atomic}_inc_return(v) == 0;
}
EOF
6 changes: 3 additions & 3 deletions scripts/atomic/fallbacks/inc_not_zero
@@ -1,14 +1,14 @@
cat <<EOF
/**
* ${atomic}_inc_not_zero - increment unless the number is zero
* ${arch}${atomic}_inc_not_zero - increment unless the number is zero
* @v: pointer of type ${atomic}_t
*
* Atomically increments @v by 1, if @v is non-zero.
* Returns true if the increment was done.
*/
static __always_inline bool
${atomic}_inc_not_zero(${atomic}_t *v)
${arch}${atomic}_inc_not_zero(${atomic}_t *v)
{
return ${atomic}_add_unless(v, 1, 0);
return ${arch}${atomic}_add_unless(v, 1, 0);
}
EOF
6 changes: 3 additions & 3 deletions scripts/atomic/fallbacks/inc_unless_negative
@@ -1,13 +1,13 @@
cat <<EOF
static __always_inline bool
${atomic}_inc_unless_negative(${atomic}_t *v)
${arch}${atomic}_inc_unless_negative(${atomic}_t *v)
{
${int} c = ${atomic}_read(v);
${int} c = ${arch}${atomic}_read(v);

do {
if (unlikely(c < 0))
return false;
} while (!${atomic}_try_cmpxchg(v, &c, c + 1));
} while (!${arch}${atomic}_try_cmpxchg(v, &c, c + 1));

return true;
}
2 changes: 1 addition & 1 deletion scripts/atomic/fallbacks/read_acquire
@@ -1,6 +1,6 @@
cat <<EOF
static __always_inline ${ret}
${atomic}_read_acquire(const ${atomic}_t *v)
${arch}${atomic}_read_acquire(const ${atomic}_t *v)
{
return smp_load_acquire(&(v)->counter);
}
4 changes: 2 additions & 2 deletions scripts/atomic/fallbacks/release
@@ -1,8 +1,8 @@
cat <<EOF
static __always_inline ${ret}
${atomic}_${pfx}${name}${sfx}_release(${params})
${arch}${atomic}_${pfx}${name}${sfx}_release(${params})
{
__atomic_release_fence();
${retstmt}${atomic}_${pfx}${name}${sfx}_relaxed(${args});
${retstmt}${arch}${atomic}_${pfx}${name}${sfx}_relaxed(${args});
}
EOF
2 changes: 1 addition & 1 deletion scripts/atomic/fallbacks/set_release
@@ -1,6 +1,6 @@
cat <<EOF
static __always_inline void
${atomic}_set_release(${atomic}_t *v, ${int} i)
${arch}${atomic}_set_release(${atomic}_t *v, ${int} i)
{
smp_store_release(&(v)->counter, i);
}
6 changes: 3 additions & 3 deletions scripts/atomic/fallbacks/sub_and_test
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
cat <<EOF
/**
* ${atomic}_sub_and_test - subtract value from variable and test result
* ${arch}${atomic}_sub_and_test - subtract value from variable and test result
* @i: integer value to subtract
* @v: pointer of type ${atomic}_t
*
@@ -9,8 +9,8 @@ cat <<EOF
* other cases.
*/
static __always_inline bool
${atomic}_sub_and_test(${int} i, ${atomic}_t *v)
${arch}${atomic}_sub_and_test(${int} i, ${atomic}_t *v)
{
return ${atomic}_sub_return(i, v) == 0;
return ${arch}${atomic}_sub_return(i, v) == 0;
}
EOF
4 changes: 2 additions & 2 deletions scripts/atomic/fallbacks/try_cmpxchg
@@ -1,9 +1,9 @@
cat <<EOF
static __always_inline bool
${atomic}_try_cmpxchg${order}(${atomic}_t *v, ${int} *old, ${int} new)
${arch}${atomic}_try_cmpxchg${order}(${atomic}_t *v, ${int} *old, ${int} new)
{
${int} r, o = *old;
r = ${atomic}_cmpxchg${order}(v, o, new);
r = ${arch}${atomic}_cmpxchg${order}(v, o, new);
if (unlikely(r != o))
*old = r;
return likely(r == o);
29 changes: 13 additions & 16 deletions scripts/atomic/gen-atomic-fallback.sh
@@ -2,10 +2,11 @@
# SPDX-License-Identifier: GPL-2.0

ATOMICDIR=$(dirname $0)
ARCH=$2

. ${ATOMICDIR}/atomic-tbl.sh

#gen_template_fallback(template, meta, pfx, name, sfx, order, atomic, int, args...)
#gen_template_fallback(template, meta, pfx, name, sfx, order, arch, atomic, int, args...)
gen_template_fallback()
{
local template="$1"; shift
@@ -14,10 +15,11 @@ gen_template_fallback()
local name="$1"; shift
local sfx="$1"; shift
local order="$1"; shift
local arch="$1"; shift
local atomic="$1"; shift
local int="$1"; shift

local atomicname="${atomic}_${pfx}${name}${sfx}${order}"
local atomicname="${arch}${atomic}_${pfx}${name}${sfx}${order}"

local ret="$(gen_ret_type "${meta}" "${int}")"
local retstmt="$(gen_ret_stmt "${meta}")"
@@ -32,7 +34,7 @@ gen_template_fallback()
fi
}

#gen_proto_fallback(meta, pfx, name, sfx, order, atomic, int, args...)
#gen_proto_fallback(meta, pfx, name, sfx, order, arch, atomic, int, args...)
gen_proto_fallback()
{
local meta="$1"; shift
@@ -56,16 +58,17 @@ cat << EOF
EOF
}

#gen_proto_order_variants(meta, pfx, name, sfx, atomic, int, args...)
#gen_proto_order_variants(meta, pfx, name, sfx, arch, atomic, int, args...)
gen_proto_order_variants()
{
local meta="$1"; shift
local pfx="$1"; shift
local name="$1"; shift
local sfx="$1"; shift
local atomic="$1"
local arch="$1"
local atomic="$2"

local basename="${atomic}_${pfx}${name}${sfx}"
local basename="${arch}${atomic}_${pfx}${name}${sfx}"

local template="$(find_fallback_template "${pfx}" "${name}" "${sfx}" "${order}")"

@@ -94,7 +97,7 @@ gen_proto_order_variants()
gen_basic_fallbacks "${basename}"

if [ ! -z "${template}" ]; then
printf "#endif /* ${atomic}_${pfx}${name}${sfx} */\n\n"
printf "#endif /* ${arch}${atomic}_${pfx}${name}${sfx} */\n\n"
gen_proto_fallback "${meta}" "${pfx}" "${name}" "${sfx}" "" "$@"
gen_proto_fallback "${meta}" "${pfx}" "${name}" "${sfx}" "_acquire" "$@"
gen_proto_fallback "${meta}" "${pfx}" "${name}" "${sfx}" "_release" "$@"
@@ -153,31 +156,25 @@ cat << EOF
EOF

for xchg in "xchg" "cmpxchg" "cmpxchg64"; do
for xchg in "${ARCH}xchg" "${ARCH}cmpxchg" "${ARCH}cmpxchg64"; do
gen_xchg_fallbacks "${xchg}"
done

grep '^[a-z]' "$1" | while read name meta args; do
gen_proto "${meta}" "${name}" "atomic" "int" ${args}
gen_proto "${meta}" "${name}" "${ARCH}" "atomic" "int" ${args}
done

cat <<EOF
#define atomic_cond_read_acquire(v, c) smp_cond_load_acquire(&(v)->counter, (c))
#define atomic_cond_read_relaxed(v, c) smp_cond_load_relaxed(&(v)->counter, (c))
#ifdef CONFIG_GENERIC_ATOMIC64
#include <asm-generic/atomic64.h>
#endif
EOF

grep '^[a-z]' "$1" | while read name meta args; do
gen_proto "${meta}" "${name}" "atomic64" "s64" ${args}
gen_proto "${meta}" "${name}" "${ARCH}" "atomic64" "s64" ${args}
done

cat <<EOF
#define atomic64_cond_read_acquire(v, c) smp_cond_load_acquire(&(v)->counter, (c))
#define atomic64_cond_read_relaxed(v, c) smp_cond_load_relaxed(&(v)->counter, (c))
#endif /* _LINUX_ATOMIC_FALLBACK_H */
EOF
5 changes: 3 additions & 2 deletions scripts/atomic/gen-atomics.sh
@@ -10,10 +10,11 @@ LINUXDIR=${ATOMICDIR}/../..
cat <<EOF |
gen-atomic-instrumented.sh asm-generic/atomic-instrumented.h
gen-atomic-long.sh asm-generic/atomic-long.h
gen-atomic-fallback.sh linux/atomic-arch-fallback.h arch_
gen-atomic-fallback.sh linux/atomic-fallback.h
EOF
while read script header; do
/bin/sh ${ATOMICDIR}/${script} ${ATOMICTBL} > ${LINUXDIR}/include/${header}
while read script header args; do
/bin/sh ${ATOMICDIR}/${script} ${ATOMICTBL} ${args} > ${LINUXDIR}/include/${header}
HASH="$(sha1sum ${LINUXDIR}/include/${header})"
HASH="${HASH%% *}"
printf "// %s\n" "${HASH}" >> ${LINUXDIR}/include/${header}
