s390/bitops: Switch to generic bitops
The generic bitops implementation is nearly identical to the s390
implementation, so switch to the generic variant.

This results in a small decrease in kernel image size, because the
generic variant declares the nr parameter of most bitops functions as
unsigned int, while the s390 variant uses unsigned long.

bloat-o-meter:
add/remove: 670/670 grow/shrink: 167/209 up/down: 21440/-21792 (-352)
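
For illustration, a minimal standalone sketch (hypothetical userspace
code, not verbatim kernel source; set_bit_generic_style() and
set_bit_s390_style() are made-up names) of the difference described
above: both variants derive the same word and mask, only the type of
the bit number differs. BIT_WORD()/BIT_MASK() are defined locally to
mirror the helpers in linux/bits.h.

#include <stdio.h>

#define BITS_PER_LONG	(8 * sizeof(unsigned long))
#define BIT_WORD(nr)	((nr) / BITS_PER_LONG)
#define BIT_MASK(nr)	(1UL << ((nr) % BITS_PER_LONG))

/* generic-style prototype: bit number is unsigned int */
static void set_bit_generic_style(unsigned int nr, unsigned long *addr)
{
	addr[BIT_WORD(nr)] |= BIT_MASK(nr);
}

/* s390-style prototype (as removed by this commit): bit number is unsigned long */
static void set_bit_s390_style(unsigned long nr, unsigned long *addr)
{
	addr[BIT_WORD(nr)] |= BIT_MASK(nr);
}

int main(void)
{
	unsigned long bitmap[2] = { 0, 0 };

	set_bit_generic_style(3, bitmap);
	set_bit_s390_style(65, bitmap);
	printf("%#lx %#lx\n", bitmap[0], bitmap[1]);	/* prints 0x8 0x2 */
	return 0;
}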

Acked-by: Alexander Gordeev <agordeev@linux.ibm.com>
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
Heiko Carstens authored and Alexander Gordeev committed Jan 13, 2025
1 parent 061a5e4 commit 8cae8e0
Showing 1 changed file with 3 additions and 178 deletions.
181 changes: 3 additions & 178 deletions arch/s390/include/asm/bitops.h
@@ -36,184 +36,9 @@
#include <linux/typecheck.h>
#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/atomic_ops.h>
#include <asm/barrier.h>

#define __BITOPS_WORDS(bits) (((bits) + BITS_PER_LONG - 1) / BITS_PER_LONG)

static inline unsigned long *
__bitops_word(unsigned long nr, const volatile unsigned long *ptr)
{
	unsigned long addr;

	addr = (unsigned long)ptr + ((nr ^ (nr & (BITS_PER_LONG - 1))) >> 3);
	return (unsigned long *)addr;
}

static inline unsigned long __bitops_mask(unsigned long nr)
{
	return 1UL << (nr & (BITS_PER_LONG - 1));
}

static __always_inline void arch_set_bit(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long *addr = __bitops_word(nr, ptr);
	unsigned long mask = __bitops_mask(nr);

	__atomic64_or(mask, (long *)addr);
}

static __always_inline void arch_clear_bit(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long *addr = __bitops_word(nr, ptr);
	unsigned long mask = __bitops_mask(nr);

	__atomic64_and(~mask, (long *)addr);
}

static __always_inline void arch_change_bit(unsigned long nr,
					    volatile unsigned long *ptr)
{
	unsigned long *addr = __bitops_word(nr, ptr);
	unsigned long mask = __bitops_mask(nr);

	__atomic64_xor(mask, (long *)addr);
}

static inline bool arch_test_and_set_bit(unsigned long nr,
					 volatile unsigned long *ptr)
{
	unsigned long *addr = __bitops_word(nr, ptr);
	unsigned long mask = __bitops_mask(nr);
	unsigned long old;

	old = __atomic64_or_barrier(mask, (long *)addr);
	return old & mask;
}

static inline bool arch_test_and_clear_bit(unsigned long nr,
					   volatile unsigned long *ptr)
{
	unsigned long *addr = __bitops_word(nr, ptr);
	unsigned long mask = __bitops_mask(nr);
	unsigned long old;

	old = __atomic64_and_barrier(~mask, (long *)addr);
	return old & mask;
}

static inline bool arch_test_and_change_bit(unsigned long nr,
					    volatile unsigned long *ptr)
{
	unsigned long *addr = __bitops_word(nr, ptr);
	unsigned long mask = __bitops_mask(nr);
	unsigned long old;

	old = __atomic64_xor_barrier(mask, (long *)addr);
	return old & mask;
}

static __always_inline void
arch___set_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long *p = __bitops_word(nr, addr);
	unsigned long mask = __bitops_mask(nr);

	*p |= mask;
}

static __always_inline void
arch___clear_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long *p = __bitops_word(nr, addr);
	unsigned long mask = __bitops_mask(nr);

	*p &= ~mask;
}

static __always_inline void
arch___change_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long *p = __bitops_word(nr, addr);
	unsigned long mask = __bitops_mask(nr);

	*p ^= mask;
}

static __always_inline bool
arch___test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long *p = __bitops_word(nr, addr);
	unsigned long mask = __bitops_mask(nr);
	unsigned long old;

	old = *p;
	*p |= mask;
	return old & mask;
}

static __always_inline bool
arch___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long *p = __bitops_word(nr, addr);
	unsigned long mask = __bitops_mask(nr);
	unsigned long old;

	old = *p;
	*p &= ~mask;
	return old & mask;
}

static __always_inline bool
arch___test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long *p = __bitops_word(nr, addr);
	unsigned long mask = __bitops_mask(nr);
	unsigned long old;

	old = *p;
	*p ^= mask;
	return old & mask;
}

#define arch_test_bit generic_test_bit
#define arch_test_bit_acquire generic_test_bit_acquire

static inline bool arch_test_and_set_bit_lock(unsigned long nr,
					      volatile unsigned long *ptr)
{
	if (arch_test_bit(nr, ptr))
		return true;
	return arch_test_and_set_bit(nr, ptr);
}

static inline void arch_clear_bit_unlock(unsigned long nr,
					  volatile unsigned long *ptr)
{
	smp_mb__before_atomic();
	arch_clear_bit(nr, ptr);
}

static inline void arch___clear_bit_unlock(unsigned long nr,
					   volatile unsigned long *ptr)
{
	smp_mb();
	arch___clear_bit(nr, ptr);
}

static inline bool arch_xor_unlock_is_negative_byte(unsigned long mask,
						    volatile unsigned long *ptr)
{
	unsigned long old;

	old = __atomic64_xor_barrier(mask, (long *)ptr);
	return old & BIT(7);
}
#define arch_xor_unlock_is_negative_byte arch_xor_unlock_is_negative_byte

#include <asm-generic/bitops/instrumented-atomic.h>
#include <asm-generic/bitops/instrumented-non-atomic.h>
#include <asm-generic/bitops/instrumented-lock.h>
#include <asm-generic/bitops/atomic.h>
#include <asm-generic/bitops/non-atomic.h>
#include <asm-generic/bitops/lock.h>

/*
* Functions which use MSB0 bit numbering.
