Merge tag 'loongarch-6.7' of git://git.kernel.org/pub/scm/linux/kernel/git/chenhuacai/linux-loongson

Pull LoongArch updates from Huacai Chen:

 - support PREEMPT_DYNAMIC with static keys

 - relax memory ordering for atomic operations

 - support BPF CPU v4 instructions for LoongArch

 - some build and runtime warning fixes

* tag 'loongarch-6.7' of git://git.kernel.org/pub/scm/linux/kernel/git/chenhuacai/linux-loongson:
  selftests/bpf: Enable cpu v4 tests for LoongArch
  LoongArch: BPF: Support signed mod instructions
  LoongArch: BPF: Support signed div instructions
  LoongArch: BPF: Support 32-bit offset jmp instructions
  LoongArch: BPF: Support unconditional bswap instructions
  LoongArch: BPF: Support sign-extension mov instructions
  LoongArch: BPF: Support sign-extension load instructions
  LoongArch: Add more instruction opcodes and emit_* helpers
  LoongArch/smp: Call rcutree_report_cpu_starting() earlier
  LoongArch: Relax memory ordering for atomic operations
  LoongArch: Mark __percpu functions as always inline
  LoongArch: Disable module from accessing external data directly
  LoongArch: Support PREEMPT_DYNAMIC with static keys
Linus Torvalds committed Nov 12, 2023
2 parents 5dd2020 + 1d375d6 commit 4eeee66
Showing 13 changed files with 215 additions and 63 deletions.
1 change: 1 addition & 0 deletions arch/loongarch/Kconfig
@@ -136,6 +136,7 @@ config LOONGARCH
 	select HAVE_PERF_EVENTS
 	select HAVE_PERF_REGS
 	select HAVE_PERF_USER_STACK_DUMP
+	select HAVE_PREEMPT_DYNAMIC_KEY
 	select HAVE_REGS_AND_STACK_ACCESS_API
 	select HAVE_RETHOOK
 	select HAVE_RSEQ
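Note on this change: HAVE_PREEMPT_DYNAMIC_KEY selects the static-key flavour of PREEMPT_DYNAMIC, used by architectures that lack static calls. A minimal sketch of the generic mechanism (simplified from kernel/sched/core.c, not quoted verbatim): each preemption entry point is gated by a static branch that the preempt= boot parameter (or a runtime switch) patches on or off.

    DEFINE_STATIC_KEY_TRUE(sk_dynamic_preempt_schedule);

    void __sched notrace dynamic_preempt_schedule(void)
    {
            /* Patched down to a NOP when the chosen preemption model
             * does not want involuntary preemption at this point. */
            if (!static_branch_unlikely(&sk_dynamic_preempt_schedule))
                    return;
            preempt_schedule();
    }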
2 changes: 2 additions & 0 deletions arch/loongarch/Makefile
@@ -68,6 +68,8 @@ LDFLAGS_vmlinux += -static -n -nostdlib
 ifdef CONFIG_AS_HAS_EXPLICIT_RELOCS
 cflags-y += $(call cc-option,-mexplicit-relocs)
 KBUILD_CFLAGS_KERNEL += $(call cc-option,-mdirect-extern-access)
+KBUILD_AFLAGS_MODULE += $(call cc-option,-fno-direct-access-external-data)
+KBUILD_CFLAGS_MODULE += $(call cc-option,-fno-direct-access-external-data)
 KBUILD_AFLAGS_MODULE += $(call cc-option,-mno-relax) $(call cc-option,-Wa$(comma)-mno-relax)
 KBUILD_CFLAGS_MODULE += $(call cc-option,-mno-relax) $(call cc-option,-Wa$(comma)-mno-relax)
 else
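Context: vmlinux is linked statically, so -mdirect-extern-access lets the kernel image reach external symbols with direct PC-relative addressing, but modules load at arbitrary distances from the kernel, so their objects must go through the GOT instead; that is what -fno-direct-access-external-data requests. A hypothetical module snippet to illustrate (the symbol is only an example):

    extern volatile unsigned long jiffies;

    unsigned long sample_jiffies(void)
    {
            /* Built with -fno-direct-access-external-data, this load
             * fetches the address of 'jiffies' from a GOT entry that
             * the module loader relocates, rather than addressing the
             * symbol directly. */
            return jiffies;
    }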
88 changes: 68 additions & 20 deletions arch/loongarch/include/asm/atomic.h
@@ -36,33 +36,33 @@
 static inline void arch_atomic_##op(int i, atomic_t *v) \
 { \
 	__asm__ __volatile__( \
-	"am"#asm_op"_db.w" " $zero, %1, %0 \n" \
+	"am"#asm_op".w" " $zero, %1, %0 \n" \
 	: "+ZB" (v->counter) \
 	: "r" (I) \
 	: "memory"); \
 }
 
-#define ATOMIC_OP_RETURN(op, I, asm_op, c_op) \
-static inline int arch_atomic_##op##_return_relaxed(int i, atomic_t *v) \
+#define ATOMIC_OP_RETURN(op, I, asm_op, c_op, mb, suffix) \
+static inline int arch_atomic_##op##_return##suffix(int i, atomic_t *v) \
 { \
 	int result; \
 \
 	__asm__ __volatile__( \
-	"am"#asm_op"_db.w" " %1, %2, %0 \n" \
+	"am"#asm_op#mb".w" " %1, %2, %0 \n" \
 	: "+ZB" (v->counter), "=&r" (result) \
 	: "r" (I) \
 	: "memory"); \
 \
 	return result c_op I; \
 }
 
-#define ATOMIC_FETCH_OP(op, I, asm_op) \
-static inline int arch_atomic_fetch_##op##_relaxed(int i, atomic_t *v) \
+#define ATOMIC_FETCH_OP(op, I, asm_op, mb, suffix) \
+static inline int arch_atomic_fetch_##op##suffix(int i, atomic_t *v) \
 { \
 	int result; \
 \
 	__asm__ __volatile__( \
-	"am"#asm_op"_db.w" " %1, %2, %0 \n" \
+	"am"#asm_op#mb".w" " %1, %2, %0 \n" \
 	: "+ZB" (v->counter), "=&r" (result) \
 	: "r" (I) \
 	: "memory"); \
@@ -72,29 +72,53 @@ static inline int arch_atomic_fetch_##op##_relaxed(int i, atomic_t *v) \
 
 #define ATOMIC_OPS(op, I, asm_op, c_op) \
 	ATOMIC_OP(op, I, asm_op) \
-	ATOMIC_OP_RETURN(op, I, asm_op, c_op) \
-	ATOMIC_FETCH_OP(op, I, asm_op)
+	ATOMIC_OP_RETURN(op, I, asm_op, c_op, _db, ) \
+	ATOMIC_OP_RETURN(op, I, asm_op, c_op, , _relaxed) \
+	ATOMIC_FETCH_OP(op, I, asm_op, _db, ) \
+	ATOMIC_FETCH_OP(op, I, asm_op, , _relaxed)
 
 ATOMIC_OPS(add, i, add, +)
 ATOMIC_OPS(sub, -i, add, +)
 
+#define arch_atomic_add_return arch_atomic_add_return
+#define arch_atomic_add_return_acquire arch_atomic_add_return
+#define arch_atomic_add_return_release arch_atomic_add_return
 #define arch_atomic_add_return_relaxed arch_atomic_add_return_relaxed
+#define arch_atomic_sub_return arch_atomic_sub_return
+#define arch_atomic_sub_return_acquire arch_atomic_sub_return
+#define arch_atomic_sub_return_release arch_atomic_sub_return
 #define arch_atomic_sub_return_relaxed arch_atomic_sub_return_relaxed
+#define arch_atomic_fetch_add arch_atomic_fetch_add
+#define arch_atomic_fetch_add_acquire arch_atomic_fetch_add
+#define arch_atomic_fetch_add_release arch_atomic_fetch_add
 #define arch_atomic_fetch_add_relaxed arch_atomic_fetch_add_relaxed
+#define arch_atomic_fetch_sub arch_atomic_fetch_sub
+#define arch_atomic_fetch_sub_acquire arch_atomic_fetch_sub
+#define arch_atomic_fetch_sub_release arch_atomic_fetch_sub
 #define arch_atomic_fetch_sub_relaxed arch_atomic_fetch_sub_relaxed
 
 #undef ATOMIC_OPS
 
 #define ATOMIC_OPS(op, I, asm_op) \
 	ATOMIC_OP(op, I, asm_op) \
-	ATOMIC_FETCH_OP(op, I, asm_op)
+	ATOMIC_FETCH_OP(op, I, asm_op, _db, ) \
+	ATOMIC_FETCH_OP(op, I, asm_op, , _relaxed)
 
 ATOMIC_OPS(and, i, and)
 ATOMIC_OPS(or, i, or)
 ATOMIC_OPS(xor, i, xor)
 
+#define arch_atomic_fetch_and arch_atomic_fetch_and
+#define arch_atomic_fetch_and_acquire arch_atomic_fetch_and
+#define arch_atomic_fetch_and_release arch_atomic_fetch_and
 #define arch_atomic_fetch_and_relaxed arch_atomic_fetch_and_relaxed
+#define arch_atomic_fetch_or arch_atomic_fetch_or
+#define arch_atomic_fetch_or_acquire arch_atomic_fetch_or
+#define arch_atomic_fetch_or_release arch_atomic_fetch_or
 #define arch_atomic_fetch_or_relaxed arch_atomic_fetch_or_relaxed
+#define arch_atomic_fetch_xor arch_atomic_fetch_xor
+#define arch_atomic_fetch_xor_acquire arch_atomic_fetch_xor
+#define arch_atomic_fetch_xor_release arch_atomic_fetch_xor
 #define arch_atomic_fetch_xor_relaxed arch_atomic_fetch_xor_relaxed
 
 #undef ATOMIC_OPS
@@ -172,32 +196,32 @@ static inline int arch_atomic_sub_if_positive(int i, atomic_t *v)
 static inline void arch_atomic64_##op(long i, atomic64_t *v) \
 { \
 	__asm__ __volatile__( \
-	"am"#asm_op"_db.d " " $zero, %1, %0 \n" \
+	"am"#asm_op".d " " $zero, %1, %0 \n" \
 	: "+ZB" (v->counter) \
 	: "r" (I) \
 	: "memory"); \
 }
 
-#define ATOMIC64_OP_RETURN(op, I, asm_op, c_op) \
-static inline long arch_atomic64_##op##_return_relaxed(long i, atomic64_t *v) \
+#define ATOMIC64_OP_RETURN(op, I, asm_op, c_op, mb, suffix) \
+static inline long arch_atomic64_##op##_return##suffix(long i, atomic64_t *v) \
 { \
 	long result; \
 	__asm__ __volatile__( \
-	"am"#asm_op"_db.d " " %1, %2, %0 \n" \
+	"am"#asm_op#mb".d " " %1, %2, %0 \n" \
 	: "+ZB" (v->counter), "=&r" (result) \
 	: "r" (I) \
 	: "memory"); \
 \
 	return result c_op I; \
 }
 
-#define ATOMIC64_FETCH_OP(op, I, asm_op) \
-static inline long arch_atomic64_fetch_##op##_relaxed(long i, atomic64_t *v) \
+#define ATOMIC64_FETCH_OP(op, I, asm_op, mb, suffix) \
+static inline long arch_atomic64_fetch_##op##suffix(long i, atomic64_t *v) \
 { \
 	long result; \
 \
 	__asm__ __volatile__( \
-	"am"#asm_op"_db.d " " %1, %2, %0 \n" \
+	"am"#asm_op#mb".d " " %1, %2, %0 \n" \
 	: "+ZB" (v->counter), "=&r" (result) \
 	: "r" (I) \
 	: "memory"); \
@@ -207,29 +231,53 @@ static inline long arch_atomic64_fetch_##op##_relaxed(long i, atomic64_t *v) \
 
 #define ATOMIC64_OPS(op, I, asm_op, c_op) \
 	ATOMIC64_OP(op, I, asm_op) \
-	ATOMIC64_OP_RETURN(op, I, asm_op, c_op) \
-	ATOMIC64_FETCH_OP(op, I, asm_op)
+	ATOMIC64_OP_RETURN(op, I, asm_op, c_op, _db, ) \
+	ATOMIC64_OP_RETURN(op, I, asm_op, c_op, , _relaxed) \
+	ATOMIC64_FETCH_OP(op, I, asm_op, _db, ) \
+	ATOMIC64_FETCH_OP(op, I, asm_op, , _relaxed)
 
 ATOMIC64_OPS(add, i, add, +)
 ATOMIC64_OPS(sub, -i, add, +)
 
+#define arch_atomic64_add_return arch_atomic64_add_return
+#define arch_atomic64_add_return_acquire arch_atomic64_add_return
+#define arch_atomic64_add_return_release arch_atomic64_add_return
 #define arch_atomic64_add_return_relaxed arch_atomic64_add_return_relaxed
+#define arch_atomic64_sub_return arch_atomic64_sub_return
+#define arch_atomic64_sub_return_acquire arch_atomic64_sub_return
+#define arch_atomic64_sub_return_release arch_atomic64_sub_return
 #define arch_atomic64_sub_return_relaxed arch_atomic64_sub_return_relaxed
+#define arch_atomic64_fetch_add arch_atomic64_fetch_add
+#define arch_atomic64_fetch_add_acquire arch_atomic64_fetch_add
+#define arch_atomic64_fetch_add_release arch_atomic64_fetch_add
 #define arch_atomic64_fetch_add_relaxed arch_atomic64_fetch_add_relaxed
+#define arch_atomic64_fetch_sub arch_atomic64_fetch_sub
+#define arch_atomic64_fetch_sub_acquire arch_atomic64_fetch_sub
+#define arch_atomic64_fetch_sub_release arch_atomic64_fetch_sub
 #define arch_atomic64_fetch_sub_relaxed arch_atomic64_fetch_sub_relaxed
 
 #undef ATOMIC64_OPS
 
 #define ATOMIC64_OPS(op, I, asm_op) \
 	ATOMIC64_OP(op, I, asm_op) \
-	ATOMIC64_FETCH_OP(op, I, asm_op)
+	ATOMIC64_FETCH_OP(op, I, asm_op, _db, ) \
+	ATOMIC64_FETCH_OP(op, I, asm_op, , _relaxed)
 
 ATOMIC64_OPS(and, i, and)
 ATOMIC64_OPS(or, i, or)
 ATOMIC64_OPS(xor, i, xor)
 
+#define arch_atomic64_fetch_and arch_atomic64_fetch_and
+#define arch_atomic64_fetch_and_acquire arch_atomic64_fetch_and
+#define arch_atomic64_fetch_and_release arch_atomic64_fetch_and
 #define arch_atomic64_fetch_and_relaxed arch_atomic64_fetch_and_relaxed
+#define arch_atomic64_fetch_or arch_atomic64_fetch_or
+#define arch_atomic64_fetch_or_acquire arch_atomic64_fetch_or
+#define arch_atomic64_fetch_or_release arch_atomic64_fetch_or
 #define arch_atomic64_fetch_or_relaxed arch_atomic64_fetch_or_relaxed
+#define arch_atomic64_fetch_xor arch_atomic64_fetch_xor
+#define arch_atomic64_fetch_xor_acquire arch_atomic64_fetch_xor
+#define arch_atomic64_fetch_xor_release arch_atomic64_fetch_xor
 #define arch_atomic64_fetch_xor_relaxed arch_atomic64_fetch_xor_relaxed
 
 #undef ATOMIC64_OPS
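The net effect of the new mb/suffix macro parameters: every value-returning op now comes in a fully ordered variant built on the AM*_DB instructions and a relaxed variant built on plain AM* instructions, with the acquire/release forms mapped onto the fully ordered variant. As a sketch, ATOMIC_OP_RETURN(add, i, add, +, _db, ) expands to roughly the following (paraphrased preprocessor output, not copied from the tree):

    static inline int arch_atomic_add_return(int i, atomic_t *v)
    {
            int result;

            __asm__ __volatile__(
            "amadd_db.w %1, %2, %0 \n"    /* fetch-and-add, full barrier */
            : "+ZB" (v->counter), "=&r" (result)
            : "r" (i)
            : "memory");

            return result + i;            /* old value + i = new value */
    }

The (..., , _relaxed) instantiation is identical except that it emits plain amadd.w and is named arch_atomic_add_return_relaxed.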
13 changes: 13 additions & 0 deletions arch/loongarch/include/asm/inst.h
@@ -65,6 +65,8 @@ enum reg2_op {
 	revbd_op = 0x0f,
 	revh2w_op = 0x10,
 	revhd_op = 0x11,
+	extwh_op = 0x16,
+	extwb_op = 0x17,
 	iocsrrdb_op = 0x19200,
 	iocsrrdh_op = 0x19201,
 	iocsrrdw_op = 0x19202,
@@ -572,6 +574,8 @@ static inline void emit_##NAME(union loongarch_instruction *insn, \
 DEF_EMIT_REG2_FORMAT(revb2h, revb2h_op)
 DEF_EMIT_REG2_FORMAT(revb2w, revb2w_op)
 DEF_EMIT_REG2_FORMAT(revbd, revbd_op)
+DEF_EMIT_REG2_FORMAT(extwh, extwh_op)
+DEF_EMIT_REG2_FORMAT(extwb, extwb_op)
 
 #define DEF_EMIT_REG2I5_FORMAT(NAME, OP) \
 static inline void emit_##NAME(union loongarch_instruction *insn, \
@@ -623,6 +627,9 @@ DEF_EMIT_REG2I12_FORMAT(lu52id, lu52id_op)
 DEF_EMIT_REG2I12_FORMAT(andi, andi_op)
 DEF_EMIT_REG2I12_FORMAT(ori, ori_op)
 DEF_EMIT_REG2I12_FORMAT(xori, xori_op)
+DEF_EMIT_REG2I12_FORMAT(ldb, ldb_op)
+DEF_EMIT_REG2I12_FORMAT(ldh, ldh_op)
+DEF_EMIT_REG2I12_FORMAT(ldw, ldw_op)
 DEF_EMIT_REG2I12_FORMAT(ldbu, ldbu_op)
 DEF_EMIT_REG2I12_FORMAT(ldhu, ldhu_op)
 DEF_EMIT_REG2I12_FORMAT(ldwu, ldwu_op)
@@ -701,9 +708,12 @@ static inline void emit_##NAME(union loongarch_instruction *insn, \
 	insn->reg3_format.rk = rk; \
 }
 
+DEF_EMIT_REG3_FORMAT(addw, addw_op)
 DEF_EMIT_REG3_FORMAT(addd, addd_op)
 DEF_EMIT_REG3_FORMAT(subd, subd_op)
 DEF_EMIT_REG3_FORMAT(muld, muld_op)
+DEF_EMIT_REG3_FORMAT(divd, divd_op)
+DEF_EMIT_REG3_FORMAT(modd, modd_op)
 DEF_EMIT_REG3_FORMAT(divdu, divdu_op)
 DEF_EMIT_REG3_FORMAT(moddu, moddu_op)
 DEF_EMIT_REG3_FORMAT(and, and_op)
@@ -715,6 +725,9 @@ DEF_EMIT_REG3_FORMAT(srlw, srlw_op)
 DEF_EMIT_REG3_FORMAT(srld, srld_op)
 DEF_EMIT_REG3_FORMAT(sraw, sraw_op)
 DEF_EMIT_REG3_FORMAT(srad, srad_op)
+DEF_EMIT_REG3_FORMAT(ldxb, ldxb_op)
+DEF_EMIT_REG3_FORMAT(ldxh, ldxh_op)
+DEF_EMIT_REG3_FORMAT(ldxw, ldxw_op)
 DEF_EMIT_REG3_FORMAT(ldxbu, ldxbu_op)
 DEF_EMIT_REG3_FORMAT(ldxhu, ldxhu_op)
 DEF_EMIT_REG3_FORMAT(ldxwu, ldxwu_op)
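These opcodes and emit_* helpers are groundwork for the BPF CPU v4 patches in this pull: ext.w.{b,h} backs the sign-extension mov instructions, ld.{b,h,w} and ldx.{b,h,w} back the sign-extending loads, and div.d/mod.d back the signed division instructions. A usage sketch (illustrative register and offset choices, not lifted from the JIT source):

    union loongarch_instruction insn;

    /* sign-extension mov, 8-bit: t0 = (s8)t1 */
    emit_extwb(&insn, LOONGARCH_GPR_T0, LOONGARCH_GPR_T1);

    /* sign-extending load, 16-bit: t0 = *(s16 *)(t1 + 8) */
    emit_ldh(&insn, LOONGARCH_GPR_T0, LOONGARCH_GPR_T1, 8);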
10 changes: 5 additions & 5 deletions arch/loongarch/include/asm/percpu.h
@@ -32,7 +32,7 @@ static inline void set_my_cpu_offset(unsigned long off)
 #define __my_cpu_offset __my_cpu_offset
 
 #define PERCPU_OP(op, asm_op, c_op) \
-static inline unsigned long __percpu_##op(void *ptr, \
+static __always_inline unsigned long __percpu_##op(void *ptr, \
 			unsigned long val, int size) \
 { \
 	unsigned long ret; \
@@ -63,7 +63,7 @@ PERCPU_OP(and, and, &)
 PERCPU_OP(or, or, |)
 #undef PERCPU_OP
 
-static inline unsigned long __percpu_read(void *ptr, int size)
+static __always_inline unsigned long __percpu_read(void *ptr, int size)
 {
 	unsigned long ret;
 
@@ -100,7 +100,7 @@ static inline unsigned long __percpu_read(void *ptr, int size)
 	return ret;
 }
 
-static inline void __percpu_write(void *ptr, unsigned long val, int size)
+static __always_inline void __percpu_write(void *ptr, unsigned long val, int size)
 {
 	switch (size) {
 	case 1:
@@ -132,8 +132,8 @@ static inline void __percpu_write(void *ptr, unsigned long val, int size)
 	}
 }
 
-static inline unsigned long __percpu_xchg(void *ptr, unsigned long val,
-						int size)
+static __always_inline unsigned long __percpu_xchg(void *ptr, unsigned long val,
+						int size)
 {
 	switch (size) {
 	case 1:
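Plain inline is only a hint the compiler may ignore; __always_inline (essentially inline __attribute__((__always_inline__)), per include/linux/compiler_types.h) forces inlining. That matters for these helpers because they are written to be folded away: size is a compile-time constant at every call site, so after inlining each switch collapses to a single memory access. A sketch of a call site (hypothetical variable name):

    /* this_cpu_read(counter) eventually funnels into something like: */
    val = __percpu_read(&counter, sizeof(counter));
    /* Inlined, sizeof(counter) is constant and only one switch arm
     * survives; an out-of-line copy would keep the whole switch plus
     * a real function call on every per-CPU access. */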
3 changes: 2 additions & 1 deletion arch/loongarch/kernel/smp.c
@@ -504,8 +504,9 @@ asmlinkage void start_secondary(void)
 	unsigned int cpu;
 
 	sync_counter();
-	cpu = smp_processor_id();
+	cpu = raw_smp_processor_id();
 	set_my_cpu_offset(per_cpu_offset(cpu));
+	rcutree_report_cpu_starting(cpu);
 
 	cpu_probe();
 	constant_clockevent_init();
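Two coupled changes here: raw_smp_processor_id() is used because the checked smp_processor_id() can invoke debug machinery too early in bring-up, and rcutree_report_cpu_starting() is hoisted so that RCU knows about the CPU before anything running on it (lockdep, tracing) uses RCU. The resulting order, annotated (comments are editorial, not from the source):

    sync_counter();
    cpu = raw_smp_processor_id();            /* safe this early in bring-up */
    set_my_cpu_offset(per_cpu_offset(cpu));  /* per-CPU variables usable from here */
    rcutree_report_cpu_starting(cpu);        /* report to RCU before any RCU user runs */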
(7 more changed files not shown)
