Skip to content

Commit

Permalink
bpf, x86: Support load-acquire and store-release instructions
Browse files Browse the repository at this point in the history
Recently we introduced BPF load-acquire (BPF_LOAD_ACQ) and store-release
(BPF_STORE_REL) instructions.  For x86-64, simply implement them as
regular BPF_LDX/BPF_STX loads and stores.  The verifier always rejects
misaligned load-acquires/store-releases (even if BPF_F_ANY_ALIGNMENT is
set), so emitted MOV* instructions are guaranteed to be atomic.

Arena accesses are supported.  8- and 16-bit load-acquires are
zero-extending (i.e., MOVZBQ, MOVZWQ).

Rename emit_atomic{,_index}() to emit_atomic_rmw{,_index}() to make it
clear that they only handle read-modify-write atomics, and extend their
@atomic_op parameter from u8 to u32, since we are starting to use more
than the lowest 8 bits of the 'imm' field.

Signed-off-by: Peilin Ye <yepeilin@google.com>
Link: https://lore.kernel.org/r/d22bb3c69f126af1d962b7314f3489eff606a3b7.1741049567.git.yepeilin@google.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
  • Loading branch information
Peilin Ye authored and Alexei Starovoitov committed Mar 15, 2025
1 parent 9bb1236 commit 5341c9a
Showing 1 changed file with 82 additions and 17 deletions.
99 changes: 82 additions & 17 deletions arch/x86/net/bpf_jit_comp.c
Original file line number Diff line number Diff line change
Expand Up @@ -1242,8 +1242,8 @@ static void emit_st_r12(u8 **pprog, u32 size, u32 dst_reg, int off, int imm)
emit_st_index(pprog, size, dst_reg, X86_REG_R12, off, imm);
}

static int emit_atomic(u8 **pprog, u8 atomic_op,
u32 dst_reg, u32 src_reg, s16 off, u8 bpf_size)
static int emit_atomic_rmw(u8 **pprog, u32 atomic_op,
u32 dst_reg, u32 src_reg, s16 off, u8 bpf_size)
{
u8 *prog = *pprog;

Expand Down Expand Up @@ -1283,8 +1283,9 @@ static int emit_atomic(u8 **pprog, u8 atomic_op,
return 0;
}

static int emit_atomic_index(u8 **pprog, u8 atomic_op, u32 size,
u32 dst_reg, u32 src_reg, u32 index_reg, int off)
static int emit_atomic_rmw_index(u8 **pprog, u32 atomic_op, u32 size,
u32 dst_reg, u32 src_reg, u32 index_reg,
int off)
{
u8 *prog = *pprog;

Expand All @@ -1297,7 +1298,7 @@ static int emit_atomic_index(u8 **pprog, u8 atomic_op, u32 size,
EMIT1(add_3mod(0x48, dst_reg, src_reg, index_reg));
break;
default:
pr_err("bpf_jit: 1 and 2 byte atomics are not supported\n");
pr_err("bpf_jit: 1- and 2-byte RMW atomics are not supported\n");
return -EFAULT;
}

Expand Down Expand Up @@ -1331,6 +1332,49 @@ static int emit_atomic_index(u8 **pprog, u8 atomic_op, u32 size,
return 0;
}

/*
 * JIT a BPF atomic load-acquire or store-release (non-arena form).
 *
 * @pprog:     in/out pointer to the JIT output cursor
 * @atomic_op: BPF_LOAD_ACQ or BPF_STORE_REL (from insn->imm)
 * @dst_reg:   BPF destination register
 * @src_reg:   BPF source register
 * @off:       16-bit signed displacement from the BPF instruction
 * @bpf_size:  BPF_B/BPF_H/BPF_W/BPF_DW operand size
 *
 * On x86-64, plain aligned MOVs already provide acquire/release
 * ordering, so these lower to the regular LDX/STX emitters.
 * Returns 0 on success, -EFAULT on an unrecognized opcode.
 */
static int emit_atomic_ld_st(u8 **pprog, u32 atomic_op, u32 dst_reg,
			     u32 src_reg, s16 off, u8 bpf_size)
{
	if (atomic_op == BPF_LOAD_ACQ) {
		/* dst_reg = smp_load_acquire(src_reg + off16) */
		emit_ldx(pprog, bpf_size, dst_reg, src_reg, off);
		return 0;
	}

	if (atomic_op == BPF_STORE_REL) {
		/* smp_store_release(dst_reg + off16, src_reg) */
		emit_stx(pprog, bpf_size, dst_reg, src_reg, off);
		return 0;
	}

	pr_err("bpf_jit: unknown atomic load/store opcode %02x\n",
	       atomic_op);
	return -EFAULT;
}

/*
 * JIT a BPF atomic load-acquire or store-release using a base +
 * index addressing mode (arena accesses, where @index_reg holds the
 * arena base, typically X86_REG_R12).
 *
 * @pprog:     in/out pointer to the JIT output cursor
 * @atomic_op: BPF_LOAD_ACQ or BPF_STORE_REL (from insn->imm)
 * @size:      BPF_B/BPF_H/BPF_W/BPF_DW operand size
 * @dst_reg:   BPF destination register
 * @src_reg:   BPF source register
 * @index_reg: x86 register added to the effective address
 * @off:       signed displacement from the BPF instruction
 *
 * Lowers to the indexed LDX/STX emitters; aligned x86-64 MOVs are
 * sufficient for acquire/release semantics.  Returns 0 on success,
 * -EFAULT on an unrecognized opcode.
 */
static int emit_atomic_ld_st_index(u8 **pprog, u32 atomic_op, u32 size,
				   u32 dst_reg, u32 src_reg, u32 index_reg,
				   int off)
{
	if (atomic_op == BPF_LOAD_ACQ) {
		/* dst_reg = smp_load_acquire(src_reg + idx_reg + off16) */
		emit_ldx_index(pprog, size, dst_reg, src_reg, index_reg, off);
		return 0;
	}

	if (atomic_op == BPF_STORE_REL) {
		/* smp_store_release(dst_reg + idx_reg + off16, src_reg) */
		emit_stx_index(pprog, size, dst_reg, src_reg, index_reg, off);
		return 0;
	}

	pr_err("bpf_jit: unknown atomic load/store opcode %02x\n",
	       atomic_op);
	return -EFAULT;
}

#define DONT_CLEAR 1

bool ex_handler_bpf(const struct exception_table_entry *x, struct pt_regs *regs)
Expand Down Expand Up @@ -2113,6 +2157,13 @@ st: if (is_imm8(insn->off))
}
break;

case BPF_STX | BPF_ATOMIC | BPF_B:
case BPF_STX | BPF_ATOMIC | BPF_H:
if (!bpf_atomic_is_load_store(insn)) {
pr_err("bpf_jit: 1- and 2-byte RMW atomics are not supported\n");
return -EFAULT;
}
fallthrough;
case BPF_STX | BPF_ATOMIC | BPF_W:
case BPF_STX | BPF_ATOMIC | BPF_DW:
if (insn->imm == (BPF_AND | BPF_FETCH) ||
Expand Down Expand Up @@ -2148,10 +2199,10 @@ st: if (is_imm8(insn->off))
EMIT2(simple_alu_opcodes[BPF_OP(insn->imm)],
add_2reg(0xC0, AUX_REG, real_src_reg));
/* Attempt to swap in new value */
err = emit_atomic(&prog, BPF_CMPXCHG,
real_dst_reg, AUX_REG,
insn->off,
BPF_SIZE(insn->code));
err = emit_atomic_rmw(&prog, BPF_CMPXCHG,
real_dst_reg, AUX_REG,
insn->off,
BPF_SIZE(insn->code));
if (WARN_ON(err))
return err;
/*
Expand All @@ -2166,17 +2217,35 @@ st: if (is_imm8(insn->off))
break;
}

err = emit_atomic(&prog, insn->imm, dst_reg, src_reg,
insn->off, BPF_SIZE(insn->code));
if (bpf_atomic_is_load_store(insn))
err = emit_atomic_ld_st(&prog, insn->imm, dst_reg, src_reg,
insn->off, BPF_SIZE(insn->code));
else
err = emit_atomic_rmw(&prog, insn->imm, dst_reg, src_reg,
insn->off, BPF_SIZE(insn->code));
if (err)
return err;
break;

case BPF_STX | BPF_PROBE_ATOMIC | BPF_B:
case BPF_STX | BPF_PROBE_ATOMIC | BPF_H:
if (!bpf_atomic_is_load_store(insn)) {
pr_err("bpf_jit: 1- and 2-byte RMW atomics are not supported\n");
return -EFAULT;
}
fallthrough;
case BPF_STX | BPF_PROBE_ATOMIC | BPF_W:
case BPF_STX | BPF_PROBE_ATOMIC | BPF_DW:
start_of_ldx = prog;
err = emit_atomic_index(&prog, insn->imm, BPF_SIZE(insn->code),
dst_reg, src_reg, X86_REG_R12, insn->off);

if (bpf_atomic_is_load_store(insn))
err = emit_atomic_ld_st_index(&prog, insn->imm,
BPF_SIZE(insn->code), dst_reg,
src_reg, X86_REG_R12, insn->off);
else
err = emit_atomic_rmw_index(&prog, insn->imm, BPF_SIZE(insn->code),
dst_reg, src_reg, X86_REG_R12,
insn->off);
if (err)
return err;
goto populate_extable;
Expand Down Expand Up @@ -3771,12 +3840,8 @@ bool bpf_jit_supports_insn(struct bpf_insn *insn, bool in_arena)
if (!in_arena)
return true;
switch (insn->code) {
case BPF_STX | BPF_ATOMIC | BPF_B:
case BPF_STX | BPF_ATOMIC | BPF_H:
case BPF_STX | BPF_ATOMIC | BPF_W:
case BPF_STX | BPF_ATOMIC | BPF_DW:
if (bpf_atomic_is_load_store(insn))
return false;
if (insn->imm == (BPF_AND | BPF_FETCH) ||
insn->imm == (BPF_OR | BPF_FETCH) ||
insn->imm == (BPF_XOR | BPF_FETCH))
Expand Down

0 comments on commit 5341c9a

Please sign in to comment.