Merge branch 'arm-bpf-fixes'
Nicolas Schichan says:

====================
BPF JIT fixes for ARM

These patches fix bugs in the ARM BPF JIT and should probably find their
way into a stable kernel. All 60 test_bpf tests in the Linux 4.1 release
now pass (previously 54 of 60 passed).
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller committed Jul 22, 2015
2 parents 89e478a + c18fe54 commit 7c8cbac
Showing 1 changed file: arch/arm/net/bpf_jit_32.c (44 additions, 13 deletions)
@@ -74,32 +74,52 @@ struct jit_ctx {
 
 int bpf_jit_enable __read_mostly;
 
-static u64 jit_get_skb_b(struct sk_buff *skb, unsigned offset)
+static inline int call_neg_helper(struct sk_buff *skb, int offset, void *ret,
+                                  unsigned int size)
+{
+        void *ptr = bpf_internal_load_pointer_neg_helper(skb, offset, size);
+
+        if (!ptr)
+                return -EFAULT;
+        memcpy(ret, ptr, size);
+        return 0;
+}
+
+static u64 jit_get_skb_b(struct sk_buff *skb, int offset)
 {
         u8 ret;
         int err;
 
-        err = skb_copy_bits(skb, offset, &ret, 1);
+        if (offset < 0)
+                err = call_neg_helper(skb, offset, &ret, 1);
+        else
+                err = skb_copy_bits(skb, offset, &ret, 1);
 
         return (u64)err << 32 | ret;
 }
 
-static u64 jit_get_skb_h(struct sk_buff *skb, unsigned offset)
+static u64 jit_get_skb_h(struct sk_buff *skb, int offset)
 {
         u16 ret;
         int err;
 
-        err = skb_copy_bits(skb, offset, &ret, 2);
+        if (offset < 0)
+                err = call_neg_helper(skb, offset, &ret, 2);
+        else
+                err = skb_copy_bits(skb, offset, &ret, 2);
 
         return (u64)err << 32 | ntohs(ret);
 }
 
-static u64 jit_get_skb_w(struct sk_buff *skb, unsigned offset)
+static u64 jit_get_skb_w(struct sk_buff *skb, int offset)
 {
         u32 ret;
         int err;
 
-        err = skb_copy_bits(skb, offset, &ret, 4);
+        if (offset < 0)
+                err = call_neg_helper(skb, offset, &ret, 4);
+        else
+                err = skb_copy_bits(skb, offset, &ret, 4);
 
         return (u64)err << 32 | ntohl(ret);
 }
@@ -536,9 +556,6 @@ static int build_body(struct jit_ctx *ctx)
                case BPF_LD | BPF_B | BPF_ABS:
                        load_order = 0;
load:
-                       /* the interpreter will deal with the negative K */
-                       if ((int)k < 0)
-                               return -ENOTSUPP;
                        emit_mov_i(r_off, k, ctx);
load_common:
                        ctx->seen |= SEEN_DATA | SEEN_CALL;
@@ -547,12 +564,24 @@ static int build_body(struct jit_ctx *ctx)
                                emit(ARM_SUB_I(r_scratch, r_skb_hl,
                                               1 << load_order), ctx);
                                emit(ARM_CMP_R(r_scratch, r_off), ctx);
-                               condt = ARM_COND_HS;
+                               condt = ARM_COND_GE;
                        } else {
                                emit(ARM_CMP_R(r_skb_hl, r_off), ctx);
                                condt = ARM_COND_HI;
                        }
 
+                       /*
+                        * Test for negative offset, only if we are
+                        * currently scheduled to take the fast
+                        * path. This will update the flags so that
+                        * the slowpath instructions are ignored if the
+                        * offset is negative.
+                        *
+                        * For load_order == 0 the HI condition will
+                        * make loads at offset 0 take the slow path too.
+                        */
+                       _emit(condt, ARM_CMP_I(r_off, 0), ctx);
+
                        _emit(condt, ARM_ADD_R(r_scratch, r_off, r_skb_data),
                              ctx);
 
@@ -860,9 +889,11 @@ static int build_body(struct jit_ctx *ctx)
                        off = offsetof(struct sk_buff, vlan_tci);
                        emit(ARM_LDRH_I(r_A, r_skb, off), ctx);
                        if (code == (BPF_ANC | SKF_AD_VLAN_TAG))
-                               OP_IMM3(ARM_AND, r_A, r_A, VLAN_VID_MASK, ctx);
-                       else
-                               OP_IMM3(ARM_AND, r_A, r_A, VLAN_TAG_PRESENT, ctx);
+                               OP_IMM3(ARM_AND, r_A, r_A, ~VLAN_TAG_PRESENT, ctx);
+                       else {
+                               OP_IMM3(ARM_LSR, r_A, r_A, 12, ctx);
+                               OP_IMM3(ARM_AND, r_A, r_A, 0x1, ctx);
+                       }
                        break;
                case BPF_ANC | SKF_AD_QUEUE:
                        ctx->seen |= SEEN_SKB;
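For reference, vlan_tci packs the priority bits, the tag-present flag, and the VLAN ID into one 16-bit field: in kernels of this era VLAN_TAG_PRESENT is bit 12 (0x1000) and VLAN_VID_MASK is 0x0fff. The old code thus stripped the priority bits from SKF_AD_VLAN_TAG and returned 0x1000 rather than 1 for SKF_AD_VLAN_TAG_PRESENT. A standalone sketch of the intended extraction, with constants copied from linux/if_vlan.h and an arbitrary sample value:

#include <stdio.h>
#include <stdint.h>

#define VLAN_TAG_PRESENT 0x1000         /* bit 12 */
#define VLAN_VID_MASK    0x0fff

int main(void)
{
        uint16_t tci = 0x3064;  /* priority 1, present bit set, VID 0x64 */

        /* SKF_AD_VLAN_TAG: everything except the present bit,
         * matching the interpreter's tci & ~VLAN_TAG_PRESENT */
        printf("vlan_tag = 0x%04x\n",
               (unsigned int)(tci & (uint16_t)~VLAN_TAG_PRESENT));
        /* SKF_AD_VLAN_TAG_PRESENT: bit 12 as 0/1, which the JIT now
         * computes with LSR #12 followed by AND #1 */
        printf("present  = %u\n", (tci >> 12) & 1);
        return 0;
}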
