bpf: track stack depth of classic bpf programs
To track the stack depth of classic BPF programs we only need
to analyze ST|STX instructions, since check_load_and_stores()
verifies that programs can load from the stack only after a write.
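
In other words, the depth calculation reduces to a max() over the slot
indices used by ST|STX. A rough user-space sketch of the same idea (a
hypothetical helper, not the kernel code itself; the struct and function
names below are illustrative only):

	#include <stdint.h>

	/* classic BPF instruction layout (mirrors struct sock_filter) */
	struct cbpf_insn {
		uint16_t code;
		uint8_t  jt, jf;
		uint32_t k;
	};

	#define BPF_CLASS(code)	((code) & 0x07)
	#define BPF_ST	0x02
	#define BPF_STX	0x03

	/* Return the eBPF stack depth a converted program would need,
	 * assuming slot k is mapped to FP - (k * 4 + 4) as in this patch.
	 * Loads can be ignored because the classic checker only accepts
	 * a load from a slot that has already been written.
	 */
	static uint32_t cbpf_stack_depth(const struct cbpf_insn *prog, int len)
	{
		uint32_t depth = 0;

		for (int i = 0; i < len; i++) {
			if (BPF_CLASS(prog[i].code) == BPF_ST ||
			    BPF_CLASS(prog[i].code) == BPF_STX) {
				uint32_t stack_off = prog[i].k * 4 + 4;

				if (depth < stack_off)
					depth = stack_off;
			}
		}
		return depth;
	}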

We also need to change the way cBPF stack slots map to the eBPF stack:
typical classic programs use slots 0 and 1, so these need to map to
stack offsets -4 and -8 respectively in order to take advantage of the
small-stack interpreter and JITs.
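
For reference, the old mapping placed slot k at -(BPF_MEMWORDS - k) * 4
below the frame pointer (slot 0 at -64 with BPF_MEMWORDS == 16), while the
new mapping uses -(k * 4 + 4). A purely illustrative comparison:

	#define BPF_MEMWORDS	16	/* number of classic BPF scratch slots */

	/* old mapping: slot 0 was the deepest word, at FP - 64 */
	static int old_stack_off(int k)
	{
		return -(BPF_MEMWORDS - k) * 4;
	}

	/* new mapping: slot 0 sits right below FP, at FP - 4 */
	static int new_stack_off(int k)
	{
		return -(k * 4 + 4);
	}

	/* old_stack_off(0) == -64, old_stack_off(1) == -60
	 * new_stack_off(0) ==  -4, new_stack_off(1) ==  -8
	 * A filter touching only slots 0 and 1 can now report a stack_depth
	 * of 8 bytes instead of implying the full 64-byte scratch area.
	 */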

Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Daniel Borkmann <daniel@iogearbox.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
Alexei Starovoitov authored and David S. Miller committed May 31, 2017
1 parent 80a58d0 commit 50bbfed
Showing 1 changed file with 22 additions and 14 deletions: net/core/filter.c
@@ -352,7 +352,7 @@ static bool convert_bpf_extensions(struct sock_filter *fp,
* bpf_convert_filter - convert filter program
* @prog: the user passed filter program
* @len: the length of the user passed filter program
- * @new_prog: buffer where converted program will be stored
+ * @new_prog: allocated 'struct bpf_prog' or NULL
* @new_len: pointer to store length of converted program
*
* Remap 'sock_filter' style classic BPF (cBPF) instruction set to 'bpf_insn'
@@ -364,14 +364,13 @@ static bool convert_bpf_extensions(struct sock_filter *fp,
*
* 2) 2nd pass to remap in two passes: 1st pass finds new
* jump offsets, 2nd pass remapping:
- new_prog = kmalloc(sizeof(struct bpf_insn) * new_len);
* bpf_convert_filter(old_prog, old_len, new_prog, &new_len);
*/
static int bpf_convert_filter(struct sock_filter *prog, int len,
- struct bpf_insn *new_prog, int *new_len)
+ struct bpf_prog *new_prog, int *new_len)
{
- int new_flen = 0, pass = 0, target, i;
- struct bpf_insn *new_insn;
+ int new_flen = 0, pass = 0, target, i, stack_off;
+ struct bpf_insn *new_insn, *first_insn = NULL;
struct sock_filter *fp;
int *addrs = NULL;
u8 bpf_src;
@@ -383,18 +382,19 @@ static int bpf_convert_filter(struct sock_filter *prog, int len,
return -EINVAL;

if (new_prog) {
+ first_insn = new_prog->insnsi;
addrs = kcalloc(len, sizeof(*addrs),
GFP_KERNEL | __GFP_NOWARN);
if (!addrs)
return -ENOMEM;
}

do_pass:
- new_insn = new_prog;
+ new_insn = first_insn;
fp = prog;

/* Classic BPF related prologue emission. */
- if (new_insn) {
+ if (new_prog) {
/* Classic BPF expects A and X to be reset first. These need
* to be guaranteed to be the first two instructions.
*/
@@ -415,7 +415,7 @@ static int bpf_convert_filter(struct sock_filter *prog, int len,
struct bpf_insn *insn = tmp_insns;

if (addrs)
- addrs[i] = new_insn - new_prog;
+ addrs[i] = new_insn - first_insn;

switch (fp->code) {
/* All arithmetic insns and skb loads map as-is. */
@@ -561,17 +561,25 @@ static int bpf_convert_filter(struct sock_filter *prog, int len,
/* Store to stack. */
case BPF_ST:
case BPF_STX:
+ stack_off = fp->k * 4 + 4;
*insn = BPF_STX_MEM(BPF_W, BPF_REG_FP, BPF_CLASS(fp->code) ==
BPF_ST ? BPF_REG_A : BPF_REG_X,
- -(BPF_MEMWORDS - fp->k) * 4);
+ -stack_off);
+ /* check_load_and_stores() verifies that classic BPF can
+ * load from stack only after write, so tracking
+ * stack_depth for ST|STX insns is enough
+ */
+ if (new_prog && new_prog->aux->stack_depth < stack_off)
+ new_prog->aux->stack_depth = stack_off;
break;

/* Load from stack. */
case BPF_LD | BPF_MEM:
case BPF_LDX | BPF_MEM:
+ stack_off = fp->k * 4 + 4;
*insn = BPF_LDX_MEM(BPF_W, BPF_CLASS(fp->code) == BPF_LD ?
BPF_REG_A : BPF_REG_X, BPF_REG_FP,
- -(BPF_MEMWORDS - fp->k) * 4);
+ -stack_off);
break;

/* A = K or X = K */
@@ -619,13 +627,13 @@ static int bpf_convert_filter(struct sock_filter *prog, int len,

if (!new_prog) {
/* Only calculating new length. */
- *new_len = new_insn - new_prog;
+ *new_len = new_insn - first_insn;
return 0;
}

pass++;
- if (new_flen != new_insn - new_prog) {
- new_flen = new_insn - new_prog;
+ if (new_flen != new_insn - first_insn) {
+ new_flen = new_insn - first_insn;
if (pass > 2)
goto err;
goto do_pass;
@@ -1017,7 +1025,7 @@ static struct bpf_prog *bpf_migrate_filter(struct bpf_prog *fp)
fp->len = new_len;

/* 2nd pass: remap sock_filter insns into bpf_insn insns. */
- err = bpf_convert_filter(old_prog, old_len, fp->insnsi, &new_len);
+ err = bpf_convert_filter(old_prog, old_len, fp, &new_len);
if (err)
/* 2nd bpf_convert_filter() can fail only if it fails
* to allocate memory, remapping must succeed. Note,
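
As a worked example (a hypothetical two-slot filter; BPF_REG_A maps to r0
and BPF_REG_FP to r10 in this converter), a classic program that stores A
to slot 1 and later loads it back would now convert roughly to:

	/* cBPF: st M[1]   ->  eBPF: *(u32 *)(r10 - 8) = r0
	 * cBPF: ld M[1]   ->  eBPF: r0 = *(u32 *)(r10 - 8)
	 *
	 * and new_prog->aux->stack_depth ends up as 8, so the interpreter
	 * and JITs can size the stack for roughly 8 bytes of scratch space
	 * instead of the full 64-byte BPF_MEMWORDS area.
	 */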
