Merge branch 'bpf-helper-improvements'
Daniel Borkmann says:

====================
BPF helper improvements

This set adds various BPF helper improvements: it cleans up and adds the
BPF_F_CURRENT_CPU flag for the tracing helper, enables preemption checks
on the bpf_get_smp_processor_id() helper, and adds two new helpers,
bpf_skb_change_{proto, type}, for tc-related programs. For further details
please see the individual patches.

Note that this set requires -net to be merged into the -net-next tree first.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller committed Jun 30, 2016
2 parents ee58b57 + d2485c4 commit 545c321
Showing 5 changed files with 275 additions and 21 deletions.
25 changes: 24 additions & 1 deletion include/uapi/linux/bpf.h
@@ -313,6 +313,29 @@ enum bpf_func_id {
*/
BPF_FUNC_skb_get_tunnel_opt,
BPF_FUNC_skb_set_tunnel_opt,

/**
* bpf_skb_change_proto(skb, proto, flags)
* Change protocol of the skb. Currently supported is
* v4 -> v6, v6 -> v4 transitions. The helper will also
* resize the skb. eBPF program is expected to fill the
* new headers via skb_store_bytes and lX_csum_replace.
* @skb: pointer to skb
* @proto: new skb->protocol type
* @flags: reserved
* Return: 0 on success or negative error
*/
BPF_FUNC_skb_change_proto,

/**
* bpf_skb_change_type(skb, type)
* Change packet type of skb.
* @skb: pointer to skb
* @type: new skb->pkt_type type
* Return: 0 on success or negative error
*/
BPF_FUNC_skb_change_type,

__BPF_FUNC_MAX_ID,
};

@@ -347,7 +370,7 @@ enum bpf_func_id {
#define BPF_F_ZERO_CSUM_TX (1ULL << 1)
#define BPF_F_DONT_FRAGMENT (1ULL << 2)

/* BPF_FUNC_perf_event_output flags. */
/* BPF_FUNC_perf_event_output and BPF_FUNC_perf_event_read flags. */
#define BPF_F_INDEX_MASK 0xffffffffULL
#define BPF_F_CURRENT_CPU BPF_F_INDEX_MASK

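As a rough usage sketch (not part of this commit), a tc classifier built in the samples/bpf style might invoke the two new helpers as below. The section name, the header contents and the omitted checksum fixup are illustrative assumptions; the wrappers simply cast the BPF_FUNC_* ids to function pointers as samples/bpf does.

#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>
#include <linux/ipv6.h>
#include <linux/pkt_cls.h>
#include <asm/byteorder.h>

/* samples/bpf-style wrappers: only the BPF_FUNC_* id must match the kernel. */
static int (*bpf_skb_change_proto)(void *ctx, __be16 proto, __u64 flags) =
	(void *) BPF_FUNC_skb_change_proto;
static int (*bpf_skb_change_type)(void *ctx, __u32 type) =
	(void *) BPF_FUNC_skb_change_type;
static int (*bpf_skb_store_bytes)(void *ctx, int off, const void *from,
				  int len, __u64 flags) =
	(void *) BPF_FUNC_skb_store_bytes;

__attribute__((section("classifier"), used))
int cls_v4_to_v6(struct __sk_buff *skb)
{
	/* Illustrative IPv6 header; addresses, payload length and the
	 * L4 checksum fixup via lX_csum_replace are omitted here.
	 */
	struct ipv6hdr ip6h = { .version = 6, .hop_limit = 64 };

	/* Convert the skb to IPv6 framing; flags are reserved, pass 0. */
	if (bpf_skb_change_proto(skb, __constant_htons(ETH_P_IPV6), 0) < 0)
		return TC_ACT_SHOT;

	/* The helper only resizes; the program writes the new header. */
	if (bpf_skb_store_bytes(skb, ETH_HLEN, &ip6h, sizeof(ip6h), 0) < 0)
		return TC_ACT_SHOT;

	/* And retag the packet type, e.g. as locally destined. */
	if (bpf_skb_change_type(skb, PACKET_HOST) < 0)
		return TC_ACT_SHOT;

	return TC_ACT_OK;
}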
3 changes: 1 addition & 2 deletions kernel/bpf/core.c
@@ -719,14 +719,13 @@ static unsigned int __bpf_prog_run(void *ctx, const struct bpf_insn *insn)

if (unlikely(index >= array->map.max_entries))
goto out;

if (unlikely(tail_call_cnt > MAX_TAIL_CALL_CNT))
goto out;

tail_call_cnt++;

prog = READ_ONCE(array->ptrs[index]);
if (unlikely(!prog))
if (!prog)
goto out;

/* ARG1 at this point is guaranteed to point to CTX from
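The interpreter hunk above is the receiving end of bpf_tail_call(): the index is bounds-checked against the prog array, the chain length is capped at MAX_TAIL_CALL_CNT, and an empty slot makes the call fall through. Below is a minimal sketch of the program side, assuming a samples/bpf-style loader that creates maps from the "maps" section; the map and program names are made up.

#include <linux/bpf.h>

/* Local copy of the samples/bpf map definition convention. */
struct bpf_map_def {
	unsigned int type;
	unsigned int key_size;
	unsigned int value_size;
	unsigned int max_entries;
};

struct bpf_map_def __attribute__((section("maps"), used)) jmp_table = {
	.type		= BPF_MAP_TYPE_PROG_ARRAY,
	.key_size	= sizeof(__u32),
	.value_size	= sizeof(__u32),
	.max_entries	= 8,
};

static void (*bpf_tail_call)(void *ctx, void *map, __u32 index) =
	(void *) BPF_FUNC_tail_call;

__attribute__((section("socket"), used))
int dispatch(struct __sk_buff *skb)
{
	/* Jump to the program installed in slot 1, if any. The interpreter
	 * code above enforces the bounds and MAX_TAIL_CALL_CNT checks; if
	 * the slot is empty, execution simply continues on the next line.
	 */
	bpf_tail_call(skb, &jmp_table, 1);

	return 0;	/* reached only when no tail call took place */
}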
2 changes: 1 addition & 1 deletion kernel/bpf/helpers.c
@@ -101,7 +101,7 @@ const struct bpf_func_proto bpf_get_prandom_u32_proto = {

static u64 bpf_get_smp_processor_id(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
return raw_smp_processor_id();
return smp_processor_id();
}

const struct bpf_func_proto bpf_get_smp_processor_id_proto = {
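For context (not part of the diff), a typical use of this helper is to index per-CPU state while the program runs with preemption disabled, which is exactly the invariant the smp_processor_id() debug check now exercises. A small sketch under the usual samples/bpf conventions; the map name and sizing are illustrative.

#include <linux/bpf.h>

struct bpf_map_def {
	unsigned int type;
	unsigned int key_size;
	unsigned int value_size;
	unsigned int max_entries;
};

/* One counter slot per possible CPU; 64 is an illustrative upper bound. */
struct bpf_map_def __attribute__((section("maps"), used)) hits = {
	.type		= BPF_MAP_TYPE_ARRAY,
	.key_size	= sizeof(__u32),
	.value_size	= sizeof(long),
	.max_entries	= 64,
};

static __u32 (*bpf_get_smp_processor_id)(void) =
	(void *) BPF_FUNC_get_smp_processor_id;
static void *(*bpf_map_lookup_elem)(void *map, const void *key) =
	(void *) BPF_FUNC_map_lookup_elem;

__attribute__((section("socket"), used))
int count_per_cpu(struct __sk_buff *skb)
{
	/* The key stays this CPU for the whole run because BPF programs
	 * execute with preemption disabled.
	 */
	__u32 key = bpf_get_smp_processor_id();
	long *val = bpf_map_lookup_elem(&hits, &key);

	if (val)
		__sync_fetch_and_add(val, 1);
	return 0;
}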
32 changes: 16 additions & 16 deletions kernel/trace/bpf_trace.c
@@ -188,30 +188,35 @@ const struct bpf_func_proto *bpf_get_trace_printk_proto(void)
return &bpf_trace_printk_proto;
}

static u64 bpf_perf_event_read(u64 r1, u64 index, u64 r3, u64 r4, u64 r5)
static u64 bpf_perf_event_read(u64 r1, u64 flags, u64 r3, u64 r4, u64 r5)
{
struct bpf_map *map = (struct bpf_map *) (unsigned long) r1;
struct bpf_array *array = container_of(map, struct bpf_array, map);
unsigned int cpu = smp_processor_id();
u64 index = flags & BPF_F_INDEX_MASK;
struct bpf_event_entry *ee;
struct perf_event *event;

if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
return -EINVAL;
if (index == BPF_F_CURRENT_CPU)
index = cpu;
if (unlikely(index >= array->map.max_entries))
return -E2BIG;

ee = READ_ONCE(array->ptrs[index]);
if (unlikely(!ee))
if (!ee)
return -ENOENT;

event = ee->event;
/* make sure event is local and doesn't have pmu::count */
if (event->oncpu != smp_processor_id() ||
event->pmu->count)
return -EINVAL;

if (unlikely(event->attr.type != PERF_TYPE_HARDWARE &&
event->attr.type != PERF_TYPE_RAW))
return -EINVAL;

/* make sure event is local and doesn't have pmu::count */
if (unlikely(event->oncpu != cpu || event->pmu->count))
return -EINVAL;

/*
* we don't know if the function is run successfully by the
* return value. It can be judged in other places, such as
@@ -233,6 +238,7 @@ static u64 bpf_perf_event_output(u64 r1, u64 r2, u64 flags, u64 r4, u64 size)
struct pt_regs *regs = (struct pt_regs *) (long) r1;
struct bpf_map *map = (struct bpf_map *) (long) r2;
struct bpf_array *array = container_of(map, struct bpf_array, map);
unsigned int cpu = smp_processor_id();
u64 index = flags & BPF_F_INDEX_MASK;
void *data = (void *) (long) r4;
struct perf_sample_data sample_data;
@@ -246,20 +252,20 @@ static u64 bpf_perf_event_output(u64 r1, u64 r2, u64 flags, u64 r4, u64 size)
if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
return -EINVAL;
if (index == BPF_F_CURRENT_CPU)
index = raw_smp_processor_id();
index = cpu;
if (unlikely(index >= array->map.max_entries))
return -E2BIG;

ee = READ_ONCE(array->ptrs[index]);
if (unlikely(!ee))
if (!ee)
return -ENOENT;

event = ee->event;
if (unlikely(event->attr.type != PERF_TYPE_SOFTWARE ||
event->attr.config != PERF_COUNT_SW_BPF_OUTPUT))
return -EINVAL;

if (unlikely(event->oncpu != smp_processor_id()))
if (unlikely(event->oncpu != cpu))
return -EOPNOTSUPP;

perf_sample_data_init(&sample_data, 0, 0);
@@ -354,18 +360,12 @@ static const struct bpf_func_proto *kprobe_prog_func_proto(enum bpf_func_id func
static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
enum bpf_reg_type *reg_type)
{
/* check bounds */
if (off < 0 || off >= sizeof(struct pt_regs))
return false;

/* only read is allowed */
if (type != BPF_READ)
return false;

/* disallow misaligned access */
if (off % size != 0)
return false;

return true;
}

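With the hunks above, both bpf_perf_event_read() and bpf_perf_event_output() accept BPF_F_CURRENT_CPU in place of an explicit index into the event array. Below is a hedged sketch of a kprobe program using the flag; it assumes a samples/bpf-style loader that populates the read map with hardware/raw counters and the output map with PERF_COUNT_SW_BPF_OUTPUT events, one per CPU, and all names are illustrative.

#include <linux/ptrace.h>
#include <linux/bpf.h>

struct bpf_map_def {
	unsigned int type;
	unsigned int key_size;
	unsigned int value_size;
	unsigned int max_entries;
};

/* The loader is assumed to fill slot N with the event opened for CPU N. */
struct bpf_map_def __attribute__((section("maps"), used)) counters = {
	.type		= BPF_MAP_TYPE_PERF_EVENT_ARRAY,	/* HW/RAW counters */
	.key_size	= sizeof(int),
	.value_size	= sizeof(__u32),
	.max_entries	= 64,
};
struct bpf_map_def __attribute__((section("maps"), used)) events = {
	.type		= BPF_MAP_TYPE_PERF_EVENT_ARRAY,	/* BPF_OUTPUT events */
	.key_size	= sizeof(int),
	.value_size	= sizeof(__u32),
	.max_entries	= 64,
};

static __u64 (*bpf_perf_event_read)(void *map, __u64 flags) =
	(void *) BPF_FUNC_perf_event_read;
static int (*bpf_perf_event_output)(void *ctx, void *map, __u64 flags,
				    void *data, __u64 size) =
	(void *) BPF_FUNC_perf_event_output;

__attribute__((section("kprobe/sys_write"), used))
int probe(struct pt_regs *ctx)
{
	struct { __u64 count; } sample;

	/* Read this CPU's counter without computing the index by hand. */
	sample.count = bpf_perf_event_read(&counters, BPF_F_CURRENT_CPU);

	/* Push the sample into this CPU's perf ring buffer. */
	bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
			      &sample, sizeof(sample));
	return 0;
}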
