bpf: try harder on clones when writing into skb
When we're dealing with clones and the area is not writable, try
harder and get a copy via pskb_expand_head(). Also replace the other
occurrences of this pattern in tc actions with the new
skb_try_make_writable().

Reported-by: Ashhad Sheikh <ashhadsheikh394@gmail.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
Daniel Borkmann authored and David S. Miller committed Feb 22, 2016
1 parent 21cafc1 commit 3697649
Showing 4 changed files with 24 additions and 28 deletions.
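Distilled from the hunks below, the caller-visible change looks like this (a sketch; offset and len stand in for each helper's own bounds):

	/* Before: a write into a cloned, not-safely-writable area failed outright. */
	if (unlikely(skb_cloned(skb) &&
		     !skb_clone_writable(skb, offset + len)))
		return -EFAULT;

	/* After: if the skb is a clone whose target area is shared,
	 * pskb_expand_head(skb, 0, 0, GFP_ATOMIC) unclones it into a private
	 * copy first; only a failed reallocation is still an error. */
	if (unlikely(skb_try_make_writable(skb, offset + len)))
		return -EFAULT;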
7 changes: 7 additions & 0 deletions include/linux/skbuff.h
@@ -2630,6 +2630,13 @@ static inline int skb_clone_writable(const struct sk_buff *skb, unsigned int len
 	       skb_headroom(skb) + len <= skb->hdr_len;
 }
 
+static inline int skb_try_make_writable(struct sk_buff *skb,
+					unsigned int write_len)
+{
+	return skb_cloned(skb) && !skb_clone_writable(skb, write_len) &&
+	       pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+}
+
 static inline int __skb_cow(struct sk_buff *skb, unsigned int headroom,
 			    int cloned)
 {
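Spelled out, the contract of the new helper (a restatement of the && chain above, not patch content):

	if (!skb_cloned(skb))
		return 0;	/* private skb: already writable */
	if (skb_clone_writable(skb, write_len))
		return 0;	/* cloned, but this span is safe to modify in place */
	return pskb_expand_head(skb, 0, 0, GFP_ATOMIC);	/* unclone; 0 on success */

A nonzero return thus means the skb is a shared clone and the atomic reallocation failed, the one case where writing would corrupt data seen by the other clone.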
19 changes: 10 additions & 9 deletions net/core/filter.c
@@ -1364,9 +1364,7 @@ static u64 bpf_skb_store_bytes(u64 r1, u64 r2, u64 r3, u64 r4, u64 flags)
 	 */
 	if (unlikely((u32) offset > 0xffff || len > sizeof(sp->buff)))
 		return -EFAULT;
-
-	if (unlikely(skb_cloned(skb) &&
-		     !skb_clone_writable(skb, offset + len)))
+	if (unlikely(skb_try_make_writable(skb, offset + len)))
 		return -EFAULT;
 
 	ptr = skb_header_pointer(skb, offset, len, sp->buff);
@@ -1439,9 +1437,7 @@ static u64 bpf_l3_csum_replace(u64 r1, u64 r2, u64 from, u64 to, u64 flags)
 		return -EINVAL;
 	if (unlikely((u32) offset > 0xffff))
 		return -EFAULT;
-
-	if (unlikely(skb_cloned(skb) &&
-		     !skb_clone_writable(skb, offset + sizeof(sum))))
+	if (unlikely(skb_try_make_writable(skb, offset + sizeof(sum))))
 		return -EFAULT;
 
 	ptr = skb_header_pointer(skb, offset, sizeof(sum), &sum);
@@ -1488,9 +1484,7 @@ static u64 bpf_l4_csum_replace(u64 r1, u64 r2, u64 from, u64 to, u64 flags)
 		return -EINVAL;
 	if (unlikely((u32) offset > 0xffff))
 		return -EFAULT;
-
-	if (unlikely(skb_cloned(skb) &&
-		     !skb_clone_writable(skb, offset + sizeof(sum))))
+	if (unlikely(skb_try_make_writable(skb, offset + sizeof(sum))))
 		return -EFAULT;
 
 	ptr = skb_header_pointer(skb, offset, sizeof(sum), &sum);
@@ -1734,6 +1728,13 @@ bool bpf_helper_changes_skb_data(void *func)
 		return true;
 	if (func == bpf_skb_vlan_pop)
 		return true;
+	if (func == bpf_skb_store_bytes)
+		return true;
+	if (func == bpf_l3_csum_replace)
+		return true;
+	if (func == bpf_l4_csum_replace)
+		return true;
+
 	return false;
 }
 
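bpf_helper_changes_skb_data() is how eBPF callers know a helper may reallocate skb->data; since the three converted helpers can now trigger pskb_expand_head(), cached packet-data pointers must be treated as invalidated after they return, hence their addition above. A minimal tc classifier exercising the converted bpf_skb_store_bytes() path (samples/bpf-style helper binding; the section name and build setup are assumptions, not part of this commit):

	#include <uapi/linux/bpf.h>

	/* samples/bpf-style binding of the helper by its function number */
	static int (*bpf_skb_store_bytes)(void *ctx, int off, void *from,
					  int len, int flags) =
		(void *) BPF_FUNC_skb_store_bytes;

	__attribute__((section("classifier"), used))
	int clear_tos(struct __sk_buff *skb)
	{
		__u8 tos = 0;

		/* Offset 15: 14-byte Ethernet header + 1 byte into the IPv4
		 * header, i.e. the TOS field. With this patch the store also
		 * succeeds on a cloned skb, provided pskb_expand_head() can
		 * make a private copy; otherwise the helper returns -EFAULT. */
		bpf_skb_store_bytes(skb, 15, &tos, sizeof(tos), 0);
		return 0;
	}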
8 changes: 2 additions & 6 deletions net/sched/act_csum.c
@@ -105,9 +105,7 @@ static void *tcf_csum_skb_nextlayer(struct sk_buff *skb,
 	int hl = ihl + jhl;
 
 	if (!pskb_may_pull(skb, ipl + ntkoff) || (ipl < hl) ||
-	    (skb_cloned(skb) &&
-	     !skb_clone_writable(skb, hl + ntkoff) &&
-	     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
+	    skb_try_make_writable(skb, hl + ntkoff))
 		return NULL;
 	else
 		return (void *)(skb_network_header(skb) + ihl);
@@ -365,9 +363,7 @@ static int tcf_csum_ipv4(struct sk_buff *skb, u32 update_flags)
 	}
 
 	if (update_flags & TCA_CSUM_UPDATE_FLAG_IPV4HDR) {
-		if (skb_cloned(skb) &&
-		    !skb_clone_writable(skb, sizeof(*iph) + ntkoff) &&
-		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
+		if (skb_try_make_writable(skb, sizeof(*iph) + ntkoff))
 			goto fail;
 
 		ip_send_check(ip_hdr(skb));
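The converted check in tcf_csum_skb_nextlayer() layers two independent guarantees before handing back a pointer into the packet; restated with annotations (the comments are an interpretation, not patch content):

	if (!pskb_may_pull(skb, ipl + ntkoff) ||	/* needed bytes must exist in linear data */
	    (ipl < hl) ||				/* claimed IP length must cover the next-layer header */
	    skb_try_make_writable(skb, hl + ntkoff))	/* get a private copy before the checksum rewrite */
		return NULL;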
18 changes: 5 additions & 13 deletions net/sched/act_nat.c
@@ -126,9 +126,7 @@ static int tcf_nat(struct sk_buff *skb, const struct tc_action *a,
 		addr = iph->daddr;
 
 	if (!((old_addr ^ addr) & mask)) {
-		if (skb_cloned(skb) &&
-		    !skb_clone_writable(skb, sizeof(*iph) + noff) &&
-		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
+		if (skb_try_make_writable(skb, sizeof(*iph) + noff))
 			goto drop;
 
 		new_addr &= mask;
@@ -156,9 +154,7 @@ static int tcf_nat(struct sk_buff *skb, const struct tc_action *a,
 		struct tcphdr *tcph;
 
 		if (!pskb_may_pull(skb, ihl + sizeof(*tcph) + noff) ||
-		    (skb_cloned(skb) &&
-		     !skb_clone_writable(skb, ihl + sizeof(*tcph) + noff) &&
-		     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
+		    skb_try_make_writable(skb, ihl + sizeof(*tcph) + noff))
 			goto drop;
 
 		tcph = (void *)(skb_network_header(skb) + ihl);
@@ -171,9 +167,7 @@ static int tcf_nat(struct sk_buff *skb, const struct tc_action *a,
 		struct udphdr *udph;
 
 		if (!pskb_may_pull(skb, ihl + sizeof(*udph) + noff) ||
-		    (skb_cloned(skb) &&
-		     !skb_clone_writable(skb, ihl + sizeof(*udph) + noff) &&
-		     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
+		    skb_try_make_writable(skb, ihl + sizeof(*udph) + noff))
 			goto drop;
 
 		udph = (void *)(skb_network_header(skb) + ihl);
@@ -213,10 +207,8 @@ static int tcf_nat(struct sk_buff *skb, const struct tc_action *a,
 		if ((old_addr ^ addr) & mask)
 			break;
 
-		if (skb_cloned(skb) &&
-		    !skb_clone_writable(skb, ihl + sizeof(*icmph) +
-					     sizeof(*iph) + noff) &&
-		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
+		if (skb_try_make_writable(skb, ihl + sizeof(*icmph) +
+					       sizeof(*iph) + noff))
 			goto drop;
 
 		icmph = (void *)(skb_network_header(skb) + ihl);
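In the ICMP case the writable span has to reach through the outer headers into the embedded IP header being rewritten; the length breaks down as follows (annotation, not patch content):

	/* noff             bytes in front of the network header
	 * + ihl            outer IPv4 header
	 * + sizeof(*icmph) ICMP header
	 * + sizeof(*iph)   embedded IP header whose address is rewritten */
	if (skb_try_make_writable(skb, ihl + sizeof(*icmph) +
				       sizeof(*iph) + noff))
		goto drop;	/* the tc action drops the packet rather than returning an error */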
