From ed8f5b478a3e7017816fd87cbc11a4b91b3da1e6 Mon Sep 17 00:00:00 2001
From: Daniel Borkmann
Date: Tue, 19 Mar 2013 06:39:30 +0000
Subject: [PATCH]

--- yaml ---
r: 368323
b: refs/heads/master
c: f77668dc25b27270fe589031b22c432c3462b1d8
h: refs/heads/master
i:
  368321: 327c105c4af58c710c6b42d146e991864c826b4a
  368319: 99fe734ea327423df37e04e7ce34bd630354345e
v: v3
---
 [refs]                          |  2 +-
 trunk/include/linux/skbuff.h    |  2 ++
 trunk/net/core/flow_dissector.c | 57 +++++++++++++++++++++++++++++++++
 3 files changed, 60 insertions(+), 1 deletion(-)

diff --git a/[refs] b/[refs]
index fc64a5248fc4..6a83369acea6 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 61816596d1c9026d0ecb20c44f90452c41596ffe
+refs/heads/master: f77668dc25b27270fe589031b22c432c3462b1d8
diff --git a/trunk/include/linux/skbuff.h b/trunk/include/linux/skbuff.h
index b66ecc6ef102..497412165b1c 100644
--- a/trunk/include/linux/skbuff.h
+++ b/trunk/include/linux/skbuff.h
@@ -2840,6 +2840,8 @@ static inline void skb_checksum_none_assert(const struct sk_buff *skb)
 
 bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off);
 
+u32 __skb_get_poff(const struct sk_buff *skb);
+
 /**
  * skb_head_is_locked - Determine if the skb->head is locked down
  * @skb: skb to check
diff --git a/trunk/net/core/flow_dissector.c b/trunk/net/core/flow_dissector.c
index f4be293bab9e..00ee068efc1c 100644
--- a/trunk/net/core/flow_dissector.c
+++ b/trunk/net/core/flow_dissector.c
@@ -5,6 +5,10 @@
 #include <linux/if_vlan.h>
 #include <net/ip.h>
 #include <linux/if_tunnel.h>
+#include <linux/igmp.h>
+#include <linux/icmp.h>
+#include <linux/sctp.h>
+#include <linux/dccp.h>
 #include <linux/if_pppox.h>
 #include <linux/ppp_defs.h>
 #include <net/flow_keys.h>
@@ -228,6 +232,59 @@ u16 __skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb,
 }
 EXPORT_SYMBOL(__skb_tx_hash);
 
+/* __skb_get_poff() returns the offset to the payload as far as it could
+ * be dissected. The main user is currently BPF, so that we can dynamically
+ * truncate packets without needing to push actual payload to the user
+ * space and can analyze headers only, instead.
+ */
+u32 __skb_get_poff(const struct sk_buff *skb)
+{
+	struct flow_keys keys;
+	u32 poff = 0;
+
+	if (!skb_flow_dissect(skb, &keys))
+		return 0;
+
+	poff += keys.thoff;
+	switch (keys.ip_proto) {
+	case IPPROTO_TCP: {
+		const struct tcphdr *tcph;
+		struct tcphdr _tcph;
+
+		tcph = skb_header_pointer(skb, poff, sizeof(_tcph), &_tcph);
+		if (!tcph)
+			return poff;
+
+		poff += max_t(u32, sizeof(struct tcphdr), tcph->doff * 4);
+		break;
+	}
+	case IPPROTO_UDP:
+	case IPPROTO_UDPLITE:
+		poff += sizeof(struct udphdr);
+		break;
+	/* For the rest, we do not really care about header
+	 * extensions at this point for now.
+	 */
+	case IPPROTO_ICMP:
+		poff += sizeof(struct icmphdr);
+		break;
+	case IPPROTO_ICMPV6:
+		poff += sizeof(struct icmp6hdr);
+		break;
+	case IPPROTO_IGMP:
+		poff += sizeof(struct igmphdr);
+		break;
+	case IPPROTO_DCCP:
+		poff += sizeof(struct dccp_hdr);
+		break;
+	case IPPROTO_SCTP:
+		poff += sizeof(struct sctphdr);
+		break;
+	}
+
+	return poff;
+}
+
 static inline u16 dev_cap_txqueue(struct net_device *dev, u16 queue_index)
 {
 	if (unlikely(queue_index >= dev->real_num_tx_queues)) {
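
For illustration only, not part of the patch: a minimal sketch of how a
kernel-side caller might consume the new helper to hand over dissected
headers while skipping payload. The wrapper name copy_headers_only() and
its signature are hypothetical; __skb_get_poff(), skb_copy_bits() and
min_t() are existing kernel interfaces.

/* Copy at most the dissected header bytes of @skb into @buf.
 * __skb_get_poff() returns 0 when nothing could be dissected,
 * in which case we copy nothing at all.
 */
static u32 copy_headers_only(const struct sk_buff *skb, void *buf, u32 buflen)
{
	u32 poff = __skb_get_poff(skb);
	u32 len = min_t(u32, poff, min_t(u32, skb->len, buflen));

	if (!len || skb_copy_bits(skb, 0, buf, len) < 0)
		return 0;

	return len;
}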