Skip to content

Commit

Permalink
netfilter: nf_flow_table: add support for sending flows back to the slow path

Browse files Browse the repository at this point in the history

Since conntrack hasn't seen any packets from the offloaded flow in a
while, and the timeout for offloaded flows is set to an extremely long
value, we need to fix up the state before we can send a flow back to the
slow path.

For TCP, reset td_maxwin in both directions, which makes it resync its
state on the next packets.

Use the regular timeout for TCP and UDP established connections.

This allows the slow path to take over again once the offload state has
been torn down.

Signed-off-by: Felix Fietkau <nbd@nbd.name>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
  • Loading branch information
Felix Fietkau authored and Pablo Neira Ayuso committed Apr 24, 2018
1 parent ba03137 commit da5984e
Showing 1 changed file with 49 additions and 1 deletion.
50 changes: 49 additions & 1 deletion net/netfilter/nf_flow_table_core.c
Original file line number Diff line number Diff line change
Expand Up @@ -100,14 +100,52 @@ flow_offload_alloc(struct nf_conn *ct, struct nf_flow_route *route)
}
EXPORT_SYMBOL_GPL(flow_offload_alloc);

/* Prepare an offloaded TCP conntrack entry for handoff back to the slow
 * path: zeroing td_maxwin in both directions makes the TCP window tracker
 * resynchronize its state from the next packets it sees.
 */
static void flow_offload_fixup_tcp(struct ip_ct_tcp *tcp)
{
	tcp->seen[0].td_maxwin = 0;
	tcp->seen[1].td_maxwin = 0;
	tcp->state = TCP_CONNTRACK_ESTABLISHED;
}

/* Restore sane conntrack state before a flow is returned to the slow path.
 * While offloaded, conntrack saw no packets and the timeout was pinned to
 * a very large value; reset the TCP window state and re-arm the regular
 * established-connection timeout for TCP and UDP.
 */
static void flow_offload_fixup_ct_state(struct nf_conn *ct)
{
	const struct nf_conntrack_l4proto *l4proto;
	struct net *net = nf_ct_net(ct);
	unsigned int *timeouts;
	unsigned int timeout;
	int l4num = nf_ct_protonum(ct);

	if (l4num == IPPROTO_TCP)
		flow_offload_fixup_tcp(&ct->proto.tcp);

	l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), l4num);
	if (!l4proto)
		return;

	timeouts = l4proto->get_timeouts(net);
	if (!timeouts)
		return;

	switch (l4num) {
	case IPPROTO_TCP:
		timeout = timeouts[TCP_CONNTRACK_ESTABLISHED];
		break;
	case IPPROTO_UDP:
		timeout = timeouts[UDP_CT_REPLIED];
		break;
	default:
		/* Other protocols keep whatever timeout they already have. */
		return;
	}

	ct->timeout = nfct_time_stamp + timeout;
}

/* Release a flow_offload entry: drop the cached route references, and
 * free the containing entry via RCU.
 *
 * nf_ct_delete() must only be called on the DYING path; the original
 * text called it unconditionally AND again under the FLOW_OFFLOAD_DYING
 * guard, which would delete the conntrack entry twice. Only the guarded
 * call is kept: a flow torn down normally hands its conntrack entry back
 * to the slow path (state fixed up in flow_offload_teardown()), so the
 * entry must survive; only a dying flow kills it here.
 */
void flow_offload_free(struct flow_offload *flow)
{
	struct flow_offload_entry *e;

	dst_release(flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_cache);
	dst_release(flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_cache);
	e = container_of(flow, struct flow_offload_entry, flow);
	if (flow->flags & FLOW_OFFLOAD_DYING)
		nf_ct_delete(e->ct, 0, 0);
	nf_ct_put(e->ct);
	kfree_rcu(e, rcu_head);
}
Expand Down Expand Up @@ -164,19 +202,29 @@ EXPORT_SYMBOL_GPL(flow_offload_add);
/* Unlink a flow from the flowtable and release it: remove both direction
 * hashes, clear the conntrack offload bit so the slow path owns the entry
 * again, then free the flow.
 */
static void flow_offload_del(struct nf_flowtable *flow_table,
			     struct flow_offload *flow)
{
	struct flow_offload_entry *entry;

	entry = container_of(flow, struct flow_offload_entry, flow);

	rhashtable_remove_fast(&flow_table->rhashtable,
			       &flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].node,
			       nf_flow_offload_rhash_params);
	rhashtable_remove_fast(&flow_table->rhashtable,
			       &flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].node,
			       nf_flow_offload_rhash_params);

	clear_bit(IPS_OFFLOAD_BIT, &entry->ct->status);

	flow_offload_free(flow);
}

/* Mark a flow for teardown and repair its conntrack state so the slow
 * path can take over once the offload entry is removed.
 */
void flow_offload_teardown(struct flow_offload *flow)
{
	struct flow_offload_entry *entry;

	entry = container_of(flow, struct flow_offload_entry, flow);
	flow->flags |= FLOW_OFFLOAD_TEARDOWN;
	flow_offload_fixup_ct_state(entry->ct);
}
EXPORT_SYMBOL_GPL(flow_offload_teardown);

Expand Down

0 comments on commit da5984e

Please sign in to comment.