Merge tag 'nf-23-07-26' of https://git.kernel.org/pub/scm/linux/kernel/git/netfilter/nf

Florian Westphal says:

====================
netfilter fixes for net

1. On-demand overlap detection in 'rbtree' set can cause memory leaks.
   This has been broken since 6.2.

2. An earlier fix in 6.4 to address an imbalance in refcounts during
   transaction error unwinding was incomplete, from Pablo Neira.

3. Disallow adding a rule to a deleted chain, also from Pablo.
   Broken since 5.9.

* tag 'nf-23-07-26' of https://git.kernel.org/pub/scm/linux/kernel/git/netfilter/nf:
  netfilter: nf_tables: disallow rule addition to bound chain via NFTA_RULE_CHAIN_ID
  netfilter: nf_tables: skip immediate deactivate in _PREPARE_ERROR
  netfilter: nft_set_rbtree: fix overlap expiration walk
====================

Link: https://lore.kernel.org/r/20230726152524.26268-1-fw@strlen.de
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
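
For orientation on the second fix: expression ->deactivate() callbacks in nf_tables receive a transaction phase, and balanced unwinding depends on doing phase-appropriate work exactly once. A paraphrased sketch of that enum, assuming the 6.5-era layout of include/net/netfilter/nf_tables.h (the tree is authoritative; the per-phase comments are editorial):

enum nft_trans_phase {
	NFT_TRANS_PREPARE,		/* object queued for removal in the current transaction */
	NFT_TRANS_PREPARE_ERROR,	/* transaction failed while being built; unwind it */
	NFT_TRANS_ABORT,		/* a prepared transaction is being aborted */
	NFT_TRANS_COMMIT,		/* the transaction is being committed */
	NFT_TRANS_RELEASE		/* the object is being released */
};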
Jakub Kicinski committed Jul 27, 2023
2 parents 2526612 + 0ebc106 commit ff0df20
Showing 3 changed files with 35 additions and 17 deletions.
net/netfilter/nf_tables_api.c (5 changes: 3 additions & 2 deletions)

@@ -3811,8 +3811,6 @@ static int nf_tables_newrule(struct sk_buff *skb, const struct nfnl_info *info,
 			NL_SET_BAD_ATTR(extack, nla[NFTA_RULE_CHAIN]);
 			return PTR_ERR(chain);
 		}
-		if (nft_chain_is_bound(chain))
-			return -EOPNOTSUPP;
 
 	} else if (nla[NFTA_RULE_CHAIN_ID]) {
 		chain = nft_chain_lookup_byid(net, table, nla[NFTA_RULE_CHAIN_ID],
@@ -3825,6 +3823,9 @@ static int nf_tables_newrule(struct sk_buff *skb, const struct nfnl_info *info,
 		return -EINVAL;
 	}
 
+	if (nft_chain_is_bound(chain))
+		return -EOPNOTSUPP;
+
 	if (nla[NFTA_RULE_HANDLE]) {
 		handle = be64_to_cpu(nla_get_be64(nla[NFTA_RULE_HANDLE]));
 		rule = __nft_rule_lookup(chain, handle);
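
The effect of the two hunks above is easiest to see with the lookup paths laid out together. A condensed sketch (not a verbatim excerpt; error-path details and unrelated attributes elided) of the resulting flow in nf_tables_newrule():

	if (nla[NFTA_RULE_CHAIN]) {
		chain = nft_chain_lookup(net, table, nla[NFTA_RULE_CHAIN],
					 genmask);
		if (IS_ERR(chain))
			return PTR_ERR(chain);
	} else if (nla[NFTA_RULE_CHAIN_ID]) {
		chain = nft_chain_lookup_byid(net, table, nla[NFTA_RULE_CHAIN_ID],
					      genmask);
		if (IS_ERR(chain))
			return PTR_ERR(chain);
	} else {
		return -EINVAL;
	}

	/* now common to both paths: rules may not be added to a chain
	 * that is already bound to a rule, e.g. an anonymous chain
	 * bound via jump/goto
	 */
	if (nft_chain_is_bound(chain))
		return -EOPNOTSUPP;

Moving the check below the if/else ladder is the whole fix: previously only the NFTA_RULE_CHAIN path performed it, so a rule could still reach a bound chain via NFTA_RULE_CHAIN_ID.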
net/netfilter/nft_immediate.c (27 changes: 18 additions & 9 deletions)

@@ -125,15 +125,27 @@ static void nft_immediate_activate(const struct nft_ctx *ctx,
 	return nft_data_hold(&priv->data, nft_dreg_to_type(priv->dreg));
 }
 
+static void nft_immediate_chain_deactivate(const struct nft_ctx *ctx,
+					   struct nft_chain *chain,
+					   enum nft_trans_phase phase)
+{
+	struct nft_ctx chain_ctx;
+	struct nft_rule *rule;
+
+	chain_ctx = *ctx;
+	chain_ctx.chain = chain;
+
+	list_for_each_entry(rule, &chain->rules, list)
+		nft_rule_expr_deactivate(&chain_ctx, rule, phase);
+}
+
 static void nft_immediate_deactivate(const struct nft_ctx *ctx,
 				     const struct nft_expr *expr,
 				     enum nft_trans_phase phase)
 {
 	const struct nft_immediate_expr *priv = nft_expr_priv(expr);
 	const struct nft_data *data = &priv->data;
-	struct nft_ctx chain_ctx;
 	struct nft_chain *chain;
-	struct nft_rule *rule;
 
 	if (priv->dreg == NFT_REG_VERDICT) {
 		switch (data->verdict.code) {
@@ -143,20 +155,17 @@ static void nft_immediate_deactivate(const struct nft_ctx *ctx,
 			if (!nft_chain_binding(chain))
 				break;
 
-			chain_ctx = *ctx;
-			chain_ctx.chain = chain;
-
-			list_for_each_entry(rule, &chain->rules, list)
-				nft_rule_expr_deactivate(&chain_ctx, rule, phase);
-
 			switch (phase) {
 			case NFT_TRANS_PREPARE_ERROR:
 				nf_tables_unbind_chain(ctx, chain);
-				fallthrough;
+				nft_deactivate_next(ctx->net, chain);
+				break;
 			case NFT_TRANS_PREPARE:
+				nft_immediate_chain_deactivate(ctx, chain, phase);
 				nft_deactivate_next(ctx->net, chain);
 				break;
 			default:
+				nft_immediate_chain_deactivate(ctx, chain, phase);
 				nft_chain_del(chain);
 				chain->bound = false;
 				nft_use_dec(&chain->table->use);
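
Read together, the two hunks replace the old unconditional rule walk plus fallthrough with three distinct unwind paths. An annotated summary of the resulting switch; the comments are editorial, not from the commit:

	switch (phase) {
	case NFT_TRANS_PREPARE_ERROR:
		/* the transaction failed while being built: unbind the
		 * chain and mark it inactive, but skip per-rule
		 * expression deactivation in this phase
		 */
		nf_tables_unbind_chain(ctx, chain);
		nft_deactivate_next(ctx->net, chain);
		break;
	case NFT_TRANS_PREPARE:
		/* removal is being prepared: deactivate the bound
		 * chain's rule expressions and mark the chain inactive
		 * for the next generation
		 */
		nft_immediate_chain_deactivate(ctx, chain, phase);
		nft_deactivate_next(ctx->net, chain);
		break;
	default:
		/* abort/release: deactivate expressions, then tear the
		 * bound chain down and drop the table's use count
		 */
		nft_immediate_chain_deactivate(ctx, chain, phase);
		nft_chain_del(chain);
		chain->bound = false;
		nft_use_dec(&chain->table->use);
		break;
	}

Skipping nft_immediate_chain_deactivate() in the _PREPARE_ERROR leg is the point of the fix: per the pull summary's note about the incomplete 6.4 change, the rules' expressions would otherwise be deactivated a second time during error unwinding.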
net/netfilter/nft_set_rbtree.c (20 changes: 14 additions & 6 deletions)

@@ -217,29 +217,37 @@ static void *nft_rbtree_get(const struct net *net, const struct nft_set *set,
 
 static int nft_rbtree_gc_elem(const struct nft_set *__set,
 			      struct nft_rbtree *priv,
-			      struct nft_rbtree_elem *rbe)
+			      struct nft_rbtree_elem *rbe,
+			      u8 genmask)
 {
 	struct nft_set *set = (struct nft_set *)__set;
 	struct rb_node *prev = rb_prev(&rbe->node);
-	struct nft_rbtree_elem *rbe_prev = NULL;
+	struct nft_rbtree_elem *rbe_prev;
 	struct nft_set_gc_batch *gcb;
 
 	gcb = nft_set_gc_batch_check(set, NULL, GFP_ATOMIC);
 	if (!gcb)
 		return -ENOMEM;
 
-	/* search for expired end interval coming before this element. */
+	/* search for end interval coming before this element.
+	 * end intervals don't carry a timeout extension, they
+	 * are coupled with the interval start element.
+	 */
 	while (prev) {
 		rbe_prev = rb_entry(prev, struct nft_rbtree_elem, node);
-		if (nft_rbtree_interval_end(rbe_prev))
+		if (nft_rbtree_interval_end(rbe_prev) &&
+		    nft_set_elem_active(&rbe_prev->ext, genmask))
			break;
 
 		prev = rb_prev(prev);
 	}
 
-	if (rbe_prev) {
+	if (prev) {
+		rbe_prev = rb_entry(prev, struct nft_rbtree_elem, node);
+
 		rb_erase(&rbe_prev->node, &priv->root);
 		atomic_dec(&set->nelems);
 		nft_set_gc_batch_add(gcb, rbe_prev);
 	}
 
 	rb_erase(&rbe->node, &priv->root);
@@ -321,7 +329,7 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
 
 	/* perform garbage collection to avoid bogus overlap reports. */
 	if (nft_set_elem_expired(&rbe->ext)) {
-		err = nft_rbtree_gc_elem(set, priv, rbe);
+		err = nft_rbtree_gc_elem(set, priv, rbe, genmask);
 		if (err < 0)
 			return err;
 
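
Two changes in the walk cooperate here. An annotated view of the fixed logic, with editorial comments that are not part of the patch:

	/* End intervals carry no timeout extension of their own, so
	 * liveness must be judged via the generation mask; without the
	 * nft_set_elem_active() check the walk could stop at an end
	 * element that a pending transaction has already deactivated.
	 */
	while (prev) {
		rbe_prev = rb_entry(prev, struct nft_rbtree_elem, node);
		if (nft_rbtree_interval_end(rbe_prev) &&
		    nft_set_elem_active(&rbe_prev->ext, genmask))
			break;

		prev = rb_prev(prev);
	}

	/* The old code tested rbe_prev, which is assigned on every
	 * iteration: if the walk fell off the tree (prev == NULL),
	 * rbe_prev still pointed at the last node examined, and a
	 * non-matching element could be erased. Testing prev and
	 * re-deriving rbe_prev from it confines the erase to the case
	 * where a live end interval was actually found.
	 */
	if (prev) {
		rbe_prev = rb_entry(prev, struct nft_rbtree_elem, node);

		rb_erase(&rbe_prev->node, &priv->root);
		atomic_dec(&set->nelems);
		nft_set_gc_batch_add(gcb, rbe_prev);
	}

The genmask threaded through from __nft_rbtree_insert() is what lets this lazy GC distinguish elements active in the current generation from ones already queued for deletion by a still-open transaction.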
