sched: act: ife: migrate to use per-cpu counters
This patch migrates the current counter handling, which is protected by a
spinlock, to per-cpu counter handling. This reduces the time for which the
spinlock is held.

Signed-off-by: Alexander Aring <aring@mojatatu.com>
Acked-by: Jamal Hadi Salim <jhs@mojatatu.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Alexander Aring authored and David S. Miller committed Oct 13, 2017
1 parent 734534e commit ced273e
Showing 1 changed file with 11 additions and 18 deletions: net/sched/act_ife.c
@@ -477,7 +477,7 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
 
 	if (!exists) {
 		ret = tcf_idr_create(tn, parm->index, est, a, &act_ife_ops,
-				     bind, false);
+				     bind, true);
 		if (ret)
 			return ret;
 		ret = ACT_P_CREATED;
@@ -638,19 +638,15 @@ static int tcf_ife_decode(struct sk_buff *skb, const struct tc_action *a,
 	u8 *tlv_data;
 	u16 metalen;
 
-	spin_lock(&ife->tcf_lock);
-	bstats_update(&ife->tcf_bstats, skb);
+	bstats_cpu_update(this_cpu_ptr(ife->common.cpu_bstats), skb);
 	tcf_lastuse_update(&ife->tcf_tm);
-	spin_unlock(&ife->tcf_lock);
 
 	if (skb_at_tc_ingress(skb))
 		skb_push(skb, skb->dev->hard_header_len);
 
 	tlv_data = ife_decode(skb, &metalen);
 	if (unlikely(!tlv_data)) {
-		spin_lock(&ife->tcf_lock);
-		ife->tcf_qstats.drops++;
-		spin_unlock(&ife->tcf_lock);
+		qstats_drop_inc(this_cpu_ptr(ife->common.cpu_qstats));
 		return TC_ACT_SHOT;
 	}
 
@@ -668,14 +664,12 @@ static int tcf_ife_decode(struct sk_buff *skb, const struct tc_action *a,
 			 */
 			pr_info_ratelimited("Unknown metaid %d dlen %d\n",
 					    mtype, dlen);
-			ife->tcf_qstats.overlimits++;
+			qstats_overlimit_inc(this_cpu_ptr(ife->common.cpu_qstats));
 		}
 	}
 
 	if (WARN_ON(tlv_data != ifehdr_end)) {
-		spin_lock(&ife->tcf_lock);
-		ife->tcf_qstats.drops++;
-		spin_unlock(&ife->tcf_lock);
+		qstats_drop_inc(this_cpu_ptr(ife->common.cpu_qstats));
 		return TC_ACT_SHOT;
 	}
 
@@ -727,23 +721,20 @@ static int tcf_ife_encode(struct sk_buff *skb, const struct tc_action *a,
 		exceed_mtu = true;
 	}
 
-	spin_lock(&ife->tcf_lock);
-	bstats_update(&ife->tcf_bstats, skb);
+	bstats_cpu_update(this_cpu_ptr(ife->common.cpu_bstats), skb);
 	tcf_lastuse_update(&ife->tcf_tm);
 
 	if (!metalen) {		/* no metadata to send */
 		/* abuse overlimits to count when we allow packet
 		 * with no metadata
 		 */
-		ife->tcf_qstats.overlimits++;
-		spin_unlock(&ife->tcf_lock);
+		qstats_overlimit_inc(this_cpu_ptr(ife->common.cpu_qstats));
 		return action;
 	}
 	/* could be stupid policy setup or mtu config
 	 * so lets be conservative.. */
 	if ((action == TC_ACT_SHOT) || exceed_mtu) {
-		ife->tcf_qstats.drops++;
-		spin_unlock(&ife->tcf_lock);
+		qstats_drop_inc(this_cpu_ptr(ife->common.cpu_qstats));
 		return TC_ACT_SHOT;
 	}
 
@@ -752,6 +743,8 @@ static int tcf_ife_encode(struct sk_buff *skb, const struct tc_action *a,
 
 	ife_meta = ife_encode(skb, metalen);
 
+	spin_lock(&ife->tcf_lock);
+
 	/* XXX: we dont have a clever way of telling encode to
 	 * not repeat some of the computations that are done by
 	 * ops->presence_check...
@@ -763,8 +756,8 @@ static int tcf_ife_encode(struct sk_buff *skb, const struct tc_action *a,
 	}
 	if (err < 0) {
 		/* too corrupt to keep around if overwritten */
-		ife->tcf_qstats.drops++;
 		spin_unlock(&ife->tcf_lock);
+		qstats_drop_inc(this_cpu_ptr(ife->common.cpu_qstats));
 		return TC_ACT_SHOT;
 	}
 	skboff += err;
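
For context, a minimal sketch of the per-cpu counter idiom this patch adopts:
each CPU bumps its own private copy on the fast path with no spinlock, and the
copies are folded together only when stats are read. The names below
(pkt_counter, counter_init, counter_bump, counter_read) are hypothetical, not
from act_ife.c; only the percpu helpers themselves are standard kernel API.

#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/types.h>

/* Hypothetical counter struct, illustrative only (not from act_ife.c). */
struct pkt_counter {
	u64 packets;
};

static struct pkt_counter __percpu *ctr;

static int counter_init(void)
{
	/* Allocates one private instance per possible CPU. */
	ctr = alloc_percpu(struct pkt_counter);
	return ctr ? 0 : -ENOMEM;
}

/* Fast path: bump this CPU's own copy; no lock, no cache-line bouncing. */
static void counter_bump(void)
{
	this_cpu_inc(ctr->packets);
}

/* Slow path (stats dump): fold all per-cpu copies into one total. */
static u64 counter_read(void)
{
	u64 total = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		total += per_cpu_ptr(ctr, cpu)->packets;

	return total;
}

This is the same trade the patch makes: bstats_cpu_update() and the
qstats_*_inc() helpers operate on this_cpu_ptr() copies, so tcf_lock no longer
needs to cover counter updates and, in the encode path, is taken only around
the walk of the shared metadata list.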
