Merge branch 'net_sched-dump-no-rtnl'
Eric Dumazet says:

====================
net_sched: first series for RTNL-less qdisc dumps

The medium-term goal is to implement "tc qdisc show" without needing
to acquire RTNL.

This first series makes the required changes in 14 qdiscs.

Notes:

 - RTNL is still held in "tc qdisc show", more changes are needed.

 - Qdiscs returning many attributes might want or need to provide
   a consistent set of attributes. If that is the case, their
   dump() method could acquire the qdisc spinlock, pairing with the
   spinlock acquisition in their change() method.

V2: Addressed Simon's feedback (thanks a lot, Simon)
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller committed Apr 19, 2024
2 parents fdf4123 + c85cedb commit 00ac0dc
Showing 15 changed files with 323 additions and 234 deletions.
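
Every hunk in this series follows the same pattern: the change() path, still serialized by RTNL, publishes each scalar field with WRITE_ONCE(), and the dump() path reads it back with READ_ONCE() so that it can eventually run without RTNL. A minimal sketch of that pairing, using a made-up qdisc (struct foo_sched_data, the TCA_FOO_* attributes and the foo_* functions are illustrative, not code from this commit):

/* Illustrative only: a hypothetical qdisc private struct. */
struct foo_sched_data {
	u32	limit;
	u32	interval;
};

static int foo_change(struct Qdisc *sch, struct nlattr *tb[],
		      struct netlink_ext_ack *extack)
{
	struct foo_sched_data *q = qdisc_priv(sch);

	/* Writer side: still under RTNL; plain stores become WRITE_ONCE()
	 * so a lockless reader never observes a torn or re-read value.
	 */
	if (tb[TCA_FOO_LIMIT])
		WRITE_ONCE(q->limit, nla_get_u32(tb[TCA_FOO_LIMIT]));
	if (tb[TCA_FOO_INTERVAL])
		WRITE_ONCE(q->interval, nla_get_u32(tb[TCA_FOO_INTERVAL]));
	return 0;
}

static int foo_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct foo_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts;

	/* Reader side: no qdisc lock; each field is loaded exactly once. */
	opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (!opts)
		goto nla_put_failure;
	if (nla_put_u32(skb, TCA_FOO_LIMIT, READ_ONCE(q->limit)) ||
	    nla_put_u32(skb, TCA_FOO_INTERVAL, READ_ONCE(q->interval)))
		goto nla_put_failure;
	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -EMSGSIZE;
}

Note that READ_ONCE()/WRITE_ONCE() only guarantee single, untorn accesses to each individual field; they do not make a set of fields mutually consistent, which is what the cover letter's note about taking the qdisc spinlock in dump() addresses.
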
12 changes: 6 additions & 6 deletions include/net/red.h
@@ -233,18 +233,18 @@ static inline void red_set_parms(struct red_parms *p,
 	int delta = qth_max - qth_min;
 	u32 max_p_delta;
 
-	p->qth_min = qth_min << Wlog;
-	p->qth_max = qth_max << Wlog;
-	p->Wlog = Wlog;
-	p->Plog = Plog;
+	WRITE_ONCE(p->qth_min, qth_min << Wlog);
+	WRITE_ONCE(p->qth_max, qth_max << Wlog);
+	WRITE_ONCE(p->Wlog, Wlog);
+	WRITE_ONCE(p->Plog, Plog);
 	if (delta <= 0)
 		delta = 1;
 	p->qth_delta = delta;
 	if (!max_P) {
 		max_P = red_maxp(Plog);
 		max_P *= delta; /* max_P = (qth_max - qth_min)/2^Plog */
 	}
-	p->max_P = max_P;
+	WRITE_ONCE(p->max_P, max_P);
 	max_p_delta = max_P / delta;
 	max_p_delta = max(max_p_delta, 1U);
 	p->max_P_reciprocal = reciprocal_value(max_p_delta);
@@ -257,7 +257,7 @@ static inline void red_set_parms(struct red_parms *p,
 	p->target_min = qth_min + 2*delta;
 	p->target_max = qth_min + 3*delta;
 
-	p->Scell_log = Scell_log;
+	WRITE_ONCE(p->Scell_log, Scell_log);
 	p->Scell_max = (255 << Scell_log);
 
 	if (stab)
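
Since q->parms.qth_min and qth_max are stored pre-shifted by Wlog, a lockless reader should snapshot Wlog once and reuse it for both shifts instead of re-reading it; the sch_choke hunk further down does exactly that. A reduced sketch of the reader side (red_fill_qopt() is a made-up helper; the struct fields are the real ones):

/* Hypothetical helper pairing with the WRITE_ONCE() stores above. */
static void red_fill_qopt(const struct red_parms *p, struct tc_red_qopt *opt)
{
	u8 Wlog = READ_ONCE(p->Wlog);	/* read once, reuse for both shifts */

	opt->qth_min = READ_ONCE(p->qth_min) >> Wlog;
	opt->qth_max = READ_ONCE(p->qth_max) >> Wlog;
	opt->Wlog = Wlog;
	opt->Plog = READ_ONCE(p->Plog);
	opt->Scell_log = READ_ONCE(p->Scell_log);
}
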
110 changes: 63 additions & 47 deletions net/sched/sch_cake.c
@@ -2572,17 +2572,20 @@ static int cake_change(struct Qdisc *sch, struct nlattr *opt,
 {
 	struct cake_sched_data *q = qdisc_priv(sch);
 	struct nlattr *tb[TCA_CAKE_MAX + 1];
+	u16 rate_flags;
+	u8 flow_mode;
 	int err;
 
 	err = nla_parse_nested_deprecated(tb, TCA_CAKE_MAX, opt, cake_policy,
 					  extack);
 	if (err < 0)
 		return err;
 
+	flow_mode = q->flow_mode;
 	if (tb[TCA_CAKE_NAT]) {
 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
-		q->flow_mode &= ~CAKE_FLOW_NAT_FLAG;
-		q->flow_mode |= CAKE_FLOW_NAT_FLAG *
+		flow_mode &= ~CAKE_FLOW_NAT_FLAG;
+		flow_mode |= CAKE_FLOW_NAT_FLAG *
 			!!nla_get_u32(tb[TCA_CAKE_NAT]);
 #else
 		NL_SET_ERR_MSG_ATTR(extack, tb[TCA_CAKE_NAT],
@@ -2592,29 +2595,34 @@ static int cake_change(struct Qdisc *sch, struct nlattr *opt,
 	}
 
 	if (tb[TCA_CAKE_BASE_RATE64])
-		q->rate_bps = nla_get_u64(tb[TCA_CAKE_BASE_RATE64]);
+		WRITE_ONCE(q->rate_bps,
+			   nla_get_u64(tb[TCA_CAKE_BASE_RATE64]));
 
 	if (tb[TCA_CAKE_DIFFSERV_MODE])
-		q->tin_mode = nla_get_u32(tb[TCA_CAKE_DIFFSERV_MODE]);
+		WRITE_ONCE(q->tin_mode,
+			   nla_get_u32(tb[TCA_CAKE_DIFFSERV_MODE]));
 
+	rate_flags = q->rate_flags;
 	if (tb[TCA_CAKE_WASH]) {
 		if (!!nla_get_u32(tb[TCA_CAKE_WASH]))
-			q->rate_flags |= CAKE_FLAG_WASH;
+			rate_flags |= CAKE_FLAG_WASH;
 		else
-			q->rate_flags &= ~CAKE_FLAG_WASH;
+			rate_flags &= ~CAKE_FLAG_WASH;
 	}
 
 	if (tb[TCA_CAKE_FLOW_MODE])
-		q->flow_mode = ((q->flow_mode & CAKE_FLOW_NAT_FLAG) |
+		flow_mode = ((flow_mode & CAKE_FLOW_NAT_FLAG) |
 				(nla_get_u32(tb[TCA_CAKE_FLOW_MODE]) &
 					CAKE_FLOW_MASK));
 
 	if (tb[TCA_CAKE_ATM])
-		q->atm_mode = nla_get_u32(tb[TCA_CAKE_ATM]);
+		WRITE_ONCE(q->atm_mode,
+			   nla_get_u32(tb[TCA_CAKE_ATM]));
 
 	if (tb[TCA_CAKE_OVERHEAD]) {
-		q->rate_overhead = nla_get_s32(tb[TCA_CAKE_OVERHEAD]);
-		q->rate_flags |= CAKE_FLAG_OVERHEAD;
+		WRITE_ONCE(q->rate_overhead,
+			   nla_get_s32(tb[TCA_CAKE_OVERHEAD]));
+		rate_flags |= CAKE_FLAG_OVERHEAD;
 
 		q->max_netlen = 0;
 		q->max_adjlen = 0;
@@ -2623,7 +2631,7 @@ static int cake_change(struct Qdisc *sch, struct nlattr *opt,
 	}
 
 	if (tb[TCA_CAKE_RAW]) {
-		q->rate_flags &= ~CAKE_FLAG_OVERHEAD;
+		rate_flags &= ~CAKE_FLAG_OVERHEAD;
 
 		q->max_netlen = 0;
 		q->max_adjlen = 0;
@@ -2632,54 +2640,58 @@ static int cake_change(struct Qdisc *sch, struct nlattr *opt,
 	}
 
 	if (tb[TCA_CAKE_MPU])
-		q->rate_mpu = nla_get_u32(tb[TCA_CAKE_MPU]);
+		WRITE_ONCE(q->rate_mpu,
+			   nla_get_u32(tb[TCA_CAKE_MPU]));
 
 	if (tb[TCA_CAKE_RTT]) {
-		q->interval = nla_get_u32(tb[TCA_CAKE_RTT]);
+		u32 interval = nla_get_u32(tb[TCA_CAKE_RTT]);
 
-		if (!q->interval)
-			q->interval = 1;
+		WRITE_ONCE(q->interval, max(interval, 1U));
 	}
 
 	if (tb[TCA_CAKE_TARGET]) {
-		q->target = nla_get_u32(tb[TCA_CAKE_TARGET]);
+		u32 target = nla_get_u32(tb[TCA_CAKE_TARGET]);
 
-		if (!q->target)
-			q->target = 1;
+		WRITE_ONCE(q->target, max(target, 1U));
 	}
 
 	if (tb[TCA_CAKE_AUTORATE]) {
 		if (!!nla_get_u32(tb[TCA_CAKE_AUTORATE]))
-			q->rate_flags |= CAKE_FLAG_AUTORATE_INGRESS;
+			rate_flags |= CAKE_FLAG_AUTORATE_INGRESS;
 		else
-			q->rate_flags &= ~CAKE_FLAG_AUTORATE_INGRESS;
+			rate_flags &= ~CAKE_FLAG_AUTORATE_INGRESS;
 	}
 
 	if (tb[TCA_CAKE_INGRESS]) {
 		if (!!nla_get_u32(tb[TCA_CAKE_INGRESS]))
-			q->rate_flags |= CAKE_FLAG_INGRESS;
+			rate_flags |= CAKE_FLAG_INGRESS;
 		else
-			q->rate_flags &= ~CAKE_FLAG_INGRESS;
+			rate_flags &= ~CAKE_FLAG_INGRESS;
 	}
 
 	if (tb[TCA_CAKE_ACK_FILTER])
-		q->ack_filter = nla_get_u32(tb[TCA_CAKE_ACK_FILTER]);
+		WRITE_ONCE(q->ack_filter,
+			   nla_get_u32(tb[TCA_CAKE_ACK_FILTER]));
 
 	if (tb[TCA_CAKE_MEMORY])
-		q->buffer_config_limit = nla_get_u32(tb[TCA_CAKE_MEMORY]);
+		WRITE_ONCE(q->buffer_config_limit,
+			   nla_get_u32(tb[TCA_CAKE_MEMORY]));
 
 	if (tb[TCA_CAKE_SPLIT_GSO]) {
 		if (!!nla_get_u32(tb[TCA_CAKE_SPLIT_GSO]))
-			q->rate_flags |= CAKE_FLAG_SPLIT_GSO;
+			rate_flags |= CAKE_FLAG_SPLIT_GSO;
 		else
-			q->rate_flags &= ~CAKE_FLAG_SPLIT_GSO;
+			rate_flags &= ~CAKE_FLAG_SPLIT_GSO;
 	}
 
 	if (tb[TCA_CAKE_FWMARK]) {
-		q->fwmark_mask = nla_get_u32(tb[TCA_CAKE_FWMARK]);
-		q->fwmark_shft = q->fwmark_mask ? __ffs(q->fwmark_mask) : 0;
+		WRITE_ONCE(q->fwmark_mask, nla_get_u32(tb[TCA_CAKE_FWMARK]));
+		WRITE_ONCE(q->fwmark_shft,
+			   q->fwmark_mask ? __ffs(q->fwmark_mask) : 0);
 	}
 
+	WRITE_ONCE(q->rate_flags, rate_flags);
+	WRITE_ONCE(q->flow_mode, flow_mode);
 	if (q->tins) {
 		sch_tree_lock(sch);
 		cake_reconfigure(sch);
@@ -2774,68 +2786,72 @@ static int cake_dump(struct Qdisc *sch, struct sk_buff *skb)
 {
 	struct cake_sched_data *q = qdisc_priv(sch);
 	struct nlattr *opts;
+	u16 rate_flags;
+	u8 flow_mode;
 
 	opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
 	if (!opts)
 		goto nla_put_failure;
 
-	if (nla_put_u64_64bit(skb, TCA_CAKE_BASE_RATE64, q->rate_bps,
-			      TCA_CAKE_PAD))
+	if (nla_put_u64_64bit(skb, TCA_CAKE_BASE_RATE64,
+			      READ_ONCE(q->rate_bps), TCA_CAKE_PAD))
 		goto nla_put_failure;
 
-	if (nla_put_u32(skb, TCA_CAKE_FLOW_MODE,
-			q->flow_mode & CAKE_FLOW_MASK))
+	flow_mode = READ_ONCE(q->flow_mode);
+	if (nla_put_u32(skb, TCA_CAKE_FLOW_MODE, flow_mode & CAKE_FLOW_MASK))
 		goto nla_put_failure;
 
-	if (nla_put_u32(skb, TCA_CAKE_RTT, q->interval))
+	if (nla_put_u32(skb, TCA_CAKE_RTT, READ_ONCE(q->interval)))
 		goto nla_put_failure;
 
-	if (nla_put_u32(skb, TCA_CAKE_TARGET, q->target))
+	if (nla_put_u32(skb, TCA_CAKE_TARGET, READ_ONCE(q->target)))
 		goto nla_put_failure;
 
-	if (nla_put_u32(skb, TCA_CAKE_MEMORY, q->buffer_config_limit))
+	if (nla_put_u32(skb, TCA_CAKE_MEMORY,
+			READ_ONCE(q->buffer_config_limit)))
 		goto nla_put_failure;
 
+	rate_flags = READ_ONCE(q->rate_flags);
 	if (nla_put_u32(skb, TCA_CAKE_AUTORATE,
-			!!(q->rate_flags & CAKE_FLAG_AUTORATE_INGRESS)))
+			!!(rate_flags & CAKE_FLAG_AUTORATE_INGRESS)))
 		goto nla_put_failure;
 
 	if (nla_put_u32(skb, TCA_CAKE_INGRESS,
-			!!(q->rate_flags & CAKE_FLAG_INGRESS)))
+			!!(rate_flags & CAKE_FLAG_INGRESS)))
 		goto nla_put_failure;
 
-	if (nla_put_u32(skb, TCA_CAKE_ACK_FILTER, q->ack_filter))
+	if (nla_put_u32(skb, TCA_CAKE_ACK_FILTER, READ_ONCE(q->ack_filter)))
 		goto nla_put_failure;
 
 	if (nla_put_u32(skb, TCA_CAKE_NAT,
-			!!(q->flow_mode & CAKE_FLOW_NAT_FLAG)))
+			!!(flow_mode & CAKE_FLOW_NAT_FLAG)))
 		goto nla_put_failure;
 
-	if (nla_put_u32(skb, TCA_CAKE_DIFFSERV_MODE, q->tin_mode))
+	if (nla_put_u32(skb, TCA_CAKE_DIFFSERV_MODE, READ_ONCE(q->tin_mode)))
 		goto nla_put_failure;
 
 	if (nla_put_u32(skb, TCA_CAKE_WASH,
-			!!(q->rate_flags & CAKE_FLAG_WASH)))
+			!!(rate_flags & CAKE_FLAG_WASH)))
 		goto nla_put_failure;
 
-	if (nla_put_u32(skb, TCA_CAKE_OVERHEAD, q->rate_overhead))
+	if (nla_put_u32(skb, TCA_CAKE_OVERHEAD, READ_ONCE(q->rate_overhead)))
 		goto nla_put_failure;
 
-	if (!(q->rate_flags & CAKE_FLAG_OVERHEAD))
+	if (!(rate_flags & CAKE_FLAG_OVERHEAD))
 		if (nla_put_u32(skb, TCA_CAKE_RAW, 0))
 			goto nla_put_failure;
 
-	if (nla_put_u32(skb, TCA_CAKE_ATM, q->atm_mode))
+	if (nla_put_u32(skb, TCA_CAKE_ATM, READ_ONCE(q->atm_mode)))
 		goto nla_put_failure;
 
-	if (nla_put_u32(skb, TCA_CAKE_MPU, q->rate_mpu))
+	if (nla_put_u32(skb, TCA_CAKE_MPU, READ_ONCE(q->rate_mpu)))
 		goto nla_put_failure;
 
 	if (nla_put_u32(skb, TCA_CAKE_SPLIT_GSO,
-			!!(q->rate_flags & CAKE_FLAG_SPLIT_GSO)))
+			!!(rate_flags & CAKE_FLAG_SPLIT_GSO)))
 		goto nla_put_failure;
 
-	if (nla_put_u32(skb, TCA_CAKE_FWMARK, q->fwmark_mask))
+	if (nla_put_u32(skb, TCA_CAKE_FWMARK, READ_ONCE(q->fwmark_mask)))
 		goto nla_put_failure;
 
 	return nla_nest_end(skb, opts);
20 changes: 10 additions & 10 deletions net/sched/sch_cbs.c
@@ -389,11 +389,11 @@ static int cbs_change(struct Qdisc *sch, struct nlattr *opt,
 	}
 
 	/* Everything went OK, save the parameters used. */
-	q->hicredit = qopt->hicredit;
-	q->locredit = qopt->locredit;
-	q->idleslope = qopt->idleslope * BYTES_PER_KBIT;
-	q->sendslope = qopt->sendslope * BYTES_PER_KBIT;
-	q->offload = qopt->offload;
+	WRITE_ONCE(q->hicredit, qopt->hicredit);
+	WRITE_ONCE(q->locredit, qopt->locredit);
+	WRITE_ONCE(q->idleslope, qopt->idleslope * BYTES_PER_KBIT);
+	WRITE_ONCE(q->sendslope, qopt->sendslope * BYTES_PER_KBIT);
+	WRITE_ONCE(q->offload, qopt->offload);
 
 	return 0;
 }
@@ -459,11 +459,11 @@ static int cbs_dump(struct Qdisc *sch, struct sk_buff *skb)
 	if (!nest)
 		goto nla_put_failure;
 
-	opt.hicredit = q->hicredit;
-	opt.locredit = q->locredit;
-	opt.sendslope = div64_s64(q->sendslope, BYTES_PER_KBIT);
-	opt.idleslope = div64_s64(q->idleslope, BYTES_PER_KBIT);
-	opt.offload = q->offload;
+	opt.hicredit = READ_ONCE(q->hicredit);
+	opt.locredit = READ_ONCE(q->locredit);
+	opt.sendslope = div64_s64(READ_ONCE(q->sendslope), BYTES_PER_KBIT);
+	opt.idleslope = div64_s64(READ_ONCE(q->idleslope), BYTES_PER_KBIT);
+	opt.offload = READ_ONCE(q->offload);
 
 	if (nla_put(skb, TCA_CBS_PARMS, sizeof(opt), &opt))
 		goto nla_put_failure;
21 changes: 11 additions & 10 deletions net/sched/sch_choke.c
@@ -405,8 +405,8 @@ static int choke_change(struct Qdisc *sch, struct nlattr *opt,
 	} else
 		sch_tree_lock(sch);
 
-	q->flags = ctl->flags;
-	q->limit = ctl->limit;
+	WRITE_ONCE(q->flags, ctl->flags);
+	WRITE_ONCE(q->limit, ctl->limit);
 
 	red_set_parms(&q->parms, ctl->qth_min, ctl->qth_max, ctl->Wlog,
 		      ctl->Plog, ctl->Scell_log,
@@ -431,23 +431,24 @@ static int choke_init(struct Qdisc *sch, struct nlattr *opt,
 static int choke_dump(struct Qdisc *sch, struct sk_buff *skb)
 {
 	struct choke_sched_data *q = qdisc_priv(sch);
+	u8 Wlog = READ_ONCE(q->parms.Wlog);
 	struct nlattr *opts = NULL;
 	struct tc_red_qopt opt = {
-		.limit = q->limit,
-		.flags = q->flags,
-		.qth_min = q->parms.qth_min >> q->parms.Wlog,
-		.qth_max = q->parms.qth_max >> q->parms.Wlog,
-		.Wlog = q->parms.Wlog,
-		.Plog = q->parms.Plog,
-		.Scell_log = q->parms.Scell_log,
+		.limit = READ_ONCE(q->limit),
+		.flags = READ_ONCE(q->flags),
+		.qth_min = READ_ONCE(q->parms.qth_min) >> Wlog,
+		.qth_max = READ_ONCE(q->parms.qth_max) >> Wlog,
+		.Wlog = Wlog,
+		.Plog = READ_ONCE(q->parms.Plog),
+		.Scell_log = READ_ONCE(q->parms.Scell_log),
 	};
 
 	opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
 	if (opts == NULL)
 		goto nla_put_failure;
 
 	if (nla_put(skb, TCA_CHOKE_PARMS, sizeof(opt), &opt) ||
-	    nla_put_u32(skb, TCA_CHOKE_MAX_P, q->parms.max_P))
+	    nla_put_u32(skb, TCA_CHOKE_MAX_P, READ_ONCE(q->parms.max_P)))
 		goto nla_put_failure;
 	return nla_nest_end(skb, opts);
 
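
The cover letter's note about consistent attribute sets points to an alternative for qdiscs whose dumped fields must match each other: take the qdisc lock in dump(), pairing with the sch_tree_lock() already taken in change(). A minimal sketch of that variant, again with a hypothetical qdisc (struct bar_sched_data, struct tc_bar_qopt and TCA_BAR_PARMS are illustrative, not part of this series):

/* Illustrative private data and dump layout. */
struct bar_sched_data {
	u32	limit;
	u32	flags;
};

struct tc_bar_qopt {
	__u32	limit;
	__u32	flags;
};

static int bar_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct bar_sched_data *q = qdisc_priv(sch);
	struct tc_bar_qopt opt;
	struct nlattr *nest;

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (!nest)
		goto nla_put_failure;

	/* Capture both fields under the qdisc lock so they come from the
	 * same configuration, pairing with sch_tree_lock() in change().
	 */
	sch_tree_lock(sch);
	opt.limit = q->limit;
	opt.flags = q->flags;
	sch_tree_unlock(sch);

	if (nla_put(skb, TCA_BAR_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;

	return nla_nest_end(skb, nest);

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

The qdiscs converted in this series stick to independent READ_ONCE() loads; the locked variant is only suggested for qdiscs that genuinely need a consistent snapshot of several attributes.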