diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c index 110d6c403bdd2..4ffd303c64eac 100644 --- a/drivers/net/dsa/ocelot/felix_vsc9959.c +++ b/drivers/net/dsa/ocelot/felix_vsc9959.c @@ -1745,7 +1745,7 @@ static void vsc9959_psfp_sfi_table_del(struct ocelot *ocelot, u32 index) static void vsc9959_psfp_parse_gate(const struct flow_action_entry *entry, struct felix_stream_gate *sgi) { - sgi->index = entry->gate.index; + sgi->index = entry->hw_index; sgi->ipv_valid = (entry->gate.prio < 0) ? 0 : 1; sgi->init_ipv = (sgi->ipv_valid) ? entry->gate.prio : 0; sgi->basetime = entry->gate.basetime; @@ -1947,7 +1947,7 @@ static int vsc9959_psfp_filter_add(struct ocelot *ocelot, int port, kfree(sgi); break; case FLOW_ACTION_POLICE: - index = a->police.index + VSC9959_PSFP_POLICER_BASE; + index = a->hw_index + VSC9959_PSFP_POLICER_BASE; if (index > VSC9959_PSFP_POLICER_MAX) { ret = -EINVAL; goto err; diff --git a/drivers/net/dsa/sja1105/sja1105_flower.c b/drivers/net/dsa/sja1105/sja1105_flower.c index 72b9b39b0989d..7dcdd784aea4c 100644 --- a/drivers/net/dsa/sja1105/sja1105_flower.c +++ b/drivers/net/dsa/sja1105/sja1105_flower.c @@ -379,7 +379,7 @@ int sja1105_cls_flower_add(struct dsa_switch *ds, int port, vl_rule = true; rc = sja1105_vl_gate(priv, port, extack, cookie, - &key, act->gate.index, + &key, act->hw_index, act->gate.prio, act->gate.basetime, act->gate.cycletime, diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c index 1471b6130a2b9..d8afcf8d6b30e 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c @@ -1962,7 +1962,7 @@ static int bnxt_tc_setup_indr_cb(struct net_device *netdev, struct Qdisc *sch, v void *data, void (*cleanup)(struct flow_block_cb *block_cb)) { - if (!bnxt_is_netdev_indr_offload(netdev)) + if (!netdev || !bnxt_is_netdev_indr_offload(netdev)) return -EOPNOTSUPP; switch (type) { diff --git a/drivers/net/ethernet/freescale/enetc/enetc_qos.c b/drivers/net/ethernet/freescale/enetc/enetc_qos.c index 0536d2c76fbc4..3555c12edb45a 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_qos.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_qos.c @@ -1182,7 +1182,7 @@ static int enetc_psfp_parse_clsflower(struct enetc_ndev_priv *priv, } /* parsing gate action */ - if (entryg->gate.index >= priv->psfp_cap.max_psfp_gate) { + if (entryg->hw_index >= priv->psfp_cap.max_psfp_gate) { NL_SET_ERR_MSG_MOD(extack, "No Stream Gate resource!"); err = -ENOSPC; goto free_filter; @@ -1202,7 +1202,7 @@ static int enetc_psfp_parse_clsflower(struct enetc_ndev_priv *priv, } refcount_set(&sgi->refcount, 1); - sgi->index = entryg->gate.index; + sgi->index = entryg->hw_index; sgi->init_ipv = entryg->gate.prio; sgi->basetime = entryg->gate.basetime; sgi->cycletime = entryg->gate.cycletime; @@ -1244,7 +1244,7 @@ static int enetc_psfp_parse_clsflower(struct enetc_ndev_priv *priv, refcount_set(&fmi->refcount, 1); fmi->cir = entryp->police.rate_bytes_ps; fmi->cbs = entryp->police.burst; - fmi->index = entryp->police.index; + fmi->index = entryp->hw_index; filter->flags |= ENETC_PSFP_FLAGS_FMI; filter->fmi_index = fmi->index; sfi->meter_id = fmi->index; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c index fcb0892c08a9f..0991345c4ae5c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c @@ -517,6 +517,9 @@ int 
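
The driver hunks above all make the same change: the policer or gate instance is now identified by the generic entry->hw_index that the core fills in for every action type (elsewhere in this series it is populated from the action's TC index), so the per-action police.index and gate.index fields can be dropped from struct flow_action_entry. A minimal, hypothetical driver-side sketch of the resulting parse path; the FOO_* limits and foo_policer_install() helper are made up for illustration:

static int foo_parse_police(struct foo_priv *priv,
                            const struct flow_action_entry *entry,
                            struct netlink_ext_ack *extack)
{
        u32 id = entry->hw_index + FOO_POLICER_BASE;    /* FOO_* are made up */

        if (id > FOO_POLICER_MAX) {
                NL_SET_ERR_MSG_MOD(extack, "Policer index out of range");
                return -EINVAL;
        }

        return foo_policer_install(priv, id, entry->police.rate_bytes_ps,
                                   entry->police.burst);
}
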
mlx5e_rep_indr_setup_cb(struct net_device *netdev, struct Qdisc *sch, void * void *data, void (*cleanup)(struct flow_block_cb *block_cb)) { + if (!netdev) + return -EOPNOTSUPP; + switch (type) { case TC_SETUP_BLOCK: return mlx5e_rep_indr_setup_block(netdev, sch, cb_priv, type_data, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c index be3791ca6069d..186c556f0de14 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c @@ -203,7 +203,7 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp, */ burst = roundup_pow_of_two(act->police.burst); err = mlxsw_sp_acl_rulei_act_police(mlxsw_sp, rulei, - act->police.index, + act->hw_index, act->police.rate_bytes_ps, burst, extack); if (err) diff --git a/drivers/net/ethernet/mscc/ocelot_flower.c b/drivers/net/ethernet/mscc/ocelot_flower.c index 58fce173f95b1..beb9379424c00 100644 --- a/drivers/net/ethernet/mscc/ocelot_flower.c +++ b/drivers/net/ethernet/mscc/ocelot_flower.c @@ -303,7 +303,7 @@ static int ocelot_flower_parse_action(struct ocelot *ocelot, int port, } filter->action.police_ena = true; - pol_ix = a->police.index + ocelot->vcap_pol.base; + pol_ix = a->hw_index + ocelot->vcap_pol.base; pol_max = ocelot->vcap_pol.max; if (ocelot->vcap_pol.max2 && pol_ix > pol_max) { diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c index 224089d04d983..f97eff5afd125 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/offload.c +++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c @@ -1867,6 +1867,9 @@ nfp_flower_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch, void * void *data, void (*cleanup)(struct flow_block_cb *block_cb)) { + if (!netdev) + return -EOPNOTSUPP; + if (!nfp_fl_is_netdev_to_offload(netdev)) return -EOPNOTSUPP; diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index a419718612c6f..8b0bdeb4734e7 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -920,6 +920,7 @@ enum tc_setup_type { TC_SETUP_QDISC_TBF, TC_SETUP_QDISC_FIFO, TC_SETUP_QDISC_HTB, + TC_SETUP_ACT, }; /* These structures hold the attributes of bpf state that are being passed diff --git a/include/net/act_api.h b/include/net/act_api.h index b5b624c7e4888..3049cb69c0252 100644 --- a/include/net/act_api.h +++ b/include/net/act_api.h @@ -7,6 +7,7 @@ */ #include +#include #include #include #include @@ -44,6 +45,7 @@ struct tc_action { u8 hw_stats; u8 used_hw_stats; bool used_hw_stats_valid; + u32 in_hw_count; }; #define tcf_index common.tcfa_index #define tcf_refcnt common.tcfa_refcnt @@ -88,6 +90,16 @@ static inline void tcf_tm_dump(struct tcf_t *dtm, const struct tcf_t *stm) dtm->expires = jiffies_to_clock_t(stm->expires); } +static inline enum flow_action_hw_stats tc_act_hw_stats(u8 hw_stats) +{ + if (WARN_ON_ONCE(hw_stats > TCA_ACT_HW_STATS_ANY)) + return FLOW_ACTION_HW_STATS_DONT_CARE; + else if (!hw_stats) + return FLOW_ACTION_HW_STATS_DISABLED; + + return hw_stats; +} + #ifdef CONFIG_NET_CLS_ACT #define ACT_P_CREATED 1 @@ -121,6 +133,8 @@ struct tc_action_ops { struct psample_group * (*get_psample_group)(const struct tc_action *a, tc_action_priv_destructor *destructor); + int (*offload_act_setup)(struct tc_action *act, void *entry_data, + u32 *index_inc, bool bind); }; struct tc_action_net { @@ -189,7 +203,7 @@ int tcf_action_exec(struct sk_buff *skb, struct tc_action **actions, int 
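
TC_SETUP_ACT requests are delivered through the same flow_indr callbacks as block offload, but with a NULL net_device and a NULL flow_block_offload, which is why bnxt, nfp and mlx5 above grow NULL checks. A hedged sketch of a callback that dispatches both cases; foo_setup_block() and foo_setup_act() are hypothetical helpers, and the struct flow_offload_action is taken from the data argument, matching how tcf_action_offload_cmd_cb_ex() later in this patch invokes the callback:

static int foo_indr_setup_cb(struct net_device *netdev, struct Qdisc *sch,
                             void *cb_priv, enum tc_setup_type type,
                             void *type_data, void *data,
                             void (*cleanup)(struct flow_block_cb *block_cb))
{
        switch (type) {
        case TC_SETUP_BLOCK:
                if (!netdev)
                        return -EOPNOTSUPP;
                return foo_setup_block(netdev, sch, cb_priv, type_data,
                                       data, cleanup);
        case TC_SETUP_ACT:
                /* data points at the struct flow_offload_action built by
                 * the core; type_data is NULL on this path.
                 */
                return foo_setup_act(cb_priv, data);
        default:
                return -EOPNOTSUPP;
        }
}
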
tcf_action_init(struct net *net, struct tcf_proto *tp, struct nlattr *nla, struct nlattr *est, struct tc_action *actions[], int init_res[], size_t *attr_size, - u32 flags, struct netlink_ext_ack *extack); + u32 flags, u32 fl_flags, struct netlink_ext_ack *extack); struct tc_action_ops *tc_action_load_ops(struct nlattr *nla, bool police, bool rtnl_held, struct netlink_ext_ack *extack); @@ -240,6 +254,9 @@ void tcf_action_update_stats(struct tc_action *a, u64 bytes, u64 packets, u64 drops, bool hw); int tcf_action_copy_stats(struct sk_buff *, struct tc_action *, int); +int tcf_action_update_hw_stats(struct tc_action *action); +int tcf_action_reoffload_cb(flow_indr_block_bind_cb_t *cb, + void *cb_priv, bool add); int tcf_action_check_ctrlact(int action, struct tcf_proto *tp, struct tcf_chain **handle, struct netlink_ext_ack *newchain); @@ -251,6 +268,14 @@ DECLARE_STATIC_KEY_FALSE(tcf_frag_xmit_count); #endif int tcf_dev_queue_xmit(struct sk_buff *skb, int (*xmit)(struct sk_buff *skb)); + +#else /* !CONFIG_NET_CLS_ACT */ + +static inline int tcf_action_reoffload_cb(flow_indr_block_bind_cb_t *cb, + void *cb_priv, bool add) { + return 0; +} + #endif /* CONFIG_NET_CLS_ACT */ static inline void tcf_action_stats_update(struct tc_action *a, u64 bytes, diff --git a/include/net/flow_offload.h b/include/net/flow_offload.h index 3961461d9c8bc..5b8c54eb7a6b8 100644 --- a/include/net/flow_offload.h +++ b/include/net/flow_offload.h @@ -197,6 +197,7 @@ void flow_action_cookie_destroy(struct flow_action_cookie *cookie); struct flow_action_entry { enum flow_action_id id; + u32 hw_index; enum flow_action_hw_stats hw_stats; action_destr destructor; void *destructor_priv; @@ -232,7 +233,6 @@ struct flow_action_entry { bool truncate; } sample; struct { /* FLOW_ACTION_POLICE */ - u32 index; u32 burst; u64 rate_bytes_ps; u64 burst_pkt; @@ -267,7 +267,6 @@ struct flow_action_entry { u8 ttl; } mpls_mangle; struct { - u32 index; s32 prio; u64 basetime; u64 cycletime; @@ -552,6 +551,23 @@ struct flow_cls_offload { u32 classid; }; +enum offload_act_command { + FLOW_ACT_REPLACE, + FLOW_ACT_DESTROY, + FLOW_ACT_STATS, +}; + +struct flow_offload_action { + struct netlink_ext_ack *extack; /* NULL in FLOW_ACT_STATS process*/ + enum offload_act_command command; + enum flow_action_id id; + u32 index; + struct flow_stats stats; + struct flow_action action; +}; + +struct flow_offload_action *offload_action_alloc(unsigned int num_actions); + static inline struct flow_rule * flow_cls_offload_flow_rule(struct flow_cls_offload *flow_cmd) { diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h index cebc1bd713b68..ebef45e821afd 100644 --- a/include/net/pkt_cls.h +++ b/include/net/pkt_cls.h @@ -262,26 +262,31 @@ static inline void tcf_exts_put_net(struct tcf_exts *exts) for (; 0; (void)(i), (void)(a), (void)(exts)) #endif +#define tcf_act_for_each_action(i, a, actions) \ + for (i = 0; i < TCA_ACT_MAX_PRIO && ((a) = actions[i]); i++) + static inline void -tcf_exts_stats_update(const struct tcf_exts *exts, - u64 bytes, u64 packets, u64 drops, u64 lastuse, - u8 used_hw_stats, bool used_hw_stats_valid) +tcf_exts_hw_stats_update(const struct tcf_exts *exts, + u64 bytes, u64 packets, u64 drops, u64 lastuse, + u8 used_hw_stats, bool used_hw_stats_valid) { #ifdef CONFIG_NET_CLS_ACT int i; - preempt_disable(); - for (i = 0; i < exts->nr_actions; i++) { struct tc_action *a = exts->actions[i]; - tcf_action_stats_update(a, bytes, packets, drops, - lastuse, true); - a->used_hw_stats = used_hw_stats; - a->used_hw_stats_valid = 
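
struct flow_offload_action is the action-offload analogue of flow_cls_offload: command selects replace, destroy or stats, index carries the TC action index used as the lookup key, extack is only valid for replace, and the embedded flow_action is only populated for FLOW_ACT_REPLACE. A hedged sketch of the driver-side handler; struct foo_priv, struct foo_act and the foo_act_*() helpers are hypothetical:

static int foo_setup_act(struct foo_priv *priv,
                         struct flow_offload_action *fl_act)
{
        struct foo_act *fa;

        switch (fl_act->command) {
        case FLOW_ACT_REPLACE:
                /* fl_act->action.entries[] is only populated for replace. */
                return foo_act_install(priv, fl_act->index, &fl_act->action,
                                       fl_act->extack);
        case FLOW_ACT_DESTROY:
                return foo_act_remove(priv, fl_act->index);
        case FLOW_ACT_STATS:
                fa = foo_act_find(priv, fl_act->index);
                if (!fa)
                        return -ENOENT;
                /* Report counters back to the core. */
                fl_act->stats.pkts += fa->packets;
                fl_act->stats.bytes += fa->bytes;
                fl_act->stats.drops += fa->drops;
                fl_act->stats.lastused = max_t(u64, fl_act->stats.lastused,
                                               fa->lastused);
                fl_act->stats.used_hw_stats = FLOW_ACTION_HW_STATS_DELAYED;
                return 0;
        default:
                return -EOPNOTSUPP;
        }
}

For FLOW_ACT_STATS the core (tcf_action_update_hw_stats(), added below) folds fl_act->stats back into the software counters, so drivers follow the usual flow-offload convention of reporting deltas since the previous query.
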
used_hw_stats_valid; - } + /* if stats from hw, just skip */ + if (tcf_action_update_hw_stats(a)) { + preempt_disable(); + tcf_action_stats_update(a, bytes, packets, drops, + lastuse, true); + preempt_enable(); - preempt_enable(); + a->used_hw_stats = used_hw_stats; + a->used_hw_stats_valid = used_hw_stats_valid; + } + } #endif } @@ -325,6 +330,9 @@ int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb, struct nlattr *rate_tlv, struct tcf_exts *exts, u32 flags, struct netlink_ext_ack *extack); +int tcf_exts_validate_ex(struct net *net, struct tcf_proto *tp, struct nlattr **tb, + struct nlattr *rate_tlv, struct tcf_exts *exts, + u32 flags, u32 fl_flags, struct netlink_ext_ack *extack); void tcf_exts_destroy(struct tcf_exts *exts); void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src); int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts); @@ -536,9 +544,11 @@ tcf_match_indev(struct sk_buff *skb, int ifindex) return ifindex == skb->skb_iif; } -int tc_setup_flow_action(struct flow_action *flow_action, - const struct tcf_exts *exts); -void tc_cleanup_flow_action(struct flow_action *flow_action); +int tc_setup_offload_action(struct flow_action *flow_action, + const struct tcf_exts *exts); +void tc_cleanup_offload_action(struct flow_action *flow_action); +int tc_setup_action(struct flow_action *flow_action, + struct tc_action *actions[]); int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type, void *type_data, bool err_stop, bool rtnl_held); diff --git a/include/net/tc_act/tc_gate.h b/include/net/tc_act/tc_gate.h index 8bc6be81a7adf..c8fa11ebb3978 100644 --- a/include/net/tc_act/tc_gate.h +++ b/include/net/tc_act/tc_gate.h @@ -60,11 +60,6 @@ static inline bool is_tcf_gate(const struct tc_action *a) return false; } -static inline u32 tcf_gate_index(const struct tc_action *a) -{ - return a->tcfa_index; -} - static inline s32 tcf_gate_prio(const struct tc_action *a) { s32 tcfg_prio; diff --git a/include/uapi/linux/pkt_cls.h b/include/uapi/linux/pkt_cls.h index 6836ccb9c45dc..ee38b35c3f57e 100644 --- a/include/uapi/linux/pkt_cls.h +++ b/include/uapi/linux/pkt_cls.h @@ -19,13 +19,16 @@ enum { TCA_ACT_FLAGS, TCA_ACT_HW_STATS, TCA_ACT_USED_HW_STATS, + TCA_ACT_IN_HW_COUNT, __TCA_ACT_MAX }; /* See other TCA_ACT_FLAGS_ * flags in include/net/act_api.h. */ -#define TCA_ACT_FLAGS_NO_PERCPU_STATS 1 /* Don't use percpu allocator for - * actions stats. - */ +#define TCA_ACT_FLAGS_NO_PERCPU_STATS (1 << 0) /* Don't use percpu allocator for + * actions stats. + */ +#define TCA_ACT_FLAGS_SKIP_HW (1 << 1) /* don't offload action to HW */ +#define TCA_ACT_FLAGS_SKIP_SW (1 << 2) /* don't use action in SW */ /* tca HW stats type * When user does not pass the attribute, he does not care. diff --git a/net/core/flow_offload.c b/net/core/flow_offload.c index 6beaea13564a8..73f68d4625f32 100644 --- a/net/core/flow_offload.c +++ b/net/core/flow_offload.c @@ -1,6 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0 */ #include #include +#include #include #include #include @@ -27,6 +28,26 @@ struct flow_rule *flow_rule_alloc(unsigned int num_actions) } EXPORT_SYMBOL(flow_rule_alloc); +struct flow_offload_action *offload_action_alloc(unsigned int num_actions) +{ + struct flow_offload_action *fl_action; + int i; + + fl_action = kzalloc(struct_size(fl_action, action.entries, num_actions), + GFP_KERNEL); + if (!fl_action) + return NULL; + + fl_action->action.num_entries = num_actions; + /* Pre-fill each action hw_stats with DONT_CARE. 
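
tc_setup_flow_action()/tc_cleanup_flow_action() are renamed to tc_setup_offload_action()/tc_cleanup_offload_action(), and the exts-independent core, tc_setup_action(), is exported so act_api.c can reuse it for standalone actions. A simplified sketch of the classifier-side calling convention (roughly what cls_flower does; names, locking and the driver hand-off are elided):

static int foo_cls_offload_actions(struct tcf_exts *exts)
{
        struct flow_rule *rule = flow_rule_alloc(tcf_exts_num_actions(exts));
        int err;

        if (!rule)
                return -ENOMEM;

        /* Translate exts->actions[] into rule->action entries; this ends up
         * calling each action's ->offload_act_setup(..., bind = true).
         */
        err = tc_setup_offload_action(&rule->action, exts);
        if (err)
                goto out;

        /* ... hand "rule" to the hardware via tc_setup_cb_add() ... */

out:
        /* Run the per-entry destructors (tunnel info copies, held netdevs,
         * psample groups, gate entry lists).
         */
        tc_cleanup_offload_action(&rule->action);
        kfree(rule);
        return err;
}
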
+ * Caller can override this if it wants stats for a given action. + */ + for (i = 0; i < num_actions; i++) + fl_action->action.entries[i].hw_stats = FLOW_ACTION_HW_STATS_DONT_CARE; + + return fl_action; +} + #define FLOW_DISSECTOR_MATCH(__rule, __type, __out) \ const struct flow_match *__m = &(__rule)->match; \ struct flow_dissector *__d = (__m)->dissector; \ @@ -397,6 +418,8 @@ int flow_indr_dev_register(flow_indr_block_bind_cb_t *cb, void *cb_priv) existing_qdiscs_register(cb, cb_priv); mutex_unlock(&flow_indr_block_lock); + tcf_action_reoffload_cb(cb, cb_priv, true); + return 0; } EXPORT_SYMBOL(flow_indr_dev_register); @@ -449,6 +472,7 @@ void flow_indr_dev_unregister(flow_indr_block_bind_cb_t *cb, void *cb_priv, __flow_block_indr_cleanup(release, cb_priv, &cleanup_list); mutex_unlock(&flow_indr_block_lock); + tcf_action_reoffload_cb(cb, cb_priv, false); flow_block_indr_notify(&cleanup_list); kfree(indr_dev); } @@ -549,19 +573,25 @@ int flow_indr_dev_setup_offload(struct net_device *dev, struct Qdisc *sch, void (*cleanup)(struct flow_block_cb *block_cb)) { struct flow_indr_dev *this; + u32 count = 0; + int err; mutex_lock(&flow_indr_block_lock); + if (bo) { + if (bo->command == FLOW_BLOCK_BIND) + indir_dev_add(data, dev, sch, type, cleanup, bo); + else if (bo->command == FLOW_BLOCK_UNBIND) + indir_dev_remove(data); + } - if (bo->command == FLOW_BLOCK_BIND) - indir_dev_add(data, dev, sch, type, cleanup, bo); - else if (bo->command == FLOW_BLOCK_UNBIND) - indir_dev_remove(data); - - list_for_each_entry(this, &flow_block_indr_dev_list, list) - this->cb(dev, sch, this->cb_priv, type, bo, data, cleanup); + list_for_each_entry(this, &flow_block_indr_dev_list, list) { + err = this->cb(dev, sch, this->cb_priv, type, bo, data, cleanup); + if (!err) + count++; + } mutex_unlock(&flow_indr_block_lock); - return list_empty(&bo->cb_list) ? -EOPNOTSUPP : 0; + return (bo && list_empty(&bo->cb_list)) ? -EOPNOTSUPP : count; } EXPORT_SYMBOL(flow_indr_dev_setup_offload); diff --git a/net/sched/act_api.c b/net/sched/act_api.c index 3258da3d5bed5..b2f8a393d3c51 100644 --- a/net/sched/act_api.c +++ b/net/sched/act_api.c @@ -19,8 +19,10 @@ #include #include #include +#include #include #include +#include #ifdef CONFIG_INET DEFINE_STATIC_KEY_FALSE(tcf_frag_xmit_count); @@ -129,8 +131,237 @@ static void free_tcf(struct tc_action *p) kfree(p); } +static void offload_action_hw_count_set(struct tc_action *act, + u32 hw_count) +{ + act->in_hw_count = hw_count; +} + +static void offload_action_hw_count_inc(struct tc_action *act, + u32 hw_count) +{ + act->in_hw_count += hw_count; +} + +static void offload_action_hw_count_dec(struct tc_action *act, + u32 hw_count) +{ + act->in_hw_count = act->in_hw_count > hw_count ? + act->in_hw_count - hw_count : 0; +} + +static unsigned int tcf_offload_act_num_actions_single(struct tc_action *act) +{ + if (is_tcf_pedit(act)) + return tcf_pedit_nkeys(act); + else + return 1; +} + +static bool tc_act_skip_hw(u32 flags) +{ + return (flags & TCA_ACT_FLAGS_SKIP_HW) ? true : false; +} + +static bool tc_act_skip_sw(u32 flags) +{ + return (flags & TCA_ACT_FLAGS_SKIP_SW) ? true : false; +} + +static bool tc_act_in_hw(struct tc_action *act) +{ + return !!act->in_hw_count; +} + +/* SKIP_HW and SKIP_SW are mutually exclusive flags. 
*/ +static bool tc_act_flags_valid(u32 flags) +{ + flags &= TCA_ACT_FLAGS_SKIP_HW | TCA_ACT_FLAGS_SKIP_SW; + + return flags ^ (TCA_ACT_FLAGS_SKIP_HW | TCA_ACT_FLAGS_SKIP_SW); +} + +static int offload_action_init(struct flow_offload_action *fl_action, + struct tc_action *act, + enum offload_act_command cmd, + struct netlink_ext_ack *extack) +{ + fl_action->extack = extack; + fl_action->command = cmd; + fl_action->index = act->tcfa_index; + + if (act->ops->offload_act_setup) + return act->ops->offload_act_setup(act, fl_action, NULL, false); + + return -EOPNOTSUPP; +} + +static int tcf_action_offload_cmd_ex(struct flow_offload_action *fl_act, + u32 *hw_count) +{ + int err; + + err = flow_indr_dev_setup_offload(NULL, NULL, TC_SETUP_ACT, + fl_act, NULL, NULL); + if (err < 0) + return err; + + if (hw_count) + *hw_count = err; + + return 0; +} + +static int tcf_action_offload_cmd_cb_ex(struct flow_offload_action *fl_act, + u32 *hw_count, + flow_indr_block_bind_cb_t *cb, + void *cb_priv) +{ + int err; + + err = cb(NULL, NULL, cb_priv, TC_SETUP_ACT, NULL, fl_act, NULL); + if (err < 0) + return err; + + if (hw_count) + *hw_count = 1; + + return 0; +} + +static int tcf_action_offload_cmd(struct flow_offload_action *fl_act, + u32 *hw_count, + flow_indr_block_bind_cb_t *cb, + void *cb_priv) +{ + return cb ? tcf_action_offload_cmd_cb_ex(fl_act, hw_count, + cb, cb_priv) : + tcf_action_offload_cmd_ex(fl_act, hw_count); +} + +static int tcf_action_offload_add_ex(struct tc_action *action, + struct netlink_ext_ack *extack, + flow_indr_block_bind_cb_t *cb, + void *cb_priv) +{ + bool skip_sw = tc_act_skip_sw(action->tcfa_flags); + struct tc_action *actions[TCA_ACT_MAX_PRIO] = { + [0] = action, + }; + struct flow_offload_action *fl_action; + u32 in_hw_count = 0; + int num, err = 0; + + if (tc_act_skip_hw(action->tcfa_flags)) + return 0; + + num = tcf_offload_act_num_actions_single(action); + fl_action = offload_action_alloc(num); + if (!fl_action) + return -ENOMEM; + + err = offload_action_init(fl_action, action, FLOW_ACT_REPLACE, extack); + if (err) + goto fl_err; + + err = tc_setup_action(&fl_action->action, actions); + if (err) { + NL_SET_ERR_MSG_MOD(extack, + "Failed to setup tc actions for offload\n"); + goto fl_err; + } + + err = tcf_action_offload_cmd(fl_action, &in_hw_count, cb, cb_priv); + if (!err) + cb ? 
offload_action_hw_count_inc(action, in_hw_count) : + offload_action_hw_count_set(action, in_hw_count); + + if (skip_sw && !tc_act_in_hw(action)) + err = -EINVAL; + + tc_cleanup_offload_action(&fl_action->action); + +fl_err: + kfree(fl_action); + + return err; +} + +/* offload the tc action after it is inserted */ +static int tcf_action_offload_add(struct tc_action *action, + struct netlink_ext_ack *extack) +{ + return tcf_action_offload_add_ex(action, extack, NULL, NULL); +} + +int tcf_action_update_hw_stats(struct tc_action *action) +{ + struct flow_offload_action fl_act = {}; + int err; + + if (!tc_act_in_hw(action)) + return -EOPNOTSUPP; + + err = offload_action_init(&fl_act, action, FLOW_ACT_STATS, NULL); + if (err) + return err; + + err = tcf_action_offload_cmd(&fl_act, NULL, NULL, NULL); + if (!err) { + preempt_disable(); + tcf_action_stats_update(action, fl_act.stats.bytes, + fl_act.stats.pkts, + fl_act.stats.drops, + fl_act.stats.lastused, + true); + preempt_enable(); + action->used_hw_stats = fl_act.stats.used_hw_stats; + action->used_hw_stats_valid = true; + } else { + return -EOPNOTSUPP; + } + + return 0; +} +EXPORT_SYMBOL(tcf_action_update_hw_stats); + +static int tcf_action_offload_del_ex(struct tc_action *action, + flow_indr_block_bind_cb_t *cb, + void *cb_priv) +{ + struct flow_offload_action fl_act = {}; + u32 in_hw_count = 0; + int err = 0; + + if (!tc_act_in_hw(action)) + return 0; + + err = offload_action_init(&fl_act, action, FLOW_ACT_DESTROY, NULL); + if (err) + return err; + + err = tcf_action_offload_cmd(&fl_act, &in_hw_count, cb, cb_priv); + if (err < 0) + return err; + + if (!cb && action->in_hw_count != in_hw_count) + return -EINVAL; + + /* do not need to update hw state when deleting action */ + if (cb && in_hw_count) + offload_action_hw_count_dec(action, in_hw_count); + + return 0; +} + +static int tcf_action_offload_del(struct tc_action *action) +{ + return tcf_action_offload_del_ex(action, NULL, NULL); +} + static void tcf_action_cleanup(struct tc_action *p) { + tcf_action_offload_del(p); if (p->ops->cleanup) p->ops->cleanup(p); @@ -497,7 +728,7 @@ int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est, p->tcfa_tm.install = jiffies; p->tcfa_tm.lastuse = jiffies; p->tcfa_tm.firstuse = 0; - p->tcfa_flags = flags & TCA_ACT_FLAGS_USER_MASK; + p->tcfa_flags = flags; if (est) { err = gen_new_estimator(&p->tcfa_bstats, p->cpu_bstats, &p->tcfa_rate_est, @@ -622,6 +853,59 @@ EXPORT_SYMBOL(tcf_idrinfo_destroy); static LIST_HEAD(act_base); static DEFINE_RWLOCK(act_mod_lock); +/* since act ops id is stored in pernet subsystem list, + * then there is no way to walk through only all the action + * subsystem, so we keep tc action pernet ops id for + * reoffload to walk through. 
+ */ +static LIST_HEAD(act_pernet_id_list); +static DEFINE_MUTEX(act_id_mutex); +struct tc_act_pernet_id { + struct list_head list; + unsigned int id; +}; + +static int tcf_pernet_add_id_list(unsigned int id) +{ + struct tc_act_pernet_id *id_ptr; + int ret = 0; + + mutex_lock(&act_id_mutex); + list_for_each_entry(id_ptr, &act_pernet_id_list, list) { + if (id_ptr->id == id) { + ret = -EEXIST; + goto err_out; + } + } + + id_ptr = kzalloc(sizeof(*id_ptr), GFP_KERNEL); + if (!id_ptr) { + ret = -ENOMEM; + goto err_out; + } + id_ptr->id = id; + + list_add_tail(&id_ptr->list, &act_pernet_id_list); + +err_out: + mutex_unlock(&act_id_mutex); + return ret; +} + +static void tcf_pernet_del_id_list(unsigned int id) +{ + struct tc_act_pernet_id *id_ptr; + + mutex_lock(&act_id_mutex); + list_for_each_entry(id_ptr, &act_pernet_id_list, list) { + if (id_ptr->id == id) { + list_del(&id_ptr->list); + kfree(id_ptr); + break; + } + } + mutex_unlock(&act_id_mutex); +} int tcf_register_action(struct tc_action_ops *act, struct pernet_operations *ops) @@ -640,18 +924,31 @@ int tcf_register_action(struct tc_action_ops *act, if (ret) return ret; + if (ops->id) { + ret = tcf_pernet_add_id_list(*ops->id); + if (ret) + goto err_id; + } + write_lock(&act_mod_lock); list_for_each_entry(a, &act_base, head) { if (act->id == a->id || (strcmp(act->kind, a->kind) == 0)) { - write_unlock(&act_mod_lock); - unregister_pernet_subsys(ops); - return -EEXIST; + ret = -EEXIST; + goto err_out; } } list_add_tail(&act->head, &act_base); write_unlock(&act_mod_lock); return 0; + +err_out: + write_unlock(&act_mod_lock); + if (ops->id) + tcf_pernet_del_id_list(*ops->id); +err_id: + unregister_pernet_subsys(ops); + return ret; } EXPORT_SYMBOL(tcf_register_action); @@ -670,8 +967,11 @@ int tcf_unregister_action(struct tc_action_ops *act, } } write_unlock(&act_mod_lock); - if (!err) + if (!err) { unregister_pernet_subsys(ops); + if (ops->id) + tcf_pernet_del_id_list(*ops->id); + } return err; } EXPORT_SYMBOL(tcf_unregister_action); @@ -735,6 +1035,9 @@ int tcf_action_exec(struct sk_buff *skb, struct tc_action **actions, jmp_prgcnt -= 1; continue; } + + if (tc_act_skip_sw(a->tcfa_flags)) + continue; repeat: ret = a->ops->act(skb, a, res); if (ret == TC_ACT_REPEAT) @@ -821,6 +1124,7 @@ tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int bind, int ref) int err = -EINVAL; unsigned char *b = skb_tail_pointer(skb); struct nlattr *nest; + u32 flags; if (tcf_action_dump_terse(skb, a, false)) goto nla_put_failure; @@ -835,9 +1139,13 @@ tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int bind, int ref) a->used_hw_stats, TCA_ACT_HW_STATS_ANY)) goto nla_put_failure; - if (a->tcfa_flags && + flags = a->tcfa_flags & TCA_ACT_FLAGS_USER_MASK; + if (flags && nla_put_bitfield32(skb, TCA_ACT_FLAGS, - a->tcfa_flags, a->tcfa_flags)) + flags, flags)) + goto nla_put_failure; + + if (nla_put_u32(skb, TCA_ACT_IN_HW_COUNT, a->in_hw_count)) goto nla_put_failure; nest = nla_nest_start_noflag(skb, TCA_OPTIONS); @@ -919,7 +1227,9 @@ static const struct nla_policy tcf_action_policy[TCA_ACT_MAX + 1] = { [TCA_ACT_COOKIE] = { .type = NLA_BINARY, .len = TC_COOKIE_MAX_SIZE }, [TCA_ACT_OPTIONS] = { .type = NLA_NESTED }, - [TCA_ACT_FLAGS] = NLA_POLICY_BITFIELD32(TCA_ACT_FLAGS_NO_PERCPU_STATS), + [TCA_ACT_FLAGS] = NLA_POLICY_BITFIELD32(TCA_ACT_FLAGS_NO_PERCPU_STATS | + TCA_ACT_FLAGS_SKIP_HW | + TCA_ACT_FLAGS_SKIP_SW), [TCA_ACT_HW_STATS] = NLA_POLICY_BITFIELD32(TCA_ACT_HW_STATS_ANY), }; @@ -1032,8 +1342,13 @@ struct tc_action *tcf_action_init_1(struct net 
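
On the uAPI side, TCA_ACT_FLAGS stays a bitfield32 attribute; the policy above now also accepts TCA_ACT_FLAGS_SKIP_HW and TCA_ACT_FLAGS_SKIP_SW (mutually exclusive, enforced by tc_act_flags_valid()), the dump masks the flags with TCA_ACT_FLAGS_USER_MASK so internal bits never leak, and TCA_ACT_IN_HW_COUNT reports how many hardware callbacks accepted the action. A hypothetical userspace snippet that requests a hardware-only action; netlink message construction via libmnl is assumed, and newer iproute2 exposes the same thing as the skip_sw keyword on actions:

#include <libmnl/libmnl.h>
#include <linux/netlink.h>
#include <linux/pkt_cls.h>

/* Hypothetical: append TCA_ACT_FLAGS with only the SKIP_SW bit set; the
 * selector tells the kernel which bits the request actually covers.
 */
static void put_act_flags_skip_sw(struct nlmsghdr *nlh)
{
        struct nla_bitfield32 flags = {
                .value    = TCA_ACT_FLAGS_SKIP_SW,
                .selector = TCA_ACT_FLAGS_SKIP_SW,
        };

        mnl_attr_put(nlh, TCA_ACT_FLAGS, sizeof(flags), &flags);
}
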
*net, struct tcf_proto *tp, } } hw_stats = tcf_action_hw_stats_get(tb[TCA_ACT_HW_STATS]); - if (tb[TCA_ACT_FLAGS]) + if (tb[TCA_ACT_FLAGS]) { userflags = nla_get_bitfield32(tb[TCA_ACT_FLAGS]); + if (!tc_act_flags_valid(userflags.value)) { + err = -EINVAL; + goto err_out; + } + } err = a_o->init(net, tb[TCA_ACT_OPTIONS], est, &a, tp, userflags.value | flags, extack); @@ -1061,11 +1376,17 @@ struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp, return ERR_PTR(err); } +static bool tc_act_bind(u32 flags) +{ + return !!(flags & TCA_ACT_FLAGS_BIND); +} + /* Returns numbers of initialized actions or negative error. */ int tcf_action_init(struct net *net, struct tcf_proto *tp, struct nlattr *nla, struct nlattr *est, struct tc_action *actions[], - int init_res[], size_t *attr_size, u32 flags, + int init_res[], size_t *attr_size, + u32 flags, u32 fl_flags, struct netlink_ext_ack *extack) { struct tc_action_ops *ops[TCA_ACT_MAX_PRIO] = {}; @@ -1103,6 +1424,22 @@ int tcf_action_init(struct net *net, struct tcf_proto *tp, struct nlattr *nla, sz += tcf_action_fill_size(act); /* Start from index 0 */ actions[i - 1] = act; + if (tc_act_bind(flags)) { + bool skip_sw = tc_skip_sw(fl_flags); + bool skip_hw = tc_skip_hw(fl_flags); + + if (tc_act_bind(act->tcfa_flags)) + continue; + if (skip_sw != tc_act_skip_sw(act->tcfa_flags) || + skip_hw != tc_act_skip_hw(act->tcfa_flags)) { + err = -EINVAL; + goto err; + } + } else { + err = tcf_action_offload_add(act, extack); + if (tc_act_skip_sw(act->tcfa_flags) && err) + goto err; + } } /* We have to commit them all together, because if any error happened in @@ -1154,6 +1491,9 @@ int tcf_action_copy_stats(struct sk_buff *skb, struct tc_action *p, if (p == NULL) goto errout; + /* update hw stats for this action */ + tcf_action_update_hw_stats(p); + /* compat_mode being true specifies a call that is supposed * to add additional backward compatibility statistic TLVs. */ @@ -1395,6 +1735,96 @@ static int tcf_action_delete(struct net *net, struct tc_action *actions[]) return 0; } +static int +tcf_reoffload_del_notify(struct net *net, struct tc_action *action) +{ + size_t attr_size = tcf_action_fill_size(action); + struct tc_action *actions[TCA_ACT_MAX_PRIO] = { + [0] = action, + }; + const struct tc_action_ops *ops = action->ops; + struct sk_buff *skb; + int ret; + + skb = alloc_skb(attr_size <= NLMSG_GOODSIZE ? 
NLMSG_GOODSIZE : attr_size, + GFP_KERNEL); + if (!skb) + return -ENOBUFS; + + if (tca_get_fill(skb, actions, 0, 0, 0, RTM_DELACTION, 0, 1) <= 0) { + kfree_skb(skb); + return -EINVAL; + } + + ret = tcf_idr_release_unsafe(action); + if (ret == ACT_P_DELETED) { + module_put(ops->owner); + ret = rtnetlink_send(skb, net, 0, RTNLGRP_TC, 0); + } else { + kfree_skb(skb); + } + + return ret; +} + +int tcf_action_reoffload_cb(flow_indr_block_bind_cb_t *cb, + void *cb_priv, bool add) +{ + struct tc_act_pernet_id *id_ptr; + struct tcf_idrinfo *idrinfo; + struct tc_action_net *tn; + struct tc_action *p; + unsigned int act_id; + unsigned long tmp; + unsigned long id; + struct idr *idr; + struct net *net; + int ret; + + if (!cb) + return -EINVAL; + + down_read(&net_rwsem); + mutex_lock(&act_id_mutex); + + for_each_net(net) { + list_for_each_entry(id_ptr, &act_pernet_id_list, list) { + act_id = id_ptr->id; + tn = net_generic(net, act_id); + if (!tn) + continue; + idrinfo = tn->idrinfo; + if (!idrinfo) + continue; + + mutex_lock(&idrinfo->lock); + idr = &idrinfo->action_idr; + idr_for_each_entry_ul(idr, p, tmp, id) { + if (IS_ERR(p) || tc_act_bind(p->tcfa_flags)) + continue; + if (add) { + tcf_action_offload_add_ex(p, NULL, cb, + cb_priv); + continue; + } + + /* cb unregister to update hw count */ + ret = tcf_action_offload_del_ex(p, cb, cb_priv); + if (ret < 0) + continue; + if (tc_act_skip_sw(p->tcfa_flags) && + !tc_act_in_hw(p)) + tcf_reoffload_del_notify(net, p); + } + mutex_unlock(&idrinfo->lock); + } + } + mutex_unlock(&act_id_mutex); + up_read(&net_rwsem); + + return 0; +} + static int tcf_del_notify(struct net *net, struct nlmsghdr *n, struct tc_action *actions[], u32 portid, size_t attr_size, struct netlink_ext_ack *extack) @@ -1508,7 +1938,7 @@ static int tcf_action_add(struct net *net, struct nlattr *nla, for (loop = 0; loop < 10; loop++) { ret = tcf_action_init(net, NULL, nla, NULL, actions, init_res, - &attr_size, flags, extack); + &attr_size, flags, 0, extack); if (ret != -EAGAIN) break; } diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c index f2bf896331a59..a77d8908e7372 100644 --- a/net/sched/act_bpf.c +++ b/net/sched/act_bpf.c @@ -305,7 +305,7 @@ static int tcf_bpf_init(struct net *net, struct nlattr *nla, ret = tcf_idr_check_alloc(tn, &index, act, bind); if (!ret) { ret = tcf_idr_create(tn, index, est, act, - &act_bpf_ops, bind, true, 0); + &act_bpf_ops, bind, true, flags); if (ret < 0) { tcf_idr_cleanup(tn, index); return ret; diff --git a/net/sched/act_connmark.c b/net/sched/act_connmark.c index 94e78ac7a7487..09e2aafc8943b 100644 --- a/net/sched/act_connmark.c +++ b/net/sched/act_connmark.c @@ -124,7 +124,7 @@ static int tcf_connmark_init(struct net *net, struct nlattr *nla, ret = tcf_idr_check_alloc(tn, &index, a, bind); if (!ret) { ret = tcf_idr_create(tn, index, est, a, - &act_connmark_ops, bind, false, 0); + &act_connmark_ops, bind, false, flags); if (ret) { tcf_idr_cleanup(tn, index); return ret; diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c index a15ec95e69c36..e0f515b774cad 100644 --- a/net/sched/act_csum.c +++ b/net/sched/act_csum.c @@ -695,6 +695,24 @@ static size_t tcf_csum_get_fill_size(const struct tc_action *act) return nla_total_size(sizeof(struct tc_csum)); } +static int tcf_csum_offload_act_setup(struct tc_action *act, void *entry_data, + u32 *index_inc, bool bind) +{ + if (bind) { + struct flow_action_entry *entry = entry_data; + + entry->id = FLOW_ACTION_CSUM; + entry->csum_flags = tcf_csum_update_flags(act); + *index_inc = 1; + } else { + struct 
flow_offload_action *fl_action = entry_data; + + fl_action->id = FLOW_ACTION_CSUM; + } + + return 0; +} + static struct tc_action_ops act_csum_ops = { .kind = "csum", .id = TCA_ID_CSUM, @@ -706,6 +724,7 @@ static struct tc_action_ops act_csum_ops = { .walk = tcf_csum_walker, .lookup = tcf_csum_search, .get_fill_size = tcf_csum_get_fill_size, + .offload_act_setup = tcf_csum_offload_act_setup, .size = sizeof(struct tcf_csum), }; diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c index ab1810f2e6607..1c537913a189a 100644 --- a/net/sched/act_ct.c +++ b/net/sched/act_ct.c @@ -1493,6 +1493,26 @@ static void tcf_stats_update(struct tc_action *a, u64 bytes, u64 packets, c->tcf_tm.lastuse = max_t(u64, c->tcf_tm.lastuse, lastuse); } +static int tcf_ct_offload_act_setup(struct tc_action *act, void *entry_data, + u32 *index_inc, bool bind) +{ + if (bind) { + struct flow_action_entry *entry = entry_data; + + entry->id = FLOW_ACTION_CT; + entry->ct.action = tcf_ct_action(act); + entry->ct.zone = tcf_ct_zone(act); + entry->ct.flow_table = tcf_ct_ft(act); + *index_inc = 1; + } else { + struct flow_offload_action *fl_action = entry_data; + + fl_action->id = FLOW_ACTION_CT; + } + + return 0; +} + static struct tc_action_ops act_ct_ops = { .kind = "ct", .id = TCA_ID_CT, @@ -1504,6 +1524,7 @@ static struct tc_action_ops act_ct_ops = { .walk = tcf_ct_walker, .lookup = tcf_ct_search, .stats_update = tcf_stats_update, + .offload_act_setup = tcf_ct_offload_act_setup, .size = sizeof(struct tcf_ct), }; diff --git a/net/sched/act_ctinfo.c b/net/sched/act_ctinfo.c index 549374a2d0086..0281e45987a47 100644 --- a/net/sched/act_ctinfo.c +++ b/net/sched/act_ctinfo.c @@ -212,7 +212,7 @@ static int tcf_ctinfo_init(struct net *net, struct nlattr *nla, err = tcf_idr_check_alloc(tn, &index, a, bind); if (!err) { ret = tcf_idr_create(tn, index, est, a, - &act_ctinfo_ops, bind, false, 0); + &act_ctinfo_ops, bind, false, flags); if (ret) { tcf_idr_cleanup(tn, index); return ret; diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c index d8dce173df374..bde6a6c01e64c 100644 --- a/net/sched/act_gact.c +++ b/net/sched/act_gact.c @@ -252,6 +252,43 @@ static size_t tcf_gact_get_fill_size(const struct tc_action *act) return sz; } +static int tcf_gact_offload_act_setup(struct tc_action *act, void *entry_data, + u32 *index_inc, bool bind) +{ + if (bind) { + struct flow_action_entry *entry = entry_data; + + if (is_tcf_gact_ok(act)) { + entry->id = FLOW_ACTION_ACCEPT; + } else if (is_tcf_gact_shot(act)) { + entry->id = FLOW_ACTION_DROP; + } else if (is_tcf_gact_trap(act)) { + entry->id = FLOW_ACTION_TRAP; + } else if (is_tcf_gact_goto_chain(act)) { + entry->id = FLOW_ACTION_GOTO; + entry->chain_index = tcf_gact_goto_chain_index(act); + } else { + return -EOPNOTSUPP; + } + *index_inc = 1; + } else { + struct flow_offload_action *fl_action = entry_data; + + if (is_tcf_gact_ok(act)) + fl_action->id = FLOW_ACTION_ACCEPT; + else if (is_tcf_gact_shot(act)) + fl_action->id = FLOW_ACTION_DROP; + else if (is_tcf_gact_trap(act)) + fl_action->id = FLOW_ACTION_TRAP; + else if (is_tcf_gact_goto_chain(act)) + fl_action->id = FLOW_ACTION_GOTO; + else + return -EOPNOTSUPP; + } + + return 0; +} + static struct tc_action_ops act_gact_ops = { .kind = "gact", .id = TCA_ID_GACT, @@ -263,6 +300,7 @@ static struct tc_action_ops act_gact_ops = { .walk = tcf_gact_walker, .lookup = tcf_gact_search, .get_fill_size = tcf_gact_get_fill_size, + .offload_act_setup = tcf_gact_offload_act_setup, .size = sizeof(struct tcf_gact), }; diff --git a/net/sched/act_gate.c 
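
The converters above (csum, ct, gact, and the ones that follow) all share one shape: with bind == true the callback fills a flow_action_entry for a classifier that is being offloaded and reports how many entries it consumed through *index_inc; with bind == false it only sets the id on the struct flow_offload_action used for standalone action offload. A hedged template for a hypothetical action module; FLOW_ACTION_MARK and tcf_foo_param() are placeholders:

static int tcf_foo_offload_act_setup(struct tc_action *act, void *entry_data,
                                     u32 *index_inc, bool bind)
{
        if (bind) {
                struct flow_action_entry *entry = entry_data;

                entry->id = FLOW_ACTION_MARK;           /* placeholder id */
                entry->mark = tcf_foo_param(act);       /* placeholder accessor */
                *index_inc = 1;                         /* one entry consumed */
        } else {
                struct flow_offload_action *fl_action = entry_data;

                fl_action->id = FLOW_ACTION_MARK;
        }

        return 0;
}

The module then registers the callback through the new .offload_act_setup member of its tc_action_ops, exactly as the real actions below do.
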
b/net/sched/act_gate.c index 7df72a4197a3f..d56e73843a4b7 100644 --- a/net/sched/act_gate.c +++ b/net/sched/act_gate.c @@ -357,7 +357,7 @@ static int tcf_gate_init(struct net *net, struct nlattr *nla, if (!err) { ret = tcf_idr_create(tn, index, est, a, - &act_gate_ops, bind, false, 0); + &act_gate_ops, bind, false, flags); if (ret) { tcf_idr_cleanup(tn, index); return ret; @@ -597,6 +597,54 @@ static size_t tcf_gate_get_fill_size(const struct tc_action *act) return nla_total_size(sizeof(struct tc_gate)); } +static void tcf_gate_entry_destructor(void *priv) +{ + struct action_gate_entry *oe = priv; + + kfree(oe); +} + +static int tcf_gate_get_entries(struct flow_action_entry *entry, + const struct tc_action *act) +{ + entry->gate.entries = tcf_gate_get_list(act); + + if (!entry->gate.entries) + return -EINVAL; + + entry->destructor = tcf_gate_entry_destructor; + entry->destructor_priv = entry->gate.entries; + + return 0; +} + +static int tcf_gate_offload_act_setup(struct tc_action *act, void *entry_data, + u32 *index_inc, bool bind) +{ + int err; + + if (bind) { + struct flow_action_entry *entry = entry_data; + + entry->id = FLOW_ACTION_GATE; + entry->gate.prio = tcf_gate_prio(act); + entry->gate.basetime = tcf_gate_basetime(act); + entry->gate.cycletime = tcf_gate_cycletime(act); + entry->gate.cycletimeext = tcf_gate_cycletimeext(act); + entry->gate.num_entries = tcf_gate_num_entries(act); + err = tcf_gate_get_entries(entry, act); + if (err) + return err; + *index_inc = 1; + } else { + struct flow_offload_action *fl_action = entry_data; + + fl_action->id = FLOW_ACTION_GATE; + } + + return 0; +} + static struct tc_action_ops act_gate_ops = { .kind = "gate", .id = TCA_ID_GATE, @@ -609,6 +657,7 @@ static struct tc_action_ops act_gate_ops = { .stats_update = tcf_gate_stats_update, .get_fill_size = tcf_gate_get_fill_size, .lookup = tcf_gate_search, + .offload_act_setup = tcf_gate_offload_act_setup, .size = sizeof(struct tcf_gate), }; diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c index b757f90a2d589..41ba55e60b1bf 100644 --- a/net/sched/act_ife.c +++ b/net/sched/act_ife.c @@ -553,7 +553,7 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla, if (!exists) { ret = tcf_idr_create(tn, index, est, a, &act_ife_ops, - bind, true, 0); + bind, true, flags); if (ret) { tcf_idr_cleanup(tn, index); kfree(p); diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c index 265b1443e252f..2f3d507c24a1f 100644 --- a/net/sched/act_ipt.c +++ b/net/sched/act_ipt.c @@ -145,7 +145,7 @@ static int __tcf_ipt_init(struct net *net, unsigned int id, struct nlattr *nla, if (!exists) { ret = tcf_idr_create(tn, index, est, a, ops, bind, - false, 0); + false, flags); if (ret) { tcf_idr_cleanup(tn, index); return ret; diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c index 952416bd65e6a..39acd1d186098 100644 --- a/net/sched/act_mirred.c +++ b/net/sched/act_mirred.c @@ -450,6 +450,55 @@ static size_t tcf_mirred_get_fill_size(const struct tc_action *act) return nla_total_size(sizeof(struct tc_mirred)); } +static void tcf_offload_mirred_get_dev(struct flow_action_entry *entry, + const struct tc_action *act) +{ + entry->dev = act->ops->get_dev(act, &entry->destructor); + if (!entry->dev) + return; + entry->destructor_priv = entry->dev; +} + +static int tcf_mirred_offload_act_setup(struct tc_action *act, void *entry_data, + u32 *index_inc, bool bind) +{ + if (bind) { + struct flow_action_entry *entry = entry_data; + + if (is_tcf_mirred_egress_redirect(act)) { + entry->id = FLOW_ACTION_REDIRECT; + 
tcf_offload_mirred_get_dev(entry, act); + } else if (is_tcf_mirred_egress_mirror(act)) { + entry->id = FLOW_ACTION_MIRRED; + tcf_offload_mirred_get_dev(entry, act); + } else if (is_tcf_mirred_ingress_redirect(act)) { + entry->id = FLOW_ACTION_REDIRECT_INGRESS; + tcf_offload_mirred_get_dev(entry, act); + } else if (is_tcf_mirred_ingress_mirror(act)) { + entry->id = FLOW_ACTION_MIRRED_INGRESS; + tcf_offload_mirred_get_dev(entry, act); + } else { + return -EOPNOTSUPP; + } + *index_inc = 1; + } else { + struct flow_offload_action *fl_action = entry_data; + + if (is_tcf_mirred_egress_redirect(act)) + fl_action->id = FLOW_ACTION_REDIRECT; + else if (is_tcf_mirred_egress_mirror(act)) + fl_action->id = FLOW_ACTION_MIRRED; + else if (is_tcf_mirred_ingress_redirect(act)) + fl_action->id = FLOW_ACTION_REDIRECT_INGRESS; + else if (is_tcf_mirred_ingress_mirror(act)) + fl_action->id = FLOW_ACTION_MIRRED_INGRESS; + else + return -EOPNOTSUPP; + } + + return 0; +} + static struct tc_action_ops act_mirred_ops = { .kind = "mirred", .id = TCA_ID_MIRRED, @@ -462,6 +511,7 @@ static struct tc_action_ops act_mirred_ops = { .walk = tcf_mirred_walker, .lookup = tcf_mirred_search, .get_fill_size = tcf_mirred_get_fill_size, + .offload_act_setup = tcf_mirred_offload_act_setup, .size = sizeof(struct tcf_mirred), .get_dev = tcf_mirred_get_dev, }; diff --git a/net/sched/act_mpls.c b/net/sched/act_mpls.c index 8faa4c58305e3..b9ff3459fdab9 100644 --- a/net/sched/act_mpls.c +++ b/net/sched/act_mpls.c @@ -248,7 +248,7 @@ static int tcf_mpls_init(struct net *net, struct nlattr *nla, if (!exists) { ret = tcf_idr_create(tn, index, est, a, - &act_mpls_ops, bind, true, 0); + &act_mpls_ops, bind, true, flags); if (ret) { tcf_idr_cleanup(tn, index); return ret; @@ -384,6 +384,57 @@ static int tcf_mpls_search(struct net *net, struct tc_action **a, u32 index) return tcf_idr_search(tn, a, index); } +static int tcf_mpls_offload_act_setup(struct tc_action *act, void *entry_data, + u32 *index_inc, bool bind) +{ + if (bind) { + struct flow_action_entry *entry = entry_data; + + switch (tcf_mpls_action(act)) { + case TCA_MPLS_ACT_PUSH: + entry->id = FLOW_ACTION_MPLS_PUSH; + entry->mpls_push.proto = tcf_mpls_proto(act); + entry->mpls_push.label = tcf_mpls_label(act); + entry->mpls_push.tc = tcf_mpls_tc(act); + entry->mpls_push.bos = tcf_mpls_bos(act); + entry->mpls_push.ttl = tcf_mpls_ttl(act); + break; + case TCA_MPLS_ACT_POP: + entry->id = FLOW_ACTION_MPLS_POP; + entry->mpls_pop.proto = tcf_mpls_proto(act); + break; + case TCA_MPLS_ACT_MODIFY: + entry->id = FLOW_ACTION_MPLS_MANGLE; + entry->mpls_mangle.label = tcf_mpls_label(act); + entry->mpls_mangle.tc = tcf_mpls_tc(act); + entry->mpls_mangle.bos = tcf_mpls_bos(act); + entry->mpls_mangle.ttl = tcf_mpls_ttl(act); + break; + default: + return -EOPNOTSUPP; + } + *index_inc = 1; + } else { + struct flow_offload_action *fl_action = entry_data; + + switch (tcf_mpls_action(act)) { + case TCA_MPLS_ACT_PUSH: + fl_action->id = FLOW_ACTION_MPLS_PUSH; + break; + case TCA_MPLS_ACT_POP: + fl_action->id = FLOW_ACTION_MPLS_POP; + break; + case TCA_MPLS_ACT_MODIFY: + fl_action->id = FLOW_ACTION_MPLS_MANGLE; + break; + default: + return -EOPNOTSUPP; + } + } + + return 0; +} + static struct tc_action_ops act_mpls_ops = { .kind = "mpls", .id = TCA_ID_MPLS, @@ -394,6 +445,7 @@ static struct tc_action_ops act_mpls_ops = { .cleanup = tcf_mpls_cleanup, .walk = tcf_mpls_walker, .lookup = tcf_mpls_search, + .offload_act_setup = tcf_mpls_offload_act_setup, .size = sizeof(struct tcf_mpls), }; diff --git 
a/net/sched/act_nat.c b/net/sched/act_nat.c index 7dd6b586ba7f6..2a39b3729e844 100644 --- a/net/sched/act_nat.c +++ b/net/sched/act_nat.c @@ -61,7 +61,7 @@ static int tcf_nat_init(struct net *net, struct nlattr *nla, struct nlattr *est, err = tcf_idr_check_alloc(tn, &index, a, bind); if (!err) { ret = tcf_idr_create(tn, index, est, a, - &act_nat_ops, bind, false, 0); + &act_nat_ops, bind, false, flags); if (ret) { tcf_idr_cleanup(tn, index); return ret; diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c index c6c862c459cc3..31fcd279c1776 100644 --- a/net/sched/act_pedit.c +++ b/net/sched/act_pedit.c @@ -189,7 +189,7 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla, err = tcf_idr_check_alloc(tn, &index, a, bind); if (!err) { ret = tcf_idr_create(tn, index, est, a, - &act_pedit_ops, bind, false, 0); + &act_pedit_ops, bind, false, flags); if (ret) { tcf_idr_cleanup(tn, index); goto out_free; @@ -487,6 +487,39 @@ static int tcf_pedit_search(struct net *net, struct tc_action **a, u32 index) return tcf_idr_search(tn, a, index); } +static int tcf_pedit_offload_act_setup(struct tc_action *act, void *entry_data, + u32 *index_inc, bool bind) +{ + if (bind) { + struct flow_action_entry *entry = entry_data; + int k; + + for (k = 0; k < tcf_pedit_nkeys(act); k++) { + switch (tcf_pedit_cmd(act, k)) { + case TCA_PEDIT_KEY_EX_CMD_SET: + entry->id = FLOW_ACTION_MANGLE; + break; + case TCA_PEDIT_KEY_EX_CMD_ADD: + entry->id = FLOW_ACTION_ADD; + break; + default: + return -EOPNOTSUPP; + } + entry->mangle.htype = tcf_pedit_htype(act, k); + entry->mangle.mask = tcf_pedit_mask(act, k); + entry->mangle.val = tcf_pedit_val(act, k); + entry->mangle.offset = tcf_pedit_offset(act, k); + entry->hw_stats = tc_act_hw_stats(act->hw_stats); + entry++; + } + *index_inc = k; + } else { + return -EOPNOTSUPP; + } + + return 0; +} + static struct tc_action_ops act_pedit_ops = { .kind = "pedit", .id = TCA_ID_PEDIT, @@ -498,6 +531,7 @@ static struct tc_action_ops act_pedit_ops = { .init = tcf_pedit_init, .walk = tcf_pedit_walker, .lookup = tcf_pedit_search, + .offload_act_setup = tcf_pedit_offload_act_setup, .size = sizeof(struct tcf_pedit), }; diff --git a/net/sched/act_police.c b/net/sched/act_police.c index 9e77ba8401e53..0923aa2b8f8a7 100644 --- a/net/sched/act_police.c +++ b/net/sched/act_police.c @@ -90,7 +90,7 @@ static int tcf_police_init(struct net *net, struct nlattr *nla, if (!exists) { ret = tcf_idr_create(tn, index, NULL, a, - &act_police_ops, bind, true, 0); + &act_police_ops, bind, true, flags); if (ret) { tcf_idr_cleanup(tn, index); return ret; @@ -405,6 +405,30 @@ static int tcf_police_search(struct net *net, struct tc_action **a, u32 index) return tcf_idr_search(tn, a, index); } +static int tcf_police_offload_act_setup(struct tc_action *act, void *entry_data, + u32 *index_inc, bool bind) +{ + if (bind) { + struct flow_action_entry *entry = entry_data; + + entry->id = FLOW_ACTION_POLICE; + entry->police.burst = tcf_police_burst(act); + entry->police.rate_bytes_ps = + tcf_police_rate_bytes_ps(act); + entry->police.burst_pkt = tcf_police_burst_pkt(act); + entry->police.rate_pkt_ps = + tcf_police_rate_pkt_ps(act); + entry->police.mtu = tcf_police_tcfp_mtu(act); + *index_inc = 1; + } else { + struct flow_offload_action *fl_action = entry_data; + + fl_action->id = FLOW_ACTION_POLICE; + } + + return 0; +} + MODULE_AUTHOR("Alexey Kuznetsov"); MODULE_DESCRIPTION("Policing actions"); MODULE_LICENSE("GPL"); @@ -420,6 +444,7 @@ static struct tc_action_ops act_police_ops = { .walk = 
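
pedit is the one converter that expands into several flow_action entries (one per pedit key), which is why the callback reports back through *index_inc and why tcf_offload_act_num_actions_single() earlier in the patch sizes the allocation with tcf_pedit_nkeys(). The following is not verbatim kernel code, only a paraphrase of how the entry cursor is expected to advance:

struct tc_action *act;
int i, j = 0, err;

tcf_act_for_each_action(i, act, actions) {
        struct flow_action_entry *entry = &flow_action->entries[j];
        u32 index_inc = 0;

        entry->hw_stats = tc_act_hw_stats(act->hw_stats);
        err = act->ops->offload_act_setup(act, entry, &index_inc, true);
        if (err)
                break;

        j += index_inc; /* e.g. 2 for a two-key pedit, 1 for everything else */
}
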
tcf_police_walker, .lookup = tcf_police_search, .cleanup = tcf_police_cleanup, + .offload_act_setup = tcf_police_offload_act_setup, .size = sizeof(struct tcf_police), }; diff --git a/net/sched/act_sample.c b/net/sched/act_sample.c index ce859b0e0deb9..9a22cdda6bbdc 100644 --- a/net/sched/act_sample.c +++ b/net/sched/act_sample.c @@ -70,7 +70,7 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla, if (!exists) { ret = tcf_idr_create(tn, index, est, a, - &act_sample_ops, bind, true, 0); + &act_sample_ops, bind, true, flags); if (ret) { tcf_idr_cleanup(tn, index); return ret; @@ -282,6 +282,35 @@ tcf_sample_get_group(const struct tc_action *a, return group; } +static void tcf_offload_sample_get_group(struct flow_action_entry *entry, + const struct tc_action *act) +{ + entry->sample.psample_group = + act->ops->get_psample_group(act, &entry->destructor); + entry->destructor_priv = entry->sample.psample_group; +} + +static int tcf_sample_offload_act_setup(struct tc_action *act, void *entry_data, + u32 *index_inc, bool bind) +{ + if (bind) { + struct flow_action_entry *entry = entry_data; + + entry->id = FLOW_ACTION_SAMPLE; + entry->sample.trunc_size = tcf_sample_trunc_size(act); + entry->sample.truncate = tcf_sample_truncate(act); + entry->sample.rate = tcf_sample_rate(act); + tcf_offload_sample_get_group(entry, act); + *index_inc = 1; + } else { + struct flow_offload_action *fl_action = entry_data; + + fl_action->id = FLOW_ACTION_SAMPLE; + } + + return 0; +} + static struct tc_action_ops act_sample_ops = { .kind = "sample", .id = TCA_ID_SAMPLE, @@ -294,6 +323,7 @@ static struct tc_action_ops act_sample_ops = { .walk = tcf_sample_walker, .lookup = tcf_sample_search, .get_psample_group = tcf_sample_get_group, + .offload_act_setup = tcf_sample_offload_act_setup, .size = sizeof(struct tcf_sample), }; diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c index e617ab4505ca4..8c1d60bde93e0 100644 --- a/net/sched/act_simple.c +++ b/net/sched/act_simple.c @@ -129,7 +129,7 @@ static int tcf_simp_init(struct net *net, struct nlattr *nla, if (!exists) { ret = tcf_idr_create(tn, index, est, a, - &act_simp_ops, bind, false, 0); + &act_simp_ops, bind, false, flags); if (ret) { tcf_idr_cleanup(tn, index); return ret; diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c index d30ecbfc8f846..ceba11b198bba 100644 --- a/net/sched/act_skbedit.c +++ b/net/sched/act_skbedit.c @@ -176,7 +176,7 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla, if (!exists) { ret = tcf_idr_create(tn, index, est, a, - &act_skbedit_ops, bind, true, 0); + &act_skbedit_ops, bind, true, act_flags); if (ret) { tcf_idr_cleanup(tn, index); return ret; @@ -327,6 +327,41 @@ static size_t tcf_skbedit_get_fill_size(const struct tc_action *act) + nla_total_size_64bit(sizeof(u64)); /* TCA_SKBEDIT_FLAGS */ } +static int tcf_skbedit_offload_act_setup(struct tc_action *act, void *entry_data, + u32 *index_inc, bool bind) +{ + if (bind) { + struct flow_action_entry *entry = entry_data; + + if (is_tcf_skbedit_mark(act)) { + entry->id = FLOW_ACTION_MARK; + entry->mark = tcf_skbedit_mark(act); + } else if (is_tcf_skbedit_ptype(act)) { + entry->id = FLOW_ACTION_PTYPE; + entry->ptype = tcf_skbedit_ptype(act); + } else if (is_tcf_skbedit_priority(act)) { + entry->id = FLOW_ACTION_PRIORITY; + entry->priority = tcf_skbedit_priority(act); + } else { + return -EOPNOTSUPP; + } + *index_inc = 1; + } else { + struct flow_offload_action *fl_action = entry_data; + + if (is_tcf_skbedit_mark(act)) + fl_action->id = 
FLOW_ACTION_MARK; + else if (is_tcf_skbedit_ptype(act)) + fl_action->id = FLOW_ACTION_PTYPE; + else if (is_tcf_skbedit_priority(act)) + fl_action->id = FLOW_ACTION_PRIORITY; + else + return -EOPNOTSUPP; + } + + return 0; +} + static struct tc_action_ops act_skbedit_ops = { .kind = "skbedit", .id = TCA_ID_SKBEDIT, @@ -339,6 +374,7 @@ static struct tc_action_ops act_skbedit_ops = { .walk = tcf_skbedit_walker, .get_fill_size = tcf_skbedit_get_fill_size, .lookup = tcf_skbedit_search, + .offload_act_setup = tcf_skbedit_offload_act_setup, .size = sizeof(struct tcf_skbedit), }; diff --git a/net/sched/act_skbmod.c b/net/sched/act_skbmod.c index 9b6b52c5e24ec..2083612d87803 100644 --- a/net/sched/act_skbmod.c +++ b/net/sched/act_skbmod.c @@ -168,7 +168,7 @@ static int tcf_skbmod_init(struct net *net, struct nlattr *nla, if (!exists) { ret = tcf_idr_create(tn, index, est, a, - &act_skbmod_ops, bind, true, 0); + &act_skbmod_ops, bind, true, flags); if (ret) { tcf_idr_cleanup(tn, index); return ret; diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c index d9cd174eecb79..23aba03d26a8d 100644 --- a/net/sched/act_tunnel_key.c +++ b/net/sched/act_tunnel_key.c @@ -787,6 +787,59 @@ static int tunnel_key_search(struct net *net, struct tc_action **a, u32 index) return tcf_idr_search(tn, a, index); } +static void tcf_tunnel_encap_put_tunnel(void *priv) +{ + struct ip_tunnel_info *tunnel = priv; + + kfree(tunnel); +} + +static int tcf_tunnel_encap_get_tunnel(struct flow_action_entry *entry, + const struct tc_action *act) +{ + entry->tunnel = tcf_tunnel_info_copy(act); + if (!entry->tunnel) + return -ENOMEM; + entry->destructor = tcf_tunnel_encap_put_tunnel; + entry->destructor_priv = entry->tunnel; + return 0; +} + +static int tcf_tunnel_key_offload_act_setup(struct tc_action *act, + void *entry_data, + u32 *index_inc, + bool bind) +{ + int err; + + if (bind) { + struct flow_action_entry *entry = entry_data; + + if (is_tcf_tunnel_set(act)) { + entry->id = FLOW_ACTION_TUNNEL_ENCAP; + err = tcf_tunnel_encap_get_tunnel(entry, act); + if (err) + return err; + } else if (is_tcf_tunnel_release(act)) { + entry->id = FLOW_ACTION_TUNNEL_DECAP; + } else { + return -EOPNOTSUPP; + } + *index_inc = 1; + } else { + struct flow_offload_action *fl_action = entry_data; + + if (is_tcf_tunnel_set(act)) + fl_action->id = FLOW_ACTION_TUNNEL_ENCAP; + else if (is_tcf_tunnel_release(act)) + fl_action->id = FLOW_ACTION_TUNNEL_DECAP; + else + return -EOPNOTSUPP; + } + + return 0; +} + static struct tc_action_ops act_tunnel_key_ops = { .kind = "tunnel_key", .id = TCA_ID_TUNNEL_KEY, @@ -797,6 +850,7 @@ static struct tc_action_ops act_tunnel_key_ops = { .cleanup = tunnel_key_release, .walk = tunnel_key_walker, .lookup = tunnel_key_search, + .offload_act_setup = tcf_tunnel_key_offload_act_setup, .size = sizeof(struct tcf_tunnel_key), }; diff --git a/net/sched/act_vlan.c b/net/sched/act_vlan.c index e4dc5a555bd85..756e2dcde1cd8 100644 --- a/net/sched/act_vlan.c +++ b/net/sched/act_vlan.c @@ -368,6 +368,53 @@ static size_t tcf_vlan_get_fill_size(const struct tc_action *act) + nla_total_size(sizeof(u8)); /* TCA_VLAN_PUSH_VLAN_PRIORITY */ } +static int tcf_vlan_offload_act_setup(struct tc_action *act, void *entry_data, + u32 *index_inc, bool bind) +{ + if (bind) { + struct flow_action_entry *entry = entry_data; + + switch (tcf_vlan_action(act)) { + case TCA_VLAN_ACT_PUSH: + entry->id = FLOW_ACTION_VLAN_PUSH; + entry->vlan.vid = tcf_vlan_push_vid(act); + entry->vlan.proto = tcf_vlan_push_proto(act); + entry->vlan.prio = 
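
Converters that attach a resource to the entry (mirred's held net_device, tunnel_key's ip_tunnel_info copy, sample's psample group, gate's entry list) also set entry->destructor and entry->destructor_priv so the resource can be released after the offload call. A hedged template; struct foo_blob and foo_blob_copy() are hypothetical:

static void tcf_foo_entry_destructor(void *priv)
{
        kfree(priv);
}

static int tcf_foo_fill_entry(struct flow_action_entry *entry,
                              const struct tc_action *act)
{
        struct foo_blob *blob = foo_blob_copy(act);     /* hypothetical copy */

        if (!blob)
                return -ENOMEM;

        entry->destructor = tcf_foo_entry_destructor;
        entry->destructor_priv = blob;
        return 0;
}

tc_cleanup_offload_action() then walks the entries and calls each non-NULL destructor, which is why these helpers move out of cls_api.c and into the individual action modules in the hunks below.
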
tcf_vlan_push_prio(act); + break; + case TCA_VLAN_ACT_POP: + entry->id = FLOW_ACTION_VLAN_POP; + break; + case TCA_VLAN_ACT_MODIFY: + entry->id = FLOW_ACTION_VLAN_MANGLE; + entry->vlan.vid = tcf_vlan_push_vid(act); + entry->vlan.proto = tcf_vlan_push_proto(act); + entry->vlan.prio = tcf_vlan_push_prio(act); + break; + default: + return -EOPNOTSUPP; + } + *index_inc = 1; + } else { + struct flow_offload_action *fl_action = entry_data; + + switch (tcf_vlan_action(act)) { + case TCA_VLAN_ACT_PUSH: + fl_action->id = FLOW_ACTION_VLAN_PUSH; + break; + case TCA_VLAN_ACT_POP: + fl_action->id = FLOW_ACTION_VLAN_POP; + break; + case TCA_VLAN_ACT_MODIFY: + fl_action->id = FLOW_ACTION_VLAN_MANGLE; + break; + default: + return -EOPNOTSUPP; + } + } + + return 0; +} + static struct tc_action_ops act_vlan_ops = { .kind = "vlan", .id = TCA_ID_VLAN, @@ -380,6 +427,7 @@ static struct tc_action_ops act_vlan_ops = { .stats_update = tcf_vlan_stats_update, .get_fill_size = tcf_vlan_get_fill_size, .lookup = tcf_vlan_search, + .offload_act_setup = tcf_vlan_offload_act_setup, .size = sizeof(struct tcf_vlan), }; diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index e54f0a42270c1..e3e26d358c7fb 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c @@ -3025,9 +3025,9 @@ void tcf_exts_destroy(struct tcf_exts *exts) } EXPORT_SYMBOL(tcf_exts_destroy); -int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb, - struct nlattr *rate_tlv, struct tcf_exts *exts, - u32 flags, struct netlink_ext_ack *extack) +int tcf_exts_validate_ex(struct net *net, struct tcf_proto *tp, struct nlattr **tb, + struct nlattr *rate_tlv, struct tcf_exts *exts, + u32 flags, u32 fl_flags, struct netlink_ext_ack *extack) { #ifdef CONFIG_NET_CLS_ACT { @@ -3061,7 +3061,8 @@ int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb, flags |= TCA_ACT_FLAGS_BIND; err = tcf_action_init(net, tp, tb[exts->action], rate_tlv, exts->actions, init_res, - &attr_size, flags, extack); + &attr_size, flags, fl_flags, + extack); if (err < 0) return err; exts->nr_actions = err; @@ -3077,6 +3078,15 @@ int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb, return 0; } +EXPORT_SYMBOL(tcf_exts_validate_ex); + +int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb, + struct nlattr *rate_tlv, struct tcf_exts *exts, + u32 flags, struct netlink_ext_ack *extack) +{ + return tcf_exts_validate_ex(net, tp, tb, rate_tlv, exts, + flags, 0, extack); +} EXPORT_SYMBOL(tcf_exts_validate); void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src) @@ -3461,7 +3471,7 @@ static void tcf_act_put_cookie(struct flow_action_entry *entry) flow_action_cookie_destroy(entry->cookie); } -void tc_cleanup_flow_action(struct flow_action *flow_action) +void tc_cleanup_offload_action(struct flow_action *flow_action) { struct flow_action_entry *entry; int i; @@ -3472,93 +3482,37 @@ void tc_cleanup_flow_action(struct flow_action *flow_action) entry->destructor(entry->destructor_priv); } } -EXPORT_SYMBOL(tc_cleanup_flow_action); +EXPORT_SYMBOL(tc_cleanup_offload_action); -static void tcf_mirred_get_dev(struct flow_action_entry *entry, - const struct tc_action *act) +static int tc_setup_offload_act(struct tc_action *act, + struct flow_action_entry *entry, + u32 *index_inc) { #ifdef CONFIG_NET_CLS_ACT - entry->dev = act->ops->get_dev(act, &entry->destructor); - if (!entry->dev) - return; - entry->destructor_priv = entry->dev; -#endif -} - -static void tcf_tunnel_encap_put_tunnel(void *priv) -{ 
- struct ip_tunnel_info *tunnel = priv; - - kfree(tunnel); -} - -static int tcf_tunnel_encap_get_tunnel(struct flow_action_entry *entry, - const struct tc_action *act) -{ - entry->tunnel = tcf_tunnel_info_copy(act); - if (!entry->tunnel) - return -ENOMEM; - entry->destructor = tcf_tunnel_encap_put_tunnel; - entry->destructor_priv = entry->tunnel; + if (act->ops->offload_act_setup) + return act->ops->offload_act_setup(act, entry, index_inc, true); + else + return -EOPNOTSUPP; +#else return 0; -} - -static void tcf_sample_get_group(struct flow_action_entry *entry, - const struct tc_action *act) -{ -#ifdef CONFIG_NET_CLS_ACT - entry->sample.psample_group = - act->ops->get_psample_group(act, &entry->destructor); - entry->destructor_priv = entry->sample.psample_group; #endif } -static void tcf_gate_entry_destructor(void *priv) -{ - struct action_gate_entry *oe = priv; - - kfree(oe); -} - -static int tcf_gate_get_entries(struct flow_action_entry *entry, - const struct tc_action *act) -{ - entry->gate.entries = tcf_gate_get_list(act); - - if (!entry->gate.entries) - return -EINVAL; - - entry->destructor = tcf_gate_entry_destructor; - entry->destructor_priv = entry->gate.entries; - - return 0; -} - -static enum flow_action_hw_stats tc_act_hw_stats(u8 hw_stats) -{ - if (WARN_ON_ONCE(hw_stats > TCA_ACT_HW_STATS_ANY)) - return FLOW_ACTION_HW_STATS_DONT_CARE; - else if (!hw_stats) - return FLOW_ACTION_HW_STATS_DISABLED; - - return hw_stats; -} - -int tc_setup_flow_action(struct flow_action *flow_action, - const struct tcf_exts *exts) +int tc_setup_action(struct flow_action *flow_action, + struct tc_action *actions[]) { + int i, j, index, err = 0; struct tc_action *act; - int i, j, k, err = 0; BUILD_BUG_ON(TCA_ACT_HW_STATS_ANY != FLOW_ACTION_HW_STATS_ANY); BUILD_BUG_ON(TCA_ACT_HW_STATS_IMMEDIATE != FLOW_ACTION_HW_STATS_IMMEDIATE); BUILD_BUG_ON(TCA_ACT_HW_STATS_DELAYED != FLOW_ACTION_HW_STATS_DELAYED); - if (!exts) + if (!actions) return 0; j = 0; - tcf_exts_for_each_action(i, act, exts) { + tcf_act_for_each_action(i, act, actions) { struct flow_action_entry *entry; entry = &flow_action->entries[j]; @@ -3568,165 +3522,39 @@ int tc_setup_flow_action(struct flow_action *flow_action, goto err_out_locked; entry->hw_stats = tc_act_hw_stats(act->hw_stats); - - if (is_tcf_gact_ok(act)) { - entry->id = FLOW_ACTION_ACCEPT; - } else if (is_tcf_gact_shot(act)) { - entry->id = FLOW_ACTION_DROP; - } else if (is_tcf_gact_trap(act)) { - entry->id = FLOW_ACTION_TRAP; - } else if (is_tcf_gact_goto_chain(act)) { - entry->id = FLOW_ACTION_GOTO; - entry->chain_index = tcf_gact_goto_chain_index(act); - } else if (is_tcf_mirred_egress_redirect(act)) { - entry->id = FLOW_ACTION_REDIRECT; - tcf_mirred_get_dev(entry, act); - } else if (is_tcf_mirred_egress_mirror(act)) { - entry->id = FLOW_ACTION_MIRRED; - tcf_mirred_get_dev(entry, act); - } else if (is_tcf_mirred_ingress_redirect(act)) { - entry->id = FLOW_ACTION_REDIRECT_INGRESS; - tcf_mirred_get_dev(entry, act); - } else if (is_tcf_mirred_ingress_mirror(act)) { - entry->id = FLOW_ACTION_MIRRED_INGRESS; - tcf_mirred_get_dev(entry, act); - } else if (is_tcf_vlan(act)) { - switch (tcf_vlan_action(act)) { - case TCA_VLAN_ACT_PUSH: - entry->id = FLOW_ACTION_VLAN_PUSH; - entry->vlan.vid = tcf_vlan_push_vid(act); - entry->vlan.proto = tcf_vlan_push_proto(act); - entry->vlan.prio = tcf_vlan_push_prio(act); - break; - case TCA_VLAN_ACT_POP: - entry->id = FLOW_ACTION_VLAN_POP; - break; - case TCA_VLAN_ACT_MODIFY: - entry->id = FLOW_ACTION_VLAN_MANGLE; - entry->vlan.vid = 
tcf_vlan_push_vid(act); - entry->vlan.proto = tcf_vlan_push_proto(act); - entry->vlan.prio = tcf_vlan_push_prio(act); - break; - default: - err = -EOPNOTSUPP; - goto err_out_locked; - } - } else if (is_tcf_tunnel_set(act)) { - entry->id = FLOW_ACTION_TUNNEL_ENCAP; - err = tcf_tunnel_encap_get_tunnel(entry, act); - if (err) - goto err_out_locked; - } else if (is_tcf_tunnel_release(act)) { - entry->id = FLOW_ACTION_TUNNEL_DECAP; - } else if (is_tcf_pedit(act)) { - for (k = 0; k < tcf_pedit_nkeys(act); k++) { - switch (tcf_pedit_cmd(act, k)) { - case TCA_PEDIT_KEY_EX_CMD_SET: - entry->id = FLOW_ACTION_MANGLE; - break; - case TCA_PEDIT_KEY_EX_CMD_ADD: - entry->id = FLOW_ACTION_ADD; - break; - default: - err = -EOPNOTSUPP; - goto err_out_locked; - } - entry->mangle.htype = tcf_pedit_htype(act, k); - entry->mangle.mask = tcf_pedit_mask(act, k); - entry->mangle.val = tcf_pedit_val(act, k); - entry->mangle.offset = tcf_pedit_offset(act, k); - entry->hw_stats = tc_act_hw_stats(act->hw_stats); - entry = &flow_action->entries[++j]; - } - } else if (is_tcf_csum(act)) { - entry->id = FLOW_ACTION_CSUM; - entry->csum_flags = tcf_csum_update_flags(act); - } else if (is_tcf_skbedit_mark(act)) { - entry->id = FLOW_ACTION_MARK; - entry->mark = tcf_skbedit_mark(act); - } else if (is_tcf_sample(act)) { - entry->id = FLOW_ACTION_SAMPLE; - entry->sample.trunc_size = tcf_sample_trunc_size(act); - entry->sample.truncate = tcf_sample_truncate(act); - entry->sample.rate = tcf_sample_rate(act); - tcf_sample_get_group(entry, act); - } else if (is_tcf_police(act)) { - entry->id = FLOW_ACTION_POLICE; - entry->police.burst = tcf_police_burst(act); - entry->police.rate_bytes_ps = - tcf_police_rate_bytes_ps(act); - entry->police.burst_pkt = tcf_police_burst_pkt(act); - entry->police.rate_pkt_ps = - tcf_police_rate_pkt_ps(act); - entry->police.mtu = tcf_police_tcfp_mtu(act); - entry->police.index = act->tcfa_index; - } else if (is_tcf_ct(act)) { - entry->id = FLOW_ACTION_CT; - entry->ct.action = tcf_ct_action(act); - entry->ct.zone = tcf_ct_zone(act); - entry->ct.flow_table = tcf_ct_ft(act); - } else if (is_tcf_mpls(act)) { - switch (tcf_mpls_action(act)) { - case TCA_MPLS_ACT_PUSH: - entry->id = FLOW_ACTION_MPLS_PUSH; - entry->mpls_push.proto = tcf_mpls_proto(act); - entry->mpls_push.label = tcf_mpls_label(act); - entry->mpls_push.tc = tcf_mpls_tc(act); - entry->mpls_push.bos = tcf_mpls_bos(act); - entry->mpls_push.ttl = tcf_mpls_ttl(act); - break; - case TCA_MPLS_ACT_POP: - entry->id = FLOW_ACTION_MPLS_POP; - entry->mpls_pop.proto = tcf_mpls_proto(act); - break; - case TCA_MPLS_ACT_MODIFY: - entry->id = FLOW_ACTION_MPLS_MANGLE; - entry->mpls_mangle.label = tcf_mpls_label(act); - entry->mpls_mangle.tc = tcf_mpls_tc(act); - entry->mpls_mangle.bos = tcf_mpls_bos(act); - entry->mpls_mangle.ttl = tcf_mpls_ttl(act); - break; - default: - err = -EOPNOTSUPP; - goto err_out_locked; - } - } else if (is_tcf_skbedit_ptype(act)) { - entry->id = FLOW_ACTION_PTYPE; - entry->ptype = tcf_skbedit_ptype(act); - } else if (is_tcf_skbedit_priority(act)) { - entry->id = FLOW_ACTION_PRIORITY; - entry->priority = tcf_skbedit_priority(act); - } else if (is_tcf_gate(act)) { - entry->id = FLOW_ACTION_GATE; - entry->gate.index = tcf_gate_index(act); - entry->gate.prio = tcf_gate_prio(act); - entry->gate.basetime = tcf_gate_basetime(act); - entry->gate.cycletime = tcf_gate_cycletime(act); - entry->gate.cycletimeext = tcf_gate_cycletimeext(act); - entry->gate.num_entries = tcf_gate_num_entries(act); - err = tcf_gate_get_entries(entry, act); - if (err) 
- goto err_out_locked; - } else { - err = -EOPNOTSUPP; + entry->hw_index = act->tcfa_index; + index = 0; + err = tc_setup_offload_act(act, entry, &index); + if (!err) + j += index; + else goto err_out_locked; - } spin_unlock_bh(&act->tcfa_lock); - - if (!is_tcf_pedit(act)) - j++; } err_out: if (err) - tc_cleanup_flow_action(flow_action); + tc_cleanup_offload_action(flow_action); return err; err_out_locked: spin_unlock_bh(&act->tcfa_lock); goto err_out; } -EXPORT_SYMBOL(tc_setup_flow_action); + +int tc_setup_offload_action(struct flow_action *flow_action, + const struct tcf_exts *exts) +{ +#ifdef CONFIG_NET_CLS_ACT + if (!exts) + return 0; + + return tc_setup_action(flow_action, exts->actions); +#else + return 0; +#endif +} +EXPORT_SYMBOL(tc_setup_offload_action); unsigned int tcf_exts_num_actions(struct tcf_exts *exts) { diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c index aab13ba117672..c73f65738ef7b 100644 --- a/net/sched/cls_flower.c +++ b/net/sched/cls_flower.c @@ -461,7 +461,7 @@ static int fl_hw_replace_filter(struct tcf_proto *tp, cls_flower.rule->match.key = &f->mkey; cls_flower.classid = f->res.classid; - err = tc_setup_flow_action(&cls_flower.rule->action, &f->exts); + err = tc_setup_offload_action(&cls_flower.rule->action, &f->exts); if (err) { kfree(cls_flower.rule); if (skip_sw) { @@ -473,7 +473,7 @@ static int fl_hw_replace_filter(struct tcf_proto *tp, err = tc_setup_cb_add(block, tp, TC_SETUP_CLSFLOWER, &cls_flower, skip_sw, &f->flags, &f->in_hw_count, rtnl_held); - tc_cleanup_flow_action(&cls_flower.rule->action); + tc_cleanup_offload_action(&cls_flower.rule->action); kfree(cls_flower.rule); if (err) { @@ -501,12 +501,12 @@ static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f, tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false, rtnl_held); - tcf_exts_stats_update(&f->exts, cls_flower.stats.bytes, - cls_flower.stats.pkts, - cls_flower.stats.drops, - cls_flower.stats.lastused, - cls_flower.stats.used_hw_stats, - cls_flower.stats.used_hw_stats_valid); + tcf_exts_hw_stats_update(&f->exts, cls_flower.stats.bytes, + cls_flower.stats.pkts, + cls_flower.stats.drops, + cls_flower.stats.lastused, + cls_flower.stats.used_hw_stats, + cls_flower.stats.used_hw_stats_valid); } static void __fl_put(struct cls_fl_filter *f) @@ -1917,12 +1917,14 @@ static int fl_set_parms(struct net *net, struct tcf_proto *tp, struct cls_fl_filter *f, struct fl_flow_mask *mask, unsigned long base, struct nlattr **tb, struct nlattr *est, - struct fl_flow_tmplt *tmplt, u32 flags, + struct fl_flow_tmplt *tmplt, + u32 flags, u32 fl_flags, struct netlink_ext_ack *extack) { int err; - err = tcf_exts_validate(net, tp, tb, est, &f->exts, flags, extack); + err = tcf_exts_validate_ex(net, tp, tb, est, &f->exts, flags, + fl_flags, extack); if (err < 0) return err; @@ -2036,7 +2038,8 @@ static int fl_change(struct net *net, struct sk_buff *in_skb, } err = fl_set_parms(net, tp, fnew, mask, base, tb, tca[TCA_RATE], - tp->chain->tmplt_priv, flags, extack); + tp->chain->tmplt_priv, flags, fnew->flags, + extack); if (err) goto errout; @@ -2266,7 +2269,7 @@ static int fl_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb, cls_flower.rule->match.mask = &f->mask->key; cls_flower.rule->match.key = &f->mkey; - err = tc_setup_flow_action(&cls_flower.rule->action, &f->exts); + err = tc_setup_offload_action(&cls_flower.rule->action, &f->exts); if (err) { kfree(cls_flower.rule); if (tc_skip_sw(f->flags)) { @@ -2283,7 +2286,7 @@ static int fl_reoffload(struct tcf_proto *tp, 
bool add, flow_setup_cb_t *cb, TC_SETUP_CLSFLOWER, &cls_flower, cb_priv, &f->flags, &f->in_hw_count); - tc_cleanup_flow_action(&cls_flower.rule->action); + tc_cleanup_offload_action(&cls_flower.rule->action); kfree(cls_flower.rule); if (err) { diff --git a/net/sched/cls_matchall.c b/net/sched/cls_matchall.c index 24f0046ce0b33..ca5670fd5228e 100644 --- a/net/sched/cls_matchall.c +++ b/net/sched/cls_matchall.c @@ -97,7 +97,7 @@ static int mall_replace_hw_filter(struct tcf_proto *tp, cls_mall.command = TC_CLSMATCHALL_REPLACE; cls_mall.cookie = cookie; - err = tc_setup_flow_action(&cls_mall.rule->action, &head->exts); + err = tc_setup_offload_action(&cls_mall.rule->action, &head->exts); if (err) { kfree(cls_mall.rule); mall_destroy_hw_filter(tp, head, cookie, NULL); @@ -111,7 +111,7 @@ static int mall_replace_hw_filter(struct tcf_proto *tp, err = tc_setup_cb_add(block, tp, TC_SETUP_CLSMATCHALL, &cls_mall, skip_sw, &head->flags, &head->in_hw_count, true); - tc_cleanup_flow_action(&cls_mall.rule->action); + tc_cleanup_offload_action(&cls_mall.rule->action); kfree(cls_mall.rule); if (err) { @@ -163,12 +163,13 @@ static const struct nla_policy mall_policy[TCA_MATCHALL_MAX + 1] = { static int mall_set_parms(struct net *net, struct tcf_proto *tp, struct cls_mall_head *head, unsigned long base, struct nlattr **tb, - struct nlattr *est, u32 flags, + struct nlattr *est, u32 flags, u32 fl_flags, struct netlink_ext_ack *extack) { int err; - err = tcf_exts_validate(net, tp, tb, est, &head->exts, flags, extack); + err = tcf_exts_validate_ex(net, tp, tb, est, &head->exts, flags, + fl_flags, extack); if (err < 0) return err; @@ -226,8 +227,8 @@ static int mall_change(struct net *net, struct sk_buff *in_skb, goto err_alloc_percpu; } - err = mall_set_parms(net, tp, new, base, tb, tca[TCA_RATE], flags, - extack); + err = mall_set_parms(net, tp, new, base, tb, tca[TCA_RATE], + flags, new->flags, extack); if (err) goto err_set_parms; @@ -301,7 +302,7 @@ static int mall_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb, TC_CLSMATCHALL_REPLACE : TC_CLSMATCHALL_DESTROY; cls_mall.cookie = (unsigned long)head; - err = tc_setup_flow_action(&cls_mall.rule->action, &head->exts); + err = tc_setup_offload_action(&cls_mall.rule->action, &head->exts); if (err) { kfree(cls_mall.rule); if (add && tc_skip_sw(head->flags)) { @@ -314,7 +315,7 @@ static int mall_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb, err = tc_setup_cb_reoffload(block, tp, add, cb, TC_SETUP_CLSMATCHALL, &cls_mall, cb_priv, &head->flags, &head->in_hw_count); - tc_cleanup_flow_action(&cls_mall.rule->action); + tc_cleanup_offload_action(&cls_mall.rule->action); kfree(cls_mall.rule); if (err) @@ -336,11 +337,11 @@ static void mall_stats_hw_filter(struct tcf_proto *tp, tc_setup_cb_call(block, TC_SETUP_CLSMATCHALL, &cls_mall, false, true); - tcf_exts_stats_update(&head->exts, cls_mall.stats.bytes, - cls_mall.stats.pkts, cls_mall.stats.drops, - cls_mall.stats.lastused, - cls_mall.stats.used_hw_stats, - cls_mall.stats.used_hw_stats_valid); + tcf_exts_hw_stats_update(&head->exts, cls_mall.stats.bytes, + cls_mall.stats.pkts, cls_mall.stats.drops, + cls_mall.stats.lastused, + cls_mall.stats.used_hw_stats, + cls_mall.stats.used_hw_stats_valid); } static int mall_dump(struct net *net, struct tcf_proto *tp, void *fh, diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c index 4272814487f09..cf5649292ee00 100644 --- a/net/sched/cls_u32.c +++ b/net/sched/cls_u32.c @@ -709,12 +709,13 @@ static const struct nla_policy u32_policy[TCA_U32_MAX + 1] 
= { static int u32_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base, struct tc_u_knode *n, struct nlattr **tb, - struct nlattr *est, u32 flags, + struct nlattr *est, u32 flags, u32 fl_flags, struct netlink_ext_ack *extack) { int err; - err = tcf_exts_validate(net, tp, tb, est, &n->exts, flags, extack); + err = tcf_exts_validate_ex(net, tp, tb, est, &n->exts, flags, + fl_flags, extack); if (err < 0) return err; @@ -895,7 +896,8 @@ static int u32_change(struct net *net, struct sk_buff *in_skb, return -ENOMEM; err = u32_set_parms(net, tp, base, new, tb, - tca[TCA_RATE], flags, extack); + tca[TCA_RATE], flags, new->flags, + extack); if (err) { u32_destroy_key(new, false); @@ -1060,8 +1062,8 @@ static int u32_change(struct net *net, struct sk_buff *in_skb, } #endif - err = u32_set_parms(net, tp, base, n, tb, tca[TCA_RATE], flags, - extack); + err = u32_set_parms(net, tp, base, n, tb, tca[TCA_RATE], + flags, n->flags, extack); if (err == 0) { struct tc_u_knode __rcu **ins; struct tc_u_knode *pins; diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/police.json b/tools/testing/selftests/tc-testing/tc-tests/actions/police.json index 8e45792703ed3..b7205a0695349 100644 --- a/tools/testing/selftests/tc-testing/tc-tests/actions/police.json +++ b/tools/testing/selftests/tc-testing/tc-tests/actions/police.json @@ -812,5 +812,29 @@ "teardown": [ "$TC actions flush action police" ] + }, + { + "id": "7d64", + "name": "Add police action with skip_hw option", + "category": [ + "actions", + "police" + ], + "setup": [ + [ + "$TC actions flush action police", + 0, + 1, + 255 + ] + ], + "cmdUnderTest": "$TC actions add action police rate 1kbit burst 10k index 100 skip_hw", + "expExitCode": "0", + "verifyCmd": "$TC actions ls action police | grep skip_hw", + "matchPattern": "skip_hw", + "matchCount": "1", + "teardown": [ + "$TC actions flush action police" + ] } ] diff --git a/tools/testing/selftests/tc-testing/tc-tests/filters/matchall.json b/tools/testing/selftests/tc-testing/tc-tests/filters/matchall.json index 51799874a9721..2df68017dfb8f 100644 --- a/tools/testing/selftests/tc-testing/tc-tests/filters/matchall.json +++ b/tools/testing/selftests/tc-testing/tc-tests/filters/matchall.json @@ -387,5 +387,77 @@ "$TC qdisc del dev $DUMMY ingress", "$IP link del dev $DUMMY type dummy" ] + }, + { + "id": "3329", + "name": "Validate flags of the matchall filter with skip_sw and police action with skip_hw", + "category": [ + "filter", + "matchall" + ], + "setup": [ + "$IP link add dev $DUMMY type dummy || /bin/true", + "$TC qdisc add dev $DUMMY ingress", + "$TC actions flush action police", + "$TC actions add action police rate 1mbit burst 100k index 199 skip_hw" + ], + "cmdUnderTest": "$TC filter add dev $DUMMY parent ffff: handle 0x1 prio 1 protocol ipv4 matchall skip_sw action police index 199", + "expExitCode": "2", + "verifyCmd": "$TC filter get dev $DUMMY parent ffff: handle 1 prio 1 protocol ipv4 matchall", + "matchPattern": "^filter parent ffff: protocol ip pref 1 matchall.*handle 0x1.*", + "matchCount": "0", + "teardown": [ + "$TC qdisc del dev $DUMMY ingress", + "$IP link del dev $DUMMY type dummy", + "$TC actions del action police index 199" + ] + }, + { + "id": "0eeb", + "name": "Validate flags of the matchall filter with skip_hw and police action", + "category": [ + "filter", + "matchall" + ], + "setup": [ + "$IP link add dev $DUMMY type dummy || /bin/true", + "$TC qdisc add dev $DUMMY ingress", + "$TC actions flush action police", + "$TC actions add action police rate 
1mbit burst 100k index 199" + ], + "cmdUnderTest": "$TC filter add dev $DUMMY parent ffff: handle 0x1 prio 1 protocol ipv4 matchall skip_hw action police index 199", + "expExitCode": "2", + "verifyCmd": "$TC filter get dev $DUMMY parent ffff: handle 1 prio 1 protocol ipv4 matchall", + "matchPattern": "^filter parent ffff: protocol ip pref 1 matchall.*handle 0x1.*", + "matchCount": "0", + "teardown": [ + "$TC qdisc del dev $DUMMY ingress", + "$IP link del dev $DUMMY type dummy", + "$TC actions del action police index 199" + ] + }, + { + "id": "eee4", + "name": "Validate flags of the matchall filter with skip_sw and police action", + "category": [ + "filter", + "matchall" + ], + "setup": [ + "$IP link add dev $DUMMY type dummy || /bin/true", + "$TC qdisc add dev $DUMMY ingress", + "$TC actions flush action police", + "$TC actions add action police rate 1mbit burst 100k index 199" + ], + "cmdUnderTest": "$TC filter add dev $DUMMY parent ffff: handle 0x1 prio 1 protocol ipv4 matchall skip_sw action police index 199", + "expExitCode": "2", + "verifyCmd": "$TC filter get dev $DUMMY parent ffff: handle 1 prio 1 protocol ipv4 matchall", + "matchPattern": "^filter parent ffff: protocol ip pref 1 matchall.*handle 0x1.*", + "matchCount": "0", + "teardown": [ + "$TC qdisc del dev $DUMMY ingress", + "$IP link del dev $DUMMY type dummy", + "$TC actions del action police index 199" + ] } ]
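
Note on the new per-action offload hook introduced above: after this refactor, tc_setup_offload_action() walks exts->actions via tc_setup_action(), and for each action tc_setup_offload_act() dispatches to the action's own ->offload_act_setup() callback, which fills the pre-selected flow_action_entry (or a struct flow_offload_action when bind is false) and advances *index_inc by the number of entries it consumed, replacing the old is_tcf_*() if/else chain removed from cls_api.c. The following is a minimal sketch only, modelled on the tcf_vlan_offload_act_setup() shape added in this patch; the "foo" action, tcf_foo_offload_act_setup() and act_foo_ops are invented for illustration and are not part of this series.

/*
 * Sketch only: a hypothetical ->offload_act_setup() for a single-entry
 * action that always translates to FLOW_ACTION_DROP.  Core code sets
 * entry->hw_index and entry->hw_stats before calling this, so the
 * callback only reports the action id (and any per-action parameters).
 */
#include <net/act_api.h>
#include <net/flow_offload.h>
#include <net/pkt_cls.h>

static int tcf_foo_offload_act_setup(struct tc_action *act, void *entry_data,
				     u32 *index_inc, bool bind)
{
	if (bind) {
		/* Classifier offload path: fill the flow_action_entry
		 * that tc_setup_action() handed us.
		 */
		struct flow_action_entry *entry = entry_data;

		entry->id = FLOW_ACTION_DROP;
		/* One entry consumed; actions that expand to several
		 * entries (e.g. multi-key pedit) report a larger count.
		 */
		*index_inc = 1;
	} else {
		/* Standalone action offload path: only the action id
		 * is reported, as in the vlan callback above.
		 */
		struct flow_offload_action *fl_action = entry_data;

		fl_action->id = FLOW_ACTION_DROP;
	}

	return 0;
}

static struct tc_action_ops act_foo_ops = {
	.kind			= "foo",
	.offload_act_setup	= tcf_foo_offload_act_setup,
	/* .init, .act, .dump and the other mandatory tc_action_ops
	 * callbacks are omitted in this sketch.
	 */
	.size			= sizeof(struct tc_action),
};

The fl_flags argument threaded through tcf_exts_validate_ex() into flower, matchall and u32 is what lets tcf_action_init() see the filter's skip_sw/skip_hw flags when binding actions; the added matchall selftests (ids 3329, 0eeb, eee4) exercise that interaction with pre-created police actions and expect the filter add to be rejected (exit code 2), while police test 7d64 verifies that skip_hw is accepted and dumped on a standalone action.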