
Commit

Merge tag 'mlx5-updates-2021-04-02' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5-updates-2021-04-02

This series provides trivial updates and cleanups to the mlx5 driver.

1) Support for matching on the ct_state inv and rel flags in connection tracking
2) Reject TC rules that redirect from a VF to itself
3) Parav provided some E-Switch cleanups that can be summarized as:
  3.1) Packing and reducing structure sizes
  3.2) Dynamic allocation of rate limit tables and structures
4) Vu makes the netdev aRFS and VLAN table allocation dynamic.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller committed Apr 4, 2021
2 parents 428e68e + 6783f0a commit cd77ce9
Showing 14 changed files with 323 additions and 198 deletions.
46 changes: 7 additions & 39 deletions drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
@@ -49,18 +49,10 @@ struct mlx5e_promisc_table {
struct mlx5_flow_handle *rule;
};

struct mlx5e_vlan_table {
struct mlx5e_flow_table ft;
DECLARE_BITMAP(active_cvlans, VLAN_N_VID);
DECLARE_BITMAP(active_svlans, VLAN_N_VID);
struct mlx5_flow_handle *active_cvlans_rule[VLAN_N_VID];
struct mlx5_flow_handle *active_svlans_rule[VLAN_N_VID];
struct mlx5_flow_handle *untagged_rule;
struct mlx5_flow_handle *any_cvlan_rule;
struct mlx5_flow_handle *any_svlan_rule;
struct mlx5_flow_handle *trap_rule;
bool cvlan_filter_disabled;
};
/* Forward declaration and APIs to get private fields of vlan_table */
struct mlx5e_vlan_table;
unsigned long *mlx5e_vlan_get_active_svlans(struct mlx5e_vlan_table *vlan);
struct mlx5_flow_table *mlx5e_vlan_get_flowtable(struct mlx5e_vlan_table *vlan);

struct mlx5e_l2_table {
struct mlx5e_flow_table ft;
@@ -200,31 +192,7 @@ static inline int mlx5e_ethtool_get_rxnfc(struct net_device *dev,
#endif /* CONFIG_MLX5_EN_RXNFC */

#ifdef CONFIG_MLX5_EN_ARFS
#define ARFS_HASH_SHIFT BITS_PER_BYTE
#define ARFS_HASH_SIZE BIT(BITS_PER_BYTE)

struct arfs_table {
struct mlx5e_flow_table ft;
struct mlx5_flow_handle *default_rule;
struct hlist_head rules_hash[ARFS_HASH_SIZE];
};

enum arfs_type {
ARFS_IPV4_TCP,
ARFS_IPV6_TCP,
ARFS_IPV4_UDP,
ARFS_IPV6_UDP,
ARFS_NUM_TYPES,
};

struct mlx5e_arfs_tables {
struct arfs_table arfs_tables[ARFS_NUM_TYPES];
/* Protect aRFS rules list */
spinlock_t arfs_lock;
struct list_head rules;
int last_filter_id;
struct workqueue_struct *wq;
};
struct mlx5e_arfs_tables;

int mlx5e_arfs_create_tables(struct mlx5e_priv *priv);
void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv);
@@ -255,12 +223,12 @@ struct mlx5e_flow_steering {
#endif
struct mlx5e_tc_table tc;
struct mlx5e_promisc_table promisc;
struct mlx5e_vlan_table vlan;
struct mlx5e_vlan_table *vlan;
struct mlx5e_l2_table l2;
struct mlx5e_ttc_table ttc;
struct mlx5e_ttc_table inner_ttc;
#ifdef CONFIG_MLX5_EN_ARFS
struct mlx5e_arfs_tables arfs;
struct mlx5e_arfs_tables *arfs;
#endif
#ifdef CONFIG_MLX5_EN_TLS
struct mlx5e_accel_fs_tcp *accel_tcp;
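
The en/fs.h change above converts struct mlx5e_vlan_table and struct mlx5e_arfs_tables into opaque types: the header keeps only a forward declaration plus small accessors, and struct mlx5e_flow_steering now holds pointers rather than embedded structs. A minimal sketch of that opaque-pointer pattern, using hypothetical names rather than the driver's real API:

/* widget.h -- users see only the forward declaration and accessors */
struct widget;

struct widget *widget_create(void);
void widget_destroy(struct widget *w);
unsigned long *widget_get_flags(struct widget *w);

/* widget.c -- the layout is private to this file */
#include <linux/mm.h>
#include <linux/slab.h>

struct widget {
        unsigned long flags[4];        /* fields hidden from other compilation units */
        int last_id;
};

struct widget *widget_create(void)
{
        return kvzalloc(sizeof(struct widget), GFP_KERNEL);
}

void widget_destroy(struct widget *w)
{
        kvfree(w);
}

unsigned long *widget_get_flags(struct widget *w)
{
        return w->flags;
}

The accessors declared in the hunk (mlx5e_vlan_get_active_svlans(), mlx5e_vlan_get_flowtable()) play the widget_get_flags() role here: other files keep only a pointer, while the defining .c file owns the layout and can allocate it dynamically.
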
26 changes: 24 additions & 2 deletions drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
@@ -29,6 +29,8 @@
#define MLX5_CT_STATE_TRK_BIT BIT(2)
#define MLX5_CT_STATE_NAT_BIT BIT(3)
#define MLX5_CT_STATE_REPLY_BIT BIT(4)
#define MLX5_CT_STATE_RELATED_BIT BIT(5)
#define MLX5_CT_STATE_INVALID_BIT BIT(6)

#define MLX5_FTE_ID_BITS (mlx5e_tc_attr_to_reg_mappings[FTEID_TO_REG].mlen * 8)
#define MLX5_FTE_ID_MAX GENMASK(MLX5_FTE_ID_BITS - 1, 0)
@@ -1207,8 +1209,8 @@ mlx5_tc_ct_match_add(struct mlx5_tc_ct_priv *priv,
struct mlx5_ct_attr *ct_attr,
struct netlink_ext_ack *extack)
{
bool trk, est, untrk, unest, new, rpl, unrpl, rel, unrel, inv, uninv;
struct flow_rule *rule = flow_cls_offload_flow_rule(f);
bool trk, est, untrk, unest, new, rpl, unrpl;
struct flow_dissector_key_ct *mask, *key;
u32 ctstate = 0, ctstate_mask = 0;
u16 ct_state_on, ct_state_off;
@@ -1236,7 +1238,9 @@ mlx5_tc_ct_match_add(struct mlx5_tc_ct_priv *priv,
if (ct_state_mask & ~(TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED |
TCA_FLOWER_KEY_CT_FLAGS_NEW |
TCA_FLOWER_KEY_CT_FLAGS_REPLY)) {
TCA_FLOWER_KEY_CT_FLAGS_REPLY |
TCA_FLOWER_KEY_CT_FLAGS_RELATED |
TCA_FLOWER_KEY_CT_FLAGS_INVALID)) {
NL_SET_ERR_MSG_MOD(extack,
"only ct_state trk, est, new and rpl are supported for offload");
return -EOPNOTSUPP;
@@ -1248,16 +1252,34 @@ mlx5_tc_ct_match_add(struct mlx5_tc_ct_priv *priv,
new = ct_state_on & TCA_FLOWER_KEY_CT_FLAGS_NEW;
est = ct_state_on & TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED;
rpl = ct_state_on & TCA_FLOWER_KEY_CT_FLAGS_REPLY;
rel = ct_state_on & TCA_FLOWER_KEY_CT_FLAGS_RELATED;
inv = ct_state_on & TCA_FLOWER_KEY_CT_FLAGS_INVALID;
untrk = ct_state_off & TCA_FLOWER_KEY_CT_FLAGS_TRACKED;
unest = ct_state_off & TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED;
unrpl = ct_state_off & TCA_FLOWER_KEY_CT_FLAGS_REPLY;
unrel = ct_state_off & TCA_FLOWER_KEY_CT_FLAGS_RELATED;
uninv = ct_state_off & TCA_FLOWER_KEY_CT_FLAGS_INVALID;

ctstate |= trk ? MLX5_CT_STATE_TRK_BIT : 0;
ctstate |= est ? MLX5_CT_STATE_ESTABLISHED_BIT : 0;
ctstate |= rpl ? MLX5_CT_STATE_REPLY_BIT : 0;
ctstate_mask |= (untrk || trk) ? MLX5_CT_STATE_TRK_BIT : 0;
ctstate_mask |= (unest || est) ? MLX5_CT_STATE_ESTABLISHED_BIT : 0;
ctstate_mask |= (unrpl || rpl) ? MLX5_CT_STATE_REPLY_BIT : 0;
ctstate_mask |= unrel ? MLX5_CT_STATE_RELATED_BIT : 0;
ctstate_mask |= uninv ? MLX5_CT_STATE_INVALID_BIT : 0;

if (rel) {
NL_SET_ERR_MSG_MOD(extack,
"matching on ct_state +rel isn't supported");
return -EOPNOTSUPP;
}

if (inv) {
NL_SET_ERR_MSG_MOD(extack,
"matching on ct_state +inv isn't supported");
return -EOPNOTSUPP;
}

if (new) {
NL_SET_ERR_MSG_MOD(extack,
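
The tc_ct.c hunks above accept rel and inv only as negative matches: -rel/-inv set just the corresponding mask bit, so offloaded packets must have that ct_state bit cleared, while +rel/+inv would also require the state bit itself and are rejected with -EOPNOTSUPP. A standalone sketch of the resulting match semantics (plain C, illustrative names mirroring the defines in the hunk above):

#include <stdint.h>
#include <stdio.h>

/* Same bit positions as MLX5_CT_STATE_TRK_BIT (BIT(2)),
 * MLX5_CT_STATE_RELATED_BIT (BIT(5)) and MLX5_CT_STATE_INVALID_BIT (BIT(6)).
 */
#define CT_TRK (1u << 2)
#define CT_REL (1u << 5)
#define CT_INV (1u << 6)

int main(void)
{
        /* tc flower match "ct_state +trk-rel-inv":
         * trk must be set, rel and inv must be clear.
         */
        uint32_t ctstate      = CT_TRK;                   /* required bit values */
        uint32_t ctstate_mask = CT_TRK | CT_REL | CT_INV; /* bits that are checked */

        uint32_t tracked_pkt = CT_TRK;                    /* tracked, not rel/inv */
        uint32_t invalid_pkt = CT_TRK | CT_INV;           /* tracked but invalid  */

        printf("%d\n", (tracked_pkt & ctstate_mask) == ctstate); /* prints 1: matches   */
        printf("%d\n", (invalid_pkt & ctstate_mask) == ctstate); /* prints 0: filtered  */
        return 0;
}

In tc terms, a rule such as "ct_state +trk+est-rel-inv" can be offloaded, while "ct_state +rel" or "ct_state +inv" is refused with the extack messages added above.
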
95 changes: 67 additions & 28 deletions drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
@@ -36,6 +36,32 @@
#include <linux/ipv6.h>
#include "en.h"

#define ARFS_HASH_SHIFT BITS_PER_BYTE
#define ARFS_HASH_SIZE BIT(BITS_PER_BYTE)

struct arfs_table {
struct mlx5e_flow_table ft;
struct mlx5_flow_handle *default_rule;
struct hlist_head rules_hash[ARFS_HASH_SIZE];
};

enum arfs_type {
ARFS_IPV4_TCP,
ARFS_IPV6_TCP,
ARFS_IPV4_UDP,
ARFS_IPV6_UDP,
ARFS_NUM_TYPES,
};

struct mlx5e_arfs_tables {
struct arfs_table arfs_tables[ARFS_NUM_TYPES];
/* Protect aRFS rules list */
spinlock_t arfs_lock;
struct list_head rules;
int last_filter_id;
struct workqueue_struct *wq;
};

struct arfs_tuple {
__be16 etype;
u8 ip_proto;
@@ -121,7 +147,7 @@ int mlx5e_arfs_enable(struct mlx5e_priv *priv)

dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
for (i = 0; i < ARFS_NUM_TYPES; i++) {
dest.ft = priv->fs.arfs.arfs_tables[i].ft.t;
dest.ft = priv->fs.arfs->arfs_tables[i].ft.t;
/* Modify ttc rules destination to point on the aRFS FTs */
err = mlx5e_ttc_fwd_dest(priv, arfs_get_tt(i), &dest);
if (err) {
@@ -141,25 +167,31 @@ static void arfs_destroy_table(struct arfs_table *arfs_t)
mlx5e_destroy_flow_table(&arfs_t->ft);
}

void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv)
static void _mlx5e_cleanup_tables(struct mlx5e_priv *priv)
{
int i;

if (!(priv->netdev->hw_features & NETIF_F_NTUPLE))
return;

arfs_del_rules(priv);
destroy_workqueue(priv->fs.arfs.wq);
destroy_workqueue(priv->fs.arfs->wq);
for (i = 0; i < ARFS_NUM_TYPES; i++) {
if (!IS_ERR_OR_NULL(priv->fs.arfs.arfs_tables[i].ft.t))
arfs_destroy_table(&priv->fs.arfs.arfs_tables[i]);
if (!IS_ERR_OR_NULL(priv->fs.arfs->arfs_tables[i].ft.t))
arfs_destroy_table(&priv->fs.arfs->arfs_tables[i]);
}
}

void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv)
{
if (!(priv->netdev->hw_features & NETIF_F_NTUPLE))
return;

_mlx5e_cleanup_tables(priv);
kvfree(priv->fs.arfs);
}

static int arfs_add_default_rule(struct mlx5e_priv *priv,
enum arfs_type type)
{
struct arfs_table *arfs_t = &priv->fs.arfs.arfs_tables[type];
struct arfs_table *arfs_t = &priv->fs.arfs->arfs_tables[type];
struct mlx5e_tir *tir = priv->indir_tir;
struct mlx5_flow_destination dest = {};
MLX5_DECLARE_FLOW_ACT(flow_act);
@@ -290,7 +322,7 @@ static int arfs_create_groups(struct mlx5e_flow_table *ft,
static int arfs_create_table(struct mlx5e_priv *priv,
enum arfs_type type)
{
struct mlx5e_arfs_tables *arfs = &priv->fs.arfs;
struct mlx5e_arfs_tables *arfs = priv->fs.arfs;
struct mlx5e_flow_table *ft = &arfs->arfs_tables[type].ft;
struct mlx5_flow_table_attr ft_attr = {};
int err;
@@ -330,20 +362,27 @@ int mlx5e_arfs_create_tables(struct mlx5e_priv *priv)
if (!(priv->netdev->hw_features & NETIF_F_NTUPLE))
return 0;

spin_lock_init(&priv->fs.arfs.arfs_lock);
INIT_LIST_HEAD(&priv->fs.arfs.rules);
priv->fs.arfs.wq = create_singlethread_workqueue("mlx5e_arfs");
if (!priv->fs.arfs.wq)
priv->fs.arfs = kvzalloc(sizeof(*priv->fs.arfs), GFP_KERNEL);
if (!priv->fs.arfs)
return -ENOMEM;

spin_lock_init(&priv->fs.arfs->arfs_lock);
INIT_LIST_HEAD(&priv->fs.arfs->rules);
priv->fs.arfs->wq = create_singlethread_workqueue("mlx5e_arfs");
if (!priv->fs.arfs->wq)
goto err;

for (i = 0; i < ARFS_NUM_TYPES; i++) {
err = arfs_create_table(priv, i);
if (err)
goto err;
goto err_des;
}
return 0;

err_des:
_mlx5e_cleanup_tables(priv);
err:
mlx5e_arfs_destroy_tables(priv);
kvfree(priv->fs.arfs);
return err;
}

@@ -358,8 +397,8 @@ static void arfs_may_expire_flow(struct mlx5e_priv *priv)
int j;

HLIST_HEAD(del_list);
spin_lock_bh(&priv->fs.arfs.arfs_lock);
mlx5e_for_each_arfs_rule(arfs_rule, htmp, priv->fs.arfs.arfs_tables, i, j) {
spin_lock_bh(&priv->fs.arfs->arfs_lock);
mlx5e_for_each_arfs_rule(arfs_rule, htmp, priv->fs.arfs->arfs_tables, i, j) {
if (!work_pending(&arfs_rule->arfs_work) &&
rps_may_expire_flow(priv->netdev,
arfs_rule->rxq, arfs_rule->flow_id,
@@ -370,7 +409,7 @@ static void arfs_may_expire_flow(struct mlx5e_priv *priv)
break;
}
}
spin_unlock_bh(&priv->fs.arfs.arfs_lock);
spin_unlock_bh(&priv->fs.arfs->arfs_lock);
hlist_for_each_entry_safe(arfs_rule, htmp, &del_list, hlist) {
if (arfs_rule->rule)
mlx5_del_flow_rules(arfs_rule->rule);
@@ -387,12 +426,12 @@ static void arfs_del_rules(struct mlx5e_priv *priv)
int j;

HLIST_HEAD(del_list);
spin_lock_bh(&priv->fs.arfs.arfs_lock);
mlx5e_for_each_arfs_rule(rule, htmp, priv->fs.arfs.arfs_tables, i, j) {
spin_lock_bh(&priv->fs.arfs->arfs_lock);
mlx5e_for_each_arfs_rule(rule, htmp, priv->fs.arfs->arfs_tables, i, j) {
hlist_del_init(&rule->hlist);
hlist_add_head(&rule->hlist, &del_list);
}
spin_unlock_bh(&priv->fs.arfs.arfs_lock);
spin_unlock_bh(&priv->fs.arfs->arfs_lock);

hlist_for_each_entry_safe(rule, htmp, &del_list, hlist) {
cancel_work_sync(&rule->arfs_work);
@@ -436,7 +475,7 @@ static struct arfs_table *arfs_get_table(struct mlx5e_arfs_tables *arfs,
static struct mlx5_flow_handle *arfs_add_rule(struct mlx5e_priv *priv,
struct arfs_rule *arfs_rule)
{
struct mlx5e_arfs_tables *arfs = &priv->fs.arfs;
struct mlx5e_arfs_tables *arfs = priv->fs.arfs;
struct arfs_tuple *tuple = &arfs_rule->tuple;
struct mlx5_flow_handle *rule = NULL;
struct mlx5_flow_destination dest = {};
@@ -554,9 +593,9 @@ static void arfs_handle_work(struct work_struct *work)

mutex_lock(&priv->state_lock);
if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
spin_lock_bh(&priv->fs.arfs.arfs_lock);
spin_lock_bh(&priv->fs.arfs->arfs_lock);
hlist_del(&arfs_rule->hlist);
spin_unlock_bh(&priv->fs.arfs.arfs_lock);
spin_unlock_bh(&priv->fs.arfs->arfs_lock);

mutex_unlock(&priv->state_lock);
kfree(arfs_rule);
@@ -609,7 +648,7 @@ static struct arfs_rule *arfs_alloc_rule(struct mlx5e_priv *priv,
tuple->dst_port = fk->ports.dst;

rule->flow_id = flow_id;
rule->filter_id = priv->fs.arfs.last_filter_id++ % RPS_NO_FILTER;
rule->filter_id = priv->fs.arfs->last_filter_id++ % RPS_NO_FILTER;

hlist_add_head(&rule->hlist,
arfs_hash_bucket(arfs_t, tuple->src_port,
Expand Down Expand Up @@ -653,7 +692,7 @@ int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
u16 rxq_index, u32 flow_id)
{
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5e_arfs_tables *arfs = &priv->fs.arfs;
struct mlx5e_arfs_tables *arfs = priv->fs.arfs;
struct arfs_table *arfs_t;
struct arfs_rule *arfs_rule;
struct flow_keys fk;
@@ -687,7 +726,7 @@ int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
return -ENOMEM;
}
}
queue_work(priv->fs.arfs.wq, &arfs_rule->arfs_work);
queue_work(priv->fs.arfs->wq, &arfs_rule->arfs_work);
spin_unlock_bh(&arfs->arfs_lock);
return arfs_rule->filter_id;
}
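
In the new en_arfs.c code above, mlx5e_arfs_create_tables() allocates struct mlx5e_arfs_tables with kvzalloc() and unwinds in reverse order on failure, sharing _mlx5e_cleanup_tables() between the error path and mlx5e_arfs_destroy_tables(). A compact sketch of that allocate/init/unwind shape, with hypothetical names standing in for the mlx5e types and helpers:

#include <linux/errno.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

/* Hypothetical stand-ins for struct mlx5e_arfs_tables / struct mlx5e_priv. */
struct thing {
        struct workqueue_struct *wq;
        struct list_head rules;
        spinlock_t lock;
};

struct owner {
        struct thing *thing;
};

int thing_init_tables(struct owner *o);     /* assumed: creates the flow tables     */
void thing_cleanup_tables(struct owner *o); /* assumed: destroys wq + partial tables */

int thing_create(struct owner *o)
{
        int err = -ENOMEM;

        o->thing = kvzalloc(sizeof(*o->thing), GFP_KERNEL);
        if (!o->thing)
                return -ENOMEM;

        spin_lock_init(&o->thing->lock);
        INIT_LIST_HEAD(&o->thing->rules);

        o->thing->wq = create_singlethread_workqueue("thing_wq");
        if (!o->thing->wq)
                goto err_free;              /* only the allocation to undo */

        err = thing_init_tables(o);
        if (err)
                goto err_cleanup;           /* undo in reverse order */

        return 0;

err_cleanup:
        thing_cleanup_tables(o);
err_free:
        kvfree(o->thing);
        o->thing = NULL;
        return err;
}

Failing before the workqueue exists only frees the allocation; failing during table creation runs the shared cleanup first, so the same helper covers both the error path and the regular destroy path.
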
(Diffs for the remaining 11 changed files are not shown.)
