Skip to content

Commit

Permalink
net/mlx5: Change lag mutex lock to spin lock
Browse files Browse the repository at this point in the history
The lag lock can be a spin lock: the critical section is short,
and there is no need for the thread to sleep.
Change the lock that protects the LAG structure from a mutex
to a spin lock. This is required for the next patch, which needs to
access this structure from a context in which we can't sleep.
In addition, there is no need to hold this lock when querying the
congestion counters.

Signed-off-by: Maor Gottlieb <maorg@mellanox.com>
Reviewed-by: Leon Romanovsky <leonro@mellanox.com>
Acked-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
  • Loading branch information
Maor Gottlieb authored and Saeed Mahameed committed May 1, 2020
1 parent 33720aa commit 64363e6
Showing 1 changed file with 21 additions and 21 deletions.
42 changes: 21 additions & 21 deletions drivers/net/ethernet/mellanox/mlx5/core/lag.c
Original file line number Diff line number Diff line change
Expand Up @@ -42,7 +42,7 @@
* Beware of lock dependencies (preferably, no locks should be acquired
* under it).
*/
static DEFINE_MUTEX(lag_mutex);
static DEFINE_SPINLOCK(lag_lock);

static int mlx5_cmd_create_lag(struct mlx5_core_dev *dev, u8 remap_port1,
u8 remap_port2)
Expand Down Expand Up @@ -274,9 +274,9 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
if (!dev0 || !dev1)
return;

mutex_lock(&lag_mutex);
spin_lock(&lag_lock);
tracker = ldev->tracker;
mutex_unlock(&lag_mutex);
spin_unlock(&lag_lock);

do_bond = tracker.is_bonded && mlx5_lag_check_prereq(ldev);

Expand Down Expand Up @@ -458,9 +458,9 @@ static int mlx5_lag_netdev_event(struct notifier_block *this,
break;
}

mutex_lock(&lag_mutex);
spin_lock(&lag_lock);
ldev->tracker = tracker;
mutex_unlock(&lag_mutex);
spin_unlock(&lag_lock);

if (changed)
mlx5_queue_bond_work(ldev, 0);
Expand Down Expand Up @@ -502,15 +502,15 @@ static void mlx5_lag_dev_add_pf(struct mlx5_lag *ldev,
if (fn >= MLX5_MAX_PORTS)
return;

mutex_lock(&lag_mutex);
spin_lock(&lag_lock);
ldev->pf[fn].dev = dev;
ldev->pf[fn].netdev = netdev;
ldev->tracker.netdev_state[fn].link_up = 0;
ldev->tracker.netdev_state[fn].tx_enabled = 0;

dev->priv.lag = ldev;

mutex_unlock(&lag_mutex);
spin_unlock(&lag_lock);
}

static void mlx5_lag_dev_remove_pf(struct mlx5_lag *ldev,
Expand All @@ -525,11 +525,11 @@ static void mlx5_lag_dev_remove_pf(struct mlx5_lag *ldev,
if (i == MLX5_MAX_PORTS)
return;

mutex_lock(&lag_mutex);
spin_lock(&lag_lock);
memset(&ldev->pf[i], 0, sizeof(*ldev->pf));

dev->priv.lag = NULL;
mutex_unlock(&lag_mutex);
spin_unlock(&lag_lock);
}

/* Must be called with intf_mutex held */
Expand Down Expand Up @@ -607,10 +607,10 @@ bool mlx5_lag_is_roce(struct mlx5_core_dev *dev)
struct mlx5_lag *ldev;
bool res;

mutex_lock(&lag_mutex);
spin_lock(&lag_lock);
ldev = mlx5_lag_dev_get(dev);
res = ldev && __mlx5_lag_is_roce(ldev);
mutex_unlock(&lag_mutex);
spin_unlock(&lag_lock);

return res;
}
Expand All @@ -621,10 +621,10 @@ bool mlx5_lag_is_active(struct mlx5_core_dev *dev)
struct mlx5_lag *ldev;
bool res;

mutex_lock(&lag_mutex);
spin_lock(&lag_lock);
ldev = mlx5_lag_dev_get(dev);
res = ldev && __mlx5_lag_is_active(ldev);
mutex_unlock(&lag_mutex);
spin_unlock(&lag_lock);

return res;
}
Expand All @@ -635,10 +635,10 @@ bool mlx5_lag_is_sriov(struct mlx5_core_dev *dev)
struct mlx5_lag *ldev;
bool res;

mutex_lock(&lag_mutex);
spin_lock(&lag_lock);
ldev = mlx5_lag_dev_get(dev);
res = ldev && __mlx5_lag_is_sriov(ldev);
mutex_unlock(&lag_mutex);
spin_unlock(&lag_lock);

return res;
}
Expand All @@ -664,7 +664,7 @@ struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev)
struct net_device *ndev = NULL;
struct mlx5_lag *ldev;

mutex_lock(&lag_mutex);
spin_lock(&lag_lock);
ldev = mlx5_lag_dev_get(dev);

if (!(ldev && __mlx5_lag_is_roce(ldev)))
Expand All @@ -681,7 +681,7 @@ struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev)
dev_hold(ndev);

unlock:
mutex_unlock(&lag_mutex);
spin_unlock(&lag_lock);

return ndev;
}
Expand Down Expand Up @@ -723,7 +723,7 @@ int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,

memset(values, 0, sizeof(*values) * num_counters);

mutex_lock(&lag_mutex);
spin_lock(&lag_lock);
ldev = mlx5_lag_dev_get(dev);
if (ldev && __mlx5_lag_is_roce(ldev)) {
num_ports = MLX5_MAX_PORTS;
Expand All @@ -733,6 +733,7 @@ int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
num_ports = 1;
mdev[MLX5_LAG_P1] = dev;
}
spin_unlock(&lag_lock);

for (i = 0; i < num_ports; ++i) {
u32 in[MLX5_ST_SZ_DW(query_cong_statistics_in)] = {};
Expand All @@ -742,14 +743,13 @@ int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
ret = mlx5_cmd_exec_inout(mdev[i], query_cong_statistics, in,
out);
if (ret)
goto unlock;
goto free;

for (j = 0; j < num_counters; ++j)
values[j] += be64_to_cpup((__be64 *)(out + offsets[j]));
}

unlock:
mutex_unlock(&lag_mutex);
free:
kvfree(out);
return ret;
}
Expand Down

0 comments on commit 64363e6

Please sign in to comment.