Skip to content

Commit

Permalink
octeontx2-af: Fix multicast/mirror group lock/unlock issue
Browse files Browse the repository at this point in the history
As per the existing implementation, there is a race between finding
a multicast/mirror group entry and deleting that entry. The group lock
was taken and released independently by the rvu_nix_mcast_find_grp_elem()
function, which is incorrect; the group lock should be held for the
entire group update/deletion operation. This patch fixes that.

Fixes: 51b2804 ("octeontx2-af: Add new mbox to support multicast/mirror offload")
Signed-off-by: Suman Ghosh <sumang@marvell.com>
Reviewed-by: Simon Horman <horms@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
  • Loading branch information
Suman Ghosh authored and David S. Miller committed Dec 15, 2023
1 parent 12da68e commit 10b7572
Showing 1 changed file with 54 additions and 30 deletions.
84 changes: 54 additions & 30 deletions drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
Original file line number Diff line number Diff line change
Expand Up @@ -6142,14 +6142,12 @@ static struct nix_mcast_grp_elem *rvu_nix_mcast_find_grp_elem(struct nix_mcast_g
struct nix_mcast_grp_elem *iter;
bool is_found = false;

mutex_lock(&mcast_grp->mcast_grp_lock);
list_for_each_entry(iter, &mcast_grp->mcast_grp_head, list) {
if (iter->mcast_grp_idx == mcast_grp_idx) {
is_found = true;
break;
}
}
mutex_unlock(&mcast_grp->mcast_grp_lock);

if (is_found)
return iter;
Expand All @@ -6162,19 +6160,23 @@ int rvu_nix_mcast_get_mce_index(struct rvu *rvu, u16 pcifunc, u32 mcast_grp_idx)
struct nix_mcast_grp_elem *elem;
struct nix_mcast_grp *mcast_grp;
struct nix_hw *nix_hw;
int blkaddr;
int blkaddr, ret;

blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
nix_hw = get_nix_hw(rvu->hw, blkaddr);
if (!nix_hw)
return NIX_AF_ERR_INVALID_NIXBLK;

mcast_grp = &nix_hw->mcast_grp;
mutex_lock(&mcast_grp->mcast_grp_lock);
elem = rvu_nix_mcast_find_grp_elem(mcast_grp, mcast_grp_idx);
if (!elem)
return NIX_AF_ERR_INVALID_MCAST_GRP;
ret = NIX_AF_ERR_INVALID_MCAST_GRP;
else
ret = elem->mce_start_index;

return elem->mce_start_index;
mutex_unlock(&mcast_grp->mcast_grp_lock);
return ret;
}

void rvu_nix_mcast_flr_free_entries(struct rvu *rvu, u16 pcifunc)
Expand Down Expand Up @@ -6238,21 +6240,23 @@ int rvu_nix_mcast_update_mcam_entry(struct rvu *rvu, u16 pcifunc,
struct nix_mcast_grp_elem *elem;
struct nix_mcast_grp *mcast_grp;
struct nix_hw *nix_hw;
int blkaddr;
int blkaddr, ret = 0;

blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
nix_hw = get_nix_hw(rvu->hw, blkaddr);
if (!nix_hw)
return NIX_AF_ERR_INVALID_NIXBLK;

mcast_grp = &nix_hw->mcast_grp;
mutex_lock(&mcast_grp->mcast_grp_lock);
elem = rvu_nix_mcast_find_grp_elem(mcast_grp, mcast_grp_idx);
if (!elem)
return NIX_AF_ERR_INVALID_MCAST_GRP;

elem->mcam_index = mcam_index;
ret = NIX_AF_ERR_INVALID_MCAST_GRP;
else
elem->mcam_index = mcam_index;

return 0;
mutex_unlock(&mcast_grp->mcast_grp_lock);
return ret;
}

int rvu_mbox_handler_nix_mcast_grp_create(struct rvu *rvu,
Expand Down Expand Up @@ -6297,18 +6301,27 @@ int rvu_mbox_handler_nix_mcast_grp_destroy(struct rvu *rvu,
struct npc_delete_flow_rsp uninstall_rsp = { 0 };
struct nix_mcast_grp_elem *elem;
struct nix_mcast_grp *mcast_grp;
int blkaddr, err, ret = 0;
struct nix_mcast *mcast;
struct nix_hw *nix_hw;
int blkaddr, err;

err = nix_get_struct_ptrs(rvu, req->hdr.pcifunc, &nix_hw, &blkaddr);
if (err)
return err;

mcast_grp = &nix_hw->mcast_grp;

/* If AF is requesting for the deletion,
* then AF is already taking the lock
*/
if (!req->is_af)
mutex_lock(&mcast_grp->mcast_grp_lock);

elem = rvu_nix_mcast_find_grp_elem(mcast_grp, req->mcast_grp_idx);
if (!elem)
return NIX_AF_ERR_INVALID_MCAST_GRP;
if (!elem) {
ret = NIX_AF_ERR_INVALID_MCAST_GRP;
goto unlock_grp;
}

/* If no mce entries are associated with the group
* then just remove it from the global list.
Expand All @@ -6333,19 +6346,15 @@ int rvu_mbox_handler_nix_mcast_grp_destroy(struct rvu *rvu,
mutex_unlock(&mcast->mce_lock);

delete_grp:
/* If AF is requesting for the deletion,
* then AF is already taking the lock
*/
if (!req->is_af)
mutex_lock(&mcast_grp->mcast_grp_lock);

list_del(&elem->list);
kfree(elem);
mcast_grp->count--;

unlock_grp:
if (!req->is_af)
mutex_unlock(&mcast_grp->mcast_grp_lock);

return 0;
return ret;
}

int rvu_mbox_handler_nix_mcast_grp_update(struct rvu *rvu,
Expand All @@ -6370,9 +6379,18 @@ int rvu_mbox_handler_nix_mcast_grp_update(struct rvu *rvu,
return err;

mcast_grp = &nix_hw->mcast_grp;

/* If AF is requesting for the updation,
* then AF is already taking the lock
*/
if (!req->is_af)
mutex_lock(&mcast_grp->mcast_grp_lock);

elem = rvu_nix_mcast_find_grp_elem(mcast_grp, req->mcast_grp_idx);
if (!elem)
return NIX_AF_ERR_INVALID_MCAST_GRP;
if (!elem) {
ret = NIX_AF_ERR_INVALID_MCAST_GRP;
goto unlock_grp;
}

/* If any pcifunc matches the group's pcifunc, then we can
* delete the entire group.
Expand All @@ -6383,9 +6401,10 @@ int rvu_mbox_handler_nix_mcast_grp_update(struct rvu *rvu,
/* Delete group */
dreq.hdr.pcifunc = elem->pcifunc;
dreq.mcast_grp_idx = elem->mcast_grp_idx;
dreq.is_af = req->is_af;
dreq.is_af = 1;
rvu_mbox_handler_nix_mcast_grp_destroy(rvu, &dreq, NULL);
return 0;
ret = 0;
goto unlock_grp;
}
}
}
Expand All @@ -6410,7 +6429,7 @@ int rvu_mbox_handler_nix_mcast_grp_update(struct rvu *rvu,
npc_enable_mcam_entry(rvu, mcam, npc_blkaddr,
elem->mcam_index, true);
ret = NIX_AF_ERR_NON_CONTIG_MCE_LIST;
goto done;
goto unlock_mce;
}
}

Expand All @@ -6426,15 +6445,15 @@ int rvu_mbox_handler_nix_mcast_grp_update(struct rvu *rvu,
npc_enable_mcam_entry(rvu, mcam, npc_blkaddr,
elem->mcam_index, true);

goto done;
goto unlock_mce;
}
} else {
if (!prev_count || prev_count < req->num_mce_entry) {
if (elem->mcam_index != -1)
npc_enable_mcam_entry(rvu, mcam, npc_blkaddr,
elem->mcam_index, true);
ret = NIX_AF_ERR_INVALID_MCAST_DEL_REQ;
goto done;
goto unlock_mce;
}

nix_free_mce_list(mcast, prev_count, elem->mce_start_index, elem->dir);
Expand All @@ -6450,22 +6469,27 @@ int rvu_mbox_handler_nix_mcast_grp_update(struct rvu *rvu,
elem->mcam_index,
true);

goto done;
goto unlock_mce;
}
}

if (elem->mcam_index == -1) {
rsp->mce_start_index = elem->mce_start_index;
ret = 0;
goto done;
goto unlock_mce;
}

nix_mcast_update_action(rvu, elem);
npc_enable_mcam_entry(rvu, mcam, npc_blkaddr, elem->mcam_index, true);
rsp->mce_start_index = elem->mce_start_index;
ret = 0;

done:
unlock_mce:
mutex_unlock(&mcast->mce_lock);

unlock_grp:
if (!req->is_af)
mutex_unlock(&mcast_grp->mcast_grp_lock);

return ret;
}

0 comments on commit 10b7572

Please sign in to comment.