From 10b7572d17871b027de1d17152f08a2dc9c3aef6 Mon Sep 17 00:00:00 2001
From: Suman Ghosh
Date: Wed, 13 Dec 2023 15:23:49 +0530
Subject: [PATCH] octeontx2-af: Fix multicast/mirror group lock/unlock issue

In the existing implementation there is a race between finding a
multicast/mirror group entry and deleting that entry: the group lock
was taken and released independently inside
rvu_nix_mcast_find_grp_elem(). This is incorrect; the group lock must
be held for the entire group update/deletion operation. Fix this by
making the lookup lockless and taking the lock in the callers, holding
it until the operation completes.

Fixes: 51b2804c19cd ("octeontx2-af: Add new mbox to support multicast/mirror offload")
Signed-off-by: Suman Ghosh
Reviewed-by: Simon Horman
Signed-off-by: David S. Miller
---
 .../ethernet/marvell/octeontx2/af/rvu_nix.c   | 84 ++++++++++++-------
 1 file changed, 54 insertions(+), 30 deletions(-)

diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index b01503acd5208..72e0a7717c3eb 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -6142,14 +6142,12 @@ static struct nix_mcast_grp_elem *rvu_nix_mcast_find_grp_elem(struct nix_mcast_g
 	struct nix_mcast_grp_elem *iter;
 	bool is_found = false;
 
-	mutex_lock(&mcast_grp->mcast_grp_lock);
 	list_for_each_entry(iter, &mcast_grp->mcast_grp_head, list) {
 		if (iter->mcast_grp_idx == mcast_grp_idx) {
 			is_found = true;
 			break;
 		}
 	}
-	mutex_unlock(&mcast_grp->mcast_grp_lock);
 
 	if (is_found)
 		return iter;
@@ -6162,7 +6160,7 @@ int rvu_nix_mcast_get_mce_index(struct rvu *rvu, u16 pcifunc, u32 mcast_grp_idx)
 	struct nix_mcast_grp_elem *elem;
 	struct nix_mcast_grp *mcast_grp;
 	struct nix_hw *nix_hw;
-	int blkaddr;
+	int blkaddr, ret;
 
 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
@@ -6170,11 +6168,15 @@ int rvu_nix_mcast_get_mce_index(struct rvu *rvu, u16 pcifunc, u32 mcast_grp_idx)
 		return NIX_AF_ERR_INVALID_NIXBLK;
 
 	mcast_grp = &nix_hw->mcast_grp;
+	mutex_lock(&mcast_grp->mcast_grp_lock);
 	elem = rvu_nix_mcast_find_grp_elem(mcast_grp, mcast_grp_idx);
 	if (!elem)
-		return NIX_AF_ERR_INVALID_MCAST_GRP;
+		ret = NIX_AF_ERR_INVALID_MCAST_GRP;
+	else
+		ret = elem->mce_start_index;
 
-	return elem->mce_start_index;
+	mutex_unlock(&mcast_grp->mcast_grp_lock);
+	return ret;
 }
 
 void rvu_nix_mcast_flr_free_entries(struct rvu *rvu, u16 pcifunc)
@@ -6238,7 +6240,7 @@ int rvu_nix_mcast_update_mcam_entry(struct rvu *rvu, u16 pcifunc,
 	struct nix_mcast_grp_elem *elem;
 	struct nix_mcast_grp *mcast_grp;
 	struct nix_hw *nix_hw;
-	int blkaddr;
+	int blkaddr, ret = 0;
 
 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
@@ -6246,13 +6248,15 @@ int rvu_nix_mcast_update_mcam_entry(struct rvu *rvu, u16 pcifunc,
 		return NIX_AF_ERR_INVALID_NIXBLK;
 
 	mcast_grp = &nix_hw->mcast_grp;
+	mutex_lock(&mcast_grp->mcast_grp_lock);
 	elem = rvu_nix_mcast_find_grp_elem(mcast_grp, mcast_grp_idx);
 	if (!elem)
-		return NIX_AF_ERR_INVALID_MCAST_GRP;
-
-	elem->mcam_index = mcam_index;
+		ret = NIX_AF_ERR_INVALID_MCAST_GRP;
+	else
+		elem->mcam_index = mcam_index;
 
-	return 0;
+	mutex_unlock(&mcast_grp->mcast_grp_lock);
+	return ret;
 }
 
 int rvu_mbox_handler_nix_mcast_grp_create(struct rvu *rvu,
@@ -6297,18 +6301,27 @@ int rvu_mbox_handler_nix_mcast_grp_destroy(struct rvu *rvu,
 	struct npc_delete_flow_rsp uninstall_rsp = { 0 };
 	struct nix_mcast_grp_elem *elem;
 	struct nix_mcast_grp *mcast_grp;
+	int blkaddr, err, ret = 0;
 	struct nix_mcast *mcast;
 	struct nix_hw *nix_hw;
-	int blkaddr, err;
 
 	err = nix_get_struct_ptrs(rvu, req->hdr.pcifunc, &nix_hw, &blkaddr);
 	if (err)
 		return err;
 
 	mcast_grp = &nix_hw->mcast_grp;
+
+	/* If AF is requesting for the deletion,
+	 * then AF is already taking the lock
+	 */
+	if (!req->is_af)
+		mutex_lock(&mcast_grp->mcast_grp_lock);
+
 	elem = rvu_nix_mcast_find_grp_elem(mcast_grp, req->mcast_grp_idx);
-	if (!elem)
-		return NIX_AF_ERR_INVALID_MCAST_GRP;
+	if (!elem) {
+		ret = NIX_AF_ERR_INVALID_MCAST_GRP;
+		goto unlock_grp;
+	}
 
 	/* If no mce entries are associated with the group
 	 * then just remove it from the global list.
@@ -6333,19 +6346,15 @@ int rvu_mbox_handler_nix_mcast_grp_destroy(struct rvu *rvu,
 	mutex_unlock(&mcast->mce_lock);
 
 delete_grp:
-	/* If AF is requesting for the deletion,
-	 * then AF is already taking the lock
-	 */
-	if (!req->is_af)
-		mutex_lock(&mcast_grp->mcast_grp_lock);
-
 	list_del(&elem->list);
 	kfree(elem);
 	mcast_grp->count--;
+
+unlock_grp:
 	if (!req->is_af)
 		mutex_unlock(&mcast_grp->mcast_grp_lock);
 
-	return 0;
+	return ret;
 }
 
 int rvu_mbox_handler_nix_mcast_grp_update(struct rvu *rvu,
@@ -6370,9 +6379,18 @@ int rvu_mbox_handler_nix_mcast_grp_update(struct rvu *rvu,
 		return err;
 
 	mcast_grp = &nix_hw->mcast_grp;
+
+	/* If AF is requesting for the updation,
+	 * then AF is already taking the lock
+	 */
+	if (!req->is_af)
+		mutex_lock(&mcast_grp->mcast_grp_lock);
+
 	elem = rvu_nix_mcast_find_grp_elem(mcast_grp, req->mcast_grp_idx);
-	if (!elem)
-		return NIX_AF_ERR_INVALID_MCAST_GRP;
+	if (!elem) {
+		ret = NIX_AF_ERR_INVALID_MCAST_GRP;
+		goto unlock_grp;
+	}
 
 	/* If any pcifunc matches the group's pcifunc, then we can
 	 * delete the entire group.
 	 */
@@ -6383,9 +6401,10 @@ int rvu_mbox_handler_nix_mcast_grp_update(struct rvu *rvu,
 			/* Delete group */
 			dreq.hdr.pcifunc = elem->pcifunc;
 			dreq.mcast_grp_idx = elem->mcast_grp_idx;
-			dreq.is_af = req->is_af;
+			dreq.is_af = 1;
 			rvu_mbox_handler_nix_mcast_grp_destroy(rvu, &dreq, NULL);
-			return 0;
+			ret = 0;
+			goto unlock_grp;
 			}
 		}
 	}
@@ -6410,7 +6429,7 @@ int rvu_mbox_handler_nix_mcast_grp_update(struct rvu *rvu,
 				npc_enable_mcam_entry(rvu, mcam, npc_blkaddr,
 						      elem->mcam_index, true);
 				ret = NIX_AF_ERR_NON_CONTIG_MCE_LIST;
-				goto done;
+				goto unlock_mce;
 			}
 		}
 
@@ -6426,7 +6445,7 @@ int rvu_mbox_handler_nix_mcast_grp_update(struct rvu *rvu,
 				npc_enable_mcam_entry(rvu, mcam, npc_blkaddr,
 						      elem->mcam_index, true);
 
-			goto done;
+			goto unlock_mce;
 		}
 	} else {
 		if (!prev_count || prev_count < req->num_mce_entry) {
@@ -6434,7 +6453,7 @@ int rvu_mbox_handler_nix_mcast_grp_update(struct rvu *rvu,
 				npc_enable_mcam_entry(rvu, mcam, npc_blkaddr,
 						      elem->mcam_index, true);
 			ret = NIX_AF_ERR_INVALID_MCAST_DEL_REQ;
-			goto done;
+			goto unlock_mce;
 		}
 
 		nix_free_mce_list(mcast, prev_count, elem->mce_start_index, elem->dir);
@@ -6450,14 +6469,14 @@ int rvu_mbox_handler_nix_mcast_grp_update(struct rvu *rvu,
 						      elem->mcam_index,
 						      true);
 
-			goto done;
+			goto unlock_mce;
 		}
 	}
 
 	if (elem->mcam_index == -1) {
 		rsp->mce_start_index = elem->mce_start_index;
 		ret = 0;
-		goto done;
+		goto unlock_mce;
 	}
 
 	nix_mcast_update_action(rvu, elem);
@@ -6465,7 +6484,12 @@ int rvu_mbox_handler_nix_mcast_grp_update(struct rvu *rvu,
 	rsp->mce_start_index = elem->mce_start_index;
 	ret = 0;
 
-done:
+unlock_mce:
 	mutex_unlock(&mcast->mce_lock);
+
+unlock_grp:
+	if (!req->is_af)
+		mutex_unlock(&mcast_grp->mcast_grp_lock);
+
 	return ret;
 }
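
To make the race concrete: the bug was a lookup helper that took and dropped
the group lock internally, so the element it returned could be freed by a
concurrent destroy before the caller touched it. Below is a minimal
standalone C sketch of the corrected pattern, assuming a pthread mutex and a
hand-rolled singly linked list in place of the driver's mcast_grp_lock and
list_head machinery; grp_elem, grp_lock, find_grp(), add_grp() and
destroy_grp() are invented names for illustration, not driver API.

	/* Minimal sketch of the locking pattern this patch enforces.
	 * grp_lock stands in for mcast_grp_lock; find_grp() stands in
	 * for rvu_nix_mcast_find_grp_elem(). Build with: cc -pthread.
	 */
	#include <pthread.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct grp_elem {
		int idx;
		struct grp_elem *next;
	};

	static struct grp_elem *grp_list;
	static pthread_mutex_t grp_lock = PTHREAD_MUTEX_INITIALIZER;

	/* Like the patched lookup: takes no lock itself, so the caller
	 * must hold grp_lock around the lookup *and* whatever it does
	 * with the returned pointer.
	 */
	static struct grp_elem *find_grp(int idx)
	{
		struct grp_elem *iter;

		for (iter = grp_list; iter; iter = iter->next)
			if (iter->idx == idx)
				return iter;
		return NULL;
	}

	static void add_grp(int idx)
	{
		struct grp_elem *elem = malloc(sizeof(*elem));

		if (!elem)
			return;
		elem->idx = idx;
		pthread_mutex_lock(&grp_lock);
		elem->next = grp_list;
		grp_list = elem;
		pthread_mutex_unlock(&grp_lock);
	}

	/* Correct pattern: one critical section covers both the lookup
	 * and the unlink/free, so no other thread can free the element
	 * between the two steps.
	 */
	static int destroy_grp(int idx)
	{
		struct grp_elem **pp, *elem;
		int ret = -1;

		pthread_mutex_lock(&grp_lock);
		elem = find_grp(idx);
		if (!elem)
			goto unlock_grp;	/* mirrors the patch's error path */

		for (pp = &grp_list; *pp; pp = &(*pp)->next) {
			if (*pp == elem) {
				*pp = elem->next;
				free(elem);
				ret = 0;
				break;
			}
		}
	unlock_grp:
		pthread_mutex_unlock(&grp_lock);
		return ret;
	}

	int main(void)
	{
		add_grp(1);
		add_grp(2);
		printf("destroy 1: %d\n", destroy_grp(1));	/* 0: found and freed */
		printf("destroy 1: %d\n", destroy_grp(1));	/* -1: already gone */
		return 0;
	}

As in the patch, the fix is structural rather than algorithmic: find_grp()
stays lockless, and every caller that acts on the returned pointer holds
grp_lock from before the lookup until after the action, which is exactly
what the new unlock_grp error paths in the mailbox handlers guarantee.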