CIFS: SMBD: Do not call ib_dereg_mr on invalidated memory registration
It is not necessary to deregister a memory registration after it has been
successfully invalidated.

Signed-off-by: Long Li <longli@microsoft.com>
Signed-off-by: Steve French <stfrench@microsoft.com>
Long Li authored and Steve French committed Oct 24, 2018
1 parent 6d3adb2 commit ff526d8
Showing 1 changed file with 19 additions and 19 deletions.
fs/cifs/smbdirect.c (19 additions, 19 deletions)
@@ -2295,8 +2295,12 @@ static void smbd_mr_recovery_work(struct work_struct *work)
 	int rc;
 
 	list_for_each_entry(smbdirect_mr, &info->mr_list, list) {
-		if (smbdirect_mr->state == MR_INVALIDATED ||
-			smbdirect_mr->state == MR_ERROR) {
+		if (smbdirect_mr->state == MR_INVALIDATED)
+			ib_dma_unmap_sg(
+				info->id->device, smbdirect_mr->sgl,
+				smbdirect_mr->sgl_count,
+				smbdirect_mr->dir);
+		else if (smbdirect_mr->state == MR_ERROR) {
 
 			/* recover this MR entry */
 			rc = ib_dereg_mr(smbdirect_mr->mr);
@@ -2320,25 +2324,21 @@ static void smbd_mr_recovery_work(struct work_struct *work)
 				smbd_disconnect_rdma_connection(info);
 				continue;
 			}
+		} else
+			/* This MR is being used, don't recover it */
+			continue;
 
-			if (smbdirect_mr->state == MR_INVALIDATED)
-				ib_dma_unmap_sg(
-					info->id->device, smbdirect_mr->sgl,
-					smbdirect_mr->sgl_count,
-					smbdirect_mr->dir);
-
-			smbdirect_mr->state = MR_READY;
+		smbdirect_mr->state = MR_READY;
 
-			/* smbdirect_mr->state is updated by this function
-			 * and is read and updated by I/O issuing CPUs trying
-			 * to get a MR, the call to atomic_inc_return
-			 * implicates a memory barrier and guarantees this
-			 * value is updated before waking up any calls to
-			 * get_mr() from the I/O issuing CPUs
-			 */
-			if (atomic_inc_return(&info->mr_ready_count) == 1)
-				wake_up_interruptible(&info->wait_mr);
-		}
+		/* smbdirect_mr->state is updated by this function
+		 * and is read and updated by I/O issuing CPUs trying
+		 * to get a MR, the call to atomic_inc_return
+		 * implicates a memory barrier and guarantees this
+		 * value is updated before waking up any calls to
+		 * get_mr() from the I/O issuing CPUs
+		 */
+		if (atomic_inc_return(&info->mr_ready_count) == 1)
+			wake_up_interruptible(&info->wait_mr);
 	}
 }
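Read together, the two hunks leave the recovery loop in smbd_mr_recovery_work() with the per-MR flow sketched below. This is a condensed orientation sketch, not a verbatim copy of the file: the MR_ERROR recovery path that the diff view collapses between the hunks (the ib_dereg_mr()/MR reallocation error handling) is abbreviated to a comment.

/* Condensed sketch of the recovery loop after this commit. */
list_for_each_entry(smbdirect_mr, &info->mr_list, list) {
	if (smbdirect_mr->state == MR_INVALIDATED)
		/* The MR was already invalidated; only the DMA mapping
		 * needs to be torn down, no ib_dereg_mr() is required.
		 */
		ib_dma_unmap_sg(info->id->device, smbdirect_mr->sgl,
				smbdirect_mr->sgl_count, smbdirect_mr->dir);
	else if (smbdirect_mr->state == MR_ERROR) {
		/* Collapsed in the diff view above: deregister the broken
		 * MR and allocate a fresh one, disconnecting on failure.
		 */
	} else
		/* MR is in use; don't recover it. */
		continue;

	smbdirect_mr->state = MR_READY;

	/* atomic_inc_return() implies a memory barrier, so MR_READY is
	 * visible before any waiter on wait_mr is woken.
	 */
	if (atomic_inc_return(&info->mr_ready_count) == 1)
		wake_up_interruptible(&info->wait_mr);
}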
