scsi: elx: efct: Hardware queues processing
Add driver definitions for:

 - Routines for EQ, CQ, WQ and RQ processing.

 - Routines for I/O object pool allocation and deallocation.

Link: https://lore.kernel.org/r/20210601235512.20104-23-jsmart2021@gmail.com
Reviewed-by: Hannes Reinecke <hare@suse.de>
Reviewed-by: Daniel Wagner <dwagner@suse.de>
Co-developed-by: Ram Vegesna <ram.vegesna@broadcom.com>
Signed-off-by: Ram Vegesna <ram.vegesna@broadcom.com>
Signed-off-by: James Smart <jsmart2021@gmail.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
James Smart authored and Martin K. Petersen committed Jun 16, 2021
1 parent 63de513 commit e2cf422
Showing 4 changed files with 764 additions and 0 deletions.
360 changes: 360 additions & 0 deletions drivers/scsi/elx/efct/efct_hw.c
@@ -2131,3 +2131,363 @@ efct_hw_reqtag_get_instance(struct efct_hw *hw, u32 instance_index)

return wqcb;
}

int
efct_hw_queue_hash_find(struct efct_queue_hash *hash, u16 id)
{
int index = -1;
int i = id & (EFCT_HW_Q_HASH_SIZE - 1);

/*
* Since the hash is always bigger than the maximum number of Qs, then
* we never have to worry about an infinite loop. We will always find
* an unused entry.
*/
do {
if (hash[i].in_use && hash[i].id == id)
index = hash[i].index;
else
i = (i + 1) & (EFCT_HW_Q_HASH_SIZE - 1);
} while (index == -1 && hash[i].in_use);

return index;
}
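
The lookup above is a simple open-addressed table with linear probing; because the table is sized larger than the number of queues, the probe is guaranteed to reach an unused slot and terminate. Below is a minimal standalone sketch of the same probe (illustrative names and table size, not part of this patch) together with the matching insert step, which makes the termination argument concrete.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define Q_HASH_SIZE 64  /* illustrative: a power of two, larger than the Q count */

struct q_hash_entry {
        bool in_use;
        uint16_t id;
        int index;
};

/* Same probe as efct_hw_queue_hash_find(): linear probing that stops at the
 * first unused slot, which must exist because the table is oversized. */
static int q_hash_find(struct q_hash_entry *hash, uint16_t id)
{
        int index = -1;
        int i = id & (Q_HASH_SIZE - 1);

        do {
                if (hash[i].in_use && hash[i].id == id)
                        index = hash[i].index;
                else
                        i = (i + 1) & (Q_HASH_SIZE - 1);
        } while (index == -1 && hash[i].in_use);

        return index;
}

int main(void)
{
        struct q_hash_entry hash[Q_HASH_SIZE] = { 0 };

        /* Insert queue id 0x123 at hardware index 7 (linear-probing insert). */
        int slot = 0x123 & (Q_HASH_SIZE - 1);

        while (hash[slot].in_use)
                slot = (slot + 1) & (Q_HASH_SIZE - 1);
        hash[slot].in_use = true;
        hash[slot].id = 0x123;
        hash[slot].index = 7;

        printf("lookup 0x123 -> %d\n", q_hash_find(hash, 0x123)); /* 7 */
        printf("lookup 0x124 -> %d\n", q_hash_find(hash, 0x124)); /* -1 */
        return 0;
}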

int
efct_hw_process(struct efct_hw *hw, u32 vector,
u32 max_isr_time_msec)
{
struct hw_eq *eq;

/*
* The caller should disable interrupts if they wish to prevent us
* from processing during a shutdown. The following states are defined:
* EFCT_HW_STATE_UNINITIALIZED - No queues allocated
* EFCT_HW_STATE_QUEUES_ALLOCATED - The state after a chip reset,
* queues are cleared.
* EFCT_HW_STATE_ACTIVE - Chip and queues are operational
* EFCT_HW_STATE_RESET_IN_PROGRESS - reset, we still want completions
* EFCT_HW_STATE_TEARDOWN_IN_PROGRESS - We still want mailbox
* completions.
*/
if (hw->state == EFCT_HW_STATE_UNINITIALIZED)
return 0;

/* Get pointer to struct hw_eq */
eq = hw->hw_eq[vector];
if (!eq)
return 0;

eq->use_count++;

return efct_hw_eq_process(hw, eq, max_isr_time_msec);
}

int
efct_hw_eq_process(struct efct_hw *hw, struct hw_eq *eq,
u32 max_isr_time_msec)
{
u8 eqe[sizeof(struct sli4_eqe)] = { 0 };
u32 tcheck_count;
u64 tstart;
u64 telapsed;
bool done = false;

tcheck_count = EFCT_HW_TIMECHECK_ITERATIONS;
tstart = jiffies_to_msecs(jiffies);

while (!done && !sli_eq_read(&hw->sli, eq->queue, eqe)) {
u16 cq_id = 0;
int rc;

rc = sli_eq_parse(&hw->sli, eqe, &cq_id);
if (unlikely(rc)) {
if (rc == SLI4_EQE_STATUS_EQ_FULL) {
u32 i;

/*
* Received a sentinel EQE indicating the
* EQ is full. Process all CQs
*/
for (i = 0; i < hw->cq_count; i++)
efct_hw_cq_process(hw, hw->hw_cq[i]);
continue;
} else {
return rc;
}
} else {
int index;

index = efct_hw_queue_hash_find(hw->cq_hash, cq_id);

if (likely(index >= 0))
efct_hw_cq_process(hw, hw->hw_cq[index]);
else
efc_log_err(hw->os, "bad CQ_ID %#06x\n", cq_id);
}

if (eq->queue->n_posted > eq->queue->posted_limit)
sli_queue_arm(&hw->sli, eq->queue, false);

if (tcheck_count && (--tcheck_count == 0)) {
tcheck_count = EFCT_HW_TIMECHECK_ITERATIONS;
telapsed = jiffies_to_msecs(jiffies) - tstart;
if (telapsed >= max_isr_time_msec)
done = true;
}
}
sli_queue_eq_arm(&hw->sli, eq->queue, true);

return 0;
}
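
efct_hw_eq_process() bounds how long it spends in interrupt context: it samples the clock only every EFCT_HW_TIMECHECK_ITERATIONS events and stops once max_isr_time_msec has elapsed, leaving the EQ armed so remaining entries raise another interrupt. A minimal userspace sketch of that amortized time-check pattern follows; the constant value and the consume_one_event() helper are illustrative assumptions, not driver code.

#include <stdbool.h>
#include <stdint.h>
#include <time.h>

#define TIMECHECK_ITERATIONS 100  /* illustrative period between clock samples */

static uint64_t now_msec(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (uint64_t)ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
}

static int remaining = 250;

static bool consume_one_event(void)
{
        /* Stand-in for sli_eq_read() plus the per-CQ work it triggers. */
        return remaining-- > 0;
}

static void process_bounded(uint32_t max_time_msec)
{
        uint32_t tcheck_count = TIMECHECK_ITERATIONS;
        uint64_t tstart = now_msec();

        while (consume_one_event()) {
                if (tcheck_count && --tcheck_count == 0) {
                        tcheck_count = TIMECHECK_ITERATIONS;
                        if (now_msec() - tstart >= max_time_msec)
                                break;  /* yield; leftover events handled on the next pass */
                }
        }
}

int main(void)
{
        process_bounded(10);    /* drains the fake backlog well within the budget */
        return 0;
}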

static int
_efct_hw_wq_write(struct hw_wq *wq, struct efct_hw_wqe *wqe)
{
int queue_rc;

/* Every so often, set the wqec bit to generate consumed completions */
if (wq->wqec_count)
wq->wqec_count--;

if (wq->wqec_count == 0) {
struct sli4_generic_wqe *genwqe = (void *)wqe->wqebuf;

genwqe->cmdtype_wqec_byte |= SLI4_GEN_WQE_WQEC;
wq->wqec_count = wq->wqec_set_count;
}

/* Decrement WQ free count */
wq->free_count--;

queue_rc = sli_wq_write(&wq->hw->sli, wq->queue, wqe->wqebuf);

return (queue_rc < 0) ? -EIO : 0;
}
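
The wqec countdown above asks the hardware for a "consumed" completion only on every wqec_set_count-th work-queue entry, which is what later replenishes the WQ free count. The short standalone sketch below (hypothetical names, illustrative period) shows the same countdown in isolation.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct wq_state {
        uint32_t wqec_count;            /* entries left until the next flagged WQE */
        uint32_t wqec_set_count;        /* period, e.g. one completion per 4 WQEs */
};

/* Returns true when this particular entry should carry the WQEC flag. */
static bool post_entry(struct wq_state *wq)
{
        if (wq->wqec_count)
                wq->wqec_count--;

        if (wq->wqec_count == 0) {
                wq->wqec_count = wq->wqec_set_count;
                return true;    /* hardware reports how many WQEs it has consumed */
        }
        return false;
}

int main(void)
{
        struct wq_state wq = { .wqec_count = 4, .wqec_set_count = 4 };

        for (int i = 1; i <= 8; i++)
                printf("entry %d: wqec=%d\n", i, post_entry(&wq));
        return 0;
}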

static void
hw_wq_submit_pending(struct hw_wq *wq, u32 update_free_count)
{
struct efct_hw_wqe *wqe;
unsigned long flags = 0;

spin_lock_irqsave(&wq->queue->lock, flags);

/* Update free count with value passed in */
wq->free_count += update_free_count;

while ((wq->free_count > 0) && (!list_empty(&wq->pending_list))) {
wqe = list_first_entry(&wq->pending_list,
struct efct_hw_wqe, list_entry);
list_del_init(&wqe->list_entry);
_efct_hw_wq_write(wq, wqe);

if (wqe->abort_wqe_submit_needed) {
wqe->abort_wqe_submit_needed = false;
efct_hw_fill_abort_wqe(wq->hw, wqe);
INIT_LIST_HEAD(&wqe->list_entry);
list_add_tail(&wqe->list_entry, &wq->pending_list);
wq->wq_pending_count++;
}
}

spin_unlock_irqrestore(&wq->queue->lock, flags);
}
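
hw_wq_submit_pending() is the consumer side of that scheme: each WQ_RELEASE completion hands back a batch of credits, and queued WQEs are drained while credits remain. The following userspace sketch models the credit accounting with a plain linked list; it deliberately omits the locking and the abort-WQE requeue handled by the real function, and all names are illustrative.

#include <stdio.h>
#include <stdlib.h>

struct pending_entry {
        int payload;
        struct pending_entry *next;
};

struct work_queue {
        int free_count;                 /* available WQ slots (credits) */
        struct pending_entry *pending;  /* FIFO head of deferred entries */
        struct pending_entry **tail;
};

static void submit_pending(struct work_queue *wq, int returned_credits)
{
        wq->free_count += returned_credits;

        while (wq->free_count > 0 && wq->pending) {
                struct pending_entry *e = wq->pending;

                wq->pending = e->next;
                if (!wq->pending)
                        wq->tail = &wq->pending;

                wq->free_count--;       /* one slot consumed per posted entry */
                printf("posted entry %d, credits left %d\n",
                       e->payload, wq->free_count);
                free(e);
        }
}

int main(void)
{
        struct work_queue wq = { .free_count = 0 };
        wq.tail = &wq.pending;

        for (int i = 0; i < 3; i++) {
                struct pending_entry *e = calloc(1, sizeof(*e));

                e->payload = i;
                *wq.tail = e;
                wq.tail = &e->next;
        }

        submit_pending(&wq, 2); /* drains two entries, one stays queued */
        submit_pending(&wq, 2); /* drains the last one */
        return 0;
}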

void
efct_hw_cq_process(struct efct_hw *hw, struct hw_cq *cq)
{
u8 cqe[sizeof(struct sli4_mcqe)];
u16 rid = U16_MAX;
/* completion type */
enum sli4_qentry ctype;
u32 n_processed = 0;
u32 tstart, telapsed;

tstart = jiffies_to_msecs(jiffies);

while (!sli_cq_read(&hw->sli, cq->queue, cqe)) {
int status;

status = sli_cq_parse(&hw->sli, cq->queue, cqe, &ctype, &rid);
/*
* The sign of status is significant. If status is:
* == 0 : call completed correctly and
* the CQE indicated success
* > 0 : call completed correctly and
* the CQE indicated an error
* < 0 : call failed and no information is available about the
* CQE
*/
if (status < 0) {
if (status == SLI4_MCQE_STATUS_NOT_COMPLETED)
/*
* Notification that an entry was consumed,
* but not completed
*/
continue;

break;
}

switch (ctype) {
case SLI4_QENTRY_ASYNC:
sli_cqe_async(&hw->sli, cqe);
break;
case SLI4_QENTRY_MQ:
/*
* Process MQ entry. Note there is no way to determine
* the MQ_ID from the completion entry.
*/
efct_hw_mq_process(hw, status, hw->mq);
break;
case SLI4_QENTRY_WQ:
efct_hw_wq_process(hw, cq, cqe, status, rid);
break;
case SLI4_QENTRY_WQ_RELEASE: {
u32 wq_id = rid;
int index;
struct hw_wq *wq = NULL;

index = efct_hw_queue_hash_find(hw->wq_hash, wq_id);

if (likely(index >= 0)) {
wq = hw->hw_wq[index];
} else {
efc_log_err(hw->os, "bad WQ_ID %#06x\n", wq_id);
break;
}
/* Submit any HW IOs that are on the WQ pending list */
hw_wq_submit_pending(wq, wq->wqec_set_count);

break;
}

case SLI4_QENTRY_RQ:
efct_hw_rqpair_process_rq(hw, cq, cqe);
break;
case SLI4_QENTRY_XABT: {
efct_hw_xabt_process(hw, cq, cqe, rid);
break;
}
default:
efc_log_debug(hw->os, "unhandled ctype=%#x rid=%#x\n",
ctype, rid);
break;
}

n_processed++;
if (n_processed == cq->queue->proc_limit)
break;

if (cq->queue->n_posted >= cq->queue->posted_limit)
sli_queue_arm(&hw->sli, cq->queue, false);
}

sli_queue_arm(&hw->sli, cq->queue, true);

if (n_processed > cq->queue->max_num_processed)
cq->queue->max_num_processed = n_processed;
telapsed = jiffies_to_msecs(jiffies) - tstart;
if (telapsed > cq->queue->max_process_time)
cq->queue->max_process_time = telapsed;
}

void
efct_hw_wq_process(struct efct_hw *hw, struct hw_cq *cq,
u8 *cqe, int status, u16 rid)
{
struct hw_wq_callback *wqcb;

if (rid == EFCT_HW_REQUE_XRI_REGTAG) {
if (status)
efc_log_err(hw->os, "reque xri failed, status = %d\n",
status);
return;
}

wqcb = efct_hw_reqtag_get_instance(hw, rid);
if (!wqcb) {
efc_log_err(hw->os, "invalid request tag: x%x\n", rid);
return;
}

if (!wqcb->callback) {
efc_log_err(hw->os, "wqcb callback is NULL\n");
return;
}

(*wqcb->callback)(wqcb->arg, cqe, status);
}
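
efct_hw_wq_process() is a tag-based dispatcher: the request tag carried by the completion is mapped back to the callback and argument that were registered when the WQE was posted. A simplified standalone sketch of that dispatch follows; the table size, types, and helper names are illustrative only, not the driver's request-tag pool.

#include <stdint.h>
#include <stdio.h>

#define NUM_TAGS 16

typedef void (*wq_done_cb)(void *arg, int status);

struct wq_callback {
        wq_done_cb callback;
        void *arg;
};

static struct wq_callback tag_table[NUM_TAGS];

static void dispatch(uint16_t rid, int status)
{
        if (rid >= NUM_TAGS || !tag_table[rid].callback) {
                fprintf(stderr, "invalid request tag: x%x\n", rid);
                return;
        }
        tag_table[rid].callback(tag_table[rid].arg, status);
}

static void my_done(void *arg, int status)
{
        printf("io %s completed, status %d\n", (const char *)arg, status);
}

int main(void)
{
        tag_table[3] = (struct wq_callback){ .callback = my_done, .arg = "A" };
        dispatch(3, 0);         /* runs my_done */
        dispatch(9, 0);         /* logged as an invalid tag */
        return 0;
}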

void
efct_hw_xabt_process(struct efct_hw *hw, struct hw_cq *cq,
u8 *cqe, u16 rid)
{
/* search IOs wait free list */
struct efct_hw_io *io = NULL;
unsigned long flags = 0;

io = efct_hw_io_lookup(hw, rid);
if (!io) {
/* IO lookup failure should never happen */
efc_log_err(hw->os, "xabt io lookup failed rid=%#x\n", rid);
return;
}

if (!io->xbusy)
efc_log_debug(hw->os, "xabt io not busy rid=%#x\n", rid);
else
/* mark IO as no longer busy */
io->xbusy = false;

/*
* For IOs that were aborted internally, we need to issue any pending
* callback here.
*/
if (io->done) {
efct_hw_done_t done = io->done;
void *arg = io->arg;

/*
* Use latched status as this is always saved for an internal
* abort
*/
int status = io->saved_status;
u32 len = io->saved_len;
u32 ext = io->saved_ext;

io->done = NULL;
io->status_saved = false;

done(io, len, status, ext, arg);
}

spin_lock_irqsave(&hw->io_lock, flags);
if (io->state == EFCT_HW_IO_STATE_INUSE ||
io->state == EFCT_HW_IO_STATE_WAIT_FREE) {
/* if on wait_free list, caller has already freed IO;
* remove from wait_free list and add to free list.
* if on in-use list, already marked as no longer busy;
* just leave there and wait for caller to free.
*/
if (io->state == EFCT_HW_IO_STATE_WAIT_FREE) {
io->state = EFCT_HW_IO_STATE_FREE;
list_del_init(&io->list_entry);
efct_hw_io_free_move_correct_list(hw, io);
}
}
spin_unlock_irqrestore(&hw->io_lock, flags);
}

static int
efct_hw_flush(struct efct_hw *hw)
{
u32 i = 0;

/* Process any remaining completions */
for (i = 0; i < hw->eq_count; i++)
efct_hw_process(hw, i, ~0);

return 0;
}