skd: Inline skd_process_request()
This patch does not change any functionality but makes the skd
driver code more similar to that of other blk-mq kernel drivers.

Signed-off-by: Bart Van Assche <bart.vanassche@wdc.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Hannes Reinecke <hare@suse.de>
Cc: Johannes Thumshirn <jthumshirn@suse.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Bart Van Assche authored and Jens Axboe committed Aug 23, 2017
1 parent 49f16e2 commit c39c6c7
Showing 1 changed file with 10 additions and 20 deletions.
drivers/block/skd_main.c (30 changes: 10 additions & 20 deletions)
@@ -478,8 +478,10 @@ static bool skd_fail_all(struct request_queue *q)
 	}
 }
 
-static void skd_process_request(struct request *req, bool last)
+static blk_status_t skd_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
+				    const struct blk_mq_queue_data *mqd)
 {
+	struct request *const req = mqd->rq;
 	struct request_queue *const q = req->q;
 	struct skd_device *skdev = q->queuedata;
 	struct skd_fitmsg_context *skmsg;
@@ -492,6 +494,11 @@ static void skd_process_request(struct request *req, bool last)
 	const u32 count = blk_rq_sectors(req);
 	const int data_dir = rq_data_dir(req);
 
+	if (unlikely(skdev->state != SKD_DRVR_STATE_ONLINE))
+		return skd_fail_all(q) ? BLK_STS_IOERR : BLK_STS_RESOURCE;
+
+	blk_mq_start_request(req);
+
 	WARN_ONCE(tag >= skd_max_queue_depth, "%#x > %#x (nr_requests = %lu)\n",
 		  tag, skd_max_queue_depth, q->nr_requests);
 
@@ -514,7 +521,7 @@ static void skd_process_request(struct request *req, bool last)
 		dev_dbg(&skdev->pdev->dev, "error Out\n");
 		skd_end_request(skdev, blk_mq_rq_from_pdu(skreq),
 				BLK_STS_RESOURCE);
-		return;
+		return BLK_STS_OK;
 	}
 
 	dma_sync_single_for_device(&skdev->pdev->dev, skreq->sksg_dma_address,
@@ -578,30 +585,13 @@ static void skd_process_request(struct request *req, bool last)
 	if (skd_max_req_per_msg == 1) {
 		skd_send_fitmsg(skdev, skmsg);
 	} else {
-		if (last ||
+		if (mqd->last ||
 		    fmh->num_protocol_cmds_coalesced >= skd_max_req_per_msg) {
 			skd_send_fitmsg(skdev, skmsg);
 			skdev->skmsg = NULL;
 		}
 		spin_unlock_irqrestore(&skdev->lock, flags);
 	}
-}
-
-static blk_status_t skd_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
-				    const struct blk_mq_queue_data *mqd)
-{
-	struct request *req = mqd->rq;
-	struct request_queue *const q = req->q;
-	struct skd_device *skdev = q->queuedata;
-
-	if (skdev->state == SKD_DRVR_STATE_ONLINE) {
-		blk_mq_start_request(req);
-		skd_process_request(req, mqd->last);
-
-		return BLK_STS_OK;
-	} else {
-		return skd_fail_all(q) ? BLK_STS_IOERR : BLK_STS_RESOURCE;
-	}
 
 	return BLK_STS_OK;
 }
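
For context (not part of the commit itself): with skd_process_request() folded in, skd_mq_queue_rq() now has the shape most blk-mq drivers give their .queue_rq() callback: check the device state, call blk_mq_start_request(), build and issue the command inline, and return a blk_status_t. Below is a minimal sketch of that convention, assuming a hypothetical mydrv driver (the mydrv_* names and the online flag are illustrative, not taken from skd_main.c).

/* Hypothetical blk-mq driver: the conventional .queue_rq() shape. */
#include <linux/blk-mq.h>

struct mydrv_device {
	bool online;		/* illustrative per-device state flag */
};

static blk_status_t mydrv_queue_rq(struct blk_mq_hw_ctx *hctx,
				   const struct blk_mq_queue_data *bd)
{
	struct request *req = bd->rq;
	struct mydrv_device *mydev = req->q->queuedata;

	/* Refuse work before starting the request if the device is unusable. */
	if (unlikely(!mydev->online))
		return BLK_STS_IOERR;	/* or BLK_STS_RESOURCE to have it retried */

	blk_mq_start_request(req);

	/* ... map the data, build the hardware command, ring the doorbell ... */

	return BLK_STS_OK;
}

Returning BLK_STS_RESOURCE from .queue_rq() asks the block layer to requeue the request and try again later, while BLK_STS_IOERR fails it outright; that distinction is why the inlined check lets skd_fail_all() decide which of the two to return.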
