Skip to content

Commit

Permalink
lightnvm: pblk: rework write error recovery path
Browse files Browse the repository at this point in the history
The write error recovery path is incomplete, so rework
the write error recovery handling to do resubmits directly
from the write buffer.

When a write error occurs, the remaining sectors in the chunk are
mapped out and invalidated, and the request is inserted in a resubmit list.

The writer thread checks if there are any requests to resubmit,
scans and invalidates any lbas that have been overwritten by later
writes and resubmits the failed entries.

Signed-off-by: Hans Holmberg <hans.holmberg@cnexlabs.com>
Reviewed-by: Javier González <javier@cnexlabs.com>
Signed-off-by: Matias Bjørling <mb@lightnvm.io>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
  • Loading branch information
Hans Holmberg authored and Jens Axboe committed Jun 1, 2018
1 parent 72b6cdb commit 6a3abf5
Show file tree
Hide file tree
Showing 5 changed files with 181 additions and 229 deletions.
2 changes: 2 additions & 0 deletions drivers/lightnvm/pblk-init.c
Original file line number Diff line number Diff line change
Expand Up @@ -426,6 +426,7 @@ static int pblk_core_init(struct pblk *pblk)
goto free_r_end_wq;

INIT_LIST_HEAD(&pblk->compl_list);
INIT_LIST_HEAD(&pblk->resubmit_list);

return 0;

Expand Down Expand Up @@ -1185,6 +1186,7 @@ static void *pblk_init(struct nvm_tgt_dev *dev, struct gendisk *tdisk,
pblk->state = PBLK_STATE_RUNNING;
pblk->gc.gc_enabled = 0;

spin_lock_init(&pblk->resubmit_lock);
spin_lock_init(&pblk->trans_lock);
spin_lock_init(&pblk->lock);

Expand Down
39 changes: 0 additions & 39 deletions drivers/lightnvm/pblk-rb.c
Original file line number Diff line number Diff line change
Expand Up @@ -502,45 +502,6 @@ int pblk_rb_may_write_gc(struct pblk_rb *rb, unsigned int nr_entries,
return 1;
}

/*
* The caller of this function must ensure that the backpointer will not
* overwrite the entries passed on the list.
*/
/*
 * Move up to @max entries from @list into @bio by adding a page reference
 * to each entry's write-buffer data (no data copy). Entries successfully
 * added are unlinked from @list; the number added is returned, so the
 * caller must treat a return value != the expected count as an error.
 *
 * The caller of this function must ensure that the backpointer will not
 * overwrite the entries passed on the list.
 */
unsigned int pblk_rb_read_to_bio_list(struct pblk_rb *rb, struct bio *bio,
				      struct list_head *list,
				      unsigned int max)
{
	struct pblk_rb_entry *entry, *tentry;
	struct page *page;
	unsigned int read = 0;
	int ret;

	list_for_each_entry_safe(entry, tentry, list, index) {
		/* Use >= so that at most @max entries are ever added.
		 * The previous '>' check let a (max + 1)th entry slip
		 * into the bio, and with exactly max + 1 entries on the
		 * list the overflow was never reported at all.
		 */
		if (read >= max) {
			pr_err("pblk: too many entries on list\n");
			goto out;
		}

		page = virt_to_page(entry->data);
		if (!page) {
			/* virt_to_page() maps an existing buffer; the old
			 * "could not allocate" message was misleading.
			 */
			pr_err("pblk: could not map write bio page\n");
			goto out;
		}

		ret = bio_add_page(bio, page, rb->seg_size, 0);
		if (ret != rb->seg_size) {
			pr_err("pblk: could not add page to write bio\n");
			goto out;
		}

		list_del(&entry->index);
		read++;
	}

out:
	return read;
}

/*
* Read available entries on rb and add them to the given bio. To avoid a memory
* copy, a page reference to the write buffer is used to be added to the bio.
Expand Down
91 changes: 0 additions & 91 deletions drivers/lightnvm/pblk-recovery.c
Original file line number Diff line number Diff line change
Expand Up @@ -16,97 +16,6 @@

#include "pblk.h"

/*
 * Worklet that re-submits the sectors of a failed write request.
 *
 * The failed entries were parked on recovery->failed; build a new write
 * bio over their write-buffer pages and issue it. The recovery context
 * is a mempool element and must be returned to pblk->rec_pool on every
 * exit path — the old code leaked it on the error path.
 */
void pblk_submit_rec(struct work_struct *work)
{
	struct pblk_rec_ctx *recovery =
			container_of(work, struct pblk_rec_ctx, ws_rec);
	struct pblk *pblk = recovery->pblk;
	struct nvm_rq *rqd = recovery->rqd;
	struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
	struct bio *bio;
	unsigned int nr_rec_secs;
	unsigned int pgs_read;
	int ret;

	/* One bit per sector still to be written */
	nr_rec_secs = bitmap_weight((unsigned long int *)&rqd->ppa_status,
								NVM_MAX_VLBA);

	/* bio_alloc() with GFP_KERNEL is backed by a mempool and does not
	 * fail here — presumably why the result is not checked; confirm if
	 * this path is ever reworked.
	 */
	bio = bio_alloc(GFP_KERNEL, nr_rec_secs);

	bio->bi_iter.bi_sector = 0;
	bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
	rqd->bio = bio;
	rqd->nr_ppas = nr_rec_secs;

	pgs_read = pblk_rb_read_to_bio_list(&pblk->rwb, bio, &recovery->failed,
								nr_rec_secs);
	if (pgs_read != nr_rec_secs) {
		pr_err("pblk: could not read recovery entries\n");
		goto err;
	}

	if (pblk_setup_w_rec_rq(pblk, rqd, c_ctx)) {
		pr_err("pblk: could not setup recovery request\n");
		goto err;
	}

#ifdef CONFIG_NVM_DEBUG
	atomic_long_add(nr_rec_secs, &pblk->recov_writes);
#endif

	ret = pblk_submit_io(pblk, rqd);
	if (ret) {
		pr_err("pblk: I/O submission failed: %d\n", ret);
		goto err;
	}

	mempool_free(recovery, &pblk->rec_pool);
	return;

err:
	bio_put(bio);
	pblk_free_rqd(pblk, rqd, PBLK_WRITE);
	/* Fix: the recovery context was leaked on this path */
	mempool_free(recovery, &pblk->rec_pool);
}

/*
 * Split a partially completed write: allocate a fresh request carrying
 * the entries of @c_ctx that did NOT complete (everything after the
 * first @comp entries) and shrink @c_ctx down to the completed part.
 *
 * @comp_bits is the original completion bitmap; the new request's
 * ppa_status drops the first @comp (completed) bits. On return,
 * recovery->rqd/recovery->pblk describe the request to re-submit.
 *
 * Always returns 0.
 */
int pblk_recov_setup_rq(struct pblk *pblk, struct pblk_c_ctx *c_ctx,
			struct pblk_rec_ctx *recovery, u64 *comp_bits,
			unsigned int comp)
{
	struct nvm_rq *rqd = pblk_alloc_rqd(pblk, PBLK_WRITE);
	struct pblk_c_ctx *r_ctx = nvm_rq_to_pdu(rqd);
	unsigned int valid = c_ctx->nr_valid;
	unsigned int total = valid + c_ctx->nr_padded;

	/* Completion bitmap for the new request, with the first @comp
	 * (already completed) bits shifted out.
	 */
	bitmap_shift_right((unsigned long int *)&rqd->ppa_status,
				(unsigned long int *)comp_bits,
				comp, NVM_MAX_VLBA);

	/* The entries to re-write start right after the completed ones */
	r_ctx->sentry = pblk_rb_wrap_pos(&pblk->rwb, c_ctx->sentry + comp);

	if (comp < valid) {
		/* Some valid (mapped) entries remain to be re-written */
		r_ctx->nr_valid = valid - comp;
		r_ctx->nr_padded = c_ctx->nr_padded;

		c_ctx->nr_valid = comp;
		c_ctx->nr_padded = 0;
	} else {
		/* Only padding entries are left to re-write */
		r_ctx->nr_valid = 0;
		r_ctx->nr_padded = total - comp;

		c_ctx->nr_padded = comp - valid;
	}

	recovery->rqd = rqd;
	recovery->pblk = pblk;

	return 0;
}

int pblk_recov_check_emeta(struct pblk *pblk, struct line_emeta *emeta_buf)
{
u32 crc;
Expand Down
Loading

0 comments on commit 6a3abf5

Please sign in to comment.