UBI: fix atomic LEB change problems
When the UBI device is nearly full, i.e. all LEBs are mapped, we have
only one spare LEB left - the one reserved for WL purposes (I do not
count the PEBs reserved for bad PEB handling - assume NOR flash for
simplicity). If an "atomic LEB change" operation runs while the WL
unit is moving a LEB, there is no spare LEB left to finish the
operation, so it fails, which is not good. Moreover, if there are 2 or
more simultaneous "atomic LEB change" requests, only one of them has a
chance to succeed; the others will fail with -ENOSPC. Not good either.

This patch does 2 things:
1. Reserves one PEB for the "atomic LEB change" operation.
2. Serializes the operations so that only one of them may run
   at a time (by means of a mutex).

Pointed-to-by: Brijesh Singh <brijesh.s.singh@gmail.com>
Signed-off-by: Artem Bityutskiy <Artem.Bityutskiy@nokia.com>
Artem Bityutskiy authored and Artem Bityutskiy committed Oct 14, 2007
1 parent 6986646 commit e8823bd
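
To make the locking side of the fix easier to follow before reading the diff,
here is a condensed sketch of the control flow that ubi_eba_atomic_leb_change()
ends up with. It uses the identifiers from the patch (alc_mutex,
leb_write_lock(), ubi_wl_get_peb()), but it is a simplified illustration only:
the VID-header and data writes, the write-error retry path and the buffer
handling are elided, so it is not compilable on its own.

/*
 * Sketch of the serialization and unwind structure introduced by this
 * patch (illustration only, details elided).
 */
static int atomic_leb_change_sketch(struct ubi_device *ubi, int vol_id,
				    int lnum, int dtype)
{
	int err, pnum;

	mutex_lock(&ubi->alc_mutex);		/* one LEB change at a time */
	err = leb_write_lock(ubi, vol_id, lnum);
	if (err)
		goto out_mutex;

	pnum = ubi_wl_get_peb(ubi, dtype);	/* cannot starve: one PEB is
						   reserved for this operation */
	if (pnum < 0) {
		err = pnum;
		goto out_leb_unlock;
	}

	/* ... write the VID header and data to @pnum, update vol->eba_tbl ... */

out_leb_unlock:
	leb_write_unlock(ubi, vol_id, lnum);
out_mutex:
	mutex_unlock(&ubi->alc_mutex);
	return err;
}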
Show file tree
Hide file tree
Showing 2 changed files with 33 additions and 21 deletions.
drivers/mtd/ubi/eba.c: 29 additions, 19 deletions
@@ -46,6 +46,9 @@
 #include <linux/err.h>
 #include "ubi.h"
 
+/* Number of physical eraseblocks reserved for atomic LEB change operation */
+#define EBA_RESERVED_PEBS 1
+
 /**
  * struct ltree_entry - an entry in the lock tree.
  * @rb: links RB-tree nodes
@@ -827,6 +830,9 @@ int ubi_eba_write_leb_st(struct ubi_device *ubi, int vol_id, int lnum,
  * data, which has to be aligned. This function guarantees that in case of an
  * unclean reboot the old contents is preserved. Returns zero in case of
  * success and a negative error code in case of failure.
+ *
+ * UBI reserves one LEB for the "atomic LEB change" operation, so only one
+ * LEB change may be done at a time. This is ensured by @ubi->alc_mutex.
  */
 int ubi_eba_atomic_leb_change(struct ubi_device *ubi, int vol_id, int lnum,
 			      const void *buf, int len, int dtype)
@@ -843,11 +849,10 @@ int ubi_eba_atomic_leb_change(struct ubi_device *ubi, int vol_id, int lnum,
 	if (!vid_hdr)
 		return -ENOMEM;
 
+	mutex_lock(&ubi->alc_mutex);
 	err = leb_write_lock(ubi, vol_id, lnum);
-	if (err) {
-		ubi_free_vid_hdr(ubi, vid_hdr);
-		return err;
-	}
+	if (err)
+		goto out_mutex;
 
 	vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
 	vid_hdr->vol_id = cpu_to_be32(vol_id);
@@ -864,9 +869,8 @@ int ubi_eba_atomic_leb_change(struct ubi_device *ubi, int vol_id, int lnum,
 retry:
 	pnum = ubi_wl_get_peb(ubi, dtype);
 	if (pnum < 0) {
-		ubi_free_vid_hdr(ubi, vid_hdr);
-		leb_write_unlock(ubi, vol_id, lnum);
-		return pnum;
+		err = pnum;
+		goto out_leb_unlock;
 	}
 
 	dbg_eba("change LEB %d:%d, PEB %d, write VID hdr to PEB %d",
@@ -888,17 +892,18 @@ int ubi_eba_atomic_leb_change(struct ubi_device *ubi, int vol_id, int lnum,
 
 	if (vol->eba_tbl[lnum] >= 0) {
 		err = ubi_wl_put_peb(ubi, vol->eba_tbl[lnum], 1);
-		if (err) {
-			ubi_free_vid_hdr(ubi, vid_hdr);
-			leb_write_unlock(ubi, vol_id, lnum);
-			return err;
-		}
+		if (err)
+			goto out_leb_unlock;
 	}
 
 	vol->eba_tbl[lnum] = pnum;
+
+out_leb_unlock:
 	leb_write_unlock(ubi, vol_id, lnum);
+out_mutex:
+	mutex_unlock(&ubi->alc_mutex);
 	ubi_free_vid_hdr(ubi, vid_hdr);
-	return 0;
+	return err;
 
 write_error:
 	if (err != -EIO || !ubi->bad_allowed) {
@@ -908,17 +913,13 @@ int ubi_eba_atomic_leb_change(struct ubi_device *ubi, int vol_id, int lnum,
 		 * mode just in case.
 		 */
 		ubi_ro_mode(ubi);
-		leb_write_unlock(ubi, vol_id, lnum);
-		ubi_free_vid_hdr(ubi, vid_hdr);
-		return err;
+		goto out_leb_unlock;
 	}
 
 	err = ubi_wl_put_peb(ubi, pnum, 1);
 	if (err || ++tries > UBI_IO_RETRIES) {
 		ubi_ro_mode(ubi);
-		leb_write_unlock(ubi, vol_id, lnum);
-		ubi_free_vid_hdr(ubi, vid_hdr);
-		return err;
+		goto out_leb_unlock;
 	}
 
 	vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
@@ -1122,6 +1123,7 @@ int ubi_eba_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
 	dbg_eba("initialize EBA unit");
 
 	spin_lock_init(&ubi->ltree_lock);
+	mutex_init(&ubi->alc_mutex);
 	ubi->ltree = RB_ROOT;
 
 	if (ubi_devices_cnt == 0) {
@@ -1183,6 +1185,14 @@ int ubi_eba_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
 		ubi->rsvd_pebs += ubi->beb_rsvd_pebs;
 	}
 
+	if (ubi->avail_pebs < EBA_RESERVED_PEBS) {
+		ubi_err("no enough physical eraseblocks (%d, need %d)",
+			ubi->avail_pebs, EBA_RESERVED_PEBS);
+		goto out_free;
+	}
+	ubi->avail_pebs -= EBA_RESERVED_PEBS;
+	ubi->rsvd_pebs += EBA_RESERVED_PEBS;
+
 	dbg_eba("EBA unit is initialized");
 	return 0;
 
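The other half of the fix, reserving the PEB itself, is plain counter
bookkeeping, as the last eba.c hunk above shows: refuse to initialize if fewer
than EBA_RESERVED_PEBS blocks remain, otherwise move them from the available
pool to the reserved pool. A small self-contained illustration of that
accounting follows; reserve_pebs() is a hypothetical helper and the numbers
are made up, this is not UBI code.

#include <stdio.h>

struct ubi_counters {
	int avail_pebs;	/* PEBs still available for user volumes */
	int rsvd_pebs;	/* PEBs reserved for internal purposes */
};

/* Move @count PEBs from the available pool to the reserved pool. */
static int reserve_pebs(struct ubi_counters *c, int count)
{
	if (c->avail_pebs < count)
		return -1;	/* the kernel code bails out here (-ENOSPC) */
	c->avail_pebs -= count;
	c->rsvd_pebs += count;
	return 0;
}

int main(void)
{
	/* made-up numbers: 980 PEBs still free, 20 already reserved for WL etc. */
	struct ubi_counters c = { .avail_pebs = 980, .rsvd_pebs = 20 };

	if (reserve_pebs(&c, 1))	/* EBA_RESERVED_PEBS == 1 */
		return 1;
	printf("avail=%d rsvd=%d\n", c.avail_pebs, c.rsvd_pebs);
	return 0;
}
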
drivers/mtd/ubi/ubi.h: 4 additions, 2 deletions
@@ -221,14 +221,15 @@ struct ubi_wl_entry;
  * @vtbl_slots: how many slots are available in the volume table
  * @vtbl_size: size of the volume table in bytes
  * @vtbl: in-RAM volume table copy
+ * @vtbl_mutex: protects on-flash volume table
  *
  * @max_ec: current highest erase counter value
  * @mean_ec: current mean erase counter value
  *
- * global_sqnum: global sequence number
+ * @global_sqnum: global sequence number
  * @ltree_lock: protects the lock tree and @global_sqnum
  * @ltree: the lock tree
- * @vtbl_mutex: protects on-flash volume table
+ * @alc_mutex: serializes "atomic LEB change" operations
  *
  * @used: RB-tree of used physical eraseblocks
  * @free: RB-tree of free physical eraseblocks
Expand Down Expand Up @@ -308,6 +309,7 @@ struct ubi_device {
unsigned long long global_sqnum;
spinlock_t ltree_lock;
struct rb_root ltree;
struct mutex alc_mutex;

/* Wear-leveling unit's stuff */
struct rb_root used;
