Commit cc9e623
---
yaml
---
r: 294605
b: refs/heads/master
c: 09a423a
h: refs/heads/master
i:
  294603: 127d427
v: v3
Christoph Hellwig authored and Ben Myers committed Feb 23, 2012
1 parent 0cbf10a commit cc9e623
Showing 5 changed files with 46 additions and 81 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 70b5437653d9c6c8de287affd38836cce98ebde5
+refs/heads/master: 09a423a3d6c70905f1090f01aadb8e6abff527ce
74 changes: 31 additions & 43 deletions trunk/fs/xfs/xfs_log.c
@@ -760,37 +760,35 @@ xfs_log_item_init(
 	INIT_LIST_HEAD(&item->li_cil);
 }
 
+/*
+ * Wake up processes waiting for log space after we have moved the log tail.
+ *
+ * If opportunistic is set wake up one waiter even if we do not have enough
+ * free space by our strict accounting.
+ */
 void
-xfs_log_move_tail(xfs_mount_t	*mp,
-		  xfs_lsn_t	tail_lsn)
+xfs_log_space_wake(
+	struct xfs_mount	*mp,
+	bool			opportunistic)
 {
-	xlog_ticket_t	*tic;
-	xlog_t		*log = mp->m_log;
-	int		need_bytes, free_bytes;
+	struct xlog_ticket	*tic;
+	struct log		*log = mp->m_log;
+	int			need_bytes, free_bytes;
 
 	if (XLOG_FORCED_SHUTDOWN(log))
 		return;
 
-	if (tail_lsn == 0)
-		tail_lsn = atomic64_read(&log->l_last_sync_lsn);
-
-	/* tail_lsn == 1 implies that we weren't passed a valid value.	*/
-	if (tail_lsn != 1)
-		atomic64_set(&log->l_tail_lsn, tail_lsn);
-
 	if (!list_empty_careful(&log->l_writeq)) {
-#ifdef DEBUG
-		if (log->l_flags & XLOG_ACTIVE_RECOVERY)
-			panic("Recovery problem");
-#endif
+		ASSERT(!(log->l_flags & XLOG_ACTIVE_RECOVERY));
+
 		spin_lock(&log->l_grant_write_lock);
 		free_bytes = xlog_space_left(log, &log->l_grant_write_head);
 		list_for_each_entry(tic, &log->l_writeq, t_queue) {
 			ASSERT(tic->t_flags & XLOG_TIC_PERM_RESERV);
 
-			if (free_bytes < tic->t_unit_res && tail_lsn != 1)
+			if (free_bytes < tic->t_unit_res && !opportunistic)
 				break;
-			tail_lsn = 0;
+			opportunistic = false;
 			free_bytes -= tic->t_unit_res;
 			trace_xfs_log_regrant_write_wake_up(log, tic);
 			wake_up(&tic->t_wait);
@@ -799,20 +797,18 @@ xfs_log_move_tail(xfs_mount_t *mp,
 	}
 
 	if (!list_empty_careful(&log->l_reserveq)) {
-#ifdef DEBUG
-		if (log->l_flags & XLOG_ACTIVE_RECOVERY)
-			panic("Recovery problem");
-#endif
+		ASSERT(!(log->l_flags & XLOG_ACTIVE_RECOVERY));
+
 		spin_lock(&log->l_grant_reserve_lock);
 		free_bytes = xlog_space_left(log, &log->l_grant_reserve_head);
 		list_for_each_entry(tic, &log->l_reserveq, t_queue) {
 			if (tic->t_flags & XLOG_TIC_PERM_RESERV)
 				need_bytes = tic->t_unit_res*tic->t_cnt;
 			else
 				need_bytes = tic->t_unit_res;
-			if (free_bytes < need_bytes && tail_lsn != 1)
+			if (free_bytes < need_bytes && !opportunistic)
 				break;
-			tail_lsn = 0;
+			opportunistic = false;
 			free_bytes -= need_bytes;
 			trace_xfs_log_grant_wake_up(log, tic);
 			wake_up(&tic->t_wait);
@@ -867,21 +863,7 @@ xfs_log_need_covered(xfs_mount_t *mp)
 	return needed;
 }
 
-/******************************************************************************
- *
- *	local routines
- *
- ******************************************************************************
- */
-
-/* xfs_trans_tail_ail returns 0 when there is nothing in the list.
- * The log manager must keep track of the last LR which was committed
- * to disk.  The lsn of this LR will become the new tail_lsn whenever
- * xfs_trans_tail_ail returns 0.  If we don't do this, we run into
- * the situation where stuff could be written into the log but nothing
- * was ever in the AIL when asked.  Eventually, we panic since the
- * tail hits the head.
- *
+/*
  * We may be holding the log iclog lock upon entering this routine.
  */
 xfs_lsn_t
@@ -891,10 +873,17 @@ xlog_assign_tail_lsn(
 	xfs_lsn_t		tail_lsn;
 	struct log		*log = mp->m_log;
 
+	/*
+	 * To make sure we always have a valid LSN for the log tail we keep
+	 * track of the last LSN which was committed in log->l_last_sync_lsn,
+	 * and use that when the AIL was empty and xfs_ail_min_lsn returns 0.
+	 *
+	 * If the AIL has been emptied we also need to wake any process
+	 * waiting for this condition.
+	 */
 	tail_lsn = xfs_ail_min_lsn(mp->m_ail);
 	if (!tail_lsn)
 		tail_lsn = atomic64_read(&log->l_last_sync_lsn);
-
 	atomic64_set(&log->l_tail_lsn, tail_lsn);
 	return tail_lsn;
 }
@@ -2759,9 +2748,8 @@ xlog_ungrant_log_space(xlog_t *log,
 
 	trace_xfs_log_ungrant_exit(log, ticket);
 
-	xfs_log_move_tail(log->l_mp, 1);
-}	/* xlog_ungrant_log_space */
-
+	xfs_log_space_wake(log->l_mp, true);
+}
 
 /*
  * Flush iclog to disk if this is the last reference to the given iclog and
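
Note on the xfs_log.c changes above: the removed xfs_log_move_tail() overloaded its tail_lsn argument three ways (an actual LSN to assign, 0 meaning "take the tail from l_last_sync_lsn", 1 meaning "assign nothing, but opportunistically wake one waiter"). The commit splits the two jobs between xlog_assign_tail_lsn() and the renamed xfs_log_space_wake(). A minimal sketch of the old-to-new mapping; these call sites are illustrative, not lines from the diff:

	/* Before: one overloaded entry point. */
	xfs_log_move_tail(mp, lsn);	/* set l_tail_lsn = lsn, then wake waiters */
	xfs_log_move_tail(mp, 0);	/* tail from l_last_sync_lsn, then wake */
	xfs_log_move_tail(mp, 1);	/* leave the tail alone; opportunistic wake */

	/* After: tail assignment and wakeup are separate, explicit steps. */
	xlog_assign_tail_lsn(mp);	/* recompute and set log->l_tail_lsn */
	xfs_log_space_wake(mp, false);	/* wake only waiters whose reservation now fits */
	xfs_log_space_wake(mp, true);	/* additionally wake the first waiter regardless */
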
5 changes: 3 additions & 2 deletions trunk/fs/xfs/xfs_log.h
@@ -160,8 +160,9 @@ int xfs_log_mount(struct xfs_mount *mp,
 			xfs_daddr_t		start_block,
 			int			num_bblocks);
 int	  xfs_log_mount_finish(struct xfs_mount *mp);
-void	  xfs_log_move_tail(struct xfs_mount	*mp,
-			    xfs_lsn_t		tail_lsn);
+xfs_lsn_t xlog_assign_tail_lsn(struct xfs_mount *mp);
+void	  xfs_log_space_wake(struct xfs_mount	*mp,
+			     bool		opportunistic);
 int	  xfs_log_notify(struct xfs_mount	*mp,
 			 struct xlog_in_core	*iclog,
 			 xfs_log_callback_t	*callback_entry);
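
xlog_assign_tail_lsn() joins xfs_log_space_wake() in the public header because the AIL code now calls it directly; it was previously declared log-private in xfs_log_priv.h, which the next file removes. A hypothetical caller of the new pair, with the helper name invented for illustration:

	/*
	 * Hypothetical helper, not part of this commit: after the AIL tail
	 * may have moved, recompute the log tail LSN and wake log-space
	 * waiters under strict accounting.
	 */
	static void
	xfs_example_tail_moved(
		struct xfs_mount	*mp)
	{
		xlog_assign_tail_lsn(mp);	/* updates log->l_tail_lsn */
		xfs_log_space_wake(mp, false);	/* no opportunistic free pass */
	}
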
1 change: 0 additions & 1 deletion trunk/fs/xfs/xfs_log_priv.h
@@ -545,7 +545,6 @@ typedef struct log {
 #define XLOG_FORCED_SHUTDOWN(log)	((log)->l_flags & XLOG_IO_ERROR)
 
 /* common routines */
-extern xfs_lsn_t xlog_assign_tail_lsn(struct xfs_mount *mp);
 extern int	 xlog_recover(xlog_t *log);
 extern int	 xlog_recover_finish(xlog_t *log);
 extern void	 xlog_pack_data(xlog_t *log, xlog_in_core_t *iclog, int);
45 changes: 11 additions & 34 deletions trunk/fs/xfs/xfs_trans_ail.c
@@ -643,15 +643,15 @@ xfs_trans_unlocked_item(
 	 * at the tail, it doesn't matter what result we get back.  This
 	 * is slightly racy because since we were just unlocked, we could
 	 * go to sleep between the call to xfs_ail_min and the call to
-	 * xfs_log_move_tail, have someone else lock us, commit to us disk,
+	 * xfs_log_space_wake, have someone else lock us, commit to us disk,
 	 * move us out of the tail of the AIL, and then we wake up.  However,
-	 * the call to xfs_log_move_tail() doesn't do anything if there's
+	 * the call to xfs_log_space_wake() doesn't do anything if there's
 	 * not enough free space to wake people up so we're safe calling it.
 	 */
 	min_lip = xfs_ail_min(ailp);
 
 	if (min_lip == lip)
-		xfs_log_move_tail(ailp->xa_mount, 1);
+		xfs_log_space_wake(ailp->xa_mount, true);
 }	/* xfs_trans_unlocked_item */
 
 /*
@@ -685,7 +685,6 @@ xfs_trans_ail_update_bulk(
 	xfs_lsn_t		lsn) __releases(ailp->xa_lock)
 {
 	xfs_log_item_t		*mlip;
-	xfs_lsn_t		tail_lsn;
 	int			mlip_changed = 0;
 	int			i;
 	LIST_HEAD(tmp);
@@ -712,22 +711,12 @@
 
 	if (!list_empty(&tmp))
 		xfs_ail_splice(ailp, cur, &tmp, lsn);
+	spin_unlock(&ailp->xa_lock);
 
-	if (!mlip_changed) {
-		spin_unlock(&ailp->xa_lock);
-		return;
+	if (mlip_changed && !XFS_FORCED_SHUTDOWN(ailp->xa_mount)) {
+		xlog_assign_tail_lsn(ailp->xa_mount);
+		xfs_log_space_wake(ailp->xa_mount, false);
 	}
-
-	/*
-	 * It is not safe to access mlip after the AIL lock is dropped, so we
-	 * must get a copy of li_lsn before we do so.  This is especially
-	 * important on 32-bit platforms where accessing and updating 64-bit
-	 * values like li_lsn is not atomic.
-	 */
-	mlip = xfs_ail_min(ailp);
-	tail_lsn = mlip->li_lsn;
-	spin_unlock(&ailp->xa_lock);
-	xfs_log_move_tail(ailp->xa_mount, tail_lsn);
 }
 
 /*
@@ -758,7 +747,6 @@ xfs_trans_ail_delete_bulk(
 	int			nr_items) __releases(ailp->xa_lock)
 {
 	xfs_log_item_t		*mlip;
-	xfs_lsn_t		tail_lsn;
 	int			mlip_changed = 0;
 	int			i;
 
@@ -785,23 +773,12 @@
 		if (mlip == lip)
 			mlip_changed = 1;
 	}
+	spin_unlock(&ailp->xa_lock);
 
-	if (!mlip_changed) {
-		spin_unlock(&ailp->xa_lock);
-		return;
+	if (mlip_changed && !XFS_FORCED_SHUTDOWN(ailp->xa_mount)) {
+		xlog_assign_tail_lsn(ailp->xa_mount);
+		xfs_log_space_wake(ailp->xa_mount, false);
 	}
-
-	/*
-	 * It is not safe to access mlip after the AIL lock is dropped, so we
-	 * must get a copy of li_lsn before we do so.  This is especially
-	 * important on 32-bit platforms where accessing and updating 64-bit
-	 * values like li_lsn is not atomic.  It is possible we've emptied the
-	 * AIL here, so if that is the case, pass an LSN of 0 to the tail move.
-	 */
-	mlip = xfs_ail_min(ailp);
-	tail_lsn = mlip ? mlip->li_lsn : 0;
-	spin_unlock(&ailp->xa_lock);
-	xfs_log_move_tail(ailp->xa_mount, tail_lsn);
 }
 
 /*
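
Both bulk updaters above now end identically: drop xa_lock, then, if the minimum AIL item changed and the filesystem is not shut down, recompute the log tail and do a strict wakeup. Condensed into one sketch (the helper name is invented; the diff open-codes this in both functions):

	/* Illustrative condensation of the shared epilogue of both bulk functions. */
	static void
	xfs_ail_bulk_finish(
		struct xfs_ail	*ailp,
		int		mlip_changed) __releases(ailp->xa_lock)
	{
		spin_unlock(&ailp->xa_lock);

		if (mlip_changed && !XFS_FORCED_SHUTDOWN(ailp->xa_mount)) {
			xlog_assign_tail_lsn(ailp->xa_mount);
			xfs_log_space_wake(ailp->xa_mount, false);
		}
	}

This drops the old copy-li_lsn-before-unlock dance, which is no longer needed since mlip is not touched after the unlock, at the price of one extra AIL lock round trip: xfs_ail_min_lsn() takes xa_lock internally when called from xlog_assign_tail_lsn().
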
