Skip to content

Commit

Permalink
NTFS: - Split ntfs_map_runlist() into ntfs_map_runlist() and a non-locking
Browse files Browse the repository at this point in the history

	helper ntfs_map_runlist_nolock() which is used by ntfs_map_runlist().
	This allows us to map runlist fragments with the runlist lock already
	held without having to drop and reacquire it around the call.  Adapt
	all callers.
      - Change ntfs_find_vcn() to ntfs_find_vcn_nolock() which takes a locked
	runlist.  This allows us to find runlist elements with the runlist
	lock already held without having to drop and reacquire it around the
	call.  Adapt all callers.

Signed-off-by: Anton Altaparmakov <aia21@cantab.net>
  • Loading branch information
Anton Altaparmakov committed May 5, 2005
1 parent 1a0df15 commit b6ad6c5
Show file tree
Hide file tree
Showing 6 changed files with 119 additions and 113 deletions.
9 changes: 9 additions & 0 deletions fs/ntfs/ChangeLog
Original file line number Diff line number Diff line change
Expand Up @@ -63,6 +63,15 @@ ToDo/Notes:
- Fix a bug in fs/ntfs/runlist.c::ntfs_mapping_pairs_decompress() in
the creation of the unmapped runlist element for the base attribute
extent.
- Split ntfs_map_runlist() into ntfs_map_runlist() and a non-locking
helper ntfs_map_runlist_nolock() which is used by ntfs_map_runlist().
This allows us to map runlist fragments with the runlist lock already
held without having to drop and reacquire it around the call. Adapt
all callers.
- Change ntfs_find_vcn() to ntfs_find_vcn_nolock() which takes a locked
runlist. This allows us to find runlist elements with the runlist
lock already held without having to drop and reacquire it around the
call. Adapt all callers.

2.1.22 - Many bug and race fixes and error handling improvements.

Expand Down
34 changes: 10 additions & 24 deletions fs/ntfs/aops.c
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@
* aops.c - NTFS kernel address space operations and page cache handling.
* Part of the Linux-NTFS project.
*
* Copyright (c) 2001-2004 Anton Altaparmakov
* Copyright (c) 2001-2005 Anton Altaparmakov
* Copyright (c) 2002 Richard Russon
*
* This program/include file is free software; you can redistribute it and/or
Expand Down Expand Up @@ -135,7 +135,7 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
i * rec_size), rec_size);
flush_dcache_page(page);
kunmap_atomic(addr, KM_BIO_SRC_IRQ);
if (likely(!PageError(page) && page_uptodate))
if (likely(page_uptodate && !PageError(page)))
SetPageUptodate(page);
}
unlock_page(page);
Expand Down Expand Up @@ -347,11 +347,11 @@ static int ntfs_read_block(struct page *page)
*/
static int ntfs_readpage(struct file *file, struct page *page)
{
loff_t i_size;
ntfs_inode *ni, *base_ni;
u8 *kaddr;
ntfs_attr_search_ctx *ctx;
MFT_RECORD *mrec;
unsigned long flags;
u32 attr_len;
int err = 0;

Expand Down Expand Up @@ -389,9 +389,9 @@ static int ntfs_readpage(struct file *file, struct page *page)
* Attribute is resident, implying it is not compressed or encrypted.
* This also means the attribute is smaller than an mft record and
* hence smaller than a page, so can simply zero out any pages with
* index above 0. We can also do this if the file size is 0.
* index above 0.
*/
if (unlikely(page->index > 0 || !i_size_read(VFS_I(ni)))) {
if (unlikely(page->index > 0)) {
kaddr = kmap_atomic(page, KM_USER0);
memset(kaddr, 0, PAGE_CACHE_SIZE);
flush_dcache_page(page);
Expand All @@ -418,9 +418,10 @@ static int ntfs_readpage(struct file *file, struct page *page)
if (unlikely(err))
goto put_unm_err_out;
attr_len = le32_to_cpu(ctx->attr->data.resident.value_length);
i_size = i_size_read(VFS_I(ni));
if (unlikely(attr_len > i_size))
attr_len = i_size;
read_lock_irqsave(&ni->size_lock, flags);
if (unlikely(attr_len > ni->initialized_size))
attr_len = ni->initialized_size;
read_unlock_irqrestore(&ni->size_lock, flags);
kaddr = kmap_atomic(page, KM_USER0);
/* Copy the data to the page. */
memcpy(kaddr, (u8*)ctx->attr +
Expand Down Expand Up @@ -1247,20 +1248,6 @@ static int ntfs_writepage(struct page *page, struct writeback_control *wbc)
int err;

BUG_ON(!PageLocked(page));
/*
* If a previous ntfs_truncate() failed, repeat it and abort if it
* fails again.
*/
if (unlikely(NInoTruncateFailed(ni))) {
down_write(&vi->i_alloc_sem);
err = ntfs_truncate(vi);
up_write(&vi->i_alloc_sem);
if (err || NInoTruncateFailed(ni)) {
if (!err)
err = -EIO;
goto err_out;
}
}
i_size = i_size_read(vi);
/* Is the page fully outside i_size? (truncate in progress) */
if (unlikely(page->index >= (i_size + PAGE_CACHE_SIZE - 1) >>
Expand Down Expand Up @@ -1490,13 +1477,12 @@ static int ntfs_prepare_nonresident_write(struct page *page,

read_lock_irqsave(&ni->size_lock, flags);
/*
* The first out of bounds block for the allocated size. No need to
* The first out of bounds block for the allocated size. No need to
* round up as allocated_size is in multiples of cluster size and the
* minimum cluster size is 512 bytes, which is equal to the smallest
* blocksize.
*/
ablock = ni->allocated_size >> blocksize_bits;

i_size = i_size_read(vi);
initialized_size = ni->initialized_size;
read_unlock_irqrestore(&ni->size_lock, flags);
Expand Down
107 changes: 62 additions & 45 deletions fs/ntfs/attrib.c
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
/**
* attrib.c - NTFS attribute operations. Part of the Linux-NTFS project.
*
* Copyright (c) 2001-2004 Anton Altaparmakov
* Copyright (c) 2001-2005 Anton Altaparmakov
* Copyright (c) 2002 Richard Russon
*
* This program/include file is free software; you can redistribute it and/or
Expand Down Expand Up @@ -30,32 +30,31 @@
#include "types.h"

/**
* ntfs_map_runlist - map (a part of) a runlist of an ntfs inode
* ntfs_map_runlist_nolock - map (a part of) a runlist of an ntfs inode
* @ni: ntfs inode for which to map (part of) a runlist
* @vcn: map runlist part containing this vcn
*
* Map the part of a runlist containing the @vcn of the ntfs inode @ni.
*
* Return 0 on success and -errno on error.
*
* Locking: - The runlist must be unlocked on entry and is unlocked on return.
* - This function takes the lock for writing and modifies the runlist.
* Locking: - The runlist must be locked for writing.
* - This function modifies the runlist.
*/
int ntfs_map_runlist(ntfs_inode *ni, VCN vcn)
int ntfs_map_runlist_nolock(ntfs_inode *ni, VCN vcn)
{
ntfs_inode *base_ni;
ntfs_attr_search_ctx *ctx;
MFT_RECORD *mrec;
ntfs_attr_search_ctx *ctx;
runlist_element *rl;
int err = 0;

ntfs_debug("Mapping runlist part containing vcn 0x%llx.",
(unsigned long long)vcn);

if (!NInoAttr(ni))
base_ni = ni;
else
base_ni = ni->ext.base_ntfs_ino;

mrec = map_mft_record(base_ni);
if (IS_ERR(mrec))
return PTR_ERR(mrec);
Expand All @@ -66,43 +65,60 @@ int ntfs_map_runlist(ntfs_inode *ni, VCN vcn)
}
err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
CASE_SENSITIVE, vcn, NULL, 0, ctx);
if (unlikely(err))
goto put_err_out;

down_write(&ni->runlist.lock);
/* Make sure someone else didn't do the work while we were sleeping. */
if (likely(ntfs_rl_vcn_to_lcn(ni->runlist.rl, vcn) <=
LCN_RL_NOT_MAPPED)) {
runlist_element *rl;

if (likely(!err)) {
rl = ntfs_mapping_pairs_decompress(ni->vol, ctx->attr,
ni->runlist.rl);
if (IS_ERR(rl))
err = PTR_ERR(rl);
else
ni->runlist.rl = rl;
}
up_write(&ni->runlist.lock);

put_err_out:
ntfs_attr_put_search_ctx(ctx);
err_out:
unmap_mft_record(base_ni);
return err;
}

/**
* ntfs_find_vcn - find a vcn in the runlist described by an ntfs inode
* @ni: ntfs inode describing the runlist to search
* @vcn: vcn to find
* @need_write: if false, lock for reading and if true, lock for writing
* ntfs_map_runlist - map (a part of) a runlist of an ntfs inode
* @ni: ntfs inode for which to map (part of) a runlist
* @vcn: map runlist part containing this vcn
*
* Map the part of a runlist containing the @vcn of the ntfs inode @ni.
*
* Return 0 on success and -errno on error.
*
* Locking: - The runlist must be unlocked on entry and is unlocked on return.
* - This function takes the runlist lock for writing and modifies the
* runlist.
*/
int ntfs_map_runlist(ntfs_inode *ni, VCN vcn)
{
	int err = 0;

	/* Take the runlist lock for writing: the nolock helper modifies
	   ni->runlist.rl in place. */
	down_write(&ni->runlist.lock);
	/* Make sure someone else didn't do the work while we were sleeping. */
	if (likely(ntfs_rl_vcn_to_lcn(ni->runlist.rl, vcn) <=
			LCN_RL_NOT_MAPPED))
		err = ntfs_map_runlist_nolock(ni, vcn);
	up_write(&ni->runlist.lock);
	return err;
}

/**
* ntfs_find_vcn_nolock - find a vcn in the runlist described by an ntfs inode
* @ni: ntfs inode describing the runlist to search
* @vcn: vcn to find
* @write_locked: true if the runlist is locked for writing
*
* Find the virtual cluster number @vcn in the runlist described by the ntfs
* inode @ni and return the address of the runlist element containing the @vcn.
* The runlist is left locked and the caller has to unlock it. If @need_write
* is true, the runlist is locked for writing and if @need_write is false, the
* runlist is locked for reading. In the error case, the runlist is not left
* locked.
* The runlist is left locked and the caller has to unlock it. In the error
* case, the runlist is left in the same locking state as on entry.
*
* Note if @write_locked is FALSE the lock may be dropped inside the function
* so you cannot rely on the runlist still being the same when this function
* returns.
*
* Note you need to distinguish between the lcn of the returned runlist element
* being >= 0 and LCN_HOLE. In the later case you have to return zeroes on
Expand All @@ -124,28 +140,24 @@ int ntfs_map_runlist(ntfs_inode *ni, VCN vcn)
* true, it is locked for writing. Otherwise it is locked for
* reading.
*/
runlist_element *ntfs_find_vcn(ntfs_inode *ni, const VCN vcn,
const BOOL need_write)
runlist_element *ntfs_find_vcn_nolock(ntfs_inode *ni, const VCN vcn,
const BOOL write_locked)
{
runlist_element *rl;
int err = 0;
BOOL is_retry = FALSE;

ntfs_debug("Entering for i_ino 0x%lx, vcn 0x%llx, lock for %sing.",
ntfs_debug("Entering for i_ino 0x%lx, vcn 0x%llx, %s_locked.",
ni->mft_no, (unsigned long long)vcn,
!need_write ? "read" : "writ");
write_locked ? "write" : "read");
BUG_ON(!ni);
BUG_ON(!NInoNonResident(ni));
BUG_ON(vcn < 0);
lock_retry_remap:
if (!need_write)
down_read(&ni->runlist.lock);
else
down_write(&ni->runlist.lock);
retry_remap:
rl = ni->runlist.rl;
if (likely(rl && vcn >= rl[0].vcn)) {
while (likely(rl->length)) {
if (likely(vcn < rl[1].vcn)) {
if (unlikely(vcn < rl[1].vcn)) {
if (likely(rl->lcn >= LCN_HOLE)) {
ntfs_debug("Done.");
return rl;
Expand All @@ -161,19 +173,23 @@ runlist_element *ntfs_find_vcn(ntfs_inode *ni, const VCN vcn,
err = -EIO;
}
}
if (!need_write)
up_read(&ni->runlist.lock);
else
up_write(&ni->runlist.lock);
if (!err && !is_retry) {
/*
* The @vcn is in an unmapped region, map the runlist and
* retry.
*/
err = ntfs_map_runlist(ni, vcn);
if (!write_locked) {
up_read(&ni->runlist.lock);
down_write(&ni->runlist.lock);
}
err = ntfs_map_runlist_nolock(ni, vcn);
if (!write_locked) {
up_write(&ni->runlist.lock);
down_read(&ni->runlist.lock);
}
if (likely(!err)) {
is_retry = TRUE;
goto lock_retry_remap;
goto retry_remap;
}
/*
* -EINVAL and -ENOENT coming from a failed mapping attempt are
Expand All @@ -184,7 +200,8 @@ runlist_element *ntfs_find_vcn(ntfs_inode *ni, const VCN vcn,
err = -EIO;
} else if (!err)
err = -EIO;
ntfs_error(ni->vol->sb, "Failed with error code %i.", err);
if (err != -ENOENT)
ntfs_error(ni->vol->sb, "Failed with error code %i.", err);
return ERR_PTR(err);
}

Expand Down
5 changes: 3 additions & 2 deletions fs/ntfs/attrib.h
Original file line number Diff line number Diff line change
Expand Up @@ -60,10 +60,11 @@ typedef struct {
ATTR_RECORD *base_attr;
} ntfs_attr_search_ctx;

extern int ntfs_map_runlist_nolock(ntfs_inode *ni, VCN vcn);
extern int ntfs_map_runlist(ntfs_inode *ni, VCN vcn);

extern runlist_element *ntfs_find_vcn(ntfs_inode *ni, const VCN vcn,
const BOOL need_write);
extern runlist_element *ntfs_find_vcn_nolock(ntfs_inode *ni, const VCN vcn,
const BOOL write_locked);

int ntfs_attr_lookup(const ATTR_TYPE type, const ntfschar *name,
const u32 name_len, const IGNORE_CASE_BOOL ic,
Expand Down
Loading

0 comments on commit b6ad6c5

Please sign in to comment.