
Commit d25223a
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs into for-linus
Li Zefan committed Jan 11, 2012
2 parents 396e6e4 + 08c422c commit d25223a
Showing 34 changed files with 6,339 additions and 2,011 deletions.
4 changes: 2 additions & 2 deletions Documentation/filesystems/btrfs.txt
@@ -63,8 +63,8 @@ IRC network.
 Userspace tools for creating and manipulating Btrfs file systems are
 available from the git repository at the following location:
 
-http://git.kernel.org/?p=linux/kernel/git/mason/btrfs-progs-unstable.git
-git://git.kernel.org/pub/scm/linux/kernel/git/mason/btrfs-progs-unstable.git
+http://git.kernel.org/?p=linux/kernel/git/mason/btrfs-progs.git
+git://git.kernel.org/pub/scm/linux/kernel/git/mason/btrfs-progs.git
 
 These include the following tools:
 
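As a usage aside (not part of this commit's diff), fetching the renamed tools repository is an ordinary git clone of the URL given above:

        git clone git://git.kernel.org/pub/scm/linux/kernel/git/mason/btrfs-progs.git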
3 changes: 2 additions & 1 deletion fs/btrfs/Makefile
@@ -7,6 +7,7 @@ btrfs-y += super.o ctree.o extent-tree.o print-tree.o root-tree.o dir-item.o \
            extent_map.o sysfs.o struct-funcs.o xattr.o ordered-data.o \
            extent_io.o volumes.o async-thread.o ioctl.o locking.o orphan.o \
            export.o tree-log.o free-space-cache.o zlib.o lzo.o \
-           compression.o delayed-ref.o relocation.o delayed-inode.o scrub.o
+           compression.o delayed-ref.o relocation.o delayed-inode.o scrub.o \
+           reada.o backref.o
 
 btrfs-$(CONFIG_BTRFS_FS_POSIX_ACL) += acl.o
17 changes: 7 additions & 10 deletions fs/btrfs/acl.c
@@ -59,22 +59,19 @@ struct posix_acl *btrfs_get_acl(struct inode *inode, int type)
                 if (!value)
                         return ERR_PTR(-ENOMEM);
                 size = __btrfs_getxattr(inode, name, value, size);
-                if (size > 0) {
-                        acl = posix_acl_from_xattr(value, size);
-                        if (IS_ERR(acl)) {
-                                kfree(value);
-                                return acl;
-                        }
-                        set_cached_acl(inode, type, acl);
-                }
-                kfree(value);
         }
+        if (size > 0) {
+                acl = posix_acl_from_xattr(value, size);
         } else if (size == -ENOENT || size == -ENODATA || size == 0) {
                 /* FIXME, who returns -ENOENT? I think nobody */
                 acl = NULL;
-                set_cached_acl(inode, type, acl);
         } else {
                 acl = ERR_PTR(-EIO);
         }
+        kfree(value);
+
+        if (!IS_ERR(acl))
+                set_cached_acl(inode, type, acl);
+
         return acl;
 }
120 changes: 57 additions & 63 deletions fs/btrfs/async-thread.c
@@ -64,6 +64,8 @@ struct btrfs_worker_thread {
         int idle;
 };
 
+static int __btrfs_start_workers(struct btrfs_workers *workers);
+
 /*
  * btrfs_start_workers uses kthread_run, which can block waiting for memory
  * for a very long time. It will actually throttle on page writeback,
@@ -88,27 +90,10 @@ static void start_new_worker_func(struct btrfs_work *work)
 {
         struct worker_start *start;
         start = container_of(work, struct worker_start, work);
-        btrfs_start_workers(start->queue, 1);
+        __btrfs_start_workers(start->queue);
         kfree(start);
 }
 
-static int start_new_worker(struct btrfs_workers *queue)
-{
-        struct worker_start *start;
-        int ret;
-
-        start = kzalloc(sizeof(*start), GFP_NOFS);
-        if (!start)
-                return -ENOMEM;
-
-        start->work.func = start_new_worker_func;
-        start->queue = queue;
-        ret = btrfs_queue_worker(queue->atomic_worker_start, &start->work);
-        if (ret)
-                kfree(start);
-        return ret;
-}
-
 /*
  * helper function to move a thread onto the idle list after it
  * has finished some requests.
@@ -153,12 +138,20 @@ static void check_busy_worker(struct btrfs_worker_thread *worker)
 static void check_pending_worker_creates(struct btrfs_worker_thread *worker)
 {
         struct btrfs_workers *workers = worker->workers;
+        struct worker_start *start;
         unsigned long flags;
 
         rmb();
         if (!workers->atomic_start_pending)
                 return;
 
+        start = kzalloc(sizeof(*start), GFP_NOFS);
+        if (!start)
+                return;
+
+        start->work.func = start_new_worker_func;
+        start->queue = workers;
+
         spin_lock_irqsave(&workers->lock, flags);
         if (!workers->atomic_start_pending)
                 goto out;
@@ -170,10 +163,11 @@ static void check_pending_worker_creates(struct btrfs_worker_thread *worker)
 
         workers->num_workers_starting += 1;
         spin_unlock_irqrestore(&workers->lock, flags);
-        start_new_worker(workers);
+        btrfs_queue_worker(workers->atomic_worker_start, &start->work);
         return;
 
 out:
+        kfree(start);
         spin_unlock_irqrestore(&workers->lock, flags);
 }

@@ -331,7 +325,7 @@ static int worker_loop(void *arg)
                         run_ordered_completions(worker->workers, work);
 
                         check_pending_worker_creates(worker);
-
+                        cond_resched();
                 }
 
                 spin_lock_irq(&worker->lock);
@@ -462,56 +456,55 @@ void btrfs_init_workers(struct btrfs_workers *workers, char *name, int max,
  * starts new worker threads. This does not enforce the max worker
  * count in case you need to temporarily go past it.
  */
-static int __btrfs_start_workers(struct btrfs_workers *workers,
-                                 int num_workers)
+static int __btrfs_start_workers(struct btrfs_workers *workers)
 {
         struct btrfs_worker_thread *worker;
         int ret = 0;
-        int i;
 
-        for (i = 0; i < num_workers; i++) {
-                worker = kzalloc(sizeof(*worker), GFP_NOFS);
-                if (!worker) {
-                        ret = -ENOMEM;
-                        goto fail;
-                }
+        worker = kzalloc(sizeof(*worker), GFP_NOFS);
+        if (!worker) {
+                ret = -ENOMEM;
+                goto fail;
+        }
 
-                INIT_LIST_HEAD(&worker->pending);
-                INIT_LIST_HEAD(&worker->prio_pending);
-                INIT_LIST_HEAD(&worker->worker_list);
-                spin_lock_init(&worker->lock);
-
-                atomic_set(&worker->num_pending, 0);
-                atomic_set(&worker->refs, 1);
-                worker->workers = workers;
-                worker->task = kthread_run(worker_loop, worker,
-                                           "btrfs-%s-%d", workers->name,
-                                           workers->num_workers + i);
-                if (IS_ERR(worker->task)) {
-                        ret = PTR_ERR(worker->task);
-                        kfree(worker);
-                        goto fail;
-                }
-                spin_lock_irq(&workers->lock);
-                list_add_tail(&worker->worker_list, &workers->idle_list);
-                worker->idle = 1;
-                workers->num_workers++;
-                workers->num_workers_starting--;
-                WARN_ON(workers->num_workers_starting < 0);
-                spin_unlock_irq(&workers->lock);
-        }
+        INIT_LIST_HEAD(&worker->pending);
+        INIT_LIST_HEAD(&worker->prio_pending);
+        INIT_LIST_HEAD(&worker->worker_list);
+        spin_lock_init(&worker->lock);
+
+        atomic_set(&worker->num_pending, 0);
+        atomic_set(&worker->refs, 1);
+        worker->workers = workers;
+        worker->task = kthread_run(worker_loop, worker,
+                                   "btrfs-%s-%d", workers->name,
+                                   workers->num_workers + 1);
+        if (IS_ERR(worker->task)) {
+                ret = PTR_ERR(worker->task);
+                kfree(worker);
+                goto fail;
+        }
+        spin_lock_irq(&workers->lock);
+        list_add_tail(&worker->worker_list, &workers->idle_list);
+        worker->idle = 1;
+        workers->num_workers++;
+        workers->num_workers_starting--;
+        WARN_ON(workers->num_workers_starting < 0);
+        spin_unlock_irq(&workers->lock);
 
         return 0;
 fail:
-        btrfs_stop_workers(workers);
+        spin_lock_irq(&workers->lock);
+        workers->num_workers_starting--;
+        spin_unlock_irq(&workers->lock);
         return ret;
 }
 
-int btrfs_start_workers(struct btrfs_workers *workers, int num_workers)
+int btrfs_start_workers(struct btrfs_workers *workers)
 {
         spin_lock_irq(&workers->lock);
-        workers->num_workers_starting += num_workers;
+        workers->num_workers_starting++;
         spin_unlock_irq(&workers->lock);
-        return __btrfs_start_workers(workers, num_workers);
+        return __btrfs_start_workers(workers);
 }
 
 /*
@@ -568,9 +561,10 @@ static struct btrfs_worker_thread *find_worker(struct btrfs_workers *workers)
         struct btrfs_worker_thread *worker;
         unsigned long flags;
         struct list_head *fallback;
+        int ret;
 
-again:
         spin_lock_irqsave(&workers->lock, flags);
+again:
         worker = next_worker(workers);
 
         if (!worker) {
@@ -584,7 +578,10 @@ static struct btrfs_worker_thread *find_worker(struct btrfs_workers *workers)
                         workers->num_workers_starting++;
                         spin_unlock_irqrestore(&workers->lock, flags);
                         /* we're below the limit, start another worker */
-                        __btrfs_start_workers(workers, 1);
+                        ret = __btrfs_start_workers(workers);
+                        spin_lock_irqsave(&workers->lock, flags);
+                        if (ret)
+                                goto fallback;
                         goto again;
                 }
         }
@@ -665,15 +662,15 @@ void btrfs_set_work_high_prio(struct btrfs_work *work)
 /*
  * places a struct btrfs_work into the pending queue of one of the kthreads
  */
-int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
+void btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
 {
         struct btrfs_worker_thread *worker;
         unsigned long flags;
         int wake = 0;
 
         /* don't requeue something already on a list */
         if (test_and_set_bit(WORK_QUEUED_BIT, &work->flags))
-                goto out;
+                return;
 
         worker = find_worker(workers);
         if (workers->ordered) {
@@ -712,7 +709,4 @@ int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
         if (wake)
                 wake_up_process(worker->task);
         spin_unlock_irqrestore(&worker->lock, flags);
-
-out:
-        return 0;
 }
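Taken together, the async-thread.c hunks change the calling convention of the worker API: btrfs_start_workers() now starts exactly one worker and returns an error that callers must check, while btrfs_queue_worker() can no longer fail. A minimal caller-side sketch of the new contract (the helper below is hypothetical, not a function from this commit):

        /* hypothetical caller illustrating the post-commit API */
        static int start_and_queue(struct btrfs_workers *workers,
                                   struct btrfs_work *work)
        {
                int ret;

                /* starts a single worker; may fail with -ENOMEM or a
                 * kthread_run() error, which the caller must now handle */
                ret = btrfs_start_workers(workers);
                if (ret)
                        return ret;

                /* queueing no longer returns a value, so nothing to check */
                btrfs_queue_worker(workers, work);
                return 0;
        }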
4 changes: 2 additions & 2 deletions fs/btrfs/async-thread.h
@@ -109,8 +109,8 @@ struct btrfs_workers {
         char *name;
 };
 
-int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work);
-int btrfs_start_workers(struct btrfs_workers *workers, int num_workers);
+void btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work);
+int btrfs_start_workers(struct btrfs_workers *workers);
 int btrfs_stop_workers(struct btrfs_workers *workers);
 void btrfs_init_workers(struct btrfs_workers *workers, char *name, int max,
                         struct btrfs_workers *async_starter);
[Diff truncated: only 5 of the 34 changed files are shown above.]