
Commit 8235d91
---
yaml
---
r: 130701
b: refs/heads/master
c: fb8ec18
h: refs/heads/master
i:
  130699: 2e42b8f
v: v3
Jens Axboe committed Feb 2, 2009
1 parent 63ebc0a commit 8235d91
Showing 7 changed files with 77 additions and 48 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
refs/heads/master: 3fff0179e33cd7d0a688dab65700c46ad089e934
refs/heads/master: fb8ec18c316d869271137c97320dbfd2def56569
6 changes: 3 additions & 3 deletions trunk/block/blk-core.c
@@ -69,7 +69,7 @@ static void drive_stat_acct(struct request *rq, int new_io)
int rw = rq_data_dir(rq);
int cpu;

if (!blk_fs_request(rq) || !disk || !blk_queue_io_stat(disk->queue))
if (!blk_fs_request(rq) || !disk || !blk_do_io_stat(disk->queue))
return;

cpu = part_stat_lock();
@@ -1667,7 +1667,7 @@ static void blk_account_io_completion(struct request *req, unsigned int bytes)
{
struct gendisk *disk = req->rq_disk;

if (!disk || !blk_queue_io_stat(disk->queue))
if (!disk || !blk_do_io_stat(disk->queue))
return;

if (blk_fs_request(req)) {
@@ -1686,7 +1686,7 @@ static void blk_account_io_done(struct request *req)
{
struct gendisk *disk = req->rq_disk;

if (!disk || !blk_queue_io_stat(disk->queue))
if (!disk || !blk_do_io_stat(disk->queue))
return;

/*
8 changes: 8 additions & 0 deletions trunk/block/blk.h
@@ -108,4 +108,12 @@ static inline int blk_cpu_to_group(int cpu)
#endif
}

static inline int blk_do_io_stat(struct request_queue *q)
{
if (q)
return blk_queue_io_stat(q);

return 0;
}

#endif
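
For context: the blk_do_io_stat() helper added above only wraps the existing blk_queue_io_stat() test with a NULL check on the queue. A minimal user-space sketch of the same guard pattern follows; the struct and function names are simplified stand-ins, not the kernel's real definitions.

    #include <stdio.h>
    #include <stddef.h>

    /* Simplified stand-in for the kernel's request_queue; the real
     * blk_queue_io_stat() tests a bit in q->queue_flags. */
    struct request_queue {
        int io_stat;
    };

    static int queue_io_stat(const struct request_queue *q)
    {
        return q->io_stat;              /* models blk_queue_io_stat(q) */
    }

    /* Models the new blk_do_io_stat(): safe to call even when a request
     * has no queue attached, which is the case being guarded against. */
    static int do_io_stat(const struct request_queue *q)
    {
        if (q)
            return queue_io_stat(q);

        return 0;
    }

    int main(void)
    {
        struct request_queue q = { .io_stat = 1 };

        printf("%d\n", do_io_stat(&q));   /* 1: accounting enabled */
        printf("%d\n", do_io_stat(NULL)); /* 0: no queue, no NULL dereference */
        return 0;
    }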
2 changes: 1 addition & 1 deletion trunk/drivers/virtio/virtio_pci.c
@@ -192,7 +192,7 @@ static irqreturn_t vp_interrupt(int irq, void *opaque)
drv = container_of(vp_dev->vdev.dev.driver,
struct virtio_driver, driver);

if (drv && drv->config_changed)
if (drv->config_changed)
drv->config_changed(&vp_dev->vdev);
}

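
Both forms of the vp_interrupt() test above guard an optional callback pointer before invoking it. A small self-contained sketch of that pattern, with made-up names (toy_driver, notify) rather than the real virtio types:

    #include <stdio.h>
    #include <stddef.h>

    /* Made-up driver type with an optional notification hook, loosely
     * modelled on struct virtio_driver's config_changed callback. */
    struct toy_driver {
        void (*config_changed)(void *dev);
    };

    static void on_change(void *dev)
    {
        (void)dev;
        printf("config changed\n");
    }

    static void notify(const struct toy_driver *drv, void *dev)
    {
        /* The hook is optional, so test the pointer before calling it.
         * If drv itself can be NULL, the caller also needs the extra
         * "drv &&" test, which is what the one-line hunk above changes. */
        if (drv && drv->config_changed)
            drv->config_changed(dev);
    }

    int main(void)
    {
        struct toy_driver with_hook    = { .config_changed = on_change };
        struct toy_driver without_hook = { .config_changed = NULL };

        notify(&with_hook, NULL);     /* prints "config changed" */
        notify(&without_hook, NULL);  /* silently does nothing */
        notify(NULL, NULL);           /* also safe thanks to the drv check */
        return 0;
    }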
25 changes: 9 additions & 16 deletions trunk/include/linux/module.h
@@ -219,6 +219,11 @@ void *__symbol_get_gpl(const char *symbol);

#endif

struct module_ref
{
local_t count;
} ____cacheline_aligned;

enum module_state
{
MODULE_STATE_LIVE,
@@ -339,11 +344,8 @@ struct module
/* Destruction function. */
void (*exit)(void);

#ifdef CONFIG_SMP
char *refptr;
#else
local_t ref;
#endif
/* Reference counts */
struct module_ref ref[NR_CPUS];
#endif
};
#ifndef MODULE_ARCH_INIT
@@ -393,22 +395,13 @@ void __symbol_put(const char *symbol);
#define symbol_put(x) __symbol_put(MODULE_SYMBOL_PREFIX #x)
void symbol_put_addr(void *addr);

static inline local_t *__module_ref_addr(struct module *mod, int cpu)
{
#ifdef CONFIG_SMP
return (local_t *) (mod->refptr + per_cpu_offset(cpu));
#else
return &mod->ref;
#endif
}

/* Sometimes we know we already have a refcount, and it's easier not
to handle the error case (which only happens with rmmod --wait). */
static inline void __module_get(struct module *module)
{
if (module) {
BUG_ON(module_refcount(module) == 0);
local_inc(__module_ref_addr(module, get_cpu()));
local_inc(&module->ref[get_cpu()].count);
put_cpu();
}
}
@@ -420,7 +413,7 @@ static inline int try_module_get(struct module *module)
if (module) {
unsigned int cpu = get_cpu();
if (likely(module_is_live(module)))
local_inc(__module_ref_addr(module, cpu));
local_inc(&module->ref[cpu].count);
else
ret = 0;
put_cpu();
35 changes: 10 additions & 25 deletions trunk/kernel/module.c
@@ -573,13 +573,13 @@ static char last_unloaded_module[MODULE_NAME_LEN+1];
/* Init the unload section of the module. */
static void module_unload_init(struct module *mod)
{
int cpu;
unsigned int i;

INIT_LIST_HEAD(&mod->modules_which_use_me);
for_each_possible_cpu(cpu)
local_set(__module_ref_addr(mod, cpu), 0);
for (i = 0; i < NR_CPUS; i++)
local_set(&mod->ref[i].count, 0);
/* Hold reference count during initialization. */
local_set(__module_ref_addr(mod, raw_smp_processor_id()), 1);
local_set(&mod->ref[raw_smp_processor_id()].count, 1);
/* Backwards compatibility macros put refcount during init. */
mod->waiter = current;
}
@@ -717,11 +717,10 @@ static int try_stop_module(struct module *mod, int flags, int *forced)

unsigned int module_refcount(struct module *mod)
{
unsigned int total = 0;
int cpu;
unsigned int i, total = 0;

for_each_possible_cpu(cpu)
total += local_read(__module_ref_addr(mod, cpu));
for (i = 0; i < NR_CPUS; i++)
total += local_read(&mod->ref[i].count);
return total;
}
EXPORT_SYMBOL(module_refcount);
@@ -895,7 +894,7 @@ void module_put(struct module *module)
{
if (module) {
unsigned int cpu = get_cpu();
local_dec(__module_ref_addr(module, cpu));
local_dec(&module->ref[cpu].count);
/* Maybe they're waiting for us to drop reference? */
if (unlikely(!module_is_live(module)))
wake_up_process(module->waiter);
@@ -1465,10 +1464,7 @@ static void free_module(struct module *mod)
kfree(mod->args);
if (mod->percpu)
percpu_modfree(mod->percpu);
#if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP)
if (mod->refptr)
percpu_modfree(mod->refptr);
#endif

/* Free lock-classes: */
lockdep_free_key_range(mod->module_core, mod->core_size);

@@ -2015,22 +2011,14 @@ static noinline struct module *load_module(void __user *umod,
if (err < 0)
goto free_mod;

#if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP)
mod->refptr = percpu_modalloc(sizeof(local_t), __alignof__(local_t),
mod->name);
if (!mod->refptr) {
err = -ENOMEM;
goto free_mod;
}
#endif
if (pcpuindex) {
/* We have a special allocation for this section. */
percpu = percpu_modalloc(sechdrs[pcpuindex].sh_size,
sechdrs[pcpuindex].sh_addralign,
mod->name);
if (!percpu) {
err = -ENOMEM;
goto free_percpu;
goto free_mod;
}
sechdrs[pcpuindex].sh_flags &= ~(unsigned long)SHF_ALLOC;
mod->percpu = percpu;
@@ -2294,9 +2282,6 @@ static noinline struct module *load_module(void __user *umod,
free_percpu:
if (percpu)
percpu_modfree(percpu);
#if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP)
percpu_modfree(mod->refptr);
#endif
free_mod:
kfree(args);
free_hdr:
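
The module.h and module.c hunks above keep the same counting scheme on either side of the change: each CPU touches only its own reference-count slot, and the true count is the sum over all slots. A plain-C sketch of that idea follows, with ordinary longs and an explicit cpu argument standing in for local_t and get_cpu(); the NR_CPUS value and the example counts are purely illustrative.

    #include <stdio.h>

    #define NR_CPUS 4   /* illustrative; the kernel's value is a config constant */

    /* Models struct module_ref: one counter per possible CPU. */
    struct module_ref {
        long count;
    };

    static struct module_ref ref[NR_CPUS];

    /* Model __module_get()/module_put(): bump only the current CPU's slot,
     * so the hot path never bounces a shared cache line between CPUs. */
    static void mod_get(int cpu) { ref[cpu].count++; }
    static void mod_put(int cpu) { ref[cpu].count--; }

    /* Models module_refcount(): sum every slot; an individual slot may go
     * negative when a reference taken on one CPU is dropped on another. */
    static long mod_refcount(void)
    {
        long total = 0;

        for (int i = 0; i < NR_CPUS; i++)
            total += ref[i].count;
        return total;
    }

    int main(void)
    {
        mod_get(0);                                  /* reference taken on CPU 0 */
        mod_get(1);                                  /* reference taken on CPU 1 */
        mod_put(3);                                  /* one dropped on CPU 3 */
        printf("refcount = %ld\n", mod_refcount());  /* prints "refcount = 1" */
        return 0;
    }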
47 changes: 45 additions & 2 deletions trunk/mm/mlock.c
@@ -294,10 +294,14 @@ static inline int __mlock_posix_error_return(long retval)
*
* return number of pages [> 0] to be removed from locked_vm on success
* of "special" vmas.
*
* return negative error if vma spanning @start-@range disappears while
* mmap semaphore is dropped. Unlikely?
*/
long mlock_vma_pages_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
struct mm_struct *mm = vma->vm_mm;
int nr_pages = (end - start) / PAGE_SIZE;
BUG_ON(!(vma->vm_flags & VM_LOCKED));

@@ -310,8 +314,20 @@ long mlock_vma_pages_range(struct vm_area_struct *vma,
if (!((vma->vm_flags & (VM_DONTEXPAND | VM_RESERVED)) ||
is_vm_hugetlb_page(vma) ||
vma == get_gate_vma(current))) {
long error;
downgrade_write(&mm->mmap_sem);

error = __mlock_vma_pages_range(vma, start, end, 1);

return __mlock_vma_pages_range(vma, start, end, 1);
up_read(&mm->mmap_sem);
/* vma can change or disappear */
down_write(&mm->mmap_sem);
vma = find_vma(mm, start);
/* non-NULL vma must contain @start, but need to check @end */
if (!vma || end > vma->vm_end)
return -ENOMEM;

return 0; /* hide other errors from mmap(), et al */
}

/*
@@ -422,14 +438,41 @@ static int mlock_fixup(struct vm_area_struct *vma, struct vm_area_struct **prev,
vma->vm_flags = newflags;

if (lock) {
/*
* mmap_sem is currently held for write. Downgrade the write
* lock to a read lock so that other faults, mmap scans, ...
* while we fault in all pages.
*/
downgrade_write(&mm->mmap_sem);

ret = __mlock_vma_pages_range(vma, start, end, 1);

if (ret > 0) {
/*
* Need to reacquire mmap sem in write mode, as our callers
* expect this. We have no support for atomically upgrading
* a sem to write, so we need to check for ranges while sem
* is unlocked.
*/
up_read(&mm->mmap_sem);
/* vma can change or disappear */
down_write(&mm->mmap_sem);
*prev = find_vma(mm, start);
/* non-NULL *prev must contain @start, but need to check @end */
if (!(*prev) || end > (*prev)->vm_end)
ret = -ENOMEM;
else if (ret > 0) {
mm->locked_vm -= ret;
ret = 0;
} else
ret = __mlock_posix_error_return(ret); /* translate if needed */
} else {
/*
* TODO: for unlocking, pages will already be resident, so
* we don't need to wait for allocations/reclaim/pagein, ...
* However, unlocking a very large region can still take a
* while. Should we downgrade the semaphore for both lock
* AND unlock ?
*/
__mlock_vma_pages_range(vma, start, end, 0);
}

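The two mlock.c hunks above share one shape: the caller holds mmap_sem for write, the function downgrades to a read lock while it faults pages in, and since the semaphore cannot be upgraded atomically it must drop the lock, retake it for writing, and re-check that the vma still covers the range before returning. A self-contained pthread sketch of that shape follows; the types and lock_range() are made up for illustration, and because POSIX rwlocks cannot downgrade atomically the downgrade step is modelled as unlock followed by rdlock.

    #include <pthread.h>
    #include <stdio.h>
    #include <stddef.h>

    /* Toy stand-in for the vma state protected by mmap_sem. */
    struct vma { unsigned long vm_start, vm_end; };

    static pthread_rwlock_t mmap_sem = PTHREAD_RWLOCK_INITIALIZER;
    static struct vma the_vma = { 0x1000, 0x9000 };
    static struct vma *current_vma = &the_vma;

    /* Models find_vma(): may return NULL if the mapping vanished while
     * the lock was dropped. */
    static struct vma *find_vma(unsigned long addr)
    {
        if (current_vma && addr < current_vma->vm_end)
            return current_vma;
        return NULL;
    }

    /* Called with mmap_sem held for write, like mlock_fixup() above. */
    static int lock_range(unsigned long start, unsigned long end)
    {
        /* downgrade_write(): here modelled as unlock + rdlock. */
        pthread_rwlock_unlock(&mmap_sem);
        pthread_rwlock_rdlock(&mmap_sem);

        /* ... long-running fault-in work would run here under the read lock ... */

        /* Callers expect the write lock back; there is no atomic upgrade,
         * so drop the lock, retake it for writing, and re-check the range. */
        pthread_rwlock_unlock(&mmap_sem);
        pthread_rwlock_wrlock(&mmap_sem);

        struct vma *vma = find_vma(start);
        if (!vma || end > vma->vm_end)
            return -1;      /* vma changed or disappeared: kernel returns -ENOMEM */

        return 0;
    }

    int main(void)
    {
        int ret;

        pthread_rwlock_wrlock(&mmap_sem);   /* caller takes mmap_sem for write */
        ret = lock_range(0x1000, 0x5000);
        pthread_rwlock_unlock(&mmap_sem);

        printf("lock_range -> %d\n", ret);  /* 0 with the toy vma above */
        return 0;
    }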
