Skip to content

Commit

Permalink
---
Browse files Browse the repository at this point in the history
yaml
---
r: 130706
b: refs/heads/master
c: 86adf8a
h: refs/heads/master
v: v3
  • Loading branch information
Linus Torvalds committed Feb 3, 2009
1 parent c0d2ae6 commit 7593107
Show file tree
Hide file tree
Showing 5 changed files with 45 additions and 66 deletions.
2 changes: 1 addition & 1 deletion [refs]
Original file line number Diff line number Diff line change
@@ -1,2 +1,2 @@
---
refs/heads/master: cbb5901b904e122139e97c6f4caed9b1f13c3455
refs/heads/master: 86adf8adfcb3d3f4b6c30aeb40da480da02de1d1
2 changes: 1 addition & 1 deletion trunk/drivers/virtio/virtio_pci.c
Original file line number Diff line number Diff line change
Expand Up @@ -192,7 +192,7 @@ static irqreturn_t vp_interrupt(int irq, void *opaque)
drv = container_of(vp_dev->vdev.dev.driver,
struct virtio_driver, driver);

if (drv->config_changed)
if (drv && drv->config_changed)
drv->config_changed(&vp_dev->vdev);
}

Expand Down
25 changes: 16 additions & 9 deletions trunk/include/linux/module.h
Original file line number Diff line number Diff line change
Expand Up @@ -219,11 +219,6 @@ void *__symbol_get_gpl(const char *symbol);

#endif

struct module_ref
{
local_t count;
} ____cacheline_aligned;

enum module_state
{
MODULE_STATE_LIVE,
Expand Down Expand Up @@ -344,8 +339,11 @@ struct module
/* Destruction function. */
void (*exit)(void);

/* Reference counts */
struct module_ref ref[NR_CPUS];
#ifdef CONFIG_SMP
char *refptr;
#else
local_t ref;
#endif
#endif
};
#ifndef MODULE_ARCH_INIT
Expand Down Expand Up @@ -395,13 +393,22 @@ void __symbol_put(const char *symbol);
#define symbol_put(x) __symbol_put(MODULE_SYMBOL_PREFIX #x)
void symbol_put_addr(void *addr);

/*
 * __module_ref_addr - return the address of @mod's reference counter
 * slot for @cpu.
 *
 * On CONFIG_SMP kernels the counters live in a per-CPU area allocated
 * at load time into mod->refptr (see the percpu_modalloc() call added
 * to load_module() in this same patch); per_cpu_offset(cpu) offsets the
 * base pointer to the given CPU's copy.  On UP kernels struct module
 * holds a single local_t member, returned directly.
 *
 * NOTE(review): this hunk was scraped without diff +/- markers; the
 * code below is kept byte-identical to the source.  Callers must pin
 * the CPU (get_cpu()/put_cpu() or equivalent) while using the returned
 * pointer — presumably, based on the get_cpu() usage in __module_get()
 * and try_module_get() elsewhere in this diff.
 */
static inline local_t *__module_ref_addr(struct module *mod, int cpu)
{
#ifdef CONFIG_SMP
/* refptr is a per-CPU base pointer; translate to this CPU's slot. */
return (local_t *) (mod->refptr + per_cpu_offset(cpu));
#else
/* Uniprocessor: only one counter, embedded in struct module. */
return &mod->ref;
#endif
}

/* Sometimes we know we already have a refcount, and it's easier not
to handle the error case (which only happens with rmmod --wait). */
static inline void __module_get(struct module *module)
{
if (module) {
BUG_ON(module_refcount(module) == 0);
local_inc(&module->ref[get_cpu()].count);
local_inc(__module_ref_addr(module, get_cpu()));
put_cpu();
}
}
Expand All @@ -413,7 +420,7 @@ static inline int try_module_get(struct module *module)
if (module) {
unsigned int cpu = get_cpu();
if (likely(module_is_live(module)))
local_inc(&module->ref[cpu].count);
local_inc(__module_ref_addr(module, cpu));
else
ret = 0;
put_cpu();
Expand Down
35 changes: 25 additions & 10 deletions trunk/kernel/module.c
Original file line number Diff line number Diff line change
Expand Up @@ -573,13 +573,13 @@ static char last_unloaded_module[MODULE_NAME_LEN+1];
/* Init the unload section of the module. */
static void module_unload_init(struct module *mod)
{
unsigned int i;
int cpu;

INIT_LIST_HEAD(&mod->modules_which_use_me);
for (i = 0; i < NR_CPUS; i++)
local_set(&mod->ref[i].count, 0);
for_each_possible_cpu(cpu)
local_set(__module_ref_addr(mod, cpu), 0);
/* Hold reference count during initialization. */
local_set(&mod->ref[raw_smp_processor_id()].count, 1);
local_set(__module_ref_addr(mod, raw_smp_processor_id()), 1);
/* Backwards compatibility macros put refcount during init. */
mod->waiter = current;
}
Expand Down Expand Up @@ -717,10 +717,11 @@ static int try_stop_module(struct module *mod, int flags, int *forced)

unsigned int module_refcount(struct module *mod)
{
unsigned int i, total = 0;
unsigned int total = 0;
int cpu;

for (i = 0; i < NR_CPUS; i++)
total += local_read(&mod->ref[i].count);
for_each_possible_cpu(cpu)
total += local_read(__module_ref_addr(mod, cpu));
return total;
}
EXPORT_SYMBOL(module_refcount);
Expand Down Expand Up @@ -894,7 +895,7 @@ void module_put(struct module *module)
{
if (module) {
unsigned int cpu = get_cpu();
local_dec(&module->ref[cpu].count);
local_dec(__module_ref_addr(module, cpu));
/* Maybe they're waiting for us to drop reference? */
if (unlikely(!module_is_live(module)))
wake_up_process(module->waiter);
Expand Down Expand Up @@ -1464,7 +1465,10 @@ static void free_module(struct module *mod)
kfree(mod->args);
if (mod->percpu)
percpu_modfree(mod->percpu);

#if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP)
if (mod->refptr)
percpu_modfree(mod->refptr);
#endif
/* Free lock-classes: */
lockdep_free_key_range(mod->module_core, mod->core_size);

Expand Down Expand Up @@ -2011,14 +2015,22 @@ static noinline struct module *load_module(void __user *umod,
if (err < 0)
goto free_mod;

#if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP)
mod->refptr = percpu_modalloc(sizeof(local_t), __alignof__(local_t),
mod->name);
if (!mod->refptr) {
err = -ENOMEM;
goto free_mod;
}
#endif
if (pcpuindex) {
/* We have a special allocation for this section. */
percpu = percpu_modalloc(sechdrs[pcpuindex].sh_size,
sechdrs[pcpuindex].sh_addralign,
mod->name);
if (!percpu) {
err = -ENOMEM;
goto free_mod;
goto free_percpu;
}
sechdrs[pcpuindex].sh_flags &= ~(unsigned long)SHF_ALLOC;
mod->percpu = percpu;
Expand Down Expand Up @@ -2282,6 +2294,9 @@ static noinline struct module *load_module(void __user *umod,
free_percpu:
if (percpu)
percpu_modfree(percpu);
#if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP)
percpu_modfree(mod->refptr);
#endif
free_mod:
kfree(args);
free_hdr:
Expand Down
47 changes: 2 additions & 45 deletions trunk/mm/mlock.c
Original file line number Diff line number Diff line change
Expand Up @@ -294,14 +294,10 @@ static inline int __mlock_posix_error_return(long retval)
*
* return number of pages [> 0] to be removed from locked_vm on success
* of "special" vmas.
*
* return negative error if vma spanning @start-@range disappears while
* mmap semaphore is dropped. Unlikely?
*/
long mlock_vma_pages_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
struct mm_struct *mm = vma->vm_mm;
int nr_pages = (end - start) / PAGE_SIZE;
BUG_ON(!(vma->vm_flags & VM_LOCKED));

Expand All @@ -314,20 +310,8 @@ long mlock_vma_pages_range(struct vm_area_struct *vma,
if (!((vma->vm_flags & (VM_DONTEXPAND | VM_RESERVED)) ||
is_vm_hugetlb_page(vma) ||
vma == get_gate_vma(current))) {
long error;
downgrade_write(&mm->mmap_sem);

error = __mlock_vma_pages_range(vma, start, end, 1);

up_read(&mm->mmap_sem);
/* vma can change or disappear */
down_write(&mm->mmap_sem);
vma = find_vma(mm, start);
/* non-NULL vma must contain @start, but need to check @end */
if (!vma || end > vma->vm_end)
return -ENOMEM;

return 0; /* hide other errors from mmap(), et al */
return __mlock_vma_pages_range(vma, start, end, 1);
}

/*
Expand Down Expand Up @@ -438,41 +422,14 @@ static int mlock_fixup(struct vm_area_struct *vma, struct vm_area_struct **prev,
vma->vm_flags = newflags;

if (lock) {
/*
* mmap_sem is currently held for write. Downgrade the write
* lock to a read lock so that other faults, mmap scans, ...
* while we fault in all pages.
*/
downgrade_write(&mm->mmap_sem);

ret = __mlock_vma_pages_range(vma, start, end, 1);

/*
* Need to reacquire mmap sem in write mode, as our callers
* expect this. We have no support for atomically upgrading
* a sem to write, so we need to check for ranges while sem
* is unlocked.
*/
up_read(&mm->mmap_sem);
/* vma can change or disappear */
down_write(&mm->mmap_sem);
*prev = find_vma(mm, start);
/* non-NULL *prev must contain @start, but need to check @end */
if (!(*prev) || end > (*prev)->vm_end)
ret = -ENOMEM;
else if (ret > 0) {
if (ret > 0) {
mm->locked_vm -= ret;
ret = 0;
} else
ret = __mlock_posix_error_return(ret); /* translate if needed */
} else {
/*
* TODO: for unlocking, pages will already be resident, so
* we don't need to wait for allocations/reclaim/pagein, ...
* However, unlocking a very large region can still take a
* while. Should we downgrade the semaphore for both lock
* AND unlock ?
*/
__mlock_vma_pages_range(vma, start, end, 0);
}

Expand Down

0 comments on commit 7593107

Please sign in to comment.