Commit 4b60220

---
r: 156423
b: refs/heads/master
c: 4bfc449
h: refs/heads/master
i:
  156421: 7a96a17
  156419: a762dcb
  156415: 3881ff7
v: v3
KAMEZAWA Hiroyuki authored and Linus Torvalds committed Aug 7, 2009
1 parent a41ad7e commit 4b60220
Showing 3 changed files with 87 additions and 27 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
refs/heads/master: 93274e4d4e9416ad1fa47e2f26011e2c483fe5fe
refs/heads/master: 4bfc44958e499af9a73f62201543b3a1f617cfeb
28 changes: 28 additions & 0 deletions trunk/include/linux/nodemask.h
@@ -82,6 +82,12 @@
* to generate slightly worse code. So use a simple one-line #define
* for node_isset(), instead of wrapping an inline inside a macro, the
* way we do the other calls.
*
* NODEMASK_SCRATCH
* When doing the above logical AND, OR, XOR, and Remap operations, callers
* tend to need temporary nodemask_t's on the stack. But if NODES_SHIFT is
* large, nodemask_t's consume too much stack space. NODEMASK_SCRATCH is a
* helper for such situations. See below, and see also CPUMASK_ALLOC.
*/

#include <linux/kernel.h>
@@ -473,4 +479,26 @@ static inline int num_node_state(enum node_states state)
#define for_each_node(node) for_each_node_state(node, N_POSSIBLE)
#define for_each_online_node(node) for_each_node_state(node, N_ONLINE)

/*
* For nodemask scratch area. (See CPUMASK_ALLOC() in cpumask.h.)
*/

#if NODES_SHIFT > 8 /* nodemask_t > 32 bytes */
#define NODEMASK_ALLOC(x, m) struct x *m = kmalloc(sizeof(*m), GFP_KERNEL)
#define NODEMASK_FREE(m) kfree(m)
#else
#define NODEMASK_ALLOC(x, m) struct x _m, *m = &_m
#define NODEMASK_FREE(m)
#endif

/* An example structure for using NODEMASK_ALLOC, used in mempolicy. */
struct nodemask_scratch {
nodemask_t mask1;
nodemask_t mask2;
};

#define NODEMASK_SCRATCH(x) NODEMASK_ALLOC(nodemask_scratch, x)
#define NODEMASK_SCRATCH_FREE(x) NODEMASK_FREE(x)


#endif /* __LINUX_NODEMASK_H */
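For context, here is a minimal caller sketch of the pattern these macros enable, in the style mempolicy.c adopts below. This is not part of the commit: example_user() is a hypothetical function, though nodes_and(), nodes_empty(), and node_states[] are the real nodemask helpers. On the small-NODES_SHIFT path the scratch area lives on the stack, the pointer is never NULL, and NODEMASK_SCRATCH_FREE() expands to nothing; on the kmalloc path the NULL check and the kfree() are real.

/* Hypothetical caller showing the intended NODEMASK_SCRATCH usage. */
static int example_user(const nodemask_t *nodes)
{
	int ret = 0;
	NODEMASK_SCRATCH(scratch);	/* stack struct or kmalloc'd, per NODES_SHIFT */

	if (!scratch)			/* can only trigger on the kmalloc path */
		return -ENOMEM;

	/* Use the embedded masks as temporaries instead of stack nodemask_t's. */
	nodes_and(scratch->mask1, *nodes, node_states[N_HIGH_MEMORY]);
	if (nodes_empty(scratch->mask1))
		ret = -EINVAL;

	NODEMASK_SCRATCH_FREE(scratch);	/* kfree() or no-op */
	return ret;
}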
84 changes: 58 additions & 26 deletions trunk/mm/mempolicy.c
@@ -191,34 +191,38 @@ static int mpol_new_bind(struct mempolicy *pol, const nodemask_t *nodes)
* Must be called holding task's alloc_lock to protect task's mems_allowed
* and mempolicy. May also be called holding the mmap_semaphore for write.
*/
static int mpol_set_nodemask(struct mempolicy *pol, const nodemask_t *nodes)
static int mpol_set_nodemask(struct mempolicy *pol,
const nodemask_t *nodes, struct nodemask_scratch *nsc)
{
nodemask_t cpuset_context_nmask;
int ret;

/* if mode is MPOL_DEFAULT, pol is NULL. This is right. */
if (pol == NULL)
return 0;
/* Check N_HIGH_MEMORY */
nodes_and(nsc->mask1,
cpuset_current_mems_allowed, node_states[N_HIGH_MEMORY]);

VM_BUG_ON(!nodes);
if (pol->mode == MPOL_PREFERRED && nodes_empty(*nodes))
nodes = NULL; /* explicit local allocation */
else {
if (pol->flags & MPOL_F_RELATIVE_NODES)
mpol_relative_nodemask(&cpuset_context_nmask, nodes,
&cpuset_current_mems_allowed);
mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1);
else
nodes_and(cpuset_context_nmask, *nodes,
cpuset_current_mems_allowed);
nodes_and(nsc->mask2, *nodes, nsc->mask1);

if (mpol_store_user_nodemask(pol))
pol->w.user_nodemask = *nodes;
else
pol->w.cpuset_mems_allowed =
cpuset_current_mems_allowed;
}

ret = mpol_ops[pol->mode].create(pol,
nodes ? &cpuset_context_nmask : NULL);
if (nodes)
ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
else
ret = mpol_ops[pol->mode].create(pol, NULL);
return ret;
}

@@ -620,12 +624,17 @@ static long do_set_mempolicy(unsigned short mode, unsigned short flags,
{
struct mempolicy *new, *old;
struct mm_struct *mm = current->mm;
NODEMASK_SCRATCH(scratch);
int ret;

new = mpol_new(mode, flags, nodes);
if (IS_ERR(new))
return PTR_ERR(new);
if (!scratch)
return -ENOMEM;

new = mpol_new(mode, flags, nodes);
if (IS_ERR(new)) {
ret = PTR_ERR(new);
goto out;
}
/*
* prevent changing our mempolicy while show_numa_maps()
* is using it.
@@ -635,13 +644,13 @@ static long do_set_mempolicy(unsigned short mode, unsigned short flags,
if (mm)
down_write(&mm->mmap_sem);
task_lock(current);
ret = mpol_set_nodemask(new, nodes);
ret = mpol_set_nodemask(new, nodes, scratch);
if (ret) {
task_unlock(current);
if (mm)
up_write(&mm->mmap_sem);
mpol_put(new);
return ret;
goto out;
}
old = current->mempolicy;
current->mempolicy = new;
@@ -654,7 +663,10 @@ static long do_set_mempolicy(unsigned short mode, unsigned short flags,
up_write(&mm->mmap_sem);

mpol_put(old);
return 0;
ret = 0;
out:
NODEMASK_SCRATCH_FREE(scratch);
return ret;
}

/*
@@ -1014,12 +1026,20 @@ static long do_mbind(unsigned long start, unsigned long len,
if (err)
return err;
}
down_write(&mm->mmap_sem);
task_lock(current);
err = mpol_set_nodemask(new, nmask);
task_unlock(current);
{
NODEMASK_SCRATCH(scratch);
if (scratch) {
down_write(&mm->mmap_sem);
task_lock(current);
err = mpol_set_nodemask(new, nmask, scratch);
task_unlock(current);
if (err)
up_write(&mm->mmap_sem);
} else
err = -ENOMEM;
NODEMASK_SCRATCH_FREE(scratch);
}
if (err) {
up_write(&mm->mmap_sem);
mpol_put(new);
return err;
}
@@ -1891,6 +1911,7 @@ static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
* Install non-NULL @mpol in inode's shared policy rb-tree.
* On entry, the current task has a reference on a non-NULL @mpol.
* This must be released on exit.
* This is called during get_inode() calls, so GFP_KERNEL is safe to use.
*/
void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
{
@@ -1902,19 +1923,24 @@ void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
if (mpol) {
struct vm_area_struct pvma;
struct mempolicy *new;
NODEMASK_SCRATCH(scratch);

if (!scratch)
return;
/* contextualize the tmpfs mount point mempolicy */
new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask);
if (IS_ERR(new)) {
mpol_put(mpol); /* drop our ref on sb mpol */
NODEMASK_SCRATCH_FREE(scratch);
return; /* no valid nodemask intersection */
}

task_lock(current);
ret = mpol_set_nodemask(new, &mpol->w.user_nodemask);
ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch);
task_unlock(current);
mpol_put(mpol); /* drop our ref on sb mpol */
if (ret) {
NODEMASK_SCRATCH_FREE(scratch);
mpol_put(new);
return;
}
@@ -1924,6 +1950,7 @@ void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
pvma.vm_end = TASK_SIZE; /* policy covers entire file */
mpol_set_shared_policy(sp, &pvma, new); /* adds ref */
mpol_put(new); /* drop initial ref */
NODEMASK_SCRATCH_FREE(scratch);
}
}

@@ -2140,13 +2167,18 @@ int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context)
err = 1;
else {
int ret;

task_lock(current);
ret = mpol_set_nodemask(new, &nodes);
task_unlock(current);
if (ret)
err = 1;
else if (no_context) {
NODEMASK_SCRATCH(scratch);
if (scratch) {
task_lock(current);
ret = mpol_set_nodemask(new, &nodes, scratch);
task_unlock(current);
} else
ret = -ENOMEM;
NODEMASK_SCRATCH_FREE(scratch);
if (ret) {
err = 1;
mpol_put(new);
} else if (no_context) {
/* save for contextualization */
new->w.user_nodemask = nodes;
}
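As a back-of-the-envelope check on the NODES_SHIFT > 8 threshold in the nodemask.h hunk above: a nodemask_t is a bitmap of MAX_NUMNODES = 1 << NODES_SHIFT bits, so a struct nodemask_scratch holding two masks costs 2 * (1 << NODES_SHIFT) / 8 bytes if placed on the stack. A standalone userspace sketch of that arithmetic (illustration only, not from the commit):

#include <stdio.h>

int main(void)
{
	/* sizeof(nodemask_t) is MAX_NUMNODES bits rounded up to whole longs;
	 * the rounding is ignored here since these powers of two divide evenly. */
	for (int shift = 6; shift <= 10; shift++) {
		unsigned long bits  = 1UL << shift;	/* MAX_NUMNODES */
		unsigned long bytes = bits / 8;		/* one nodemask_t */

		printf("NODES_SHIFT=%2d: nodemask_t=%4lu bytes, nodemask_scratch=%4lu bytes\n",
		       shift, bytes, 2 * bytes);
	}
	return 0;
}

At NODES_SHIFT == 8 the two-mask scratch area is 64 bytes; each further shift doubles it, which is why the helpers switch from the stack to kmalloc() beyond that point.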
