Skip to content

Commit

Permalink
---
Browse files — browse the repository at this point in the history
yaml
---
r: 94034
b: refs/heads/master
c: 45c4745
h: refs/heads/master
v: v3
  • Loading branch information
Lee Schermerhorn authored and Linus Torvalds committed Apr 28, 2008
1 parent f7a011a commit 8afa2d1
Show file tree
Hide file tree
Showing 4 changed files with 27 additions and 31 deletions.
2 changes: 1 addition & 1 deletion [refs]
Original file line number Diff line number Diff line change
@@ -1,2 +1,2 @@
---
refs/heads/master: ae4d8c16aa22775f5731677abb8a82f03cec877e
refs/heads/master: 45c4745af381851b0406d8e4db99e62e265691c2
4 changes: 0 additions & 4 deletions trunk/Documentation/vm/numa_memory_policy.txt
Original file line number Diff line number Diff line change
Expand Up @@ -145,10 +145,6 @@ Components of Memory Policies
structure, struct mempolicy. Details of this structure will be discussed
in context, below, as required to explain the behavior.

Note: in some functions AND in the struct mempolicy itself, the mode
is called "policy". However, to avoid confusion with the policy tuple,
this document will continue to use the term "mode".

Linux memory policy supports the following 4 behavioral modes:

Default Mode--MPOL_DEFAULT: The behavior specified by this mode is
Expand Down
6 changes: 3 additions & 3 deletions trunk/include/linux/mempolicy.h
Original file line number Diff line number Diff line change
Expand Up @@ -80,7 +80,7 @@ struct mm_struct;
*/
struct mempolicy {
atomic_t refcnt;
unsigned short policy; /* See MPOL_* above */
unsigned short mode; /* See MPOL_* above */
unsigned short flags; /* See set_mempolicy() MPOL_F_* above */
union {
short preferred_node; /* preferred */
Expand Down Expand Up @@ -149,7 +149,7 @@ struct shared_policy {
spinlock_t lock;
};

void mpol_shared_policy_init(struct shared_policy *info, unsigned short policy,
void mpol_shared_policy_init(struct shared_policy *info, unsigned short mode,
unsigned short flags, nodemask_t *nodes);
int mpol_set_shared_policy(struct shared_policy *info,
struct vm_area_struct *vma,
Expand Down Expand Up @@ -213,7 +213,7 @@ static inline int mpol_set_shared_policy(struct shared_policy *info,
}

static inline void mpol_shared_policy_init(struct shared_policy *info,
unsigned short policy, unsigned short flags, nodemask_t *nodes)
unsigned short mode, unsigned short flags, nodemask_t *nodes)
{
}

Expand Down
46 changes: 23 additions & 23 deletions trunk/mm/mempolicy.c
Original file line number Diff line number Diff line change
Expand Up @@ -106,7 +106,7 @@ enum zone_type policy_zone = 0;

struct mempolicy default_policy = {
.refcnt = ATOMIC_INIT(1), /* never free it */
.policy = MPOL_DEFAULT,
.mode = MPOL_DEFAULT,
};

static const struct mempolicy_operations {
Expand Down Expand Up @@ -211,7 +211,7 @@ static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
if (!policy)
return ERR_PTR(-ENOMEM);
atomic_set(&policy->refcnt, 1);
policy->policy = mode;
policy->mode = mode;
policy->flags = flags;

if (nodes) {
Expand Down Expand Up @@ -302,7 +302,7 @@ static void mpol_rebind_policy(struct mempolicy *pol,
if (!mpol_store_user_nodemask(pol) &&
nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
return;
mpol_ops[pol->policy].rebind(pol, newmask);
mpol_ops[pol->mode].rebind(pol, newmask);
}

/*
Expand Down Expand Up @@ -608,7 +608,7 @@ static long do_set_mempolicy(unsigned short mode, unsigned short flags,
mpol_put(current->mempolicy);
current->mempolicy = new;
mpol_set_task_struct_flag();
if (new && new->policy == MPOL_INTERLEAVE &&
if (new && new->mode == MPOL_INTERLEAVE &&
nodes_weight(new->v.nodes))
current->il_next = first_node(new->v.nodes);
if (mm)
Expand All @@ -621,7 +621,7 @@ static long do_set_mempolicy(unsigned short mode, unsigned short flags,
static void get_zonemask(struct mempolicy *p, nodemask_t *nodes)
{
nodes_clear(*nodes);
switch (p->policy) {
switch (p->mode) {
case MPOL_DEFAULT:
break;
case MPOL_BIND:
Expand Down Expand Up @@ -700,14 +700,14 @@ static long do_get_mempolicy(int *policy, nodemask_t *nmask,
goto out;
*policy = err;
} else if (pol == current->mempolicy &&
pol->policy == MPOL_INTERLEAVE) {
pol->mode == MPOL_INTERLEAVE) {
*policy = current->il_next;
} else {
err = -EINVAL;
goto out;
}
} else
*policy = pol->policy | pol->flags;
*policy = pol->mode | pol->flags;

if (vma) {
up_read(&current->mm->mmap_sem);
Expand Down Expand Up @@ -1276,7 +1276,7 @@ static struct mempolicy *get_vma_policy(struct task_struct *task,
pol = vpol;
shared_pol = 1; /* if pol non-NULL, add ref below */
} else if (vma->vm_policy &&
vma->vm_policy->policy != MPOL_DEFAULT)
vma->vm_policy->mode != MPOL_DEFAULT)
pol = vma->vm_policy;
}
if (!pol)
Expand All @@ -1290,7 +1290,7 @@ static struct mempolicy *get_vma_policy(struct task_struct *task,
static nodemask_t *nodemask_policy(gfp_t gfp, struct mempolicy *policy)
{
/* Lower zones don't get a nodemask applied for MPOL_BIND */
if (unlikely(policy->policy == MPOL_BIND) &&
if (unlikely(policy->mode == MPOL_BIND) &&
gfp_zone(gfp) >= policy_zone &&
cpuset_nodemask_valid_mems_allowed(&policy->v.nodes))
return &policy->v.nodes;
Expand All @@ -1303,7 +1303,7 @@ static struct zonelist *zonelist_policy(gfp_t gfp, struct mempolicy *policy)
{
int nd;

switch (policy->policy) {
switch (policy->mode) {
case MPOL_PREFERRED:
nd = policy->v.preferred_node;
if (nd < 0)
Expand Down Expand Up @@ -1353,7 +1353,7 @@ static unsigned interleave_nodes(struct mempolicy *policy)
*/
unsigned slab_node(struct mempolicy *policy)
{
unsigned short pol = policy ? policy->policy : MPOL_DEFAULT;
unsigned short pol = policy ? policy->mode : MPOL_DEFAULT;

switch (pol) {
case MPOL_INTERLEAVE:
Expand Down Expand Up @@ -1454,9 +1454,9 @@ struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr,

*mpol = NULL; /* probably no unref needed */
*nodemask = NULL; /* assume !MPOL_BIND */
if (pol->policy == MPOL_BIND) {
if (pol->mode == MPOL_BIND) {
*nodemask = &pol->v.nodes;
} else if (pol->policy == MPOL_INTERLEAVE) {
} else if (pol->mode == MPOL_INTERLEAVE) {
unsigned nid;

nid = interleave_nid(pol, vma, addr, HPAGE_SHIFT);
Expand All @@ -1468,7 +1468,7 @@ struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr,

zl = zonelist_policy(GFP_HIGHUSER, pol);
if (unlikely(pol != &default_policy && pol != current->mempolicy)) {
if (pol->policy != MPOL_BIND)
if (pol->mode != MPOL_BIND)
__mpol_put(pol); /* finished with pol */
else
*mpol = pol; /* unref needed after allocation */
Expand Down Expand Up @@ -1522,7 +1522,7 @@ alloc_page_vma(gfp_t gfp, struct vm_area_struct *vma, unsigned long addr)

cpuset_update_task_memory_state();

if (unlikely(pol->policy == MPOL_INTERLEAVE)) {
if (unlikely(pol->mode == MPOL_INTERLEAVE)) {
unsigned nid;

nid = interleave_nid(pol, vma, addr, PAGE_SHIFT);
Expand Down Expand Up @@ -1574,7 +1574,7 @@ struct page *alloc_pages_current(gfp_t gfp, unsigned order)
cpuset_update_task_memory_state();
if (!pol || in_interrupt() || (gfp & __GFP_THISNODE))
pol = &default_policy;
if (pol->policy == MPOL_INTERLEAVE)
if (pol->mode == MPOL_INTERLEAVE)
return alloc_page_interleave(gfp, order, interleave_nodes(pol));
return __alloc_pages_nodemask(gfp, order,
zonelist_policy(gfp, pol), nodemask_policy(gfp, pol));
Expand Down Expand Up @@ -1620,11 +1620,11 @@ int __mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
if (!a || !b)
return 0;
if (a->policy != b->policy)
if (a->mode != b->mode)
return 0;
if (a->policy != MPOL_DEFAULT && !mpol_match_intent(a, b))
if (a->mode != MPOL_DEFAULT && !mpol_match_intent(a, b))
return 0;
switch (a->policy) {
switch (a->mode) {
case MPOL_DEFAULT:
return 1;
case MPOL_BIND:
Expand All @@ -1644,7 +1644,7 @@ void __mpol_put(struct mempolicy *p)
{
if (!atomic_dec_and_test(&p->refcnt))
return;
p->policy = MPOL_DEFAULT;
p->mode = MPOL_DEFAULT;
kmem_cache_free(policy_cache, p);
}

Expand Down Expand Up @@ -1710,7 +1710,7 @@ static void sp_insert(struct shared_policy *sp, struct sp_node *new)
rb_link_node(&new->nd, parent, p);
rb_insert_color(&new->nd, &sp->root);
pr_debug("inserting %lx-%lx: %d\n", new->start, new->end,
new->policy ? new->policy->policy : 0);
new->policy ? new->policy->mode : 0);
}

/* Find shared policy intersecting idx */
Expand Down Expand Up @@ -1835,7 +1835,7 @@ int mpol_set_shared_policy(struct shared_policy *info,

pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n",
vma->vm_pgoff,
sz, npol ? npol->policy : -1,
sz, npol ? npol->mode : -1,
npol ? npol->flags : -1,
npol ? nodes_addr(npol->v.nodes)[0] : -1);

Expand Down Expand Up @@ -1935,7 +1935,7 @@ static inline int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
char *p = buffer;
int l;
nodemask_t nodes;
unsigned short mode = pol ? pol->policy : MPOL_DEFAULT;
unsigned short mode = pol ? pol->mode : MPOL_DEFAULT;
unsigned short flags = pol ? pol->flags : 0;

switch (mode) {
Expand Down

0 comments on commit 8afa2d1

Please sign in to comment.