
Commit 4f6fd67

---
r: 65035
b: refs/heads/master
c: 480eccf
h: refs/heads/master
i:
  65033: e4cb267
  65031: 92fc1ed
v: v3
Lee Schermerhorn authored and Linus Torvalds committed Sep 19, 2007
1 parent 3109341 commit 4f6fd67
Showing 4 changed files with 76 additions and 13 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 28f300d23674fa01ae747c66ce861d4ee6aebe8c
+refs/heads/master: 480eccf9ae1073b87bb4fe118971fbf134a5bc61
4 changes: 2 additions & 2 deletions trunk/include/linux/mempolicy.h
@@ -159,7 +159,7 @@ extern void mpol_fix_fork_child_flag(struct task_struct *p);

 extern struct mempolicy default_policy;
 extern struct zonelist *huge_zonelist(struct vm_area_struct *vma,
-                unsigned long addr, gfp_t gfp_flags);
+                unsigned long addr, gfp_t gfp_flags, struct mempolicy **mpol);
 extern unsigned slab_node(struct mempolicy *policy);

 extern enum zone_type policy_zone;
@@ -256,7 +256,7 @@ static inline void mpol_fix_fork_child_flag(struct task_struct *p)
 #define set_cpuset_being_rebound(x) do {} while (0)

 static inline struct zonelist *huge_zonelist(struct vm_area_struct *vma,
-                unsigned long addr, gfp_t gfp_flags)
+                unsigned long addr, gfp_t gfp_flags, struct mempolicy **mpol)
 {
         return NODE_DATA(0)->node_zonelists + gfp_zone(gfp_flags);
 }
4 changes: 3 additions & 1 deletion trunk/mm/hugetlb.c
@@ -71,8 +71,9 @@ static struct page *dequeue_huge_page(struct vm_area_struct *vma,
 {
         int nid;
         struct page *page = NULL;
+        struct mempolicy *mpol;
         struct zonelist *zonelist = huge_zonelist(vma, address,
-                                        htlb_alloc_mask);
+                                        htlb_alloc_mask, &mpol);
         struct zone **z;

         for (z = zonelist->zones; *z; z++) {
@@ -87,6 +88,7 @@ static struct page *dequeue_huge_page(struct vm_area_struct *vma,
                         break;
                 }
         }
+        mpol_free(mpol);        /* unref if mpol !NULL */
         return page;
 }

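Taken together, the two hunks above make dequeue_huge_page() responsible for dropping the policy reference that huge_zonelist() may now hand back. Below is a condensed, illustrative sketch of that caller pattern; example_dequeue and gfp_mask are placeholder names, not part of the patch, and the cpuset and hugepage free-list checks are elided.

#include <linux/mm.h>
#include <linux/mempolicy.h>

/* Illustrative only: mirrors the shape of dequeue_huge_page() above. */
static struct page *example_dequeue(struct vm_area_struct *vma,
                                    unsigned long addr, gfp_t gfp_mask)
{
        struct mempolicy *mpol;
        struct page *page = NULL;
        struct zonelist *zonelist = huge_zonelist(vma, addr, gfp_mask, &mpol);
        struct zone **z;

        for (z = zonelist->zones; *z; z++) {
                /* ... cpuset and hugepage free-list checks elided ... */
        }

        mpol_free(mpol);        /* unrefs only if mpol != NULL ('BIND case) */
        return page;
}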
79 changes: 70 additions & 9 deletions trunk/mm/mempolicy.c
@@ -1077,21 +1077,37 @@ asmlinkage long compat_sys_mbind(compat_ulong_t start, compat_ulong_t len,

 #endif

-/* Return effective policy for a VMA */
+/*
+ * get_vma_policy(@task, @vma, @addr)
+ * @task - task for fallback if vma policy == default
+ * @vma - virtual memory area whose policy is sought
+ * @addr - address in @vma for shared policy lookup
+ *
+ * Returns effective policy for a VMA at specified address.
+ * Falls back to @task or system default policy, as necessary.
+ * Returned policy has extra reference count if shared, vma,
+ * or some other task's policy [show_numa_maps() can pass
+ * @task != current].  It is the caller's responsibility to
+ * free the reference in these cases.
+ */
 static struct mempolicy * get_vma_policy(struct task_struct *task,
                 struct vm_area_struct *vma, unsigned long addr)
 {
         struct mempolicy *pol = task->mempolicy;
+        int shared_pol = 0;

         if (vma) {
-                if (vma->vm_ops && vma->vm_ops->get_policy)
+                if (vma->vm_ops && vma->vm_ops->get_policy) {
                         pol = vma->vm_ops->get_policy(vma, addr);
-                else if (vma->vm_policy &&
+                        shared_pol = 1; /* if pol non-NULL, add ref below */
+                } else if (vma->vm_policy &&
                                 vma->vm_policy->policy != MPOL_DEFAULT)
                         pol = vma->vm_policy;
         }
         if (!pol)
                 pol = &default_policy;
+        else if (!shared_pol && pol != current->mempolicy)
+                mpol_get(pol);  /* vma or other task's policy */
         return pol;
 }

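The comment added above establishes a conditional ownership rule: a caller of get_vma_policy() must drop the extra reference unless the returned policy is the system default policy or the calling task's own policy. A hypothetical helper, not part of this patch, that captures the check the callers further down (alloc_page_vma(), show_numa_map()) perform inline:

/*
 * Hypothetical helper: drop the reference get_vma_policy() may have
 * taken.  The default policy and current's own policy are returned
 * without an extra reference and must not be freed here.
 */
static void example_put_vma_policy(struct mempolicy *pol)
{
        if (pol != &default_policy && pol != current->mempolicy)
                __mpol_free(pol);
}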
@@ -1207,19 +1223,45 @@ static inline unsigned interleave_nid(struct mempolicy *pol,
 }

 #ifdef CONFIG_HUGETLBFS
-/* Return a zonelist suitable for a huge page allocation. */
+/*
+ * huge_zonelist(@vma, @addr, @gfp_flags, @mpol)
+ * @vma = virtual memory area whose policy is sought
+ * @addr = address in @vma for shared policy lookup and interleave policy
+ * @gfp_flags = for requested zone
+ * @mpol = pointer to mempolicy pointer for reference counted 'BIND policy
+ *
+ * Returns a zonelist suitable for a huge page allocation.
+ * If the effective policy is 'BIND, returns pointer to policy's zonelist.
+ * If it is also a policy for which get_vma_policy() returns an extra
+ * reference, we must hold that reference until after allocation.
+ * In that case, return policy via @mpol so hugetlb allocation can drop
+ * the reference.  For non-'BIND referenced policies, we can/do drop the
+ * reference here, so the caller doesn't need to know about the special case
+ * for default and current task policy.
+ */
 struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr,
-                                gfp_t gfp_flags)
+                                gfp_t gfp_flags, struct mempolicy **mpol)
 {
         struct mempolicy *pol = get_vma_policy(current, vma, addr);
+        struct zonelist *zl;

+        *mpol = NULL;           /* probably no unref needed */
         if (pol->policy == MPOL_INTERLEAVE) {
                 unsigned nid;

                 nid = interleave_nid(pol, vma, addr, HPAGE_SHIFT);
+                __mpol_free(pol);               /* finished with pol */
                 return NODE_DATA(nid)->node_zonelists + gfp_zone(gfp_flags);
         }
-        return zonelist_policy(GFP_HIGHUSER, pol);
+
+        zl = zonelist_policy(GFP_HIGHUSER, pol);
+        if (unlikely(pol != &default_policy && pol != current->mempolicy)) {
+                if (pol->policy != MPOL_BIND)
+                        __mpol_free(pol);       /* finished with pol */
+                else
+                        *mpol = pol;    /* unref needed after allocation */
+        }
+        return zl;
 }
 #endif

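Because a 'BIND policy's zonelist is the policy's own, the reference taken by get_vma_policy() has to survive until the allocation is finished; that is the one case huge_zonelist() now hands back through @mpol instead of dropping internally. A hedged sketch of the expected bracketing in a hugetlb allocation path follows; the names are illustrative and the real path dequeues from the reserved hugepage pool as in trunk/mm/hugetlb.c above, with a plain __alloc_pages() call standing in for that step here.

/* Illustrative only: hold the 'BIND reference across the allocation. */
static struct page *example_alloc_huge(struct vm_area_struct *vma,
                                       unsigned long addr, gfp_t gfp_mask)
{
        struct mempolicy *mpol;
        struct zonelist *zl = huge_zonelist(vma, addr, gfp_mask, &mpol);
        struct page *page;

        page = __alloc_pages(gfp_mask, HPAGE_SHIFT - PAGE_SHIFT, zl);
        mpol_free(mpol);        /* zl may belong to mpol; safe to unref now */
        return page;
}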
@@ -1264,6 +1306,7 @@ struct page *
 alloc_page_vma(gfp_t gfp, struct vm_area_struct *vma, unsigned long addr)
 {
         struct mempolicy *pol = get_vma_policy(current, vma, addr);
+        struct zonelist *zl;

         cpuset_update_task_memory_state();

@@ -1273,7 +1316,19 @@ alloc_page_vma(gfp_t gfp, struct vm_area_struct *vma, unsigned long addr)
                 nid = interleave_nid(pol, vma, addr, PAGE_SHIFT);
                 return alloc_page_interleave(gfp, 0, nid);
         }
-        return __alloc_pages(gfp, 0, zonelist_policy(gfp, pol));
+        zl = zonelist_policy(gfp, pol);
+        if (pol != &default_policy && pol != current->mempolicy) {
+                /*
+                 * slow path: ref counted policy -- shared or vma
+                 */
+                struct page *page = __alloc_pages(gfp, 0, zl);
+                __mpol_free(pol);
+                return page;
+        }
+        /*
+         * fast path: default or task policy
+         */
+        return __alloc_pages(gfp, 0, zl);
 }

 /**
@@ -1872,6 +1927,7 @@ int show_numa_map(struct seq_file *m, void *v)
         struct numa_maps *md;
         struct file *file = vma->vm_file;
         struct mm_struct *mm = vma->vm_mm;
+        struct mempolicy *pol;
         int n;
         char buffer[50];

@@ -1882,8 +1938,13 @@ int show_numa_map(struct seq_file *m, void *v)
         if (!md)
                 return 0;

-        mpol_to_str(buffer, sizeof(buffer),
-                        get_vma_policy(priv->task, vma, vma->vm_start));
+        pol = get_vma_policy(priv->task, vma, vma->vm_start);
+        mpol_to_str(buffer, sizeof(buffer), pol);
+        /*
+         * unref shared or other task's mempolicy
+         */
+        if (pol != &default_policy && pol != current->mempolicy)
+                __mpol_free(pol);

         seq_printf(m, "%08lx %s", vma->vm_start, buffer);

