Skip to content

Commit

Permalink
[PATCH] Remove old node based policy interface from mempolicy.c
Browse files Browse the repository at this point in the history
mempolicy.c contains provisional interface for huge page allocation based on
node numbers.  This is in use in SLES9 but was never used (AFAIK) in upstream
versions of Linux.

Huge page allocations now use zonelists to figure out where to allocate pages.
The use of zonelists allows us to find the closest huge page, which takes the
NUMA distance into account for huge page allocations.

Remove the obsolete functions.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Cc: Andi Kleen <ak@muc.de>
Acked-by: William Lee Irwin III <wli@holomorphy.com>
Cc: Adam Litke <agl@us.ibm.com>
Acked-by: Paul Jackson <pj@sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
  • Loading branch information
Christoph Lameter authored and Linus Torvalds committed Jan 6, 2006
1 parent 5da7ca8 commit 21abb14
Show file tree
Hide file tree
Showing 2 changed files with 0 additions and 67 deletions.
19 changes: 0 additions & 19 deletions include/linux/mempolicy.h
Original file line number Diff line number Diff line change
Expand Up @@ -109,14 +109,6 @@ static inline int mpol_equal(struct mempolicy *a, struct mempolicy *b)

#define mpol_set_vma_default(vma) ((vma)->vm_policy = NULL)

/*
* Hugetlb policy. The i386 hugetlb implementation so far works with node
* numbers instead of zone lists, so give it these special node-based
* interfaces for now. Both take the vma and faulting address so the
* per-VMA memory policy can be consulted.
*/
extern int mpol_first_node(struct vm_area_struct *vma, unsigned long addr);
extern int mpol_node_valid(int nid, struct vm_area_struct *vma,
unsigned long addr);

/*
* Tree of shared policies for a shared memory region.
* Maintain the policies in a pseudo mm that contains vmas. The vmas
Expand Down Expand Up @@ -184,17 +176,6 @@ static inline struct mempolicy *mpol_copy(struct mempolicy *old)
return NULL;
}

/* !CONFIG_NUMA stub: with no policy support the local node is the only choice. */
static inline int mpol_first_node(struct vm_area_struct *vma, unsigned long a)
{
return numa_node_id();
}

/* !CONFIG_NUMA stub: without policies every node is acceptable. */
static inline int
mpol_node_valid(int nid, struct vm_area_struct *vma, unsigned long a)
{
return 1;
}

struct shared_policy {};

static inline int mpol_set_shared_policy(struct shared_policy *info,
Expand Down
48 changes: 0 additions & 48 deletions mm/mempolicy.c
Original file line number Diff line number Diff line change
Expand Up @@ -960,54 +960,6 @@ void __mpol_free(struct mempolicy *p)
kmem_cache_free(policy_cache, p);
}

/*
* Hugetlb policy. Same as above, just works with node numbers instead of
* zonelists.
*/

/* Find first node suitable for an allocation */
int mpol_first_node(struct vm_area_struct *vma, unsigned long addr)
{
struct mempolicy *pol = get_vma_policy(current, vma, addr);

switch (pol->policy) {
case MPOL_DEFAULT:
return numa_node_id();
case MPOL_BIND:
return pol->v.zonelist->zones[0]->zone_pgdat->node_id;
case MPOL_INTERLEAVE:
return interleave_nodes(pol);
case MPOL_PREFERRED:
return pol->v.preferred_node >= 0 ?
pol->v.preferred_node : numa_node_id();
}
BUG();
return 0;
}

/* Find secondary valid nodes for an allocation */
/*
 * Check whether @nid is an acceptable secondary node for an allocation
 * under the memory policy in effect for @vma at @addr. Returns 1 if the
 * node may be used, 0 otherwise.
 */
int mpol_node_valid(int nid, struct vm_area_struct *vma, unsigned long addr)
{
	struct mempolicy *pol = get_vma_policy(current, vma, addr);
	struct zone **z;

	switch (pol->policy) {
	case MPOL_BIND:
		/* Only nodes represented in the bound zonelist qualify. */
		for (z = pol->v.zonelist->zones; *z; z++)
			if ((*z)->zone_pgdat->node_id == nid)
				return 1;
		return 0;
	case MPOL_PREFERRED:
	case MPOL_DEFAULT:
	case MPOL_INTERLEAVE:
		/* These policies only express preference; any node is valid. */
		return 1;
	default:
		/* Unknown policy value: corrupted mempolicy. */
		BUG();
		return 0;
	}
}

/*
* Shared memory backing store policy support.
*
Expand Down

0 comments on commit 21abb14

Please sign in to comment.