powerpc: Use RCU based pte freeing mechanism for all powerpc
Refactor the RCU-based pte freeing code that was used on ppc64 so that it
can be used on all powerpc.

Additionally, refactor pte_free() and pte_free_kernel() into common code
shared between ppc32 and ppc64.

Signed-off-by: Kumar Gala <galak@kernel.crashing.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
Kumar Gala authored and Paul Mackerras committed Dec 3, 2008
1 parent df3b861 commit 0186f47
Showing 8 changed files with 167 additions and 175 deletions.
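
Before the per-file diffs, a sketch of the mechanism being generalized: page-table pages are queued on a per-CPU batch and handed to call_rcu(), so the actual free happens only after an RCU grace period, when no lockless page-table walker can still hold a pointer into the page. The following is a minimal illustration of that pattern, with hypothetical names (pt_batch, pt_defer_free); the commit's real implementation is the new arch/powerpc/mm/pgtable.c shown further below.

#include <linux/kernel.h>	/* container_of() */
#include <linux/mm.h>		/* free_page() */
#include <linux/rcupdate.h>	/* struct rcu_head, call_rcu() */

struct pt_batch {
	struct rcu_head rcu;	/* links this batch into the RCU callback list */
	unsigned int index;	/* number of pages queued so far */
	void *pages[16];	/* page-table pages awaiting a grace period */
};

static void pt_batch_free(struct rcu_head *head)
{
	struct pt_batch *b = container_of(head, struct pt_batch, rcu);
	unsigned int i;

	/* Grace period over: no RCU reader can still see these pages. */
	for (i = 0; i < b->index; i++)
		free_page((unsigned long)b->pages[i]);
	free_page((unsigned long)b);
}

static void pt_defer_free(struct pt_batch *b)
{
	/* Defer the real free until all current RCU readers have finished. */
	call_rcu(&b->rcu, pt_batch_free);
}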
11 changes: 8 additions & 3 deletions arch/powerpc/include/asm/pgalloc-32.h
@@ -3,6 +3,8 @@

#include <linux/threads.h>

#define PTE_NONCACHE_NUM 0 /* dummy for now to share code w/ppc64 */

extern void __bad_pte(pmd_t *pmd);

extern pgd_t *pgd_alloc(struct mm_struct *mm);
@@ -33,10 +35,13 @@ extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);

extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr);
extern pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long addr);
extern void pte_free_kernel(struct mm_struct *mm, pte_t *pte);
extern void pte_free(struct mm_struct *mm, pgtable_t pte);

#define __pte_free_tlb(tlb, pte) pte_free((tlb)->mm, (pte))
static inline void pgtable_free(pgtable_free_t pgf)
{
void *p = (void *)(pgf.val & ~PGF_CACHENUM_MASK);

free_page((unsigned long)p);
}

#define check_pgt_cache() do { } while (0)

34 changes: 0 additions & 34 deletions arch/powerpc/include/asm/pgalloc-64.h
@@ -7,7 +7,6 @@
* 2 of the License, or (at your option) any later version.
*/

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>
@@ -108,31 +107,6 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
return page;
}

static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
free_page((unsigned long)pte);
}

static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
{
pgtable_page_dtor(ptepage);
__free_page(ptepage);
}

#define PGF_CACHENUM_MASK 0x7

typedef struct pgtable_free {
unsigned long val;
} pgtable_free_t;

static inline pgtable_free_t pgtable_free_cache(void *p, int cachenum,
unsigned long mask)
{
BUG_ON(cachenum > PGF_CACHENUM_MASK);

return (pgtable_free_t){.val = ((unsigned long) p & ~mask) | cachenum};
}

static inline void pgtable_free(pgtable_free_t pgf)
{
void *p = (void *)(pgf.val & ~PGF_CACHENUM_MASK);
@@ -144,14 +118,6 @@ static inline void pgtable_free(pgtable_free_t pgf)
kmem_cache_free(pgtable_cache[cachenum], p);
}

extern void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf);

#define __pte_free_tlb(tlb,ptepage) \
do { \
pgtable_page_dtor(ptepage); \
pgtable_free_tlb(tlb, pgtable_free_cache(page_address(ptepage), \
PTE_NONCACHE_NUM, PTE_TABLE_SIZE-1)); \
} while (0)
#define __pmd_free_tlb(tlb, pmd) \
pgtable_free_tlb(tlb, pgtable_free_cache(pmd, \
PMD_CACHE_NUM, PMD_TABLE_SIZE-1))
41 changes: 41 additions & 0 deletions arch/powerpc/include/asm/pgalloc.h
@@ -2,11 +2,52 @@
#define _ASM_POWERPC_PGALLOC_H
#ifdef __KERNEL__

#include <linux/mm.h>

static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
free_page((unsigned long)pte);
}

static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
{
pgtable_page_dtor(ptepage);
__free_page(ptepage);
}

typedef struct pgtable_free {
unsigned long val;
} pgtable_free_t;

#define PGF_CACHENUM_MASK 0x7

static inline pgtable_free_t pgtable_free_cache(void *p, int cachenum,
unsigned long mask)
{
BUG_ON(cachenum > PGF_CACHENUM_MASK);

return (pgtable_free_t){.val = ((unsigned long) p & ~mask) | cachenum};
}

#ifdef CONFIG_PPC64
#include <asm/pgalloc-64.h>
#else
#include <asm/pgalloc-32.h>
#endif

extern void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf);

#ifdef CONFIG_SMP
#define __pte_free_tlb(tlb,ptepage) \
do { \
pgtable_page_dtor(ptepage); \
pgtable_free_tlb(tlb, pgtable_free_cache(page_address(ptepage), \
PTE_NONCACHE_NUM, PTE_TABLE_SIZE-1)); \
} while (0)
#else
#define __pte_free_tlb(tlb, pte) pte_free((tlb)->mm, (pte))
#endif


#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_PGALLOC_H */
2 changes: 1 addition & 1 deletion arch/powerpc/mm/Makefile
@@ -6,7 +6,7 @@ ifeq ($(CONFIG_PPC64),y)
EXTRA_CFLAGS += -mno-minimal-toc
endif

-obj-y := fault.o mem.o \
+obj-y := fault.o mem.o pgtable.o \
init_$(CONFIG_WORD_SIZE).o \
pgtable_$(CONFIG_WORD_SIZE).o \
mmu_context_$(CONFIG_WORD_SIZE).o
30 changes: 0 additions & 30 deletions arch/powerpc/mm/hash_low_32.S
@@ -35,36 +35,6 @@ mmu_hash_lock:
.space 4
#endif /* CONFIG_SMP */

/*
* Sync CPUs with hash_page taking & releasing the hash
* table lock
*/
#ifdef CONFIG_SMP
.text
_GLOBAL(hash_page_sync)
mfmsr r10
rlwinm r0,r10,0,17,15 /* clear bit 16 (MSR_EE) */
mtmsr r0
lis r8,mmu_hash_lock@h
ori r8,r8,mmu_hash_lock@l
lis r0,0x0fff
b 10f
11: lwz r6,0(r8)
cmpwi 0,r6,0
bne 11b
10: lwarx r6,0,r8
cmpwi 0,r6,0
bne- 11b
stwcx. r0,0,r8
bne- 10b
isync
eieio
li r0,0
stw r0,0(r8)
mtmsr r10
blr
#endif /* CONFIG_SMP */

/*
* Load a PTE into the hash table, if possible.
* The address is in r4, and r3 contains an access flag:
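
Removing hash_page_sync() is sound under the new scheme: the walker no longer needs an explicit rendezvous with pte_free(), because a pte page is not returned to the allocator until a grace period has elapsed and every walker that could have seen the page has finished. Roughly, the reader side of that bargain looks like the sketch below (illustrative only, with a hypothetical helper name; the real ppc32 walker is the hash_page assembly in this file).

#include <linux/mm.h>
#include <linux/rcupdate.h>

/*
 * Hypothetical lockless read of a pte: while we are inside the RCU
 * read-side critical section, a deferred free queued by
 * pgtable_free_tlb() cannot complete, so the pte page stays valid.
 */
static pte_t read_pte_locklessly(pmd_t *pmd, unsigned long addr)
{
	pte_t *ptep, entry;

	rcu_read_lock();
	ptep = pte_offset_kernel(pmd, addr);	/* page pinned by RCU */
	entry = *ptep;
	rcu_read_unlock();

	return entry;
}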
117 changes: 117 additions & 0 deletions arch/powerpc/mm/pgtable.c
@@ -0,0 +1,117 @@
/*
* This file contains common routines for dealing with free of page tables
*
* Derived from arch/powerpc/mm/tlb_64.c:
* Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
*
* Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
* and Cort Dougan (PReP) (cort@cs.nmt.edu)
* Copyright (C) 1996 Paul Mackerras
*
* Derived from "arch/i386/mm/init.c"
* Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
*
* Dave Engebretsen <engebret@us.ibm.com>
* Rework for PPC64 port.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>

static DEFINE_PER_CPU(struct pte_freelist_batch *, pte_freelist_cur);
static unsigned long pte_freelist_forced_free;

struct pte_freelist_batch
{
struct rcu_head rcu;
unsigned int index;
pgtable_free_t tables[0];
};

#define PTE_FREELIST_SIZE \
((PAGE_SIZE - sizeof(struct pte_freelist_batch)) \
/ sizeof(pgtable_free_t))

static void pte_free_smp_sync(void *arg)
{
/* Do nothing, just ensure we sync with all CPUs */
}

/* This is only called when we are critically out of memory
* (and fail to get a page in pte_free_tlb).
*/
static void pgtable_free_now(pgtable_free_t pgf)
{
pte_freelist_forced_free++;

smp_call_function(pte_free_smp_sync, NULL, 1);

pgtable_free(pgf);
}

static void pte_free_rcu_callback(struct rcu_head *head)
{
struct pte_freelist_batch *batch =
container_of(head, struct pte_freelist_batch, rcu);
unsigned int i;

for (i = 0; i < batch->index; i++)
pgtable_free(batch->tables[i]);

free_page((unsigned long)batch);
}

static void pte_free_submit(struct pte_freelist_batch *batch)
{
INIT_RCU_HEAD(&batch->rcu);
call_rcu(&batch->rcu, pte_free_rcu_callback);
}

void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf)
{
/* This is safe since tlb_gather_mmu has disabled preemption */
cpumask_t local_cpumask = cpumask_of_cpu(smp_processor_id());
struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);

if (atomic_read(&tlb->mm->mm_users) < 2 ||
cpus_equal(tlb->mm->cpu_vm_mask, local_cpumask)) {
pgtable_free(pgf);
return;
}

if (*batchp == NULL) {
*batchp = (struct pte_freelist_batch *)__get_free_page(GFP_ATOMIC);
if (*batchp == NULL) {
pgtable_free_now(pgf);
return;
}
(*batchp)->index = 0;
}
(*batchp)->tables[(*batchp)->index++] = pgf;
if ((*batchp)->index == PTE_FREELIST_SIZE) {
pte_free_submit(*batchp);
*batchp = NULL;
}
}

void pte_free_finish(void)
{
/* This is safe since tlb_gather_mmu has disabled preemption */
struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);

if (*batchp == NULL)
return;
pte_free_submit(*batchp);
*batchp = NULL;
}
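
As a quick check on the batch sizing in the new file (a back-of-the-envelope estimate assuming a 64-bit build with 4 KiB pages): struct pte_freelist_batch is a struct rcu_head (two pointers, 16 bytes) plus an unsigned int index, padded to 24 bytes, and each pgtable_free_t is 8 bytes, so PTE_FREELIST_SIZE works out to (4096 - 24) / 8 = 509 entries per batch page before the batch is handed to RCU via pte_free_submit().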
21 changes: 0 additions & 21 deletions arch/powerpc/mm/pgtable_32.c
@@ -48,10 +48,6 @@ EXPORT_SYMBOL(ioremap_bot); /* aka VMALLOC_END */

extern char etext[], _stext[];

#if defined(CONFIG_SMP) && defined(CONFIG_PPC_STD_MMU_32)
extern void hash_page_sync(void);
#endif

#ifdef HAVE_BATS
extern phys_addr_t v_mapped_by_bats(unsigned long va);
extern unsigned long p_mapped_by_bats(phys_addr_t pa);
@@ -125,23 +121,6 @@ pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
return ptepage;
}

void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
#if defined(CONFIG_SMP) && defined(CONFIG_PPC_STD_MMU_32)
hash_page_sync();
#endif
free_page((unsigned long)pte);
}

void pte_free(struct mm_struct *mm, pgtable_t ptepage)
{
#if defined(CONFIG_SMP) && defined(CONFIG_PPC_STD_MMU_32)
hash_page_sync();
#endif
pgtable_page_dtor(ptepage);
__free_page(ptepage);
}

void __iomem *
ioremap(phys_addr_t addr, unsigned long size)
{