mm: rename USE_SPLIT_PTLOCKS to USE_SPLIT_PTE_PTLOCKS
We're going to introduce split page table lock for PMD level.  Let's
rename existing split ptlock for PTE level to avoid confusion.

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Tested-by: Alex Thorlton <athorlton@sgi.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: "Eric W . Biederman" <ebiederm@xmission.com>
Cc: "Paul E . McKenney" <paulmck@linux.vnet.ibm.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Dave Jones <davej@redhat.com>
Cc: David Howells <dhowells@redhat.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Kees Cook <keescook@chromium.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Michael Kerrisk <mtk.manpages@gmail.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Robin Holt <robinmholt@gmail.com>
Cc: Sedat Dilek <sedat.dilek@gmail.com>
Cc: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Kirill A. Shutemov authored and Linus Torvalds committed Nov 15, 2013
1 parent e9bb18c commit 57c1ffc
Showing 4 changed files with 13 additions and 13 deletions.
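
For orientation, the gist of the rename, condensed from the include/linux/mm.h and include/linux/mm_types.h hunks below (an illustrative summary, not additional code in this commit): when split PTE ptlocks are enabled, each page-table page carries its own spinlock in its struct page; otherwise every PTE-level update in an mm serializes on mm->page_table_lock.

/* Illustrative summary only -- the authoritative changes are in the hunks below. */
#define USE_SPLIT_PTE_PTLOCKS	(NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS)

#if USE_SPLIT_PTE_PTLOCKS
/* one spinlock per page-table page, embedded in its struct page */
#define pte_lockptr(mm, pmd)	({(void)(mm); __pte_lockptr(pmd_page(*(pmd)));})
#else
/* a single per-mm lock covers every page-table page */
#define pte_lockptr(mm, pmd)	({(void)(pmd); &(mm)->page_table_lock;})
#endif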
6 changes: 3 additions & 3 deletions arch/arm/mm/fault-armv.c
@@ -65,7 +65,7 @@ static int do_adjust_pte(struct vm_area_struct *vma, unsigned long address,
 	return ret;
 }
 
-#if USE_SPLIT_PTLOCKS
+#if USE_SPLIT_PTE_PTLOCKS
 /*
  * If we are using split PTE locks, then we need to take the page
  * lock here.  Otherwise we are using shared mm->page_table_lock
@@ -84,10 +84,10 @@ static inline void do_pte_unlock(spinlock_t *ptl)
 {
 	spin_unlock(ptl);
 }
-#else	/* !USE_SPLIT_PTLOCKS */
+#else	/* !USE_SPLIT_PTE_PTLOCKS */
 static inline void do_pte_lock(spinlock_t *ptl) {}
 static inline void do_pte_unlock(spinlock_t *ptl) {}
-#endif	/* USE_SPLIT_PTLOCKS */
+#endif	/* USE_SPLIT_PTE_PTLOCKS */
 
 static int adjust_pte(struct vm_area_struct *vma, unsigned long address,
 	unsigned long pfn)
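
A sketch of how the do_pte_lock()/do_pte_unlock() helpers above are consumed; this loosely follows adjust_pte() in the same file (abbreviated here, not part of the diff), which has to open-code the PTE locking:

/* Abbreviated sketch, not the literal fault-armv.c code: how the helpers
 * above are used once the page-table walk has reached the pmd covering
 * 'address' (adjust_pte() does the full pgd/pud/pmd walk first). */
static int adjust_pte_sketch(struct vm_area_struct *vma, unsigned long address,
			     unsigned long pfn, pmd_t *pmd)
{
	spinlock_t *ptl = pte_lockptr(vma->vm_mm, pmd);	/* split ptl or mm->page_table_lock */
	pte_t *pte = pte_offset_map(pmd, address);
	int ret;

	do_pte_lock(ptl);	/* empty in the !USE_SPLIT_PTE_PTLOCKS configuration */
	ret = do_adjust_pte(vma, address, pfn, pte);
	do_pte_unlock(ptl);

	pte_unmap(pte);
	return ret;
}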
6 changes: 3 additions & 3 deletions arch/x86/xen/mmu.c
@@ -796,7 +796,7 @@ static spinlock_t *xen_pte_lock(struct page *page, struct mm_struct *mm)
 {
 	spinlock_t *ptl = NULL;
 
-#if USE_SPLIT_PTLOCKS
+#if USE_SPLIT_PTE_PTLOCKS
 	ptl = __pte_lockptr(page);
 	spin_lock_nest_lock(ptl, &mm->page_table_lock);
 #endif
@@ -1637,7 +1637,7 @@ static inline void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn,
 
 		__set_pfn_prot(pfn, PAGE_KERNEL_RO);
 
-		if (level == PT_PTE && USE_SPLIT_PTLOCKS)
+		if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS)
 			__pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
 
 		xen_mc_issue(PARAVIRT_LAZY_MMU);
@@ -1671,7 +1671,7 @@ static inline void xen_release_ptpage(unsigned long pfn, unsigned level)
 	if (!PageHighMem(page)) {
 		xen_mc_batch();
 
-		if (level == PT_PTE && USE_SPLIT_PTLOCKS)
+		if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS)
 			__pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
 
 		__set_pfn_prot(pfn, PAGE_KERNEL);
6 changes: 3 additions & 3 deletions include/linux/mm.h
@@ -1316,7 +1316,7 @@ static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long a
 }
 #endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
 
-#if USE_SPLIT_PTLOCKS
+#if USE_SPLIT_PTE_PTLOCKS
 /*
  * We tuck a spinlock to guard each pagetable page into its struct page,
  * at page->private, with BUILD_BUG_ON to make sure that this will not
@@ -1329,14 +1329,14 @@ static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long a
 } while (0)
 #define pte_lock_deinit(page)	((page)->mapping = NULL)
 #define pte_lockptr(mm, pmd)	({(void)(mm); __pte_lockptr(pmd_page(*(pmd)));})
-#else	/* !USE_SPLIT_PTLOCKS */
+#else	/* !USE_SPLIT_PTE_PTLOCKS */
 /*
  * We use mm->page_table_lock to guard all pagetable pages of the mm.
  */
 #define pte_lock_init(page)	do {} while (0)
 #define pte_lock_deinit(page)	do {} while (0)
 #define pte_lockptr(mm, pmd)	({(void)(pmd); &(mm)->page_table_lock;})
-#endif /* USE_SPLIT_PTLOCKS */
+#endif /* USE_SPLIT_PTE_PTLOCKS */
 
 static inline void pgtable_page_ctor(struct page *page)
 {
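
Most generic mm code never names this lock directly: it goes through pte_offset_map_lock()/pte_unmap_unlock(), which resolve pte_lockptr() internally, so callers behave the same whether the split or the shared lock is compiled in. A minimal hypothetical caller (function name invented for illustration, not from this commit):

#include <linux/mm.h>

/* Hypothetical example of the usual caller pattern. */
static int pte_is_present(struct mm_struct *mm, pmd_t *pmd, unsigned long addr)
{
	spinlock_t *ptl;
	pte_t *pte;
	int ret;

	/* maps the PTE page and takes pte_lockptr(mm, pmd) for us */
	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	ret = pte_present(*pte);
	pte_unmap_unlock(pte, ptl);	/* drops the same lock, split or shared */

	return ret;
}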
8 changes: 4 additions & 4 deletions include/linux/mm_types.h
@@ -23,7 +23,7 @@
 
 struct address_space;
 
-#define USE_SPLIT_PTLOCKS	(NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS)
+#define USE_SPLIT_PTE_PTLOCKS	(NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS)
 
 /*
  * Each physical page in the system has a struct page associated with
@@ -141,7 +141,7 @@ struct page {
 						 * indicates order in the buddy
 						 * system if PG_buddy is set.
 						 */
-#if USE_SPLIT_PTLOCKS
+#if USE_SPLIT_PTE_PTLOCKS
 		spinlock_t ptl;
 #endif
 		struct kmem_cache *slab_cache;	/* SL[AU]B: Pointer to slab */
@@ -309,14 +309,14 @@ enum {
 	NR_MM_COUNTERS
 };
 
-#if USE_SPLIT_PTLOCKS && defined(CONFIG_MMU)
+#if USE_SPLIT_PTE_PTLOCKS && defined(CONFIG_MMU)
 #define SPLIT_RSS_COUNTING
 /* per-thread cached information, */
 struct task_rss_stat {
 	int events;	/* for synchronization threshold */
 	int count[NR_MM_COUNTERS];
 };
-#endif /* USE_SPLIT_PTLOCKS */
+#endif /* USE_SPLIT_PTE_PTLOCKS */
 
 struct mm_rss_stat {
 	atomic_long_t count[NR_MM_COUNTERS];
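
The SPLIT_RSS_COUNTING block kept under the renamed guard provides the per-thread RSS cache described by the comments above: each task accumulates counter deltas in task_rss_stat and only folds them into the shared atomic mm_rss_stat after a number of events, which avoids contending on the shared counters on the larger SMP configurations where split ptlocks are enabled. A simplified model of that batching (helper name and threshold invented for illustration; the kernel's actual implementation lives in mm/memory.c):

#include <linux/mm_types.h>
#include <linux/sched.h>

/* Simplified model, not the kernel's exact code; assumes a config with
 * SPLIT_RSS_COUNTING defined so current->rss_stat exists. */
#define RSS_EVENTS_THRESH	64	/* illustrative flush threshold */

static void add_mm_counter_cached(struct mm_struct *mm, int member, int val)
{
	struct task_rss_stat *stat = &current->rss_stat;

	stat->count[member] += val;
	if (++stat->events >= RSS_EVENTS_THRESH) {
		int i;

		/* flush every cached delta into the shared atomic counters */
		for (i = 0; i < NR_MM_COUNTERS; i++) {
			if (stat->count[i])
				atomic_long_add(stat->count[i], &mm->rss_stat.count[i]);
			stat->count[i] = 0;
		}
		stat->events = 0;
	}
}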
