From 2195e0e0295fc1618d30b593b1fc2ccb19df70e9 Mon Sep 17 00:00:00 2001
From: Michal Hocko
Date: Fri, 4 Jan 2013 15:35:12 -0800
Subject: [PATCH]

--- yaml ---
r: 348277
b: refs/heads/master
c: 53a59fc67f97374758e63a9c785891ec62324c81
h: refs/heads/master
i:
  348275: 06ee7aaf39dd495b277f61dfac5d8411292c2242
v: v3
---
 [refs]                          | 2 +-
 trunk/include/asm-generic/tlb.h | 9 +++++++++
 trunk/mm/memory.c               | 5 +++++
 3 files changed, 15 insertions(+), 1 deletion(-)

diff --git a/[refs] b/[refs]
index 8c9cd34971bf..fdff53fd42c7 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: a458431e176ddb27e8ef8b98c2a681b217337393
+refs/heads/master: 53a59fc67f97374758e63a9c785891ec62324c81
diff --git a/trunk/include/asm-generic/tlb.h b/trunk/include/asm-generic/tlb.h
index ed6642ad03e0..25f01d0bc149 100644
--- a/trunk/include/asm-generic/tlb.h
+++ b/trunk/include/asm-generic/tlb.h
@@ -78,6 +78,14 @@ struct mmu_gather_batch {
 #define MAX_GATHER_BATCH	\
 	((PAGE_SIZE - sizeof(struct mmu_gather_batch)) / sizeof(void *))
 
+/*
+ * Limit the maximum number of mmu_gather batches to reduce a risk of soft
+ * lockups for non-preemptible kernels on huge machines when a lot of memory
+ * is zapped during unmapping.
+ * 10K pages freed at once should be safe even without a preemption point.
+ */
+#define MAX_GATHER_BATCH_COUNT	(10000UL/MAX_GATHER_BATCH)
+
 /* struct mmu_gather is an opaque type used by the mm code for passing
  * around any data needed by arch specific code for tlb_remove_page.
  */
@@ -96,6 +104,7 @@ struct mmu_gather {
 	struct mmu_gather_batch *active;
 	struct mmu_gather_batch	local;
 	struct page		*__pages[MMU_GATHER_BUNDLE];
+	unsigned int		batch_count;
 };
 
 #define HAVE_GENERIC_MMU_GATHER
diff --git a/trunk/mm/memory.c b/trunk/mm/memory.c
index e0a9b0ce4f10..49fb1cf08611 100644
--- a/trunk/mm/memory.c
+++ b/trunk/mm/memory.c
@@ -184,10 +184,14 @@ static int tlb_next_batch(struct mmu_gather *tlb)
 		return 1;
 	}
 
+	if (tlb->batch_count == MAX_GATHER_BATCH_COUNT)
+		return 0;
+
 	batch = (void *)__get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);
 	if (!batch)
 		return 0;
 
+	tlb->batch_count++;
 	batch->next = NULL;
 	batch->nr   = 0;
 	batch->max  = MAX_GATHER_BATCH;
@@ -216,6 +220,7 @@ void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm)
 	tlb->local.nr   = 0;
 	tlb->local.max  = ARRAY_SIZE(tlb->__pages);
 	tlb->active     = &tlb->local;
+	tlb->batch_count = 0;
 
 #ifdef CONFIG_HAVE_RCU_TABLE_FREE
 	tlb->batch = NULL;
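
For illustration, below is a minimal standalone userspace sketch of the batching cap this patch introduces; it is not kernel code. The struct layout and the MAX_GATHER_BATCH / MAX_GATHER_BATCH_COUNT arithmetic mirror the patch, while new_batch(), tlb_flush(), the flushes counter, and main() are hypothetical stand-ins for the kernel's page allocation, flush, and zap paths. Assuming a 64-bit build with 4 KiB pages, the batch header (next, nr, max) is 16 bytes, so MAX_GATHER_BATCH works out to (4096 - 16) / 8 = 510 page pointers per batch, and the cap is 10000 / 510 = 19 dynamically allocated batches: roughly 10K pages can be gathered before tlb_next_batch() refuses to grow the chain and the caller is forced to flush.

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096UL

struct mmu_gather_batch {
	struct mmu_gather_batch *next;
	unsigned int nr;		/* pages queued in this batch */
	unsigned int max;		/* capacity of this batch */
	void *pages[];			/* fills the rest of the page */
};

/* One page holds the header plus this many page pointers (510 here). */
#define MAX_GATHER_BATCH \
	((PAGE_SIZE - sizeof(struct mmu_gather_batch)) / sizeof(void *))

/* Cap the chain so at most ~10K pages are gathered between flushes. */
#define MAX_GATHER_BATCH_COUNT	(10000UL / MAX_GATHER_BATCH)

struct mmu_gather {
	struct mmu_gather_batch *first;	/* stand-in for the embedded tlb->local */
	struct mmu_gather_batch *active;
	unsigned int batch_count;	/* dynamically allocated batches only */
	unsigned long flushes;		/* illustrative statistic */
};

static struct mmu_gather_batch *new_batch(void)
{
	/* stand-in for __get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0) */
	struct mmu_gather_batch *b = malloc(PAGE_SIZE);
	if (b) {
		b->next = NULL;
		b->nr = 0;
		b->max = MAX_GATHER_BATCH;
	}
	return b;
}

static int tlb_next_batch(struct mmu_gather *tlb)
{
	struct mmu_gather_batch *batch;

	/* The patch's core change: refuse to grow the chain past the cap. */
	if (tlb->batch_count == MAX_GATHER_BATCH_COUNT)
		return 0;
	batch = new_batch();
	if (!batch)
		return 0;
	tlb->batch_count++;
	tlb->active->next = batch;
	tlb->active = batch;
	return 1;
}

static void tlb_flush(struct mmu_gather *tlb)
{
	struct mmu_gather_batch *b = tlb->first->next;

	/* Free every batch but the first; in the kernel this is where the
	 * gathered pages are actually released (a preemption opportunity). */
	while (b) {
		struct mmu_gather_batch *next = b->next;
		free(b);
		b = next;
	}
	tlb->first->next = NULL;
	tlb->first->nr = 0;
	tlb->active = tlb->first;
	tlb->batch_count = 0;
	tlb->flushes++;
}

/* Queue one page; 0 means "gather is full, flush before retrying". */
static int tlb_remove_page(struct mmu_gather *tlb, void *page)
{
	if (tlb->active->nr == tlb->active->max && !tlb_next_batch(tlb))
		return 0;
	tlb->active->pages[tlb->active->nr++] = page;
	return 1;
}

int main(void)
{
	struct mmu_gather tlb = { .first = new_batch() };

	if (!tlb.first)
		return 1;
	tlb.active = tlb.first;

	/* Simulate zapping 1M pages: the cap forces a flush roughly every
	 * (MAX_GATHER_BATCH_COUNT + 1) * MAX_GATHER_BATCH (~10K) pages. */
	for (unsigned long i = 0; i < 1000000UL; i++)
		while (!tlb_remove_page(&tlb, (void *)(i + 1)))
			tlb_flush(&tlb);

	printf("%lu pages/batch, cap %lu batches, %lu forced flushes\n",
	       MAX_GATHER_BATCH, MAX_GATHER_BATCH_COUNT, tlb.flushes);
	tlb_flush(&tlb);	/* drop anything still gathered */
	free(tlb.first);
	return 0;
}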
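
Note the shape of the fix visible in the tlb_next_batch() hunk: the function already returned 0 when __get_free_pages() failed, and (as far as the surrounding kernel code goes) its callers already treat that as "flush what has been gathered so far and continue with the local batch". Checking batch_count before the allocation reuses that existing fallback, so the unmap loop gains a bounded flush point without any new flush or resched logic.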