diff --git a/[refs] b/[refs] index 5040ed7fda36..ebfbc1e0b2d7 100644 --- a/[refs] +++ b/[refs] @@ -1,2 +1,2 @@ --- -refs/heads/master: dc053733ea44babedb20266300b984d6add8b9e5 +refs/heads/master: d37dd5dcb955dd8c2cdd4eaef1f15d1b7ecbc379 diff --git a/trunk/mm/vmscan.c b/trunk/mm/vmscan.c index 7f3096137b8a..e73d0206dddd 100644 --- a/trunk/mm/vmscan.c +++ b/trunk/mm/vmscan.c @@ -1177,7 +1177,11 @@ int isolate_lru_page(struct page *page) } /* - * Are there way too many processes in the direct reclaim path already? + * A direct reclaimer may isolate SWAP_CLUSTER_MAX pages from the LRU list and + * then get rescheduled. When there is a massive number of tasks doing page + * allocation, such sleeping direct reclaimers may keep piling up on each CPU, + * the LRU list will go small and be scanned faster than necessary, leading to + * unnecessary swapping, thrashing and OOM. */ static int too_many_isolated(struct zone *zone, int file, struct scan_control *sc)