diff --git a/[refs] b/[refs]
index cc6bccf30fac..b212961e1798 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 0435a2fdcb50f8e53a67e98c27708e5d9396b71a
+refs/heads/master: 90deb78839faedd194b65d419dbd9cba981e1922
diff --git a/trunk/mm/memcontrol.c b/trunk/mm/memcontrol.c
index 4cc962d566ce..1a2020d72ca0 100644
--- a/trunk/mm/memcontrol.c
+++ b/trunk/mm/memcontrol.c
@@ -2538,11 +2538,7 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
 	bool anon;
 
 	lock_page_cgroup(pc);
-	if (unlikely(PageCgroupUsed(pc))) {
-		unlock_page_cgroup(pc);
-		__mem_cgroup_cancel_charge(memcg, nr_pages);
-		return;
-	}
+	VM_BUG_ON(PageCgroupUsed(pc));
 	/*
 	 * we don't need page_cgroup_lock about tail pages, becase they are not
 	 * accessed by any other context at this point.
@@ -2807,8 +2803,19 @@ static int __mem_cgroup_try_charge_swapin(struct mm_struct *mm,
 					  struct mem_cgroup **memcgp)
 {
 	struct mem_cgroup *memcg;
+	struct page_cgroup *pc;
 	int ret;
 
+	pc = lookup_page_cgroup(page);
+	/*
+	 * Every swap fault against a single page tries to charge the
+	 * page, bail as early as possible.  shmem_unuse() encounters
+	 * already charged pages, too.  The USED bit is protected by
+	 * the page lock, which serializes swap cache removal, which
+	 * in turn serializes uncharging.
+	 */
+	if (PageCgroupUsed(pc))
+		return 0;
 	if (!do_swap_account)
 		goto charge_cur_mm;
 	/*
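
The hunks above remove the "already charged" recovery path from __mem_cgroup_commit_charge() (replacing it with a VM_BUG_ON) and instead bail out early in __mem_cgroup_try_charge_swapin() when the page_cgroup is already in use. Below is a minimal userspace C sketch of that pattern, not kernel code: page_state, try_charge_swapin() and commit_charge() are made-up stand-ins, and the pthread mutex stands in for the page lock that the diff's comment says protects the USED bit.

/*
 * Sketch only: models the check-early-then-assert-at-commit pattern
 * from the diff.  All names here are hypothetical.
 */
#include <assert.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct page_state {
	pthread_mutex_t page_lock;	/* stands in for the page lock */
	bool used;			/* stands in for PageCgroupUsed() */
};

/* Bail out early if the page is already charged (repeated swap faults). */
static int try_charge_swapin(struct page_state *ps)
{
	bool charged;

	pthread_mutex_lock(&ps->page_lock);
	charged = ps->used;
	pthread_mutex_unlock(&ps->page_lock);
	if (charged)
		return 0;	/* nothing to do, caller skips the commit */

	/* ... reserve the charge here ... */
	return 1;
}

/* By construction, commit only ever sees an uncharged page. */
static void commit_charge(struct page_state *ps)
{
	pthread_mutex_lock(&ps->page_lock);
	assert(!ps->used);	/* analogue of VM_BUG_ON(PageCgroupUsed(pc)) */
	ps->used = true;
	pthread_mutex_unlock(&ps->page_lock);
}

int main(void)
{
	struct page_state ps = { PTHREAD_MUTEX_INITIALIZER, false };

	if (try_charge_swapin(&ps))
		commit_charge(&ps);
	/* A second fault against the same page now bails in try_charge. */
	printf("charged again? %s\n", try_charge_swapin(&ps) ? "yes" : "no");
	return 0;
}

The ordering is the point: once the early check has run under the lock that also serializes uncharging, the commit path can assert the page is unused rather than having to undo a double charge.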