
Commit

---
yaml
---
r: 230837
b: refs/heads/master
c: dfe076b
h: refs/heads/master
i:
  230835: aff16e7
v: v3
Daisuke Nishimura authored and Linus Torvalds committed Jan 14, 2011
1 parent 08a4444 commit 4332e75
Showing 2 changed files with 50 additions and 36 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 043d18b1e5bdfc4870b8a19d00f0d5c636a5c231
+refs/heads/master: dfe076b0971a783469bc2066e85d46e23c8acb1c
84 changes: 49 additions & 35 deletions trunk/mm/memcontrol.c
@@ -292,7 +292,6 @@ static struct move_charge_struct {
 	unsigned long moved_charge;
 	unsigned long moved_swap;
 	struct task_struct *moving_task;	/* a task moving charges */
-	struct mm_struct *mm;
 	wait_queue_head_t waitq;		/* a waitq for other context */
 } mc = {
 	.lock = __SPIN_LOCK_UNLOCKED(mc.lock),
@@ -4681,7 +4680,7 @@ static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
 	unsigned long precharge;
 	struct vm_area_struct *vma;
 
-	/* We've already held the mmap_sem */
+	down_read(&mm->mmap_sem);
 	for (vma = mm->mmap; vma; vma = vma->vm_next) {
 		struct mm_walk mem_cgroup_count_precharge_walk = {
 			.pmd_entry = mem_cgroup_count_precharge_pte_range,
@@ -4693,6 +4692,7 @@ static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
 		walk_page_range(vma->vm_start, vma->vm_end,
 					&mem_cgroup_count_precharge_walk);
 	}
+	up_read(&mm->mmap_sem);
 
 	precharge = mc.precharge;
 	mc.precharge = 0;
@@ -4702,10 +4702,15 @@ static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
 
 static int mem_cgroup_precharge_mc(struct mm_struct *mm)
 {
-	return mem_cgroup_do_precharge(mem_cgroup_count_precharge(mm));
+	unsigned long precharge = mem_cgroup_count_precharge(mm);
+
+	VM_BUG_ON(mc.moving_task);
+	mc.moving_task = current;
+	return mem_cgroup_do_precharge(precharge);
 }
 
-static void mem_cgroup_clear_mc(void)
+/* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. */
+static void __mem_cgroup_clear_mc(void)
 {
 	struct mem_cgroup *from = mc.from;
 	struct mem_cgroup *to = mc.to;
@@ -4740,23 +4745,28 @@ static void mem_cgroup_clear_mc(void)
 						PAGE_SIZE * mc.moved_swap);
 		}
 		/* we've already done mem_cgroup_get(mc.to) */
-
 		mc.moved_swap = 0;
 	}
-	if (mc.mm) {
-		up_read(&mc.mm->mmap_sem);
-		mmput(mc.mm);
-	}
+	memcg_oom_recover(from);
+	memcg_oom_recover(to);
+	wake_up_all(&mc.waitq);
+}
+
+static void mem_cgroup_clear_mc(void)
+{
+	struct mem_cgroup *from = mc.from;
+
+	/*
+	 * we must clear moving_task before waking up waiters at the end of
+	 * task migration.
+	 */
+	mc.moving_task = NULL;
+	__mem_cgroup_clear_mc();
 	spin_lock(&mc.lock);
 	mc.from = NULL;
 	mc.to = NULL;
 	spin_unlock(&mc.lock);
-	mc.moving_task = NULL;
-	mc.mm = NULL;
 	mem_cgroup_end_move(from);
-	memcg_oom_recover(from);
-	memcg_oom_recover(to);
-	wake_up_all(&mc.waitq);
 }
 
 static int mem_cgroup_can_attach(struct cgroup_subsys *ss,
@@ -4778,38 +4788,23 @@ static int mem_cgroup_can_attach(struct cgroup_subsys *ss,
 			return 0;
 		/* We move charges only when we move a owner of the mm */
 		if (mm->owner == p) {
-			/*
-			 * We do all the move charge works under one mmap_sem to
-			 * avoid deadlock with down_write(&mmap_sem)
-			 * -> try_charge() -> if (mc.moving_task) -> sleep.
-			 */
-			down_read(&mm->mmap_sem);
-
 			VM_BUG_ON(mc.from);
 			VM_BUG_ON(mc.to);
 			VM_BUG_ON(mc.precharge);
 			VM_BUG_ON(mc.moved_charge);
 			VM_BUG_ON(mc.moved_swap);
-			VM_BUG_ON(mc.moving_task);
-			VM_BUG_ON(mc.mm);
-
 			mem_cgroup_start_move(from);
 			spin_lock(&mc.lock);
 			mc.from = from;
 			mc.to = mem;
-			mc.precharge = 0;
-			mc.moved_charge = 0;
-			mc.moved_swap = 0;
 			spin_unlock(&mc.lock);
-			mc.moving_task = current;
-			mc.mm = mm;
+			/* We set mc.moving_task later */
 
 			ret = mem_cgroup_precharge_mc(mm);
 			if (ret)
 				mem_cgroup_clear_mc();
-			/* We call up_read() and mmput() in clear_mc(). */
-		} else
-			mmput(mm);
+		}
+		mmput(mm);
 	}
 	return ret;
 }
@@ -4898,7 +4893,19 @@ static void mem_cgroup_move_charge(struct mm_struct *mm)
 	struct vm_area_struct *vma;
 
 	lru_add_drain_all();
-	/* We've already held the mmap_sem */
+retry:
+	if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
+		/*
+		 * Someone who are holding the mmap_sem might be waiting in
+		 * waitq. So we cancel all extra charges, wake up all waiters,
+		 * and retry. Because we cancel precharges, we might not be able
+		 * to move enough charges, but moving charge is a best-effort
+		 * feature anyway, so it wouldn't be a big problem.
+		 */
+		__mem_cgroup_clear_mc();
+		cond_resched();
+		goto retry;
+	}
 	for (vma = mm->mmap; vma; vma = vma->vm_next) {
 		int ret;
 		struct mm_walk mem_cgroup_move_charge_walk = {
@@ -4917,6 +4924,7 @@ static void mem_cgroup_move_charge(struct mm_struct *mm)
 			 */
 			break;
 	}
+	up_read(&mm->mmap_sem);
 }
 
 static void mem_cgroup_move_task(struct cgroup_subsys *ss,
@@ -4925,11 +4933,17 @@ static void mem_cgroup_move_task(struct cgroup_subsys *ss,
 				struct task_struct *p,
 				bool threadgroup)
 {
-	if (!mc.mm)
+	struct mm_struct *mm;
+
+	if (!mc.to)
 		/* no need to move charge */
 		return;
 
-	mem_cgroup_move_charge(mc.mm);
+	mm = get_task_mm(p);
+	if (mm) {
+		mem_cgroup_move_charge(mm);
+		mmput(mm);
+	}
 	mem_cgroup_clear_mc();
 }
 #else	/* !CONFIG_MMU */
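
The heart of this patch is the trylock-and-retry idiom added to mem_cgroup_move_charge(). The old code took mmap_sem once in can_attach() and held it across the whole move, but a task already holding mmap_sem can itself be asleep on mc.waitq waiting for the move to finish, so blocking in down_read() here can deadlock. The patch instead tries the lock and, on failure, cancels the precharges (waking any waiters) before retrying. What follows is a minimal userspace C sketch of that idiom, not the kernel code: pthread_rwlock_tryrdlock() stands in for down_read_trylock(), sched_yield() for cond_resched(), and the hypothetical cancel_precharges() for __mem_cgroup_clear_mc().

#include <pthread.h>
#include <sched.h>
#include <stdio.h>

static pthread_rwlock_t mmap_sem = PTHREAD_RWLOCK_INITIALIZER;

/* Hypothetical stand-in for __mem_cgroup_clear_mc(): give back any
 * precharged resources and wake up everyone waiting on them. */
static void cancel_precharges(void)
{
	puts("cancelled precharges, woke waiters");
}

static void move_charge(void)
{
retry:
	if (pthread_rwlock_tryrdlock(&mmap_sem) != 0) {
		/* The lock holder may itself be blocked on our precharges,
		 * so we must not sleep on the lock: cancel, yield, retry. */
		cancel_precharges();
		sched_yield();	/* plays the role of cond_resched() */
		goto retry;
	}
	puts("got mmap_sem; walk VMAs and move charges");
	pthread_rwlock_unlock(&mmap_sem);
}

int main(void)
{
	move_charge();
	return 0;
}

As the in-diff comment says, cancelling precharges on every failed attempt means the eventual move may migrate fewer charges than hoped, which is acceptable because moving charges is a best-effort feature. The same waiter interplay explains the split of mem_cgroup_clear_mc(): moving_task must be cleared before __mem_cgroup_clear_mc() wakes the waiters, as the new comment in mem_cgroup_clear_mc() notes.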
