Commit

---
yaml
---
r: 263332
b: refs/heads/master
c: f51bdd2
h: refs/heads/master
v: v3
Shaohua Li authored and Linus Torvalds committed Aug 25, 2011
1 parent 1fa5ef4 commit bbc01b0
Showing 2 changed files with 9 additions and 9 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 7e8aa048989bf7e0604996a3e2068fb1a81f81bd
+refs/heads/master: f51bdd2e97098a5cbb3cba7c3a56fa0e9ac3c444
16 changes: 8 additions & 8 deletions trunk/mm/vmscan.c
@@ -2283,7 +2283,7 @@ unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
                 .mem_cgroup = mem,
                 .memcg_record = rec,
         };
-        unsigned long start, end;
+        ktime_t start, end;
 
         sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
                         (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
@@ -2292,7 +2292,7 @@ unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
                         sc.may_writepage,
                         sc.gfp_mask);
 
-        start = sched_clock();
+        start = ktime_get();
         /*
          * NOTE: Although we can get the priority field, using it
          * here is not a good idea, since it limits the pages we can scan.
@@ -2301,10 +2301,10 @@ unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
          * the priority and make it zero.
          */
         shrink_zone(0, zone, &sc);
-        end = sched_clock();
+        end = ktime_get();
 
         if (rec)
-                rec->elapsed += end - start;
+                rec->elapsed += ktime_to_ns(ktime_sub(end, start));
         *scanned = sc.nr_scanned;
 
         trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed);
@@ -2319,7 +2319,7 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
 {
         struct zonelist *zonelist;
         unsigned long nr_reclaimed;
-        unsigned long start, end;
+        ktime_t start, end;
         int nid;
         struct scan_control sc = {
                 .may_writepage = !laptop_mode,
@@ -2337,7 +2337,7 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
                 .gfp_mask = sc.gfp_mask,
         };
 
-        start = sched_clock();
+        start = ktime_get();
         /*
          * Unlike direct reclaim via alloc_pages(), memcg's reclaim doesn't
          * take care of from where we get pages. So the node where we start the
@@ -2352,9 +2352,9 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
                         sc.gfp_mask);
 
         nr_reclaimed = do_try_to_free_pages(zonelist, &sc, &shrink);
-        end = sched_clock();
+        end = ktime_get();
         if (rec)
-                rec->elapsed += end - start;
+                rec->elapsed += ktime_to_ns(ktime_sub(end, start));
 
         trace_mm_vmscan_memcg_reclaim_end(nr_reclaimed);

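The edit is identical at every touched call site: sample ktime_get() before and after the reclaim work, then fold the difference into rec->elapsed in nanoseconds via ktime_sub() and ktime_to_ns(). Below is a minimal standalone sketch of that timing pattern, not part of the commit; the measure_elapsed_ns() helper and its fn/arg parameters are illustrative names only.

#include <linux/ktime.h>

/*
 * Illustrative sketch only: time a single call to fn(arg) using the
 * same ktime_get()/ktime_sub()/ktime_to_ns() sequence the patch
 * switches to, and return the elapsed wall-clock time in nanoseconds.
 */
static s64 measure_elapsed_ns(void (*fn)(void *), void *arg)
{
        ktime_t start, end;

        start = ktime_get();
        fn(arg);
        end = ktime_get();

        return ktime_to_ns(ktime_sub(end, start));
}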
