[PATCH] zoned vm counters: remove useless struct wbs
Remove writeback state

We can now remove some functions that were needed to calculate the page state
for writeback control, since these statistics are directly available from the
zoned VM counters.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Christoph Lameter authored and Linus Torvalds committed Jun 30, 2006
1 parent d2c5e30 commit c24f21b
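
For readers skimming the diff below, the whole change follows one pattern: each
wbs.nr_* field read becomes a direct global_page_state() call on the
corresponding counter (NR_FILE_DIRTY, NR_UNSTABLE_NFS, NR_WRITEBACK, ...), and
struct writeback_state together with its get_writeback_state() helper
disappears. Here is a minimal userspace sketch of that pattern; the vm_stat
array, its sample values, and the enum are simplified stand-ins for the
kernel's zoned VM counter machinery, not the real implementation:

    /*
     * Sketch only: instead of snapshotting several counters into a
     * temporary struct via a helper (the old get_writeback_state()
     * style), callers read each counter directly at the point of use.
     */
    #include <stdio.h>

    enum zone_stat_item {
            NR_FILE_DIRTY,
            NR_UNSTABLE_NFS,
            NR_WRITEBACK,
            NR_VM_STAT_ITEMS
    };

    /* Stand-in for the kernel's global counter array (made-up values). */
    static unsigned long vm_stat[NR_VM_STAT_ITEMS] = { 120, 8, 40 };

    static unsigned long global_page_state(enum zone_stat_item item)
    {
            return vm_stat[item];
    }

    int main(void)
    {
            /* New style: read counters directly where they are needed. */
            unsigned long nr_reclaimable = global_page_state(NR_FILE_DIRTY) +
                                    global_page_state(NR_UNSTABLE_NFS);

            printf("reclaimable: %lu, writeback: %lu\n",
                    nr_reclaimable, global_page_state(NR_WRITEBACK));
            return 0;
    }

In the kernel these counters are maintained as a global array fed by per-zone
counters, so a single counter read is cheap and snapshotting them all into a
struct no longer buys anything.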
Showing 1 changed file with 34 additions and 51 deletions.

--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -99,23 +99,6 @@ EXPORT_SYMBOL(laptop_mode);
 
 static void background_writeout(unsigned long _min_pages);
 
-struct writeback_state
-{
-        unsigned long nr_dirty;
-        unsigned long nr_unstable;
-        unsigned long nr_mapped;
-        unsigned long nr_writeback;
-};
-
-static void get_writeback_state(struct writeback_state *wbs)
-{
-        wbs->nr_dirty = global_page_state(NR_FILE_DIRTY);
-        wbs->nr_unstable = global_page_state(NR_UNSTABLE_NFS);
-        wbs->nr_mapped = global_page_state(NR_FILE_MAPPED) +
-                        global_page_state(NR_ANON_PAGES);
-        wbs->nr_writeback = global_page_state(NR_WRITEBACK);
-}
-
 /*
  * Work out the current dirty-memory clamping and background writeout
  * thresholds.
@@ -134,8 +117,8 @@ static void get_writeback_state(struct writeback_state *wbs)
  * clamping level.
  */
 static void
-get_dirty_limits(struct writeback_state *wbs, long *pbackground, long *pdirty,
-                struct address_space *mapping)
+get_dirty_limits(long *pbackground, long *pdirty,
+                struct address_space *mapping)
 {
         int background_ratio;           /* Percentages */
         int dirty_ratio;
@@ -145,8 +128,6 @@ get_dirty_limits(struct writeback_state *wbs, long *pbackground, long *pdirty,
         unsigned long available_memory = total_pages;
         struct task_struct *tsk;
 
-        get_writeback_state(wbs);
-
 #ifdef CONFIG_HIGHMEM
         /*
          * If this mapping can only allocate from low memory,
@@ -157,7 +138,9 @@ get_dirty_limits(struct writeback_state *wbs, long *pbackground, long *pdirty,
 #endif
 
 
-        unmapped_ratio = 100 - (wbs->nr_mapped * 100) / total_pages;
+        unmapped_ratio = 100 - ((global_page_state(NR_FILE_MAPPED) +
+                                global_page_state(NR_ANON_PAGES)) * 100) /
+                                        total_pages;
 
         dirty_ratio = vm_dirty_ratio;
         if (dirty_ratio > unmapped_ratio / 2)
@@ -190,7 +173,6 @@ get_dirty_limits(struct writeback_state *wbs, long *pbackground, long *pdirty,
  */
 static void balance_dirty_pages(struct address_space *mapping)
 {
-        struct writeback_state wbs;
         long nr_reclaimable;
         long background_thresh;
         long dirty_thresh;
@@ -208,11 +190,12 @@ static void balance_dirty_pages(struct address_space *mapping)
                 .range_cyclic   = 1,
         };
 
-                get_dirty_limits(&wbs, &background_thresh,
-                                        &dirty_thresh, mapping);
-                nr_reclaimable = wbs.nr_dirty + wbs.nr_unstable;
-                if (nr_reclaimable + wbs.nr_writeback <= dirty_thresh)
-                        break;
+                get_dirty_limits(&background_thresh, &dirty_thresh, mapping);
+                nr_reclaimable = global_page_state(NR_FILE_DIRTY) +
+                                        global_page_state(NR_UNSTABLE_NFS);
+                if (nr_reclaimable + global_page_state(NR_WRITEBACK) <=
+                        dirty_thresh)
+                                break;
 
                 if (!dirty_exceeded)
                         dirty_exceeded = 1;
@@ -225,20 +208,24 @@ static void balance_dirty_pages(struct address_space *mapping)
                  */
                 if (nr_reclaimable) {
                         writeback_inodes(&wbc);
-                        get_dirty_limits(&wbs, &background_thresh,
-                                        &dirty_thresh, mapping);
-                        nr_reclaimable = wbs.nr_dirty + wbs.nr_unstable;
-                        if (nr_reclaimable + wbs.nr_writeback <= dirty_thresh)
-                                break;
+                        get_dirty_limits(&background_thresh,
+                                        &dirty_thresh, mapping);
+                        nr_reclaimable = global_page_state(NR_FILE_DIRTY) +
+                                        global_page_state(NR_UNSTABLE_NFS);
+                        if (nr_reclaimable +
+                                global_page_state(NR_WRITEBACK)
+                                        <= dirty_thresh)
+                                                break;
                         pages_written += write_chunk - wbc.nr_to_write;
                         if (pages_written >= write_chunk)
                                 break;          /* We've done our duty */
                 }
                 blk_congestion_wait(WRITE, HZ/10);
         }
 
-        if (nr_reclaimable + wbs.nr_writeback <= dirty_thresh && dirty_exceeded)
-                dirty_exceeded = 0;
+        if (nr_reclaimable + global_page_state(NR_WRITEBACK)
+                <= dirty_thresh && dirty_exceeded)
+                        dirty_exceeded = 0;
 
         if (writeback_in_progress(bdi))
                 return;         /* pdflush is already working this queue */
@@ -300,21 +287,21 @@ EXPORT_SYMBOL(balance_dirty_pages_ratelimited_nr);
 
 void throttle_vm_writeout(void)
 {
-        struct writeback_state wbs;
         long background_thresh;
         long dirty_thresh;
 
         for ( ; ; ) {
-                get_dirty_limits(&wbs, &background_thresh, &dirty_thresh, NULL);
+                get_dirty_limits(&background_thresh, &dirty_thresh, NULL);
 
                 /*
                  * Boost the allowable dirty threshold a bit for page
                  * allocators so they don't get DoS'ed by heavy writers
                  */
                 dirty_thresh += dirty_thresh / 10;      /* wheeee... */
 
-                if (wbs.nr_unstable + wbs.nr_writeback <= dirty_thresh)
-                        break;
+                if (global_page_state(NR_UNSTABLE_NFS) +
+                        global_page_state(NR_WRITEBACK) <= dirty_thresh)
+                                break;
                 blk_congestion_wait(WRITE, HZ/10);
         }
 }
@@ -337,12 +324,12 @@ static void background_writeout(unsigned long _min_pages)
         };
 
         for ( ; ; ) {
-                struct writeback_state wbs;
                 long background_thresh;
                 long dirty_thresh;
 
-                get_dirty_limits(&wbs, &background_thresh, &dirty_thresh, NULL);
-                if (wbs.nr_dirty + wbs.nr_unstable < background_thresh
+                get_dirty_limits(&background_thresh, &dirty_thresh, NULL);
+                if (global_page_state(NR_FILE_DIRTY) +
+                        global_page_state(NR_UNSTABLE_NFS) < background_thresh
                                 && min_pages <= 0)
                         break;
                 wbc.encountered_congestion = 0;
@@ -366,12 +353,9 @@ static void background_writeout(unsigned long _min_pages)
  */
 int wakeup_pdflush(long nr_pages)
 {
-        if (nr_pages == 0) {
-                struct writeback_state wbs;
-
-                get_writeback_state(&wbs);
-                nr_pages = wbs.nr_dirty + wbs.nr_unstable;
-        }
+        if (nr_pages == 0)
+                nr_pages = global_page_state(NR_FILE_DIRTY) +
+                                global_page_state(NR_UNSTABLE_NFS);
         return pdflush_operation(background_writeout, nr_pages);
 }
 
@@ -402,7 +386,6 @@ static void wb_kupdate(unsigned long arg)
         unsigned long start_jif;
         unsigned long next_jif;
         long nr_to_write;
-        struct writeback_state wbs;
         struct writeback_control wbc = {
                 .bdi            = NULL,
                 .sync_mode      = WB_SYNC_NONE,
@@ -415,11 +398,11 @@ static void wb_kupdate(unsigned long arg)
 
         sync_supers();
 
-        get_writeback_state(&wbs);
         oldest_jif = jiffies - dirty_expire_interval;
         start_jif = jiffies;
         next_jif = start_jif + dirty_writeback_interval;
-        nr_to_write = wbs.nr_dirty + wbs.nr_unstable +
+        nr_to_write = global_page_state(NR_FILE_DIRTY) +
+                        global_page_state(NR_UNSTABLE_NFS) +
                         (inodes_stat.nr_inodes - inodes_stat.nr_unused);
         while (nr_to_write > 0) {
                 wbc.encountered_congestion = 0;