Skip to content

Commit

Permalink
---
Browse files Browse the repository at this point in the history
yaml
---
r: 31370
b: refs/heads/master
c: f6ac235
h: refs/heads/master
v: v3
  • Loading branch information
Christoph Lameter authored and Linus Torvalds committed Jun 30, 2006
1 parent b8ffe1c commit 117e161
Show file tree
Hide file tree
Showing 8 changed files with 570 additions and 550 deletions.
2 changes: 1 addition & 1 deletion [refs]
Original file line number Diff line number Diff line change
@@ -1,2 +1,2 @@
---
refs/heads/master: 672b2714ae57af16fe7d760dc4e0918a7a6cb0fa
refs/heads/master: f6ac2354d791195ca40822b84d73d48a4e8b7f2b
7 changes: 0 additions & 7 deletions trunk/include/asm-s390/pgtable.h
Original file line number Diff line number Diff line change
Expand Up @@ -657,13 +657,6 @@ static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
__pte; \
})

/*
 * s390-specific SetPageUptodate: only when PG_uptodate transitions from
 * clear to set (test_and_set_bit returns 0) does it also call
 * page_test_and_clear_dirty() on the page.
 * NOTE(review): presumably this resets the s390 storage-key dirty state
 * when a page becomes up to date — confirm against the s390 arch docs.
 * The local __page copy ensures _page is evaluated only once for the
 * bit operation; the original expression is reused for the dirty call.
 */
#define SetPageUptodate(_page) \
do { \
struct page *__page = (_page); \
if (!test_and_set_bit(PG_uptodate, &__page->flags)) \
page_test_and_clear_dirty(_page); \
} while (0)

#ifdef __s390x__

#define pfn_pmd(pfn, pgprot) \
Expand Down
6 changes: 5 additions & 1 deletion trunk/include/linux/mm.h
Original file line number Diff line number Diff line change
Expand Up @@ -36,7 +36,6 @@ extern int sysctl_legacy_va_layout;
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/atomic.h>

#define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))

Expand Down Expand Up @@ -515,6 +514,11 @@ static inline void set_page_links(struct page *page, unsigned long zone,
set_page_section(page, pfn_to_section_nr(pfn));
}

/*
* Some inline functions in vmstat.h depend on page_zone()
*/
#include <linux/vmstat.h>

#ifndef CONFIG_DISCONTIGMEM
/* The array of struct pages - for discontigmem use pgdat->lmem_map */
extern struct page *mem_map;
Expand Down
141 changes: 8 additions & 133 deletions trunk/include/linux/page-flags.h
Original file line number Diff line number Diff line change
Expand Up @@ -5,12 +5,8 @@
#ifndef PAGE_FLAGS_H
#define PAGE_FLAGS_H

#include <linux/percpu.h>
#include <linux/cache.h>
#include <linux/types.h>

#include <asm/pgtable.h>

/*
* Various page->flags bits:
*
Expand Down Expand Up @@ -102,134 +98,6 @@
#define PG_uncached 31 /* Page has been mapped as uncached */
#endif

/*
* Global page accounting. One instance per CPU. Only unsigned longs are
* allowed.
*
* - Fields can be modified with xxx_page_state and xxx_page_state_zone at
* any time safely (which protects the instance from modification by
interrupt).
* - The __xxx_page_state variants can be used safely when interrupts are
* disabled.
* - The __xxx_page_state variants can be used if the field is only
* modified from process context and protected from preemption, or only
* modified from interrupt context. In this case, the field should be
* commented here.
*/
/*
 * Per-CPU page accounting counters (see the block comment above).
 * Field order matters: GET_PAGE_STATE_LAST marks the end of the
 * always-valid section, and the zone-suffixed quadruplets
 * (_high/_normal/_dma32/_dma) are addressed via offsetof() by the
 * state_zone_offset() macro below — do not reorder.
 */
struct page_state {
unsigned long nr_dirty; /* Dirty writeable pages */
unsigned long nr_writeback; /* Pages under writeback */
unsigned long nr_unstable; /* NFS unstable pages */
unsigned long nr_page_table_pages;/* Pages used for pagetables */
unsigned long nr_mapped; /* mapped into pagetables.
* only modified from process context */
unsigned long nr_slab; /* In slab */
#define GET_PAGE_STATE_LAST nr_slab

/*
* The below are zeroed by get_page_state(). Use get_full_page_state()
* to add up all these.
*/
unsigned long pgpgin; /* Disk reads */
unsigned long pgpgout; /* Disk writes */
unsigned long pswpin; /* swap reads */
unsigned long pswpout; /* swap writes */

unsigned long pgalloc_high; /* page allocations */
unsigned long pgalloc_normal;
unsigned long pgalloc_dma32;
unsigned long pgalloc_dma;

unsigned long pgfree; /* page freeings */
unsigned long pgactivate; /* pages moved inactive->active */
unsigned long pgdeactivate; /* pages moved active->inactive */

unsigned long pgfault; /* faults (major+minor) */
unsigned long pgmajfault; /* faults (major only) */

unsigned long pgrefill_high; /* inspected in refill_inactive_zone */
unsigned long pgrefill_normal;
unsigned long pgrefill_dma32;
unsigned long pgrefill_dma;

unsigned long pgsteal_high; /* total highmem pages reclaimed */
unsigned long pgsteal_normal;
unsigned long pgsteal_dma32;
unsigned long pgsteal_dma;

unsigned long pgscan_kswapd_high;/* total highmem pages scanned */
unsigned long pgscan_kswapd_normal;
unsigned long pgscan_kswapd_dma32;
unsigned long pgscan_kswapd_dma;

unsigned long pgscan_direct_high;/* total highmem pages scanned */
unsigned long pgscan_direct_normal;
unsigned long pgscan_direct_dma32;
unsigned long pgscan_direct_dma;

unsigned long pginodesteal; /* pages reclaimed via inode freeing */
unsigned long slabs_scanned; /* slab objects scanned */
unsigned long kswapd_steal; /* pages reclaimed by kswapd */
unsigned long kswapd_inodesteal;/* reclaimed via kswapd inode freeing */
unsigned long pageoutrun; /* kswapd's calls to page reclaim */
unsigned long allocstall; /* direct reclaim calls */

unsigned long pgrotated; /* pages rotated to tail of the LRU */
unsigned long nr_bounce; /* pages for bounce buffers */
};

/*
 * Readout / modification primitives — presumably implemented over the
 * per-CPU page_states instances in mm/ (implementation not visible in
 * this header; confirm in the corresponding .c file).
 */
extern void get_page_state(struct page_state *ret);
extern void get_page_state_node(struct page_state *ret, int node);
extern void get_full_page_state(struct page_state *ret);
extern unsigned long read_page_state_offset(unsigned long offset);
extern void mod_page_state_offset(unsigned long offset, unsigned long delta);
extern void __mod_page_state_offset(unsigned long offset, unsigned long delta);

/* Name-based wrappers: translate a field name into its byte offset. */
#define read_page_state(member) \
read_page_state_offset(offsetof(struct page_state, member))

#define mod_page_state(member, delta) \
mod_page_state_offset(offsetof(struct page_state, member), (delta))

/* __ variant: caller guarantees interrupts are disabled (see above). */
#define __mod_page_state(member, delta) \
__mod_page_state_offset(offsetof(struct page_state, member), (delta))

/* inc/dec/add/sub in terms of mod_page_state; 0UL - x is the unsigned
 * negation, relying on defined unsigned wraparound for subtraction. */
#define inc_page_state(member) mod_page_state(member, 1UL)
#define dec_page_state(member) mod_page_state(member, 0UL - 1)
#define add_page_state(member,delta) mod_page_state(member, (delta))
#define sub_page_state(member,delta) mod_page_state(member, 0UL - (delta))

#define __inc_page_state(member) __mod_page_state(member, 1UL)
#define __dec_page_state(member) __mod_page_state(member, 0UL - 1)
#define __add_page_state(member,delta) __mod_page_state(member, (delta))
#define __sub_page_state(member,delta) __mod_page_state(member, 0UL - (delta))

#define page_state(member) (*__page_state(offsetof(struct page_state, member)))

/*
 * Pick the zone-suffixed variant (member_high / member_normal /
 * member_dma32 / member_dma) of a counter for the given zone.
 * GCC statement-expression: evaluates to the chosen byte offset.
 */
#define state_zone_offset(zone, member) \
({ \
unsigned offset; \
if (is_highmem(zone)) \
offset = offsetof(struct page_state, member##_high); \
else if (is_normal(zone)) \
offset = offsetof(struct page_state, member##_normal); \
else if (is_dma32(zone)) \
offset = offsetof(struct page_state, member##_dma32); \
else \
offset = offsetof(struct page_state, member##_dma); \
offset; \
})

#define __mod_page_state_zone(zone, member, delta) \
do { \
__mod_page_state_offset(state_zone_offset(zone, member), (delta)); \
} while (0)

#define mod_page_state_zone(zone, member, delta) \
do { \
mod_page_state_offset(state_zone_offset(zone, member), (delta)); \
} while (0)

/*
* Manipulation of page state flags
*/
Expand All @@ -254,7 +122,14 @@ extern void __mod_page_state_offset(unsigned long offset, unsigned long delta);
#define TestClearPageReferenced(page) test_and_clear_bit(PG_referenced, &(page)->flags)

#define PageUptodate(page) test_bit(PG_uptodate, &(page)->flags)
/*
 * NOTE(review): the next two guard lines appear to be the interleaved
 * old (#ifndef SetPageUptodate) and new (#ifdef CONFIG_S390) variants
 * from the diff; as shown, two conditionals open but only one #endif is
 * visible in this span — the other likely lies beyond the fold below.
 */
#ifndef SetPageUptodate
#ifdef CONFIG_S390
/* s390: on the clear->set transition also clear the page's dirty state
 * via page_test_and_clear_dirty() (moved here from asm-s390/pgtable.h). */
#define SetPageUptodate(_page) \
do { \
struct page *__page = (_page); \
if (!test_and_set_bit(PG_uptodate, &__page->flags)) \
page_test_and_clear_dirty(_page); \
} while (0)
#else
#define SetPageUptodate(page) set_bit(PG_uptodate, &(page)->flags)
#endif
#define ClearPageUptodate(page) clear_bit(PG_uptodate, &(page)->flags)
Expand Down
138 changes: 138 additions & 0 deletions trunk/include/linux/vmstat.h
Original file line number Diff line number Diff line change
@@ -0,0 +1,138 @@
#ifndef _LINUX_VMSTAT_H
#define _LINUX_VMSTAT_H

#include <linux/types.h>
#include <linux/percpu.h>

/*
* Global page accounting. One instance per CPU. Only unsigned longs are
* allowed.
*
* - Fields can be modified with xxx_page_state and xxx_page_state_zone at
* any time safely (which protects the instance from modification by
interrupt).
* - The __xxx_page_state variants can be used safely when interrupts are
* disabled.
* - The __xxx_page_state variants can be used if the field is only
* modified from process context and protected from preemption, or only
* modified from interrupt context. In this case, the field should be
* commented here.
*/
/*
 * Per-CPU page accounting counters (see the block comment above).
 * Field order matters: GET_PAGE_STATE_LAST marks the end of the
 * always-valid section, and the zone-suffixed quadruplets
 * (_high/_normal/_dma32/_dma) are addressed via offsetof() by the
 * state_zone_offset() macro below — do not reorder.
 */
struct page_state {
unsigned long nr_dirty; /* Dirty writeable pages */
unsigned long nr_writeback; /* Pages under writeback */
unsigned long nr_unstable; /* NFS unstable pages */
unsigned long nr_page_table_pages;/* Pages used for pagetables */
unsigned long nr_mapped; /* mapped into pagetables.
* only modified from process context */
unsigned long nr_slab; /* In slab */
#define GET_PAGE_STATE_LAST nr_slab

/*
* The below are zeroed by get_page_state(). Use get_full_page_state()
* to add up all these.
*/
unsigned long pgpgin; /* Disk reads */
unsigned long pgpgout; /* Disk writes */
unsigned long pswpin; /* swap reads */
unsigned long pswpout; /* swap writes */

unsigned long pgalloc_high; /* page allocations */
unsigned long pgalloc_normal;
unsigned long pgalloc_dma32;
unsigned long pgalloc_dma;

unsigned long pgfree; /* page freeings */
unsigned long pgactivate; /* pages moved inactive->active */
unsigned long pgdeactivate; /* pages moved active->inactive */

unsigned long pgfault; /* faults (major+minor) */
unsigned long pgmajfault; /* faults (major only) */

unsigned long pgrefill_high; /* inspected in refill_inactive_zone */
unsigned long pgrefill_normal;
unsigned long pgrefill_dma32;
unsigned long pgrefill_dma;

unsigned long pgsteal_high; /* total highmem pages reclaimed */
unsigned long pgsteal_normal;
unsigned long pgsteal_dma32;
unsigned long pgsteal_dma;

unsigned long pgscan_kswapd_high;/* total highmem pages scanned */
unsigned long pgscan_kswapd_normal;
unsigned long pgscan_kswapd_dma32;
unsigned long pgscan_kswapd_dma;

unsigned long pgscan_direct_high;/* total highmem pages scanned */
unsigned long pgscan_direct_normal;
unsigned long pgscan_direct_dma32;
unsigned long pgscan_direct_dma;

unsigned long pginodesteal; /* pages reclaimed via inode freeing */
unsigned long slabs_scanned; /* slab objects scanned */
unsigned long kswapd_steal; /* pages reclaimed by kswapd */
unsigned long kswapd_inodesteal;/* reclaimed via kswapd inode freeing */
unsigned long pageoutrun; /* kswapd's calls to page reclaim */
unsigned long allocstall; /* direct reclaim calls */

unsigned long pgrotated; /* pages rotated to tail of the LRU */
unsigned long nr_bounce; /* pages for bounce buffers */
};

/*
 * Readout / modification primitives — presumably implemented over the
 * per-CPU page_states instances declared at the bottom of this header
 * (new mm/vmstat.c per this commit's Makefile change; confirm there).
 */
extern void get_page_state(struct page_state *ret);
extern void get_page_state_node(struct page_state *ret, int node);
extern void get_full_page_state(struct page_state *ret);
extern unsigned long read_page_state_offset(unsigned long offset);
extern void mod_page_state_offset(unsigned long offset, unsigned long delta);
extern void __mod_page_state_offset(unsigned long offset, unsigned long delta);

/* Name-based wrappers: translate a field name into its byte offset. */
#define read_page_state(member) \
read_page_state_offset(offsetof(struct page_state, member))

#define mod_page_state(member, delta) \
mod_page_state_offset(offsetof(struct page_state, member), (delta))

/* __ variant: caller guarantees interrupts are disabled (see above). */
#define __mod_page_state(member, delta) \
__mod_page_state_offset(offsetof(struct page_state, member), (delta))

/* inc/dec/add/sub in terms of mod_page_state; 0UL - x is the unsigned
 * negation, relying on defined unsigned wraparound for subtraction. */
#define inc_page_state(member) mod_page_state(member, 1UL)
#define dec_page_state(member) mod_page_state(member, 0UL - 1)
#define add_page_state(member,delta) mod_page_state(member, (delta))
#define sub_page_state(member,delta) mod_page_state(member, 0UL - (delta))

#define __inc_page_state(member) __mod_page_state(member, 1UL)
#define __dec_page_state(member) __mod_page_state(member, 0UL - 1)
#define __add_page_state(member,delta) __mod_page_state(member, (delta))
#define __sub_page_state(member,delta) __mod_page_state(member, 0UL - (delta))

#define page_state(member) (*__page_state(offsetof(struct page_state, member)))

/*
 * Pick the zone-suffixed variant (member_high / member_normal /
 * member_dma32 / member_dma) of a counter for the given zone.
 * GCC statement-expression: evaluates to the chosen byte offset.
 * Depends on is_highmem()/is_normal()/is_dma32(), i.e. on page_zone()
 * machinery — hence mm.h includes this header only after defining
 * set_page_links().
 */
#define state_zone_offset(zone, member) \
({ \
unsigned offset; \
if (is_highmem(zone)) \
offset = offsetof(struct page_state, member##_high); \
else if (is_normal(zone)) \
offset = offsetof(struct page_state, member##_normal); \
else if (is_dma32(zone)) \
offset = offsetof(struct page_state, member##_dma32); \
else \
offset = offsetof(struct page_state, member##_dma); \
offset; \
})

#define __mod_page_state_zone(zone, member, delta) \
do { \
__mod_page_state_offset(state_zone_offset(zone, member), (delta)); \
} while (0)

#define mod_page_state_zone(zone, member, delta) \
do { \
mod_page_state_offset(state_zone_offset(zone, member), (delta)); \
} while (0)

/* The per-CPU counter instances themselves (defined in mm/). */
DECLARE_PER_CPU(struct page_state, page_states);

#endif /* _LINUX_VMSTAT_H */

2 changes: 1 addition & 1 deletion trunk/mm/Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@ mmu-$(CONFIG_MMU) := fremap.o highmem.o madvise.o memory.o mincore.o \
# Core always-built mm objects. NOTE(review): the two prio_tree.o lines
# below are the before/after variants from this diff — the change is the
# addition of vmstat.o (the new mm/vmstat.c) to obj-y.
obj-y := bootmem.o filemap.o mempool.o oom_kill.o fadvise.o \
page_alloc.o page-writeback.o pdflush.o \
readahead.o swap.o truncate.o vmscan.o \
prio_tree.o util.o mmzone.o $(mmu-y)
prio_tree.o util.o mmzone.o vmstat.o $(mmu-y)

obj-$(CONFIG_SWAP) += page_io.o swap_state.o swapfile.o thrash.o
obj-$(CONFIG_HUGETLBFS) += hugetlb.o
Expand Down
Loading

0 comments on commit 117e161

Please sign in to comment.