Skip to content

Commit

Permalink
---
Browse files Browse the repository at this point in the history
yaml
---
r: 31371
b: refs/heads/master
c: 2244b95
h: refs/heads/master
i:
  31369: b8ffe1c
  31367: bec38bf
v: v3
  • Loading branch information
Christoph Lameter authored and Linus Torvalds committed Jun 30, 2006
1 parent 117e161 commit 40803ae
Show file tree
Hide file tree
Showing 7 changed files with 360 additions and 6 deletions.
2 changes: 1 addition & 1 deletion [refs]
Original file line number Diff line number Diff line change
@@ -1,2 +1,2 @@
---
refs/heads/master: f6ac2354d791195ca40822b84d73d48a4e8b7f2b
refs/heads/master: 2244b95a7bcf8d24196f8a3a44187ba5dfff754c
5 changes: 5 additions & 0 deletions trunk/arch/ia64/Kconfig
Original file line number Diff line number Diff line change
Expand Up @@ -70,6 +70,11 @@ config DMA_IS_DMA32
bool
default y

# NOTE(review): selected only on SGI SN2 — presumably means all DMA-able
# memory lives in ZONE_NORMAL (no separate DMA zone split); confirm against
# the mm/ users of CONFIG_DMA_IS_NORMAL before relying on this.
config DMA_IS_NORMAL
bool
depends on IA64_SGI_SN2
default y

choice
prompt "System type"
default IA64_GENERIC
Expand Down
9 changes: 9 additions & 0 deletions trunk/include/linux/mmzone.h
Original file line number Diff line number Diff line change
Expand Up @@ -46,6 +46,9 @@ struct zone_padding {
#define ZONE_PADDING(name)
#endif

/*
 * Item indices for the per-zone and global VM statistics arrays.
 * The enum is currently empty, so NR_VM_ZONE_STAT_ITEMS only provides
 * the array dimension for vm_stat[] and zone->vm_stat[]; real counter
 * items are expected to be added before the closing brace.
 */
enum zone_stat_item {
NR_VM_ZONE_STAT_ITEMS };

struct per_cpu_pages {
int count; /* number of pages in the list */
int high; /* high watermark, emptying needed */
Expand All @@ -55,6 +58,10 @@ struct per_cpu_pages {

struct per_cpu_pageset {
struct per_cpu_pages pcp[2]; /* 0: hot. 1: cold */
#ifdef CONFIG_SMP
s8 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS];
#endif

#ifdef CONFIG_NUMA
unsigned long numa_hit; /* allocated in intended node */
unsigned long numa_miss; /* allocated in non intended node */
Expand Down Expand Up @@ -165,6 +172,8 @@ struct zone {
/* A count of how many reclaimers are scanning this zone */
atomic_t reclaim_in_progress;

/* Zone statistics */
atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
/*
* timestamp (in jiffies) of the last zone reclaim that did not
* result in freeing of pages. This is used to avoid repeated scans
Expand Down
129 changes: 128 additions & 1 deletion trunk/include/linux/vmstat.h
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,9 @@

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/config.h>
#include <linux/mmzone.h>
#include <asm/atomic.h>

/*
* Global page accounting. One instance per CPU. Only unsigned longs are
Expand Down Expand Up @@ -134,5 +137,129 @@ extern void __mod_page_state_offset(unsigned long offset, unsigned long delta);

DECLARE_PER_CPU(struct page_state, page_states);

#endif /* _LINUX_VMSTAT_H */
/*
* Zone based page accounting with per cpu differentials.
*/
extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];

/*
 * Fold the delta @x into the counter @item, updating both the global
 * total and the per-zone total for @zone.  The two updates are
 * independent atomic adds, so their relative order does not matter.
 */
static inline void zone_page_state_add(long x, struct zone *zone,
				 enum zone_stat_item item)
{
	atomic_long_add(x, &vm_stat[item]);
	atomic_long_add(x, &zone->vm_stat[item]);
}

/*
 * Read the machine-wide value of the counter @item.  On SMP the
 * per-cpu differentials can leave the summed counter transiently
 * negative; such a snapshot is reported as zero.
 */
static inline unsigned long global_page_state(enum zone_stat_item item)
{
	long snapshot = atomic_long_read(&vm_stat[item]);

#ifdef CONFIG_SMP
	return snapshot < 0 ? 0 : snapshot;
#else
	return snapshot;
#endif
}

/*
 * Read the value of the counter @item for a single @zone.  On SMP the
 * per-cpu differentials can leave the counter transiently negative;
 * such a snapshot is reported as zero.
 */
static inline unsigned long zone_page_state(struct zone *zone,
			enum zone_stat_item item)
{
	long snapshot = atomic_long_read(&zone->vm_stat[item]);

#ifdef CONFIG_SMP
	return snapshot < 0 ? 0 : snapshot;
#else
	return snapshot;
#endif
}

#ifdef CONFIG_NUMA
/*
* Determine the per node value of a stat item. This function
* is called frequently in a NUMA machine, so try to be as
* frugal as possible.
*/
static inline unsigned long node_page_state(int node,
enum zone_stat_item item)
{
struct zone *zones = NODE_DATA(node)->node_zones;

/*
 * Sum @item across every zone this node can have.  The preprocessor
 * splices together one large return expression so that only zones
 * configured into this kernel are read: ZONE_DMA32 only on 64-bit
 * without DMA_IS_DMA32, ZONE_NORMAL unless DMA_IS_NORMAL folds it
 * into the DMA zone, ZONE_HIGHMEM only with CONFIG_HIGHMEM, and
 * ZONE_DMA always (the unconditional terminating operand).
 */
return
#ifndef CONFIG_DMA_IS_NORMAL
#if !defined(CONFIG_DMA_IS_DMA32) && BITS_PER_LONG >= 64
zone_page_state(&zones[ZONE_DMA32], item) +
#endif
zone_page_state(&zones[ZONE_NORMAL], item) +
#endif
#ifdef CONFIG_HIGHMEM
zone_page_state(&zones[ZONE_HIGHMEM], item) +
#endif
zone_page_state(&zones[ZONE_DMA], item);
}
#else
#define node_page_state(node, item) global_page_state(item)
#endif

/*
 * Convenience add/subtract wrappers expressed through the modify
 * primitives; subtraction simply negates the delta.  The __-prefixed
 * forms map to __mod_zone_page_state -- NOTE(review): by kernel naming
 * convention these are presumably the variants that leave interrupt
 * handling to the caller; confirm against the mm/ implementation.
 */
#define __add_zone_page_state(__z, __i, __d) \
__mod_zone_page_state(__z, __i, __d)
#define __sub_zone_page_state(__z, __i, __d) \
__mod_zone_page_state(__z, __i,-(__d))

#define add_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, __d)
#define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d))

/* Reset every per-zone VM statistics counter of @zone to zero. */
static inline void zap_zone_vm_stats(struct zone *zone)
{
memset(zone->vm_stat, 0, sizeof(zone->vm_stat));
}

#ifdef CONFIG_SMP
/*
 * SMP: the counter updates are buffered through per-cpu differentials
 * (see per_cpu_pageset.vm_stat_diff in mmzone.h), so the real
 * implementations live out of line.  NOTE(review): the __-prefixed
 * forms presumably require the caller to have interrupts disabled,
 * per kernel convention -- confirm against the out-of-line bodies.
 */
void __mod_zone_page_state(struct zone *, enum zone_stat_item item, int);
void __inc_zone_page_state(struct page *, enum zone_stat_item);
void __dec_zone_page_state(struct page *, enum zone_stat_item);

void mod_zone_page_state(struct zone *, enum zone_stat_item, int);
void inc_zone_page_state(struct page *, enum zone_stat_item);
void dec_zone_page_state(struct page *, enum zone_stat_item);

extern void inc_zone_state(struct zone *, enum zone_stat_item);

/* Fold a cpu's buffered differentials back into the real counters. */
void refresh_cpu_vm_stats(int);
void refresh_vm_stats(void);

#else /* CONFIG_SMP */

/*
 * We do not maintain differentials in a single processor configuration.
 * The functions directly modify the zone and global counters.
 */
static inline void __mod_zone_page_state(struct zone *zone,
			enum zone_stat_item item, int delta)
{
	zone_page_state_add(delta, zone, item);
}

/* Increment @item for the zone that @page belongs to, and globally. */
static inline void __inc_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	atomic_long_inc(&page_zone(page)->vm_stat[item]);
	atomic_long_inc(&vm_stat[item]);
}

/* Decrement @item for the zone that @page belongs to, and globally. */
static inline void __dec_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	atomic_long_dec(&page_zone(page)->vm_stat[item]);
	atomic_long_dec(&vm_stat[item]);
}

/*
 * We only use atomic operations to update counters. So there is no need to
 * disable interrupts.
 */
#define inc_zone_page_state __inc_zone_page_state
#define dec_zone_page_state __dec_zone_page_state
#define mod_zone_page_state __mod_zone_page_state

/* No per-cpu differentials on UP, so the refresh hooks are no-ops. */
static inline void refresh_cpu_vm_stats(int cpu) { }
static inline void refresh_vm_stats(void) { }
#endif

#endif /* _LINUX_VMSTAT_H */
2 changes: 2 additions & 0 deletions trunk/mm/page_alloc.c
Original file line number Diff line number Diff line change
Expand Up @@ -2045,6 +2045,7 @@ static void __meminit free_area_init_core(struct pglist_data *pgdat,
zone->nr_scan_inactive = 0;
zone->nr_active = 0;
zone->nr_inactive = 0;
zap_zone_vm_stats(zone);
atomic_set(&zone->reclaim_in_progress, 0);
if (!size)
continue;
Expand Down Expand Up @@ -2147,6 +2148,7 @@ static int page_alloc_cpu_notify(struct notifier_block *self,
}

local_irq_enable();
refresh_cpu_vm_stats(cpu);
}
return NOTIFY_OK;
}
Expand Down
1 change: 1 addition & 0 deletions trunk/mm/slab.c
Original file line number Diff line number Diff line change
Expand Up @@ -3763,6 +3763,7 @@ static void cache_reap(void *unused)
check_irq_on();
mutex_unlock(&cache_chain_mutex);
next_reap_node();
refresh_cpu_vm_stats(smp_processor_id());
/* Set up the next iteration */
schedule_delayed_work(&__get_cpu_var(reap_work), REAPTIMEOUT_CPUC);
}
Expand Down
Loading

0 comments on commit 40803ae

Please sign in to comment.