Commit e004795
yaml
---
r: 356195
b: refs/heads/master
c: 66520eb
h: refs/heads/master
i:
  356193: 051d0f1
  356191: 697963c
v: v3
Jacob Shin authored and H. Peter Anvin committed Nov 17, 2012
1 parent fcbed99 commit e004795
Showing 5 changed files with 118 additions and 26 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: e8c57d40519d7226acb8e662f3ab496202ebc7a6
+refs/heads/master: 66520ebc2df3fe52eb4792f8101fac573b766baf
8 changes: 1 addition & 7 deletions trunk/arch/x86/include/asm/page_types.h
@@ -51,13 +51,7 @@ static inline phys_addr_t get_max_mapped(void)
         return (phys_addr_t)max_pfn_mapped << PAGE_SHIFT;
 }
 
-static inline bool pfn_range_is_mapped(unsigned long start_pfn,
-                                       unsigned long end_pfn)
-{
-        return end_pfn <= max_low_pfn_mapped ||
-               (end_pfn > (1UL << (32 - PAGE_SHIFT)) &&
-                end_pfn <= max_pfn_mapped);
-}
+bool pfn_range_is_mapped(unsigned long start_pfn, unsigned long end_pfn);
 
 extern unsigned long init_memory_mapping(unsigned long start,
                                          unsigned long end);
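
The header change converts pfn_range_is_mapped() from an inline check against
two watermark pfns into an out-of-line lookup over explicitly tracked ranges
(the new definition lives in arch/x86/mm/init.c below). A minimal sketch of a
caller, purely for illustration; the function name and its error handling are
hypothetical, not part of this commit:

/* Hypothetical caller: only touch a physical range through the direct
 * mapping once init.c has actually recorded it as mapped. */
static int __init example_probe_range(unsigned long start_pfn,
                                      unsigned long end_pfn)
{
        if (!pfn_range_is_mapped(start_pfn, end_pfn))
                return -ENXIO;  /* range not covered by the direct map */

        /* safe to access __va(start_pfn << PAGE_SHIFT) up to end_pfn */
        return 0;
}
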
8 changes: 5 additions & 3 deletions trunk/arch/x86/kernel/setup.c
@@ -116,9 +116,11 @@
 #include <asm/prom.h>
 
 /*
- * end_pfn only includes RAM, while max_pfn_mapped includes all e820 entries.
- * The direct mapping extends to max_pfn_mapped, so that we can directly access
- * apertures, ACPI and other tables without having to play with fixmaps.
+ * max_low_pfn_mapped: highest direct mapped pfn under 4GB
+ * max_pfn_mapped:     highest direct mapped pfn over 4GB
+ *
+ * The direct mapping only covers E820_RAM regions, so the ranges and gaps are
+ * represented by pfn_mapped
 */
 unsigned long max_low_pfn_mapped;
 unsigned long max_pfn_mapped;
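
The two watermarks split at the 4GB boundary, whose pfn is
1UL << (32 - PAGE_SHIFT). A small user-space sketch of the update rule that
add_pfn_range_mapped() (added to arch/x86/mm/init.c below) applies; the
constants and demo ranges are illustrative only:

#include <stdio.h>

#define PAGE_SHIFT      12
#define PFN_4G          (1UL << (32 - PAGE_SHIFT))  /* pfn of the 4GB line */

static unsigned long max_low_pfn_mapped, max_pfn_mapped;

/* Same rule as the kernel's add_pfn_range_mapped(): max_pfn_mapped tracks
 * the highest mapped pfn overall, while max_low_pfn_mapped only grows for
 * ranges starting below 4GB, clipped to the 4GB boundary. */
static void update_watermarks(unsigned long start_pfn, unsigned long end_pfn)
{
        if (end_pfn > max_pfn_mapped)
                max_pfn_mapped = end_pfn;
        if (start_pfn < PFN_4G) {
                unsigned long low = end_pfn < PFN_4G ? end_pfn : PFN_4G;
                if (low > max_low_pfn_mapped)
                        max_low_pfn_mapped = low;
        }
}

int main(void)
{
        update_watermarks(0x100, 0x80000);      /* 1MB .. 2GB */
        update_watermarks(0x100000, 0x180000);  /* 4GB .. 6GB */
        printf("low %#lx, high %#lx\n", max_low_pfn_mapped, max_pfn_mapped);
        return 0;       /* prints: low 0x80000, high 0x180000 */
}
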
120 changes: 109 additions & 11 deletions trunk/arch/x86/mm/init.c
@@ -243,6 +243,38 @@ static unsigned long __init calculate_table_space_size(unsigned long start, unsigned long end)
         return tables;
 }
 
+static unsigned long __init calculate_all_table_space_size(void)
+{
+        unsigned long start_pfn, end_pfn;
+        unsigned long tables;
+        int i;
+
+        /* the ISA range is always mapped regardless of memory holes */
+        tables = calculate_table_space_size(0, ISA_END_ADDRESS);
+
+        for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, NULL) {
+                u64 start = start_pfn << PAGE_SHIFT;
+                u64 end = end_pfn << PAGE_SHIFT;
+
+                if (end <= ISA_END_ADDRESS)
+                        continue;
+
+                if (start < ISA_END_ADDRESS)
+                        start = ISA_END_ADDRESS;
+#ifdef CONFIG_X86_32
+                /* on 32 bit, we only map up to max_low_pfn */
+                if ((start >> PAGE_SHIFT) >= max_low_pfn)
+                        continue;
+
+                if ((end >> PAGE_SHIFT) > max_low_pfn)
+                        end = max_low_pfn << PAGE_SHIFT;
+#endif
+                tables += calculate_table_space_size(start, end);
+        }
+
+        return tables;
+}
+
 static void __init find_early_table_space(unsigned long start,
                                           unsigned long good_end,
                                           unsigned long tables)
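
calculate_all_table_space_size() clips each RAM range against the
always-mapped ISA window before accounting it. A user-space sketch of the same
clip-and-sum walk over a made-up memory map; the stub stands in for
calculate_table_space_size(), which this hunk calls but does not show:

#include <stdio.h>

#define ISA_END_ADDRESS 0x100000UL      /* 1MB, end of the legacy ISA window */

struct mem_range { unsigned long long start, end; };    /* byte addresses */

/* Stand-in for calculate_table_space_size(): just return the range size
 * so the clipping logic is easy to check by eye. */
static unsigned long long table_space_stub(unsigned long long start,
                                           unsigned long long end)
{
        return end - start;
}

int main(void)
{
        /* Fake E820_RAM layout with a hole between 2GB and 4GB. */
        struct mem_range map[] = {
                { 0x0,            0x80000000ULL },      /* 0 .. 2GB */
                { 0x100000000ULL, 0x180000000ULL },     /* 4GB .. 6GB */
        };
        unsigned long long tables;
        unsigned int i;

        /* the ISA range is always accounted, regardless of holes */
        tables = table_space_stub(0, ISA_END_ADDRESS);

        for (i = 0; i < sizeof(map) / sizeof(map[0]); i++) {
                unsigned long long start = map[i].start;
                unsigned long long end = map[i].end;

                if (end <= ISA_END_ADDRESS)
                        continue;       /* fully inside the ISA window */
                if (start < ISA_END_ADDRESS)
                        start = ISA_END_ADDRESS;        /* no double count */

                tables += table_space_stub(start, end);
        }

        printf("accounted bytes: %#llx\n", tables);     /* 2GB + 2GB */
        return 0;
}
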
@@ -258,6 +290,34 @@ static void __init find_early_table_space(unsigned long start,
         pgt_buf_top = pgt_buf_start + (tables >> PAGE_SHIFT);
 }
 
+static struct range pfn_mapped[E820_X_MAX];
+static int nr_pfn_mapped;
+
+static void add_pfn_range_mapped(unsigned long start_pfn, unsigned long end_pfn)
+{
+        nr_pfn_mapped = add_range_with_merge(pfn_mapped, E820_X_MAX,
+                                             nr_pfn_mapped, start_pfn, end_pfn);
+        nr_pfn_mapped = clean_sort_range(pfn_mapped, E820_X_MAX);
+
+        max_pfn_mapped = max(max_pfn_mapped, end_pfn);
+
+        if (start_pfn < (1UL<<(32-PAGE_SHIFT)))
+                max_low_pfn_mapped = max(max_low_pfn_mapped,
+                                         min(end_pfn, 1UL<<(32-PAGE_SHIFT)));
+}
+
+bool pfn_range_is_mapped(unsigned long start_pfn, unsigned long end_pfn)
+{
+        int i;
+
+        for (i = 0; i < nr_pfn_mapped; i++)
+                if ((start_pfn >= pfn_mapped[i].start) &&
+                    (end_pfn <= pfn_mapped[i].end))
+                        return true;
+
+        return false;
+}
+
 /*
  * Setup the direct mapping of the physical memory at PAGE_OFFSET.
  * This runs before bootmem is initialized and gets pages directly from
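
add_range_with_merge() and clean_sort_range() come from kernel/range.c. A
self-contained user-space sketch of the same bookkeeping, with a deliberately
simplified merge, shows how pfn_range_is_mapped() answers across merged ranges
and holes (pfn values are illustrative):

#include <stdbool.h>
#include <stdio.h>

#define MAX_RANGES      8       /* stand-in for E820_X_MAX */

struct range { unsigned long start, end; };     /* [start, end) in pfns */

static struct range pfn_mapped[MAX_RANGES];
static int nr_pfn_mapped;

/* Simplified stand-in for add_range_with_merge() + clean_sort_range():
 * append the range, then merge overlapping or adjacent entries. */
static void add_pfn_range_mapped(unsigned long start, unsigned long end)
{
        int i, j;

        pfn_mapped[nr_pfn_mapped].start = start;
        pfn_mapped[nr_pfn_mapped].end = end;
        nr_pfn_mapped++;

restart:
        for (i = 0; i < nr_pfn_mapped; i++)
                for (j = i + 1; j < nr_pfn_mapped; j++)
                        if (pfn_mapped[i].start <= pfn_mapped[j].end &&
                            pfn_mapped[j].start <= pfn_mapped[i].end) {
                                if (pfn_mapped[j].start < pfn_mapped[i].start)
                                        pfn_mapped[i].start = pfn_mapped[j].start;
                                if (pfn_mapped[j].end > pfn_mapped[i].end)
                                        pfn_mapped[i].end = pfn_mapped[j].end;
                                pfn_mapped[j] = pfn_mapped[nr_pfn_mapped - 1];
                                nr_pfn_mapped--;
                                goto restart;   /* rescan after each merge */
                        }
}

/* Same shape as the new pfn_range_is_mapped(): a range counts as mapped
 * only if one tracked entry covers all of it. */
static bool pfn_range_is_mapped(unsigned long start, unsigned long end)
{
        int i;

        for (i = 0; i < nr_pfn_mapped; i++)
                if (start >= pfn_mapped[i].start && end <= pfn_mapped[i].end)
                        return true;
        return false;
}

int main(void)
{
        add_pfn_range_mapped(0x000, 0x100);
        add_pfn_range_mapped(0x100, 0x200);     /* merges with the first */
        add_pfn_range_mapped(0x400, 0x500);     /* leaves a hole */

        printf("%d\n", pfn_range_is_mapped(0x080, 0x180)); /* 1: merged */
        printf("%d\n", pfn_range_is_mapped(0x180, 0x480)); /* 0: spans hole */
        return 0;
}
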
@@ -288,9 +348,55 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
 
         __flush_tlb_all();
 
+        add_pfn_range_mapped(start >> PAGE_SHIFT, ret >> PAGE_SHIFT);
+
         return ret >> PAGE_SHIFT;
 }
 
+/*
+ * Iterate through E820 memory map and create direct mappings for only E820_RAM
+ * regions. We cannot simply create direct mappings for all pfns from
+ * [0 to max_low_pfn) and [4GB to max_pfn) because of possible memory holes in
+ * high addresses that cannot be marked as UC by fixed/variable range MTRRs.
+ * Depending on the alignment of E820 ranges, this may possibly result in using
+ * smaller size (i.e. 4K instead of 2M or 1G) page tables.
+ */
+static void __init init_all_memory_mapping(void)
+{
+        unsigned long start_pfn, end_pfn;
+        int i;
+
+        /* the ISA range is always mapped regardless of memory holes */
+        init_memory_mapping(0, ISA_END_ADDRESS);
+
+        for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, NULL) {
+                u64 start = (u64)start_pfn << PAGE_SHIFT;
+                u64 end = (u64)end_pfn << PAGE_SHIFT;
+
+                if (end <= ISA_END_ADDRESS)
+                        continue;
+
+                if (start < ISA_END_ADDRESS)
+                        start = ISA_END_ADDRESS;
+#ifdef CONFIG_X86_32
+                /* on 32 bit, we only map up to max_low_pfn */
+                if ((start >> PAGE_SHIFT) >= max_low_pfn)
+                        continue;
+
+                if ((end >> PAGE_SHIFT) > max_low_pfn)
+                        end = max_low_pfn << PAGE_SHIFT;
+#endif
+                init_memory_mapping(start, end);
+        }
+
+#ifdef CONFIG_X86_64
+        if (max_pfn > max_low_pfn) {
+                /* can we preserve max_low_pfn? */
+                max_low_pfn = max_pfn;
+        }
+#endif
+}
+
 void __init init_mem_mapping(void)
 {
         unsigned long tables, good_end, end;
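
The comment above init_all_memory_mapping() notes that unaligned E820 ranges
can force smaller page sizes. A quick user-space illustration of that split
into a 4K head, a 2MB-mapped body, and a 4K tail (addresses are made up):

#include <stdio.h>

#define PMD_SHIFT       21      /* 2MB large pages on x86 */
#define PMD_SIZE        (1UL << PMD_SHIFT)
#define PMD_MASK        (~(PMD_SIZE - 1))

int main(void)
{
        unsigned long start = 0x1ff000;         /* not 2MB aligned */
        unsigned long end   = 0x40123000;
        unsigned long big_start = (start + PMD_SIZE - 1) & PMD_MASK;
        unsigned long big_end   = end & PMD_MASK;

        printf("4K head : %#lx-%#lx\n", start, big_start);
        printf("2M body : %#lx-%#lx (%lu large pages)\n",
               big_start, big_end, (big_end - big_start) >> PMD_SHIFT);
        printf("4K tail : %#lx-%#lx\n", big_end, end);
        return 0;
}
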
@@ -311,23 +417,15 @@ void __init init_mem_mapping(void)
         end = max_low_pfn << PAGE_SHIFT;
         good_end = max_pfn_mapped << PAGE_SHIFT;
 #endif
-        tables = calculate_table_space_size(0, end);
+        tables = calculate_all_table_space_size();
         find_early_table_space(0, good_end, tables);
         printk(KERN_DEBUG "kernel direct mapping tables up to %#lx @ [mem %#010lx-%#010lx] prealloc\n",
                end - 1, pgt_buf_start << PAGE_SHIFT,
                (pgt_buf_top << PAGE_SHIFT) - 1);
 
-        max_low_pfn_mapped = init_memory_mapping(0, max_low_pfn<<PAGE_SHIFT);
-        max_pfn_mapped = max_low_pfn_mapped;
+        max_pfn_mapped = 0; /* will get exact value next */
+        init_all_memory_mapping();
 
-#ifdef CONFIG_X86_64
-        if (max_pfn > max_low_pfn) {
-                max_pfn_mapped = init_memory_mapping(1UL<<32,
-                                                     max_pfn<<PAGE_SHIFT);
-                /* can we preserve max_low_pfn? */
-                max_low_pfn = max_pfn;
-        }
-#endif
         /*
          * Reserve the kernel pagetable pages we used (pgt_buf_start -
          * pgt_buf_end) and free the other ones (pgt_buf_end - pgt_buf_top)
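
The reworked init_mem_mapping() follows an estimate-reserve-commit shape: size
all page tables up front, reserve the buffer once, then build the real
mappings while add_pfn_range_mapped() rebuilds max_pfn_mapped from zero. A
stripped-down schematic of that control flow; every name and value here is a
placeholder, not kernel code:

#include <stdio.h>

static unsigned long estimate_table_space(void) { return 4096UL * 513; }
static void reserve_buffer(unsigned long bytes) { printf("reserve %lu\n", bytes); }
static void map_all_ram(void)                   { printf("map RAM ranges\n"); }

int main(void)
{
        unsigned long tables;

        tables = estimate_table_space();        /* pass 1: worst-case sizing */
        reserve_buffer(tables);                 /* one early reservation */
        map_all_ram();                          /* pass 2: real work, tracked
                                                   per range as it happens */
        return 0;
}
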
6 changes: 2 additions & 4 deletions trunk/arch/x86/mm/init_64.c
@@ -662,13 +662,11 @@ int arch_add_memory(int nid, u64 start, u64 size)
 {
         struct pglist_data *pgdat = NODE_DATA(nid);
         struct zone *zone = pgdat->node_zones + ZONE_NORMAL;
-        unsigned long last_mapped_pfn, start_pfn = start >> PAGE_SHIFT;
+        unsigned long start_pfn = start >> PAGE_SHIFT;
         unsigned long nr_pages = size >> PAGE_SHIFT;
         int ret;
 
-        last_mapped_pfn = init_memory_mapping(start, start + size);
-        if (last_mapped_pfn > max_pfn_mapped)
-                max_pfn_mapped = last_mapped_pfn;
+        init_memory_mapping(start, start + size);
 
         ret = __add_pages(nid, zone, start_pfn, nr_pages);
         WARN_ON_ONCE(ret);
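
With add_pfn_range_mapped() now called inside init_memory_mapping(), hotplug
paths such as arch_add_memory() no longer maintain max_pfn_mapped themselves,
so callers cannot drift out of sync. A toy sketch of that
worker-does-its-own-bookkeeping pattern (names and values are illustrative):

#include <stdio.h>

static unsigned long max_pfn_mapped;    /* global watermark */

/* Worker that records its own side effects, so every caller stays
 * consistent automatically (the pattern this hunk adopts): */
static unsigned long map_range(unsigned long start_pfn, unsigned long end_pfn)
{
        /* ... create the mapping ... */
        if (end_pfn > max_pfn_mapped)
                max_pfn_mapped = end_pfn;       /* bookkeeping lives here now */
        return end_pfn;
}

int main(void)
{
        map_range(0x100, 0x200);
        map_range(0x400, 0x500);
        printf("max_pfn_mapped = %#lx\n", max_pfn_mapped);      /* 0x500 */
        return 0;
}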
