x86, cpa: don't use large pages for kernel identity mapping with DEBUG_PAGEALLOC
Don't use large pages for the kernel identity mapping when
DEBUG_PAGEALLOC is enabled. This removes the need to split a large page
for an allocated kernel page while in interrupt context (a sketch of
that path follows the change summary below).

This simplifies the cpa code, since we no longer do the split from
interrupt context; the cpa simplifications themselves come in
subsequent patches.

Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Cc: Suresh Siddha <suresh.b.siddha@intel.com>
Cc: arjan@linux.intel.com
Cc: venkatesh.pallipadi@intel.com
Cc: jeremy@goop.org
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Suresh Siddha authored and Ingo Molnar committed Oct 10, 2008
1 parent a2699e4 commit 0b8fdcb
Showing 2 changed files with 34 additions and 10 deletions.
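
For background: CONFIG_DEBUG_PAGEALLOC unmaps pages from the kernel
identity mapping when they are freed and remaps them on allocation, and
the free path can run in interrupt context. The sketch below is a
paraphrase of the era's kernel_map_pages() hook in
arch/x86/mm/pageattr.c (simplified, not the verbatim source); it shows
why any cpa work triggered here must be interrupt-safe, and why a
4k-only identity mapping means no large page ever has to be split on
this path.

/*
 * Paraphrased sketch of the DEBUG_PAGEALLOC hook (modeled on
 * kernel_map_pages() in arch/x86/mm/pageattr.c around 2.6.27;
 * simplified, not the verbatim kernel source).
 */
#ifdef CONFIG_DEBUG_PAGEALLOC
void kernel_map_pages(struct page *page, int numpages, int enable)
{
        if (PageHighMem(page))
                return;

        /*
         * Called from the page allocator, potentially in interrupt
         * context: toggling _PAGE_PRESENT on a 4k page must not
         * require splitting a covering 2M/1G mapping here.
         */
        if (enable)
                __set_pages_p(page, numpages);
        else
                __set_pages_np(page, numpages);

        /* No cross-CPU IPI from this context; flush only the local TLB. */
        __flush_tlb_all();
}
#endif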
arch/x86/mm/init_32.c (14 additions, 4 deletions):

--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -777,7 +777,7 @@ void __init setup_bootmem_allocator(void)
         after_init_bootmem = 1;
 }

-static void __init find_early_table_space(unsigned long end)
+static void __init find_early_table_space(unsigned long end, int use_pse)
 {
         unsigned long puds, pmds, ptes, tables, start;

@@ -787,7 +787,7 @@ static void __init find_early_table_space(unsigned long end)
         pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
         tables += PAGE_ALIGN(pmds * sizeof(pmd_t));

-        if (cpu_has_pse) {
+        if (use_pse) {
                 unsigned long extra;

                 extra = end - ((end>>PMD_SHIFT) << PMD_SHIFT);
@@ -827,12 +827,22 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
         pgd_t *pgd_base = swapper_pg_dir;
         unsigned long start_pfn, end_pfn;
         unsigned long big_page_start;
+#ifdef CONFIG_DEBUG_PAGEALLOC
+        /*
+         * For CONFIG_DEBUG_PAGEALLOC, identity mapping will use small pages.
+         * This will simplify cpa(), which otherwise needs to support splitting
+         * large pages into small in interrupt context, etc.
+         */
+        int use_pse = 0;
+#else
+        int use_pse = cpu_has_pse;
+#endif

         /*
          * Find space for the kernel direct mapping tables.
          */
         if (!after_init_bootmem)
-                find_early_table_space(end);
+                find_early_table_space(end, use_pse);

 #ifdef CONFIG_X86_PAE
         set_nx();
@@ -878,7 +888,7 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
                 end_pfn = (end>>PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
                 if (start_pfn < end_pfn)
                         kernel_physical_mapping_init(pgd_base, start_pfn, end_pfn,
-                                                     cpu_has_pse);
+                                                     use_pse);

                 /* tail is not big page alignment ? */
                 start_pfn = end_pfn;
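
To make the sizing above concrete, here is a standalone illustration
(userspace C with assumed constants, not kernel code; the kernel's real
computation also pads for alignment slack) of how many 4k PTE
reservations the use_pse path saves:

#include <stdio.h>

#define PAGE_SHIFT      12
#define PMD_SHIFT       21
#define PAGE_SIZE       (1UL << PAGE_SHIFT)     /* 4 KiB */
#define PMD_SIZE        (1UL << PMD_SHIFT)      /* 2 MiB */

int main(void)
{
        unsigned long end = (1UL << 30) + (3UL << 20);  /* map 1 GiB + 3 MiB */
        unsigned long extra, ptes_pse, ptes_4k;

        /* with 2M pages, only the tail past the last 2M boundary needs PTEs */
        extra = end - ((end >> PMD_SHIFT) << PMD_SHIFT);
        ptes_pse = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;

        /* without PSE (e.g. DEBUG_PAGEALLOC), every 4k page needs a PTE */
        ptes_4k = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;

        printf("PTEs with use_pse: %lu, without: %lu\n", ptes_pse, ptes_4k);
        return 0;
}

This prints 256 versus 262912: with DEBUG_PAGEALLOC forcing use_pse to
0, find_early_table_space() must reserve roughly 1 MiB of PTEs per
mapped GiB (2 MiB with PAE's 8-byte entries) instead of a few pages for
the unaligned tail.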
arch/x86/mm/init_64.c (20 additions, 6 deletions):

--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -456,21 +456,22 @@ phys_pud_update(pgd_t *pgd, unsigned long addr, unsigned long end,
         return phys_pud_init(pud, addr, end, page_size_mask);
 }

-static void __init find_early_table_space(unsigned long end)
+static void __init find_early_table_space(unsigned long end, int use_pse,
+                                          int use_gbpages)
 {
         unsigned long puds, pmds, ptes, tables, start;

         puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
         tables = round_up(puds * sizeof(pud_t), PAGE_SIZE);
-        if (direct_gbpages) {
+        if (use_gbpages) {
                 unsigned long extra;
                 extra = end - ((end>>PUD_SHIFT) << PUD_SHIFT);
                 pmds = (extra + PMD_SIZE - 1) >> PMD_SHIFT;
         } else
                 pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
         tables += round_up(pmds * sizeof(pmd_t), PAGE_SIZE);

-        if (cpu_has_pse) {
+        if (use_pse) {
                 unsigned long extra;
                 extra = end - ((end>>PMD_SHIFT) << PMD_SHIFT);
                 ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;
@@ -640,6 +641,7 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,

         struct map_range mr[NR_RANGE_MR];
         int nr_range, i;
+        int use_pse, use_gbpages;

         printk(KERN_INFO "init_memory_mapping\n");

@@ -653,9 +655,21 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
         if (!after_bootmem)
                 init_gbpages();

-        if (direct_gbpages)
+#ifdef CONFIG_DEBUG_PAGEALLOC
+        /*
+         * For CONFIG_DEBUG_PAGEALLOC, identity mapping will use small pages.
+         * This will simplify cpa(), which otherwise needs to support splitting
+         * large pages into small in interrupt context, etc.
+         */
+        use_pse = use_gbpages = 0;
+#else
+        use_pse = cpu_has_pse;
+        use_gbpages = direct_gbpages;
+#endif
+
+        if (use_gbpages)
                 page_size_mask |= 1 << PG_LEVEL_1G;
-        if (cpu_has_pse)
+        if (use_pse)
                 page_size_mask |= 1 << PG_LEVEL_2M;

         memset(mr, 0, sizeof(mr));
@@ -716,7 +730,7 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
                         (mr[i].page_size_mask & (1<<PG_LEVEL_2M))?"2M":"4k"));

         if (!after_bootmem)
-                find_early_table_space(end);
+                find_early_table_space(end, use_pse, use_gbpages);

         for (i = 0; i < nr_range; i++)
                 last_map_addr = kernel_physical_mapping_init(
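
On 64-bit the decision flows through page_size_mask rather than a bare
flag. Below is a minimal hedged sketch (simplified, assumed names; not
the kernel's actual phys_pmd_init()) of how a mapping loop consumes
that mask:

#include <stdbool.h>
#include <stdio.h>

#define PG_LEVEL_2M     2
#define PMD_SIZE        (1UL << 21)     /* 2 MiB */
#define PAGE_SIZE       (1UL << 12)     /* 4 KiB */

/* Map [addr, end) with 2 MiB steps only where the mask and alignment allow. */
static unsigned long map_range(unsigned long addr, unsigned long end,
                               unsigned long page_size_mask)
{
        unsigned long large = 0;

        while (addr < end) {
                bool use_2m = (page_size_mask & (1UL << PG_LEVEL_2M)) &&
                              !(addr & (PMD_SIZE - 1)) &&
                              addr + PMD_SIZE <= end;
                /* a real implementation would call set_pmd()/set_pte() here */
                if (use_2m)
                        large++;
                addr += use_2m ? PMD_SIZE : PAGE_SIZE;
        }
        return large;
}

int main(void)
{
        /* with the 2M bit clear (the DEBUG_PAGEALLOC case), no large pages */
        printf("2M mappings: %lu\n",
               map_range(0, 8 * PMD_SIZE, 1UL << PG_LEVEL_2M));
        printf("2M mappings: %lu\n", map_range(0, 8 * PMD_SIZE, 0));
        return 0;
}

Clearing the PG_LEVEL_2M/PG_LEVEL_1G bits up front, as this patch does
under DEBUG_PAGEALLOC, therefore guarantees the identity mapping is
built entirely from 4k entries without touching the mapping code
itself.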
