[S390] zero page cache synonyms

If the zero page is mapped to virtual user-space addresses that differ
only in bit 2^12 or 2^13, we get L1 cache synonyms, which can affect
performance. Follow the MIPS model and use multiple zero pages to avoid
the synonyms.

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Martin Schwidefsky authored and committed on Oct 25, 2010
1 parent 229aebb commit 238ec4e
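
The fix backs ZERO_PAGE with a small power-of-two set of physical zero pages and selects one using the low "colour" bits of the virtual address, so two mappings whose addresses differ only in bits 12 and 13 resolve to different physical pages and cannot alias in the L1 cache. A minimal sketch of the selection arithmetic (plain user-space C, not kernel code; the two addresses are invented for illustration):

    /*
     * Sketch: with four zero pages, zero_page_mask covers bits 12-13,
     * so addresses differing only in those bits pick different pages.
     */
    #include <stdio.h>

    #define PAGE_SHIFT  12
    #define PAGE_SIZE   (1UL << PAGE_SHIFT)
    #define PAGE_MASK   (~(PAGE_SIZE - 1))

    int main(void)
    {
            unsigned long size = PAGE_SIZE << 2;    /* four pages */
            unsigned long zero_page_mask = (size - 1) & PAGE_MASK;
            unsigned long va1 = 0x20001000UL;       /* bit 12 set */
            unsigned long va2 = 0x20002000UL;       /* bit 13 set */

            /* index of the zero page each address would map to */
            printf("mask=%#lx: va1 -> page %lu, va2 -> page %lu\n",
                   zero_page_mask,
                   (va1 & zero_page_mask) >> PAGE_SHIFT,
                   (va2 & zero_page_mask) >> PAGE_SHIFT);
            return 0;
    }

This prints mask=0x3000 and page indexes 1 and 2: the two addresses land on different zero pages, so the synonym described above cannot occur.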
Showing 2 changed files with 64 additions and 7 deletions.
arch/s390/include/asm/pgtable.h (22 changes: 19 additions & 3 deletions)
@@ -46,11 +46,27 @@ extern void vmem_map_init(void);
 #define update_mmu_cache(vma, address, ptep) do { } while (0)
 
 /*
- * ZERO_PAGE is a global shared page that is always zero: used
+ * ZERO_PAGE is a global shared page that is always zero; used
  * for zero-mapped memory areas etc..
  */
-extern char empty_zero_page[PAGE_SIZE];
-#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
+
+extern unsigned long empty_zero_page;
+extern unsigned long zero_page_mask;
+
+#define ZERO_PAGE(vaddr) \
+	(virt_to_page((void *)(empty_zero_page + \
+	 (((unsigned long)(vaddr)) & zero_page_mask))))
+
+#define is_zero_pfn is_zero_pfn
+static inline int is_zero_pfn(unsigned long pfn)
+{
+	extern unsigned long zero_pfn;
+	unsigned long offset_from_zero_pfn = pfn - zero_pfn;
+	return offset_from_zero_pfn <= (zero_page_mask >> PAGE_SHIFT);
+}
+
+#define my_zero_pfn(addr) page_to_pfn(ZERO_PAGE(addr))
+
 #endif /* !__ASSEMBLY__ */
 
 /*
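
One subtlety in the new is_zero_pfn(): the subtraction is performed on unsigned longs, so for any pfn below zero_pfn the offset wraps around to a huge value and the comparison fails; a single compare therefore tests both ends of the range [zero_pfn, zero_pfn + (zero_page_mask >> PAGE_SHIFT)]. A self-contained demonstration (the concrete zero_pfn and zero_page_mask values here are invented for the example):

    /*
     * Sketch of the is_zero_pfn() range check. Unsigned underflow
     * makes pfn < zero_pfn produce a huge offset, so one compare
     * covers both bounds of the four-page range.
     */
    #include <assert.h>

    #define PAGE_SHIFT  12

    static unsigned long zero_pfn = 0x1000;       /* first zero page */
    static unsigned long zero_page_mask = 0x3000; /* bits 12 and 13 */

    static int is_zero_pfn(unsigned long pfn)
    {
            unsigned long offset_from_zero_pfn = pfn - zero_pfn;
            return offset_from_zero_pfn <= (zero_page_mask >> PAGE_SHIFT);
    }

    int main(void)
    {
            assert(is_zero_pfn(0x1000) && is_zero_pfn(0x1003));   /* in range */
            assert(!is_zero_pfn(0x0fff) && !is_zero_pfn(0x1004)); /* outside */
            return 0;
    }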
arch/s390/mm/init.c (49 changes: 45 additions & 4 deletions)
@@ -42,9 +42,52 @@ DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
 
 pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__((__aligned__(PAGE_SIZE)));
 
-char empty_zero_page[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));
+unsigned long empty_zero_page, zero_page_mask;
 EXPORT_SYMBOL(empty_zero_page);
 
+static unsigned long setup_zero_pages(void)
+{
+	struct cpuid cpu_id;
+	unsigned int order;
+	unsigned long size;
+	struct page *page;
+	int i;
+
+	get_cpu_id(&cpu_id);
+	switch (cpu_id.machine) {
+	case 0x9672:	/* g5 */
+	case 0x2064:	/* z900 */
+	case 0x2066:	/* z900 */
+	case 0x2084:	/* z990 */
+	case 0x2086:	/* z990 */
+	case 0x2094:	/* z9-109 */
+	case 0x2096:	/* z9-109 */
+		order = 0;
+		break;
+	case 0x2097:	/* z10 */
+	case 0x2098:	/* z10 */
+	default:
+		order = 2;
+		break;
+	}
+
+	empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
+	if (!empty_zero_page)
+		panic("Out of memory in setup_zero_pages");
+
+	page = virt_to_page((void *) empty_zero_page);
+	split_page(page, order);
+	for (i = 1 << order; i > 0; i--) {
+		SetPageReserved(page);
+		page++;
+	}
+
+	size = PAGE_SIZE << order;
+	zero_page_mask = (size - 1) & PAGE_MASK;
+
+	return 1UL << order;
+}
+
 /*
  * paging_init() sets up the page tables
  */
@@ -92,14 +92,12 @@ void __init mem_init(void)
 	max_mapnr = num_physpages = max_low_pfn;
 	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
 
-	/* clear the zero-page */
-	memset(empty_zero_page, 0, PAGE_SIZE);
-
 	/* Setup guest page hinting */
 	cmma_init();
 
 	/* this will put all low memory onto the freelists */
 	totalram_pages += free_all_bootmem();
+	totalram_pages -= setup_zero_pages();	/* Setup zeroed pages. */
 
 	reservedpages = 0;
 
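setup_zero_pages() sizes the set by machine type: the older machines in the switch get order 0 (one page, so zero_page_mask is 0 and ZERO_PAGE degenerates to the previous single-page behaviour), while z10 and newer machine types, plus the default case, get order 2, i.e. four pages. The higher-order allocation is broken into individual pages with split_page() and each page is marked reserved so the buddy allocator never reclaims it; mem_init() subtracts the returned count from totalram_pages to keep the accounting straight. The mask arithmetic can be checked in isolation (standalone sketch, not kernel code):

    /*
     * Worked check of zero_page_mask = (size - 1) & PAGE_MASK for the
     * orders used by setup_zero_pages().
     */
    #include <stdio.h>

    #define PAGE_SHIFT  12
    #define PAGE_SIZE   (1UL << PAGE_SHIFT)
    #define PAGE_MASK   (~(PAGE_SIZE - 1))

    int main(void)
    {
            unsigned int order;

            for (order = 0; order <= 2; order++) {
                    unsigned long size = PAGE_SIZE << order;
                    unsigned long mask = (size - 1) & PAGE_MASK;

                    printf("order %u: %lu page(s), zero_page_mask=%#lx\n",
                           order, 1UL << order, mask);
            }
            return 0;
    }

For order 2 this yields zero_page_mask = 0x3000: exactly the 2^12 and 2^13 bits named in the commit message.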
