x86: use __page_aligned_data/bss
Update arch/x86's use of page-aligned variables.  The change to
arch/x86/xen/mmu.c fixes an actual bug, but the rest are cleanups
and to set a precedent.

Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Cc: Stephen Tweedie <sct@redhat.com>
Cc: Eduardo Habkost <ehabkost@redhat.com>
Cc: Mark McLoughlin <markmc@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Jeremy Fitzhardinge authored and Ingo Molnar committed Jul 16, 2008
1 parent 87b935a · commit cbcd79c
Showing 3 changed files with 10 additions and 16 deletions.
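For context on the helpers this commit switches to: __page_aligned_data and __page_aligned_bss come from include/linux/linkage.h, which is why common_64.c and mmu.c gain an #include <asm/linkage.h> below. A minimal sketch of what the macros expand to, assuming the definitions from kernels of this era (the exact text should be checked against the tree at this commit):

/* Sketch, not verbatim from the tree: the helpers pair section
 * placement with an explicit alignment; the open-coded section
 * attributes being removed below picked the section but never
 * guaranteed the symbol's alignment. */
#define __page_aligned_data	__section(.data.page_aligned) __aligned(PAGE_SIZE)
#define __page_aligned_bss	__section(.bss.page_aligned) __aligned(PAGE_SIZE)

/* A declaration such as the one in irq_32.c then expands to roughly: */
static char softirq_stack[NR_CPUS * THREAD_SIZE]
	__attribute__((__section__(".bss.page_aligned")))
	__attribute__((__aligned__(PAGE_SIZE)));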
arch/x86/kernel/cpu/common_64.c: 2 additions & 2 deletions
@@ -16,6 +16,7 @@
 #include <asm/i387.h>
 #include <asm/msr.h>
 #include <asm/io.h>
+#include <asm/linkage.h>
 #include <asm/mmu_context.h>
 #include <asm/mtrr.h>
 #include <asm/mce.h>
@@ -517,8 +518,7 @@ void pda_init(int cpu)
 }
 
 char boot_exception_stacks[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ +
-			   DEBUG_STKSZ]
-__attribute__((section(".bss.page_aligned")));
+			   DEBUG_STKSZ] __page_aligned_bss;
 
 extern asmlinkage void ignore_sysret(void);

arch/x86/kernel/irq_32.c: 2 additions & 5 deletions
@@ -83,11 +83,8 @@ union irq_ctx {
 static union irq_ctx *hardirq_ctx[NR_CPUS] __read_mostly;
 static union irq_ctx *softirq_ctx[NR_CPUS] __read_mostly;
 
-static char softirq_stack[NR_CPUS * THREAD_SIZE]
-		__attribute__((__section__(".bss.page_aligned")));
-
-static char hardirq_stack[NR_CPUS * THREAD_SIZE]
-		__attribute__((__section__(".bss.page_aligned")));
+static char softirq_stack[NR_CPUS * THREAD_SIZE] __page_aligned_bss;
+static char hardirq_stack[NR_CPUS * THREAD_SIZE] __page_aligned_bss;
 
 static void call_on_stack(void *func, void *stack)
 {
arch/x86/xen/mmu.c: 6 additions & 9 deletions
@@ -46,6 +46,7 @@
 #include <asm/tlbflush.h>
 #include <asm/mmu_context.h>
 #include <asm/paravirt.h>
+#include <asm/linkage.h>
 
 #include <asm/xen/hypercall.h>
 #include <asm/xen/hypervisor.h>
@@ -60,22 +61,18 @@
 #define TOP_ENTRIES		(MAX_DOMAIN_PAGES / P2M_ENTRIES_PER_PAGE)
 
 /* Placeholder for holes in the address space */
-static unsigned long p2m_missing[P2M_ENTRIES_PER_PAGE]
-	__attribute__((section(".data.page_aligned"))) =
+static unsigned long p2m_missing[P2M_ENTRIES_PER_PAGE] __page_aligned_data =
 		{ [ 0 ... P2M_ENTRIES_PER_PAGE-1 ] = ~0UL };
 
 /* Array of pointers to pages containing p2m entries */
-static unsigned long *p2m_top[TOP_ENTRIES]
-	__attribute__((section(".data.page_aligned"))) =
+static unsigned long *p2m_top[TOP_ENTRIES] __page_aligned_data =
 		{ [ 0 ... TOP_ENTRIES - 1] = &p2m_missing[0] };
 
 /* Arrays of p2m arrays expressed in mfns used for save/restore */
-static unsigned long p2m_top_mfn[TOP_ENTRIES]
-	__attribute__((section(".bss.page_aligned")));
+static unsigned long p2m_top_mfn[TOP_ENTRIES] __page_aligned_bss;
 
-static unsigned long p2m_top_mfn_list[
-		PAGE_ALIGN(TOP_ENTRIES / P2M_ENTRIES_PER_PAGE)]
-	__attribute__((section(".bss.page_aligned")));
+static unsigned long p2m_top_mfn_list[TOP_ENTRIES / P2M_ENTRIES_PER_PAGE]
+	__page_aligned_bss;
 
 static inline unsigned p2m_top_index(unsigned long pfn)
 {
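Aside from the mechanical macro substitutions, the one semantic change visible in this hunk, and presumably the "actual bug" the commit message refers to, is the sizing of p2m_top_mfn_list: the old code wrapped the element count in PAGE_ALIGN(), which rounds a value up to the next multiple of PAGE_SIZE, vastly oversizing the array while doing nothing for its start address. A self-contained illustration, using a hypothetical count of 8 entries in place of TOP_ENTRIES / P2M_ENTRIES_PER_PAGE:

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PAGE_MASK	(~(PAGE_SIZE - 1))
/* The kernel's PAGE_ALIGN, reproduced here for illustration: */
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & PAGE_MASK)

int main(void)
{
	/* Old sizing: 8 entries balloon to a whole page's worth. */
	printf("PAGE_ALIGN(8) = %lu\n", PAGE_ALIGN(8));	/* 4096, not 8 */
	return 0;
}

With the fix, the array holds exactly TOP_ENTRIES / P2M_ENTRIES_PER_PAGE entries, and __page_aligned_bss supplies the page-aligned start address.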
