
Commit 6425fea

---
r: 136980
b: refs/heads/master
c: 13b2eda
h: refs/heads/master
v: v3
Ingo Molnar committed Feb 26, 2009
1 parent 8fde6d6 commit 6425fea
Showing 36 changed files with 293 additions and 1,802 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
refs/heads/master: 9976b39b5031bbf76f715893cf080b6a17683881
refs/heads/master: 13b2eda64d14d0a0c15c092664c7351ea58ea851
20 changes: 7 additions & 13 deletions trunk/arch/alpha/mm/init.c
@@ -189,21 +189,9 @@ callback_init(void * kernel_end)

if (alpha_using_srm) {
static struct vm_struct console_remap_vm;
unsigned long nr_pages = 0;
unsigned long vaddr;
unsigned long vaddr = VMALLOC_START;
unsigned long i, j;

/* calculate needed size */
for (i = 0; i < crb->map_entries; ++i)
nr_pages += crb->map[i].count;

/* register the vm area */
console_remap_vm.flags = VM_ALLOC;
console_remap_vm.size = nr_pages << PAGE_SHIFT;
vm_area_register_early(&console_remap_vm, PAGE_SIZE);

vaddr = (unsigned long)console_remap_vm.addr;

/* Set up the third level PTEs and update the virtual
addresses of the CRB entries. */
for (i = 0; i < crb->map_entries; ++i) {
@@ -225,6 +213,12 @@ callback_init(void * kernel_end)
vaddr += PAGE_SIZE;
}
}

/* Let vmalloc know that we've allocated some space. */
console_remap_vm.flags = VM_ALLOC;
console_remap_vm.addr = (void *) VMALLOC_START;
console_remap_vm.size = vaddr - VMALLOC_START;
vmlist = &console_remap_vm;
}

callback_init_done = 1;
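The restored callback_init() path above walks the CRB map, advances vaddr one page at a time from VMALLOC_START, and only afterwards records the consumed range in console_remap_vm. Below is a minimal user-space sketch of that bookkeeping, with made-up structures, stand-in VMALLOC_START and page-size values, and no real page-table work:

/* Sketch of the bookkeeping in the restored callback_init() path: walk
 * the CRB map entries, advance a virtual cursor one page per mapped
 * page, then record the consumed range.  All names and constants here
 * are stand-ins for illustration, not the kernel's. */
#include <stdio.h>

#define PAGE_SIZE      8192UL        /* stand-in page size */
#define VMALLOC_START  0x10000000UL  /* stand-in value, not alpha's real one */

struct crb_map_entry { unsigned long pa; unsigned long count; };

int main(void)
{
	struct crb_map_entry map[] = { { 0x2000, 3 }, { 0x8000, 1 } };
	unsigned long vaddr = VMALLOC_START;
	unsigned long i, j;

	for (i = 0; i < sizeof(map) / sizeof(map[0]); ++i)
		for (j = 0; j < map[i].count; ++j)
			vaddr += PAGE_SIZE;   /* one page mapped per step */

	/* What the restored code stores into console_remap_vm afterwards. */
	printf("addr = %#lx, size = %#lx\n",
	       VMALLOC_START, vaddr - VMALLOC_START);
	return 0;
}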
2 changes: 1 addition & 1 deletion trunk/arch/avr32/Kconfig
@@ -181,7 +181,7 @@ source "kernel/Kconfig.preempt"
config QUICKLIST
def_bool y

config HAVE_ARCH_BOOTMEM
config HAVE_ARCH_BOOTMEM_NODE
def_bool n

config ARCH_HAVE_MEMORY_PRESENT
5 changes: 1 addition & 4 deletions trunk/arch/x86/Kconfig
@@ -135,9 +135,6 @@ config ARCH_HAS_CACHE_LINE_SIZE
config HAVE_SETUP_PER_CPU_AREA
def_bool y

config HAVE_DYNAMIC_PER_CPU_AREA
def_bool y

config HAVE_CPUMASK_OF_CPU_MAP
def_bool X86_64_SMP

@@ -1125,7 +1122,7 @@ config NODES_SHIFT
Specify the maximum number of NUMA Nodes available on the target
system. Increases memory reserved to accomodate various tables.

config HAVE_ARCH_BOOTMEM
config HAVE_ARCH_BOOTMEM_NODE
def_bool y
depends on X86_32 && NUMA

53 changes: 17 additions & 36 deletions trunk/arch/x86/include/asm/cacheflush.h
@@ -5,43 +5,24 @@
#include <linux/mm.h>

/* Caches aren't brain-dead on the intel. */
static inline void flush_cache_all(void) { }
static inline void flush_cache_mm(struct mm_struct *mm) { }
static inline void flush_cache_dup_mm(struct mm_struct *mm) { }
static inline void flush_cache_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end) { }
static inline void flush_cache_page(struct vm_area_struct *vma,
unsigned long vmaddr, unsigned long pfn) { }
static inline void flush_dcache_page(struct page *page) { }
static inline void flush_dcache_mmap_lock(struct address_space *mapping) { }
static inline void flush_dcache_mmap_unlock(struct address_space *mapping) { }
static inline void flush_icache_range(unsigned long start,
unsigned long end) { }
static inline void flush_icache_page(struct vm_area_struct *vma,
struct page *page) { }
static inline void flush_icache_user_range(struct vm_area_struct *vma,
struct page *page,
unsigned long addr,
unsigned long len) { }
static inline void flush_cache_vmap(unsigned long start, unsigned long end) { }
static inline void flush_cache_vunmap(unsigned long start,
unsigned long end) { }
#define flush_cache_all() do { } while (0)
#define flush_cache_mm(mm) do { } while (0)
#define flush_cache_dup_mm(mm) do { } while (0)
#define flush_cache_range(vma, start, end) do { } while (0)
#define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
#define flush_dcache_page(page) do { } while (0)
#define flush_dcache_mmap_lock(mapping) do { } while (0)
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
#define flush_icache_range(start, end) do { } while (0)
#define flush_icache_page(vma, pg) do { } while (0)
#define flush_icache_user_range(vma, pg, adr, len) do { } while (0)
#define flush_cache_vmap(start, end) do { } while (0)
#define flush_cache_vunmap(start, end) do { } while (0)

static inline void copy_to_user_page(struct vm_area_struct *vma,
struct page *page, unsigned long vaddr,
void *dst, const void *src,
unsigned long len)
{
memcpy(dst, src, len);
}

static inline void copy_from_user_page(struct vm_area_struct *vma,
struct page *page, unsigned long vaddr,
void *dst, const void *src,
unsigned long len)
{
memcpy(dst, src, len);
}
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
memcpy((dst), (src), (len))
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
memcpy((dst), (src), (len))

#define PG_non_WB PG_arch_1
PAGEFLAG(NonWB, non_WB)
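The stubs above go back from static inline functions to do { } while (0) macros. The following is a small user-space sketch of that idiom, not the kernel header itself: the wrapper makes each stub expand to exactly one statement that still requires a trailing semicolon, so call sites read like the inline calls they replace, even in unbraced if/else. One difference worth noting is that, unlike the inlines, these macros never evaluate or type-check their arguments. The struct and the printf below are invented for illustration:

/* Illustrative user-space sketch of the "do { } while (0)" no-op idiom. */
#include <stdio.h>

#define flush_cache_all()         do { } while (0)
#define flush_dcache_page(pg)     do { } while (0)

int main(void)
{
	struct { int dummy; } *page = NULL;

	if (page)
		flush_dcache_page(page);   /* expands to one statement; needs the ';' */
	else
		flush_cache_all();

	/* The macro never touches its argument, so even a NULL "page" is safe here. */
	printf("stubs expanded to empty statements\n");
	return 0;
}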
3 changes: 3 additions & 0 deletions trunk/arch/x86/include/asm/iomap.h
@@ -23,6 +23,9 @@
#include <asm/pgtable.h>
#include <asm/tlbflush.h>

int
is_io_mapping_possible(resource_size_t base, unsigned long size);

void *
iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot);

43 changes: 40 additions & 3 deletions trunk/arch/x86/include/asm/mmzone_32.h
@@ -91,9 +91,46 @@ static inline int pfn_valid(int pfn)
#endif /* CONFIG_DISCONTIGMEM */

#ifdef CONFIG_NEED_MULTIPLE_NODES
/* always use node 0 for bootmem on this numa platform */
#define bootmem_arch_preferred_node(__bdata, size, align, goal, limit) \
(NODE_DATA(0)->bdata)

/*
* Following are macros that are specific to this numa platform.
*/
#define reserve_bootmem(addr, size, flags) \
reserve_bootmem_node(NODE_DATA(0), (addr), (size), (flags))
#define alloc_bootmem(x) \
__alloc_bootmem_node(NODE_DATA(0), (x), SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
#define alloc_bootmem_nopanic(x) \
__alloc_bootmem_node_nopanic(NODE_DATA(0), (x), SMP_CACHE_BYTES, \
__pa(MAX_DMA_ADDRESS))
#define alloc_bootmem_low(x) \
__alloc_bootmem_node(NODE_DATA(0), (x), SMP_CACHE_BYTES, 0)
#define alloc_bootmem_pages(x) \
__alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, __pa(MAX_DMA_ADDRESS))
#define alloc_bootmem_pages_nopanic(x) \
__alloc_bootmem_node_nopanic(NODE_DATA(0), (x), PAGE_SIZE, \
__pa(MAX_DMA_ADDRESS))
#define alloc_bootmem_low_pages(x) \
__alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, 0)
#define alloc_bootmem_node(pgdat, x) \
({ \
struct pglist_data __maybe_unused \
*__alloc_bootmem_node__pgdat = (pgdat); \
__alloc_bootmem_node(NODE_DATA(0), (x), SMP_CACHE_BYTES, \
__pa(MAX_DMA_ADDRESS)); \
})
#define alloc_bootmem_pages_node(pgdat, x) \
({ \
struct pglist_data __maybe_unused \
*__alloc_bootmem_node__pgdat = (pgdat); \
__alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, \
__pa(MAX_DMA_ADDRESS)); \
})
#define alloc_bootmem_low_pages_node(pgdat, x) \
({ \
struct pglist_data __maybe_unused \
*__alloc_bootmem_node__pgdat = (pgdat); \
__alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, 0); \
})
#endif /* CONFIG_NEED_MULTIPLE_NODES */

#endif /* _ASM_X86_MMZONE_32_H */
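The alloc_bootmem_node()-style wrappers restored above rely on a GNU C statement expression that binds the caller's pgdat to a local marked as unused, so the argument is still evaluated and type-checked, and then allocate on node 0 regardless. Here is a hedged sketch of that pattern with an invented fake_alloc_node() helper and a stand-in pglist_data; it builds with gcc or clang, which support the ({ ... }) extension:

/* Sketch of the macro pattern restored in mmzone_32.h: accept the
 * caller's pgdat, evaluate and type-check it, then ignore it and always
 * allocate on behalf of node 0.  The allocator and types are invented. */
#include <stdio.h>
#include <stdlib.h>

struct pglist_data { int node_id; };

static struct pglist_data node0 = { .node_id = 0 };

static void *fake_alloc_node(struct pglist_data *pgdat, size_t size)
{
	printf("allocating %zu bytes on node %d\n", size, pgdat->node_id);
	return malloc(size);
}

#define alloc_bootmem_node(pgdat, x)				\
({								\
	struct pglist_data __attribute__((unused))		\
		*__alloc_bootmem_node__pgdat = (pgdat);		\
	fake_alloc_node(&node0, (x));				\
})

int main(void)
{
	struct pglist_data node3 = { .node_id = 3 };
	void *p = alloc_bootmem_node(&node3, 128);   /* still lands on node 0 */

	free(p);
	return 0;
}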
8 changes: 0 additions & 8 deletions trunk/arch/x86/include/asm/percpu.h
@@ -43,14 +43,6 @@
#else /* ...!ASSEMBLY */

#include <linux/stringify.h>
#include <asm/sections.h>

#define __addr_to_pcpu_ptr(addr) \
(void *)((unsigned long)(addr) - (unsigned long)pcpu_base_addr \
+ (unsigned long)__per_cpu_start)
#define __pcpu_ptr_to_addr(ptr) \
(void *)((unsigned long)(ptr) + (unsigned long)pcpu_base_addr \
- (unsigned long)__per_cpu_start)

#ifdef CONFIG_SMP
#define __percpu_arg(x) "%%"__stringify(__percpu_seg)":%P" #x
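The two macros deleted above translated between an address inside the dynamically allocated per-CPU area and its canonical per-CPU pointer by rebasing against pcpu_base_addr and __per_cpu_start. Below is a user-space sketch of that rebasing, with plain arrays standing in for the chunk and the static per-CPU section (the names and sizes are invented):

/* Sketch of the addr <-> per-CPU-pointer rebasing removed by this hunk. */
#include <stdio.h>

static char chunk[4096];          /* stands in for pcpu_base_addr's chunk */
static char static_section[4096]; /* stands in for __per_cpu_start.. */

#define addr_to_pcpu_ptr(addr) \
	((void *)((unsigned long)(addr) - (unsigned long)chunk \
		  + (unsigned long)static_section))
#define pcpu_ptr_to_addr(ptr) \
	((void *)((unsigned long)(ptr) + (unsigned long)chunk \
		  - (unsigned long)static_section))

int main(void)
{
	void *addr = &chunk[128];              /* object lives at offset 128 */
	void *ptr  = addr_to_pcpu_ptr(addr);   /* same offset, rebased */

	printf("offset in chunk:     %lu\n",
	       (unsigned long)addr - (unsigned long)chunk);
	printf("offset via pcpu ptr: %lu\n",
	       (unsigned long)ptr - (unsigned long)static_section);
	printf("round trip matches:  %d\n",
	       (unsigned long)pcpu_ptr_to_addr(ptr) == (unsigned long)addr);
	return 0;
}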
2 changes: 0 additions & 2 deletions trunk/arch/x86/include/asm/pgtable.h
@@ -288,8 +288,6 @@ static inline int is_new_memtype_allowed(unsigned long flags,
return 1;
}

pmd_t *populate_extra_pmd(unsigned long vaddr);
pte_t *populate_extra_pte(unsigned long vaddr);
#endif /* __ASSEMBLY__ */

#ifdef CONFIG_X86_32
1 change: 0 additions & 1 deletion trunk/arch/x86/include/asm/xen/page.h
@@ -164,7 +164,6 @@ static inline pte_t __pte_ma(pteval_t x)


xmaddr_t arbitrary_virt_to_machine(void *address);
unsigned long arbitrary_virt_to_mfn(void *vaddr);
void make_lowmem_page_readonly(void *vaddr);
void make_lowmem_page_readwrite(void *vaddr);

2 changes: 1 addition & 1 deletion trunk/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -601,7 +601,7 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
if (!data)
return -ENOMEM;

data->acpi_data = per_cpu_ptr(acpi_perf_data, cpu);
data->acpi_data = percpu_ptr(acpi_perf_data, cpu);
per_cpu(drv_data, cpu) = data;

if (cpu_has(c, X86_FEATURE_CONSTANT_TSC))
29 changes: 14 additions & 15 deletions trunk/arch/x86/kernel/irq_32.c
@@ -16,7 +16,6 @@
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/uaccess.h>
#include <linux/percpu.h>

#include <asm/apic.h>

@@ -56,13 +55,13 @@ static inline void print_stack_overflow(void) { }
union irq_ctx {
struct thread_info tinfo;
u32 stack[THREAD_SIZE/sizeof(u32)];
} __attribute__((aligned(PAGE_SIZE)));
};

static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
static DEFINE_PER_CPU(union irq_ctx *, softirq_ctx);
static union irq_ctx *hardirq_ctx[NR_CPUS] __read_mostly;
static union irq_ctx *softirq_ctx[NR_CPUS] __read_mostly;

static DEFINE_PER_CPU_PAGE_ALIGNED(union irq_ctx, hardirq_stack);
static DEFINE_PER_CPU_PAGE_ALIGNED(union irq_ctx, softirq_stack);
static char softirq_stack[NR_CPUS * THREAD_SIZE] __page_aligned_bss;
static char hardirq_stack[NR_CPUS * THREAD_SIZE] __page_aligned_bss;

static void call_on_stack(void *func, void *stack)
{
@@ -82,7 +81,7 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
u32 *isp, arg1, arg2;

curctx = (union irq_ctx *) current_thread_info();
irqctx = __get_cpu_var(hardirq_ctx);
irqctx = hardirq_ctx[smp_processor_id()];

/*
* this is where we switch to the IRQ stack. However, if we are
@@ -126,34 +125,34 @@ void __cpuinit irq_ctx_init(int cpu)
{
union irq_ctx *irqctx;

if (per_cpu(hardirq_ctx, cpu))
if (hardirq_ctx[cpu])
return;

irqctx = &per_cpu(hardirq_stack, cpu);
irqctx = (union irq_ctx*) &hardirq_stack[cpu*THREAD_SIZE];
irqctx->tinfo.task = NULL;
irqctx->tinfo.exec_domain = NULL;
irqctx->tinfo.cpu = cpu;
irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);

per_cpu(hardirq_ctx, cpu) = irqctx;
hardirq_ctx[cpu] = irqctx;

irqctx = &per_cpu(softirq_stack, cpu);
irqctx = (union irq_ctx *) &softirq_stack[cpu*THREAD_SIZE];
irqctx->tinfo.task = NULL;
irqctx->tinfo.exec_domain = NULL;
irqctx->tinfo.cpu = cpu;
irqctx->tinfo.preempt_count = 0;
irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);

per_cpu(softirq_ctx, cpu) = irqctx;
softirq_ctx[cpu] = irqctx;

printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
cpu, hardirq_ctx[cpu], softirq_ctx[cpu]);
}

void irq_ctx_exit(int cpu)
{
per_cpu(hardirq_ctx, cpu) = NULL;
hardirq_ctx[cpu] = NULL;
}

asmlinkage void do_softirq(void)
@@ -170,7 +169,7 @@ asmlinkage void do_softirq(void)

if (local_softirq_pending()) {
curctx = current_thread_info();
irqctx = __get_cpu_var(softirq_ctx);
irqctx = softirq_ctx[smp_processor_id()];
irqctx->tinfo.task = curctx->task;
irqctx->tinfo.previous_esp = current_stack_pointer;

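The irq_32.c hunks above go back to flat, statically sized IRQ stacks: one page-aligned char array holding THREAD_SIZE bytes per possible CPU, with CPU n's context carved out at offset n * THREAD_SIZE and remembered in an NR_CPUS-sized pointer array instead of a per-CPU variable. Here is a user-space sketch with stand-in NR_CPUS and THREAD_SIZE values and a trimmed-down irq_ctx:

/* Sketch of the reverted IRQ-stack layout: carve per-CPU stacks out of
 * one flat aligned array and index the contexts by CPU number. */
#include <stdio.h>
#include <stdalign.h>

#define NR_CPUS      4        /* stand-in values */
#define THREAD_SIZE  8192

union irq_ctx {
	struct { int cpu; int preempt_count; } tinfo;
	char stack[THREAD_SIZE];
};

static alignas(4096) char hardirq_stack[NR_CPUS * THREAD_SIZE];
static union irq_ctx *hardirq_ctx[NR_CPUS];

static void irq_ctx_init(int cpu)
{
	union irq_ctx *irqctx;

	if (hardirq_ctx[cpu])
		return;

	/* carve this CPU's slot out of the flat array */
	irqctx = (union irq_ctx *)&hardirq_stack[cpu * THREAD_SIZE];
	irqctx->tinfo.cpu = cpu;
	irqctx->tinfo.preempt_count = 0;
	hardirq_ctx[cpu] = irqctx;
}

int main(void)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		irq_ctx_init(cpu);

	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		printf("cpu %d stack at %p\n", cpu, (void *)hardirq_ctx[cpu]);
	return 0;
}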