
Commit 03e159a

---
r: 136481
b: refs/heads/master
c: 3eb3963
h: refs/heads/master
i:
  136479: 6172f32
v: v3
Ingo Molnar committed Jan 21, 2009
1 parent 027225a commit 03e159a
Showing 107 changed files with 1,287 additions and 1,578 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
refs/heads/master: 5766b842b23c6b40935a5f3bd435b2bcdaff2143
refs/heads/master: 3eb3963fd1974c63e9bb3cfe774abc3e1995e479
1 change: 1 addition & 0 deletions trunk/arch/arm/kernel/vmlinux.lds.S
@@ -65,6 +65,7 @@ SECTIONS
#endif
. = ALIGN(4096);
__per_cpu_start = .;
*(.data.percpu.page_aligned)
*(.data.percpu)
*(.data.percpu.shared_aligned)
__per_cpu_end = .;
1 change: 1 addition & 0 deletions trunk/arch/ia64/kernel/vmlinux.lds.S
@@ -219,6 +219,7 @@ SECTIONS
.data.percpu PERCPU_ADDR : AT(__phys_per_cpu_start - LOAD_OFFSET)
{
__per_cpu_start = .;
*(.data.percpu.page_aligned)
*(.data.percpu)
*(.data.percpu.shared_aligned)
__per_cpu_end = .;
1 change: 1 addition & 0 deletions trunk/arch/powerpc/kernel/vmlinux.lds.S
@@ -182,6 +182,7 @@ SECTIONS
. = ALIGN(PAGE_SIZE);
.data.percpu : AT(ADDR(.data.percpu) - LOAD_OFFSET) {
__per_cpu_start = .;
*(.data.percpu.page_aligned)
*(.data.percpu)
*(.data.percpu.shared_aligned)
__per_cpu_end = .;
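
Note: the three linker-script hunks above all collect a new .data.percpu.page_aligned input section ahead of the regular per-cpu data, so page-aligned per-cpu objects no longer pad out the ordinary section. A minimal sketch of a variable that would land there, assuming the DEFINE_PER_CPU_PAGE_ALIGNED helper added alongside these linker changes (the struct and function names below are made up for illustration):

#include <linux/percpu.h>
#include <asm/page.h>

struct example_page {
	char buf[PAGE_SIZE];
};

/* Emitted into .data.percpu.page_aligned rather than plain .data.percpu. */
static DEFINE_PER_CPU_PAGE_ALIGNED(struct example_page, example_page);

static void touch_example_page(void)
{
	/* Accessed exactly like any other DEFINE_PER_CPU variable. */
	get_cpu_var(example_page).buf[0] = 0;
	put_cpu_var(example_page);
}
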
23 changes: 10 additions & 13 deletions trunk/arch/x86/Kconfig
@@ -1340,29 +1340,26 @@ config SECCOMP

If unsure, say Y. Only embedded should say N here.

config CC_STACKPROTECTOR_ALL
bool

config CC_STACKPROTECTOR
bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
depends on X86_64 && EXPERIMENTAL && BROKEN
depends on X86_64
select CC_STACKPROTECTOR_ALL
help
This option turns on the -fstack-protector GCC feature. This
feature puts, at the beginning of critical functions, a canary
value on the stack just before the return address, and validates
This option turns on the -fstack-protector GCC feature. This
feature puts, at the beginning of functions, a canary value on
the stack just before the return address, and validates
the value just before actually returning. Stack based buffer
overflows (that need to overwrite this return address) now also
overwrite the canary, which gets detected and the attack is then
neutralized via a kernel panic.

This feature requires gcc version 4.2 or above, or a distribution
gcc with the feature backported. Older versions are automatically
detected and for those versions, this configuration option is ignored.

config CC_STACKPROTECTOR_ALL
bool "Use stack-protector for all functions"
depends on CC_STACKPROTECTOR
help
Normally, GCC only inserts the canary value protection for
functions that use large-ish on-stack buffers. By enabling
this option, GCC will be asked to do this for ALL functions.
detected and for those versions, this configuration option is
ignored. (and a warning is printed during bootup)

source kernel/Kconfig.hz

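
Note: the help text above describes the canary mechanism in prose; what -fstack-protector does is roughly equivalent to the hand-written sketch below. This is illustrative C only — the compare-and-fail sequence is emitted by GCC, copy_name and its buffer are made up, and on x86-64 the kernel keeps the canary in per-cpu/PDA storage (%gs:40) rather than in the generic __stack_chk_guard symbol shown here.

#include <stdlib.h>
#include <string.h>

unsigned long __stack_chk_guard;	/* the canary; set to a random value at boot */

void __stack_chk_fail(void)		/* called on a mismatch; the kernel panics here */
{
	abort();
}

int copy_name(char *dst, const char *src)
{
	unsigned long canary = __stack_chk_guard; /* what the compiler places just below the return address */
	char buf[64];

	strcpy(buf, src);		/* a linear overflow of buf must trample the canary first */
	strcpy(dst, buf);

	if (canary != __stack_chk_guard)	/* compiler-inserted check before returning */
		__stack_chk_fail();
	return 0;
}
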
1 change: 1 addition & 0 deletions trunk/arch/x86/Kconfig.debug
@@ -117,6 +117,7 @@ config DEBUG_RODATA
config DEBUG_RODATA_TEST
bool "Testcase for the DEBUG_RODATA feature"
depends on DEBUG_RODATA
default y
help
This option enables a testcase for the DEBUG_RODATA
feature as well as for the change_page_attr() infrastructure.
2 changes: 1 addition & 1 deletion trunk/arch/x86/Makefile
@@ -73,7 +73,7 @@ else

stackp := $(CONFIG_SHELL) $(srctree)/scripts/gcc-x86_64-has-stack-protector.sh
stackp-$(CONFIG_CC_STACKPROTECTOR) := $(shell $(stackp) \
"$(CC)" -fstack-protector )
"$(CC)" "-fstack-protector -DGCC_HAS_SP" )
stackp-$(CONFIG_CC_STACKPROTECTOR_ALL) += $(shell $(stackp) \
"$(CC)" -fstack-protector-all )

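
Note: the Makefile hunk folds -DGCC_HAS_SP into the same compiler probe as -fstack-protector, so the macro is only defined when the compiler actually accepts the flag. C code can then detect a mismatch between the config option and the toolchain; a sketch of such a guard (the exact file and wording used by the series may differ):

#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(GCC_HAS_SP)
#warning You have selected CONFIG_CC_STACKPROTECTOR, but the compiler in use does not support -fstack-protector.
#endif
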
8 changes: 4 additions & 4 deletions trunk/arch/x86/ia32/ia32entry.S
@@ -112,8 +112,8 @@ ENTRY(ia32_sysenter_target)
CFI_DEF_CFA rsp,0
CFI_REGISTER rsp,rbp
SWAPGS_UNSAFE_STACK
movq %gs:pda_kernelstack, %rsp
addq $(PDA_STACKOFFSET),%rsp
movq PER_CPU_VAR(kernel_stack), %rsp
addq $(KERNEL_STACK_OFFSET),%rsp
/*
* No need to follow this irqs on/off section: the syscall
* disabled irqs, here we enable it straight after entry:
@@ -273,13 +273,13 @@ ENDPROC(ia32_sysenter_target)
ENTRY(ia32_cstar_target)
CFI_STARTPROC32 simple
CFI_SIGNAL_FRAME
CFI_DEF_CFA rsp,PDA_STACKOFFSET
CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
CFI_REGISTER rip,rcx
/*CFI_REGISTER rflags,r11*/
SWAPGS_UNSAFE_STACK
movl %esp,%r8d
CFI_REGISTER rsp,r8
movq %gs:pda_kernelstack,%rsp
movq PER_CPU_VAR(kernel_stack),%rsp
/*
* No need to follow this irqs on/off section: the syscall
* disabled irqs and here we enable it straight after entry:
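
Note: the entry-code hunks above replace the PDA fields (%gs:pda_kernelstack, PDA_STACKOFFSET) with the kernel_stack per-cpu variable accessed through PER_CPU_VAR(). As a rough sketch, such an assembler helper is typically defined along these lines in asm/percpu.h; the exact definition in this series may differ (for example in the segment prefix used on 32-bit):

#ifdef CONFIG_SMP
/* %gs points at this CPU's per-cpu area on x86-64, so prefix the symbol with it. */
#define PER_CPU_VAR(var)	%gs:per_cpu__##var
#else
/* UP: per-cpu variables are just ordinary symbols. */
#define PER_CPU_VAR(var)	per_cpu__##var
#endif
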
14 changes: 10 additions & 4 deletions trunk/arch/x86/include/asm/bitops.h
@@ -3,6 +3,9 @@

/*
* Copyright 1992, Linus Torvalds.
*
* Note: inlines with more than a single statement should be marked
* __always_inline to avoid problems with older gcc's inlining heuristics.
*/

#ifndef _LINUX_BITOPS_H
@@ -53,7 +56,8 @@
* Note that @nr may be almost arbitrarily large; this function is not
* restricted to acting on a single-word quantity.
*/
static inline void set_bit(unsigned int nr, volatile unsigned long *addr)
static __always_inline void
set_bit(unsigned int nr, volatile unsigned long *addr)
{
if (IS_IMMEDIATE(nr)) {
asm volatile(LOCK_PREFIX "orb %1,%0"
@@ -90,7 +94,8 @@ static inline void __set_bit(int nr, volatile unsigned long *addr)
* you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
* in order to ensure changes are visible on other processors.
*/
static inline void clear_bit(int nr, volatile unsigned long *addr)
static __always_inline void
clear_bit(int nr, volatile unsigned long *addr)
{
if (IS_IMMEDIATE(nr)) {
asm volatile(LOCK_PREFIX "andb %1,%0"
@@ -204,7 +209,8 @@ static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
*
* This is the same as test_and_set_bit on x86.
*/
static inline int test_and_set_bit_lock(int nr, volatile unsigned long *addr)
static __always_inline int
test_and_set_bit_lock(int nr, volatile unsigned long *addr)
{
return test_and_set_bit(nr, addr);
}
@@ -300,7 +306,7 @@ static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
return oldbit;
}

static inline int constant_test_bit(unsigned int nr, const volatile unsigned long *addr)
static __always_inline int constant_test_bit(unsigned int nr, const volatile unsigned long *addr)
{
return ((1UL << (nr % BITS_PER_LONG)) &
(((unsigned long *)addr)[nr / BITS_PER_LONG])) != 0;
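
Note: the new comment at the top of bitops.h explains why the multi-statement helpers in this file gain __always_inline. A hypothetical helper, not from the patch, showing the pattern the comment asks for:

#include <linux/compiler.h>

/*
 * Two statements, so per the comment above this is marked __always_inline
 * rather than plain inline: older gcc may otherwise decide not to inline
 * it, and callers that rely on constant folding (like the IS_IMMEDIATE()
 * paths above) would lose that optimisation.
 */
static __always_inline int byte_has_bits(unsigned long word)
{
	unsigned long low = word & 0xffUL;
	return low != 0;
}
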
21 changes: 21 additions & 0 deletions trunk/arch/x86/include/asm/cpu.h
@@ -7,6 +7,20 @@
#include <linux/nodemask.h>
#include <linux/percpu.h>

#ifdef CONFIG_SMP

extern void prefill_possible_map(void);

#else /* CONFIG_SMP */

static inline void prefill_possible_map(void) {}

#define cpu_physical_id(cpu) boot_cpu_physical_apicid
#define safe_smp_processor_id() 0
#define stack_smp_processor_id() 0

#endif /* CONFIG_SMP */

struct x86_cpu {
struct cpu cpu;
};
@@ -17,4 +31,11 @@ extern void arch_unregister_cpu(int);
#endif

DECLARE_PER_CPU(int, cpu_state);

#ifdef CONFIG_X86_HAS_BOOT_CPU_ID
extern unsigned char boot_cpu_id;
#else
#define boot_cpu_id 0
#endif

#endif /* _ASM_X86_CPU_H */
28 changes: 28 additions & 0 deletions trunk/arch/x86/include/asm/cpumask.h
@@ -0,0 +1,28 @@
#ifndef _ASM_X86_CPUMASK_H
#define _ASM_X86_CPUMASK_H
#ifndef __ASSEMBLY__
#include <linux/cpumask.h>

#ifdef CONFIG_X86_64

extern cpumask_var_t cpu_callin_mask;
extern cpumask_var_t cpu_callout_mask;
extern cpumask_var_t cpu_initialized_mask;
extern cpumask_var_t cpu_sibling_setup_mask;

#else /* CONFIG_X86_32 */

extern cpumask_t cpu_callin_map;
extern cpumask_t cpu_callout_map;
extern cpumask_t cpu_initialized;
extern cpumask_t cpu_sibling_setup_map;

#define cpu_callin_mask ((struct cpumask *)&cpu_callin_map)
#define cpu_callout_mask ((struct cpumask *)&cpu_callout_map)
#define cpu_initialized_mask ((struct cpumask *)&cpu_initialized)
#define cpu_sibling_setup_mask ((struct cpumask *)&cpu_sibling_setup_map)

#endif /* CONFIG_X86_32 */

#endif /* __ASSEMBLY__ */
#endif /* _ASM_X86_CPUMASK_H */
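
Note: the new asm/cpumask.h exposes the boot-time bookkeeping masks as struct cpumask pointers on both 32-bit and 64-bit, so callers can use the ordinary cpumask API without caring whether the backing object is a cpumask_var_t or a legacy cpumask_t. A usage sketch (the function is made up; the accessors are the standard linux/cpumask.h ones):

#include <linux/cpumask.h>
#include <linux/kernel.h>
#include <asm/cpumask.h>

/* Illustrative only: record that a CPU has called in during bring-up. */
static void example_mark_callin(int cpu)
{
	cpumask_set_cpu(cpu, cpu_callin_mask);

	if (cpumask_test_cpu(cpu, cpu_callout_mask))
		pr_debug("CPU %d has both called out and called in\n", cpu);
}
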
24 changes: 3 additions & 21 deletions trunk/arch/x86/include/asm/current.h
@@ -1,39 +1,21 @@
#ifndef _ASM_X86_CURRENT_H
#define _ASM_X86_CURRENT_H

#ifdef CONFIG_X86_32
#include <linux/compiler.h>
#include <asm/percpu.h>

#ifndef __ASSEMBLY__
struct task_struct;

DECLARE_PER_CPU(struct task_struct *, current_task);
static __always_inline struct task_struct *get_current(void)
{
return x86_read_percpu(current_task);
}

#else /* X86_32 */

#ifndef __ASSEMBLY__
#include <asm/pda.h>

struct task_struct;

static __always_inline struct task_struct *get_current(void)
{
return read_pda(pcurrent);
return percpu_read(current_task);
}

#else /* __ASSEMBLY__ */

#include <asm/asm-offsets.h>
#define GET_CURRENT(reg) movq %gs:(pda_pcurrent),reg
#define current get_current()

#endif /* __ASSEMBLY__ */

#endif /* X86_32 */

#define current get_current()

#endif /* _ASM_X86_CURRENT_H */
7 changes: 0 additions & 7 deletions trunk/arch/x86/include/asm/genapic_32.h
@@ -138,11 +138,4 @@ struct genapic {
extern struct genapic *genapic;
extern void es7000_update_genapic_to_cluster(void);

enum uv_system_type {UV_NONE, UV_LEGACY_APIC, UV_X2APIC, UV_NON_UNIQUE_APIC};
#define get_uv_system_type() UV_NONE
#define is_uv_system() 0
#define uv_wakeup_secondary(a, b) 1
#define uv_system_init() do {} while (0)


#endif /* _ASM_X86_GENAPIC_32_H */
6 changes: 0 additions & 6 deletions trunk/arch/x86/include/asm/genapic_64.h
@@ -51,15 +51,9 @@ extern struct genapic apic_x2apic_phys;
extern int acpi_madt_oem_check(char *, char *);

extern void apic_send_IPI_self(int vector);
enum uv_system_type {UV_NONE, UV_LEGACY_APIC, UV_X2APIC, UV_NON_UNIQUE_APIC};
extern enum uv_system_type get_uv_system_type(void);
extern int is_uv_system(void);

extern struct genapic apic_x2apic_uv_x;
DECLARE_PER_CPU(int, x2apic_extra_bits);
extern void uv_cpu_init(void);
extern void uv_system_init(void);
extern int uv_wakeup_secondary(int phys_apicid, unsigned int start_rip);

extern void setup_apic_routing(void);

3 changes: 3 additions & 0 deletions trunk/arch/x86/include/asm/hardirq_32.h
@@ -19,6 +19,9 @@ typedef struct {

DECLARE_PER_CPU(irq_cpustat_t, irq_stat);

/* We can have at most NR_VECTORS irqs routed to a cpu at a time */
#define MAX_HARDIRQS_PER_CPU NR_VECTORS

#define __ARCH_IRQ_STAT
#define __IRQ_STAT(cpu, member) (per_cpu(irq_stat, cpu).member)

24 changes: 19 additions & 5 deletions trunk/arch/x86/include/asm/hardirq_64.h
@@ -3,22 +3,36 @@

#include <linux/threads.h>
#include <linux/irq.h>
#include <asm/pda.h>
#include <asm/apic.h>

typedef struct {
unsigned int __softirq_pending;
unsigned int __nmi_count; /* arch dependent */
unsigned int apic_timer_irqs; /* arch dependent */
unsigned int irq0_irqs;
unsigned int irq_resched_count;
unsigned int irq_call_count;
unsigned int irq_tlb_count;
unsigned int irq_thermal_count;
unsigned int irq_spurious_count;
unsigned int irq_threshold_count;
} ____cacheline_aligned irq_cpustat_t;

DECLARE_PER_CPU(irq_cpustat_t, irq_stat);

/* We can have at most NR_VECTORS irqs routed to a cpu at a time */
#define MAX_HARDIRQS_PER_CPU NR_VECTORS

#define __ARCH_IRQ_STAT 1

#define inc_irq_stat(member) add_pda(member, 1)
#define inc_irq_stat(member) percpu_add(irq_stat.member, 1)

#define local_softirq_pending() read_pda(__softirq_pending)
#define local_softirq_pending() percpu_read(irq_stat.__softirq_pending)

#define __ARCH_SET_SOFTIRQ_PENDING 1

#define set_softirq_pending(x) write_pda(__softirq_pending, (x))
#define or_softirq_pending(x) or_pda(__softirq_pending, (x))
#define set_softirq_pending(x) percpu_write(irq_stat.__softirq_pending, (x))
#define or_softirq_pending(x) percpu_or(irq_stat.__softirq_pending, (x))

extern void ack_bad_irq(unsigned int irq);

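
Note: with irq_cpustat_t now an ordinary per-cpu variable, the PDA operations above become the generic percpu_add()/percpu_read()/percpu_write() accessors, and call sites are unchanged. A brief illustrative caller (the function name is made up):

#include <linux/hardirq.h>

/* Illustrative fragment of an IPI handler: count a rescheduling interrupt. */
static void example_resched_interrupt(void)
{
	/* After this patch this expands to percpu_add(irq_stat.irq_resched_count, 1). */
	inc_irq_stat(irq_resched_count);
}
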
26 changes: 2 additions & 24 deletions trunk/arch/x86/include/asm/io_apic.h
@@ -114,38 +114,16 @@ struct IR_IO_APIC_route_entry {
extern int nr_ioapics;
extern int nr_ioapic_registers[MAX_IO_APICS];

/*
* MP-BIOS irq configuration table structures:
*/

#define MP_MAX_IOAPIC_PIN 127

struct mp_config_ioapic {
unsigned long mp_apicaddr;
unsigned int mp_apicid;
unsigned char mp_type;
unsigned char mp_apicver;
unsigned char mp_flags;
};

struct mp_config_intsrc {
unsigned int mp_dstapic;
unsigned char mp_type;
unsigned char mp_irqtype;
unsigned short mp_irqflag;
unsigned char mp_srcbus;
unsigned char mp_srcbusirq;
unsigned char mp_dstirq;
};

/* I/O APIC entries */
extern struct mp_config_ioapic mp_ioapics[MAX_IO_APICS];
extern struct mpc_ioapic mp_ioapics[MAX_IO_APICS];

/* # of MP IRQ source entries */
extern int mp_irq_entries;

/* MP IRQ source entries */
extern struct mp_config_intsrc mp_irqs[MAX_IRQ_SOURCES];
extern struct mpc_intsrc mp_irqs[MAX_IRQ_SOURCES];

/* non-0 if default (table-less) MP configuration */
extern int mpc_default_type;
