Commit 53bdb32

---
r: 79798
b: refs/heads/master
c: 6abcd98
h: refs/heads/master
v: v3
Glauber de Oliveira Costa authored and Ingo Molnar committed Jan 30, 2008
1 parent f51c846 commit 53bdb32
Showing 10 changed files with 262 additions and 390 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
-refs/heads/master: 416b72182ac3f3f4931ed17d0256b1d805d1b553
+refs/heads/master: 6abcd98ffafbff81f0bfd7ee1d129e634af13245
2 changes: 1 addition & 1 deletion trunk/arch/x86/kernel/asm-offsets_32.c
@@ -123,7 +123,7 @@ void foo(void)
OFFSET(PV_IRQ_irq_disable, pv_irq_ops, irq_disable);
OFFSET(PV_IRQ_irq_enable, pv_irq_ops, irq_enable);
OFFSET(PV_CPU_iret, pv_cpu_ops, iret);
-OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
+OFFSET(PV_CPU_irq_enable_syscall_ret, pv_cpu_ops, irq_enable_syscall_ret);
OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
#endif

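The OFFSET() entries above come from the kernel's asm-offsets mechanism: a C file computes struct-member offsets at build time so assembly code (such as entry_32.S below) can index into pv_cpu_ops by name. Below is a minimal, hypothetical user-space sketch of that idea using plain offsetof(); the struct layout and names are stand-ins, not the kernel's real definitions.

```c
/* Sketch only: shows how asm-offsets-style constants are derived with
 * offsetof(). The struct below is a stand-in, not the kernel's pv_cpu_ops. */
#include <stddef.h>
#include <stdio.h>

struct fake_pv_cpu_ops {                 /* illustrative layout only */
	void (*iret)(void);
	void (*irq_enable_syscall_ret)(void);
};

#define OFFSET(sym, str, mem) \
	printf("#define %s %zu\n", #sym, offsetof(struct str, mem))

int main(void)
{
	/* Emits "#define PV_CPU_irq_enable_syscall_ret <byte offset>", which
	 * assembly could then use to address the member inside the struct. */
	OFFSET(PV_CPU_iret, fake_pv_cpu_ops, iret);
	OFFSET(PV_CPU_irq_enable_syscall_ret, fake_pv_cpu_ops, irq_enable_syscall_ret);
	return 0;
}
```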
8 changes: 4 additions & 4 deletions trunk/arch/x86/kernel/entry_32.S
@@ -58,7 +58,7 @@
* for paravirtualization. The following will never clobber any registers:
* INTERRUPT_RETURN (aka. "iret")
* GET_CR0_INTO_EAX (aka. "movl %cr0, %eax")
-* ENABLE_INTERRUPTS_SYSEXIT (aka "sti; sysexit").
+* ENABLE_INTERRUPTS_SYSCALL_RET (aka "sti; sysexit").
*
* For DISABLE_INTERRUPTS/ENABLE_INTERRUPTS (aka "cli"/"sti"), you must
* specify what registers can be overwritten (CLBR_NONE, CLBR_EAX/EDX/ECX/ANY).
@@ -351,7 +351,7 @@ sysenter_past_esp:
xorl %ebp,%ebp
TRACE_IRQS_ON
1: mov PT_FS(%esp), %fs
-ENABLE_INTERRUPTS_SYSEXIT
+ENABLE_INTERRUPTS_SYSCALL_RET
CFI_ENDPROC
.pushsection .fixup,"ax"
2: movl $0,PT_FS(%esp)
@@ -882,10 +882,10 @@ ENTRY(native_iret)
.previous
END(native_iret)

-ENTRY(native_irq_enable_sysexit)
+ENTRY(native_irq_enable_syscall_ret)
sti
sysexit
-END(native_irq_enable_sysexit)
+END(native_irq_enable_syscall_ret)
#endif

KPROBE_ENTRY(int3)
10 changes: 5 additions & 5 deletions trunk/arch/x86/kernel/paravirt_32.c
@@ -60,7 +60,7 @@ DEF_NATIVE(pv_irq_ops, irq_enable, "sti");
DEF_NATIVE(pv_irq_ops, restore_fl, "push %eax; popf");
DEF_NATIVE(pv_irq_ops, save_fl, "pushf; pop %eax");
DEF_NATIVE(pv_cpu_ops, iret, "iret");
-DEF_NATIVE(pv_cpu_ops, irq_enable_sysexit, "sti; sysexit");
+DEF_NATIVE(pv_cpu_ops, irq_enable_syscall_ret, "sti; sysexit");
DEF_NATIVE(pv_mmu_ops, read_cr2, "mov %cr2, %eax");
DEF_NATIVE(pv_mmu_ops, write_cr3, "mov %eax, %cr3");
DEF_NATIVE(pv_mmu_ops, read_cr3, "mov %cr3, %eax");
@@ -88,7 +88,7 @@ static unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
SITE(pv_irq_ops, restore_fl);
SITE(pv_irq_ops, save_fl);
SITE(pv_cpu_ops, iret);
-SITE(pv_cpu_ops, irq_enable_sysexit);
+SITE(pv_cpu_ops, irq_enable_syscall_ret);
SITE(pv_mmu_ops, read_cr2);
SITE(pv_mmu_ops, read_cr3);
SITE(pv_mmu_ops, write_cr3);
@@ -186,7 +186,7 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
/* If the operation is a nop, then nop the callsite */
ret = paravirt_patch_nop();
else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
-type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit))
+type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_syscall_ret))
/* If operation requires a jmp, then jmp */
ret = paravirt_patch_jmp(insnbuf, opfunc, addr, len);
else
@@ -237,7 +237,7 @@ static void native_flush_tlb_single(unsigned long addr)

/* These are in entry.S */
extern void native_iret(void);
-extern void native_irq_enable_sysexit(void);
+extern void native_irq_enable_syscall_ret(void);

static int __init print_banner(void)
{
@@ -384,7 +384,7 @@ struct pv_cpu_ops pv_cpu_ops = {
.write_idt_entry = write_dt_entry,
.load_esp0 = native_load_esp0,

-.irq_enable_sysexit = native_irq_enable_sysexit,
+.irq_enable_syscall_ret = native_irq_enable_syscall_ret,
.iret = native_iret,

.set_iopl_mask = native_set_iopl_mask,
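The renamed entries above feed the same patching logic as before: DEF_NATIVE records the native instruction string for an op, and paravirt_patch_default() decides whether a call site becomes a nop, a direct jmp (for the terminal iret/syscall_ret paths, which never return), or a plain call. The following is a rough, hypothetical user-space sketch of that three-way decision only; names are illustrative and no real machine code is emitted or patched.

```c
/* Hypothetical model of the dispatch in paravirt_patch_default():
 * it reproduces the decision logic, not the instruction encoding. */
#include <stdio.h>

enum patch_kind { PATCH_NOP, PATCH_JMP, PATCH_CALL };
enum op_id { OP_IRET, OP_IRQ_ENABLE_SYSCALL_RET, OP_SAVE_FL };

static void nop_op(void) { }             /* stand-in for paravirt_nop */
static void some_save_fl(void) { }       /* dummy non-nop handler */

static enum patch_kind choose_patch(enum op_id type, void (*opfunc)(void))
{
	if (opfunc == nop_op)             /* the op is a nop: nop out the call site */
		return PATCH_NOP;
	if (type == OP_IRET || type == OP_IRQ_ENABLE_SYSCALL_RET)
		return PATCH_JMP;         /* these never return, so tail-jump instead of call */
	return PATCH_CALL;                /* everything else stays a call */
}

int main(void)
{
	printf("%d\n", choose_patch(OP_IRQ_ENABLE_SYSCALL_RET, some_save_fl)); /* 1 = PATCH_JMP */
	printf("%d\n", choose_patch(OP_SAVE_FL, nop_op));                      /* 0 = PATCH_NOP */
	return 0;
}
```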
4 changes: 2 additions & 2 deletions trunk/arch/x86/kernel/vmi_32.c
@@ -148,7 +148,7 @@ static unsigned vmi_patch(u8 type, u16 clobbers, void *insns,
insns, eip);
case PARAVIRT_PATCH(pv_cpu_ops.iret):
return patch_internal(VMI_CALL_IRET, len, insns, eip);
-case PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit):
+case PARAVIRT_PATCH(pv_cpu_ops.irq_enable_syscall_ret):
return patch_internal(VMI_CALL_SYSEXIT, len, insns, eip);
default:
break;
@@ -870,7 +870,7 @@ static inline int __init activate_vmi(void)
* the backend. They are performance critical anyway, so requiring
* a patch is not a big problem.
*/
-pv_cpu_ops.irq_enable_sysexit = (void *)0xfeedbab0;
+pv_cpu_ops.irq_enable_syscall_ret = (void *)0xfeedbab0;
pv_cpu_ops.iret = (void *)0xbadbab0;

#ifdef CONFIG_SMP
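The 0xfeedbab0 and 0xbadbab0 values above are deliberate poison pointers: VMI requires these ops to be patched inline, so a call site that somehow escapes patching jumps to an obviously bogus address and faults immediately rather than silently misbehaving. Below is a small, hypothetical user-space illustration of the same "must-patch sentinel" pattern; the struct, member name, and address are illustrative only.

```c
/* Illustrative only: a function pointer starts out as a poison value that
 * would fault if called, and must be "patched" with a real implementation
 * before use -- mirroring the intent of the 0xfeedbab0 placeholder above. */
#include <stdio.h>

struct ops {
	void (*irq_enable_syscall_ret)(void);   /* hypothetical member */
};

/* Poison value in the spirit of 0xfeedbab0: never meant to be called. */
#define MUST_PATCH ((void (*)(void))0xfeedbab0UL)

static void patched_impl(void)
{
	puts("patched implementation ran");
}

int main(void)
{
	struct ops o = { .irq_enable_syscall_ret = MUST_PATCH };

	/* Calling o.irq_enable_syscall_ret() at this point would fault at an
	 * obviously bogus address -- the intended "fail loudly" behaviour. */

	o.irq_enable_syscall_ret = patched_impl;  /* the "patch" step */
	o.irq_enable_syscall_ret();
	return 0;
}
```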
2 changes: 1 addition & 1 deletion trunk/arch/x86/xen/enlighten.c
@@ -953,7 +953,7 @@ static const struct pv_cpu_ops xen_cpu_ops __initdata = {
.read_pmc = native_read_pmc,

.iret = (void *)&hypercall_page[__HYPERVISOR_iret],
-.irq_enable_sysexit = NULL, /* never called */
+.irq_enable_syscall_ret = NULL, /* never called */

.load_tr_desc = paravirt_nop,
.set_ldt = xen_set_ldt,
246 changes: 243 additions & 3 deletions trunk/include/asm-x86/irqflags.h
@@ -1,5 +1,245 @@
-#ifdef CONFIG_X86_32
-# include "irqflags_32.h"
#ifndef _X86_IRQFLAGS_H_
#define _X86_IRQFLAGS_H_

#include <asm/processor-flags.h>

#ifndef __ASSEMBLY__
/*
* Interrupt control:
*/

static inline unsigned long native_save_fl(void)
{
unsigned long flags;

__asm__ __volatile__(
"# __raw_save_flags\n\t"
"pushf ; pop %0"
: "=g" (flags)
: /* no input */
: "memory"
);

return flags;
}

static inline void native_restore_fl(unsigned long flags)
{
__asm__ __volatile__(
"push %0 ; popf"
: /* no output */
:"g" (flags)
:"memory", "cc"
);
}

static inline void native_irq_disable(void)
{
asm volatile("cli": : :"memory");
}

static inline void native_irq_enable(void)
{
asm volatile("sti": : :"memory");
}

static inline void native_safe_halt(void)
{
asm volatile("sti; hlt": : :"memory");
}

static inline void native_halt(void)
{
asm volatile("hlt": : :"memory");
}

#endif

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#ifndef __ASSEMBLY__

static inline unsigned long __raw_local_save_flags(void)
{
return native_save_fl();
}

static inline void raw_local_irq_restore(unsigned long flags)
{
native_restore_fl(flags);
}

static inline void raw_local_irq_disable(void)
{
native_irq_disable();
}

static inline void raw_local_irq_enable(void)
{
native_irq_enable();
}

/*
* Used in the idle loop; sti takes one instruction cycle
* to complete:
*/
static inline void raw_safe_halt(void)
{
native_safe_halt();
}

/*
* Used when interrupts are already enabled or to
* shutdown the processor:
*/
static inline void halt(void)
{
native_halt();
}

/*
* For spinlocks, etc:
*/
static inline unsigned long __raw_local_irq_save(void)
{
unsigned long flags = __raw_local_save_flags();

raw_local_irq_disable();

return flags;
}
#else

#define ENABLE_INTERRUPTS(x) sti
#define DISABLE_INTERRUPTS(x) cli

#ifdef CONFIG_X86_64
#define INTERRUPT_RETURN iretq
#define ENABLE_INTERRUPTS_SYSCALL_RET \
movq %gs:pda_oldrsp, %rsp; \
swapgs; \
sysretq;
#else
#define INTERRUPT_RETURN iret
#define ENABLE_INTERRUPTS_SYSCALL_RET sti; sysexit
#define GET_CR0_INTO_EAX movl %cr0, %eax
#endif


#endif /* __ASSEMBLY__ */
#endif /* CONFIG_PARAVIRT */

#ifndef __ASSEMBLY__
#define raw_local_save_flags(flags) \
do { (flags) = __raw_local_save_flags(); } while (0)

#define raw_local_irq_save(flags) \
do { (flags) = __raw_local_irq_save(); } while (0)

static inline int raw_irqs_disabled_flags(unsigned long flags)
{
return !(flags & X86_EFLAGS_IF);
}

static inline int raw_irqs_disabled(void)
{
unsigned long flags = __raw_local_save_flags();

return raw_irqs_disabled_flags(flags);
}

/*
* makes the traced hardirq state match with the machine state
*
* should be a rarely used function, only in places where its
* otherwise impossible to know the irq state, like in traps.
*/
static inline void trace_hardirqs_fixup_flags(unsigned long flags)
{
if (raw_irqs_disabled_flags(flags))
trace_hardirqs_off();
else
trace_hardirqs_on();
}

static inline void trace_hardirqs_fixup(void)
{
unsigned long flags = __raw_local_save_flags();

trace_hardirqs_fixup_flags(flags);
}

#else
# include "irqflags_64.h"

#ifdef CONFIG_X86_64
/*
* Currently paravirt can't handle swapgs nicely when we
* don't have a stack we can rely on (such as a user space
* stack). So we either find a way around these or just fault
* and emulate if a guest tries to call swapgs directly.
*
* Either way, this is a good way to document that we don't
* have a reliable stack. x86_64 only.
*/
#define SWAPGS_UNSAFE_STACK swapgs
#define ARCH_TRACE_IRQS_ON call trace_hardirqs_on_thunk
#define ARCH_TRACE_IRQS_OFF call trace_hardirqs_off_thunk
#define ARCH_LOCKDEP_SYS_EXIT call lockdep_sys_exit_thunk
#define ARCH_LOCKDEP_SYS_EXIT_IRQ \
TRACE_IRQS_ON; \
sti; \
SAVE_REST; \
LOCKDEP_SYS_EXIT; \
RESTORE_REST; \
cli; \
TRACE_IRQS_OFF;

#else
#define ARCH_TRACE_IRQS_ON \
pushl %eax; \
pushl %ecx; \
pushl %edx; \
call trace_hardirqs_on; \
popl %edx; \
popl %ecx; \
popl %eax;

#define ARCH_TRACE_IRQS_OFF \
pushl %eax; \
pushl %ecx; \
pushl %edx; \
call trace_hardirqs_off; \
popl %edx; \
popl %ecx; \
popl %eax;

#define ARCH_LOCKDEP_SYS_EXIT \
pushl %eax; \
pushl %ecx; \
pushl %edx; \
call lockdep_sys_exit; \
popl %edx; \
popl %ecx; \
popl %eax;

#define ARCH_LOCKDEP_SYS_EXIT_IRQ
#endif

#ifdef CONFIG_TRACE_IRQFLAGS
# define TRACE_IRQS_ON ARCH_TRACE_IRQS_ON
# define TRACE_IRQS_OFF ARCH_TRACE_IRQS_OFF
#else
# define TRACE_IRQS_ON
# define TRACE_IRQS_OFF
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define LOCKDEP_SYS_EXIT ARCH_LOCKDEP_SYS_EXIT
# define LOCKDEP_SYS_EXIT_IRQ ARCH_LOCKDEP_SYS_EXIT_IRQ
# else
# define LOCKDEP_SYS_EXIT
# define LOCKDEP_SYS_EXIT_IRQ
# endif

#endif /* __ASSEMBLY__ */
#endif
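The new header's __raw_local_irq_save()/raw_local_irq_restore() pair exists so callers such as spinlocks can disable interrupts without caring whether they were already off: save the flags, disable, and later restore whatever state was found. Below is a hedged user-space analog of that calling discipline, modelling the IF flag as a plain global; it only demonstrates the pattern, not real interrupt control, and all names are illustrative.

```c
/* User-space analog for illustration only: "interrupts" are a global flag
 * rather than the real EFLAGS.IF bit the header manipulates. */
#include <stdio.h>

static int irq_enabled = 1;                        /* stands in for EFLAGS.IF */

static unsigned long demo_save_flags(void)        { return (unsigned long)irq_enabled; }
static void demo_irq_disable(void)                { irq_enabled = 0; }
static void demo_irq_restore(unsigned long flags) { irq_enabled = (int)flags; }

/* Mirrors __raw_local_irq_save(): remember the old state, then disable. */
static unsigned long demo_irq_save(void)
{
	unsigned long flags = demo_save_flags();
	demo_irq_disable();
	return flags;
}

int main(void)
{
	unsigned long flags = demo_irq_save();
	printf("inside critical section: enabled=%d\n", irq_enabled);  /* 0 */
	demo_irq_restore(flags);           /* put back whatever state we found */
	printf("after restore: enabled=%d\n", irq_enabled);            /* 1 */
	return 0;
}
```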