Skip to content

Commit

Permalink
x86/paravirt: finish change from lazy cpu to context switch start/end
Browse files Browse the repository at this point in the history
Impact: fix lazy context switch API

Pass the previous and next tasks into the context switch start/end
calls, so that the called functions can properly access the
task state (especially in end_context_switch, where the next task
is not yet fully current).

Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
  • Loading branch information
Jeremy Fitzhardinge committed Mar 30, 2009
1 parent b407fc5 commit 224101e
Show file tree
Hide file tree
Showing 11 changed files with 37 additions and 36 deletions.
17 changes: 10 additions & 7 deletions arch/x86/include/asm/paravirt.h
Original file line number Diff line number Diff line change
Expand Up @@ -56,6 +56,7 @@ struct desc_ptr;
struct tss_struct;
struct mm_struct;
struct desc_struct;
struct task_struct;

/*
* Wrapper type for pointers to code which uses the non-standard
Expand Down Expand Up @@ -203,7 +204,8 @@ struct pv_cpu_ops {

void (*swapgs)(void);

struct pv_lazy_ops lazy_mode;
void (*start_context_switch)(struct task_struct *prev);
void (*end_context_switch)(struct task_struct *next);
};

struct pv_irq_ops {
Expand Down Expand Up @@ -1414,20 +1416,21 @@ enum paravirt_lazy_mode {
};

enum paravirt_lazy_mode paravirt_get_lazy_mode(void);
void paravirt_enter_lazy_cpu(void);
void paravirt_leave_lazy_cpu(void);
void paravirt_start_context_switch(struct task_struct *prev);
void paravirt_end_context_switch(struct task_struct *next);

void paravirt_enter_lazy_mmu(void);
void paravirt_leave_lazy_mmu(void);

#define __HAVE_ARCH_START_CONTEXT_SWITCH
static inline void arch_start_context_switch(void)
static inline void arch_start_context_switch(struct task_struct *prev)
{
PVOP_VCALL0(pv_cpu_ops.lazy_mode.enter);
PVOP_VCALL1(pv_cpu_ops.start_context_switch, prev);
}

static inline void arch_end_context_switch(void)
static inline void arch_end_context_switch(struct task_struct *next)
{
PVOP_VCALL0(pv_cpu_ops.lazy_mode.leave);
PVOP_VCALL1(pv_cpu_ops.end_context_switch, next);
}

#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
Expand Down
2 changes: 2 additions & 0 deletions arch/x86/include/asm/pgtable.h
Original file line number Diff line number Diff line change
Expand Up @@ -83,6 +83,8 @@ static inline void __init paravirt_pagetable_setup_done(pgd_t *base)
#define pte_val(x) native_pte_val(x)
#define __pte(x) native_make_pte(x)

#define arch_end_context_switch(prev) do {} while(0)

#endif /* CONFIG_PARAVIRT */

/*
Expand Down
14 changes: 6 additions & 8 deletions arch/x86/kernel/paravirt.c
Original file line number Diff line number Diff line change
Expand Up @@ -270,20 +270,20 @@ void paravirt_leave_lazy_mmu(void)
leave_lazy(PARAVIRT_LAZY_MMU);
}

void paravirt_enter_lazy_cpu(void)
void paravirt_start_context_switch(struct task_struct *prev)
{
if (percpu_read(paravirt_lazy_mode) == PARAVIRT_LAZY_MMU) {
arch_leave_lazy_mmu_mode();
set_thread_flag(TIF_LAZY_MMU_UPDATES);
set_ti_thread_flag(task_thread_info(prev), TIF_LAZY_MMU_UPDATES);
}
enter_lazy(PARAVIRT_LAZY_CPU);
}

void paravirt_leave_lazy_cpu(void)
void paravirt_end_context_switch(struct task_struct *next)
{
leave_lazy(PARAVIRT_LAZY_CPU);

if (test_and_clear_thread_flag(TIF_LAZY_MMU_UPDATES))
if (test_and_clear_ti_thread_flag(task_thread_info(next), TIF_LAZY_MMU_UPDATES))
arch_enter_lazy_mmu_mode();
}

Expand Down Expand Up @@ -399,10 +399,8 @@ struct pv_cpu_ops pv_cpu_ops = {
.set_iopl_mask = native_set_iopl_mask,
.io_delay = native_io_delay,

.lazy_mode = {
.enter = paravirt_nop,
.leave = paravirt_nop,
},
.start_context_switch = paravirt_nop,
.end_context_switch = paravirt_nop,
};

struct pv_apic_ops pv_apic_ops = {
Expand Down
2 changes: 1 addition & 1 deletion arch/x86/kernel/process_32.c
Original file line number Diff line number Diff line change
Expand Up @@ -407,7 +407,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
* done before math_state_restore, so the TS bit is up
* to date.
*/
arch_end_context_switch();
arch_end_context_switch(next_p);

/* If the task has used fpu the last 5 timeslices, just do a full
* restore of the math state immediately to avoid the trap; the
Expand Down
2 changes: 1 addition & 1 deletion arch/x86/kernel/process_64.c
Original file line number Diff line number Diff line change
Expand Up @@ -428,7 +428,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
* done before math_state_restore, so the TS bit is up
* to date.
*/
arch_end_context_switch();
arch_end_context_switch(next_p);

/*
* Switch FS and GS.
Expand Down
12 changes: 6 additions & 6 deletions arch/x86/kernel/vmi_32.c
Original file line number Diff line number Diff line change
Expand Up @@ -467,16 +467,16 @@ vmi_startup_ipi_hook(int phys_apicid, unsigned long start_eip,
}
#endif

static void vmi_enter_lazy_cpu(void)
static void vmi_start_context_switch(struct task_struct *prev)
{
paravirt_enter_lazy_cpu();
paravirt_start_context_switch(prev);
vmi_ops.set_lazy_mode(2);
}

static void vmi_leave_lazy_cpu(void)
static void vmi_end_context_switch(struct task_struct *next)
{
vmi_ops.set_lazy_mode(0);
paravirt_leave_lazy_cpu();
paravirt_end_context_switch(next);
}

static void vmi_enter_lazy_mmu(void)
Expand Down Expand Up @@ -722,9 +722,9 @@ static inline int __init activate_vmi(void)
para_fill(pv_cpu_ops.set_iopl_mask, SetIOPLMask);
para_fill(pv_cpu_ops.io_delay, IODelay);

para_wrap(pv_cpu_ops.lazy_mode.enter, vmi_enter_lazy_cpu,
para_wrap(pv_cpu_ops.start_context_switch, vmi_start_context_switch,
set_lazy_mode, SetLazyMode);
para_wrap(pv_cpu_ops.lazy_mode.leave, vmi_leave_lazy_cpu,
para_wrap(pv_cpu_ops.end_context_switch, vmi_end_context_switch,
set_lazy_mode, SetLazyMode);

para_wrap(pv_mmu_ops.lazy_mode.enter, vmi_enter_lazy_mmu,
Expand Down
8 changes: 4 additions & 4 deletions arch/x86/lguest/boot.c
Original file line number Diff line number Diff line change
Expand Up @@ -153,10 +153,10 @@ static void lguest_leave_lazy_mmu_mode(void)
paravirt_leave_lazy_mmu();
}

static void lguest_leave_lazy_cpu_mode(void)
static void lguest_end_context_switch(struct task_struct *next)
{
hcall(LHCALL_FLUSH_ASYNC, 0, 0, 0);
paravirt_leave_lazy_cpu();
paravirt_end_context_switch(next);
}

/*G:033
Expand Down Expand Up @@ -1031,8 +1031,8 @@ __init void lguest_init(void)
pv_cpu_ops.write_gdt_entry = lguest_write_gdt_entry;
pv_cpu_ops.write_idt_entry = lguest_write_idt_entry;
pv_cpu_ops.wbinvd = lguest_wbinvd;
pv_cpu_ops.lazy_mode.enter = paravirt_enter_lazy_cpu;
pv_cpu_ops.lazy_mode.leave = lguest_leave_lazy_cpu_mode;
pv_cpu_ops.start_context_switch = paravirt_start_context_switch;
pv_cpu_ops.end_context_switch = lguest_end_context_switch;

/* pagetable management */
pv_mmu_ops.write_cr3 = lguest_write_cr3;
Expand Down
10 changes: 4 additions & 6 deletions arch/x86/xen/enlighten.c
Original file line number Diff line number Diff line change
Expand Up @@ -203,10 +203,10 @@ static unsigned long xen_get_debugreg(int reg)
return HYPERVISOR_get_debugreg(reg);
}

/*
 * Xen hook for the end of a context switch: flush the multicall batch
 * accumulated during the switch, then do the generic paravirt
 * bookkeeping for @next.
 */
static void xen_end_context_switch(struct task_struct *next)
{
	xen_mc_flush();
	paravirt_end_context_switch(next);
}

static unsigned long xen_store_tr(void)
Expand Down Expand Up @@ -817,10 +817,8 @@ static const struct pv_cpu_ops xen_cpu_ops __initdata = {
/* Xen takes care of %gs when switching to usermode for us */
.swapgs = paravirt_nop,

.lazy_mode = {
.enter = paravirt_enter_lazy_cpu,
.leave = xen_leave_lazy_cpu,
},
.start_context_switch = paravirt_start_context_switch,
.end_context_switch = xen_end_context_switch,
};

static const struct pv_apic_ops xen_apic_ops __initdata = {
Expand Down
2 changes: 1 addition & 1 deletion include/asm-frv/pgtable.h
Original file line number Diff line number Diff line change
Expand Up @@ -74,7 +74,7 @@ static inline int pte_file(pte_t pte) { return 0; }
#define arch_enter_lazy_mmu_mode() do {} while (0)
#define arch_leave_lazy_mmu_mode() do {} while (0)

#define arch_start_context_switch() do {} while (0)
#define arch_start_context_switch(prev) do {} while (0)

#else /* !CONFIG_MMU */
/*****************************************************************************/
Expand Down
2 changes: 1 addition & 1 deletion include/asm-generic/pgtable.h
Original file line number Diff line number Diff line change
Expand Up @@ -291,7 +291,7 @@ static inline void ptep_modify_prot_commit(struct mm_struct *mm,
* definition.
*/
#ifndef __HAVE_ARCH_START_CONTEXT_SWITCH
#define arch_start_context_switch() do {} while (0)
#define arch_start_context_switch(prev) do {} while (0)
#endif

#ifndef __HAVE_PFNMAP_TRACKING
Expand Down
2 changes: 1 addition & 1 deletion kernel/sched.c
Original file line number Diff line number Diff line change
Expand Up @@ -2746,7 +2746,7 @@ context_switch(struct rq *rq, struct task_struct *prev,
* combine the page table reload and the switch backend into
* one hypercall.
*/
arch_start_context_switch();
arch_start_context_switch(prev);

if (unlikely(!mm)) {
next->active_mm = oldmm;
Expand Down

0 comments on commit 224101e

Please sign in to comment.