From 46755c05c51aba8a2cc3cf7340b0010b672ec8db Mon Sep 17 00:00:00 2001
From: Sergio Luis
Date: Tue, 28 Apr 2009 00:27:00 +0200
Subject: [PATCH]

--- yaml ---
r: 148667
b: refs/heads/master
c: f9ebbe53e79c5978d0e8ead0843a3717b41ad3d5
h: refs/heads/master
i:
  148665: 26cb8f2d7afa7b59d02f6d2774602d4d2afd93c2
  148663: 6b0d02cd03f7324e7f7410a40cea8ad0f21bb711
v: v3
---
 [refs]                        |  2 +-
 trunk/arch/x86/power/cpu_32.c | 48 +++++++++++++++++++++++++++++++++++
 trunk/arch/x86/power/cpu_64.c | 29 ++++++++++++++++++++-
 3 files changed, 77 insertions(+), 2 deletions(-)

diff --git a/[refs] b/[refs]
index ffb0d563184a..4b62e0738962 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 833b2ca0795526898a66c7b6770273bb16567e19
+refs/heads/master: f9ebbe53e79c5978d0e8ead0843a3717b41ad3d5
diff --git a/trunk/arch/x86/power/cpu_32.c b/trunk/arch/x86/power/cpu_32.c
index de1a86b2cffa..294e78baff75 100644
--- a/trunk/arch/x86/power/cpu_32.c
+++ b/trunk/arch/x86/power/cpu_32.c
@@ -32,25 +32,65 @@ static void fix_processor_context(void);
 struct saved_context saved_context;
 #endif
 
+/**
+ * __save_processor_state - save CPU registers before creating a
+ * hibernation image and before restoring the memory state from it
+ * @ctxt - structure to store the registers contents in
+ *
+ * NOTE: If there is a CPU register the modification of which by the
+ * boot kernel (ie. the kernel used for loading the hibernation image)
+ * might affect the operations of the restored target kernel (ie. the one
+ * saved in the hibernation image), then its contents must be saved by this
+ * function. In other words, if kernel A is hibernated and different
+ * kernel B is used for loading the hibernation image into memory, the
+ * kernel A's __save_processor_state() function must save all registers
+ * needed by kernel A, so that it can operate correctly after the resume
+ * regardless of what kernel B does in the meantime.
+ */
 static void __save_processor_state(struct saved_context *ctxt)
 {
+#ifdef CONFIG_X86_32
 	mtrr_save_fixed_ranges(NULL);
+#endif
 	kernel_fpu_begin();
 
 	/*
 	 * descriptor tables
 	 */
+#ifdef CONFIG_X86_32
 	store_gdt(&ctxt->gdt);
 	store_idt(&ctxt->idt);
+#else
+/* CONFIG_X86_64 */
+	store_gdt((struct desc_ptr *)&ctxt->gdt_limit);
+	store_idt((struct desc_ptr *)&ctxt->idt_limit);
+#endif
 	store_tr(ctxt->tr);
 
+	/* XMM0..XMM15 should be handled by kernel_fpu_begin(). */
 	/*
 	 * segment registers
 	 */
+#ifdef CONFIG_X86_32
 	savesegment(es, ctxt->es);
 	savesegment(fs, ctxt->fs);
 	savesegment(gs, ctxt->gs);
 	savesegment(ss, ctxt->ss);
+#else
+/* CONFIG_X86_64 */
+	asm volatile ("movw %%ds, %0" : "=m" (ctxt->ds));
+	asm volatile ("movw %%es, %0" : "=m" (ctxt->es));
+	asm volatile ("movw %%fs, %0" : "=m" (ctxt->fs));
+	asm volatile ("movw %%gs, %0" : "=m" (ctxt->gs));
+	asm volatile ("movw %%ss, %0" : "=m" (ctxt->ss));
+
+	rdmsrl(MSR_FS_BASE, ctxt->fs_base);
+	rdmsrl(MSR_GS_BASE, ctxt->gs_base);
+	rdmsrl(MSR_KERNEL_GS_BASE, ctxt->gs_kernel_base);
+	mtrr_save_fixed_ranges(NULL);
+
+	rdmsrl(MSR_EFER, ctxt->efer);
+#endif
 
 	/*
 	 * control registers
@@ -58,7 +98,13 @@ static void __save_processor_state(struct saved_context *ctxt)
 	ctxt->cr0 = read_cr0();
 	ctxt->cr2 = read_cr2();
 	ctxt->cr3 = read_cr3();
+#ifdef CONFIG_X86_32
 	ctxt->cr4 = read_cr4_safe();
+#else
+/* CONFIG_X86_64 */
+	ctxt->cr4 = read_cr4();
+	ctxt->cr8 = read_cr8();
+#endif
 }
 
 /* Needed by apm.c */
@@ -66,7 +112,9 @@ void save_processor_state(void)
 {
 	__save_processor_state(&saved_context);
 }
+#ifdef CONFIG_X86_32
 EXPORT_SYMBOL(save_processor_state);
+#endif
 
 static void do_fpu_end(void)
 {
diff --git a/trunk/arch/x86/power/cpu_64.c b/trunk/arch/x86/power/cpu_64.c
index 6ce0eca847c3..11ea7d0ba5d9 100644
--- a/trunk/arch/x86/power/cpu_64.c
+++ b/trunk/arch/x86/power/cpu_64.c
@@ -50,19 +50,35 @@ struct saved_context saved_context;
  */
 static void __save_processor_state(struct saved_context *ctxt)
 {
+#ifdef CONFIG_X86_32
+	mtrr_save_fixed_ranges(NULL);
+#endif
 	kernel_fpu_begin();
 
 	/*
 	 * descriptor tables
 	 */
+#ifdef CONFIG_X86_32
+	store_gdt(&ctxt->gdt);
+	store_idt(&ctxt->idt);
+#else
+/* CONFIG_X86_64 */
 	store_gdt((struct desc_ptr *)&ctxt->gdt_limit);
 	store_idt((struct desc_ptr *)&ctxt->idt_limit);
+#endif
 	store_tr(ctxt->tr);
 
 	/* XMM0..XMM15 should be handled by kernel_fpu_begin(). */
 	/*
 	 * segment registers
 	 */
+#ifdef CONFIG_X86_32
+	savesegment(es, ctxt->es);
+	savesegment(fs, ctxt->fs);
+	savesegment(gs, ctxt->gs);
+	savesegment(ss, ctxt->ss);
+#else
+/* CONFIG_X86_64 */
 	asm volatile ("movw %%ds, %0" : "=m" (ctxt->ds));
 	asm volatile ("movw %%es, %0" : "=m" (ctxt->es));
 	asm volatile ("movw %%fs, %0" : "=m" (ctxt->fs));
@@ -74,21 +90,32 @@ static void __save_processor_state(struct saved_context *ctxt)
 	rdmsrl(MSR_KERNEL_GS_BASE, ctxt->gs_kernel_base);
 	mtrr_save_fixed_ranges(NULL);
 
+	rdmsrl(MSR_EFER, ctxt->efer);
+#endif
+
 	/*
 	 * control registers
 	 */
-	rdmsrl(MSR_EFER, ctxt->efer);
 	ctxt->cr0 = read_cr0();
 	ctxt->cr2 = read_cr2();
 	ctxt->cr3 = read_cr3();
+#ifdef CONFIG_X86_32
+	ctxt->cr4 = read_cr4_safe();
+#else
+/* CONFIG_X86_64 */
 	ctxt->cr4 = read_cr4();
 	ctxt->cr8 = read_cr8();
+#endif
 }
 
+/* Needed by apm.c */
 void save_processor_state(void)
 {
 	__save_processor_state(&saved_context);
 }
+#ifdef CONFIG_X86_32
+EXPORT_SYMBOL(save_processor_state);
+#endif
 
 static void do_fpu_end(void)
 {
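
Two notes on the register-saving idioms the patch above unifies, each with a
small user-space sketch. Both sketches are my own illustrations, not part of
the patch.

First, savesegment() on the 32-bit side and the open-coded
"movw %%seg, %0" asm on the 64-bit side are the same operation: a mov from a
segment register into memory. That instruction is unprivileged, so the idiom
can be exercised from user space (builds with gcc on i386 or x86-64; the file
name is arbitrary):

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint16_t ds, es, fs, gs, ss;

		/* mov from a segment register is unprivileged, so the
		   same idiom used by the 64-bit branch works here too */
		asm volatile ("movw %%ds, %0" : "=m" (ds));
		asm volatile ("movw %%es, %0" : "=m" (es));
		asm volatile ("movw %%fs, %0" : "=m" (fs));
		asm volatile ("movw %%gs, %0" : "=m" (gs));
		asm volatile ("movw %%ss, %0" : "=m" (ss));

		printf("ds=%#x es=%#x fs=%#x gs=%#x ss=%#x\n",
		       ds, es, fs, gs, ss);
		return 0;
	}

Note that on 64-bit the selectors alone are not enough, which is why the
CONFIG_X86_64 branch also saves the fs/gs base addresses through
MSR_FS_BASE, MSR_GS_BASE and MSR_KERNEL_GS_BASE.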
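Second, the rdmsrl() calls, by contrast, wrap the privileged RDMSR
instruction and have no unprivileged equivalent. Outside the kernel the
closest analogue is the msr driver, which exposes each CPU's MSRs as
/dev/cpu/N/msr with the file offset selecting the register index. A sketch
reading EFER, the register this patch moves out of the common path (assumes
x86-64, root, and a loaded msr module; 0xc0000080 is the architectural EFER
index):

	#include <stdio.h>
	#include <stdint.h>
	#include <fcntl.h>
	#include <unistd.h>

	#define MSR_EFER 0xc0000080	/* architectural EFER index */

	int main(void)
	{
		uint64_t efer;
		int fd = open("/dev/cpu/0/msr", O_RDONLY);

		if (fd < 0) {
			perror("open /dev/cpu/0/msr");
			return 1;
		}
		/* the msr driver maps the file offset to the MSR index */
		if (pread(fd, &efer, sizeof(efer), MSR_EFER) != sizeof(efer)) {
			perror("pread");
			close(fd);
			return 1;
		}
		printf("EFER = %#llx\n", (unsigned long long)efer);
		close(fd);
		return 0;
	}

Run it as root after "modprobe msr"; on a 64-bit kernel expect bit 10 (LMA,
long mode active) to be set in the printed value.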