RISC-V: Add arch functions to support hibernation/suspend-to-disk
Low-level arch functions were created to support hibernation.
swsusp_arch_suspend() relies on code from __cpu_suspend_enter() to write
the CPU state onto the stack, then calls swsusp_save() to save the memory
image.
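
For reference, a minimal sketch of how swsusp_arch_suspend() can be built around __cpu_suspend_enter(): the first return takes the save path, and the second return (after __hibernate_cpu_resume()) takes the restore path. hibernate_cpu_context and sleep_cpu are assumed helpers here, not names taken from the patch; the details are illustrative only.

#include <linux/errno.h>
#include <linux/smp.h>
#include <linux/suspend.h>
#include <asm/cacheflush.h>
#include <asm/suspend.h>
#include <asm/tlbflush.h>

/* Assumed globals for this sketch. */
static struct suspend_context *hibernate_cpu_context;	/* allocated at init */
static int sleep_cpu = -EINVAL;				/* hart we hibernated on */

int swsusp_arch_suspend(void)
{
	int ret = 0;

	if (__cpu_suspend_enter(hibernate_cpu_context)) {
		/* First pass: CPU state is saved, now snapshot memory. */
		sleep_cpu = smp_processor_id();
		suspend_save_csrs(hibernate_cpu_context);
		ret = swsusp_save();
	} else {
		/* We get here after __hibernate_cpu_resume() restored the context. */
		suspend_restore_csrs(hibernate_cpu_context);
		flush_tlb_all();
		flush_icache_all();
		in_suspend = 0;		/* tell the hibernation core the image is restored */
		sleep_cpu = -EINVAL;
	}

	return ret;
}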

An arch-specific hibernation header is implemented and is used by the
arch_hibernation_header_restore() and arch_hibernation_header_save()
functions. The arch-specific hibernation header consists of the satp value,
the hartid, and the cpu_resume address. The kernel build version also needs
to be saved into the hibernation image header to make sure that only the
same kernel is restored on resume.
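
A sketch of what such a header and the save/restore pair could look like. The function signatures match the declarations added in asm/suspend.h below; the struct and field names are illustrative, only the contents (kernel version string, hartid, satp, cpu_resume address) come from the description above.

#include <linux/errno.h>
#include <linux/string.h>
#include <linux/utsname.h>

/* Illustrative arch-specific hibernation header. */
struct arch_hibernate_hdr {
	char		uts_version[__NEW_UTS_LEN + 1];	/* kernel build version */
	unsigned long	hartid;				/* hart we hibernated on */
	unsigned long	saved_satp;			/* page table of the saved image */
	unsigned long	restore_cpu_addr;		/* address of __hibernate_cpu_resume() */
};

static struct arch_hibernate_hdr resume_hdr;

int arch_hibernation_header_save(void *addr, unsigned int max_size)
{
	struct arch_hibernate_hdr *hdr = addr;

	if (max_size < sizeof(*hdr))
		return -EOVERFLOW;

	memcpy(hdr->uts_version, init_utsname()->version, sizeof(hdr->uts_version));
	/* hartid, saved_satp and restore_cpu_addr filled from the current CPU state. */
	return 0;
}

int arch_hibernation_header_restore(void *addr)
{
	struct arch_hibernate_hdr *hdr = addr;

	if (memcmp(hdr->uts_version, init_utsname()->version, sizeof(hdr->uts_version)))
		return -EINVAL;	/* image was built by a different kernel */

	resume_hdr = *hdr;
	return 0;
}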

swsusp_arch_resume() creates a temporary page table that covers only
the linear map. It copies the restore code to a 'safe' page, then starts
restoring the memory image. Once that completes, it restores the original
kernel's page table. It then calls __hibernate_cpu_resume()
to restore the CPU context. Finally, it follows the normal hibernation
path back to the hibernation core.
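
The resume flow described above might look roughly like the sketch below. temp_pgtable_map_linear() is an assumed helper, and resume_hdr/relocated_restore_code are assumed globals (see the header sketch above); hibernate_restore_image() takes the arguments declared in asm/suspend.h.

#include <linux/gfp.h>
#include <linux/pfn.h>
#include <linux/suspend.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/suspend.h>

/* Illustrative sketch of the resume path. */
int swsusp_arch_resume(void)
{
	pgd_t *resume_pg_dir;
	int ret;

	/* Temporary page table covering only the linear map. */
	resume_pg_dir = (pgd_t *)get_safe_page(GFP_ATOMIC);
	if (!resume_pg_dir)
		return -ENOMEM;
	ret = temp_pgtable_map_linear(resume_pg_dir);	/* assumed helper */
	if (ret)
		return ret;

	/* Copy the restore loop to a 'safe' page so it survives the copy-back. */
	relocated_restore_code = (void *)get_safe_page(GFP_ATOMIC);
	if (!relocated_restore_code)
		return -ENOMEM;
	copy_page(relocated_restore_code, hibernate_core_restore_code);

	/* Restore the image, then jump back into the saved kernel. */
	hibernate_restore_image(resume_hdr.saved_satp,
				(PFN_DOWN(__pa(resume_pg_dir)) | satp_mode),
				resume_hdr.restore_cpu_addr);
	return 0;
}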

To enable hibernation/suspend-to-disk on RISC-V, the following configs
need to be enabled:
- CONFIG_HIBERNATION
- CONFIG_ARCH_HIBERNATION_HEADER
- CONFIG_ARCH_HIBERNATION_POSSIBLE

Signed-off-by: Sia Jee Heng <jeeheng.sia@starfivetech.com>
Reviewed-by: Ley Foon Tan <leyfoon.tan@starfivetech.com>
Reviewed-by: Mason Huo <mason.huo@starfivetech.com>
Reviewed-by: Conor Dooley <conor.dooley@microchip.com>
Reviewed-by: Andrew Jones <ajones@ventanamicro.com>
Link: https://lore.kernel.org/r/20230330064321.1008373-5-jeeheng.sia@starfivetech.com
Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
Sia Jee Heng authored and Palmer Dabbelt committed Apr 29, 2023
1 parent a15c90b commit c031721
Showing 7 changed files with 556 additions and 1 deletion.
8 changes: 7 additions & 1 deletion arch/riscv/Kconfig
@@ -54,7 +54,7 @@ config RISCV
select CLINT_TIMER if !MMU
select CLONE_BACKWARDS
select COMMON_CLK
select CPU_PM if CPU_IDLE
select CPU_PM if CPU_IDLE || HIBERNATION
select EDAC_SUPPORT
select GENERIC_ARCH_TOPOLOGY
select GENERIC_ATOMIC64 if !64BIT
@@ -707,6 +707,12 @@ menu "Power management options"

source "kernel/power/Kconfig"

config ARCH_HIBERNATION_POSSIBLE
def_bool y

config ARCH_HIBERNATION_HEADER
def_bool HIBERNATION

endmenu # "Power management options"

menu "CPU Power Management"
20 changes: 20 additions & 0 deletions arch/riscv/include/asm/assembler.h
@@ -59,4 +59,24 @@
REG_L s11, (SUSPEND_CONTEXT_REGS + PT_S11)(a0)
.endm

/*
* copy_page - copy 1 page (4KB) of data from source to destination
* @a0 - destination
* @a1 - source
*/
.macro copy_page a0, a1
lui a2, 0x1
add a2, a2, a0
1 :
REG_L t0, 0(a1)
REG_L t1, SZREG(a1)

REG_S t0, 0(a0)
REG_S t1, SZREG(a0)

addi a0, a0, 2 * SZREG
addi a1, a1, 2 * SZREG
bne a2, a0, 1b
.endm

#endif /* __ASM_ASSEMBLER_H */
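
In C terms, the macro above computes an end pointer one page past the destination (lui a2, 0x1 loads 0x1000 = 4 KiB into a2) and copies two machine words per loop iteration. A rough C equivalent, for illustration only:

/* Rough C equivalent of the copy_page assembler macro above (illustrative). */
static void copy_page_c(unsigned long *dst, const unsigned long *src)
{
	unsigned long *end = (unsigned long *)((char *)dst + 4096);

	while (dst != end) {
		dst[0] = src[0];
		dst[1] = src[1];
		dst += 2;
		src += 2;
	}
}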
19 changes: 19 additions & 0 deletions arch/riscv/include/asm/suspend.h
@@ -21,6 +21,11 @@ struct suspend_context {
#endif
};

/*
* Used by hibernation core and cleared during resume sequence
*/
extern int in_suspend;

/* Low-level CPU suspend entry function */
int __cpu_suspend_enter(struct suspend_context *context);

@@ -36,4 +41,18 @@ int __cpu_resume_enter(unsigned long hartid, unsigned long context);
/* Used to save and restore the CSRs */
void suspend_save_csrs(struct suspend_context *context);
void suspend_restore_csrs(struct suspend_context *context);

/* Low-level API to support hibernation */
int swsusp_arch_suspend(void);
int swsusp_arch_resume(void);
int arch_hibernation_header_save(void *addr, unsigned int max_size);
int arch_hibernation_header_restore(void *addr);
int __hibernate_cpu_resume(void);

/* Used to resume on the CPU we hibernated on */
int hibernate_resume_nonboot_cpu_disable(void);

asmlinkage void hibernate_restore_image(unsigned long resume_satp, unsigned long satp_temp,
unsigned long cpu_resume);
asmlinkage int hibernate_core_restore_code(void);
#endif
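
hibernate_resume_nonboot_cpu_disable() exists so that resume continues on the hart that originally hibernated. A minimal sketch, assuming a sleep_cpu variable that records that hart (as other architectures do) and using the kernel's freeze_secondary_cpus() helper; this is not necessarily the exact implementation in this patch:

#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/printk.h>

/* Sketch: keep only the CPU we hibernated on online during resume. */
int hibernate_resume_nonboot_cpu_disable(void)
{
	if (sleep_cpu < 0) {
		pr_err("Failing to resume from hibernate on an unknown CPU\n");
		return -ENODEV;
	}

	return freeze_secondary_cpus(sleep_cpu);
}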
1 change: 1 addition & 0 deletions arch/riscv/kernel/Makefile
@@ -64,6 +64,7 @@ obj-$(CONFIG_MODULES) += module.o
obj-$(CONFIG_MODULE_SECTIONS) += module-sections.o

obj-$(CONFIG_CPU_PM) += suspend_entry.o suspend.o
obj-$(CONFIG_HIBERNATION) += hibernate.o hibernate-asm.o

obj-$(CONFIG_FUNCTION_TRACER) += mcount.o ftrace.o
obj-$(CONFIG_DYNAMIC_FTRACE) += mcount-dyn.o
5 changes: 5 additions & 0 deletions arch/riscv/kernel/asm-offsets.c
@@ -9,6 +9,7 @@
#include <linux/kbuild.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/suspend.h>
#include <asm/kvm_host.h>
#include <asm/thread_info.h>
#include <asm/ptrace.h>
@@ -116,6 +117,10 @@ void asm_offsets(void)

OFFSET(SUSPEND_CONTEXT_REGS, suspend_context, regs);

OFFSET(HIBERN_PBE_ADDR, pbe, address);
OFFSET(HIBERN_PBE_ORIG, pbe, orig_address);
OFFSET(HIBERN_PBE_NEXT, pbe, next);

OFFSET(KVM_ARCH_GUEST_ZERO, kvm_vcpu_arch, guest_context.zero);
OFFSET(KVM_ARCH_GUEST_RA, kvm_vcpu_arch, guest_context.ra);
OFFSET(KVM_ARCH_GUEST_SP, kvm_vcpu_arch, guest_context.sp);
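
The HIBERN_PBE_* offsets above correspond to the fields of struct pbe, the hibernation core's page-backup entry from include/linux/suspend.h, which the assembly restore loop below walks:

/* For reference: the structure the HIBERN_PBE_* offsets describe. */
struct pbe {
	void *address;		/* address of the copy of the page */
	void *orig_address;	/* original address of the page */
	struct pbe *next;
};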
77 changes: 77 additions & 0 deletions arch/riscv/kernel/hibernate-asm.S
@@ -0,0 +1,77 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Hibernation low level support for RISCV.
*
* Copyright (C) 2023 StarFive Technology Co., Ltd.
*
* Author: Jee Heng Sia <jeeheng.sia@starfivetech.com>
*/

#include <asm/asm.h>
#include <asm/asm-offsets.h>
#include <asm/assembler.h>
#include <asm/csr.h>

#include <linux/linkage.h>

/*
* int __hibernate_cpu_resume(void)
* Switch back to the hibernated image's page table prior to restoring the CPU
* context.
*
* Always returns 0
*/
ENTRY(__hibernate_cpu_resume)
/* switch to hibernated image's page table. */
csrw CSR_SATP, s0
sfence.vma

REG_L a0, hibernate_cpu_context

suspend_restore_csrs
suspend_restore_regs

/* Return zero value. */
mv a0, zero

ret
END(__hibernate_cpu_resume)

/*
* Prepare to restore the image.
* a0: satp of saved page tables.
* a1: satp of temporary page tables.
* a2: cpu_resume.
*/
ENTRY(hibernate_restore_image)
mv s0, a0
mv s1, a1
mv s2, a2
REG_L s4, restore_pblist
REG_L a1, relocated_restore_code

jalr a1
END(hibernate_restore_image)

/*
* The below code will be executed from a 'safe' page.
* It first switches to the temporary page table, then starts to copy the pages
* back to the original memory location. Finally, it jumps to __hibernate_cpu_resume()
* to restore the CPU context.
*/
ENTRY(hibernate_core_restore_code)
/* switch to temp page table. */
csrw satp, s1
sfence.vma
.Lcopy:
/* The below code will restore the hibernated image. */
REG_L a1, HIBERN_PBE_ADDR(s4)
REG_L a0, HIBERN_PBE_ORIG(s4)

copy_page a0, a1

REG_L s4, HIBERN_PBE_NEXT(s4)
bnez s4, .Lcopy

jalr s2
END(hibernate_core_restore_code)
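
Conceptually, hibernate_core_restore_code is the assembly form of walking the hibernation core's restore_pblist and copying each page back to its original location. An illustrative C rendering (copy_page here is the kernel's page-copy helper):

/* Illustrative C view of the loop in hibernate_core_restore_code. */
static void restore_image_pages(struct pbe *restore_pblist)
{
	struct pbe *pbe;

	for (pbe = restore_pblist; pbe; pbe = pbe->next)
		copy_page(pbe->orig_address, pbe->address);

	/* The assembly then jumps to __hibernate_cpu_resume() via s2. */
}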