From 6bae499a0ad437efb67b7c378e6fb4abef1885a1 Mon Sep 17 00:00:00 2001
From: Zhimin Gu
Date: Fri, 21 Sep 2018 14:28:22 +0800
Subject: x86-32, hibernate: Switch to relocated restore code during resume on 32bit system

On 64bit systems, the restore code is executed from a safe page during
page restoring, because the page holding the instructions that are running
during resume might be scribbled over by the image being restored and
cause issues.

Although on 32bit we currently only support resuming with the same kernel
that did the suspend, we'd like to remove that restriction in the future.

Port the corresponding code from the 64bit system: allocate a safe page,
copy the restore code into it, then jump to the safe page to run the code.

Signed-off-by: Zhimin Gu
Acked-by: Pavel Machek
Signed-off-by: Chen Yu
Acked-by: Thomas Gleixner
Signed-off-by: Rafael J. Wysocki
---
 arch/x86/power/hibernate.c        | 2 --
 arch/x86/power/hibernate_32.c     | 4 ++++
 arch/x86/power/hibernate_asm_32.S | 7 +++++++
 3 files changed, 11 insertions(+), 2 deletions(-)

diff --git a/arch/x86/power/hibernate.c b/arch/x86/power/hibernate.c
index 4935b8139229..7383cb67ffd7 100644
--- a/arch/x86/power/hibernate.c
+++ b/arch/x86/power/hibernate.c
@@ -212,7 +212,6 @@ int arch_hibernation_header_restore(void *addr)
 	return 0;
 }
 
-#ifdef CONFIG_X86_64
 int relocate_restore_code(void)
 {
 	pgd_t *pgd;
@@ -251,4 +250,3 @@ out:
 	__flush_tlb_all();
 	return 0;
 }
-#endif
diff --git a/arch/x86/power/hibernate_32.c b/arch/x86/power/hibernate_32.c
index a44bdada4e4e..a9861095fbb8 100644
--- a/arch/x86/power/hibernate_32.c
+++ b/arch/x86/power/hibernate_32.c
@@ -158,6 +158,10 @@ asmlinkage int swsusp_arch_resume(void)
 
 	temp_pgt = __pa(resume_pg_dir);
 
+	error = relocate_restore_code();
+	if (error)
+		return error;
+
 	/* We have got enough memory and from now on we cannot recover */
 	restore_image();
 	return 0;
diff --git a/arch/x86/power/hibernate_asm_32.S b/arch/x86/power/hibernate_asm_32.S
index 6b2b94937113..e9adda6b6b02 100644
--- a/arch/x86/power/hibernate_asm_32.S
+++ b/arch/x86/power/hibernate_asm_32.S
@@ -39,6 +39,13 @@ ENTRY(restore_image)
 	movl	restore_cr3, %ebp
 
 	movl	mmu_cr4_features, %ecx
+
+	/* jump to relocated restore code */
+	movl	relocated_restore_code, %eax
+	jmpl	*%eax
+
+/* code below has been relocated to a safe page */
+ENTRY(core_restore_code)
 	movl	temp_pgt, %eax
 	movl	%eax, %cr3
 
--
cgit v1.2.3
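
Editor's note: for readers unfamiliar with the relocate-and-jump pattern the
patch describes (allocate a safe page, copy the restore code into it, then
jump to the copy), the following is a minimal userspace C sketch of the same
idea. It is an illustration only, not kernel code: the mmap()/mprotect()
allocation, the hard-coded machine-code blob, and all names below are
assumptions standing in for get_safe_page(), core_restore_code and the
page-table fix-up done by relocate_restore_code().

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

/* x86 machine code for "mov eax, 42; ret" - valid on both 32- and 64-bit. */
static const unsigned char code_blob[] = { 0xb8, 0x2a, 0x00, 0x00, 0x00, 0xc3 };

int main(void)
{
	/*
	 * Allocate a "safe" page that is independent of the code currently
	 * running (the kernel uses get_safe_page(GFP_ATOMIC) for this).
	 */
	void *safe_page = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (safe_page == MAP_FAILED)
		return 1;

	/* Copy the self-contained code into the safe page. */
	memcpy(safe_page, code_blob, sizeof(code_blob));

	/*
	 * Make the copy executable; the kernel instead clears NX in the
	 * page tables and flushes the TLB.
	 */
	if (mprotect(safe_page, 4096, PROT_READ | PROT_EXEC))
		return 1;

	/* Jump to the relocated copy rather than the original location. */
	int (*relocated)(void) = (int (*)(void))safe_page;
	printf("relocated code returned %d\n", relocated());

	munmap(safe_page, 4096);
	return 0;
}

Build with a plain C compiler on an x86 machine. The copied bytes contain no
relocations, which is the same property that lets core_restore_code run
correctly from whatever safe page it has been copied to.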