/*
 * Hibernation support for x86-64
 *
 * Distribute under GPLv2
 *
 * Copyright (c) 2007 Rafael J. Wysocki <rjw@sisk.pl>
 * Copyright (c) 2002 Pavel Machek <pavel@ucw.cz>
 * Copyright (c) 2001 Patrick Mochel <mochel@osdl.org>
 */

#include <linux/gfp.h>
#include <linux/smp.h>
#include <linux/suspend.h>
#include <linux/cpu.h>

#include <asm/init.h>
#include <asm/proto.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/mtrr.h>
#include <asm/sections.h>
#include <asm/suspend.h>
#include <asm/tlbflush.h>

/* Defined in hibernate_asm_64.S */
extern asmlinkage __visible int restore_image(void);

/*
 * Address to jump to in the last phase of restore in order to get to the image
 * kernel's text (this value is passed in the image header).
 */
unsigned long restore_jump_address __visible;
unsigned long jump_address_phys;

/*
 * Value of the cr3 register from before the hibernation (this value is passed
 * in the image header).
 */
unsigned long restore_cr3 __visible;

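/*
 * Physical address of the temporary page tables built for the restore phase;
 * the low-level code in hibernate_asm_64.S loads this value into CR3.
 */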
unsigned long temp_level4_pgt __visible;

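/*
 * Address of the hibernation-safe page that receives a copy of the low-level
 * restore code (see relocate_restore_code() below).
 */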
unsigned long relocated_restore_code __visible;

static int set_up_temporary_text_mapping(pgd_t *pgd)
{
	pmd_t *pmd;
	pud_t *pud;

	/*
	 * The new mapping only has to cover the page containing the image
	 * kernel's entry point (jump_address_phys), because the switch over to
	 * it is carried out by relocated code running from a page allocated
	 * specifically for this purpose and covered by the identity mapping, so
	 * the temporary kernel text mapping is only needed for the final jump.
	 * Moreover, in that mapping the virtual address of the image kernel's
	 * entry point must be the same as its virtual address in the image
	 * kernel (restore_jump_address), so the image kernel's
	 * restore_registers() code doesn't find itself in a different area of
	 * the virtual address space after switching over to the original page
	 * tables used by the image kernel.
	 */
	pud = (pud_t *)get_safe_page(GFP_ATOMIC);
	if (!pud)
		return -ENOMEM;

	pmd = (pmd_t *)get_safe_page(GFP_ATOMIC);
	if (!pmd)
		return -ENOMEM;

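	/*
	 * Map the entry point with a single 2M executable large page; one
	 * entry at each page-table level is all this mapping needs.
	 */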
	set_pmd(pmd + pmd_index(restore_jump_address),
		__pmd((jump_address_phys & PMD_MASK) | __PAGE_KERNEL_LARGE_EXEC));
	set_pud(pud + pud_index(restore_jump_address),
		__pud(__pa(pmd) | _KERNPG_TABLE));
	set_pgd(pgd + pgd_index(restore_jump_address),
		__pgd(__pa(pud) | _KERNPG_TABLE));

	return 0;
}

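/* Page-table allocation callback used by kernel_ident_mapping_init(). */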
static void *alloc_pgt_page(void *context)
{
	return (void *)get_safe_page(GFP_ATOMIC);
}

static int set_up_temporary_mappings(void)
{
	struct x86_mapping_info info = {
		.alloc_pgt_page	= alloc_pgt_page,
		.pmd_flag	= __PAGE_KERNEL_LARGE_EXEC,
		.offset		= __PAGE_OFFSET,
	};
	unsigned long mstart, mend;
	pgd_t *pgd;
	int result;
	int i;

	pgd = (pgd_t *)get_safe_page(GFP_ATOMIC);
	if (!pgd)
		return -ENOMEM;

	/* Prepare a temporary mapping for the kernel text */
	result = set_up_temporary_text_mapping(pgd);
	if (result)
		return result;

	/* Set up the direct mapping from scratch */
	for (i = 0; i < nr_pfn_mapped; i++) {
		mstart = pfn_mapped[i].start << PAGE_SHIFT;
		mend   = pfn_mapped[i].end << PAGE_SHIFT;

		result = kernel_ident_mapping_init(&info, pgd, mstart, mend);
		if (result)
			return result;
	}

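	/* This is the value the assembly restore code will load into CR3. */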
	temp_level4_pgt = __pa(pgd);
	return 0;
}

static int relocate_restore_code(void)
{
	pgd_t *pgd;
	pud_t *pud;

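	/*
	 * The final page-copy loop may overwrite the loaded kernel's text,
	 * including core_restore_code itself, so run a copy of that code
	 * from a hibernation-safe page instead.
	 */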
	relocated_restore_code = get_safe_page(GFP_ATOMIC);
	if (!relocated_restore_code)
		return -ENOMEM;

	memcpy((void *)relocated_restore_code, core_restore_code, PAGE_SIZE);

	/* Make the page containing the relocated code executable */
	pgd = (pgd_t *)__va(read_cr3()) + pgd_index(relocated_restore_code);
	pud = pud_offset(pgd, relocated_restore_code);
	if (pud_large(*pud)) {
		set_pud(pud, __pud(pud_val(*pud) & ~_PAGE_NX));
	} else {
		pmd_t *pmd = pmd_offset(pud, relocated_restore_code);

		if (pmd_large(*pmd)) {
			set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_NX));
		} else {
			pte_t *pte = pte_offset_kernel(pmd, relocated_restore_code);

			set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_NX));
		}
	}
	__flush_tlb_all();

	return 0;
}

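/*
 * swsusp_arch_resume - build the temporary page tables, relocate the
 * low-level restore code and jump to it.  On success this does not return;
 * execution continues in the restored image kernel.
 */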
asmlinkage int swsusp_arch_resume(void)
{
	int error;

	/* We have got enough memory and from now on we cannot recover */
	error = set_up_temporary_mappings();
	if (error)
		return error;

	error = relocate_restore_code();
	if (error)
		return error;

	restore_image();
	return 0;
}

/*
 *	pfn_is_nosave - check if given pfn is in the 'nosave' section
 */
int pfn_is_nosave(unsigned long pfn)
{
	unsigned long nosave_begin_pfn = __pa_symbol(&__nosave_begin) >> PAGE_SHIFT;
	unsigned long nosave_end_pfn = PAGE_ALIGN(__pa_symbol(&__nosave_end)) >> PAGE_SHIFT;

	return (pfn >= nosave_begin_pfn) && (pfn < nosave_end_pfn);
}

struct restore_data_record {
	unsigned long jump_address;
	unsigned long jump_address_phys;
	unsigned long cr3;
	unsigned long magic;
};

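/*
 * Stored in the image header and checked on resume; a mismatch makes
 * arch_hibernation_header_restore() reject the image with -EINVAL.
 */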
#define RESTORE_MAGIC	0x123456789ABCDEF0UL

/**
 *	arch_hibernation_header_save - populate the architecture specific part
 *		of a hibernation image header
 *	@addr: address to save the data at
 */
int arch_hibernation_header_save(void *addr, unsigned int max_size)
{
	struct restore_data_record *rdr = addr;

	if (max_size < sizeof(struct restore_data_record))
		return -EOVERFLOW;
	rdr->jump_address = (unsigned long)restore_registers;
	rdr->jump_address_phys = __pa_symbol(restore_registers);
	rdr->cr3 = restore_cr3;
	rdr->magic = RESTORE_MAGIC;
	return 0;
}

/**
 *	arch_hibernation_header_restore - read the architecture specific data
 *		from the hibernation image header
 *	@addr: address to read the data from
 */
int arch_hibernation_header_restore(void *addr)
{
	struct restore_data_record *rdr = addr;

	restore_jump_address = rdr->jump_address;
	jump_address_phys = rdr->jump_address_phys;
	restore_cr3 = rdr->cr3;
	return (rdr->magic == RESTORE_MAGIC) ? 0 : -EINVAL;
}

int arch_resume_nosmt(void)
{
	int ret = 0;
	/*
	 * We reached this while coming out of hibernation. This means
	 * that SMT siblings are sleeping in hlt, as mwait is not safe
	 * against control transition during resume (see comment in
	 * hibernate_resume_nonboot_cpu_disable()).
	 *
	 * If the resumed kernel has SMT disabled, we have to take all the
	 * SMT siblings out of hlt, and offline them again so that they
	 * end up in mwait proper.
	 *
	 * Called with hotplug disabled.
	 */
	cpu_hotplug_enable();
	if (cpu_smt_control == CPU_SMT_DISABLED ||
	    cpu_smt_control == CPU_SMT_FORCE_DISABLED) {
		enum cpuhp_smt_control old = cpu_smt_control;

		ret = cpuhp_smt_enable();
		if (ret)
			goto out;

		ret = cpuhp_smt_disable(old);
		if (ret)
			goto out;
	}
out:
	cpu_hotplug_disable();
	return ret;
}