GNU Linux-libre 4.9.337-gnu1
arch/x86/power/hibernate_64.c
/*
 * Hibernation support for x86-64
 *
 * Distribute under GPLv2
 *
 * Copyright (c) 2007 Rafael J. Wysocki <rjw@sisk.pl>
 * Copyright (c) 2002 Pavel Machek <pavel@ucw.cz>
 * Copyright (c) 2001 Patrick Mochel <mochel@osdl.org>
 */

#include <linux/gfp.h>
#include <linux/smp.h>
#include <linux/suspend.h>
#include <linux/cpu.h>

#include <asm/init.h>
#include <asm/proto.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/mtrr.h>
#include <asm/sections.h>
#include <asm/suspend.h>
#include <asm/tlbflush.h>

/* Defined in hibernate_asm_64.S */
extern asmlinkage __visible int restore_image(void);

/*
 * Address to jump to in the last phase of restore in order to get to the image
 * kernel's text (this value is passed in the image header).
 */
unsigned long restore_jump_address __visible;
unsigned long jump_address_phys;

/*
 * Value of the cr3 register from before the hibernation (this value is passed
 * in the image header).
 */
unsigned long restore_cr3 __visible;

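/*
 * Physical address of the temporary top-level page table built by
 * set_up_temporary_mappings() and loaded into CR3 by the low-level restore
 * code in hibernate_asm_64.S.
 */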
unsigned long temp_level4_pgt __visible;

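/*
 * Address of the safe page holding a copy of the core restore code; the
 * switch-over to the temporary page tables runs from that copy (see
 * relocate_restore_code()).
 */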
unsigned long relocated_restore_code __visible;

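/*
 * Create a temporary text mapping covering the page that contains the image
 * kernel's entry point, at the virtual address the image kernel expects.
 */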
static int set_up_temporary_text_mapping(pgd_t *pgd)
{
        pmd_t *pmd;
        pud_t *pud;

        /*
         * The new mapping only has to cover the page containing the image
         * kernel's entry point (jump_address_phys), because the switch over to
         * it is carried out by relocated code running from a page allocated
         * specifically for this purpose and covered by the identity mapping, so
         * the temporary kernel text mapping is only needed for the final jump.
         * Moreover, in that mapping the virtual address of the image kernel's
         * entry point must be the same as its virtual address in the image
         * kernel (restore_jump_address), so the image kernel's
         * restore_registers() code doesn't find itself in a different area of
         * the virtual address space after switching over to the original page
         * tables used by the image kernel.
         */
        pud = (pud_t *)get_safe_page(GFP_ATOMIC);
        if (!pud)
                return -ENOMEM;

        pmd = (pmd_t *)get_safe_page(GFP_ATOMIC);
        if (!pmd)
                return -ENOMEM;

        set_pmd(pmd + pmd_index(restore_jump_address),
                __pmd((jump_address_phys & PMD_MASK) | __PAGE_KERNEL_LARGE_EXEC));
        set_pud(pud + pud_index(restore_jump_address),
                __pud(__pa(pmd) | _KERNPG_TABLE));
        set_pgd(pgd + pgd_index(restore_jump_address),
                __pgd(__pa(pud) | _KERNPG_TABLE));

        return 0;
}

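/* Page-table page allocation callback for kernel_ident_mapping_init(). */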
static void *alloc_pgt_page(void *context)
{
        return (void *)get_safe_page(GFP_ATOMIC);
}

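/*
 * Build the temporary page tables used during resume: a text mapping for the
 * final jump to the image kernel plus an identity mapping of all directly
 * mapped memory ranges.
 */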
static int set_up_temporary_mappings(void)
{
        struct x86_mapping_info info = {
                .alloc_pgt_page = alloc_pgt_page,
                .pmd_flag       = __PAGE_KERNEL_LARGE_EXEC,
                .offset         = __PAGE_OFFSET,
        };
        unsigned long mstart, mend;
        pgd_t *pgd;
        int result;
        int i;

        pgd = (pgd_t *)get_safe_page(GFP_ATOMIC);
        if (!pgd)
                return -ENOMEM;

        /* Prepare a temporary mapping for the kernel text */
        result = set_up_temporary_text_mapping(pgd);
        if (result)
                return result;

        /* Set up the direct mapping from scratch */
        for (i = 0; i < nr_pfn_mapped; i++) {
                mstart = pfn_mapped[i].start << PAGE_SHIFT;
                mend   = pfn_mapped[i].end << PAGE_SHIFT;

                result = kernel_ident_mapping_init(&info, pgd, mstart, mend);
                if (result)
                        return result;
        }

        temp_level4_pgt = __pa(pgd);
        return 0;
}

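/*
 * Copy the core restore code to a safe page and clear the NX bit on the
 * current mapping of that page so that the copy can be executed.
 */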
static int relocate_restore_code(void)
{
        pgd_t *pgd;
        pud_t *pud;

        relocated_restore_code = get_safe_page(GFP_ATOMIC);
        if (!relocated_restore_code)
                return -ENOMEM;

        memcpy((void *)relocated_restore_code, core_restore_code, PAGE_SIZE);

        /* Make the page containing the relocated code executable */
        pgd = (pgd_t *)__va(read_cr3()) + pgd_index(relocated_restore_code);
        pud = pud_offset(pgd, relocated_restore_code);
        if (pud_large(*pud)) {
                set_pud(pud, __pud(pud_val(*pud) & ~_PAGE_NX));
        } else {
                pmd_t *pmd = pmd_offset(pud, relocated_restore_code);

                if (pmd_large(*pmd)) {
                        set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_NX));
                } else {
                        pte_t *pte = pte_offset_kernel(pmd, relocated_restore_code);

                        set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_NX));
                }
        }
        __flush_tlb_all();

        return 0;
}

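/*
 * Architecture-specific part of resume: set up the temporary page tables,
 * relocate the restore code and pass control to restore_image(), which does
 * not return if the image is restored successfully.
 */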
asmlinkage int swsusp_arch_resume(void)
{
        int error;

        /* We have got enough memory and from now on we cannot recover */
        error = set_up_temporary_mappings();
        if (error)
                return error;

        error = relocate_restore_code();
        if (error)
                return error;

        restore_image();
        return 0;
}

/*
 *      pfn_is_nosave - check if given pfn is in the 'nosave' section
 */
int pfn_is_nosave(unsigned long pfn)
{
        unsigned long nosave_begin_pfn = __pa_symbol(&__nosave_begin) >> PAGE_SHIFT;
        unsigned long nosave_end_pfn = PAGE_ALIGN(__pa_symbol(&__nosave_end)) >> PAGE_SHIFT;

        return (pfn >= nosave_begin_pfn) && (pfn < nosave_end_pfn);
}

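/*
 * Architecture-specific data saved in (and read back from) the hibernation
 * image header; the magic value lets the restore kernel verify that the
 * header layout matches what it expects.
 */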
struct restore_data_record {
        unsigned long jump_address;
        unsigned long jump_address_phys;
        unsigned long cr3;
        unsigned long magic;
};

#define RESTORE_MAGIC   0x123456789ABCDEF0UL

/**
 *      arch_hibernation_header_save - populate the architecture specific part
 *              of a hibernation image header
 *      @addr: address to save the data at
 *      @max_size: maximum size of the data that can be stored at @addr
 */
int arch_hibernation_header_save(void *addr, unsigned int max_size)
{
        struct restore_data_record *rdr = addr;

        if (max_size < sizeof(struct restore_data_record))
                return -EOVERFLOW;
        rdr->jump_address = (unsigned long)restore_registers;
        rdr->jump_address_phys = __pa_symbol(restore_registers);
        rdr->cr3 = restore_cr3;
        rdr->magic = RESTORE_MAGIC;
        return 0;
}

/**
 *      arch_hibernation_header_restore - read the architecture specific data
 *              from the hibernation image header
 *      @addr: address to read the data from
 */
int arch_hibernation_header_restore(void *addr)
{
        struct restore_data_record *rdr = addr;

        restore_jump_address = rdr->jump_address;
        jump_address_phys = rdr->jump_address_phys;
        restore_cr3 = rdr->cr3;
        return (rdr->magic == RESTORE_MAGIC) ? 0 : -EINVAL;
}

int arch_resume_nosmt(void)
{
        int ret = 0;
        /*
         * We reached this while coming out of hibernation. This means
         * that SMT siblings are sleeping in hlt, as mwait is not safe
         * against control transition during resume (see comment in
         * hibernate_resume_nonboot_cpu_disable()).
         *
         * If the resumed kernel has SMT disabled, we have to take all the
         * SMT siblings out of hlt, and offline them again so that they
         * end up in mwait proper.
         *
         * Called with hotplug disabled.
         */
        cpu_hotplug_enable();
        if (cpu_smt_control == CPU_SMT_DISABLED ||
                        cpu_smt_control == CPU_SMT_FORCE_DISABLED) {
                enum cpuhp_smt_control old = cpu_smt_control;

                ret = cpuhp_smt_enable();
                if (ret)
                        goto out;
                ret = cpuhp_smt_disable(old);
                if (ret)
                        goto out;
        }
out:
        cpu_hotplug_disable();
        return ret;
}