GNU Linux-libre 4.19.286-gnu1
arch/s390/mm/vmem.c
// SPDX-License-Identifier: GPL-2.0
/*
 *    Copyright IBM Corp. 2006
 *    Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/bootmem.h>
#include <linux/pfn.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/memblock.h>
#include <asm/cacheflush.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/set_memory.h>

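/*
 * Each memory_segment describes one contiguous range that has been added
 * to the 1:1 mapping. All segments are kept on the mem_segs list; the
 * list and the mapping updates are serialized by vmem_mutex.
 */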
static DEFINE_MUTEX(vmem_mutex);

struct memory_segment {
        struct list_head list;
        unsigned long start;
        unsigned long size;
};

static LIST_HEAD(mem_segs);

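/*
 * Allocate naturally aligned pages: from the buddy allocator once the
 * slab allocator is up, from memblock during early boot.
 */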
static void __ref *vmem_alloc_pages(unsigned int order)
{
        unsigned long size = PAGE_SIZE << order;

        if (slab_is_available())
                return (void *)__get_free_pages(GFP_KERNEL, order);
        return (void *) memblock_alloc(size, size);
}

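/*
 * Allocate a crst (region or segment) table and initialize all entries
 * with @val, such as the *_ENTRY_EMPTY values used in this file.
 */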
void *vmem_crst_alloc(unsigned long val)
{
        unsigned long *table;

        table = vmem_alloc_pages(CRST_ALLOC_ORDER);
        if (table)
                crst_table_init(table, val);
        return table;
}

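/*
 * Allocate a page table and mark all entries invalid. Uses the normal
 * page table allocator once the slab allocator is up, memblock before.
 */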
pte_t __ref *vmem_pte_alloc(void)
{
        unsigned long size = PTRS_PER_PTE * sizeof(pte_t);
        pte_t *pte;

        if (slab_is_available())
                pte = (pte_t *) page_table_alloc(&init_mm);
        else
                pte = (pte_t *) memblock_alloc(size, size);
        if (!pte)
                return NULL;
        memset64((u64 *)pte, _PAGE_INVALID, PTRS_PER_PTE);
        return pte;
}

/*
 * Add a physical memory range to the 1:1 mapping.
 */
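/*
 * The range is mapped with the largest frames the hardware supports:
 * 2GB region-third entries with EDAT2, 1MB segment entries with EDAT1,
 * 4KB pages otherwise. Page table levels are allocated on demand.
 */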
static int vmem_add_mem(unsigned long start, unsigned long size)
{
        unsigned long pgt_prot, sgt_prot, r3_prot;
        unsigned long pages4k, pages1m, pages2g;
        unsigned long end = start + size;
        unsigned long address = start;
        pgd_t *pg_dir;
        p4d_t *p4_dir;
        pud_t *pu_dir;
        pmd_t *pm_dir;
        pte_t *pt_dir;
        int ret = -ENOMEM;

        pgt_prot = pgprot_val(PAGE_KERNEL);
        sgt_prot = pgprot_val(SEGMENT_KERNEL);
        r3_prot = pgprot_val(REGION3_KERNEL);
        if (!MACHINE_HAS_NX) {
                pgt_prot &= ~_PAGE_NOEXEC;
                sgt_prot &= ~_SEGMENT_ENTRY_NOEXEC;
                r3_prot &= ~_REGION_ENTRY_NOEXEC;
        }
        pages4k = pages1m = pages2g = 0;
        while (address < end) {
                pg_dir = pgd_offset_k(address);
                if (pgd_none(*pg_dir)) {
                        p4_dir = vmem_crst_alloc(_REGION2_ENTRY_EMPTY);
                        if (!p4_dir)
                                goto out;
                        pgd_populate(&init_mm, pg_dir, p4_dir);
                }
                p4_dir = p4d_offset(pg_dir, address);
                if (p4d_none(*p4_dir)) {
                        pu_dir = vmem_crst_alloc(_REGION3_ENTRY_EMPTY);
                        if (!pu_dir)
                                goto out;
                        p4d_populate(&init_mm, p4_dir, pu_dir);
                }
                pu_dir = pud_offset(p4_dir, address);
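                /*
                 * With EDAT2, map 2GB at once via a large region-third
                 * entry, but only for a fully aligned 2GB block within
                 * the range and only when debug_pagealloc is disabled.
                 */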
                if (MACHINE_HAS_EDAT2 && pud_none(*pu_dir) && address &&
                    !(address & ~PUD_MASK) && (address + PUD_SIZE <= end) &&
                    !debug_pagealloc_enabled()) {
                        pud_val(*pu_dir) = address | r3_prot;
                        address += PUD_SIZE;
                        pages2g++;
                        continue;
                }
                if (pud_none(*pu_dir)) {
                        pm_dir = vmem_crst_alloc(_SEGMENT_ENTRY_EMPTY);
                        if (!pm_dir)
                                goto out;
                        pud_populate(&init_mm, pu_dir, pm_dir);
                }
                pm_dir = pmd_offset(pu_dir, address);
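                /*
                 * Likewise, with EDAT1 map 1MB at once via a large
                 * segment entry when alignment and remaining size allow.
                 */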
                if (MACHINE_HAS_EDAT1 && pmd_none(*pm_dir) && address &&
                    !(address & ~PMD_MASK) && (address + PMD_SIZE <= end) &&
                    !debug_pagealloc_enabled()) {
                        pmd_val(*pm_dir) = address | sgt_prot;
                        address += PMD_SIZE;
                        pages1m++;
                        continue;
                }
                if (pmd_none(*pm_dir)) {
                        pt_dir = vmem_pte_alloc();
                        if (!pt_dir)
                                goto out;
                        pmd_populate(&init_mm, pm_dir, pt_dir);
                }

                pt_dir = pte_offset_kernel(pm_dir, address);
                pte_val(*pt_dir) = address | pgt_prot;
                address += PAGE_SIZE;
                pages4k++;
        }
        ret = 0;
out:
        update_page_count(PG_DIRECT_MAP_4K, pages4k);
        update_page_count(PG_DIRECT_MAP_1M, pages1m);
        update_page_count(PG_DIRECT_MAP_2G, pages2g);
        return ret;
}

/*
 * Remove a physical memory range from the 1:1 mapping.
 * Currently only invalidates page table entries.
 */
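/*
 * Large (1MB/2GB) mappings are invalidated as a whole; neither the page
 * tables nor the mapped memory are freed here.
 */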
static void vmem_remove_range(unsigned long start, unsigned long size)
{
        unsigned long pages4k, pages1m, pages2g;
        unsigned long end = start + size;
        unsigned long address = start;
        pgd_t *pg_dir;
        p4d_t *p4_dir;
        pud_t *pu_dir;
        pmd_t *pm_dir;
        pte_t *pt_dir;

        pages4k = pages1m = pages2g = 0;
        while (address < end) {
                pg_dir = pgd_offset_k(address);
                if (pgd_none(*pg_dir)) {
                        address += PGDIR_SIZE;
                        continue;
                }
                p4_dir = p4d_offset(pg_dir, address);
                if (p4d_none(*p4_dir)) {
                        address += P4D_SIZE;
                        continue;
                }
                pu_dir = pud_offset(p4_dir, address);
                if (pud_none(*pu_dir)) {
                        address += PUD_SIZE;
                        continue;
                }
                if (pud_large(*pu_dir)) {
                        pud_clear(pu_dir);
                        address += PUD_SIZE;
                        pages2g++;
                        continue;
                }
                pm_dir = pmd_offset(pu_dir, address);
                if (pmd_none(*pm_dir)) {
                        address += PMD_SIZE;
                        continue;
                }
                if (pmd_large(*pm_dir)) {
                        pmd_clear(pm_dir);
                        address += PMD_SIZE;
                        pages1m++;
                        continue;
                }
                pt_dir = pte_offset_kernel(pm_dir, address);
                pte_clear(&init_mm, address, pt_dir);
                address += PAGE_SIZE;
                pages4k++;
        }
        flush_tlb_kernel_range(start, end);
        update_page_count(PG_DIRECT_MAP_4K, -pages4k);
        update_page_count(PG_DIRECT_MAP_1M, -pages1m);
        update_page_count(PG_DIRECT_MAP_2G, -pages2g);
}

/*
 * Add a backed mem_map array to the virtual mem_map array.
 */
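/*
 * Called by the generic sparse-vmemmap code for each memory section.
 * The struct page array is backed with 1MB segments if EDAT1 is
 * available, otherwise with 4KB pages allocated via
 * vmemmap_alloc_block() on the requested node.
 */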
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
                struct vmem_altmap *altmap)
{
        unsigned long pgt_prot, sgt_prot;
        unsigned long address = start;
        pgd_t *pg_dir;
        p4d_t *p4_dir;
        pud_t *pu_dir;
        pmd_t *pm_dir;
        pte_t *pt_dir;
        int ret = -ENOMEM;

        pgt_prot = pgprot_val(PAGE_KERNEL);
        sgt_prot = pgprot_val(SEGMENT_KERNEL);
        if (!MACHINE_HAS_NX) {
                pgt_prot &= ~_PAGE_NOEXEC;
                sgt_prot &= ~_SEGMENT_ENTRY_NOEXEC;
        }
        for (address = start; address < end;) {
                pg_dir = pgd_offset_k(address);
                if (pgd_none(*pg_dir)) {
                        p4_dir = vmem_crst_alloc(_REGION2_ENTRY_EMPTY);
                        if (!p4_dir)
                                goto out;
                        pgd_populate(&init_mm, pg_dir, p4_dir);
                }

                p4_dir = p4d_offset(pg_dir, address);
                if (p4d_none(*p4_dir)) {
                        pu_dir = vmem_crst_alloc(_REGION3_ENTRY_EMPTY);
                        if (!pu_dir)
                                goto out;
                        p4d_populate(&init_mm, p4_dir, pu_dir);
                }

                pu_dir = pud_offset(p4_dir, address);
                if (pud_none(*pu_dir)) {
                        pm_dir = vmem_crst_alloc(_SEGMENT_ENTRY_EMPTY);
                        if (!pm_dir)
                                goto out;
                        pud_populate(&init_mm, pu_dir, pm_dir);
                }

                pm_dir = pmd_offset(pu_dir, address);
                if (pmd_none(*pm_dir)) {
                        /* Use 1MB frames for vmemmap if available. We always
                         * use large frames even if they are only partially
                         * used; otherwise we would also need page tables,
                         * since vmemmap_populate() is called for each
                         * section separately. */
                        if (MACHINE_HAS_EDAT1) {
                                void *new_page;

                                new_page = vmemmap_alloc_block(PMD_SIZE, node);
                                if (!new_page)
                                        goto out;
                                pmd_val(*pm_dir) = __pa(new_page) | sgt_prot;
                                address = (address + PMD_SIZE) & PMD_MASK;
                                continue;
                        }
                        pt_dir = vmem_pte_alloc();
                        if (!pt_dir)
                                goto out;
                        pmd_populate(&init_mm, pm_dir, pt_dir);
                } else if (pmd_large(*pm_dir)) {
                        address = (address + PMD_SIZE) & PMD_MASK;
                        continue;
                }

                pt_dir = pte_offset_kernel(pm_dir, address);
                if (pte_none(*pt_dir)) {
                        void *new_page;

                        new_page = vmemmap_alloc_block(PAGE_SIZE, node);
                        if (!new_page)
                                goto out;
                        pte_val(*pt_dir) = __pa(new_page) | pgt_prot;
                }
                address += PAGE_SIZE;
        }
        ret = 0;
out:
        return ret;
}

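/*
 * Nothing to do here: the vmemmap backing is simply left in place when
 * memory is removed.
 */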
void vmemmap_free(unsigned long start, unsigned long end,
                struct vmem_altmap *altmap)
{
}

/*
 * Add memory segment to the segment list if it doesn't overlap with
 * an already present segment.
 */
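/*
 * Returns -ERANGE if the segment exceeds VMEM_MAX_PHYS or wraps around,
 * -ENOSPC if it overlaps an existing segment. Must be called with
 * vmem_mutex held.
 */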
static int insert_memory_segment(struct memory_segment *seg)
{
        struct memory_segment *tmp;

        if (seg->start + seg->size > VMEM_MAX_PHYS ||
            seg->start + seg->size < seg->start)
                return -ERANGE;

        list_for_each_entry(tmp, &mem_segs, list) {
                if (seg->start >= tmp->start + tmp->size)
                        continue;
                if (seg->start + seg->size <= tmp->start)
                        continue;
                return -ENOSPC;
        }
        list_add(&seg->list, &mem_segs);
        return 0;
}

/*
 * Remove memory segment from the segment list.
 */
static void remove_memory_segment(struct memory_segment *seg)
{
        list_del(&seg->list);
}

static void __remove_shared_memory(struct memory_segment *seg)
{
        remove_memory_segment(seg);
        vmem_remove_range(seg->start, seg->size);
}

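/*
 * Remove a mapping that was previously set up with vmem_add_mapping().
 * Only an exact (start, size) match is accepted; otherwise -ENOENT is
 * returned.
 */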
int vmem_remove_mapping(unsigned long start, unsigned long size)
{
        struct memory_segment *seg;
        int ret;

        mutex_lock(&vmem_mutex);

        ret = -ENOENT;
        list_for_each_entry(seg, &mem_segs, list) {
                if (seg->start == start && seg->size == size)
                        break;
        }

        if (seg->start != start || seg->size != size)
                goto out;

        ret = 0;
        __remove_shared_memory(seg);
        kfree(seg);
out:
        mutex_unlock(&vmem_mutex);
        return ret;
}

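/*
 * Register a memory segment and add it to the 1:1 mapping. Used for
 * memory ranges that show up after boot, e.g. DCSS segments mapped by
 * the extmem code; the mapping is torn down again if vmem_add_mem()
 * fails.
 */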
int vmem_add_mapping(unsigned long start, unsigned long size)
{
        struct memory_segment *seg;
        int ret;

        mutex_lock(&vmem_mutex);
        ret = -ENOMEM;
        seg = kzalloc(sizeof(*seg), GFP_KERNEL);
        if (!seg)
                goto out;
        seg->start = start;
        seg->size = size;

        ret = insert_memory_segment(seg);
        if (ret)
                goto out_free;

        ret = vmem_add_mem(start, size);
        if (ret)
                goto out_remove;
        goto out;

out_remove:
        __remove_shared_memory(seg);
out_free:
        kfree(seg);
out:
        mutex_unlock(&vmem_mutex);
        return ret;
}

/*
 * Map the whole physical memory into virtual memory (identity mapping).
 * We reserve enough space in the vmalloc area for the vmemmap so that
 * additional memory segments can be hotplugged later.
 */
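/*
 * Once the identity mapping exists, kernel text, read-only data and
 * init text are write-protected; text and init text stay executable.
 */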
void __init vmem_map_init(void)
{
        struct memblock_region *reg;

        for_each_memblock(memory, reg)
                vmem_add_mem(reg->base, reg->size);
        __set_memory((unsigned long)_stext,
                     (unsigned long)(_etext - _stext) >> PAGE_SHIFT,
                     SET_MEMORY_RO | SET_MEMORY_X);
        __set_memory((unsigned long)_etext,
                     (unsigned long)(__end_rodata - _etext) >> PAGE_SHIFT,
                     SET_MEMORY_RO);
        __set_memory((unsigned long)_sinittext,
                     (unsigned long)(_einittext - _sinittext) >> PAGE_SHIFT,
                     SET_MEMORY_RO | SET_MEMORY_X);
        pr_info("Write protected kernel read-only data: %luk\n",
                (unsigned long)(__end_rodata - _stext) >> 10);
}

/*
 * Convert memblock.memory to a memory segment list so there is a single
 * list that contains all memory segments.
 */
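/*
 * Runs as a core_initcall, once the slab allocator is available, so
 * that later hotplug operations see all boot memory in mem_segs.
 */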
static int __init vmem_convert_memory_chunk(void)
{
        struct memblock_region *reg;
        struct memory_segment *seg;

        mutex_lock(&vmem_mutex);
        for_each_memblock(memory, reg) {
                seg = kzalloc(sizeof(*seg), GFP_KERNEL);
                if (!seg)
                        panic("Out of memory...\n");
                seg->start = reg->base;
                seg->size = reg->size;
                insert_memory_segment(seg);
        }
        mutex_unlock(&vmem_mutex);
        return 0;
}

core_initcall(vmem_convert_memory_chunk);