GNU Linux-libre 4.19.286-gnu1
mm/hugetlb.c
1 /*
2  * Generic hugetlb support.
3  * (C) Nadia Yvette Chambers, April 2004
4  */
5 #include <linux/list.h>
6 #include <linux/init.h>
7 #include <linux/mm.h>
8 #include <linux/seq_file.h>
9 #include <linux/sysctl.h>
10 #include <linux/highmem.h>
11 #include <linux/mmu_notifier.h>
12 #include <linux/nodemask.h>
13 #include <linux/pagemap.h>
14 #include <linux/mempolicy.h>
15 #include <linux/compiler.h>
16 #include <linux/cpuset.h>
17 #include <linux/mutex.h>
18 #include <linux/bootmem.h>
19 #include <linux/sysfs.h>
20 #include <linux/slab.h>
21 #include <linux/mmdebug.h>
22 #include <linux/sched/signal.h>
23 #include <linux/rmap.h>
24 #include <linux/string_helpers.h>
25 #include <linux/swap.h>
26 #include <linux/swapops.h>
27 #include <linux/jhash.h>
28
29 #include <asm/page.h>
30 #include <asm/pgtable.h>
31 #include <asm/tlb.h>
32
33 #include <linux/io.h>
34 #include <linux/hugetlb.h>
35 #include <linux/hugetlb_cgroup.h>
36 #include <linux/node.h>
37 #include <linux/userfaultfd_k.h>
38 #include <linux/page_owner.h>
39 #include "internal.h"
40
41 int hugetlb_max_hstate __read_mostly;
42 unsigned int default_hstate_idx;
43 struct hstate hstates[HUGE_MAX_HSTATE];
44 /*
45  * Minimum page order among possible hugepage sizes, set to a proper value
46  * at boot time.
47  */
48 static unsigned int minimum_order __read_mostly = UINT_MAX;
49
50 __initdata LIST_HEAD(huge_boot_pages);
51
52 /* for command line parsing */
53 static struct hstate * __initdata parsed_hstate;
54 static unsigned long __initdata default_hstate_max_huge_pages;
55 static unsigned long __initdata default_hstate_size;
56 static bool __initdata parsed_valid_hugepagesz = true;
57
58 /*
59  * Protects updates to hugepage_freelists, hugepage_activelist, nr_huge_pages,
60  * free_huge_pages, and surplus_huge_pages.
61  */
62 DEFINE_SPINLOCK(hugetlb_lock);
63
64 /*
65  * Serializes faults on the same logical page.  This is used to
66  * prevent spurious OOMs when the hugepage pool is fully utilized.
67  */
68 static int num_fault_mutexes;
69 struct mutex *hugetlb_fault_mutex_table ____cacheline_aligned_in_smp;
70
71 static inline bool PageHugeFreed(struct page *head)
72 {
73         return page_private(head + 4) == -1UL;
74 }
75
76 static inline void SetPageHugeFreed(struct page *head)
77 {
78         set_page_private(head + 4, -1UL);
79 }
80
81 static inline void ClearPageHugeFreed(struct page *head)
82 {
83         set_page_private(head + 4, 0);
84 }
85
86 /* Forward declaration */
87 static int hugetlb_acct_memory(struct hstate *h, long delta);
88
89 static inline void unlock_or_release_subpool(struct hugepage_subpool *spool)
90 {
91         bool free = (spool->count == 0) && (spool->used_hpages == 0);
92
93         spin_unlock(&spool->lock);
94
95         /* If no pages are used, and no other handles to the subpool
96          * remain, give up any reservations based on minimum size and
97          * free the subpool */
98         if (free) {
99                 if (spool->min_hpages != -1)
100                         hugetlb_acct_memory(spool->hstate,
101                                                 -spool->min_hpages);
102                 kfree(spool);
103         }
104 }
105
106 struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
107                                                 long min_hpages)
108 {
109         struct hugepage_subpool *spool;
110
111         spool = kzalloc(sizeof(*spool), GFP_KERNEL);
112         if (!spool)
113                 return NULL;
114
115         spin_lock_init(&spool->lock);
116         spool->count = 1;
117         spool->max_hpages = max_hpages;
118         spool->hstate = h;
119         spool->min_hpages = min_hpages;
120
121         if (min_hpages != -1 && hugetlb_acct_memory(h, min_hpages)) {
122                 kfree(spool);
123                 return NULL;
124         }
125         spool->rsv_hpages = min_hpages;
126
127         return spool;
128 }
129
130 void hugepage_put_subpool(struct hugepage_subpool *spool)
131 {
132         spin_lock(&spool->lock);
133         BUG_ON(!spool->count);
134         spool->count--;
135         unlock_or_release_subpool(spool);
136 }
137
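/*
 * Illustrative sketch (not part of the original source): the hugetlbfs
 * mount path pairs these helpers roughly as follows, where max_hpages
 * and min_hpages stand in for the parsed "size="/"min_size=" mount
 * options (-1 meaning "no limit"/"no minimum"):
 *
 *	spool = hugepage_new_subpool(h, max_hpages, min_hpages);
 *	if (!spool)
 *		return -ENOMEM;
 *	...
 *	hugepage_put_subpool(spool);
 *
 * The final hugepage_put_subpool() drops the initial reference taken by
 * hugepage_new_subpool() and, once no pages are in use and no other
 * references remain, frees the subpool.
 */
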
138 /*
139  * Subpool accounting for allocating and reserving pages.
140  * Return -ENOMEM if there are not enough resources to satisfy
141  * the request.  Otherwise, return the number of pages by which the
142  * global pools must be adjusted (upward).  The returned value may
143  * only be different than the passed value (delta) in the case where
144  * a subpool minimum size must be maintained.
145  */
146 static long hugepage_subpool_get_pages(struct hugepage_subpool *spool,
147                                       long delta)
148 {
149         long ret = delta;
150
151         if (!spool)
152                 return ret;
153
154         spin_lock(&spool->lock);
155
156         if (spool->max_hpages != -1) {          /* maximum size accounting */
157                 if ((spool->used_hpages + delta) <= spool->max_hpages)
158                         spool->used_hpages += delta;
159                 else {
160                         ret = -ENOMEM;
161                         goto unlock_ret;
162                 }
163         }
164
165         /* minimum size accounting */
166         if (spool->min_hpages != -1 && spool->rsv_hpages) {
167                 if (delta > spool->rsv_hpages) {
168                         /*
169                          * Asking for more reserves than those already taken on
170                          * behalf of subpool.  Return difference.
171                          */
172                         ret = delta - spool->rsv_hpages;
173                         spool->rsv_hpages = 0;
174                 } else {
175                         ret = 0;        /* reserves already accounted for */
176                         spool->rsv_hpages -= delta;
177                 }
178         }
179
180 unlock_ret:
181         spin_unlock(&spool->lock);
182         return ret;
183 }
184
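/*
 * Worked example (an illustration added here, not from the original
 * source): with max_hpages == -1, min_hpages == 10 and rsv_hpages == 3,
 * hugepage_subpool_get_pages(spool, 5) consumes the 3 outstanding
 * reserves and returns 2, the number of pages the caller must still
 * obtain from the global pool.  A second call with delta == 5 finds
 * rsv_hpages == 0 and returns 5 unchanged.
 */
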
185 /*
186  * Subpool accounting for freeing and unreserving pages.
187  * Return the number of global page reservations that must be dropped.
188  * The return value may only be different than the passed value (delta)
189  * in the case where a subpool minimum size must be maintained.
190  */
191 static long hugepage_subpool_put_pages(struct hugepage_subpool *spool,
192                                        long delta)
193 {
194         long ret = delta;
195
196         if (!spool)
197                 return delta;
198
199         spin_lock(&spool->lock);
200
201         if (spool->max_hpages != -1)            /* maximum size accounting */
202                 spool->used_hpages -= delta;
203
204          /* minimum size accounting */
205         if (spool->min_hpages != -1 && spool->used_hpages < spool->min_hpages) {
206                 if (spool->rsv_hpages + delta <= spool->min_hpages)
207                         ret = 0;
208                 else
209                         ret = spool->rsv_hpages + delta - spool->min_hpages;
210
211                 spool->rsv_hpages += delta;
212                 if (spool->rsv_hpages > spool->min_hpages)
213                         spool->rsv_hpages = spool->min_hpages;
214         }
215
216         /*
217          * If hugetlbfs_put_super couldn't free spool due to an outstanding
218          * quota reference, free it now.
219          */
220         unlock_or_release_subpool(spool);
221
222         return ret;
223 }
224
225 static inline struct hugepage_subpool *subpool_inode(struct inode *inode)
226 {
227         return HUGETLBFS_SB(inode->i_sb)->spool;
228 }
229
230 static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
231 {
232         return subpool_inode(file_inode(vma->vm_file));
233 }
234
235 /*
236  * Region tracking -- allows tracking of reservations and instantiated pages
237  *                    across the pages in a mapping.
238  *
239  * The region data structures are embedded into a resv_map and protected
240  * by a resv_map's lock.  The set of regions within the resv_map represent
241  * reservations for huge pages, or huge pages that have already been
242  * instantiated within the map.  The from and to elements are huge page
243  * indices into the associated mapping.  from indicates the starting index
244  * of the region.  to represents the first index past the end of the region.
245  *
246  * For example, a file region structure with from == 0 and to == 4 represents
247  * four huge pages in a mapping.  It is important to note that the to element
248  * represents the first element past the end of the region. This is used in
249  * arithmetic as 4(to) - 0(from) = 4 huge pages in the region.
250  *
251  * Interval notation of the form [from, to) will be used to indicate that
252  * the endpoint from is inclusive and to is exclusive.
253  */
254 struct file_region {
255         struct list_head link;
256         long from;
257         long to;
258 };
259
260 /*
261  * Add the huge page range represented by [f, t) to the reserve
262  * map.  In the normal case, existing regions will be expanded
263  * to accommodate the specified range.  Sufficient regions should
264  * exist for expansion due to the previous call to region_chg
265  * with the same range.  However, it is possible that region_del
266  * could have been called after region_chg and modified the map
267  * in such a way that no region exists to be expanded.  In this
268  * case, pull a region descriptor from the cache associated with
269  * the map and use that for the new range.
270  *
271  * Return the number of new huge pages added to the map.  This
272  * number is greater than or equal to zero.
273  */
274 static long region_add(struct resv_map *resv, long f, long t)
275 {
276         struct list_head *head = &resv->regions;
277         struct file_region *rg, *nrg, *trg;
278         long add = 0;
279
280         spin_lock(&resv->lock);
281         /* Locate the region we are either in or before. */
282         list_for_each_entry(rg, head, link)
283                 if (f <= rg->to)
284                         break;
285
286         /*
287          * If no region exists which can be expanded to include the
288          * specified range, the list must have been modified by an
289  * interleaving call to region_del().  Pull a region descriptor
290          * from the cache and use it for this range.
291          */
292         if (&rg->link == head || t < rg->from) {
293                 VM_BUG_ON(resv->region_cache_count <= 0);
294
295                 resv->region_cache_count--;
296                 nrg = list_first_entry(&resv->region_cache, struct file_region,
297                                         link);
298                 list_del(&nrg->link);
299
300                 nrg->from = f;
301                 nrg->to = t;
302                 list_add(&nrg->link, rg->link.prev);
303
304                 add += t - f;
305                 goto out_locked;
306         }
307
308         /* Round our left edge to the current segment if it encloses us. */
309         if (f > rg->from)
310                 f = rg->from;
311
312         /* Check for and consume any regions we now overlap with. */
313         nrg = rg;
314         list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
315                 if (&rg->link == head)
316                         break;
317                 if (rg->from > t)
318                         break;
319
320                 /* If this area reaches higher, then extend our area to
321                  * include it completely.  If this is not the first area
322                  * which we intend to reuse, free it. */
323                 if (rg->to > t)
324                         t = rg->to;
325                 if (rg != nrg) {
326                         /* Decrement return value by the deleted range.
327                          * Another range will span this area so that by
328                          * end of routine add will be >= zero
329                          */
330                         add -= (rg->to - rg->from);
331                         list_del(&rg->link);
332                         kfree(rg);
333                 }
334         }
335
336         add += (nrg->from - f);         /* Added to beginning of region */
337         nrg->from = f;
338         add += t - nrg->to;             /* Added to end of region */
339         nrg->to = t;
340
341 out_locked:
342         resv->adds_in_progress--;
343         spin_unlock(&resv->lock);
344         VM_BUG_ON(add < 0);
345         return add;
346 }
347
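/*
 * Worked example (an illustration, not part of the original source):
 * with an existing reserve map holding [0, 3) and [5, 8), a call to
 *
 *	region_add(resv, 2, 6);
 *
 * merges both entries into a single region [0, 8) and returns 2, the
 * number of huge pages newly represented (indices 3 and 4).
 */
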
348 /*
349  * Examine the existing reserve map and determine how many
350  * huge pages in the specified range [f, t) are NOT currently
351  * represented.  This routine is called before a subsequent
352  * call to region_add that will actually modify the reserve
353  * map to add the specified range [f, t).  region_chg does
354  * not change the number of huge pages represented by the
355  * map.  However, if the existing regions in the map can not
356  * be expanded to represent the new range, a new file_region
357  * structure is added to the map as a placeholder.  This is
358  * so that the subsequent region_add call will have all the
359  * regions it needs and will not fail.
360  *
361  * Upon entry, region_chg will also examine the cache of region descriptors
362  * associated with the map.  If there are not enough descriptors cached, one
363  * will be allocated for the in progress add operation.
364  *
365  * Returns the number of huge pages that need to be added to the existing
366  * reservation map for the range [f, t).  This number is greater or equal to
367  * zero.  -ENOMEM is returned if a new file_region structure or cache entry
368  * is needed and can not be allocated.
369  */
370 static long region_chg(struct resv_map *resv, long f, long t)
371 {
372         struct list_head *head = &resv->regions;
373         struct file_region *rg, *nrg = NULL;
374         long chg = 0;
375
376 retry:
377         spin_lock(&resv->lock);
378 retry_locked:
379         resv->adds_in_progress++;
380
381         /*
382          * Check for sufficient descriptors in the cache to accommodate
383          * the number of in progress add operations.
384          */
385         if (resv->adds_in_progress > resv->region_cache_count) {
386                 struct file_region *trg;
387
388                 VM_BUG_ON(resv->adds_in_progress - resv->region_cache_count > 1);
389                 /* Must drop lock to allocate a new descriptor. */
390                 resv->adds_in_progress--;
391                 spin_unlock(&resv->lock);
392
393                 trg = kmalloc(sizeof(*trg), GFP_KERNEL);
394                 if (!trg) {
395                         kfree(nrg);
396                         return -ENOMEM;
397                 }
398
399                 spin_lock(&resv->lock);
400                 list_add(&trg->link, &resv->region_cache);
401                 resv->region_cache_count++;
402                 goto retry_locked;
403         }
404
405         /* Locate the region we are before or in. */
406         list_for_each_entry(rg, head, link)
407                 if (f <= rg->to)
408                         break;
409
410         /* If we are below the current region then a new region is required.
411          * Subtle: allocate a new region at the position but make it zero
412          * size such that we can guarantee to record the reservation. */
413         if (&rg->link == head || t < rg->from) {
414                 if (!nrg) {
415                         resv->adds_in_progress--;
416                         spin_unlock(&resv->lock);
417                         nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
418                         if (!nrg)
419                                 return -ENOMEM;
420
421                         nrg->from = f;
422                         nrg->to   = f;
423                         INIT_LIST_HEAD(&nrg->link);
424                         goto retry;
425                 }
426
427                 list_add(&nrg->link, rg->link.prev);
428                 chg = t - f;
429                 goto out_nrg;
430         }
431
432         /* Round our left edge to the current segment if it encloses us. */
433         if (f > rg->from)
434                 f = rg->from;
435         chg = t - f;
436
437         /* Check for and consume any regions we now overlap with. */
438         list_for_each_entry(rg, rg->link.prev, link) {
439                 if (&rg->link == head)
440                         break;
441                 if (rg->from > t)
442                         goto out;
443
444                 /* We overlap with this area; if it extends further than
445                  * us then we must extend ourselves.  Account for its
446                  * existing reservation. */
447                 if (rg->to > t) {
448                         chg += rg->to - t;
449                         t = rg->to;
450                 }
451                 chg -= rg->to - rg->from;
452         }
453
454 out:
455         spin_unlock(&resv->lock);
456         /*  We already know we raced and no longer need the new region */
457         kfree(nrg);
458         return chg;
459 out_nrg:
460         spin_unlock(&resv->lock);
461         return chg;
462 }
463
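/*
 * Illustrative sketch of how callers pair these primitives (a simplified
 * view, not part of the original source; the "allocation failed" test is
 * a placeholder for whatever the caller attempted between the two calls):
 *
 *	chg = region_chg(resv, idx, idx + 1);
 *	if (chg < 0)
 *		return chg;
 *	...allocate and instantiate the page...
 *	if (allocation failed)
 *		region_abort(resv, idx, idx + 1);
 *	else
 *		region_add(resv, idx, idx + 1);
 *
 * As a worked example, with the map holding only [0, 3),
 * region_chg(resv, 2, 6) returns 3 (indices 3, 4 and 5 are missing) and
 * a later region_add(resv, 2, 6) expands the region to [0, 6).
 */
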
464 /*
465  * Abort the in progress add operation.  The adds_in_progress field
466  * of the resv_map keeps track of the operations in progress between
467  * calls to region_chg and region_add.  Operations are sometimes
468  * aborted after the call to region_chg.  In such cases, region_abort
469  * is called to decrement the adds_in_progress counter.
470  *
471  * NOTE: The range arguments [f, t) are not needed or used in this
472  * routine.  They are kept to make reading the calling code easier as
473  * arguments will match the associated region_chg call.
474  */
475 static void region_abort(struct resv_map *resv, long f, long t)
476 {
477         spin_lock(&resv->lock);
478         VM_BUG_ON(!resv->region_cache_count);
479         resv->adds_in_progress--;
480         spin_unlock(&resv->lock);
481 }
482
483 /*
484  * Delete the specified range [f, t) from the reserve map.  If the
485  * t parameter is LONG_MAX, this indicates that ALL regions after f
486  * should be deleted.  Locate the regions which intersect [f, t)
487  * and either trim, delete or split the existing regions.
488  *
489  * Returns the number of huge pages deleted from the reserve map.
490  * In the normal case, the return value is zero or more.  In the
491  * case where a region must be split, a new region descriptor must
492  * be allocated.  If the allocation fails, -ENOMEM will be returned.
493  * NOTE: If the parameter t == LONG_MAX, then we will never split
494  * a region and possibly return -ENOMEM.  Callers specifying
495  * t == LONG_MAX do not need to check for -ENOMEM error.
496  */
497 static long region_del(struct resv_map *resv, long f, long t)
498 {
499         struct list_head *head = &resv->regions;
500         struct file_region *rg, *trg;
501         struct file_region *nrg = NULL;
502         long del = 0;
503
504 retry:
505         spin_lock(&resv->lock);
506         list_for_each_entry_safe(rg, trg, head, link) {
507                 /*
508                  * Skip regions before the range to be deleted.  file_region
509                  * ranges are normally of the form [from, to).  However, there
510                  * may be a "placeholder" entry in the map which is of the form
511                  * (from, to) with from == to.  Check for placeholder entries
512                  * at the beginning of the range to be deleted.
513                  */
514                 if (rg->to <= f && (rg->to != rg->from || rg->to != f))
515                         continue;
516
517                 if (rg->from >= t)
518                         break;
519
520                 if (f > rg->from && t < rg->to) { /* Must split region */
521                         /*
522                          * Check for an entry in the cache before dropping
523                          * lock and attempting allocation.
524                          */
525                         if (!nrg &&
526                             resv->region_cache_count > resv->adds_in_progress) {
527                                 nrg = list_first_entry(&resv->region_cache,
528                                                         struct file_region,
529                                                         link);
530                                 list_del(&nrg->link);
531                                 resv->region_cache_count--;
532                         }
533
534                         if (!nrg) {
535                                 spin_unlock(&resv->lock);
536                                 nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
537                                 if (!nrg)
538                                         return -ENOMEM;
539                                 goto retry;
540                         }
541
542                         del += t - f;
543
544                         /* New entry for end of split region */
545                         nrg->from = t;
546                         nrg->to = rg->to;
547                         INIT_LIST_HEAD(&nrg->link);
548
549                         /* Original entry is trimmed */
550                         rg->to = f;
551
552                         list_add(&nrg->link, &rg->link);
553                         nrg = NULL;
554                         break;
555                 }
556
557                 if (f <= rg->from && t >= rg->to) { /* Remove entire region */
558                         del += rg->to - rg->from;
559                         list_del(&rg->link);
560                         kfree(rg);
561                         continue;
562                 }
563
564                 if (f <= rg->from) {    /* Trim beginning of region */
565                         del += t - rg->from;
566                         rg->from = t;
567                 } else {                /* Trim end of region */
568                         del += rg->to - f;
569                         rg->to = f;
570                 }
571         }
572
573         spin_unlock(&resv->lock);
574         kfree(nrg);
575         return del;
576 }
577
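/*
 * Worked examples (illustrations, not part of the original source):
 * with the map holding [0, 10), region_del(resv, 3, 7) splits the entry
 * into [0, 3) and [7, 10) and returns 4, while region_del(resv, 5, LONG_MAX)
 * merely trims the entry to [0, 5) and returns 5, which is why callers
 * passing t == LONG_MAX never need to handle -ENOMEM.
 */
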
578 /*
579  * A rare out-of-memory error was encountered which prevented removal of
580  * the reserve map region for a page.  The huge page itself was freed
581  * and removed from the page cache.  This routine will adjust the subpool
582  * usage count, and the global reserve count if needed.  By incrementing
583  * these counts, the reserve map entry which could not be deleted will
584  * appear as a "reserved" entry instead of simply dangling with incorrect
585  * counts.
586  */
587 void hugetlb_fix_reserve_counts(struct inode *inode)
588 {
589         struct hugepage_subpool *spool = subpool_inode(inode);
590         long rsv_adjust;
591         bool reserved = false;
592
593         rsv_adjust = hugepage_subpool_get_pages(spool, 1);
594         if (rsv_adjust > 0) {
595                 struct hstate *h = hstate_inode(inode);
596
597                 if (!hugetlb_acct_memory(h, 1))
598                         reserved = true;
599         } else if (!rsv_adjust) {
600                 reserved = true;
601         }
602
603         if (!reserved)
604                 pr_warn("hugetlb: Huge Page Reserved count may go negative.\n");
605 }
606
607 /*
608  * Count and return the number of huge pages in the reserve map
609  * that intersect with the range [f, t).
610  */
611 static long region_count(struct resv_map *resv, long f, long t)
612 {
613         struct list_head *head = &resv->regions;
614         struct file_region *rg;
615         long chg = 0;
616
617         spin_lock(&resv->lock);
618         /* Locate each segment we overlap with, and count that overlap. */
619         list_for_each_entry(rg, head, link) {
620                 long seg_from;
621                 long seg_to;
622
623                 if (rg->to <= f)
624                         continue;
625                 if (rg->from >= t)
626                         break;
627
628                 seg_from = max(rg->from, f);
629                 seg_to = min(rg->to, t);
630
631                 chg += seg_to - seg_from;
632         }
633         spin_unlock(&resv->lock);
634
635         return chg;
636 }
637
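/*
 * Worked example (an illustration, not part of the original source):
 * with the map holding [0, 2) and [3, 6), region_count(resv, 1, 5)
 * counts the overlaps [1, 2) and [3, 5) and returns 1 + 2 = 3.
 */
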
638 /*
639  * Convert the address within this vma to the page offset within
640  * the mapping, in pagecache page units; huge pages here.
641  */
642 static pgoff_t vma_hugecache_offset(struct hstate *h,
643                         struct vm_area_struct *vma, unsigned long address)
644 {
645         return ((address - vma->vm_start) >> huge_page_shift(h)) +
646                         (vma->vm_pgoff >> huge_page_order(h));
647 }
648
649 pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
650                                      unsigned long address)
651 {
652         return vma_hugecache_offset(hstate_vma(vma), vma, address);
653 }
654 EXPORT_SYMBOL_GPL(linear_hugepage_index);
655
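/*
 * Worked example (an illustration, not part of the original source):
 * for a 2MB hstate (huge_page_shift() == 21, huge_page_order() == 9 with
 * 4KB base pages), a VMA with vm_start == 0x40000000 and vm_pgoff == 0,
 * and address == 0x40600000:
 *
 *	((0x40600000 - 0x40000000) >> 21) + (0 >> 9) == 3
 *
 * i.e. the address falls in the fourth huge page of the mapping.
 */
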
656 /*
657  * Return the size of the pages allocated when backing a VMA. In the majority
658  * of cases this will be the same size as used by the page table entries.
659  */
660 unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
661 {
662         if (vma->vm_ops && vma->vm_ops->pagesize)
663                 return vma->vm_ops->pagesize(vma);
664         return PAGE_SIZE;
665 }
666 EXPORT_SYMBOL_GPL(vma_kernel_pagesize);
667
668 /*
669  * Return the page size being used by the MMU to back a VMA. In the majority
670  * of cases, the page size used by the kernel matches the MMU size. On
671  * architectures where it differs, an architecture-specific 'strong'
672  * version of this symbol is required.
673  */
674 __weak unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
675 {
676         return vma_kernel_pagesize(vma);
677 }
678
679 /*
680  * Flags for MAP_PRIVATE reservations.  These are stored in the bottom
681  * bits of the reservation map pointer, which are always clear due to
682  * alignment.
683  */
684 #define HPAGE_RESV_OWNER    (1UL << 0)
685 #define HPAGE_RESV_UNMAPPED (1UL << 1)
686 #define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)
687
688 /*
689  * These helpers are used to track how many pages are reserved for
690  * faults in a MAP_PRIVATE mapping. Only the process that called mmap()
691  * is guaranteed to have its future faults succeed.
692  *
693  * With the exception of reset_vma_resv_huge_pages() which is called at fork(),
694  * the reserve counters are updated with the hugetlb_lock held. It is safe
695  * to reset the VMA at fork() time as it is not in use yet and there is no
696  * chance of the global counters getting corrupted as a result of the values.
697  *
698  * The private mapping reservation is represented in a subtly different
699  * manner to a shared mapping.  A shared mapping has a region map associated
700  * with the underlying file; this region map represents the backing file
701  * pages which have ever had a reservation assigned, and this persists even
702  * after the page is instantiated.  A private mapping has a region map
703  * associated with the original mmap which is attached to all VMAs that
704  * reference it; this region map represents those offsets which have consumed
705  * a reservation, i.e. where pages have been instantiated.
706  */
707 static unsigned long get_vma_private_data(struct vm_area_struct *vma)
708 {
709         return (unsigned long)vma->vm_private_data;
710 }
711
712 static void set_vma_private_data(struct vm_area_struct *vma,
713                                                         unsigned long value)
714 {
715         vma->vm_private_data = (void *)value;
716 }
717
718 struct resv_map *resv_map_alloc(void)
719 {
720         struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
721         struct file_region *rg = kmalloc(sizeof(*rg), GFP_KERNEL);
722
723         if (!resv_map || !rg) {
724                 kfree(resv_map);
725                 kfree(rg);
726                 return NULL;
727         }
728
729         kref_init(&resv_map->refs);
730         spin_lock_init(&resv_map->lock);
731         INIT_LIST_HEAD(&resv_map->regions);
732
733         resv_map->adds_in_progress = 0;
734
735         INIT_LIST_HEAD(&resv_map->region_cache);
736         list_add(&rg->link, &resv_map->region_cache);
737         resv_map->region_cache_count = 1;
738
739         return resv_map;
740 }
741
742 void resv_map_release(struct kref *ref)
743 {
744         struct resv_map *resv_map = container_of(ref, struct resv_map, refs);
745         struct list_head *head = &resv_map->region_cache;
746         struct file_region *rg, *trg;
747
748         /* Clear out any active regions before we release the map. */
749         region_del(resv_map, 0, LONG_MAX);
750
751         /* ... and any entries left in the cache */
752         list_for_each_entry_safe(rg, trg, head, link) {
753                 list_del(&rg->link);
754                 kfree(rg);
755         }
756
757         VM_BUG_ON(resv_map->adds_in_progress);
758
759         kfree(resv_map);
760 }
761
762 static inline struct resv_map *inode_resv_map(struct inode *inode)
763 {
764         return inode->i_mapping->private_data;
765 }
766
767 static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
768 {
769         VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
770         if (vma->vm_flags & VM_MAYSHARE) {
771                 struct address_space *mapping = vma->vm_file->f_mapping;
772                 struct inode *inode = mapping->host;
773
774                 return inode_resv_map(inode);
775
776         } else {
777                 return (struct resv_map *)(get_vma_private_data(vma) &
778                                                         ~HPAGE_RESV_MASK);
779         }
780 }
781
782 static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
783 {
784         VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
785         VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);
786
787         set_vma_private_data(vma, (get_vma_private_data(vma) &
788                                 HPAGE_RESV_MASK) | (unsigned long)map);
789 }
790
791 static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
792 {
793         VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
794         VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);
795
796         set_vma_private_data(vma, get_vma_private_data(vma) | flags);
797 }
798
799 static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
800 {
801         VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
802
803         return (get_vma_private_data(vma) & flag) != 0;
804 }
805
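/*
 * Illustrative sketch (a simplified view of what hugetlb_reserve_pages()
 * does later in this file for !VM_MAYSHARE mappings, not a verbatim
 * excerpt): the mmap() path allocates the map and marks the owning VMA
 * in one go.  Because the low bits of the resv_map pointer are clear,
 * the flag and the pointer share vm_private_data without conflict.
 *
 *	resv_map = resv_map_alloc();
 *	if (!resv_map)
 *		return -ENOMEM;
 *	set_vma_resv_map(vma, resv_map);
 *	set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
 */
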
806 /* Reset counters to 0 and clear all HPAGE_RESV_* flags */
807 void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
808 {
809         VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
810         if (!(vma->vm_flags & VM_MAYSHARE))
811                 vma->vm_private_data = (void *)0;
812 }
813
814 /* Returns true if the VMA has associated reserve pages */
815 static bool vma_has_reserves(struct vm_area_struct *vma, long chg)
816 {
817         if (vma->vm_flags & VM_NORESERVE) {
818                 /*
819                  * This address is already reserved by another process (chg == 0),
820                  * so we should decrement the reserved count. Without decrementing,
821                  * the reserve count remains after releasing the inode, because this
822                  * allocated page will go into the page cache and is regarded as
823                  * coming from the reserved pool in the releasing step.  Currently, we
824                  * don't have any other solution to deal with this situation
825                  * properly, so add a work-around here.
826                  */
827                 if (vma->vm_flags & VM_MAYSHARE && chg == 0)
828                         return true;
829                 else
830                         return false;
831         }
832
833         /* Shared mappings always use reserves */
834         if (vma->vm_flags & VM_MAYSHARE) {
835                 /*
836                  * We know VM_NORESERVE is not set.  Therefore, there SHOULD
837                  * be a region map for all pages.  The only situation where
838                  * there is no region map is if a hole was punched via
839                  * fallocate.  In this case, there really are no reserves to
840                  * use.  This situation is indicated if chg != 0.
841                  */
842                 if (chg)
843                         return false;
844                 else
845                         return true;
846         }
847
848         /*
849          * Only the process that called mmap() has reserves for
850          * private mappings.
851          */
852         if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
853                 /*
854                  * Like the shared case above, a hole punch or truncate
855                  * could have been performed on the private mapping.
856                  * Examine the value of chg to determine if reserves
857                  * actually exist or were previously consumed.
858                  * Very Subtle - The value of chg comes from a previous
859                  * call to vma_needs_reserves().  The reserve map for
860                  * private mappings has different (opposite) semantics
861                  * than that of shared mappings.  vma_needs_reserves()
862                  * has already taken this difference in semantics into
863                  * account.  Therefore, the meaning of chg is the same
864                  * as in the shared case above.  Code could easily be
865                  * combined, but keeping it separate draws attention to
866                  * subtle differences.
867                  */
868                 if (chg)
869                         return false;
870                 else
871                         return true;
872         }
873
874         return false;
875 }
876
877 static void enqueue_huge_page(struct hstate *h, struct page *page)
878 {
879         int nid = page_to_nid(page);
880         list_move(&page->lru, &h->hugepage_freelists[nid]);
881         h->free_huge_pages++;
882         h->free_huge_pages_node[nid]++;
883         SetPageHugeFreed(page);
884 }
885
886 static struct page *dequeue_huge_page_node_exact(struct hstate *h, int nid)
887 {
888         struct page *page;
889
890         list_for_each_entry(page, &h->hugepage_freelists[nid], lru)
891                 if (!PageHWPoison(page))
892                         break;
893         /*
894          * If no usable (non-HWPoisoned) free hugepage is found on the list,
895          * the allocation fails.
896          */
897         if (&h->hugepage_freelists[nid] == &page->lru)
898                 return NULL;
899         list_move(&page->lru, &h->hugepage_activelist);
900         set_page_refcounted(page);
901         ClearPageHugeFreed(page);
902         h->free_huge_pages--;
903         h->free_huge_pages_node[nid]--;
904         return page;
905 }
906
907 static struct page *dequeue_huge_page_nodemask(struct hstate *h, gfp_t gfp_mask, int nid,
908                 nodemask_t *nmask)
909 {
910         unsigned int cpuset_mems_cookie;
911         struct zonelist *zonelist;
912         struct zone *zone;
913         struct zoneref *z;
914         int node = -1;
915
916         zonelist = node_zonelist(nid, gfp_mask);
917
918 retry_cpuset:
919         cpuset_mems_cookie = read_mems_allowed_begin();
920         for_each_zone_zonelist_nodemask(zone, z, zonelist, gfp_zone(gfp_mask), nmask) {
921                 struct page *page;
922
923                 if (!cpuset_zone_allowed(zone, gfp_mask))
924                         continue;
925                 /*
926                  * no need to ask again on the same node. Pool is node rather than
927                  * zone aware
928                  */
929                 if (zone_to_nid(zone) == node)
930                         continue;
931                 node = zone_to_nid(zone);
932
933                 page = dequeue_huge_page_node_exact(h, node);
934                 if (page)
935                         return page;
936         }
937         if (unlikely(read_mems_allowed_retry(cpuset_mems_cookie)))
938                 goto retry_cpuset;
939
940         return NULL;
941 }
942
943 /* Movability of hugepages depends on migration support. */
944 static inline gfp_t htlb_alloc_mask(struct hstate *h)
945 {
946         if (hugepage_migration_supported(h))
947                 return GFP_HIGHUSER_MOVABLE;
948         else
949                 return GFP_HIGHUSER;
950 }
951
952 static struct page *dequeue_huge_page_vma(struct hstate *h,
953                                 struct vm_area_struct *vma,
954                                 unsigned long address, int avoid_reserve,
955                                 long chg)
956 {
957         struct page *page;
958         struct mempolicy *mpol;
959         gfp_t gfp_mask;
960         nodemask_t *nodemask;
961         int nid;
962
963         /*
964          * A child process with MAP_PRIVATE mappings created by its parent
965          * has no page reserves. This check ensures that reservations are
966          * not "stolen". The child may still get SIGKILLed.
967          */
968         if (!vma_has_reserves(vma, chg) &&
969                         h->free_huge_pages - h->resv_huge_pages == 0)
970                 goto err;
971
972         /* If reserves cannot be used, ensure enough pages are in the pool */
973         if (avoid_reserve && h->free_huge_pages - h->resv_huge_pages == 0)
974                 goto err;
975
976         gfp_mask = htlb_alloc_mask(h);
977         nid = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
978         page = dequeue_huge_page_nodemask(h, gfp_mask, nid, nodemask);
979         if (page && !avoid_reserve && vma_has_reserves(vma, chg)) {
980                 SetPagePrivate(page);
981                 h->resv_huge_pages--;
982         }
983
984         mpol_cond_put(mpol);
985         return page;
986
987 err:
988         return NULL;
989 }
990
991 /*
992  * common helper functions for hstate_next_node_to_{alloc|free}.
993  * We may have allocated or freed a huge page based on a different
994  * nodes_allowed previously, so h->next_node_to_{alloc|free} might
995  * be outside of *nodes_allowed.  Ensure that we use an allowed
996  * node for alloc or free.
997  */
998 static int next_node_allowed(int nid, nodemask_t *nodes_allowed)
999 {
1000         nid = next_node_in(nid, *nodes_allowed);
1001         VM_BUG_ON(nid >= MAX_NUMNODES);
1002
1003         return nid;
1004 }
1005
1006 static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed)
1007 {
1008         if (!node_isset(nid, *nodes_allowed))
1009                 nid = next_node_allowed(nid, nodes_allowed);
1010         return nid;
1011 }
1012
1013 /*
1014  * returns the previously saved node ["this node"] from which to
1015  * allocate a persistent huge page for the pool and advances the
1016  * next node from which to allocate, handling wrap at the end of the
1017  * node mask.
1018  */
1019 static int hstate_next_node_to_alloc(struct hstate *h,
1020                                         nodemask_t *nodes_allowed)
1021 {
1022         int nid;
1023
1024         VM_BUG_ON(!nodes_allowed);
1025
1026         nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed);
1027         h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed);
1028
1029         return nid;
1030 }
1031
1032 /*
1033  * helper for free_pool_huge_page() - return the previously saved
1034  * node ["this node"] from which to free a huge page.  Advance the
1035  * next node id whether or not we find a free huge page to free so
1036  * that the next attempt to free addresses the next node.
1037  */
1038 static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
1039 {
1040         int nid;
1041
1042         VM_BUG_ON(!nodes_allowed);
1043
1044         nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed);
1045         h->next_nid_to_free = next_node_allowed(nid, nodes_allowed);
1046
1047         return nid;
1048 }
1049
1050 #define for_each_node_mask_to_alloc(hs, nr_nodes, node, mask)           \
1051         for (nr_nodes = nodes_weight(*mask);                            \
1052                 nr_nodes > 0 &&                                         \
1053                 ((node = hstate_next_node_to_alloc(hs, mask)) || 1);    \
1054                 nr_nodes--)
1055
1056 #define for_each_node_mask_to_free(hs, nr_nodes, node, mask)            \
1057         for (nr_nodes = nodes_weight(*mask);                            \
1058                 nr_nodes > 0 &&                                         \
1059                 ((node = hstate_next_node_to_free(hs, mask)) || 1);     \
1060                 nr_nodes--)
1061
1062 #ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
1063 static void destroy_compound_gigantic_page(struct page *page,
1064                                         unsigned int order)
1065 {
1066         int i;
1067         int nr_pages = 1 << order;
1068         struct page *p = page + 1;
1069
1070         atomic_set(compound_mapcount_ptr(page), 0);
1071         for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
1072                 clear_compound_head(p);
1073                 set_page_refcounted(p);
1074         }
1075
1076         set_compound_order(page, 0);
1077         __ClearPageHead(page);
1078 }
1079
1080 static void free_gigantic_page(struct page *page, unsigned int order)
1081 {
1082         free_contig_range(page_to_pfn(page), 1 << order);
1083 }
1084
1085 static int __alloc_gigantic_page(unsigned long start_pfn,
1086                                 unsigned long nr_pages, gfp_t gfp_mask)
1087 {
1088         unsigned long end_pfn = start_pfn + nr_pages;
1089         return alloc_contig_range(start_pfn, end_pfn, MIGRATE_MOVABLE,
1090                                   gfp_mask);
1091 }
1092
1093 static bool pfn_range_valid_gigantic(struct zone *z,
1094                         unsigned long start_pfn, unsigned long nr_pages)
1095 {
1096         unsigned long i, end_pfn = start_pfn + nr_pages;
1097         struct page *page;
1098
1099         for (i = start_pfn; i < end_pfn; i++) {
1100                 page = pfn_to_online_page(i);
1101                 if (!page)
1102                         return false;
1103
1104                 if (page_zone(page) != z)
1105                         return false;
1106
1107                 if (PageReserved(page))
1108                         return false;
1109
1110                 if (page_count(page) > 0)
1111                         return false;
1112
1113                 if (PageHuge(page))
1114                         return false;
1115         }
1116
1117         return true;
1118 }
1119
1120 static bool zone_spans_last_pfn(const struct zone *zone,
1121                         unsigned long start_pfn, unsigned long nr_pages)
1122 {
1123         unsigned long last_pfn = start_pfn + nr_pages - 1;
1124         return zone_spans_pfn(zone, last_pfn);
1125 }
1126
1127 static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
1128                 int nid, nodemask_t *nodemask)
1129 {
1130         unsigned int order = huge_page_order(h);
1131         unsigned long nr_pages = 1 << order;
1132         unsigned long ret, pfn, flags;
1133         struct zonelist *zonelist;
1134         struct zone *zone;
1135         struct zoneref *z;
1136
1137         zonelist = node_zonelist(nid, gfp_mask);
1138         for_each_zone_zonelist_nodemask(zone, z, zonelist, gfp_zone(gfp_mask), nodemask) {
1139                 spin_lock_irqsave(&zone->lock, flags);
1140
1141                 pfn = ALIGN(zone->zone_start_pfn, nr_pages);
1142                 while (zone_spans_last_pfn(zone, pfn, nr_pages)) {
1143                         if (pfn_range_valid_gigantic(zone, pfn, nr_pages)) {
1144                                 /*
1145                                  * We release the zone lock here because
1146                                  * alloc_contig_range() will also lock the zone
1147                                  * at some point. If there's an allocation
1148                                  * spinning on this lock, it may win the race
1149                                  * and cause alloc_contig_range() to fail...
1150                                  */
1151                                 spin_unlock_irqrestore(&zone->lock, flags);
1152                                 ret = __alloc_gigantic_page(pfn, nr_pages, gfp_mask);
1153                                 if (!ret)
1154                                         return pfn_to_page(pfn);
1155                                 spin_lock_irqsave(&zone->lock, flags);
1156                         }
1157                         pfn += nr_pages;
1158                 }
1159
1160                 spin_unlock_irqrestore(&zone->lock, flags);
1161         }
1162
1163         return NULL;
1164 }
1165
1166 static void prep_new_huge_page(struct hstate *h, struct page *page, int nid);
1167 static void prep_compound_gigantic_page(struct page *page, unsigned int order);
1168
1169 #else /* !CONFIG_ARCH_HAS_GIGANTIC_PAGE */
1170 static inline bool gigantic_page_supported(void) { return false; }
1171 static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
1172                 int nid, nodemask_t *nodemask) { return NULL; }
1173 static inline void free_gigantic_page(struct page *page, unsigned int order) { }
1174 static inline void destroy_compound_gigantic_page(struct page *page,
1175                                                 unsigned int order) { }
1176 #endif
1177
1178 static void update_and_free_page(struct hstate *h, struct page *page)
1179 {
1180         int i;
1181         struct page *subpage = page;
1182
1183         if (hstate_is_gigantic(h) && !gigantic_page_supported())
1184                 return;
1185
1186         h->nr_huge_pages--;
1187         h->nr_huge_pages_node[page_to_nid(page)]--;
1188         for (i = 0; i < pages_per_huge_page(h);
1189              i++, subpage = mem_map_next(subpage, page, i)) {
1190                 subpage->flags &= ~(1 << PG_locked | 1 << PG_error |
1191                                 1 << PG_referenced | 1 << PG_dirty |
1192                                 1 << PG_active | 1 << PG_private |
1193                                 1 << PG_writeback);
1194         }
1195         VM_BUG_ON_PAGE(hugetlb_cgroup_from_page(page), page);
1196         set_compound_page_dtor(page, NULL_COMPOUND_DTOR);
1197         set_page_refcounted(page);
1198         if (hstate_is_gigantic(h)) {
1199                 destroy_compound_gigantic_page(page, huge_page_order(h));
1200                 free_gigantic_page(page, huge_page_order(h));
1201         } else {
1202                 __free_pages(page, huge_page_order(h));
1203         }
1204 }
1205
1206 struct hstate *size_to_hstate(unsigned long size)
1207 {
1208         struct hstate *h;
1209
1210         for_each_hstate(h) {
1211                 if (huge_page_size(h) == size)
1212                         return h;
1213         }
1214         return NULL;
1215 }
1216
1217 /*
1218  * Test to determine whether the hugepage is "active/in-use" (i.e. linked
1219  * to hstate->hugepage_activelist).
1220  *
1221  * This function can be called for tail pages, but never returns true for them.
1222  */
1223 bool page_huge_active(struct page *page)
1224 {
1225         return PageHeadHuge(page) && PagePrivate(&page[1]);
1226 }
1227
1228 /* never called for tail page */
1229 void set_page_huge_active(struct page *page)
1230 {
1231         VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
1232         SetPagePrivate(&page[1]);
1233 }
1234
1235 static void clear_page_huge_active(struct page *page)
1236 {
1237         VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
1238         ClearPagePrivate(&page[1]);
1239 }
1240
1241 /*
1242  * Internal hugetlb-specific page flag. Do not use outside of the hugetlb
1243  * code.
1244  */
1245 static inline bool PageHugeTemporary(struct page *page)
1246 {
1247         if (!PageHuge(page))
1248                 return false;
1249
1250         return (unsigned long)page[2].mapping == -1U;
1251 }
1252
1253 static inline void SetPageHugeTemporary(struct page *page)
1254 {
1255         page[2].mapping = (void *)-1U;
1256 }
1257
1258 static inline void ClearPageHugeTemporary(struct page *page)
1259 {
1260         page[2].mapping = NULL;
1261 }
1262
1263 void free_huge_page(struct page *page)
1264 {
1265         /*
1266          * Can't pass hstate in here because it is called from the
1267          * compound page destructor.
1268          */
1269         struct hstate *h = page_hstate(page);
1270         int nid = page_to_nid(page);
1271         struct hugepage_subpool *spool =
1272                 (struct hugepage_subpool *)page_private(page);
1273         bool restore_reserve;
1274
1275         set_page_private(page, 0);
1276         page->mapping = NULL;
1277         VM_BUG_ON_PAGE(page_count(page), page);
1278         VM_BUG_ON_PAGE(page_mapcount(page), page);
1279         restore_reserve = PagePrivate(page);
1280         ClearPagePrivate(page);
1281
1282         /*
1283          * If PagePrivate() was set on page, page allocation consumed a
1284          * reservation.  If the page was associated with a subpool, there
1285          * would have been a page reserved in the subpool before allocation
1286          * via hugepage_subpool_get_pages().  Since we are 'restoring' the
1287          * reservation, do not call hugepage_subpool_put_pages() as this will
1288          * remove the reserved page from the subpool.
1289          */
1290         if (!restore_reserve) {
1291                 /*
1292                  * A return code of zero implies that the subpool will be
1293                  * under its minimum size if the reservation is not restored
1294          * after the page is freed.  Therefore, force the restore_reserve
1295                  * operation.
1296                  */
1297                 if (hugepage_subpool_put_pages(spool, 1) == 0)
1298                         restore_reserve = true;
1299         }
1300
1301         spin_lock(&hugetlb_lock);
1302         clear_page_huge_active(page);
1303         hugetlb_cgroup_uncharge_page(hstate_index(h),
1304                                      pages_per_huge_page(h), page);
1305         if (restore_reserve)
1306                 h->resv_huge_pages++;
1307
1308         if (PageHugeTemporary(page)) {
1309                 list_del(&page->lru);
1310                 ClearPageHugeTemporary(page);
1311                 update_and_free_page(h, page);
1312         } else if (h->surplus_huge_pages_node[nid]) {
1313                 /* remove the page from active list */
1314                 list_del(&page->lru);
1315                 update_and_free_page(h, page);
1316                 h->surplus_huge_pages--;
1317                 h->surplus_huge_pages_node[nid]--;
1318         } else {
1319                 arch_clear_hugepage_flags(page);
1320                 enqueue_huge_page(h, page);
1321         }
1322         spin_unlock(&hugetlb_lock);
1323 }
1324
1325 static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
1326 {
1327         INIT_LIST_HEAD(&page->lru);
1328         set_compound_page_dtor(page, HUGETLB_PAGE_DTOR);
1329         spin_lock(&hugetlb_lock);
1330         set_hugetlb_cgroup(page, NULL);
1331         h->nr_huge_pages++;
1332         h->nr_huge_pages_node[nid]++;
1333         ClearPageHugeFreed(page);
1334         spin_unlock(&hugetlb_lock);
1335 }
1336
1337 static void prep_compound_gigantic_page(struct page *page, unsigned int order)
1338 {
1339         int i;
1340         int nr_pages = 1 << order;
1341         struct page *p = page + 1;
1342
1343         /* we rely on prep_new_huge_page to set the destructor */
1344         set_compound_order(page, order);
1345         __ClearPageReserved(page);
1346         __SetPageHead(page);
1347         for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
1348                 /*
1349                  * For gigantic hugepages allocated through bootmem at
1350                  * boot, it's safer to be consistent with the not-gigantic
1351                  * hugepages and clear the PG_reserved bit from all tail pages
1352          * too.  Otherwise drivers using get_user_pages() to access tail
1353                  * pages may get the reference counting wrong if they see
1354                  * PG_reserved set on a tail page (despite the head page not
1355                  * having PG_reserved set).  Enforcing this consistency between
1356                  * head and tail pages allows drivers to optimize away a check
1357          * on the head page when they need to know if put_page() is needed
1358                  * after get_user_pages().
1359                  */
1360                 __ClearPageReserved(p);
1361                 set_page_count(p, 0);
1362                 set_compound_head(p, page);
1363         }
1364         atomic_set(compound_mapcount_ptr(page), -1);
1365 }
1366
1367 /*
1368  * PageHuge() only returns true for hugetlbfs pages, but not for normal or
1369  * transparent huge pages.  See the PageTransHuge() documentation for more
1370  * details.
1371  */
1372 int PageHuge(struct page *page)
1373 {
1374         if (!PageCompound(page))
1375                 return 0;
1376
1377         page = compound_head(page);
1378         return page[1].compound_dtor == HUGETLB_PAGE_DTOR;
1379 }
1380 EXPORT_SYMBOL_GPL(PageHuge);
1381
1382 /*
1383  * PageHeadHuge() only returns true for hugetlbfs head page, but not for
1384  * PageHeadHuge() only returns true for a hugetlbfs head page, but not for
1385  */
1386 int PageHeadHuge(struct page *page_head)
1387 {
1388         if (!PageHead(page_head))
1389                 return 0;
1390
1391         return get_compound_page_dtor(page_head) == free_huge_page;
1392 }
1393
1394 pgoff_t hugetlb_basepage_index(struct page *page)
1395 {
1396         struct page *page_head = compound_head(page);
1397         pgoff_t index = page_index(page_head);
1398         unsigned long compound_idx;
1399
1400         if (compound_order(page_head) >= MAX_ORDER)
1401                 compound_idx = page_to_pfn(page) - page_to_pfn(page_head);
1402         else
1403                 compound_idx = page - page_head;
1404
1405         return (index << compound_order(page_head)) + compound_idx;
1406 }
1407
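/*
 * Worked example (an illustration, not part of the original source):
 * for a 2MB huge page on x86 (compound_order == 9, below MAX_ORDER)
 * whose head page has page_index == 2, the sixth base page within it
 * (compound_idx == 5) yields (2 << 9) + 5 == 1029.
 */
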
1408 static struct page *alloc_buddy_huge_page(struct hstate *h,
1409                 gfp_t gfp_mask, int nid, nodemask_t *nmask)
1410 {
1411         int order = huge_page_order(h);
1412         struct page *page;
1413
1414         gfp_mask |= __GFP_COMP|__GFP_RETRY_MAYFAIL|__GFP_NOWARN;
1415         if (nid == NUMA_NO_NODE)
1416                 nid = numa_mem_id();
1417         page = __alloc_pages_nodemask(gfp_mask, order, nid, nmask);
1418         if (page)
1419                 __count_vm_event(HTLB_BUDDY_PGALLOC);
1420         else
1421                 __count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
1422
1423         return page;
1424 }
1425
1426 /*
1427  * Common helper to allocate a fresh hugetlb page. All specific allocators
1428  * should use this function to get new hugetlb pages.
1429  */
1430 static struct page *alloc_fresh_huge_page(struct hstate *h,
1431                 gfp_t gfp_mask, int nid, nodemask_t *nmask)
1432 {
1433         struct page *page;
1434
1435         if (hstate_is_gigantic(h))
1436                 page = alloc_gigantic_page(h, gfp_mask, nid, nmask);
1437         else
1438                 page = alloc_buddy_huge_page(h, gfp_mask,
1439                                 nid, nmask);
1440         if (!page)
1441                 return NULL;
1442
1443         if (hstate_is_gigantic(h))
1444                 prep_compound_gigantic_page(page, huge_page_order(h));
1445         prep_new_huge_page(h, page, page_to_nid(page));
1446
1447         return page;
1448 }
1449
1450 /*
1451  * Allocates a fresh page to the hugetlb allocator pool in a node-interleaved
1452  * manner.
1453  */
1454 static int alloc_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
1455 {
1456         struct page *page;
1457         int nr_nodes, node;
1458         gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
1459
1460         for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
1461                 page = alloc_fresh_huge_page(h, gfp_mask, node, nodes_allowed);
1462                 if (page)
1463                         break;
1464         }
1465
1466         if (!page)
1467                 return 0;
1468
1469         put_page(page); /* free it into the hugepage allocator */
1470
1471         return 1;
1472 }
1473
1474 /*
1475  * Free one huge page from the pool, taken from the next node to free.
1476  * Attempt to keep persistent huge pages more or less
1477  * balanced over allowed nodes.
1478  * Called with hugetlb_lock locked.
1479  */
1480 static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
1481                                                          bool acct_surplus)
1482 {
1483         int nr_nodes, node;
1484         int ret = 0;
1485
1486         for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
1487                 /*
1488                  * If we're returning unused surplus pages, only examine
1489                  * nodes with surplus pages.
1490                  */
1491                 if ((!acct_surplus || h->surplus_huge_pages_node[node]) &&
1492                     !list_empty(&h->hugepage_freelists[node])) {
1493                         struct page *page =
1494                                 list_entry(h->hugepage_freelists[node].next,
1495                                           struct page, lru);
1496                         list_del(&page->lru);
1497                         h->free_huge_pages--;
1498                         h->free_huge_pages_node[node]--;
1499                         if (acct_surplus) {
1500                                 h->surplus_huge_pages--;
1501                                 h->surplus_huge_pages_node[node]--;
1502                         }
1503                         update_and_free_page(h, page);
1504                         ret = 1;
1505                         break;
1506                 }
1507         }
1508
1509         return ret;
1510 }
1511
1512 /*
1513  * Dissolve a given free hugepage into free buddy pages. This function does
1514  * nothing for in-use hugepages and non-hugepages.
1515  * This function returns the following values:
1516  *
1517  *  -EBUSY: failed to dissolve the free hugepage, or the hugepage is in use
1518  *          (allocated or reserved).
1519  *       0: successfully dissolved the free hugepage, or the page is not a
1520  *          hugepage (considered as already dissolved)
1521  */
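/*
 * The caller must not hold hugetlb_lock; it is taken internally and, in the
 * retry path, temporarily dropped.
 */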
1522 int dissolve_free_huge_page(struct page *page)
1523 {
1524         int rc = -EBUSY;
1525
1526 retry:
1527         /* Avoid disrupting the normal path by needlessly taking hugetlb_lock */
1528         if (!PageHuge(page))
1529                 return 0;
1530
1531         spin_lock(&hugetlb_lock);
1532         if (!PageHuge(page)) {
1533                 rc = 0;
1534                 goto out;
1535         }
1536
1537         if (!page_count(page)) {
1538                 struct page *head = compound_head(page);
1539                 struct hstate *h = page_hstate(head);
1540                 int nid = page_to_nid(head);
1541                 if (h->free_huge_pages - h->resv_huge_pages == 0)
1542                         goto out;
1543
1544                 /*
1545                  * We should make sure that the page is already on the free list
1546                  * when it is dissolved.
1547                  */
1548                 if (unlikely(!PageHugeFreed(head))) {
1549                         spin_unlock(&hugetlb_lock);
1550                         cond_resched();
1551
1552                         /*
1553                          * Theoretically, we should return -EBUSY when we
1554                          * encounter this race.  In practice, the race window
1555                          * is quite small and a retry is very likely to
1556                          * dissolve the page successfully, so seizing this
1557                          * opportunity is an optimization that increases the
1558                          * success rate of dissolving the page.
1559                          */
1560                         goto retry;
1561                 }
1562
1563                 /*
1564                  * Move PageHWPoison flag from head page to the raw error page,
1565                  * which makes the subpages other than the error page reusable.
1566                  */
1567                 if (PageHWPoison(head) && page != head) {
1568                         SetPageHWPoison(page);
1569                         ClearPageHWPoison(head);
1570                 }
1571                 list_del(&head->lru);
1572                 h->free_huge_pages--;
1573                 h->free_huge_pages_node[nid]--;
1574                 h->max_huge_pages--;
1575                 update_and_free_page(h, head);
1576                 rc = 0;
1577         }
1578 out:
1579         spin_unlock(&hugetlb_lock);
1580         return rc;
1581 }
1582
1583 /*
1584  * Dissolve free hugepages in a given pfn range. Used by memory hotplug to
1585  * make specified memory blocks removable from the system.
1586  * Note that this will dissolve a free gigantic hugepage completely, if any
1587  * part of it lies within the given range.
1588  * Also note that if dissolve_free_huge_page() returns with an error, all
1589  * free hugepages that were dissolved before that error are lost.
1590  */
1591 int dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
1592 {
1593         unsigned long pfn;
1594         struct page *page;
1595         int rc = 0;
1596
1597         if (!hugepages_supported())
1598                 return rc;
1599
1600         for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << minimum_order) {
1601                 page = pfn_to_page(pfn);
1602                 rc = dissolve_free_huge_page(page);
1603                 if (rc)
1604                         break;
1605         }
1606
1607         return rc;
1608 }
1609
1610 /*
1611  * Allocates a fresh surplus page from the page allocator.
1612  */
1613 static struct page *alloc_surplus_huge_page(struct hstate *h, gfp_t gfp_mask,
1614                 int nid, nodemask_t *nmask)
1615 {
1616         struct page *page = NULL;
1617
1618         if (hstate_is_gigantic(h))
1619                 return NULL;
1620
1621         spin_lock(&hugetlb_lock);
1622         if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages)
1623                 goto out_unlock;
1624         spin_unlock(&hugetlb_lock);
1625
1626         page = alloc_fresh_huge_page(h, gfp_mask, nid, nmask);
1627         if (!page)
1628                 return NULL;
1629
1630         spin_lock(&hugetlb_lock);
1631         /*
1632          * We could have raced with the pool size change.
1633          * Double check that and simply deallocate the new page
1634          * if we would end up overcommitting the surpluses.  Abuse a
1635          * temporary page to work around the nasty free_huge_page
1636          * code flow.
1637          */
1638         if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
1639                 SetPageHugeTemporary(page);
1640                 spin_unlock(&hugetlb_lock);
1641                 put_page(page);
1642                 return NULL;
1643         } else {
1644                 h->surplus_huge_pages++;
1645                 h->surplus_huge_pages_node[page_to_nid(page)]++;
1646         }
1647
1648 out_unlock:
1649         spin_unlock(&hugetlb_lock);
1650
1651         return page;
1652 }
1653
1654 static struct page *alloc_migrate_huge_page(struct hstate *h, gfp_t gfp_mask,
1655                 int nid, nodemask_t *nmask)
1656 {
1657         struct page *page;
1658
1659         if (hstate_is_gigantic(h))
1660                 return NULL;
1661
1662         page = alloc_fresh_huge_page(h, gfp_mask, nid, nmask);
1663         if (!page)
1664                 return NULL;
1665
1666         /*
1667          * We do not account these pages as surplus because they are only
1668          * temporary and will be released properly on the last reference
1669          */
1670         SetPageHugeTemporary(page);
1671
1672         return page;
1673 }
1674
1675 /*
1676  * Use the VMA's mpolicy to allocate a huge page from the buddy.
1677  */
1678 static
1679 struct page *alloc_buddy_huge_page_with_mpol(struct hstate *h,
1680                 struct vm_area_struct *vma, unsigned long addr)
1681 {
1682         struct page *page;
1683         struct mempolicy *mpol;
1684         gfp_t gfp_mask = htlb_alloc_mask(h);
1685         int nid;
1686         nodemask_t *nodemask;
1687
1688         nid = huge_node(vma, addr, gfp_mask, &mpol, &nodemask);
1689         page = alloc_surplus_huge_page(h, gfp_mask, nid, nodemask);
1690         mpol_cond_put(mpol);
1691
1692         return page;
1693 }
1694
1695 /* page migration callback function */
1696 struct page *alloc_huge_page_node(struct hstate *h, int nid)
1697 {
1698         gfp_t gfp_mask = htlb_alloc_mask(h);
1699         struct page *page = NULL;
1700
1701         if (nid != NUMA_NO_NODE)
1702                 gfp_mask |= __GFP_THISNODE;
1703
1704         spin_lock(&hugetlb_lock);
1705         if (h->free_huge_pages - h->resv_huge_pages > 0)
1706                 page = dequeue_huge_page_nodemask(h, gfp_mask, nid, NULL);
1707         spin_unlock(&hugetlb_lock);
1708
1709         if (!page)
1710                 page = alloc_migrate_huge_page(h, gfp_mask, nid, NULL);
1711
1712         return page;
1713 }
1714
1715 /* page migration callback function */
1716 struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
1717                 nodemask_t *nmask)
1718 {
1719         gfp_t gfp_mask = htlb_alloc_mask(h);
1720
1721         spin_lock(&hugetlb_lock);
1722         if (h->free_huge_pages - h->resv_huge_pages > 0) {
1723                 struct page *page;
1724
1725                 page = dequeue_huge_page_nodemask(h, gfp_mask, preferred_nid, nmask);
1726                 if (page) {
1727                         spin_unlock(&hugetlb_lock);
1728                         return page;
1729                 }
1730         }
1731         spin_unlock(&hugetlb_lock);
1732
1733         return alloc_migrate_huge_page(h, gfp_mask, preferred_nid, nmask);
1734 }
1735
1736 /* mempolicy aware migration callback */
1737 struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
1738                 unsigned long address)
1739 {
1740         struct mempolicy *mpol;
1741         nodemask_t *nodemask;
1742         struct page *page;
1743         gfp_t gfp_mask;
1744         int node;
1745
1746         gfp_mask = htlb_alloc_mask(h);
1747         node = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
1748         page = alloc_huge_page_nodemask(h, node, nodemask);
1749         mpol_cond_put(mpol);
1750
1751         return page;
1752 }
1753
1754 /*
1755  * Increase the hugetlb pool such that it can accommodate a reservation
1756  * of size 'delta'.
1757  */
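/*
 * Called with hugetlb_lock held.  The lock is dropped while the surplus pages
 * are allocated and retaken before the pool and reservation counters are
 * updated, so 'needed' is recomputed after each reacquisition.
 */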
1758 static int gather_surplus_pages(struct hstate *h, int delta)
1759 {
1760         struct list_head surplus_list;
1761         struct page *page, *tmp;
1762         int ret, i;
1763         int needed, allocated;
1764         bool alloc_ok = true;
1765
1766         needed = (h->resv_huge_pages + delta) - h->free_huge_pages;
1767         if (needed <= 0) {
1768                 h->resv_huge_pages += delta;
1769                 return 0;
1770         }
1771
1772         allocated = 0;
1773         INIT_LIST_HEAD(&surplus_list);
1774
1775         ret = -ENOMEM;
1776 retry:
1777         spin_unlock(&hugetlb_lock);
1778         for (i = 0; i < needed; i++) {
1779                 page = alloc_surplus_huge_page(h, htlb_alloc_mask(h),
1780                                 NUMA_NO_NODE, NULL);
1781                 if (!page) {
1782                         alloc_ok = false;
1783                         break;
1784                 }
1785                 list_add(&page->lru, &surplus_list);
1786                 cond_resched();
1787         }
1788         allocated += i;
1789
1790         /*
1791          * After retaking hugetlb_lock, we need to recalculate 'needed'
1792          * because either resv_huge_pages or free_huge_pages may have changed.
1793          */
1794         spin_lock(&hugetlb_lock);
1795         needed = (h->resv_huge_pages + delta) -
1796                         (h->free_huge_pages + allocated);
1797         if (needed > 0) {
1798                 if (alloc_ok)
1799                         goto retry;
1800                 /*
1801                  * We were not able to allocate enough pages to
1802                  * satisfy the entire reservation so we free what
1803                  * we've allocated so far.
1804                  */
1805                 goto free;
1806         }
1807         /*
1808          * The surplus_list now contains _at_least_ the number of extra pages
1809          * needed to accommodate the reservation.  Add the appropriate number
1810          * of pages to the hugetlb pool and free the extras back to the buddy
1811          * allocator.  Commit the entire reservation here to prevent another
1812          * process from stealing the pages as they are added to the pool but
1813          * before they are reserved.
1814          */
1815         needed += allocated;
1816         h->resv_huge_pages += delta;
1817         ret = 0;
1818
1819         /* Free the needed pages to the hugetlb pool */
1820         list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
1821                 if ((--needed) < 0)
1822                         break;
1823                 /*
1824                  * This page is now managed by the hugetlb allocator and has
1825                  * no users -- drop the buddy allocator's reference.
1826                  */
1827                 put_page_testzero(page);
1828                 VM_BUG_ON_PAGE(page_count(page), page);
1829                 enqueue_huge_page(h, page);
1830         }
1831 free:
1832         spin_unlock(&hugetlb_lock);
1833
1834         /* Free unnecessary surplus pages to the buddy allocator */
1835         list_for_each_entry_safe(page, tmp, &surplus_list, lru)
1836                 put_page(page);
1837         spin_lock(&hugetlb_lock);
1838
1839         return ret;
1840 }
1841
1842 /*
1843  * This routine has two main purposes:
1844  * 1) Decrement the reservation count (resv_huge_pages) by the value passed
1845  *    in unused_resv_pages.  This corresponds to the prior adjustments made
1846  *    to the associated reservation map.
1847  * 2) Free any unused surplus pages that may have been allocated to satisfy
1848  *    the reservation.  As many as unused_resv_pages may be freed.
1849  *
1850  * Called with hugetlb_lock held.  However, the lock could be dropped (and
1851  * reacquired) during calls to cond_resched_lock.  Whenever dropping the lock,
1852  * we must make sure nobody else can claim pages we are in the process of
1853  * freeing.  Do this by ensuring resv_huge_pages is always greater than the
1854  * number of huge pages we plan to free when dropping the lock.
1855  */
1856 static void return_unused_surplus_pages(struct hstate *h,
1857                                         unsigned long unused_resv_pages)
1858 {
1859         unsigned long nr_pages;
1860
1861         /* Cannot return gigantic pages currently */
1862         if (hstate_is_gigantic(h))
1863                 goto out;
1864
1865         /*
1866          * Part (or even all) of the reservation could have been backed
1867          * by pre-allocated pages. Only free surplus pages.
1868          */
1869         nr_pages = min(unused_resv_pages, h->surplus_huge_pages);
1870
1871         /*
1872          * We want to release as many surplus pages as possible, spread
1873          * evenly across all nodes with memory. Iterate across these nodes
1874          * until we can no longer free unreserved surplus pages. This occurs
1875          * when the nodes with surplus pages have no free pages.
1876          * free_pool_huge_page() will balance the freed pages across the
1877          * on-line nodes with memory and will handle the hstate accounting.
1878          *
1879          * Note that we decrement resv_huge_pages as we free the pages.  If
1880          * we drop the lock, resv_huge_pages will still be sufficiently large
1881          * to cover subsequent pages we may free.
1882          */
1883         while (nr_pages--) {
1884                 h->resv_huge_pages--;
1885                 unused_resv_pages--;
1886                 if (!free_pool_huge_page(h, &node_states[N_MEMORY], 1))
1887                         goto out;
1888                 cond_resched_lock(&hugetlb_lock);
1889         }
1890
1891 out:
1892         /* Fully uncommit the reservation */
1893         h->resv_huge_pages -= unused_resv_pages;
1894 }
1895
1896
1897 /*
1898  * vma_needs_reservation, vma_commit_reservation and vma_end_reservation
1899  * are used by the huge page allocation routines to manage reservations.
1900  *
1901  * vma_needs_reservation is called to determine if the huge page at addr
1902  * within the vma has an associated reservation.  If a reservation is
1903  * needed, the value 1 is returned.  The caller is then responsible for
1904  * managing the global reservation and subpool usage counts.  After
1905  * the huge page has been allocated, vma_commit_reservation is called
1906  * to add the page to the reservation map.  If the page allocation fails,
1907  * the reservation must be ended instead of committed.  vma_end_reservation
1908  * is called in such cases.
1909  *
1910  * In the normal case, vma_commit_reservation returns the same value
1911  * as the preceding vma_needs_reservation call.  The only time this
1912  * is not the case is if a reserve map was changed between calls.  It
1913  * is the responsibility of the caller to notice the difference and
1914  * take appropriate action.
1915  *
1916  * vma_add_reservation is used in error paths where a reservation must
1917  * be restored when a newly allocated huge page must be freed.  It is
1918  * to be called after calling vma_needs_reservation to determine if a
1919  * reservation exists.
1920  */
1921 enum vma_resv_mode {
1922         VMA_NEEDS_RESV,
1923         VMA_COMMIT_RESV,
1924         VMA_END_RESV,
1925         VMA_ADD_RESV,
1926 };
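/*
 * Typical pairing of these helpers, shown only as an illustrative sketch of
 * the call pattern (see alloc_huge_page() below for the real flow and its
 * error handling):
 *
 *	chg = vma_needs_reservation(h, vma, addr);
 *	if (chg < 0)
 *		return ERR_PTR(-ENOMEM);
 *	page = ...allocate a huge page...;
 *	if (page)
 *		vma_commit_reservation(h, vma, addr);
 *	else
 *		vma_end_reservation(h, vma, addr);
 */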
1927 static long __vma_reservation_common(struct hstate *h,
1928                                 struct vm_area_struct *vma, unsigned long addr,
1929                                 enum vma_resv_mode mode)
1930 {
1931         struct resv_map *resv;
1932         pgoff_t idx;
1933         long ret;
1934
1935         resv = vma_resv_map(vma);
1936         if (!resv)
1937                 return 1;
1938
1939         idx = vma_hugecache_offset(h, vma, addr);
1940         switch (mode) {
1941         case VMA_NEEDS_RESV:
1942                 ret = region_chg(resv, idx, idx + 1);
1943                 break;
1944         case VMA_COMMIT_RESV:
1945                 ret = region_add(resv, idx, idx + 1);
1946                 break;
1947         case VMA_END_RESV:
1948                 region_abort(resv, idx, idx + 1);
1949                 ret = 0;
1950                 break;
1951         case VMA_ADD_RESV:
1952                 if (vma->vm_flags & VM_MAYSHARE)
1953                         ret = region_add(resv, idx, idx + 1);
1954                 else {
1955                         region_abort(resv, idx, idx + 1);
1956                         ret = region_del(resv, idx, idx + 1);
1957                 }
1958                 break;
1959         default:
1960                 BUG();
1961         }
1962
1963         if (vma->vm_flags & VM_MAYSHARE)
1964                 return ret;
1965         else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) && ret >= 0) {
1966                 /*
1967                  * In most cases, reserves always exist for private mappings.
1968                  * However, the file associated with the mapping could have
1969                  * been hole punched or truncated after reserves were consumed,
1970                  * and a subsequent fault on such a range will not use reserves.
1971                  * Subtle - The reserve map for private mappings has the
1972                  * opposite meaning than that of shared mappings.  If NO
1973                  * entry is in the reserve map, it means a reservation exists.
1974                  * If an entry exists in the reserve map, it means the
1975                  * reservation has already been consumed.  As a result, the
1976                  * return value of this routine is the opposite of the
1977                  * value returned from reserve map manipulation routines above.
1978                  */
1979                 if (ret)
1980                         return 0;
1981                 else
1982                         return 1;
1983         }
1984         else
1985                 return ret < 0 ? ret : 0;
1986 }
1987
1988 static long vma_needs_reservation(struct hstate *h,
1989                         struct vm_area_struct *vma, unsigned long addr)
1990 {
1991         return __vma_reservation_common(h, vma, addr, VMA_NEEDS_RESV);
1992 }
1993
1994 static long vma_commit_reservation(struct hstate *h,
1995                         struct vm_area_struct *vma, unsigned long addr)
1996 {
1997         return __vma_reservation_common(h, vma, addr, VMA_COMMIT_RESV);
1998 }
1999
2000 static void vma_end_reservation(struct hstate *h,
2001                         struct vm_area_struct *vma, unsigned long addr)
2002 {
2003         (void)__vma_reservation_common(h, vma, addr, VMA_END_RESV);
2004 }
2005
2006 static long vma_add_reservation(struct hstate *h,
2007                         struct vm_area_struct *vma, unsigned long addr)
2008 {
2009         return __vma_reservation_common(h, vma, addr, VMA_ADD_RESV);
2010 }
2011
2012 /*
2013  * This routine is called to restore a reservation on error paths.  In the
2014  * specific error paths, a huge page was allocated (via alloc_huge_page)
2015  * and is about to be freed.  If a reservation for the page existed,
2016  * alloc_huge_page would have consumed the reservation and set PagePrivate
2017  * in the newly allocated page.  When the page is freed via free_huge_page,
2018  * the global reservation count will be incremented if PagePrivate is set.
2019  * However, free_huge_page cannot adjust the reserve map.  Adjust the
2020  * reserve map here to be consistent with global reserve count adjustments
2021  * to be made by free_huge_page.
2022  */
2023 static void restore_reserve_on_error(struct hstate *h,
2024                         struct vm_area_struct *vma, unsigned long address,
2025                         struct page *page)
2026 {
2027         if (unlikely(PagePrivate(page))) {
2028                 long rc = vma_needs_reservation(h, vma, address);
2029
2030                 if (unlikely(rc < 0)) {
2031                         /*
2032                          * Rare out of memory condition in reserve map
2033                          * manipulation.  Clear PagePrivate so that
2034                          * global reserve count will not be incremented
2035                          * by free_huge_page.  This will make it appear
2036                          * as though the reservation for this page was
2037                          * consumed.  This may prevent the task from
2038                          * faulting in the page at a later time.  This
2039                          * is better than inconsistent global huge page
2040                          * accounting of reserve counts.
2041                          */
2042                         ClearPagePrivate(page);
2043                 } else if (rc) {
2044                         rc = vma_add_reservation(h, vma, address);
2045                         if (unlikely(rc < 0))
2046                                 /*
2047                                  * See above comment about rare out of
2048                                  * memory condition.
2049                                  */
2050                                 ClearPagePrivate(page);
2051                 } else
2052                         vma_end_reservation(h, vma, address);
2053         }
2054 }
2055
2056 struct page *alloc_huge_page(struct vm_area_struct *vma,
2057                                     unsigned long addr, int avoid_reserve)
2058 {
2059         struct hugepage_subpool *spool = subpool_vma(vma);
2060         struct hstate *h = hstate_vma(vma);
2061         struct page *page;
2062         long map_chg, map_commit;
2063         long gbl_chg;
2064         int ret, idx;
2065         struct hugetlb_cgroup *h_cg;
2066
2067         idx = hstate_index(h);
2068         /*
2069          * Examine the region/reserve map to determine if the process
2070          * has a reservation for the page to be allocated.  A return
2071          * code of zero indicates a reservation exists (no change).
2072          */
2073         map_chg = gbl_chg = vma_needs_reservation(h, vma, addr);
2074         if (map_chg < 0)
2075                 return ERR_PTR(-ENOMEM);
2076
2077         /*
2078          * Processes that did not create the mapping will have no
2079          * reserves as indicated by the region/reserve map. Check
2080          * that the allocation will not exceed the subpool limit.
2081          * Allocations for MAP_NORESERVE mappings also need to be
2082          * checked against any subpool limit.
2083          */
2084         if (map_chg || avoid_reserve) {
2085                 gbl_chg = hugepage_subpool_get_pages(spool, 1);
2086                 if (gbl_chg < 0) {
2087                         vma_end_reservation(h, vma, addr);
2088                         return ERR_PTR(-ENOSPC);
2089                 }
2090
2091                 /*
2092                  * Even though there was no reservation in the region/reserve
2093                  * map, there could be reservations associated with the
2094                  * subpool that can be used.  This would be indicated if the
2095                  * return value of hugepage_subpool_get_pages() is zero.
2096                  * However, if avoid_reserve is specified we still avoid even
2097                  * the subpool reservations.
2098                  */
2099                 if (avoid_reserve)
2100                         gbl_chg = 1;
2101         }
2102
2103         ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg);
2104         if (ret)
2105                 goto out_subpool_put;
2106
2107         spin_lock(&hugetlb_lock);
2108         /*
2109          * gbl_chg is passed to indicate whether or not a page must be taken
2110          * from the global free pool (global change).  gbl_chg == 0 indicates
2111          * a reservation exists for the allocation.
2112          */
2113         page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, gbl_chg);
2114         if (!page) {
2115                 spin_unlock(&hugetlb_lock);
2116                 page = alloc_buddy_huge_page_with_mpol(h, vma, addr);
2117                 if (!page)
2118                         goto out_uncharge_cgroup;
2119                 spin_lock(&hugetlb_lock);
2120                 if (!avoid_reserve && vma_has_reserves(vma, gbl_chg)) {
2121                         SetPagePrivate(page);
2122                         h->resv_huge_pages--;
2123                 }
2124                 list_move(&page->lru, &h->hugepage_activelist);
2125                 /* Fall through */
2126         }
2127         hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page);
2128         spin_unlock(&hugetlb_lock);
2129
2130         set_page_private(page, (unsigned long)spool);
2131
2132         map_commit = vma_commit_reservation(h, vma, addr);
2133         if (unlikely(map_chg > map_commit)) {
2134                 /*
2135                  * The page was added to the reservation map between
2136                  * vma_needs_reservation and vma_commit_reservation.
2137                  * This indicates a race with hugetlb_reserve_pages.
2138                  * Adjust for the subpool count incremented above AND
2139                  * in hugetlb_reserve_pages for the same page.  Also,
2140                  * the reservation count added in hugetlb_reserve_pages
2141                  * no longer applies.
2142                  */
2143                 long rsv_adjust;
2144
2145                 rsv_adjust = hugepage_subpool_put_pages(spool, 1);
2146                 hugetlb_acct_memory(h, -rsv_adjust);
2147         }
2148         return page;
2149
2150 out_uncharge_cgroup:
2151         hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h), h_cg);
2152 out_subpool_put:
2153         if (map_chg || avoid_reserve)
2154                 hugepage_subpool_put_pages(spool, 1);
2155         vma_end_reservation(h, vma, addr);
2156         return ERR_PTR(-ENOSPC);
2157 }
2158
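/*
 * Allocate one boot-time huge page from bootmem, interleaving across the
 * nodes with memory; used for gigantic hstates requested on the command
 * line.  The page is recorded on huge_boot_pages and only handed to the
 * hugetlb pool later, once mem_map is up, by gather_bootmem_prealloc().
 */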
2159 int alloc_bootmem_huge_page(struct hstate *h)
2160         __attribute__ ((weak, alias("__alloc_bootmem_huge_page")));
2161 int __alloc_bootmem_huge_page(struct hstate *h)
2162 {
2163         struct huge_bootmem_page *m;
2164         int nr_nodes, node;
2165
2166         for_each_node_mask_to_alloc(h, nr_nodes, node, &node_states[N_MEMORY]) {
2167                 void *addr;
2168
2169                 addr = memblock_virt_alloc_try_nid_raw(
2170                                 huge_page_size(h), huge_page_size(h),
2171                                 0, BOOTMEM_ALLOC_ACCESSIBLE, node);
2172                 if (addr) {
2173                         /*
2174                          * Use the beginning of the huge page to store the
2175                          * huge_bootmem_page struct (until gather_bootmem
2176                          * puts them into the mem_map).
2177                          */
2178                         m = addr;
2179                         goto found;
2180                 }
2181         }
2182         return 0;
2183
2184 found:
2185         BUG_ON(!IS_ALIGNED(virt_to_phys(m), huge_page_size(h)));
2186         /* Put them into a private list first because mem_map is not up yet */
2187         INIT_LIST_HEAD(&m->list);
2188         list_add(&m->list, &huge_boot_pages);
2189         m->hstate = h;
2190         return 1;
2191 }
2192
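/*
 * Turn a bootmem-allocated range into a compound huge page, using the
 * gigantic variant for orders the buddy allocator cannot handle.
 */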
2193 static void __init prep_compound_huge_page(struct page *page,
2194                 unsigned int order)
2195 {
2196         if (unlikely(order > (MAX_ORDER - 1)))
2197                 prep_compound_gigantic_page(page, order);
2198         else
2199                 prep_compound_page(page, order);
2200 }
2201
2202 /* Put bootmem huge pages into the standard lists after mem_map is up */
2203 static void __init gather_bootmem_prealloc(void)
2204 {
2205         struct huge_bootmem_page *m;
2206
2207         list_for_each_entry(m, &huge_boot_pages, list) {
2208                 struct page *page = virt_to_page(m);
2209                 struct hstate *h = m->hstate;
2210
2211                 WARN_ON(page_count(page) != 1);
2212                 prep_compound_huge_page(page, h->order);
2213                 WARN_ON(PageReserved(page));
2214                 prep_new_huge_page(h, page, page_to_nid(page));
2215                 put_page(page); /* free it into the hugepage allocator */
2216
2217                 /*
2218                  * If we had gigantic hugepages allocated at boot time, we need
2219                  * to restore the 'stolen' pages to totalram_pages in order to
2220                  * fix confusing memory reports from free(1) and other
2221                  * side effects, such as CommitLimit going negative.
2222                  */
2223                 if (hstate_is_gigantic(h))
2224                         adjust_managed_page_count(page, 1 << h->order);
2225                 cond_resched();
2226         }
2227 }
2228
2229 static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
2230 {
2231         unsigned long i;
2232
2233         for (i = 0; i < h->max_huge_pages; ++i) {
2234                 if (hstate_is_gigantic(h)) {
2235                         if (!alloc_bootmem_huge_page(h))
2236                                 break;
2237                 } else if (!alloc_pool_huge_page(h,
2238                                          &node_states[N_MEMORY]))
2239                         break;
2240                 cond_resched();
2241         }
2242         if (i < h->max_huge_pages) {
2243                 char buf[32];
2244
2245                 string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
2246                 pr_warn("HugeTLB: allocating %lu of page size %s failed.  Only allocated %lu hugepages.\n",
2247                         h->max_huge_pages, buf, i);
2248                 h->max_huge_pages = i;
2249         }
2250 }
2251
2252 static void __init hugetlb_init_hstates(void)
2253 {
2254         struct hstate *h;
2255
2256         for_each_hstate(h) {
2257                 if (minimum_order > huge_page_order(h))
2258                         minimum_order = huge_page_order(h);
2259
2260                 /* oversize hugepages were init'ed in early boot */
2261                 if (!hstate_is_gigantic(h))
2262                         hugetlb_hstate_alloc_pages(h);
2263         }
2264         VM_BUG_ON(minimum_order == UINT_MAX);
2265 }
2266
2267 static void __init report_hugepages(void)
2268 {
2269         struct hstate *h;
2270
2271         for_each_hstate(h) {
2272                 char buf[32];
2273
2274                 string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
2275                 pr_info("HugeTLB registered %s page size, pre-allocated %ld pages\n",
2276                         buf, h->free_huge_pages);
2277         }
2278 }
2279
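/*
 * With CONFIG_HIGHMEM, shrink the pool by preferentially freeing huge pages
 * that reside in lowmem while skipping highmem-backed ones; without
 * CONFIG_HIGHMEM this is a no-op.
 */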
2280 #ifdef CONFIG_HIGHMEM
2281 static void try_to_free_low(struct hstate *h, unsigned long count,
2282                                                 nodemask_t *nodes_allowed)
2283 {
2284         int i;
2285
2286         if (hstate_is_gigantic(h))
2287                 return;
2288
2289         for_each_node_mask(i, *nodes_allowed) {
2290                 struct page *page, *next;
2291                 struct list_head *freel = &h->hugepage_freelists[i];
2292                 list_for_each_entry_safe(page, next, freel, lru) {
2293                         if (count >= h->nr_huge_pages)
2294                                 return;
2295                         if (PageHighMem(page))
2296                                 continue;
2297                         list_del(&page->lru);
2298                         update_and_free_page(h, page);
2299                         h->free_huge_pages--;
2300                         h->free_huge_pages_node[page_to_nid(page)]--;
2301                 }
2302         }
2303 }
2304 #else
2305 static inline void try_to_free_low(struct hstate *h, unsigned long count,
2306                                                 nodemask_t *nodes_allowed)
2307 {
2308 }
2309 #endif
2310
2311 /*
2312  * Increment or decrement surplus_huge_pages.  Keep node-specific counters
2313  * balanced by operating on them in a round-robin fashion.
2314  * Returns 1 if an adjustment was made.
2315  */
2316 static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed,
2317                                 int delta)
2318 {
2319         int nr_nodes, node;
2320
2321         VM_BUG_ON(delta != -1 && delta != 1);
2322
2323         if (delta < 0) {
2324                 for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
2325                         if (h->surplus_huge_pages_node[node])
2326                                 goto found;
2327                 }
2328         } else {
2329                 for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
2330                         if (h->surplus_huge_pages_node[node] <
2331                                         h->nr_huge_pages_node[node])
2332                                 goto found;
2333                 }
2334         }
2335         return 0;
2336
2337 found:
2338         h->surplus_huge_pages += delta;
2339         h->surplus_huge_pages_node[node] += delta;
2340         return 1;
2341 }
2342
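/* Huge pages in the pool that are not counted as surplus (overcommit). */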
2343 #define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
2344 static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count,
2345                                                 nodemask_t *nodes_allowed)
2346 {
2347         unsigned long min_count, ret;
2348
2349         if (hstate_is_gigantic(h) && !gigantic_page_supported())
2350                 return h->max_huge_pages;
2351
2352         /*
2353          * Increase the pool size
2354          * First take pages out of surplus state.  Then make up the
2355          * remaining difference by allocating fresh huge pages.
2356          *
2357          * We might race with alloc_surplus_huge_page() here and be unable
2358          * to convert a surplus huge page to a normal huge page. That is
2359          * not critical, though, it just means the overall size of the
2360          * pool might be one hugepage larger than it needs to be, but
2361          * within all the constraints specified by the sysctls.
2362          */
2363         spin_lock(&hugetlb_lock);
2364         while (h->surplus_huge_pages && count > persistent_huge_pages(h)) {
2365                 if (!adjust_pool_surplus(h, nodes_allowed, -1))
2366                         break;
2367         }
2368
2369         while (count > persistent_huge_pages(h)) {
2370                 /*
2371                  * If this allocation races such that we no longer need the
2372                  * page, free_huge_page will handle it by freeing the page
2373                  * and reducing the surplus.
2374                  */
2375                 spin_unlock(&hugetlb_lock);
2376
2377                 /* yield cpu to avoid soft lockup */
2378                 cond_resched();
2379
2380                 ret = alloc_pool_huge_page(h, nodes_allowed);
2381                 spin_lock(&hugetlb_lock);
2382                 if (!ret)
2383                         goto out;
2384
2385                 /* Bail for signals. Probably ctrl-c from user */
2386                 if (signal_pending(current))
2387                         goto out;
2388         }
2389
2390         /*
2391          * Decrease the pool size
2392          * First return free pages to the buddy allocator (being careful
2393          * to keep enough around to satisfy reservations).  Then place
2394          * pages into surplus state as needed so the pool will shrink
2395          * to the desired size as pages become free.
2396          *
2397          * By placing pages into the surplus state independent of the
2398          * overcommit value, we are allowing the surplus pool size to
2399          * exceed overcommit. There are few sane options here. Since
2400          * alloc_surplus_huge_page() is checking the global counter,
2401          * though, we'll note that we're not allowed to exceed surplus
2402          * and won't grow the pool anywhere else. Not until one of the
2403          * sysctls are changed, or the surplus pages go out of use.
2404          */
2405         min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages;
2406         min_count = max(count, min_count);
2407         try_to_free_low(h, min_count, nodes_allowed);
2408         while (min_count < persistent_huge_pages(h)) {
2409                 if (!free_pool_huge_page(h, nodes_allowed, 0))
2410                         break;
2411                 cond_resched_lock(&hugetlb_lock);
2412         }
2413         while (count < persistent_huge_pages(h)) {
2414                 if (!adjust_pool_surplus(h, nodes_allowed, 1))
2415                         break;
2416         }
2417 out:
2418         ret = persistent_huge_pages(h);
2419         spin_unlock(&hugetlb_lock);
2420         return ret;
2421 }
2422
2423 #define HSTATE_ATTR_RO(_name) \
2424         static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
2425
2426 #define HSTATE_ATTR(_name) \
2427         static struct kobj_attribute _name##_attr = \
2428                 __ATTR(_name, 0644, _name##_show, _name##_store)
2429
2430 static struct kobject *hugepages_kobj;
2431 static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
2432
2433 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp);
2434
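/*
 * Map a hugepages sysfs kobject back to its hstate.  When @nidp is non-NULL,
 * it is set to NUMA_NO_NODE for the global attributes, or to the node id for
 * the per-node attributes (via kobj_to_node_hstate()).
 */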
2435 static struct hstate *kobj_to_hstate(struct kobject *kobj, int *nidp)
2436 {
2437         int i;
2438
2439         for (i = 0; i < HUGE_MAX_HSTATE; i++)
2440                 if (hstate_kobjs[i] == kobj) {
2441                         if (nidp)
2442                                 *nidp = NUMA_NO_NODE;
2443                         return &hstates[i];
2444                 }
2445
2446         return kobj_to_node_hstate(kobj, nidp);
2447 }
2448
2449 static ssize_t nr_hugepages_show_common(struct kobject *kobj,
2450                                         struct kobj_attribute *attr, char *buf)
2451 {
2452         struct hstate *h;
2453         unsigned long nr_huge_pages;
2454         int nid;
2455
2456         h = kobj_to_hstate(kobj, &nid);
2457         if (nid == NUMA_NO_NODE)
2458                 nr_huge_pages = h->nr_huge_pages;
2459         else
2460                 nr_huge_pages = h->nr_huge_pages_node[nid];
2461
2462         return sprintf(buf, "%lu\n", nr_huge_pages);
2463 }
2464
2465 static ssize_t __nr_hugepages_store_common(bool obey_mempolicy,
2466                                            struct hstate *h, int nid,
2467                                            unsigned long count, size_t len)
2468 {
2469         int err;
2470         NODEMASK_ALLOC(nodemask_t, nodes_allowed, GFP_KERNEL | __GFP_NORETRY);
2471
2472         if (hstate_is_gigantic(h) && !gigantic_page_supported()) {
2473                 err = -EINVAL;
2474                 goto out;
2475         }
2476
2477         if (nid == NUMA_NO_NODE) {
2478                 /*
2479                  * global hstate attribute
2480                  */
2481                 if (!(obey_mempolicy &&
2482                                 init_nodemask_of_mempolicy(nodes_allowed))) {
2483                         NODEMASK_FREE(nodes_allowed);
2484                         nodes_allowed = &node_states[N_MEMORY];
2485                 }
2486         } else if (nodes_allowed) {
2487                 /*
2488                  * per node hstate attribute: adjust count to global,
2489                  * but restrict alloc/free to the specified node.
2490                  */
2491                 count += h->nr_huge_pages - h->nr_huge_pages_node[nid];
2492                 init_nodemask_of_node(nodes_allowed, nid);
2493         } else
2494                 nodes_allowed = &node_states[N_MEMORY];
2495
2496         h->max_huge_pages = set_max_huge_pages(h, count, nodes_allowed);
2497
2498         if (nodes_allowed != &node_states[N_MEMORY])
2499                 NODEMASK_FREE(nodes_allowed);
2500
2501         return len;
2502 out:
2503         NODEMASK_FREE(nodes_allowed);
2504         return err;
2505 }
2506
2507 static ssize_t nr_hugepages_store_common(bool obey_mempolicy,
2508                                          struct kobject *kobj, const char *buf,
2509                                          size_t len)
2510 {
2511         struct hstate *h;
2512         unsigned long count;
2513         int nid;
2514         int err;
2515
2516         err = kstrtoul(buf, 10, &count);
2517         if (err)
2518                 return err;
2519
2520         h = kobj_to_hstate(kobj, &nid);
2521         return __nr_hugepages_store_common(obey_mempolicy, h, nid, count, len);
2522 }
2523
2524 static ssize_t nr_hugepages_show(struct kobject *kobj,
2525                                        struct kobj_attribute *attr, char *buf)
2526 {
2527         return nr_hugepages_show_common(kobj, attr, buf);
2528 }
2529
2530 static ssize_t nr_hugepages_store(struct kobject *kobj,
2531                struct kobj_attribute *attr, const char *buf, size_t len)
2532 {
2533         return nr_hugepages_store_common(false, kobj, buf, len);
2534 }
2535 HSTATE_ATTR(nr_hugepages);
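
/*
 * nr_hugepages is exposed under /sys/kernel/mm/hugepages/<hstate name>/.
 * For example, with 2MB huge pages the persistent pool can be resized from
 * user space with:
 *
 *	echo 128 > /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages
 *
 * which lands in nr_hugepages_store() and, from there, set_max_huge_pages().
 */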
2536
2537 #ifdef CONFIG_NUMA
2538
2539 /*
2540  * hstate attribute for optionally mempolicy-based constraint on persistent
2541  * huge page alloc/free.
2542  */
2543 static ssize_t nr_hugepages_mempolicy_show(struct kobject *kobj,
2544                                        struct kobj_attribute *attr, char *buf)
2545 {
2546         return nr_hugepages_show_common(kobj, attr, buf);
2547 }
2548
2549 static ssize_t nr_hugepages_mempolicy_store(struct kobject *kobj,
2550                struct kobj_attribute *attr, const char *buf, size_t len)
2551 {
2552         return nr_hugepages_store_common(true, kobj, buf, len);
2553 }
2554 HSTATE_ATTR(nr_hugepages_mempolicy);
2555 #endif
2556
2557
2558 static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj,
2559                                         struct kobj_attribute *attr, char *buf)
2560 {
2561         struct hstate *h = kobj_to_hstate(kobj, NULL);
2562         return sprintf(buf, "%lu\n", h->nr_overcommit_huge_pages);
2563 }
2564
2565 static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
2566                 struct kobj_attribute *attr, const char *buf, size_t count)
2567 {
2568         int err;
2569         unsigned long input;
2570         struct hstate *h = kobj_to_hstate(kobj, NULL);
2571
2572         if (hstate_is_gigantic(h))
2573                 return -EINVAL;
2574
2575         err = kstrtoul(buf, 10, &input);
2576         if (err)
2577                 return err;
2578
2579         spin_lock(&hugetlb_lock);
2580         h->nr_overcommit_huge_pages = input;
2581         spin_unlock(&hugetlb_lock);
2582
2583         return count;
2584 }
2585 HSTATE_ATTR(nr_overcommit_hugepages);
2586
2587 static ssize_t free_hugepages_show(struct kobject *kobj,
2588                                         struct kobj_attribute *attr, char *buf)
2589 {
2590         struct hstate *h;
2591         unsigned long free_huge_pages;
2592         int nid;
2593
2594         h = kobj_to_hstate(kobj, &nid);
2595         if (nid == NUMA_NO_NODE)
2596                 free_huge_pages = h->free_huge_pages;
2597         else
2598                 free_huge_pages = h->free_huge_pages_node[nid];
2599
2600         return sprintf(buf, "%lu\n", free_huge_pages);
2601 }
2602 HSTATE_ATTR_RO(free_hugepages);
2603
2604 static ssize_t resv_hugepages_show(struct kobject *kobj,
2605                                         struct kobj_attribute *attr, char *buf)
2606 {
2607         struct hstate *h = kobj_to_hstate(kobj, NULL);
2608         return sprintf(buf, "%lu\n", h->resv_huge_pages);
2609 }
2610 HSTATE_ATTR_RO(resv_hugepages);
2611
2612 static ssize_t surplus_hugepages_show(struct kobject *kobj,
2613                                         struct kobj_attribute *attr, char *buf)
2614 {
2615         struct hstate *h;
2616         unsigned long surplus_huge_pages;
2617         int nid;
2618
2619         h = kobj_to_hstate(kobj, &nid);
2620         if (nid == NUMA_NO_NODE)
2621                 surplus_huge_pages = h->surplus_huge_pages;
2622         else
2623                 surplus_huge_pages = h->surplus_huge_pages_node[nid];
2624
2625         return sprintf(buf, "%lu\n", surplus_huge_pages);
2626 }
2627 HSTATE_ATTR_RO(surplus_hugepages);
2628
2629 static struct attribute *hstate_attrs[] = {
2630         &nr_hugepages_attr.attr,
2631         &nr_overcommit_hugepages_attr.attr,
2632         &free_hugepages_attr.attr,
2633         &resv_hugepages_attr.attr,
2634         &surplus_hugepages_attr.attr,
2635 #ifdef CONFIG_NUMA
2636         &nr_hugepages_mempolicy_attr.attr,
2637 #endif
2638         NULL,
2639 };
2640
2641 static const struct attribute_group hstate_attr_group = {
2642         .attrs = hstate_attrs,
2643 };
2644
2645 static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent,
2646                                     struct kobject **hstate_kobjs,
2647                                     const struct attribute_group *hstate_attr_group)
2648 {
2649         int retval;
2650         int hi = hstate_index(h);
2651
2652         hstate_kobjs[hi] = kobject_create_and_add(h->name, parent);
2653         if (!hstate_kobjs[hi])
2654                 return -ENOMEM;
2655
2656         retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group);
2657         if (retval) {
2658                 kobject_put(hstate_kobjs[hi]);
2659                 hstate_kobjs[hi] = NULL;
2660         }
2661
2662         return retval;
2663 }
2664
2665 static void __init hugetlb_sysfs_init(void)
2666 {
2667         struct hstate *h;
2668         int err;
2669
2670         hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj);
2671         if (!hugepages_kobj)
2672                 return;
2673
2674         for_each_hstate(h) {
2675                 err = hugetlb_sysfs_add_hstate(h, hugepages_kobj,
2676                                          hstate_kobjs, &hstate_attr_group);
2677                 if (err)
2678                         pr_err("Hugetlb: Unable to add hstate %s", h->name);
2679         }
2680 }
2681
2682 #ifdef CONFIG_NUMA
2683
2684 /*
2685  * node_hstate/s - associate per node hstate attributes, via their kobjects,
2686  * with node devices in node_devices[] using a parallel array.  The array
2687  * index of a node device or _hstate == node id.
2688  * This is here to avoid any static dependency of the node device driver, in
2689  * the base kernel, on the hugetlb module.
2690  */
2691 struct node_hstate {
2692         struct kobject          *hugepages_kobj;
2693         struct kobject          *hstate_kobjs[HUGE_MAX_HSTATE];
2694 };
2695 static struct node_hstate node_hstates[MAX_NUMNODES];
2696
2697 /*
2698  * A subset of global hstate attributes for node devices
2699  */
2700 static struct attribute *per_node_hstate_attrs[] = {
2701         &nr_hugepages_attr.attr,
2702         &free_hugepages_attr.attr,
2703         &surplus_hugepages_attr.attr,
2704         NULL,
2705 };
2706
2707 static const struct attribute_group per_node_hstate_attr_group = {
2708         .attrs = per_node_hstate_attrs,
2709 };
2710
2711 /*
2712  * kobj_to_node_hstate - lookup global hstate for node device hstate attr kobj.
2713  * Returns node id via non-NULL nidp.
2714  */
2715 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
2716 {
2717         int nid;
2718
2719         for (nid = 0; nid < nr_node_ids; nid++) {
2720                 struct node_hstate *nhs = &node_hstates[nid];
2721                 int i;
2722                 for (i = 0; i < HUGE_MAX_HSTATE; i++)
2723                         if (nhs->hstate_kobjs[i] == kobj) {
2724                                 if (nidp)
2725                                         *nidp = nid;
2726                                 return &hstates[i];
2727                         }
2728         }
2729
2730         BUG();
2731         return NULL;
2732 }
2733
2734 /*
2735  * Unregister hstate attributes from a single node device.
2736  * No-op if no hstate attributes attached.
2737  */
2738 static void hugetlb_unregister_node(struct node *node)
2739 {
2740         struct hstate *h;
2741         struct node_hstate *nhs = &node_hstates[node->dev.id];
2742
2743         if (!nhs->hugepages_kobj)
2744                 return;         /* no hstate attributes */
2745
2746         for_each_hstate(h) {
2747                 int idx = hstate_index(h);
2748                 if (nhs->hstate_kobjs[idx]) {
2749                         kobject_put(nhs->hstate_kobjs[idx]);
2750                         nhs->hstate_kobjs[idx] = NULL;
2751                 }
2752         }
2753
2754         kobject_put(nhs->hugepages_kobj);
2755         nhs->hugepages_kobj = NULL;
2756 }
2757
2758
2759 /*
2760  * Register hstate attributes for a single node device.
2761  * No-op if attributes already registered.
2762  */
2763 static void hugetlb_register_node(struct node *node)
2764 {
2765         struct hstate *h;
2766         struct node_hstate *nhs = &node_hstates[node->dev.id];
2767         int err;
2768
2769         if (nhs->hugepages_kobj)
2770                 return;         /* already allocated */
2771
2772         nhs->hugepages_kobj = kobject_create_and_add("hugepages",
2773                                                         &node->dev.kobj);
2774         if (!nhs->hugepages_kobj)
2775                 return;
2776
2777         for_each_hstate(h) {
2778                 err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj,
2779                                                 nhs->hstate_kobjs,
2780                                                 &per_node_hstate_attr_group);
2781                 if (err) {
2782                         pr_err("Hugetlb: Unable to add hstate %s for node %d\n",
2783                                 h->name, node->dev.id);
2784                         hugetlb_unregister_node(node);
2785                         break;
2786                 }
2787         }
2788 }
2789
2790 /*
2791  * hugetlb init time:  register hstate attributes for all registered node
2792  * devices of nodes that have memory.  All on-line nodes should have
2793  * registered their associated device by this time.
2794  */
2795 static void __init hugetlb_register_all_nodes(void)
2796 {
2797         int nid;
2798
2799         for_each_node_state(nid, N_MEMORY) {
2800                 struct node *node = node_devices[nid];
2801                 if (node->dev.id == nid)
2802                         hugetlb_register_node(node);
2803         }
2804
2805         /*
2806          * Let the node device driver know we're here so it can
2807          * [un]register hstate attributes on node hotplug.
2808          */
2809         register_hugetlbfs_with_node(hugetlb_register_node,
2810                                      hugetlb_unregister_node);
2811 }
2812 #else   /* !CONFIG_NUMA */
2813
2814 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
2815 {
2816         BUG();
2817         if (nidp)
2818                 *nidp = -1;
2819         return NULL;
2820 }
2821
2822 static void hugetlb_register_all_nodes(void) { }
2823
2824 #endif
2825
2826 static int __init hugetlb_init(void)
2827 {
2828         int i;
2829
2830         if (!hugepages_supported())
2831                 return 0;
2832
2833         if (!size_to_hstate(default_hstate_size)) {
2834                 if (default_hstate_size != 0) {
2835                         pr_err("HugeTLB: unsupported default_hugepagesz %lu. Reverting to %lu\n",
2836                                default_hstate_size, HPAGE_SIZE);
2837                 }
2838
2839                 default_hstate_size = HPAGE_SIZE;
2840                 if (!size_to_hstate(default_hstate_size))
2841                         hugetlb_add_hstate(HUGETLB_PAGE_ORDER);
2842         }
2843         default_hstate_idx = hstate_index(size_to_hstate(default_hstate_size));
2844         if (default_hstate_max_huge_pages) {
2845                 if (!default_hstate.max_huge_pages)
2846                         default_hstate.max_huge_pages = default_hstate_max_huge_pages;
2847         }
2848
2849         hugetlb_init_hstates();
2850         gather_bootmem_prealloc();
2851         report_hugepages();
2852
2853         hugetlb_sysfs_init();
2854         hugetlb_register_all_nodes();
2855         hugetlb_cgroup_file_init();
2856
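        /*
         * Size the fault mutex table with the number of possible CPUs so
         * that unrelated faults rarely contend on the same mutex; a single
         * mutex suffices on UP builds.
         */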
2857 #ifdef CONFIG_SMP
2858         num_fault_mutexes = roundup_pow_of_two(8 * num_possible_cpus());
2859 #else
2860         num_fault_mutexes = 1;
2861 #endif
2862         hugetlb_fault_mutex_table =
2863                 kmalloc_array(num_fault_mutexes, sizeof(struct mutex),
2864                               GFP_KERNEL);
2865         BUG_ON(!hugetlb_fault_mutex_table);
2866
2867         for (i = 0; i < num_fault_mutexes; i++)
2868                 mutex_init(&hugetlb_fault_mutex_table[i]);
2869         return 0;
2870 }
2871 subsys_initcall(hugetlb_init);
2872
2873 /* Should be called on processing a hugepagesz=... option */
2874 void __init hugetlb_bad_size(void)
2875 {
2876         parsed_valid_hugepagesz = false;
2877 }
2878
2879 void __init hugetlb_add_hstate(unsigned int order)
2880 {
2881         struct hstate *h;
2882         unsigned long i;
2883
2884         if (size_to_hstate(PAGE_SIZE << order)) {
2885                 pr_warn("hugepagesz= specified twice, ignoring\n");
2886                 return;
2887         }
2888         BUG_ON(hugetlb_max_hstate >= HUGE_MAX_HSTATE);
2889         BUG_ON(order == 0);
2890         h = &hstates[hugetlb_max_hstate++];
2891         h->order = order;
2892         h->mask = ~((1ULL << (order + PAGE_SHIFT)) - 1);
2893         h->nr_huge_pages = 0;
2894         h->free_huge_pages = 0;
2895         for (i = 0; i < MAX_NUMNODES; ++i)
2896                 INIT_LIST_HEAD(&h->hugepage_freelists[i]);
2897         INIT_LIST_HEAD(&h->hugepage_activelist);
2898         h->next_nid_to_alloc = first_memory_node;
2899         h->next_nid_to_free = first_memory_node;
2900         snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
2901                                         huge_page_size(h)/1024);
2902
2903         parsed_hstate = h;
2904 }
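/*
 * Worked example (added commentary, assuming a 4 KiB base page size as
 * on x86-64): hugetlb_add_hstate(HUGETLB_PAGE_ORDER) with order 9 gives
 * huge_page_size(h) = 4 KiB << 9 = 2 MiB, so the hstate is named
 * "hugepages-2048kB" by the snprintf() above.
 */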
2905
2906 static int __init hugetlb_nrpages_setup(char *s)
2907 {
2908         unsigned long *mhp;
2909         static unsigned long *last_mhp;
2910
2911         if (!parsed_valid_hugepagesz) {
2912                 pr_warn("hugepages = %s preceded by "
2913                         "an unsupported hugepagesz, ignoring\n", s);
2914                 parsed_valid_hugepagesz = true;
2915                 return 1;
2916         }
2917         /*
2918          * !hugetlb_max_hstate means we haven't parsed a hugepagesz= parameter yet,
2919          * so this hugepages= parameter goes to the "default hstate".
2920          */
2921         else if (!hugetlb_max_hstate)
2922                 mhp = &default_hstate_max_huge_pages;
2923         else
2924                 mhp = &parsed_hstate->max_huge_pages;
2925
2926         if (mhp == last_mhp) {
2927                 pr_warn("hugepages= specified twice without interleaving hugepagesz=, ignoring\n");
2928                 return 1;
2929         }
2930
2931         if (sscanf(s, "%lu", mhp) <= 0)
2932                 *mhp = 0;
2933
2934         /*
2935          * Global state is always initialized later in hugetlb_init.
2936          * But we need to allocate >= MAX_ORDER hstates here early to still
2937          * use the bootmem allocator.
2938          */
2939         if (hugetlb_max_hstate && parsed_hstate->order >= MAX_ORDER)
2940                 hugetlb_hstate_alloc_pages(parsed_hstate);
2941
2942         last_mhp = mhp;
2943
2944         return 1;
2945 }
2946 __setup("hugepages=", hugetlb_nrpages_setup);
2947
2948 static int __init hugetlb_default_setup(char *s)
2949 {
2950         default_hstate_size = memparse(s, &s);
2951         return 1;
2952 }
2953 __setup("default_hugepagesz=", hugetlb_default_setup);
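/*
 * Illustrative usage (added commentary): because hugepages= is credited
 * to the most recently parsed hugepagesz= (or to the default hstate if
 * none has been seen yet), ordering on the kernel command line matters.
 * On an architecture whose hugepagesz= handler supports these sizes
 * (e.g. x86-64), a command line such as
 *
 *   default_hugepagesz=2M hugepagesz=1G hugepages=2 hugepagesz=2M hugepages=64
 *
 * pre-allocates two 1 GiB pages and sixty-four 2 MiB pages, and makes
 * the 2 MiB hstate the default one reported in /proc/meminfo.
 */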
2954
2955 static unsigned int cpuset_mems_nr(unsigned int *array)
2956 {
2957         int node;
2958         unsigned int nr = 0;
2959
2960         for_each_node_mask(node, cpuset_current_mems_allowed)
2961                 nr += array[node];
2962
2963         return nr;
2964 }
2965
2966 #ifdef CONFIG_SYSCTL
2967 static int proc_hugetlb_doulongvec_minmax(struct ctl_table *table, int write,
2968                                           void *buffer, size_t *length,
2969                                           loff_t *ppos, unsigned long *out)
2970 {
2971         struct ctl_table dup_table;
2972
2973         /*
2974          * To avoid races with __do_proc_doulongvec_minmax(), we duplicate
2975          * @table and alter only the duplicate.
2976          */
2977         dup_table = *table;
2978         dup_table.data = out;
2979
2980         return proc_doulongvec_minmax(&dup_table, write, buffer, length, ppos);
2981 }
2982
2983 static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
2984                          struct ctl_table *table, int write,
2985                          void __user *buffer, size_t *length, loff_t *ppos)
2986 {
2987         struct hstate *h = &default_hstate;
2988         unsigned long tmp = h->max_huge_pages;
2989         int ret;
2990
2991         if (!hugepages_supported())
2992                 return -EOPNOTSUPP;
2993
2994         ret = proc_hugetlb_doulongvec_minmax(table, write, buffer, length, ppos,
2995                                              &tmp);
2996         if (ret)
2997                 goto out;
2998
2999         if (write)
3000                 ret = __nr_hugepages_store_common(obey_mempolicy, h,
3001                                                   NUMA_NO_NODE, tmp, *length);
3002 out:
3003         return ret;
3004 }
3005
3006 int hugetlb_sysctl_handler(struct ctl_table *table, int write,
3007                           void __user *buffer, size_t *length, loff_t *ppos)
3008 {
3009
3010         return hugetlb_sysctl_handler_common(false, table, write,
3011                                                         buffer, length, ppos);
3012 }
3013
3014 #ifdef CONFIG_NUMA
3015 int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write,
3016                           void __user *buffer, size_t *length, loff_t *ppos)
3017 {
3018         return hugetlb_sysctl_handler_common(true, table, write,
3019                                                         buffer, length, ppos);
3020 }
3021 #endif /* CONFIG_NUMA */
3022
3023 int hugetlb_overcommit_handler(struct ctl_table *table, int write,
3024                         void __user *buffer,
3025                         size_t *length, loff_t *ppos)
3026 {
3027         struct hstate *h = &default_hstate;
3028         unsigned long tmp;
3029         int ret;
3030
3031         if (!hugepages_supported())
3032                 return -EOPNOTSUPP;
3033
3034         tmp = h->nr_overcommit_huge_pages;
3035
3036         if (write && hstate_is_gigantic(h))
3037                 return -EINVAL;
3038
3039         ret = proc_hugetlb_doulongvec_minmax(table, write, buffer, length, ppos,
3040                                              &tmp);
3041         if (ret)
3042                 goto out;
3043
3044         if (write) {
3045                 spin_lock(&hugetlb_lock);
3046                 h->nr_overcommit_huge_pages = tmp;
3047                 spin_unlock(&hugetlb_lock);
3048         }
3049 out:
3050         return ret;
3051 }
3052
3053 #endif /* CONFIG_SYSCTL */
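/*
 * Illustrative usage (added commentary): the handlers above back the
 * vm.nr_hugepages, vm.nr_hugepages_mempolicy (NUMA) and
 * vm.nr_overcommit_hugepages sysctls, so the persistent and overcommit
 * pools of the default hstate can be resized at run time, e.g.:
 *
 *   echo 128 > /proc/sys/vm/nr_hugepages
 *   echo 32  > /proc/sys/vm/nr_overcommit_hugepages
 */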
3054
3055 void hugetlb_report_meminfo(struct seq_file *m)
3056 {
3057         struct hstate *h;
3058         unsigned long total = 0;
3059
3060         if (!hugepages_supported())
3061                 return;
3062
3063         for_each_hstate(h) {
3064                 unsigned long count = h->nr_huge_pages;
3065
3066                 total += (PAGE_SIZE << huge_page_order(h)) * count;
3067
3068                 if (h == &default_hstate)
3069                         seq_printf(m,
3070                                    "HugePages_Total:   %5lu\n"
3071                                    "HugePages_Free:    %5lu\n"
3072                                    "HugePages_Rsvd:    %5lu\n"
3073                                    "HugePages_Surp:    %5lu\n"
3074                                    "Hugepagesize:   %8lu kB\n",
3075                                    count,
3076                                    h->free_huge_pages,
3077                                    h->resv_huge_pages,
3078                                    h->surplus_huge_pages,
3079                                    (PAGE_SIZE << huge_page_order(h)) / 1024);
3080         }
3081
3082         seq_printf(m, "Hugetlb:        %8lu kB\n", total / 1024);
3083 }
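/*
 * Illustrative output (added commentary, values made up): the function
 * above produces the hugetlb block of /proc/meminfo, roughly:
 *
 *   HugePages_Total:      64
 *   HugePages_Free:       60
 *   HugePages_Rsvd:        4
 *   HugePages_Surp:        0
 *   Hugepagesize:       2048 kB
 *   Hugetlb:          131072 kB
 */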
3084
3085 int hugetlb_report_node_meminfo(int nid, char *buf)
3086 {
3087         struct hstate *h = &default_hstate;
3088         if (!hugepages_supported())
3089                 return 0;
3090         return sprintf(buf,
3091                 "Node %d HugePages_Total: %5u\n"
3092                 "Node %d HugePages_Free:  %5u\n"
3093                 "Node %d HugePages_Surp:  %5u\n",
3094                 nid, h->nr_huge_pages_node[nid],
3095                 nid, h->free_huge_pages_node[nid],
3096                 nid, h->surplus_huge_pages_node[nid]);
3097 }
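/*
 * Illustrative output (added commentary, values made up): the per-node
 * variant above feeds /sys/devices/system/node/nodeN/meminfo, e.g.:
 *
 *   Node 0 HugePages_Total:    32
 *   Node 0 HugePages_Free:     30
 *   Node 0 HugePages_Surp:      0
 */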
3098
3099 void hugetlb_show_meminfo(void)
3100 {
3101         struct hstate *h;
3102         int nid;
3103
3104         if (!hugepages_supported())
3105                 return;
3106
3107         for_each_node_state(nid, N_MEMORY)
3108                 for_each_hstate(h)
3109                         pr_info("Node %d hugepages_total=%u hugepages_free=%u hugepages_surp=%u hugepages_size=%lukB\n",
3110                                 nid,
3111                                 h->nr_huge_pages_node[nid],
3112                                 h->free_huge_pages_node[nid],
3113                                 h->surplus_huge_pages_node[nid],
3114                                 1UL << (huge_page_order(h) + PAGE_SHIFT - 10));
3115 }
3116
3117 void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm)
3118 {
3119         seq_printf(m, "HugetlbPages:\t%8lu kB\n",
3120                    atomic_long_read(&mm->hugetlb_usage) << (PAGE_SHIFT - 10));
3121 }
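/*
 * Illustrative output (added commentary, value made up): the helper
 * above emits the per-task hugetlb footprint line of /proc/<pid>/status:
 *
 *   HugetlbPages:       4096 kB
 */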
3122
3123 /* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
3124 unsigned long hugetlb_total_pages(void)
3125 {
3126         struct hstate *h;
3127         unsigned long nr_total_pages = 0;
3128
3129         for_each_hstate(h)
3130                 nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h);
3131         return nr_total_pages;
3132 }
3133
3134 static int hugetlb_acct_memory(struct hstate *h, long delta)
3135 {
3136         int ret = -ENOMEM;
3137
3138         spin_lock(&hugetlb_lock);
3139         /*
3140          * When cpuset is configured, it breaks the strict hugetlb page
3141          * reservation as the accounting is done on a global variable. Such
3142          * reservation is completely rubbish in the presence of cpuset because
3143          * the reservation is not checked against page availability for the
3144          * current cpuset. An application can still be OOM'ed by the kernel
3145          * if there are not enough free huge pages in the cpuset the task
3146          * runs in. Attempting to enforce strict accounting with cpusets is
3147          * almost impossible (or too ugly) because cpusets are too fluid:
3148          * tasks and memory nodes can be dynamically moved between cpusets.
3149          *
3150          * The change of semantics for shared hugetlb mappings with cpusets
3151          * is undesirable. However, in order to preserve some of the
3152          * semantics, we fall back to checking against the current free page
3153          * availability as a best effort, hopefully minimizing the impact of
3154          * the semantic change that cpusets introduce.
3155          */
3156         if (delta > 0) {
3157                 if (gather_surplus_pages(h, delta) < 0)
3158                         goto out;
3159
3160                 if (delta > cpuset_mems_nr(h->free_huge_pages_node)) {
3161                         return_unused_surplus_pages(h, delta);
3162                         goto out;
3163                 }
3164         }
3165
3166         ret = 0;
3167         if (delta < 0)
3168                 return_unused_surplus_pages(h, (unsigned long) -delta);
3169
3170 out:
3171         spin_unlock(&hugetlb_lock);
3172         return ret;
3173 }
3174
3175 static void hugetlb_vm_op_open(struct vm_area_struct *vma)
3176 {
3177         struct resv_map *resv = vma_resv_map(vma);
3178
3179         /*
3180          * This new VMA should share its sibling's reservation map if present.
3181          * The VMA will only ever have a valid reservation map pointer where
3182          * it is being copied for another still existing VMA.  As that VMA
3183          * has a reference to the reservation map it cannot disappear until
3184          * after this open call completes.  It is therefore safe to take a
3185          * new reference here without additional locking.
3186          */
3187         if (resv && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
3188                 kref_get(&resv->refs);
3189 }
3190
3191 static void hugetlb_vm_op_close(struct vm_area_struct *vma)
3192 {
3193         struct hstate *h = hstate_vma(vma);
3194         struct resv_map *resv = vma_resv_map(vma);
3195         struct hugepage_subpool *spool = subpool_vma(vma);
3196         unsigned long reserve, start, end;
3197         long gbl_reserve;
3198
3199         if (!resv || !is_vma_resv_set(vma, HPAGE_RESV_OWNER))
3200                 return;
3201
3202         start = vma_hugecache_offset(h, vma, vma->vm_start);
3203         end = vma_hugecache_offset(h, vma, vma->vm_end);
3204
3205         reserve = (end - start) - region_count(resv, start, end);
3206
3207         kref_put(&resv->refs, resv_map_release);
3208
3209         if (reserve) {
3210                 /*
3211                  * Decrement reserve counts.  The global reserve count may be
3212                  * adjusted if the subpool has a minimum size.
3213                  */
3214                 gbl_reserve = hugepage_subpool_put_pages(spool, reserve);
3215                 hugetlb_acct_memory(h, -gbl_reserve);
3216         }
3217 }
3218
3219 static int hugetlb_vm_op_split(struct vm_area_struct *vma, unsigned long addr)
3220 {
3221         if (addr & ~(huge_page_mask(hstate_vma(vma))))
3222                 return -EINVAL;
3223         return 0;
3224 }
3225
3226 static unsigned long hugetlb_vm_op_pagesize(struct vm_area_struct *vma)
3227 {
3228         struct hstate *hstate = hstate_vma(vma);
3229
3230         return 1UL << huge_page_shift(hstate);
3231 }
3232
3233 /*
3234  * We cannot handle pagefaults against hugetlb pages at all.  They cause
3235  * handle_mm_fault() to try to instantiate regular-sized pages in the
3236  * hugepage VMA.  do_page_fault() is supposed to trap this, so BUG if we get
3237  * this far.
3238  */
3239 static vm_fault_t hugetlb_vm_op_fault(struct vm_fault *vmf)
3240 {
3241         BUG();
3242         return 0;
3243 }
3244
3245 /*
3246  * When a new function is introduced to vm_operations_struct and added
3247  * to hugetlb_vm_ops, please consider adding the function to shm_vm_ops.
3248  * This is because, under the System V memory model, mappings created via
3249  * shmget/shmat with "huge page" specified are backed by hugetlbfs files,
3250  * and their original vm_ops are overwritten with shm_vm_ops.
3251  */
3252 const struct vm_operations_struct hugetlb_vm_ops = {
3253         .fault = hugetlb_vm_op_fault,
3254         .open = hugetlb_vm_op_open,
3255         .close = hugetlb_vm_op_close,
3256         .split = hugetlb_vm_op_split,
3257         .pagesize = hugetlb_vm_op_pagesize,
3258 };
3259
3260 static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
3261                                 int writable)
3262 {
3263         pte_t entry;
3264
3265         if (writable) {
3266                 entry = huge_pte_mkwrite(huge_pte_mkdirty(mk_huge_pte(page,
3267                                          vma->vm_page_prot)));
3268         } else {
3269                 entry = huge_pte_wrprotect(mk_huge_pte(page,
3270                                            vma->vm_page_prot));
3271         }
3272         entry = pte_mkyoung(entry);
3273         entry = pte_mkhuge(entry);
3274         entry = arch_make_huge_pte(entry, vma, page, writable);
3275
3276         return entry;
3277 }
3278
3279 static void set_huge_ptep_writable(struct vm_area_struct *vma,
3280                                    unsigned long address, pte_t *ptep)
3281 {
3282         pte_t entry;
3283
3284         entry = huge_pte_mkwrite(huge_pte_mkdirty(huge_ptep_get(ptep)));
3285         if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1))
3286                 update_mmu_cache(vma, address, ptep);
3287 }
3288
3289 bool is_hugetlb_entry_migration(pte_t pte)
3290 {
3291         swp_entry_t swp;
3292
3293         if (huge_pte_none(pte) || pte_present(pte))
3294                 return false;
3295         swp = pte_to_swp_entry(pte);
3296         if (non_swap_entry(swp) && is_migration_entry(swp))
3297                 return true;
3298         else
3299                 return false;
3300 }
3301
3302 static int is_hugetlb_entry_hwpoisoned(pte_t pte)
3303 {
3304         swp_entry_t swp;
3305
3306         if (huge_pte_none(pte) || pte_present(pte))
3307                 return 0;
3308         swp = pte_to_swp_entry(pte);
3309         if (non_swap_entry(swp) && is_hwpoison_entry(swp))
3310                 return 1;
3311         else
3312                 return 0;
3313 }
3314
3315 int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
3316                             struct vm_area_struct *vma)
3317 {
3318         pte_t *src_pte, *dst_pte, entry, dst_entry;
3319         struct page *ptepage;
3320         unsigned long addr;
3321         int cow;
3322         struct hstate *h = hstate_vma(vma);
3323         unsigned long sz = huge_page_size(h);
3324         unsigned long mmun_start;       /* For mmu_notifiers */
3325         unsigned long mmun_end;         /* For mmu_notifiers */
3326         int ret = 0;
3327
3328         cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
3329
3330         mmun_start = vma->vm_start;
3331         mmun_end = vma->vm_end;
3332         if (cow)
3333                 mmu_notifier_invalidate_range_start(src, mmun_start, mmun_end);
3334
3335         for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) {
3336                 spinlock_t *src_ptl, *dst_ptl;
3337                 src_pte = huge_pte_offset(src, addr, sz);
3338                 if (!src_pte)
3339                         continue;
3340                 dst_pte = huge_pte_alloc(dst, addr, sz);
3341                 if (!dst_pte) {
3342                         ret = -ENOMEM;
3343                         break;
3344                 }
3345
3346                 /*
3347                  * If the pagetables are shared don't copy or take references.
3348                  * dst_pte == src_pte is the common case of src/dest sharing.
3349                  *
3350                  * However, src could have 'unshared' and dst shares with
3351                  * another vma.  If dst_pte !none, this implies sharing.
3352                  * Check here before taking page table lock, and once again
3353                  * after taking the lock below.
3354                  */
3355                 dst_entry = huge_ptep_get(dst_pte);
3356                 if ((dst_pte == src_pte) || !huge_pte_none(dst_entry))
3357                         continue;
3358
3359                 dst_ptl = huge_pte_lock(h, dst, dst_pte);
3360                 src_ptl = huge_pte_lockptr(h, src, src_pte);
3361                 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
3362                 entry = huge_ptep_get(src_pte);
3363                 dst_entry = huge_ptep_get(dst_pte);
3364                 if (huge_pte_none(entry) || !huge_pte_none(dst_entry)) {
3365                         /*
3366                          * Skip if src entry none.  Also, skip in the
3367                          * unlikely case dst entry !none as this implies
3368                          * sharing with another vma.
3369                          */
3370                         ;
3371                 } else if (unlikely(is_hugetlb_entry_migration(entry) ||
3372                                     is_hugetlb_entry_hwpoisoned(entry))) {
3373                         swp_entry_t swp_entry = pte_to_swp_entry(entry);
3374
3375                         if (is_write_migration_entry(swp_entry) && cow) {
3376                                 /*
3377                                  * COW mappings require pages in both
3378                                  * parent and child to be set to read.
3379                                  */
3380                                 make_migration_entry_read(&swp_entry);
3381                                 entry = swp_entry_to_pte(swp_entry);
3382                                 set_huge_swap_pte_at(src, addr, src_pte,
3383                                                      entry, sz);
3384                         }
3385                         set_huge_swap_pte_at(dst, addr, dst_pte, entry, sz);
3386                 } else {
3387                         if (cow) {
3388                                 /*
3389                                  * No need to notify as we are downgrading page
3390                                  * table protection not changing it to point
3391                                  * to a new page.
3392                                  *
3393                                  * See Documentation/vm/mmu_notifier.rst
3394                                  */
3395                                 huge_ptep_set_wrprotect(src, addr, src_pte);
3396                         }
3397                         entry = huge_ptep_get(src_pte);
3398                         ptepage = pte_page(entry);
3399                         get_page(ptepage);
3400                         page_dup_rmap(ptepage, true);
3401                         set_huge_pte_at(dst, addr, dst_pte, entry);
3402                         hugetlb_count_add(pages_per_huge_page(h), dst);
3403                 }
3404                 spin_unlock(src_ptl);
3405                 spin_unlock(dst_ptl);
3406         }
3407
3408         if (cow)
3409                 mmu_notifier_invalidate_range_end(src, mmun_start, mmun_end);
3410
3411         return ret;
3412 }
3413
3414 void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
3415                             unsigned long start, unsigned long end,
3416                             struct page *ref_page)
3417 {
3418         struct mm_struct *mm = vma->vm_mm;
3419         unsigned long address;
3420         pte_t *ptep;
3421         pte_t pte;
3422         spinlock_t *ptl;
3423         struct page *page;
3424         struct hstate *h = hstate_vma(vma);
3425         unsigned long sz = huge_page_size(h);
3426         unsigned long mmun_start = start;       /* For mmu_notifiers */
3427         unsigned long mmun_end   = end;         /* For mmu_notifiers */
3428         bool force_flush = false;
3429
3430         WARN_ON(!is_vm_hugetlb_page(vma));
3431         BUG_ON(start & ~huge_page_mask(h));
3432         BUG_ON(end & ~huge_page_mask(h));
3433
3434         /*
3435          * This is a hugetlb VMA; all the pte entries should point
3436          * to a huge page.
3437          */
3438         tlb_remove_check_page_size_change(tlb, sz);
3439         tlb_start_vma(tlb, vma);
3440
3441         /*
3442          * If sharing possible, alert mmu notifiers of worst case.
3443          */
3444         adjust_range_if_pmd_sharing_possible(vma, &mmun_start, &mmun_end);
3445         mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
3446         address = start;
3447         for (; address < end; address += sz) {
3448                 ptep = huge_pte_offset(mm, address, sz);
3449                 if (!ptep)
3450                         continue;
3451
3452                 ptl = huge_pte_lock(h, mm, ptep);
3453                 if (huge_pmd_unshare(mm, &address, ptep)) {
3454                         spin_unlock(ptl);
3455                         tlb_flush_pmd_range(tlb, address & PUD_MASK, PUD_SIZE);
3456                         force_flush = true;
3457                         continue;
3458                 }
3459
3460                 pte = huge_ptep_get(ptep);
3461                 if (huge_pte_none(pte)) {
3462                         spin_unlock(ptl);
3463                         continue;
3464                 }
3465
3466                 /*
3467                  * Migrating hugepage or HWPoisoned hugepage is already
3468                  * unmapped and its refcount is dropped, so just clear pte here.
3469                  */
3470                 if (unlikely(!pte_present(pte))) {
3471                         huge_pte_clear(mm, address, ptep, sz);
3472                         spin_unlock(ptl);
3473                         continue;
3474                 }
3475
3476                 page = pte_page(pte);
3477                 /*
3478                  * If a reference page is supplied, it is because a specific
3479                  * page is being unmapped, not a range. Ensure the page we
3480                  * are about to unmap is the actual page of interest.
3481                  */
3482                 if (ref_page) {
3483                         if (page != ref_page) {
3484                                 spin_unlock(ptl);
3485                                 continue;
3486                         }
3487                         /*
3488                          * Mark the VMA as having unmapped its page so that
3489                          * future faults in this VMA will fail rather than
3490                          * looking like data was lost
3491                          */
3492                         set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED);
3493                 }
3494
3495                 pte = huge_ptep_get_and_clear(mm, address, ptep);
3496                 tlb_remove_huge_tlb_entry(h, tlb, ptep, address);
3497                 if (huge_pte_dirty(pte))
3498                         set_page_dirty(page);
3499
3500                 hugetlb_count_sub(pages_per_huge_page(h), mm);
3501                 page_remove_rmap(page, true);
3502
3503                 spin_unlock(ptl);
3504                 tlb_remove_page_size(tlb, page, huge_page_size(h));
3505                 /*
3506                  * Bail out after unmapping reference page if supplied
3507                  */
3508                 if (ref_page)
3509                         break;
3510         }
3511         mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
3512         tlb_end_vma(tlb, vma);
3513
3514         /*
3515          * If we unshared PMDs, the TLB flush was not recorded in mmu_gather. We
3516          * could defer the flush until now, since by holding i_mmap_rwsem we
3517          * guaranteed that the last reference would not be dropped. But we must
3518          * do the flushing before we return, as otherwise i_mmap_rwsem will be
3519          * dropped and the last reference to the shared PMDs page might be
3520          * dropped as well.
3521          *
3522          * In theory we could defer the freeing of the PMD pages as well, but
3523          * huge_pmd_unshare() relies on the exact page_count for the PMD page to
3524          * detect sharing, so we cannot defer the release of the page either.
3525          * Instead, do flush now.
3526          */
3527         if (force_flush)
3528                 tlb_flush_mmu_tlbonly(tlb);
3529 }
3530
3531 void __unmap_hugepage_range_final(struct mmu_gather *tlb,
3532                           struct vm_area_struct *vma, unsigned long start,
3533                           unsigned long end, struct page *ref_page)
3534 {
3535         __unmap_hugepage_range(tlb, vma, start, end, ref_page);
3536
3537         /*
3538          * Clear this flag so that x86's huge_pmd_share page_table_shareable
3539          * test will fail on a vma being torn down, and not grab a page table
3540          * on its way out.  We're lucky that the flag has such an appropriate
3541          * name, and can in fact be safely cleared here. We could clear it
3542          * before the __unmap_hugepage_range above, but all that's necessary
3543          * is to clear it before releasing the i_mmap_rwsem. This works
3544          * because in the context this is called, the VMA is about to be
3545          * destroyed and the i_mmap_rwsem is held.
3546          */
3547         vma->vm_flags &= ~VM_MAYSHARE;
3548 }
3549
3550 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
3551                           unsigned long end, struct page *ref_page)
3552 {
3553         struct mm_struct *mm;
3554         struct mmu_gather tlb;
3555         unsigned long tlb_start = start;
3556         unsigned long tlb_end = end;
3557
3558         /*
3559          * If shared PMDs were possibly used within this vma range, adjust
3560          * start/end for worst case tlb flushing.
3561          * Note that we cannot be sure if PMDs are shared until we try to
3562          * unmap pages.  However, we want to make sure TLB flushing covers
3563          * the largest possible range.
3564          */
3565         adjust_range_if_pmd_sharing_possible(vma, &tlb_start, &tlb_end);
3566
3567         mm = vma->vm_mm;
3568
3569         tlb_gather_mmu(&tlb, mm, tlb_start, tlb_end);
3570         __unmap_hugepage_range(&tlb, vma, start, end, ref_page);
3571         tlb_finish_mmu(&tlb, tlb_start, tlb_end);
3572 }
3573
3574 /*
3575  * This is called when the original mapper is failing to COW a MAP_PRIVATE
3576  * mapping it owns the reserve page for. The intention is to unmap the page
3577  * from other VMAs and let the children be SIGKILLed if they are faulting the
3578  * same region.
3579  */
3580 static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
3581                               struct page *page, unsigned long address)
3582 {
3583         struct hstate *h = hstate_vma(vma);
3584         struct vm_area_struct *iter_vma;
3585         struct address_space *mapping;
3586         pgoff_t pgoff;
3587
3588         /*
3589          * vm_pgoff is in PAGE_SIZE units, hence the different calculation
3590          * from page cache lookup which is in HPAGE_SIZE units.
3591          */
3592         address = address & huge_page_mask(h);
3593         pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) +
3594                         vma->vm_pgoff;
3595         mapping = vma->vm_file->f_mapping;
3596
3597         /*
3598          * Take the mapping lock for the duration of the table walk. As
3599          * this mapping is shared between all the VMAs,
3600          * __unmap_hugepage_range() is called with the lock already held.
3601          */
3602         i_mmap_lock_write(mapping);
3603         vma_interval_tree_foreach(iter_vma, &mapping->i_mmap, pgoff, pgoff) {
3604                 /* Do not unmap the current VMA */
3605                 if (iter_vma == vma)
3606                         continue;
3607
3608                 /*
3609                  * Shared VMAs have their own reserves and do not affect
3610                  * MAP_PRIVATE accounting but it is possible that a shared
3611                  * VMA is using the same page so check and skip such VMAs.
3612                  */
3613                 if (iter_vma->vm_flags & VM_MAYSHARE)
3614                         continue;
3615
3616                 /*
3617                  * Unmap the page from other VMAs without their own reserves.
3618                  * They get marked to be SIGKILLed if they fault in these
3619                  * areas. This is because a future no-page fault on this VMA
3620                  * could insert a zeroed page instead of the data existing
3621                  * from the time of fork. This would look like data corruption
3622                  */
3623                 if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
3624                         unmap_hugepage_range(iter_vma, address,
3625                                              address + huge_page_size(h), page);
3626         }
3627         i_mmap_unlock_write(mapping);
3628 }
3629
3630 /*
3631  * hugetlb_cow() should be called with the page lock of the original hugepage held.
3632  * Called with hugetlb_instantiation_mutex held and pte_page locked so we
3633  * cannot race with other handlers or page migration.
3634  * Keep the pte_same checks anyway to make transition from the mutex easier.
3635  */
3636 static vm_fault_t hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
3637                        unsigned long address, pte_t *ptep,
3638                        struct page *pagecache_page, spinlock_t *ptl)
3639 {
3640         pte_t pte;
3641         struct hstate *h = hstate_vma(vma);
3642         struct page *old_page, *new_page;
3643         int outside_reserve = 0;
3644         vm_fault_t ret = 0;
3645         unsigned long mmun_start;       /* For mmu_notifiers */
3646         unsigned long mmun_end;         /* For mmu_notifiers */
3647         unsigned long haddr = address & huge_page_mask(h);
3648
3649         pte = huge_ptep_get(ptep);
3650         old_page = pte_page(pte);
3651
3652 retry_avoidcopy:
3653         /* If no-one else is actually using this page, avoid the copy
3654          * and just make the page writable */
3655         if (page_mapcount(old_page) == 1 && PageAnon(old_page)) {
3656                 page_move_anon_rmap(old_page, vma);
3657                 set_huge_ptep_writable(vma, haddr, ptep);
3658                 return 0;
3659         }
3660
3661         /*
3662          * If the process that created a MAP_PRIVATE mapping is about to
3663          * perform a COW due to a shared page count, attempt to satisfy
3664          * the allocation without using the existing reserves. The pagecache
3665          * page is used to determine if the reserve at this address was
3666          * consumed or not. If reserves were used, a partial faulted mapping
3667          * at the time of fork() could consume its reserves on COW instead
3668          * of the full address range.
3669          */
3670         if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
3671                         old_page != pagecache_page)
3672                 outside_reserve = 1;
3673
3674         get_page(old_page);
3675
3676         /*
3677          * Drop page table lock as buddy allocator may be called. It will
3678          * be acquired again before returning to the caller, as expected.
3679          */
3680         spin_unlock(ptl);
3681         new_page = alloc_huge_page(vma, haddr, outside_reserve);
3682
3683         if (IS_ERR(new_page)) {
3684                 /*
3685                  * If a process owning a MAP_PRIVATE mapping fails to COW,
3686                  * it is due to references held by a child and an insufficient
3687                  * huge page pool. To guarantee the original mapper's
3688                  * reliability, unmap the page from child processes. The child
3689                  * may get SIGKILLed if it later faults.
3690                  */
3691                 if (outside_reserve) {
3692                         put_page(old_page);
3693                         BUG_ON(huge_pte_none(pte));
3694                         unmap_ref_private(mm, vma, old_page, haddr);
3695                         BUG_ON(huge_pte_none(pte));
3696                         spin_lock(ptl);
3697                         ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
3698                         if (likely(ptep &&
3699                                    pte_same(huge_ptep_get(ptep), pte)))
3700                                 goto retry_avoidcopy;
3701                         /*
3702                          * race occurs while re-acquiring page table
3703                          * lock, and our job is done.
3704                          */
3705                         return 0;
3706                 }
3707
3708                 ret = vmf_error(PTR_ERR(new_page));
3709                 goto out_release_old;
3710         }
3711
3712         /*
3713          * When the original hugepage is a shared one, it does not have
3714          * an anon_vma prepared.
3715          */
3716         if (unlikely(anon_vma_prepare(vma))) {
3717                 ret = VM_FAULT_OOM;
3718                 goto out_release_all;
3719         }
3720
3721         copy_user_huge_page(new_page, old_page, address, vma,
3722                             pages_per_huge_page(h));
3723         __SetPageUptodate(new_page);
3724
3725         mmun_start = haddr;
3726         mmun_end = mmun_start + huge_page_size(h);
3727         mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
3728
3729         /*
3730          * Retake the page table lock to check for racing updates
3731          * before the page tables are altered
3732          */
3733         spin_lock(ptl);
3734         ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
3735         if (likely(ptep && pte_same(huge_ptep_get(ptep), pte))) {
3736                 ClearPagePrivate(new_page);
3737
3738                 /* Break COW */
3739                 huge_ptep_clear_flush(vma, haddr, ptep);
3740                 mmu_notifier_invalidate_range(mm, mmun_start, mmun_end);
3741                 set_huge_pte_at(mm, haddr, ptep,
3742                                 make_huge_pte(vma, new_page, 1));
3743                 page_remove_rmap(old_page, true);
3744                 hugepage_add_new_anon_rmap(new_page, vma, haddr);
3745                 set_page_huge_active(new_page);
3746                 /* Make the old page be freed below */
3747                 new_page = old_page;
3748         }
3749         spin_unlock(ptl);
3750         mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
3751 out_release_all:
3752         restore_reserve_on_error(h, vma, haddr, new_page);
3753         put_page(new_page);
3754 out_release_old:
3755         put_page(old_page);
3756
3757         spin_lock(ptl); /* Caller expects lock to be held */
3758         return ret;
3759 }
3760
3761 /* Return the pagecache page at a given address within a VMA */
3762 static struct page *hugetlbfs_pagecache_page(struct hstate *h,
3763                         struct vm_area_struct *vma, unsigned long address)
3764 {
3765         struct address_space *mapping;
3766         pgoff_t idx;
3767
3768         mapping = vma->vm_file->f_mapping;
3769         idx = vma_hugecache_offset(h, vma, address);
3770
3771         return find_lock_page(mapping, idx);
3772 }
3773
3774 /*
3775  * Return whether there is a pagecache page to back the given address within the VMA.
3776  * Caller follow_hugetlb_page() holds page_table_lock so we cannot lock_page.
3777  */
3778 static bool hugetlbfs_pagecache_present(struct hstate *h,
3779                         struct vm_area_struct *vma, unsigned long address)
3780 {
3781         struct address_space *mapping;
3782         pgoff_t idx;
3783         struct page *page;
3784
3785         mapping = vma->vm_file->f_mapping;
3786         idx = vma_hugecache_offset(h, vma, address);
3787
3788         page = find_get_page(mapping, idx);
3789         if (page)
3790                 put_page(page);
3791         return page != NULL;
3792 }
3793
3794 int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
3795                            pgoff_t idx)
3796 {
3797         struct inode *inode = mapping->host;
3798         struct hstate *h = hstate_inode(inode);
3799         int err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
3800
3801         if (err)
3802                 return err;
3803         ClearPagePrivate(page);
3804
3805         /*
3806          * set page dirty so that it will not be removed from cache/file
3807          * by non-hugetlbfs specific code paths.
3808          */
3809         set_page_dirty(page);
3810
3811         spin_lock(&inode->i_lock);
3812         inode->i_blocks += blocks_per_huge_page(h);
3813         spin_unlock(&inode->i_lock);
3814         return 0;
3815 }
3816
3817 static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
3818                         struct vm_area_struct *vma,
3819                         struct address_space *mapping, pgoff_t idx,
3820                         unsigned long address, pte_t *ptep, unsigned int flags)
3821 {
3822         struct hstate *h = hstate_vma(vma);
3823         vm_fault_t ret = VM_FAULT_SIGBUS;
3824         int anon_rmap = 0;
3825         unsigned long size;
3826         struct page *page;
3827         pte_t new_pte;
3828         spinlock_t *ptl;
3829         unsigned long haddr = address & huge_page_mask(h);
3830         bool new_page = false;
3831
3832         /*
3833          * Currently, we are forced to kill the process in the event the
3834          * original mapper has unmapped pages from the child due to a failed
3835          * COW. Warn that such a situation has occurred as it may not be obvious
3836          */
3837         if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
3838                 pr_warn_ratelimited("PID %d killed due to inadequate hugepage pool\n",
3839                            current->pid);
3840                 return ret;
3841         }
3842
3843         /*
3844          * Use page lock to guard against racing truncation
3845          * before we get page_table_lock.
3846          */
3847 retry:
3848         page = find_lock_page(mapping, idx);
3849         if (!page) {
3850                 size = i_size_read(mapping->host) >> huge_page_shift(h);
3851                 if (idx >= size)
3852                         goto out;
3853
3854                 /*
3855                  * Check for page in userfault range
3856                  */
3857                 if (userfaultfd_missing(vma)) {
3858                         u32 hash;
3859                         struct vm_fault vmf = {
3860                                 .vma = vma,
3861                                 .address = haddr,
3862                                 .flags = flags,
3863                                 /*
3864                                  * Hard to debug if it ends up being
3865                                  * used by a callee that assumes
3866                                  * something about the other
3867                                  * uninitialized fields... same as in
3868                                  * memory.c
3869                                  */
3870                         };
3871
3872                         /*
3873                          * hugetlb_fault_mutex must be dropped before
3874                          * handling userfault.  Reacquire after handling
3875                          * fault to make calling code simpler.
3876                          */
3877                         hash = hugetlb_fault_mutex_hash(h, mapping, idx);
3878                         mutex_unlock(&hugetlb_fault_mutex_table[hash]);
3879                         ret = handle_userfault(&vmf, VM_UFFD_MISSING);
3880                         mutex_lock(&hugetlb_fault_mutex_table[hash]);
3881                         goto out;
3882                 }
3883
3884                 page = alloc_huge_page(vma, haddr, 0);
3885                 if (IS_ERR(page)) {
3886                         ret = vmf_error(PTR_ERR(page));
3887                         goto out;
3888                 }
3889                 clear_huge_page(page, address, pages_per_huge_page(h));
3890                 __SetPageUptodate(page);
3891                 new_page = true;
3892
3893                 if (vma->vm_flags & VM_MAYSHARE) {
3894                         int err = huge_add_to_page_cache(page, mapping, idx);
3895                         if (err) {
3896                                 put_page(page);
3897                                 if (err == -EEXIST)
3898                                         goto retry;
3899                                 goto out;
3900                         }
3901                 } else {
3902                         lock_page(page);
3903                         if (unlikely(anon_vma_prepare(vma))) {
3904                                 ret = VM_FAULT_OOM;
3905                                 goto backout_unlocked;
3906                         }
3907                         anon_rmap = 1;
3908                 }
3909         } else {
3910                 /*
3911                  * If a memory error occurs between mmap() and fault, some processes
3912                  * don't have a hwpoisoned swap entry for the errored virtual address.
3913                  * So we need to block hugepage fault by PG_hwpoison bit check.
3914                  */
3915                 if (unlikely(PageHWPoison(page))) {
3916                         ret = VM_FAULT_HWPOISON_LARGE |
3917                                 VM_FAULT_SET_HINDEX(hstate_index(h));
3918                         goto backout_unlocked;
3919                 }
3920         }
3921
3922         /*
3923          * If we are going to COW a private mapping later, we examine the
3924          * pending reservations for this page now. This will ensure that
3925          * any allocations necessary to record that reservation occur outside
3926          * the spinlock.
3927          */
3928         if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
3929                 if (vma_needs_reservation(h, vma, haddr) < 0) {
3930                         ret = VM_FAULT_OOM;
3931                         goto backout_unlocked;
3932                 }
3933                 /* Just decrements count, does not deallocate */
3934                 vma_end_reservation(h, vma, haddr);
3935         }
3936
3937         ptl = huge_pte_lock(h, mm, ptep);
3938         size = i_size_read(mapping->host) >> huge_page_shift(h);
3939         if (idx >= size)
3940                 goto backout;
3941
3942         ret = 0;
3943         if (!huge_pte_none(huge_ptep_get(ptep)))
3944                 goto backout;
3945
3946         if (anon_rmap) {
3947                 ClearPagePrivate(page);
3948                 hugepage_add_new_anon_rmap(page, vma, haddr);
3949         } else
3950                 page_dup_rmap(page, true);
3951         new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
3952                                 && (vma->vm_flags & VM_SHARED)));
3953         set_huge_pte_at(mm, haddr, ptep, new_pte);
3954
3955         hugetlb_count_add(pages_per_huge_page(h), mm);
3956         if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
3957                 /* Optimization, do the COW without a second fault */
3958                 ret = hugetlb_cow(mm, vma, address, ptep, page, ptl);
3959         }
3960
3961         spin_unlock(ptl);
3962
3963         /*
3964          * Only make newly allocated pages active.  Existing pages found
3965          * in the pagecache could be !page_huge_active() if they have been
3966          * isolated for migration.
3967          */
3968         if (new_page)
3969                 set_page_huge_active(page);
3970
3971         unlock_page(page);
3972 out:
3973         return ret;
3974
3975 backout:
3976         spin_unlock(ptl);
3977 backout_unlocked:
3978         unlock_page(page);
3979         restore_reserve_on_error(h, vma, haddr, page);
3980         put_page(page);
3981         goto out;
3982 }
3983
3984 #ifdef CONFIG_SMP
3985 u32 hugetlb_fault_mutex_hash(struct hstate *h, struct address_space *mapping,
3986                             pgoff_t idx)
3987 {
3988         unsigned long key[2];
3989         u32 hash;
3990
3991         key[0] = (unsigned long) mapping;
3992         key[1] = idx;
3993
3994         hash = jhash2((u32 *)&key, sizeof(key)/(sizeof(u32)), 0);
3995
3996         return hash & (num_fault_mutexes - 1);
3997 }
3998 #else
3999 /*
4000  * For uniprocessor systems we always use a single mutex, so just
4001  * return 0 and avoid the hashing overhead.
4002  */
4003 u32 hugetlb_fault_mutex_hash(struct hstate *h, struct address_space *mapping,
4004                             pgoff_t idx)
4005 {
4006         return 0;
4007 }
4008 #endif
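/*
 * Illustrative locking pattern (added commentary, mirroring the callers
 * in this file): faults and UFFDIO_COPY on the same (mapping, index)
 * serialize on the mutex selected by the hash above:
 *
 *   hash = hugetlb_fault_mutex_hash(h, mapping, idx);
 *   mutex_lock(&hugetlb_fault_mutex_table[hash]);
 *   ... instantiate or look up the page ...
 *   mutex_unlock(&hugetlb_fault_mutex_table[hash]);
 */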
4009
4010 vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
4011                         unsigned long address, unsigned int flags)
4012 {
4013         pte_t *ptep, entry;
4014         spinlock_t *ptl;
4015         vm_fault_t ret;
4016         u32 hash;
4017         pgoff_t idx;
4018         struct page *page = NULL;
4019         struct page *pagecache_page = NULL;
4020         struct hstate *h = hstate_vma(vma);
4021         struct address_space *mapping;
4022         int need_wait_lock = 0;
4023         unsigned long haddr = address & huge_page_mask(h);
4024
4025         ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
4026         if (ptep) {
4027                 entry = huge_ptep_get(ptep);
4028                 if (unlikely(is_hugetlb_entry_migration(entry))) {
4029                         migration_entry_wait_huge(vma, mm, ptep);
4030                         return 0;
4031                 } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
4032                         return VM_FAULT_HWPOISON_LARGE |
4033                                 VM_FAULT_SET_HINDEX(hstate_index(h));
4034         } else {
4035                 ptep = huge_pte_alloc(mm, haddr, huge_page_size(h));
4036                 if (!ptep)
4037                         return VM_FAULT_OOM;
4038         }
4039
4040         mapping = vma->vm_file->f_mapping;
4041         idx = vma_hugecache_offset(h, vma, haddr);
4042
4043         /*
4044          * Serialize hugepage allocation and instantiation, so that we don't
4045          * get spurious allocation failures if two CPUs race to instantiate
4046          * the same page in the page cache.
4047          */
4048         hash = hugetlb_fault_mutex_hash(h, mapping, idx);
4049         mutex_lock(&hugetlb_fault_mutex_table[hash]);
4050
4051         entry = huge_ptep_get(ptep);
4052         if (huge_pte_none(entry)) {
4053                 ret = hugetlb_no_page(mm, vma, mapping, idx, address, ptep, flags);
4054                 goto out_mutex;
4055         }
4056
4057         ret = 0;
4058
4059         /*
4060          * entry could be a migration/hwpoison entry at this point, so this
4061          * check prevents the code below from assuming that we have an
4062          * active hugepage in the page cache. The goto expects a second page
4063          * fault, whose is_hugetlb_entry_(migration|hwpoisoned) check will
4064          * properly handle it.
4065          */
4066         if (!pte_present(entry))
4067                 goto out_mutex;
4068
4069         /*
4070          * If we are going to COW the mapping later, we examine the pending
4071          * reservations for this page now. This will ensure that any
4072          * allocations necessary to record that reservation occur outside the
4073          * spinlock. For private mappings, we also lookup the pagecache
4074          * page now as it is used to determine if a reservation has been
4075          * consumed.
4076          */
4077         if ((flags & FAULT_FLAG_WRITE) && !huge_pte_write(entry)) {
4078                 if (vma_needs_reservation(h, vma, haddr) < 0) {
4079                         ret = VM_FAULT_OOM;
4080                         goto out_mutex;
4081                 }
4082                 /* Just decrements count, does not deallocate */
4083                 vma_end_reservation(h, vma, haddr);
4084
4085                 if (!(vma->vm_flags & VM_MAYSHARE))
4086                         pagecache_page = hugetlbfs_pagecache_page(h,
4087                                                                 vma, haddr);
4088         }
4089
4090         ptl = huge_pte_lock(h, mm, ptep);
4091
4092         /* Check for a racing update before calling hugetlb_cow */
4093         if (unlikely(!pte_same(entry, huge_ptep_get(ptep))))
4094                 goto out_ptl;
4095
4096         /*
4097          * hugetlb_cow() requires page locks of pte_page(entry) and
4098          * pagecache_page, so here we need take the former one
4099          * when page != pagecache_page or !pagecache_page.
4100          */
4101         page = pte_page(entry);
4102         if (page != pagecache_page)
4103                 if (!trylock_page(page)) {
4104                         need_wait_lock = 1;
4105                         goto out_ptl;
4106                 }
4107
4108         get_page(page);
4109
4110         if (flags & FAULT_FLAG_WRITE) {
4111                 if (!huge_pte_write(entry)) {
4112                         ret = hugetlb_cow(mm, vma, address, ptep,
4113                                           pagecache_page, ptl);
4114                         goto out_put_page;
4115                 }
4116                 entry = huge_pte_mkdirty(entry);
4117         }
4118         entry = pte_mkyoung(entry);
4119         if (huge_ptep_set_access_flags(vma, haddr, ptep, entry,
4120                                                 flags & FAULT_FLAG_WRITE))
4121                 update_mmu_cache(vma, haddr, ptep);
4122 out_put_page:
4123         if (page != pagecache_page)
4124                 unlock_page(page);
4125         put_page(page);
4126 out_ptl:
4127         spin_unlock(ptl);
4128
4129         if (pagecache_page) {
4130                 unlock_page(pagecache_page);
4131                 put_page(pagecache_page);
4132         }
4133 out_mutex:
4134         mutex_unlock(&hugetlb_fault_mutex_table[hash]);
4135         /*
4136          * Generally it's safe to hold a refcount while waiting for the page
4137          * lock. Here, however, we only wait to defer the next page fault and
4138          * avoid a busy loop; the page is not touched after it is unlocked and
4139          * before we return from the current page fault, so we are safe from
4140          * accessing a freed page even though we wait without taking a refcount.
4141          */
4142         if (need_wait_lock)
4143                 wait_on_page_locked(page);
4144         return ret;
4145 }
4146
4147 /*
4148  * Used by userfaultfd UFFDIO_COPY.  Based on mcopy_atomic_pte with
4149  * modifications for huge pages.
4150  */
4151 int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
4152                             pte_t *dst_pte,
4153                             struct vm_area_struct *dst_vma,
4154                             unsigned long dst_addr,
4155                             unsigned long src_addr,
4156                             struct page **pagep)
4157 {
4158         struct address_space *mapping;
4159         pgoff_t idx;
4160         unsigned long size;
4161         int vm_shared = dst_vma->vm_flags & VM_SHARED;
4162         struct hstate *h = hstate_vma(dst_vma);
4163         pte_t _dst_pte;
4164         spinlock_t *ptl;
4165         int ret;
4166         struct page *page;
4167
4168         if (!*pagep) {
4169                 /* If a page already exists, then it's UFFDIO_COPY for
4170                  * a non-missing case. Return -EEXIST.
4171                  */
4172                 if (vm_shared &&
4173                     hugetlbfs_pagecache_present(h, dst_vma, dst_addr)) {
4174                         ret = -EEXIST;
4175                         goto out;
4176                 }
4177
4178                 page = alloc_huge_page(dst_vma, dst_addr, 0);
4179                 if (IS_ERR(page)) {
4180                         ret = -ENOMEM;
4181                         goto out;
4182                 }
4183
4184                 ret = copy_huge_page_from_user(page,
4185                                                 (const void __user *) src_addr,
4186                                                 pages_per_huge_page(h), false);
4187
4188                 /* fallback to copy_from_user outside mmap_sem */
4189                 if (unlikely(ret)) {
4190                         ret = -ENOENT;
4191                         *pagep = page;
4192                         /* don't free the page */
4193                         goto out;
4194                 }
4195         } else {
4196                 page = *pagep;
4197                 *pagep = NULL;
4198         }
4199
4200         /*
4201          * The memory barrier inside __SetPageUptodate makes sure that
4202          * preceding stores to the page contents become visible before
4203          * the set_pte_at() write.
4204          */
4205         __SetPageUptodate(page);
4206
4207         mapping = dst_vma->vm_file->f_mapping;
4208         idx = vma_hugecache_offset(h, dst_vma, dst_addr);
4209
4210         /*
4211          * If shared, add to page cache
4212          */
4213         if (vm_shared) {
4214                 size = i_size_read(mapping->host) >> huge_page_shift(h);
4215                 ret = -EFAULT;
4216                 if (idx >= size)
4217                         goto out_release_nounlock;
4218
4219                 /*
4220                  * Serialization between remove_inode_hugepages() and
4221                  * huge_add_to_page_cache() below happens through the
4222                  * hugetlb_fault_mutex_table, which here must be held by
4223                  * the caller.
4224                  */
4225                 ret = huge_add_to_page_cache(page, mapping, idx);
4226                 if (ret)
4227                         goto out_release_nounlock;
4228         }
4229
4230         ptl = huge_pte_lockptr(h, dst_mm, dst_pte);
4231         spin_lock(ptl);
4232
4233         /*
4234          * Recheck the i_size after holding PT lock to make sure not
4235          * to leave any page mapped (as page_mapped()) beyond the end
4236          * of the i_size (remove_inode_hugepages() is strict about
4237          * enforcing that). If we bail out here, we'll also leave a
4238          * page in the radix tree in the vm_shared case beyond the end
4239          * of the i_size, but remove_inode_hugepages() will take care
4240          * of it as soon as we drop the hugetlb_fault_mutex_table.
4241          */
4242         size = i_size_read(mapping->host) >> huge_page_shift(h);
4243         ret = -EFAULT;
4244         if (idx >= size)
4245                 goto out_release_unlock;
4246
4247         ret = -EEXIST;
4248         if (!huge_pte_none(huge_ptep_get(dst_pte)))
4249                 goto out_release_unlock;
4250
4251         if (vm_shared) {
4252                 page_dup_rmap(page, true);
4253         } else {
4254                 ClearPagePrivate(page);
4255                 hugepage_add_new_anon_rmap(page, dst_vma, dst_addr);
4256         }
4257
4258         _dst_pte = make_huge_pte(dst_vma, page, dst_vma->vm_flags & VM_WRITE);
4259         if (dst_vma->vm_flags & VM_WRITE)
4260                 _dst_pte = huge_pte_mkdirty(_dst_pte);
4261         _dst_pte = pte_mkyoung(_dst_pte);
4262
4263         set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
4264
4265         (void)huge_ptep_set_access_flags(dst_vma, dst_addr, dst_pte, _dst_pte,
4266                                         dst_vma->vm_flags & VM_WRITE);
4267         hugetlb_count_add(pages_per_huge_page(h), dst_mm);
4268
4269         /* No need to invalidate - it was non-present before */
4270         update_mmu_cache(dst_vma, dst_addr, dst_pte);
4271
4272         spin_unlock(ptl);
4273         set_page_huge_active(page);
4274         if (vm_shared)
4275                 unlock_page(page);
4276         ret = 0;
4277 out:
4278         return ret;
4279 out_release_unlock:
4280         spin_unlock(ptl);
4281         if (vm_shared)
4282                 unlock_page(page);
4283 out_release_nounlock:
4284         put_page(page);
4285         goto out;
4286 }
4287
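/*
 * follow_hugetlb_page() - resolve user pages of a hugetlb vma for GUP.
 *
 * Walks @vma from *position for up to *nr_pages base pages, calling
 * hugetlb_fault() for entries that are absent, swap (migration/hwpoison)
 * entries, or entries lacking write permission when FOLL_WRITE is set.
 * When @pages/@vmas are non-NULL they are filled in starting at index @i,
 * each page with an extra reference.  *position and *nr_pages are updated
 * to reflect the remaining work; returns the updated @i, or a negative
 * errno if @i is still zero.
 */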
4288 long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
4289                          struct page **pages, struct vm_area_struct **vmas,
4290                          unsigned long *position, unsigned long *nr_pages,
4291                          long i, unsigned int flags, int *nonblocking)
4292 {
4293         unsigned long pfn_offset;
4294         unsigned long vaddr = *position;
4295         unsigned long remainder = *nr_pages;
4296         struct hstate *h = hstate_vma(vma);
4297         int err = -EFAULT;
4298
4299         while (vaddr < vma->vm_end && remainder) {
4300                 pte_t *pte;
4301                 spinlock_t *ptl = NULL;
4302                 int absent;
4303                 struct page *page;
4304
4305                 /*
4306                  * If we have a pending SIGKILL, don't keep faulting pages and
4307                  * potentially allocating memory.
4308                  */
4309                 if (unlikely(fatal_signal_pending(current))) {
4310                         remainder = 0;
4311                         break;
4312                 }
4313
4314                 /*
4315                  * Some archs (sparc64, sh*) have multiple pte_ts to
4316                  * each hugepage.  We have to make sure we get the
4317                  * first, for the page indexing below to work.
4318                  *
4319                  * Note that page table lock is not held when pte is null.
4320                  */
4321                 pte = huge_pte_offset(mm, vaddr & huge_page_mask(h),
4322                                       huge_page_size(h));
4323                 if (pte)
4324                         ptl = huge_pte_lock(h, mm, pte);
4325                 absent = !pte || huge_pte_none(huge_ptep_get(pte));
4326
4327                 /*
4328                  * When coredumping, it suits get_dump_page if we just return
4329                  * an error where there's an empty slot with no huge pagecache
4330                  * to back it.  This way, we avoid allocating a hugepage, and
4331                  * the sparse dumpfile avoids allocating disk blocks, but its
4332                  * huge holes still show up with zeroes where they need to be.
4333                  */
4334                 if (absent && (flags & FOLL_DUMP) &&
4335                     !hugetlbfs_pagecache_present(h, vma, vaddr)) {
4336                         if (pte)
4337                                 spin_unlock(ptl);
4338                         remainder = 0;
4339                         break;
4340                 }
4341
4342                 /*
4343                  * We need to call hugetlb_fault for both hugepages under
4344                  * migration (in which case hugetlb_fault waits for the
4345                  * migration) and hwpoisoned hugepages (in which case we
4346                  * need to prevent the caller from accessing them). To do
4347                  * this, we use is_swap_pte here instead of
4348                  * is_hugetlb_entry_migration and is_hugetlb_entry_hwpoisoned:
4349                  * it covers both cases, and we can't follow correct pages
4350                  * directly from any kind of swap entry anyway.
4351                  */
4352                 if (absent || is_swap_pte(huge_ptep_get(pte)) ||
4353                     ((flags & FOLL_WRITE) &&
4354                       !huge_pte_write(huge_ptep_get(pte)))) {
4355                         vm_fault_t ret;
4356                         unsigned int fault_flags = 0;
4357
4358                         if (pte)
4359                                 spin_unlock(ptl);
4360                         if (flags & FOLL_WRITE)
4361                                 fault_flags |= FAULT_FLAG_WRITE;
4362                         if (nonblocking)
4363                                 fault_flags |= FAULT_FLAG_ALLOW_RETRY;
4364                         if (flags & FOLL_NOWAIT)
4365                                 fault_flags |= FAULT_FLAG_ALLOW_RETRY |
4366                                         FAULT_FLAG_RETRY_NOWAIT;
4367                         if (flags & FOLL_TRIED) {
4368                                 VM_WARN_ON_ONCE(fault_flags &
4369                                                 FAULT_FLAG_ALLOW_RETRY);
4370                                 fault_flags |= FAULT_FLAG_TRIED;
4371                         }
4372                         ret = hugetlb_fault(mm, vma, vaddr, fault_flags);
4373                         if (ret & VM_FAULT_ERROR) {
4374                                 err = vm_fault_to_errno(ret, flags);
4375                                 remainder = 0;
4376                                 break;
4377                         }
4378                         if (ret & VM_FAULT_RETRY) {
4379                                 if (nonblocking &&
4380                                     !(fault_flags & FAULT_FLAG_RETRY_NOWAIT))
4381                                         *nonblocking = 0;
4382                                 *nr_pages = 0;
4383                                 /*
4384                                  * VM_FAULT_RETRY must not return an
4385                                  * error; it will return zero
4386                                  * instead.
4387                                  *
4388                                  * No need to update "position" as the
4389                                  * caller will not check it after
4390                                  * *nr_pages is set to 0.
4391                                  */
4392                                 return i;
4393                         }
4394                         continue;
4395                 }
4396
4397                 pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT;
4398                 page = pte_page(huge_ptep_get(pte));
4399
4400                 /*
4401                  * Instead of doing 'try_get_page()' below in the same_page
4402                  * loop, just check the count once here.
4403                  */
4404                 if (unlikely(page_count(page) <= 0)) {
4405                         if (pages) {
4406                                 spin_unlock(ptl);
4407                                 remainder = 0;
4408                                 err = -ENOMEM;
4409                                 break;
4410                         }
4411                 }
4412 same_page:
4413                 if (pages) {
4414                         pages[i] = mem_map_offset(page, pfn_offset);
4415                         get_page(pages[i]);
4416                 }
4417
4418                 if (vmas)
4419                         vmas[i] = vma;
4420
4421                 vaddr += PAGE_SIZE;
4422                 ++pfn_offset;
4423                 --remainder;
4424                 ++i;
4425                 if (vaddr < vma->vm_end && remainder &&
4426                                 pfn_offset < pages_per_huge_page(h)) {
4427                         /*
4428                          * We use pfn_offset to avoid touching the pageframes
4429                          * of this compound page.
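                         * E.g. with a 2MB huge page and 4KB base pages, up
                         * to 512 consecutive base pages are handed out from
                         * a single page table walk.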
4430                          */
4431                         goto same_page;
4432                 }
4433                 spin_unlock(ptl);
4434         }
4435         *nr_pages = remainder;
4436         /*
4437          * setting position is actually required only if remainder is
4438          * not zero but it's faster not to add an "if (remainder)"
4439          * branch.
4440          */
4441         *position = vaddr;
4442
4443         return i ? i : err;
4444 }
4445
4446 #ifndef __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
4447 /*
4448  * ARCHes with special requirements for evicting HUGETLB backing TLB entries can
4449  * implement this.
4450  */
4451 #define flush_hugetlb_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end)
4452 #endif
4453
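/*
 * Change the protection of the huge ptes mapping [address, end) in @vma to
 * @newprot.  Returns the number of base pages covered by the huge ptes that
 * were updated (an unshared PMD counts as one huge page).
 */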
4454 unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
4455                 unsigned long address, unsigned long end, pgprot_t newprot)
4456 {
4457         struct mm_struct *mm = vma->vm_mm;
4458         unsigned long start = address;
4459         pte_t *ptep;
4460         pte_t pte;
4461         struct hstate *h = hstate_vma(vma);
4462         unsigned long pages = 0;
4463         unsigned long f_start = start;
4464         unsigned long f_end = end;
4465         bool shared_pmd = false;
4466
4467         /*
4468          * In the case of shared PMDs, the area to flush could be beyond
4469          * start/end.  Set f_start/f_end to cover the maximum possible
4470          * range if PMD sharing is possible.
4471          */
4472         adjust_range_if_pmd_sharing_possible(vma, &f_start, &f_end);
4473
4474         BUG_ON(address >= end);
4475         flush_cache_range(vma, f_start, f_end);
4476
4477         mmu_notifier_invalidate_range_start(mm, f_start, f_end);
4478         i_mmap_lock_write(vma->vm_file->f_mapping);
4479         for (; address < end; address += huge_page_size(h)) {
4480                 spinlock_t *ptl;
4481                 ptep = huge_pte_offset(mm, address, huge_page_size(h));
4482                 if (!ptep)
4483                         continue;
4484                 ptl = huge_pte_lock(h, mm, ptep);
4485                 if (huge_pmd_unshare(mm, &address, ptep)) {
4486                         pages++;
4487                         spin_unlock(ptl);
4488                         shared_pmd = true;
4489                         continue;
4490                 }
4491                 pte = huge_ptep_get(ptep);
4492                 if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) {
4493                         spin_unlock(ptl);
4494                         continue;
4495                 }
4496                 if (unlikely(is_hugetlb_entry_migration(pte))) {
4497                         swp_entry_t entry = pte_to_swp_entry(pte);
4498
4499                         if (is_write_migration_entry(entry)) {
4500                                 pte_t newpte;
4501
4502                                 make_migration_entry_read(&entry);
4503                                 newpte = swp_entry_to_pte(entry);
4504                                 set_huge_swap_pte_at(mm, address, ptep,
4505                                                      newpte, huge_page_size(h));
4506                                 pages++;
4507                         }
4508                         spin_unlock(ptl);
4509                         continue;
4510                 }
4511                 if (!huge_pte_none(pte)) {
4512                         pte = huge_ptep_get_and_clear(mm, address, ptep);
4513                         pte = pte_mkhuge(huge_pte_modify(pte, newprot));
4514                         pte = arch_make_huge_pte(pte, vma, NULL, 0);
4515                         set_huge_pte_at(mm, address, ptep, pte);
4516                         pages++;
4517                 }
4518                 spin_unlock(ptl);
4519         }
4520         /*
4521          * Must flush TLB before releasing i_mmap_rwsem: x86's huge_pmd_unshare
4522          * may have cleared our pud entry and done put_page on the page table:
4523          * once we release i_mmap_rwsem, another task can do the final put_page
4524          * and the page table can then be reused and filled with junk.  If we
4525          * did unshare a page of pmds, flush the range corresponding to the pud.
4526          */
4527         if (shared_pmd)
4528                 flush_hugetlb_tlb_range(vma, f_start, f_end);
4529         else
4530                 flush_hugetlb_tlb_range(vma, start, end);
4531         /*
4532          * No need to call mmu_notifier_invalidate_range(): we are downgrading
4533          * page table protection, not changing it to point to a new page.
4534          *
4535          * See Documentation/vm/mmu_notifier.rst
4536          */
4537         i_mmap_unlock_write(vma->vm_file->f_mapping);
4538         mmu_notifier_invalidate_range_end(mm, f_start, f_end);
4539
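        /*
         * "pages" counts huge pages; shift by the hstate order so the caller
         * sees base pages (e.g. order 9, i.e. 512 4KB pages, for a 2MB
         * hstate).
         */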
4540         return pages << h->order;
4541 }
4542
4543 int hugetlb_reserve_pages(struct inode *inode,
4544                                         long from, long to,
4545                                         struct vm_area_struct *vma,
4546                                         vm_flags_t vm_flags)
4547 {
4548         long ret, chg;
4549         struct hstate *h = hstate_inode(inode);
4550         struct hugepage_subpool *spool = subpool_inode(inode);
4551         struct resv_map *resv_map;
4552         long gbl_reserve;
4553
4554         /* This should never happen */
4555         if (from > to) {
4556                 VM_WARN(1, "%s called with a negative range\n", __func__);
4557                 return -EINVAL;
4558         }
4559
4560         /*
4561          * Only apply hugepage reservation if asked. At fault time, an
4562          * attempt will be made for VM_NORESERVE to allocate a page
4563          * without using reserves
4564          */
4565         if (vm_flags & VM_NORESERVE)
4566                 return 0;
4567
4568         /*
4569          * Shared mappings base their reservation on the number of pages that
4570          * are already allocated on behalf of the file. Private mappings need
4571          * to reserve the full area even if read-only as mprotect() may be
4572          * called to make the mapping read-write. Assume !vma is a shm mapping
4573          */
4574         if (!vma || vma->vm_flags & VM_MAYSHARE) {
4575                 resv_map = inode_resv_map(inode);
4576
4577                 chg = region_chg(resv_map, from, to);
4578
4579         } else {
4580                 resv_map = resv_map_alloc();
4581                 if (!resv_map)
4582                         return -ENOMEM;
4583
4584                 chg = to - from;
4585
4586                 set_vma_resv_map(vma, resv_map);
4587                 set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
4588         }
4589
4590         if (chg < 0) {
4591                 ret = chg;
4592                 goto out_err;
4593         }
4594
4595         /*
4596          * There must be enough pages in the subpool for the mapping. If
4597          * the subpool has a minimum size, there may be some global
4598          * reservations already in place (gbl_reserve).
4599          */
4600         gbl_reserve = hugepage_subpool_get_pages(spool, chg);
4601         if (gbl_reserve < 0) {
4602                 ret = -ENOSPC;
4603                 goto out_err;
4604         }
4605
4606         /*
4607          * Check that enough hugepages are available for the reservation.
4608          * Hand the pages back to the subpool if there are not.
4609          */
4610         ret = hugetlb_acct_memory(h, gbl_reserve);
4611         if (ret < 0) {
4612                 /* put back original number of pages, chg */
4613                 (void)hugepage_subpool_put_pages(spool, chg);
4614                 goto out_err;
4615         }
4616
4617         /*
4618          * Account for the reservations made. Shared mappings record regions
4619          * that have reservations as they are shared by multiple VMAs.
4620          * When the last VMA disappears, the region map says how much
4621          * the reservation was and the page cache tells how much of
4622          * the reservation was consumed. Private mappings are per-VMA and
4623          * only the consumed reservations are tracked. When the VMA
4624          * disappears, the original reservation is the VMA size and the
4625          * consumed reservations are stored in the map. Hence, nothing
4626          * else has to be done for private mappings here
4627          */
4628         if (!vma || vma->vm_flags & VM_MAYSHARE) {
4629                 long add = region_add(resv_map, from, to);
4630
4631                 if (unlikely(chg > add)) {
4632                         /*
4633                          * pages in this range were added to the reserve
4634                          * map between region_chg and region_add.  This
4635                          * indicates a race with alloc_huge_page.  Adjust
4636                          * the subpool and reserve counts modified above
4637                          * based on the difference.
4638                          */
4639                         long rsv_adjust;
4640
4641                         rsv_adjust = hugepage_subpool_put_pages(spool,
4642                                                                 chg - add);
4643                         hugetlb_acct_memory(h, -rsv_adjust);
4644                 }
4645         }
4646         return 0;
4647 out_err:
4648         if (!vma || vma->vm_flags & VM_MAYSHARE)
4649                 /* Don't call region_abort if region_chg failed */
4650                 if (chg >= 0)
4651                         region_abort(resv_map, from, to);
4652         if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
4653                 kref_put(&resv_map->refs, resv_map_release);
4654         return ret;
4655 }
4656
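/*
 * Release the reservations backing pages removed from a hugetlbfs inode.
 * @freed is the number of pages that were actually freed, while region_del()
 * reports how many pages were dropped from the reserve map for [start, end);
 * the difference (unconsumed reservations) is returned to the subpool and to
 * the global reservation count.
 */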
4657 long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
4658                                                                 long freed)
4659 {
4660         struct hstate *h = hstate_inode(inode);
4661         struct resv_map *resv_map = inode_resv_map(inode);
4662         long chg = 0;
4663         struct hugepage_subpool *spool = subpool_inode(inode);
4664         long gbl_reserve;
4665
4666         if (resv_map) {
4667                 chg = region_del(resv_map, start, end);
4668                 /*
4669                  * region_del() can fail in the rare case where a region
4670                  * must be split and another region descriptor can not be
4671                  * allocated.  If end == LONG_MAX, it will not fail.
4672                  */
4673                 if (chg < 0)
4674                         return chg;
4675         }
4676
4677         spin_lock(&inode->i_lock);
4678         inode->i_blocks -= (blocks_per_huge_page(h) * freed);
4679         spin_unlock(&inode->i_lock);
4680
4681         /*
4682          * If the subpool has a minimum size, the number of global
4683          * reservations to be released may be adjusted.
4684          */
4685         gbl_reserve = hugepage_subpool_put_pages(spool, (chg - freed));
4686         hugetlb_acct_memory(h, -gbl_reserve);
4687
4688         return 0;
4689 }
4690
4691 #ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
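/*
 * Return the address in @svma that maps the same file offset (@idx) as @addr
 * does in @vma, provided the two vmas could share a pmd page table page for
 * that PUD-sized region: same pmd slot, matching vm_flags (mlock state aside)
 * and @svma fully covering the PUD-aligned range.  Return 0 otherwise.
 */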
4692 static unsigned long page_table_shareable(struct vm_area_struct *svma,
4693                                 struct vm_area_struct *vma,
4694                                 unsigned long addr, pgoff_t idx)
4695 {
4696         unsigned long saddr = ((idx - svma->vm_pgoff) << PAGE_SHIFT) +
4697                                 svma->vm_start;
4698         unsigned long sbase = saddr & PUD_MASK;
4699         unsigned long s_end = sbase + PUD_SIZE;
4700
4701         /* Allow segments to share even if only one is marked locked */
4702         unsigned long vm_flags = vma->vm_flags & VM_LOCKED_CLEAR_MASK;
4703         unsigned long svm_flags = svma->vm_flags & VM_LOCKED_CLEAR_MASK;
4704
4705         /*
4706          * match the virtual addresses, permissions and the alignment of the
4707          * page table page.
4708          */
4709         if (pmd_index(addr) != pmd_index(saddr) ||
4710             vm_flags != svm_flags ||
4711             sbase < svma->vm_start || svma->vm_end < s_end)
4712                 return 0;
4713
4714         return saddr;
4715 }
4716
4717 static bool vma_shareable(struct vm_area_struct *vma, unsigned long addr)
4718 {
4719         unsigned long base = addr & PUD_MASK;
4720         unsigned long end = base + PUD_SIZE;
4721
4722         /*
4723          * check on proper vm_flags and page table alignment
4724          */
4725         if (vma->vm_flags & VM_MAYSHARE && range_in_vma(vma, base, end))
4726                 return true;
4727         return false;
4728 }
4729
4730 /*
4731  * Determine if start,end range within vma could be mapped by shared pmd.
4732  * If yes, adjust start and end to cover range associated with possible
4733  * shared pmd mappings.
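 * For example, with a 1GB PUD_SIZE, a [1.5GB, 2.5GB) range inside a
 * sufficiently large VM_MAYSHARE vma is widened to [1GB, 3GB).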
4734  */
4735 void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
4736                                 unsigned long *start, unsigned long *end)
4737 {
4738         unsigned long v_start = ALIGN(vma->vm_start, PUD_SIZE),
4739                 v_end = ALIGN_DOWN(vma->vm_end, PUD_SIZE);
4740
4741         /*
4742          * The vma needs to span at least one aligned PUD size and the start,end
4743          * range must at least partially fall within it.
4744          */
4745         if (!(vma->vm_flags & VM_MAYSHARE) || !(v_end > v_start) ||
4746                 (*end <= v_start) || (*start >= v_end))
4747                 return;
4748
4749         /* Extend the range to be PUD aligned for a worst case scenario */
4750         if (*start > v_start)
4751                 *start = ALIGN_DOWN(*start, PUD_SIZE);
4752
4753         if (*end < v_end)
4754                 *end = ALIGN(*end, PUD_SIZE);
4755 }
4756
4757 /*
4758  * Search for a shareable pmd page for hugetlb. In any case it calls pmd_alloc()
4759  * and returns the corresponding pte. While this is not necessary for the
4760  * !shared pmd case because we can allocate the pmd later as well, it makes the
4761  * code much cleaner. pmd allocation is essential for the shared case because
4762  * pud has to be populated inside the same i_mmap_rwsem section - otherwise
4763  * racing tasks could either miss the sharing (see huge_pte_offset) or select a
4764  * bad pmd for sharing.
4765  */
4766 pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
4767 {
4768         struct vm_area_struct *vma = find_vma(mm, addr);
4769         struct address_space *mapping = vma->vm_file->f_mapping;
4770         pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) +
4771                         vma->vm_pgoff;
4772         struct vm_area_struct *svma;
4773         unsigned long saddr;
4774         pte_t *spte = NULL;
4775         pte_t *pte;
4776         spinlock_t *ptl;
4777
4778         if (!vma_shareable(vma, addr))
4779                 return (pte_t *)pmd_alloc(mm, pud, addr);
4780
4781         i_mmap_lock_write(mapping);
4782         vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) {
4783                 if (svma == vma)
4784                         continue;
4785
4786                 saddr = page_table_shareable(svma, vma, addr, idx);
4787                 if (saddr) {
4788                         spte = huge_pte_offset(svma->vm_mm, saddr,
4789                                                vma_mmu_pagesize(svma));
4790                         if (spte) {
4791                                 get_page(virt_to_page(spte));
4792                                 break;
4793                         }
4794                 }
4795         }
4796
4797         if (!spte)
4798                 goto out;
4799
4800         ptl = huge_pte_lock(hstate_vma(vma), mm, spte);
4801         if (pud_none(*pud)) {
4802                 pud_populate(mm, pud,
4803                                 (pmd_t *)((unsigned long)spte & PAGE_MASK));
4804                 mm_inc_nr_pmds(mm);
4805         } else {
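                /*
                 * Someone else populated the pud first; drop the extra
                 * reference we took on the shared pmd page.
                 */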
4806                 put_page(virt_to_page(spte));
4807         }
4808         spin_unlock(ptl);
4809 out:
4810         pte = (pte_t *)pmd_alloc(mm, pud, addr);
4811         i_mmap_unlock_write(mapping);
4812         return pte;
4813 }
4814
4815 /*
4816  * unmap huge page backed by shared pte.
4817  *
4818  * The hugetlb pte page is ref counted at the time of mapping.  If the pte is
4819  * shared, as indicated by page_count > 1, unmap is achieved by clearing the pud and
4820  * decrementing the ref count. If count == 1, the pte page is not shared.
4821  *
4822  * called with page table lock held.
4823  *
4824  * returns: 1 successfully unmapped a shared pte page
4825  *          0 the underlying pte page is not shared, or it is the last user
4826  */
4827 int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
4828 {
4829         pgd_t *pgd = pgd_offset(mm, *addr);
4830         p4d_t *p4d = p4d_offset(pgd, *addr);
4831         pud_t *pud = pud_offset(p4d, *addr);
4832
4833         BUG_ON(page_count(virt_to_page(ptep)) == 0);
4834         if (page_count(virt_to_page(ptep)) == 1)
4835                 return 0;
4836
4837         pud_clear(pud);
4838         put_page(virt_to_page(ptep));
4839         mm_dec_nr_pmds(mm);
4840         /*
4841          * This update of the passed address optimizes loops that sequentially
4842          * process addresses in increments of the huge page size (PMD_SIZE
4843          * in this case).  By clearing the pud, a PUD_SIZE area is unmapped.
4844          * Update the address to the 'last page' in the cleared area so that the
4845          * calling loop can move to the first page past this area.
4846          */
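        /*
         * For example, on x86-64 PUD_SIZE is 1GB and PMD_SIZE is 2MB, so this
         * sets *addr to the last 2MB page of the unmapped 1GB area; the
         * caller's next "address += huge_page_size(h)" then lands just past
         * it.
         */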
4847         *addr |= PUD_SIZE - PMD_SIZE;
4848         return 1;
4849 }
4850 #define want_pmd_share()        (1)
4851 #else /* !CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
4852 pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
4853 {
4854         return NULL;
4855 }
4856
4857 int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
4858 {
4859         return 0;
4860 }
4861
4862 void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
4863                                 unsigned long *start, unsigned long *end)
4864 {
4865 }
4866 #define want_pmd_share()        (0)
4867 #endif /* CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
4868
4869 #ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
4870 pte_t *huge_pte_alloc(struct mm_struct *mm,
4871                         unsigned long addr, unsigned long sz)
4872 {
4873         pgd_t *pgd;
4874         p4d_t *p4d;
4875         pud_t *pud;
4876         pte_t *pte = NULL;
4877
4878         pgd = pgd_offset(mm, addr);
4879         p4d = p4d_alloc(mm, pgd, addr);
4880         if (!p4d)
4881                 return NULL;
4882         pud = pud_alloc(mm, p4d, addr);
4883         if (pud) {
4884                 if (sz == PUD_SIZE) {
4885                         pte = (pte_t *)pud;
4886                 } else {
4887                         BUG_ON(sz != PMD_SIZE);
4888                         if (want_pmd_share() && pud_none(*pud))
4889                                 pte = huge_pmd_share(mm, addr, pud);
4890                         else
4891                                 pte = (pte_t *)pmd_alloc(mm, pud, addr);
4892                 }
4893         }
4894         BUG_ON(pte && pte_present(*pte) && !pte_huge(*pte));
4895
4896         return pte;
4897 }
4898
4899 /*
4900  * huge_pte_offset() - Walk the page table to resolve the hugepage
4901  * entry at address @addr
4902  *
4903  * Return: Pointer to page table or swap entry (PUD or PMD) for
4904  * address @addr, or NULL if a p*d_none() entry is encountered and the
4905  * size @sz doesn't match the hugepage size at this level of the page
4906  * table.
4907  */
4908 pte_t *huge_pte_offset(struct mm_struct *mm,
4909                        unsigned long addr, unsigned long sz)
4910 {
4911         pgd_t *pgd;
4912         p4d_t *p4d;
4913         pud_t *pud, pud_entry;
4914         pmd_t *pmd, pmd_entry;
4915
4916         pgd = pgd_offset(mm, addr);
4917         if (!pgd_present(*pgd))
4918                 return NULL;
4919         p4d = p4d_offset(pgd, addr);
4920         if (!p4d_present(*p4d))
4921                 return NULL;
4922
4923         pud = pud_offset(p4d, addr);
4924         pud_entry = READ_ONCE(*pud);
4925         if (sz != PUD_SIZE && pud_none(pud_entry))
4926                 return NULL;
4927         /* hugepage or swap? */
4928         if (pud_huge(pud_entry) || !pud_present(pud_entry))
4929                 return (pte_t *)pud;
4930
4931         pmd = pmd_offset(pud, addr);
4932         pmd_entry = READ_ONCE(*pmd);
4933         if (sz != PMD_SIZE && pmd_none(pmd_entry))
4934                 return NULL;
4935         /* hugepage or swap? */
4936         if (pmd_huge(pmd_entry) || !pmd_present(pmd_entry))
4937                 return (pte_t *)pmd;
4938
4939         return NULL;
4940 }
4941
4942 #endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */
4943
4944 /*
4945  * These functions can be overridden if your architecture needs its own
4946  * behavior.
4947  */
4948 struct page * __weak
4949 follow_huge_addr(struct mm_struct *mm, unsigned long address,
4950                               int write)
4951 {
4952         return ERR_PTR(-EINVAL);
4953 }
4954
4955 struct page * __weak
4956 follow_huge_pd(struct vm_area_struct *vma,
4957                unsigned long address, hugepd_t hpd, int flags, int pdshift)
4958 {
4959         WARN(1, "hugepd follow called with no support for hugepage directory format\n");
4960         return NULL;
4961 }
4962
4963 struct page * __weak
4964 follow_huge_pmd(struct mm_struct *mm, unsigned long address,
4965                 pmd_t *pmd, int flags)
4966 {
4967         struct page *page = NULL;
4968         spinlock_t *ptl;
4969         pte_t pte;
4970 retry:
4971         ptl = pmd_lockptr(mm, pmd);
4972         spin_lock(ptl);
4973         /*
4974          * make sure that the address range covered by this pmd is not
4975          * unmapped by other threads.
4976          */
4977         if (!pmd_huge(*pmd))
4978                 goto out;
4979         pte = huge_ptep_get((pte_t *)pmd);
4980         if (pte_present(pte)) {
4981                 page = pmd_page(*pmd) + ((address & ~PMD_MASK) >> PAGE_SHIFT);
4982                 if (flags & FOLL_GET)
4983                         get_page(page);
4984         } else {
4985                 if (is_hugetlb_entry_migration(pte)) {
4986                         spin_unlock(ptl);
4987                         __migration_entry_wait(mm, (pte_t *)pmd, ptl);
4988                         goto retry;
4989                 }
4990                 /*
4991                  * hwpoisoned entry is treated as no_page_table in
4992                  * follow_page_mask().
4993                  */
4994         }
4995 out:
4996         spin_unlock(ptl);
4997         return page;
4998 }
4999
5000 struct page * __weak
5001 follow_huge_pud(struct mm_struct *mm, unsigned long address,
5002                 pud_t *pud, int flags)
5003 {
5004         if (flags & FOLL_GET)
5005                 return NULL;
5006
5007         return pte_page(*(pte_t *)pud) + ((address & ~PUD_MASK) >> PAGE_SHIFT);
5008 }
5009
5010 struct page * __weak
5011 follow_huge_pgd(struct mm_struct *mm, unsigned long address, pgd_t *pgd, int flags)
5012 {
5013         if (flags & FOLL_GET)
5014                 return NULL;
5015
5016         return pte_page(*(pte_t *)pgd) + ((address & ~PGDIR_MASK) >> PAGE_SHIFT);
5017 }
5018
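/*
 * isolate_huge_page() - isolate an active huge page for migration.
 *
 * Takes a reference on @page, clears its "huge active" state and moves it
 * from the hstate active list onto @list.  Returns false if @page is not an
 * active head huge page or its refcount has already dropped to zero.
 */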
5019 bool isolate_huge_page(struct page *page, struct list_head *list)
5020 {
5021         bool ret = true;
5022
5023         spin_lock(&hugetlb_lock);
5024         if (!PageHeadHuge(page) || !page_huge_active(page) ||
5025             !get_page_unless_zero(page)) {
5026                 ret = false;
5027                 goto unlock;
5028         }
5029         clear_page_huge_active(page);
5030         list_move_tail(&page->lru, list);
5031 unlock:
5032         spin_unlock(&hugetlb_lock);
5033         return ret;
5034 }
5035
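/*
 * putback_active_hugepage() - undo isolate_huge_page().
 *
 * Marks @page active again, moves it back onto its hstate's active list and
 * drops the reference taken when the page was isolated.
 */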
5036 void putback_active_hugepage(struct page *page)
5037 {
5038         VM_BUG_ON_PAGE(!PageHead(page), page);
5039         spin_lock(&hugetlb_lock);
5040         set_page_huge_active(page);
5041         list_move_tail(&page->lru, &(page_hstate(page))->hugepage_activelist);
5042         spin_unlock(&hugetlb_lock);
5043         put_page(page);
5044 }
5045
5046 void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason)
5047 {
5048         struct hstate *h = page_hstate(oldpage);
5049
5050         hugetlb_cgroup_migrate(oldpage, newpage);
5051         set_page_owner_migrate_reason(newpage, reason);
5052
5053         /*
5054          * transfer temporary state of the new huge page. This is
5055          * reverse to other transitions because the newpage is going to
5056          * be final while the old one will be freed so it takes over
5057          * the temporary status.
5058          *
5059          * Also note that we have to transfer the per-node surplus state
5060          * here as well otherwise the global surplus count will not match
5061          * the per-node's.
5062          */
5063         if (PageHugeTemporary(newpage)) {
5064                 int old_nid = page_to_nid(oldpage);
5065                 int new_nid = page_to_nid(newpage);
5066
5067                 SetPageHugeTemporary(oldpage);
5068                 ClearPageHugeTemporary(newpage);
5069
5070                 spin_lock(&hugetlb_lock);
5071                 if (h->surplus_huge_pages_node[old_nid]) {
5072                         h->surplus_huge_pages_node[old_nid]--;
5073                         h->surplus_huge_pages_node[new_nid]++;
5074                 }
5075                 spin_unlock(&hugetlb_lock);
5076         }
5077 }