GNU Linux-libre 4.19.286-gnu1
mm/swap_state.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  *  linux/mm/swap_state.c
4  *
5  *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
6  *  Swap reorganised 29.12.95, Stephen Tweedie
7  *
8  *  Rewritten to use page cache, (C) 1998 Stephen Tweedie
9  */
10 #include <linux/mm.h>
11 #include <linux/gfp.h>
12 #include <linux/kernel_stat.h>
13 #include <linux/swap.h>
14 #include <linux/swapops.h>
15 #include <linux/init.h>
16 #include <linux/pagemap.h>
17 #include <linux/backing-dev.h>
18 #include <linux/blkdev.h>
19 #include <linux/pagevec.h>
20 #include <linux/migrate.h>
21 #include <linux/vmalloc.h>
22 #include <linux/swap_slots.h>
23 #include <linux/huge_mm.h>
24
25 #include <asm/pgtable.h>
26 #include "internal.h"
27
28 /*
29  * swapper_space is a fiction, retained to simplify the path through
30  * vmscan's shrink_page_list.
31  */
32 static const struct address_space_operations swap_aops = {
33         .writepage      = swap_writepage,
34         .set_page_dirty = swap_set_page_dirty,
35 #ifdef CONFIG_MIGRATION
36         .migratepage    = migrate_page,
37 #endif
38 };
39
40 struct address_space *swapper_spaces[MAX_SWAPFILES] __read_mostly;
41 static unsigned int nr_swapper_spaces[MAX_SWAPFILES] __read_mostly;
42 static bool enable_vma_readahead __read_mostly = true;
43
44 #define SWAP_RA_WIN_SHIFT       (PAGE_SHIFT / 2)
45 #define SWAP_RA_HITS_MASK       ((1UL << SWAP_RA_WIN_SHIFT) - 1)
46 #define SWAP_RA_HITS_MAX        SWAP_RA_HITS_MASK
47 #define SWAP_RA_WIN_MASK        (~PAGE_MASK & ~SWAP_RA_HITS_MASK)
48
49 #define SWAP_RA_HITS(v)         ((v) & SWAP_RA_HITS_MASK)
50 #define SWAP_RA_WIN(v)          (((v) & SWAP_RA_WIN_MASK) >> SWAP_RA_WIN_SHIFT)
51 #define SWAP_RA_ADDR(v)         ((v) & PAGE_MASK)
52
53 #define SWAP_RA_VAL(addr, win, hits)                            \
54         (((addr) & PAGE_MASK) |                                 \
55          (((win) << SWAP_RA_WIN_SHIFT) & SWAP_RA_WIN_MASK) |    \
56          ((hits) & SWAP_RA_HITS_MASK))
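/*
 * A sketch of the packed swap_readahead_info layout (assuming 4K pages, so
 * PAGE_SHIFT == 12 and SWAP_RA_WIN_SHIFT == 6): bits [12, BITS_PER_LONG) hold
 * the page-aligned fault address, bits [6, 12) the last readahead window and
 * bits [0, 6) the hit count.  For example,
 * SWAP_RA_VAL(0x7f0000, 8, 3) == 0x7f0000 | (8 << 6) | 3 == 0x7f0203.
 */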
57
58 /* Initial readahead hits is 4 to start up with a small window */
59 #define GET_SWAP_RA_VAL(vma)                                    \
60         (atomic_long_read(&(vma)->swap_readahead_info) ? : 4)
61
62 #define INC_CACHE_INFO(x)       do { swap_cache_info.x++; } while (0)
63 #define ADD_CACHE_INFO(x, nr)   do { swap_cache_info.x += (nr); } while (0)
64
65 static struct {
66         unsigned long add_total;
67         unsigned long del_total;
68         unsigned long find_success;
69         unsigned long find_total;
70 } swap_cache_info;
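/*
 * Best-effort statistics only: the counters above are plain longs bumped via
 * INC_CACHE_INFO()/ADD_CACHE_INFO() and reported by show_swap_cache_info();
 * they are not guaranteed to be exact under concurrent updates.
 */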
71
72 unsigned long total_swapcache_pages(void)
73 {
74         unsigned int i, j, nr;
75         unsigned long ret = 0;
76         struct address_space *spaces;
77
78         rcu_read_lock();
79         for (i = 0; i < MAX_SWAPFILES; i++) {
80                 /*
81                  * The corresponding entries in nr_swapper_spaces and
82                  * swapper_spaces will be reused only after at least
83                  * one grace period.  So it is impossible for them
84                  * to belong to different usages.
85                  */
86                 nr = nr_swapper_spaces[i];
87                 spaces = rcu_dereference(swapper_spaces[i]);
88                 if (!nr || !spaces)
89                         continue;
90                 for (j = 0; j < nr; j++)
91                         ret += spaces[j].nrpages;
92         }
93         rcu_read_unlock();
94         return ret;
95 }
96
97 static atomic_t swapin_readahead_hits = ATOMIC_INIT(4);
98
99 void show_swap_cache_info(void)
100 {
101         printk("%lu pages in swap cache\n", total_swapcache_pages());
102         printk("Swap cache stats: add %lu, delete %lu, find %lu/%lu\n",
103                 swap_cache_info.add_total, swap_cache_info.del_total,
104                 swap_cache_info.find_success, swap_cache_info.find_total);
105         printk("Free swap  = %ldkB\n",
106                 get_nr_swap_pages() << (PAGE_SHIFT - 10));
107         printk("Total swap = %lukB\n", total_swap_pages << (PAGE_SHIFT - 10));
108 }
109
110 /*
111  * __add_to_swap_cache resembles add_to_page_cache_locked on swapper_space,
112  * but sets SwapCache flag and private instead of mapping and index.
113  */
114 int __add_to_swap_cache(struct page *page, swp_entry_t entry)
115 {
116         int error, i, nr = hpage_nr_pages(page);
117         struct address_space *address_space;
118         pgoff_t idx = swp_offset(entry);
119
120         VM_BUG_ON_PAGE(!PageLocked(page), page);
121         VM_BUG_ON_PAGE(PageSwapCache(page), page);
122         VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
123
124         page_ref_add(page, nr);
125         SetPageSwapCache(page);
126
127         address_space = swap_address_space(entry);
128         xa_lock_irq(&address_space->i_pages);
129         for (i = 0; i < nr; i++) {
130                 set_page_private(page + i, entry.val + i);
131                 error = radix_tree_insert(&address_space->i_pages,
132                                           idx + i, page + i);
133                 if (unlikely(error))
134                         break;
135         }
136         if (likely(!error)) {
137                 address_space->nrpages += nr;
138                 __mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, nr);
139                 ADD_CACHE_INFO(add_total, nr);
140         } else {
141                 /*
142                  * Only the context which has set the SWAP_HAS_CACHE flag
143                  * would call add_to_swap_cache().
144                  * So add_to_swap_cache() doesn't return -EEXIST.
145                  */
146                 VM_BUG_ON(error == -EEXIST);
147                 set_page_private(page + i, 0UL);
148                 while (i--) {
149                         radix_tree_delete(&address_space->i_pages, idx + i);
150                         set_page_private(page + i, 0UL);
151                 }
152                 ClearPageSwapCache(page);
153                 page_ref_sub(page, nr);
154         }
155         xa_unlock_irq(&address_space->i_pages);
156
157         return error;
158 }
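/*
 * Expected calling pattern (a sketch based on the callers below): the page is
 * locked, the swap slot has already been marked SWAP_HAS_CACHE (via
 * get_swap_page() or swapcache_prepare()), and the radix tree has been
 * preloaded, as add_to_swap_cache() and __read_swap_cache_async() do.
 */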
159
160
161 int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
162 {
163         int error;
164
165         error = radix_tree_maybe_preload_order(gfp_mask, compound_order(page));
166         if (!error) {
167                 error = __add_to_swap_cache(page, entry);
168                 radix_tree_preload_end();
169         }
170         return error;
171 }
172
173 /*
174  * This must be called only on pages that have
175  * been verified to be in the swap cache.
176  */
177 void __delete_from_swap_cache(struct page *page)
178 {
179         struct address_space *address_space;
180         int i, nr = hpage_nr_pages(page);
181         swp_entry_t entry;
182         pgoff_t idx;
183
184         VM_BUG_ON_PAGE(!PageLocked(page), page);
185         VM_BUG_ON_PAGE(!PageSwapCache(page), page);
186         VM_BUG_ON_PAGE(PageWriteback(page), page);
187
188         entry.val = page_private(page);
189         address_space = swap_address_space(entry);
190         idx = swp_offset(entry);
191         for (i = 0; i < nr; i++) {
192                 radix_tree_delete(&address_space->i_pages, idx + i);
193                 set_page_private(page + i, 0);
194         }
195         ClearPageSwapCache(page);
196         address_space->nrpages -= nr;
197         __mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, -nr);
198         ADD_CACHE_INFO(del_total, nr);
199 }
200
201 /**
202  * add_to_swap - allocate swap space for a page
203  * @page: page we want to move to swap
204  *
205  * Allocate swap space for the page and add the page to the
206  * swap cache.  Caller needs to hold the page lock.
 *
 * Return: 1 if the page was added to the swap cache, 0 otherwise.
207  */
208 int add_to_swap(struct page *page)
209 {
210         swp_entry_t entry;
211         int err;
212
213         VM_BUG_ON_PAGE(!PageLocked(page), page);
214         VM_BUG_ON_PAGE(!PageUptodate(page), page);
215
216         entry = get_swap_page(page);
217         if (!entry.val)
218                 return 0;
219
220         /*
221          * Radix-tree node allocations from PF_MEMALLOC contexts could
222          * completely exhaust the page allocator. __GFP_NOMEMALLOC
223          * stops emergency reserves from being allocated.
224          *
225          * TODO: this could cause a theoretical memory reclaim
226          * deadlock in the swap out path.
227          */
228         /*
229          * Add it to the swap cache.
230          */
231         err = add_to_swap_cache(page, entry,
232                         __GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN);
233         /* -ENOMEM radix-tree allocation failure */
234         if (err)
235                 /*
236                  * add_to_swap_cache() doesn't return -EEXIST, so we can safely
237                  * clear SWAP_HAS_CACHE flag.
238                  */
239                 goto fail;
240         /*
241          * Normally the page will be dirtied in unmap because its pte should be
242          * dirty. A special case is a MADV_FREE page. Its pte could have the
243          * dirty bit cleared while the page's SwapBacked bit is still set,
244          * because clearing the dirty bit and the SwapBacked bit is not done
245          * under a lock. For such a page, unmap will not set the dirty bit,
246          * so page reclaim will not write the page out. This can cause data
247          * corruption when the page is swapped in later. Always setting the
248          * dirty bit for the page solves the problem.
249          */
250         set_page_dirty(page);
251
252         return 1;
253
254 fail:
255         put_swap_page(page, entry);
256         return 0;
257 }
258
259 /*
260  * This must be called only on pages that have
261  * been verified to be in the swap cache and locked.
262  * It will never put the page into the free list,
263  * the caller has a reference on the page.
264  */
265 void delete_from_swap_cache(struct page *page)
266 {
267         swp_entry_t entry;
268         struct address_space *address_space;
269
270         entry.val = page_private(page);
271
272         address_space = swap_address_space(entry);
273         xa_lock_irq(&address_space->i_pages);
274         __delete_from_swap_cache(page);
275         xa_unlock_irq(&address_space->i_pages);
276
277         put_swap_page(page, entry);
278         page_ref_sub(page, hpage_nr_pages(page));
279 }
280
281 /* 
282  * If we are the only user, then try to free up the swap cache. 
283  * 
284  * It's OK to check for PageSwapCache without the page lock
285  * here because we are going to recheck again inside
286  * try_to_free_swap() _with_ the lock.
287  *                                      - Marcelo
288  */
289 static inline void free_swap_cache(struct page *page)
290 {
291         if (PageSwapCache(page) && !page_mapped(page) && trylock_page(page)) {
292                 try_to_free_swap(page);
293                 unlock_page(page);
294         }
295 }
296
297 /* 
298  * Perform a free_page(), also freeing any swap cache associated with
299  * this page if it is the last user of the page.
300  */
301 void free_page_and_swap_cache(struct page *page)
302 {
303         free_swap_cache(page);
304         if (!is_huge_zero_page(page))
305                 put_page(page);
306 }
307
308 /*
309  * Passed an array of pages, drop them all from swapcache and then release
310  * them.  They are removed from the LRU and freed if this is their last use.
311  */
312 void free_pages_and_swap_cache(struct page **pages, int nr)
313 {
314         struct page **pagep = pages;
315         int i;
316
317         lru_add_drain();
318         for (i = 0; i < nr; i++)
319                 free_swap_cache(pagep[i]);
320         release_pages(pagep, nr);
321 }
322
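/*
 * VMA based readahead is used only when it is enabled via sysfs and no
 * rotational swap device is in use (nr_rotate_swap tracks those); otherwise
 * callers fall back to the physical cluster readahead below.
 */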
323 static inline bool swap_use_vma_readahead(void)
324 {
325         return READ_ONCE(enable_vma_readahead) && !atomic_read(&nr_rotate_swap);
326 }
327
328 /*
329  * Lookup a swap entry in the swap cache. A found page will be returned
330  * unlocked and with its refcount incremented - we rely on the kernel
331  * lock getting page table operations atomic even if we drop the page
332  * lock before returning.
333  */
334 struct page *lookup_swap_cache(swp_entry_t entry, struct vm_area_struct *vma,
335                                unsigned long addr)
336 {
337         struct page *page;
338
339         page = find_get_page(swap_address_space(entry), swp_offset(entry));
340
341         INC_CACHE_INFO(find_total);
342         if (page) {
343                 bool vma_ra = swap_use_vma_readahead();
344                 bool readahead;
345
346                 INC_CACHE_INFO(find_success);
347                 /*
348                  * At the moment, we don't support PG_readahead for anon THP
349                  * so let's bail out rather than confusing the readahead stat.
350                  */
351                 if (unlikely(PageTransCompound(page)))
352                         return page;
353
354                 readahead = TestClearPageReadahead(page);
355                 if (vma && vma_ra) {
356                         unsigned long ra_val;
357                         int win, hits;
358
359                         ra_val = GET_SWAP_RA_VAL(vma);
360                         win = SWAP_RA_WIN(ra_val);
361                         hits = SWAP_RA_HITS(ra_val);
362                         if (readahead)
363                                 hits = min_t(int, hits + 1, SWAP_RA_HITS_MAX);
364                         atomic_long_set(&vma->swap_readahead_info,
365                                         SWAP_RA_VAL(addr, win, hits));
366                 }
367
368                 if (readahead) {
369                         count_vm_event(SWAP_RA_HIT);
370                         if (!vma || !vma_ra)
371                                 atomic_inc(&swapin_readahead_hits);
372                 }
373         }
374
375         return page;
376 }
377
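/*
 * Find or create a swap cache page for @entry.  *new_page_allocated reports
 * whether a freshly allocated page was inserted; such a page is returned
 * locked and has not yet been read from swap, so the caller must start the
 * I/O.  A -EEXIST from swapcache_prepare() means another task is adding the
 * same entry, so we back off and retry; other failures end the loop.
 */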
378 struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
379                         struct vm_area_struct *vma, unsigned long addr,
380                         bool *new_page_allocated)
381 {
382         struct page *found_page, *new_page = NULL;
383         struct address_space *swapper_space = swap_address_space(entry);
384         int err;
385         *new_page_allocated = false;
386
387         do {
388                 /*
389                  * First check the swap cache.  Since this is normally
390                  * called after lookup_swap_cache() failed, re-calling
391                  * that would confuse statistics.
392                  */
393                 found_page = find_get_page(swapper_space, swp_offset(entry));
394                 if (found_page)
395                         break;
396
397                 /*
398                  * Just skip readahead for an unused swap slot.
399                  * During swap_off, when swap_slot_cache is disabled,
400                  * we have to handle the race between putting the
401                  * swap entry into the swap cache and marking the swap
402                  * slot as SWAP_HAS_CACHE.  That's done in a later part
403                  * of the code, or else swap_off will be aborted if we return NULL.
404                  */
405                 if (!__swp_swapcount(entry) && swap_slot_cache_enabled)
406                         break;
407
408                 /*
409                  * Get a new page to read into from swap.
410                  */
411                 if (!new_page) {
412                         new_page = alloc_page_vma(gfp_mask, vma, addr);
413                         if (!new_page)
414                                 break;          /* Out of memory */
415                 }
416
417                 /*
418                  * call radix_tree_maybe_preload() while we can wait.
419                  */
420                 err = radix_tree_maybe_preload(gfp_mask & GFP_RECLAIM_MASK);
421                 if (err)
422                         break;
423
424                 /*
425                  * Swap entry may have been freed since our caller observed it.
426                  */
427                 err = swapcache_prepare(entry);
428                 if (err == -EEXIST) {
429                         radix_tree_preload_end();
430                         /*
431                          * We might race against get_swap_page() and stumble
432                          * across a SWAP_HAS_CACHE swap_map entry whose page
433                          * has not been brought into the swapcache yet.
434                          */
435                         cond_resched();
436                         continue;
437                 }
438                 if (err) {              /* swp entry is obsolete ? */
439                         radix_tree_preload_end();
440                         break;
441                 }
442
443                 /* May fail (-ENOMEM) if radix-tree node allocation failed. */
444                 __SetPageLocked(new_page);
445                 __SetPageSwapBacked(new_page);
446                 err = __add_to_swap_cache(new_page, entry);
447                 if (likely(!err)) {
448                         radix_tree_preload_end();
449                         /*
450                          * Initiate read into locked page and return.
451                          */
452                         lru_cache_add_anon(new_page);
453                         *new_page_allocated = true;
454                         return new_page;
455                 }
456                 radix_tree_preload_end();
457                 __ClearPageLocked(new_page);
458                 /*
459                  * add_to_swap_cache() doesn't return -EEXIST, so we can safely
460                  * clear SWAP_HAS_CACHE flag.
461                  */
462                 put_swap_page(new_page, entry);
463         } while (err != -ENOMEM);
464
465         if (new_page)
466                 put_page(new_page);
467         return found_page;
468 }
469
470 /*
471  * Locate a page of swap in physical memory, reserving swap cache space
472  * and reading the disk if it is not already cached.
473  * A failure return means that either the page allocation failed or that
474  * the swap entry is no longer in use.
475  */
476 struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
477                 struct vm_area_struct *vma, unsigned long addr, bool do_poll)
478 {
479         bool page_was_allocated;
480         struct page *retpage = __read_swap_cache_async(entry, gfp_mask,
481                         vma, addr, &page_was_allocated);
482
483         if (page_was_allocated)
484                 swap_readpage(retpage, do_poll);
485
486         return retpage;
487 }
488
489 static unsigned int __swapin_nr_pages(unsigned long prev_offset,
490                                       unsigned long offset,
491                                       int hits,
492                                       int max_pages,
493                                       int prev_win)
494 {
495         unsigned int pages, last_ra;
496
497         /*
498          * This heuristic has been found to work well on both sequential and
499          * random loads, swapping to hard disk or to SSD: please don't ask
500          * what the "+ 2" means, it just happens to work well, that's all.
501          */
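        /*
         * For example (a sketch): hits == 3 gives pages == 5, which the
         * power-of-two roundup below turns into 8; hits == 0 gives pages == 2,
         * which collapses to 1 unless this offset is adjacent to the previous
         * one.  The result is clamped to max_pages and is not allowed to drop
         * below half of the previous window.
         */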
502         pages = hits + 2;
503         if (pages == 2) {
504                 /*
505                  * We can have no readahead hits to judge by: but must not get
506                  * stuck here forever, so check for an adjacent offset instead
507                  * (and don't even bother to check whether swap type is same).
508                  */
509                 if (offset != prev_offset + 1 && offset != prev_offset - 1)
510                         pages = 1;
511         } else {
512                 unsigned int roundup = 4;
513                 while (roundup < pages)
514                         roundup <<= 1;
515                 pages = roundup;
516         }
517
518         if (pages > max_pages)
519                 pages = max_pages;
520
521         /* Don't shrink readahead too fast */
522         last_ra = prev_win / 2;
523         if (pages < last_ra)
524                 pages = last_ra;
525
526         return pages;
527 }
528
529 static unsigned long swapin_nr_pages(unsigned long offset)
530 {
531         static unsigned long prev_offset;
532         unsigned int hits, pages, max_pages;
533         static atomic_t last_readahead_pages;
534
535         max_pages = 1 << READ_ONCE(page_cluster);
536         if (max_pages <= 1)
537                 return 1;
538
539         hits = atomic_xchg(&swapin_readahead_hits, 0);
540         pages = __swapin_nr_pages(READ_ONCE(prev_offset), offset, hits,
541                                   max_pages,
542                                   atomic_read(&last_readahead_pages));
543         if (!hits)
544                 WRITE_ONCE(prev_offset, offset);
545         atomic_set(&last_readahead_pages, pages);
546
547         return pages;
548 }
549
550 /**
551  * swap_cluster_readahead - swap in pages in hope we need them soon
552  * @entry: swap entry of this memory
553  * @gfp_mask: memory allocation flags
554  * @vmf: fault information
555  *
556  * Returns the struct page for entry and addr, after queueing swapin.
557  *
558  * Primitive swap readahead code. We simply read an aligned block of
559  * (1 << page_cluster) entries in the swap area. This method is chosen
560  * because it doesn't cost us any seek time.  We also make sure to queue
561  * the 'original' request together with the readahead ones...
562  *
563  * This has been extended to use the NUMA policies from the mm triggering
564  * the readahead.
565  *
566  * Caller must hold down_read on vmf->vma->vm_mm->mmap_sem if vmf->vma is not NULL.
567  */
568 struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
569                                 struct vm_fault *vmf)
570 {
571         struct page *page;
572         unsigned long entry_offset = swp_offset(entry);
573         unsigned long offset = entry_offset;
574         unsigned long start_offset, end_offset;
575         unsigned long mask;
576         struct swap_info_struct *si = swp_swap_info(entry);
577         struct blk_plug plug;
578         bool do_poll = true, page_allocated;
579         struct vm_area_struct *vma = vmf->vma;
580         unsigned long addr = vmf->address;
581
582         mask = swapin_nr_pages(offset) - 1;
583         if (!mask)
584                 goto skip;
585
586         do_poll = false;
587         /* Read a page_cluster sized and aligned cluster around offset. */
588         start_offset = offset & ~mask;
589         end_offset = offset | mask;
590         if (!start_offset)      /* First page is swap header. */
591                 start_offset++;
592         if (end_offset >= si->max)
593                 end_offset = si->max - 1;
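        /*
         * Example (a sketch): with a window of 8 pages, mask == 7, so a fault
         * at swap offset 0x2c3 reads offsets 0x2c0..0x2c7, minus the swap
         * header slot and anything past the end of the device.
         */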
594
595         blk_start_plug(&plug);
596         for (offset = start_offset; offset <= end_offset ; offset++) {
597                 /* Ok, do the async read-ahead now */
598                 page = __read_swap_cache_async(
599                         swp_entry(swp_type(entry), offset),
600                         gfp_mask, vma, addr, &page_allocated);
601                 if (!page)
602                         continue;
603                 if (page_allocated) {
604                         swap_readpage(page, false);
605                         if (offset != entry_offset) {
606                                 SetPageReadahead(page);
607                                 count_vm_event(SWAP_RA);
608                         }
609                 }
610                 put_page(page);
611         }
612         blk_finish_plug(&plug);
613
614         lru_add_drain();        /* Push any new pages onto the LRU now */
615 skip:
616         return read_swap_cache_async(entry, gfp_mask, vma, addr, do_poll);
617 }
618
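/*
 * Each swap device gets an array of address_spaces, one per
 * SWAP_ADDRESS_SPACE_PAGES worth of swap slots (typically 64MB with 4K
 * pages), so that i_pages lock contention is spread across the device.
 */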
619 int init_swap_address_space(unsigned int type, unsigned long nr_pages)
620 {
621         struct address_space *spaces, *space;
622         unsigned int i, nr;
623
624         nr = DIV_ROUND_UP(nr_pages, SWAP_ADDRESS_SPACE_PAGES);
625         spaces = kvcalloc(nr, sizeof(struct address_space), GFP_KERNEL);
626         if (!spaces)
627                 return -ENOMEM;
628         for (i = 0; i < nr; i++) {
629                 space = spaces + i;
630                 INIT_RADIX_TREE(&space->i_pages, GFP_ATOMIC|__GFP_NOWARN);
631                 atomic_set(&space->i_mmap_writable, 0);
632                 space->a_ops = &swap_aops;
633                 /* swap cache doesn't use writeback related tags */
634                 mapping_set_no_writeback_tags(space);
635         }
636         nr_swapper_spaces[type] = nr;
637         rcu_assign_pointer(swapper_spaces[type], spaces);
638
639         return 0;
640 }
641
642 void exit_swap_address_space(unsigned int type)
643 {
644         struct address_space *spaces;
645
646         spaces = swapper_spaces[type];
647         nr_swapper_spaces[type] = 0;
648         rcu_assign_pointer(swapper_spaces[type], NULL);
649         synchronize_rcu();
650         kvfree(spaces);
651 }
652
653 static inline void swap_ra_clamp_pfn(struct vm_area_struct *vma,
654                                      unsigned long faddr,
655                                      unsigned long lpfn,
656                                      unsigned long rpfn,
657                                      unsigned long *start,
658                                      unsigned long *end)
659 {
660         *start = max3(lpfn, PFN_DOWN(vma->vm_start),
661                       PFN_DOWN(faddr & PMD_MASK));
662         *end = min3(rpfn, PFN_DOWN(vma->vm_end),
663                     PFN_DOWN((faddr & PMD_MASK) + PMD_SIZE));
664 }
665
666 static void swap_ra_info(struct vm_fault *vmf,
667                         struct vma_swap_readahead *ra_info)
668 {
669         struct vm_area_struct *vma = vmf->vma;
670         unsigned long ra_val;
671         swp_entry_t entry;
672         unsigned long faddr, pfn, fpfn;
673         unsigned long start, end;
674         pte_t *pte, *orig_pte;
675         unsigned int max_win, hits, prev_win, win, left;
676 #ifndef CONFIG_64BIT
677         pte_t *tpte;
678 #endif
679
680         max_win = 1 << min_t(unsigned int, READ_ONCE(page_cluster),
681                              SWAP_RA_ORDER_CEILING);
682         if (max_win == 1) {
683                 ra_info->win = 1;
684                 return;
685         }
686
687         faddr = vmf->address;
688         orig_pte = pte = pte_offset_map(vmf->pmd, faddr);
689         entry = pte_to_swp_entry(*pte);
690         if ((unlikely(non_swap_entry(entry)))) {
691                 pte_unmap(orig_pte);
692                 return;
693         }
694
695         fpfn = PFN_DOWN(faddr);
696         ra_val = GET_SWAP_RA_VAL(vma);
697         pfn = PFN_DOWN(SWAP_RA_ADDR(ra_val));
698         prev_win = SWAP_RA_WIN(ra_val);
699         hits = SWAP_RA_HITS(ra_val);
700         ra_info->win = win = __swapin_nr_pages(pfn, fpfn, hits,
701                                                max_win, prev_win);
702         atomic_long_set(&vma->swap_readahead_info,
703                         SWAP_RA_VAL(faddr, win, 0));
704
705         if (win == 1) {
706                 pte_unmap(orig_pte);
707                 return;
708         }
709
710         /* Copy the PTEs because the page table may be unmapped */
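        /*
         * Pick the window placement from the previous fault recorded in
         * swap_readahead_info: read forward if this fault is just after it,
         * backward if it is just before it, otherwise roughly centre the
         * window on the faulting pfn.
         */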
711         if (fpfn == pfn + 1)
712                 swap_ra_clamp_pfn(vma, faddr, fpfn, fpfn + win, &start, &end);
713         else if (pfn == fpfn + 1)
714                 swap_ra_clamp_pfn(vma, faddr, fpfn - win + 1, fpfn + 1,
715                                   &start, &end);
716         else {
717                 left = (win - 1) / 2;
718                 swap_ra_clamp_pfn(vma, faddr, fpfn - left, fpfn + win - left,
719                                   &start, &end);
720         }
721         ra_info->nr_pte = end - start;
722         ra_info->offset = fpfn - start;
723         pte -= ra_info->offset;
724 #ifdef CONFIG_64BIT
725         ra_info->ptes = pte;
726 #else
727         tpte = ra_info->ptes;
728         for (pfn = start; pfn != end; pfn++)
729                 *tpte++ = *pte++;
730 #endif
731         pte_unmap(orig_pte);
732 }
733
734 static struct page *swap_vma_readahead(swp_entry_t fentry, gfp_t gfp_mask,
735                                        struct vm_fault *vmf)
736 {
737         struct blk_plug plug;
738         struct vm_area_struct *vma = vmf->vma;
739         struct page *page;
740         pte_t *pte, pentry;
741         swp_entry_t entry;
742         unsigned int i;
743         bool page_allocated;
744         struct vma_swap_readahead ra_info = {0,};
745
746         swap_ra_info(vmf, &ra_info);
747         if (ra_info.win == 1)
748                 goto skip;
749
750         blk_start_plug(&plug);
751         for (i = 0, pte = ra_info.ptes; i < ra_info.nr_pte;
752              i++, pte++) {
753                 pentry = *pte;
754                 if (pte_none(pentry))
755                         continue;
756                 if (pte_present(pentry))
757                         continue;
758                 entry = pte_to_swp_entry(pentry);
759                 if (unlikely(non_swap_entry(entry)))
760                         continue;
761                 page = __read_swap_cache_async(entry, gfp_mask, vma,
762                                                vmf->address, &page_allocated);
763                 if (!page)
764                         continue;
765                 if (page_allocated) {
766                         swap_readpage(page, false);
767                         if (i != ra_info.offset) {
768                                 SetPageReadahead(page);
769                                 count_vm_event(SWAP_RA);
770                         }
771                 }
772                 put_page(page);
773         }
774         blk_finish_plug(&plug);
775         lru_add_drain();
776 skip:
777         return read_swap_cache_async(fentry, gfp_mask, vma, vmf->address,
778                                      ra_info.win == 1);
779 }
780
781 /**
782  * swapin_readahead - swap in pages in hope we need them soon
783  * @entry: swap entry of this memory
784  * @gfp_mask: memory allocation flags
785  * @vmf: fault information
786  *
787  * Returns the struct page for entry and addr, after queueing swapin.
788  *
789  * It's the main entry function for swap readahead. Depending on the
790  * configuration, it reads ahead blocks either cluster-based (i.e. physical
791  * disk based) or vma-based (i.e. based on the faulting virtual address).
792  */
793 struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
794                                 struct vm_fault *vmf)
795 {
796         return swap_use_vma_readahead() ?
797                         swap_vma_readahead(entry, gfp_mask, vmf) :
798                         swap_cluster_readahead(entry, gfp_mask, vmf);
799 }
800
801 #ifdef CONFIG_SYSFS
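/*
 * The knob below appears as /sys/kernel/mm/swap/vma_ra_enabled (mm_kobj is
 * the "mm" kobject under /sys/kernel); writing "false" or "0" makes new
 * faults fall back to cluster readahead.
 */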
802 static ssize_t vma_ra_enabled_show(struct kobject *kobj,
803                                      struct kobj_attribute *attr, char *buf)
804 {
805         return sprintf(buf, "%s\n", enable_vma_readahead ? "true" : "false");
806 }
807 static ssize_t vma_ra_enabled_store(struct kobject *kobj,
808                                       struct kobj_attribute *attr,
809                                       const char *buf, size_t count)
810 {
811         if (!strncmp(buf, "true", 4) || !strncmp(buf, "1", 1))
812                 enable_vma_readahead = true;
813         else if (!strncmp(buf, "false", 5) || !strncmp(buf, "0", 1))
814                 enable_vma_readahead = false;
815         else
816                 return -EINVAL;
817
818         return count;
819 }
820 static struct kobj_attribute vma_ra_enabled_attr =
821         __ATTR(vma_ra_enabled, 0644, vma_ra_enabled_show,
822                vma_ra_enabled_store);
823
824 static struct attribute *swap_attrs[] = {
825         &vma_ra_enabled_attr.attr,
826         NULL,
827 };
828
829 static struct attribute_group swap_attr_group = {
830         .attrs = swap_attrs,
831 };
832
833 static int __init swap_init_sysfs(void)
834 {
835         int err;
836         struct kobject *swap_kobj;
837
838         swap_kobj = kobject_create_and_add("swap", mm_kobj);
839         if (!swap_kobj) {
840                 pr_err("failed to create swap kobject\n");
841                 return -ENOMEM;
842         }
843         err = sysfs_create_group(swap_kobj, &swap_attr_group);
844         if (err) {
845                 pr_err("failed to register swap group\n");
846                 goto delete_obj;
847         }
848         return 0;
849
850 delete_obj:
851         kobject_put(swap_kobj);
852         return err;
853 }
854 subsys_initcall(swap_init_sysfs);
855 #endif