GNU Linux-libre 4.14.290-gnu1
[releases.git] / arch / powerpc / include / asm / book3s / 64 / tlbflush.h
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _ASM_POWERPC_BOOK3S_64_TLBFLUSH_H
3 #define _ASM_POWERPC_BOOK3S_64_TLBFLUSH_H
4
5 #define MMU_NO_CONTEXT  ~0UL
6
7
8 #include <asm/book3s/64/tlbflush-hash.h>
9 #include <asm/book3s/64/tlbflush-radix.h>
10
#define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
/* Flush TLB entries covering a PMD-mapped range of @vma. */
static inline void flush_pmd_tlb_range(struct vm_area_struct *vma,
				       unsigned long start, unsigned long end)
{
	if (radix_enabled())
		radix__flush_pmd_tlb_range(vma, start, end);
	else
		hash__flush_tlb_range(vma, start, end);
}
19
#define __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
/* Flush TLB entries covering a hugetlb range of @vma. */
static inline void flush_hugetlb_tlb_range(struct vm_area_struct *vma,
					   unsigned long start,
					   unsigned long end)
{
	if (radix_enabled())
		radix__flush_hugetlb_tlb_range(vma, start, end);
	else
		hash__flush_tlb_range(vma, start, end);
}
29
/* Flush user TLB entries for [start, end) within @vma. */
static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	if (radix_enabled())
		radix__flush_tlb_range(vma, start, end);
	else
		hash__flush_tlb_range(vma, start, end);
}
37
/* Flush kernel-space TLB entries for [start, end). */
static inline void flush_tlb_kernel_range(unsigned long start,
					  unsigned long end)
{
	if (radix_enabled())
		radix__flush_tlb_kernel_range(start, end);
	else
		hash__flush_tlb_kernel_range(start, end);
}
45
/* Flush all TLB entries for @mm on the local CPU only. */
static inline void local_flush_tlb_mm(struct mm_struct *mm)
{
	if (radix_enabled())
		radix__local_flush_tlb_mm(mm);
	else
		hash__local_flush_tlb_mm(mm);
}
52
/* Flush the TLB entry for one page of @vma on the local CPU only. */
static inline void local_flush_tlb_page(struct vm_area_struct *vma,
					unsigned long vmaddr)
{
	if (radix_enabled())
		radix__local_flush_tlb_page(vma, vmaddr);
	else
		hash__local_flush_tlb_page(vma, vmaddr);
}
60
/* mmu_gather completion hook: perform the gathered TLB invalidations. */
static inline void tlb_flush(struct mmu_gather *tlb)
{
	if (radix_enabled())
		radix__tlb_flush(tlb);
	else
		hash__tlb_flush(tlb);
}
67
#ifdef CONFIG_SMP
/* Flush all TLB entries for @mm (SMP: may involve other CPUs). */
static inline void flush_tlb_mm(struct mm_struct *mm)
{
	if (radix_enabled())
		radix__flush_tlb_mm(mm);
	else
		hash__flush_tlb_mm(mm);
}

/* Flush the TLB entry for one page of @vma (SMP: may involve other CPUs). */
static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long vmaddr)
{
	if (radix_enabled())
		radix__flush_tlb_page(vma, vmaddr);
	else
		hash__flush_tlb_page(vma, vmaddr);
}
#else
/* UP build: the local variants are sufficient. */
#define flush_tlb_mm(mm)                local_flush_tlb_mm(mm)
#define flush_tlb_page(vma, addr)       local_flush_tlb_page(vma, addr)
#endif /* CONFIG_SMP */
/*
 * flush the page walk cache for the address
 */
static inline void flush_tlb_pgtable(struct mmu_gather *tlb, unsigned long address)
{
	/*
	 * Called when freeing a page table. The upper/higher level page
	 * table entry has already been marked none by this point, so it
	 * is safe to flush the page walk cache (PWC) here. Only the
	 * radix MMU has anything to flush; otherwise this is a no-op.
	 */
	if (radix_enabled())
		radix__flush_tlb_pwc(tlb, address);
}
102 #endif /*  _ASM_POWERPC_BOOK3S_64_TLBFLUSH_H */