GNU Linux-libre 4.14.266-gnu1
/*
 * arch/sh/mm/cache.c
 *
 * Copyright (C) 1999, 2000, 2002  Niibe Yutaka
 * Copyright (C) 2002 - 2010  Paul Mundt
 *
 * Released under the terms of the GNU GPL v2.0.
 */
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>

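/*
 * Per-CPU cache maintenance hooks. These all default to cache_noop and
 * are pointed at the real routines by the CPU-specific cache
 * initialization code invoked from cpu_cache_init() below.
 */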
void (*local_flush_cache_all)(void *args) = cache_noop;
void (*local_flush_cache_mm)(void *args) = cache_noop;
void (*local_flush_cache_dup_mm)(void *args) = cache_noop;
void (*local_flush_cache_page)(void *args) = cache_noop;
void (*local_flush_cache_range)(void *args) = cache_noop;
void (*local_flush_dcache_page)(void *args) = cache_noop;
void (*local_flush_icache_range)(void *args) = cache_noop;
void (*local_flush_icache_page)(void *args) = cache_noop;
void (*local_flush_cache_sigtramp)(void *args) = cache_noop;

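/*
 * Low-level region operations: write-back (clean), purge (write-back +
 * invalidate) and invalidate. Like the hooks above, these are filled in
 * by the CPU-specific cache initialization code.
 */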
void (*__flush_wback_region)(void *start, int size);
EXPORT_SYMBOL(__flush_wback_region);
void (*__flush_purge_region)(void *start, int size);
EXPORT_SYMBOL(__flush_purge_region);
void (*__flush_invalidate_region)(void *start, int size);
EXPORT_SYMBOL(__flush_invalidate_region);

static inline void noop__flush_region(void *start, int size)
{
}

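/*
 * Run a cache operation on the local CPU and, on parts that need a
 * cross-core flush (SH-X3), on the other online CPUs via IPI.
 */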
static inline void cacheop_on_each_cpu(void (*func) (void *info), void *info,
                                   int wait)
{
        preempt_disable();

        /* Needing IPI for cross-core flush is SHX3-specific. */
#ifdef CONFIG_CPU_SHX3
        /*
         * It's possible that this gets called early on when IRQs are
         * still disabled due to ioremapping by the boot CPU, so don't
         * even attempt IPIs unless there are other CPUs online.
         */
        if (num_online_cpus() > 1)
                smp_call_function(func, info, wait);
#endif

        func(info);

        preempt_enable();
}

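/*
 * Copy data into a user page (e.g. via access_process_vm()). If the
 * D-cache can alias and the page is mapped and still marked clean,
 * write through a kernel mapping of the right cache colour; otherwise
 * copy through the regular kernel address and mark the page as needing
 * a D-cache flush. Executable mappings also get the page's cache lines
 * flushed so the I-cache stays coherent.
 */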
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
                       unsigned long vaddr, void *dst, const void *src,
                       unsigned long len)
{
        if (boot_cpu_data.dcache.n_aliases && page_mapcount(page) &&
            test_bit(PG_dcache_clean, &page->flags)) {
                void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
                memcpy(vto, src, len);
                kunmap_coherent(vto);
        } else {
                memcpy(dst, src, len);
                if (boot_cpu_data.dcache.n_aliases)
                        clear_bit(PG_dcache_clean, &page->flags);
        }

        if (vma->vm_flags & VM_EXEC)
                flush_cache_page(vma, vaddr, page_to_pfn(page));
}

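/*
 * Read data out of a user page, going through a coherent kernel mapping
 * when D-cache aliasing could otherwise hand back stale data.
 */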
void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
                         unsigned long vaddr, void *dst, const void *src,
                         unsigned long len)
{
        if (boot_cpu_data.dcache.n_aliases && page_mapcount(page) &&
            test_bit(PG_dcache_clean, &page->flags)) {
                void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
                memcpy(dst, vfrom, len);
                kunmap_coherent(vfrom);
        } else {
                memcpy(dst, src, len);
                if (boot_cpu_data.dcache.n_aliases)
                        clear_bit(PG_dcache_clean, &page->flags);
        }
}

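/*
 * Copy a user page (e.g. on a copy-on-write fault), reading the source
 * through a coherent mapping when it may be aliased, and purging the
 * destination's kernel-side cache lines when they could alias the user
 * mapping or the VMA is executable.
 */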
void copy_user_highpage(struct page *to, struct page *from,
                        unsigned long vaddr, struct vm_area_struct *vma)
{
        void *vfrom, *vto;

        vto = kmap_atomic(to);

        if (boot_cpu_data.dcache.n_aliases && page_mapcount(from) &&
            test_bit(PG_dcache_clean, &from->flags)) {
                vfrom = kmap_coherent(from, vaddr);
                copy_page(vto, vfrom);
                kunmap_coherent(vfrom);
        } else {
                vfrom = kmap_atomic(from);
                copy_page(vto, vfrom);
                kunmap_atomic(vfrom);
        }

        if (pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK) ||
            (vma->vm_flags & VM_EXEC))
                __flush_purge_region(vto, PAGE_SIZE);

        kunmap_atomic(vto);
        /* Make sure this page is copied on other CPUs too before using it */
        smp_wmb();
}
EXPORT_SYMBOL(copy_user_highpage);

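/*
 * Zero a user page, purging the kernel-side cache lines if they would
 * alias with the user mapping at vaddr.
 */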
void clear_user_highpage(struct page *page, unsigned long vaddr)
{
        void *kaddr = kmap_atomic(page);

        clear_page(kaddr);

        if (pages_do_alias((unsigned long)kaddr, vaddr & PAGE_MASK))
                __flush_purge_region(kaddr, PAGE_SIZE);

        kunmap_atomic(kaddr);
}
EXPORT_SYMBOL(clear_user_highpage);

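/*
 * Called when a PTE is being established for a page: if the page is not
 * already marked clean in the D-cache sense, purge its kernel mapping so
 * the new user mapping starts out coherent. Only relevant on aliasing
 * D-caches.
 */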
void __update_cache(struct vm_area_struct *vma,
                    unsigned long address, pte_t pte)
{
        struct page *page;
        unsigned long pfn = pte_pfn(pte);

        if (!boot_cpu_data.dcache.n_aliases)
                return;

        page = pfn_to_page(pfn);
        if (pfn_valid(pfn)) {
                int dirty = !test_and_set_bit(PG_dcache_clean, &page->flags);
                if (dirty)
                        __flush_purge_region(page_address(page), PAGE_SIZE);
        }
}

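/*
 * Flush an anonymous page whose kernel mapping may alias with the user
 * mapping at vmaddr.
 */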
void __flush_anon_page(struct page *page, unsigned long vmaddr)
{
        unsigned long addr = (unsigned long) page_address(page);

        if (pages_do_alias(addr, vmaddr)) {
                if (boot_cpu_data.dcache.n_aliases && page_mapcount(page) &&
                    test_bit(PG_dcache_clean, &page->flags)) {
                        void *kaddr;

                        kaddr = kmap_coherent(page, vmaddr);
                        /* XXX.. For now kunmap_coherent() does a purge */
                        /* __flush_purge_region((void *)kaddr, PAGE_SIZE); */
                        kunmap_coherent(kaddr);
                } else
                        __flush_purge_region((void *)addr, PAGE_SIZE);
        }
}

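/*
 * The flush_* entry points below simply fan the corresponding
 * local_flush_* hook out to all CPUs via cacheop_on_each_cpu().
 */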
void flush_cache_all(void)
{
        cacheop_on_each_cpu(local_flush_cache_all, NULL, 1);
}
EXPORT_SYMBOL(flush_cache_all);

void flush_cache_mm(struct mm_struct *mm)
{
        if (boot_cpu_data.dcache.n_aliases == 0)
                return;

        cacheop_on_each_cpu(local_flush_cache_mm, mm, 1);
}

void flush_cache_dup_mm(struct mm_struct *mm)
{
        if (boot_cpu_data.dcache.n_aliases == 0)
                return;

        cacheop_on_each_cpu(local_flush_cache_dup_mm, mm, 1);
}

void flush_cache_page(struct vm_area_struct *vma, unsigned long addr,
                      unsigned long pfn)
{
        struct flusher_data data;

        data.vma = vma;
        data.addr1 = addr;
        data.addr2 = pfn;

        cacheop_on_each_cpu(local_flush_cache_page, (void *)&data, 1);
}

void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
                       unsigned long end)
{
        struct flusher_data data;

        data.vma = vma;
        data.addr1 = start;
        data.addr2 = end;

        cacheop_on_each_cpu(local_flush_cache_range, (void *)&data, 1);
}
EXPORT_SYMBOL(flush_cache_range);

void flush_dcache_page(struct page *page)
{
        cacheop_on_each_cpu(local_flush_dcache_page, page, 1);
}
EXPORT_SYMBOL(flush_dcache_page);

void flush_icache_range(unsigned long start, unsigned long end)
{
        struct flusher_data data;

        data.vma = NULL;
        data.addr1 = start;
        data.addr2 = end;

        cacheop_on_each_cpu(local_flush_icache_range, (void *)&data, 1);
}
EXPORT_SYMBOL(flush_icache_range);

void flush_icache_page(struct vm_area_struct *vma, struct page *page)
{
        /* Nothing uses the VMA, so just pass the struct page along */
        cacheop_on_each_cpu(local_flush_icache_page, page, 1);
}

void flush_cache_sigtramp(unsigned long address)
{
        cacheop_on_each_cpu(local_flush_cache_sigtramp, (void *)address, 1);
}

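/*
 * Derive the alias geometry of a cache: alias_mask covers the set-index
 * bits that lie above the page offset within one way, and n_aliases is
 * the resulting number of page colours (0 when the cache cannot alias).
 */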
static void compute_alias(struct cache_info *c)
{
#ifdef CONFIG_MMU
        c->alias_mask = ((c->sets - 1) << c->entry_shift) & ~(PAGE_SIZE - 1);
#else
        c->alias_mask = 0;
#endif
        c->n_aliases = c->alias_mask ? (c->alias_mask >> PAGE_SHIFT) + 1 : 0;
}

static void __init emit_cache_params(void)
{
        printk(KERN_NOTICE "I-cache : n_ways=%d n_sets=%d way_incr=%d\n",
                boot_cpu_data.icache.ways,
                boot_cpu_data.icache.sets,
                boot_cpu_data.icache.way_incr);
        printk(KERN_NOTICE "I-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
                boot_cpu_data.icache.entry_mask,
                boot_cpu_data.icache.alias_mask,
                boot_cpu_data.icache.n_aliases);
        printk(KERN_NOTICE "D-cache : n_ways=%d n_sets=%d way_incr=%d\n",
                boot_cpu_data.dcache.ways,
                boot_cpu_data.dcache.sets,
                boot_cpu_data.dcache.way_incr);
        printk(KERN_NOTICE "D-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
                boot_cpu_data.dcache.entry_mask,
                boot_cpu_data.dcache.alias_mask,
                boot_cpu_data.dcache.n_aliases);

        /*
         * Emit Secondary Cache parameters if the CPU has a probed L2.
         */
        if (boot_cpu_data.flags & CPU_HAS_L2_CACHE) {
                printk(KERN_NOTICE "S-cache : n_ways=%d n_sets=%d way_incr=%d\n",
                        boot_cpu_data.scache.ways,
                        boot_cpu_data.scache.sets,
                        boot_cpu_data.scache.way_incr);
                printk(KERN_NOTICE "S-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
                        boot_cpu_data.scache.entry_mask,
                        boot_cpu_data.scache.alias_mask,
                        boot_cpu_data.scache.n_aliases);
        }
}

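/*
 * Compute the alias parameters, install the noop callbacks, and then
 * hand off to the family-specific cache setup (J2, SH-2, SH-2A, SH-3,
 * SH-4/SH-4A/SH4AL-DSP, SH-5), skipping all of it when the caches are
 * disabled in CCR.
 */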
void __init cpu_cache_init(void)
{
        unsigned int cache_disabled = 0;

#ifdef SH_CCR
        cache_disabled = !(__raw_readl(SH_CCR) & CCR_CACHE_ENABLE);
#endif

        compute_alias(&boot_cpu_data.icache);
        compute_alias(&boot_cpu_data.dcache);
        compute_alias(&boot_cpu_data.scache);

        __flush_wback_region            = noop__flush_region;
        __flush_purge_region            = noop__flush_region;
        __flush_invalidate_region       = noop__flush_region;

        /*
         * No flushing is necessary in the disabled cache case so we can
         * just keep the noop functions in local_flush_..() and __flush_..()
         */
        if (unlikely(cache_disabled))
                goto skip;

        if (boot_cpu_data.type == CPU_J2) {
                extern void __weak j2_cache_init(void);

                j2_cache_init();
        } else if (boot_cpu_data.family == CPU_FAMILY_SH2) {
                extern void __weak sh2_cache_init(void);

                sh2_cache_init();
        }

        if (boot_cpu_data.family == CPU_FAMILY_SH2A) {
                extern void __weak sh2a_cache_init(void);

                sh2a_cache_init();
        }

        if (boot_cpu_data.family == CPU_FAMILY_SH3) {
                extern void __weak sh3_cache_init(void);

                sh3_cache_init();

                if ((boot_cpu_data.type == CPU_SH7705) &&
                    (boot_cpu_data.dcache.sets == 512)) {
                        extern void __weak sh7705_cache_init(void);

                        sh7705_cache_init();
                }
        }

        if ((boot_cpu_data.family == CPU_FAMILY_SH4) ||
            (boot_cpu_data.family == CPU_FAMILY_SH4A) ||
            (boot_cpu_data.family == CPU_FAMILY_SH4AL_DSP)) {
                extern void __weak sh4_cache_init(void);

                sh4_cache_init();

                if ((boot_cpu_data.type == CPU_SH7786) ||
                    (boot_cpu_data.type == CPU_SHX3)) {
                        extern void __weak shx3_cache_init(void);

                        shx3_cache_init();
                }
        }

        if (boot_cpu_data.family == CPU_FAMILY_SH5) {
                extern void __weak sh5_cache_init(void);

                sh5_cache_init();
        }

skip:
        emit_cache_params();
}