1 /*
2  * srmmu.c:  SRMMU specific routines for memory management.
3  *
4  * Copyright (C) 1995 David S. Miller  (davem@caip.rutgers.edu)
5  * Copyright (C) 1995,2002 Pete Zaitcev (zaitcev@yahoo.com)
6  * Copyright (C) 1996 Eddie C. Dost    (ecd@skynet.be)
7  * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
8  * Copyright (C) 1999,2000 Anton Blanchard (anton@samba.org)
9  */
10
11 #include <linux/seq_file.h>
12 #include <linux/spinlock.h>
13 #include <linux/bootmem.h>
14 #include <linux/pagemap.h>
15 #include <linux/vmalloc.h>
16 #include <linux/kdebug.h>
17 #include <linux/export.h>
18 #include <linux/kernel.h>
19 #include <linux/init.h>
20 #include <linux/log2.h>
21 #include <linux/gfp.h>
22 #include <linux/fs.h>
23 #include <linux/mm.h>
24
25 #include <asm/mmu_context.h>
26 #include <asm/cacheflush.h>
27 #include <asm/tlbflush.h>
28 #include <asm/io-unit.h>
29 #include <asm/pgalloc.h>
30 #include <asm/pgtable.h>
31 #include <asm/bitext.h>
32 #include <asm/vaddrs.h>
33 #include <asm/cache.h>
34 #include <asm/traps.h>
35 #include <asm/oplib.h>
36 #include <asm/mbus.h>
37 #include <asm/page.h>
38 #include <asm/asi.h>
39 #include <asm/msi.h>
40 #include <asm/smp.h>
41 #include <asm/io.h>
42
43 /* Now the cpu specific definitions. */
44 #include <asm/turbosparc.h>
45 #include <asm/tsunami.h>
46 #include <asm/viking.h>
47 #include <asm/swift.h>
48 #include <asm/leon.h>
49 #include <asm/mxcc.h>
50 #include <asm/ross.h>
51
52 #include "mm_32.h"
53
54 enum mbus_module srmmu_modtype;
55 static unsigned int hwbug_bitmask;
56 int vac_cache_size;
57 int vac_line_size;
58
59 extern struct resource sparc_iomap;
60
61 extern unsigned long last_valid_pfn;
62
63 static pgd_t *srmmu_swapper_pg_dir;
64
65 const struct sparc32_cachetlb_ops *sparc32_cachetlb_ops;
66 EXPORT_SYMBOL(sparc32_cachetlb_ops);
67
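   /*
    * FLUSH_BEGIN/FLUSH_END bracket the flush routines further down: on UP
    * kernels they skip the flush for an mm that never received an MMU
    * context (NO_CONTEXT); the SMP build leaves them empty.
    */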
68 #ifdef CONFIG_SMP
69 const struct sparc32_cachetlb_ops *local_ops;
70
71 #define FLUSH_BEGIN(mm)
72 #define FLUSH_END
73 #else
74 #define FLUSH_BEGIN(mm) if ((mm)->context != NO_CONTEXT) {
75 #define FLUSH_END       }
76 #endif
77
78 int flush_page_for_dma_global = 1;
79
80 char *srmmu_name;
81
82 ctxd_t *srmmu_ctx_table_phys;
83 static ctxd_t *srmmu_context_table;
84
85 int viking_mxcc_present;
86 static DEFINE_SPINLOCK(srmmu_context_spinlock);
87
88 static int is_hypersparc;
89
90 static int srmmu_cache_pagetables;
91
92 /* these will be initialized in srmmu_nocache_calcsize() */
93 static unsigned long srmmu_nocache_size;
94 static unsigned long srmmu_nocache_end;
95
96 /* 1 bit <=> 256 bytes of nocache <=> 64 PTEs */
97 #define SRMMU_NOCACHE_BITMAP_SHIFT (PAGE_SHIFT - 4)
98
99 /* The context table is a nocache user with the biggest alignment needs. */
100 #define SRMMU_NOCACHE_ALIGN_MAX (sizeof(ctxd_t)*SRMMU_MAX_CONTEXTS)
101
102 void *srmmu_nocache_pool;
103 static struct bit_map srmmu_nocache_map;
104
105 static inline int srmmu_pmd_none(pmd_t pmd)
106 { return !(pmd_val(pmd) & 0xFFFFFFF); }
107
108 /* XXX should we hyper_flush_whole_icache here - Anton */
109 static inline void srmmu_ctxd_set(ctxd_t *ctxp, pgd_t *pgdp)
110 {
111         pte_t pte;
112
113         pte = __pte((SRMMU_ET_PTD | (__nocache_pa(pgdp) >> 4)));
114         set_pte((pte_t *)ctxp, pte);
115 }
116
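    /*
     * Linux packs several hardware page tables into one PTE page: each pmd
     * entry therefore carries a small array of hardware PTD pointers
     * (pmdv[]), and pmd_set()/pmd_populate() point those at consecutive
     * SRMMU_REAL_PTRS_PER_PTE-entry chunks of the same PTE page.
     */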
117 void pmd_set(pmd_t *pmdp, pte_t *ptep)
118 {
119         unsigned long ptp;      /* Physical address, shifted right by 4 */
120         int i;
121
122         ptp = __nocache_pa(ptep) >> 4;
123         for (i = 0; i < PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE; i++) {
124                 set_pte((pte_t *)&pmdp->pmdv[i], __pte(SRMMU_ET_PTD | ptp));
125                 ptp += (SRMMU_REAL_PTRS_PER_PTE * sizeof(pte_t) >> 4);
126         }
127 }
128
129 void pmd_populate(struct mm_struct *mm, pmd_t *pmdp, struct page *ptep)
130 {
131         unsigned long ptp;      /* Physical address, shifted right by 4 */
132         int i;
133
134         ptp = page_to_pfn(ptep) << (PAGE_SHIFT-4);      /* watch for overflow */
135         for (i = 0; i < PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE; i++) {
136                 set_pte((pte_t *)&pmdp->pmdv[i], __pte(SRMMU_ET_PTD | ptp));
137                 ptp += (SRMMU_REAL_PTRS_PER_PTE * sizeof(pte_t) >> 4);
138         }
139 }
140
141 /* Find an entry in the third-level page table. */
142 pte_t *pte_offset_kernel(pmd_t *dir, unsigned long address)
143 {
144         void *pte;
145
146         pte = __nocache_va((dir->pmdv[0] & SRMMU_PTD_PMASK) << 4);
147         return (pte_t *) pte +
148             ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
149 }
150
151 /*
152  * size: bytes to allocate in the nocache area.
153  * align: bytes, number to align at.
154  * Returns the virtual address of the allocated area.
155  */
156 static void *__srmmu_get_nocache(int size, int align)
157 {
158         int offset;
159         unsigned long addr;
160
161         if (size < SRMMU_NOCACHE_BITMAP_SHIFT) {
162                 printk(KERN_ERR "Size 0x%x too small for nocache request\n",
163                        size);
164                 size = SRMMU_NOCACHE_BITMAP_SHIFT;
165         }
166         if (size & (SRMMU_NOCACHE_BITMAP_SHIFT - 1)) {
167                 printk(KERN_ERR "Size 0x%x unaligned in nocache request\n",
168                        size);
169                 size += SRMMU_NOCACHE_BITMAP_SHIFT - 1;
170         }
171         BUG_ON(align > SRMMU_NOCACHE_ALIGN_MAX);
172
173         offset = bit_map_string_get(&srmmu_nocache_map,
174                                     size >> SRMMU_NOCACHE_BITMAP_SHIFT,
175                                     align >> SRMMU_NOCACHE_BITMAP_SHIFT);
176         if (offset == -1) {
177                 printk(KERN_ERR "srmmu: out of nocache %d: %d/%d\n",
178                        size, (int) srmmu_nocache_size,
179                        srmmu_nocache_map.used << SRMMU_NOCACHE_BITMAP_SHIFT);
180                 return NULL;
181         }
182
183         addr = SRMMU_NOCACHE_VADDR + (offset << SRMMU_NOCACHE_BITMAP_SHIFT);
184         return (void *)addr;
185 }
186
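    /* Like __srmmu_get_nocache(), but zero-fill the allocation. */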
187 void *srmmu_get_nocache(int size, int align)
188 {
189         void *tmp;
190
191         tmp = __srmmu_get_nocache(size, align);
192
193         if (tmp)
194                 memset(tmp, 0, size);
195
196         return tmp;
197 }
198
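    /*
     * Return a region to the nocache pool.  The region must lie inside the
     * pool, and size must be a power of two to which vaddr is aligned;
     * anything inconsistent is treated as a bug.
     */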
199 void srmmu_free_nocache(void *addr, int size)
200 {
201         unsigned long vaddr;
202         int offset;
203
204         vaddr = (unsigned long)addr;
205         if (vaddr < SRMMU_NOCACHE_VADDR) {
206                 printk("Vaddr %lx is smaller than nocache base 0x%lx\n",
207                     vaddr, (unsigned long)SRMMU_NOCACHE_VADDR);
208                 BUG();
209         }
210         if (vaddr + size > srmmu_nocache_end) {
211                 printk("Vaddr %lx is bigger than nocache end 0x%lx\n",
212                     vaddr, srmmu_nocache_end);
213                 BUG();
214         }
215         if (!is_power_of_2(size)) {
216                 printk("Size 0x%x is not a power of 2\n", size);
217                 BUG();
218         }
219         if (size < SRMMU_NOCACHE_BITMAP_SHIFT) {
220                 printk("Size 0x%x is too small\n", size);
221                 BUG();
222         }
223         if (vaddr & (size - 1)) {
224                 printk("Vaddr %lx is not aligned to size 0x%x\n", vaddr, size);
225                 BUG();
226         }
227
228         offset = (vaddr - SRMMU_NOCACHE_VADDR) >> SRMMU_NOCACHE_BITMAP_SHIFT;
229         size = size >> SRMMU_NOCACHE_BITMAP_SHIFT;
230
231         bit_map_clear(&srmmu_nocache_map, offset, size);
232 }
233
234 static void srmmu_early_allocate_ptable_skeleton(unsigned long start,
235                                                  unsigned long end);
236
237 /* Return how much physical memory we have.  */
238 static unsigned long __init probe_memory(void)
239 {
240         unsigned long total = 0;
241         int i;
242
243         for (i = 0; sp_banks[i].num_bytes; i++)
244                 total += sp_banks[i].num_bytes;
245
246         return total;
247 }
248
249 /*
250  * Reserve nocache dynamically proportionally to the amount of
251  * system RAM. -- Tomas Szepe <szepe@pinerecords.com>, June 2002
252  */
253 static void __init srmmu_nocache_calcsize(void)
254 {
255         unsigned long sysmemavail = probe_memory() / 1024;
256         int srmmu_nocache_npages;
257
258         srmmu_nocache_npages =
259                 sysmemavail / SRMMU_NOCACHE_ALCRATIO / 1024 * 256;
260
261         /* P3 XXX The 4x overuse: corroborated by /proc/meminfo. */
262         // if (srmmu_nocache_npages < 256) srmmu_nocache_npages = 256;
263         if (srmmu_nocache_npages < SRMMU_MIN_NOCACHE_PAGES)
264                 srmmu_nocache_npages = SRMMU_MIN_NOCACHE_PAGES;
265
266         /* anything above 1280 blows up */
267         if (srmmu_nocache_npages > SRMMU_MAX_NOCACHE_PAGES)
268                 srmmu_nocache_npages = SRMMU_MAX_NOCACHE_PAGES;
269
270         srmmu_nocache_size = srmmu_nocache_npages * PAGE_SIZE;
271         srmmu_nocache_end = SRMMU_NOCACHE_VADDR + srmmu_nocache_size;
272 }
273
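    /*
     * Boot-time setup of the nocache area: carve the backing pool and the
     * allocation bitmap out of bootmem, build kernel page tables covering
     * SRMMU_NOCACHE_VADDR..srmmu_nocache_end, and map the pool there
     * (uncached unless the chip is allowed to cache page tables).  The
     * kernel's swapper page directory is the first allocation made here.
     */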
274 static void __init srmmu_nocache_init(void)
275 {
276         void *srmmu_nocache_bitmap;
277         unsigned int bitmap_bits;
278         pgd_t *pgd;
279         pmd_t *pmd;
280         pte_t *pte;
281         unsigned long paddr, vaddr;
282         unsigned long pteval;
283
284         bitmap_bits = srmmu_nocache_size >> SRMMU_NOCACHE_BITMAP_SHIFT;
285
286         srmmu_nocache_pool = __alloc_bootmem(srmmu_nocache_size,
287                 SRMMU_NOCACHE_ALIGN_MAX, 0UL);
288         memset(srmmu_nocache_pool, 0, srmmu_nocache_size);
289
290         srmmu_nocache_bitmap =
291                 __alloc_bootmem(BITS_TO_LONGS(bitmap_bits) * sizeof(long),
292                                 SMP_CACHE_BYTES, 0UL);
293         bit_map_init(&srmmu_nocache_map, srmmu_nocache_bitmap, bitmap_bits);
294
295         srmmu_swapper_pg_dir = __srmmu_get_nocache(SRMMU_PGD_TABLE_SIZE, SRMMU_PGD_TABLE_SIZE);
296         memset(__nocache_fix(srmmu_swapper_pg_dir), 0, SRMMU_PGD_TABLE_SIZE);
297         init_mm.pgd = srmmu_swapper_pg_dir;
298
299         srmmu_early_allocate_ptable_skeleton(SRMMU_NOCACHE_VADDR, srmmu_nocache_end);
300
301         paddr = __pa((unsigned long)srmmu_nocache_pool);
302         vaddr = SRMMU_NOCACHE_VADDR;
303
304         while (vaddr < srmmu_nocache_end) {
305                 pgd = pgd_offset_k(vaddr);
306                 pmd = pmd_offset(__nocache_fix(pgd), vaddr);
307                 pte = pte_offset_kernel(__nocache_fix(pmd), vaddr);
308
309                 pteval = ((paddr >> 4) | SRMMU_ET_PTE | SRMMU_PRIV);
310
311                 if (srmmu_cache_pagetables)
312                         pteval |= SRMMU_CACHE;
313
314                 set_pte(__nocache_fix(pte), __pte(pteval));
315
316                 vaddr += PAGE_SIZE;
317                 paddr += PAGE_SIZE;
318         }
319
320         flush_cache_all();
321         flush_tlb_all();
322 }
323
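    /*
     * Allocate a page directory from the nocache pool: user entries are
     * cleared, kernel entries are copied from the active kernel pgd.
     */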
324 pgd_t *get_pgd_fast(void)
325 {
326         pgd_t *pgd = NULL;
327
328         pgd = __srmmu_get_nocache(SRMMU_PGD_TABLE_SIZE, SRMMU_PGD_TABLE_SIZE);
329         if (pgd) {
330                 pgd_t *init = pgd_offset_k(0);
331                 memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
332                 memcpy(pgd + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
333                                                 (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
334         }
335
336         return pgd;
337 }
338
339 /*
340  * Hardware needs alignment to 256 only, but we align to whole page size
341  * to reduce fragmentation problems due to the buddy principle.
342  * XXX Provide actual fragmentation statistics in /proc.
343  *
344  * Alignments up to the page size are the same for physical and virtual
345  * addresses of the nocache area.
346  */
347 pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
348 {
349         unsigned long pte;
350         struct page *page;
351
352         if ((pte = (unsigned long)pte_alloc_one_kernel(mm, address)) == 0)
353                 return NULL;
354         page = pfn_to_page(__nocache_pa(pte) >> PAGE_SHIFT);
355         if (!pgtable_page_ctor(page)) {
356                 __free_page(page);
357                 return NULL;
358         }
359         return page;
360 }
361
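    /*
     * Release a PTE page: translate the struct page back to its nocache
     * virtual address before returning it to the pool.
     */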
362 void pte_free(struct mm_struct *mm, pgtable_t pte)
363 {
364         unsigned long p;
365
366         pgtable_page_dtor(pte);
367         p = (unsigned long)page_address(pte);   /* Cached address (for test) */
368         if (p == 0)
369                 BUG();
370         p = page_to_pfn(pte) << PAGE_SHIFT;     /* Physical address */
371
372         /* free the non-cached virtual address */
373         srmmu_free_nocache(__nocache_va(p), PTE_SIZE);
374 }
375
376 /* context handling - a dynamically sized pool is used */
377 #define NO_CONTEXT      -1
378
379 struct ctx_list {
380         struct ctx_list *next;
381         struct ctx_list *prev;
382         unsigned int ctx_number;
383         struct mm_struct *ctx_mm;
384 };
385
386 static struct ctx_list *ctx_list_pool;
387 static struct ctx_list ctx_free;
388 static struct ctx_list ctx_used;
389
390 /* At boot time we determine the number of contexts */
391 static int num_contexts;
392
393 static inline void remove_from_ctx_list(struct ctx_list *entry)
394 {
395         entry->next->prev = entry->prev;
396         entry->prev->next = entry->next;
397 }
398
399 static inline void add_to_ctx_list(struct ctx_list *head, struct ctx_list *entry)
400 {
401         entry->next = head;
402         (entry->prev = head->prev)->next = entry;
403         head->prev = entry;
404 }
405 #define add_to_free_ctxlist(entry) add_to_ctx_list(&ctx_free, entry)
406 #define add_to_used_ctxlist(entry) add_to_ctx_list(&ctx_used, entry)
407
408
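    /*
     * Hand mm a hardware context: take one from the free list if possible,
     * otherwise steal an in-use context (skipping old_mm), flushing the
     * victim's cache and TLB and marking it NO_CONTEXT.  Called with
     * srmmu_context_spinlock held.
     */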
409 static inline void alloc_context(struct mm_struct *old_mm, struct mm_struct *mm)
410 {
411         struct ctx_list *ctxp;
412
413         ctxp = ctx_free.next;
414         if (ctxp != &ctx_free) {
415                 remove_from_ctx_list(ctxp);
416                 add_to_used_ctxlist(ctxp);
417                 mm->context = ctxp->ctx_number;
418                 ctxp->ctx_mm = mm;
419                 return;
420         }
421         ctxp = ctx_used.next;
422         if (ctxp->ctx_mm == old_mm)
423                 ctxp = ctxp->next;
424         if (ctxp == &ctx_used)
425                 panic("out of mmu contexts");
426         flush_cache_mm(ctxp->ctx_mm);
427         flush_tlb_mm(ctxp->ctx_mm);
428         remove_from_ctx_list(ctxp);
429         add_to_used_ctxlist(ctxp);
430         ctxp->ctx_mm->context = NO_CONTEXT;
431         ctxp->ctx_mm = mm;
432         mm->context = ctxp->ctx_number;
433 }
434
435 static inline void free_context(int context)
436 {
437         struct ctx_list *ctx_old;
438
439         ctx_old = ctx_list_pool + context;
440         remove_from_ctx_list(ctx_old);
441         add_to_free_ctxlist(ctx_old);
442 }
443
444 static void __init sparc_context_init(int numctx)
445 {
446         int ctx;
447         unsigned long size;
448
449         size = numctx * sizeof(struct ctx_list);
450         ctx_list_pool = __alloc_bootmem(size, SMP_CACHE_BYTES, 0UL);
451
452         for (ctx = 0; ctx < numctx; ctx++) {
453                 struct ctx_list *clist;
454
455                 clist = (ctx_list_pool + ctx);
456                 clist->ctx_number = ctx;
457                 clist->ctx_mm = NULL;
458         }
459         ctx_free.next = ctx_free.prev = &ctx_free;
460         ctx_used.next = ctx_used.prev = &ctx_used;
461         for (ctx = 0; ctx < numctx; ctx++)
462                 add_to_free_ctxlist(ctx_list_pool + ctx);
463 }
464
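    /*
     * Activate mm on this CPU: allocate a context on first use and point
     * its context table entry at mm's pgd, then (after the LEON and
     * HyperSparc quirks) load the hardware context register.
     */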
465 void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm,
466                struct task_struct *tsk)
467 {
468         unsigned long flags;
469
470         if (mm->context == NO_CONTEXT) {
471                 spin_lock_irqsave(&srmmu_context_spinlock, flags);
472                 alloc_context(old_mm, mm);
473                 spin_unlock_irqrestore(&srmmu_context_spinlock, flags);
474                 srmmu_ctxd_set(&srmmu_context_table[mm->context], mm->pgd);
475         }
476
477         if (sparc_cpu_model == sparc_leon)
478                 leon_switch_mm();
479
480         if (is_hypersparc)
481                 hyper_flush_whole_icache();
482
483         srmmu_set_context(mm->context);
484 }
485
486 /* Low level IO area allocation on the SRMMU. */
487 static inline void srmmu_mapioaddr(unsigned long physaddr,
488                                    unsigned long virt_addr, int bus_type)
489 {
490         pgd_t *pgdp;
491         pmd_t *pmdp;
492         pte_t *ptep;
493         unsigned long tmp;
494
495         physaddr &= PAGE_MASK;
496         pgdp = pgd_offset_k(virt_addr);
497         pmdp = pmd_offset(pgdp, virt_addr);
498         ptep = pte_offset_kernel(pmdp, virt_addr);
499         tmp = (physaddr >> 4) | SRMMU_ET_PTE;
500
501         /* I need to test whether this is consistent over all
502          * sun4m's.  The bus_type represents the upper 4 bits of
503          * 36-bit physical address on the I/O space lines...
504          */
505         tmp |= (bus_type << 28);
506         tmp |= SRMMU_PRIV;
507         __flush_page_to_ram(virt_addr);
508         set_pte(ptep, __pte(tmp));
509 }
510
511 void srmmu_mapiorange(unsigned int bus, unsigned long xpa,
512                       unsigned long xva, unsigned int len)
513 {
514         while (len != 0) {
515                 len -= PAGE_SIZE;
516                 srmmu_mapioaddr(xpa, xva, bus);
517                 xva += PAGE_SIZE;
518                 xpa += PAGE_SIZE;
519         }
520         flush_tlb_all();
521 }
522
523 static inline void srmmu_unmapioaddr(unsigned long virt_addr)
524 {
525         pgd_t *pgdp;
526         pmd_t *pmdp;
527         pte_t *ptep;
528
529         pgdp = pgd_offset_k(virt_addr);
530         pmdp = pmd_offset(pgdp, virt_addr);
531         ptep = pte_offset_kernel(pmdp, virt_addr);
532
533         /* No need to flush uncacheable page. */
534         __pte_clear(ptep);
535 }
536
537 void srmmu_unmapiorange(unsigned long virt_addr, unsigned int len)
538 {
539         while (len != 0) {
540                 len -= PAGE_SIZE;
541                 srmmu_unmapioaddr(virt_addr);
542                 virt_addr += PAGE_SIZE;
543         }
544         flush_tlb_all();
545 }
546
547 /* tsunami.S */
548 extern void tsunami_flush_cache_all(void);
549 extern void tsunami_flush_cache_mm(struct mm_struct *mm);
550 extern void tsunami_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
551 extern void tsunami_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
552 extern void tsunami_flush_page_to_ram(unsigned long page);
553 extern void tsunami_flush_page_for_dma(unsigned long page);
554 extern void tsunami_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr);
555 extern void tsunami_flush_tlb_all(void);
556 extern void tsunami_flush_tlb_mm(struct mm_struct *mm);
557 extern void tsunami_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
558 extern void tsunami_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
559 extern void tsunami_setup_blockops(void);
560
561 /* swift.S */
562 extern void swift_flush_cache_all(void);
563 extern void swift_flush_cache_mm(struct mm_struct *mm);
564 extern void swift_flush_cache_range(struct vm_area_struct *vma,
565                                     unsigned long start, unsigned long end);
566 extern void swift_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
567 extern void swift_flush_page_to_ram(unsigned long page);
568 extern void swift_flush_page_for_dma(unsigned long page);
569 extern void swift_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr);
570 extern void swift_flush_tlb_all(void);
571 extern void swift_flush_tlb_mm(struct mm_struct *mm);
572 extern void swift_flush_tlb_range(struct vm_area_struct *vma,
573                                   unsigned long start, unsigned long end);
574 extern void swift_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
575
576 #if 0  /* P3: deadwood to debug precise flushes on Swift. */
577 void swift_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
578 {
579         int cctx, ctx1;
580
581         page &= PAGE_MASK;
582         if ((ctx1 = vma->vm_mm->context) != -1) {
583                 cctx = srmmu_get_context();
584 /* Is context # ever different from current context? P3 */
585                 if (cctx != ctx1) {
586                         printk("flush ctx %02x curr %02x\n", ctx1, cctx);
587                         srmmu_set_context(ctx1);
588                         swift_flush_page(page);
589                         __asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
590                                         "r" (page), "i" (ASI_M_FLUSH_PROBE));
591                         srmmu_set_context(cctx);
592                 } else {
593                          /* Rm. prot. bits from virt. c. */
594                         /* swift_flush_cache_all(); */
595                         /* swift_flush_cache_page(vma, page); */
596                         swift_flush_page(page);
597
598                         __asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
599                                 "r" (page), "i" (ASI_M_FLUSH_PROBE));
600                         /* same as above: srmmu_flush_tlb_page() */
601                 }
602         }
603 }
604 #endif
605
606 /*
607  * The following are all MBUS based SRMMU modules, and therefore could
608  * be found in a multiprocessor configuration.  On the whole, these
609  * chips seem to be much more touchy about DVMA and page tables
610  * with respect to cache coherency.
611  */
612
613 /* viking.S */
614 extern void viking_flush_cache_all(void);
615 extern void viking_flush_cache_mm(struct mm_struct *mm);
616 extern void viking_flush_cache_range(struct vm_area_struct *vma, unsigned long start,
617                                      unsigned long end);
618 extern void viking_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
619 extern void viking_flush_page_to_ram(unsigned long page);
620 extern void viking_flush_page_for_dma(unsigned long page);
621 extern void viking_flush_sig_insns(struct mm_struct *mm, unsigned long addr);
622 extern void viking_flush_page(unsigned long page);
623 extern void viking_mxcc_flush_page(unsigned long page);
624 extern void viking_flush_tlb_all(void);
625 extern void viking_flush_tlb_mm(struct mm_struct *mm);
626 extern void viking_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
627                                    unsigned long end);
628 extern void viking_flush_tlb_page(struct vm_area_struct *vma,
629                                   unsigned long page);
630 extern void sun4dsmp_flush_tlb_all(void);
631 extern void sun4dsmp_flush_tlb_mm(struct mm_struct *mm);
632 extern void sun4dsmp_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
633                                    unsigned long end);
634 extern void sun4dsmp_flush_tlb_page(struct vm_area_struct *vma,
635                                   unsigned long page);
636
637 /* hypersparc.S */
638 extern void hypersparc_flush_cache_all(void);
639 extern void hypersparc_flush_cache_mm(struct mm_struct *mm);
640 extern void hypersparc_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
641 extern void hypersparc_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
642 extern void hypersparc_flush_page_to_ram(unsigned long page);
643 extern void hypersparc_flush_page_for_dma(unsigned long page);
644 extern void hypersparc_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr);
645 extern void hypersparc_flush_tlb_all(void);
646 extern void hypersparc_flush_tlb_mm(struct mm_struct *mm);
647 extern void hypersparc_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
648 extern void hypersparc_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
649 extern void hypersparc_setup_blockops(void);
650
651 /*
652  * NOTE: All of this startup code assumes the low 16mb (approx.) of
653  *       kernel mappings are done with one single contiguous chunk of
654  *       ram.  On small ram machines (classics mainly) we only get
655  *       around 8mb mapped for us.
656  */
657
658 static void __init early_pgtable_allocfail(char *type)
659 {
660         prom_printf("inherit_prom_mappings: Cannot alloc kernel %s.\n", type);
661         prom_halt();
662 }
663
664 static void __init srmmu_early_allocate_ptable_skeleton(unsigned long start,
665                                                         unsigned long end)
666 {
667         pgd_t *pgdp;
668         pmd_t *pmdp;
669         pte_t *ptep;
670
671         while (start < end) {
672                 pgdp = pgd_offset_k(start);
673                 if (pgd_none(*(pgd_t *)__nocache_fix(pgdp))) {
674                         pmdp = __srmmu_get_nocache(
675                             SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
676                         if (pmdp == NULL)
677                                 early_pgtable_allocfail("pmd");
678                         memset(__nocache_fix(pmdp), 0, SRMMU_PMD_TABLE_SIZE);
679                         pgd_set(__nocache_fix(pgdp), pmdp);
680                 }
681                 pmdp = pmd_offset(__nocache_fix(pgdp), start);
682                 if (srmmu_pmd_none(*(pmd_t *)__nocache_fix(pmdp))) {
683                         ptep = __srmmu_get_nocache(PTE_SIZE, PTE_SIZE);
684                         if (ptep == NULL)
685                                 early_pgtable_allocfail("pte");
686                         memset(__nocache_fix(ptep), 0, PTE_SIZE);
687                         pmd_set(__nocache_fix(pmdp), ptep);
688                 }
689                 if (start > (0xffffffffUL - PMD_SIZE))
690                         break;
691                 start = (start + PMD_SIZE) & PMD_MASK;
692         }
693 }
694
695 static void __init srmmu_allocate_ptable_skeleton(unsigned long start,
696                                                   unsigned long end)
697 {
698         pgd_t *pgdp;
699         pmd_t *pmdp;
700         pte_t *ptep;
701
702         while (start < end) {
703                 pgdp = pgd_offset_k(start);
704                 if (pgd_none(*pgdp)) {
705                         pmdp = __srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
706                         if (pmdp == NULL)
707                                 early_pgtable_allocfail("pmd");
708                         memset(pmdp, 0, SRMMU_PMD_TABLE_SIZE);
709                         pgd_set(pgdp, pmdp);
710                 }
711                 pmdp = pmd_offset(pgdp, start);
712                 if (srmmu_pmd_none(*pmdp)) {
713                         ptep = __srmmu_get_nocache(PTE_SIZE,
714                                                              PTE_SIZE);
715                         if (ptep == NULL)
716                                 early_pgtable_allocfail("pte");
717                         memset(ptep, 0, PTE_SIZE);
718                         pmd_set(pmdp, ptep);
719                 }
720                 if (start > (0xffffffffUL - PMD_SIZE))
721                         break;
722                 start = (start + PMD_SIZE) & PMD_MASK;
723         }
724 }
725
726 /* These flush types are not available on all chips... */
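    /*
     * Ask the MMU, via the ASI_M_FLUSH_PROBE alternate space, which PTE it
     * would use for vaddr; a result of zero means no translation.  LEON
     * uses a software probe (leon_swprobe()) instead.
     */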
727 static inline unsigned long srmmu_probe(unsigned long vaddr)
728 {
729         unsigned long retval;
730
731         if (sparc_cpu_model != sparc_leon) {
732
733                 vaddr &= PAGE_MASK;
734                 __asm__ __volatile__("lda [%1] %2, %0\n\t" :
735                                      "=r" (retval) :
736                                      "r" (vaddr | 0x400), "i" (ASI_M_FLUSH_PROBE));
737         } else {
738                 retval = leon_swprobe(vaddr, NULL);
739         }
740         return retval;
741 }
742
743 /*
744  * This is much cleaner than poking around physical address space
745  * looking at the prom's page table directly which is what most
746  * other OS's do.  Yuck... this is much better.
747  */
748 static void __init srmmu_inherit_prom_mappings(unsigned long start,
749                                                unsigned long end)
750 {
751         unsigned long probed;
752         unsigned long addr;
753         pgd_t *pgdp;
754         pmd_t *pmdp;
755         pte_t *ptep;
756         int what; /* 0 = normal-pte, 1 = pmd-level pte, 2 = pgd-level pte */
757
758         while (start <= end) {
759                 if (start == 0)
760                         break; /* probably wrap around */
761                 if (start == 0xfef00000)
762                         start = KADB_DEBUGGER_BEGVM;
763                 probed = srmmu_probe(start);
764                 if (!probed) {
765                         /* continue probing until we find an entry */
766                         start += PAGE_SIZE;
767                         continue;
768                 }
769
770                 /* A red snapper, see what it really is. */
771                 what = 0;
772                 addr = start - PAGE_SIZE;
773
774                 if (!(start & ~(SRMMU_REAL_PMD_MASK))) {
775                         if (srmmu_probe(addr + SRMMU_REAL_PMD_SIZE) == probed)
776                                 what = 1;
777                 }
778
779                 if (!(start & ~(SRMMU_PGDIR_MASK))) {
780                         if (srmmu_probe(addr + SRMMU_PGDIR_SIZE) == probed)
781                                 what = 2;
782                 }
783
784                 pgdp = pgd_offset_k(start);
785                 if (what == 2) {
786                         *(pgd_t *)__nocache_fix(pgdp) = __pgd(probed);
787                         start += SRMMU_PGDIR_SIZE;
788                         continue;
789                 }
790                 if (pgd_none(*(pgd_t *)__nocache_fix(pgdp))) {
791                         pmdp = __srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE,
792                                                    SRMMU_PMD_TABLE_SIZE);
793                         if (pmdp == NULL)
794                                 early_pgtable_allocfail("pmd");
795                         memset(__nocache_fix(pmdp), 0, SRMMU_PMD_TABLE_SIZE);
796                         pgd_set(__nocache_fix(pgdp), pmdp);
797                 }
798                 pmdp = pmd_offset(__nocache_fix(pgdp), start);
799                 if (srmmu_pmd_none(*(pmd_t *)__nocache_fix(pmdp))) {
800                         ptep = __srmmu_get_nocache(PTE_SIZE, PTE_SIZE);
801                         if (ptep == NULL)
802                                 early_pgtable_allocfail("pte");
803                         memset(__nocache_fix(ptep), 0, PTE_SIZE);
804                         pmd_set(__nocache_fix(pmdp), ptep);
805                 }
806                 if (what == 1) {
807                         /* We bend the rule where all 16 PTPs in a pmd_t point
808                          * inside the same PTE page, and we leak a perfectly
809                          * good hardware PTE piece. Alternatives seem worse.
810                          */
811                         unsigned int x; /* Index of HW PMD in soft cluster */
812                         unsigned long *val;
813                         x = (start >> PMD_SHIFT) & 15;
814                         val = &pmdp->pmdv[x];
815                         *(unsigned long *)__nocache_fix(val) = probed;
816                         start += SRMMU_REAL_PMD_SIZE;
817                         continue;
818                 }
819                 ptep = pte_offset_kernel(__nocache_fix(pmdp), start);
820                 *(pte_t *)__nocache_fix(ptep) = __pte(probed);
821                 start += PAGE_SIZE;
822         }
823 }
824
825 #define KERNEL_PTE(page_shifted) ((page_shifted)|SRMMU_CACHE|SRMMU_PRIV|SRMMU_VALID)
826
827 /* Create a third-level SRMMU 16MB page mapping. */
828 static void __init do_large_mapping(unsigned long vaddr, unsigned long phys_base)
829 {
830         pgd_t *pgdp = pgd_offset_k(vaddr);
831         unsigned long big_pte;
832
833         big_pte = KERNEL_PTE(phys_base >> 4);
834         *(pgd_t *)__nocache_fix(pgdp) = __pgd(big_pte);
835 }
836
837 /* Map sp_bank entry SP_ENTRY, starting at virtual address VBASE. */
838 static unsigned long __init map_spbank(unsigned long vbase, int sp_entry)
839 {
840         unsigned long pstart = (sp_banks[sp_entry].base_addr & SRMMU_PGDIR_MASK);
841         unsigned long vstart = (vbase & SRMMU_PGDIR_MASK);
842         unsigned long vend = SRMMU_PGDIR_ALIGN(vbase + sp_banks[sp_entry].num_bytes);
843         /* Map "low" memory only */
844         const unsigned long min_vaddr = PAGE_OFFSET;
845         const unsigned long max_vaddr = PAGE_OFFSET + SRMMU_MAXMEM;
846
847         if (vstart < min_vaddr || vstart >= max_vaddr)
848                 return vstart;
849
850         if (vend > max_vaddr || vend < min_vaddr)
851                 vend = max_vaddr;
852
853         while (vstart < vend) {
854                 do_large_mapping(vstart, pstart);
855                 vstart += SRMMU_PGDIR_SIZE; pstart += SRMMU_PGDIR_SIZE;
856         }
857         return vstart;
858 }
859
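    /*
     * Map low memory: one 16MB pgd-level mapping at a time, for phys_base
     * (if non-zero) and then for every sp_banks entry.
     */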
860 static void __init map_kernel(void)
861 {
862         int i;
863
864         if (phys_base > 0) {
865                 do_large_mapping(PAGE_OFFSET, phys_base);
866         }
867
868         for (i = 0; sp_banks[i].num_bytes != 0; i++) {
869                 map_spbank((unsigned long)__va(sp_banks[i].base_addr), i);
870         }
871 }
872
873 void (*poke_srmmu)(void) = NULL;
874
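    /*
     * Main boot-time MMU setup: find the number of contexts from the PROM,
     * size and initialize the nocache pool, inherit the PROM's own
     * mappings, map the kernel, install the context table, then build the
     * fixmap/pkmap page tables and hand the memory layout to the zone
     * allocator.
     */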
875 void __init srmmu_paging_init(void)
876 {
877         int i;
878         phandle cpunode;
879         char node_str[128];
880         pgd_t *pgd;
881         pmd_t *pmd;
882         pte_t *pte;
883         unsigned long pages_avail;
884
885         init_mm.context = (unsigned long) NO_CONTEXT;
886         sparc_iomap.start = SUN4M_IOBASE_VADDR; /* 16MB of IOSPACE on all sun4m's. */
887
888         if (sparc_cpu_model == sun4d)
889                 num_contexts = 65536; /* We know it is Viking */
890         else {
891                 /* Find the number of contexts on the srmmu. */
892                 cpunode = prom_getchild(prom_root_node);
893                 num_contexts = 0;
894                 while (cpunode != 0) {
895                         prom_getstring(cpunode, "device_type", node_str, sizeof(node_str));
896                         if (!strcmp(node_str, "cpu")) {
897                                 num_contexts = prom_getintdefault(cpunode, "mmu-nctx", 0x8);
898                                 break;
899                         }
900                         cpunode = prom_getsibling(cpunode);
901                 }
902         }
903
904         if (!num_contexts) {
905                 prom_printf("Something wrong, can't find cpu node in paging_init.\n");
906                 prom_halt();
907         }
908
909         pages_avail = 0;
910         last_valid_pfn = bootmem_init(&pages_avail);
911
912         srmmu_nocache_calcsize();
913         srmmu_nocache_init();
914         srmmu_inherit_prom_mappings(0xfe400000, (LINUX_OPPROM_ENDVM - PAGE_SIZE));
915         map_kernel();
916
917         /* ctx table has to be physically aligned to its size */
918         srmmu_context_table = __srmmu_get_nocache(num_contexts * sizeof(ctxd_t), num_contexts * sizeof(ctxd_t));
919         srmmu_ctx_table_phys = (ctxd_t *)__nocache_pa(srmmu_context_table);
920
921         for (i = 0; i < num_contexts; i++)
922                 srmmu_ctxd_set((ctxd_t *)__nocache_fix(&srmmu_context_table[i]), srmmu_swapper_pg_dir);
923
924         flush_cache_all();
925         srmmu_set_ctable_ptr((unsigned long)srmmu_ctx_table_phys);
926 #ifdef CONFIG_SMP
927         /* Stop from hanging here... */
928         local_ops->tlb_all();
929 #else
930         flush_tlb_all();
931 #endif
932         poke_srmmu();
933
934         srmmu_allocate_ptable_skeleton(sparc_iomap.start, IOBASE_END);
935         srmmu_allocate_ptable_skeleton(DVMA_VADDR, DVMA_END);
936
937         srmmu_allocate_ptable_skeleton(
938                 __fix_to_virt(__end_of_fixed_addresses - 1), FIXADDR_TOP);
939         srmmu_allocate_ptable_skeleton(PKMAP_BASE, PKMAP_END);
940
941         pgd = pgd_offset_k(PKMAP_BASE);
942         pmd = pmd_offset(pgd, PKMAP_BASE);
943         pte = pte_offset_kernel(pmd, PKMAP_BASE);
944         pkmap_page_table = pte;
945
946         flush_cache_all();
947         flush_tlb_all();
948
949         sparc_context_init(num_contexts);
950
951         kmap_init();
952
953         {
954                 unsigned long zones_size[MAX_NR_ZONES];
955                 unsigned long zholes_size[MAX_NR_ZONES];
956                 unsigned long npages;
957                 int znum;
958
959                 for (znum = 0; znum < MAX_NR_ZONES; znum++)
960                         zones_size[znum] = zholes_size[znum] = 0;
961
962                 npages = max_low_pfn - pfn_base;
963
964                 zones_size[ZONE_DMA] = npages;
965                 zholes_size[ZONE_DMA] = npages - pages_avail;
966
967                 npages = highend_pfn - max_low_pfn;
968                 zones_size[ZONE_HIGHMEM] = npages;
969                 zholes_size[ZONE_HIGHMEM] = npages - calc_highpages();
970
971                 free_area_init_node(0, zones_size, pfn_base, zholes_size);
972         }
973 }
974
975 void mmu_info(struct seq_file *m)
976 {
977         seq_printf(m,
978                    "MMU type\t: %s\n"
979                    "contexts\t: %d\n"
980                    "nocache total\t: %ld\n"
981                    "nocache used\t: %d\n",
982                    srmmu_name,
983                    num_contexts,
984                    srmmu_nocache_size,
985                    srmmu_nocache_map.used << SRMMU_NOCACHE_BITMAP_SHIFT);
986 }
987
988 int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
989 {
990         mm->context = NO_CONTEXT;
991         return 0;
992 }
993
994 void destroy_context(struct mm_struct *mm)
995 {
996         unsigned long flags;
997
998         if (mm->context != NO_CONTEXT) {
999                 flush_cache_mm(mm);
1000                 srmmu_ctxd_set(&srmmu_context_table[mm->context], srmmu_swapper_pg_dir);
1001                 flush_tlb_mm(mm);
1002                 spin_lock_irqsave(&srmmu_context_spinlock, flags);
1003                 free_context(mm->context);
1004                 spin_unlock_irqrestore(&srmmu_context_spinlock, flags);
1005                 mm->context = NO_CONTEXT;
1006         }
1007 }
1008
1009 /* Init various srmmu chip types. */
1010 static void __init srmmu_is_bad(void)
1011 {
1012         prom_printf("Could not determine SRMMU chip type.\n");
1013         prom_halt();
1014 }
1015
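     /*
      * Read the virtually-addressed cache geometry (line size and number
      * of lines) from the PROM cpu node(s).  On SMP keep the largest cache
      * size and the smallest line size found across cpus.
      */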
1016 static void __init init_vac_layout(void)
1017 {
1018         phandle nd;
1019         int cache_lines;
1020         char node_str[128];
1021 #ifdef CONFIG_SMP
1022         int cpu = 0;
1023         unsigned long max_size = 0;
1024         unsigned long min_line_size = 0x10000000;
1025 #endif
1026
1027         nd = prom_getchild(prom_root_node);
1028         while ((nd = prom_getsibling(nd)) != 0) {
1029                 prom_getstring(nd, "device_type", node_str, sizeof(node_str));
1030                 if (!strcmp(node_str, "cpu")) {
1031                         vac_line_size = prom_getint(nd, "cache-line-size");
1032                         if (vac_line_size == -1) {
1033                                 prom_printf("can't determine cache-line-size, halting.\n");
1034                                 prom_halt();
1035                         }
1036                         cache_lines = prom_getint(nd, "cache-nlines");
1037                         if (cache_lines == -1) {
1038                                 prom_printf("can't determine cache-nlines, halting.\n");
1039                                 prom_halt();
1040                         }
1041
1042                         vac_cache_size = cache_lines * vac_line_size;
1043 #ifdef CONFIG_SMP
1044                         if (vac_cache_size > max_size)
1045                                 max_size = vac_cache_size;
1046                         if (vac_line_size < min_line_size)
1047                                 min_line_size = vac_line_size;
1048                         //FIXME: cpus not contiguous!!
1049                         cpu++;
1050                         if (cpu >= nr_cpu_ids || !cpu_online(cpu))
1051                                 break;
1052 #else
1053                         break;
1054 #endif
1055                 }
1056         }
1057         if (nd == 0) {
1058                 prom_printf("No CPU nodes found, halting.\n");
1059                 prom_halt();
1060         }
1061 #ifdef CONFIG_SMP
1062         vac_cache_size = max_size;
1063         vac_line_size = min_line_size;
1064 #endif
1065         printk("SRMMU: Using VAC size of %d bytes, line size %d bytes.\n",
1066                (int)vac_cache_size, (int)vac_line_size);
1067 }
1068
1069 static void poke_hypersparc(void)
1070 {
1071         volatile unsigned long clear;
1072         unsigned long mreg = srmmu_get_mmureg();
1073
1074         hyper_flush_unconditional_combined();
1075
1076         mreg &= ~(HYPERSPARC_CWENABLE);
1077         mreg |= (HYPERSPARC_CENABLE | HYPERSPARC_WBENABLE);
1078         mreg |= (HYPERSPARC_CMODE);
1079
1080         srmmu_set_mmureg(mreg);
1081
1082 #if 0 /* XXX I think this is bad news... -DaveM */
1083         hyper_clear_all_tags();
1084 #endif
1085
1086         put_ross_icr(HYPERSPARC_ICCR_FTD | HYPERSPARC_ICCR_ICE);
1087         hyper_flush_whole_icache();
1088         clear = srmmu_get_faddr();
1089         clear = srmmu_get_fstatus();
1090 }
1091
1092 static const struct sparc32_cachetlb_ops hypersparc_ops = {
1093         .cache_all      = hypersparc_flush_cache_all,
1094         .cache_mm       = hypersparc_flush_cache_mm,
1095         .cache_page     = hypersparc_flush_cache_page,
1096         .cache_range    = hypersparc_flush_cache_range,
1097         .tlb_all        = hypersparc_flush_tlb_all,
1098         .tlb_mm         = hypersparc_flush_tlb_mm,
1099         .tlb_page       = hypersparc_flush_tlb_page,
1100         .tlb_range      = hypersparc_flush_tlb_range,
1101         .page_to_ram    = hypersparc_flush_page_to_ram,
1102         .sig_insns      = hypersparc_flush_sig_insns,
1103         .page_for_dma   = hypersparc_flush_page_for_dma,
1104 };
1105
1106 static void __init init_hypersparc(void)
1107 {
1108         srmmu_name = "ROSS HyperSparc";
1109         srmmu_modtype = HyperSparc;
1110
1111         init_vac_layout();
1112
1113         is_hypersparc = 1;
1114         sparc32_cachetlb_ops = &hypersparc_ops;
1115
1116         poke_srmmu = poke_hypersparc;
1117
1118         hypersparc_setup_blockops();
1119 }
1120
1121 static void poke_swift(void)
1122 {
1123         unsigned long mreg;
1124
1125         /* Clear any crap from the cache or else... */
1126         swift_flush_cache_all();
1127
1128         /* Enable I & D caches */
1129         mreg = srmmu_get_mmureg();
1130         mreg |= (SWIFT_IE | SWIFT_DE);
1131         /*
1132          * The Swift branch folding logic is completely broken.  At
1133          * trap time, if things are just right, it can mistakenly
1134          * think that a trap is coming from kernel mode when in fact
1135          * it is coming from user mode (it mis-executes the branch in
1136          * the trap code).  So you see things like crashme completely
1137          * hosing your machine which is completely unacceptable.  Turn
1138          * this shit off... nice job Fujitsu.
1139          */
1140         mreg &= ~(SWIFT_BF);
1141         srmmu_set_mmureg(mreg);
1142 }
1143
1144 static const struct sparc32_cachetlb_ops swift_ops = {
1145         .cache_all      = swift_flush_cache_all,
1146         .cache_mm       = swift_flush_cache_mm,
1147         .cache_page     = swift_flush_cache_page,
1148         .cache_range    = swift_flush_cache_range,
1149         .tlb_all        = swift_flush_tlb_all,
1150         .tlb_mm         = swift_flush_tlb_mm,
1151         .tlb_page       = swift_flush_tlb_page,
1152         .tlb_range      = swift_flush_tlb_range,
1153         .page_to_ram    = swift_flush_page_to_ram,
1154         .sig_insns      = swift_flush_sig_insns,
1155         .page_for_dma   = swift_flush_page_for_dma,
1156 };
1157
1158 #define SWIFT_MASKID_ADDR  0x10003018
1159 static void __init init_swift(void)
1160 {
1161         unsigned long swift_rev;
1162
1163         __asm__ __volatile__("lda [%1] %2, %0\n\t"
1164                              "srl %0, 0x18, %0\n\t" :
1165                              "=r" (swift_rev) :
1166                              "r" (SWIFT_MASKID_ADDR), "i" (ASI_M_BYPASS));
1167         srmmu_name = "Fujitsu Swift";
1168         switch (swift_rev) {
1169         case 0x11:
1170         case 0x20:
1171         case 0x23:
1172         case 0x30:
1173                 srmmu_modtype = Swift_lots_o_bugs;
1174                 hwbug_bitmask |= (HWBUG_KERN_ACCBROKEN | HWBUG_KERN_CBITBROKEN);
1175                 /*
1176                  * Gee george, I wonder why Sun is so hush hush about
1177                  * this hardware bug... really braindamage stuff going
1178                  * on here.  However I think we can find a way to avoid
1179                  * this hardware bug... really braindamaged stuff going
1180                  * any page fault can cause kernel pages to become user
1181                  * accessible (the mmu gets confused and clears some of
1182                  * the ACC bits in kernel ptes).  Aha, sounds pretty
1183                  * horrible eh?  But wait, after extensive testing it appears
1184                  * that if you use pgd_t level large kernel pte's (like the
1185                  * 4MB pages on the Pentium) the bug does not get tripped
1186                  * at all.  This avoids almost all of the major overhead.
1187                  * Welcome to a world where your vendor tells you to,
1188                  * "apply this kernel patch" instead of "sorry for the
1189                  * broken hardware, send it back and we'll give you
1190                  * properly functioning parts"
1191                  */
1192                 break;
1193         case 0x25:
1194         case 0x31:
1195                 srmmu_modtype = Swift_bad_c;
1196                 hwbug_bitmask |= HWBUG_KERN_CBITBROKEN;
1197                 /*
1198                  * You see Sun allude to this hardware bug but never
1199                  * admit things directly, they'll say things like,
1200                  * "the Swift chip cache problems" or similar.
1201                  */
1202                 break;
1203         default:
1204                 srmmu_modtype = Swift_ok;
1205                 break;
1206         }
1207
1208         sparc32_cachetlb_ops = &swift_ops;
1209         flush_page_for_dma_global = 0;
1210
1211         /*
1212          * Are you now convinced that the Swift is one of the
1213          * biggest VLSI abortions of all time?  Bravo Fujitsu!
1214          * Fujitsu, the !#?!%$'d up processor people.  I bet if
1215          * you examined the microcode of the Swift you'd find
1216          * XXX's all over the place.
1217          */
1218         poke_srmmu = poke_swift;
1219 }
1220
1221 static void turbosparc_flush_cache_all(void)
1222 {
1223         flush_user_windows();
1224         turbosparc_idflash_clear();
1225 }
1226
1227 static void turbosparc_flush_cache_mm(struct mm_struct *mm)
1228 {
1229         FLUSH_BEGIN(mm)
1230         flush_user_windows();
1231         turbosparc_idflash_clear();
1232         FLUSH_END
1233 }
1234
1235 static void turbosparc_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
1236 {
1237         FLUSH_BEGIN(vma->vm_mm)
1238         flush_user_windows();
1239         turbosparc_idflash_clear();
1240         FLUSH_END
1241 }
1242
1243 static void turbosparc_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
1244 {
1245         FLUSH_BEGIN(vma->vm_mm)
1246         flush_user_windows();
1247         if (vma->vm_flags & VM_EXEC)
1248                 turbosparc_flush_icache();
1249         turbosparc_flush_dcache();
1250         FLUSH_END
1251 }
1252
1253 /* TurboSparc is copy-back, if we turn it on, but this does not work. */
1254 static void turbosparc_flush_page_to_ram(unsigned long page)
1255 {
1256 #ifdef TURBOSPARC_WRITEBACK
1257         volatile unsigned long clear;
1258
1259         if (srmmu_probe(page))
1260                 turbosparc_flush_page_cache(page);
1261         clear = srmmu_get_fstatus();
1262 #endif
1263 }
1264
1265 static void turbosparc_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
1266 {
1267 }
1268
1269 static void turbosparc_flush_page_for_dma(unsigned long page)
1270 {
1271         turbosparc_flush_dcache();
1272 }
1273
1274 static void turbosparc_flush_tlb_all(void)
1275 {
1276         srmmu_flush_whole_tlb();
1277 }
1278
1279 static void turbosparc_flush_tlb_mm(struct mm_struct *mm)
1280 {
1281         FLUSH_BEGIN(mm)
1282         srmmu_flush_whole_tlb();
1283         FLUSH_END
1284 }
1285
1286 static void turbosparc_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
1287 {
1288         FLUSH_BEGIN(vma->vm_mm)
1289         srmmu_flush_whole_tlb();
1290         FLUSH_END
1291 }
1292
1293 static void turbosparc_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
1294 {
1295         FLUSH_BEGIN(vma->vm_mm)
1296         srmmu_flush_whole_tlb();
1297         FLUSH_END
1298 }
1299
1300
1301 static void poke_turbosparc(void)
1302 {
1303         unsigned long mreg = srmmu_get_mmureg();
1304         unsigned long ccreg;
1305
1306         /* Clear any crap from the cache or else... */
1307         turbosparc_flush_cache_all();
1308         /* Temporarily disable I & D caches */
1309         mreg &= ~(TURBOSPARC_ICENABLE | TURBOSPARC_DCENABLE);
1310         mreg &= ~(TURBOSPARC_PCENABLE);         /* Don't check parity */
1311         srmmu_set_mmureg(mreg);
1312
1313         ccreg = turbosparc_get_ccreg();
1314
1315 #ifdef TURBOSPARC_WRITEBACK
1316         ccreg |= (TURBOSPARC_SNENABLE);         /* Do DVMA snooping in Dcache */
1317         ccreg &= ~(TURBOSPARC_uS2 | TURBOSPARC_WTENABLE);
1318                         /* Write-back D-cache, emulate VLSI
1319                          * abortion number three, not number one */
1320 #else
1321         /* For now let's play safe, optimize later */
1322         ccreg |= (TURBOSPARC_SNENABLE | TURBOSPARC_WTENABLE);
1323                         /* Do DVMA snooping in Dcache, Write-thru D-cache */
1324         ccreg &= ~(TURBOSPARC_uS2);
1325                         /* Emulate VLSI abortion number three, not number one */
1326 #endif
1327
1328         switch (ccreg & 7) {
1329         case 0: /* No SE cache */
1330         case 7: /* Test mode */
1331                 break;
1332         default:
1333                 ccreg |= (TURBOSPARC_SCENABLE);
1334         }
1335         turbosparc_set_ccreg(ccreg);
1336
1337         mreg |= (TURBOSPARC_ICENABLE | TURBOSPARC_DCENABLE); /* I & D caches on */
1338         mreg |= (TURBOSPARC_ICSNOOP);           /* Icache snooping on */
1339         srmmu_set_mmureg(mreg);
1340 }
1341
1342 static const struct sparc32_cachetlb_ops turbosparc_ops = {
1343         .cache_all      = turbosparc_flush_cache_all,
1344         .cache_mm       = turbosparc_flush_cache_mm,
1345         .cache_page     = turbosparc_flush_cache_page,
1346         .cache_range    = turbosparc_flush_cache_range,
1347         .tlb_all        = turbosparc_flush_tlb_all,
1348         .tlb_mm         = turbosparc_flush_tlb_mm,
1349         .tlb_page       = turbosparc_flush_tlb_page,
1350         .tlb_range      = turbosparc_flush_tlb_range,
1351         .page_to_ram    = turbosparc_flush_page_to_ram,
1352         .sig_insns      = turbosparc_flush_sig_insns,
1353         .page_for_dma   = turbosparc_flush_page_for_dma,
1354 };
1355
1356 static void __init init_turbosparc(void)
1357 {
1358         srmmu_name = "Fujitsu TurboSparc";
1359         srmmu_modtype = TurboSparc;
1360         sparc32_cachetlb_ops = &turbosparc_ops;
1361         poke_srmmu = poke_turbosparc;
1362 }
1363
1364 static void poke_tsunami(void)
1365 {
1366         unsigned long mreg = srmmu_get_mmureg();
1367
1368         tsunami_flush_icache();
1369         tsunami_flush_dcache();
1370         mreg &= ~TSUNAMI_ITD;
1371         mreg |= (TSUNAMI_IENAB | TSUNAMI_DENAB);
1372         srmmu_set_mmureg(mreg);
1373 }
1374
1375 static const struct sparc32_cachetlb_ops tsunami_ops = {
1376         .cache_all      = tsunami_flush_cache_all,
1377         .cache_mm       = tsunami_flush_cache_mm,
1378         .cache_page     = tsunami_flush_cache_page,
1379         .cache_range    = tsunami_flush_cache_range,
1380         .tlb_all        = tsunami_flush_tlb_all,
1381         .tlb_mm         = tsunami_flush_tlb_mm,
1382         .tlb_page       = tsunami_flush_tlb_page,
1383         .tlb_range      = tsunami_flush_tlb_range,
1384         .page_to_ram    = tsunami_flush_page_to_ram,
1385         .sig_insns      = tsunami_flush_sig_insns,
1386         .page_for_dma   = tsunami_flush_page_for_dma,
1387 };
1388
1389 static void __init init_tsunami(void)
1390 {
1391         /*
1392          * Tsunami's pretty sane, Sun and TI actually got it
1393          * somewhat right this time.  Fujitsu should have
1394          * taken some lessons from them.
1395          */
1396
1397         srmmu_name = "TI Tsunami";
1398         srmmu_modtype = Tsunami;
1399         sparc32_cachetlb_ops = &tsunami_ops;
1400         poke_srmmu = poke_tsunami;
1401
1402         tsunami_setup_blockops();
1403 }
1404
1405 static void poke_viking(void)
1406 {
1407         unsigned long mreg = srmmu_get_mmureg();
1408         static int smp_catch;
1409
1410         if (viking_mxcc_present) {
1411                 unsigned long mxcc_control = mxcc_get_creg();
1412
1413                 mxcc_control |= (MXCC_CTL_ECE | MXCC_CTL_PRE | MXCC_CTL_MCE);
1414                 mxcc_control &= ~(MXCC_CTL_RRC);
1415                 mxcc_set_creg(mxcc_control);
1416
1417                 /*
1418                  * We don't need memory parity checks.
1419                  * XXX This is a mess, have to dig out later. ecd.
1420                 viking_mxcc_turn_off_parity(&mreg, &mxcc_control);
1421                  */
1422
1423                 /* We do cache ptables on MXCC. */
1424                 mreg |= VIKING_TCENABLE;
1425         } else {
1426                 unsigned long bpreg;
1427
1428                 mreg &= ~(VIKING_TCENABLE);
1429                 if (smp_catch++) {
1430                         /* Must disable mixed-cmd mode here for other cpu's. */
1431                         bpreg = viking_get_bpreg();
1432                         bpreg &= ~(VIKING_ACTION_MIX);
1433                         viking_set_bpreg(bpreg);
1434
1435                         /* Just in case PROM does something funny. */
1436                         msi_set_sync();
1437                 }
1438         }
1439
1440         mreg |= VIKING_SPENABLE;
1441         mreg |= (VIKING_ICENABLE | VIKING_DCENABLE);
1442         mreg |= VIKING_SBENABLE;
1443         mreg &= ~(VIKING_ACENABLE);
1444         srmmu_set_mmureg(mreg);
1445 }
1446
1447 static struct sparc32_cachetlb_ops viking_ops = {
1448         .cache_all      = viking_flush_cache_all,
1449         .cache_mm       = viking_flush_cache_mm,
1450         .cache_page     = viking_flush_cache_page,
1451         .cache_range    = viking_flush_cache_range,
1452         .tlb_all        = viking_flush_tlb_all,
1453         .tlb_mm         = viking_flush_tlb_mm,
1454         .tlb_page       = viking_flush_tlb_page,
1455         .tlb_range      = viking_flush_tlb_range,
1456         .page_to_ram    = viking_flush_page_to_ram,
1457         .sig_insns      = viking_flush_sig_insns,
1458         .page_for_dma   = viking_flush_page_for_dma,
1459 };
1460
1461 #ifdef CONFIG_SMP
1462 /* On sun4d the cpu broadcasts local TLB flushes, so we can just
1463  * perform the local TLB flush and all the other cpus will see it.
1464  * But, unfortunately, there is a bug in the sun4d XBUS backplane
1465  * that requires that we add some synchronization to these flushes.
1466  *
1467  * The bug is that the fifo which keeps track of all the pending TLB
1468  * broadcasts in the system is an entry or two too small, so if we
1469  * have too many going at once we'll overflow that fifo and lose a TLB
1470  * flush resulting in corruption.
1471  *
1472  * Our workaround is to take a global spinlock around the TLB flushes,
1473  * which guarantees we never have too many pending.  It's a big hammer,
1474  * but a semaphore-like scheme limiting us to N in-flight TLB flushes
1475  * would need SMP locking anyway, so there's no real value in trying
1476  * any harder than this.  (See the illustrative sketch further below.)
1477  */
1478 static struct sparc32_cachetlb_ops viking_sun4d_smp_ops = {
1479         .cache_all      = viking_flush_cache_all,
1480         .cache_mm       = viking_flush_cache_mm,
1481         .cache_page     = viking_flush_cache_page,
1482         .cache_range    = viking_flush_cache_range,
1483         .tlb_all        = sun4dsmp_flush_tlb_all,
1484         .tlb_mm         = sun4dsmp_flush_tlb_mm,
1485         .tlb_page       = sun4dsmp_flush_tlb_page,
1486         .tlb_range      = sun4dsmp_flush_tlb_range,
1487         .page_to_ram    = viking_flush_page_to_ram,
1488         .sig_insns      = viking_flush_sig_insns,
1489         .page_for_dma   = viking_flush_page_for_dma,
1490 };
1491 #endif
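
/*
 * The serialization pattern described above, as an illustrative sketch
 * only: the real sun4dsmp_flush_tlb_*() routines live elsewhere, and the
 * lock and function names below are hypothetical, made up for this
 * sketch.  One global lock means at most one broadcast TLB flush is in
 * flight, so the XBUS fifo of pending broadcasts can never overflow.
 */
#if 0
static DEFINE_SPINLOCK(sun4d_xbus_flush_lock);	/* hypothetical */

static void sun4dsmp_flush_tlb_all_sketch(void)
{
	unsigned long flags;

	/* Serialize: only one broadcast TLB flush at a time. */
	spin_lock_irqsave(&sun4d_xbus_flush_lock, flags);
	viking_flush_tlb_all();		/* the cpu broadcasts this on sun4d */
	spin_unlock_irqrestore(&sun4d_xbus_flush_lock, flags);
}
#endif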
1492
1493 static void __init init_viking(void)
1494 {
1495         unsigned long mreg = srmmu_get_mmureg();
1496
1497         /* Ahhh, the viking.  SRMMU VLSI abortion number two... */
1498         if (mreg & VIKING_MMODE) {
1499                 srmmu_name = "TI Viking";
1500                 viking_mxcc_present = 0;
1501                 msi_set_sync();
1502
1503                 /*
1504                  * We need this to make sure the old viking takes no hits
1505                  * on its cache for dma snoops, to work around the
1506                  * "load from non-cacheable memory" interrupt bug.
1507                  * This is only necessary because of the new way in
1508                  * which we use the IOMMU.
1509                  */
1510                 viking_ops.page_for_dma = viking_flush_page;
1511 #ifdef CONFIG_SMP
1512                 viking_sun4d_smp_ops.page_for_dma = viking_flush_page;
1513 #endif
1514                 flush_page_for_dma_global = 0;
1515         } else {
1516                 srmmu_name = "TI Viking/MXCC";
1517                 viking_mxcc_present = 1;
1518                 srmmu_cache_pagetables = 1;
1519         }
1520
1521         sparc32_cachetlb_ops = (const struct sparc32_cachetlb_ops *)
1522                 &viking_ops;
1523 #ifdef CONFIG_SMP
1524         if (sparc_cpu_model == sun4d)
1525                 sparc32_cachetlb_ops = (const struct sparc32_cachetlb_ops *)
1526                         &viking_sun4d_smp_ops;
1527 #endif
1528
1529         poke_srmmu = poke_viking;
1530 }
1531
1532 /* Probe for the srmmu chip version. */
1533 static void __init get_srmmu_type(void)
1534 {
1535         unsigned long mreg, psr;
1536         unsigned long mod_typ, mod_rev, psr_typ, psr_vers;
1537
1538         srmmu_modtype = SRMMU_INVAL_MOD;
1539         hwbug_bitmask = 0;
1540
1541         mreg = srmmu_get_mmureg(); psr = get_psr();
1542         mod_typ = (mreg & 0xf0000000) >> 28;
1543         mod_rev = (mreg & 0x0f000000) >> 24;
1544         psr_typ = (psr >> 28) & 0xf;
1545         psr_vers = (psr >> 24) & 0xf;
1546
1547         /* First, check for sparc-leon. */
1548         if (sparc_cpu_model == sparc_leon) {
1549                 init_leon();
1550                 return;
1551         }
1552
1553         /* Second, check for HyperSparc or Cypress. */
1554         if (mod_typ == 1) {
1555                 switch (mod_rev) {
1556                 case 7:
1557                         /* UP or MP Hypersparc */
1558                         init_hypersparc();
1559                         break;
1560                 case 0:
1561                 case 2:
1562                 case 10:
1563                 case 11:
1564                 case 12:
1565                 case 13:
1566                 case 14:
1567                 case 15:
1568                 default:
1569                         prom_printf("Sparc-Linux Cypress support no longer exists.\n");
1570                         prom_halt();
1571                         break;
1572                 }
1573                 return;
1574         }
1575
1576         /* Now the Fujitsu TurboSparc.  It might be running in
1577          * Swift emulation mode, so we also check for that below...
1578          */
1579         if (psr_typ == 0 && psr_vers == 5) {
1580                 init_turbosparc();
1581                 return;
1582         }
1583
1584         /* Next check for Fujitsu Swift. */
1585         if (psr_typ == 0 && psr_vers == 4) {
1586                 phandle cpunode;
1587                 char node_str[128];
1588
1589                 /* Check whether this is really a TurboSparc emulating Swift... */
1590                 cpunode = prom_getchild(prom_root_node);
1591                 while ((cpunode = prom_getsibling(cpunode)) != 0) {
1592                         prom_getstring(cpunode, "device_type", node_str, sizeof(node_str));
1593                         if (!strcmp(node_str, "cpu")) {
1594                                 if (!prom_getintdefault(cpunode, "psr-implementation", 1) &&
1595                                     prom_getintdefault(cpunode, "psr-version", 1) == 5) {
1596                                         init_turbosparc();
1597                                         return;
1598                                 }
1599                                 break;
1600                         }
1601                 }
1602
1603                 init_swift();
1604                 return;
1605         }
1606
1607         /* Now the Viking family of srmmu. */
1608         if (psr_typ == 4 &&
1609            ((psr_vers == 0) ||
1610             ((psr_vers == 1) && (mod_typ == 0) && (mod_rev == 0)))) {
1611                 init_viking();
1612                 return;
1613         }
1614
1615         /* Finally the Tsunami. */
1616         if (psr_typ == 4 && psr_vers == 1 && (mod_typ || mod_rev)) {
1617                 init_tsunami();
1618                 return;
1619         }
1620
1621         /* Oh well */
1622         srmmu_is_bad();
1623 }
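
/*
 * Rough summary of the probe above, for reference (derived from the
 * checks it performs, not from any datasheet):
 *
 *   sparc_cpu_model == sparc_leon            -> LEON
 *   mod_typ == 1, mod_rev == 7               -> HyperSparc
 *   mod_typ == 1, any other mod_rev          -> Cypress (unsupported, halt)
 *   psr_typ == 0, psr_vers == 5              -> TurboSparc
 *   psr_typ == 0, psr_vers == 4              -> Swift, unless the PROM says
 *                                               it's a TurboSparc emulating one
 *   psr_typ == 4, psr_vers == 0, or
 *   psr_typ == 4, psr_vers == 1, mod 0/0     -> Viking
 *   psr_typ == 4, psr_vers == 1, mod != 0/0  -> Tsunami
 *   anything else                            -> srmmu_is_bad()
 */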
1624
1625 #ifdef CONFIG_SMP
1626 /* Local cross-calls. */
1627 static void smp_flush_page_for_dma(unsigned long page)
1628 {
1629         xc1((smpfunc_t) local_ops->page_for_dma, page);
1630         local_ops->page_for_dma(page);
1631 }
1632
1633 static void smp_flush_cache_all(void)
1634 {
1635         xc0((smpfunc_t) local_ops->cache_all);
1636         local_ops->cache_all();
1637 }
1638
1639 static void smp_flush_tlb_all(void)
1640 {
1641         xc0((smpfunc_t) local_ops->tlb_all);
1642         local_ops->tlb_all();
1643 }
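
/*
 * The flush_{cache,tlb}_{mm,range,page} wrappers below all follow the
 * same basic pattern: bail out if the mm never got a context, copy the
 * mm's cpumask, drop the local cpu, cross-call the remaining cpus only
 * if any are left, then always do the local flush as well.  A
 * hypothetical helper showing just the filtering step (invented for
 * illustration only, nothing below uses it):
 */
#if 0
static int smp_other_cpus_use_mm(struct mm_struct *mm)
{
	cpumask_t cpu_mask;

	cpumask_copy(&cpu_mask, mm_cpumask(mm));
	cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
	return !cpumask_empty(&cpu_mask);
}
#endif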
1644
1645 static void smp_flush_cache_mm(struct mm_struct *mm)
1646 {
1647         if (mm->context != NO_CONTEXT) {
1648                 cpumask_t cpu_mask;
1649                 cpumask_copy(&cpu_mask, mm_cpumask(mm));
1650                 cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
1651                 if (!cpumask_empty(&cpu_mask))
1652                         xc1((smpfunc_t) local_ops->cache_mm, (unsigned long) mm);
1653                 local_ops->cache_mm(mm);
1654         }
1655 }
1656
1657 static void smp_flush_tlb_mm(struct mm_struct *mm)
1658 {
1659         if (mm->context != NO_CONTEXT) {
1660                 cpumask_t cpu_mask;
1661                 cpumask_copy(&cpu_mask, mm_cpumask(mm));
1662                 cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
1663                 if (!cpumask_empty(&cpu_mask)) {
1664                         xc1((smpfunc_t) local_ops->tlb_mm, (unsigned long) mm);
1665                         if (atomic_read(&mm->mm_users) == 1 && current->active_mm == mm)
1666                                 cpumask_copy(mm_cpumask(mm),
1667                                              cpumask_of(smp_processor_id()));
1668                 }
1669                 local_ops->tlb_mm(mm);
1670         }
1671 }
1672
1673 static void smp_flush_cache_range(struct vm_area_struct *vma,
1674                                   unsigned long start,
1675                                   unsigned long end)
1676 {
1677         struct mm_struct *mm = vma->vm_mm;
1678
1679         if (mm->context != NO_CONTEXT) {
1680                 cpumask_t cpu_mask;
1681                 cpumask_copy(&cpu_mask, mm_cpumask(mm));
1682                 cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
1683                 if (!cpumask_empty(&cpu_mask))
1684                         xc3((smpfunc_t) local_ops->cache_range,
1685                             (unsigned long) vma, start, end);
1686                 local_ops->cache_range(vma, start, end);
1687         }
1688 }
1689
1690 static void smp_flush_tlb_range(struct vm_area_struct *vma,
1691                                 unsigned long start,
1692                                 unsigned long end)
1693 {
1694         struct mm_struct *mm = vma->vm_mm;
1695
1696         if (mm->context != NO_CONTEXT) {
1697                 cpumask_t cpu_mask;
1698                 cpumask_copy(&cpu_mask, mm_cpumask(mm));
1699                 cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
1700                 if (!cpumask_empty(&cpu_mask))
1701                         xc3((smpfunc_t) local_ops->tlb_range,
1702                             (unsigned long) vma, start, end);
1703                 local_ops->tlb_range(vma, start, end);
1704         }
1705 }
1706
1707 static void smp_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
1708 {
1709         struct mm_struct *mm = vma->vm_mm;
1710
1711         if (mm->context != NO_CONTEXT) {
1712                 cpumask_t cpu_mask;
1713                 cpumask_copy(&cpu_mask, mm_cpumask(mm));
1714                 cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
1715                 if (!cpumask_empty(&cpu_mask))
1716                         xc2((smpfunc_t) local_ops->cache_page,
1717                             (unsigned long) vma, page);
1718                 local_ops->cache_page(vma, page);
1719         }
1720 }
1721
1722 static void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
1723 {
1724         struct mm_struct *mm = vma->vm_mm;
1725
1726         if (mm->context != NO_CONTEXT) {
1727                 cpumask_t cpu_mask;
1728                 cpumask_copy(&cpu_mask, mm_cpumask(mm));
1729                 cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
1730                 if (!cpumask_empty(&cpu_mask))
1731                         xc2((smpfunc_t) local_ops->tlb_page,
1732                             (unsigned long) vma, page);
1733                 local_ops->tlb_page(vma, page);
1734         }
1735 }
1736
1737 static void smp_flush_page_to_ram(unsigned long page)
1738 {
1739         /* The current theory is that callers of this have just
1740          * dirtied their cache with the page's contents in kernel
1741          * space, so we only need to run this on the local cpu.
1742          *
1743          * XXX This experiment failed, research further... -DaveM
1744          */
1745 #if 1
1746         xc1((smpfunc_t) local_ops->page_to_ram, page);
1747 #endif
1748         local_ops->page_to_ram(page);
1749 }
1750
1751 static void smp_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
1752 {
1753         cpumask_t cpu_mask;
1754         cpumask_copy(&cpu_mask, mm_cpumask(mm));
1755         cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
1756         if (!cpumask_empty(&cpu_mask))
1757                 xc2((smpfunc_t) local_ops->sig_insns,
1758                     (unsigned long) mm, insn_addr);
1759         local_ops->sig_insns(mm, insn_addr);
1760 }
1761
1762 static struct sparc32_cachetlb_ops smp_cachetlb_ops = {
1763         .cache_all      = smp_flush_cache_all,
1764         .cache_mm       = smp_flush_cache_mm,
1765         .cache_page     = smp_flush_cache_page,
1766         .cache_range    = smp_flush_cache_range,
1767         .tlb_all        = smp_flush_tlb_all,
1768         .tlb_mm         = smp_flush_tlb_mm,
1769         .tlb_page       = smp_flush_tlb_page,
1770         .tlb_range      = smp_flush_tlb_range,
1771         .page_to_ram    = smp_flush_page_to_ram,
1772         .sig_insns      = smp_flush_sig_insns,
1773         .page_for_dma   = smp_flush_page_for_dma,
1774 };
1775 #endif
1776
1777 /* Load up routines and constants for sun4m and sun4d mmu */
1778 void __init load_mmu(void)
1779 {
1780         /* Functions */
1781         get_srmmu_type();
1782
1783 #ifdef CONFIG_SMP
1784         /* El switcheroo... */
1785         local_ops = sparc32_cachetlb_ops;
1786
1787         if (sparc_cpu_model == sun4d || sparc_cpu_model == sparc_leon) {
1788                 smp_cachetlb_ops.tlb_all = local_ops->tlb_all;
1789                 smp_cachetlb_ops.tlb_mm = local_ops->tlb_mm;
1790                 smp_cachetlb_ops.tlb_range = local_ops->tlb_range;
1791                 smp_cachetlb_ops.tlb_page = local_ops->tlb_page;
1792         }
1793
1794         if (poke_srmmu == poke_viking) {
1795                 /* Avoid unnecessary cross calls. */
1796                 smp_cachetlb_ops.cache_all = local_ops->cache_all;
1797                 smp_cachetlb_ops.cache_mm = local_ops->cache_mm;
1798                 smp_cachetlb_ops.cache_range = local_ops->cache_range;
1799                 smp_cachetlb_ops.cache_page = local_ops->cache_page;
1800
1801                 smp_cachetlb_ops.page_to_ram = local_ops->page_to_ram;
1802                 smp_cachetlb_ops.sig_insns = local_ops->sig_insns;
1803                 smp_cachetlb_ops.page_for_dma = local_ops->page_for_dma;
1804         }
1805
1806         /* It really is const after this point. */
1807         sparc32_cachetlb_ops = (const struct sparc32_cachetlb_ops *)
1808                 &smp_cachetlb_ops;
1809 #endif
1810
1811         if (sparc_cpu_model == sun4d)
1812                 ld_mmu_iounit();
1813         else
1814                 ld_mmu_iommu();
1815 #ifdef CONFIG_SMP
1816         if (sparc_cpu_model == sun4d)
1817                 sun4d_init_smp();
1818         else if (sparc_cpu_model == sparc_leon)
1819                 leon_init_smp();
1820         else
1821                 sun4m_init_smp();
1822 #endif
1823 }