/*
 * srmmu.c:  SRMMU specific routines for memory management.
 *
 * Copyright (C) 1995 David S. Miller  (davem@caip.rutgers.edu)
 * Copyright (C) 1995,2002 Pete Zaitcev (zaitcev@yahoo.com)
 * Copyright (C) 1996 Eddie C. Dost    (ecd@skynet.be)
 * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 * Copyright (C) 1999,2000 Anton Blanchard (anton@samba.org)
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/kdebug.h>
#include <linux/log2.h>
#include <linux/gfp.h>

#include <asm/bitext.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/vaddrs.h>
#include <asm/traps.h>
#include <asm/smp.h>
#include <asm/mbus.h>
#include <asm/cache.h>
#include <asm/oplib.h>
#include <asm/asi.h>
#include <asm/msi.h>
#include <asm/mmu_context.h>
#include <asm/io-unit.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

/* Now the cpu specific definitions. */
#include <asm/viking.h>
#include <asm/mxcc.h>
#include <asm/ross.h>
#include <asm/tsunami.h>
#include <asm/swift.h>
#include <asm/turbosparc.h>
#include <asm/leon.h>

enum mbus_module srmmu_modtype;
static unsigned int hwbug_bitmask;
int vac_cache_size;
int vac_line_size;

struct ctx_list *ctx_list_pool;
struct ctx_list ctx_free;
struct ctx_list ctx_used;

extern struct resource sparc_iomap;

extern unsigned long last_valid_pfn;

static pgd_t *srmmu_swapper_pg_dir;

const struct sparc32_cachetlb_ops *sparc32_cachetlb_ops;

#ifdef CONFIG_SMP
const struct sparc32_cachetlb_ops *local_ops;

#define FLUSH_BEGIN(mm)
#define FLUSH_END
#else
#define FLUSH_BEGIN(mm) if ((mm)->context != NO_CONTEXT) {
#define FLUSH_END	}
#endif
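
/*
 * Note on the FLUSH_BEGIN/FLUSH_END pair: on UP builds it wraps the
 * flush body in "if ((mm)->context != NO_CONTEXT) { ... }", so per-mm
 * flushes are no-ops for address spaces that never got a hardware
 * context.  On SMP both macros expand to nothing, because the
 * smp_flush_*() wrappers further down perform the NO_CONTEXT check
 * themselves before cross-calling.
 */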

int flush_page_for_dma_global = 1;

ctxd_t *srmmu_ctx_table_phys;
static ctxd_t *srmmu_context_table;
static int num_contexts;	/* number of hardware contexts, set in srmmu_paging_init() */

int viking_mxcc_present;
static DEFINE_SPINLOCK(srmmu_context_spinlock);

static int is_hypersparc;

static int srmmu_cache_pagetables;

/* these will be initialized in srmmu_nocache_calcsize() */
static unsigned long srmmu_nocache_size;
static unsigned long srmmu_nocache_end;

/* 1 bit <=> 256 bytes of nocache <=> 64 PTEs */
#define SRMMU_NOCACHE_BITMAP_SHIFT (PAGE_SHIFT - 4)

/* The context table is a nocache user with the biggest alignment needs. */
#define SRMMU_NOCACHE_ALIGN_MAX (sizeof(ctxd_t)*SRMMU_MAX_CONTEXTS)
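
/*
 * Worked example, assuming 4 KiB pages (PAGE_SHIFT == 12):
 * SRMMU_NOCACHE_BITMAP_SHIFT is 12 - 4 = 8, so one bitmap bit covers
 * 1 << 8 = 256 bytes of pool -- exactly one hardware PTE table
 * (64 PTEs of 4 bytes each), matching the comment above.
 */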

void *srmmu_nocache_pool;
void *srmmu_nocache_bitmap;
static struct bit_map srmmu_nocache_map;

static inline int srmmu_pmd_none(pmd_t pmd)
{ return !(pmd_val(pmd) & 0xFFFFFFF); }

/* XXX should we hyper_flush_whole_icache here - Anton */
static inline void srmmu_ctxd_set(ctxd_t *ctxp, pgd_t *pgdp)
{ set_pte((pte_t *)ctxp, (SRMMU_ET_PTD | (__nocache_pa((unsigned long) pgdp) >> 4))); }
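
/*
 * SRMMU table pointers (PTDs) hold the physical address of the next
 * level shifted right by 4, leaving the low bits for the entry type
 * field -- hence all the ">> 4" arithmetic here.  Linux uses page-sized
 * PTE tables, so each software pmd_t glues together
 * PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE (i.e. 16) consecutive 256-byte
 * hardware tables through its pmdv[] array.
 */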
void pmd_set(pmd_t *pmdp, pte_t *ptep)
{
	unsigned long ptp;	/* Physical address, shifted right by 4 */
	int i;

	ptp = __nocache_pa((unsigned long) ptep) >> 4;
	for (i = 0; i < PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE; i++) {
		set_pte((pte_t *)&pmdp->pmdv[i], SRMMU_ET_PTD | ptp);
		ptp += (SRMMU_REAL_PTRS_PER_PTE*sizeof(pte_t) >> 4);
	}
}

void pmd_populate(struct mm_struct *mm, pmd_t *pmdp, struct page *ptep)
{
	unsigned long ptp;	/* Physical address, shifted right by 4 */
	int i;

	ptp = page_to_pfn(ptep) << (PAGE_SHIFT-4);	/* watch for overflow */
	for (i = 0; i < PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE; i++) {
		set_pte((pte_t *)&pmdp->pmdv[i], SRMMU_ET_PTD | ptp);
		ptp += (SRMMU_REAL_PTRS_PER_PTE*sizeof(pte_t) >> 4);
	}
}

/* Find an entry in the third-level page table.. */
pte_t *pte_offset_kernel(pmd_t *dir, unsigned long address)
{
	void *pte;

	pte = __nocache_va((dir->pmdv[0] & SRMMU_PTD_PMASK) << 4);
	return (pte_t *) pte +
	    ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
}

/*
 * size: bytes to allocate in the nocache area.
 * align: bytes, number to align at.
 * Returns the virtual address of the allocated area.
 */
static unsigned long __srmmu_get_nocache(int size, int align)
{
	int offset;

	if (size < SRMMU_NOCACHE_BITMAP_SHIFT) {
		printk("Size 0x%x too small for nocache request\n", size);
		size = SRMMU_NOCACHE_BITMAP_SHIFT;
	}
	if (size & (SRMMU_NOCACHE_BITMAP_SHIFT - 1)) {
		printk("Size 0x%x unaligned in nocache request\n", size);
		size += SRMMU_NOCACHE_BITMAP_SHIFT - 1;
	}
	BUG_ON(align > SRMMU_NOCACHE_ALIGN_MAX);

	offset = bit_map_string_get(&srmmu_nocache_map,
				    size >> SRMMU_NOCACHE_BITMAP_SHIFT,
				    align >> SRMMU_NOCACHE_BITMAP_SHIFT);
	if (offset == -1) {
		printk("srmmu: out of nocache %d: %d/%d\n",
		       size, (int) srmmu_nocache_size,
		       srmmu_nocache_map.used << SRMMU_NOCACHE_BITMAP_SHIFT);
		return 0;
	}

	return (SRMMU_NOCACHE_VADDR + (offset << SRMMU_NOCACHE_BITMAP_SHIFT));
}

unsigned long srmmu_get_nocache(int size, int align)
{
	unsigned long tmp;

	tmp = __srmmu_get_nocache(size, align);

	if (tmp)
		memset((void *)tmp, 0, size);

	return tmp;
}
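
/*
 * Typical use (see the page table code below): a 256-byte hardware PTE
 * table is carved out with __srmmu_get_nocache(PTE_SIZE, PTE_SIZE).
 * The returned address lies in the SRMMU_NOCACHE_VADDR window; it is
 * translated with __nocache_pa() when the MMU needs the physical view,
 * and with __nocache_fix() while the nocache mapping itself is not yet
 * live during early boot.
 */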

void srmmu_free_nocache(unsigned long vaddr, int size)
{
	int offset;

	if (vaddr < SRMMU_NOCACHE_VADDR) {
		printk("Vaddr %lx is smaller than nocache base 0x%lx\n",
		    vaddr, (unsigned long)SRMMU_NOCACHE_VADDR);
		BUG();
	}
	if (vaddr + size > srmmu_nocache_end) {
		printk("Vaddr %lx is bigger than nocache end 0x%lx\n",
		    vaddr, srmmu_nocache_end);
		BUG();
	}
	if (!is_power_of_2(size)) {
		printk("Size 0x%x is not a power of 2\n", size);
		BUG();
	}
	if (size < SRMMU_NOCACHE_BITMAP_SHIFT) {
		printk("Size 0x%x is too small\n", size);
		BUG();
	}
	if (vaddr & (size - 1)) {
		printk("Vaddr %lx is not aligned to size 0x%x\n", vaddr, size);
		BUG();
	}

	offset = (vaddr - SRMMU_NOCACHE_VADDR) >> SRMMU_NOCACHE_BITMAP_SHIFT;
	size = size >> SRMMU_NOCACHE_BITMAP_SHIFT;

	bit_map_clear(&srmmu_nocache_map, offset, size);
}

static void srmmu_early_allocate_ptable_skeleton(unsigned long start,
						 unsigned long end);

extern unsigned long probe_memory(void);	/* in fault.c */

/*
 * Reserve nocache dynamically proportionally to the amount of
 * system RAM. -- Tomas Szepe <szepe@pinerecords.com>, June 2002
 */
static void srmmu_nocache_calcsize(void)
{
	unsigned long sysmemavail = probe_memory() / 1024;
	int srmmu_nocache_npages;

	srmmu_nocache_npages =
		sysmemavail / SRMMU_NOCACHE_ALCRATIO / 1024 * 256;

	/* P3 XXX The 4x overuse: corroborated by /proc/meminfo. */
	// if (srmmu_nocache_npages < 256) srmmu_nocache_npages = 256;
	if (srmmu_nocache_npages < SRMMU_MIN_NOCACHE_PAGES)
		srmmu_nocache_npages = SRMMU_MIN_NOCACHE_PAGES;

	/* anything above 1280 blows up */
	if (srmmu_nocache_npages > SRMMU_MAX_NOCACHE_PAGES)
		srmmu_nocache_npages = SRMMU_MAX_NOCACHE_PAGES;

	srmmu_nocache_size = srmmu_nocache_npages * PAGE_SIZE;
	srmmu_nocache_end = SRMMU_NOCACHE_VADDR + srmmu_nocache_size;
}
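
/*
 * Worked example (assuming SRMMU_NOCACHE_ALCRATIO is 64, i.e. 256 pool
 * pages per 64 MB of system RAM): a 64 MB machine has
 * sysmemavail == 65536 (KB), so 65536 / 64 / 1024 * 256 == 256 pages,
 * a 1 MB nocache pool -- subject to the MIN/MAX clamping above.
 */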

static void __init srmmu_nocache_init(void)
{
	unsigned int bitmap_bits;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long paddr, vaddr;
	unsigned long pteval;

	bitmap_bits = srmmu_nocache_size >> SRMMU_NOCACHE_BITMAP_SHIFT;

	srmmu_nocache_pool = __alloc_bootmem(srmmu_nocache_size,
		SRMMU_NOCACHE_ALIGN_MAX, 0UL);
	memset(srmmu_nocache_pool, 0, srmmu_nocache_size);

	srmmu_nocache_bitmap = __alloc_bootmem(bitmap_bits >> 3, SMP_CACHE_BYTES, 0UL);
	bit_map_init(&srmmu_nocache_map, srmmu_nocache_bitmap, bitmap_bits);

	srmmu_swapper_pg_dir = (pgd_t *)__srmmu_get_nocache(SRMMU_PGD_TABLE_SIZE, SRMMU_PGD_TABLE_SIZE);
	memset(__nocache_fix(srmmu_swapper_pg_dir), 0, SRMMU_PGD_TABLE_SIZE);
	init_mm.pgd = srmmu_swapper_pg_dir;

	srmmu_early_allocate_ptable_skeleton(SRMMU_NOCACHE_VADDR, srmmu_nocache_end);

	paddr = __pa((unsigned long)srmmu_nocache_pool);
	vaddr = SRMMU_NOCACHE_VADDR;

	while (vaddr < srmmu_nocache_end) {
		pgd = pgd_offset_k(vaddr);
		pmd = pmd_offset(__nocache_fix(pgd), vaddr);
		pte = pte_offset_kernel(__nocache_fix(pmd), vaddr);

		pteval = ((paddr >> 4) | SRMMU_ET_PTE | SRMMU_PRIV);

		if (srmmu_cache_pagetables)
			pteval |= SRMMU_CACHE;

		set_pte(__nocache_fix(pte), __pte(pteval));

		vaddr += PAGE_SIZE;
		paddr += PAGE_SIZE;
	}

	flush_cache_all();
	flush_tlb_all();
}

pgd_t *get_pgd_fast(void)
{
	pgd_t *pgd = NULL;

	pgd = (pgd_t *)__srmmu_get_nocache(SRMMU_PGD_TABLE_SIZE, SRMMU_PGD_TABLE_SIZE);
	if (pgd) {
		pgd_t *init = pgd_offset_k(0);
		memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
		memcpy(pgd + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
		       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
	}

	return pgd;
}

/*
 * Hardware needs alignment to 256 only, but we align to whole page size
 * to reduce fragmentation problems due to the buddy principle.
 * XXX Provide actual fragmentation statistics in /proc.
 *
 * Alignments up to the page size are the same for physical and virtual
 * addresses of the nocache area.
 */
pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	unsigned long pte;
	struct page *page;

	if ((pte = (unsigned long)pte_alloc_one_kernel(mm, address)) == 0)
		return NULL;
	page = pfn_to_page(__nocache_pa(pte) >> PAGE_SHIFT);
	pgtable_page_ctor(page);
	return page;
}

void pte_free(struct mm_struct *mm, pgtable_t pte)
{
	unsigned long p;

	pgtable_page_dtor(pte);
	p = (unsigned long)page_address(pte);	/* Cached address (for test) */
	if (p == 0)
		BUG();
	p = page_to_pfn(pte) << PAGE_SHIFT;	/* Physical address */
	p = (unsigned long) __nocache_va(p);	/* Nocached virtual */
	srmmu_free_nocache(p, PTE_SIZE);
}
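
/*
 * alloc_context() hands out one of the hardware contexts tracked in
 * ctx_free/ctx_used.  When none are free it steals an in-use context
 * from the head of ctx_used (skipping old_mm), flushing that mm's
 * cache and TLB entries before the context number is reassigned.
 */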
static inline void alloc_context(struct mm_struct *old_mm, struct mm_struct *mm)
{
	struct ctx_list *ctxp;

	ctxp = ctx_free.next;
	if (ctxp != &ctx_free) {
		remove_from_ctx_list(ctxp);
		add_to_used_ctxlist(ctxp);
		mm->context = ctxp->ctx_number;
		ctxp->ctx_mm = mm;
		return;
	}
	ctxp = ctx_used.next;
	if (ctxp->ctx_mm == old_mm)
		ctxp = ctxp->next;
	if (ctxp == &ctx_used)
		panic("out of mmu contexts");
	flush_cache_mm(ctxp->ctx_mm);
	flush_tlb_mm(ctxp->ctx_mm);
	remove_from_ctx_list(ctxp);
	add_to_used_ctxlist(ctxp);
	ctxp->ctx_mm->context = NO_CONTEXT;
	ctxp->ctx_mm = mm;
	mm->context = ctxp->ctx_number;
}

static inline void free_context(int context)
{
	struct ctx_list *ctx_old;

	ctx_old = ctx_list_pool + context;
	remove_from_ctx_list(ctx_old);
	add_to_free_ctxlist(ctx_old);
}

void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm,
	       struct task_struct *tsk)
{
	if (mm->context == NO_CONTEXT) {
		spin_lock(&srmmu_context_spinlock);
		alloc_context(old_mm, mm);
		spin_unlock(&srmmu_context_spinlock);
		srmmu_ctxd_set(&srmmu_context_table[mm->context], mm->pgd);
	}

	if (sparc_cpu_model == sparc_leon)
		leon_switch_mm();

	if (is_hypersparc)
		hyper_flush_whole_icache();

	srmmu_set_context(mm->context);
}

/* Low level IO area allocation on the SRMMU. */
static inline void srmmu_mapioaddr(unsigned long physaddr,
				   unsigned long virt_addr, int bus_type)
{
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;
	unsigned long tmp;

	physaddr &= PAGE_MASK;
	pgdp = pgd_offset_k(virt_addr);
	pmdp = pmd_offset(pgdp, virt_addr);
	ptep = pte_offset_kernel(pmdp, virt_addr);
	tmp = (physaddr >> 4) | SRMMU_ET_PTE;

	/* I need to test whether this is consistent over all
	 * sun4m's.  The bus_type represents the upper 4 bits of
	 * 36-bit physical address on the I/O space lines...
	 */
	tmp |= (bus_type << 28);
	tmp |= SRMMU_PRIV;
	__flush_page_to_ram(virt_addr);
	set_pte(ptep, __pte(tmp));
}

void srmmu_mapiorange(unsigned int bus, unsigned long xpa,
		      unsigned long xva, unsigned int len)
{
	while (len != 0) {
		len -= PAGE_SIZE;
		srmmu_mapioaddr(xpa, xva, bus);
		xva += PAGE_SIZE;
		xpa += PAGE_SIZE;
	}
	flush_tlb_all();
}
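
/*
 * Illustration (addresses hypothetical): to map an 8 KiB device window
 * at 36-bit physical 0xf.f1000000, a caller would use
 * srmmu_mapiorange(0xf, 0xf1000000, vaddr, 2 * PAGE_SIZE); the bus
 * argument supplies the upper 4 physical address bits that
 * srmmu_mapioaddr() ORs into the PTE at bit position 28.
 */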

static inline void srmmu_unmapioaddr(unsigned long virt_addr)
{
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;

	pgdp = pgd_offset_k(virt_addr);
	pmdp = pmd_offset(pgdp, virt_addr);
	ptep = pte_offset_kernel(pmdp, virt_addr);

	/* No need to flush uncacheable page. */
	__pte_clear(ptep);
}

void srmmu_unmapiorange(unsigned long virt_addr, unsigned int len)
{
	while (len != 0) {
		len -= PAGE_SIZE;
		srmmu_unmapioaddr(virt_addr);
		virt_addr += PAGE_SIZE;
	}
	flush_tlb_all();
}

/*
 * On the SRMMU we do not have the problems with limited tlb entries
 * for mapping kernel pages, so we just take things from the free page
 * pool.  As a side effect we are putting a little too much pressure
 * on the gfp() subsystem.  This setup also makes the logic of the
 * iommu mapping code a lot easier as we can transparently handle
 * mappings on the kernel stack without any special code.
 */
struct thread_info *alloc_thread_info_node(struct task_struct *tsk, int node)
{
	struct thread_info *ret;

	ret = (struct thread_info *)__get_free_pages(GFP_KERNEL,
						     THREAD_INFO_ORDER);
#ifdef CONFIG_DEBUG_STACK_USAGE
	if (ret)
		memset(ret, 0, PAGE_SIZE << THREAD_INFO_ORDER);
#endif /* DEBUG_STACK_USAGE */

	return ret;
}

void free_thread_info(struct thread_info *ti)
{
	free_pages((unsigned long)ti, THREAD_INFO_ORDER);
}

/* tsunami.S */
extern void tsunami_flush_cache_all(void);
extern void tsunami_flush_cache_mm(struct mm_struct *mm);
extern void tsunami_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void tsunami_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
extern void tsunami_flush_page_to_ram(unsigned long page);
extern void tsunami_flush_page_for_dma(unsigned long page);
extern void tsunami_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr);
extern void tsunami_flush_tlb_all(void);
extern void tsunami_flush_tlb_mm(struct mm_struct *mm);
extern void tsunami_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void tsunami_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
extern void tsunami_setup_blockops(void);

/* swift.S */
extern void swift_flush_cache_all(void);
extern void swift_flush_cache_mm(struct mm_struct *mm);
extern void swift_flush_cache_range(struct vm_area_struct *vma,
				    unsigned long start, unsigned long end);
extern void swift_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
extern void swift_flush_page_to_ram(unsigned long page);
extern void swift_flush_page_for_dma(unsigned long page);
extern void swift_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr);
extern void swift_flush_tlb_all(void);
extern void swift_flush_tlb_mm(struct mm_struct *mm);
extern void swift_flush_tlb_range(struct vm_area_struct *vma,
				  unsigned long start, unsigned long end);
extern void swift_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);

#if 0 /* P3: deadwood to debug precise flushes on Swift. */
void swift_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	int cctx, ctx1;

	page &= PAGE_MASK;
	if ((ctx1 = vma->vm_mm->context) != -1) {
		cctx = srmmu_get_context();
/* Is context # ever different from current context? P3 */
		if (cctx != ctx1) {
			printk("flush ctx %02x curr %02x\n", ctx1, cctx);
			srmmu_set_context(ctx1);
			swift_flush_page(page);
			__asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
					"r" (page), "i" (ASI_M_FLUSH_PROBE));
			srmmu_set_context(cctx);
		} else {
			/* Rm. prot. bits from virt. c. */
			/* swift_flush_cache_all(); */
			/* swift_flush_cache_page(vma, page); */
			swift_flush_page(page);

			__asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
				"r" (page), "i" (ASI_M_FLUSH_PROBE));
			/* same as above: srmmu_flush_tlb_page() */
		}
	}
}
#endif

/*
 * The following are all MBUS based SRMMU modules, and therefore could
 * be found in a multiprocessor configuration.  On the whole, these
 * chips seem to be much more touchy about DVMA and page tables
 * with respect to cache coherency.
 */

/* viking.S */
extern void viking_flush_cache_all(void);
extern void viking_flush_cache_mm(struct mm_struct *mm);
extern void viking_flush_cache_range(struct vm_area_struct *vma, unsigned long start,
				     unsigned long end);
extern void viking_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
extern void viking_flush_page_to_ram(unsigned long page);
extern void viking_flush_page_for_dma(unsigned long page);
extern void viking_flush_sig_insns(struct mm_struct *mm, unsigned long addr);
extern void viking_flush_page(unsigned long page);
extern void viking_mxcc_flush_page(unsigned long page);
extern void viking_flush_tlb_all(void);
extern void viking_flush_tlb_mm(struct mm_struct *mm);
extern void viking_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
				   unsigned long end);
extern void viking_flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long page);
extern void sun4dsmp_flush_tlb_all(void);
extern void sun4dsmp_flush_tlb_mm(struct mm_struct *mm);
extern void sun4dsmp_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
				     unsigned long end);
extern void sun4dsmp_flush_tlb_page(struct vm_area_struct *vma,
				    unsigned long page);

/* hypersparc.S */
extern void hypersparc_flush_cache_all(void);
extern void hypersparc_flush_cache_mm(struct mm_struct *mm);
extern void hypersparc_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void hypersparc_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
extern void hypersparc_flush_page_to_ram(unsigned long page);
extern void hypersparc_flush_page_for_dma(unsigned long page);
extern void hypersparc_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr);
extern void hypersparc_flush_tlb_all(void);
extern void hypersparc_flush_tlb_mm(struct mm_struct *mm);
extern void hypersparc_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void hypersparc_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
extern void hypersparc_setup_blockops(void);

/*
 * NOTE: All of this startup code assumes the low 16mb (approx.) of
 *       kernel mappings are done with one single contiguous chunk of
 *       ram.  On small ram machines (classics mainly) we only get
 *       around 8mb mapped for us.
 */

static void __init early_pgtable_allocfail(char *type)
{
	prom_printf("inherit_prom_mappings: Cannot alloc kernel %s.\n", type);
	prom_halt();
}

static void __init srmmu_early_allocate_ptable_skeleton(unsigned long start,
							unsigned long end)
{
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;

	while (start < end) {
		pgdp = pgd_offset_k(start);
		if (pgd_none(*(pgd_t *)__nocache_fix(pgdp))) {
			pmdp = (pmd_t *) __srmmu_get_nocache(
			    SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
			if (pmdp == NULL)
				early_pgtable_allocfail("pmd");
			memset(__nocache_fix(pmdp), 0, SRMMU_PMD_TABLE_SIZE);
			pgd_set(__nocache_fix(pgdp), pmdp);
		}
		pmdp = pmd_offset(__nocache_fix(pgdp), start);
		if (srmmu_pmd_none(*(pmd_t *)__nocache_fix(pmdp))) {
			ptep = (pte_t *)__srmmu_get_nocache(PTE_SIZE, PTE_SIZE);
			if (ptep == NULL)
				early_pgtable_allocfail("pte");
			memset(__nocache_fix(ptep), 0, PTE_SIZE);
			pmd_set(__nocache_fix(pmdp), ptep);
		}
		if (start > (0xffffffffUL - PMD_SIZE))
			break;
		start = (start + PMD_SIZE) & PMD_MASK;
	}
}
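
/*
 * srmmu_allocate_ptable_skeleton() below is the same walk minus the
 * __nocache_fix() translations: the early variant runs while the MMU
 * is still using the PROM's page tables, so freshly allocated nocache
 * memory has to be touched through its linear-mapping alias
 * (effectively __va(__nocache_pa(addr))) rather than through the not
 * yet established nocache mapping.
 */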
static void __init srmmu_allocate_ptable_skeleton(unsigned long start,
						  unsigned long end)
{
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;

	while (start < end) {
		pgdp = pgd_offset_k(start);
		if (pgd_none(*pgdp)) {
			pmdp = (pmd_t *)__srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
			if (pmdp == NULL)
				early_pgtable_allocfail("pmd");
			memset(pmdp, 0, SRMMU_PMD_TABLE_SIZE);
			pgd_set(pgdp, pmdp);
		}
		pmdp = pmd_offset(pgdp, start);
		if (srmmu_pmd_none(*pmdp)) {
			ptep = (pte_t *) __srmmu_get_nocache(PTE_SIZE,
							     PTE_SIZE);
			if (ptep == NULL)
				early_pgtable_allocfail("pte");
			memset(ptep, 0, PTE_SIZE);
			pmd_set(pmdp, ptep);
		}
		if (start > (0xffffffffUL - PMD_SIZE))
			break;
		start = (start + PMD_SIZE) & PMD_MASK;
	}
}

/*
 * This is much cleaner than poking around physical address space
 * looking at the prom's page table directly which is what most
 * other OS's do.  Yuck... this is much better.
 */
static void __init srmmu_inherit_prom_mappings(unsigned long start,
					       unsigned long end)
{
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;
	int what;	/* 0 = normal-pte, 1 = pmd-level pte, 2 = pgd-level pte */
	unsigned long prompte;

	while (start <= end) {
		if (start == 0)
			break; /* probably wrap around */
		if (start == 0xfef00000)
			start = KADB_DEBUGGER_BEGVM;
		if (!(prompte = srmmu_hwprobe(start))) {
			start += PAGE_SIZE;
			continue;
		}

		/* A red snapper, see what it really is. */
		what = 0;

		if (!(start & ~(SRMMU_REAL_PMD_MASK))) {
			if (srmmu_hwprobe((start - PAGE_SIZE) + SRMMU_REAL_PMD_SIZE) == prompte)
				what = 1;
		}

		if (!(start & ~(SRMMU_PGDIR_MASK))) {
			if (srmmu_hwprobe((start - PAGE_SIZE) + SRMMU_PGDIR_SIZE) ==
			    prompte)
				what = 2;
		}

		pgdp = pgd_offset_k(start);
		if (what == 2) {
			*(pgd_t *)__nocache_fix(pgdp) = __pgd(prompte);
			start += SRMMU_PGDIR_SIZE;
			continue;
		}
		if (pgd_none(*(pgd_t *)__nocache_fix(pgdp))) {
			pmdp = (pmd_t *)__srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
			if (pmdp == NULL)
				early_pgtable_allocfail("pmd");
			memset(__nocache_fix(pmdp), 0, SRMMU_PMD_TABLE_SIZE);
			pgd_set(__nocache_fix(pgdp), pmdp);
		}
		pmdp = pmd_offset(__nocache_fix(pgdp), start);
		if (srmmu_pmd_none(*(pmd_t *)__nocache_fix(pmdp))) {
			ptep = (pte_t *) __srmmu_get_nocache(PTE_SIZE,
							     PTE_SIZE);
			if (ptep == NULL)
				early_pgtable_allocfail("pte");
			memset(__nocache_fix(ptep), 0, PTE_SIZE);
			pmd_set(__nocache_fix(pmdp), ptep);
		}
		if (what == 1) {
			/*
			 * We bend the rule where all 16 PTPs in a pmd_t point
			 * inside the same PTE page, and we leak a perfectly
			 * good hardware PTE piece. Alternatives seem worse.
			 */
			unsigned int x;	/* Index of HW PMD in soft cluster */
			x = (start >> PMD_SHIFT) & 15;
			*(unsigned long *)__nocache_fix(&pmdp->pmdv[x]) = prompte;
			start += SRMMU_REAL_PMD_SIZE;
			continue;
		}
		ptep = pte_offset_kernel(__nocache_fix(pmdp), start);
		*(pte_t *)__nocache_fix(ptep) = __pte(prompte);
		start += PAGE_SIZE;
	}
}

#define KERNEL_PTE(page_shifted) ((page_shifted)|SRMMU_CACHE|SRMMU_PRIV|SRMMU_VALID)

/* Create a third-level SRMMU 16MB page mapping. */
static void __init do_large_mapping(unsigned long vaddr, unsigned long phys_base)
{
	pgd_t *pgdp = pgd_offset_k(vaddr);
	unsigned long big_pte;

	big_pte = KERNEL_PTE(phys_base >> 4);
	*(pgd_t *)__nocache_fix(pgdp) = __pgd(big_pte);
}
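
/*
 * A pgd-level PTE like this maps a whole SRMMU_PGDIR_SIZE (16 MB)
 * region with a single entry, which is why map_spbank() below walks
 * physical memory banks in SRMMU_PGDIR_SIZE steps.
 */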

/* Map sp_bank entry SP_ENTRY, starting at virtual address VBASE. */
static unsigned long __init map_spbank(unsigned long vbase, int sp_entry)
{
	unsigned long pstart = (sp_banks[sp_entry].base_addr & SRMMU_PGDIR_MASK);
	unsigned long vstart = (vbase & SRMMU_PGDIR_MASK);
	unsigned long vend = SRMMU_PGDIR_ALIGN(vbase + sp_banks[sp_entry].num_bytes);
	/* Map "low" memory only */
	const unsigned long min_vaddr = PAGE_OFFSET;
	const unsigned long max_vaddr = PAGE_OFFSET + SRMMU_MAXMEM;

	if (vstart < min_vaddr || vstart >= max_vaddr)
		return vstart;

	if (vend > max_vaddr || vend < min_vaddr)
		vend = max_vaddr;

	while (vstart < vend) {
		do_large_mapping(vstart, pstart);
		vstart += SRMMU_PGDIR_SIZE; pstart += SRMMU_PGDIR_SIZE;
	}
	return vstart;
}

static inline void map_kernel(void)
{
	int i;

	do_large_mapping(PAGE_OFFSET, phys_base);

	for (i = 0; sp_banks[i].num_bytes != 0; i++) {
		map_spbank((unsigned long)__va(sp_banks[i].base_addr), i);
	}
}

/* Paging initialization on the Sparc Reference MMU. */
extern void sparc_context_init(int);

void (*poke_srmmu)(void) __cpuinitdata = NULL;

extern unsigned long bootmem_init(unsigned long *pages_avail);

void __init srmmu_paging_init(void)
{
	int i;
	phandle cpunode;
	char node_str[128];
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long pages_avail;

	sparc_iomap.start = SUN4M_IOBASE_VADDR;	/* 16MB of IOSPACE on all sun4m's. */

	if (sparc_cpu_model == sun4d)
		num_contexts = 65536; /* We know it is Viking */
	else {
		/* Find the number of contexts on the srmmu. */
		cpunode = prom_getchild(prom_root_node);
		num_contexts = 0;
		while (cpunode != 0) {
			prom_getstring(cpunode, "device_type", node_str, sizeof(node_str));
			if (!strcmp(node_str, "cpu")) {
				num_contexts = prom_getintdefault(cpunode, "mmu-nctx", 0x8);
				break;
			}
			cpunode = prom_getsibling(cpunode);
		}
	}

	if (!num_contexts) {
		prom_printf("Something wrong, can't find cpu node in paging_init.\n");
		prom_halt();
	}

	pages_avail = 0;
	last_valid_pfn = bootmem_init(&pages_avail);

	srmmu_nocache_calcsize();
	srmmu_nocache_init();
	srmmu_inherit_prom_mappings(0xfe400000, (LINUX_OPPROM_ENDVM - PAGE_SIZE));
	map_kernel();

	/* ctx table has to be physically aligned to its size */
	srmmu_context_table = (ctxd_t *)__srmmu_get_nocache(num_contexts * sizeof(ctxd_t), num_contexts * sizeof(ctxd_t));
	srmmu_ctx_table_phys = (ctxd_t *)__nocache_pa((unsigned long)srmmu_context_table);

	for (i = 0; i < num_contexts; i++)
		srmmu_ctxd_set((ctxd_t *)__nocache_fix(&srmmu_context_table[i]), srmmu_swapper_pg_dir);

	flush_cache_all();
	srmmu_set_ctable_ptr((unsigned long)srmmu_ctx_table_phys);
#ifdef CONFIG_SMP
	/* Stop from hanging here... */
	local_ops->tlb_all();
#else
	flush_tlb_all();
#endif
	poke_srmmu();

	srmmu_allocate_ptable_skeleton(sparc_iomap.start, IOBASE_END);
	srmmu_allocate_ptable_skeleton(DVMA_VADDR, DVMA_END);

	srmmu_allocate_ptable_skeleton(
		__fix_to_virt(__end_of_fixed_addresses - 1), FIXADDR_TOP);
	srmmu_allocate_ptable_skeleton(PKMAP_BASE, PKMAP_END);

	pgd = pgd_offset_k(PKMAP_BASE);
	pmd = pmd_offset(pgd, PKMAP_BASE);
	pte = pte_offset_kernel(pmd, PKMAP_BASE);
	pkmap_page_table = pte;

	flush_cache_all();
	flush_tlb_all();

	sparc_context_init(num_contexts);

	kmap_init();

	{
		unsigned long zones_size[MAX_NR_ZONES];
		unsigned long zholes_size[MAX_NR_ZONES];
		unsigned long npages;
		int znum;

		for (znum = 0; znum < MAX_NR_ZONES; znum++)
			zones_size[znum] = zholes_size[znum] = 0;

		npages = max_low_pfn - pfn_base;

		zones_size[ZONE_DMA] = npages;
		zholes_size[ZONE_DMA] = npages - pages_avail;

		npages = highend_pfn - max_low_pfn;
		zones_size[ZONE_HIGHMEM] = npages;
		zholes_size[ZONE_HIGHMEM] = npages - calc_highpages();

		free_area_init_node(0, zones_size, pfn_base, zholes_size);
	}
}

void mmu_info(struct seq_file *m)
{
	seq_printf(m,
		   "MMU type\t: %s\n"
		   "contexts\t: %d\n"
		   "nocache total\t: %ld\n"
		   "nocache used\t: %d\n",
		   srmmu_name,
		   num_contexts,
		   srmmu_nocache_size,
		   srmmu_nocache_map.used << SRMMU_NOCACHE_BITMAP_SHIFT);
}

void destroy_context(struct mm_struct *mm)
{
	if (mm->context != NO_CONTEXT) {
		flush_cache_mm(mm);
		srmmu_ctxd_set(&srmmu_context_table[mm->context], srmmu_swapper_pg_dir);
		flush_tlb_mm(mm);
		spin_lock(&srmmu_context_spinlock);
		free_context(mm->context);
		spin_unlock(&srmmu_context_spinlock);
		mm->context = NO_CONTEXT;
	}
}

/* Init various srmmu chip types. */
static void __init srmmu_is_bad(void)
{
	prom_printf("Could not determine SRMMU chip type.\n");
	prom_halt();
}

static void __init init_vac_layout(void)
{
	phandle nd;
	int cache_lines;
	char node_str[128];
#ifdef CONFIG_SMP
	int cpu = 0;
	unsigned long max_size = 0;
	unsigned long min_line_size = 0x10000000;
#endif

	nd = prom_getchild(prom_root_node);
	while ((nd = prom_getsibling(nd)) != 0) {
		prom_getstring(nd, "device_type", node_str, sizeof(node_str));
		if (!strcmp(node_str, "cpu")) {
			vac_line_size = prom_getint(nd, "cache-line-size");
			if (vac_line_size == -1) {
				prom_printf("can't determine cache-line-size, "
					    "halting.\n");
				prom_halt();
			}
			cache_lines = prom_getint(nd, "cache-nlines");
			if (cache_lines == -1) {
				prom_printf("can't determine cache-nlines, halting.\n");
				prom_halt();
			}

			vac_cache_size = cache_lines * vac_line_size;
#ifdef CONFIG_SMP
			if (vac_cache_size > max_size)
				max_size = vac_cache_size;
			if (vac_line_size < min_line_size)
				min_line_size = vac_line_size;
			//FIXME: cpus not contiguous!!
			cpu++;
			if (cpu >= nr_cpu_ids || !cpu_online(cpu))
				break;
#else
			break;
#endif
		}
	}
	if (nd == 0) {
		prom_printf("No CPU nodes found, halting.\n");
		prom_halt();
	}
#ifdef CONFIG_SMP
	vac_cache_size = max_size;
	vac_line_size = min_line_size;
#endif
	printk("SRMMU: Using VAC size of %d bytes, line size %d bytes.\n",
	       (int)vac_cache_size, (int)vac_line_size);
}

static void __cpuinit poke_hypersparc(void)
{
	volatile unsigned long clear;
	unsigned long mreg = srmmu_get_mmureg();

	hyper_flush_unconditional_combined();

	mreg &= ~(HYPERSPARC_CWENABLE);
	mreg |= (HYPERSPARC_CENABLE | HYPERSPARC_WBENABLE);
	mreg |= (HYPERSPARC_CMODE);

	srmmu_set_mmureg(mreg);

#if 0 /* XXX I think this is bad news... -DaveM */
	hyper_clear_all_tags();
#endif

	put_ross_icr(HYPERSPARC_ICCR_FTD | HYPERSPARC_ICCR_ICE);
	hyper_flush_whole_icache();
	clear = srmmu_get_faddr();
	clear = srmmu_get_fstatus();
}

static const struct sparc32_cachetlb_ops hypersparc_ops = {
	.cache_all	= hypersparc_flush_cache_all,
	.cache_mm	= hypersparc_flush_cache_mm,
	.cache_page	= hypersparc_flush_cache_page,
	.cache_range	= hypersparc_flush_cache_range,
	.tlb_all	= hypersparc_flush_tlb_all,
	.tlb_mm		= hypersparc_flush_tlb_mm,
	.tlb_page	= hypersparc_flush_tlb_page,
	.tlb_range	= hypersparc_flush_tlb_range,
	.page_to_ram	= hypersparc_flush_page_to_ram,
	.sig_insns	= hypersparc_flush_sig_insns,
	.page_for_dma	= hypersparc_flush_page_for_dma,
};

static void __init init_hypersparc(void)
{
	srmmu_name = "ROSS HyperSparc";
	srmmu_modtype = HyperSparc;

	init_vac_layout();

	is_hypersparc = 1;
	sparc32_cachetlb_ops = &hypersparc_ops;

	poke_srmmu = poke_hypersparc;

	hypersparc_setup_blockops();
}

static void __cpuinit poke_swift(void)
{
	unsigned long mreg;

	/* Clear any crap from the cache or else... */
	swift_flush_cache_all();

	/* Enable I & D caches */
	mreg = srmmu_get_mmureg();
	mreg |= (SWIFT_IE | SWIFT_DE);
	/*
	 * The Swift branch folding logic is completely broken.  At
	 * trap time, if things are just right, it can mistakenly
	 * think that a trap is coming from kernel mode when in fact
	 * it is coming from user mode (it mis-executes the branch in
	 * the trap code).  So you see things like crashme completely
	 * hosing your machine which is completely unacceptable.  Turn
	 * this shit off... nice job Fujitsu.
	 */
	mreg &= ~(SWIFT_BF);
	srmmu_set_mmureg(mreg);
}

static const struct sparc32_cachetlb_ops swift_ops = {
	.cache_all	= swift_flush_cache_all,
	.cache_mm	= swift_flush_cache_mm,
	.cache_page	= swift_flush_cache_page,
	.cache_range	= swift_flush_cache_range,
	.tlb_all	= swift_flush_tlb_all,
	.tlb_mm		= swift_flush_tlb_mm,
	.tlb_page	= swift_flush_tlb_page,
	.tlb_range	= swift_flush_tlb_range,
	.page_to_ram	= swift_flush_page_to_ram,
	.sig_insns	= swift_flush_sig_insns,
	.page_for_dma	= swift_flush_page_for_dma,
};

#define SWIFT_MASKID_ADDR  0x10003018
static void __init init_swift(void)
{
	unsigned long swift_rev;

	__asm__ __volatile__("lda [%1] %2, %0\n\t"
			     "srl %0, 0x18, %0\n\t" :
			     "=r" (swift_rev) :
			     "r" (SWIFT_MASKID_ADDR), "i" (ASI_M_BYPASS));
	srmmu_name = "Fujitsu Swift";
	switch (swift_rev) {
	case 0x11:
	case 0x20:
	case 0x23:
	case 0x30:
		srmmu_modtype = Swift_lots_o_bugs;
		hwbug_bitmask |= (HWBUG_KERN_ACCBROKEN | HWBUG_KERN_CBITBROKEN);
		/*
		 * Gee george, I wonder why Sun is so hush hush about
		 * this hardware bug... really braindamage stuff going
		 * on here.  However I think we can find a way to avoid
		 * all of the workaround overhead under Linux.  Basically,
		 * any page fault can cause kernel pages to become user
		 * accessible (the mmu gets confused and clears some of
		 * the ACC bits in kernel ptes).  Aha, sounds pretty
		 * horrible eh?  But wait, after extensive testing it appears
		 * that if you use pgd_t level large kernel pte's (like the
		 * 4MB pages on the Pentium) the bug does not get tripped
		 * at all.  This avoids almost all of the major overhead.
		 * Welcome to a world where your vendor tells you to,
		 * "apply this kernel patch" instead of "sorry for the
		 * broken hardware, send it back and we'll give you
		 * properly functioning parts"
		 */
		break;
	case 0x25:
	case 0x31:
		srmmu_modtype = Swift_bad_c;
		hwbug_bitmask |= HWBUG_KERN_CBITBROKEN;
		/*
		 * You see Sun allude to this hardware bug but never
		 * admit things directly, they'll say things like,
		 * "the Swift chip cache problems" or similar.
		 */
		break;
	default:
		srmmu_modtype = Swift_ok;
		break;
	}

	sparc32_cachetlb_ops = &swift_ops;
	flush_page_for_dma_global = 0;

	/*
	 * Are you now convinced that the Swift is one of the
	 * biggest VLSI abortions of all time?  Bravo Fujitsu!
	 * Fujitsu, the !#?!%$'d up processor people.  I bet if
	 * you examined the microcode of the Swift you'd find
	 * XXX's all over the place.
	 */
	poke_srmmu = poke_swift;
}

static void turbosparc_flush_cache_all(void)
{
	flush_user_windows();
	turbosparc_idflash_clear();
}

static void turbosparc_flush_cache_mm(struct mm_struct *mm)
{
	FLUSH_BEGIN(mm)
	flush_user_windows();
	turbosparc_idflash_clear();
	FLUSH_END
}

static void turbosparc_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	FLUSH_BEGIN(vma->vm_mm)
	flush_user_windows();
	turbosparc_idflash_clear();
	FLUSH_END
}

static void turbosparc_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
{
	FLUSH_BEGIN(vma->vm_mm)
	flush_user_windows();
	if (vma->vm_flags & VM_EXEC)
		turbosparc_flush_icache();
	turbosparc_flush_dcache();
	FLUSH_END
}

/* TurboSparc is copy-back, if we turn it on, but this does not work. */
static void turbosparc_flush_page_to_ram(unsigned long page)
{
#ifdef TURBOSPARC_WRITEBACK
	volatile unsigned long clear;

	if (srmmu_hwprobe(page))
		turbosparc_flush_page_cache(page);
	clear = srmmu_get_fstatus();
#endif
}

static void turbosparc_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
{
}

static void turbosparc_flush_page_for_dma(unsigned long page)
{
	turbosparc_flush_dcache();
}

static void turbosparc_flush_tlb_all(void)
{
	srmmu_flush_whole_tlb();
}

static void turbosparc_flush_tlb_mm(struct mm_struct *mm)
{
	FLUSH_BEGIN(mm)
	srmmu_flush_whole_tlb();
	FLUSH_END
}

static void turbosparc_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	FLUSH_BEGIN(vma->vm_mm)
	srmmu_flush_whole_tlb();
	FLUSH_END
}

static void turbosparc_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	FLUSH_BEGIN(vma->vm_mm)
	srmmu_flush_whole_tlb();
	FLUSH_END
}

static void __cpuinit poke_turbosparc(void)
{
	unsigned long mreg = srmmu_get_mmureg();
	unsigned long ccreg;

	/* Clear any crap from the cache or else... */
	turbosparc_flush_cache_all();
	/* Temporarily disable I & D caches */
	mreg &= ~(TURBOSPARC_ICENABLE | TURBOSPARC_DCENABLE);
	mreg &= ~(TURBOSPARC_PCENABLE);		/* Don't check parity */
	srmmu_set_mmureg(mreg);

	ccreg = turbosparc_get_ccreg();

#ifdef TURBOSPARC_WRITEBACK
	ccreg |= (TURBOSPARC_SNENABLE);		/* Do DVMA snooping in Dcache */
	ccreg &= ~(TURBOSPARC_uS2 | TURBOSPARC_WTENABLE);
			/* Write-back D-cache, emulate VLSI
			 * abortion number three, not number one */
#else
	/* For now let's play safe, optimize later */
	ccreg |= (TURBOSPARC_SNENABLE | TURBOSPARC_WTENABLE);
			/* Do DVMA snooping in Dcache, Write-thru D-cache */
	ccreg &= ~(TURBOSPARC_uS2);
			/* Emulate VLSI abortion number three, not number one */
#endif

	switch (ccreg & 7) {
	case 0: /* No SE cache */
	case 7: /* Test mode */
		break;
	default:
		ccreg |= (TURBOSPARC_SCENABLE);
	}
	turbosparc_set_ccreg(ccreg);

	mreg |= (TURBOSPARC_ICENABLE | TURBOSPARC_DCENABLE); /* I & D caches on */
	mreg |= (TURBOSPARC_ICSNOOP);		/* Icache snooping on */
	srmmu_set_mmureg(mreg);
}

static const struct sparc32_cachetlb_ops turbosparc_ops = {
	.cache_all	= turbosparc_flush_cache_all,
	.cache_mm	= turbosparc_flush_cache_mm,
	.cache_page	= turbosparc_flush_cache_page,
	.cache_range	= turbosparc_flush_cache_range,
	.tlb_all	= turbosparc_flush_tlb_all,
	.tlb_mm		= turbosparc_flush_tlb_mm,
	.tlb_page	= turbosparc_flush_tlb_page,
	.tlb_range	= turbosparc_flush_tlb_range,
	.page_to_ram	= turbosparc_flush_page_to_ram,
	.sig_insns	= turbosparc_flush_sig_insns,
	.page_for_dma	= turbosparc_flush_page_for_dma,
};

static void __init init_turbosparc(void)
{
	srmmu_name = "Fujitsu TurboSparc";
	srmmu_modtype = TurboSparc;
	sparc32_cachetlb_ops = &turbosparc_ops;
	poke_srmmu = poke_turbosparc;
}

static void __cpuinit poke_tsunami(void)
{
	unsigned long mreg = srmmu_get_mmureg();

	tsunami_flush_icache();
	tsunami_flush_dcache();
	mreg &= ~TSUNAMI_ITD;
	mreg |= (TSUNAMI_IENAB | TSUNAMI_DENAB);
	srmmu_set_mmureg(mreg);
}

static const struct sparc32_cachetlb_ops tsunami_ops = {
	.cache_all	= tsunami_flush_cache_all,
	.cache_mm	= tsunami_flush_cache_mm,
	.cache_page	= tsunami_flush_cache_page,
	.cache_range	= tsunami_flush_cache_range,
	.tlb_all	= tsunami_flush_tlb_all,
	.tlb_mm		= tsunami_flush_tlb_mm,
	.tlb_page	= tsunami_flush_tlb_page,
	.tlb_range	= tsunami_flush_tlb_range,
	.page_to_ram	= tsunami_flush_page_to_ram,
	.sig_insns	= tsunami_flush_sig_insns,
	.page_for_dma	= tsunami_flush_page_for_dma,
};

static void __init init_tsunami(void)
{
	/*
	 * Tsunami's pretty sane, Sun and TI actually got it
	 * somewhat right this time.  Fujitsu should have
	 * taken some lessons from them.
	 */

	srmmu_name = "TI Tsunami";
	srmmu_modtype = Tsunami;
	sparc32_cachetlb_ops = &tsunami_ops;
	poke_srmmu = poke_tsunami;

	tsunami_setup_blockops();
}

static void __cpuinit poke_viking(void)
{
	unsigned long mreg = srmmu_get_mmureg();
	static int smp_catch;

	if (viking_mxcc_present) {
		unsigned long mxcc_control = mxcc_get_creg();

		mxcc_control |= (MXCC_CTL_ECE | MXCC_CTL_PRE | MXCC_CTL_MCE);
		mxcc_control &= ~(MXCC_CTL_RRC);
		mxcc_set_creg(mxcc_control);

		/*
		 * We don't need memory parity checks.
		 * XXX This is a mess, have to dig out later. ecd.
		 * viking_mxcc_turn_off_parity(&mreg, &mxcc_control);
		 */

		/* We do cache ptables on MXCC. */
		mreg |= VIKING_TCENABLE;
	} else {
		unsigned long bpreg;

		mreg &= ~(VIKING_TCENABLE);
		if (smp_catch++) {
			/* Must disable mixed-cmd mode here for other cpu's. */
			bpreg = viking_get_bpreg();
			bpreg &= ~(VIKING_ACTION_MIX);
			viking_set_bpreg(bpreg);

			/* Just in case PROM does something funny. */
			msi_set_sync();
		}
	}

	mreg |= VIKING_SPENABLE;
	mreg |= (VIKING_ICENABLE | VIKING_DCENABLE);
	mreg |= VIKING_SBENABLE;
	mreg &= ~(VIKING_ACENABLE);
	srmmu_set_mmureg(mreg);
}

static struct sparc32_cachetlb_ops viking_ops = {
	.cache_all	= viking_flush_cache_all,
	.cache_mm	= viking_flush_cache_mm,
	.cache_page	= viking_flush_cache_page,
	.cache_range	= viking_flush_cache_range,
	.tlb_all	= viking_flush_tlb_all,
	.tlb_mm		= viking_flush_tlb_mm,
	.tlb_page	= viking_flush_tlb_page,
	.tlb_range	= viking_flush_tlb_range,
	.page_to_ram	= viking_flush_page_to_ram,
	.sig_insns	= viking_flush_sig_insns,
	.page_for_dma	= viking_flush_page_for_dma,
};

#ifdef CONFIG_SMP
/* On sun4d the cpu broadcasts local TLB flushes, so we can just
 * perform the local TLB flush and all the other cpus will see it.
 * But, unfortunately, there is a bug in the sun4d XBUS backplane
 * that requires that we add some synchronization to these flushes.
 *
 * The bug is that the fifo which keeps track of all the pending TLB
 * broadcasts in the system is an entry or two too small, so if we
 * have too many going at once we'll overflow that fifo and lose a TLB
 * flush resulting in corruption.
 *
 * Our workaround is to take a global spinlock around the TLB flushes,
 * which guarantees we won't ever have too many pending.  It's a big
 * hammer, but a semaphore like system to make sure we only have N TLB
 * flushes going at once will require SMP locking anyways so there's
 * no real value in trying any harder than this.
 */
static struct sparc32_cachetlb_ops viking_sun4d_smp_ops = {
	.cache_all	= viking_flush_cache_all,
	.cache_mm	= viking_flush_cache_mm,
	.cache_page	= viking_flush_cache_page,
	.cache_range	= viking_flush_cache_range,
	.tlb_all	= sun4dsmp_flush_tlb_all,
	.tlb_mm		= sun4dsmp_flush_tlb_mm,
	.tlb_page	= sun4dsmp_flush_tlb_page,
	.tlb_range	= sun4dsmp_flush_tlb_range,
	.page_to_ram	= viking_flush_page_to_ram,
	.sig_insns	= viking_flush_sig_insns,
	.page_for_dma	= viking_flush_page_for_dma,
};
#endif

static void __init init_viking(void)
{
	unsigned long mreg = srmmu_get_mmureg();

	/* Ahhh, the viking.  SRMMU VLSI abortion number two... */
	if (mreg & VIKING_MMODE) {
		srmmu_name = "TI Viking";
		viking_mxcc_present = 0;
		msi_set_sync();

		/*
		 * We need this to make sure old viking takes no hits
		 * on its cache for dma snoops to workaround the
		 * "load from non-cacheable memory" interrupt bug.
		 * This is only necessary because of the new way in
		 * which we use the IOMMU.
		 */
		viking_ops.page_for_dma = viking_flush_page;
#ifdef CONFIG_SMP
		viking_sun4d_smp_ops.page_for_dma = viking_flush_page;
#endif
		flush_page_for_dma_global = 0;
	} else {
		srmmu_name = "TI Viking/MXCC";
		viking_mxcc_present = 1;
		srmmu_cache_pagetables = 1;
	}

	sparc32_cachetlb_ops = (const struct sparc32_cachetlb_ops *)
		&viking_ops;
#ifdef CONFIG_SMP
	if (sparc_cpu_model == sun4d)
		sparc32_cachetlb_ops = (const struct sparc32_cachetlb_ops *)
			&viking_sun4d_smp_ops;
#endif

	poke_srmmu = poke_viking;
}

/* Probe for the srmmu chip version. */
static void __init get_srmmu_type(void)
{
	unsigned long mreg, psr;
	unsigned long mod_typ, mod_rev, psr_typ, psr_vers;

	srmmu_modtype = SRMMU_INVAL_MOD;
	hwbug_bitmask = 0;

	mreg = srmmu_get_mmureg(); psr = get_psr();
	mod_typ = (mreg & 0xf0000000) >> 28;
	mod_rev = (mreg & 0x0f000000) >> 24;
	psr_typ = (psr >> 28) & 0xf;
	psr_vers = (psr >> 24) & 0xf;

	/* First, check for sparc-leon. */
	if (sparc_cpu_model == sparc_leon) {
		init_leon();
		return;
	}

	/* Second, check for HyperSparc or Cypress. */
	if (mod_typ == 1) {
		switch (mod_rev) {
		case 7:
			/* UP or MP Hypersparc */
			init_hypersparc();
			break;
		default:
			prom_printf("Sparc-Linux Cypress support no longer exists.\n");
			prom_halt();
			break;
		}
		return;
	}

	/*
	 * Now Fujitsu TurboSparc. It might happen that it is
	 * in Swift emulation mode, so we will check later...
	 */
	if (psr_typ == 0 && psr_vers == 5) {
		init_turbosparc();
		return;
	}

	/* Next check for Fujitsu Swift. */
	if (psr_typ == 0 && psr_vers == 4) {
		phandle cpunode;
		char node_str[128];

		/* Look if it is not a TurboSparc emulating Swift... */
		cpunode = prom_getchild(prom_root_node);
		while ((cpunode = prom_getsibling(cpunode)) != 0) {
			prom_getstring(cpunode, "device_type", node_str, sizeof(node_str));
			if (!strcmp(node_str, "cpu")) {
				if (!prom_getintdefault(cpunode, "psr-implementation", 1) &&
				    prom_getintdefault(cpunode, "psr-version", 1) == 5) {
					init_turbosparc();
					return;
				}
				break;
			}
		}

		init_swift();
		return;
	}

	/* Now the Viking family of srmmu. */
	if (psr_typ == 4 &&
	   ((psr_vers == 0) ||
	    ((psr_vers == 1) && (mod_typ == 0) && (mod_rev == 0)))) {
		init_viking();
		return;
	}

	/* Finally the Tsunami. */
	if (psr_typ == 4 && psr_vers == 1 && (mod_typ || mod_rev)) {
		init_tsunami();
		return;
	}

	/* Oh well */
	srmmu_is_bad();
}

#ifdef CONFIG_SMP
/* Local cross-calls. */
static void smp_flush_page_for_dma(unsigned long page)
{
	xc1((smpfunc_t) local_ops->page_for_dma, page);
	local_ops->page_for_dma(page);
}

static void smp_flush_cache_all(void)
{
	xc0((smpfunc_t) local_ops->cache_all);
	local_ops->cache_all();
}

static void smp_flush_tlb_all(void)
{
	xc0((smpfunc_t) local_ops->tlb_all);
	local_ops->tlb_all();
}
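
/*
 * Pattern for the mm-scoped flushes below: copy the mm's cpumask,
 * drop the local cpu from it, cross-call the remaining cpus with the
 * chip-specific local flush routine, then run the same routine here.
 */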
static void smp_flush_cache_mm(struct mm_struct *mm)
{
	if (mm->context != NO_CONTEXT) {
		cpumask_t cpu_mask;
		cpumask_copy(&cpu_mask, mm_cpumask(mm));
		cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
		if (!cpumask_empty(&cpu_mask))
			xc1((smpfunc_t) local_ops->cache_mm, (unsigned long) mm);
		local_ops->cache_mm(mm);
	}
}

static void smp_flush_tlb_mm(struct mm_struct *mm)
{
	if (mm->context != NO_CONTEXT) {
		cpumask_t cpu_mask;
		cpumask_copy(&cpu_mask, mm_cpumask(mm));
		cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
		if (!cpumask_empty(&cpu_mask)) {
			xc1((smpfunc_t) local_ops->tlb_mm, (unsigned long) mm);
			if (atomic_read(&mm->mm_users) == 1 && current->active_mm == mm)
				cpumask_copy(mm_cpumask(mm),
					     cpumask_of(smp_processor_id()));
		}
		local_ops->tlb_mm(mm);
	}
}

static void smp_flush_cache_range(struct vm_area_struct *vma,
				  unsigned long start,
				  unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	if (mm->context != NO_CONTEXT) {
		cpumask_t cpu_mask;
		cpumask_copy(&cpu_mask, mm_cpumask(mm));
		cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
		if (!cpumask_empty(&cpu_mask))
			xc3((smpfunc_t) local_ops->cache_range,
			    (unsigned long) vma, start, end);
		local_ops->cache_range(vma, start, end);
	}
}

static void smp_flush_tlb_range(struct vm_area_struct *vma,
				unsigned long start,
				unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	if (mm->context != NO_CONTEXT) {
		cpumask_t cpu_mask;
		cpumask_copy(&cpu_mask, mm_cpumask(mm));
		cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
		if (!cpumask_empty(&cpu_mask))
			xc3((smpfunc_t) local_ops->tlb_range,
			    (unsigned long) vma, start, end);
		local_ops->tlb_range(vma, start, end);
	}
}

static void smp_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
{
	struct mm_struct *mm = vma->vm_mm;

	if (mm->context != NO_CONTEXT) {
		cpumask_t cpu_mask;
		cpumask_copy(&cpu_mask, mm_cpumask(mm));
		cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
		if (!cpumask_empty(&cpu_mask))
			xc2((smpfunc_t) local_ops->cache_page,
			    (unsigned long) vma, page);
		local_ops->cache_page(vma, page);
	}
}

static void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	struct mm_struct *mm = vma->vm_mm;

	if (mm->context != NO_CONTEXT) {
		cpumask_t cpu_mask;
		cpumask_copy(&cpu_mask, mm_cpumask(mm));
		cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
		if (!cpumask_empty(&cpu_mask))
			xc2((smpfunc_t) local_ops->tlb_page,
			    (unsigned long) vma, page);
		local_ops->tlb_page(vma, page);
	}
}

static void smp_flush_page_to_ram(unsigned long page)
{
	/* Current theory is that those who call this are the one's
	 * who have just dirtied their cache with the pages contents
	 * in kernel space, therefore we only run this on local cpu.
	 *
	 * XXX This experiment failed, research further... -DaveM
	 */
#if 1
	xc1((smpfunc_t) local_ops->page_to_ram, page);
#endif
	local_ops->page_to_ram(page);
}

static void smp_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
{
	cpumask_t cpu_mask;
	cpumask_copy(&cpu_mask, mm_cpumask(mm));
	cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
	if (!cpumask_empty(&cpu_mask))
		xc2((smpfunc_t) local_ops->sig_insns,
		    (unsigned long) mm, insn_addr);
	local_ops->sig_insns(mm, insn_addr);
}

static struct sparc32_cachetlb_ops smp_cachetlb_ops = {
	.cache_all	= smp_flush_cache_all,
	.cache_mm	= smp_flush_cache_mm,
	.cache_page	= smp_flush_cache_page,
	.cache_range	= smp_flush_cache_range,
	.tlb_all	= smp_flush_tlb_all,
	.tlb_mm		= smp_flush_tlb_mm,
	.tlb_page	= smp_flush_tlb_page,
	.tlb_range	= smp_flush_tlb_range,
	.page_to_ram	= smp_flush_page_to_ram,
	.sig_insns	= smp_flush_sig_insns,
	.page_for_dma	= smp_flush_page_for_dma,
};
#endif

/* Load up routines and constants for sun4m and sun4d mmu */
void __init load_mmu(void)
{
	extern void ld_mmu_iommu(void);
	extern void ld_mmu_iounit(void);

	/* Functions */
	get_srmmu_type();

#ifdef CONFIG_SMP
	/* El switcheroo... */
	local_ops = sparc32_cachetlb_ops;

	if (sparc_cpu_model == sun4d || sparc_cpu_model == sparc_leon) {
		smp_cachetlb_ops.tlb_all = local_ops->tlb_all;
		smp_cachetlb_ops.tlb_mm = local_ops->tlb_mm;
		smp_cachetlb_ops.tlb_range = local_ops->tlb_range;
		smp_cachetlb_ops.tlb_page = local_ops->tlb_page;
	}

	if (poke_srmmu == poke_viking) {
		/* Avoid unnecessary cross calls. */
		smp_cachetlb_ops.cache_all = local_ops->cache_all;
		smp_cachetlb_ops.cache_mm = local_ops->cache_mm;
		smp_cachetlb_ops.cache_range = local_ops->cache_range;
		smp_cachetlb_ops.cache_page = local_ops->cache_page;

		smp_cachetlb_ops.page_to_ram = local_ops->page_to_ram;
		smp_cachetlb_ops.sig_insns = local_ops->sig_insns;
		smp_cachetlb_ops.page_for_dma = local_ops->page_for_dma;
	}

	/* It really is const after this point. */
	sparc32_cachetlb_ops = (const struct sparc32_cachetlb_ops *)
		&smp_cachetlb_ops;
#endif

	if (sparc_cpu_model == sun4d)
		ld_mmu_iounit();
	else
		ld_mmu_iommu();
#ifdef CONFIG_SMP
	if (sparc_cpu_model == sun4d)
		sun4d_init_smp();
	else if (sparc_cpu_model == sparc_leon)
		leon_init_smp();
	else
		sun4m_init_smp();
#endif
}