/*
 * Logic that manipulates the Xtensa MMU. Derived from MIPS.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2003 Tensilica Inc.
 *
 * Chris Zankel <chris@zankel.net>
 */

#include <linux/mm.h>
#include <asm/processor.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
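
/*
 * The auto-refill (ARF) TLB entry index packs the way into the low bits
 * and the entry within the way at PAGE_SHIFT, hence w + (i << PAGE_SHIFT)
 * below. The per-entry invalidates skip the isync; a single isync at the
 * end makes all of them take effect at once.
 */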
static inline void __flush_itlb_all (void)
{
	int w, i;

	for (w = 0; w < ITLB_ARF_WAYS; w++) {
		for (i = 0; i < (1 << XCHAL_ITLB_ARF_ENTRIES_LOG2); i++) {
			int e = w + (i << PAGE_SHIFT);
			invalidate_itlb_entry_no_isync(e);
		}
	}
	asm volatile ("isync\n");
}

static inline void __flush_dtlb_all (void)
{
	int w, i;

	for (w = 0; w < DTLB_ARF_WAYS; w++) {
		for (i = 0; i < (1 << XCHAL_DTLB_ARF_ENTRIES_LOG2); i++) {
			int e = w + (i << PAGE_SHIFT);
			invalidate_dtlb_entry_no_isync(e);
		}
	}
	asm volatile ("isync\n");
}
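
/* Invalidate every auto-refill entry in both the instruction and data TLB. */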
void flush_tlb_all (void)
{
	__flush_itlb_all();
	__flush_dtlb_all();
}

/* If mm is current, we simply assign the current task a new ASID, thus
 * invalidating all previous TLB entries. If mm is someone else's user
 * mapping, we invalidate its context; when that mapping is swapped in
 * again, a new context will be assigned to it.
 */

void flush_tlb_mm(struct mm_struct *mm)
{
	if (mm == current->active_mm) {
		unsigned long flags;
		local_irq_save(flags);
		__get_new_mmu_context(mm);
		__load_mmu_context(mm);
		local_irq_restore(flags);
	} else {
		/* Not the active mm: invalidate its context (see comment
		 * above); a new ASID is allocated when it is next activated.
		 */
		mm->context = NO_CONTEXT;
	}
}

#define _ITLB_ENTRIES (ITLB_ARF_WAYS << XCHAL_ITLB_ARF_ENTRIES_LOG2)
#define _DTLB_ENTRIES (DTLB_ARF_WAYS << XCHAL_DTLB_ARF_ENTRIES_LOG2)
#if _ITLB_ENTRIES > _DTLB_ENTRIES
# define _TLB_ENTRIES _ITLB_ENTRIES
#else
# define _TLB_ENTRIES _DTLB_ENTRIES
#endif
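
/*
 * _TLB_ENTRIES is the size of the larger of the two auto-refill TLBs.
 * flush_tlb_range() below flushes page by page only while the range
 * spans at most _TLB_ENTRIES pages; beyond that it is cheaper to
 * invalidate the whole context via flush_tlb_mm().
 */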
void flush_tlb_range (struct vm_area_struct *vma,
		      unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long flags;

	if (mm->context == NO_CONTEXT)
		return;

#if 0
	printk("[tlbrange<%02lx,%08lx,%08lx>]\n",
	       (unsigned long)mm->context, start, end);
#endif
	local_irq_save(flags);

	if (end-start + (PAGE_SIZE-1) <= _TLB_ENTRIES << PAGE_SHIFT) {
		int oldpid = get_rasid_register();
		set_rasid_register (ASID_INSERT(mm->context));
		start &= PAGE_MASK;
		if (vma->vm_flags & VM_EXEC)
			while (start < end) {
				invalidate_itlb_mapping(start);
				invalidate_dtlb_mapping(start);
				start += PAGE_SIZE;
			}
		else
			while (start < end) {
				invalidate_dtlb_mapping(start);
				start += PAGE_SIZE;
			}

		set_rasid_register(oldpid);
	} else {
		/* Range too large to flush page by page: drop the context. */
		flush_tlb_mm(mm);
	}
	local_irq_restore(flags);
}
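
/* Flush the TLB entries for a single page. The invalidation is done under
 * the page's mm ASID; the ITLB is only touched for executable mappings.
 */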
void flush_tlb_page (struct vm_area_struct *vma, unsigned long page)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long flags;
	int oldpid;

	if (mm->context == NO_CONTEXT)
		return;

	local_irq_save(flags);

	oldpid = get_rasid_register();
	set_rasid_register(ASID_INSERT(mm->context));

	if (vma->vm_flags & VM_EXEC)
		invalidate_itlb_mapping(page);
	invalidate_dtlb_mapping(page);

	set_rasid_register(oldpid);

	local_irq_restore(flags);
}

#ifdef CONFIG_DEBUG_TLB_SANITY
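
/*
 * Walk the page table of the current task (falling back to active_mm for
 * kernel threads) and return the raw PTE value for vaddr, or 0 if there
 * is no mapping at any level.
 */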
static unsigned get_pte_for_vaddr(unsigned vaddr)
{
	struct task_struct *task = get_current();
	struct mm_struct *mm = task->mm;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;

	if (!mm)
		mm = task->active_mm;
	pgd = pgd_offset(mm, vaddr);
	if (pgd_none_or_clear_bad(pgd))
		return 0;
	pmd = pmd_offset(pgd, vaddr);
	if (pmd_none_or_clear_bad(pmd))
		return 0;
	pte = pte_offset_map(pmd, vaddr);
	if (!pte)
		return 0;
	return pte_val(*pte);
}

enum {
	TLB_SUSPICIOUS	= 1,
	TLB_INSANE	= 2,
};

static void tlb_insane(void)
{
	BUG_ON(1);
}

static void tlb_suspicious(void)
{
	WARN_ON(1);
}

/*
 * Check that TLB entries with kernel ASID (1) have kernel VMA (>= TASK_SIZE),
 * and that TLB entries with user ASID (>= 4) have VMA < TASK_SIZE.
 *
 * Check that valid TLB entries either have the same PA as the PTE, or that
 * the PTE is marked non-present. A non-present PTE whose page has a non-zero
 * refcount and a zero mapcount is normal for a batched TLB flush operation.
 * A zero refcount means the page was freed prematurely. A non-zero mapcount
 * is unusual, but does not necessarily mean an error, so it is only marked
 * as suspicious.
 */
static int check_tlb_entry(unsigned w, unsigned e, bool dtlb)
{
	unsigned tlbidx = w | (e << PAGE_SHIFT);
	unsigned r0 = dtlb ?
		read_dtlb_virtual(tlbidx) : read_itlb_virtual(tlbidx);
	unsigned vpn = (r0 & PAGE_MASK) | (e << PAGE_SHIFT);
	unsigned pte = get_pte_for_vaddr(vpn);
	unsigned mm_asid = (get_rasid_register() >> 8) & ASID_MASK;
	unsigned tlb_asid = r0 & ASID_MASK;
	bool kernel = tlb_asid == 1;
	int rc = 0;

	if (tlb_asid > 0 && ((vpn < TASK_SIZE) == kernel)) {
		pr_err("%cTLB: way: %u, entry: %u, VPN %08x in %s PTE\n",
		       dtlb ? 'D' : 'I', w, e, vpn,
		       kernel ? "kernel" : "user");
		rc |= TLB_INSANE;
	}

	if (tlb_asid == mm_asid) {
		unsigned r1 = dtlb ? read_dtlb_translation(tlbidx) :
			read_itlb_translation(tlbidx);
		if ((pte ^ r1) & PAGE_MASK) {
			pr_err("%cTLB: way: %u, entry: %u, mapping: %08x->%08x, PTE: %08x\n",
			       dtlb ? 'D' : 'I', w, e, r0, r1, pte);
			if (pte == 0 || !pte_present(__pte(pte))) {
				struct page *p = pfn_to_page(r1 >> PAGE_SHIFT);

				pr_err("page refcount: %d, mapcount: %d\n",
				       page_count(p),
				       page_mapcount(p));
				if (!page_count(p))
					rc |= TLB_INSANE;
				else if (page_mapped(p))
					rc |= TLB_SUSPICIOUS;
			} else {
				rc |= TLB_INSANE;
			}
		}
	}
	return rc;
}
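
/*
 * Scan every auto-refill way/entry of both TLBs with interrupts disabled;
 * BUG if any entry is insane, WARN if any is merely suspicious.
 */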
void check_tlb_sanity(void)
{
	unsigned long flags;
	unsigned w, e;
	int bug = 0;

	local_irq_save(flags);
	for (w = 0; w < DTLB_ARF_WAYS; ++w)
		for (e = 0; e < (1 << XCHAL_DTLB_ARF_ENTRIES_LOG2); ++e)
			bug |= check_tlb_entry(w, e, true);
	for (w = 0; w < ITLB_ARF_WAYS; ++w)
		for (e = 0; e < (1 << XCHAL_ITLB_ARF_ENTRIES_LOG2); ++e)
			bug |= check_tlb_entry(w, e, false);
	if (bug & TLB_INSANE)
		tlb_insane();
	if (bug & TLB_SUSPICIOUS)
		tlb_suspicious();
	local_irq_restore(flags);
}

#endif /* CONFIG_DEBUG_TLB_SANITY */