/*
 * Based on arch/arm/mm/fault.c
 *
 * Copyright (C) 1995  Linus Torvalds
 * Copyright (C) 1995-2004 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/module.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/hardirq.h>
#include <linux/init.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/page-flags.h>
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/perf_event.h>

#include <asm/cpufeature.h>
#include <asm/exception.h>
#include <asm/debug-monitors.h>
#include <asm/esr.h>
#include <asm/sysreg.h>
#include <asm/system_misc.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>

static const char *fault_name(unsigned int esr);

/*
 * Dump out the page tables associated with 'addr' in mm 'mm'.
 */
void show_pte(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;

	if (!mm)
		mm = &init_mm;

	pr_alert("pgd = %p\n", mm->pgd);
	pgd = pgd_offset(mm, addr);
	pr_alert("[%08lx] *pgd=%016llx", addr, pgd_val(*pgd));

	do {
		pud_t *pud;
		pmd_t *pmd;
		pte_t *pte;

		if (pgd_none(*pgd) || pgd_bad(*pgd))
			break;

		pud = pud_offset(pgd, addr);
		printk(", *pud=%016llx", pud_val(*pud));
		if (pud_none(*pud) || pud_bad(*pud))
			break;

		pmd = pmd_offset(pud, addr);
		printk(", *pmd=%016llx", pmd_val(*pmd));
		if (pmd_none(*pmd) || pmd_bad(*pmd))
			break;

		pte = pte_offset_map(pmd, addr);
		printk(", *pte=%016llx", pte_val(*pte));
		pte_unmap(pte);
	} while(0);

	printk("\n");
}

#ifdef CONFIG_ARM64_HW_AFDBM
/*
 * This function sets the access flags (dirty, accessed), as well as write
 * permission, and only to a more permissive setting.
 *
 * It needs to cope with hardware update of the accessed/dirty state by other
 * agents in the system and can safely skip the __sync_icache_dcache() call as,
 * like set_pte_at(), the PTE is never changed from no-exec to exec here.
 *
 * Returns whether or not the PTE actually changed.
 */
int ptep_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pte_t *ptep,
			  pte_t entry, int dirty)
{
	pteval_t old_pteval;
	unsigned int tmp;

	if (pte_same(*ptep, entry))
		return 0;

	/* only preserve the access flags and write permission */
	pte_val(entry) &= PTE_AF | PTE_WRITE | PTE_DIRTY;

	/*
	 * PTE_RDONLY is cleared by default in the asm below, so set it back
	 * if necessary (read-only or clean PTE).
	 */
	if (!pte_write(entry) || !pte_sw_dirty(entry))
		pte_val(entry) |= PTE_RDONLY;

	/*
	 * Setting the flags must be done atomically to avoid racing with the
	 * hardware update of the access/dirty state.
	 */
	asm volatile("//	ptep_set_access_flags\n"
	"	prfm	pstl1strm, %2\n"
	"1:	ldxr	%0, %2\n"
	"	and	%0, %0, %3		// clear PTE_RDONLY\n"
	"	orr	%0, %0, %4		// set flags\n"
	"	stxr	%w1, %0, %2\n"
	"	cbnz	%w1, 1b\n"
	: "=&r" (old_pteval), "=&r" (tmp), "+Q" (pte_val(*ptep))
	: "L" (~PTE_RDONLY), "r" (pte_val(entry)));

	flush_tlb_fix_spurious_fault(vma, address);
	return 1;
}
#endif

/*
 * The kernel tried to access some page that wasn't present.
 */
static void __do_kernel_fault(struct mm_struct *mm, unsigned long addr,
			      unsigned int esr, struct pt_regs *regs)
{
	/*
	 * Are we prepared to handle this kernel fault?
	 */
	if (fixup_exception(regs))
		return;

	/*
	 * No handler, we'll have to terminate things with extreme prejudice.
	 */
	bust_spinlocks(1);
	pr_alert("Unable to handle kernel %s at virtual address %08lx\n",
		 (addr < PAGE_SIZE) ? "NULL pointer dereference" :
		 "paging request", addr);

	show_pte(mm, addr);
	die("Oops", regs, esr);
	bust_spinlocks(0);
	do_exit(SIGKILL);
}

/*
 * Something tried to access memory that isn't in our memory map. User mode
 * accesses just cause a SIGSEGV.
 */
static void __do_user_fault(struct task_struct *tsk, unsigned long addr,
			    unsigned int esr, unsigned int sig, int code,
			    struct pt_regs *regs)
{
	struct siginfo si;

	if (unhandled_signal(tsk, sig) && show_unhandled_signals_ratelimited()) {
		pr_info("%s[%d]: unhandled %s (%d) at 0x%08lx, esr 0x%03x\n",
			tsk->comm, task_pid_nr(tsk), fault_name(esr), sig,
			addr, esr);
		show_pte(tsk->mm, addr);
		show_regs(regs);
	}

	tsk->thread.fault_address = addr;
	tsk->thread.fault_code = esr;
	si.si_signo = sig;
	si.si_errno = 0;
	si.si_code = code;
	si.si_addr = (void __user *)addr;
	force_sig_info(sig, &si, tsk);
}

static void do_bad_area(unsigned long addr, unsigned int esr, struct pt_regs *regs)
{
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->active_mm;

	/*
	 * If we are in kernel mode at this point, we have no context to
	 * handle this fault with.
	 */
	if (user_mode(regs))
		__do_user_fault(tsk, addr, esr, SIGSEGV, SEGV_MAPERR, regs);
	else
		__do_kernel_fault(mm, addr, esr, regs);
}

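/*
 * Fault codes private to this file; they sit above the generic VM_FAULT_*
 * bits so __do_page_fault() can return either kind through the same int.
 */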
#define VM_FAULT_BADMAP		0x010000
#define VM_FAULT_BADACCESS	0x020000
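
/*
 * Linux-defined ESR bit: the low-level entry code tags instruction (exec)
 * aborts with a reserved ISS bit so the C handler can tell them apart from
 * data aborts.
 */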
#define ESR_LNX_EXEC		(1 << 24)
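
/*
 * Find the VMA covering 'addr' and try to handle the fault: returns the
 * handle_mm_fault() result, or VM_FAULT_BADMAP/VM_FAULT_BADACCESS when no
 * usable VMA exists or its permissions don't allow the access.
 */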
static int __do_page_fault(struct mm_struct *mm, unsigned long addr,
			   unsigned int mm_flags, unsigned long vm_flags,
			   struct task_struct *tsk)
{
	struct vm_area_struct *vma;
	int fault;

	vma = find_vma(mm, addr);
	fault = VM_FAULT_BADMAP;
	if (unlikely(!vma))
		goto out;
	if (unlikely(vma->vm_start > addr))
		goto check_stack;

	/*
	 * Ok, we have a good vm_area for this memory access, so we can handle
	 * it.
	 */
good_area:
	/*
	 * Check that the permissions on the VMA allow for the fault which
	 * occurred. If we encountered a write or exec fault, we must have
	 * appropriate permissions, otherwise we allow any permission.
	 */
	if (!(vma->vm_flags & vm_flags)) {
		fault = VM_FAULT_BADACCESS;
		goto out;
	}

	return handle_mm_fault(mm, vma, addr & PAGE_MASK, mm_flags);

check_stack:
	if (vma->vm_flags & VM_GROWSDOWN && !expand_stack(vma, addr))
		goto good_area;
out:
	return fault;
}

static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
				   struct pt_regs *regs)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	int fault, sig, code;
	unsigned long vm_flags = VM_READ | VM_WRITE | VM_EXEC;
	unsigned int mm_flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;

	tsk = current;
	mm  = tsk->mm;

	/* Enable interrupts if they were enabled in the parent context. */
	if (interrupts_enabled(regs))
		local_irq_enable();

	/*
	 * If we're in an interrupt or have no user context, we must not take
	 * the fault.
	 */
	if (faulthandler_disabled() || !mm)
		goto no_context;

	if (user_mode(regs))
		mm_flags |= FAULT_FLAG_USER;
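
	/*
	 * Derive the access type from the ESR: an exec fault requires
	 * VM_EXEC, a non-cache-maintenance write requires VM_WRITE, and
	 * anything else is satisfied by any of read/write/exec.
	 */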
	if (esr & ESR_LNX_EXEC) {
		vm_flags = VM_EXEC;
	} else if ((esr & ESR_ELx_WNR) && !(esr & ESR_ELx_CM)) {
		vm_flags = VM_WRITE;
		mm_flags |= FAULT_FLAG_WRITE;
	}

	/*
	 * PAN bit set implies the fault happened in kernel space, but not
	 * in the arch's user access functions.
	 */
	if (IS_ENABLED(CONFIG_ARM64_PAN) && (regs->pstate & PSR_PAN_BIT))
		goto no_context;

	/*
	 * As per x86, we may deadlock here. However, since the kernel only
	 * validly references user space from well defined areas of the code,
	 * we can bug out early if this is from code which shouldn't.
	 */
	if (!down_read_trylock(&mm->mmap_sem)) {
		if (!user_mode(regs) && !search_exception_tables(regs->pc))
			goto no_context;
retry:
		down_read(&mm->mmap_sem);
	} else {
		/*
		 * The above down_read_trylock() might have succeeded in which
		 * case, we'll have missed the might_sleep() from down_read().
		 */
		might_sleep();
#ifdef CONFIG_DEBUG_VM
		if (!user_mode(regs) && !search_exception_tables(regs->pc))
			goto no_context;
#endif
	}

	fault = __do_page_fault(mm, addr, mm_flags, vm_flags, tsk);

	/*
	 * If we need to retry but a fatal signal is pending, handle the
	 * signal first. We do not need to release the mmap_sem because it
	 * would already be released in __lock_page_or_retry in mm/filemap.c.
	 */
	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
		return 0;

	/*
	 * Major/minor page fault accounting is only done on the initial
	 * attempt. If we go through a retry, it is extremely likely that the
	 * page will be found in page cache at that point.
	 */

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
	if (mm_flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_MAJOR) {
			tsk->maj_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs,
				      addr);
		} else {
			tsk->min_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs,
				      addr);
		}
		if (fault & VM_FAULT_RETRY) {
			/*
			 * Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk of
			 * starvation.
			 */
			mm_flags &= ~FAULT_FLAG_ALLOW_RETRY;
			mm_flags |= FAULT_FLAG_TRIED;
			goto retry;
		}
	}

	up_read(&mm->mmap_sem);

	/*
	 * Handle the "normal" case first - VM_FAULT_MAJOR / VM_FAULT_MINOR
	 */
	if (likely(!(fault & (VM_FAULT_ERROR | VM_FAULT_BADMAP |
			      VM_FAULT_BADACCESS))))
		return 0;

	/*
	 * If we are in kernel mode at this point, we have no context to
	 * handle this fault with.
	 */
	if (!user_mode(regs))
		goto no_context;

	if (fault & VM_FAULT_OOM) {
		/*
		 * We ran out of memory, call the OOM killer, and return to
		 * userspace (which will retry the fault, or kill us if we got
		 * oom-killed).
		 */
		pagefault_out_of_memory();
		return 0;
	}

	if (fault & VM_FAULT_SIGBUS) {
		/*
		 * We had some memory, but were unable to successfully fix up
		 * this page fault.
		 */
		sig = SIGBUS;
		code = BUS_ADRERR;
	} else {
		/*
		 * Something tried to access memory that isn't in our memory
		 * map.
		 */
		sig = SIGSEGV;
		code = fault == VM_FAULT_BADACCESS ?
			SEGV_ACCERR : SEGV_MAPERR;
	}

	__do_user_fault(tsk, addr, esr, sig, code, regs);
	return 0;

no_context:
	__do_kernel_fault(mm, addr, esr, regs);
	return 0;
}

/*
 * First Level Translation Fault Handler
 *
 * We enter here because the first level page table doesn't contain a valid
 * entry for the address.
 *
 * If the address is in kernel space (>= TASK_SIZE), then we are probably
 * faulting in the vmalloc() area.
 *
 * If the init_task's first level page tables contain the relevant entry, we
 * copy it to this task. If not, we send the process a signal, fixup the
 * exception, or oops the kernel.
 *
 * NOTE! We MUST NOT take any locks for this case. We may be in an interrupt
 * or a critical region, and should only copy the information from the master
 * page table, nothing more.
 */
static int __kprobes do_translation_fault(unsigned long addr,
					  unsigned int esr,
					  struct pt_regs *regs)
{
	if (addr < TASK_SIZE)
		return do_page_fault(addr, esr, regs);

	do_bad_area(addr, esr, regs);
	return 0;
}

/*
 * This abort handler always returns "fault".
 */
static int do_bad(unsigned long addr, unsigned int esr, struct pt_regs *regs)
{
	return 1;
}
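
/*
 * The handlers below are indexed by the fault status code (DFSC/IFSC),
 * i.e. the low six bits of the ESR - 64 entries in all.
 */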
static struct fault_info {
	int	(*fn)(unsigned long addr, unsigned int esr, struct pt_regs *regs);
	int	sig;
	int	code;
	const char *name;
} fault_info[] = {
	{ do_bad,		SIGBUS,  0,		"ttbr address size fault"	},
	{ do_bad,		SIGBUS,  0,		"level 1 address size fault"	},
	{ do_bad,		SIGBUS,  0,		"level 2 address size fault"	},
	{ do_bad,		SIGBUS,  0,		"level 3 address size fault"	},
	{ do_translation_fault,	SIGSEGV, SEGV_MAPERR,	"level 0 translation fault"	},
	{ do_translation_fault,	SIGSEGV, SEGV_MAPERR,	"level 1 translation fault"	},
	{ do_translation_fault,	SIGSEGV, SEGV_MAPERR,	"level 2 translation fault"	},
	{ do_page_fault,	SIGSEGV, SEGV_MAPERR,	"level 3 translation fault"	},
	{ do_bad,		SIGBUS,  0,		"unknown 8"			},
	{ do_page_fault,	SIGSEGV, SEGV_ACCERR,	"level 1 access flag fault"	},
	{ do_page_fault,	SIGSEGV, SEGV_ACCERR,	"level 2 access flag fault"	},
	{ do_page_fault,	SIGSEGV, SEGV_ACCERR,	"level 3 access flag fault"	},
	{ do_bad,		SIGBUS,  0,		"unknown 12"			},
	{ do_page_fault,	SIGSEGV, SEGV_ACCERR,	"level 1 permission fault"	},
	{ do_page_fault,	SIGSEGV, SEGV_ACCERR,	"level 2 permission fault"	},
	{ do_page_fault,	SIGSEGV, SEGV_ACCERR,	"level 3 permission fault"	},
	{ do_bad,		SIGBUS,  0,		"synchronous external abort"	},
	{ do_bad,		SIGBUS,  0,		"unknown 17"			},
	{ do_bad,		SIGBUS,  0,		"unknown 18"			},
	{ do_bad,		SIGBUS,  0,		"unknown 19"			},
	{ do_bad,		SIGBUS,  0,		"synchronous abort (translation table walk)" },
	{ do_bad,		SIGBUS,  0,		"synchronous abort (translation table walk)" },
	{ do_bad,		SIGBUS,  0,		"synchronous abort (translation table walk)" },
	{ do_bad,		SIGBUS,  0,		"synchronous abort (translation table walk)" },
	{ do_bad,		SIGBUS,  0,		"synchronous parity error"	},
	{ do_bad,		SIGBUS,  0,		"unknown 25"			},
	{ do_bad,		SIGBUS,  0,		"unknown 26"			},
	{ do_bad,		SIGBUS,  0,		"unknown 27"			},
	{ do_bad,		SIGBUS,  0,		"synchronous parity error (translation table walk)" },
	{ do_bad,		SIGBUS,  0,		"synchronous parity error (translation table walk)" },
	{ do_bad,		SIGBUS,  0,		"synchronous parity error (translation table walk)" },
	{ do_bad,		SIGBUS,  0,		"synchronous parity error (translation table walk)" },
	{ do_bad,		SIGBUS,  0,		"unknown 32"			},
	{ do_bad,		SIGBUS,  BUS_ADRALN,	"alignment fault"		},
	{ do_bad,		SIGBUS,  0,		"unknown 34"			},
	{ do_bad,		SIGBUS,  0,		"unknown 35"			},
	{ do_bad,		SIGBUS,  0,		"unknown 36"			},
	{ do_bad,		SIGBUS,  0,		"unknown 37"			},
	{ do_bad,		SIGBUS,  0,		"unknown 38"			},
	{ do_bad,		SIGBUS,  0,		"unknown 39"			},
	{ do_bad,		SIGBUS,  0,		"unknown 40"			},
	{ do_bad,		SIGBUS,  0,		"unknown 41"			},
	{ do_bad,		SIGBUS,  0,		"unknown 42"			},
	{ do_bad,		SIGBUS,  0,		"unknown 43"			},
	{ do_bad,		SIGBUS,  0,		"unknown 44"			},
	{ do_bad,		SIGBUS,  0,		"unknown 45"			},
	{ do_bad,		SIGBUS,  0,		"unknown 46"			},
	{ do_bad,		SIGBUS,  0,		"unknown 47"			},
	{ do_bad,		SIGBUS,  0,		"TLB conflict abort"		},
	{ do_bad,		SIGBUS,  0,		"unknown 49"			},
	{ do_bad,		SIGBUS,  0,		"unknown 50"			},
	{ do_bad,		SIGBUS,  0,		"unknown 51"			},
	{ do_bad,		SIGBUS,  0,		"implementation fault (lockdown abort)" },
	{ do_bad,		SIGBUS,  0,		"implementation fault (unsupported exclusive)" },
	{ do_bad,		SIGBUS,  0,		"unknown 54"			},
	{ do_bad,		SIGBUS,  0,		"unknown 55"			},
	{ do_bad,		SIGBUS,  0,		"unknown 56"			},
	{ do_bad,		SIGBUS,  0,		"unknown 57"			},
	{ do_bad,		SIGBUS,  0,		"unknown 58"			},
	{ do_bad,		SIGBUS,  0,		"unknown 59"			},
	{ do_bad,		SIGBUS,  0,		"unknown 60"			},
	{ do_bad,		SIGBUS,  0,		"section domain fault"		},
	{ do_bad,		SIGBUS,  0,		"page domain fault"		},
	{ do_bad,		SIGBUS,  0,		"unknown 63"			},
};

static const char *fault_name(unsigned int esr)
{
	const struct fault_info *inf = fault_info + (esr & 63);
	return inf->name;
}

/*
 * Dispatch a data abort to the relevant handler.
 */
asmlinkage void __exception do_mem_abort(unsigned long addr, unsigned int esr,
					 struct pt_regs *regs)
{
	const struct fault_info *inf = fault_info + (esr & 63);
	struct siginfo info;

	if (!inf->fn(addr, esr, regs))
		return;

	pr_alert("Unhandled fault: %s (0x%08x) at 0x%016lx\n",
		 inf->name, esr, addr);

	info.si_signo = inf->sig;
	info.si_errno = 0;
	info.si_code  = inf->code;
	info.si_addr  = (void __user *)addr;
	arm64_notify_die("", regs, &info, esr);
}

/*
 * Handle stack alignment exceptions.
 */
asmlinkage void __exception do_sp_pc_abort(unsigned long addr,
					   unsigned int esr,
					   struct pt_regs *regs)
{
	struct siginfo info;
	struct task_struct *tsk = current;

	if (show_unhandled_signals && unhandled_signal(tsk, SIGBUS))
		pr_info_ratelimited("%s[%d]: %s exception: pc=%p sp=%p\n",
				    tsk->comm, task_pid_nr(tsk),
				    esr_get_class_string(esr), (void *)regs->pc,
				    (void *)regs->sp);

	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code  = BUS_ADRALN;
	info.si_addr  = (void __user *)addr;
	arm64_notify_die("Oops - SP/PC alignment exception", regs, &info, esr);
}

int __init early_brk64(unsigned long addr, unsigned int esr,
		       struct pt_regs *regs);

/*
 * __refdata because early_brk64 is __init, but the reference to it is
 * clobbered at arch_initcall time.
 * See traps.c and debug-monitors.c:debug_traps_init().
 */
static struct fault_info __refdata debug_fault_info[] = {
	{ do_bad,	SIGTRAP,	TRAP_HWBKPT,	"hardware breakpoint"	},
	{ do_bad,	SIGTRAP,	TRAP_HWBKPT,	"hardware single-step"	},
	{ do_bad,	SIGTRAP,	TRAP_HWBKPT,	"hardware watchpoint"	},
	{ do_bad,	SIGBUS,		0,		"unknown 3"		},
	{ do_bad,	SIGTRAP,	TRAP_BRKPT,	"aarch32 BKPT"		},
	{ do_bad,	SIGTRAP,	0,		"aarch32 vector catch"	},
	{ early_brk64,	SIGTRAP,	TRAP_BRKPT,	"aarch64 BRK"		},
	{ do_bad,	SIGBUS,		0,		"unknown 7"		},
};
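
/*
 * Called during boot by users of the debug exceptions (e.g. hw_breakpoint,
 * the single-step machinery) to install their handlers in the table above.
 */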
void __init hook_debug_fault_code(int nr,
				  int (*fn)(unsigned long, unsigned int, struct pt_regs *),
				  int sig, int code, const char *name)
{
	BUG_ON(nr < 0 || nr >= ARRAY_SIZE(debug_fault_info));

	debug_fault_info[nr].fn		= fn;
	debug_fault_info[nr].sig	= sig;
	debug_fault_info[nr].code	= code;
	debug_fault_info[nr].name	= name;
}

asmlinkage int __exception do_debug_exception(unsigned long addr,
					      unsigned int esr,
					      struct pt_regs *regs)
{
	const struct fault_info *inf = debug_fault_info + DBG_ESR_EVT(esr);
	struct siginfo info;

	if (!inf->fn(addr, esr, regs))
		return 1;

	pr_alert("Unhandled debug exception: %s (0x%08x) at 0x%016lx\n",
		 inf->name, esr, addr);

	info.si_signo = inf->sig;
	info.si_errno = 0;
	info.si_code  = inf->code;
	info.si_addr  = (void __user *)addr;
	arm64_notify_die("", regs, &info, 0);

	return 0;
}

#ifdef CONFIG_ARM64_PAN
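/*
 * Clearing SCTLR_EL1.SPAN makes the CPU set PSTATE.PAN automatically on
 * every exception taken to EL1, so privileged code faults on user-space
 * accesses made outside the uaccess routines.
 */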
void cpu_enable_pan(void *__unused)
{
	config_sctlr_el1(SCTLR_EL1_SPAN, 0);
}
#endif	/* CONFIG_ARM64_PAN */