/*
 * Copyright (C) 2013 Huawei Ltd.
 * Author: Jiang Liu <liuj97@gmail.com>
 *
 * Copyright (C) 2014 Zi Shen Lim <zlim.lnx@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/stop_machine.h>
#include <linux/types.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>
#include <asm/debug-monitors.h>
#include <asm/fixmap.h>
#include <asm/insn.h>
#include <asm/opcodes.h>

#define AARCH64_INSN_SF_BIT	BIT(31)
#define AARCH64_INSN_N_BIT	BIT(22)
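
/*
 * Bits [28:25] of an A64 instruction (the "op0" field of the main encoding
 * table in the ARM ARM) select its top-level encoding group; the table below
 * maps that 4-bit value to a coarse instruction class.
 */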
static int aarch64_insn_encoding_class[] = {
	AARCH64_INSN_CLS_UNKNOWN,
	AARCH64_INSN_CLS_UNKNOWN,
	AARCH64_INSN_CLS_UNKNOWN,
	AARCH64_INSN_CLS_UNKNOWN,
	AARCH64_INSN_CLS_LDST,
	AARCH64_INSN_CLS_DP_REG,
	AARCH64_INSN_CLS_LDST,
	AARCH64_INSN_CLS_DP_FPSIMD,
	AARCH64_INSN_CLS_DP_IMM,
	AARCH64_INSN_CLS_DP_IMM,
	AARCH64_INSN_CLS_BR_SYS,
	AARCH64_INSN_CLS_BR_SYS,
	AARCH64_INSN_CLS_LDST,
	AARCH64_INSN_CLS_DP_REG,
	AARCH64_INSN_CLS_LDST,
	AARCH64_INSN_CLS_DP_FPSIMD,
};

enum aarch64_insn_encoding_class __kprobes aarch64_get_insn_class(u32 insn)
{
	return aarch64_insn_encoding_class[(insn >> 25) & 0xf];
}

/* NOP is an alias of HINT */
bool __kprobes aarch64_insn_is_nop(u32 insn)
{
	if (!aarch64_insn_is_hint(insn))
		return false;

	switch (insn & 0xFE0) {
	case AARCH64_INSN_HINT_YIELD:
	case AARCH64_INSN_HINT_WFE:
	case AARCH64_INSN_HINT_WFI:
	case AARCH64_INSN_HINT_SEV:
	case AARCH64_INSN_HINT_SEVL:
		return false;
	default:
		return true;
	}
}
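
/*
 * The canonical NOP encoding (0xd503201f, i.e. HINT #0) matches none of the
 * cases above, so it falls through to the default case and is reported as a
 * NOP, as is any other unallocated hint.
 */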

bool aarch64_insn_is_branch_imm(u32 insn)
{
	return (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn) ||
		aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn) ||
		aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
		aarch64_insn_is_bcond(insn));
}

static DEFINE_RAW_SPINLOCK(patch_lock);

static void __kprobes *patch_map(void *addr, int fixmap)
{
	unsigned long uintaddr = (uintptr_t) addr;
	bool module = !core_kernel_text(uintaddr);
	struct page *page;

	if (module && IS_ENABLED(CONFIG_DEBUG_SET_MODULE_RONX))
		page = vmalloc_to_page(addr);
	else if (!module && IS_ENABLED(CONFIG_DEBUG_RODATA))
		page = virt_to_page(addr);
	else
		return addr;

	BUG_ON(!page);
	return (void *)set_fixmap_offset(fixmap, page_to_phys(page) +
			(uintaddr & ~PAGE_MASK));
}

static void __kprobes patch_unmap(int fixmap)
{
	clear_fixmap(fixmap);
}
/*
 * In ARMv8-A, A64 instructions have a fixed length of 32 bits and are always
 * little-endian.
 */
int __kprobes aarch64_insn_read(void *addr, u32 *insnp)
{
	int ret;
	u32 val;

	ret = probe_kernel_read(&val, addr, AARCH64_INSN_SIZE);
	if (!ret)
		*insnp = le32_to_cpu(val);

	return ret;
}

static int __kprobes __aarch64_insn_write(void *addr, u32 insn)
{
	void *waddr = addr;
	unsigned long flags = 0;
	int ret;

	raw_spin_lock_irqsave(&patch_lock, flags);
	waddr = patch_map(addr, FIX_TEXT_POKE0);

	ret = probe_kernel_write(waddr, &insn, AARCH64_INSN_SIZE);

	patch_unmap(FIX_TEXT_POKE0);
	raw_spin_unlock_irqrestore(&patch_lock, flags);

	return ret;
}

int __kprobes aarch64_insn_write(void *addr, u32 insn)
{
	insn = cpu_to_le32(insn);
	return __aarch64_insn_write(addr, insn);
}

static bool __kprobes __aarch64_insn_hotpatch_safe(u32 insn)
{
	if (aarch64_get_insn_class(insn) != AARCH64_INSN_CLS_BR_SYS)
		return false;

	return	aarch64_insn_is_b(insn) ||
		aarch64_insn_is_bl(insn) ||
		aarch64_insn_is_svc(insn) ||
		aarch64_insn_is_hvc(insn) ||
		aarch64_insn_is_smc(insn) ||
		aarch64_insn_is_brk(insn) ||
		aarch64_insn_is_nop(insn);
}

bool __kprobes aarch64_insn_uses_literal(u32 insn)
{
	/* ldr/ldrsw (literal), prfm */

	return aarch64_insn_is_ldr_lit(insn) ||
		aarch64_insn_is_ldrsw_lit(insn) ||
		aarch64_insn_is_adr_adrp(insn) ||
		aarch64_insn_is_prfm_lit(insn);
}

bool __kprobes aarch64_insn_is_branch(u32 insn)
{
	/* b, bl, cb*, tb*, b.cond, br, blr, ret */

	return aarch64_insn_is_b(insn) ||
		aarch64_insn_is_bl(insn) ||
		aarch64_insn_is_cbz(insn) ||
		aarch64_insn_is_cbnz(insn) ||
		aarch64_insn_is_tbz(insn) ||
		aarch64_insn_is_tbnz(insn) ||
		aarch64_insn_is_ret(insn) ||
		aarch64_insn_is_br(insn) ||
		aarch64_insn_is_blr(insn) ||
		aarch64_insn_is_bcond(insn);
}

/*
 * ARM Architecture Reference Manual for ARMv8 Profile-A, Issue A.a
 * Section B2.6.5 "Concurrent modification and execution of instructions":
 * Concurrent modification and execution of instructions can lead to the
 * resulting instruction performing any behavior that can be achieved by
 * executing any sequence of instructions that can be executed from the
 * same Exception level, except where the instruction before modification
 * and the instruction after modification is a B, BL, NOP, BKPT, SVC, HVC,
 * or SMC instruction.
 */
bool __kprobes aarch64_insn_hotpatch_safe(u32 old_insn, u32 new_insn)
{
	return __aarch64_insn_hotpatch_safe(old_insn) &&
		__aarch64_insn_hotpatch_safe(new_insn);
}
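
/*
 * In practice this means a NOP can be replaced by a B (or vice versa) without
 * stopping the machine, which is what single-instruction callers such as the
 * arm64 jump label code rely on; any other old/new combination is routed
 * through aarch64_insn_patch_text_sync() by aarch64_insn_patch_text() below.
 */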

int __kprobes aarch64_insn_patch_text_nosync(void *addr, u32 insn)
{
	u32 *tp = addr;
	int ret;

	/* A64 instructions must be word aligned */
	if ((uintptr_t)tp & 0x3)
		return -EINVAL;

	ret = aarch64_insn_write(tp, insn);
	if (ret == 0)
		flush_icache_range((uintptr_t)tp,
				   (uintptr_t)tp + AARCH64_INSN_SIZE);

	return ret;
}

struct aarch64_insn_patch {
	void		**text_addrs;
	u32		*new_insns;
	int		insn_cnt;
	atomic_t	cpu_count;
};

static int __kprobes aarch64_insn_patch_text_cb(void *arg)
{
	int i, ret = 0;
	struct aarch64_insn_patch *pp = arg;

	/* The first CPU becomes master */
	if (atomic_inc_return(&pp->cpu_count) == 1) {
		for (i = 0; ret == 0 && i < pp->insn_cnt; i++)
			ret = aarch64_insn_patch_text_nosync(pp->text_addrs[i],
							     pp->new_insns[i]);
		/*
		 * aarch64_insn_patch_text_nosync() calls flush_icache_range(),
		 * which ends with "dsb; isb" pair guaranteeing global
		 * visibility.
		 */
		/* Notify other processors with an additional increment. */
		atomic_inc(&pp->cpu_count);
	} else {
		while (atomic_read(&pp->cpu_count) <= num_online_cpus())
			cpu_relax();
		isb();
	}

	return ret;
}

int __kprobes aarch64_insn_patch_text_sync(void *addrs[], u32 insns[], int cnt)
{
	struct aarch64_insn_patch patch = {
		.text_addrs = addrs,
		.new_insns = insns,
		.insn_cnt = cnt,
		.cpu_count = ATOMIC_INIT(0),
	};

	if (cnt <= 0)
		return -EINVAL;

	return stop_machine(aarch64_insn_patch_text_cb, &patch,
			    cpu_online_mask);
}

int __kprobes aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt)
{
	int ret;
	u32 insn;

	/* Unsafe to patch multiple instructions without synchronization */
	if (cnt == 1) {
		ret = aarch64_insn_read(addrs[0], &insn);
		if (ret)
			return ret;

		if (aarch64_insn_hotpatch_safe(insn, insns[0])) {
			/*
			 * ARMv8 architecture doesn't guarantee all CPUs see
			 * the new instruction after returning from function
			 * aarch64_insn_patch_text_nosync(). So send IPIs to
			 * all other CPUs to achieve instruction
			 * synchronization.
			 */
			ret = aarch64_insn_patch_text_nosync(addrs[0], insns[0]);
			kick_all_cpus_sync();
			return ret;
		}
	}

	return aarch64_insn_patch_text_sync(addrs, insns, cnt);
}
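
/*
 * Typical single-instruction usage (an illustrative sketch, not code from
 * this file; "pc" and "target" are placeholder addresses): generate the
 * replacement with one of the gen helpers below and let
 * aarch64_insn_patch_text() pick the cheapest safe mechanism:
 *
 *	u32 insn = aarch64_insn_gen_branch_imm(pc, target,
 *					       AARCH64_INSN_BRANCH_NOLINK);
 *	aarch64_insn_patch_text((void *[]){ (void *)pc }, &insn, 1);
 */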

static int __kprobes aarch64_get_imm_shift_mask(enum aarch64_insn_imm_type type,
						u32 *maskp, int *shiftp)
{
	u32 mask;
	int shift;

	switch (type) {
	case AARCH64_INSN_IMM_26:
		mask = BIT(26) - 1;
		shift = 0;
		break;
	case AARCH64_INSN_IMM_19:
		mask = BIT(19) - 1;
		shift = 5;
		break;
	case AARCH64_INSN_IMM_16:
		mask = BIT(16) - 1;
		shift = 5;
		break;
	case AARCH64_INSN_IMM_14:
		mask = BIT(14) - 1;
		shift = 5;
		break;
	case AARCH64_INSN_IMM_12:
		mask = BIT(12) - 1;
		shift = 10;
		break;
	case AARCH64_INSN_IMM_9:
		mask = BIT(9) - 1;
		shift = 12;
		break;
	case AARCH64_INSN_IMM_7:
		mask = BIT(7) - 1;
		shift = 15;
		break;
	case AARCH64_INSN_IMM_6:
	case AARCH64_INSN_IMM_S:
		mask = BIT(6) - 1;
		shift = 10;
		break;
	case AARCH64_INSN_IMM_R:
		mask = BIT(6) - 1;
		shift = 16;
		break;
	default:
		return -EINVAL;
	}

	*maskp = mask;
	*shiftp = shift;

	return 0;
}

#define ADR_IMM_HILOSPLIT	2
#define ADR_IMM_SIZE		SZ_2M
#define ADR_IMM_LOMASK		((1 << ADR_IMM_HILOSPLIT) - 1)
#define ADR_IMM_HIMASK		((ADR_IMM_SIZE >> ADR_IMM_HILOSPLIT) - 1)
#define ADR_IMM_LOSHIFT		29
#define ADR_IMM_HISHIFT		5
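
/*
 * ADR and ADRP split their 21-bit immediate across two fields: immlo holds
 * the low ADR_IMM_HILOSPLIT (2) bits in insn[30:29], immhi holds the
 * remaining 19 bits in insn[23:5], hence the separate lo/hi masks and shifts
 * used below.
 */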

u64 aarch64_insn_decode_immediate(enum aarch64_insn_imm_type type, u32 insn)
{
	u32 immlo, immhi, mask;
	int shift;

	switch (type) {
	case AARCH64_INSN_IMM_ADR:
		shift = 0;
		immlo = (insn >> ADR_IMM_LOSHIFT) & ADR_IMM_LOMASK;
		immhi = (insn >> ADR_IMM_HISHIFT) & ADR_IMM_HIMASK;
		insn = (immhi << ADR_IMM_HILOSPLIT) | immlo;
		mask = ADR_IMM_SIZE - 1;
		break;
	default:
		if (aarch64_get_imm_shift_mask(type, &mask, &shift) < 0) {
			pr_err("aarch64_insn_decode_immediate: unknown immediate encoding %d\n",
			       type);
			return 0;
		}
	}

	return (insn >> shift) & mask;
}

u32 __kprobes aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type,
				  u32 insn, u64 imm)
{
	u32 immlo, immhi, mask;
	int shift;

	switch (type) {
	case AARCH64_INSN_IMM_ADR:
		shift = 0;
		immlo = (imm & ADR_IMM_LOMASK) << ADR_IMM_LOSHIFT;
		imm >>= ADR_IMM_HILOSPLIT;
		immhi = (imm & ADR_IMM_HIMASK) << ADR_IMM_HISHIFT;
		imm = immlo | immhi;
		mask = ((ADR_IMM_LOMASK << ADR_IMM_LOSHIFT) |
			(ADR_IMM_HIMASK << ADR_IMM_HISHIFT));
		break;
	default:
		if (aarch64_get_imm_shift_mask(type, &mask, &shift) < 0) {
			pr_err("aarch64_insn_encode_immediate: unknown immediate encoding %d\n",
			       type);
			return 0;
		}
	}

	/* Update the immediate field. */
	insn &= ~(mask << shift);
	insn |= (imm & mask) << shift;

	return insn;
}

static u32 aarch64_insn_encode_register(enum aarch64_insn_register_type type,
					u32 insn,
					enum aarch64_insn_register reg)
{
	int shift;

	if (reg < AARCH64_INSN_REG_0 || reg > AARCH64_INSN_REG_SP) {
		pr_err("%s: unknown register encoding %d\n", __func__, reg);
		return 0;
	}

	switch (type) {
	case AARCH64_INSN_REGTYPE_RT:
	case AARCH64_INSN_REGTYPE_RD:
		shift = 0;
		break;
	case AARCH64_INSN_REGTYPE_RN:
		shift = 5;
		break;
	case AARCH64_INSN_REGTYPE_RT2:
	case AARCH64_INSN_REGTYPE_RA:
		shift = 10;
		break;
	case AARCH64_INSN_REGTYPE_RM:
		shift = 16;
		break;
	default:
		pr_err("%s: unknown register type encoding %d\n", __func__,
		       type);
		return 0;
	}

	insn &= ~(GENMASK(4, 0) << shift);
	insn |= reg << shift;

	return insn;
}

static u32 aarch64_insn_encode_ldst_size(enum aarch64_insn_size_type type,
					 u32 insn)
{
	u32 size;

	switch (type) {
	case AARCH64_INSN_SIZE_8:
		size = 0;
		break;
	case AARCH64_INSN_SIZE_16:
		size = 1;
		break;
	case AARCH64_INSN_SIZE_32:
		size = 2;
		break;
	case AARCH64_INSN_SIZE_64:
		size = 3;
		break;
	default:
		pr_err("%s: unknown size encoding %d\n", __func__, type);
		return 0;
	}

	insn &= ~GENMASK(31, 30);
	insn |= size << 30;

	return insn;
}

static inline long branch_imm_common(unsigned long pc, unsigned long addr,
				     long range)
{
	long offset;

	/*
	 * PC: A 64-bit Program Counter holding the address of the current
	 * instruction. A64 instructions must be word-aligned.
	 */
	BUG_ON((pc & 0x3) || (addr & 0x3));

	offset = ((long)addr - (long)pc);
	BUG_ON(offset < -range || offset >= range);

	return offset;
}

u32 __kprobes aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr,
					  enum aarch64_insn_branch_type type)
{
	u32 insn;
	long offset;

	/*
	 * B/BL support [-128M, 128M) offset
	 * ARM64 virtual address arrangement guarantees all kernel and module
	 * texts are within +/-128M.
	 */
	offset = branch_imm_common(pc, addr, SZ_128M);

	switch (type) {
	case AARCH64_INSN_BRANCH_LINK:
		insn = aarch64_insn_get_bl_value();
		break;
	case AARCH64_INSN_BRANCH_NOLINK:
		insn = aarch64_insn_get_b_value();
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn,
					     offset >> 2);
}
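
/*
 * Branch immediates, here and in the compare-and-branch and conditional
 * branch generators below, are stored as word (4-byte) offsets, so the byte
 * offset is shifted right by two before being encoded: a branch 16 bytes
 * forward becomes an immediate of 4.
 */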

u32 aarch64_insn_gen_comp_branch_imm(unsigned long pc, unsigned long addr,
				     enum aarch64_insn_register reg,
				     enum aarch64_insn_variant variant,
				     enum aarch64_insn_branch_type type)
{
	u32 insn;
	long offset;

	offset = branch_imm_common(pc, addr, SZ_1M);

	switch (type) {
	case AARCH64_INSN_BRANCH_COMP_ZERO:
		insn = aarch64_insn_get_cbz_value();
		break;
	case AARCH64_INSN_BRANCH_COMP_NONZERO:
		insn = aarch64_insn_get_cbnz_value();
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
					     offset >> 2);
}

u32 aarch64_insn_gen_cond_branch_imm(unsigned long pc, unsigned long addr,
				     enum aarch64_insn_condition cond)
{
	u32 insn;
	long offset;

	offset = branch_imm_common(pc, addr, SZ_1M);

	insn = aarch64_insn_get_bcond_value();

	BUG_ON(cond < AARCH64_INSN_COND_EQ || cond > AARCH64_INSN_COND_AL);
	insn |= cond;

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
					     offset >> 2);
}

u32 __kprobes aarch64_insn_gen_hint(enum aarch64_insn_hint_op op)
{
	return aarch64_insn_get_hint_value() | op;
}

u32 __kprobes aarch64_insn_gen_nop(void)
{
	return aarch64_insn_gen_hint(AARCH64_INSN_HINT_NOP);
}

u32 aarch64_insn_gen_branch_reg(enum aarch64_insn_register reg,
				enum aarch64_insn_branch_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_BRANCH_NOLINK:
		insn = aarch64_insn_get_br_value();
		break;
	case AARCH64_INSN_BRANCH_LINK:
		insn = aarch64_insn_get_blr_value();
		break;
	case AARCH64_INSN_BRANCH_RETURN:
		insn = aarch64_insn_get_ret_value();
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, reg);
}

u32 aarch64_insn_gen_load_store_reg(enum aarch64_insn_register reg,
				    enum aarch64_insn_register base,
				    enum aarch64_insn_register offset,
				    enum aarch64_insn_size_type size,
				    enum aarch64_insn_ldst_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_LDST_LOAD_REG_OFFSET:
		insn = aarch64_insn_get_ldr_reg_value();
		break;
	case AARCH64_INSN_LDST_STORE_REG_OFFSET:
		insn = aarch64_insn_get_str_reg_value();
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_ldst_size(size, insn);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    base);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn,
					    offset);
}

u32 aarch64_insn_gen_load_store_pair(enum aarch64_insn_register reg1,
				     enum aarch64_insn_register reg2,
				     enum aarch64_insn_register base,
				     int offset,
				     enum aarch64_insn_variant variant,
				     enum aarch64_insn_ldst_type type)
{
	u32 insn;
	int shift;

	switch (type) {
	case AARCH64_INSN_LDST_LOAD_PAIR_PRE_INDEX:
		insn = aarch64_insn_get_ldp_pre_value();
		break;
	case AARCH64_INSN_LDST_STORE_PAIR_PRE_INDEX:
		insn = aarch64_insn_get_stp_pre_value();
		break;
	case AARCH64_INSN_LDST_LOAD_PAIR_POST_INDEX:
		insn = aarch64_insn_get_ldp_post_value();
		break;
	case AARCH64_INSN_LDST_STORE_PAIR_POST_INDEX:
		insn = aarch64_insn_get_stp_post_value();
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		/* offset must be multiples of 4 in the range [-256, 252] */
		BUG_ON(offset & 0x3);
		BUG_ON(offset < -256 || offset > 252);
		shift = 2;
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		/* offset must be multiples of 8 in the range [-512, 504] */
		BUG_ON(offset & 0x7);
		BUG_ON(offset < -512 || offset > 504);
		shift = 3;
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
					    reg1);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT2, insn,
					    reg2);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    base);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_7, insn,
					     offset >> shift);
}

u32 aarch64_insn_gen_add_sub_imm(enum aarch64_insn_register dst,
				 enum aarch64_insn_register src,
				 int imm, enum aarch64_insn_variant variant,
				 enum aarch64_insn_adsb_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_ADSB_ADD:
		insn = aarch64_insn_get_add_imm_value();
		break;
	case AARCH64_INSN_ADSB_SUB:
		insn = aarch64_insn_get_sub_imm_value();
		break;
	case AARCH64_INSN_ADSB_ADD_SETFLAGS:
		insn = aarch64_insn_get_adds_imm_value();
		break;
	case AARCH64_INSN_ADSB_SUB_SETFLAGS:
		insn = aarch64_insn_get_subs_imm_value();
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	BUG_ON(imm & ~(SZ_4K - 1));

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_12, insn, imm);
}

u32 aarch64_insn_gen_bitfield(enum aarch64_insn_register dst,
			      enum aarch64_insn_register src,
			      int immr, int imms,
			      enum aarch64_insn_variant variant,
			      enum aarch64_insn_bitfield_type type)
{
	u32 insn;
	u32 mask;

	switch (type) {
	case AARCH64_INSN_BITFIELD_MOVE:
		insn = aarch64_insn_get_bfm_value();
		break;
	case AARCH64_INSN_BITFIELD_MOVE_UNSIGNED:
		insn = aarch64_insn_get_ubfm_value();
		break;
	case AARCH64_INSN_BITFIELD_MOVE_SIGNED:
		insn = aarch64_insn_get_sbfm_value();
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		mask = GENMASK(4, 0);
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT | AARCH64_INSN_N_BIT;
		mask = GENMASK(5, 0);
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	BUG_ON(immr & ~mask);
	BUG_ON(imms & ~mask);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_R, insn, immr);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_S, insn, imms);
}

u32 aarch64_insn_gen_movewide(enum aarch64_insn_register dst,
			      int imm, int shift,
			      enum aarch64_insn_variant variant,
			      enum aarch64_insn_movewide_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_MOVEWIDE_ZERO:
		insn = aarch64_insn_get_movz_value();
		break;
	case AARCH64_INSN_MOVEWIDE_KEEP:
		insn = aarch64_insn_get_movk_value();
		break;
	case AARCH64_INSN_MOVEWIDE_INVERSE:
		insn = aarch64_insn_get_movn_value();
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	BUG_ON(imm & ~(SZ_64K - 1));

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		BUG_ON(shift != 0 && shift != 16);
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		BUG_ON(shift != 0 && shift != 16 && shift != 32 &&
		       shift != 48);
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	insn |= (shift >> 4) << 21;

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_16, insn, imm);
}
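
/*
 * The "hw" field at bits [22:21] selects which 16-bit chunk the immediate
 * lands in, which is why the shift argument (0/16/32/48 bits) is divided by
 * 16 ("shift >> 4") before being placed at bit 21. A full 64-bit constant
 * can be built with one MOVZ (AARCH64_INSN_MOVEWIDE_ZERO) followed by MOVKs
 * (AARCH64_INSN_MOVEWIDE_KEEP) at shifts 16, 32 and 48.
 */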

u32 aarch64_insn_gen_add_sub_shifted_reg(enum aarch64_insn_register dst,
					 enum aarch64_insn_register src,
					 enum aarch64_insn_register reg,
					 int shift,
					 enum aarch64_insn_variant variant,
					 enum aarch64_insn_adsb_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_ADSB_ADD:
		insn = aarch64_insn_get_add_value();
		break;
	case AARCH64_INSN_ADSB_SUB:
		insn = aarch64_insn_get_sub_value();
		break;
	case AARCH64_INSN_ADSB_ADD_SETFLAGS:
		insn = aarch64_insn_get_adds_value();
		break;
	case AARCH64_INSN_ADSB_SUB_SETFLAGS:
		insn = aarch64_insn_get_subs_value();
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		BUG_ON(shift & ~(SZ_32 - 1));
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		BUG_ON(shift & ~(SZ_64 - 1));
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift);
}

u32 aarch64_insn_gen_data1(enum aarch64_insn_register dst,
			   enum aarch64_insn_register src,
			   enum aarch64_insn_variant variant,
			   enum aarch64_insn_data1_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_DATA1_REVERSE_16:
		insn = aarch64_insn_get_rev16_value();
		break;
	case AARCH64_INSN_DATA1_REVERSE_32:
		insn = aarch64_insn_get_rev32_value();
		break;
	case AARCH64_INSN_DATA1_REVERSE_64:
		BUG_ON(variant != AARCH64_INSN_VARIANT_64BIT);
		insn = aarch64_insn_get_rev64_value();
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
}

u32 aarch64_insn_gen_data2(enum aarch64_insn_register dst,
			   enum aarch64_insn_register src,
			   enum aarch64_insn_register reg,
			   enum aarch64_insn_variant variant,
			   enum aarch64_insn_data2_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_DATA2_UDIV:
		insn = aarch64_insn_get_udiv_value();
		break;
	case AARCH64_INSN_DATA2_SDIV:
		insn = aarch64_insn_get_sdiv_value();
		break;
	case AARCH64_INSN_DATA2_LSLV:
		insn = aarch64_insn_get_lslv_value();
		break;
	case AARCH64_INSN_DATA2_LSRV:
		insn = aarch64_insn_get_lsrv_value();
		break;
	case AARCH64_INSN_DATA2_ASRV:
		insn = aarch64_insn_get_asrv_value();
		break;
	case AARCH64_INSN_DATA2_RORV:
		insn = aarch64_insn_get_rorv_value();
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);
}

u32 aarch64_insn_gen_data3(enum aarch64_insn_register dst,
			   enum aarch64_insn_register src,
			   enum aarch64_insn_register reg1,
			   enum aarch64_insn_register reg2,
			   enum aarch64_insn_variant variant,
			   enum aarch64_insn_data3_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_DATA3_MADD:
		insn = aarch64_insn_get_madd_value();
		break;
	case AARCH64_INSN_DATA3_MSUB:
		insn = aarch64_insn_get_msub_value();
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RA, insn, src);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    reg1);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn,
					    reg2);
}

u32 aarch64_insn_gen_logical_shifted_reg(enum aarch64_insn_register dst,
					 enum aarch64_insn_register src,
					 enum aarch64_insn_register reg,
					 int shift,
					 enum aarch64_insn_variant variant,
					 enum aarch64_insn_logic_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_LOGIC_AND:
		insn = aarch64_insn_get_and_value();
		break;
	case AARCH64_INSN_LOGIC_BIC:
		insn = aarch64_insn_get_bic_value();
		break;
	case AARCH64_INSN_LOGIC_ORR:
		insn = aarch64_insn_get_orr_value();
		break;
	case AARCH64_INSN_LOGIC_ORN:
		insn = aarch64_insn_get_orn_value();
		break;
	case AARCH64_INSN_LOGIC_EOR:
		insn = aarch64_insn_get_eor_value();
		break;
	case AARCH64_INSN_LOGIC_EON:
		insn = aarch64_insn_get_eon_value();
		break;
	case AARCH64_INSN_LOGIC_AND_SETFLAGS:
		insn = aarch64_insn_get_ands_value();
		break;
	case AARCH64_INSN_LOGIC_BIC_SETFLAGS:
		insn = aarch64_insn_get_bics_value();
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		BUG_ON(shift & ~(SZ_32 - 1));
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		BUG_ON(shift & ~(SZ_64 - 1));
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift);
}

/*
 * Decode the imm field of a branch, and return the byte offset as a
 * signed value (so it can be used when computing a new branch
 * target).
 */
s32 aarch64_get_branch_offset(u32 insn)
{
	s32 imm;

	if (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn)) {
		imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_26, insn);
		return (imm << 6) >> 4;
	}

	if (aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
	    aarch64_insn_is_bcond(insn)) {
		imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_19, insn);
		return (imm << 13) >> 11;
	}

	if (aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn)) {
		imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_14, insn);
		return (imm << 18) >> 16;
	}

	/* Unhandled instruction */
	BUG();
}
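
/*
 * Each shift pair above sign-extends the decoded field and scales it back to
 * bytes in one go: for B/BL the 26-bit field is a word offset, so
 * "(imm << 6) >> 4" moves bit 25 up to the sign bit and then
 * arithmetic-shifts back down, leaving imm * 4 with the sign preserved
 * (e.g. imm26 = 0x2000000 decodes to -0x8000000, i.e. -128M).
 */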

/*
 * Encode the displacement of a branch in the imm field and return the
 * updated instruction.
 */
u32 aarch64_set_branch_offset(u32 insn, s32 offset)
{
	if (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn))
		return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn,
						     offset >> 2);

	if (aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
	    aarch64_insn_is_bcond(insn))
		return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
						     offset >> 2);

	if (aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn))
		return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_14, insn,
						     offset >> 2);

	/* Unhandled instruction */
	BUG();
}

/*
 * Extract the Op/CR data from a msr/mrs instruction.
 */
u32 aarch64_insn_extract_system_reg(u32 insn)
{
	return (insn & 0x1FFFE0) >> 5;
}

bool aarch32_insn_is_wide(u32 insn)
{
	return insn >= 0xe800;
}

/*
 * Macros/defines for extracting register numbers from instruction.
 */
u32 aarch32_insn_extract_reg_num(u32 insn, int offset)
{
	return (insn & (0xf << offset)) >> offset;
}

#define OPC2_MASK	0x7
#define OPC2_OFFSET	5
u32 aarch32_insn_mcr_extract_opc2(u32 insn)
{
	return (insn & (OPC2_MASK << OPC2_OFFSET)) >> OPC2_OFFSET;
}

#define CRM_MASK	0xf
u32 aarch32_insn_mcr_extract_crm(u32 insn)
{
	return insn & CRM_MASK;
}
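
/*
 * The aarch32 condition checks below rely on the PSTATE flag layout: N is
 * bit 31, Z bit 30, C bit 29 and V bit 28, so shifting pstate by a few bits
 * lines one flag up with another (e.g. "pstate >> 1" moves Z into the C
 * position for the HI/LS checks, and "pstate << 3" moves V into the N
 * position for GE/LT/GT/LE).
 */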

static bool __kprobes __check_eq(unsigned long pstate)
{
	return (pstate & PSR_Z_BIT) != 0;
}

static bool __kprobes __check_ne(unsigned long pstate)
{
	return (pstate & PSR_Z_BIT) == 0;
}

static bool __kprobes __check_cs(unsigned long pstate)
{
	return (pstate & PSR_C_BIT) != 0;
}

static bool __kprobes __check_cc(unsigned long pstate)
{
	return (pstate & PSR_C_BIT) == 0;
}

static bool __kprobes __check_mi(unsigned long pstate)
{
	return (pstate & PSR_N_BIT) != 0;
}

static bool __kprobes __check_pl(unsigned long pstate)
{
	return (pstate & PSR_N_BIT) == 0;
}

static bool __kprobes __check_vs(unsigned long pstate)
{
	return (pstate & PSR_V_BIT) != 0;
}

static bool __kprobes __check_vc(unsigned long pstate)
{
	return (pstate & PSR_V_BIT) == 0;
}

static bool __kprobes __check_hi(unsigned long pstate)
{
	pstate &= ~(pstate >> 1);	/* PSR_C_BIT &= ~PSR_Z_BIT */
	return (pstate & PSR_C_BIT) != 0;
}

static bool __kprobes __check_ls(unsigned long pstate)
{
	pstate &= ~(pstate >> 1);	/* PSR_C_BIT &= ~PSR_Z_BIT */
	return (pstate & PSR_C_BIT) == 0;
}

static bool __kprobes __check_ge(unsigned long pstate)
{
	pstate ^= (pstate << 3);	/* PSR_N_BIT ^= PSR_V_BIT */
	return (pstate & PSR_N_BIT) == 0;
}

static bool __kprobes __check_lt(unsigned long pstate)
{
	pstate ^= (pstate << 3);	/* PSR_N_BIT ^= PSR_V_BIT */
	return (pstate & PSR_N_BIT) != 0;
}

static bool __kprobes __check_gt(unsigned long pstate)
{
	/*PSR_N_BIT ^= PSR_V_BIT */
	unsigned long temp = pstate ^ (pstate << 3);

	temp |= (pstate << 1);	/*PSR_N_BIT |= PSR_Z_BIT */
	return (temp & PSR_N_BIT) == 0;
}

static bool __kprobes __check_le(unsigned long pstate)
{
	/*PSR_N_BIT ^= PSR_V_BIT */
	unsigned long temp = pstate ^ (pstate << 3);

	temp |= (pstate << 1);	/*PSR_N_BIT |= PSR_Z_BIT */
	return (temp & PSR_N_BIT) != 0;
}

static bool __kprobes __check_al(unsigned long pstate)
{
	return true;
}

/*
 * Note that the ARMv8 ARM calls condition code 0b1111 "nv", but states that
 * it behaves identically to 0b1110 ("al").
 */
pstate_check_t * const aarch32_opcode_cond_checks[16] = {
	__check_eq, __check_ne, __check_cs, __check_cc,
	__check_mi, __check_pl, __check_vs, __check_vc,
	__check_hi, __check_ls, __check_ge, __check_lt,
	__check_gt, __check_le, __check_al, __check_al
};