/*
 * Copyright (C) 2013 Huawei Ltd.
 * Author: Jiang Liu <liuj97@gmail.com>
 *
 * Copyright (C) 2014 Zi Shen Lim <zlim.lnx@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/stop_machine.h>
#include <linux/types.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>
#include <asm/debug-monitors.h>
#include <asm/fixmap.h>
#include <asm/insn.h>
/* sf bit (bit 31) selects 64-bit variant; N bit (bit 22) widens bitfield masks */
#define AARCH64_INSN_SF_BIT	BIT(31)
#define AARCH64_INSN_N_BIT	BIT(22)
38 static int aarch64_insn_encoding_class[] = {
39 AARCH64_INSN_CLS_UNKNOWN,
40 AARCH64_INSN_CLS_UNKNOWN,
41 AARCH64_INSN_CLS_UNKNOWN,
42 AARCH64_INSN_CLS_UNKNOWN,
43 AARCH64_INSN_CLS_LDST,
44 AARCH64_INSN_CLS_DP_REG,
45 AARCH64_INSN_CLS_LDST,
46 AARCH64_INSN_CLS_DP_FPSIMD,
47 AARCH64_INSN_CLS_DP_IMM,
48 AARCH64_INSN_CLS_DP_IMM,
49 AARCH64_INSN_CLS_BR_SYS,
50 AARCH64_INSN_CLS_BR_SYS,
51 AARCH64_INSN_CLS_LDST,
52 AARCH64_INSN_CLS_DP_REG,
53 AARCH64_INSN_CLS_LDST,
54 AARCH64_INSN_CLS_DP_FPSIMD,
57 enum aarch64_insn_encoding_class __kprobes aarch64_get_insn_class(u32 insn)
59 return aarch64_insn_encoding_class[(insn >> 25) & 0xf];
62 /* NOP is an alias of HINT */
63 bool __kprobes aarch64_insn_is_nop(u32 insn)
65 if (!aarch64_insn_is_hint(insn))
68 switch (insn & 0xFE0) {
69 case AARCH64_INSN_HINT_YIELD:
70 case AARCH64_INSN_HINT_WFE:
71 case AARCH64_INSN_HINT_WFI:
72 case AARCH64_INSN_HINT_SEV:
73 case AARCH64_INSN_HINT_SEVL:
/* Serialises use of the FIX_TEXT_POKE0 fixmap slot during text patching */
static DEFINE_SPINLOCK(patch_lock);
82 static void __kprobes *patch_map(void *addr, int fixmap)
84 unsigned long uintaddr = (uintptr_t) addr;
85 bool module = !core_kernel_text(uintaddr);
88 if (module && IS_ENABLED(CONFIG_DEBUG_SET_MODULE_RONX))
89 page = vmalloc_to_page(addr);
91 page = virt_to_page(addr);
94 set_fixmap(fixmap, page_to_phys(page));
96 return (void *) (__fix_to_virt(fixmap) + (uintaddr & ~PAGE_MASK));
99 static void __kprobes patch_unmap(int fixmap)
101 clear_fixmap(fixmap);
104 * In ARMv8-A, A64 instructions have a fixed length of 32 bits and are always
107 int __kprobes aarch64_insn_read(void *addr, u32 *insnp)
112 ret = probe_kernel_read(&val, addr, AARCH64_INSN_SIZE);
114 *insnp = le32_to_cpu(val);
119 static int __kprobes __aarch64_insn_write(void *addr, u32 insn)
122 unsigned long flags = 0;
125 spin_lock_irqsave(&patch_lock, flags);
126 waddr = patch_map(addr, FIX_TEXT_POKE0);
128 ret = probe_kernel_write(waddr, &insn, AARCH64_INSN_SIZE);
130 patch_unmap(FIX_TEXT_POKE0);
131 spin_unlock_irqrestore(&patch_lock, flags);
136 int __kprobes aarch64_insn_write(void *addr, u32 insn)
138 insn = cpu_to_le32(insn);
139 return __aarch64_insn_write(addr, insn);
142 static bool __kprobes __aarch64_insn_hotpatch_safe(u32 insn)
144 if (aarch64_get_insn_class(insn) != AARCH64_INSN_CLS_BR_SYS)
147 return aarch64_insn_is_b(insn) ||
148 aarch64_insn_is_bl(insn) ||
149 aarch64_insn_is_svc(insn) ||
150 aarch64_insn_is_hvc(insn) ||
151 aarch64_insn_is_smc(insn) ||
152 aarch64_insn_is_brk(insn) ||
153 aarch64_insn_is_nop(insn);
157 * ARM Architecture Reference Manual for ARMv8 Profile-A, Issue A.a
158 * Section B2.6.5 "Concurrent modification and execution of instructions":
159 * Concurrent modification and execution of instructions can lead to the
160 * resulting instruction performing any behavior that can be achieved by
161 * executing any sequence of instructions that can be executed from the
162 * same Exception level, except where the instruction before modification
163 * and the instruction after modification is a B, BL, NOP, BKPT, SVC, HVC,
164 * or SMC instruction.
166 bool __kprobes aarch64_insn_hotpatch_safe(u32 old_insn, u32 new_insn)
168 return __aarch64_insn_hotpatch_safe(old_insn) &&
169 __aarch64_insn_hotpatch_safe(new_insn);
172 int __kprobes aarch64_insn_patch_text_nosync(void *addr, u32 insn)
177 /* A64 instructions must be word aligned */
178 if ((uintptr_t)tp & 0x3)
181 ret = aarch64_insn_write(tp, insn);
183 flush_icache_range((uintptr_t)tp,
184 (uintptr_t)tp + AARCH64_INSN_SIZE);
189 struct aarch64_insn_patch {
196 static int __kprobes aarch64_insn_patch_text_cb(void *arg)
199 struct aarch64_insn_patch *pp = arg;
201 /* The first CPU becomes master */
202 if (atomic_inc_return(&pp->cpu_count) == 1) {
203 for (i = 0; ret == 0 && i < pp->insn_cnt; i++)
204 ret = aarch64_insn_patch_text_nosync(pp->text_addrs[i],
207 * aarch64_insn_patch_text_nosync() calls flush_icache_range(),
208 * which ends with "dsb; isb" pair guaranteeing global
211 /* Notify other processors with an additional increment. */
212 atomic_inc(&pp->cpu_count);
214 while (atomic_read(&pp->cpu_count) <= num_online_cpus())
222 int __kprobes aarch64_insn_patch_text_sync(void *addrs[], u32 insns[], int cnt)
224 struct aarch64_insn_patch patch = {
228 .cpu_count = ATOMIC_INIT(0),
234 return stop_machine(aarch64_insn_patch_text_cb, &patch,
238 int __kprobes aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt)
243 /* Unsafe to patch multiple instructions without synchronizaiton */
245 ret = aarch64_insn_read(addrs[0], &insn);
249 if (aarch64_insn_hotpatch_safe(insn, insns[0])) {
251 * ARMv8 architecture doesn't guarantee all CPUs see
252 * the new instruction after returning from function
253 * aarch64_insn_patch_text_nosync(). So send IPIs to
254 * all other CPUs to achieve instruction
257 ret = aarch64_insn_patch_text_nosync(addrs[0], insns[0]);
258 kick_all_cpus_sync();
263 return aarch64_insn_patch_text_sync(addrs, insns, cnt);
266 u32 __kprobes aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type,
269 u32 immlo, immhi, lomask, himask, mask;
273 case AARCH64_INSN_IMM_ADR:
276 immlo = imm & lomask;
278 immhi = imm & himask;
279 imm = (immlo << 24) | (immhi);
280 mask = (lomask << 24) | (himask);
283 case AARCH64_INSN_IMM_26:
287 case AARCH64_INSN_IMM_19:
291 case AARCH64_INSN_IMM_16:
295 case AARCH64_INSN_IMM_14:
299 case AARCH64_INSN_IMM_12:
303 case AARCH64_INSN_IMM_9:
307 case AARCH64_INSN_IMM_7:
311 case AARCH64_INSN_IMM_6:
312 case AARCH64_INSN_IMM_S:
316 case AARCH64_INSN_IMM_R:
321 pr_err("aarch64_insn_encode_immediate: unknown immediate encoding %d\n",
326 /* Update the immediate field. */
327 insn &= ~(mask << shift);
328 insn |= (imm & mask) << shift;
333 static u32 aarch64_insn_encode_register(enum aarch64_insn_register_type type,
335 enum aarch64_insn_register reg)
339 if (reg < AARCH64_INSN_REG_0 || reg > AARCH64_INSN_REG_SP) {
340 pr_err("%s: unknown register encoding %d\n", __func__, reg);
345 case AARCH64_INSN_REGTYPE_RT:
346 case AARCH64_INSN_REGTYPE_RD:
349 case AARCH64_INSN_REGTYPE_RN:
352 case AARCH64_INSN_REGTYPE_RT2:
353 case AARCH64_INSN_REGTYPE_RA:
356 case AARCH64_INSN_REGTYPE_RM:
360 pr_err("%s: unknown register type encoding %d\n", __func__,
365 insn &= ~(GENMASK(4, 0) << shift);
366 insn |= reg << shift;
371 static u32 aarch64_insn_encode_ldst_size(enum aarch64_insn_size_type type,
377 case AARCH64_INSN_SIZE_8:
380 case AARCH64_INSN_SIZE_16:
383 case AARCH64_INSN_SIZE_32:
386 case AARCH64_INSN_SIZE_64:
390 pr_err("%s: unknown size encoding %d\n", __func__, type);
394 insn &= ~GENMASK(31, 30);
/*
 * Compute the byte offset from @pc to @addr, BUG()ing on misaligned
 * addresses or an offset outside [-range, range).
 */
static inline long branch_imm_common(unsigned long pc, unsigned long addr,
				     long range)
{
	long offset;

	/*
	 * PC: A 64-bit Program Counter holding the address of the current
	 * instruction. A64 instructions must be word-aligned.
	 */
	BUG_ON((pc & 0x3) || (addr & 0x3));

	offset = ((long)addr - (long)pc);
	BUG_ON(offset < -range || offset >= range);

	return offset;
}
417 u32 __kprobes aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr,
418 enum aarch64_insn_branch_type type)
424 * B/BL support [-128M, 128M) offset
425 * ARM64 virtual address arrangement guarantees all kernel and module
426 * texts are within +/-128M.
428 offset = branch_imm_common(pc, addr, SZ_128M);
431 case AARCH64_INSN_BRANCH_LINK:
432 insn = aarch64_insn_get_bl_value();
434 case AARCH64_INSN_BRANCH_NOLINK:
435 insn = aarch64_insn_get_b_value();
439 return AARCH64_BREAK_FAULT;
442 return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn,
446 u32 aarch64_insn_gen_comp_branch_imm(unsigned long pc, unsigned long addr,
447 enum aarch64_insn_register reg,
448 enum aarch64_insn_variant variant,
449 enum aarch64_insn_branch_type type)
454 offset = branch_imm_common(pc, addr, SZ_1M);
457 case AARCH64_INSN_BRANCH_COMP_ZERO:
458 insn = aarch64_insn_get_cbz_value();
460 case AARCH64_INSN_BRANCH_COMP_NONZERO:
461 insn = aarch64_insn_get_cbnz_value();
465 return AARCH64_BREAK_FAULT;
469 case AARCH64_INSN_VARIANT_32BIT:
471 case AARCH64_INSN_VARIANT_64BIT:
472 insn |= AARCH64_INSN_SF_BIT;
476 return AARCH64_BREAK_FAULT;
479 insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg);
481 return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
485 u32 aarch64_insn_gen_cond_branch_imm(unsigned long pc, unsigned long addr,
486 enum aarch64_insn_condition cond)
491 offset = branch_imm_common(pc, addr, SZ_1M);
493 insn = aarch64_insn_get_bcond_value();
495 BUG_ON(cond < AARCH64_INSN_COND_EQ || cond > AARCH64_INSN_COND_AL);
498 return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
502 u32 __kprobes aarch64_insn_gen_hint(enum aarch64_insn_hint_op op)
504 return aarch64_insn_get_hint_value() | op;
507 u32 __kprobes aarch64_insn_gen_nop(void)
509 return aarch64_insn_gen_hint(AARCH64_INSN_HINT_NOP);
512 u32 aarch64_insn_gen_branch_reg(enum aarch64_insn_register reg,
513 enum aarch64_insn_branch_type type)
518 case AARCH64_INSN_BRANCH_NOLINK:
519 insn = aarch64_insn_get_br_value();
521 case AARCH64_INSN_BRANCH_LINK:
522 insn = aarch64_insn_get_blr_value();
524 case AARCH64_INSN_BRANCH_RETURN:
525 insn = aarch64_insn_get_ret_value();
529 return AARCH64_BREAK_FAULT;
532 return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, reg);
535 u32 aarch64_insn_gen_load_store_reg(enum aarch64_insn_register reg,
536 enum aarch64_insn_register base,
537 enum aarch64_insn_register offset,
538 enum aarch64_insn_size_type size,
539 enum aarch64_insn_ldst_type type)
544 case AARCH64_INSN_LDST_LOAD_REG_OFFSET:
545 insn = aarch64_insn_get_ldr_reg_value();
547 case AARCH64_INSN_LDST_STORE_REG_OFFSET:
548 insn = aarch64_insn_get_str_reg_value();
552 return AARCH64_BREAK_FAULT;
555 insn = aarch64_insn_encode_ldst_size(size, insn);
557 insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg);
559 insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
562 return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn,
566 u32 aarch64_insn_gen_load_store_pair(enum aarch64_insn_register reg1,
567 enum aarch64_insn_register reg2,
568 enum aarch64_insn_register base,
570 enum aarch64_insn_variant variant,
571 enum aarch64_insn_ldst_type type)
577 case AARCH64_INSN_LDST_LOAD_PAIR_PRE_INDEX:
578 insn = aarch64_insn_get_ldp_pre_value();
580 case AARCH64_INSN_LDST_STORE_PAIR_PRE_INDEX:
581 insn = aarch64_insn_get_stp_pre_value();
583 case AARCH64_INSN_LDST_LOAD_PAIR_POST_INDEX:
584 insn = aarch64_insn_get_ldp_post_value();
586 case AARCH64_INSN_LDST_STORE_PAIR_POST_INDEX:
587 insn = aarch64_insn_get_stp_post_value();
591 return AARCH64_BREAK_FAULT;
595 case AARCH64_INSN_VARIANT_32BIT:
596 /* offset must be multiples of 4 in the range [-256, 252] */
597 BUG_ON(offset & 0x3);
598 BUG_ON(offset < -256 || offset > 252);
601 case AARCH64_INSN_VARIANT_64BIT:
602 /* offset must be multiples of 8 in the range [-512, 504] */
603 BUG_ON(offset & 0x7);
604 BUG_ON(offset < -512 || offset > 504);
606 insn |= AARCH64_INSN_SF_BIT;
610 return AARCH64_BREAK_FAULT;
613 insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
616 insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT2, insn,
619 insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
622 return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_7, insn,
626 u32 aarch64_insn_gen_add_sub_imm(enum aarch64_insn_register dst,
627 enum aarch64_insn_register src,
628 int imm, enum aarch64_insn_variant variant,
629 enum aarch64_insn_adsb_type type)
634 case AARCH64_INSN_ADSB_ADD:
635 insn = aarch64_insn_get_add_imm_value();
637 case AARCH64_INSN_ADSB_SUB:
638 insn = aarch64_insn_get_sub_imm_value();
640 case AARCH64_INSN_ADSB_ADD_SETFLAGS:
641 insn = aarch64_insn_get_adds_imm_value();
643 case AARCH64_INSN_ADSB_SUB_SETFLAGS:
644 insn = aarch64_insn_get_subs_imm_value();
648 return AARCH64_BREAK_FAULT;
652 case AARCH64_INSN_VARIANT_32BIT:
654 case AARCH64_INSN_VARIANT_64BIT:
655 insn |= AARCH64_INSN_SF_BIT;
659 return AARCH64_BREAK_FAULT;
662 BUG_ON(imm & ~(SZ_4K - 1));
664 insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
666 insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
668 return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_12, insn, imm);
671 u32 aarch64_insn_gen_bitfield(enum aarch64_insn_register dst,
672 enum aarch64_insn_register src,
674 enum aarch64_insn_variant variant,
675 enum aarch64_insn_bitfield_type type)
681 case AARCH64_INSN_BITFIELD_MOVE:
682 insn = aarch64_insn_get_bfm_value();
684 case AARCH64_INSN_BITFIELD_MOVE_UNSIGNED:
685 insn = aarch64_insn_get_ubfm_value();
687 case AARCH64_INSN_BITFIELD_MOVE_SIGNED:
688 insn = aarch64_insn_get_sbfm_value();
692 return AARCH64_BREAK_FAULT;
696 case AARCH64_INSN_VARIANT_32BIT:
697 mask = GENMASK(4, 0);
699 case AARCH64_INSN_VARIANT_64BIT:
700 insn |= AARCH64_INSN_SF_BIT | AARCH64_INSN_N_BIT;
701 mask = GENMASK(5, 0);
705 return AARCH64_BREAK_FAULT;
708 BUG_ON(immr & ~mask);
709 BUG_ON(imms & ~mask);
711 insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
713 insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
715 insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_R, insn, immr);
717 return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_S, insn, imms);
720 u32 aarch64_insn_gen_movewide(enum aarch64_insn_register dst,
722 enum aarch64_insn_variant variant,
723 enum aarch64_insn_movewide_type type)
728 case AARCH64_INSN_MOVEWIDE_ZERO:
729 insn = aarch64_insn_get_movz_value();
731 case AARCH64_INSN_MOVEWIDE_KEEP:
732 insn = aarch64_insn_get_movk_value();
734 case AARCH64_INSN_MOVEWIDE_INVERSE:
735 insn = aarch64_insn_get_movn_value();
739 return AARCH64_BREAK_FAULT;
742 BUG_ON(imm & ~(SZ_64K - 1));
745 case AARCH64_INSN_VARIANT_32BIT:
746 BUG_ON(shift != 0 && shift != 16);
748 case AARCH64_INSN_VARIANT_64BIT:
749 insn |= AARCH64_INSN_SF_BIT;
750 BUG_ON(shift != 0 && shift != 16 && shift != 32 &&
755 return AARCH64_BREAK_FAULT;
758 insn |= (shift >> 4) << 21;
760 insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
762 return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_16, insn, imm);
765 u32 aarch64_insn_gen_add_sub_shifted_reg(enum aarch64_insn_register dst,
766 enum aarch64_insn_register src,
767 enum aarch64_insn_register reg,
769 enum aarch64_insn_variant variant,
770 enum aarch64_insn_adsb_type type)
775 case AARCH64_INSN_ADSB_ADD:
776 insn = aarch64_insn_get_add_value();
778 case AARCH64_INSN_ADSB_SUB:
779 insn = aarch64_insn_get_sub_value();
781 case AARCH64_INSN_ADSB_ADD_SETFLAGS:
782 insn = aarch64_insn_get_adds_value();
784 case AARCH64_INSN_ADSB_SUB_SETFLAGS:
785 insn = aarch64_insn_get_subs_value();
789 return AARCH64_BREAK_FAULT;
793 case AARCH64_INSN_VARIANT_32BIT:
794 BUG_ON(shift & ~(SZ_32 - 1));
796 case AARCH64_INSN_VARIANT_64BIT:
797 insn |= AARCH64_INSN_SF_BIT;
798 BUG_ON(shift & ~(SZ_64 - 1));
802 return AARCH64_BREAK_FAULT;
806 insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
808 insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
810 insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);
812 return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift);
815 u32 aarch64_insn_gen_data1(enum aarch64_insn_register dst,
816 enum aarch64_insn_register src,
817 enum aarch64_insn_variant variant,
818 enum aarch64_insn_data1_type type)
823 case AARCH64_INSN_DATA1_REVERSE_16:
824 insn = aarch64_insn_get_rev16_value();
826 case AARCH64_INSN_DATA1_REVERSE_32:
827 insn = aarch64_insn_get_rev32_value();
829 case AARCH64_INSN_DATA1_REVERSE_64:
830 BUG_ON(variant != AARCH64_INSN_VARIANT_64BIT);
831 insn = aarch64_insn_get_rev64_value();
835 return AARCH64_BREAK_FAULT;
839 case AARCH64_INSN_VARIANT_32BIT:
841 case AARCH64_INSN_VARIANT_64BIT:
842 insn |= AARCH64_INSN_SF_BIT;
846 return AARCH64_BREAK_FAULT;
849 insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
851 return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
854 u32 aarch64_insn_gen_data2(enum aarch64_insn_register dst,
855 enum aarch64_insn_register src,
856 enum aarch64_insn_register reg,
857 enum aarch64_insn_variant variant,
858 enum aarch64_insn_data2_type type)
863 case AARCH64_INSN_DATA2_UDIV:
864 insn = aarch64_insn_get_udiv_value();
866 case AARCH64_INSN_DATA2_SDIV:
867 insn = aarch64_insn_get_sdiv_value();
869 case AARCH64_INSN_DATA2_LSLV:
870 insn = aarch64_insn_get_lslv_value();
872 case AARCH64_INSN_DATA2_LSRV:
873 insn = aarch64_insn_get_lsrv_value();
875 case AARCH64_INSN_DATA2_ASRV:
876 insn = aarch64_insn_get_asrv_value();
878 case AARCH64_INSN_DATA2_RORV:
879 insn = aarch64_insn_get_rorv_value();
883 return AARCH64_BREAK_FAULT;
887 case AARCH64_INSN_VARIANT_32BIT:
889 case AARCH64_INSN_VARIANT_64BIT:
890 insn |= AARCH64_INSN_SF_BIT;
894 return AARCH64_BREAK_FAULT;
897 insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
899 insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
901 return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);
904 u32 aarch64_insn_gen_data3(enum aarch64_insn_register dst,
905 enum aarch64_insn_register src,
906 enum aarch64_insn_register reg1,
907 enum aarch64_insn_register reg2,
908 enum aarch64_insn_variant variant,
909 enum aarch64_insn_data3_type type)
914 case AARCH64_INSN_DATA3_MADD:
915 insn = aarch64_insn_get_madd_value();
917 case AARCH64_INSN_DATA3_MSUB:
918 insn = aarch64_insn_get_msub_value();
922 return AARCH64_BREAK_FAULT;
926 case AARCH64_INSN_VARIANT_32BIT:
928 case AARCH64_INSN_VARIANT_64BIT:
929 insn |= AARCH64_INSN_SF_BIT;
933 return AARCH64_BREAK_FAULT;
936 insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
938 insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RA, insn, src);
940 insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
943 return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn,
947 u32 aarch64_insn_gen_logical_shifted_reg(enum aarch64_insn_register dst,
948 enum aarch64_insn_register src,
949 enum aarch64_insn_register reg,
951 enum aarch64_insn_variant variant,
952 enum aarch64_insn_logic_type type)
957 case AARCH64_INSN_LOGIC_AND:
958 insn = aarch64_insn_get_and_value();
960 case AARCH64_INSN_LOGIC_BIC:
961 insn = aarch64_insn_get_bic_value();
963 case AARCH64_INSN_LOGIC_ORR:
964 insn = aarch64_insn_get_orr_value();
966 case AARCH64_INSN_LOGIC_ORN:
967 insn = aarch64_insn_get_orn_value();
969 case AARCH64_INSN_LOGIC_EOR:
970 insn = aarch64_insn_get_eor_value();
972 case AARCH64_INSN_LOGIC_EON:
973 insn = aarch64_insn_get_eon_value();
975 case AARCH64_INSN_LOGIC_AND_SETFLAGS:
976 insn = aarch64_insn_get_ands_value();
978 case AARCH64_INSN_LOGIC_BIC_SETFLAGS:
979 insn = aarch64_insn_get_bics_value();
983 return AARCH64_BREAK_FAULT;
987 case AARCH64_INSN_VARIANT_32BIT:
988 BUG_ON(shift & ~(SZ_32 - 1));
990 case AARCH64_INSN_VARIANT_64BIT:
991 insn |= AARCH64_INSN_SF_BIT;
992 BUG_ON(shift & ~(SZ_64 - 1));
996 return AARCH64_BREAK_FAULT;
1000 insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
1002 insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
1004 insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);
1006 return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift);
1009 bool aarch32_insn_is_wide(u32 insn)
1011 return insn >= 0xe800;
1015 * Macros/defines for extracting register numbers from instruction.
1017 u32 aarch32_insn_extract_reg_num(u32 insn, int offset)
1019 return (insn & (0xf << offset)) >> offset;
1022 #define OPC2_MASK 0x7
1023 #define OPC2_OFFSET 5
1024 u32 aarch32_insn_mcr_extract_opc2(u32 insn)
1026 return (insn & (OPC2_MASK << OPC2_OFFSET)) >> OPC2_OFFSET;
1029 #define CRM_MASK 0xf
1030 u32 aarch32_insn_mcr_extract_crm(u32 insn)
1032 return insn & CRM_MASK;