/*
 * Copyright (C) 2013 Huawei Ltd.
 * Author: Jiang Liu <liuj97@gmail.com>
 *
 * Copyright (C) 2014 Zi Shen Lim <zlim.lnx@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/bitops.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/stop_machine.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>
#include <asm/debug-monitors.h>
#include <asm/insn.h>
#define AARCH64_INSN_SF_BIT	BIT(31)
#define AARCH64_INSN_N_BIT	BIT(22)
static int aarch64_insn_encoding_class[] = {
	AARCH64_INSN_CLS_UNKNOWN,
	AARCH64_INSN_CLS_UNKNOWN,
	AARCH64_INSN_CLS_UNKNOWN,
	AARCH64_INSN_CLS_UNKNOWN,
	AARCH64_INSN_CLS_LDST,
	AARCH64_INSN_CLS_DP_REG,
	AARCH64_INSN_CLS_LDST,
	AARCH64_INSN_CLS_DP_FPSIMD,
	AARCH64_INSN_CLS_DP_IMM,
	AARCH64_INSN_CLS_DP_IMM,
	AARCH64_INSN_CLS_BR_SYS,
	AARCH64_INSN_CLS_BR_SYS,
	AARCH64_INSN_CLS_LDST,
	AARCH64_INSN_CLS_DP_REG,
	AARCH64_INSN_CLS_LDST,
	AARCH64_INSN_CLS_DP_FPSIMD,
};
enum aarch64_insn_encoding_class __kprobes aarch64_get_insn_class(u32 insn)
{
	return aarch64_insn_encoding_class[(insn >> 25) & 0xf];
}
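
/*
 * Illustrative sketch, not part of the original file: how a caller might
 * use aarch64_get_insn_class() to filter for branch/system instructions.
 * The encoding 0x14000001 ("b .+4", op0 = 0b1010) is assumed for the
 * example; example_classify() is a hypothetical helper.
 */
static void __maybe_unused example_classify(void)
{
	u32 insn = 0x14000001;	/* b .+4 */

	if (aarch64_get_insn_class(insn) == AARCH64_INSN_CLS_BR_SYS)
		pr_debug("insn %08x is a branch/system instruction\n", insn);
}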
/* NOP is an alias of HINT */
bool __kprobes aarch64_insn_is_nop(u32 insn)
{
	if (!aarch64_insn_is_hint(insn))
		return false;

	switch (insn & 0xFE0) {
	case AARCH64_INSN_HINT_YIELD:
	case AARCH64_INSN_HINT_WFE:
	case AARCH64_INSN_HINT_WFI:
	case AARCH64_INSN_HINT_SEV:
	case AARCH64_INSN_HINT_SEVL:
		return false;
	default:
		return true;
	}
}
/*
 * In ARMv8-A, A64 instructions have a fixed 32 bit size and must always
 * be little-endian.
 */
int __kprobes aarch64_insn_read(void *addr, u32 *insnp)
{
	int ret;
	u32 val;

	ret = probe_kernel_read(&val, addr, AARCH64_INSN_SIZE);
	if (!ret)
		*insnp = le32_to_cpu(val);

	return ret;
}
int __kprobes aarch64_insn_write(void *addr, u32 insn)
{
	insn = cpu_to_le32(insn);
	return probe_kernel_write(addr, &insn, AARCH64_INSN_SIZE);
}
static bool __kprobes __aarch64_insn_hotpatch_safe(u32 insn)
{
	if (aarch64_get_insn_class(insn) != AARCH64_INSN_CLS_BR_SYS)
		return false;

	return	aarch64_insn_is_b(insn) ||
		aarch64_insn_is_bl(insn) ||
		aarch64_insn_is_svc(insn) ||
		aarch64_insn_is_hvc(insn) ||
		aarch64_insn_is_smc(insn) ||
		aarch64_insn_is_brk(insn) ||
		aarch64_insn_is_nop(insn);
}
/*
 * ARM Architecture Reference Manual for ARMv8 Profile-A, Issue A.a
 * Section B2.6.5 "Concurrent modification and execution of instructions":
 * Concurrent modification and execution of instructions can lead to the
 * resulting instruction performing any behavior that can be achieved by
 * executing any sequence of instructions that can be executed from the
 * same Exception level, except where the instruction before modification
 * and the instruction after modification is a B, BL, NOP, BKPT, SVC, HVC,
 * or SMC instruction.
 */
bool __kprobes aarch64_insn_hotpatch_safe(u32 old_insn, u32 new_insn)
{
	return __aarch64_insn_hotpatch_safe(old_insn) &&
	       __aarch64_insn_hotpatch_safe(new_insn);
}
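
/*
 * Illustrative sketch (an assumption, not in the original file): a NOP
 * and a B are both on the B2.6.5 safe list, so a jump-label style user
 * can flip between them without stop_machine(). The helper name is
 * hypothetical; the gen_* functions used are declared in <asm/insn.h>.
 */
static bool __maybe_unused example_can_hotpatch_nop_to_b(unsigned long pc,
							 unsigned long target)
{
	u32 branch = aarch64_insn_gen_branch_imm(pc, target,
						 AARCH64_INSN_BRANCH_NOLINK);

	return aarch64_insn_hotpatch_safe(aarch64_insn_gen_nop(), branch);
}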
int __kprobes aarch64_insn_patch_text_nosync(void *addr, u32 insn)
{
	u32 *tp = addr;
	int ret;

	/* A64 instructions must be word aligned */
	if ((uintptr_t)tp & 0x3)
		return -EINVAL;

	ret = aarch64_insn_write(tp, insn);
	if (ret == 0)
		flush_icache_range((uintptr_t)tp,
				   (uintptr_t)tp + AARCH64_INSN_SIZE);

	return ret;
}
struct aarch64_insn_patch {
	void		**text_addrs;
	u32		*new_insns;
	int		insn_cnt;
	atomic_t	cpu_count;
};
static int __kprobes aarch64_insn_patch_text_cb(void *arg)
{
	int i, ret = 0;
	struct aarch64_insn_patch *pp = arg;

	/* The first CPU becomes master */
	if (atomic_inc_return(&pp->cpu_count) == 1) {
		for (i = 0; ret == 0 && i < pp->insn_cnt; i++)
			ret = aarch64_insn_patch_text_nosync(pp->text_addrs[i],
							     pp->new_insns[i]);
		/*
		 * aarch64_insn_patch_text_nosync() calls flush_icache_range(),
		 * which ends with a "dsb; isb" pair guaranteeing global
		 * visibility of the new instructions.
		 */
		atomic_set(&pp->cpu_count, -1);
	} else {
		while (atomic_read(&pp->cpu_count) != -1)
			cpu_relax();
		isb();
	}

	return ret;
}
int __kprobes aarch64_insn_patch_text_sync(void *addrs[], u32 insns[], int cnt)
{
	struct aarch64_insn_patch patch = {
		.text_addrs = addrs,
		.new_insns = insns,
		.insn_cnt = cnt,
		.cpu_count = ATOMIC_INIT(0),
	};

	if (cnt <= 0)
		return -EINVAL;

	return stop_machine(aarch64_insn_patch_text_cb, &patch,
			    cpu_online_mask);
}
int __kprobes aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt)
{
	int ret;
	u32 insn;

	/* Unsafe to patch multiple instructions without synchronization */
	if (cnt == 1) {
		ret = aarch64_insn_read(addrs[0], &insn);
		if (ret)
			return ret;

		if (aarch64_insn_hotpatch_safe(insn, insns[0])) {
			/*
			 * ARMv8 architecture doesn't guarantee all CPUs see
			 * the new instruction after returning from function
			 * aarch64_insn_patch_text_nosync(). So send IPIs to
			 * all other CPUs to achieve instruction
			 * synchronization.
			 */
			ret = aarch64_insn_patch_text_nosync(addrs[0],
							     insns[0]);
			kick_all_cpus_sync();
			return ret;
		}
	}

	return aarch64_insn_patch_text_sync(addrs, insns, cnt);
}
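
/*
 * Illustrative sketch (assumed usage, not part of the original file):
 * NOP out a single patch site. aarch64_insn_patch_text() takes the cheap
 * IPI path here, because a NOP <-> anything-safe rewrite passes the
 * hotpatch check above. example_nop_out() is a hypothetical helper.
 */
static int __maybe_unused example_nop_out(void *patch_site)
{
	u32 nop = aarch64_insn_gen_nop();

	return aarch64_insn_patch_text(&patch_site, &nop, 1);
}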
u32 __kprobes aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type,
					    u32 insn, u64 imm)
{
	u32 immlo, immhi, lomask, himask, mask;
	int shift;

	switch (type) {
	case AARCH64_INSN_IMM_ADR:
		lomask = 0x3;
		himask = 0x7ffff;
		immlo = imm & lomask;
		imm >>= 2;
		immhi = imm & himask;
		imm = (immlo << 24) | (immhi);
		mask = (lomask << 24) | (himask);
		shift = 5;
		break;
	case AARCH64_INSN_IMM_26:
		mask = BIT(26) - 1;
		shift = 0;
		break;
	case AARCH64_INSN_IMM_19:
		mask = BIT(19) - 1;
		shift = 5;
		break;
	case AARCH64_INSN_IMM_16:
		mask = BIT(16) - 1;
		shift = 5;
		break;
	case AARCH64_INSN_IMM_14:
		mask = BIT(14) - 1;
		shift = 5;
		break;
	case AARCH64_INSN_IMM_12:
		mask = BIT(12) - 1;
		shift = 10;
		break;
	case AARCH64_INSN_IMM_9:
		mask = BIT(9) - 1;
		shift = 12;
		break;
	case AARCH64_INSN_IMM_7:
		mask = BIT(7) - 1;
		shift = 15;
		break;
	case AARCH64_INSN_IMM_S:
		mask = BIT(6) - 1;
		shift = 10;
		break;
	case AARCH64_INSN_IMM_R:
		mask = BIT(6) - 1;
		shift = 16;
		break;
	default:
		pr_err("aarch64_insn_encode_immediate: unknown immediate encoding %d\n",
		       type);
		return 0;
	}

	/* Update the immediate field. */
	insn &= ~(mask << shift);
	insn |= (imm & mask) << shift;

	return insn;
}
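
/*
 * Illustrative sketch (assumption): re-aiming an existing B/BL without
 * regenerating it from scratch, by rewriting only its imm26 field. The
 * offset is encoded in units of 4 bytes, hence the ">> 2". The insn
 * argument is assumed to already be a B or BL encoding; the helper name
 * is hypothetical.
 */
static u32 __maybe_unused example_retarget_branch(u32 insn, unsigned long pc,
						  unsigned long new_target)
{
	long offset = (long)new_target - (long)pc;

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn,
					     offset >> 2);
}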
static u32 aarch64_insn_encode_register(enum aarch64_insn_register_type type,
					u32 insn,
					enum aarch64_insn_register reg)
{
	int shift;

	if (reg < AARCH64_INSN_REG_0 || reg > AARCH64_INSN_REG_SP) {
		pr_err("%s: unknown register encoding %d\n", __func__, reg);
		return 0;
	}

	switch (type) {
	case AARCH64_INSN_REGTYPE_RT:
	case AARCH64_INSN_REGTYPE_RD:
		shift = 0;
		break;
	case AARCH64_INSN_REGTYPE_RN:
		shift = 5;
		break;
	case AARCH64_INSN_REGTYPE_RT2:
		shift = 10;
		break;
	case AARCH64_INSN_REGTYPE_RM:
		shift = 16;
		break;
	default:
		pr_err("%s: unknown register type encoding %d\n", __func__,
		       type);
		return 0;
	}

	insn &= ~(GENMASK(4, 0) << shift);
	insn |= reg << shift;

	return insn;
}
static u32 aarch64_insn_encode_ldst_size(enum aarch64_insn_size_type type,
					 u32 insn)
{
	u32 size;

	switch (type) {
	case AARCH64_INSN_SIZE_8:
		size = 0;
		break;
	case AARCH64_INSN_SIZE_16:
		size = 1;
		break;
	case AARCH64_INSN_SIZE_32:
		size = 2;
		break;
	case AARCH64_INSN_SIZE_64:
		size = 3;
		break;
	default:
		pr_err("%s: unknown size encoding %d\n", __func__, type);
		return 0;
	}

	insn &= ~GENMASK(31, 30);
	insn |= size << 30;

	return insn;
}
static inline long branch_imm_common(unsigned long pc, unsigned long addr,
				     long range)
{
	long offset;

	/*
	 * PC: A 64-bit Program Counter holding the address of the current
	 * instruction. A64 instructions must be word-aligned.
	 */
	BUG_ON((pc & 0x3) || (addr & 0x3));

	offset = ((long)addr - (long)pc);
	BUG_ON(offset < -range || offset >= range);

	return offset;
}
u32 __kprobes aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr,
					  enum aarch64_insn_branch_type type)
{
	u32 insn;
	long offset;

	/*
	 * B/BL support [-128M, 128M) offset
	 * ARM64 virtual address arrangement guarantees all kernel and module
	 * texts are within +/-128M.
	 */
	offset = branch_imm_common(pc, addr, SZ_128M);

	switch (type) {
	case AARCH64_INSN_BRANCH_LINK:
		insn = aarch64_insn_get_bl_value();
		break;
	case AARCH64_INSN_BRANCH_NOLINK:
		insn = aarch64_insn_get_b_value();
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn,
					     offset >> 2);
}
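
/*
 * Illustrative sketch (assumption, not in the original file): an
 * ftrace-like caller turning a patch site into "bl <handler>". The
 * +/-128M text placement noted above is what makes the range check in
 * branch_imm_common() hold; example_enable_call() is hypothetical.
 */
static int __maybe_unused example_enable_call(unsigned long pc,
					      unsigned long handler)
{
	u32 bl = aarch64_insn_gen_branch_imm(pc, handler,
					     AARCH64_INSN_BRANCH_LINK);

	return aarch64_insn_patch_text_nosync((void *)pc, bl);
}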
u32 aarch64_insn_gen_comp_branch_imm(unsigned long pc, unsigned long addr,
				     enum aarch64_insn_register reg,
				     enum aarch64_insn_variant variant,
				     enum aarch64_insn_branch_type type)
{
	u32 insn;
	long offset;

	/* CBZ/CBNZ support [-1M, 1M) offset */
	offset = branch_imm_common(pc, addr, SZ_1M);

	switch (type) {
	case AARCH64_INSN_BRANCH_COMP_ZERO:
		insn = aarch64_insn_get_cbz_value();
		break;
	case AARCH64_INSN_BRANCH_COMP_NONZERO:
		insn = aarch64_insn_get_cbnz_value();
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
					     offset >> 2);
}
u32 aarch64_insn_gen_cond_branch_imm(unsigned long pc, unsigned long addr,
				     enum aarch64_insn_condition cond)
{
	u32 insn;
	long offset;

	/* B.cond supports [-1M, 1M) offset */
	offset = branch_imm_common(pc, addr, SZ_1M);

	insn = aarch64_insn_get_bcond_value();

	BUG_ON(cond < AARCH64_INSN_COND_EQ || cond > AARCH64_INSN_COND_AL);
	insn |= cond;

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
					     offset >> 2);
}
u32 __kprobes aarch64_insn_gen_hint(enum aarch64_insn_hint_op op)
{
	return aarch64_insn_get_hint_value() | op;
}
u32 __kprobes aarch64_insn_gen_nop(void)
{
	return aarch64_insn_gen_hint(AARCH64_INSN_HINT_NOP);
}
u32 aarch64_insn_gen_branch_reg(enum aarch64_insn_register reg,
				enum aarch64_insn_branch_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_BRANCH_NOLINK:
		insn = aarch64_insn_get_br_value();
		break;
	case AARCH64_INSN_BRANCH_LINK:
		insn = aarch64_insn_get_blr_value();
		break;
	case AARCH64_INSN_BRANCH_RETURN:
		insn = aarch64_insn_get_ret_value();
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, reg);
}
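
/*
 * Illustrative sketch (assumption): the tail of a trampoline that jumps
 * through a scratch register, "br x16", x16 being IP0 which the AAPCS64
 * procedure-call standard allows veneers to clobber. The helper name is
 * hypothetical.
 */
static u32 __maybe_unused example_gen_br_x16(void)
{
	return aarch64_insn_gen_branch_reg(AARCH64_INSN_REG_16,
					   AARCH64_INSN_BRANCH_NOLINK);
}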
u32 aarch64_insn_gen_load_store_reg(enum aarch64_insn_register reg,
				    enum aarch64_insn_register base,
				    enum aarch64_insn_register offset,
				    enum aarch64_insn_size_type size,
				    enum aarch64_insn_ldst_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_LDST_LOAD_REG_OFFSET:
		insn = aarch64_insn_get_ldr_reg_value();
		break;
	case AARCH64_INSN_LDST_STORE_REG_OFFSET:
		insn = aarch64_insn_get_str_reg_value();
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_ldst_size(size, insn);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    base);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn,
					    offset);
}
u32 aarch64_insn_gen_load_store_pair(enum aarch64_insn_register reg1,
				     enum aarch64_insn_register reg2,
				     enum aarch64_insn_register base,
				     int offset,
				     enum aarch64_insn_variant variant,
				     enum aarch64_insn_ldst_type type)
{
	u32 insn;
	int shift;

	switch (type) {
	case AARCH64_INSN_LDST_LOAD_PAIR_PRE_INDEX:
		insn = aarch64_insn_get_ldp_pre_value();
		break;
	case AARCH64_INSN_LDST_STORE_PAIR_PRE_INDEX:
		insn = aarch64_insn_get_stp_pre_value();
		break;
	case AARCH64_INSN_LDST_LOAD_PAIR_POST_INDEX:
		insn = aarch64_insn_get_ldp_post_value();
		break;
	case AARCH64_INSN_LDST_STORE_PAIR_POST_INDEX:
		insn = aarch64_insn_get_stp_post_value();
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		/* offset must be multiples of 4 in the range [-256, 252] */
		BUG_ON(offset & 0x3);
		BUG_ON(offset < -256 || offset > 252);
		shift = 2;
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		/* offset must be multiples of 8 in the range [-512, 504] */
		BUG_ON(offset & 0x7);
		BUG_ON(offset < -512 || offset > 504);
		shift = 3;
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
					    reg1);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT2, insn,
					    reg2);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    base);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_7, insn,
					     offset >> shift);
}
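
/*
 * Illustrative sketch (assumption): a JIT-style frame save/restore pair,
 * "stp x29, x30, [sp, #-16]!" and "ldp x29, x30, [sp], #16". The helper
 * name is hypothetical; both offsets satisfy the 64-bit variant checks
 * above (multiples of 8 in [-512, 504]).
 */
static void __maybe_unused example_gen_frame(u32 *stp, u32 *ldp)
{
	*stp = aarch64_insn_gen_load_store_pair(AARCH64_INSN_REG_FP,
						AARCH64_INSN_REG_LR,
						AARCH64_INSN_REG_SP, -16,
						AARCH64_INSN_VARIANT_64BIT,
						AARCH64_INSN_LDST_STORE_PAIR_PRE_INDEX);
	*ldp = aarch64_insn_gen_load_store_pair(AARCH64_INSN_REG_FP,
						AARCH64_INSN_REG_LR,
						AARCH64_INSN_REG_SP, 16,
						AARCH64_INSN_VARIANT_64BIT,
						AARCH64_INSN_LDST_LOAD_PAIR_POST_INDEX);
}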
u32 aarch64_insn_gen_add_sub_imm(enum aarch64_insn_register dst,
				 enum aarch64_insn_register src,
				 int imm, enum aarch64_insn_variant variant,
				 enum aarch64_insn_adsb_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_ADSB_ADD:
		insn = aarch64_insn_get_add_imm_value();
		break;
	case AARCH64_INSN_ADSB_SUB:
		insn = aarch64_insn_get_sub_imm_value();
		break;
	case AARCH64_INSN_ADSB_ADD_SETFLAGS:
		insn = aarch64_insn_get_adds_imm_value();
		break;
	case AARCH64_INSN_ADSB_SUB_SETFLAGS:
		insn = aarch64_insn_get_subs_imm_value();
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	/* The immediate must fit in 12 bits */
	BUG_ON(imm & ~(SZ_4K - 1));

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_12, insn, imm);
}
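
/*
 * Illustrative sketch (assumption): "sub sp, sp, #frame" to open a stack
 * frame. R31 means SP (not XZR) in the add/sub immediate class, so
 * AARCH64_INSN_REG_SP is valid here; frame must fit the 12-bit immediate
 * checked above. example_gen_sub_sp() is hypothetical.
 */
static u32 __maybe_unused example_gen_sub_sp(int frame)
{
	return aarch64_insn_gen_add_sub_imm(AARCH64_INSN_REG_SP,
					    AARCH64_INSN_REG_SP, frame,
					    AARCH64_INSN_VARIANT_64BIT,
					    AARCH64_INSN_ADSB_SUB);
}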
u32 aarch64_insn_gen_bitfield(enum aarch64_insn_register dst,
			      enum aarch64_insn_register src,
			      int immr, int imms,
			      enum aarch64_insn_variant variant,
			      enum aarch64_insn_bitfield_type type)
{
	u32 insn;
	u32 mask;

	switch (type) {
	case AARCH64_INSN_BITFIELD_MOVE:
		insn = aarch64_insn_get_bfm_value();
		break;
	case AARCH64_INSN_BITFIELD_MOVE_UNSIGNED:
		insn = aarch64_insn_get_ubfm_value();
		break;
	case AARCH64_INSN_BITFIELD_MOVE_SIGNED:
		insn = aarch64_insn_get_sbfm_value();
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		mask = GENMASK(4, 0);
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT | AARCH64_INSN_N_BIT;
		mask = GENMASK(5, 0);
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	BUG_ON(immr & ~mask);
	BUG_ON(imms & ~mask);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_R, insn, immr);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_S, insn, imms);
}
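
/*
 * Illustrative sketch (assumption): "lsl xd, xn, #shift" is an alias of
 * "ubfm xd, xn, #((64 - shift) & 63), #(63 - shift)", so a JIT can emit
 * 64-bit left shifts through aarch64_insn_gen_bitfield(). The helper is
 * hypothetical; shift is assumed to be in [0, 63].
 */
static u32 __maybe_unused example_gen_lsl64(enum aarch64_insn_register dst,
					    enum aarch64_insn_register src,
					    int shift)
{
	return aarch64_insn_gen_bitfield(dst, src, (64 - shift) & 63,
					 63 - shift,
					 AARCH64_INSN_VARIANT_64BIT,
					 AARCH64_INSN_BITFIELD_MOVE_UNSIGNED);
}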