arm64: insn: Add aarch64_{get,set}_branch_offset
author		Marc Zyngier <marc.zyngier@arm.com>
		Mon, 1 Jun 2015 09:47:39 +0000 (10:47 +0100)
committer	Catalin Marinas <catalin.marinas@arm.com>
		Wed, 3 Jun 2015 14:43:24 +0000 (15:43 +0100)
In order to deal with branches located in alternate sequences,
but pointing to the main kernel text, we need to be able to
extract the relative displacement encoded in such an instruction,
and to update the instruction with a new offset once that offset
is known.

For this, we introduce three new helpers (a usage sketch follows
the list):
- aarch64_insn_is_branch_imm is a predicate indicating whether the
  instruction is an immediate branch;
- aarch64_get_branch_offset returns a signed value representing the
  byte offset encoded in a branch instruction;
- aarch64_set_branch_offset takes an instruction and an offset, and
  returns the corresponding updated instruction.
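
As an illustration of how the three helpers compose (this sketch is
not part of the patch; the fixup_branch() name and its arguments are
hypothetical, and <linux/types.h>/<asm/insn.h> are assumed), a caller
relocating a branch copied from address 'src' to address 'dst' could
rebase it onto its original target as follows:

	static u32 fixup_branch(u32 insn, unsigned long src,
				unsigned long dst)
	{
		if (aarch64_insn_is_branch_imm(insn)) {
			/* absolute target, as seen from 'src' */
			unsigned long target;

			target = src + aarch64_get_branch_offset(insn);
			insn = aarch64_set_branch_offset(insn, target - dst);
		}
		return insn;
	}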

Acked-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
arch/arm64/include/asm/insn.h
arch/arm64/kernel/insn.c

diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h
index f81b328d9cf4034991e41d2ce3c621c1b7182c49..30e50eb54a6795aa9c504a2d22dfabd8b8db195a 100644
--- a/arch/arm64/include/asm/insn.h
+++ b/arch/arm64/include/asm/insn.h
@@ -281,6 +281,7 @@ __AARCH64_INSN_FUNCS(ret,   0xFFFFFC1F, 0xD65F0000)
 #undef __AARCH64_INSN_FUNCS
 
 bool aarch64_insn_is_nop(u32 insn);
+bool aarch64_insn_is_branch_imm(u32 insn);
 
 int aarch64_insn_read(void *addr, u32 *insnp);
 int aarch64_insn_write(void *addr, u32 insn);
@@ -351,6 +352,8 @@ u32 aarch64_insn_gen_logical_shifted_reg(enum aarch64_insn_register dst,
                                         int shift,
                                         enum aarch64_insn_variant variant,
                                         enum aarch64_insn_logic_type type);
+s32 aarch64_get_branch_offset(u32 insn);
+u32 aarch64_set_branch_offset(u32 insn, s32 offset);
 
 bool aarch64_insn_hotpatch_safe(u32 old_insn, u32 new_insn);
 
diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c
index 924902083e47eca28812837c8d76583a30fdaafb..dd9671cd0bb255b2b5fa32a5e1e5a61b6decaac5 100644
--- a/arch/arm64/kernel/insn.c
+++ b/arch/arm64/kernel/insn.c
@@ -77,6 +77,14 @@ bool __kprobes aarch64_insn_is_nop(u32 insn)
        }
 }
 
+bool aarch64_insn_is_branch_imm(u32 insn)
+{
+       return (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn) ||
+               aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn) ||
+               aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
+               aarch64_insn_is_bcond(insn));
+}
+
 static DEFINE_SPINLOCK(patch_lock);
 
 static void __kprobes *patch_map(void *addr, int fixmap)
@@ -1057,6 +1065,58 @@ u32 aarch64_insn_gen_logical_shifted_reg(enum aarch64_insn_register dst,
        return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift);
 }
 
+/*
+ * Decode the imm field of a branch, and return the byte offset as a
+ * signed value (so it can be used when computing a new branch
+ * target).
+ */
+s32 aarch64_get_branch_offset(u32 insn)
+{
+       s32 imm;
+
+       if (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn)) {
+               imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_26, insn);
+               return (imm << 6) >> 4;
+       }
+
+       if (aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
+           aarch64_insn_is_bcond(insn)) {
+               imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_19, insn);
+               return (imm << 13) >> 11;
+       }
+
+       if (aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn)) {
+               imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_14, insn);
+               return (imm << 18) >> 16;
+       }
+
+       /* Unhandled instruction */
+       BUG();
+}
+
+/*
+ * Encode the displacement of a branch in the imm field and return the
+ * updated instruction.
+ */
+u32 aarch64_set_branch_offset(u32 insn, s32 offset)
+{
+       if (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn))
+               return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn,
+                                                    offset >> 2);
+
+       if (aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
+           aarch64_insn_is_bcond(insn))
+               return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
+                                                    offset >> 2);
+
+       if (aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn))
+               return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_14, insn,
+                                                    offset >> 2);
+
+       /* Unhandled instruction */
+       BUG();
+}
+
 bool aarch32_insn_is_wide(u32 insn)
 {
        return insn >= 0xe800;
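
A note on the immediate arithmetic (editorial): each branch encoding
stores its displacement in 4-byte units, so decoding must both
sign-extend the field and scale it to bytes. The paired shifts do both
at once: for imm26, "imm << 6" moves the field's sign bit (bit 25)
into bit 31, and the arithmetic ">> 4" sign-extends while leaving a
net shift of 2 bits to the left, i.e. a multiply by 4 (the IMM_19 and
IMM_14 cases follow the same pattern). Encoding is the reverse:
"offset >> 2" converts the byte offset back to words before it is
inserted into the immediate field. A standalone demonstration for the
B/BL case (the decode_b_offset()/encode_b_offset() helpers below are
illustrative stand-ins, not the kernel routines):

	#include <stdio.h>
	#include <stdint.h>

	/* imm26 lives in bits [25:0] and counts words (4 bytes). The
	 * unsigned cast keeps the left shift well-defined; the right
	 * shift of a negative value is arithmetic on the compilers
	 * the kernel supports. */
	static int32_t decode_b_offset(uint32_t insn)
	{
		int32_t imm = insn & 0x03ffffff;

		return (int32_t)((uint32_t)imm << 6) >> 4;
	}

	/* Insert a byte offset, divided back down to words, into the
	 * imm26 field of a B/BL opcode. */
	static uint32_t encode_b_offset(uint32_t insn, int32_t offset)
	{
		uint32_t imm = ((uint32_t)offset >> 2) & 0x03ffffff;

		return (insn & ~0x03ffffffu) | imm;
	}

	int main(void)
	{
		uint32_t insn = encode_b_offset(0x14000000, -8); /* b . - 8 */

		/* prints: insn = 0x17fffffe, offset = -8 */
		printf("insn = 0x%08x, offset = %d\n",
		       insn, decode_b_offset(insn));
		return 0;
	}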