
[Xen-devel] [PATCH v1 5/9] arm64/insn: introduce aarch64_insn_gen_{nop|branch_imm}() helper functions



This is copied from Linux 4.7, where it was introduced by commit
5c5bf25d4f7a950382f94fc120a5818197b48fe9
("arm64: introduce aarch64_insn_gen_{nop|branch_imm}() helper functions").

This lays the groundwork for Livepatch to generate the trampoline
to jump to the new replacement function, and also allows us to NOP
out the call sites.
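
As an illustration (not part of this patch), a Livepatch apply/revert
path could use the new helpers roughly as below. The function name,
error handling and the final write are a hypothetical sketch only:

    #include <xen/errno.h>
    #include <xen/types.h>
    #include <asm/arm64/insn.h>

    static int livepatch_example_patch(uint32_t *dest, void *new_fn, bool nop)
    {
        uint32_t insn;

        if ( nop )
            /* NOP out a call site (encoded as HINT #0). */
            insn = aarch64_insn_gen_nop();
        else
            /* "B <new_fn>": unconditional branch, +/-128MB reach. */
            insn = aarch64_insn_gen_branch_imm((unsigned long)dest,
                                               (unsigned long)new_fn,
                                               AARCH64_INSN_BRANCH_NOLINK);

        if ( insn == AARCH64_BREAK_FAULT )
            return -EINVAL;

        /*
         * Placeholder: the real code would write this through the proper
         * text-patching path and do I-cache maintenance afterwards.
         */
        *(volatile uint32_t *)dest = insn;

        return 0;
    }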

Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@xxxxxxxxxx>
--
Cc: Ross Lagerwall <ross.lagerwall@xxxxxxxxxx>
Cc: Stefano Stabellini <sstabellini@xxxxxxxxxx>
Cc: Julien Grall <julien.grall@xxxxxxx>

RFC: First submission
v1: Copy the full aarch64_insn_gen_branch_imm() implementation instead of
    just the code needed to generate a branch
---
 xen/arch/arm/arm64/insn.c        | 61 ++++++++++++++++++++++++++++++++++++++++
 xen/include/asm-arm/arm64/insn.h | 23 +++++++++++++++
 2 files changed, 84 insertions(+)

diff --git a/xen/arch/arm/arm64/insn.c b/xen/arch/arm/arm64/insn.c
index 12b4d96..c5f7e93 100644
--- a/xen/arch/arm/arm64/insn.c
+++ b/xen/arch/arm/arm64/insn.c
@@ -157,6 +157,67 @@ u32 __kprobes aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type,
        return insn;
 }
 
+static inline long branch_imm_common(unsigned long pc, unsigned long addr,
+                                    long range)
+{
+       long offset;
+
+       if ((pc & 0x3) || (addr & 0x3)) {
+               pr_err("%s: A64 instructions must be word aligned\n", __func__);
+               return range;
+       }
+
+       offset = ((long)addr - (long)pc);
+
+       if (offset < -range || offset >= range) {
+               pr_err("%s: offset out of range\n", __func__);
+               return range;
+       }
+
+       return offset;
+}
+
+u32 __kprobes aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr,
+                                         enum aarch64_insn_branch_type type)
+{
+       u32 insn;
+       long offset;
+
+       /*
+        * B/BL support [-128M, 128M) offset
+        * ARM64 virtual address arrangement guarantees all kernel and module
+        * texts are within +/-128M.
+        */
+       offset = branch_imm_common(pc, addr, SZ_128M);
+       if (offset >= SZ_128M)
+               return AARCH64_BREAK_FAULT;
+
+       switch (type) {
+       case AARCH64_INSN_BRANCH_LINK:
+               insn = aarch64_insn_get_bl_value();
+               break;
+       case AARCH64_INSN_BRANCH_NOLINK:
+               insn = aarch64_insn_get_b_value();
+               break;
+       default:
+               pr_err("%s: unknown branch encoding %d\n", __func__, type);
+               return AARCH64_BREAK_FAULT;
+       }
+
+       return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn,
+                                            offset >> 2);
+}
+
+u32 __kprobes aarch64_insn_gen_hint(enum aarch64_insn_hint_op op)
+{
+       return aarch64_insn_get_hint_value() | op;
+}
+
+u32 __kprobes aarch64_insn_gen_nop(void)
+{
+       return aarch64_insn_gen_hint(AARCH64_INSN_HINT_NOP);
+}
+
 /*
  * Decode the imm field of a branch, and return the byte offset as a
  * signed value (so it can be used when computing a new branch
diff --git a/xen/include/asm-arm/arm64/insn.h b/xen/include/asm-arm/arm64/insn.h
index 6ce37be..c8362e5 100644
--- a/xen/include/asm-arm/arm64/insn.h
+++ b/xen/include/asm-arm/arm64/insn.h
@@ -23,6 +23,15 @@
 #include <xen/types.h>
 #include <xen/stdbool.h>
 
+enum aarch64_insn_hint_op {
+       AARCH64_INSN_HINT_NOP   = 0x0 << 5,
+       AARCH64_INSN_HINT_YIELD = 0x1 << 5,
+       AARCH64_INSN_HINT_WFE   = 0x2 << 5,
+       AARCH64_INSN_HINT_WFI   = 0x3 << 5,
+       AARCH64_INSN_HINT_SEV   = 0x4 << 5,
+       AARCH64_INSN_HINT_SEVL  = 0x5 << 5,
+};
+
 enum aarch64_insn_imm_type {
        AARCH64_INSN_IMM_ADR,
        AARCH64_INSN_IMM_26,
@@ -38,6 +47,14 @@ enum aarch64_insn_imm_type {
        AARCH64_INSN_IMM_MAX
 };
 
+enum aarch64_insn_branch_type {
+       AARCH64_INSN_BRANCH_NOLINK,
+       AARCH64_INSN_BRANCH_LINK,
+       AARCH64_INSN_BRANCH_RETURN,
+       AARCH64_INSN_BRANCH_COMP_ZERO,
+       AARCH64_INSN_BRANCH_COMP_NONZERO,
+};
+
 #define        __AARCH64_INSN_FUNCS(abbr, mask, val)   \
 static always_inline bool_t aarch64_insn_is_##abbr(u32 code) \
 { return (code & (mask)) == (val); } \
@@ -51,6 +68,7 @@ __AARCH64_INSN_FUNCS(cbnz,    0x7F000000, 0x35000000)
 __AARCH64_INSN_FUNCS(tbz,      0x7F000000, 0x36000000)
 __AARCH64_INSN_FUNCS(tbnz,     0x7F000000, 0x37000000)
 __AARCH64_INSN_FUNCS(bcond,    0xFF000010, 0x54000000)
+__AARCH64_INSN_FUNCS(hint,     0xFFFFF01F, 0xD503201F)
 
 bool aarch64_insn_is_branch_imm(u32 insn);
 
@@ -61,6 +79,11 @@ u32 aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type,
 s32 aarch64_get_branch_offset(u32 insn);
 u32 aarch64_set_branch_offset(u32 insn, s32 offset);
 
+u32 aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr,
+                               enum aarch64_insn_branch_type type);
+u32 aarch64_insn_gen_hint(enum aarch64_insn_hint_op op);
+u32 aarch64_insn_gen_nop(void);
+
 /* Wrapper for common code */
 static inline bool insn_is_branch_imm(u32 insn)
 {
-- 
2.4.11

