[xen master] x86/stack: CFI hardening



commit 954bb07fdb5fadf7e341f84c90e950ae9dbbabbf
Author:     Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
AuthorDate: Fri Oct 29 18:04:02 2021 +0100
Commit:     Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
CommitDate: Wed Feb 23 15:33:43 2022 +0000

    x86/stack: CFI hardening
    
    Control Flow Integrity schemes use toolchain and optionally hardware support
    to help protect against call/jump/return oriented programming attacks.
    
    Use cf_check to annotate function pointer targets for the toolchain.
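    
    For illustration, a minimal sketch of how such an annotation is
    typically wired up (the config name and exact definition below are
    assumptions, not necessarily Xen's):
    
        /*
         * With GCC's -fcf-protection=branch -mmanual-endbr, ENDBR64 is
         * emitted only at functions carrying the cf_check attribute,
         * i.e. at intended indirect call/jump targets.
         */
        #ifdef CONFIG_XEN_IBT                  /* assumed config name */
        # define cf_check __attribute__((cf_check))
        #else
        # define cf_check
        #endif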
    
    The function typecheck in switch_stack_and_jump() is incompatible with control
    flow typechecking.  It's ok for reset_stack_and_jump_ind(), but for
    reset_stack_and_jump(), it would force us to endbr64 the targets which are
    branched to directly.
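    
    Concretely, the typecheck being moved is the one-liner visible in the
    current.h hunk below; wrapped here as a hypothetical helper macro for
    the sake of illustration:
    
        /*
         * Compile-time check that fn is a void (*)(void): comparing it
         * against a typed null pointer fails to build otherwise.  Under
         * control flow typechecking, the comparison also requires fn's
         * type (cf_check attribute included) to match, which would force
         * reset_stack_and_jump()'s directly-branched-to targets to gain
         * an unnecessary ENDBR64.
         */
        #define typecheck_noreturn_fn(fn) \
            ((void)((fn) == (void (*)(void))NULL))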
    
    Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
    Acked-by: Jan Beulich <jbeulich@xxxxxxxx>
---
 xen/arch/x86/domain.c                  | 6 +++---
 xen/arch/x86/hvm/svm/svm.c             | 6 +++---
 xen/arch/x86/hvm/vmx/vmcs.c            | 2 +-
 xen/arch/x86/hvm/vmx/vmx.c             | 8 ++++----
 xen/arch/x86/include/asm/current.h     | 6 ++++--
 xen/arch/x86/include/asm/hvm/vmx/vmx.h | 2 +-
 xen/arch/x86/include/asm/pv/domain.h   | 4 ++--
 xen/arch/x86/pv/domain.c               | 2 +-
 xen/arch/x86/x86_64/entry.S            | 1 +
 9 files changed, 20 insertions(+), 17 deletions(-)

diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index 55df2a23f8..a5048ed654 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -132,7 +132,7 @@ void play_dead(void)
         dead_idle();
 }
 
-static void noreturn idle_loop(void)
+static void noreturn cf_check idle_loop(void)
 {
     unsigned int cpu = smp_processor_id();
     /*
@@ -1791,7 +1791,7 @@ static void save_segments(struct vcpu *v)
     }
 }
 
-void paravirt_ctxt_switch_from(struct vcpu *v)
+void cf_check paravirt_ctxt_switch_from(struct vcpu *v)
 {
     save_segments(v);
 
@@ -1805,7 +1805,7 @@ void paravirt_ctxt_switch_from(struct vcpu *v)
         write_debugreg(7, 0);
 }
 
-void paravirt_ctxt_switch_to(struct vcpu *v)
+void cf_check paravirt_ctxt_switch_to(struct vcpu *v)
 {
     root_pgentry_t *root_pgt = this_cpu(root_pgt);
 
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index dedb2848e6..63535a74b5 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -944,7 +944,7 @@ static inline void svm_tsc_ratio_load(struct vcpu *v)
         wrmsrl(MSR_AMD64_TSC_RATIO, hvm_tsc_scaling_ratio(v->domain));
 }
 
-static void svm_ctxt_switch_from(struct vcpu *v)
+static void cf_check svm_ctxt_switch_from(struct vcpu *v)
 {
     int cpu = smp_processor_id();
 
@@ -969,7 +969,7 @@ static void svm_ctxt_switch_from(struct vcpu *v)
     enable_each_ist(idt_tables[cpu]);
 }
 
-static void svm_ctxt_switch_to(struct vcpu *v)
+static void cf_check svm_ctxt_switch_to(struct vcpu *v)
 {
     struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
     int cpu = smp_processor_id();
@@ -996,7 +996,7 @@ static void svm_ctxt_switch_to(struct vcpu *v)
         wrmsr_tsc_aux(v->arch.msrs->tsc_aux);
 }
 
-static void noreturn svm_do_resume(void)
+static void noreturn cf_check svm_do_resume(void)
 {
     struct vcpu *v = current;
     struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
diff --git a/xen/arch/x86/hvm/vmx/vmcs.c b/xen/arch/x86/hvm/vmx/vmcs.c
index 60b506ac3f..e1e1fa14e6 100644
--- a/xen/arch/x86/hvm/vmx/vmcs.c
+++ b/xen/arch/x86/hvm/vmx/vmcs.c
@@ -1865,7 +1865,7 @@ void vmx_vmentry_failure(void)
 
 void noreturn vmx_asm_do_vmentry(void);
 
-void vmx_do_resume(void)
+void cf_check vmx_do_resume(void)
 {
     struct vcpu *v = current;
     bool_t debug_state;
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 2c4804f9b8..41db538a9e 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -63,8 +63,8 @@
 static bool_t __initdata opt_force_ept;
 boolean_param("force-ept", opt_force_ept);
 
-static void vmx_ctxt_switch_from(struct vcpu *v);
-static void vmx_ctxt_switch_to(struct vcpu *v);
+static void cf_check vmx_ctxt_switch_from(struct vcpu *v);
+static void cf_check vmx_ctxt_switch_to(struct vcpu *v);
 
 static int alloc_vlapic_mapping(void);
 static void vmx_install_vlapic_mapping(struct vcpu *v);
@@ -907,7 +907,7 @@ static void cf_check vmx_fpu_leave(struct vcpu *v)
     }
 }
 
-static void vmx_ctxt_switch_from(struct vcpu *v)
+static void cf_check vmx_ctxt_switch_from(struct vcpu *v)
 {
     /*
      * Return early if trying to do a context switch without VMX enabled,
@@ -939,7 +939,7 @@ static void vmx_ctxt_switch_from(struct vcpu *v)
         vmx_pi_switch_from(v);
 }
 
-static void vmx_ctxt_switch_to(struct vcpu *v)
+static void cf_check vmx_ctxt_switch_to(struct vcpu *v)
 {
     vmx_restore_guest_msrs(v);
     vmx_restore_dr(v);
diff --git a/xen/arch/x86/include/asm/current.h b/xen/arch/x86/include/asm/current.h
index dc0edd9ed0..da5e152a10 100644
--- a/xen/arch/x86/include/asm/current.h
+++ b/xen/arch/x86/include/asm/current.h
@@ -173,7 +173,6 @@ unsigned long get_stack_dump_bottom (unsigned long sp);
 #define switch_stack_and_jump(fn, instr, constr)                        \
     ({                                                                  \
         unsigned int tmp;                                               \
-        (void)((fn) == (void (*)(void))NULL);                           \
         BUILD_BUG_ON(!ssaj_has_attr_noreturn(fn));                      \
         __asm__ __volatile__ (                                          \
             SHADOW_STACK_WORK                                           \
@@ -198,7 +197,10 @@ unsigned long get_stack_dump_bottom (unsigned long sp);
 
 /* The constraint may only specify non-call-clobbered registers. */
 #define reset_stack_and_jump_ind(fn)                                    \
-    switch_stack_and_jump(fn, "INDIRECT_JMP %", "b")
+    ({                                                                  \
+        (void)((fn) == (void (*)(void))NULL);                           \
+        switch_stack_and_jump(fn, "INDIRECT_JMP %", "b");               \
+    })
 
 /*
  * Which VCPU's state is currently running on each CPU?
diff --git a/xen/arch/x86/include/asm/hvm/vmx/vmx.h b/xen/arch/x86/include/asm/hvm/vmx/vmx.h
index 5284fe931f..c2ebdd6864 100644
--- a/xen/arch/x86/include/asm/hvm/vmx/vmx.h
+++ b/xen/arch/x86/include/asm/hvm/vmx/vmx.h
@@ -93,7 +93,7 @@ typedef enum {
 
 void vmx_asm_vmexit_handler(struct cpu_user_regs);
 void vmx_intr_assist(void);
-void noreturn vmx_do_resume(void);
+void noreturn cf_check vmx_do_resume(void);
 void vmx_vlapic_msr_changed(struct vcpu *v);
 struct hvm_emulate_ctxt;
 void vmx_realmode_emulate_one(struct hvm_emulate_ctxt *hvmemul_ctxt);
diff --git a/xen/arch/x86/include/asm/pv/domain.h b/xen/arch/x86/include/asm/pv/domain.h
index 6b16da9d18..924508bbb4 100644
--- a/xen/arch/x86/include/asm/pv/domain.h
+++ b/xen/arch/x86/include/asm/pv/domain.h
@@ -118,8 +118,8 @@ static inline void pv_set_reg(struct vcpu *v, unsigned int reg, uint64_t val)
 
 #endif /* CONFIG_PV */
 
-void paravirt_ctxt_switch_from(struct vcpu *v);
-void paravirt_ctxt_switch_to(struct vcpu *v);
+void cf_check paravirt_ctxt_switch_from(struct vcpu *v);
+void cf_check paravirt_ctxt_switch_to(struct vcpu *v);
 
 #endif /* __X86_PV_DOMAIN_H__ */
 
diff --git a/xen/arch/x86/pv/domain.c b/xen/arch/x86/pv/domain.c
index 55146c15c8..f94f28c8e2 100644
--- a/xen/arch/x86/pv/domain.c
+++ b/xen/arch/x86/pv/domain.c
@@ -351,7 +351,7 @@ void pv_domain_destroy(struct domain *d)
     FREE_XENHEAP_PAGE(d->arch.pv.gdt_ldt_l1tab);
 }
 
-void noreturn continue_pv_domain(void);
+void noreturn cf_check continue_pv_domain(void);
 
 int pv_domain_initialise(struct domain *d)
 {
diff --git a/xen/arch/x86/x86_64/entry.S b/xen/arch/x86/x86_64/entry.S
index 3eaf0e67b2..8494b97a54 100644
--- a/xen/arch/x86/x86_64/entry.S
+++ b/xen/arch/x86/x86_64/entry.S
@@ -625,6 +625,7 @@ ENTRY(dom_crash_sync_extable)
 /* No special register assumptions. */
 #ifdef CONFIG_PV
 ENTRY(continue_pv_domain)
+        ENDBR64
         call  check_wakeup_from_wait
 ret_from_intr:
         GET_CURRENT(bx)
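
The entry.S hunk pairs with the pv/domain.c change above: continue_pv_domain
is a tail entered via reset_stack_and_jump_ind(), i.e. by indirect jump, so
under CET-IBT its entry point must begin with ENDBR64.  A hedged sketch of
the shape of that dispatch (names simplified, not the exact Xen call site):

    void noreturn cf_check continue_pv_domain(void);

    static void noreturn context_switch_tail(void)
    {
        /* Indirect jump: the target must start with ENDBR64 under IBT. */
        reset_stack_and_jump_ind(continue_pv_domain);
    }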
--
generated by git-patchbot for /home/xen/git/xen.git#master