
[Xen-changelog] [xen staging] x86/nospec: Use always_inline to fix code gen for evaluate_nospec



commit 8a74707a7c75d1eccfaeff6e71fe957c85976736
Author:     Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
AuthorDate: Tue Oct 15 10:14:51 2019 +0100
Commit:     Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
CommitDate: Wed Oct 30 13:07:25 2019 +0000

    x86/nospec: Use always_inline to fix code gen for evaluate_nospec
    
    evaluate_nospec() is incredibly fragile, and this is one giant bodge.
    
    To correctly protect jumps, the generated code needs to be of the form:
    
        cmp/test <cond>
        jcc 1f
        lfence
        ...
     1: lfence
        ...
    
    Critically, the lfence must be at the head of both basic blocks, later in the
    instruction stream than the conditional jump in need of protection.
    
    When the compiler chooses to out-of-line the condition calculation (e.g. by
    not inlining a predicate), the code layout can end up as:
    
     pred:
        lfence
        <calculate cond>
        ret
    
        call pred
        cmp $0, %eax
        jcc 1f
        ...
     1: ...
    
    which breaks the speculative safety, as the lfences are earlier in the
    instruction stream than the jump in need of protection.
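
    As a concrete illustration (a hypothetical sketch, not code from this
    patch), take a predicate built on evaluate_nospec().  Without
    always_inline, the compiler is free to emit it out of line, which hoists
    the lfence into the callee, ahead of the caller's conditional jump:

        /* Sketch only: "static inline" leaves the inlining decision to the
         * optimiser, so this can legitimately be compiled out of line. */
        static inline bool is_pv_domain(const struct domain *d)
        {
            return IS_ENABLED(CONFIG_PV) &&
                evaluate_nospec(!(d->options & XEN_DOMCTL_CDF_hvm));
        }

        void example(const struct domain *d)
        {
            if ( is_pv_domain(d) )      /* jump in need of protection */
                /* ... PV-only speculation window opens here ... */;
        }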
    
    Any use of evaluate_nospec() therefore requires that every static inline
    predicate built on top of it be declared always_inline, to deny the
    optimiser the flexibility to generate unsafe code.
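
    For reference, evaluate_nospec() itself has roughly the following shape (a
    simplified sketch of xen/include/asm-x86/nospec.h; in the real code the
    lfence is emitted via the alternatives machinery so it can be patched out
    when the mitigation is not wanted):

        static always_inline bool barrier_nospec_true(void)
        {
            asm volatile ( "lfence" ::: "memory" );
            return true;
        }

        /* Evaluate a condition with an lfence at the head of both outcomes. */
        static always_inline bool evaluate_nospec(bool condition)
        {
            return condition ? barrier_nospec_true()
                             : !barrier_nospec_true();
        }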
    
    Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
    Acked-by: Jan Beulich <jbeulich@xxxxxxxx>
    Release-acked-by: Juergen Gross <jgross@xxxxxxxx>
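
    For completeness: always_inline is Xen's wrapper around the compiler
    attribute, approximately the following (from xen/include/xen/compiler.h,
    modulo exact spelling):

        #define always_inline __inline__ __attribute__((__always_inline__))

    Unlike plain inline, which is only a hint the optimiser may ignore, the
    attribute forces inlining (and the compiler diagnoses cases where it
    cannot comply).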
---
 xen/arch/x86/domain.c               |  2 +-
 xen/arch/x86/pv/mm.h                | 12 ++++++------
 xen/include/asm-x86/event.h         |  2 +-
 xen/include/asm-x86/guest_pt.h      | 28 ++++++++++++++++------------
 xen/include/asm-x86/hvm/nestedhvm.h |  2 +-
 xen/include/asm-x86/paging.h        |  2 +-
 xen/include/xen/sched.h             | 20 ++++++++++----------
 7 files changed, 36 insertions(+), 32 deletions(-)

diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index c8d7f491ea..1b88cc2d68 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -1699,7 +1699,7 @@ static void _update_runstate_area(struct vcpu *v)
  * regular per-CPU GDT frame to appear with selectors at the appropriate
  * offset.
  */
-static inline bool need_full_gdt(const struct domain *d)
+static always_inline bool need_full_gdt(const struct domain *d)
 {
     return is_pv_domain(d) && !is_idle_domain(d);
 }
diff --git a/xen/arch/x86/pv/mm.h b/xen/arch/x86/pv/mm.h
index 2d427b418d..a1bd473b29 100644
--- a/xen/arch/x86/pv/mm.h
+++ b/xen/arch/x86/pv/mm.h
@@ -88,8 +88,8 @@ static inline bool update_intpte(intpte_t *p, intpte_t old, intpte_t new,
                   _t ## e_get_intpte(_o), _t ## e_get_intpte(_n),   \
                   (_m), (_v), (_ad))
 
-static inline l1_pgentry_t adjust_guest_l1e(l1_pgentry_t l1e,
-                                            const struct domain *d)
+static always_inline l1_pgentry_t adjust_guest_l1e(l1_pgentry_t l1e,
+                                                   const struct domain *d)
 {
     if ( likely(l1e_get_flags(l1e) & _PAGE_PRESENT) &&
          likely(!is_pv_32bit_domain(d)) )
@@ -120,8 +120,8 @@ static inline l2_pgentry_t adjust_guest_l2e(l2_pgentry_t l2e,
     return l2e;
 }
 
-static inline l3_pgentry_t adjust_guest_l3e(l3_pgentry_t l3e,
-                                            const struct domain *d)
+static always_inline l3_pgentry_t adjust_guest_l3e(l3_pgentry_t l3e,
+                                                   const struct domain *d)
 {
     if ( likely(l3e_get_flags(l3e) & _PAGE_PRESENT) )
         l3e_add_flags(l3e, (likely(!is_pv_32bit_domain(d))
@@ -140,8 +140,8 @@ static inline l3_pgentry_t unadjust_guest_l3e(l3_pgentry_t l3e,
     return l3e;
 }
 
-static inline l4_pgentry_t adjust_guest_l4e(l4_pgentry_t l4e,
-                                            const struct domain *d)
+static always_inline l4_pgentry_t adjust_guest_l4e(l4_pgentry_t l4e,
+                                                   const struct domain *d)
 {
     /*
      * When shadowing an L4 behind the guests back (e.g. for per-pcpu
diff --git a/xen/include/asm-x86/event.h b/xen/include/asm-x86/event.h
index 2f6ea54bcb..98a85233cb 100644
--- a/xen/include/asm-x86/event.h
+++ b/xen/include/asm-x86/event.h
@@ -20,7 +20,7 @@ static inline int vcpu_event_delivery_is_enabled(struct vcpu *v)
 }
 
 int hvm_local_events_need_delivery(struct vcpu *v);
-static inline int local_events_need_delivery(void)
+static always_inline bool local_events_need_delivery(void)
 {
     struct vcpu *v = current;
 
diff --git a/xen/include/asm-x86/guest_pt.h b/xen/include/asm-x86/guest_pt.h
index 8684b83fd6..6ab2041e48 100644
--- a/xen/include/asm-x86/guest_pt.h
+++ b/xen/include/asm-x86/guest_pt.h
@@ -202,7 +202,7 @@ static inline guest_l4e_t guest_l4e_from_gfn(gfn_t gfn, u32 flags)
 
 /* Which pagetable features are supported on this vcpu? */
 
-static inline bool guest_can_use_l2_superpages(const struct vcpu *v)
+static always_inline bool guest_can_use_l2_superpages(const struct vcpu *v)
 {
     /*
      * PV guests use Xen's paging settings.  Being 4-level, 2M
@@ -218,7 +218,7 @@ static inline bool guest_can_use_l2_superpages(const struct vcpu *v)
             (v->arch.hvm.guest_cr[4] & X86_CR4_PSE));
 }
 
-static inline bool guest_can_use_l3_superpages(const struct domain *d)
+static always_inline bool guest_can_use_l3_superpages(const struct domain *d)
 {
     /*
      * There are no control register settings for the hardware pagewalk on the
@@ -252,7 +252,7 @@ static inline bool guest_can_use_pse36(const struct domain *d)
     return paging_mode_hap(d) && cpu_has_pse36;
 }
 
-static inline bool guest_nx_enabled(const struct vcpu *v)
+static always_inline bool guest_nx_enabled(const struct vcpu *v)
 {
     if ( GUEST_PAGING_LEVELS == 2 ) /* NX has no effect without CR4.PAE. */
         return false;
@@ -261,23 +261,23 @@ static inline bool guest_nx_enabled(const struct vcpu *v)
     return is_pv_vcpu(v) ? cpu_has_nx : hvm_nx_enabled(v);
 }
 
-static inline bool guest_wp_enabled(const struct vcpu *v)
+static always_inline bool guest_wp_enabled(const struct vcpu *v)
 {
     /* PV guests can't control CR0.WP, and it is unconditionally set by Xen. */
     return is_pv_vcpu(v) || hvm_wp_enabled(v);
 }
 
-static inline bool guest_smep_enabled(const struct vcpu *v)
+static always_inline bool guest_smep_enabled(const struct vcpu *v)
 {
     return !is_pv_vcpu(v) && hvm_smep_enabled(v);
 }
 
-static inline bool guest_smap_enabled(const struct vcpu *v)
+static always_inline bool guest_smap_enabled(const struct vcpu *v)
 {
     return !is_pv_vcpu(v) && hvm_smap_enabled(v);
 }
 
-static inline bool guest_pku_enabled(const struct vcpu *v)
+static always_inline bool guest_pku_enabled(const struct vcpu *v)
 {
     return !is_pv_vcpu(v) && hvm_pku_enabled(v);
 }
@@ -285,19 +285,21 @@ static inline bool guest_pku_enabled(const struct vcpu *v)
 /* Helpers for identifying whether guest entries have reserved bits set. */
 
 /* Bits reserved because of maxphysaddr, and (lack of) EFER.NX */
-static inline uint64_t guest_rsvd_bits(const struct vcpu *v)
+static always_inline uint64_t guest_rsvd_bits(const struct vcpu *v)
 {
     return ((PADDR_MASK &
              ~((1ul << v->domain->arch.cpuid->extd.maxphysaddr) - 1)) |
             (guest_nx_enabled(v) ? 0 : put_pte_flags(_PAGE_NX_BIT)));
 }
 
-static inline bool guest_l1e_rsvd_bits(const struct vcpu *v, guest_l1e_t l1e)
+static always_inline bool guest_l1e_rsvd_bits(const struct vcpu *v,
+                                              guest_l1e_t l1e)
 {
     return l1e.l1 & (guest_rsvd_bits(v) | GUEST_L1_PAGETABLE_RSVD);
 }
 
-static inline bool guest_l2e_rsvd_bits(const struct vcpu *v, guest_l2e_t l2e)
+static always_inline bool guest_l2e_rsvd_bits(const struct vcpu *v,
+                                              guest_l2e_t l2e)
 {
     uint64_t rsvd_bits = guest_rsvd_bits(v);
 
@@ -311,7 +313,8 @@ static inline bool guest_l2e_rsvd_bits(const struct vcpu *v, guest_l2e_t l2e)
 }
 
 #if GUEST_PAGING_LEVELS >= 3
-static inline bool guest_l3e_rsvd_bits(const struct vcpu *v, guest_l3e_t l3e)
+static always_inline bool guest_l3e_rsvd_bits(const struct vcpu *v,
+                                              guest_l3e_t l3e)
 {
     return ((l3e.l3 & (guest_rsvd_bits(v) | GUEST_L3_PAGETABLE_RSVD |
                       (guest_can_use_l3_superpages(v->domain) ? 0 : _PAGE_PSE))) ||
@@ -320,7 +323,8 @@ static inline bool guest_l3e_rsvd_bits(const struct vcpu *v, guest_l3e_t l3e)
 }
 
 #if GUEST_PAGING_LEVELS >= 4
-static inline bool guest_l4e_rsvd_bits(const struct vcpu *v, guest_l4e_t l4e)
+static always_inline bool guest_l4e_rsvd_bits(const struct vcpu *v,
+                                              guest_l4e_t l4e)
 {
     return l4e.l4 & (guest_rsvd_bits(v) | GUEST_L4_PAGETABLE_RSVD |
                      ((v->domain->arch.cpuid->x86_vendor == X86_VENDOR_AMD)
diff --git a/xen/include/asm-x86/hvm/nestedhvm.h b/xen/include/asm-x86/hvm/nestedhvm.h
index e09fa9d47d..256fed733a 100644
--- a/xen/include/asm-x86/hvm/nestedhvm.h
+++ b/xen/include/asm-x86/hvm/nestedhvm.h
@@ -33,7 +33,7 @@ enum nestedhvm_vmexits {
 };
 
 /* Nested HVM on/off per domain */
-static inline bool nestedhvm_enabled(const struct domain *d)
+static always_inline bool nestedhvm_enabled(const struct domain *d)
 {
     return is_hvm_domain(d) && d->arch.hvm.params &&
         d->arch.hvm.params[HVM_PARAM_NESTEDHVM];
diff --git a/xen/include/asm-x86/paging.h b/xen/include/asm-x86/paging.h
index 8c2027c791..7544f73121 100644
--- a/xen/include/asm-x86/paging.h
+++ b/xen/include/asm-x86/paging.h
@@ -383,7 +383,7 @@ static inline bool gfn_valid(const struct domain *d, gfn_t gfn)
 }
 
 /* Maxphysaddr supportable by the paging infrastructure. */
-static inline unsigned int paging_max_paddr_bits(const struct domain *d)
+static always_inline unsigned int paging_max_paddr_bits(const struct domain *d)
 {
     unsigned int bits = paging_mode_hap(d) ? hap_paddr_bits : paddr_bits;
 
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index 629a4c52e0..9f7bc69293 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -968,50 +968,50 @@ void watchdog_domain_destroy(struct domain *d);
 
 #define VM_ASSIST(d, t) (test_bit(VMASST_TYPE_ ## t, &(d)->vm_assist))
 
-static inline bool is_pv_domain(const struct domain *d)
+static always_inline bool is_pv_domain(const struct domain *d)
 {
     return IS_ENABLED(CONFIG_PV) &&
         evaluate_nospec(!(d->options & XEN_DOMCTL_CDF_hvm));
 }
 
-static inline bool is_pv_vcpu(const struct vcpu *v)
+static always_inline bool is_pv_vcpu(const struct vcpu *v)
 {
     return is_pv_domain(v->domain);
 }
 
 #ifdef CONFIG_COMPAT
-static inline bool is_pv_32bit_domain(const struct domain *d)
+static always_inline bool is_pv_32bit_domain(const struct domain *d)
 {
     return is_pv_domain(d) && d->arch.is_32bit_pv;
 }
 
-static inline bool is_pv_32bit_vcpu(const struct vcpu *v)
+static always_inline bool is_pv_32bit_vcpu(const struct vcpu *v)
 {
     return is_pv_32bit_domain(v->domain);
 }
 
-static inline bool is_pv_64bit_domain(const struct domain *d)
+static always_inline bool is_pv_64bit_domain(const struct domain *d)
 {
     return is_pv_domain(d) && !d->arch.is_32bit_pv;
 }
 
-static inline bool is_pv_64bit_vcpu(const struct vcpu *v)
+static always_inline bool is_pv_64bit_vcpu(const struct vcpu *v)
 {
     return is_pv_64bit_domain(v->domain);
 }
 #endif
-static inline bool is_hvm_domain(const struct domain *d)
+static always_inline bool is_hvm_domain(const struct domain *d)
 {
     return IS_ENABLED(CONFIG_HVM) &&
         evaluate_nospec(d->options & XEN_DOMCTL_CDF_hvm);
 }
 
-static inline bool is_hvm_vcpu(const struct vcpu *v)
+static always_inline bool is_hvm_vcpu(const struct vcpu *v)
 {
     return is_hvm_domain(v->domain);
 }
 
-static inline bool hap_enabled(const struct domain *d)
+static always_inline bool hap_enabled(const struct domain *d)
 {
     /* sanitise_domain_config() rejects HAP && !HVM */
     return IS_ENABLED(CONFIG_HVM) &&
@@ -1034,7 +1034,7 @@ static inline bool is_xenstore_domain(const struct domain *d)
     return d->options & XEN_DOMCTL_CDF_xs_domain;
 }
 
-static inline bool is_iommu_enabled(const struct domain *d)
+static always_inline bool is_iommu_enabled(const struct domain *d)
 {
     return evaluate_nospec(d->options & XEN_DOMCTL_CDF_iommu);
 }
--
generated by git-patchbot for /home/xen/git/xen.git#staging
