
Re: [Xen-devel] [PATCH v1 4/6] x86/hvm: Add SMAP support to HVM guest




> -----Original Message-----
> From: Andrew Cooper [mailto:andrew.cooper3@xxxxxxxxxx]
> Sent: Tuesday, April 15, 2014 6:37 PM
> To: Wu, Feng
> Cc: JBeulich@xxxxxxxx; Ian.Campbell@xxxxxxxxxx; xen-devel@xxxxxxxxxxxxx; Dong, Eddie; Nakajima, Jun
> Subject: Re: [Xen-devel] [PATCH v1 4/6] x86/hvm: Add SMAP support to HVM guest
> 
> On 15/04/14 14:02, Feng Wu wrote:
> > New Intel CPUs support SMAP (Supervisor Mode Access Prevention).
> > SMAP prevents supervisor-mode accesses to any linear address with
> > a valid translation for which the U/S flag (bit 2) is 1 in every
> > paging-structure entry controlling the translation for the linear
> > address.
> >
> > This patch enables SMAP for HVM guests.
> >
> > Signed-off-by: Feng Wu <feng.wu@xxxxxxxxx>
> > ---
> >  xen/arch/x86/hvm/hvm.c        |  3 +++
> >  xen/arch/x86/mm/guest_walk.c  | 27 +++++++++++++++++++++++----
> >  xen/include/asm-x86/hvm/hvm.h | 13 +++++++++++++
> >  3 files changed, 39 insertions(+), 4 deletions(-)
> >
> > diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
> > index b0da8e7..b52476d 100644
> > --- a/xen/arch/x86/hvm/hvm.c
> > +++ b/xen/arch/x86/hvm/hvm.c
> > @@ -3036,6 +3036,9 @@ void hvm_cpuid(unsigned int input, unsigned int *eax, unsigned int *ebx,
> >          if ( (count == 0) && !cpu_has_smep )
> >              *ebx &= ~cpufeat_mask(X86_FEATURE_SMEP);
> >
> > +        if ( (count == 0) && !cpu_has_smap )
> > +            *ebx &= ~cpufeat_mask(X86_FEATURE_SMAP);
> > +
> >          /* Don't expose MPX to hvm when VMX support is not available */
> >          if ( (count == 0) &&
> >               (!(vmx_vmexit_control & VM_EXIT_CLEAR_BNDCFGS) ||
> > diff --git a/xen/arch/x86/mm/guest_walk.c b/xen/arch/x86/mm/guest_walk.c
> > index 70460b6..1d5f1fc 100644
> > --- a/xen/arch/x86/mm/guest_walk.c
> > +++ b/xen/arch/x86/mm/guest_walk.c
> > @@ -29,6 +29,7 @@
> >  #include <xen/sched.h>
> >  #include <asm/page.h>
> >  #include <asm/guest_pt.h>
> > +#include <asm/hvm/vmx/vmx.h>
> >
> >
> >  /* Flags that are needed in a pagetable entry, with the sense of NX inverted */
> > @@ -144,14 +145,18 @@ guest_walk_tables(struct vcpu *v, struct p2m_domain *p2m,
> >      guest_l4e_t *l4p;
> >  #endif
> >      uint32_t gflags, mflags, iflags, rc = 0;
> > -    int smep;
> > +    int smep, smap;
> >      bool_t pse1G = 0, pse2M = 0;
> > +    unsigned long sel = 0;
> > +    uint64_t eflags = guest_cpu_user_regs()->eflags;
> >      p2m_query_t qt = P2M_ALLOC | P2M_UNSHARE;
> >
> >      perfc_incr(guest_walk);
> >      memset(gw, 0, sizeof(*gw));
> >      gw->va = va;
> >
> > +    __vmread(GUEST_CS_SELECTOR, &sel);
> > +
> >      /* Mandatory bits that must be set in every entry.  We invert NX and
> >       * the invalid bits, to calculate as if there were an "X" bit that
> >       * allowed access.  We will accumulate, in rc, the set of flags that
> > @@ -165,7 +170,21 @@ guest_walk_tables(struct vcpu *v, struct p2m_domain *p2m,
> >       * whole walk as if it were a user-mode one and then invert the answer. */
> >      smep = (is_hvm_vcpu(v) && hvm_smep_enabled(v)
> >              && (pfec & PFEC_insn_fetch) && !(pfec & PFEC_user_mode) );
> > -    if ( smep )
> > +
> > +    /*
> > +     * SMAP: kernel-mode data accesses from user-mode mappings should fault.
> > +     * A fault is considered a SMAP violation if the following
> > +     * conditions come true:
> > +     *   - X86_CR4_SMAP is set in CR4
> > +     *   - A user page is accessed
> > +     *   - CPL = 3 or X86_EFLAGS_AC clear set)
> 
> "X86_EFLAGS_AC clear set" ? What do you mean by this?
> 
> ~Andrew

It should be "CPL = 3 or X86_EFLAGS_AC is clear", sorry for this typo.
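
In other words, the condition the code below implements can be read as
follows (a minimal restatement for clarity, not the literal patch text;
here the CPL is taken from the low two bits of the CS selector read via
__vmread above):

    /* A supervisor-mode data access is a candidate SMAP violation when
     * SMAP is enabled and either CPL == 3 (an implicit supervisor
     * access, e.g. a descriptor-table load) or EFLAGS.AC is clear. */
    smap = ( is_hvm_vcpu(v) && hvm_smap_enabled(v)
             && !(pfec & PFEC_user_mode)
             && (((sel & 3) == 3) || !(eflags & X86_EFLAGS_AC)) );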

> 
> > +     *   - Page fault in kernel mode
> > +     */
> > +    smap = ( is_hvm_vcpu(v) && hvm_smap_enabled(v)
> > +            && !(!((sel & 3) == 3) && (eflags & X86_EFLAGS_AC))
> > +            && !(pfec & PFEC_user_mode) );
> > +
> > +    if ( smep || smap )
> >          mflags |= _PAGE_USER;
> >
> >  #if GUEST_PAGING_LEVELS >= 3 /* PAE or 64... */
> > @@ -338,8 +357,8 @@ guest_walk_tables(struct vcpu *v, struct p2m_domain *p2m,
> >  #if GUEST_PAGING_LEVELS >= 4 /* 64-bit only... */
> >  set_ad:
> >  #endif
> > -    /* Now re-invert the user-mode requirement for SMEP. */
> > -    if ( smep )
> > +    /* Now re-invert the user-mode requirement for SMEP and SMAP */
> > +    if ( smep || smap )
> >          rc ^= _PAGE_USER;
> >
> >      /* Go back and set accessed and dirty bits only if the walk was a
> > diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
> > index dcc3483..b703b93 100644
> > --- a/xen/include/asm-x86/hvm/hvm.h
> > +++ b/xen/include/asm-x86/hvm/hvm.h
> > @@ -257,6 +257,8 @@ int hvm_girq_dest_2_vcpu_id(struct domain *d, uint8_t dest, uint8_t dest_mode);
> >      (hvm_paging_enabled(v) && ((v)->arch.hvm_vcpu.guest_cr[4] & X86_CR4_PAE))
> >  #define hvm_smep_enabled(v) \
> >      (hvm_paging_enabled(v) && ((v)->arch.hvm_vcpu.guest_cr[4] & X86_CR4_SMEP))
> > +#define hvm_smap_enabled(v) \
> > +    (hvm_paging_enabled(v) && ((v)->arch.hvm_vcpu.guest_cr[4] & X86_CR4_SMAP))
> >  #define hvm_nx_enabled(v) \
> >      (!!((v)->arch.hvm_vcpu.guest_efer & EFER_NX))
> >
> > @@ -363,6 +365,16 @@ static inline int hvm_event_pending(struct vcpu *v)
> >  #define HVM_CR4_HOST_MASK (mmu_cr4_features & \
> >      (X86_CR4_VMXE | X86_CR4_PAE | X86_CR4_MCE))
> >
> > +static inline bool_t hvm_cpuid_has_smap(void)
> > +{
> > +    unsigned int eax = 0, ebx = 0, ecx = 0, edx = 0;
> > +    unsigned int leaf = 0x7;
> > +
> > +    hvm_cpuid(leaf, &eax, &ebx, &ecx, &edx);
> > +
> > +    return !!(ebx & cpufeat_mask(X86_FEATURE_SMAP));
> > +}
> > +
> >  /* These bits in CR4 cannot be set by the guest. */
> >  #define HVM_CR4_GUEST_RESERVED_BITS(_v)                 \
> >      (~((unsigned long)                                  \
> > @@ -371,6 +383,7 @@ static inline int hvm_event_pending(struct vcpu *v)
> >          X86_CR4_MCE | X86_CR4_PGE | X86_CR4_PCE |       \
> >          X86_CR4_OSFXSR | X86_CR4_OSXMMEXCPT |           \
> >          (cpu_has_smep ? X86_CR4_SMEP : 0) |             \
> > +        (hvm_cpuid_has_smap() ? X86_CR4_SMAP : 0) |     \
> >          (cpu_has_fsgsbase ? X86_CR4_FSGSBASE : 0) |     \
> >          ((nestedhvm_enabled((_v)->domain) && cpu_has_vmx)\
> >                        ? X86_CR4_VMXE : 0)  |             \
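
One note on the last hunk above: HVM_CR4_GUEST_RESERVED_BITS(_v) is
consulted when the guest writes CR4, so gating X86_CR4_SMAP on
hvm_cpuid_has_smap() means the guest-visible CPUID policy (which the
toolstack may mask), rather than the raw host capability, decides
whether the guest may set CR4.SMAP. Roughly, the consuming code in
hvm_set_cr4() behaves like this (simplified sketch, not a verbatim
quote of hvm.c):

    if ( value & HVM_CR4_GUEST_RESERVED_BITS(v) )
    {
        /* e.g. the guest tries to set CR4.SMAP while SMAP is hidden
         * from its CPUID leaf 7: the write is rejected with #GP. */
        goto gpf;
    }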

Thanks,
Feng
