
RE: [PATCH 2/3] x86/mtrr: move epte_get_entry_emt to p2m-ept.c


  • To: Roger Pau Monne <roger.pau@xxxxxxxxxx>, "xen-devel@xxxxxxxxxxxxxxxxxxxx" <xen-devel@xxxxxxxxxxxxxxxxxxxx>
  • From: "Tian, Kevin" <kevin.tian@xxxxxxxxx>
  • Date: Thu, 17 Jun 2021 09:15:33 +0000
  • Accept-language: en-US
  • Cc: Jan Beulich <jbeulich@xxxxxxxx>, "Cooper, Andrew" <andrew.cooper3@xxxxxxxxxx>, Wei Liu <wl@xxxxxxx>, "Nakajima, Jun" <jun.nakajima@xxxxxxxxx>, George Dunlap <george.dunlap@xxxxxxxxxx>
  • Delivery-date: Thu, 17 Jun 2021 09:15:56 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xenproject.org>
  • Thread-topic: [PATCH 2/3] x86/mtrr: move epte_get_entry_emt to p2m-ept.c

> From: Roger Pau Monne <roger.pau@xxxxxxxxxx>
> Sent: Saturday, May 29, 2021 1:40 AM
> 
> This is an EPT-specific function, so it shouldn't live in the generic
> mtrr file. The move is also needed for future work that will require
> passing a p2m_type_t parameter to epte_get_entry_emt, and making that
> type visible to the mtrr users is cumbersome and unneeded.
> 
> Moving epte_get_entry_emt out of mtrr.c requires making public the
> helper that gets the MTRR type of an address from the mtrr state.
> While there, rename the function to start with the mtrr prefix, like
> other mtrr-related functions.
> 
> While there, also fix some of the function parameter types.
> 
> No functional change intended.
> 
> Signed-off-by: Roger Pau Monné <roger.pau@xxxxxxxxxx>

Reviewed-by: Kevin Tian <kevin.tian@xxxxxxxxx>

> ---
>  xen/arch/x86/hvm/mtrr.c           | 107 +---------------------------
>  xen/arch/x86/hvm/vmx/vmx.c        |   4 +-
>  xen/arch/x86/mm/p2m-ept.c         | 114 ++++++++++++++++++++++++++++--
>  xen/include/asm-x86/hvm/vmx/vmx.h |   2 +
>  xen/include/asm-x86/mtrr.h        |   5 +-
>  5 files changed, 118 insertions(+), 114 deletions(-)
> 
> diff --git a/xen/arch/x86/hvm/mtrr.c b/xen/arch/x86/hvm/mtrr.c
> index 82ded1635c..4a9f3177ed 100644
> --- a/xen/arch/x86/hvm/mtrr.c
> +++ b/xen/arch/x86/hvm/mtrr.c
> @@ -194,8 +194,7 @@ void hvm_vcpu_cacheattr_destroy(struct vcpu *v)
>   * May return a negative value when order > 0, indicating to the caller
>   * that the respective mapping needs splitting.
>   */
> -static int get_mtrr_type(const struct mtrr_state *m,
> -                         paddr_t pa, unsigned int order)
> +int mtrr_get_type(const struct mtrr_state *m, paddr_t pa, unsigned int order)
>  {
>     uint8_t     overlap_mtrr = 0;
>     uint8_t     overlap_mtrr_pos = 0;
> @@ -323,7 +322,7 @@ static uint8_t effective_mm_type(struct mtrr_state *m,
>       * just use it
>       */
>      if ( gmtrr_mtype == NO_HARDCODE_MEM_TYPE )
> -        mtrr_mtype = get_mtrr_type(m, gpa, 0);
> +        mtrr_mtype = mtrr_get_type(m, gpa, 0);
>      else
>          mtrr_mtype = gmtrr_mtype;
> 
> @@ -350,7 +349,7 @@ uint32_t get_pat_flags(struct vcpu *v,
>      guest_eff_mm_type = effective_mm_type(g, pat, gpaddr,
>                                            gl1e_flags, gmtrr_mtype);
>      /* 2. Get the memory type of host physical address, with MTRR */
> -    shadow_mtrr_type = get_mtrr_type(&mtrr_state, spaddr, 0);
> +    shadow_mtrr_type = mtrr_get_type(&mtrr_state, spaddr, 0);
> 
>      /* 3. Find the memory type in PAT, with host MTRR memory type
>       * and guest effective memory type.
> @@ -789,106 +788,6 @@ void memory_type_changed(struct domain *d)
>      }
>  }
> 
> -int epte_get_entry_emt(struct domain *d, unsigned long gfn, mfn_t mfn,
> -                       unsigned int order, uint8_t *ipat, bool_t direct_mmio)
> -{
> -    int gmtrr_mtype, hmtrr_mtype;
> -    struct vcpu *v = current;
> -    unsigned long i;
> -
> -    *ipat = 0;
> -
> -    if ( v->domain != d )
> -        v = d->vcpu ? d->vcpu[0] : NULL;
> -
> -    /* Mask, not add, for order so it works with INVALID_MFN on unmapping */
> -    if ( rangeset_overlaps_range(mmio_ro_ranges, mfn_x(mfn),
> -                                 mfn_x(mfn) | ((1UL << order) - 1)) )
> -    {
> -        if ( !order || rangeset_contains_range(mmio_ro_ranges, mfn_x(mfn),
> -                                               mfn_x(mfn) | ((1UL << order) - 1)) )
> -        {
> -            *ipat = 1;
> -            return MTRR_TYPE_UNCACHABLE;
> -        }
> -        /* Force invalid memory type so resolve_misconfig() will split it */
> -        return -1;
> -    }
> -
> -    if ( !mfn_valid(mfn) )
> -    {
> -        *ipat = 1;
> -        return MTRR_TYPE_UNCACHABLE;
> -    }
> -
> -    if ( !direct_mmio && !is_iommu_enabled(d) && !cache_flush_permitted(d) )
> -    {
> -        *ipat = 1;
> -        return MTRR_TYPE_WRBACK;
> -    }
> -
> -    for ( i = 0; i < (1ul << order); i++ )
> -    {
> -        if ( is_special_page(mfn_to_page(mfn_add(mfn, i))) )
> -        {
> -            if ( order )
> -                return -1;
> -            *ipat = 1;
> -            return MTRR_TYPE_WRBACK;
> -        }
> -    }
> -
> -    if ( direct_mmio )
> -        return MTRR_TYPE_UNCACHABLE;
> -
> -    gmtrr_mtype = hvm_get_mem_pinned_cacheattr(d, _gfn(gfn), order);
> -    if ( gmtrr_mtype >= 0 )
> -    {
> -        *ipat = 1;
> -        return gmtrr_mtype != PAT_TYPE_UC_MINUS ? gmtrr_mtype
> -                                                : MTRR_TYPE_UNCACHABLE;
> -    }
> -    if ( gmtrr_mtype == -EADDRNOTAVAIL )
> -        return -1;
> -
> -    gmtrr_mtype = v ? get_mtrr_type(&v->arch.hvm.mtrr, gfn << PAGE_SHIFT, order)
> -                    : MTRR_TYPE_WRBACK;
> -    hmtrr_mtype = get_mtrr_type(&mtrr_state, mfn_x(mfn) << PAGE_SHIFT, order);
> -    if ( gmtrr_mtype < 0 || hmtrr_mtype < 0 )
> -        return -1;
> -
> -    /* If both types match we're fine. */
> -    if ( likely(gmtrr_mtype == hmtrr_mtype) )
> -        return hmtrr_mtype;
> -
> -    /* If either type is UC, we have to go with that one. */
> -    if ( gmtrr_mtype == MTRR_TYPE_UNCACHABLE ||
> -         hmtrr_mtype == MTRR_TYPE_UNCACHABLE )
> -        return MTRR_TYPE_UNCACHABLE;
> -
> -    /* If either type is WB, we have to go with the other one. */
> -    if ( gmtrr_mtype == MTRR_TYPE_WRBACK )
> -        return hmtrr_mtype;
> -    if ( hmtrr_mtype == MTRR_TYPE_WRBACK )
> -        return gmtrr_mtype;
> -
> -    /*
> -     * At this point we have disagreeing WC, WT, or WP types. The only
> -     * combination that can be cleanly resolved is WT:WP. The ones involving
> -     * WC need to be converted to UC, both due to the memory ordering
> -     * differences and because WC disallows reads to be cached (WT and WP
> -     * permit this), while WT and WP require writes to go straight to memory
> -     * (WC can buffer them).
> -     */
> -    if ( (gmtrr_mtype == MTRR_TYPE_WRTHROUGH &&
> -          hmtrr_mtype == MTRR_TYPE_WRPROT) ||
> -         (gmtrr_mtype == MTRR_TYPE_WRPROT &&
> -          hmtrr_mtype == MTRR_TYPE_WRTHROUGH) )
> -        return MTRR_TYPE_WRPROT;
> -
> -    return MTRR_TYPE_UNCACHABLE;
> -}
> -
>  /*
>   * Local variables:
>   * mode: C
> diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
> index 7e3e67fdc3..0d4b47681b 100644
> --- a/xen/arch/x86/hvm/vmx/vmx.c
> +++ b/xen/arch/x86/hvm/vmx/vmx.c
> @@ -417,12 +417,12 @@ static int vmx_domain_initialise(struct domain *d)
>  static void domain_creation_finished(struct domain *d)
>  {
>      gfn_t gfn = gaddr_to_gfn(APIC_DEFAULT_PHYS_BASE);
> -    uint8_t ipat;
> +    bool ipat;
> 
>      if ( !has_vlapic(d) || mfn_eq(apic_access_mfn, INVALID_MFN) )
>          return;
> 
> -    ASSERT(epte_get_entry_emt(d, gfn_x(gfn), apic_access_mfn, 0, &ipat,
> +    ASSERT(epte_get_entry_emt(d, gfn, apic_access_mfn, 0, &ipat,
>                                true) == MTRR_TYPE_WRBACK);
>      ASSERT(ipat);
> 
> diff --git a/xen/arch/x86/mm/p2m-ept.c b/xen/arch/x86/mm/p2m-ept.c
> index a3beaf91e2..f1d1d07e92 100644
> --- a/xen/arch/x86/mm/p2m-ept.c
> +++ b/xen/arch/x86/mm/p2m-ept.c
> @@ -20,6 +20,7 @@
>  #include <public/hvm/dm_op.h>
>  #include <asm/altp2m.h>
>  #include <asm/current.h>
> +#include <asm/iocap.h>
>  #include <asm/paging.h>
>  #include <asm/types.h>
>  #include <asm/domain.h>
> @@ -485,6 +486,108 @@ static int ept_invalidate_emt_range(struct p2m_domain *p2m,
>      return rc;
>  }
> 
> +int epte_get_entry_emt(struct domain *d, gfn_t gfn, mfn_t mfn,
> +                       unsigned int order, bool *ipat, bool direct_mmio)
> +{
> +    int gmtrr_mtype, hmtrr_mtype;
> +    struct vcpu *v = current;
> +    unsigned long i;
> +
> +    *ipat = false;
> +
> +    if ( v->domain != d )
> +        v = d->vcpu ? d->vcpu[0] : NULL;
> +
> +    /* Mask, not add, for order so it works with INVALID_MFN on unmapping */
> +    if ( rangeset_overlaps_range(mmio_ro_ranges, mfn_x(mfn),
> +                                 mfn_x(mfn) | ((1UL << order) - 1)) )
> +    {
> +        if ( !order || rangeset_contains_range(mmio_ro_ranges, mfn_x(mfn),
> +                                               mfn_x(mfn) | ((1UL << order) - 1)) )
> +        {
> +            *ipat = true;
> +            return MTRR_TYPE_UNCACHABLE;
> +        }
> +        /* Force invalid memory type so resolve_misconfig() will split it */
> +        return -1;
> +    }
> +
> +    if ( !mfn_valid(mfn) )
> +    {
> +        *ipat = true;
> +        return MTRR_TYPE_UNCACHABLE;
> +    }
> +
> +    if ( !direct_mmio && !is_iommu_enabled(d) && !cache_flush_permitted(d) )
> +    {
> +        *ipat = true;
> +        return MTRR_TYPE_WRBACK;
> +    }
> +
> +    for ( i = 0; i < (1ul << order); i++ )
> +    {
> +        if ( is_special_page(mfn_to_page(mfn_add(mfn, i))) )
> +        {
> +            if ( order )
> +                return -1;
> +            *ipat = true;
> +            return MTRR_TYPE_WRBACK;
> +        }
> +    }
> +
> +    if ( direct_mmio )
> +        return MTRR_TYPE_UNCACHABLE;
> +
> +    gmtrr_mtype = hvm_get_mem_pinned_cacheattr(d, gfn, order);
> +    if ( gmtrr_mtype >= 0 )
> +    {
> +        *ipat = true;
> +        return gmtrr_mtype != PAT_TYPE_UC_MINUS ? gmtrr_mtype
> +                                                : MTRR_TYPE_UNCACHABLE;
> +    }
> +    if ( gmtrr_mtype == -EADDRNOTAVAIL )
> +        return -1;
> +
> +    gmtrr_mtype = v ? mtrr_get_type(&v->arch.hvm.mtrr,
> +                                    gfn_x(gfn) << PAGE_SHIFT, order)
> +                    : MTRR_TYPE_WRBACK;
> +    hmtrr_mtype = mtrr_get_type(&mtrr_state, mfn_x(mfn) << PAGE_SHIFT,
> +                                order);
> +    if ( gmtrr_mtype < 0 || hmtrr_mtype < 0 )
> +        return -1;
> +
> +    /* If both types match we're fine. */
> +    if ( likely(gmtrr_mtype == hmtrr_mtype) )
> +        return hmtrr_mtype;
> +
> +    /* If either type is UC, we have to go with that one. */
> +    if ( gmtrr_mtype == MTRR_TYPE_UNCACHABLE ||
> +         hmtrr_mtype == MTRR_TYPE_UNCACHABLE )
> +        return MTRR_TYPE_UNCACHABLE;
> +
> +    /* If either type is WB, we have to go with the other one. */
> +    if ( gmtrr_mtype == MTRR_TYPE_WRBACK )
> +        return hmtrr_mtype;
> +    if ( hmtrr_mtype == MTRR_TYPE_WRBACK )
> +        return gmtrr_mtype;
> +
> +    /*
> +     * At this point we have disagreeing WC, WT, or WP types. The only
> +     * combination that can be cleanly resolved is WT:WP. The ones involving
> +     * WC need to be converted to UC, both due to the memory ordering
> +     * differences and because WC disallows reads to be cached (WT and WP
> +     * permit this), while WT and WP require writes to go straight to memory
> +     * (WC can buffer them).
> +     */
> +    if ( (gmtrr_mtype == MTRR_TYPE_WRTHROUGH &&
> +          hmtrr_mtype == MTRR_TYPE_WRPROT) ||
> +         (gmtrr_mtype == MTRR_TYPE_WRPROT &&
> +          hmtrr_mtype == MTRR_TYPE_WRTHROUGH) )
> +        return MTRR_TYPE_WRPROT;
> +
> +    return MTRR_TYPE_UNCACHABLE;
> +}
> +
>  /*
>   * Resolve deliberately mis-configured (EMT field set to an invalid value)
>   * entries in the page table hierarchy for the given GFN:
> @@ -519,7 +622,7 @@ static int resolve_misconfig(struct p2m_domain *p2m, unsigned long gfn)
> 
>          if ( level == 0 || is_epte_superpage(&e) )
>          {
> -            uint8_t ipat = 0;
> +            bool ipat;
> 
>              if ( e.emt != MTRR_NUM_TYPES )
>                  break;
> @@ -535,7 +638,7 @@ static int resolve_misconfig(struct p2m_domain *p2m, unsigned long gfn)
>                          e.emt = 0;
>                      if ( !is_epte_valid(&e) || !is_epte_present(&e) )
>                          continue;
> -                    e.emt = epte_get_entry_emt(p2m->domain, gfn + i,
> +                    e.emt = epte_get_entry_emt(p2m->domain, _gfn(gfn + i),
>                                                 _mfn(e.mfn), 0, &ipat,
>                                                 e.sa_p2mt == p2m_mmio_direct);
>                      e.ipat = ipat;
> @@ -553,7 +656,8 @@ static int resolve_misconfig(struct p2m_domain *p2m, unsigned long gfn)
>              }
>              else
>              {
> -                int emt = epte_get_entry_emt(p2m->domain, gfn, _mfn(e.mfn),
> +                int emt = epte_get_entry_emt(p2m->domain, _gfn(gfn),
> +                                             _mfn(e.mfn),
>                                               level * EPT_TABLE_ORDER, &ipat,
>                                               e.sa_p2mt == p2m_mmio_direct);
>                  bool_t recalc = e.recalc;
> @@ -788,8 +892,8 @@ ept_set_entry(struct p2m_domain *p2m, gfn_t gfn_, mfn_t mfn,
> 
>      if ( mfn_valid(mfn) || p2m_allows_invalid_mfn(p2mt) )
>      {
> -        uint8_t ipat = 0;
> -        int emt = epte_get_entry_emt(p2m->domain, gfn, mfn,
> +        bool ipat;
> +        int emt = epte_get_entry_emt(p2m->domain, _gfn(gfn), mfn,
>                                       i * EPT_TABLE_ORDER, &ipat,
>                                       p2mt == p2m_mmio_direct);
> 
> diff --git a/xen/include/asm-x86/hvm/vmx/vmx.h b/xen/include/asm-x86/hvm/vmx/vmx.h
> index 534e9fc221..f668ee1f09 100644
> --- a/xen/include/asm-x86/hvm/vmx/vmx.h
> +++ b/xen/include/asm-x86/hvm/vmx/vmx.h
> @@ -599,6 +599,8 @@ void ept_p2m_uninit(struct p2m_domain *p2m);
> 
>  void ept_walk_table(struct domain *d, unsigned long gfn);
>  bool_t ept_handle_misconfig(uint64_t gpa);
> +int epte_get_entry_emt(struct domain *d, gfn_t gfn, mfn_t mfn,
> +                       unsigned int order, bool *ipat, bool direct_mmio);
>  void setup_ept_dump(void);
>  void p2m_init_altp2m_ept(struct domain *d, unsigned int i);
>  /* Locate an alternate p2m by its EPTP */
> diff --git a/xen/include/asm-x86/mtrr.h b/xen/include/asm-x86/mtrr.h
> index 24e5de5c22..e0fd1005ce 100644
> --- a/xen/include/asm-x86/mtrr.h
> +++ b/xen/include/asm-x86/mtrr.h
> @@ -72,12 +72,11 @@ extern int mtrr_add_page(unsigned long base, unsigned long size,
>                           unsigned int type, char increment);
>  extern int mtrr_del(int reg, unsigned long base, unsigned long size);
>  extern int mtrr_del_page(int reg, unsigned long base, unsigned long size);
> +extern int mtrr_get_type(const struct mtrr_state *m, paddr_t pa,
> +                         unsigned int order);
>  extern void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi);
>  extern u32 get_pat_flags(struct vcpu *v, u32 gl1e_flags, paddr_t gpaddr,
>                    paddr_t spaddr, uint8_t gmtrr_mtype);
> -extern int epte_get_entry_emt(struct domain *, unsigned long gfn, mfn_t mfn,
> -                              unsigned int order, uint8_t *ipat,
> -                              bool_t direct_mmio);
>  extern unsigned char pat_type_2_pte_flags(unsigned char pat_type);
>  extern int hold_mtrr_updates_on_aps;
>  extern void mtrr_aps_sync_begin(void);
> --
> 2.31.1
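
[Editorial note] A minimal usage sketch, not part of the patch, showing how a
caller looks after this change: the prototype moves behind asm/hvm/vmx/vmx.h
and takes a gfn_t plus a bool *ipat, as in the vmx.c hunk quoted above. The
helper name example_probe_emt is hypothetical and exists only for illustration.

    #include <asm/hvm/vmx/vmx.h>   /* epte_get_entry_emt() is declared here now */
    #include <asm/mtrr.h>          /* MTRR_TYPE_* memory type values */

    /* Hypothetical caller, sketching the post-patch call pattern only. */
    static int example_probe_emt(struct domain *d, gfn_t gfn, mfn_t mfn)
    {
        bool ipat;                 /* was uint8_t before this patch */
        int emt = epte_get_entry_emt(d, gfn, mfn, 0 /* order */, &ipat,
                                     false /* direct_mmio */);

        if ( emt < 0 )
            return -1;             /* invalid EMT: mapping needs splitting */

        return emt;                /* one of the MTRR_TYPE_* values */
    }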


 

